Lines Matching +full:atomic +full:- +full:threshold +full:- +full:us

1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
33 /*-
152 * * The MLD subsystem lock ends up being system-wide for the moment,
153 * but could be per-VIMAGE later on.
158 * * MLD_LOCK covers per-link state and any global variables in this file.
160 * per-link state iterators.
192 * to a vnet in ifp->if_vnet.
200 (pin6)->s6_addr16[1] = htons((zoneid) & 0xFFFF) \
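The assignment above is part of the MLD_EMBEDSCOPE() helper used near the end of this listing: it stores the 16-bit zone (interface) index into the second 16-bit word of a link-local or interface-local multicast address, the KAME-style embedded-scope convention. A minimal userland sketch of the same byte manipulation; it mirrors the macro's htons() store into s6_addr16[1], written against s6_addr so it compiles outside the kernel:

#include <netinet/in.h>		/* struct in6_addr */
#include <stdint.h>

/*
 * Sketch: embed a zone (interface) index into bytes 2-3 of an IPv6
 * address in network byte order, equivalent to
 * "(pin6)->s6_addr16[1] = htons((zoneid) & 0xFFFF)".
 */
static void
embed_scope_sketch(struct in6_addr *pin6, uint16_t zoneid)
{
	pin6->s6_addr[2] = zoneid >> 8;
	pin6->s6_addr[3] = zoneid & 0xff;
}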
203 * VIMAGE-wide globals.
228 "Rate limit for MLDv2 Group-and-Source queries in seconds");
231 * Non-virtualized sysctls.
235 "Per-interface MLDv2 state");
262 * Router Alert hop-by-hop option header.
269 .ip6or_len = IP6OPT_RTALERT_LEN - 2,
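Context for the "- 2" above: IP6OPT_RTALERT_LEN counts the whole Router Alert TLV (type, length and the 2-byte value defined by RFC 2711), while the option's ip6or_len field carries only the length of the option data, so the two bytes of type and length are subtracted, leaving 2. For MLD traffic the 2-byte value is 0, "datagram contains a Multicast Listener Discovery message".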
281 m->m_pkthdr.PH_loc.ptr = ifp->if_vnet; in mld_save_context()
283 m->m_pkthdr.rcvif = ifp; in mld_save_context()
284 m->m_pkthdr.flowid = ifp->if_index; in mld_save_context()
291 m->m_pkthdr.PH_loc.ptr = NULL; in mld_scrub_context()
292 m->m_pkthdr.flowid = 0; in mld_scrub_context()
307 KASSERT(curvnet == m->m_pkthdr.PH_loc.ptr, in mld_restore_context()
309 __func__, curvnet, m->m_pkthdr.PH_loc.ptr)); in mld_restore_context()
311 return (m->m_pkthdr.flowid); in mld_restore_context()
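These three helpers borrow otherwise-unused pkthdr fields to carry VIMAGE context across the delay between building a report and transmitting it. A hedged sketch of the usage pattern; the dispatch-side call sites (in mld_dispatch_packet()) are elided from this listing, so the ifnet_byindex() lookup here is an assumption:

	/* When a report is built and queued (sketch): */
	mld_save_context(m, ifp);		/* vnet into PH_loc.ptr, if_index into flowid */
	mbufq_enqueue(mq, m);

	/* When the queue is drained for transmission (sketch): */
	ifindex = mld_restore_context(m);	/* asserts curvnet matches, returns if_index */
	ifp = ifnet_byindex(ifindex);		/* assumed lookup at dispatch time */
	mld_scrub_context(m);			/* clear the borrowed fields before output */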
315 * Retrieve or set threshold between group-source queries in seconds.
335 if (error || !req->newptr) in sysctl_mld_gsr()
338 if (i < -1 || i >= 60) { in sysctl_mld_gsr()
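The two fragments above are the interesting parts of the handler behind the group-and-source rate-limit sysctl: copy the value through sysctl_handle_int(), bail out on error or on a read-only access (no new value supplied), then range-check before committing. A minimal sketch of that shape for kernel context; the backing variable is illustrative rather than the file's own, and the real handler additionally serializes the update as described in the locking notes near the top of the file:

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

static int gsr_threshold_secs = 10;		/* illustrative backing store */

static int
sysctl_gsr_threshold_sketch(SYSCTL_HANDLER_ARGS)
{
	int error, secs;

	secs = gsr_threshold_secs;
	error = sysctl_handle_int(oidp, &secs, 0, req);
	if (error || !req->newptr)		/* error, or read-only access */
		return (error);
	if (secs < -1 || secs >= 60)		/* same bounds as the fragment above */
		return (EINVAL);
	gsr_threshold_secs = secs;		/* commit the validated threshold */
	return (0);
}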
372 if (req->newptr != NULL) in sysctl_mld_ifinfo()
393 if (ifp == mli->mli_ifp) { in sysctl_mld_ifinfo()
396 info.mli_version = mli->mli_version; in sysctl_mld_ifinfo()
397 info.mli_v1_timer = mli->mli_v1_timer; in sysctl_mld_ifinfo()
398 info.mli_v2_timer = mli->mli_v2_timer; in sysctl_mld_ifinfo()
399 info.mli_flags = mli->mli_flags; in sysctl_mld_ifinfo()
400 info.mli_rv = mli->mli_rv; in sysctl_mld_ifinfo()
401 info.mli_qi = mli->mli_qi; in sysctl_mld_ifinfo()
402 info.mli_qri = mli->mli_qri; in sysctl_mld_ifinfo()
403 info.mli_uri = mli->mli_uri; in sysctl_mld_ifinfo()
429 if (--limit == 0) in mld_dispatch_queue()
437 * Reports are ALWAYS suppressed for ALL-HOSTS (ff02::1)
438 * and node-local addresses. However, kernel and socket consumers
444 * should be suppressed, or non-zero if reports should be issued.
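The comment above describes the policy implemented by mld_is_addr_reported(), which is called in several places later in this listing: listening state for the link-scope all-nodes group and for interface-local (node-local) groups is never reported on the wire. A self-contained sketch of that check, written against the raw address bytes so it builds outside the kernel; the in-kernel helper uses its own scope macros:

#include <netinet/in.h>		/* struct in6_addr, IN6_ARE_ADDR_EQUAL */
#include <string.h>

/* Return 0 if reports for this group must be suppressed, non-zero otherwise. */
static int
is_addr_reported_sketch(const struct in6_addr *group)
{
	struct in6_addr allnodes;

	/* ff01::/16, interface-local (node-local) scope: never reported. */
	if (group->s6_addr[0] == 0xff && (group->s6_addr[1] & 0x0f) == 0x01)
		return (0);

	/* ff02::1, the link-scope all-nodes group: never reported. */
	memset(&allnodes, 0, sizeof(allnodes));
	allnodes.s6_addr[0] = 0xff;
	allnodes.s6_addr[1] = 0x02;
	allnodes.s6_addr[15] = 0x01;
	if (IN6_ARE_ADDR_EQUAL(group, &allnodes))
		return (0);

	return (1);
}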
477 mli->mli_ifp = ifp; in mld_domifattach()
478 mli->mli_version = MLD_VERSION_2; in mld_domifattach()
479 mli->mli_flags = 0; in mld_domifattach()
480 mli->mli_rv = MLD_RV_INIT; in mld_domifattach()
481 mli->mli_qi = MLD_QI_INIT; in mld_domifattach()
482 mli->mli_qri = MLD_QRI_INIT; in mld_domifattach()
483 mli->mli_uri = MLD_URI_INIT; in mld_domifattach()
484 mbufq_init(&mli->mli_gq, MLD_MAX_RESPONSE_PACKETS); in mld_domifattach()
485 if ((ifp->if_flags & IFF_MULTICAST) == 0) in mld_domifattach()
486 mli->mli_flags |= MLIF_SILENT; in mld_domifattach()
488 mli->mli_flags |= MLIF_USEALLOW; in mld_domifattach()
502 * Run before link-layer cleanup; clean up groups, but do not free MLD state.
529 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { in mld_ifdetach()
535 if (mli->mli_version == MLD_VERSION_2) { in mld_ifdetach()
542 if (inm->in6m_state == MLD_LEAVING_MEMBER) { in mld_ifdetach()
543 inm->in6m_state = MLD_NOT_MEMBER; in mld_ifdetach()
555 * Runs after link-layer cleanup; free MLD state.
582 if (mli->mli_ifp == ifp) { in mli_delete_locked()
586 mbufq_drain(&mli->mli_gq); in mli_delete_locked()
597 * Process a received MLDv1 general or address-specific query.
622 ip6_sprintf(ip6tbuf, &mld->mld_addr), in mld_v1_input_query()
629 * a router's link-local address. in mld_v1_input_query()
631 if (!IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) { in mld_v1_input_query()
633 ip6_sprintf(ip6tbuf, &ip6->ip6_src), in mld_v1_input_query()
642 if (IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) { in mld_v1_input_query()
645 * If this was not sent to the all-nodes group, ignore it. in mld_v1_input_query()
649 dst = ip6->ip6_dst; in mld_v1_input_query()
659 in6_setscope(&mld->mld_addr, ifp, NULL); in mld_v1_input_query()
672 timer = (ntohs(mld->mld_maxdelay) * MLD_FASTHZ) / MLD_TIMER_SCALE; in mld_v1_input_query()
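For scale: the MLDv1 Maximum Response Delay is carried in milliseconds, so with the values these constants commonly have in mld6_var.h (MLD_TIMER_SCALE = 1000, MLD_FASTHZ = 5 fast-timer ticks per second; the definitions are not part of this listing), a default delay of 10000 ms becomes (10000 * 5) / 1000 = 50 ticks, i.e. the full 10 seconds expressed in fast-timeout units.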
683 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { in mld_v1_input_query()
691 * MLDv1 Group-Specific Query. in mld_v1_input_query()
692 * If this is a group-specific MLDv1 query, we need only in mld_v1_input_query()
695 inm = in6m_lookup_locked(ifp, &mld->mld_addr); in mld_v1_input_query()
698 ip6_sprintf(ip6tbuf, &mld->mld_addr), in mld_v1_input_query()
703 in6_clearscope(&mld->mld_addr); in mld_v1_input_query()
717 * below the threshold, reset it.
722 * for group and group-source query responses.
735 ip6_sprintf(ip6tbuf, &inm->in6m_addr), in mld_v1_update_group()
736 if_name(inm->in6m_ifp), timer); in mld_v1_update_group()
740 switch (inm->in6m_state) { in mld_v1_update_group()
745 if (inm->in6m_timer != 0 && in mld_v1_update_group()
746 inm->in6m_timer <= timer) { in mld_v1_update_group()
757 CTR1(KTR_MLD, "%s: ->REPORTING", __func__); in mld_v1_update_group()
758 inm->in6m_state = MLD_REPORTING_MEMBER; in mld_v1_update_group()
759 inm->in6m_timer = MLD_RANDOM_DELAY(timer); in mld_v1_update_group()
763 CTR1(KTR_MLD, "%s: ->AWAKENING", __func__); in mld_v1_update_group()
764 inm->in6m_state = MLD_AWAKENING_MEMBER; in mld_v1_update_group()
772 * Process a received MLDv2 general, group-specific or
773 * group-and-source-specific query.
798 ip6_sprintf(ip6tbuf, &ip6->ip6_src), in mld_v2_input_query()
805 * a router's link-local address. in mld_v2_input_query()
807 if (!IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) { in mld_v2_input_query()
809 ip6_sprintf(ip6tbuf, &ip6->ip6_src), in mld_v2_input_query()
818 maxdelay = ntohs(mld->mld_maxdelay); /* in 1/10ths of a second */ in mld_v2_input_query()
827 qrv = MLD_QRV(mld->mld_misc); in mld_v2_input_query()
834 qqi = mld->mld_qqi; in mld_v2_input_query()
836 qqi = MLD_QQIC_MANT(mld->mld_qqi) << in mld_v2_input_query()
837 (MLD_QQIC_EXP(mld->mld_qqi) + 3); in mld_v2_input_query()
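The two branches above implement RFC 3810's "floating point" encoding of the Querier's Query Interval Code: values below 128 are taken literally, and larger values unpack a 3-bit exponent and 4-bit mantissa as (mant | 0x10) << (exp + 3). A small standalone decoder showing the same arithmetic; the MLD_QQIC_* macro definitions are not part of this listing, so the bit layout here is taken from the RFC:

#include <stdint.h>
#include <stdio.h>

/* Decode an MLDv2 QQIC field into seconds, per RFC 3810 section 5.1.9. */
static unsigned int
decode_qqic(uint8_t qqic)
{
	unsigned int exponent, mantissa;

	if (qqic < 128)
		return (qqic);
	exponent = (qqic >> 4) & 0x07;		/* 3-bit exponent */
	mantissa = qqic & 0x0f;			/* 4-bit mantissa */
	return ((mantissa | 0x10) << (exponent + 3));
}

int
main(void)
{
	/* 0x80 decodes to 128 s, continuing where the linear range (0..127) ends;
	 * 0x96 (exponent 1, mantissa 6) decodes to (6 | 16) << 4 = 352 s. */
	printf("%u %u\n", decode_qqic(0x80), decode_qqic(0x96));
	return (0);
}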
840 nsrc = ntohs(mld->mld_numsrc); in mld_v2_input_query()
851 if (IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) { in mld_v2_input_query()
865 in6_setscope(&mld->mld_addr, ifp, NULL); in mld_v2_input_query()
879 if (mli->mli_version != MLD_VERSION_2) in mld_v2_input_query()
883 mli->mli_rv = qrv; in mld_v2_input_query()
884 mli->mli_qi = qqi; in mld_v2_input_query()
885 mli->mli_qri = maxdelay; in mld_v2_input_query()
894 * Schedule a current-state report on this ifp for in mld_v2_input_query()
904 if (mli->mli_v2_timer == 0 || mli->mli_v2_timer >= timer) { in mld_v2_input_query()
905 mli->mli_v2_timer = MLD_RANDOM_DELAY(timer); in mld_v2_input_query()
910 * MLDv2 Group-specific or Group-and-source-specific Query. in mld_v2_input_query()
912 * Group-source-specific queries are throttled on in mld_v2_input_query()
913 * a per-group basis to defeat denial-of-service attempts. in mld_v2_input_query()
917 inm = in6m_lookup_locked(ifp, &mld->mld_addr); in mld_v2_input_query()
921 if (!ratecheck(&inm->in6m_lastgsrtv, in mld_v2_input_query()
935 * group-specific or group-and-source query. in mld_v2_input_query()
937 if (mli->mli_v2_timer == 0 || mli->mli_v2_timer >= timer) in mld_v2_input_query()
941 in6_clearscope(&mld->mld_addr); in mld_v2_input_query()
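The per-group throttling a few lines above relies on ratecheck(9): it returns non-zero only when at least V_mld_gsrdelay has elapsed since the timestamp in in6m_lastgsrtv, updating the timestamp as a side effect, so at most one group-and-source response is scheduled per group per threshold interval and earlier arrivals are simply ignored. The generic shape of the pattern, with an illustrative 10-second interval:

/* Sketch of the ratecheck(9) throttle pattern (kernel context). */
static struct timeval last_event;			/* per-object timestamp */
static const struct timeval min_interval = { 10, 0 };	/* illustrative threshold */

static int
throttled_event_sketch(void)
{
	if (!ratecheck(&last_event, &min_interval))
		return (0);	/* too soon since the last event: suppress it */
	/* Interval elapsed; last_event has been updated, proceed. */
	return (1);
}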
952 * Process a received MLDv2 group-specific or group-and-source-specific
968 switch (inm->in6m_state) { in mld_v2_process_group_query()
984 nsrc = ntohs(mld->mld_numsrc); in mld_v2_process_group_query()
987 KASSERT((m0->m_flags & M_PKTHDR) == 0 || in mld_v2_process_group_query()
988 m0->m_pkthdr.len >= off + sizeof(struct mldv2_query) + in mld_v2_process_group_query()
991 m0->m_pkthdr.len, off + sizeof(struct mldv2_query) + in mld_v2_process_group_query()
995 * Deal with group-specific queries upfront. in mld_v2_process_group_query()
997 * source-list state if it exists, and schedule a query response in mld_v2_process_group_query()
998 * for this group-specific query. in mld_v2_process_group_query()
1001 if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER || in mld_v2_process_group_query()
1002 inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER) { in mld_v2_process_group_query()
1004 timer = min(inm->in6m_timer, timer); in mld_v2_process_group_query()
1006 inm->in6m_state = MLD_G_QUERY_PENDING_MEMBER; in mld_v2_process_group_query()
1007 inm->in6m_timer = MLD_RANDOM_DELAY(timer); in mld_v2_process_group_query()
1013 * Deal with the case where a group-and-source-specific query has in mld_v2_process_group_query()
1014 * been received but a group-specific query is already pending. in mld_v2_process_group_query()
1016 if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER) { in mld_v2_process_group_query()
1017 timer = min(inm->in6m_timer, timer); in mld_v2_process_group_query()
1018 inm->in6m_timer = MLD_RANDOM_DELAY(timer); in mld_v2_process_group_query()
1024 * Finally, deal with the case where a group-and-source-specific in mld_v2_process_group_query()
1025 * query has been received, where a response to a previous g-s-r in mld_v2_process_group_query()
1027 * In this case, we need to parse the source-list which the Querier in mld_v2_process_group_query()
1028 * has provided us with and check if we have any source list filter in mld_v2_process_group_query()
1031 * If we do, we must record them and schedule a current-state in mld_v2_process_group_query()
1034 if (inm->in6m_nsrc > 0) { in mld_v2_process_group_query()
1053 inm->in6m_state = MLD_SG_QUERY_PENDING_MEMBER; in mld_v2_process_group_query()
1054 inm->in6m_timer = MLD_RANDOM_DELAY(timer); in mld_v2_process_group_query()
1084 ip6_sprintf(ip6tbuf, &mld->mld_addr), in mld_v1_input_report()
1089 if (ifp->if_flags & IFF_LOOPBACK) in mld_v1_input_report()
1093 * MLDv1 reports must originate from a host's link-local address, in mld_v1_input_report()
1096 src = ip6->ip6_src; in mld_v1_input_report()
1100 ip6_sprintf(ip6tbuf, &ip6->ip6_src), in mld_v1_input_report()
1109 dst = ip6->ip6_dst; in mld_v1_input_report()
1111 if (!IN6_IS_ADDR_MULTICAST(&mld->mld_addr) || in mld_v1_input_report()
1112 !IN6_ARE_ADDR_EQUAL(&mld->mld_addr, &dst)) { in mld_v1_input_report()
1114 ip6_sprintf(ip6tbuf, &ip6->ip6_dst), in mld_v1_input_report()
1122 * group. Assume we used the link-local address if available, in mld_v1_input_report()
1127 * performed for the on-wire address. in mld_v1_input_report()
1130 if ((ia && IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, IA6_IN6(ia))) || in mld_v1_input_report()
1133 ifa_free(&ia->ia_ifa); in mld_v1_input_report()
1137 ifa_free(&ia->ia_ifa); in mld_v1_input_report()
1140 ip6_sprintf(ip6tbuf, &mld->mld_addr), ifp, if_name(ifp)); in mld_v1_input_report()
1146 if (!IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) in mld_v1_input_report()
1147 in6_setscope(&mld->mld_addr, ifp, NULL); in mld_v1_input_report()
1158 inm = in6m_lookup_locked(ifp, &mld->mld_addr); in mld_v1_input_report()
1162 mli = inm->in6m_mli; in mld_v1_input_report()
1170 if (mli->mli_version == MLD_VERSION_2) in mld_v1_input_report()
1173 inm->in6m_timer = 0; in mld_v1_input_report()
1175 switch (inm->in6m_state) { in mld_v1_input_report()
1185 ip6_sprintf(ip6tbuf, &mld->mld_addr), in mld_v1_input_report()
1188 inm->in6m_state = MLD_LAZY_MEMBER; in mld_v1_input_report()
1202 in6_clearscope(&mld->mld_addr); in mld_v1_input_report()
1230 ifp = m->m_pkthdr.rcvif; in mld_input()
1233 if (m->m_len < off + sizeof(*mld)) { in mld_input()
1241 if (mld->mld_type == MLD_LISTENER_QUERY && in mld_input()
1247 if (m->m_len < off + mldlen) { in mld_input()
1262 switch (mld->mld_type) { in mld_input()
1321 * Fast timeout handler (per-vnet).
1328 struct mbufq scq; /* State-change packets */ in mld_fasttimo_vnet()
1359 if (mli->mli_v2_timer == 0) { in mld_fasttimo_vnet()
1361 } else if (--mli->mli_v2_timer == 0) { in mld_fasttimo_vnet()
1379 * MLD host report and state-change timer processing. in mld_fasttimo_vnet()
1383 ifp = mli->mli_ifp; in mld_fasttimo_vnet()
1385 if (mli->mli_version == MLD_VERSION_2) { in mld_fasttimo_vnet()
1386 uri_fasthz = MLD_RANDOM_DELAY(mli->mli_uri * in mld_fasttimo_vnet()
1393 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { in mld_fasttimo_vnet()
1397 switch (mli->mli_version) { in mld_fasttimo_vnet()
1409 switch (mli->mli_version) { in mld_fasttimo_vnet()
1449 if (inm->in6m_timer == 0) { in mld_v1_process_group_timer()
1451 } else if (--inm->in6m_timer == 0) { in mld_v1_process_group_timer()
1458 switch (inm->in6m_state) { in mld_v1_process_group_timer()
1468 inm->in6m_state = MLD_IDLE_MEMBER; in mld_v1_process_group_timer()
1504 * timer active. This is a no-op in this function; it is easier in mld_v2_process_group_timers()
1505 * to deal with it here than to complicate the slow-timeout path. in mld_v2_process_group_timers()
1507 if (inm->in6m_timer == 0) { in mld_v2_process_group_timers()
1509 } else if (--inm->in6m_timer == 0) { in mld_v2_process_group_timers()
1515 if (inm->in6m_sctimer == 0) { in mld_v2_process_group_timers()
1517 } else if (--inm->in6m_sctimer == 0) { in mld_v2_process_group_timers()
1528 switch (inm->in6m_state) { in mld_v2_process_group_timers()
1539 * Respond to a previously pending Group-Specific in mld_v2_process_group_timers()
1540 * or Group-and-Source-Specific query by enqueueing in mld_v2_process_group_timers()
1541 * the appropriate Current-State report for in mld_v2_process_group_timers()
1548 (inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER), in mld_v2_process_group_timers()
1552 inm->in6m_state = MLD_REPORTING_MEMBER; in mld_v2_process_group_timers()
1560 * State-change retransmission timer fired. in mld_v2_process_group_timers()
1562 * set the global pending state-change flag, and in mld_v2_process_group_timers()
1565 if (--inm->in6m_scrv > 0) { in mld_v2_process_group_timers()
1566 inm->in6m_sctimer = uri_fasthz; in mld_v2_process_group_timers()
1570 * Retransmit the previously computed state-change in mld_v2_process_group_timers()
1574 * a state-change. in mld_v2_process_group_timers()
1579 CTR3(KTR_MLD, "%s: T1 -> T0 for %s/%s", __func__, in mld_v2_process_group_timers()
1580 ip6_sprintf(ip6tbuf, &inm->in6m_addr), in mld_v2_process_group_timers()
1581 if_name(inm->in6m_ifp)); in mld_v2_process_group_timers()
1590 if (inm->in6m_state == MLD_LEAVING_MEMBER && in mld_v2_process_group_timers()
1591 inm->in6m_scrv == 0) { in mld_v2_process_group_timers()
1592 inm->in6m_state = MLD_NOT_MEMBER; in mld_v2_process_group_timers()
1613 version, mli->mli_ifp, if_name(mli->mli_ifp)); in mld_set_version()
1620 old_version_timer = (mli->mli_rv * mli->mli_qi) + mli->mli_qri; in mld_set_version()
1622 mli->mli_v1_timer = old_version_timer; in mld_set_version()
1625 if (mli->mli_v1_timer > 0 && mli->mli_version != MLD_VERSION_1) { in mld_set_version()
1626 mli->mli_version = MLD_VERSION_1; in mld_set_version()
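The expression above is the "Older Version Querier Present" timeout of RFC 2710/3810: Robustness Variable * Query Interval + Query Response Interval. With the conventional defaults carried by MLD_RV_INIT, MLD_QI_INIT and MLD_QRI_INIT (2, 125 s and 10 s per the RFCs; the macro values themselves are not part of this listing), that works out to 2 * 125 + 10 = 260 seconds of MLDv1 compatibility after the last MLDv1 query is heard, counted down by the slow-timeout path shown further below.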
1633 * joined on it; state-change, general-query, and group-query timers.
1645 mli->mli_ifp, if_name(mli->mli_ifp)); in mld_v2_cancel_link_timers()
1652 * Fast-track this potentially expensive operation in mld_v2_cancel_link_timers()
1660 mli->mli_v2_timer = 0; in mld_v2_cancel_link_timers()
1662 ifp = mli->mli_ifp; in mld_v2_cancel_link_timers()
1666 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { in mld_v2_cancel_link_timers()
1670 switch (inm->in6m_state) { in mld_v2_cancel_link_timers()
1684 if (inm->in6m_refcount == 1) in mld_v2_cancel_link_timers()
1693 inm->in6m_sctimer = 0; in mld_v2_cancel_link_timers()
1694 inm->in6m_timer = 0; in mld_v2_cancel_link_timers()
1695 inm->in6m_state = MLD_REPORTING_MEMBER; in mld_v2_cancel_link_timers()
1697 * Free any pending MLDv2 state-change records. in mld_v2_cancel_link_timers()
1699 mbufq_drain(&inm->in6m_scq); in mld_v2_cancel_link_timers()
1730 * Per-vnet slowtimo handler.
1756 if (mli->mli_version != MLD_VERSION_2 && --mli->mli_v1_timer == 0) { in mld_v1_process_querier_timers()
1761 "%s: transition from v%d -> v%d on %p(%s)", in mld_v1_process_querier_timers()
1762 __func__, mli->mli_version, MLD_VERSION_2, in mld_v1_process_querier_timers()
1763 mli->mli_ifp, if_name(mli->mli_ifp)); in mld_v1_process_querier_timers()
1764 mli->mli_version = MLD_VERSION_2; in mld_v1_process_querier_timers()
1784 ifp = in6m->in6m_ifp; in mld_v1_transmit_report()
1789 /* ia may be NULL if link-local address is tentative. */ in mld_v1_transmit_report()
1794 ifa_free(&ia->ia_ifa); in mld_v1_transmit_report()
1801 ifa_free(&ia->ia_ifa); in mld_v1_transmit_report()
1804 mh->m_next = md; in mld_v1_transmit_report()
1812 mh->m_pkthdr.len = sizeof(struct ip6_hdr) + sizeof(struct mld_hdr); in mld_v1_transmit_report()
1813 mh->m_len = sizeof(struct ip6_hdr); in mld_v1_transmit_report()
1816 ip6->ip6_flow = 0; in mld_v1_transmit_report()
1817 ip6->ip6_vfc &= ~IPV6_VERSION_MASK; in mld_v1_transmit_report()
1818 ip6->ip6_vfc |= IPV6_VERSION; in mld_v1_transmit_report()
1819 ip6->ip6_nxt = IPPROTO_ICMPV6; in mld_v1_transmit_report()
1820 ip6->ip6_src = ia ? ia->ia_addr.sin6_addr : in6addr_any; in mld_v1_transmit_report()
1821 ip6->ip6_dst = in6m->in6m_addr; in mld_v1_transmit_report()
1823 md->m_len = sizeof(struct mld_hdr); in mld_v1_transmit_report()
1825 mld->mld_type = type; in mld_v1_transmit_report()
1826 mld->mld_code = 0; in mld_v1_transmit_report()
1827 mld->mld_cksum = 0; in mld_v1_transmit_report()
1828 mld->mld_maxdelay = 0; in mld_v1_transmit_report()
1829 mld->mld_reserved = 0; in mld_v1_transmit_report()
1830 mld->mld_addr = in6m->in6m_addr; in mld_v1_transmit_report()
1831 in6_clearscope(&mld->mld_addr); in mld_v1_transmit_report()
1832 mld->mld_cksum = in6_cksum(mh, IPPROTO_ICMPV6, in mld_v1_transmit_report()
1836 mh->m_flags |= M_MLDV1; in mld_v1_transmit_report()
1841 ifa_free(&ia->ia_ifa); in mld_v1_transmit_report()
1851 * has been any change between T0 (when the last state-change was issued)
1859 * If delay is non-zero, and the state change is an initial multicast
1863 * is sooner, a pending state-change timer or delay itself.
1882 if (inm->in6m_ifp == NULL) { in mld_change_state()
1888 * Try to detect if the upper layer just asked us to change state in mld_change_state()
1891 KASSERT(inm->in6m_ifma != NULL, ("%s: no ifma", __func__)); in mld_change_state()
1892 ifp = inm->in6m_ifma->ifma_ifp; in mld_change_state()
1899 KASSERT(inm->in6m_ifp == ifp, ("%s: bad ifp", __func__)); in mld_change_state()
1910 if (inm->in6m_st[1].iss_fmode != inm->in6m_st[0].iss_fmode) { in mld_change_state()
1911 CTR3(KTR_MLD, "%s: inm transition %d -> %d", __func__, in mld_change_state()
1912 inm->in6m_st[0].iss_fmode, inm->in6m_st[1].iss_fmode); in mld_change_state()
1913 if (inm->in6m_st[0].iss_fmode == MCAST_UNDEFINED) { in mld_change_state()
1917 } else if (inm->in6m_st[1].iss_fmode == MCAST_UNDEFINED) { in mld_change_state()
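The two comparisons above classify the T0 to T1 transition; their bodies are elided from this listing, but the surrounding functions suggest the dispatch. A hedged sketch of how the classification plausibly continues (argument lists assumed, not taken from the file):

	if (inm->in6m_st[0].iss_fmode == MCAST_UNDEFINED) {
		/* No state at T0: treat this as an initial join. */
		error = mld_initial_join(inm, mli, delay);
	} else if (inm->in6m_st[1].iss_fmode == MCAST_UNDEFINED) {
		/* No state at T1: treat this as a final leave. */
		mld_final_leave(inm, mli);
	} else {
		/* Filter mode and/or source lists changed in place. */
		error = mld_handle_state_change(inm, mli);
	}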
1939 * MLDv2 will schedule an MLDv2 state-change report containing the
1942 * If the delay argument is non-zero, then we must delay sending the
1959 __func__, ip6_sprintf(ip6tbuf, &inm->in6m_addr), in mld_initial_join()
1960 inm->in6m_ifp, if_name(inm->in6m_ifp)); in mld_initial_join()
1965 ifp = inm->in6m_ifp; in mld_initial_join()
1970 KASSERT(mli && mli->mli_ifp == ifp, ("%s: inconsistent ifp", __func__)); in mld_initial_join()
1981 if ((ifp->if_flags & IFF_LOOPBACK) || in mld_initial_join()
1982 (mli->mli_flags & MLIF_SILENT) || in mld_initial_join()
1983 !mld_is_addr_reported(&inm->in6m_addr)) { in mld_initial_join()
1986 inm->in6m_state = MLD_SILENT_MEMBER; in mld_initial_join()
1987 inm->in6m_timer = 0; in mld_initial_join()
1995 if (mli->mli_version == MLD_VERSION_2 && in mld_initial_join()
1996 inm->in6m_state == MLD_LEAVING_MEMBER) { in mld_initial_join()
1997 inm->in6m_refcount--; in mld_initial_join()
1998 MPASS(inm->in6m_refcount > 0); in mld_initial_join()
2000 inm->in6m_state = MLD_REPORTING_MEMBER; in mld_initial_join()
2002 switch (mli->mli_version) { in mld_initial_join()
2013 inm->in6m_timer = max(delay, odelay); in mld_initial_join()
2016 inm->in6m_state = MLD_IDLE_MEMBER; in mld_initial_join()
2022 inm->in6m_timer = odelay; in mld_initial_join()
2036 * Immediately enqueue a State-Change Report for in mld_initial_join()
2041 mq = &inm->in6m_scq; in mld_initial_join()
2044 0, 0, (mli->mli_flags & MLIF_USEALLOW)); in mld_initial_join()
2048 error = retval * -1; in mld_initial_join()
2053 * Schedule transmission of pending state-change in mld_initial_join()
2056 * giving us an opportunity to merge the reports. in mld_initial_join()
2061 KASSERT(mli->mli_rv > 1, in mld_initial_join()
2063 mli->mli_rv)); in mld_initial_join()
2064 inm->in6m_scrv = mli->mli_rv; in mld_initial_join()
2066 if (inm->in6m_sctimer > 1) { in mld_initial_join()
2067 inm->in6m_sctimer = in mld_initial_join()
2068 min(inm->in6m_sctimer, delay); in mld_initial_join()
2070 inm->in6m_sctimer = delay; in mld_initial_join()
2072 inm->in6m_sctimer = 1; in mld_initial_join()
2081 * Only update the T0 state if state change is atomic, in mld_initial_join()
2087 CTR3(KTR_MLD, "%s: T1 -> T0 for %s/%s", __func__, in mld_initial_join()
2088 ip6_sprintf(ip6tbuf, &inm->in6m_addr), in mld_initial_join()
2089 if_name(inm->in6m_ifp)); in mld_initial_join()
2096 * Issue an intermediate state change during the life-cycle.
2108 __func__, ip6_sprintf(ip6tbuf, &inm->in6m_addr), in mld_handle_state_change()
2109 inm->in6m_ifp, if_name(inm->in6m_ifp)); in mld_handle_state_change()
2111 ifp = inm->in6m_ifp; in mld_handle_state_change()
2116 KASSERT(mli && mli->mli_ifp == ifp, in mld_handle_state_change()
2119 if ((ifp->if_flags & IFF_LOOPBACK) || in mld_handle_state_change()
2120 (mli->mli_flags & MLIF_SILENT) || in mld_handle_state_change()
2121 !mld_is_addr_reported(&inm->in6m_addr) || in mld_handle_state_change()
2122 (mli->mli_version != MLD_VERSION_2)) { in mld_handle_state_change()
2123 if (!mld_is_addr_reported(&inm->in6m_addr)) { in mld_handle_state_change()
2129 CTR3(KTR_MLD, "%s: T1 -> T0 for %s/%s", __func__, in mld_handle_state_change()
2130 ip6_sprintf(ip6tbuf, &inm->in6m_addr), in mld_handle_state_change()
2131 if_name(inm->in6m_ifp)); in mld_handle_state_change()
2135 mbufq_drain(&inm->in6m_scq); in mld_handle_state_change()
2137 retval = mld_v2_enqueue_group_record(&inm->in6m_scq, inm, 1, 0, 0, in mld_handle_state_change()
2138 (mli->mli_flags & MLIF_USEALLOW)); in mld_handle_state_change()
2141 return (-retval); in mld_handle_state_change()
2144 * If record(s) were enqueued, start the state-change in mld_handle_state_change()
2147 inm->in6m_scrv = mli->mli_rv; in mld_handle_state_change()
2148 inm->in6m_sctimer = 1; in mld_handle_state_change()
2159 * MLDv2 enqueues a state-change report containing a transition
2171 __func__, ip6_sprintf(ip6tbuf, &inm->in6m_addr), in mld_final_leave()
2172 inm->in6m_ifp, if_name(inm->in6m_ifp)); in mld_final_leave()
2177 switch (inm->in6m_state) { in mld_final_leave()
2189 if (mli->mli_version == MLD_VERSION_1) { in mld_final_leave()
2191 if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER || in mld_final_leave()
2192 inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER) in mld_final_leave()
2199 inm->in6m_state = MLD_NOT_MEMBER; in mld_final_leave()
2201 } else if (mli->mli_version == MLD_VERSION_2) { in mld_final_leave()
2204 * Immediately enqueue a state-change report in mld_final_leave()
2206 * giving us an opportunity to merge reports. in mld_final_leave()
2208 mbufq_drain(&inm->in6m_scq); in mld_final_leave()
2209 inm->in6m_timer = 0; in mld_final_leave()
2210 inm->in6m_scrv = mli->mli_rv; in mld_final_leave()
2213 ip6_sprintf(ip6tbuf, &inm->in6m_addr), in mld_final_leave()
2214 if_name(inm->in6m_ifp), inm->in6m_scrv); in mld_final_leave()
2215 if (inm->in6m_scrv == 0) { in mld_final_leave()
2216 inm->in6m_state = MLD_NOT_MEMBER; in mld_final_leave()
2217 inm->in6m_sctimer = 0; in mld_final_leave()
2224 &inm->in6m_scq, inm, 1, 0, 0, in mld_final_leave()
2225 (mli->mli_flags & MLIF_USEALLOW)); in mld_final_leave()
2230 inm->in6m_state = MLD_LEAVING_MEMBER; in mld_final_leave()
2231 inm->in6m_sctimer = 1; in mld_final_leave()
2245 CTR3(KTR_MLD, "%s: T1 -> T0 for %s/%s", __func__, in mld_final_leave()
2246 ip6_sprintf(ip6tbuf, &inm->in6m_addr), in mld_final_leave()
2247 if_name(inm->in6m_ifp)); in mld_final_leave()
2248 inm->in6m_st[1].iss_fmode = MCAST_UNDEFINED; in mld_final_leave()
2250 __func__, &inm->in6m_addr, if_name(inm->in6m_ifp)); in mld_final_leave()
2256 * If is_state_change is zero, a current-state record is appended.
2257 * If is_state_change is non-zero, a state-change report is appended.
2259 * If is_group_query is non-zero, an mbuf packet chain is allocated.
2265 * If is_source_query is non-zero, each source is checked to see if
2266 * it was recorded for a Group-Source query, and will be omitted if
2267 * it is not both in-mode and recorded.
2269 * If use_block_allow is non-zero, state change reports for initial join
2302 ifp = inm->in6m_ifp; in mld_v2_enqueue_group_record()
2313 mode = inm->in6m_st[1].iss_fmode; in mld_v2_enqueue_group_record()
2316 * If we did not transition out of ASM mode during t0->t1, in mld_v2_enqueue_group_record()
2320 if (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0 && in mld_v2_enqueue_group_record()
2321 inm->in6m_nsrc == 0) in mld_v2_enqueue_group_record()
2327 * If the mode did not change, and there are non-ASM in mld_v2_enqueue_group_record()
2341 if (mode != inm->in6m_st[0].iss_fmode) { in mld_v2_enqueue_group_record()
2385 KASSERT(inm->in6m_st[1].iss_asm == 0, in mld_v2_enqueue_group_record()
2387 __func__, inm, inm->in6m_st[1].iss_asm)); in mld_v2_enqueue_group_record()
2399 __func__, ip6_sprintf(ip6tbuf, &inm->in6m_addr), in mld_v2_enqueue_group_record()
2400 if_name(inm->in6m_ifp)); in mld_v2_enqueue_group_record()
2415 ip6_sprintf(ip6tbuf, &inm->in6m_addr), in mld_v2_enqueue_group_record()
2416 if_name(inm->in6m_ifp)); in mld_v2_enqueue_group_record()
2429 (m0->m_pkthdr.vt_nrecs + 1 <= MLD_V2_REPORT_MAXRECS) && in mld_v2_enqueue_group_record()
2430 (m0->m_pkthdr.len + minrec0len) < in mld_v2_enqueue_group_record()
2431 (ifp->if_mtu - MLD_MTUSPACE)) { in mld_v2_enqueue_group_record()
2432 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len - in mld_v2_enqueue_group_record()
2440 return (-ENOMEM); in mld_v2_enqueue_group_record()
2443 m0srcs = (ifp->if_mtu - MLD_MTUSPACE - in mld_v2_enqueue_group_record()
2450 return (-ENOMEM); in mld_v2_enqueue_group_record()
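A quick sizing example for the source budget computed above: with a 1500-byte MTU, MLD_MTUSPACE covering the 40-byte IPv6 header, the 8-byte hop-by-hop header carrying the Router Alert option and the 8-byte MLDv2 report header (56 bytes in total, assuming the usual definition of that macro, which is not part of this listing), and a 20-byte group record header, the first record of an otherwise empty report has room for (1500 - 56 - 20) / 16 = 89 IPv6 source addresses; records appended to a partially filled packet get correspondingly less.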
2464 mr.mr_addr = inm->in6m_addr; in mld_v2_enqueue_group_record()
2470 return (-ENOMEM); in mld_v2_enqueue_group_record()
2482 * Only append sources which are in-mode at t1. If we are in mld_v2_enqueue_group_record()
2488 * to a group-source query. in mld_v2_enqueue_group_record()
2494 md->m_len - nbytes); in mld_v2_enqueue_group_record()
2501 RB_FOREACH_SAFE(ims, ip6_msource_tree, &inm->in6m_srcs, in mld_v2_enqueue_group_record()
2504 ip6_sprintf(ip6tbuf, &ims->im6s_addr)); in mld_v2_enqueue_group_record()
2513 if (is_source_query && ims->im6s_stp == 0) { in mld_v2_enqueue_group_record()
2520 (void *)&ims->im6s_addr)) { in mld_v2_enqueue_group_record()
2525 return (-ENOMEM); in mld_v2_enqueue_group_record()
2534 pmr->mr_numsrc = htons(msrcs); in mld_v2_enqueue_group_record()
2550 m->m_pkthdr.vt_nrecs = 1; in mld_v2_enqueue_group_record()
2553 m->m_pkthdr.vt_nrecs++; in mld_v2_enqueue_group_record()
2569 return (-ENOMEM); in mld_v2_enqueue_group_record()
2575 return (-ENOMEM); in mld_v2_enqueue_group_record()
2585 return (-ENOMEM); in mld_v2_enqueue_group_record()
2587 m->m_pkthdr.vt_nrecs = 1; in mld_v2_enqueue_group_record()
2590 m0srcs = (ifp->if_mtu - MLD_MTUSPACE - in mld_v2_enqueue_group_record()
2596 __func__, ip6_sprintf(ip6tbuf, &ims->im6s_addr)); in mld_v2_enqueue_group_record()
2604 if (is_source_query && ims->im6s_stp == 0) { in mld_v2_enqueue_group_record()
2611 (void *)&ims->im6s_addr)) { in mld_v2_enqueue_group_record()
2616 return (-ENOMEM); in mld_v2_enqueue_group_record()
2622 pmr->mr_numsrc = htons(msrcs); in mld_v2_enqueue_group_record()
2647 * Source list filter state is held in an RB-tree. When the filter list
2652 * As we may potentially queue two record types, and the entire R-B tree
2658 * which makes things easier on us, and it may or may not be harder on
2685 if (inm->in6m_nsrc == 0 || in mld_v2_enqueue_filter_change()
2686 (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0)) in mld_v2_enqueue_filter_change()
2689 ifp = inm->in6m_ifp; /* interface */ in mld_v2_enqueue_filter_change()
2690 mode = inm->in6m_st[1].iss_fmode; /* filter mode at t1 */ in mld_v2_enqueue_filter_change()
2696 nbytes = 0; /* # of bytes appended to group's state-change queue */ in mld_v2_enqueue_filter_change()
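Concretely, under the RFC 3810 delta rules this function encodes: a group that stays in EXCLUDE mode and grows its source list from {} to {S1} reports S1 in a BLOCK_OLD_SOURCES record, and reports it in an ALLOW_NEW_SOURCES record if the list later shrinks back; for a group that stays in INCLUDE mode the roles are mirrored (added sources are ALLOWed, removed sources are BLOCKed). Only sources whose in-mode status actually differs between t0 and t1 are reported, as the "Only report deltas in-mode at t1" note further down reiterates.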
2707 * The first kind of source we encounter tells us which in mld_v2_enqueue_filter_change()
2716 (m0->m_pkthdr.vt_nrecs + 1 <= in mld_v2_enqueue_filter_change()
2718 (m0->m_pkthdr.len + MINRECLEN) < in mld_v2_enqueue_filter_change()
2719 (ifp->if_mtu - MLD_MTUSPACE)) { in mld_v2_enqueue_filter_change()
2721 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len - in mld_v2_enqueue_filter_change()
2733 return (-ENOMEM); in mld_v2_enqueue_filter_change()
2735 m->m_pkthdr.vt_nrecs = 0; in mld_v2_enqueue_filter_change()
2737 m0srcs = (ifp->if_mtu - MLD_MTUSPACE - in mld_v2_enqueue_filter_change()
2752 mr.mr_addr = inm->in6m_addr; in mld_v2_enqueue_filter_change()
2759 return (-ENOMEM); in mld_v2_enqueue_filter_change()
2764 md = m_getptr(m, npbytes - in mld_v2_enqueue_filter_change()
2772 uint8_t *) + md->m_len - in mld_v2_enqueue_filter_change()
2779 * Only report deltas in-mode at t1. in mld_v2_enqueue_filter_change()
2787 &inm->in6m_srcs); in mld_v2_enqueue_filter_change()
2791 ip6_sprintf(ip6tbuf, &ims->im6s_addr)); in mld_v2_enqueue_filter_change()
2816 (void *)&ims->im6s_addr)) { in mld_v2_enqueue_filter_change()
2821 return (-ENOMEM); in mld_v2_enqueue_filter_change()
2835 npbytes -= sizeof(struct mldv2_record); in mld_v2_enqueue_filter_change()
2842 "%s: m_adj(m, -mr)", __func__); in mld_v2_enqueue_filter_change()
2843 m_adj(m, -((int)sizeof( in mld_v2_enqueue_filter_change()
2850 pmr->mr_type = MLD_ALLOW_NEW_SOURCES; in mld_v2_enqueue_filter_change()
2852 pmr->mr_type = MLD_BLOCK_OLD_SOURCES; in mld_v2_enqueue_filter_change()
2853 pmr->mr_numsrc = htons(rsrcs); in mld_v2_enqueue_filter_change()
2858 m->m_pkthdr.vt_nrecs++; in mld_v2_enqueue_filter_change()
2877 struct mbuf *m; /* pending state-change */ in mld_v2_merge_state_changes()
2878 struct mbuf *m0; /* copy of pending state-change */ in mld_v2_merge_state_changes()
2879 struct mbuf *mt; /* last state-change in packet */ in mld_v2_merge_state_changes()
2892 * copy of each queued state-change message before merging. in mld_v2_merge_state_changes()
2894 if (inm->in6m_scrv > 0) in mld_v2_merge_state_changes()
2897 gq = &inm->in6m_scq; in mld_v2_merge_state_changes()
2920 if ((mt->m_pkthdr.vt_nrecs + in mld_v2_merge_state_changes()
2921 m->m_pkthdr.vt_nrecs <= in mld_v2_merge_state_changes()
2923 (mt->m_pkthdr.len + recslen <= in mld_v2_merge_state_changes()
2924 (inm->in6m_ifp->if_mtu - MLD_MTUSPACE))) in mld_v2_merge_state_changes()
2932 mt = m->m_nextpkt; in mld_v2_merge_state_changes()
2942 m = m0->m_nextpkt; in mld_v2_merge_state_changes()
2948 m0->m_nextpkt = NULL; in mld_v2_merge_state_changes()
2949 m = m->m_nextpkt; in mld_v2_merge_state_changes()
2963 m0->m_flags &= ~M_PKTHDR; in mld_v2_merge_state_changes()
2964 mt->m_pkthdr.len += recslen; in mld_v2_merge_state_changes()
2965 mt->m_pkthdr.vt_nrecs += in mld_v2_merge_state_changes()
2966 m0->m_pkthdr.vt_nrecs; in mld_v2_merge_state_changes()
2968 mtl->m_next = m0; in mld_v2_merge_state_changes()
2990 KASSERT(mli->mli_version == MLD_VERSION_2, in mld_v2_dispatch_general_query()
2991 ("%s: called when version %d", __func__, mli->mli_version)); in mld_v2_dispatch_general_query()
2999 if (!mbufq_empty(&mli->mli_gq)) in mld_v2_dispatch_general_query()
3002 ifp = mli->mli_ifp; in mld_v2_dispatch_general_query()
3004 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { in mld_v2_dispatch_general_query()
3008 KASSERT(ifp == inm->in6m_ifp, in mld_v2_dispatch_general_query()
3011 switch (inm->in6m_state) { in mld_v2_dispatch_general_query()
3020 inm->in6m_state = MLD_REPORTING_MEMBER; in mld_v2_dispatch_general_query()
3021 retval = mld_v2_enqueue_group_record(&mli->mli_gq, in mld_v2_dispatch_general_query()
3034 mld_dispatch_queue(&mli->mli_gq, MLD_MAX_RESPONSE_BURST); in mld_v2_dispatch_general_query()
3039 if (mbufq_first(&mli->mli_gq) != NULL) { in mld_v2_dispatch_general_query()
3040 mli->mli_v2_timer = 1 + MLD_RANDOM_DELAY( in mld_v2_dispatch_general_query()
3049 * VIMAGE: Needs to store/restore vnet pointer on a per-mbuf-chain basis.
3051 * a link and uses a link-scope multicast address.
3097 if (m->m_flags & M_MLDV1) { in mld_dispatch_packet()
3110 m0->m_pkthdr.rcvif = V_loif; in mld_dispatch_packet()
3114 (void)in6_setscope(&ip6->ip6_dst, ifp, NULL); /* XXX LOR */ in mld_dispatch_packet()
3121 MLD_EMBEDSCOPE(&ip6->ip6_dst, ifp->if_index); in mld_dispatch_packet()
3130 type = mld->mld_type; in mld_dispatch_packet()
3159 * KAME IPv6 requires that hop-by-hop options be passed separately,
3175 KASSERT((m->m_flags & M_PKTHDR), in mld_v2_encap_report()
3189 ifa_free(&ia->ia_ifa); in mld_v2_encap_report()
3198 mh->m_len = sizeof(struct ip6_hdr) + sizeof(struct mldv2_report); in mld_v2_encap_report()
3199 mh->m_pkthdr.len = sizeof(struct ip6_hdr) + in mld_v2_encap_report()
3203 ip6->ip6_flow = 0; in mld_v2_encap_report()
3204 ip6->ip6_vfc &= ~IPV6_VERSION_MASK; in mld_v2_encap_report()
3205 ip6->ip6_vfc |= IPV6_VERSION; in mld_v2_encap_report()
3206 ip6->ip6_nxt = IPPROTO_ICMPV6; in mld_v2_encap_report()
3207 ip6->ip6_src = ia ? ia->ia_addr.sin6_addr : in6addr_any; in mld_v2_encap_report()
3209 ifa_free(&ia->ia_ifa); in mld_v2_encap_report()
3210 ip6->ip6_dst = in6addr_linklocal_allv2routers; in mld_v2_encap_report()
3214 mld->mld_type = MLDV2_LISTENER_REPORT; in mld_v2_encap_report()
3215 mld->mld_code = 0; in mld_v2_encap_report()
3216 mld->mld_cksum = 0; in mld_v2_encap_report()
3217 mld->mld_v2_reserved = 0; in mld_v2_encap_report()
3218 mld->mld_v2_numrecs = htons(m->m_pkthdr.vt_nrecs); in mld_v2_encap_report()
3219 m->m_pkthdr.vt_nrecs = 0; in mld_v2_encap_report()
3221 mh->m_next = m; in mld_v2_encap_report()
3222 mld->mld_cksum = in6_cksum(mh, IPPROTO_ICMPV6, in mld_v2_encap_report()