/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 * All rights reserved.
 * Copyright (c) 2019 Netflix, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	$KAME: frag6.c,v 1.33 2002/01/07 11:34:48 kjc Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_rss.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/domain.h>
#include <sys/eventhandler.h>
#include <sys/hash.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/netisr.h>
#include <net/route.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/icmp6.h>
#include <netinet/in_systm.h>	/* For ECN definitions. */
#include <netinet/ip.h>		/* For ECN definitions. */

#ifdef MAC
#include <security/mac/mac_framework.h>
#endif

/*
 * A "big picture" of how IPv6 fragment queues are all linked together.
 *
 * struct ip6qbucket ip6qb[...];			hashed buckets
 * ||||||||
 * |
 * +--- TAILQ(struct ip6q, packets) *q6;		tailq entries holding
 *      ||||||||					fragmented packets
 *      |						(1 per original packet)
 *      |
 *      +--- TAILQ(struct ip6asfrag, ip6q_frags) *af6;	tailq entries of IPv6
 *           |||||||| *ip6af;				fragment packets
 *           |						for one original packet
 *           + *mbuf
 */

/* Reassembly headers are stored in hash buckets. */
#define	IP6REASS_NHASH_LOG2	10
#define	IP6REASS_NHASH		(1 << IP6REASS_NHASH_LOG2)
#define	IP6REASS_HMASK		(IP6REASS_NHASH - 1)

TAILQ_HEAD(ip6qhead, ip6q);
struct ip6qbucket {
	struct ip6qhead	packets;
	struct mtx	lock;
	int		count;
};

struct ip6asfrag {
	TAILQ_ENTRY(ip6asfrag) ip6af_tq;
	struct mbuf	*ip6af_m;
	int		ip6af_offset;	/* Offset in ip6af_m to next header. */
	int		ip6af_frglen;	/* Fragmentable part length. */
	int		ip6af_off;	/* Fragment offset. */
	bool		ip6af_mff;	/* More fragment bit in frag off. */
};

static MALLOC_DEFINE(M_FRAG6, "frag6", "IPv6 fragment reassembly header");

#ifdef VIMAGE
/* A flag to indicate if IPv6 fragmentation is initialized. */
VNET_DEFINE_STATIC(bool, frag6_on);
#define	V_frag6_on	VNET(frag6_on)
#endif

/* System wide (global) maximum and count of packets in reassembly queues. */
static int ip6_maxfrags;
static volatile u_int frag6_nfrags = 0;

/* Maximum and current packets in per-VNET reassembly queue. */
VNET_DEFINE_STATIC(int, ip6_maxfragpackets);
VNET_DEFINE_STATIC(volatile u_int, frag6_nfragpackets);
#define	V_ip6_maxfragpackets	VNET(ip6_maxfragpackets)
#define	V_frag6_nfragpackets	VNET(frag6_nfragpackets)

/* Maximum per-VNET reassembly queues per bucket and fragments per packet. */
VNET_DEFINE_STATIC(int, ip6_maxfragbucketsize);
VNET_DEFINE_STATIC(int, ip6_maxfragsperpacket);
#define	V_ip6_maxfragbucketsize	VNET(ip6_maxfragbucketsize)
#define	V_ip6_maxfragsperpacket	VNET(ip6_maxfragsperpacket)

/* Per-VNET reassembly queue buckets. */
VNET_DEFINE_STATIC(struct ip6qbucket, ip6qb[IP6REASS_NHASH]);
VNET_DEFINE_STATIC(uint32_t, ip6qb_hashseed);
#define	V_ip6qb			VNET(ip6qb)
#define	V_ip6qb_hashseed	VNET(ip6qb_hashseed)

#define	IP6QB_LOCK(_b)		mtx_lock(&V_ip6qb[(_b)].lock)
#define	IP6QB_TRYLOCK(_b)	mtx_trylock(&V_ip6qb[(_b)].lock)
#define	IP6QB_LOCK_ASSERT(_b)	mtx_assert(&V_ip6qb[(_b)].lock, MA_OWNED)
#define	IP6QB_UNLOCK(_b)	mtx_unlock(&V_ip6qb[(_b)].lock)
#define	IP6QB_HEAD(_b)		(&V_ip6qb[(_b)].packets)

/*
 * By default, limit the number of IP6 fragments across all reassembly
 * queues to 1/32 of the total number of mbuf clusters.
 *
 * Limit the total number of reassembly queues per VNET to the
 * IP6 fragment limit, but ensure the limit will not allow any bucket
 * to grow above 100 items.  (The bucket limit is
 * IP6_MAXFRAGPACKETS / (IP6REASS_NHASH / 2), so the 50 is the correct
 * multiplier to reach a 100-item limit.)
 * The 100-item limit was chosen as brief testing seems to show that
 * this produces "reasonable" performance on some subset of systems
 * under DoS attack.
 */
#define	IP6_MAXFRAGS		(nmbclusters / 32)
#define	IP6_MAXFRAGPACKETS	(imin(IP6_MAXFRAGS, IP6REASS_NHASH * 50))


/*
 * Sysctls and helper function.
 */
SYSCTL_DECL(_net_inet6_ip6);

SYSCTL_UINT(_net_inet6_ip6, OID_AUTO, frag6_nfrags,
    CTLFLAG_RD, __DEVOLATILE(u_int *, &frag6_nfrags), 0,
    "Global number of IPv6 fragments across all reassembly queues.");
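
/*
 * Worked example for frag6_set_bucketsize() below (illustrative numbers,
 * not compiled-in defaults): with IP6REASS_NHASH = 1024 buckets and
 * V_ip6_maxfragpackets set to 2048, each bucket is limited to
 * imax(2048 / (1024 / 2), 1) = 4 reassembly queues.
 */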
Zeeb 171757cb678SBjoern A. Zeeb static void 17209b361c7SBjoern A. Zeeb frag6_set_bucketsize(void) 1731e9f3b73SJonathan T. Looney { 1741e9f3b73SJonathan T. Looney int i; 1751e9f3b73SJonathan T. Looney 1761e9f3b73SJonathan T. Looney if ((i = V_ip6_maxfragpackets) > 0) 1771e9f3b73SJonathan T. Looney V_ip6_maxfragbucketsize = imax(i / (IP6REASS_NHASH / 2), 1); 1781e9f3b73SJonathan T. Looney } 1791e9f3b73SJonathan T. Looney 180757cb678SBjoern A. Zeeb SYSCTL_INT(_net_inet6_ip6, IPV6CTL_MAXFRAGS, maxfrags, 181757cb678SBjoern A. Zeeb CTLFLAG_RW, &ip6_maxfrags, 0, 182757cb678SBjoern A. Zeeb "Maximum allowed number of outstanding IPv6 packet fragments. " 183757cb678SBjoern A. Zeeb "A value of 0 means no fragmented packets will be accepted, while a " 184757cb678SBjoern A. Zeeb "a value of -1 means no limit"); 185757cb678SBjoern A. Zeeb 186757cb678SBjoern A. Zeeb static int 187757cb678SBjoern A. Zeeb sysctl_ip6_maxfragpackets(SYSCTL_HANDLER_ARGS) 188757cb678SBjoern A. Zeeb { 189757cb678SBjoern A. Zeeb int error, val; 190757cb678SBjoern A. Zeeb 191757cb678SBjoern A. Zeeb val = V_ip6_maxfragpackets; 192757cb678SBjoern A. Zeeb error = sysctl_handle_int(oidp, &val, 0, req); 193757cb678SBjoern A. Zeeb if (error != 0 || !req->newptr) 194757cb678SBjoern A. Zeeb return (error); 195757cb678SBjoern A. Zeeb V_ip6_maxfragpackets = val; 196757cb678SBjoern A. Zeeb frag6_set_bucketsize(); 197757cb678SBjoern A. Zeeb return (0); 198757cb678SBjoern A. Zeeb } 199757cb678SBjoern A. Zeeb SYSCTL_PROC(_net_inet6_ip6, IPV6CTL_MAXFRAGPACKETS, maxfragpackets, 200757cb678SBjoern A. Zeeb CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW, NULL, 0, 201757cb678SBjoern A. Zeeb sysctl_ip6_maxfragpackets, "I", 202757cb678SBjoern A. Zeeb "Default maximum number of outstanding fragmented IPv6 packets. " 203757cb678SBjoern A. Zeeb "A value of 0 means no fragmented packets will be accepted, while a " 204757cb678SBjoern A. Zeeb "a value of -1 means no limit"); 205757cb678SBjoern A. Zeeb SYSCTL_INT(_net_inet6_ip6, IPV6CTL_MAXFRAGSPERPACKET, maxfragsperpacket, 206757cb678SBjoern A. Zeeb CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_maxfragsperpacket), 0, 207757cb678SBjoern A. Zeeb "Maximum allowed number of fragments per packet"); 208757cb678SBjoern A. Zeeb SYSCTL_INT(_net_inet6_ip6, IPV6CTL_MAXFRAGBUCKETSIZE, maxfragbucketsize, 209757cb678SBjoern A. Zeeb CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_maxfragbucketsize), 0, 210757cb678SBjoern A. Zeeb "Maximum number of reassembly queues per hash bucket"); 211757cb678SBjoern A. Zeeb 212757cb678SBjoern A. Zeeb 213757cb678SBjoern A. Zeeb /* 214c00464a2SBjoern A. Zeeb * Remove the IPv6 fragmentation header from the mbuf. 215c00464a2SBjoern A. Zeeb */ 216c00464a2SBjoern A. Zeeb int 217c00464a2SBjoern A. Zeeb ip6_deletefraghdr(struct mbuf *m, int offset, int wait) 218c00464a2SBjoern A. Zeeb { 2195778b399SBjoern A. Zeeb struct ip6_hdr *ip6; 220c00464a2SBjoern A. Zeeb struct mbuf *t; 221c00464a2SBjoern A. Zeeb 222c00464a2SBjoern A. Zeeb /* Delete frag6 header. */ 223c00464a2SBjoern A. Zeeb if (m->m_len >= offset + sizeof(struct ip6_frag)) { 2245778b399SBjoern A. Zeeb 225c00464a2SBjoern A. Zeeb /* This is the only possible case with !PULLDOWN_TEST. */ 2265778b399SBjoern A. Zeeb ip6 = mtod(m, struct ip6_hdr *); 227c00464a2SBjoern A. Zeeb bcopy(ip6, (char *)ip6 + sizeof(struct ip6_frag), 228c00464a2SBjoern A. Zeeb offset); 229c00464a2SBjoern A. Zeeb m->m_data += sizeof(struct ip6_frag); 230c00464a2SBjoern A. Zeeb m->m_len -= sizeof(struct ip6_frag); 231c00464a2SBjoern A. Zeeb } else { 2325778b399SBjoern A. 

		/* This comes with no copy if the boundary is on a cluster. */
		if ((t = m_split(m, offset, wait)) == NULL)
			return (ENOMEM);
		m_adj(t, sizeof(struct ip6_frag));
		m_cat(m, t);
	}

	m->m_flags |= M_FRAGMENTED;
	return (0);
}

/*
 * Free a fragment reassembly header and all associated datagrams.
 */
static void
frag6_freef(struct ip6q *q6, uint32_t bucket)
{
	struct ip6_hdr *ip6;
	struct ip6asfrag *af6;
	struct mbuf *m;

	IP6QB_LOCK_ASSERT(bucket);

	while ((af6 = TAILQ_FIRST(&q6->ip6q_frags)) != NULL) {

		m = af6->ip6af_m;
		TAILQ_REMOVE(&q6->ip6q_frags, af6, ip6af_tq);

		/*
		 * Return ICMP time exceeded error for the 1st fragment.
		 * Just free other fragments.
		 */
		if (af6->ip6af_off == 0 && m->m_pkthdr.rcvif != NULL) {

			/* Adjust pointer. */
			ip6 = mtod(m, struct ip6_hdr *);

			/* Restore source and destination addresses. */
			ip6->ip6_src = q6->ip6q_src;
			ip6->ip6_dst = q6->ip6q_dst;

			icmp6_error(m, ICMP6_TIME_EXCEEDED,
			    ICMP6_TIME_EXCEED_REASSEMBLY, 0);
		} else
			m_freem(m);

		free(af6, M_FRAG6);
	}

	TAILQ_REMOVE(IP6QB_HEAD(bucket), q6, ip6q_tq);
	V_ip6qb[bucket].count--;
	atomic_subtract_int(&frag6_nfrags, q6->ip6q_nfrag);
#ifdef MAC
	mac_ip6q_destroy(q6);
#endif
	free(q6, M_FRAG6);
	atomic_subtract_int(&V_frag6_nfragpackets, 1);
}

/*
 * Drain off all datagram fragments belonging to
 * the given network interface.
 */
static void
frag6_cleanup(void *arg __unused, struct ifnet *ifp)
{
	struct ip6qhead *head;
	struct ip6q *q6;
	struct ip6asfrag *af6;
	uint32_t bucket;

	KASSERT(ifp != NULL, ("%s: ifp is NULL", __func__));

#ifdef VIMAGE
	/*
	 * Skip processing if IPv6 reassembly is not initialised or
	 * has been torn down by frag6_destroy().
	 */
	if (!V_frag6_on)
		return;
#endif

	CURVNET_SET_QUIET(ifp->if_vnet);
	for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) {
		IP6QB_LOCK(bucket);
		head = IP6QB_HEAD(bucket);
		/* Scan fragment list. */
		TAILQ_FOREACH(q6, head, ip6q_tq) {
			TAILQ_FOREACH(af6, &q6->ip6q_frags, ip6af_tq) {

				/* Clear no longer valid rcvif pointer. */
				if (af6->ip6af_m->m_pkthdr.rcvif == ifp)
					af6->ip6af_m->m_pkthdr.rcvif = NULL;
			}
		}
		IP6QB_UNLOCK(bucket);
	}
	CURVNET_RESTORE();
}
EVENTHANDLER_DEFINE(ifnet_departure_event, frag6_cleanup, NULL, 0);

/*
 * As in RFC 2460, the fragment and reassembly rules in RFC 8200 do not agree
 * with each other in terms of next header field handling in the fragment
 * header.  While the sender will use the same value for all of the fragmented
 * packets, the receiver is advised not to check them for consistency.
 *
 * Fragment rules (p18,p19):
 *	(2)  A Fragment header containing:
 *	The Next Header value that identifies the first header
 *	after the Per-Fragment headers of the original packet.
 *		-> next header field is same for all fragments
 *
 * Reassembly rule (p20):
 *	The Next Header field of the last header of the Per-Fragment
 *	headers is obtained from the Next Header field of the first
 *	fragment's Fragment header.
 *		-> should grab it from the first fragment only
 *
 * The following note also contradicts the fragment rule - no one is going to
 * send fragments with different next header fields.
 *
 * Additional note (p22) [not an error]:
 *	The Next Header values in the Fragment headers of different
 *	fragments of the same original packet may differ.  Only the value
 *	from the Offset zero fragment packet is used for reassembly.
 *		-> should grab it from the first fragment only
 *
 * There is no explicit reason given in the RFC.  Historical reason maybe?
 */
/*
 * Fragment input.
 */
int
frag6_input(struct mbuf **mp, int *offp, int proto)
{
	struct mbuf *m, *t;
	struct ip6_hdr *ip6;
	struct ip6_frag *ip6f;
	struct ip6qhead *head;
	struct ip6q *q6;
	struct ip6asfrag *af6, *ip6af, *af6tmp;
	struct in6_ifaddr *ia6;
	struct ifnet *dstifp, *srcifp;
	uint32_t hashkey[(sizeof(struct in6_addr) * 2 +
		    sizeof(ip6f->ip6f_ident)) / sizeof(uint32_t)];
	uint32_t bucket, *hashkeyp;
	int fragoff, frgpartlen;	/* Must be larger than uint16_t. */
	int nxt, offset, plen;
	uint8_t ecn, ecn0;
	bool only_frag;
#ifdef RSS
	struct ip6_direct_ctx *ip6dc;
	struct m_tag *mtag;
#endif

	m = *mp;
	offset = *offp;

	ip6 = mtod(m, struct ip6_hdr *);
#ifndef PULLDOWN_TEST
	IP6_EXTHDR_CHECK(m, offset, sizeof(struct ip6_frag), IPPROTO_DONE);
	ip6f = (struct ip6_frag *)((caddr_t)ip6 + offset);
#else
	IP6_EXTHDR_GET(ip6f, struct ip6_frag *, m, offset, sizeof(*ip6f));
	if (ip6f == NULL)
		return (IPPROTO_DONE);
#endif

	dstifp = NULL;
	/* Find the destination interface of the packet. */
	ia6 = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */);
	if (ia6 != NULL) {
		dstifp = ia6->ia_ifp;
		ifa_free(&ia6->ia_ifa);
	}

	/* Jumbo payload cannot contain a fragment header. */
	if (ip6->ip6_plen == 0) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, offset);
		in6_ifstat_inc(dstifp, ifs6_reass_fail);
		return (IPPROTO_DONE);
	}

	/*
	 * Check whether fragment packet's fragment length is a
	 * multiple of 8 octets (unless it is the last one).
	 * sizeof(struct ip6_frag) == 8
	 * sizeof(struct ip6_hdr) = 40
	 */
	if ((ip6f->ip6f_offlg & IP6F_MORE_FRAG) &&
	    (((ntohs(ip6->ip6_plen) - offset) & 0x7) != 0)) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
		    offsetof(struct ip6_hdr, ip6_plen));
		in6_ifstat_inc(dstifp, ifs6_reass_fail);
		return (IPPROTO_DONE);
	}

	IP6STAT_INC(ip6s_fragments);
	in6_ifstat_inc(dstifp, ifs6_reass_reqd);

	/* Offset now points to data portion. */
	offset += sizeof(struct ip6_frag);

	/*
	 * Handle "atomic" fragments (offset and m bit set to 0) upfront,
	 * unrelated to any reassembly.  Still need to remove the frag hdr.
	 * See RFC 6946 and section 4.5 of RFC 8200.
	 */
	if ((ip6f->ip6f_offlg & ~IP6F_RESERVED_MASK) == 0) {
		IP6STAT_INC(ip6s_atomicfrags);
		/* XXX-BZ handle correctly. */
		in6_ifstat_inc(dstifp, ifs6_reass_ok);
		*offp = offset;
		m->m_flags |= M_FRAGMENTED;
		return (ip6f->ip6f_nxt);
	}

	/* Get fragment length and discard 0-byte fragments. */
	frgpartlen = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - offset;
	if (frgpartlen == 0) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
		    offsetof(struct ip6_hdr, ip6_plen));
		in6_ifstat_inc(dstifp, ifs6_reass_fail);
		IP6STAT_INC(ip6s_fragdropped);
		return (IPPROTO_DONE);
	}

	/* Store receive network interface pointer for later. */
	srcifp = m->m_pkthdr.rcvif;

	/* Generate a hash value for fragment bucket selection. */
	hashkeyp = hashkey;
	memcpy(hashkeyp, &ip6->ip6_src, sizeof(struct in6_addr));
	hashkeyp += sizeof(struct in6_addr) / sizeof(*hashkeyp);
	memcpy(hashkeyp, &ip6->ip6_dst, sizeof(struct in6_addr));
	hashkeyp += sizeof(struct in6_addr) / sizeof(*hashkeyp);
	*hashkeyp = ip6f->ip6f_ident;
	bucket = jenkins_hash32(hashkey, nitems(hashkey), V_ip6qb_hashseed);
	bucket &= IP6REASS_HMASK;
	IP6QB_LOCK(bucket);
	head = IP6QB_HEAD(bucket);

	/*
	 * Enforce upper bound on number of fragments for the entire system.
	 * If maxfrag is 0, never accept fragments.
	 * If maxfrag is -1, accept all fragments without limitation.
	 */
	if (ip6_maxfrags < 0)
		;
	else if (atomic_load_int(&frag6_nfrags) >= (u_int)ip6_maxfrags)
		goto dropfrag;

	TAILQ_FOREACH(q6, head, ip6q_tq)
		if (ip6f->ip6f_ident == q6->ip6q_ident &&
		    IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, &q6->ip6q_src) &&
		    IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &q6->ip6q_dst)
#ifdef MAC
		    && mac_ip6q_match(m, q6)
#endif
		    )
			break;

	only_frag = false;
	if (q6 == NULL) {

		/* A first fragment to arrive creates a reassembly queue. */
		only_frag = true;

		/*
		 * Enforce upper bound on number of fragmented packets
		 * for which we attempt reassembly;
		 * If maxfragpackets is 0, never accept fragments.
		 * If maxfragpackets is -1, accept all fragments without
		 * limitation.
		 */
		if (V_ip6_maxfragpackets < 0)
			;
		else if (V_ip6qb[bucket].count >= V_ip6_maxfragbucketsize ||
		    atomic_load_int(&V_frag6_nfragpackets) >=
		    (u_int)V_ip6_maxfragpackets)
			goto dropfrag;
		atomic_add_int(&V_frag6_nfragpackets, 1);

		/* Allocate IPv6 fragment packet queue entry. */
		q6 = (struct ip6q *)malloc(sizeof(struct ip6q), M_FRAG6,
		    M_NOWAIT | M_ZERO);
		if (q6 == NULL)
			goto dropfrag;
#ifdef MAC
		if (mac_ip6q_init(q6, M_NOWAIT) != 0) {
			free(q6, M_FRAG6);
			goto dropfrag;
		}
		mac_ip6q_create(m, q6);
#endif

		/* ip6q_nxt will be filled afterwards, from 1st fragment. */
		TAILQ_INIT(&q6->ip6q_frags);
		q6->ip6q_ident = ip6f->ip6f_ident;
		q6->ip6q_ttl = IPV6_FRAGTTL;
		q6->ip6q_src = ip6->ip6_src;
		q6->ip6q_dst = ip6->ip6_dst;
		q6->ip6q_ecn =
		    (ntohl(ip6->ip6_flow) >> 20) & IPTOS_ECN_MASK;
		q6->ip6q_unfrglen = -1;	/* The 1st fragment has not arrived. */

		/* Add the fragmented packet to the bucket. */
		TAILQ_INSERT_HEAD(head, q6, ip6q_tq);
		V_ip6qb[bucket].count++;
	}

	/*
	 * If it is the 1st fragment, record the length of the
	 * unfragmentable part and the next header of the fragment header.
	 */
	fragoff = ntohs(ip6f->ip6f_offlg & IP6F_OFF_MASK);
	if (fragoff == 0) {
		q6->ip6q_unfrglen = offset - sizeof(struct ip6_hdr) -
		    sizeof(struct ip6_frag);
		q6->ip6q_nxt = ip6f->ip6f_nxt;
	}

	/*
	 * Check that the reassembled packet would not exceed 65535 bytes
	 * in size.
	 * If it would exceed, discard the fragment and return an ICMP error.
	 */
	if (q6->ip6q_unfrglen >= 0) {
		/* The 1st fragment has already arrived. */
		if (q6->ip6q_unfrglen + fragoff + frgpartlen > IPV6_MAXPACKET) {
			icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
			    offset - sizeof(struct ip6_frag) +
			    offsetof(struct ip6_frag, ip6f_offlg));
			IP6QB_UNLOCK(bucket);
			return (IPPROTO_DONE);
		}
	} else if (fragoff + frgpartlen > IPV6_MAXPACKET) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
		    offset - sizeof(struct ip6_frag) +
		    offsetof(struct ip6_frag, ip6f_offlg));
		IP6QB_UNLOCK(bucket);
		return (IPPROTO_DONE);
	}

	/*
	 * If it is the first fragment, do the above check for each
	 * fragment already stored in the reassembly queue.
	 */
	if (fragoff == 0) {
		TAILQ_FOREACH_SAFE(af6, &q6->ip6q_frags, ip6af_tq, af6tmp) {

			if (q6->ip6q_unfrglen + af6->ip6af_off +
			    af6->ip6af_frglen > IPV6_MAXPACKET) {
				struct ip6_hdr *ip6err;
				struct mbuf *merr;
				int erroff;

				merr = af6->ip6af_m;
				erroff = af6->ip6af_offset;

				/* Dequeue the fragment. */
				TAILQ_REMOVE(&q6->ip6q_frags, af6, ip6af_tq);
				free(af6, M_FRAG6);

				/* Set a valid receive interface pointer. */
				merr->m_pkthdr.rcvif = srcifp;

				/* Adjust pointer. */
				ip6err = mtod(merr, struct ip6_hdr *);

				/*
				 * Restore source and destination addresses
				 * in the erroneous IPv6 header.
				 */
				ip6err->ip6_src = q6->ip6q_src;
				ip6err->ip6_dst = q6->ip6q_dst;

				icmp6_error(merr, ICMP6_PARAM_PROB,
				    ICMP6_PARAMPROB_HEADER,
				    erroff - sizeof(struct ip6_frag) +
				    offsetof(struct ip6_frag, ip6f_offlg));
			}
		}
	}

	/* Allocate an IPv6 fragment queue entry for this fragmented part. */
	ip6af = (struct ip6asfrag *)malloc(sizeof(struct ip6asfrag), M_FRAG6,
	    M_NOWAIT | M_ZERO);
	if (ip6af == NULL)
		goto dropfrag;
	ip6af->ip6af_mff = (ip6f->ip6f_offlg & IP6F_MORE_FRAG) ? true : false;
	ip6af->ip6af_off = fragoff;
	ip6af->ip6af_frglen = frgpartlen;
	ip6af->ip6af_offset = offset;
	ip6af->ip6af_m = m;

	if (only_frag) {
		/*
		 * Do a manual insert rather than a hard-to-understand cast
		 * to a different type relying on data structure order to work.
		 */
		TAILQ_INSERT_HEAD(&q6->ip6q_frags, ip6af, ip6af_tq);
		goto postinsert;
	}

	/* Do duplicate, condition, and boundary checks. */
	/*
	 * Handle ECN by comparing this segment with the first one;
	 * if CE is set, do not lose CE.
	 * Drop if CE and not-ECT are mixed for the same packet.
	 */
	ecn = (ntohl(ip6->ip6_flow) >> 20) & IPTOS_ECN_MASK;
	ecn0 = q6->ip6q_ecn;
	if (ecn == IPTOS_ECN_CE) {
		if (ecn0 == IPTOS_ECN_NOTECT) {
			free(ip6af, M_FRAG6);
			goto dropfrag;
		}
		if (ecn0 != IPTOS_ECN_CE)
			q6->ip6q_ecn = IPTOS_ECN_CE;
	}
	if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT) {
		free(ip6af, M_FRAG6);
		goto dropfrag;
	}

	/* Find a fragmented part which begins after this one does. */
	TAILQ_FOREACH(af6, &q6->ip6q_frags, ip6af_tq)
		if (af6->ip6af_off > ip6af->ip6af_off)
			break;

	/*
	 * If the incoming fragment overlaps some existing fragments in
	 * the reassembly queue, drop both the new fragment and the
	 * entire reassembly queue.  However, if the new fragment
	 * is an exact duplicate of an existing fragment, only silently
	 * drop the existing fragment and leave the fragmentation queue
	 * unchanged, as allowed by the RFC.  (RFC 8200, 4.5)
	 */
	if (af6 != NULL)
		af6tmp = TAILQ_PREV(af6, ip6fraghead, ip6af_tq);
	else
		af6tmp = TAILQ_LAST(&q6->ip6q_frags, ip6fraghead);
	if (af6tmp != NULL) {
		if (af6tmp->ip6af_off + af6tmp->ip6af_frglen -
		    ip6af->ip6af_off > 0) {
			free(ip6af, M_FRAG6);
			goto dropfrag;
		}
	}
	if (af6 != NULL) {
		if (ip6af->ip6af_off + ip6af->ip6af_frglen -
		    af6->ip6af_off > 0) {
			free(ip6af, M_FRAG6);
			goto dropfrag;
		}
	}

#ifdef MAC
	mac_ip6q_update(m, q6);
#endif

	/*
	 * Stick new segment in its place; check for complete reassembly.
	 * If not complete, check fragment limit.  Move to front of packet
	 * queue, as we are the most recently active fragmented packet.
	 */
	if (af6 != NULL)
		TAILQ_INSERT_BEFORE(af6, ip6af, ip6af_tq);
	else
		TAILQ_INSERT_TAIL(&q6->ip6q_frags, ip6af, ip6af_tq);
postinsert:
	atomic_add_int(&frag6_nfrags, 1);
	q6->ip6q_nfrag++;

	plen = 0;
	TAILQ_FOREACH(af6, &q6->ip6q_frags, ip6af_tq) {
		if (af6->ip6af_off != plen) {
			if (q6->ip6q_nfrag > V_ip6_maxfragsperpacket) {
				IP6STAT_ADD(ip6s_fragdropped, q6->ip6q_nfrag);
				frag6_freef(q6, bucket);
			}
			IP6QB_UNLOCK(bucket);
			return (IPPROTO_DONE);
		}
		plen += af6->ip6af_frglen;
	}
	af6 = TAILQ_LAST(&q6->ip6q_frags, ip6fraghead);
	if (af6->ip6af_mff) {
		if (q6->ip6q_nfrag > V_ip6_maxfragsperpacket) {
			IP6STAT_ADD(ip6s_fragdropped, q6->ip6q_nfrag);
			frag6_freef(q6, bucket);
		}
		IP6QB_UNLOCK(bucket);
		return (IPPROTO_DONE);
	}

	/* Reassembly is complete; concatenate fragments. */
	ip6af = TAILQ_FIRST(&q6->ip6q_frags);
	t = m = ip6af->ip6af_m;
	TAILQ_REMOVE(&q6->ip6q_frags, ip6af, ip6af_tq);
	while ((af6 = TAILQ_FIRST(&q6->ip6q_frags)) != NULL) {
		m->m_pkthdr.csum_flags &=
		    af6->ip6af_m->m_pkthdr.csum_flags;
		m->m_pkthdr.csum_data +=
		    af6->ip6af_m->m_pkthdr.csum_data;

		TAILQ_REMOVE(&q6->ip6q_frags, af6, ip6af_tq);
		t = m_last(t);
		m_adj(af6->ip6af_m, af6->ip6af_offset);
		m_demote_pkthdr(af6->ip6af_m);
		m_cat(t, af6->ip6af_m);
		free(af6, M_FRAG6);
	}

	while (m->m_pkthdr.csum_data & 0xffff0000)
		m->m_pkthdr.csum_data = (m->m_pkthdr.csum_data & 0xffff) +
		    (m->m_pkthdr.csum_data >> 16);

	/* Adjust offset to point where the original next header starts. */
	offset = ip6af->ip6af_offset - sizeof(struct ip6_frag);
	free(ip6af, M_FRAG6);
	ip6 = mtod(m, struct ip6_hdr *);
	ip6->ip6_plen = htons((u_short)plen + offset - sizeof(struct ip6_hdr));
	if (q6->ip6q_ecn == IPTOS_ECN_CE)
		ip6->ip6_flow |= htonl(IPTOS_ECN_CE << 20);
	nxt = q6->ip6q_nxt;

	TAILQ_REMOVE(head, q6, ip6q_tq);
	V_ip6qb[bucket].count--;
	atomic_subtract_int(&frag6_nfrags, q6->ip6q_nfrag);

	if (ip6_deletefraghdr(m, offset, M_NOWAIT) != 0) {
#ifdef MAC
		mac_ip6q_destroy(q6);
#endif
		free(q6, M_FRAG6);
		atomic_subtract_int(&V_frag6_nfragpackets, 1);

		goto dropfrag;
	}

	/* Set nxt(-hdr field value) to the original value. */
	m_copyback(m, ip6_get_prevhdr(m, offset), sizeof(uint8_t),
	    (caddr_t)&nxt);

#ifdef MAC
	mac_ip6q_reassemble(q6, m);
	mac_ip6q_destroy(q6);
#endif
	free(q6, M_FRAG6);
	atomic_subtract_int(&V_frag6_nfragpackets, 1);

	if (m->m_flags & M_PKTHDR) {	/* Isn't it always true? */

		plen = 0;
		for (t = m; t; t = t->m_next)
			plen += t->m_len;
		m->m_pkthdr.len = plen;
		/* Set a valid receive interface pointer. */
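		/*
		 * srcifp was saved from this fragment early in frag6_input();
		 * the first queued fragment may have had its rcvif cleared by
		 * frag6_cleanup() if its interface has since departed.
		 */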
		m->m_pkthdr.rcvif = srcifp;
	}

#ifdef RSS
	mtag = m_tag_alloc(MTAG_ABI_IPV6, IPV6_TAG_DIRECT, sizeof(*ip6dc),
	    M_NOWAIT);
	if (mtag == NULL)
		goto dropfrag;

	ip6dc = (struct ip6_direct_ctx *)(mtag + 1);
	ip6dc->ip6dc_nxt = nxt;
	ip6dc->ip6dc_off = offset;

	m_tag_prepend(m, mtag);
#endif

	IP6QB_UNLOCK(bucket);
	IP6STAT_INC(ip6s_reassembled);
	in6_ifstat_inc(dstifp, ifs6_reass_ok);

#ifdef RSS
	/* Queue/dispatch for reprocessing. */
	netisr_dispatch(NETISR_IPV6_DIRECT, m);
	return (IPPROTO_DONE);
#endif

	/* Tell launch routine the next header. */
	*mp = m;
	*offp = offset;

	return (nxt);

dropfrag:
	IP6QB_UNLOCK(bucket);
	in6_ifstat_inc(dstifp, ifs6_reass_fail);
	IP6STAT_INC(ip6s_fragdropped);
	m_freem(m);
	return (IPPROTO_DONE);
}

/*
 * IPv6 reassembling timer processing;
 * if a timer expires on a reassembly queue, discard it.
 */
void
frag6_slowtimo(void)
{
	VNET_ITERATOR_DECL(vnet_iter);
	struct ip6qhead *head;
	struct ip6q *q6, *q6tmp;
	uint32_t bucket;

	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) {
			IP6QB_LOCK(bucket);
			head = IP6QB_HEAD(bucket);
			TAILQ_FOREACH_SAFE(q6, head, ip6q_tq, q6tmp)
				if (--q6->ip6q_ttl == 0) {
					IP6STAT_ADD(ip6s_fragtimeout,
					    q6->ip6q_nfrag);
					/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
					frag6_freef(q6, bucket);
				}
			/*
			 * If we are over the maximum number of fragments
			 * (due to the limit being lowered), drain off
			 * enough to get down to the new limit.
			 * Note that we drain all reassembly queues if
			 * maxfragpackets is 0 (fragmentation is disabled),
			 * and do not enforce a limit when maxfragpackets
			 * is negative.
			 */
			while ((V_ip6_maxfragpackets == 0 ||
			    (V_ip6_maxfragpackets > 0 &&
			    V_ip6qb[bucket].count > V_ip6_maxfragbucketsize)) &&
			    (q6 = TAILQ_LAST(head, ip6qhead)) != NULL) {
				IP6STAT_ADD(ip6s_fragoverflow, q6->ip6q_nfrag);
				/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
				frag6_freef(q6, bucket);
			}
			IP6QB_UNLOCK(bucket);
		}
		/*
		 * If we are still over the maximum number of fragmented
		 * packets, drain off enough to get down to the new limit.
		 */
		bucket = 0;
		while (V_ip6_maxfragpackets >= 0 &&
		    atomic_load_int(&V_frag6_nfragpackets) >
		    (u_int)V_ip6_maxfragpackets) {
			IP6QB_LOCK(bucket);
			q6 = TAILQ_LAST(IP6QB_HEAD(bucket), ip6qhead);
			if (q6 != NULL) {
				IP6STAT_ADD(ip6s_fragoverflow, q6->ip6q_nfrag);
				/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
				frag6_freef(q6, bucket);
			}
			IP6QB_UNLOCK(bucket);
			bucket = (bucket + 1) % IP6REASS_NHASH;
		}
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
}

/*
 * Eventhandler to adjust limits in case nmbclusters changes.
 */
static void
frag6_change(void *tag)
{
	VNET_ITERATOR_DECL(vnet_iter);

	ip6_maxfrags = IP6_MAXFRAGS;
	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		V_ip6_maxfragpackets = IP6_MAXFRAGPACKETS;
		frag6_set_bucketsize();
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
}

/*
 * Initialise reassembly queue and fragment identifier.
 */
void
frag6_init(void)
{
	uint32_t bucket;

	V_ip6_maxfragpackets = IP6_MAXFRAGPACKETS;
	frag6_set_bucketsize();
	for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) {
		TAILQ_INIT(IP6QB_HEAD(bucket));
		mtx_init(&V_ip6qb[bucket].lock, "ip6qb", NULL, MTX_DEF);
		V_ip6qb[bucket].count = 0;
	}
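	/*
	 * The bucket hash seed is chosen at random per VNET; frag6_input()
	 * mixes it into the Jenkins hash over (src, dst, ident) so that the
	 * bucket a fragment lands in is not easily predicted by a remote
	 * sender.
	 */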
	V_ip6qb_hashseed = arc4random();
	V_ip6_maxfragsperpacket = 64;
#ifdef VIMAGE
	V_frag6_on = true;
#endif
	if (!IS_DEFAULT_VNET(curvnet))
		return;

	ip6_maxfrags = IP6_MAXFRAGS;
	EVENTHANDLER_REGISTER(nmbclusters_change,
	    frag6_change, NULL, EVENTHANDLER_PRI_ANY);
}

/*
 * Drain off all datagram fragments.
 */
static void
frag6_drain_one(void)
{
	struct ip6q *q6;
	uint32_t bucket;

	for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) {
		IP6QB_LOCK(bucket);
		while ((q6 = TAILQ_FIRST(IP6QB_HEAD(bucket))) != NULL) {
			IP6STAT_INC(ip6s_fragdropped);
			/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
			frag6_freef(q6, bucket);
		}
		IP6QB_UNLOCK(bucket);
	}
}

void
frag6_drain(void)
{
	VNET_ITERATOR_DECL(vnet_iter);

	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		frag6_drain_one();
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
}

#ifdef VIMAGE
/*
 * Clear up IPv6 reassembly structures.
 */
void
frag6_destroy(void)
{
	uint32_t bucket;

	frag6_drain_one();
	V_frag6_on = false;
	for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) {
		KASSERT(V_ip6qb[bucket].count == 0,
		    ("%s: V_ip6qb[%d] (%p) count not 0 (%d)", __func__,
		    bucket, &V_ip6qb[bucket], V_ip6qb[bucket].count));
		mtx_destroy(&V_ip6qb[bucket].lock);
	}
}
#endif