1caf43b02SWarner Losh /*-
251369649SPedro F. Giffuni * SPDX-License-Identifier: BSD-3-Clause
351369649SPedro F. Giffuni *
482cd038dSYoshinobu Inoue * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
582cd038dSYoshinobu Inoue * All rights reserved.
621f08a07SBjoern A. Zeeb * Copyright (c) 2019 Netflix, Inc.
782cd038dSYoshinobu Inoue *
882cd038dSYoshinobu Inoue * Redistribution and use in source and binary forms, with or without
982cd038dSYoshinobu Inoue * modification, are permitted provided that the following conditions
1082cd038dSYoshinobu Inoue * are met:
1182cd038dSYoshinobu Inoue * 1. Redistributions of source code must retain the above copyright
1282cd038dSYoshinobu Inoue * notice, this list of conditions and the following disclaimer.
1382cd038dSYoshinobu Inoue * 2. Redistributions in binary form must reproduce the above copyright
1482cd038dSYoshinobu Inoue * notice, this list of conditions and the following disclaimer in the
1582cd038dSYoshinobu Inoue * documentation and/or other materials provided with the distribution.
1682cd038dSYoshinobu Inoue * 3. Neither the name of the project nor the names of its contributors
1782cd038dSYoshinobu Inoue * may be used to endorse or promote products derived from this software
1882cd038dSYoshinobu Inoue * without specific prior written permission.
1982cd038dSYoshinobu Inoue *
2082cd038dSYoshinobu Inoue * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
2182cd038dSYoshinobu Inoue * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
2282cd038dSYoshinobu Inoue * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
2382cd038dSYoshinobu Inoue * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
2482cd038dSYoshinobu Inoue * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
2582cd038dSYoshinobu Inoue * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
2682cd038dSYoshinobu Inoue * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
2782cd038dSYoshinobu Inoue * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
2882cd038dSYoshinobu Inoue * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
2982cd038dSYoshinobu Inoue * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
3082cd038dSYoshinobu Inoue * SUCH DAMAGE.
31b48287a3SDavid E. O'Brien *
32b48287a3SDavid E. O'Brien * $KAME: frag6.c,v 1.33 2002/01/07 11:34:48 kjc Exp $
3382cd038dSYoshinobu Inoue */
3482cd038dSYoshinobu Inoue
35b48287a3SDavid E. O'Brien #include <sys/cdefs.h>
36aaa46574SAdrian Chadd #include "opt_rss.h"
37aaa46574SAdrian Chadd
3882cd038dSYoshinobu Inoue #include <sys/param.h>
39f349c821SBjoern A. Zeeb #include <sys/systm.h>
401a3044faSBjoern A. Zeeb #include <sys/domain.h>
411a3044faSBjoern A. Zeeb #include <sys/eventhandler.h>
4280d7a853SJonathan T. Looney #include <sys/hash.h>
431a3044faSBjoern A. Zeeb #include <sys/kernel.h>
4482cd038dSYoshinobu Inoue #include <sys/malloc.h>
4582cd038dSYoshinobu Inoue #include <sys/mbuf.h>
4682cd038dSYoshinobu Inoue #include <sys/protosw.h>
4721f08a07SBjoern A. Zeeb #include <sys/queue.h>
4882cd038dSYoshinobu Inoue #include <sys/socket.h>
49757cb678SBjoern A. Zeeb #include <sys/sysctl.h>
5082cd038dSYoshinobu Inoue #include <sys/syslog.h>
5182cd038dSYoshinobu Inoue
5282cd038dSYoshinobu Inoue #include <net/if.h>
5376039bc8SGleb Smirnoff #include <net/if_var.h>
543d0d5b21SJustin Hibbits #include <net/if_private.h>
55aaa46574SAdrian Chadd #include <net/netisr.h>
5682cd038dSYoshinobu Inoue #include <net/route.h>
57eddfbb76SRobert Watson #include <net/vnet.h>
5882cd038dSYoshinobu Inoue
5982cd038dSYoshinobu Inoue #include <netinet/in.h>
6082cd038dSYoshinobu Inoue #include <netinet/in_var.h>
61686cdd19SJun-ichiro itojun Hagino #include <netinet/ip6.h>
6282cd038dSYoshinobu Inoue #include <netinet6/ip6_var.h>
63686cdd19SJun-ichiro itojun Hagino #include <netinet/icmp6.h>
6423d374aaSBjoern A. Zeeb #include <netinet/in_systm.h> /* For ECN definitions. */
6523d374aaSBjoern A. Zeeb #include <netinet/ip.h> /* For ECN definitions. */
6682cd038dSYoshinobu Inoue
671a3044faSBjoern A. Zeeb #ifdef MAC
684b908c8bSRobert Watson #include <security/mac/mac_framework.h>
691a3044faSBjoern A. Zeeb #endif
704b908c8bSRobert Watson
71f1664f32SBjoern A. Zeeb /*
72f1664f32SBjoern A. Zeeb * A "big picture" of how IPv6 fragment queues are all linked together.
73f1664f32SBjoern A. Zeeb *
74f1664f32SBjoern A. Zeeb * struct ip6qbucket ip6qb[...]; hashed buckets
75f1664f32SBjoern A. Zeeb * ||||||||
76f1664f32SBjoern A. Zeeb * |
77f1664f32SBjoern A. Zeeb * +--- TAILQ(struct ip6q, packets) *q6; tailq entries holding
78f1664f32SBjoern A. Zeeb * |||||||| fragmented packets
79f1664f32SBjoern A. Zeeb * | (1 per original packet)
80f1664f32SBjoern A. Zeeb * |
81f1664f32SBjoern A. Zeeb * +--- TAILQ(struct ip6asfrag, ip6q_frags) *af6; tailq entries of IPv6
82f1664f32SBjoern A. Zeeb * | *ip6af;fragment packets
83f1664f32SBjoern A. Zeeb * | for one original packet
84f1664f32SBjoern A. Zeeb * + *mbuf
85f1664f32SBjoern A. Zeeb */
86f1664f32SBjoern A. Zeeb
8723d374aaSBjoern A. Zeeb /* Reassembly headers are stored in hash buckets. */
882ceeacbeSJonathan T. Looney #define IP6REASS_NHASH_LOG2 10
8980d7a853SJonathan T. Looney #define IP6REASS_NHASH (1 << IP6REASS_NHASH_LOG2)
9080d7a853SJonathan T. Looney #define IP6REASS_HMASK (IP6REASS_NHASH - 1)
9180d7a853SJonathan T. Looney
TAILQ_HEAD(ip6qhead, ip6q);
/*
 * One hash bucket of reassembly queues; holds a tailq of struct ip6q
 * (one per original packet being reassembled) under a per-bucket mutex.
 */
struct ip6qbucket {
	struct ip6qhead	packets;	/* Reassembly queues in this bucket. */
	struct mtx	lock;		/* Protects packets and count. */
	int		count;		/* Number of queues on the packets list. */
};
9880d7a853SJonathan T. Looney
/*
 * Per-fragment bookkeeping: one entry for each received fragment of an
 * original packet, linked on the owning ip6q's ip6q_frags tailq.
 */
struct ip6asfrag {
	TAILQ_ENTRY(ip6asfrag) ip6af_tq;	/* Link on ip6q_frags list. */
	struct mbuf	*ip6af_m;		/* Mbuf holding this fragment. */
	int		ip6af_offset;		/* Offset in ip6af_m to next header. */
	int		ip6af_frglen;		/* Fragmentable part length. */
	int		ip6af_off;		/* Fragment offset. */
	bool		ip6af_mff;		/* More fragment bit in frag off. */
};
1071540a98eSBjoern A. Zeeb
108487a161cSBjoern A. Zeeb static MALLOC_DEFINE(M_FRAG6, "frag6", "IPv6 fragment reassembly header");
109487a161cSBjoern A. Zeeb
11067a10c46SBjoern A. Zeeb #ifdef VIMAGE
11167a10c46SBjoern A. Zeeb /* A flag to indicate if IPv6 fragmentation is initialized. */
11267a10c46SBjoern A. Zeeb VNET_DEFINE_STATIC(bool, frag6_on);
11367a10c46SBjoern A. Zeeb #define V_frag6_on VNET(frag6_on)
11467a10c46SBjoern A. Zeeb #endif
11567a10c46SBjoern A. Zeeb
116757cb678SBjoern A. Zeeb /* System wide (global) maximum and count of packets in reassembly queues. */
117757cb678SBjoern A. Zeeb static int ip6_maxfrags;
118c17ae180SMateusz Guzik static u_int __exclusive_cache_line frag6_nfrags;
119757cb678SBjoern A. Zeeb
120757cb678SBjoern A. Zeeb /* Maximum and current packets in per-VNET reassembly queue. */
121757cb678SBjoern A. Zeeb VNET_DEFINE_STATIC(int, ip6_maxfragpackets);
12280d7a853SJonathan T. Looney VNET_DEFINE_STATIC(volatile u_int, frag6_nfragpackets);
123757cb678SBjoern A. Zeeb #define V_ip6_maxfragpackets VNET(ip6_maxfragpackets)
124757cb678SBjoern A. Zeeb #define V_frag6_nfragpackets VNET(frag6_nfragpackets)
125757cb678SBjoern A. Zeeb
126e32221a1SAlexander V. Chernikov /* Maximum per-VNET reassembly timeout (milliseconds) */
127e32221a1SAlexander V. Chernikov VNET_DEFINE_STATIC(u_int, ip6_fraglifetime) = IPV6_DEFFRAGTTL;
128e32221a1SAlexander V. Chernikov #define V_ip6_fraglifetime VNET(ip6_fraglifetime)
129e32221a1SAlexander V. Chernikov
130757cb678SBjoern A. Zeeb /* Maximum per-VNET reassembly queues per bucket and fragments per packet. */
131757cb678SBjoern A. Zeeb VNET_DEFINE_STATIC(int, ip6_maxfragbucketsize);
132757cb678SBjoern A. Zeeb VNET_DEFINE_STATIC(int, ip6_maxfragsperpacket);
133757cb678SBjoern A. Zeeb #define V_ip6_maxfragbucketsize VNET(ip6_maxfragbucketsize)
134757cb678SBjoern A. Zeeb #define V_ip6_maxfragsperpacket VNET(ip6_maxfragsperpacket)
135757cb678SBjoern A. Zeeb
136757cb678SBjoern A. Zeeb /* Per-VNET reassembly queue buckets. */
1379cb1a47aSBjoern A. Zeeb VNET_DEFINE_STATIC(struct ip6qbucket, ip6qb[IP6REASS_NHASH]);
1389cb1a47aSBjoern A. Zeeb VNET_DEFINE_STATIC(uint32_t, ip6qb_hashseed);
1399cb1a47aSBjoern A. Zeeb #define V_ip6qb VNET(ip6qb)
1409cb1a47aSBjoern A. Zeeb #define V_ip6qb_hashseed VNET(ip6qb_hashseed)
14182cd038dSYoshinobu Inoue
1429cb1a47aSBjoern A. Zeeb #define IP6QB_LOCK(_b) mtx_lock(&V_ip6qb[(_b)].lock)
1439cb1a47aSBjoern A. Zeeb #define IP6QB_TRYLOCK(_b) mtx_trylock(&V_ip6qb[(_b)].lock)
1449cb1a47aSBjoern A. Zeeb #define IP6QB_LOCK_ASSERT(_b) mtx_assert(&V_ip6qb[(_b)].lock, MA_OWNED)
1459cb1a47aSBjoern A. Zeeb #define IP6QB_UNLOCK(_b) mtx_unlock(&V_ip6qb[(_b)].lock)
14621f08a07SBjoern A. Zeeb #define IP6QB_HEAD(_b) (&V_ip6qb[(_b)].packets)
1479888c401SHajimu UMEMOTO
14882cd038dSYoshinobu Inoue /*
1492ceeacbeSJonathan T. Looney * By default, limit the number of IP6 fragments across all reassembly
1502ceeacbeSJonathan T. Looney * queues to 1/32 of the total number of mbuf clusters.
1512ceeacbeSJonathan T. Looney *
1522ceeacbeSJonathan T. Looney * Limit the total number of reassembly queues per VNET to the
1532ceeacbeSJonathan T. Looney * IP6 fragment limit, but ensure the limit will not allow any bucket
1542ceeacbeSJonathan T. Looney * to grow above 100 items. (The bucket limit is
1552ceeacbeSJonathan T. Looney * IP_MAXFRAGPACKETS / (IPREASS_NHASH / 2), so the 50 is the correct
1562ceeacbeSJonathan T. Looney * multiplier to reach a 100-item limit.)
1572ceeacbeSJonathan T. Looney * The 100-item limit was chosen as brief testing seems to show that
1582ceeacbeSJonathan T. Looney * this produces "reasonable" performance on some subset of systems
1592ceeacbeSJonathan T. Looney * under DoS attack.
1602ceeacbeSJonathan T. Looney */
1612ceeacbeSJonathan T. Looney #define IP6_MAXFRAGS (nmbclusters / 32)
1622ceeacbeSJonathan T. Looney #define IP6_MAXFRAGPACKETS (imin(IP6_MAXFRAGS, IP6REASS_NHASH * 50))
1632ceeacbeSJonathan T. Looney
164e32221a1SAlexander V. Chernikov /* Interval between periodic reassembly queue inspections */
165e32221a1SAlexander V. Chernikov #define IP6_CALLOUT_INTERVAL_MS 500
166e32221a1SAlexander V. Chernikov
1672ceeacbeSJonathan T. Looney /*
168757cb678SBjoern A. Zeeb * Sysctls and helper function.
16982cd038dSYoshinobu Inoue */
170757cb678SBjoern A. Zeeb SYSCTL_DECL(_net_inet6_ip6);
171757cb678SBjoern A. Zeeb
17265456706SBjoern A. Zeeb SYSCTL_UINT(_net_inet6_ip6, OID_AUTO, frag6_nfrags,
173c17ae180SMateusz Guzik CTLFLAG_RD, &frag6_nfrags, 0,
17465456706SBjoern A. Zeeb "Global number of IPv6 fragments across all reassembly queues.");
17565456706SBjoern A. Zeeb
176757cb678SBjoern A. Zeeb static void
frag6_set_bucketsize(void)17709b361c7SBjoern A. Zeeb frag6_set_bucketsize(void)
1781e9f3b73SJonathan T. Looney {
1791e9f3b73SJonathan T. Looney int i;
1801e9f3b73SJonathan T. Looney
1811e9f3b73SJonathan T. Looney if ((i = V_ip6_maxfragpackets) > 0)
1821e9f3b73SJonathan T. Looney V_ip6_maxfragbucketsize = imax(i / (IP6REASS_NHASH / 2), 1);
1831e9f3b73SJonathan T. Looney }
1841e9f3b73SJonathan T. Looney
185757cb678SBjoern A. Zeeb SYSCTL_INT(_net_inet6_ip6, IPV6CTL_MAXFRAGS, maxfrags,
186757cb678SBjoern A. Zeeb CTLFLAG_RW, &ip6_maxfrags, 0,
187757cb678SBjoern A. Zeeb "Maximum allowed number of outstanding IPv6 packet fragments. "
1883cf59750SGordon Bergling "A value of 0 means no fragmented packets will be accepted, while "
189757cb678SBjoern A. Zeeb "a value of -1 means no limit");
190757cb678SBjoern A. Zeeb
191757cb678SBjoern A. Zeeb static int
sysctl_ip6_maxfragpackets(SYSCTL_HANDLER_ARGS)192757cb678SBjoern A. Zeeb sysctl_ip6_maxfragpackets(SYSCTL_HANDLER_ARGS)
193757cb678SBjoern A. Zeeb {
194757cb678SBjoern A. Zeeb int error, val;
195757cb678SBjoern A. Zeeb
196757cb678SBjoern A. Zeeb val = V_ip6_maxfragpackets;
197757cb678SBjoern A. Zeeb error = sysctl_handle_int(oidp, &val, 0, req);
198757cb678SBjoern A. Zeeb if (error != 0 || !req->newptr)
199757cb678SBjoern A. Zeeb return (error);
200757cb678SBjoern A. Zeeb V_ip6_maxfragpackets = val;
201757cb678SBjoern A. Zeeb frag6_set_bucketsize();
202757cb678SBjoern A. Zeeb return (0);
203757cb678SBjoern A. Zeeb }
204757cb678SBjoern A. Zeeb SYSCTL_PROC(_net_inet6_ip6, IPV6CTL_MAXFRAGPACKETS, maxfragpackets,
2057029da5cSPawel Biernacki CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2067029da5cSPawel Biernacki NULL, 0, sysctl_ip6_maxfragpackets, "I",
207757cb678SBjoern A. Zeeb "Default maximum number of outstanding fragmented IPv6 packets. "
208757cb678SBjoern A. Zeeb "A value of 0 means no fragmented packets will be accepted, while a "
209757cb678SBjoern A. Zeeb "a value of -1 means no limit");
21053707abdSBjoern A. Zeeb SYSCTL_UINT(_net_inet6_ip6, OID_AUTO, frag6_nfragpackets,
21153707abdSBjoern A. Zeeb CTLFLAG_VNET | CTLFLAG_RD,
21253707abdSBjoern A. Zeeb __DEVOLATILE(u_int *, &VNET_NAME(frag6_nfragpackets)), 0,
21353707abdSBjoern A. Zeeb "Per-VNET number of IPv6 fragments across all reassembly queues.");
214757cb678SBjoern A. Zeeb SYSCTL_INT(_net_inet6_ip6, IPV6CTL_MAXFRAGSPERPACKET, maxfragsperpacket,
215757cb678SBjoern A. Zeeb CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_maxfragsperpacket), 0,
216757cb678SBjoern A. Zeeb "Maximum allowed number of fragments per packet");
217757cb678SBjoern A. Zeeb SYSCTL_INT(_net_inet6_ip6, IPV6CTL_MAXFRAGBUCKETSIZE, maxfragbucketsize,
218757cb678SBjoern A. Zeeb CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_maxfragbucketsize), 0,
219757cb678SBjoern A. Zeeb "Maximum number of reassembly queues per hash bucket");
220757cb678SBjoern A. Zeeb
221e32221a1SAlexander V. Chernikov static int
frag6_milli_to_callout_ticks(int ms)222e32221a1SAlexander V. Chernikov frag6_milli_to_callout_ticks(int ms)
223e32221a1SAlexander V. Chernikov {
224e32221a1SAlexander V. Chernikov return (ms / IP6_CALLOUT_INTERVAL_MS);
225e32221a1SAlexander V. Chernikov }
226e32221a1SAlexander V. Chernikov
227e32221a1SAlexander V. Chernikov static int
frag6_callout_ticks_to_milli(int ms)228e32221a1SAlexander V. Chernikov frag6_callout_ticks_to_milli(int ms)
229e32221a1SAlexander V. Chernikov {
230e32221a1SAlexander V. Chernikov return (ms * IP6_CALLOUT_INTERVAL_MS);
231e32221a1SAlexander V. Chernikov }
232e32221a1SAlexander V. Chernikov
233e32221a1SAlexander V. Chernikov _Static_assert(sizeof(((struct ip6q *)NULL)->ip6q_ttl) >= 2,
234e32221a1SAlexander V. Chernikov "ip6q_ttl field is not large enough");
235e32221a1SAlexander V. Chernikov
/*
 * Sysctl handler for net.inet6.ip6.fraglifetime_ms.  Validates and
 * clamps the requested fragment lifetime before storing it in the
 * per-VNET V_ip6_fraglifetime.
 */
static int
sysctl_ip6_fraglifetime(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = V_ip6_fraglifetime;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || !req->newptr)
		return (error);
	/* Non-positive values fall back to the default lifetime. */
	if (val <= 0)
		val = IPV6_DEFFRAGTTL;

	/*
	 * Clamp so the resulting tick count fits the ip6q_ttl field
	 * (at least 16 bits wide per the _Static_assert above).
	 */
	if (frag6_milli_to_callout_ticks(val) >= 65536)
		val = frag6_callout_ticks_to_milli(65535);
#ifdef VIMAGE
	/* A non-default vnet may not exceed the host (vnet0) lifetime. */
	if (!IS_DEFAULT_VNET(curvnet)) {
		CURVNET_SET(vnet0);
		int host_val = V_ip6_fraglifetime;
		CURVNET_RESTORE();

		if (val > host_val)
			val = host_val;
	}
#endif
	V_ip6_fraglifetime = val;
	return (0);
}
263e32221a1SAlexander V. Chernikov SYSCTL_PROC(_net_inet6_ip6, OID_AUTO, fraglifetime_ms,
264e32221a1SAlexander V. Chernikov CTLFLAG_VNET | CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
265e32221a1SAlexander V. Chernikov NULL, 0, sysctl_ip6_fraglifetime, "I",
266e32221a1SAlexander V. Chernikov "Fragment lifetime, in milliseconds");
267e32221a1SAlexander V. Chernikov
268757cb678SBjoern A. Zeeb /*
269c00464a2SBjoern A. Zeeb * Remove the IPv6 fragmentation header from the mbuf.
270c00464a2SBjoern A. Zeeb */
271c00464a2SBjoern A. Zeeb int
ip6_deletefraghdr(struct mbuf * m,int offset,int wait __unused)272a61b5cfbSBjoern A. Zeeb ip6_deletefraghdr(struct mbuf *m, int offset, int wait __unused)
273c00464a2SBjoern A. Zeeb {
2745778b399SBjoern A. Zeeb struct ip6_hdr *ip6;
275a61b5cfbSBjoern A. Zeeb
276a61b5cfbSBjoern A. Zeeb KASSERT(m->m_len >= offset + sizeof(struct ip6_frag),
277a61b5cfbSBjoern A. Zeeb ("%s: ext headers not contigous in mbuf %p m_len %d >= "
278a61b5cfbSBjoern A. Zeeb "offset %d + %zu\n", __func__, m, m->m_len, offset,
279a61b5cfbSBjoern A. Zeeb sizeof(struct ip6_frag)));
280c00464a2SBjoern A. Zeeb
281c00464a2SBjoern A. Zeeb /* Delete frag6 header. */
2825778b399SBjoern A. Zeeb ip6 = mtod(m, struct ip6_hdr *);
283a61b5cfbSBjoern A. Zeeb bcopy(ip6, (char *)ip6 + sizeof(struct ip6_frag), offset);
284c00464a2SBjoern A. Zeeb m->m_data += sizeof(struct ip6_frag);
285c00464a2SBjoern A. Zeeb m->m_len -= sizeof(struct ip6_frag);
286c00464a2SBjoern A. Zeeb m->m_flags |= M_FRAGMENTED;
287a61b5cfbSBjoern A. Zeeb
288c00464a2SBjoern A. Zeeb return (0);
289c00464a2SBjoern A. Zeeb }
290c00464a2SBjoern A. Zeeb
/*
 * Unlink a reassembly queue from its hash bucket and free it, updating
 * the bucket's queue count and the per-VNET packet counter.  The
 * caller must hold the bucket lock and must have already accounted for
 * the queue's fragments (frag6_nfrags).
 */
static void
frag6_rmqueue(struct ip6q *q6, uint32_t bucket)
{
	IP6QB_LOCK_ASSERT(bucket);

	TAILQ_REMOVE(IP6QB_HEAD(bucket), q6, ip6q_tq);
	V_ip6qb[bucket].count--;
#ifdef MAC
	mac_ip6q_destroy(q6);
#endif
	free(q6, M_FRAG6);
	atomic_subtract_int(&V_frag6_nfragpackets, 1);
}
3040736a380SMark Johnston
/*
 * Free a fragment reassembly header and all associated datagrams.
 * The first fragment (offset 0) whose receive interface is still valid
 * is returned to the sender as an ICMPv6 "time exceeded in reassembly"
 * error; all other fragments are simply freed.  The caller must hold
 * the bucket lock.
 */
static void
frag6_freef(struct ip6q *q6, uint32_t bucket)
{
	struct ip6_hdr *ip6;
	struct ip6asfrag *af6;
	struct mbuf *m;

	IP6QB_LOCK_ASSERT(bucket);

	while ((af6 = TAILQ_FIRST(&q6->ip6q_frags)) != NULL) {
		m = af6->ip6af_m;
		TAILQ_REMOVE(&q6->ip6q_frags, af6, ip6af_tq);

		/*
		 * Return ICMP time exceeded error for the 1st fragment.
		 * Just free other fragments.
		 */
		if (af6->ip6af_off == 0 && m->m_pkthdr.rcvif != NULL) {
			/* Adjust pointer. */
			ip6 = mtod(m, struct ip6_hdr *);

			/* Restore source and destination addresses. */
			ip6->ip6_src = q6->ip6q_src;
			ip6->ip6_dst = q6->ip6q_dst;

			icmp6_error(m, ICMP6_TIME_EXCEEDED,
			    ICMP6_TIME_EXCEED_REASSEMBLY, 0);
		} else
			m_freem(m);

		free(af6, M_FRAG6);
	}

	/* Drop the fragment count, then unlink and free the queue itself. */
	atomic_subtract_int(&frag6_nfrags, q6->ip6q_nfrag);
	frag6_rmqueue(q6, bucket);
}
34482cd038dSYoshinobu Inoue
/*
 * Interface-departure handler: walk all reassembly queues and clear
 * the receive-interface pointer of any queued fragment received on the
 * departing interface, so later use of the pointer (e.g. the ICMPv6
 * error path in frag6_freef()) does not dereference a freed ifnet.
 * The fragments themselves are not freed here.
 */
static void
frag6_cleanup(void *arg __unused, struct ifnet *ifp)
{
	struct ip6qhead *head;
	struct ip6q *q6;
	struct ip6asfrag *af6;
	uint32_t bucket;

	KASSERT(ifp != NULL, ("%s: ifp is NULL", __func__));

	CURVNET_SET_QUIET(ifp->if_vnet);
#ifdef VIMAGE
	/*
	 * Skip processing if IPv6 reassembly is not initialised or
	 * torn down by frag6_destroy().
	 */
	if (!V_frag6_on) {
		CURVNET_RESTORE();
		return;
	}
#endif

	for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) {
		IP6QB_LOCK(bucket);
		head = IP6QB_HEAD(bucket);
		/* Scan fragment list. */
		TAILQ_FOREACH(q6, head, ip6q_tq) {
			TAILQ_FOREACH(af6, &q6->ip6q_frags, ip6af_tq) {
				/* Clear no longer valid rcvif pointer. */
				if (af6->ip6af_m->m_pkthdr.rcvif == ifp)
					af6->ip6af_m->m_pkthdr.rcvif = NULL;
			}
		}
		IP6QB_UNLOCK(bucket);
	}
	CURVNET_RESTORE();
}
386a55383e7SHans Petter Selasky EVENTHANDLER_DEFINE(ifnet_departure_event, frag6_cleanup, NULL, 0);
387a55383e7SHans Petter Selasky
388a55383e7SHans Petter Selasky /*
38923d374aaSBjoern A. Zeeb * Like in RFC2460, in RFC8200, fragment and reassembly rules do not agree with
39023d374aaSBjoern A. Zeeb * each other, in terms of next header field handling in fragment header.
391686cdd19SJun-ichiro itojun Hagino * While the sender will use the same value for all of the fragmented packets,
39223d374aaSBjoern A. Zeeb * receiver is suggested not to check for consistency.
393686cdd19SJun-ichiro itojun Hagino *
39423d374aaSBjoern A. Zeeb * Fragment rules (p18,p19):
395686cdd19SJun-ichiro itojun Hagino * (2) A Fragment header containing:
39623d374aaSBjoern A. Zeeb * The Next Header value that identifies the first header
39723d374aaSBjoern A. Zeeb * after the Per-Fragment headers of the original packet.
398686cdd19SJun-ichiro itojun Hagino * -> next header field is same for all fragments
399686cdd19SJun-ichiro itojun Hagino *
40023d374aaSBjoern A. Zeeb * Reassembly rule (p20):
40123d374aaSBjoern A. Zeeb * The Next Header field of the last header of the Per-Fragment
40223d374aaSBjoern A. Zeeb * headers is obtained from the Next Header field of the first
403686cdd19SJun-ichiro itojun Hagino * fragment's Fragment header.
404686cdd19SJun-ichiro itojun Hagino * -> should grab it from the first fragment only
405686cdd19SJun-ichiro itojun Hagino *
406686cdd19SJun-ichiro itojun Hagino * The following note also contradicts with fragment rule - no one is going to
407686cdd19SJun-ichiro itojun Hagino * send different fragment with different next header field.
408686cdd19SJun-ichiro itojun Hagino *
40923d374aaSBjoern A. Zeeb * Additional note (p22) [not an error]:
410686cdd19SJun-ichiro itojun Hagino * The Next Header values in the Fragment headers of different
411686cdd19SJun-ichiro itojun Hagino * fragments of the same original packet may differ. Only the value
412686cdd19SJun-ichiro itojun Hagino * from the Offset zero fragment packet is used for reassembly.
413686cdd19SJun-ichiro itojun Hagino * -> should grab it from the first fragment only
414686cdd19SJun-ichiro itojun Hagino *
415686cdd19SJun-ichiro itojun Hagino * There is no explicit reason given in the RFC. Historical reason maybe?
416686cdd19SJun-ichiro itojun Hagino */
417686cdd19SJun-ichiro itojun Hagino /*
41823d374aaSBjoern A. Zeeb * Fragment input.
41982cd038dSYoshinobu Inoue */
42082cd038dSYoshinobu Inoue int
frag6_input(struct mbuf ** mp,int * offp,int proto)4211272577eSXin LI frag6_input(struct mbuf **mp, int *offp, int proto)
42282cd038dSYoshinobu Inoue {
42321f08a07SBjoern A. Zeeb struct mbuf *m, *t;
42482cd038dSYoshinobu Inoue struct ip6_hdr *ip6;
42582cd038dSYoshinobu Inoue struct ip6_frag *ip6f;
42621f08a07SBjoern A. Zeeb struct ip6qhead *head;
42721f08a07SBjoern A. Zeeb struct ip6q *q6;
42821f08a07SBjoern A. Zeeb struct ip6asfrag *af6, *ip6af, *af6tmp;
42921f08a07SBjoern A. Zeeb struct in6_ifaddr *ia6;
43021f08a07SBjoern A. Zeeb struct ifnet *dstifp, *srcifp;
431505e91f5SKristof Provost uint32_t hashkey[(sizeof(struct in6_addr) * 2 +
432505e91f5SKristof Provost sizeof(ip6f->ip6f_ident)) / sizeof(uint32_t)];
4339cb1a47aSBjoern A. Zeeb uint32_t bucket, *hashkeyp;
4345778b399SBjoern A. Zeeb int fragoff, frgpartlen; /* Must be larger than uint16_t. */
4355778b399SBjoern A. Zeeb int nxt, offset, plen;
4365778b399SBjoern A. Zeeb uint8_t ecn, ecn0;
4375778b399SBjoern A. Zeeb bool only_frag;
438aaa46574SAdrian Chadd #ifdef RSS
439aaa46574SAdrian Chadd struct ip6_direct_ctx *ip6dc;
4405778b399SBjoern A. Zeeb struct m_tag *mtag;
441aaa46574SAdrian Chadd #endif
442aaa46574SAdrian Chadd
4435778b399SBjoern A. Zeeb m = *mp;
4445778b399SBjoern A. Zeeb offset = *offp;
4455778b399SBjoern A. Zeeb
446c1131de6SBjoern A. Zeeb M_ASSERTPKTHDR(m);
447c1131de6SBjoern A. Zeeb
448a4adf6ccSBjoern A. Zeeb if (m->m_len < offset + sizeof(struct ip6_frag)) {
449a61b5cfbSBjoern A. Zeeb m = m_pullup(m, offset + sizeof(struct ip6_frag));
450a61b5cfbSBjoern A. Zeeb if (m == NULL) {
451a61b5cfbSBjoern A. Zeeb IP6STAT_INC(ip6s_exthdrtoolong);
452a61b5cfbSBjoern A. Zeeb *mp = NULL;
45340e39bbbSHajimu UMEMOTO return (IPPROTO_DONE);
454a61b5cfbSBjoern A. Zeeb }
455a4adf6ccSBjoern A. Zeeb }
456a61b5cfbSBjoern A. Zeeb ip6 = mtod(m, struct ip6_hdr *);
45782cd038dSYoshinobu Inoue
45882cd038dSYoshinobu Inoue dstifp = NULL;
45923d374aaSBjoern A. Zeeb /* Find the destination interface of the packet. */
4608268d82cSAlexander V. Chernikov ia6 = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */, false);
4618268d82cSAlexander V. Chernikov if (ia6 != NULL)
4625778b399SBjoern A. Zeeb dstifp = ia6->ia_ifp;
46323d374aaSBjoern A. Zeeb
46423d374aaSBjoern A. Zeeb /* Jumbo payload cannot contain a fragment header. */
46582cd038dSYoshinobu Inoue if (ip6->ip6_plen == 0) {
46682cd038dSYoshinobu Inoue icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, offset);
46782cd038dSYoshinobu Inoue in6_ifstat_inc(dstifp, ifs6_reass_fail);
468a8fe77d8SBjoern A. Zeeb *mp = NULL;
4695778b399SBjoern A. Zeeb return (IPPROTO_DONE);
47082cd038dSYoshinobu Inoue }
47182cd038dSYoshinobu Inoue
47282cd038dSYoshinobu Inoue /*
47323d374aaSBjoern A. Zeeb * Check whether fragment packet's fragment length is a
47423d374aaSBjoern A. Zeeb * multiple of 8 octets (unless it is the last one).
47582cd038dSYoshinobu Inoue * sizeof(struct ip6_frag) == 8
47682cd038dSYoshinobu Inoue * sizeof(struct ip6_hdr) = 40
47782cd038dSYoshinobu Inoue */
478a61b5cfbSBjoern A. Zeeb ip6f = (struct ip6_frag *)((caddr_t)ip6 + offset);
47982cd038dSYoshinobu Inoue if ((ip6f->ip6f_offlg & IP6F_MORE_FRAG) &&
48082cd038dSYoshinobu Inoue (((ntohs(ip6->ip6_plen) - offset) & 0x7) != 0)) {
48106cd0a3fSHajimu UMEMOTO icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
482686cdd19SJun-ichiro itojun Hagino offsetof(struct ip6_hdr, ip6_plen));
48382cd038dSYoshinobu Inoue in6_ifstat_inc(dstifp, ifs6_reass_fail);
484a8fe77d8SBjoern A. Zeeb *mp = NULL;
4855778b399SBjoern A. Zeeb return (IPPROTO_DONE);
48682cd038dSYoshinobu Inoue }
48782cd038dSYoshinobu Inoue
4889cb8d207SAndrey V. Elsukov IP6STAT_INC(ip6s_fragments);
48982cd038dSYoshinobu Inoue in6_ifstat_inc(dstifp, ifs6_reass_reqd);
49082cd038dSYoshinobu Inoue
4914018ea9aSBjoern A. Zeeb /*
4922946a941STom Jones * Handle "atomic" fragments (offset and m bit set to 0) upfront,
493c1131de6SBjoern A. Zeeb * unrelated to any reassembly. We need to remove the frag hdr
494c1131de6SBjoern A. Zeeb * which is ugly.
49523d374aaSBjoern A. Zeeb * See RFC 6946 and section 4.5 of RFC 8200.
4964018ea9aSBjoern A. Zeeb */
4974018ea9aSBjoern A. Zeeb if ((ip6f->ip6f_offlg & ~IP6F_RESERVED_MASK) == 0) {
4982946a941STom Jones IP6STAT_INC(ip6s_atomicfrags);
499c1131de6SBjoern A. Zeeb nxt = ip6f->ip6f_nxt;
500c1131de6SBjoern A. Zeeb /*
501c1131de6SBjoern A. Zeeb * Set nxt(-hdr field value) to the original value.
502c1131de6SBjoern A. Zeeb * We cannot just set ip6->ip6_nxt as there might be
503c1131de6SBjoern A. Zeeb * an unfragmentable part with extension headers and
504c1131de6SBjoern A. Zeeb * we must update the last one.
505c1131de6SBjoern A. Zeeb */
506c1131de6SBjoern A. Zeeb m_copyback(m, ip6_get_prevhdr(m, offset), sizeof(uint8_t),
507c1131de6SBjoern A. Zeeb (caddr_t)&nxt);
508c1131de6SBjoern A. Zeeb ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) -
509c1131de6SBjoern A. Zeeb sizeof(struct ip6_frag));
510c1131de6SBjoern A. Zeeb if (ip6_deletefraghdr(m, offset, M_NOWAIT) != 0)
511c1131de6SBjoern A. Zeeb goto dropfrag2;
512c1131de6SBjoern A. Zeeb m->m_pkthdr.len -= sizeof(struct ip6_frag);
5134018ea9aSBjoern A. Zeeb in6_ifstat_inc(dstifp, ifs6_reass_ok);
514c1131de6SBjoern A. Zeeb *mp = m;
515c1131de6SBjoern A. Zeeb return (nxt);
5164018ea9aSBjoern A. Zeeb }
5174018ea9aSBjoern A. Zeeb
518c1131de6SBjoern A. Zeeb /* Offset now points to data portion. */
519c1131de6SBjoern A. Zeeb offset += sizeof(struct ip6_frag);
520c1131de6SBjoern A. Zeeb
5215f9f192dSJonathan T. Looney /* Get fragment length and discard 0-byte fragments. */
5225f9f192dSJonathan T. Looney frgpartlen = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - offset;
5235f9f192dSJonathan T. Looney if (frgpartlen == 0) {
5245f9f192dSJonathan T. Looney icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
5255f9f192dSJonathan T. Looney offsetof(struct ip6_hdr, ip6_plen));
5265f9f192dSJonathan T. Looney in6_ifstat_inc(dstifp, ifs6_reass_fail);
5275f9f192dSJonathan T. Looney IP6STAT_INC(ip6s_fragdropped);
528a8fe77d8SBjoern A. Zeeb *mp = NULL;
5295778b399SBjoern A. Zeeb return (IPPROTO_DONE);
5305f9f192dSJonathan T. Looney }
5315f9f192dSJonathan T. Looney
5327715d794SBjoern A. Zeeb /*
5337715d794SBjoern A. Zeeb * Enforce upper bound on number of fragments for the entire system.
5347715d794SBjoern A. Zeeb * If maxfrag is 0, never accept fragments.
5357715d794SBjoern A. Zeeb * If maxfrag is -1, accept all fragments without limitation.
5367715d794SBjoern A. Zeeb */
5377715d794SBjoern A. Zeeb if (ip6_maxfrags < 0)
5387715d794SBjoern A. Zeeb ;
5397715d794SBjoern A. Zeeb else if (atomic_load_int(&frag6_nfrags) >= (u_int)ip6_maxfrags)
5407715d794SBjoern A. Zeeb goto dropfrag2;
5417715d794SBjoern A. Zeeb
54230809ba9SBjoern A. Zeeb /*
54330809ba9SBjoern A. Zeeb * Validate that a full header chain to the ULP is present in the
54430809ba9SBjoern A. Zeeb * packet containing the first fragment as per RFC RFC7112 and
54530809ba9SBjoern A. Zeeb * RFC 8200 pages 18,19:
54630809ba9SBjoern A. Zeeb * The first fragment packet is composed of:
54730809ba9SBjoern A. Zeeb * (3) Extension headers, if any, and the Upper-Layer header. These
54830809ba9SBjoern A. Zeeb * headers must be in the first fragment. ...
54930809ba9SBjoern A. Zeeb */
55030809ba9SBjoern A. Zeeb fragoff = ntohs(ip6f->ip6f_offlg & IP6F_OFF_MASK);
55130809ba9SBjoern A. Zeeb /* XXX TODO. thj has D16851 open for this. */
55230809ba9SBjoern A. Zeeb /* Send ICMPv6 4,3 in case of violation. */
55330809ba9SBjoern A. Zeeb
554efdfee93SBjoern A. Zeeb /* Store receive network interface pointer for later. */
555efdfee93SBjoern A. Zeeb srcifp = m->m_pkthdr.rcvif;
556efdfee93SBjoern A. Zeeb
55723d374aaSBjoern A. Zeeb /* Generate a hash value for fragment bucket selection. */
55880d7a853SJonathan T. Looney hashkeyp = hashkey;
55980d7a853SJonathan T. Looney memcpy(hashkeyp, &ip6->ip6_src, sizeof(struct in6_addr));
56080d7a853SJonathan T. Looney hashkeyp += sizeof(struct in6_addr) / sizeof(*hashkeyp);
56180d7a853SJonathan T. Looney memcpy(hashkeyp, &ip6->ip6_dst, sizeof(struct in6_addr));
56280d7a853SJonathan T. Looney hashkeyp += sizeof(struct in6_addr) / sizeof(*hashkeyp);
56380d7a853SJonathan T. Looney *hashkeyp = ip6f->ip6f_ident;
5649cb1a47aSBjoern A. Zeeb bucket = jenkins_hash32(hashkey, nitems(hashkey), V_ip6qb_hashseed);
5659cb1a47aSBjoern A. Zeeb bucket &= IP6REASS_HMASK;
5669cb1a47aSBjoern A. Zeeb IP6QB_LOCK(bucket);
56721f08a07SBjoern A. Zeeb head = IP6QB_HEAD(bucket);
5689888c401SHajimu UMEMOTO
56921f08a07SBjoern A. Zeeb TAILQ_FOREACH(q6, head, ip6q_tq)
57082cd038dSYoshinobu Inoue if (ip6f->ip6f_ident == q6->ip6q_ident &&
57182cd038dSYoshinobu Inoue IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, &q6->ip6q_src) &&
5724b908c8bSRobert Watson IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &q6->ip6q_dst)
5734b908c8bSRobert Watson #ifdef MAC
5744b908c8bSRobert Watson && mac_ip6q_match(m, q6)
5754b908c8bSRobert Watson #endif
5764b908c8bSRobert Watson )
57782cd038dSYoshinobu Inoue break;
57882cd038dSYoshinobu Inoue
5795778b399SBjoern A. Zeeb only_frag = false;
58021f08a07SBjoern A. Zeeb if (q6 == NULL) {
5815778b399SBjoern A. Zeeb /* A first fragment to arrive creates a reassembly queue. */
5825778b399SBjoern A. Zeeb only_frag = true;
58382cd038dSYoshinobu Inoue
58482cd038dSYoshinobu Inoue /*
58582cd038dSYoshinobu Inoue * Enforce upper bound on number of fragmented packets
58682cd038dSYoshinobu Inoue * for which we attempt reassembly;
5879888c401SHajimu UMEMOTO * If maxfragpackets is 0, never accept fragments.
5889888c401SHajimu UMEMOTO * If maxfragpackets is -1, accept all fragments without
5899888c401SHajimu UMEMOTO * limitation.
59082cd038dSYoshinobu Inoue */
591603724d3SBjoern A. Zeeb if (V_ip6_maxfragpackets < 0)
59233841545SHajimu UMEMOTO ;
5939cb1a47aSBjoern A. Zeeb else if (V_ip6qb[bucket].count >= V_ip6_maxfragbucketsize ||
5941e9f3b73SJonathan T. Looney atomic_load_int(&V_frag6_nfragpackets) >=
59580d7a853SJonathan T. Looney (u_int)V_ip6_maxfragpackets)
59633841545SHajimu UMEMOTO goto dropfrag;
59723d374aaSBjoern A. Zeeb
59823d374aaSBjoern A. Zeeb /* Allocate IPv6 fragement packet queue entry. */
599f12a9a4cSMark Johnston q6 = malloc(sizeof(struct ip6q), M_FRAG6, M_NOWAIT | M_ZERO);
60082cd038dSYoshinobu Inoue if (q6 == NULL)
60182cd038dSYoshinobu Inoue goto dropfrag;
6024b908c8bSRobert Watson #ifdef MAC
6034b908c8bSRobert Watson if (mac_ip6q_init(q6, M_NOWAIT) != 0) {
604487a161cSBjoern A. Zeeb free(q6, M_FRAG6);
6054b908c8bSRobert Watson goto dropfrag;
6064b908c8bSRobert Watson }
6074b908c8bSRobert Watson mac_ip6q_create(m, q6);
6084b908c8bSRobert Watson #endif
609702828f6SBjoern A. Zeeb atomic_add_int(&V_frag6_nfragpackets, 1);
61082cd038dSYoshinobu Inoue
61123d374aaSBjoern A. Zeeb /* ip6q_nxt will be filled afterwards, from 1st fragment. */
61221f08a07SBjoern A. Zeeb TAILQ_INIT(&q6->ip6q_frags);
61382cd038dSYoshinobu Inoue q6->ip6q_ident = ip6f->ip6f_ident;
614e32221a1SAlexander V. Chernikov q6->ip6q_ttl = frag6_milli_to_callout_ticks(V_ip6_fraglifetime);
61582cd038dSYoshinobu Inoue q6->ip6q_src = ip6->ip6_src;
61682cd038dSYoshinobu Inoue q6->ip6q_dst = ip6->ip6_dst;
617bb4a7d94SKristof Provost q6->ip6q_ecn = IPV6_ECN(ip6);
61882cd038dSYoshinobu Inoue q6->ip6q_unfrglen = -1; /* The 1st fragment has not arrived. */
6199888c401SHajimu UMEMOTO
62021f08a07SBjoern A. Zeeb /* Add the fragemented packet to the bucket. */
62121f08a07SBjoern A. Zeeb TAILQ_INSERT_HEAD(head, q6, ip6q_tq);
62221f08a07SBjoern A. Zeeb V_ip6qb[bucket].count++;
62382cd038dSYoshinobu Inoue }
62482cd038dSYoshinobu Inoue
62582cd038dSYoshinobu Inoue /*
62623d374aaSBjoern A. Zeeb * If it is the 1st fragment, record the length of the
62782cd038dSYoshinobu Inoue * unfragmentable part and the next header of the fragment header.
628619456bbSBjoern A. Zeeb * Assume the first 1st fragement to arrive will be correct.
629619456bbSBjoern A. Zeeb * We do not have any duplicate checks here yet so another packet
630619456bbSBjoern A. Zeeb * with fragoff == 0 could come and overwrite the ip6q_unfrglen
631619456bbSBjoern A. Zeeb * and worse, the next header, at any time.
63282cd038dSYoshinobu Inoue */
633619456bbSBjoern A. Zeeb if (fragoff == 0 && q6->ip6q_unfrglen == -1) {
63406cd0a3fSHajimu UMEMOTO q6->ip6q_unfrglen = offset - sizeof(struct ip6_hdr) -
63506cd0a3fSHajimu UMEMOTO sizeof(struct ip6_frag);
63682cd038dSYoshinobu Inoue q6->ip6q_nxt = ip6f->ip6f_nxt;
637619456bbSBjoern A. Zeeb /* XXX ECN? */
63882cd038dSYoshinobu Inoue }
63982cd038dSYoshinobu Inoue
64082cd038dSYoshinobu Inoue /*
64182cd038dSYoshinobu Inoue * Check that the reassembled packet would not exceed 65535 bytes
64282cd038dSYoshinobu Inoue * in size.
64382cd038dSYoshinobu Inoue * If it would exceed, discard the fragment and return an ICMP error.
64482cd038dSYoshinobu Inoue */
64582cd038dSYoshinobu Inoue if (q6->ip6q_unfrglen >= 0) {
64682cd038dSYoshinobu Inoue /* The 1st fragment has already arrived. */
64782cd038dSYoshinobu Inoue if (q6->ip6q_unfrglen + fragoff + frgpartlen > IPV6_MAXPACKET) {
6480736a380SMark Johnston if (only_frag)
6490736a380SMark Johnston frag6_rmqueue(q6, bucket);
650e5fffe9aSBjoern A. Zeeb IP6QB_UNLOCK(bucket);
65182cd038dSYoshinobu Inoue icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
652686cdd19SJun-ichiro itojun Hagino offset - sizeof(struct ip6_frag) +
653686cdd19SJun-ichiro itojun Hagino offsetof(struct ip6_frag, ip6f_offlg));
654a8fe77d8SBjoern A. Zeeb *mp = NULL;
65582cd038dSYoshinobu Inoue return (IPPROTO_DONE);
65682cd038dSYoshinobu Inoue }
65706cd0a3fSHajimu UMEMOTO } else if (fragoff + frgpartlen > IPV6_MAXPACKET) {
6580736a380SMark Johnston if (only_frag)
6590736a380SMark Johnston frag6_rmqueue(q6, bucket);
660e5fffe9aSBjoern A. Zeeb IP6QB_UNLOCK(bucket);
66182cd038dSYoshinobu Inoue icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
662686cdd19SJun-ichiro itojun Hagino offset - sizeof(struct ip6_frag) +
663686cdd19SJun-ichiro itojun Hagino offsetof(struct ip6_frag, ip6f_offlg));
664a8fe77d8SBjoern A. Zeeb *mp = NULL;
66582cd038dSYoshinobu Inoue return (IPPROTO_DONE);
66682cd038dSYoshinobu Inoue }
667f1664f32SBjoern A. Zeeb
66882cd038dSYoshinobu Inoue /*
66923d374aaSBjoern A. Zeeb * If it is the first fragment, do the above check for each
67082cd038dSYoshinobu Inoue * fragment already stored in the reassembly queue.
67182cd038dSYoshinobu Inoue */
672dda02192SBjoern A. Zeeb if (fragoff == 0 && !only_frag) {
67321f08a07SBjoern A. Zeeb TAILQ_FOREACH_SAFE(af6, &q6->ip6q_frags, ip6af_tq, af6tmp) {
674dda02192SBjoern A. Zeeb if (q6->ip6q_unfrglen + af6->ip6af_off +
675dda02192SBjoern A. Zeeb af6->ip6af_frglen > IPV6_MAXPACKET) {
67682cd038dSYoshinobu Inoue struct ip6_hdr *ip6err;
6775778b399SBjoern A. Zeeb struct mbuf *merr;
6785778b399SBjoern A. Zeeb int erroff;
6795778b399SBjoern A. Zeeb
680da89a0feSBjoern A. Zeeb merr = af6->ip6af_m;
6815778b399SBjoern A. Zeeb erroff = af6->ip6af_offset;
68282cd038dSYoshinobu Inoue
68323d374aaSBjoern A. Zeeb /* Dequeue the fragment. */
68421f08a07SBjoern A. Zeeb TAILQ_REMOVE(&q6->ip6q_frags, af6, ip6af_tq);
685dda02192SBjoern A. Zeeb q6->ip6q_nfrag--;
686dda02192SBjoern A. Zeeb atomic_subtract_int(&frag6_nfrags, 1);
687487a161cSBjoern A. Zeeb free(af6, M_FRAG6);
68882cd038dSYoshinobu Inoue
689a55383e7SHans Petter Selasky /* Set a valid receive interface pointer. */
690a55383e7SHans Petter Selasky merr->m_pkthdr.rcvif = srcifp;
691a55383e7SHans Petter Selasky
69223d374aaSBjoern A. Zeeb /* Adjust pointer. */
69382cd038dSYoshinobu Inoue ip6err = mtod(merr, struct ip6_hdr *);
69482cd038dSYoshinobu Inoue
69582cd038dSYoshinobu Inoue /*
69682cd038dSYoshinobu Inoue * Restore source and destination addresses
69782cd038dSYoshinobu Inoue * in the erroneous IPv6 header.
69882cd038dSYoshinobu Inoue */
69982cd038dSYoshinobu Inoue ip6err->ip6_src = q6->ip6q_src;
70082cd038dSYoshinobu Inoue ip6err->ip6_dst = q6->ip6q_dst;
70182cd038dSYoshinobu Inoue
70282cd038dSYoshinobu Inoue icmp6_error(merr, ICMP6_PARAM_PROB,
70382cd038dSYoshinobu Inoue ICMP6_PARAMPROB_HEADER,
704686cdd19SJun-ichiro itojun Hagino erroff - sizeof(struct ip6_frag) +
705686cdd19SJun-ichiro itojun Hagino offsetof(struct ip6_frag, ip6f_offlg));
70682cd038dSYoshinobu Inoue }
70782cd038dSYoshinobu Inoue }
70882cd038dSYoshinobu Inoue }
70982cd038dSYoshinobu Inoue
71023d374aaSBjoern A. Zeeb /* Allocate an IPv6 fragement queue entry for this fragmented part. */
711f12a9a4cSMark Johnston ip6af = malloc(sizeof(struct ip6asfrag), M_FRAG6, M_NOWAIT | M_ZERO);
712686cdd19SJun-ichiro itojun Hagino if (ip6af == NULL)
713686cdd19SJun-ichiro itojun Hagino goto dropfrag;
71421f08a07SBjoern A. Zeeb ip6af->ip6af_mff = (ip6f->ip6f_offlg & IP6F_MORE_FRAG) ? true : false;
71582cd038dSYoshinobu Inoue ip6af->ip6af_off = fragoff;
71682cd038dSYoshinobu Inoue ip6af->ip6af_frglen = frgpartlen;
71782cd038dSYoshinobu Inoue ip6af->ip6af_offset = offset;
718da89a0feSBjoern A. Zeeb ip6af->ip6af_m = m;
71982cd038dSYoshinobu Inoue
7205778b399SBjoern A. Zeeb if (only_frag) {
72121f08a07SBjoern A. Zeeb /*
72221f08a07SBjoern A. Zeeb * Do a manual insert rather than a hard-to-understand cast
72321f08a07SBjoern A. Zeeb * to a different type relying on data structure order to work.
72421f08a07SBjoern A. Zeeb */
72521f08a07SBjoern A. Zeeb TAILQ_INSERT_HEAD(&q6->ip6q_frags, ip6af, ip6af_tq);
72621f08a07SBjoern A. Zeeb goto postinsert;
72782cd038dSYoshinobu Inoue }
72882cd038dSYoshinobu Inoue
72923d374aaSBjoern A. Zeeb /* Do duplicate, condition, and boundry checks. */
73082cd038dSYoshinobu Inoue /*
73159dfcba4SHajimu UMEMOTO * Handle ECN by comparing this segment with the first one;
73259dfcba4SHajimu UMEMOTO * if CE is set, do not lose CE.
73323d374aaSBjoern A. Zeeb * Drop if CE and not-ECT are mixed for the same packet.
73459dfcba4SHajimu UMEMOTO */
735bb4a7d94SKristof Provost ecn = IPV6_ECN(ip6);
7365e9510e3SJINMEI Tatuya ecn0 = q6->ip6q_ecn;
73759dfcba4SHajimu UMEMOTO if (ecn == IPTOS_ECN_CE) {
73859dfcba4SHajimu UMEMOTO if (ecn0 == IPTOS_ECN_NOTECT) {
739487a161cSBjoern A. Zeeb free(ip6af, M_FRAG6);
74059dfcba4SHajimu UMEMOTO goto dropfrag;
74159dfcba4SHajimu UMEMOTO }
74259dfcba4SHajimu UMEMOTO if (ecn0 != IPTOS_ECN_CE)
7435e9510e3SJINMEI Tatuya q6->ip6q_ecn = IPTOS_ECN_CE;
74459dfcba4SHajimu UMEMOTO }
74559dfcba4SHajimu UMEMOTO if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT) {
746487a161cSBjoern A. Zeeb free(ip6af, M_FRAG6);
74759dfcba4SHajimu UMEMOTO goto dropfrag;
74859dfcba4SHajimu UMEMOTO }
74959dfcba4SHajimu UMEMOTO
75023d374aaSBjoern A. Zeeb /* Find a fragmented part which begins after this one does. */
75121f08a07SBjoern A. Zeeb TAILQ_FOREACH(af6, &q6->ip6q_frags, ip6af_tq)
75282cd038dSYoshinobu Inoue if (af6->ip6af_off > ip6af->ip6af_off)
75382cd038dSYoshinobu Inoue break;
75482cd038dSYoshinobu Inoue
75582cd038dSYoshinobu Inoue /*
75682cd038dSYoshinobu Inoue * If the incoming framgent overlaps some existing fragments in
75723d374aaSBjoern A. Zeeb * the reassembly queue, drop both the new fragment and the
75823d374aaSBjoern A. Zeeb * entire reassembly queue. However, if the new fragment
75923d374aaSBjoern A. Zeeb * is an exact duplicate of an existing fragment, only silently
76023d374aaSBjoern A. Zeeb * drop the existing fragment and leave the fragmentation queue
76123d374aaSBjoern A. Zeeb * unchanged, as allowed by the RFC. (RFC 8200, 4.5)
76282cd038dSYoshinobu Inoue */
76321f08a07SBjoern A. Zeeb if (af6 != NULL)
76421f08a07SBjoern A. Zeeb af6tmp = TAILQ_PREV(af6, ip6fraghead, ip6af_tq);
76521f08a07SBjoern A. Zeeb else
76621f08a07SBjoern A. Zeeb af6tmp = TAILQ_LAST(&q6->ip6q_frags, ip6fraghead);
76721f08a07SBjoern A. Zeeb if (af6tmp != NULL) {
76821f08a07SBjoern A. Zeeb if (af6tmp->ip6af_off + af6tmp->ip6af_frglen -
7695778b399SBjoern A. Zeeb ip6af->ip6af_off > 0) {
770cd188da2SBjoern A. Zeeb if (af6tmp->ip6af_off != ip6af->ip6af_off ||
771cd188da2SBjoern A. Zeeb af6tmp->ip6af_frglen != ip6af->ip6af_frglen)
772cd188da2SBjoern A. Zeeb frag6_freef(q6, bucket);
773487a161cSBjoern A. Zeeb free(ip6af, M_FRAG6);
77482cd038dSYoshinobu Inoue goto dropfrag;
77582cd038dSYoshinobu Inoue }
77682cd038dSYoshinobu Inoue }
77721f08a07SBjoern A. Zeeb if (af6 != NULL) {
7785778b399SBjoern A. Zeeb if (ip6af->ip6af_off + ip6af->ip6af_frglen -
7795778b399SBjoern A. Zeeb af6->ip6af_off > 0) {
780cd188da2SBjoern A. Zeeb if (af6->ip6af_off != ip6af->ip6af_off ||
781cd188da2SBjoern A. Zeeb af6->ip6af_frglen != ip6af->ip6af_frglen)
782cd188da2SBjoern A. Zeeb frag6_freef(q6, bucket);
783487a161cSBjoern A. Zeeb free(ip6af, M_FRAG6);
78482cd038dSYoshinobu Inoue goto dropfrag;
78582cd038dSYoshinobu Inoue }
78682cd038dSYoshinobu Inoue }
78782cd038dSYoshinobu Inoue
7884b908c8bSRobert Watson #ifdef MAC
7894b908c8bSRobert Watson mac_ip6q_update(m, q6);
7904b908c8bSRobert Watson #endif
79182cd038dSYoshinobu Inoue
79282cd038dSYoshinobu Inoue /*
79323d374aaSBjoern A. Zeeb * Stick new segment in its place; check for complete reassembly.
79423d374aaSBjoern A. Zeeb * If not complete, check fragment limit. Move to front of packet
79523d374aaSBjoern A. Zeeb * queue, as we are the most recently active fragmented packet.
79682cd038dSYoshinobu Inoue */
79721f08a07SBjoern A. Zeeb if (af6 != NULL)
79821f08a07SBjoern A. Zeeb TAILQ_INSERT_BEFORE(af6, ip6af, ip6af_tq);
79921f08a07SBjoern A. Zeeb else
80021f08a07SBjoern A. Zeeb TAILQ_INSERT_TAIL(&q6->ip6q_frags, ip6af, ip6af_tq);
80121f08a07SBjoern A. Zeeb postinsert:
8022adfd64fSJonathan T. Looney atomic_add_int(&frag6_nfrags, 1);
8039888c401SHajimu UMEMOTO q6->ip6q_nfrag++;
8043c7165b3SBjoern A. Zeeb
8055778b399SBjoern A. Zeeb plen = 0;
80621f08a07SBjoern A. Zeeb TAILQ_FOREACH(af6, &q6->ip6q_frags, ip6af_tq) {
8075778b399SBjoern A. Zeeb if (af6->ip6af_off != plen) {
80803c99d76SJonathan T. Looney if (q6->ip6q_nfrag > V_ip6_maxfragsperpacket) {
809198fdaedSTom Jones IP6STAT_ADD(ip6s_fragdropped, q6->ip6q_nfrag);
8109cb1a47aSBjoern A. Zeeb frag6_freef(q6, bucket);
81103c99d76SJonathan T. Looney }
8129cb1a47aSBjoern A. Zeeb IP6QB_UNLOCK(bucket);
813a8fe77d8SBjoern A. Zeeb *mp = NULL;
8145778b399SBjoern A. Zeeb return (IPPROTO_DONE);
81582cd038dSYoshinobu Inoue }
8165778b399SBjoern A. Zeeb plen += af6->ip6af_frglen;
81782cd038dSYoshinobu Inoue }
81821f08a07SBjoern A. Zeeb af6 = TAILQ_LAST(&q6->ip6q_frags, ip6fraghead);
81921f08a07SBjoern A. Zeeb if (af6->ip6af_mff) {
82003c99d76SJonathan T. Looney if (q6->ip6q_nfrag > V_ip6_maxfragsperpacket) {
821198fdaedSTom Jones IP6STAT_ADD(ip6s_fragdropped, q6->ip6q_nfrag);
8229cb1a47aSBjoern A. Zeeb frag6_freef(q6, bucket);
82303c99d76SJonathan T. Looney }
8249cb1a47aSBjoern A. Zeeb IP6QB_UNLOCK(bucket);
825a8fe77d8SBjoern A. Zeeb *mp = NULL;
8265778b399SBjoern A. Zeeb return (IPPROTO_DONE);
82782cd038dSYoshinobu Inoue }
82882cd038dSYoshinobu Inoue
82923d374aaSBjoern A. Zeeb /* Reassembly is complete; concatenate fragments. */
83021f08a07SBjoern A. Zeeb ip6af = TAILQ_FIRST(&q6->ip6q_frags);
831da89a0feSBjoern A. Zeeb t = m = ip6af->ip6af_m;
83221f08a07SBjoern A. Zeeb TAILQ_REMOVE(&q6->ip6q_frags, ip6af, ip6af_tq);
83321f08a07SBjoern A. Zeeb while ((af6 = TAILQ_FIRST(&q6->ip6q_frags)) != NULL) {
8349907aba3SAndrey V. Elsukov m->m_pkthdr.csum_flags &=
835da89a0feSBjoern A. Zeeb af6->ip6af_m->m_pkthdr.csum_flags;
8369907aba3SAndrey V. Elsukov m->m_pkthdr.csum_data +=
837da89a0feSBjoern A. Zeeb af6->ip6af_m->m_pkthdr.csum_data;
8389907aba3SAndrey V. Elsukov
83921f08a07SBjoern A. Zeeb TAILQ_REMOVE(&q6->ip6q_frags, af6, ip6af_tq);
840efdfee93SBjoern A. Zeeb t = m_last(t);
841da89a0feSBjoern A. Zeeb m_adj(af6->ip6af_m, af6->ip6af_offset);
842da89a0feSBjoern A. Zeeb m_demote_pkthdr(af6->ip6af_m);
843da89a0feSBjoern A. Zeeb m_cat(t, af6->ip6af_m);
844487a161cSBjoern A. Zeeb free(af6, M_FRAG6);
84582cd038dSYoshinobu Inoue }
84682cd038dSYoshinobu Inoue
8479907aba3SAndrey V. Elsukov while (m->m_pkthdr.csum_data & 0xffff0000)
8489907aba3SAndrey V. Elsukov m->m_pkthdr.csum_data = (m->m_pkthdr.csum_data & 0xffff) +
8499907aba3SAndrey V. Elsukov (m->m_pkthdr.csum_data >> 16);
8509907aba3SAndrey V. Elsukov
85123d374aaSBjoern A. Zeeb /* Adjust offset to point where the original next header starts. */
85282cd038dSYoshinobu Inoue offset = ip6af->ip6af_offset - sizeof(struct ip6_frag);
853487a161cSBjoern A. Zeeb free(ip6af, M_FRAG6);
854ff3d1a3fSJonathan T. Looney if ((u_int)plen + (u_int)offset - sizeof(struct ip6_hdr) >
855ff3d1a3fSJonathan T. Looney IPV6_MAXPACKET) {
856ff3d1a3fSJonathan T. Looney frag6_freef(q6, bucket);
857ff3d1a3fSJonathan T. Looney goto dropfrag;
858ff3d1a3fSJonathan T. Looney }
859686cdd19SJun-ichiro itojun Hagino ip6 = mtod(m, struct ip6_hdr *);
8605778b399SBjoern A. Zeeb ip6->ip6_plen = htons((u_short)plen + offset - sizeof(struct ip6_hdr));
8615e9510e3SJINMEI Tatuya if (q6->ip6q_ecn == IPTOS_ECN_CE)
8625e9510e3SJINMEI Tatuya ip6->ip6_flow |= htonl(IPTOS_ECN_CE << 20);
86382cd038dSYoshinobu Inoue nxt = q6->ip6q_nxt;
86482cd038dSYoshinobu Inoue
865a61b5cfbSBjoern A. Zeeb ip6_deletefraghdr(m, offset, M_NOWAIT);
86682cd038dSYoshinobu Inoue
86723d374aaSBjoern A. Zeeb /* Set nxt(-hdr field value) to the original value. */
86868e0e5a6SAndrey V. Elsukov m_copyback(m, ip6_get_prevhdr(m, offset), sizeof(uint8_t),
86968e0e5a6SAndrey V. Elsukov (caddr_t)&nxt);
87082cd038dSYoshinobu Inoue
8714b908c8bSRobert Watson #ifdef MAC
8724b908c8bSRobert Watson mac_ip6q_reassemble(q6, m);
8734b908c8bSRobert Watson #endif
8748d01ecd8SMark Johnston atomic_subtract_int(&frag6_nfrags, q6->ip6q_nfrag);
8758d01ecd8SMark Johnston frag6_rmqueue(q6, bucket);
87682cd038dSYoshinobu Inoue
87782cd038dSYoshinobu Inoue if (m->m_flags & M_PKTHDR) { /* Isn't it always true? */
8785778b399SBjoern A. Zeeb
8795778b399SBjoern A. Zeeb plen = 0;
88082cd038dSYoshinobu Inoue for (t = m; t; t = t->m_next)
88182cd038dSYoshinobu Inoue plen += t->m_len;
88282cd038dSYoshinobu Inoue m->m_pkthdr.len = plen;
883a55383e7SHans Petter Selasky /* Set a valid receive interface pointer. */
884a55383e7SHans Petter Selasky m->m_pkthdr.rcvif = srcifp;
88582cd038dSYoshinobu Inoue }
88682cd038dSYoshinobu Inoue
887aaa46574SAdrian Chadd #ifdef RSS
888aaa46574SAdrian Chadd mtag = m_tag_alloc(MTAG_ABI_IPV6, IPV6_TAG_DIRECT, sizeof(*ip6dc),
889aaa46574SAdrian Chadd M_NOWAIT);
890aaa46574SAdrian Chadd if (mtag == NULL)
891aaa46574SAdrian Chadd goto dropfrag;
892aaa46574SAdrian Chadd
893aaa46574SAdrian Chadd ip6dc = (struct ip6_direct_ctx *)(mtag + 1);
894aaa46574SAdrian Chadd ip6dc->ip6dc_nxt = nxt;
895aaa46574SAdrian Chadd ip6dc->ip6dc_off = offset;
896aaa46574SAdrian Chadd
897aaa46574SAdrian Chadd m_tag_prepend(m, mtag);
898aaa46574SAdrian Chadd #endif
899aaa46574SAdrian Chadd
9009cb1a47aSBjoern A. Zeeb IP6QB_UNLOCK(bucket);
9019cb8d207SAndrey V. Elsukov IP6STAT_INC(ip6s_reassembled);
90282cd038dSYoshinobu Inoue in6_ifstat_inc(dstifp, ifs6_reass_ok);
90382cd038dSYoshinobu Inoue
904aaa46574SAdrian Chadd #ifdef RSS
90523d374aaSBjoern A. Zeeb /* Queue/dispatch for reprocessing. */
906aaa46574SAdrian Chadd netisr_dispatch(NETISR_IPV6_DIRECT, m);
907a8fe77d8SBjoern A. Zeeb *mp = NULL;
9085778b399SBjoern A. Zeeb return (IPPROTO_DONE);
909aaa46574SAdrian Chadd #endif
910aaa46574SAdrian Chadd
91123d374aaSBjoern A. Zeeb /* Tell launch routine the next header. */
91282cd038dSYoshinobu Inoue *mp = m;
91382cd038dSYoshinobu Inoue *offp = offset;
91482cd038dSYoshinobu Inoue
9155778b399SBjoern A. Zeeb return (nxt);
91682cd038dSYoshinobu Inoue
91782cd038dSYoshinobu Inoue dropfrag:
9189cb1a47aSBjoern A. Zeeb IP6QB_UNLOCK(bucket);
9197715d794SBjoern A. Zeeb dropfrag2:
92082cd038dSYoshinobu Inoue in6_ifstat_inc(dstifp, ifs6_reass_fail);
9219cb8d207SAndrey V. Elsukov IP6STAT_INC(ip6s_fragdropped);
92282cd038dSYoshinobu Inoue m_freem(m);
923a8fe77d8SBjoern A. Zeeb *mp = NULL;
9245778b399SBjoern A. Zeeb return (IPPROTO_DONE);
92582cd038dSYoshinobu Inoue }
92682cd038dSYoshinobu Inoue
/*
 * IPv6 reassembly timer processing;
 * if a timer expires on a reassembly queue, discard it.
 */
static struct callout frag6_callout;
static void
frag6_slowtimo(void *arg __unused)
{
	VNET_ITERATOR_DECL(vnet_iter);
	struct ip6qhead *head;
	struct ip6q *q6, *q6tmp;
	uint32_t bucket;

	/*
	 * Fast path: no fragments queued anywhere in the system, so skip
	 * the VNET/bucket walk entirely and just re-arm the callout.
	 */
	if (atomic_load_int(&frag6_nfrags) == 0)
		goto done;

	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) {
			/*
			 * Unlocked peek at the bucket count; a stale zero
			 * only delays expiry to the next tick.
			 */
			if (V_ip6qb[bucket].count == 0)
				continue;
			IP6QB_LOCK(bucket);
			head = IP6QB_HEAD(bucket);
			/* Age each queue; free the ones whose TTL expired. */
			TAILQ_FOREACH_SAFE(q6, head, ip6q_tq, q6tmp)
				if (--q6->ip6q_ttl == 0) {
					IP6STAT_ADD(ip6s_fragtimeout,
						q6->ip6q_nfrag);
					/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
					frag6_freef(q6, bucket);
				}
			/*
			 * If we are over the maximum number of fragments
			 * (due to the limit being lowered), drain off
			 * enough to get down to the new limit.
			 * Note that we drain all reassembly queues if
			 * maxfragpackets is 0 (fragmentation is disabled),
			 * and do not enforce a limit when maxfragpackets
			 * is negative.
			 * Victims are taken from the tail, i.e. the least
			 * recently active queues in this bucket.
			 */
			while ((V_ip6_maxfragpackets == 0 ||
			    (V_ip6_maxfragpackets > 0 &&
			    V_ip6qb[bucket].count > V_ip6_maxfragbucketsize)) &&
			    (q6 = TAILQ_LAST(head, ip6qhead)) != NULL) {
				IP6STAT_ADD(ip6s_fragoverflow, q6->ip6q_nfrag);
				/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
				frag6_freef(q6, bucket);
			}
			IP6QB_UNLOCK(bucket);
		}
		/*
		 * If we are still over the maximum number of fragmented
		 * packets, drain off enough to get down to the new limit.
		 * Walk the buckets round-robin, evicting at most one queue
		 * per bucket per step, so the pain is spread evenly.
		 */
		bucket = 0;
		while (V_ip6_maxfragpackets >= 0 &&
		    atomic_load_int(&V_frag6_nfragpackets) >
		    (u_int)V_ip6_maxfragpackets) {
			IP6QB_LOCK(bucket);
			q6 = TAILQ_LAST(IP6QB_HEAD(bucket), ip6qhead);
			if (q6 != NULL) {
				IP6STAT_ADD(ip6s_fragoverflow, q6->ip6q_nfrag);
				/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
				frag6_freef(q6, bucket);
			}
			IP6QB_UNLOCK(bucket);
			bucket = (bucket + 1) % IP6REASS_NHASH;
		}
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
done:
	/* Self re-arming callout; period IP6_CALLOUT_INTERVAL_MS. */
	callout_reset_sbt(&frag6_callout, SBT_1MS * IP6_CALLOUT_INTERVAL_MS,
	    SBT_1MS * 10, frag6_slowtimo, NULL, 0);
}
100282cd038dSYoshinobu Inoue
/*
 * Arm the reassembly expiry callout once at boot; frag6_slowtimo()
 * re-arms itself from then on.
 */
static void
frag6_slowtimo_init(void *arg __unused)
{

	/* Second argument 1 requests an MPSAFE callout (see callout(9)). */
	callout_init(&frag6_callout, 1);
	callout_reset_sbt(&frag6_callout, SBT_1MS * IP6_CALLOUT_INTERVAL_MS,
	    SBT_1MS * 10, frag6_slowtimo, NULL, 0);
}
/* Run after all VNETs are set up so the timer can safely iterate them. */
SYSINIT(frag6, SI_SUB_VNET_DONE, SI_ORDER_ANY, frag6_slowtimo_init, NULL);
1012a0d7d247SGleb Smirnoff
/*
 * Event handler to adjust limits in case nmbclusters change.
 */
static void
frag6_change(void *tag)
{
	VNET_ITERATOR_DECL(vnet_iter);

	/* Global (non-virtualized) limit on fragments system-wide. */
	ip6_maxfrags = IP6_MAXFRAGS;
	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		/* Re-derive the per-VNET packet and per-bucket limits. */
		V_ip6_maxfragpackets = IP6_MAXFRAGPACKETS;
		frag6_set_bucketsize();
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
}
1031c00464a2SBjoern A. Zeeb
/*
 * Initialise reassembly queue and fragment identifier.
 * Called for every VNET; the global state (ip6_maxfrags, the
 * nmbclusters event handler) is set up only for the default VNET.
 */
void
frag6_init(void)
{
	uint32_t bucket;

	V_ip6_maxfragpackets = IP6_MAXFRAGPACKETS;
	frag6_set_bucketsize();
	/* Empty queue head, lock, and zero count for each hash bucket. */
	for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) {
		TAILQ_INIT(IP6QB_HEAD(bucket));
		mtx_init(&V_ip6qb[bucket].lock, "ip6qb", NULL, MTX_DEF);
		V_ip6qb[bucket].count = 0;
	}
	/*
	 * Random per-VNET seed for the bucket hash; presumably so remote
	 * hosts cannot predict bucket placement from src/dst/ident alone.
	 */
	V_ip6qb_hashseed = arc4random();
	V_ip6_maxfragsperpacket = 64;
#ifdef VIMAGE
	V_frag6_on = true;
#endif
	if (!IS_DEFAULT_VNET(curvnet))
		return;

	ip6_maxfrags = IP6_MAXFRAGS;
	/* Rescale limits whenever nmbclusters changes (see frag6_change). */
	EVENTHANDLER_REGISTER(nmbclusters_change,
	    frag6_change, NULL, EVENTHANDLER_PRI_ANY);
}
1059c00464a2SBjoern A. Zeeb
106082cd038dSYoshinobu Inoue /*
106182cd038dSYoshinobu Inoue * Drain off all datagram fragments.
106282cd038dSYoshinobu Inoue */
106367a10c46SBjoern A. Zeeb static void
frag6_drain_one(void)106467a10c46SBjoern A. Zeeb frag6_drain_one(void)
106582cd038dSYoshinobu Inoue {
106621f08a07SBjoern A. Zeeb struct ip6q *q6;
10679cb1a47aSBjoern A. Zeeb uint32_t bucket;
10689888c401SHajimu UMEMOTO
10699cb1a47aSBjoern A. Zeeb for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) {
107067a10c46SBjoern A. Zeeb IP6QB_LOCK(bucket);
107121f08a07SBjoern A. Zeeb while ((q6 = TAILQ_FIRST(IP6QB_HEAD(bucket))) != NULL) {
10729cb8d207SAndrey V. Elsukov IP6STAT_INC(ip6s_fragdropped);
107382cd038dSYoshinobu Inoue /* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
107421f08a07SBjoern A. Zeeb frag6_freef(q6, bucket);
107580d7a853SJonathan T. Looney }
10769cb1a47aSBjoern A. Zeeb IP6QB_UNLOCK(bucket);
107782cd038dSYoshinobu Inoue }
107867a10c46SBjoern A. Zeeb }
107967a10c46SBjoern A. Zeeb
/*
 * Drain all fragment reassembly queues across every VNET.
 * Both arguments are ignored; they presumably exist to match a drain
 * event-handler signature — confirm against the registration site.
 */
void
frag6_drain(void *arg __unused, int flags __unused)
{
	VNET_ITERATOR_DECL(vnet_iter);

	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		frag6_drain_one();
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
}
1093e5ee7060SGleb Smirnoff
#ifdef VIMAGE
/*
 * Clear up IPv6 reassembly structures for a VNET being torn down:
 * free any remaining queues, mark reassembly off, and destroy the
 * per-bucket locks.
 */
void
frag6_destroy(void)
{
	uint32_t bucket;

	frag6_drain_one();
	V_frag6_on = false;
	for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) {
		/*
		 * All queues must be gone after the drain above.
		 * Use %u for the unsigned bucket index (-Wformat);
		 * 'count' type is declared elsewhere, left as %d.
		 */
		KASSERT(V_ip6qb[bucket].count == 0,
		    ("%s: V_ip6qb[%u] (%p) count not 0 (%d)", __func__,
		    bucket, &V_ip6qb[bucket], V_ip6qb[bucket].count));
		mtx_destroy(&V_ip6qb[bucket].lock);
	}
}
#endif
1113