/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 * All rights reserved.
 * Copyright (c) 2019 Netflix, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	$KAME: frag6.c,v 1.33 2002/01/07 11:34:48 kjc Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_rss.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/domain.h>
#include <sys/eventhandler.h>
#include <sys/hash.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/netisr.h>
#include <net/route.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/icmp6.h>
#include <netinet/in_systm.h>	/* For ECN definitions. */
#include <netinet/ip.h>		/* For ECN definitions. */

#ifdef MAC
#include <security/mac/mac_framework.h>
#endif

/*
 * A "big picture" of how IPv6 fragment queues are all linked together.
 *
 * struct ip6qbucket ip6qb[...];			hashed buckets
 * ||||||||
 * |
 * +--- TAILQ(struct ip6q, packets) *q6;		tailq entries holding
 *      ||||||||					fragmented packets
 *      |						(1 per original packet)
 *      |
 *      +--- TAILQ(struct ip6asfrag, ip6q_frags) *af6;	tailq entries of IPv6
 *           |                                   *ip6af;fragment packets
 *           |						for one original packet
 *           + *mbuf
 */

/* Reassembly headers are stored in hash buckets. */
#define	IP6REASS_NHASH_LOG2	10
#define	IP6REASS_NHASH		(1 << IP6REASS_NHASH_LOG2)
#define	IP6REASS_HMASK		(IP6REASS_NHASH - 1)
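
/*
 * With IP6REASS_NHASH_LOG2 == 10 this gives 1024 hash buckets and a
 * bucket mask of 0x3ff; see the bucket selection in frag6_input().
 */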

TAILQ_HEAD(ip6qhead, ip6q);
struct ip6qbucket {
	struct ip6qhead	packets;
	struct mtx	lock;
	int		count;
};

struct ip6asfrag {
	TAILQ_ENTRY(ip6asfrag) ip6af_tq;
	struct mbuf	*ip6af_m;
	int		ip6af_offset;	/* Offset in ip6af_m to next header. */
	int		ip6af_frglen;	/* Fragmentable part length. */
	int		ip6af_off;	/* Fragment offset. */
	bool		ip6af_mff;	/* More fragment bit in frag off. */
};

static MALLOC_DEFINE(M_FRAG6, "frag6", "IPv6 fragment reassembly header");

#ifdef VIMAGE
/* A flag to indicate if IPv6 fragment reassembly is initialized. */
VNET_DEFINE_STATIC(bool,		frag6_on);
#define	V_frag6_on			VNET(frag6_on)
#endif

/* System wide (global) maximum and count of packets in reassembly queues. */
static int ip6_maxfrags;
static volatile u_int frag6_nfrags = 0;

/* Maximum and current packets in per-VNET reassembly queues. */
VNET_DEFINE_STATIC(int,			ip6_maxfragpackets);
VNET_DEFINE_STATIC(volatile u_int,	frag6_nfragpackets);
#define	V_ip6_maxfragpackets		VNET(ip6_maxfragpackets)
#define	V_frag6_nfragpackets		VNET(frag6_nfragpackets)

/* Maximum per-VNET reassembly queues per bucket and fragments per packet. */
VNET_DEFINE_STATIC(int,			ip6_maxfragbucketsize);
VNET_DEFINE_STATIC(int,			ip6_maxfragsperpacket);
#define	V_ip6_maxfragbucketsize		VNET(ip6_maxfragbucketsize)
#define	V_ip6_maxfragsperpacket		VNET(ip6_maxfragsperpacket)

/* Per-VNET reassembly queue buckets. */
VNET_DEFINE_STATIC(struct ip6qbucket,	ip6qb[IP6REASS_NHASH]);
VNET_DEFINE_STATIC(uint32_t,		ip6qb_hashseed);
#define	V_ip6qb				VNET(ip6qb)
#define	V_ip6qb_hashseed		VNET(ip6qb_hashseed)

#define	IP6QB_LOCK(_b)		mtx_lock(&V_ip6qb[(_b)].lock)
#define	IP6QB_TRYLOCK(_b)	mtx_trylock(&V_ip6qb[(_b)].lock)
#define	IP6QB_LOCK_ASSERT(_b)	mtx_assert(&V_ip6qb[(_b)].lock, MA_OWNED)
#define	IP6QB_UNLOCK(_b)	mtx_unlock(&V_ip6qb[(_b)].lock)
#define	IP6QB_HEAD(_b)		(&V_ip6qb[(_b)].packets)

/*
 * By default, limit the number of IP6 fragments across all reassembly
 * queues to 1/32 of the total number of mbuf clusters.
 *
 * Limit the total number of reassembly queues per VNET to the
 * IP6 fragment limit, but ensure the limit will not allow any bucket
 * to grow above 100 items.  (The bucket limit is
 * IP6_MAXFRAGPACKETS / (IP6REASS_NHASH / 2), so 50 is the correct
 * multiplier to reach a 100-item limit.)
 * The 100-item limit was chosen as brief testing seems to show that
 * this produces "reasonable" performance on some subset of systems
 * under DoS attack.
 */
#define	IP6_MAXFRAGS		(nmbclusters / 32)
#define	IP6_MAXFRAGPACKETS	(imin(IP6_MAXFRAGS, IP6REASS_NHASH * 50))
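
/*
 * Worked example with hypothetical numbers: nmbclusters = 65536 gives
 * IP6_MAXFRAGS = 65536 / 32 = 2048 and IP6_MAXFRAGPACKETS =
 * imin(2048, 1024 * 50) = 2048; the 51200 cap (and with it the
 * 100-item bucket limit described above) only takes effect once
 * nmbclusters exceeds 1638400.
 */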


/*
 * Sysctls and helper function.
 */
SYSCTL_DECL(_net_inet6_ip6);

SYSCTL_UINT(_net_inet6_ip6, OID_AUTO, frag6_nfrags,
	CTLFLAG_RD, __DEVOLATILE(u_int *, &frag6_nfrags), 0,
	"Global number of IPv6 fragments across all reassembly queues.");

static void
frag6_set_bucketsize(void)
{
	int i;

	if ((i = V_ip6_maxfragpackets) > 0)
		V_ip6_maxfragbucketsize = imax(i / (IP6REASS_NHASH / 2), 1);
}
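
/*
 * Example: with V_ip6_maxfragpackets at the 51200 default cap, the
 * per-bucket limit becomes imax(51200 / (1024 / 2), 1) = 100 queues.
 */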

SYSCTL_INT(_net_inet6_ip6, IPV6CTL_MAXFRAGS, maxfrags,
	CTLFLAG_RW, &ip6_maxfrags, 0,
	"Maximum allowed number of outstanding IPv6 packet fragments. "
	"A value of 0 means no fragmented packets will be accepted, while a "
	"value of -1 means no limit");

static int
sysctl_ip6_maxfragpackets(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = V_ip6_maxfragpackets;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || !req->newptr)
		return (error);
	V_ip6_maxfragpackets = val;
	frag6_set_bucketsize();
	return (0);
}
SYSCTL_PROC(_net_inet6_ip6, IPV6CTL_MAXFRAGPACKETS, maxfragpackets,
	CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW, NULL, 0,
	sysctl_ip6_maxfragpackets, "I",
	"Default maximum number of outstanding fragmented IPv6 packets. "
	"A value of 0 means no fragmented packets will be accepted, while a "
	"value of -1 means no limit");
SYSCTL_UINT(_net_inet6_ip6, OID_AUTO, frag6_nfragpackets,
	CTLFLAG_VNET | CTLFLAG_RD,
	__DEVOLATILE(u_int *, &VNET_NAME(frag6_nfragpackets)), 0,
	"Per-VNET number of IPv6 fragments across all reassembly queues.");
SYSCTL_INT(_net_inet6_ip6, IPV6CTL_MAXFRAGSPERPACKET, maxfragsperpacket,
	CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_maxfragsperpacket), 0,
	"Maximum allowed number of fragments per packet");
SYSCTL_INT(_net_inet6_ip6, IPV6CTL_MAXFRAGBUCKETSIZE, maxfragbucketsize,
	CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_maxfragbucketsize), 0,
	"Maximum number of reassembly queues per hash bucket");


/*
 * Remove the IPv6 fragmentation header from the mbuf.
 */
int
ip6_deletefraghdr(struct mbuf *m, int offset, int wait)
{
	struct ip6_hdr *ip6;
	struct mbuf *t;

	/* Delete frag6 header. */
	if (m->m_len >= offset + sizeof(struct ip6_frag)) {
		/* This is the only possible case with !PULLDOWN_TEST. */
		ip6 = mtod(m, struct ip6_hdr *);
		bcopy(ip6, (char *)ip6 + sizeof(struct ip6_frag),
		    offset);
		m->m_data += sizeof(struct ip6_frag);
		m->m_len -= sizeof(struct ip6_frag);
	} else {
		/* This comes with no copy if the boundary is on a cluster. */
		if ((t = m_split(m, offset, wait)) == NULL)
			return (ENOMEM);
		m_adj(t, sizeof(struct ip6_frag));
		m_cat(m, t);
	}

	m->m_flags |= M_FRAGMENTED;
	return (0);
}

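/*
 * Sketch of the fast path above: the leading "offset" bytes of IPv6
 * and extension headers are copied forward over the 8-byte fragment
 * header and m_data is advanced, so the payload itself never moves:
 *
 *	before:	| IPv6 + ext hdrs (offset bytes) | frag hdr | payload |
 *	after:		| IPv6 + ext hdrs (offset bytes) | payload |
 */
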
/*
 * Free a fragment reassembly header and all associated datagrams.
 */
static void
frag6_freef(struct ip6q *q6, uint32_t bucket)
{
	struct ip6_hdr *ip6;
	struct ip6asfrag *af6;
	struct mbuf *m;

	IP6QB_LOCK_ASSERT(bucket);

	while ((af6 = TAILQ_FIRST(&q6->ip6q_frags)) != NULL) {
		m = af6->ip6af_m;
		TAILQ_REMOVE(&q6->ip6q_frags, af6, ip6af_tq);

		/*
		 * Return ICMP time exceeded error for the 1st fragment.
		 * Just free other fragments.
		 */
		if (af6->ip6af_off == 0 && m->m_pkthdr.rcvif != NULL) {
			/* Adjust pointer. */
			ip6 = mtod(m, struct ip6_hdr *);

			/* Restore source and destination addresses. */
			ip6->ip6_src = q6->ip6q_src;
			ip6->ip6_dst = q6->ip6q_dst;

			icmp6_error(m, ICMP6_TIME_EXCEEDED,
			    ICMP6_TIME_EXCEED_REASSEMBLY, 0);
		} else
			m_freem(m);

		free(af6, M_FRAG6);
	}

	TAILQ_REMOVE(IP6QB_HEAD(bucket), q6, ip6q_tq);
	V_ip6qb[bucket].count--;
	atomic_subtract_int(&frag6_nfrags, q6->ip6q_nfrag);
#ifdef MAC
	mac_ip6q_destroy(q6);
#endif
	free(q6, M_FRAG6);
	atomic_subtract_int(&V_frag6_nfragpackets, 1);
}

/*
 * Drain off all datagram fragments belonging to
 * the given network interface.
 */
static void
frag6_cleanup(void *arg __unused, struct ifnet *ifp)
{
	struct ip6qhead *head;
	struct ip6q *q6;
	struct ip6asfrag *af6;
	uint32_t bucket;

	KASSERT(ifp != NULL, ("%s: ifp is NULL", __func__));

	CURVNET_SET_QUIET(ifp->if_vnet);
#ifdef VIMAGE
	/*
	 * Skip processing if IPv6 reassembly is not initialised or
	 * has already been torn down by frag6_destroy().
	 */
	if (!V_frag6_on) {
		CURVNET_RESTORE();
		return;
	}
#endif

	for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) {
		IP6QB_LOCK(bucket);
		head = IP6QB_HEAD(bucket);
		/* Scan fragment list. */
		TAILQ_FOREACH(q6, head, ip6q_tq) {
			TAILQ_FOREACH(af6, &q6->ip6q_frags, ip6af_tq) {
				/* Clear no longer valid rcvif pointer. */
				if (af6->ip6af_m->m_pkthdr.rcvif == ifp)
					af6->ip6af_m->m_pkthdr.rcvif = NULL;
			}
		}
		IP6QB_UNLOCK(bucket);
	}
	CURVNET_RESTORE();
}
EVENTHANDLER_DEFINE(ifnet_departure_event, frag6_cleanup, NULL, 0);

/*
 * As in RFC 2460, the fragment and reassembly rules of RFC 8200 do not
 * agree with each other on the handling of the next header field in the
 * fragment header: the sender uses the same value for all fragments of a
 * packet, yet the receiver is advised not to check the fragments for
 * consistency.
 *
 * Fragment rules (p18,p19):
 *	(2)  A Fragment header containing:
 *	The Next Header value that identifies the first header
 *	after the Per-Fragment headers of the original packet.
 *		-> next header field is same for all fragments
 *
 * Reassembly rule (p20):
 *	The Next Header field of the last header of the Per-Fragment
 *	headers is obtained from the Next Header field of the first
 *	fragment's Fragment header.
 *		-> should grab it from the first fragment only
 *
 * The following note also contradicts the fragment rule - no one is going
 * to send fragments of the same packet with different next header fields.
 *
 * Additional note (p22) [not an error]:
 *	The Next Header values in the Fragment headers of different
 *	fragments of the same original packet may differ.  Only the value
 *	from the Offset zero fragment packet is used for reassembly.
 *		-> should grab it from the first fragment only
 *
 * There is no explicit reason given in the RFC.  Historical reason maybe?
 */
/*
 * Fragment input.
 */
int
frag6_input(struct mbuf **mp, int *offp, int proto)
{
	struct mbuf *m, *t;
	struct ip6_hdr *ip6;
	struct ip6_frag *ip6f;
	struct ip6qhead *head;
	struct ip6q *q6;
	struct ip6asfrag *af6, *ip6af, *af6tmp;
	struct in6_ifaddr *ia6;
	struct ifnet *dstifp, *srcifp;
	uint32_t hashkey[(sizeof(struct in6_addr) * 2 +
		    sizeof(ip6f->ip6f_ident)) / sizeof(uint32_t)];
	uint32_t bucket, *hashkeyp;
	int fragoff, frgpartlen;	/* Must be wider than uint16_t. */
	int nxt, offset, plen;
	uint8_t ecn, ecn0;
	bool only_frag;
#ifdef RSS
	struct ip6_direct_ctx *ip6dc;
	struct m_tag *mtag;
#endif

	m = *mp;
	offset = *offp;

	M_ASSERTPKTHDR(m);

	ip6 = mtod(m, struct ip6_hdr *);
#ifndef PULLDOWN_TEST
	IP6_EXTHDR_CHECK(m, offset, sizeof(struct ip6_frag), IPPROTO_DONE);
	ip6f = (struct ip6_frag *)((caddr_t)ip6 + offset);
#else
	IP6_EXTHDR_GET(ip6f, struct ip6_frag *, m, offset, sizeof(*ip6f));
	if (ip6f == NULL)
		return (IPPROTO_DONE);
#endif

	dstifp = NULL;
	/* Find the destination interface of the packet. */
	ia6 = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */);
	if (ia6 != NULL) {
		dstifp = ia6->ia_ifp;
		ifa_free(&ia6->ia_ifa);
	}

	/* Jumbo payload cannot contain a fragment header. */
	if (ip6->ip6_plen == 0) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, offset);
		in6_ifstat_inc(dstifp, ifs6_reass_fail);
		return (IPPROTO_DONE);
	}

	/*
	 * Check whether the fragment length of this fragment packet is a
	 * multiple of 8 octets (unless it is the last one).
	 * The fragment data starts sizeof(struct ip6_frag) == 8 octets
	 * after "offset", and ip6_plen does not count the 40-octet
	 * (sizeof(struct ip6_hdr)) IPv6 header, so the data length is
	 * plen + 40 - offset - 8.  Both constants are multiples of 8,
	 * hence testing (plen - offset) & 0x7 below is equivalent.
	 */
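	/*
	 * Example with made-up numbers: a non-last fragment carrying
	 * 1232 data bytes behind a fragment header at offset 40 has
	 * plen = 8 + 1232 = 1240 and passes ((1240 - 40) & 0x7 == 0);
	 * with 1230 data bytes it fails ((1238 - 40) & 0x7 == 6).
	 */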
	if ((ip6f->ip6f_offlg & IP6F_MORE_FRAG) &&
	    (((ntohs(ip6->ip6_plen) - offset) & 0x7) != 0)) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
		    offsetof(struct ip6_hdr, ip6_plen));
		in6_ifstat_inc(dstifp, ifs6_reass_fail);
		return (IPPROTO_DONE);
	}

	IP6STAT_INC(ip6s_fragments);
	in6_ifstat_inc(dstifp, ifs6_reass_reqd);

	/*
	 * Handle "atomic" fragments (offset and M bit both set to 0)
	 * upfront, unrelated to any reassembly.  We need to remove the
	 * frag hdr which is ugly.
	 * See RFC 6946 and section 4.5 of RFC 8200.
	 */
	if ((ip6f->ip6f_offlg & ~IP6F_RESERVED_MASK) == 0) {
		IP6STAT_INC(ip6s_atomicfrags);
		nxt = ip6f->ip6f_nxt;
		/*
		 * Set nxt(-hdr field value) to the original value.
		 * We cannot just set ip6->ip6_nxt as there might be
		 * an unfragmentable part with extension headers and
		 * we must update the last one.
		 */
		m_copyback(m, ip6_get_prevhdr(m, offset), sizeof(uint8_t),
		    (caddr_t)&nxt);
		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) -
		    sizeof(struct ip6_frag));
		if (ip6_deletefraghdr(m, offset, M_NOWAIT) != 0)
			goto dropfrag2;
		m->m_pkthdr.len -= sizeof(struct ip6_frag);
		in6_ifstat_inc(dstifp, ifs6_reass_ok);
		*mp = m;
		return (nxt);
	}

	/* Offset now points to data portion. */
	offset += sizeof(struct ip6_frag);

	/* Get fragment length and discard 0-byte fragments. */
	frgpartlen = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - offset;
	if (frgpartlen == 0) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
		    offsetof(struct ip6_hdr, ip6_plen));
		in6_ifstat_inc(dstifp, ifs6_reass_fail);
		IP6STAT_INC(ip6s_fragdropped);
		return (IPPROTO_DONE);
	}

	/*
	 * Enforce upper bound on number of fragments for the entire system.
	 * If ip6_maxfrags is 0, never accept fragments.
	 * If ip6_maxfrags is -1, accept all fragments without limitation.
	 */
	if (ip6_maxfrags < 0)
		;
	else if (atomic_load_int(&frag6_nfrags) >= (u_int)ip6_maxfrags)
		goto dropfrag2;

	/*
	 * Validate that a full header chain to the ULP is present in the
	 * packet containing the first fragment as per RFC 7112 and
	 * RFC 8200 pages 18,19:
	 * The first fragment packet is composed of:
	 * (3)  Extension headers, if any, and the Upper-Layer header.  These
	 *      headers must be in the first fragment.  ...
	 */
	fragoff = ntohs(ip6f->ip6f_offlg & IP6F_OFF_MASK);
	/* XXX TODO.  thj has D16851 open for this. */
	/* Send ICMPv6 4,3 in case of violation. */

	/* Store receive network interface pointer for later. */
	srcifp = m->m_pkthdr.rcvif;

	/* Generate a hash value for fragment bucket selection. */
	hashkeyp = hashkey;
	memcpy(hashkeyp, &ip6->ip6_src, sizeof(struct in6_addr));
	hashkeyp += sizeof(struct in6_addr) / sizeof(*hashkeyp);
	memcpy(hashkeyp, &ip6->ip6_dst, sizeof(struct in6_addr));
	hashkeyp += sizeof(struct in6_addr) / sizeof(*hashkeyp);
	*hashkeyp = ip6f->ip6f_ident;
	bucket = jenkins_hash32(hashkey, nitems(hashkey), V_ip6qb_hashseed);
	bucket &= IP6REASS_HMASK;
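	/*
	 * The key consists of nine 32-bit words: four each for the
	 * source and destination addresses plus one for the fragment
	 * identification, so all fragments of one original packet hash
	 * to the same one of the 1024 buckets.
	 */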
	IP6QB_LOCK(bucket);
	head = IP6QB_HEAD(bucket);

	TAILQ_FOREACH(q6, head, ip6q_tq)
		if (ip6f->ip6f_ident == q6->ip6q_ident &&
		    IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, &q6->ip6q_src) &&
		    IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &q6->ip6q_dst)
#ifdef MAC
		    && mac_ip6q_match(m, q6)
#endif
		    )
			break;

	only_frag = false;
	if (q6 == NULL) {
		/* A first fragment to arrive creates a reassembly queue. */
		only_frag = true;

		/*
		 * Enforce upper bound on number of fragmented packets
		 * for which we attempt reassembly.
		 * If maxfragpackets is 0, never accept fragments.
		 * If maxfragpackets is -1, accept all fragments without
		 * limitation.
		 */
		if (V_ip6_maxfragpackets < 0)
			;
		else if (V_ip6qb[bucket].count >= V_ip6_maxfragbucketsize ||
		    atomic_load_int(&V_frag6_nfragpackets) >=
		    (u_int)V_ip6_maxfragpackets)
			goto dropfrag;

		/* Allocate IPv6 fragment packet queue entry. */
		q6 = (struct ip6q *)malloc(sizeof(struct ip6q), M_FRAG6,
		    M_NOWAIT | M_ZERO);
		if (q6 == NULL)
			goto dropfrag;
#ifdef MAC
		if (mac_ip6q_init(q6, M_NOWAIT) != 0) {
			free(q6, M_FRAG6);
			goto dropfrag;
		}
		mac_ip6q_create(m, q6);
#endif
		atomic_add_int(&V_frag6_nfragpackets, 1);

		/* ip6q_nxt will be filled afterwards, from 1st fragment. */
		TAILQ_INIT(&q6->ip6q_frags);
		q6->ip6q_ident	= ip6f->ip6f_ident;
		q6->ip6q_ttl	= IPV6_FRAGTTL;
		q6->ip6q_src	= ip6->ip6_src;
		q6->ip6q_dst	= ip6->ip6_dst;
		q6->ip6q_ecn	=
		    (ntohl(ip6->ip6_flow) >> 20) & IPTOS_ECN_MASK;
		q6->ip6q_unfrglen = -1;	/* The 1st fragment has not arrived. */

		/* Add the fragmented packet to the bucket. */
		TAILQ_INSERT_HEAD(head, q6, ip6q_tq);
		V_ip6qb[bucket].count++;
	}

	/*
	 * If it is the 1st fragment, record the length of the
	 * unfragmentable part and the next header of the fragment header.
	 * Assume the first fragment to arrive is correct.
	 * We do not have any duplicate checks here yet, so another packet
	 * with fragoff == 0 could come and overwrite the ip6q_unfrglen
	 * and, worse, the next header, at any time.
	 */
	if (fragoff == 0 && q6->ip6q_unfrglen == -1) {
		q6->ip6q_unfrglen = offset - sizeof(struct ip6_hdr) -
		    sizeof(struct ip6_frag);
		q6->ip6q_nxt = ip6f->ip6f_nxt;
		/* XXX ECN? */
	}

	/*
	 * Check that the reassembled packet would not exceed 65535 bytes
	 * in size.
	 * If it would exceed, discard the fragment and return an ICMP error.
	 */
	if (q6->ip6q_unfrglen >= 0) {
		/* The 1st fragment has already arrived. */
		if (q6->ip6q_unfrglen + fragoff + frgpartlen > IPV6_MAXPACKET) {
			if (only_frag) {
				TAILQ_REMOVE(head, q6, ip6q_tq);
				V_ip6qb[bucket].count--;
				atomic_subtract_int(&V_frag6_nfragpackets, 1);
#ifdef MAC
				mac_ip6q_destroy(q6);
#endif
				free(q6, M_FRAG6);
			}
			IP6QB_UNLOCK(bucket);
			icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
			    offset - sizeof(struct ip6_frag) +
			    offsetof(struct ip6_frag, ip6f_offlg));
			return (IPPROTO_DONE);
		}
	} else if (fragoff + frgpartlen > IPV6_MAXPACKET) {
		if (only_frag) {
			TAILQ_REMOVE(head, q6, ip6q_tq);
			V_ip6qb[bucket].count--;
			atomic_subtract_int(&V_frag6_nfragpackets, 1);
#ifdef MAC
			mac_ip6q_destroy(q6);
#endif
			free(q6, M_FRAG6);
		}
		IP6QB_UNLOCK(bucket);
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
		    offset - sizeof(struct ip6_frag) +
		    offsetof(struct ip6_frag, ip6f_offlg));
		return (IPPROTO_DONE);
	}
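
	/*
	 * Example: with an empty unfragmentable part (ip6q_unfrglen 0),
	 * a fragment at offset 65528 carrying 16 bytes would yield a
	 * 65544-byte packet, exceeding IPV6_MAXPACKET = 65535, so the
	 * ICMPv6 error above points at the offending ip6f_offlg field.
	 */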

	/*
	 * If it is the first fragment, do the above check for each
	 * fragment already stored in the reassembly queue.
	 */
	if (fragoff == 0 && !only_frag) {
		TAILQ_FOREACH_SAFE(af6, &q6->ip6q_frags, ip6af_tq, af6tmp) {
			if (q6->ip6q_unfrglen + af6->ip6af_off +
			    af6->ip6af_frglen > IPV6_MAXPACKET) {
				struct ip6_hdr *ip6err;
				struct mbuf *merr;
				int erroff;

				merr = af6->ip6af_m;
				erroff = af6->ip6af_offset;

				/* Dequeue the fragment. */
				TAILQ_REMOVE(&q6->ip6q_frags, af6, ip6af_tq);
				q6->ip6q_nfrag--;
				atomic_subtract_int(&frag6_nfrags, 1);
				free(af6, M_FRAG6);

				/* Set a valid receive interface pointer. */
				merr->m_pkthdr.rcvif = srcifp;

				/* Adjust pointer. */
				ip6err = mtod(merr, struct ip6_hdr *);

				/*
				 * Restore source and destination addresses
				 * in the erroneous IPv6 header.
				 */
				ip6err->ip6_src = q6->ip6q_src;
				ip6err->ip6_dst = q6->ip6q_dst;

				icmp6_error(merr, ICMP6_PARAM_PROB,
				    ICMP6_PARAMPROB_HEADER,
				    erroff - sizeof(struct ip6_frag) +
				    offsetof(struct ip6_frag, ip6f_offlg));
			}
		}
	}

	/* Allocate an IPv6 fragment queue entry for this fragmented part. */
	ip6af = (struct ip6asfrag *)malloc(sizeof(struct ip6asfrag), M_FRAG6,
	    M_NOWAIT | M_ZERO);
	if (ip6af == NULL)
		goto dropfrag;
	ip6af->ip6af_mff = (ip6f->ip6f_offlg & IP6F_MORE_FRAG) ? true : false;
	ip6af->ip6af_off = fragoff;
	ip6af->ip6af_frglen = frgpartlen;
	ip6af->ip6af_offset = offset;
	ip6af->ip6af_m = m;

	if (only_frag) {
		/*
		 * Do a manual insert rather than a hard-to-understand cast
		 * to a different type relying on data structure order to work.
		 */
		TAILQ_INSERT_HEAD(&q6->ip6q_frags, ip6af, ip6af_tq);
		goto postinsert;
	}

	/* Do duplicate, condition, and boundary checks. */
	/*
	 * Handle ECN by comparing this segment with the first one;
	 * if CE is set, do not lose CE.
	 * Drop if CE and not-ECT are mixed for the same packet.
	 */
	ecn = (ntohl(ip6->ip6_flow) >> 20) & IPTOS_ECN_MASK;
	ecn0 = q6->ip6q_ecn;
	if (ecn == IPTOS_ECN_CE) {
		if (ecn0 == IPTOS_ECN_NOTECT) {
			free(ip6af, M_FRAG6);
			goto dropfrag;
		}
		if (ecn0 != IPTOS_ECN_CE)
			q6->ip6q_ecn = IPTOS_ECN_CE;
	}
	if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT) {
		free(ip6af, M_FRAG6);
		goto dropfrag;
	}
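
	/*
	 * Summary of the ECN checks above (ecn is this fragment, ecn0
	 * the first one):
	 *	ecn == CE      and ecn0 == not-ECT  -> drop
	 *	ecn == CE      and ecn0 == ECT(0/1) -> remember CE
	 *	ecn == not-ECT and ecn0 != not-ECT  -> drop
	 *	anything else                       -> accept, keep ecn0
	 */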

	/* Find a fragmented part which begins after this one does. */
	TAILQ_FOREACH(af6, &q6->ip6q_frags, ip6af_tq)
		if (af6->ip6af_off > ip6af->ip6af_off)
			break;

	/*
	 * If the incoming fragment overlaps some existing fragments in
	 * the reassembly queue, drop both the new fragment and the
	 * entire reassembly queue.  However, if the new fragment
	 * is an exact duplicate of an existing fragment, only silently
	 * drop the new fragment and leave the fragmentation queue
	 * unchanged, as allowed by the RFC.  (RFC 8200, 4.5)
	 */
	if (af6 != NULL)
		af6tmp = TAILQ_PREV(af6, ip6fraghead, ip6af_tq);
	else
		af6tmp = TAILQ_LAST(&q6->ip6q_frags, ip6fraghead);
	if (af6tmp != NULL) {
		if (af6tmp->ip6af_off + af6tmp->ip6af_frglen -
		    ip6af->ip6af_off > 0) {
			if (af6tmp->ip6af_off != ip6af->ip6af_off ||
			    af6tmp->ip6af_frglen != ip6af->ip6af_frglen)
				frag6_freef(q6, bucket);
			free(ip6af, M_FRAG6);
			goto dropfrag;
		}
	}
	if (af6 != NULL) {
		if (ip6af->ip6af_off + ip6af->ip6af_frglen -
		    af6->ip6af_off > 0) {
			if (af6->ip6af_off != ip6af->ip6af_off ||
			    af6->ip6af_frglen != ip6af->ip6af_frglen)
				frag6_freef(q6, bucket);
			free(ip6af, M_FRAG6);
			goto dropfrag;
		}
	}
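
	/*
	 * Example: a queued fragment with off 0 and frglen 1232 ends at
	 * byte 1232; a new fragment with off 1224 starts inside it
	 * (0 + 1232 - 1224 > 0), so the entire queue is dropped.  A new
	 * fragment with off 0 and frglen 1232 is an exact duplicate:
	 * only the new copy is dropped and the queue survives.
	 */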

#ifdef MAC
	mac_ip6q_update(m, q6);
#endif

	/*
	 * Stick new segment in its place; check for complete reassembly.
	 * If not complete, check fragment limit.
	 */
	if (af6 != NULL)
		TAILQ_INSERT_BEFORE(af6, ip6af, ip6af_tq);
	else
		TAILQ_INSERT_TAIL(&q6->ip6q_frags, ip6af, ip6af_tq);
postinsert:
	atomic_add_int(&frag6_nfrags, 1);
	q6->ip6q_nfrag++;

	plen = 0;
	TAILQ_FOREACH(af6, &q6->ip6q_frags, ip6af_tq) {
		if (af6->ip6af_off != plen) {
			if (q6->ip6q_nfrag > V_ip6_maxfragsperpacket) {
				IP6STAT_ADD(ip6s_fragdropped, q6->ip6q_nfrag);
				frag6_freef(q6, bucket);
			}
			IP6QB_UNLOCK(bucket);
			return (IPPROTO_DONE);
		}
		plen += af6->ip6af_frglen;
	}
	af6 = TAILQ_LAST(&q6->ip6q_frags, ip6fraghead);
	if (af6->ip6af_mff) {
		if (q6->ip6q_nfrag > V_ip6_maxfragsperpacket) {
			IP6STAT_ADD(ip6s_fragdropped, q6->ip6q_nfrag);
			frag6_freef(q6, bucket);
		}
		IP6QB_UNLOCK(bucket);
		return (IPPROTO_DONE);
	}

	/* Reassembly is complete; concatenate fragments. */
	ip6af = TAILQ_FIRST(&q6->ip6q_frags);
	t = m = ip6af->ip6af_m;
	TAILQ_REMOVE(&q6->ip6q_frags, ip6af, ip6af_tq);
	while ((af6 = TAILQ_FIRST(&q6->ip6q_frags)) != NULL) {
		m->m_pkthdr.csum_flags &=
		    af6->ip6af_m->m_pkthdr.csum_flags;
		m->m_pkthdr.csum_data +=
		    af6->ip6af_m->m_pkthdr.csum_data;

		TAILQ_REMOVE(&q6->ip6q_frags, af6, ip6af_tq);
		t = m_last(t);
		m_adj(af6->ip6af_m, af6->ip6af_offset);
		m_demote_pkthdr(af6->ip6af_m);
		m_cat(t, af6->ip6af_m);
		free(af6, M_FRAG6);
	}

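	/*
	 * Fold the accumulated 16-bit partial checksums below; e.g. an
	 * accumulated sum of 0x1abcd becomes 0xabcd + 0x1 = 0xabce.
	 */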
	while (m->m_pkthdr.csum_data & 0xffff0000)
		m->m_pkthdr.csum_data = (m->m_pkthdr.csum_data & 0xffff) +
		    (m->m_pkthdr.csum_data >> 16);

	/* Adjust offset to point where the original next header starts. */
	offset = ip6af->ip6af_offset - sizeof(struct ip6_frag);
	free(ip6af, M_FRAG6);
	ip6 = mtod(m, struct ip6_hdr *);
	ip6->ip6_plen = htons((u_short)plen + offset - sizeof(struct ip6_hdr));
	if (q6->ip6q_ecn == IPTOS_ECN_CE)
		ip6->ip6_flow |= htonl(IPTOS_ECN_CE << 20);
	nxt = q6->ip6q_nxt;

	TAILQ_REMOVE(head, q6, ip6q_tq);
	V_ip6qb[bucket].count--;
	atomic_subtract_int(&frag6_nfrags, q6->ip6q_nfrag);

	if (ip6_deletefraghdr(m, offset, M_NOWAIT) != 0) {
#ifdef MAC
		mac_ip6q_destroy(q6);
#endif
		free(q6, M_FRAG6);
		atomic_subtract_int(&V_frag6_nfragpackets, 1);

		goto dropfrag;
	}

	/* Set nxt(-hdr field value) to the original value. */
	m_copyback(m, ip6_get_prevhdr(m, offset), sizeof(uint8_t),
	    (caddr_t)&nxt);

#ifdef MAC
	mac_ip6q_reassemble(q6, m);
	mac_ip6q_destroy(q6);
#endif
	free(q6, M_FRAG6);
	atomic_subtract_int(&V_frag6_nfragpackets, 1);

	if (m->m_flags & M_PKTHDR) { /* Isn't it always true? */
		plen = 0;
		for (t = m; t; t = t->m_next)
			plen += t->m_len;
		m->m_pkthdr.len = plen;
		/* Set a valid receive interface pointer. */
		m->m_pkthdr.rcvif = srcifp;
	}

#ifdef RSS
	mtag = m_tag_alloc(MTAG_ABI_IPV6, IPV6_TAG_DIRECT, sizeof(*ip6dc),
	    M_NOWAIT);
	if (mtag == NULL)
		goto dropfrag;

	ip6dc = (struct ip6_direct_ctx *)(mtag + 1);
	ip6dc->ip6dc_nxt = nxt;
	ip6dc->ip6dc_off = offset;

	m_tag_prepend(m, mtag);
#endif

	IP6QB_UNLOCK(bucket);
	IP6STAT_INC(ip6s_reassembled);
	in6_ifstat_inc(dstifp, ifs6_reass_ok);

#ifdef RSS
	/* Queue/dispatch for reprocessing. */
	netisr_dispatch(NETISR_IPV6_DIRECT, m);
	return (IPPROTO_DONE);
#endif

	/* Tell launch routine the next header. */
	*mp = m;
	*offp = offset;

	return (nxt);

dropfrag:
	IP6QB_UNLOCK(bucket);
dropfrag2:
	in6_ifstat_inc(dstifp, ifs6_reass_fail);
	IP6STAT_INC(ip6s_fragdropped);
	m_freem(m);
	return (IPPROTO_DONE);
}

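/*
 * To summarise: frag6_input() either consumes the mbuf (returning
 * IPPROTO_DONE after queueing, dropping, or RSS re-dispatching it) or
 * hands the packet back to the caller through *mp/*offp and returns
 * the next header value for the caller's dispatch loop.
 */
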
/*
 * IPv6 reassembly timer processing;
 * if the TTL of a reassembly queue expires, discard the queue.
 */
void
frag6_slowtimo(void)
{
	VNET_ITERATOR_DECL(vnet_iter);
	struct ip6qhead *head;
	struct ip6q *q6, *q6tmp;
	uint32_t bucket;

	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) {
			IP6QB_LOCK(bucket);
			head = IP6QB_HEAD(bucket);
			TAILQ_FOREACH_SAFE(q6, head, ip6q_tq, q6tmp)
				if (--q6->ip6q_ttl == 0) {
					IP6STAT_ADD(ip6s_fragtimeout,
						q6->ip6q_nfrag);
					/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
					frag6_freef(q6, bucket);
				}
			/*
			 * If we are over the maximum number of fragments
			 * (due to the limit being lowered), drain off
			 * enough to get down to the new limit.
			 * Note that we drain all reassembly queues if
			 * maxfragpackets is 0 (reassembly is disabled),
			 * and do not enforce a limit when maxfragpackets
			 * is negative.
			 */
			while ((V_ip6_maxfragpackets == 0 ||
			    (V_ip6_maxfragpackets > 0 &&
			    V_ip6qb[bucket].count > V_ip6_maxfragbucketsize)) &&
			    (q6 = TAILQ_LAST(head, ip6qhead)) != NULL) {
				IP6STAT_ADD(ip6s_fragoverflow, q6->ip6q_nfrag);
				/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
				frag6_freef(q6, bucket);
			}
			IP6QB_UNLOCK(bucket);
		}
		/*
		 * If we are still over the maximum number of fragmented
		 * packets, drain off enough to get down to the new limit.
		 */
		bucket = 0;
		while (V_ip6_maxfragpackets >= 0 &&
		    atomic_load_int(&V_frag6_nfragpackets) >
		    (u_int)V_ip6_maxfragpackets) {
			IP6QB_LOCK(bucket);
			q6 = TAILQ_LAST(IP6QB_HEAD(bucket), ip6qhead);
			if (q6 != NULL) {
				IP6STAT_ADD(ip6s_fragoverflow, q6->ip6q_nfrag);
				/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
				frag6_freef(q6, bucket);
			}
			IP6QB_UNLOCK(bucket);
			bucket = (bucket + 1) % IP6REASS_NHASH;
		}
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
}

/*
 * Eventhandler to adjust limits in case nmbclusters changes.
 */
static void
frag6_change(void *tag)
{
	VNET_ITERATOR_DECL(vnet_iter);

	ip6_maxfrags = IP6_MAXFRAGS;
	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		V_ip6_maxfragpackets = IP6_MAXFRAGPACKETS;
		frag6_set_bucketsize();
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
}

/*
 * Initialise reassembly queues and the bucket hash seed.
 */
void
frag6_init(void)
{
	uint32_t bucket;

	V_ip6_maxfragpackets = IP6_MAXFRAGPACKETS;
	frag6_set_bucketsize();
	for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) {
		TAILQ_INIT(IP6QB_HEAD(bucket));
		mtx_init(&V_ip6qb[bucket].lock, "ip6qb", NULL, MTX_DEF);
		V_ip6qb[bucket].count = 0;
	}
	V_ip6qb_hashseed = arc4random();
	V_ip6_maxfragsperpacket = 64;
#ifdef VIMAGE
	V_frag6_on = true;
#endif
	if (!IS_DEFAULT_VNET(curvnet))
		return;

	ip6_maxfrags = IP6_MAXFRAGS;
	EVENTHANDLER_REGISTER(nmbclusters_change,
	    frag6_change, NULL, EVENTHANDLER_PRI_ANY);
}

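/*
 * The global fragment limit and the nmbclusters eventhandler are
 * shared by all VNETs and are therefore only set up from the default
 * VNET above.
 */
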
/*
 * Drain off all datagram fragments.
 */
static void
frag6_drain_one(void)
{
	struct ip6q *q6;
	uint32_t bucket;

	for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) {
		IP6QB_LOCK(bucket);
		while ((q6 = TAILQ_FIRST(IP6QB_HEAD(bucket))) != NULL) {
			IP6STAT_INC(ip6s_fragdropped);
			/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
			frag6_freef(q6, bucket);
		}
		IP6QB_UNLOCK(bucket);
	}
}

void
frag6_drain(void)
{
	VNET_ITERATOR_DECL(vnet_iter);

	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		frag6_drain_one();
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
}

#ifdef VIMAGE
/*
 * Clean up IPv6 reassembly structures.
 */
void
frag6_destroy(void)
{
	uint32_t bucket;

	frag6_drain_one();
	V_frag6_on = false;
	for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) {
		KASSERT(V_ip6qb[bucket].count == 0,
		    ("%s: V_ip6qb[%d] (%p) count not 0 (%d)", __func__,
		    bucket, &V_ip6qb[bucket], V_ip6qb[bucket].count));
		mtx_destroy(&V_ip6qb[bucket].lock);
	}
}
#endif
1065