xref: /freebsd/sys/netinet6/frag6.c (revision 2e3f49888ec8851bafb22011533217487764fdb0)
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 * All rights reserved.
 * Copyright (c) 2019 Netflix, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	$KAME: frag6.c,v 1.33 2002/01/07 11:34:48 kjc Exp $
 */

#include <sys/cdefs.h>
#include "opt_rss.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/domain.h>
#include <sys/eventhandler.h>
#include <sys/hash.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_private.h>
#include <net/netisr.h>
#include <net/route.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/icmp6.h>
#include <netinet/in_systm.h>	/* For ECN definitions. */
#include <netinet/ip.h>		/* For ECN definitions. */

#ifdef MAC
#include <security/mac/mac_framework.h>
#endif

/*
 * A "big picture" of how IPv6 fragment queues are all linked together.
 *
 * struct ip6qbucket ip6qb[...];			hashed buckets
 * ||||||||
 * |
 * +--- TAILQ(struct ip6q, packets) *q6;		tailq entries holding
 *      ||||||||					fragmented packets
 *      |						(1 per original packet)
 *      |
 *      +--- TAILQ(struct ip6asfrag, ip6q_frags) *af6;	tailq entries of IPv6
 *           |                                   *ip6af;	fragment packets
 *           |						for one original packet
 *           + *mbuf
 */

/* Reassembly headers are stored in hash buckets. */
#define	IP6REASS_NHASH_LOG2	10
#define	IP6REASS_NHASH		(1 << IP6REASS_NHASH_LOG2)
#define	IP6REASS_HMASK		(IP6REASS_NHASH - 1)

TAILQ_HEAD(ip6qhead, ip6q);
struct ip6qbucket {
	struct ip6qhead	packets;
	struct mtx	lock;
	int		count;
};

struct ip6asfrag {
	TAILQ_ENTRY(ip6asfrag) ip6af_tq;
	struct mbuf	*ip6af_m;
	int		ip6af_offset;	/* Offset in ip6af_m to next header. */
	int		ip6af_frglen;	/* Fragmentable part length. */
	int		ip6af_off;	/* Fragment offset. */
	bool		ip6af_mff;	/* More fragment bit in frag off. */
};

static MALLOC_DEFINE(M_FRAG6, "frag6", "IPv6 fragment reassembly header");

#ifdef VIMAGE
/* A flag to indicate if IPv6 fragmentation is initialized. */
VNET_DEFINE_STATIC(bool,		frag6_on);
#define	V_frag6_on			VNET(frag6_on)
#endif

/* System wide (global) maximum and count of packets in reassembly queues. */
static int ip6_maxfrags;
static u_int __exclusive_cache_line frag6_nfrags;

/* Maximum and current packets in per-VNET reassembly queue. */
VNET_DEFINE_STATIC(int,			ip6_maxfragpackets);
VNET_DEFINE_STATIC(volatile u_int,	frag6_nfragpackets);
#define	V_ip6_maxfragpackets		VNET(ip6_maxfragpackets)
#define	V_frag6_nfragpackets		VNET(frag6_nfragpackets)

/* Maximum per-VNET reassembly timeout (milliseconds). */
VNET_DEFINE_STATIC(u_int,		ip6_fraglifetime) = IPV6_DEFFRAGTTL;
#define	V_ip6_fraglifetime		VNET(ip6_fraglifetime)

/* Maximum per-VNET reassembly queues per bucket and fragments per packet. */
VNET_DEFINE_STATIC(int,			ip6_maxfragbucketsize);
VNET_DEFINE_STATIC(int,			ip6_maxfragsperpacket);
#define	V_ip6_maxfragbucketsize		VNET(ip6_maxfragbucketsize)
#define	V_ip6_maxfragsperpacket		VNET(ip6_maxfragsperpacket)

/* Per-VNET reassembly queue buckets. */
VNET_DEFINE_STATIC(struct ip6qbucket,	ip6qb[IP6REASS_NHASH]);
VNET_DEFINE_STATIC(uint32_t,		ip6qb_hashseed);
#define	V_ip6qb				VNET(ip6qb)
#define	V_ip6qb_hashseed		VNET(ip6qb_hashseed)

#define	IP6QB_LOCK(_b)		mtx_lock(&V_ip6qb[(_b)].lock)
#define	IP6QB_TRYLOCK(_b)	mtx_trylock(&V_ip6qb[(_b)].lock)
#define	IP6QB_LOCK_ASSERT(_b)	mtx_assert(&V_ip6qb[(_b)].lock, MA_OWNED)
#define	IP6QB_UNLOCK(_b)	mtx_unlock(&V_ip6qb[(_b)].lock)
#define	IP6QB_HEAD(_b)		(&V_ip6qb[(_b)].packets)

/*
 * By default, limit the number of IP6 fragments across all reassembly
 * queues to 1/32 of the total number of mbuf clusters.
 *
 * Limit the total number of reassembly queues per VNET to the
 * IP6 fragment limit, but ensure the limit will not allow any bucket
 * to grow above 100 items. (The bucket limit is
 * IP6_MAXFRAGPACKETS / (IP6REASS_NHASH / 2), so 50 is the correct
 * multiplier to reach a 100-item limit.)
 * The 100-item limit was chosen as brief testing seems to show that
 * this produces "reasonable" performance on some subset of systems
 * under DoS attack.
 */
#define	IP6_MAXFRAGS		(nmbclusters / 32)
#define	IP6_MAXFRAGPACKETS	(imin(IP6_MAXFRAGS, IP6REASS_NHASH * 50))

/* Interval between periodic reassembly queue inspections. */
#define	IP6_CALLOUT_INTERVAL_MS	500

/*
 * Sysctls and helper function.
 */
SYSCTL_DECL(_net_inet6_ip6);

SYSCTL_UINT(_net_inet6_ip6, OID_AUTO, frag6_nfrags,
	CTLFLAG_RD, &frag6_nfrags, 0,
	"Global number of IPv6 fragments across all reassembly queues.");

static void
frag6_set_bucketsize(void)
{
	int i;

	if ((i = V_ip6_maxfragpackets) > 0)
		V_ip6_maxfragbucketsize = imax(i / (IP6REASS_NHASH / 2), 1);
}

SYSCTL_INT(_net_inet6_ip6, IPV6CTL_MAXFRAGS, maxfrags,
	CTLFLAG_RW, &ip6_maxfrags, 0,
	"Maximum allowed number of outstanding IPv6 packet fragments. "
	"A value of 0 means no fragmented packets will be accepted, while "
	"a value of -1 means no limit");

static int
sysctl_ip6_maxfragpackets(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = V_ip6_maxfragpackets;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || !req->newptr)
		return (error);
	V_ip6_maxfragpackets = val;
	frag6_set_bucketsize();
	return (0);
}
SYSCTL_PROC(_net_inet6_ip6, IPV6CTL_MAXFRAGPACKETS, maxfragpackets,
	CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	NULL, 0, sysctl_ip6_maxfragpackets, "I",
	"Default maximum number of outstanding fragmented IPv6 packets. "
	"A value of 0 means no fragmented packets will be accepted, while "
	"a value of -1 means no limit");
SYSCTL_UINT(_net_inet6_ip6, OID_AUTO, frag6_nfragpackets,
	CTLFLAG_VNET | CTLFLAG_RD,
	__DEVOLATILE(u_int *, &VNET_NAME(frag6_nfragpackets)), 0,
	"Per-VNET number of IPv6 fragments across all reassembly queues.");
SYSCTL_INT(_net_inet6_ip6, IPV6CTL_MAXFRAGSPERPACKET, maxfragsperpacket,
	CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_maxfragsperpacket), 0,
	"Maximum allowed number of fragments per packet");
SYSCTL_INT(_net_inet6_ip6, IPV6CTL_MAXFRAGBUCKETSIZE, maxfragbucketsize,
	CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(ip6_maxfragbucketsize), 0,
	"Maximum number of reassembly queues per hash bucket");

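/*
 * Convert between the millisecond values exposed via the fraglifetime_ms
 * sysctl and the number of IP6_CALLOUT_INTERVAL_MS periods stored in
 * ip6q_ttl; e.g. with the 500 ms interval, a 60000 ms lifetime becomes
 * 120 callout ticks.
 */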
static int
frag6_milli_to_callout_ticks(int ms)
{
	return (ms / IP6_CALLOUT_INTERVAL_MS);
}

static int
frag6_callout_ticks_to_milli(int ticks)
{
	return (ticks * IP6_CALLOUT_INTERVAL_MS);
}

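/* ip6q_ttl holds callout ticks; the sysctl handler below clamps it to 65535. */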
_Static_assert(sizeof(((struct ip6q *)NULL)->ip6q_ttl) >= 2,
    "ip6q_ttl field is not large enough");

static int
sysctl_ip6_fraglifetime(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = V_ip6_fraglifetime;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || !req->newptr)
		return (error);
	if (val <= 0)
		val = IPV6_DEFFRAGTTL;

	if (frag6_milli_to_callout_ticks(val) >= 65536)
		val = frag6_callout_ticks_to_milli(65535);
#ifdef VIMAGE
	if (!IS_DEFAULT_VNET(curvnet)) {
		CURVNET_SET(vnet0);
		int host_val = V_ip6_fraglifetime;
		CURVNET_RESTORE();

		if (val > host_val)
			val = host_val;
	}
#endif
	V_ip6_fraglifetime = val;
	return (0);
}
SYSCTL_PROC(_net_inet6_ip6, OID_AUTO, fraglifetime_ms,
	CTLFLAG_VNET | CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	NULL, 0, sysctl_ip6_fraglifetime, "I",
	"Fragment lifetime, in milliseconds");
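/*
 * Example (illustrative value only): the per-VNET reassembly timeout can
 * be shortened to 10 seconds with
 *	sysctl net.inet6.ip6.fraglifetime_ms=10000
 */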

/*
 * Remove the IPv6 fragmentation header from the mbuf.
 */
int
ip6_deletefraghdr(struct mbuf *m, int offset, int wait __unused)
{
	struct ip6_hdr *ip6;

	KASSERT(m->m_len >= offset + sizeof(struct ip6_frag),
	    ("%s: ext headers not contiguous in mbuf %p m_len %d >= "
	    "offset %d + %zu\n", __func__, m, m->m_len, offset,
	    sizeof(struct ip6_frag)));

	/* Delete frag6 header. */
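	/*
	 * bcopy() slides the IPv6 header and any extension headers that
	 * precede the fragment header forward by 8 bytes in memory; the
	 * m_data/m_len adjustment below then trims the stale front,
	 * leaving the packet without its fragment header.
	 */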
	ip6 = mtod(m, struct ip6_hdr *);
	bcopy(ip6, (char *)ip6 + sizeof(struct ip6_frag), offset);
	m->m_data += sizeof(struct ip6_frag);
	m->m_len -= sizeof(struct ip6_frag);
	m->m_flags |= M_FRAGMENTED;

	return (0);
}

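/*
 * Unlink a reassembly queue from its hash bucket and release it, updating
 * the bucket count and the per-VNET packet accounting.
 */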
static void
frag6_rmqueue(struct ip6q *q6, uint32_t bucket)
{
	IP6QB_LOCK_ASSERT(bucket);

	TAILQ_REMOVE(IP6QB_HEAD(bucket), q6, ip6q_tq);
	V_ip6qb[bucket].count--;
#ifdef MAC
	mac_ip6q_destroy(q6);
#endif
	free(q6, M_FRAG6);
	atomic_subtract_int(&V_frag6_nfragpackets, 1);
}

/*
 * Free a fragment reassembly header and all associated datagrams.
 */
static void
frag6_freef(struct ip6q *q6, uint32_t bucket)
{
	struct ip6_hdr *ip6;
	struct ip6asfrag *af6;
	struct mbuf *m;

	IP6QB_LOCK_ASSERT(bucket);

	while ((af6 = TAILQ_FIRST(&q6->ip6q_frags)) != NULL) {
		m = af6->ip6af_m;
		TAILQ_REMOVE(&q6->ip6q_frags, af6, ip6af_tq);

		/*
		 * Return ICMP time exceeded error for the 1st fragment.
		 * Just free other fragments.
		 */
		if (af6->ip6af_off == 0 && m->m_pkthdr.rcvif != NULL) {
			/* Adjust pointer. */
			ip6 = mtod(m, struct ip6_hdr *);

			/* Restore source and destination addresses. */
			ip6->ip6_src = q6->ip6q_src;
			ip6->ip6_dst = q6->ip6q_dst;

			icmp6_error(m, ICMP6_TIME_EXCEEDED,
			    ICMP6_TIME_EXCEED_REASSEMBLY, 0);
		} else
			m_freem(m);

		free(af6, M_FRAG6);
	}

	atomic_subtract_int(&frag6_nfrags, q6->ip6q_nfrag);
	frag6_rmqueue(q6, bucket);
}

/*
 * Drain off all datagram fragments belonging to
 * the given network interface.
 */
static void
frag6_cleanup(void *arg __unused, struct ifnet *ifp)
{
	struct ip6qhead *head;
	struct ip6q *q6;
	struct ip6asfrag *af6;
	uint32_t bucket;

	KASSERT(ifp != NULL, ("%s: ifp is NULL", __func__));

	CURVNET_SET_QUIET(ifp->if_vnet);
#ifdef VIMAGE
	/*
	 * Skip processing if IPv6 reassembly is not initialised or has
	 * been torn down by frag6_destroy().
	 */
	if (!V_frag6_on) {
		CURVNET_RESTORE();
		return;
	}
#endif

	for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) {
		IP6QB_LOCK(bucket);
		head = IP6QB_HEAD(bucket);
		/* Scan fragment list. */
		TAILQ_FOREACH(q6, head, ip6q_tq) {
			TAILQ_FOREACH(af6, &q6->ip6q_frags, ip6af_tq) {
				/* Clear no longer valid rcvif pointer. */
				if (af6->ip6af_m->m_pkthdr.rcvif == ifp)
					af6->ip6af_m->m_pkthdr.rcvif = NULL;
			}
		}
		IP6QB_UNLOCK(bucket);
	}
	CURVNET_RESTORE();
}
EVENTHANDLER_DEFINE(ifnet_departure_event, frag6_cleanup, NULL, 0);

/*
 * As in RFC 2460 before it, the fragment and reassembly rules of RFC 8200
 * do not agree with each other on the handling of the next header field
 * in the fragment header.  While the sender will use the same value for
 * all of the fragmented packets, the receiver is advised not to check
 * them for consistency.
 *
 * Fragment rules (p18,p19):
 *	(2)  A Fragment header containing:
 *	The Next Header value that identifies the first header
 *	after the Per-Fragment headers of the original packet.
 *		-> next header field is same for all fragments
 *
 * Reassembly rule (p20):
 *	The Next Header field of the last header of the Per-Fragment
 *	headers is obtained from the Next Header field of the first
 *	fragment's Fragment header.
 *		-> should grab it from the first fragment only
 *
 * The following note also contradicts the fragment rule - no one is going
 * to send fragments of the same packet with different next header fields.
 *
 * Additional note (p22) [not an error]:
 *	The Next Header values in the Fragment headers of different
 *	fragments of the same original packet may differ.  Only the value
 *	from the Offset zero fragment packet is used for reassembly.
 *		-> should grab it from the first fragment only
 *
 * There is no explicit reason given in the RFC.  Historical reason maybe?
 */
/*
 * Fragment input.
 */
int
frag6_input(struct mbuf **mp, int *offp, int proto)
{
	struct mbuf *m, *t;
	struct ip6_hdr *ip6;
	struct ip6_frag *ip6f;
	struct ip6qhead *head;
	struct ip6q *q6;
	struct ip6asfrag *af6, *ip6af, *af6tmp;
	struct in6_ifaddr *ia6;
	struct ifnet *dstifp, *srcifp;
	uint32_t hashkey[(sizeof(struct in6_addr) * 2 +
		    sizeof(ip6f->ip6f_ident)) / sizeof(uint32_t)];
	uint32_t bucket, *hashkeyp;
	int fragoff, frgpartlen;	/* Must be wider than uint16_t. */
	int nxt, offset, plen;
	uint8_t ecn, ecn0;
	bool only_frag;
#ifdef RSS
	struct ip6_direct_ctx *ip6dc;
	struct m_tag *mtag;
#endif

	m = *mp;
	offset = *offp;

	M_ASSERTPKTHDR(m);

	if (m->m_len < offset + sizeof(struct ip6_frag)) {
		m = m_pullup(m, offset + sizeof(struct ip6_frag));
		if (m == NULL) {
			IP6STAT_INC(ip6s_exthdrtoolong);
			*mp = NULL;
			return (IPPROTO_DONE);
		}
	}
	ip6 = mtod(m, struct ip6_hdr *);

	dstifp = NULL;
	/* Find the destination interface of the packet. */
	ia6 = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */, false);
	if (ia6 != NULL)
		dstifp = ia6->ia_ifp;

	/* Jumbo payload cannot contain a fragment header. */
	if (ip6->ip6_plen == 0) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, offset);
		in6_ifstat_inc(dstifp, ifs6_reass_fail);
		*mp = NULL;
		return (IPPROTO_DONE);
	}

	/*
	 * Check whether the fragment packet's fragment length is a
	 * multiple of 8 octets (unless it is the last one).
	 * sizeof(struct ip6_frag) == 8
	 * sizeof(struct ip6_hdr) == 40
	 */
	ip6f = (struct ip6_frag *)((caddr_t)ip6 + offset);
	if ((ip6f->ip6f_offlg & IP6F_MORE_FRAG) &&
	    (((ntohs(ip6->ip6_plen) - offset) & 0x7) != 0)) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
		    offsetof(struct ip6_hdr, ip6_plen));
		in6_ifstat_inc(dstifp, ifs6_reass_fail);
		*mp = NULL;
		return (IPPROTO_DONE);
	}

	IP6STAT_INC(ip6s_fragments);
	in6_ifstat_inc(dstifp, ifs6_reass_reqd);

	/*
	 * Handle "atomic" fragments (offset and m bit set to 0) upfront,
	 * unrelated to any reassembly.  We need to remove the frag hdr
	 * which is ugly.
	 * See RFC 6946 and section 4.5 of RFC 8200.
	 */
	if ((ip6f->ip6f_offlg & ~IP6F_RESERVED_MASK) == 0) {
		IP6STAT_INC(ip6s_atomicfrags);
		nxt = ip6f->ip6f_nxt;
		/*
		 * Set nxt(-hdr field value) to the original value.
		 * We cannot just set ip6->ip6_nxt as there might be
		 * an unfragmentable part with extension headers and
		 * we must update the last one.
		 */
		m_copyback(m, ip6_get_prevhdr(m, offset), sizeof(uint8_t),
		    (caddr_t)&nxt);
		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) -
		    sizeof(struct ip6_frag));
		if (ip6_deletefraghdr(m, offset, M_NOWAIT) != 0)
			goto dropfrag2;
		m->m_pkthdr.len -= sizeof(struct ip6_frag);
		in6_ifstat_inc(dstifp, ifs6_reass_ok);
		*mp = m;
		return (nxt);
	}

	/* Offset now points to data portion. */
	offset += sizeof(struct ip6_frag);

	/* Get fragment length and discard 0-byte fragments. */
	frgpartlen = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - offset;
	if (frgpartlen == 0) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
		    offsetof(struct ip6_hdr, ip6_plen));
		in6_ifstat_inc(dstifp, ifs6_reass_fail);
		IP6STAT_INC(ip6s_fragdropped);
		*mp = NULL;
		return (IPPROTO_DONE);
	}

	/*
	 * Enforce upper bound on number of fragments for the entire system.
	 * If maxfrag is 0, never accept fragments.
	 * If maxfrag is -1, accept all fragments without limitation.
	 */
	if (ip6_maxfrags < 0)
		;
	else if (atomic_load_int(&frag6_nfrags) >= (u_int)ip6_maxfrags)
		goto dropfrag2;

	/*
	 * Validate that a full header chain to the ULP is present in the
	 * packet containing the first fragment as per RFC 7112 and
	 * RFC 8200 pages 18,19:
	 * The first fragment packet is composed of:
	 * (3)  Extension headers, if any, and the Upper-Layer header.  These
	 *      headers must be in the first fragment.  ...
	 */
	fragoff = ntohs(ip6f->ip6f_offlg & IP6F_OFF_MASK);
	/* XXX TODO.  thj has D16851 open for this. */
	/* Send ICMPv6 4,3 in case of violation. */

	/* Store receive network interface pointer for later. */
	srcifp = m->m_pkthdr.rcvif;

	/* Generate a hash value for fragment bucket selection. */
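	/*
	 * The key covers both addresses and the fragment ID; hashing it
	 * with a random per-VNET seed keeps remote senders from predicting
	 * bucket placement and piling all state into a single bucket.
	 */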
	hashkeyp = hashkey;
	memcpy(hashkeyp, &ip6->ip6_src, sizeof(struct in6_addr));
	hashkeyp += sizeof(struct in6_addr) / sizeof(*hashkeyp);
	memcpy(hashkeyp, &ip6->ip6_dst, sizeof(struct in6_addr));
	hashkeyp += sizeof(struct in6_addr) / sizeof(*hashkeyp);
	*hashkeyp = ip6f->ip6f_ident;
	bucket = jenkins_hash32(hashkey, nitems(hashkey), V_ip6qb_hashseed);
	bucket &= IP6REASS_HMASK;
	IP6QB_LOCK(bucket);
	head = IP6QB_HEAD(bucket);

	TAILQ_FOREACH(q6, head, ip6q_tq)
		if (ip6f->ip6f_ident == q6->ip6q_ident &&
		    IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, &q6->ip6q_src) &&
		    IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &q6->ip6q_dst)
#ifdef MAC
		    && mac_ip6q_match(m, q6)
#endif
		    )
			break;

	only_frag = false;
	if (q6 == NULL) {
		/* The first fragment to arrive creates a reassembly queue. */
		only_frag = true;

		/*
		 * Enforce upper bound on number of fragmented packets
		 * for which we attempt reassembly;
		 * If maxfragpackets is 0, never accept fragments.
		 * If maxfragpackets is -1, accept all fragments without
		 * limitation.
		 */
		if (V_ip6_maxfragpackets < 0)
			;
		else if (V_ip6qb[bucket].count >= V_ip6_maxfragbucketsize ||
		    atomic_load_int(&V_frag6_nfragpackets) >=
		    (u_int)V_ip6_maxfragpackets)
			goto dropfrag;

		/* Allocate IPv6 fragment packet queue entry. */
		q6 = malloc(sizeof(struct ip6q), M_FRAG6, M_NOWAIT | M_ZERO);
		if (q6 == NULL)
			goto dropfrag;
#ifdef MAC
		if (mac_ip6q_init(q6, M_NOWAIT) != 0) {
			free(q6, M_FRAG6);
			goto dropfrag;
		}
		mac_ip6q_create(m, q6);
#endif
		atomic_add_int(&V_frag6_nfragpackets, 1);

		/* ip6q_nxt will be filled afterwards, from 1st fragment. */
		TAILQ_INIT(&q6->ip6q_frags);
		q6->ip6q_ident	= ip6f->ip6f_ident;
		q6->ip6q_ttl	= frag6_milli_to_callout_ticks(V_ip6_fraglifetime);
		q6->ip6q_src	= ip6->ip6_src;
		q6->ip6q_dst	= ip6->ip6_dst;
		q6->ip6q_ecn	= IPV6_ECN(ip6);
		q6->ip6q_unfrglen = -1;	/* The 1st fragment has not arrived. */

		/* Add the fragmented packet to the bucket. */
		TAILQ_INSERT_HEAD(head, q6, ip6q_tq);
		V_ip6qb[bucket].count++;
	}

	/*
	 * If it is the 1st fragment, record the length of the
	 * unfragmentable part and the next header of the fragment header.
	 * Assume the first fragment to arrive will be correct.
	 * We have no duplicate checks here yet, so another packet with
	 * fragoff == 0 could arrive and overwrite the ip6q_unfrglen and,
	 * worse, the next header, at any time.
	 */
	if (fragoff == 0 && q6->ip6q_unfrglen == -1) {
		q6->ip6q_unfrglen = offset - sizeof(struct ip6_hdr) -
		    sizeof(struct ip6_frag);
		q6->ip6q_nxt = ip6f->ip6f_nxt;
		/* XXX ECN? */
	}

	/*
	 * Check that the reassembled packet would not exceed 65535 bytes
	 * in size.
	 * If it would exceed, discard the fragment and return an ICMP error.
	 */
	if (q6->ip6q_unfrglen >= 0) {
		/* The 1st fragment has already arrived. */
		if (q6->ip6q_unfrglen + fragoff + frgpartlen > IPV6_MAXPACKET) {
			if (only_frag)
				frag6_rmqueue(q6, bucket);
			IP6QB_UNLOCK(bucket);
			icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
			    offset - sizeof(struct ip6_frag) +
			    offsetof(struct ip6_frag, ip6f_offlg));
			*mp = NULL;
			return (IPPROTO_DONE);
		}
	} else if (fragoff + frgpartlen > IPV6_MAXPACKET) {
		if (only_frag)
			frag6_rmqueue(q6, bucket);
		IP6QB_UNLOCK(bucket);
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
		    offset - sizeof(struct ip6_frag) +
		    offsetof(struct ip6_frag, ip6f_offlg));
		*mp = NULL;
		return (IPPROTO_DONE);
	}

	/*
	 * If it is the first fragment, do the above check for each
	 * fragment already stored in the reassembly queue.
	 */
	if (fragoff == 0 && !only_frag) {
		TAILQ_FOREACH_SAFE(af6, &q6->ip6q_frags, ip6af_tq, af6tmp) {
			if (q6->ip6q_unfrglen + af6->ip6af_off +
			    af6->ip6af_frglen > IPV6_MAXPACKET) {
				struct ip6_hdr *ip6err;
				struct mbuf *merr;
				int erroff;

				merr = af6->ip6af_m;
				erroff = af6->ip6af_offset;

				/* Dequeue the fragment. */
				TAILQ_REMOVE(&q6->ip6q_frags, af6, ip6af_tq);
				q6->ip6q_nfrag--;
				atomic_subtract_int(&frag6_nfrags, 1);
				free(af6, M_FRAG6);

				/* Set a valid receive interface pointer. */
				merr->m_pkthdr.rcvif = srcifp;

				/* Adjust pointer. */
				ip6err = mtod(merr, struct ip6_hdr *);

				/*
				 * Restore source and destination addresses
				 * in the erroneous IPv6 header.
				 */
				ip6err->ip6_src = q6->ip6q_src;
				ip6err->ip6_dst = q6->ip6q_dst;

				icmp6_error(merr, ICMP6_PARAM_PROB,
				    ICMP6_PARAMPROB_HEADER,
				    erroff - sizeof(struct ip6_frag) +
				    offsetof(struct ip6_frag, ip6f_offlg));
			}
		}
	}

	/* Allocate an IPv6 fragment queue entry for this fragmented part. */
	ip6af = malloc(sizeof(struct ip6asfrag), M_FRAG6, M_NOWAIT | M_ZERO);
	if (ip6af == NULL)
		goto dropfrag;
	ip6af->ip6af_mff = (ip6f->ip6f_offlg & IP6F_MORE_FRAG) != 0;
	ip6af->ip6af_off = fragoff;
	ip6af->ip6af_frglen = frgpartlen;
	ip6af->ip6af_offset = offset;
	ip6af->ip6af_m = m;

	if (only_frag) {
		/*
		 * Do a manual insert rather than a hard-to-understand cast
		 * to a different type relying on data structure order to work.
		 */
		TAILQ_INSERT_HEAD(&q6->ip6q_frags, ip6af, ip6af_tq);
		goto postinsert;
	}

	/* Do duplicate, condition, and boundary checks. */
	/*
	 * Handle ECN by comparing this segment with the first one;
	 * if CE is set, do not lose CE.
	 * Drop if CE and not-ECT are mixed for the same packet.
	 */
	ecn = IPV6_ECN(ip6);
	ecn0 = q6->ip6q_ecn;
	if (ecn == IPTOS_ECN_CE) {
		if (ecn0 == IPTOS_ECN_NOTECT) {
			free(ip6af, M_FRAG6);
			goto dropfrag;
		}
		if (ecn0 != IPTOS_ECN_CE)
			q6->ip6q_ecn = IPTOS_ECN_CE;
	}
	if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT) {
		free(ip6af, M_FRAG6);
		goto dropfrag;
	}

	/* Find a fragmented part which begins after this one does. */
	TAILQ_FOREACH(af6, &q6->ip6q_frags, ip6af_tq)
		if (af6->ip6af_off > ip6af->ip6af_off)
			break;

	/*
	 * If the incoming fragment overlaps some existing fragments in
	 * the reassembly queue, drop both the new fragment and the
	 * entire reassembly queue.  However, if the new fragment
	 * is an exact duplicate of an existing fragment, only silently
	 * drop the new fragment and leave the reassembly queue
	 * unchanged, as allowed by the RFC.  (RFC 8200, 4.5)
	 */
	if (af6 != NULL)
		af6tmp = TAILQ_PREV(af6, ip6fraghead, ip6af_tq);
	else
		af6tmp = TAILQ_LAST(&q6->ip6q_frags, ip6fraghead);
	if (af6tmp != NULL) {
		if (af6tmp->ip6af_off + af6tmp->ip6af_frglen -
		    ip6af->ip6af_off > 0) {
			if (af6tmp->ip6af_off != ip6af->ip6af_off ||
			    af6tmp->ip6af_frglen != ip6af->ip6af_frglen)
				frag6_freef(q6, bucket);
			free(ip6af, M_FRAG6);
			goto dropfrag;
		}
	}
	if (af6 != NULL) {
		if (ip6af->ip6af_off + ip6af->ip6af_frglen -
		    af6->ip6af_off > 0) {
			if (af6->ip6af_off != ip6af->ip6af_off ||
			    af6->ip6af_frglen != ip6af->ip6af_frglen)
				frag6_freef(q6, bucket);
			free(ip6af, M_FRAG6);
			goto dropfrag;
		}
	}

#ifdef MAC
	mac_ip6q_update(m, q6);
#endif

	/*
	 * Stick new segment in its place; check for complete reassembly.
	 * If not complete, check fragment limit.  Move to front of packet
	 * queue, as we are the most recently active fragmented packet.
	 */
	if (af6 != NULL)
		TAILQ_INSERT_BEFORE(af6, ip6af, ip6af_tq);
	else
		TAILQ_INSERT_TAIL(&q6->ip6q_frags, ip6af, ip6af_tq);
postinsert:
	atomic_add_int(&frag6_nfrags, 1);
	q6->ip6q_nfrag++;

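	/*
	 * Walk the sorted fragment list; any gap (a fragment whose offset
	 * does not match the running length so far) means reassembly is
	 * still incomplete.  Enforce the per-packet fragment limit on the
	 * way.
	 */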
	plen = 0;
	TAILQ_FOREACH(af6, &q6->ip6q_frags, ip6af_tq) {
		if (af6->ip6af_off != plen) {
			if (q6->ip6q_nfrag > V_ip6_maxfragsperpacket) {
				IP6STAT_ADD(ip6s_fragdropped, q6->ip6q_nfrag);
				frag6_freef(q6, bucket);
			}
			IP6QB_UNLOCK(bucket);
			*mp = NULL;
			return (IPPROTO_DONE);
		}
		plen += af6->ip6af_frglen;
	}
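	/* The last fragment must not carry the "more fragments" flag. */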
	af6 = TAILQ_LAST(&q6->ip6q_frags, ip6fraghead);
	if (af6->ip6af_mff) {
		if (q6->ip6q_nfrag > V_ip6_maxfragsperpacket) {
			IP6STAT_ADD(ip6s_fragdropped, q6->ip6q_nfrag);
			frag6_freef(q6, bucket);
		}
		IP6QB_UNLOCK(bucket);
		*mp = NULL;
		return (IPPROTO_DONE);
	}

	/* Reassembly is complete; concatenate fragments. */
	ip6af = TAILQ_FIRST(&q6->ip6q_frags);
	t = m = ip6af->ip6af_m;
	TAILQ_REMOVE(&q6->ip6q_frags, ip6af, ip6af_tq);
	while ((af6 = TAILQ_FIRST(&q6->ip6q_frags)) != NULL) {
		m->m_pkthdr.csum_flags &=
		    af6->ip6af_m->m_pkthdr.csum_flags;
		m->m_pkthdr.csum_data +=
		    af6->ip6af_m->m_pkthdr.csum_data;

		TAILQ_REMOVE(&q6->ip6q_frags, af6, ip6af_tq);
		t = m_last(t);
		m_adj(af6->ip6af_m, af6->ip6af_offset);
		m_demote_pkthdr(af6->ip6af_m);
		m_cat(t, af6->ip6af_m);
		free(af6, M_FRAG6);
	}

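	/* Fold the accumulated 32-bit partial checksum back into 16 bits. */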
	while (m->m_pkthdr.csum_data & 0xffff0000)
		m->m_pkthdr.csum_data = (m->m_pkthdr.csum_data & 0xffff) +
		    (m->m_pkthdr.csum_data >> 16);

	/* Adjust offset to point where the original next header starts. */
	offset = ip6af->ip6af_offset - sizeof(struct ip6_frag);
	free(ip6af, M_FRAG6);
	if ((u_int)plen + (u_int)offset - sizeof(struct ip6_hdr) >
	    IPV6_MAXPACKET) {
		frag6_freef(q6, bucket);
		goto dropfrag;
	}
	ip6 = mtod(m, struct ip6_hdr *);
	ip6->ip6_plen = htons((u_short)plen + offset - sizeof(struct ip6_hdr));
	if (q6->ip6q_ecn == IPTOS_ECN_CE)
		ip6->ip6_flow |= htonl(IPTOS_ECN_CE << 20);
	nxt = q6->ip6q_nxt;

	ip6_deletefraghdr(m, offset, M_NOWAIT);

	/* Set nxt(-hdr field value) to the original value. */
	m_copyback(m, ip6_get_prevhdr(m, offset), sizeof(uint8_t),
	    (caddr_t)&nxt);

#ifdef MAC
	mac_ip6q_reassemble(q6, m);
#endif
	atomic_subtract_int(&frag6_nfrags, q6->ip6q_nfrag);
	frag6_rmqueue(q6, bucket);

	if (m->m_flags & M_PKTHDR) { /* Isn't it always true? */

		plen = 0;
		for (t = m; t; t = t->m_next)
			plen += t->m_len;
		m->m_pkthdr.len = plen;
		/* Set a valid receive interface pointer. */
		m->m_pkthdr.rcvif = srcifp;
	}

#ifdef RSS
	mtag = m_tag_alloc(MTAG_ABI_IPV6, IPV6_TAG_DIRECT, sizeof(*ip6dc),
	    M_NOWAIT);
	if (mtag == NULL)
		goto dropfrag;

	ip6dc = (struct ip6_direct_ctx *)(mtag + 1);
	ip6dc->ip6dc_nxt = nxt;
	ip6dc->ip6dc_off = offset;

	m_tag_prepend(m, mtag);
#endif

	IP6QB_UNLOCK(bucket);
	IP6STAT_INC(ip6s_reassembled);
	in6_ifstat_inc(dstifp, ifs6_reass_ok);

#ifdef RSS
	/* Queue/dispatch for reprocessing. */
	netisr_dispatch(NETISR_IPV6_DIRECT, m);
	*mp = NULL;
	return (IPPROTO_DONE);
#endif

	/* Tell launch routine the next header. */
	*mp = m;
	*offp = offset;

	return (nxt);

dropfrag:
	IP6QB_UNLOCK(bucket);
dropfrag2:
	in6_ifstat_inc(dstifp, ifs6_reass_fail);
	IP6STAT_INC(ip6s_fragdropped);
	m_freem(m);
	*mp = NULL;
	return (IPPROTO_DONE);
}

/*
 * IPv6 reassembly timer processing;
 * if a reassembly queue's timer expires, discard it.
 */
static struct callout frag6_callout;
static void
frag6_slowtimo(void *arg __unused)
{
	VNET_ITERATOR_DECL(vnet_iter);
	struct ip6qhead *head;
	struct ip6q *q6, *q6tmp;
	uint32_t bucket;

	if (atomic_load_int(&frag6_nfrags) == 0)
		goto done;

	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) {
			if (V_ip6qb[bucket].count == 0)
				continue;
			IP6QB_LOCK(bucket);
			head = IP6QB_HEAD(bucket);
			TAILQ_FOREACH_SAFE(q6, head, ip6q_tq, q6tmp)
				if (--q6->ip6q_ttl == 0) {
					IP6STAT_ADD(ip6s_fragtimeout,
						q6->ip6q_nfrag);
					/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
					frag6_freef(q6, bucket);
				}
			/*
			 * If we are over the maximum number of fragments
			 * (due to the limit being lowered), drain off
			 * enough to get down to the new limit.
			 * Note that we drain all reassembly queues if
			 * maxfragpackets is 0 (fragmentation is disabled),
			 * and do not enforce a limit when maxfragpackets
			 * is negative.
			 */
			while ((V_ip6_maxfragpackets == 0 ||
			    (V_ip6_maxfragpackets > 0 &&
			    V_ip6qb[bucket].count > V_ip6_maxfragbucketsize)) &&
			    (q6 = TAILQ_LAST(head, ip6qhead)) != NULL) {
				IP6STAT_ADD(ip6s_fragoverflow, q6->ip6q_nfrag);
				/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
				frag6_freef(q6, bucket);
			}
			IP6QB_UNLOCK(bucket);
		}
		/*
		 * If we are still over the maximum number of fragmented
		 * packets, drain off enough to get down to the new limit.
		 */
		bucket = 0;
		while (V_ip6_maxfragpackets >= 0 &&
		    atomic_load_int(&V_frag6_nfragpackets) >
		    (u_int)V_ip6_maxfragpackets) {
			IP6QB_LOCK(bucket);
			q6 = TAILQ_LAST(IP6QB_HEAD(bucket), ip6qhead);
			if (q6 != NULL) {
				IP6STAT_ADD(ip6s_fragoverflow, q6->ip6q_nfrag);
				/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
				frag6_freef(q6, bucket);
			}
			IP6QB_UNLOCK(bucket);
			bucket = (bucket + 1) % IP6REASS_NHASH;
		}
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
done:
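	/* Re-arm the timer for the next IP6_CALLOUT_INTERVAL_MS period. */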
	callout_reset_sbt(&frag6_callout, SBT_1MS * IP6_CALLOUT_INTERVAL_MS,
	    SBT_1MS * 10, frag6_slowtimo, NULL, 0);
}

static void
frag6_slowtimo_init(void *arg __unused)
{

	callout_init(&frag6_callout, 1);
	callout_reset_sbt(&frag6_callout, SBT_1MS * IP6_CALLOUT_INTERVAL_MS,
	    SBT_1MS * 10, frag6_slowtimo, NULL, 0);
}
SYSINIT(frag6, SI_SUB_VNET_DONE, SI_ORDER_ANY, frag6_slowtimo_init, NULL);

/*
 * Event handler to adjust limits in case nmbclusters changes.
 */
static void
frag6_change(void *tag)
{
	VNET_ITERATOR_DECL(vnet_iter);

	ip6_maxfrags = IP6_MAXFRAGS;
	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		V_ip6_maxfragpackets = IP6_MAXFRAGPACKETS;
		frag6_set_bucketsize();
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
}

/*
 * Initialise reassembly queue and fragment identifier.
 */
void
frag6_init(void)
{
	uint32_t bucket;

	V_ip6_maxfragpackets = IP6_MAXFRAGPACKETS;
	frag6_set_bucketsize();
	for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) {
		TAILQ_INIT(IP6QB_HEAD(bucket));
		mtx_init(&V_ip6qb[bucket].lock, "ip6qb", NULL, MTX_DEF);
		V_ip6qb[bucket].count = 0;
	}
	V_ip6qb_hashseed = arc4random();
	V_ip6_maxfragsperpacket = 64;
#ifdef VIMAGE
	V_frag6_on = true;
#endif
	if (!IS_DEFAULT_VNET(curvnet))
		return;

	ip6_maxfrags = IP6_MAXFRAGS;
	EVENTHANDLER_REGISTER(nmbclusters_change,
	    frag6_change, NULL, EVENTHANDLER_PRI_ANY);
}

/*
 * Drain off all datagram fragments.
 */
static void
frag6_drain_one(void)
{
	struct ip6q *q6;
	uint32_t bucket;

	for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) {
		IP6QB_LOCK(bucket);
		while ((q6 = TAILQ_FIRST(IP6QB_HEAD(bucket))) != NULL) {
			IP6STAT_INC(ip6s_fragdropped);
			/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
			frag6_freef(q6, bucket);
		}
		IP6QB_UNLOCK(bucket);
	}
}

void
frag6_drain(void)
{
	VNET_ITERATOR_DECL(vnet_iter);

	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		frag6_drain_one();
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
}

#ifdef VIMAGE
/*
 * Clean up IPv6 reassembly structures.
 */
void
frag6_destroy(void)
{
	uint32_t bucket;

	frag6_drain_one();
	V_frag6_on = false;
	for (bucket = 0; bucket < IP6REASS_NHASH; bucket++) {
		KASSERT(V_ip6qb[bucket].count == 0,
		    ("%s: V_ip6qb[%d] (%p) count not 0 (%d)", __func__,
		    bucket, &V_ip6qb[bucket], V_ip6qb[bucket].count));
		mtx_destroy(&V_ip6qb[bucket].lock);
	}
}
#endif