xref: /freebsd/sys/netpfil/pf/pf_norm.c (revision 301e69218ccbc314830fb989c852ae74ef2073d9)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright 2001 Niels Provos <provos@citi.umich.edu>
5  * Copyright 2011-2018 Alexander Bluhm <bluhm@openbsd.org>
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  *
28  *	$OpenBSD: pf_norm.c,v 1.114 2009/01/29 14:11:45 henning Exp $
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 #include "opt_inet.h"
35 #include "opt_inet6.h"
36 #include "opt_pf.h"
37 
38 #include <sys/param.h>
39 #include <sys/kernel.h>
40 #include <sys/lock.h>
41 #include <sys/mbuf.h>
42 #include <sys/mutex.h>
43 #include <sys/refcount.h>
44 #include <sys/socket.h>
45 
46 #include <net/if.h>
47 #include <net/vnet.h>
48 #include <net/pfvar.h>
49 #include <net/if_pflog.h>
50 
51 #include <netinet/in.h>
52 #include <netinet/ip.h>
53 #include <netinet/ip_var.h>
54 #include <netinet6/ip6_var.h>
55 #include <netinet6/scope6_var.h>
56 #include <netinet/tcp.h>
57 #include <netinet/tcp_fsm.h>
58 #include <netinet/tcp_seq.h>
59 #include <netinet/sctp_constants.h>
60 #include <netinet/sctp_header.h>
61 
62 #ifdef INET6
63 #include <netinet/ip6.h>
64 #endif /* INET6 */
65 
66 struct pf_frent {
67 	TAILQ_ENTRY(pf_frent)	fr_next;
68 	struct mbuf	*fe_m;
69 	uint16_t	fe_hdrlen;	/* ipv4 header length with ip options
70 					   ipv6, extension, fragment header */
71 	uint16_t	fe_extoff;	/* last extension header offset or 0 */
72 	uint16_t	fe_len;		/* fragment length */
73 	uint16_t	fe_off;		/* fragment offset */
74 	uint16_t	fe_mff;		/* more fragment flag */
75 };
76 
77 struct pf_fragment_cmp {
78 	struct pf_addr	frc_src;
79 	struct pf_addr	frc_dst;
80 	uint32_t	frc_id;
81 	sa_family_t	frc_af;
82 	uint8_t		frc_proto;
83 };
84 
85 struct pf_fragment {
86 	struct pf_fragment_cmp	fr_key;
87 #define fr_src	fr_key.frc_src
88 #define fr_dst	fr_key.frc_dst
89 #define fr_id	fr_key.frc_id
90 #define fr_af	fr_key.frc_af
91 #define fr_proto	fr_key.frc_proto
92 
93 	/* pointers to queue element */
94 	struct pf_frent	*fr_firstoff[PF_FRAG_ENTRY_POINTS];
95 	/* count entries between pointers */
96 	uint8_t	fr_entries[PF_FRAG_ENTRY_POINTS];
97 	RB_ENTRY(pf_fragment) fr_entry;
98 	TAILQ_ENTRY(pf_fragment) frag_next;
99 	uint32_t	fr_timeout;
100 	uint16_t	fr_maxlen;	/* maximum length of single fragment */
101 	u_int16_t	fr_holes;	/* number of holes in the queue */
102 	TAILQ_HEAD(pf_fragq, pf_frent) fr_queue;
103 };
104 
105 struct pf_fragment_tag {
106 	uint16_t	ft_hdrlen;	/* header length of reassembled pkt */
107 	uint16_t	ft_extoff;	/* last extension header offset or 0 */
108 	uint16_t	ft_maxlen;	/* maximum fragment payload length */
109 	uint32_t	ft_id;		/* fragment id */
110 };
111 
112 VNET_DEFINE_STATIC(struct mtx, pf_frag_mtx);
113 #define V_pf_frag_mtx		VNET(pf_frag_mtx)
114 #define PF_FRAG_LOCK()		mtx_lock(&V_pf_frag_mtx)
115 #define PF_FRAG_UNLOCK()	mtx_unlock(&V_pf_frag_mtx)
116 #define PF_FRAG_ASSERT()	mtx_assert(&V_pf_frag_mtx, MA_OWNED)
117 
118 VNET_DEFINE(uma_zone_t, pf_state_scrub_z);	/* XXX: shared with pfsync */
119 
120 VNET_DEFINE_STATIC(uma_zone_t, pf_frent_z);
121 #define	V_pf_frent_z	VNET(pf_frent_z)
122 VNET_DEFINE_STATIC(uma_zone_t, pf_frag_z);
123 #define	V_pf_frag_z	VNET(pf_frag_z)
124 
125 TAILQ_HEAD(pf_fragqueue, pf_fragment);
126 TAILQ_HEAD(pf_cachequeue, pf_fragment);
127 VNET_DEFINE_STATIC(struct pf_fragqueue,	pf_fragqueue);
128 #define	V_pf_fragqueue			VNET(pf_fragqueue)
129 RB_HEAD(pf_frag_tree, pf_fragment);
130 VNET_DEFINE_STATIC(struct pf_frag_tree,	pf_frag_tree);
131 #define	V_pf_frag_tree			VNET(pf_frag_tree)
132 static int		 pf_frag_compare(struct pf_fragment *,
133 			    struct pf_fragment *);
134 static RB_PROTOTYPE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
135 static RB_GENERATE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
136 
137 static void	pf_flush_fragments(void);
138 static void	pf_free_fragment(struct pf_fragment *);
139 static void	pf_remove_fragment(struct pf_fragment *);
140 
141 static struct pf_frent *pf_create_fragment(u_short *);
142 static int	pf_frent_holes(struct pf_frent *frent);
143 static struct pf_fragment *pf_find_fragment(struct pf_fragment_cmp *key,
144 		    struct pf_frag_tree *tree);
145 static inline int	pf_frent_index(struct pf_frent *);
146 static int	pf_frent_insert(struct pf_fragment *,
147 			    struct pf_frent *, struct pf_frent *);
148 void			pf_frent_remove(struct pf_fragment *,
149 			    struct pf_frent *);
150 struct pf_frent		*pf_frent_previous(struct pf_fragment *,
151 			    struct pf_frent *);
152 static struct pf_fragment *pf_fillup_fragment(struct pf_fragment_cmp *,
153 		    struct pf_frent *, u_short *);
154 static struct mbuf *pf_join_fragment(struct pf_fragment *);
155 #ifdef INET
156 static int	pf_reassemble(struct mbuf **, struct ip *, int, u_short *);
157 #endif	/* INET */
158 #ifdef INET6
159 static int	pf_reassemble6(struct mbuf **, struct ip6_hdr *,
160 		    struct ip6_frag *, uint16_t, uint16_t, u_short *);
161 #endif	/* INET6 */
162 
163 #define	DPFPRINTF(x) do {				\
164 	if (V_pf_status.debug >= PF_DEBUG_MISC) {	\
165 		printf("%s: ", __func__);		\
166 		printf x ;				\
167 	}						\
168 } while(0)
169 
170 #ifdef INET
171 static void
172 pf_ip2key(struct ip *ip, int dir, struct pf_fragment_cmp *key)
173 {
174 
175 	key->frc_src.v4 = ip->ip_src;
176 	key->frc_dst.v4 = ip->ip_dst;
177 	key->frc_af = AF_INET;
178 	key->frc_proto = ip->ip_p;
179 	key->frc_id = ip->ip_id;
180 }
181 #endif	/* INET */
182 
183 void
184 pf_normalize_init(void)
185 {
186 
187 	V_pf_frag_z = uma_zcreate("pf frags", sizeof(struct pf_fragment),
188 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
189 	V_pf_frent_z = uma_zcreate("pf frag entries", sizeof(struct pf_frent),
190 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
191 	V_pf_state_scrub_z = uma_zcreate("pf state scrubs",
192 	    sizeof(struct pf_state_scrub),  NULL, NULL, NULL, NULL,
193 	    UMA_ALIGN_PTR, 0);
194 
195 	mtx_init(&V_pf_frag_mtx, "pf fragments", NULL, MTX_DEF);
196 
197 	V_pf_limits[PF_LIMIT_FRAGS].zone = V_pf_frent_z;
198 	V_pf_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT;
199 	uma_zone_set_max(V_pf_frent_z, PFFRAG_FRENT_HIWAT);
200 	uma_zone_set_warning(V_pf_frent_z, "PF frag entries limit reached");
201 
202 	TAILQ_INIT(&V_pf_fragqueue);
203 }
204 
205 void
206 pf_normalize_cleanup(void)
207 {
208 
209 	uma_zdestroy(V_pf_state_scrub_z);
210 	uma_zdestroy(V_pf_frent_z);
211 	uma_zdestroy(V_pf_frag_z);
212 
213 	mtx_destroy(&V_pf_frag_mtx);
214 }
215 
216 static int
217 pf_frag_compare(struct pf_fragment *a, struct pf_fragment *b)
218 {
219 	int	diff;
220 
221 	if ((diff = a->fr_id - b->fr_id) != 0)
222 		return (diff);
223 	if ((diff = a->fr_proto - b->fr_proto) != 0)
224 		return (diff);
225 	if ((diff = a->fr_af - b->fr_af) != 0)
226 		return (diff);
227 	if ((diff = pf_addr_cmp(&a->fr_src, &b->fr_src, a->fr_af)) != 0)
228 		return (diff);
229 	if ((diff = pf_addr_cmp(&a->fr_dst, &b->fr_dst, a->fr_af)) != 0)
230 		return (diff);
231 	return (0);
232 }
233 
234 void
235 pf_purge_expired_fragments(void)
236 {
237 	u_int32_t	expire = time_uptime -
238 			    V_pf_default_rule.timeout[PFTM_FRAG];
239 
240 	pf_purge_fragments(expire);
241 }
242 
243 void
244 pf_purge_fragments(uint32_t expire)
245 {
246 	struct pf_fragment	*frag;
247 
248 	PF_FRAG_LOCK();
249 	while ((frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue)) != NULL) {
250 		if (frag->fr_timeout > expire)
251 			break;
252 
253 		DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
254 		pf_free_fragment(frag);
255 	}
256 
257 	PF_FRAG_UNLOCK();
258 }
259 
260 /*
261  * Try to flush old fragments to make space for new ones
262  */
263 static void
264 pf_flush_fragments(void)
265 {
266 	struct pf_fragment	*frag;
267 	int			 goal;
268 
269 	PF_FRAG_ASSERT();
270 
271 	goal = uma_zone_get_cur(V_pf_frent_z) * 9 / 10;
272 	DPFPRINTF(("trying to free %d frag entriess\n", goal));
273 	while (goal < uma_zone_get_cur(V_pf_frent_z)) {
274 		frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue);
275 		if (frag)
276 			pf_free_fragment(frag);
277 		else
278 			break;
279 	}
280 }
281 
282 /* Frees the fragments and all associated entries */
283 static void
284 pf_free_fragment(struct pf_fragment *frag)
285 {
286 	struct pf_frent		*frent;
287 
288 	PF_FRAG_ASSERT();
289 
290 	/* Free all fragments */
291 	for (frent = TAILQ_FIRST(&frag->fr_queue); frent;
292 	    frent = TAILQ_FIRST(&frag->fr_queue)) {
293 		TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);
294 
295 		m_freem(frent->fe_m);
296 		uma_zfree(V_pf_frent_z, frent);
297 	}
298 
299 	pf_remove_fragment(frag);
300 }
301 
302 static struct pf_fragment *
303 pf_find_fragment(struct pf_fragment_cmp *key, struct pf_frag_tree *tree)
304 {
305 	struct pf_fragment	*frag;
306 
307 	PF_FRAG_ASSERT();
308 
309 	frag = RB_FIND(pf_frag_tree, tree, (struct pf_fragment *)key);
310 	if (frag != NULL) {
311 		/* XXX Are we sure we want to update the timeout? */
312 		frag->fr_timeout = time_uptime;
313 		TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);
314 		TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next);
315 	}
316 
317 	return (frag);
318 }
319 
320 /* Removes a fragment from the fragment queue and frees the fragment */
321 static void
322 pf_remove_fragment(struct pf_fragment *frag)
323 {
324 
325 	PF_FRAG_ASSERT();
326 	KASSERT(frag, ("frag != NULL"));
327 
328 	RB_REMOVE(pf_frag_tree, &V_pf_frag_tree, frag);
329 	TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);
330 	uma_zfree(V_pf_frag_z, frag);
331 }
332 
333 static struct pf_frent *
334 pf_create_fragment(u_short *reason)
335 {
336 	struct pf_frent *frent;
337 
338 	PF_FRAG_ASSERT();
339 
340 	frent = uma_zalloc(V_pf_frent_z, M_NOWAIT);
341 	if (frent == NULL) {
342 		pf_flush_fragments();
343 		frent = uma_zalloc(V_pf_frent_z, M_NOWAIT);
344 		if (frent == NULL) {
345 			REASON_SET(reason, PFRES_MEMORY);
346 			return (NULL);
347 		}
348 	}
349 
350 	return (frent);
351 }
352 
353 /*
354  * Calculate the additional holes that were created in the fragment
355  * queue by inserting this fragment.  A fragment in the middle
356  * creates one more hole by splitting.  For each connected side,
357  * it loses one hole.
358  * Fragment entry must be in the queue when calling this function.
359  */
360 static int
361 pf_frent_holes(struct pf_frent *frent)
362 {
363 	struct pf_frent *prev = TAILQ_PREV(frent, pf_fragq, fr_next);
364 	struct pf_frent *next = TAILQ_NEXT(frent, fr_next);
365 	int holes = 1;
366 
367 	if (prev == NULL) {
368 		if (frent->fe_off == 0)
369 			holes--;
370 	} else {
371 		KASSERT(frent->fe_off != 0, ("frent->fe_off != 0"));
372 		if (frent->fe_off == prev->fe_off + prev->fe_len)
373 			holes--;
374 	}
375 	if (next == NULL) {
376 		if (!frent->fe_mff)
377 			holes--;
378 	} else {
379 		KASSERT(frent->fe_mff, ("frent->fe_mff"));
380 		if (next->fe_off == frent->fe_off + frent->fe_len)
381 			holes--;
382 	}
383 	return holes;
384 }
385 
386 static inline int
387 pf_frent_index(struct pf_frent *frent)
388 {
389 	/*
390 	 * We have an array of 16 entry points to the queue.  A full size
391 	 * 65535 octet IP packet can have 8192 fragments.  So the queue
392 	 * traversal length is at most 512 and at most 16 entry points are
393 	 * checked.  We need 128 additional bytes on a 64 bit architecture.
394 	 */
395 	CTASSERT(((u_int16_t)0xffff &~ 7) / (0x10000 / PF_FRAG_ENTRY_POINTS) ==
396 	    16 - 1);
397 	CTASSERT(((u_int16_t)0xffff >> 3) / PF_FRAG_ENTRY_POINTS == 512 - 1);
398 
399 	return frent->fe_off / (0x10000 / PF_FRAG_ENTRY_POINTS);
400 }
401 
402 static int
403 pf_frent_insert(struct pf_fragment *frag, struct pf_frent *frent,
404     struct pf_frent *prev)
405 {
406 	int index;
407 
408 	CTASSERT(PF_FRAG_ENTRY_LIMIT <= 0xff);
409 
410 	/*
411 	 * A packet has at most 65536 octets.  With 16 entry points, each one
412 	 * spawns 4096 octets.  We limit these to 64 fragments each, which
413 	 * means on average every fragment must have at least 64 octets.
414 	 */
415 	index = pf_frent_index(frent);
416 	if (frag->fr_entries[index] >= PF_FRAG_ENTRY_LIMIT)
417 		return ENOBUFS;
418 	frag->fr_entries[index]++;
419 
420 	if (prev == NULL) {
421 		TAILQ_INSERT_HEAD(&frag->fr_queue, frent, fr_next);
422 	} else {
423 		KASSERT(prev->fe_off + prev->fe_len <= frent->fe_off,
424 		    ("overlapping fragment"));
425 		TAILQ_INSERT_AFTER(&frag->fr_queue, prev, frent, fr_next);
426 	}
427 
428 	if (frag->fr_firstoff[index] == NULL) {
429 		KASSERT(prev == NULL || pf_frent_index(prev) < index,
430 		    ("prev == NULL || pf_frent_index(pref) < index"));
431 		frag->fr_firstoff[index] = frent;
432 	} else {
433 		if (frent->fe_off < frag->fr_firstoff[index]->fe_off) {
434 			KASSERT(prev == NULL || pf_frent_index(prev) < index,
435 			    ("prev == NULL || pf_frent_index(pref) < index"));
436 			frag->fr_firstoff[index] = frent;
437 		} else {
438 			KASSERT(prev != NULL, ("prev != NULL"));
439 			KASSERT(pf_frent_index(prev) == index,
440 			    ("pf_frent_index(prev) == index"));
441 		}
442 	}
443 
444 	frag->fr_holes += pf_frent_holes(frent);
445 
446 	return 0;
447 }
448 
449 void
450 pf_frent_remove(struct pf_fragment *frag, struct pf_frent *frent)
451 {
452 #ifdef INVARIANTS
453 	struct pf_frent *prev = TAILQ_PREV(frent, pf_fragq, fr_next);
454 #endif
455 	struct pf_frent *next = TAILQ_NEXT(frent, fr_next);
456 	int index;
457 
458 	frag->fr_holes -= pf_frent_holes(frent);
459 
460 	index = pf_frent_index(frent);
461 	KASSERT(frag->fr_firstoff[index] != NULL, ("frent not found"));
462 	if (frag->fr_firstoff[index]->fe_off == frent->fe_off) {
463 		if (next == NULL) {
464 			frag->fr_firstoff[index] = NULL;
465 		} else {
466 			KASSERT(frent->fe_off + frent->fe_len <= next->fe_off,
467 			    ("overlapping fragment"));
468 			if (pf_frent_index(next) == index) {
469 				frag->fr_firstoff[index] = next;
470 			} else {
471 				frag->fr_firstoff[index] = NULL;
472 			}
473 		}
474 	} else {
475 		KASSERT(frag->fr_firstoff[index]->fe_off < frent->fe_off,
476 		    ("frag->fr_firstoff[index]->fe_off < frent->fe_off"));
477 		KASSERT(prev != NULL, ("prev != NULL"));
478 		KASSERT(prev->fe_off + prev->fe_len <= frent->fe_off,
479 		    ("overlapping fragment"));
480 		KASSERT(pf_frent_index(prev) == index,
481 		    ("pf_frent_index(prev) == index"));
482 	}
483 
484 	TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);
485 
486 	KASSERT(frag->fr_entries[index] > 0, ("No fragments remaining"));
487 	frag->fr_entries[index]--;
488 }
489 
490 struct pf_frent *
491 pf_frent_previous(struct pf_fragment *frag, struct pf_frent *frent)
492 {
493 	struct pf_frent *prev, *next;
494 	int index;
495 
496 	/*
497 	 * If there are no fragments after frag, take the final one.  Assume
498 	 * that the global queue is not empty.
499 	 */
500 	prev = TAILQ_LAST(&frag->fr_queue, pf_fragq);
501 	KASSERT(prev != NULL, ("prev != NULL"));
502 	if (prev->fe_off <= frent->fe_off)
503 		return prev;
504 	/*
505 	 * We want to find a fragment entry that is before frag, but still
506 	 * close to it.  Find the first fragment entry that is in the same
507 	 * entry point or in the first entry point after that.  As we have
508 	 * already checked that there are entries behind frag, this will
509 	 * succeed.
510 	 */
511 	for (index = pf_frent_index(frent); index < PF_FRAG_ENTRY_POINTS;
512 	    index++) {
513 		prev = frag->fr_firstoff[index];
514 		if (prev != NULL)
515 			break;
516 	}
517 	KASSERT(prev != NULL, ("prev != NULL"));
518 	/*
519 	 * In prev we may have a fragment from the same entry point that is
520 	 * before frent, or one that is just one position behind frent.
521 	 * In the latter case, we go back one step and have the predecessor.
522 	 * There may be none if the new fragment will be the first one.
523 	 */
524 	if (prev->fe_off > frent->fe_off) {
525 		prev = TAILQ_PREV(prev, pf_fragq, fr_next);
526 		if (prev == NULL)
527 			return NULL;
528 		KASSERT(prev->fe_off <= frent->fe_off,
529 		    ("prev->fe_off <= frent->fe_off"));
530 		return prev;
531 	}
532 	/*
533 	 * In prev is the first fragment of the entry point.  The offset
534 	 * of frag is behind it.  Find the closest previous fragment.
535 	 */
536 	for (next = TAILQ_NEXT(prev, fr_next); next != NULL;
537 	    next = TAILQ_NEXT(next, fr_next)) {
538 		if (next->fe_off > frent->fe_off)
539 			break;
540 		prev = next;
541 	}
542 	return prev;
543 }
544 
545 static struct pf_fragment *
546 pf_fillup_fragment(struct pf_fragment_cmp *key, struct pf_frent *frent,
547     u_short *reason)
548 {
549 	struct pf_frent		*after, *next, *prev;
550 	struct pf_fragment	*frag;
551 	uint16_t		total;
552 	int			old_index, new_index;
553 
554 	PF_FRAG_ASSERT();
555 
556 	/* No empty fragments. */
557 	if (frent->fe_len == 0) {
558 		DPFPRINTF(("bad fragment: len 0\n"));
559 		goto bad_fragment;
560 	}
561 
562 	/* All fragments are 8 byte aligned. */
563 	if (frent->fe_mff && (frent->fe_len & 0x7)) {
564 		DPFPRINTF(("bad fragment: mff and len %d\n", frent->fe_len));
565 		goto bad_fragment;
566 	}
567 
568 	/* Respect maximum length, IP_MAXPACKET == IPV6_MAXPACKET. */
569 	if (frent->fe_off + frent->fe_len > IP_MAXPACKET) {
570 		DPFPRINTF(("bad fragment: max packet %d\n",
571 		    frent->fe_off + frent->fe_len));
572 		goto bad_fragment;
573 	}
574 
575 	DPFPRINTF((key->frc_af == AF_INET ?
576 	    "reass frag %d @ %d-%d\n" : "reass frag %#08x @ %d-%d\n",
577 	    key->frc_id, frent->fe_off, frent->fe_off + frent->fe_len));
578 
579 	/* Fully buffer all of the fragments in this fragment queue. */
580 	frag = pf_find_fragment(key, &V_pf_frag_tree);
581 
582 	/* Create a new reassembly queue for this packet. */
583 	if (frag == NULL) {
584 		frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
585 		if (frag == NULL) {
586 			pf_flush_fragments();
587 			frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
588 			if (frag == NULL) {
589 				REASON_SET(reason, PFRES_MEMORY);
590 				goto drop_fragment;
591 			}
592 		}
593 
594 		*(struct pf_fragment_cmp *)frag = *key;
595 		memset(frag->fr_firstoff, 0, sizeof(frag->fr_firstoff));
596 		memset(frag->fr_entries, 0, sizeof(frag->fr_entries));
597 		frag->fr_timeout = time_uptime;
598 		frag->fr_maxlen = frent->fe_len;
599 		frag->fr_holes = 1;
600 		TAILQ_INIT(&frag->fr_queue);
601 
602 		RB_INSERT(pf_frag_tree, &V_pf_frag_tree, frag);
603 		TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next);
604 
605 		/* We do not have a previous fragment, cannot fail. */
606 		pf_frent_insert(frag, frent, NULL);
607 
608 		return (frag);
609 	}
610 
611 	KASSERT(!TAILQ_EMPTY(&frag->fr_queue), ("!TAILQ_EMPTY()->fr_queue"));
612 
613 	/* Remember maximum fragment len for refragmentation. */
614 	if (frent->fe_len > frag->fr_maxlen)
615 		frag->fr_maxlen = frent->fe_len;
616 
617 	/* Maximum data we have seen already. */
618 	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
619 		TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
620 
621 	/* Non terminal fragments must have more fragments flag. */
622 	if (frent->fe_off + frent->fe_len < total && !frent->fe_mff)
623 		goto bad_fragment;
624 
625 	/* Check if we saw the last fragment already. */
626 	if (!TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_mff) {
627 		if (frent->fe_off + frent->fe_len > total ||
628 		    (frent->fe_off + frent->fe_len == total && frent->fe_mff))
629 			goto bad_fragment;
630 	} else {
631 		if (frent->fe_off + frent->fe_len == total && !frent->fe_mff)
632 			goto bad_fragment;
633 	}
634 
635 	/* Find neighbors for newly inserted fragment */
636 	prev = pf_frent_previous(frag, frent);
637 	if (prev == NULL) {
638 		after = TAILQ_FIRST(&frag->fr_queue);
639 		KASSERT(after != NULL, ("after != NULL"));
640 	} else {
641 		after = TAILQ_NEXT(prev, fr_next);
642 	}
643 
644 	if (prev != NULL && prev->fe_off + prev->fe_len > frent->fe_off) {
645 		uint16_t precut;
646 
647 		precut = prev->fe_off + prev->fe_len - frent->fe_off;
648 		if (precut >= frent->fe_len)
649 			goto bad_fragment;
650 		DPFPRINTF(("overlap -%d\n", precut));
651 		m_adj(frent->fe_m, precut);
652 		frent->fe_off += precut;
653 		frent->fe_len -= precut;
654 	}
655 
656 	for (; after != NULL && frent->fe_off + frent->fe_len > after->fe_off;
657 	    after = next) {
658 		uint16_t aftercut;
659 
660 		aftercut = frent->fe_off + frent->fe_len - after->fe_off;
661 		DPFPRINTF(("adjust overlap %d\n", aftercut));
662 		if (aftercut < after->fe_len) {
663 			m_adj(after->fe_m, aftercut);
664 			old_index = pf_frent_index(after);
665 			after->fe_off += aftercut;
666 			after->fe_len -= aftercut;
667 			new_index = pf_frent_index(after);
668 			if (old_index != new_index) {
669 				DPFPRINTF(("frag index %d, new %d",
670 				    old_index, new_index));
671 				/* Fragment switched queue as fe_off changed */
672 				after->fe_off -= aftercut;
673 				after->fe_len += aftercut;
674 				/* Remove restored fragment from old queue */
675 				pf_frent_remove(frag, after);
676 				after->fe_off += aftercut;
677 				after->fe_len -= aftercut;
678 				/* Insert into correct queue */
679 				if (pf_frent_insert(frag, after, prev)) {
680 					DPFPRINTF(
681 					    ("fragment requeue limit exceeded"));
682 					m_freem(after->fe_m);
683 					uma_zfree(V_pf_frent_z, after);
684 					/* There is not way to recover */
685 					goto bad_fragment;
686 				}
687 			}
688 			break;
689 		}
690 
691 		/* This fragment is completely overlapped, lose it. */
692 		next = TAILQ_NEXT(after, fr_next);
693 		pf_frent_remove(frag, after);
694 		m_freem(after->fe_m);
695 		uma_zfree(V_pf_frent_z, after);
696 	}
697 
698 	/* If part of the queue gets too long, there is not way to recover. */
699 	if (pf_frent_insert(frag, frent, prev)) {
700 		DPFPRINTF(("fragment queue limit exceeded\n"));
701 		goto bad_fragment;
702 	}
703 
704 	return (frag);
705 
706 bad_fragment:
707 	REASON_SET(reason, PFRES_FRAG);
708 drop_fragment:
709 	uma_zfree(V_pf_frent_z, frent);
710 	return (NULL);
711 }
712 
713 static struct mbuf *
714 pf_join_fragment(struct pf_fragment *frag)
715 {
716 	struct mbuf *m, *m2;
717 	struct pf_frent	*frent, *next;
718 
719 	frent = TAILQ_FIRST(&frag->fr_queue);
720 	next = TAILQ_NEXT(frent, fr_next);
721 
722 	m = frent->fe_m;
723 	m_adj(m, (frent->fe_hdrlen + frent->fe_len) - m->m_pkthdr.len);
724 	uma_zfree(V_pf_frent_z, frent);
725 	for (frent = next; frent != NULL; frent = next) {
726 		next = TAILQ_NEXT(frent, fr_next);
727 
728 		m2 = frent->fe_m;
729 		/* Strip off ip header. */
730 		m_adj(m2, frent->fe_hdrlen);
731 		/* Strip off any trailing bytes. */
732 		m_adj(m2, frent->fe_len - m2->m_pkthdr.len);
733 
734 		uma_zfree(V_pf_frent_z, frent);
735 		m_cat(m, m2);
736 	}
737 
738 	/* Remove from fragment queue. */
739 	pf_remove_fragment(frag);
740 
741 	return (m);
742 }
743 
744 #ifdef INET
745 static int
746 pf_reassemble(struct mbuf **m0, struct ip *ip, int dir, u_short *reason)
747 {
748 	struct mbuf		*m = *m0;
749 	struct pf_frent		*frent;
750 	struct pf_fragment	*frag;
751 	struct pf_fragment_cmp	key;
752 	uint16_t		total, hdrlen;
753 
754 	/* Get an entry for the fragment queue */
755 	if ((frent = pf_create_fragment(reason)) == NULL)
756 		return (PF_DROP);
757 
758 	frent->fe_m = m;
759 	frent->fe_hdrlen = ip->ip_hl << 2;
760 	frent->fe_extoff = 0;
761 	frent->fe_len = ntohs(ip->ip_len) - (ip->ip_hl << 2);
762 	frent->fe_off = (ntohs(ip->ip_off) & IP_OFFMASK) << 3;
763 	frent->fe_mff = ntohs(ip->ip_off) & IP_MF;
764 
765 	pf_ip2key(ip, dir, &key);
766 
767 	if ((frag = pf_fillup_fragment(&key, frent, reason)) == NULL)
768 		return (PF_DROP);
769 
770 	/* The mbuf is part of the fragment entry, no direct free or access */
771 	m = *m0 = NULL;
772 
773 	if (frag->fr_holes) {
774 		DPFPRINTF(("frag %d, holes %d\n", frag->fr_id, frag->fr_holes));
775 		return (PF_PASS);  /* drop because *m0 is NULL, no error */
776 	}
777 
778 	/* We have all the data */
779 	frent = TAILQ_FIRST(&frag->fr_queue);
780 	KASSERT(frent != NULL, ("frent != NULL"));
781 	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
782 		TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
783 	hdrlen = frent->fe_hdrlen;
784 
785 	m = *m0 = pf_join_fragment(frag);
786 	frag = NULL;
787 
788 	if (m->m_flags & M_PKTHDR) {
789 		int plen = 0;
790 		for (m = *m0; m; m = m->m_next)
791 			plen += m->m_len;
792 		m = *m0;
793 		m->m_pkthdr.len = plen;
794 	}
795 
796 	ip = mtod(m, struct ip *);
797 	ip->ip_sum = pf_cksum_fixup(ip->ip_sum, ip->ip_len,
798 	    htons(hdrlen + total), 0);
799 	ip->ip_len = htons(hdrlen + total);
800 	ip->ip_sum = pf_cksum_fixup(ip->ip_sum, ip->ip_off,
801 	    ip->ip_off & ~(IP_MF|IP_OFFMASK), 0);
802 	ip->ip_off &= ~(IP_MF|IP_OFFMASK);
803 
804 	if (hdrlen + total > IP_MAXPACKET) {
805 		DPFPRINTF(("drop: too big: %d\n", total));
806 		ip->ip_len = 0;
807 		REASON_SET(reason, PFRES_SHORT);
808 		/* PF_DROP requires a valid mbuf *m0 in pf_test() */
809 		return (PF_DROP);
810 	}
811 
812 	DPFPRINTF(("complete: %p(%d)\n", m, ntohs(ip->ip_len)));
813 	return (PF_PASS);
814 }
815 #endif	/* INET */
816 
817 #ifdef INET6
818 static int
819 pf_reassemble6(struct mbuf **m0, struct ip6_hdr *ip6, struct ip6_frag *fraghdr,
820     uint16_t hdrlen, uint16_t extoff, u_short *reason)
821 {
822 	struct mbuf		*m = *m0;
823 	struct pf_frent		*frent;
824 	struct pf_fragment	*frag;
825 	struct pf_fragment_cmp	 key;
826 	struct m_tag		*mtag;
827 	struct pf_fragment_tag	*ftag;
828 	int			 off;
829 	uint32_t		 frag_id;
830 	uint16_t		 total, maxlen;
831 	uint8_t			 proto;
832 
833 	PF_FRAG_LOCK();
834 
835 	/* Get an entry for the fragment queue. */
836 	if ((frent = pf_create_fragment(reason)) == NULL) {
837 		PF_FRAG_UNLOCK();
838 		return (PF_DROP);
839 	}
840 
841 	frent->fe_m = m;
842 	frent->fe_hdrlen = hdrlen;
843 	frent->fe_extoff = extoff;
844 	frent->fe_len = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - hdrlen;
845 	frent->fe_off = ntohs(fraghdr->ip6f_offlg & IP6F_OFF_MASK);
846 	frent->fe_mff = fraghdr->ip6f_offlg & IP6F_MORE_FRAG;
847 
848 	key.frc_src.v6 = ip6->ip6_src;
849 	key.frc_dst.v6 = ip6->ip6_dst;
850 	key.frc_af = AF_INET6;
851 	/* Only the first fragment's protocol is relevant. */
852 	key.frc_proto = 0;
853 	key.frc_id = fraghdr->ip6f_ident;
854 
855 	if ((frag = pf_fillup_fragment(&key, frent, reason)) == NULL) {
856 		PF_FRAG_UNLOCK();
857 		return (PF_DROP);
858 	}
859 
860 	/* The mbuf is part of the fragment entry, no direct free or access. */
861 	m = *m0 = NULL;
862 
863 	if (frag->fr_holes) {
864 		DPFPRINTF(("frag %d, holes %d\n", frag->fr_id,
865 		    frag->fr_holes));
866 		PF_FRAG_UNLOCK();
867 		return (PF_PASS);  /* Drop because *m0 is NULL, no error. */
868 	}
869 
870 	/* We have all the data. */
871 	frent = TAILQ_FIRST(&frag->fr_queue);
872 	KASSERT(frent != NULL, ("frent != NULL"));
873 	extoff = frent->fe_extoff;
874 	maxlen = frag->fr_maxlen;
875 	frag_id = frag->fr_id;
876 	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
877 		TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
878 	hdrlen = frent->fe_hdrlen - sizeof(struct ip6_frag);
879 
880 	m = *m0 = pf_join_fragment(frag);
881 	frag = NULL;
882 
883 	PF_FRAG_UNLOCK();
884 
885 	/* Take protocol from first fragment header. */
886 	m = m_getptr(m, hdrlen + offsetof(struct ip6_frag, ip6f_nxt), &off);
887 	KASSERT(m, ("%s: short mbuf chain", __func__));
888 	proto = *(mtod(m, uint8_t *) + off);
889 	m = *m0;
890 
891 	/* Delete frag6 header */
892 	if (ip6_deletefraghdr(m, hdrlen, M_NOWAIT) != 0)
893 		goto fail;
894 
895 	if (m->m_flags & M_PKTHDR) {
896 		int plen = 0;
897 		for (m = *m0; m; m = m->m_next)
898 			plen += m->m_len;
899 		m = *m0;
900 		m->m_pkthdr.len = plen;
901 	}
902 
903 	if ((mtag = m_tag_get(PACKET_TAG_PF_REASSEMBLED,
904 	    sizeof(struct pf_fragment_tag), M_NOWAIT)) == NULL)
905 		goto fail;
906 	ftag = (struct pf_fragment_tag *)(mtag + 1);
907 	ftag->ft_hdrlen = hdrlen;
908 	ftag->ft_extoff = extoff;
909 	ftag->ft_maxlen = maxlen;
910 	ftag->ft_id = frag_id;
911 	m_tag_prepend(m, mtag);
912 
913 	ip6 = mtod(m, struct ip6_hdr *);
914 	ip6->ip6_plen = htons(hdrlen - sizeof(struct ip6_hdr) + total);
915 	if (extoff) {
916 		/* Write protocol into next field of last extension header. */
917 		m = m_getptr(m, extoff + offsetof(struct ip6_ext, ip6e_nxt),
918 		    &off);
919 		KASSERT(m, ("%s: short mbuf chain", __func__));
920 		*(mtod(m, char *) + off) = proto;
921 		m = *m0;
922 	} else
923 		ip6->ip6_nxt = proto;
924 
925 	if (hdrlen - sizeof(struct ip6_hdr) + total > IPV6_MAXPACKET) {
926 		DPFPRINTF(("drop: too big: %d\n", total));
927 		ip6->ip6_plen = 0;
928 		REASON_SET(reason, PFRES_SHORT);
929 		/* PF_DROP requires a valid mbuf *m0 in pf_test6(). */
930 		return (PF_DROP);
931 	}
932 
933 	DPFPRINTF(("complete: %p(%d)\n", m, ntohs(ip6->ip6_plen)));
934 	return (PF_PASS);
935 
936 fail:
937 	REASON_SET(reason, PFRES_MEMORY);
938 	/* PF_DROP requires a valid mbuf *m0 in pf_test6(), will free later. */
939 	return (PF_DROP);
940 }
941 #endif	/* INET6 */
942 
943 #ifdef INET6
944 int
945 pf_refragment6(struct ifnet *ifp, struct mbuf **m0, struct m_tag *mtag,
946     bool forward)
947 {
948 	struct mbuf		*m = *m0, *t;
949 	struct ip6_hdr		*hdr;
950 	struct pf_fragment_tag	*ftag = (struct pf_fragment_tag *)(mtag + 1);
951 	struct pf_pdesc		 pd;
952 	uint32_t		 frag_id;
953 	uint16_t		 hdrlen, extoff, maxlen;
954 	uint8_t			 proto;
955 	int			 error, action;
956 
957 	hdrlen = ftag->ft_hdrlen;
958 	extoff = ftag->ft_extoff;
959 	maxlen = ftag->ft_maxlen;
960 	frag_id = ftag->ft_id;
961 	m_tag_delete(m, mtag);
962 	mtag = NULL;
963 	ftag = NULL;
964 
965 	if (extoff) {
966 		int off;
967 
968 		/* Use protocol from next field of last extension header */
969 		m = m_getptr(m, extoff + offsetof(struct ip6_ext, ip6e_nxt),
970 		    &off);
971 		KASSERT((m != NULL), ("pf_refragment6: short mbuf chain"));
972 		proto = *(mtod(m, uint8_t *) + off);
973 		*(mtod(m, char *) + off) = IPPROTO_FRAGMENT;
974 		m = *m0;
975 	} else {
976 		hdr = mtod(m, struct ip6_hdr *);
977 		proto = hdr->ip6_nxt;
978 		hdr->ip6_nxt = IPPROTO_FRAGMENT;
979 	}
980 
981 	/* In case of link-local traffic we'll need a scope set. */
982 	hdr = mtod(m, struct ip6_hdr *);
983 
984 	in6_setscope(&hdr->ip6_src, ifp, NULL);
985 	in6_setscope(&hdr->ip6_dst, ifp, NULL);
986 
987 	/* The MTU must be a multiple of 8 bytes, or we risk doing the
988 	 * fragmentation wrong. */
989 	maxlen = maxlen & ~7;
990 
991 	/*
992 	 * Maxlen may be less than 8 if there was only a single
993 	 * fragment.  As it was fragmented before, add a fragment
994 	 * header also for a single fragment.  If total or maxlen
995 	 * is less than 8, ip6_fragment() will return EMSGSIZE and
996 	 * we drop the packet.
997 	 */
998 	error = ip6_fragment(ifp, m, hdrlen, proto, maxlen, frag_id);
999 	m = (*m0)->m_nextpkt;
1000 	(*m0)->m_nextpkt = NULL;
1001 	if (error == 0) {
1002 		/* The first mbuf contains the unfragmented packet. */
1003 		m_freem(*m0);
1004 		*m0 = NULL;
1005 		action = PF_PASS;
1006 	} else {
1007 		/* Drop expects an mbuf to free. */
1008 		DPFPRINTF(("refragment error %d\n", error));
1009 		action = PF_DROP;
1010 	}
1011 	for (; m; m = t) {
1012 		t = m->m_nextpkt;
1013 		m->m_nextpkt = NULL;
1014 		m->m_flags |= M_SKIP_FIREWALL;
1015 		memset(&pd, 0, sizeof(pd));
1016 		pd.pf_mtag = pf_find_mtag(m);
1017 		if (error == 0)
1018 			if (forward) {
1019 				MPASS(m->m_pkthdr.rcvif != NULL);
1020 				ip6_forward(m, 0);
1021 			} else {
1022 				(void)ip6_output(m, NULL, NULL, 0, NULL, NULL,
1023 				    NULL);
1024 			}
1025 		else
1026 			m_freem(m);
1027 	}
1028 
1029 	return (action);
1030 }
1031 #endif /* INET6 */
1032 
1033 #ifdef INET
1034 int
1035 pf_normalize_ip(struct mbuf **m0, struct pfi_kkif *kif, u_short *reason,
1036     struct pf_pdesc *pd)
1037 {
1038 	struct mbuf		*m = *m0;
1039 	struct pf_krule		*r;
1040 	struct ip		*h = mtod(m, struct ip *);
1041 	int			 mff = (ntohs(h->ip_off) & IP_MF);
1042 	int			 hlen = h->ip_hl << 2;
1043 	u_int16_t		 fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
1044 	u_int16_t		 max;
1045 	int			 ip_len;
1046 	int			 tag = -1;
1047 	int			 verdict;
1048 	int			 srs;
1049 
1050 	PF_RULES_RASSERT();
1051 
1052 	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
1053 	/* Check if there any scrub rules. Lack of scrub rules means enforced
1054 	 * packet normalization operation just like in OpenBSD. */
1055 	srs = (r != NULL);
1056 	while (r != NULL) {
1057 		pf_counter_u64_add(&r->evaluations, 1);
1058 		if (pfi_kkif_match(r->kif, kif) == r->ifnot)
1059 			r = r->skip[PF_SKIP_IFP].ptr;
1060 		else if (r->direction && r->direction != pd->dir)
1061 			r = r->skip[PF_SKIP_DIR].ptr;
1062 		else if (r->af && r->af != AF_INET)
1063 			r = r->skip[PF_SKIP_AF].ptr;
1064 		else if (r->proto && r->proto != h->ip_p)
1065 			r = r->skip[PF_SKIP_PROTO].ptr;
1066 		else if (PF_MISMATCHAW(&r->src.addr,
1067 		    (struct pf_addr *)&h->ip_src.s_addr, AF_INET,
1068 		    r->src.neg, kif, M_GETFIB(m)))
1069 			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
1070 		else if (PF_MISMATCHAW(&r->dst.addr,
1071 		    (struct pf_addr *)&h->ip_dst.s_addr, AF_INET,
1072 		    r->dst.neg, NULL, M_GETFIB(m)))
1073 			r = r->skip[PF_SKIP_DST_ADDR].ptr;
1074 		else if (r->match_tag && !pf_match_tag(m, r, &tag,
1075 		    pd->pf_mtag ? pd->pf_mtag->tag : 0))
1076 			r = TAILQ_NEXT(r, entries);
1077 		else
1078 			break;
1079 	}
1080 
1081 	if (srs) {
1082 		/* With scrub rules present IPv4 normalization happens only
1083 		 * if one of rules has matched and it's not a "no scrub" rule */
1084 		if (r == NULL || r->action == PF_NOSCRUB)
1085 			return (PF_PASS);
1086 
1087 		pf_counter_u64_critical_enter();
1088 		pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1);
1089 		pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT], pd->tot_len);
1090 		pf_counter_u64_critical_exit();
1091 		pf_rule_to_actions(r, &pd->act);
1092 	} else if ((!V_pf_status.reass && (h->ip_off & htons(IP_MF | IP_OFFMASK)))) {
1093 		/* With no scrub rules IPv4 fragment reassembly depends on the
1094 		 * global switch. Fragments can be dropped early if reassembly
1095 		 * is disabled. */
1096 		REASON_SET(reason, PFRES_NORM);
1097 		goto drop;
1098 	}
1099 
1100 	/* Check for illegal packets */
1101 	if (hlen < (int)sizeof(struct ip)) {
1102 		REASON_SET(reason, PFRES_NORM);
1103 		goto drop;
1104 	}
1105 
1106 	if (hlen > ntohs(h->ip_len)) {
1107 		REASON_SET(reason, PFRES_NORM);
1108 		goto drop;
1109 	}
1110 
1111 	/* Clear IP_DF if the rule uses the no-df option or we're in no-df mode */
1112 	if ((((r && r->rule_flag & PFRULE_NODF) ||
1113 	    (V_pf_status.reass & PF_REASS_NODF)) && h->ip_off & htons(IP_DF)
1114 	)) {
1115 		u_int16_t ip_off = h->ip_off;
1116 
1117 		h->ip_off &= htons(~IP_DF);
1118 		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
1119 	}
1120 
1121 	/* We will need other tests here */
1122 	if (!fragoff && !mff)
1123 		goto no_fragment;
1124 
1125 	/* We're dealing with a fragment now. Don't allow fragments
1126 	 * with IP_DF to enter the cache. If the flag was cleared by
1127 	 * no-df above, fine. Otherwise drop it.
1128 	 */
1129 	if (h->ip_off & htons(IP_DF)) {
1130 		DPFPRINTF(("IP_DF\n"));
1131 		goto bad;
1132 	}
1133 
1134 	ip_len = ntohs(h->ip_len) - hlen;
1135 
1136 	/* All fragments are 8 byte aligned */
1137 	if (mff && (ip_len & 0x7)) {
1138 		DPFPRINTF(("mff and %d\n", ip_len));
1139 		goto bad;
1140 	}
1141 
1142 	/* Respect maximum length */
1143 	if (fragoff + ip_len > IP_MAXPACKET) {
1144 		DPFPRINTF(("max packet %d\n", fragoff + ip_len));
1145 		goto bad;
1146 	}
1147 
1148 	if (r==NULL || !(r->rule_flag & PFRULE_FRAGMENT_NOREASS)) {
1149 		max = fragoff + ip_len;
1150 
1151 		/* Fully buffer all of the fragments
1152 		 * Might return a completely reassembled mbuf, or NULL */
1153 		PF_FRAG_LOCK();
1154 		DPFPRINTF(("reass frag %d @ %d-%d\n", h->ip_id, fragoff, max));
1155 		verdict = pf_reassemble(m0, h, pd->dir, reason);
1156 		PF_FRAG_UNLOCK();
1157 
1158 		if (verdict != PF_PASS)
1159 			return (PF_DROP);
1160 
1161 		m = *m0;
1162 		if (m == NULL)
1163 			return (PF_DROP);
1164 
1165 		h = mtod(m, struct ip *);
1166 
1167  no_fragment:
1168 		/* At this point, only IP_DF is allowed in ip_off */
1169 		if (h->ip_off & ~htons(IP_DF)) {
1170 			u_int16_t ip_off = h->ip_off;
1171 
1172 			h->ip_off &= htons(IP_DF);
1173 			h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
1174 		}
1175 	}
1176 
1177 	return (PF_PASS);
1178 
1179  bad:
1180 	DPFPRINTF(("dropping bad fragment\n"));
1181 	REASON_SET(reason, PFRES_FRAG);
1182  drop:
1183 	if (r != NULL && r->log)
1184 		PFLOG_PACKET(kif, m, AF_INET, *reason, r, NULL, NULL, pd, 1);
1185 
1186 	return (PF_DROP);
1187 }
1188 #endif
1189 
1190 #ifdef INET6
1191 int
1192 pf_normalize_ip6(struct mbuf **m0, struct pfi_kkif *kif,
1193     u_short *reason, struct pf_pdesc *pd)
1194 {
1195 	struct mbuf		*m = *m0;
1196 	struct pf_krule		*r;
1197 	struct ip6_hdr		*h = mtod(m, struct ip6_hdr *);
1198 	int			 extoff;
1199 	int			 off;
1200 	struct ip6_ext		 ext;
1201 	struct ip6_opt		 opt;
1202 	struct ip6_frag		 frag;
1203 	u_int32_t		 plen;
1204 	int			 optend;
1205 	int			 ooff;
1206 	u_int8_t		 proto;
1207 	int			 terminal;
1208 	int			 srs;
1209 
1210 	PF_RULES_RASSERT();
1211 
1212 	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
1213 	/* Check if there any scrub rules. Lack of scrub rules means enforced
1214 	 * packet normalization operation just like in OpenBSD. */
1215 	srs = (r != NULL);
1216 	while (r != NULL) {
1217 		pf_counter_u64_add(&r->evaluations, 1);
1218 		if (pfi_kkif_match(r->kif, kif) == r->ifnot)
1219 			r = r->skip[PF_SKIP_IFP].ptr;
1220 		else if (r->direction && r->direction != pd->dir)
1221 			r = r->skip[PF_SKIP_DIR].ptr;
1222 		else if (r->af && r->af != AF_INET6)
1223 			r = r->skip[PF_SKIP_AF].ptr;
1224 #if 0 /* header chain! */
1225 		else if (r->proto && r->proto != h->ip6_nxt)
1226 			r = r->skip[PF_SKIP_PROTO].ptr;
1227 #endif
1228 		else if (PF_MISMATCHAW(&r->src.addr,
1229 		    (struct pf_addr *)&h->ip6_src, AF_INET6,
1230 		    r->src.neg, kif, M_GETFIB(m)))
1231 			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
1232 		else if (PF_MISMATCHAW(&r->dst.addr,
1233 		    (struct pf_addr *)&h->ip6_dst, AF_INET6,
1234 		    r->dst.neg, NULL, M_GETFIB(m)))
1235 			r = r->skip[PF_SKIP_DST_ADDR].ptr;
1236 		else
1237 			break;
1238 	}
1239 
1240 	if (srs) {
1241 		/* With scrub rules present IPv6 normalization happens only
1242 		 * if one of rules has matched and it's not a "no scrub" rule */
1243 		if (r == NULL || r->action == PF_NOSCRUB)
1244 			return (PF_PASS);
1245 
1246 		pf_counter_u64_critical_enter();
1247 		pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1);
1248 		pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT], pd->tot_len);
1249 		pf_counter_u64_critical_exit();
1250 		pf_rule_to_actions(r, &pd->act);
1251 	}
1252 
1253 	/* Check for illegal packets */
1254 	if (sizeof(struct ip6_hdr) + IPV6_MAXPACKET < m->m_pkthdr.len)
1255 		goto drop;
1256 
1257 again:
1258 	h = mtod(m, struct ip6_hdr *);
1259 	plen = ntohs(h->ip6_plen);
1260 	/* jumbo payload option not supported */
1261 	if (plen == 0)
1262 		goto drop;
1263 
1264 	extoff = 0;
1265 	off = sizeof(struct ip6_hdr);
1266 	proto = h->ip6_nxt;
1267 	terminal = 0;
1268 	do {
1269 		switch (proto) {
1270 		case IPPROTO_FRAGMENT:
1271 			goto fragment;
1272 			break;
1273 		case IPPROTO_AH:
1274 		case IPPROTO_ROUTING:
1275 		case IPPROTO_DSTOPTS:
1276 			if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
1277 			    NULL, AF_INET6))
1278 				goto shortpkt;
1279 			extoff = off;
1280 			if (proto == IPPROTO_AH)
1281 				off += (ext.ip6e_len + 2) * 4;
1282 			else
1283 				off += (ext.ip6e_len + 1) * 8;
1284 			proto = ext.ip6e_nxt;
1285 			break;
1286 		case IPPROTO_HOPOPTS:
1287 			if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
1288 			    NULL, AF_INET6))
1289 				goto shortpkt;
1290 			extoff = off;
1291 			optend = off + (ext.ip6e_len + 1) * 8;
1292 			ooff = off + sizeof(ext);
1293 			do {
1294 				if (!pf_pull_hdr(m, ooff, &opt.ip6o_type,
1295 				    sizeof(opt.ip6o_type), NULL, NULL,
1296 				    AF_INET6))
1297 					goto shortpkt;
1298 				if (opt.ip6o_type == IP6OPT_PAD1) {
1299 					ooff++;
1300 					continue;
1301 				}
1302 				if (!pf_pull_hdr(m, ooff, &opt, sizeof(opt),
1303 				    NULL, NULL, AF_INET6))
1304 					goto shortpkt;
1305 				if (ooff + sizeof(opt) + opt.ip6o_len > optend)
1306 					goto drop;
1307 				if (opt.ip6o_type == IP6OPT_JUMBO)
1308 					goto drop;
1309 				ooff += sizeof(opt) + opt.ip6o_len;
1310 			} while (ooff < optend);
1311 
1312 			off = optend;
1313 			proto = ext.ip6e_nxt;
1314 			break;
1315 		default:
1316 			terminal = 1;
1317 			break;
1318 		}
1319 	} while (!terminal);
1320 
1321 	if (sizeof(struct ip6_hdr) + plen > m->m_pkthdr.len)
1322 		goto shortpkt;
1323 
1324 	return (PF_PASS);
1325 
1326  fragment:
1327 	if (pd->flags & PFDESC_IP_REAS)
1328 		return (PF_DROP);
1329 	if (sizeof(struct ip6_hdr) + plen > m->m_pkthdr.len)
1330 		goto shortpkt;
1331 
1332 	if (!pf_pull_hdr(m, off, &frag, sizeof(frag), NULL, NULL, AF_INET6))
1333 		goto shortpkt;
1334 
1335 	/* Offset now points to data portion. */
1336 	off += sizeof(frag);
1337 
1338 	/* Returns PF_DROP or *m0 is NULL or completely reassembled mbuf. */
1339 	if (pf_reassemble6(m0, h, &frag, off, extoff, reason) != PF_PASS)
1340 		return (PF_DROP);
1341 	m = *m0;
1342 	if (m == NULL)
1343 		return (PF_DROP);
1344 
1345 	pd->flags |= PFDESC_IP_REAS;
1346 	goto again;
1347 
1348  shortpkt:
1349 	REASON_SET(reason, PFRES_SHORT);
1350 	if (r != NULL && r->log)
1351 		PFLOG_PACKET(kif, m, AF_INET6, *reason, r, NULL, NULL, pd, 1);
1352 	return (PF_DROP);
1353 
1354  drop:
1355 	REASON_SET(reason, PFRES_NORM);
1356 	if (r != NULL && r->log)
1357 		PFLOG_PACKET(kif, m, AF_INET6, *reason, r, NULL, NULL, pd, 1);
1358 	return (PF_DROP);
1359 }
1360 #endif /* INET6 */
1361 
1362 int
1363 pf_normalize_tcp(struct pfi_kkif *kif, struct mbuf *m, int ipoff,
1364     int off, void *h, struct pf_pdesc *pd)
1365 {
1366 	struct pf_krule	*r, *rm = NULL;
1367 	struct tcphdr	*th = &pd->hdr.tcp;
1368 	int		 rewrite = 0;
1369 	u_short		 reason;
1370 	u_int8_t	 flags;
1371 	sa_family_t	 af = pd->af;
1372 	int		 srs;
1373 
1374 	PF_RULES_RASSERT();
1375 
1376 	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
1377 	/* Check if there any scrub rules. Lack of scrub rules means enforced
1378 	 * packet normalization operation just like in OpenBSD. */
1379 	srs = (r != NULL);
1380 	while (r != NULL) {
1381 		pf_counter_u64_add(&r->evaluations, 1);
1382 		if (pfi_kkif_match(r->kif, kif) == r->ifnot)
1383 			r = r->skip[PF_SKIP_IFP].ptr;
1384 		else if (r->direction && r->direction != pd->dir)
1385 			r = r->skip[PF_SKIP_DIR].ptr;
1386 		else if (r->af && r->af != af)
1387 			r = r->skip[PF_SKIP_AF].ptr;
1388 		else if (r->proto && r->proto != pd->proto)
1389 			r = r->skip[PF_SKIP_PROTO].ptr;
1390 		else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
1391 		    r->src.neg, kif, M_GETFIB(m)))
1392 			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
1393 		else if (r->src.port_op && !pf_match_port(r->src.port_op,
1394 			    r->src.port[0], r->src.port[1], th->th_sport))
1395 			r = r->skip[PF_SKIP_SRC_PORT].ptr;
1396 		else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
1397 		    r->dst.neg, NULL, M_GETFIB(m)))
1398 			r = r->skip[PF_SKIP_DST_ADDR].ptr;
1399 		else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
1400 			    r->dst.port[0], r->dst.port[1], th->th_dport))
1401 			r = r->skip[PF_SKIP_DST_PORT].ptr;
1402 		else if (r->os_fingerprint != PF_OSFP_ANY && !pf_osfp_match(
1403 			    pf_osfp_fingerprint(pd, m, off, th),
1404 			    r->os_fingerprint))
1405 			r = TAILQ_NEXT(r, entries);
1406 		else {
1407 			rm = r;
1408 			break;
1409 		}
1410 	}
1411 
1412 	if (srs) {
1413 		/* With scrub rules present TCP normalization happens only
1414 		 * if one of rules has matched and it's not a "no scrub" rule */
1415 		if (rm == NULL || rm->action == PF_NOSCRUB)
1416 			return (PF_PASS);
1417 
1418 		pf_counter_u64_critical_enter();
1419 		pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1);
1420 		pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT], pd->tot_len);
1421 		pf_counter_u64_critical_exit();
1422 		pf_rule_to_actions(rm, &pd->act);
1423 	}
1424 
1425 	if (rm && rm->rule_flag & PFRULE_REASSEMBLE_TCP)
1426 		pd->flags |= PFDESC_TCP_NORM;
1427 
1428 	flags = th->th_flags;
1429 	if (flags & TH_SYN) {
1430 		/* Illegal packet */
1431 		if (flags & TH_RST)
1432 			goto tcp_drop;
1433 
1434 		if (flags & TH_FIN)
1435 			goto tcp_drop;
1436 	} else {
1437 		/* Illegal packet */
1438 		if (!(flags & (TH_ACK|TH_RST)))
1439 			goto tcp_drop;
1440 	}
1441 
1442 	if (!(flags & TH_ACK)) {
1443 		/* These flags are only valid if ACK is set */
1444 		if ((flags & TH_FIN) || (flags & TH_PUSH) || (flags & TH_URG))
1445 			goto tcp_drop;
1446 	}
1447 
1448 	/* Check for illegal header length */
1449 	if (th->th_off < (sizeof(struct tcphdr) >> 2))
1450 		goto tcp_drop;
1451 
1452 	/* If flags changed, or reserved data set, then adjust */
1453 	if (flags != th->th_flags || th->th_x2 != 0) {
1454 		u_int16_t	ov, nv;
1455 
1456 		ov = *(u_int16_t *)(&th->th_ack + 1);
1457 		th->th_flags = flags;
1458 		th->th_x2 = 0;
1459 		nv = *(u_int16_t *)(&th->th_ack + 1);
1460 
1461 		th->th_sum = pf_proto_cksum_fixup(m, th->th_sum, ov, nv, 0);
1462 		rewrite = 1;
1463 	}
1464 
1465 	/* Remove urgent pointer, if TH_URG is not set */
1466 	if (!(flags & TH_URG) && th->th_urp) {
1467 		th->th_sum = pf_proto_cksum_fixup(m, th->th_sum, th->th_urp,
1468 		    0, 0);
1469 		th->th_urp = 0;
1470 		rewrite = 1;
1471 	}
1472 
1473 	/* copy back packet headers if we sanitized */
1474 	if (rewrite)
1475 		m_copyback(m, off, sizeof(*th), (caddr_t)th);
1476 
1477 	return (PF_PASS);
1478 
1479  tcp_drop:
1480 	REASON_SET(&reason, PFRES_NORM);
1481 	if (rm != NULL && r->log)
1482 		PFLOG_PACKET(kif, m, AF_INET, reason, r, NULL, NULL, pd, 1);
1483 	return (PF_DROP);
1484 }
1485 
1486 int
1487 pf_normalize_tcp_init(struct mbuf *m, int off, struct pf_pdesc *pd,
1488     struct tcphdr *th, struct pf_state_peer *src, struct pf_state_peer *dst)
1489 {
1490 	u_int32_t tsval, tsecr;
1491 	u_int8_t hdr[60];
1492 	u_int8_t *opt;
1493 
1494 	KASSERT((src->scrub == NULL),
1495 	    ("pf_normalize_tcp_init: src->scrub != NULL"));
1496 
1497 	src->scrub = uma_zalloc(V_pf_state_scrub_z, M_ZERO | M_NOWAIT);
1498 	if (src->scrub == NULL)
1499 		return (1);
1500 
1501 	switch (pd->af) {
1502 #ifdef INET
1503 	case AF_INET: {
1504 		struct ip *h = mtod(m, struct ip *);
1505 		src->scrub->pfss_ttl = h->ip_ttl;
1506 		break;
1507 	}
1508 #endif /* INET */
1509 #ifdef INET6
1510 	case AF_INET6: {
1511 		struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
1512 		src->scrub->pfss_ttl = h->ip6_hlim;
1513 		break;
1514 	}
1515 #endif /* INET6 */
1516 	}
1517 
1518 	/*
1519 	 * All normalizations below are only begun if we see the start of
1520 	 * the connections.  They must all set an enabled bit in pfss_flags
1521 	 */
1522 	if ((th->th_flags & TH_SYN) == 0)
1523 		return (0);
1524 
1525 	if (th->th_off > (sizeof(struct tcphdr) >> 2) && src->scrub &&
1526 	    pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
1527 		/* Diddle with TCP options */
1528 		int hlen;
1529 		opt = hdr + sizeof(struct tcphdr);
1530 		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
1531 		while (hlen >= TCPOLEN_TIMESTAMP) {
1532 			switch (*opt) {
1533 			case TCPOPT_EOL:	/* FALLTHROUGH */
1534 			case TCPOPT_NOP:
1535 				opt++;
1536 				hlen--;
1537 				break;
1538 			case TCPOPT_TIMESTAMP:
1539 				if (opt[1] >= TCPOLEN_TIMESTAMP) {
1540 					src->scrub->pfss_flags |=
1541 					    PFSS_TIMESTAMP;
1542 					src->scrub->pfss_ts_mod =
1543 					    htonl(arc4random());
1544 
1545 					/* note PFSS_PAWS not set yet */
1546 					memcpy(&tsval, &opt[2],
1547 					    sizeof(u_int32_t));
1548 					memcpy(&tsecr, &opt[6],
1549 					    sizeof(u_int32_t));
1550 					src->scrub->pfss_tsval0 = ntohl(tsval);
1551 					src->scrub->pfss_tsval = ntohl(tsval);
1552 					src->scrub->pfss_tsecr = ntohl(tsecr);
1553 					getmicrouptime(&src->scrub->pfss_last);
1554 				}
1555 				/* FALLTHROUGH */
1556 			default:
1557 				hlen -= MAX(opt[1], 2);
1558 				opt += MAX(opt[1], 2);
1559 				break;
1560 			}
1561 		}
1562 	}
1563 
1564 	return (0);
1565 }
1566 
1567 void
1568 pf_normalize_tcp_cleanup(struct pf_kstate *state)
1569 {
1570 	uma_zfree(V_pf_state_scrub_z, state->src.scrub);
1571 	uma_zfree(V_pf_state_scrub_z, state->dst.scrub);
1572 
1573 	/* Someday... flush the TCP segment reassembly descriptors. */
1574 }
1575 
1576 int
1577 pf_normalize_tcp_stateful(struct mbuf *m, int off, struct pf_pdesc *pd,
1578     u_short *reason, struct tcphdr *th, struct pf_kstate *state,
1579     struct pf_state_peer *src, struct pf_state_peer *dst, int *writeback)
1580 {
1581 	struct timeval uptime;
1582 	u_int32_t tsval, tsecr;
1583 	u_int tsval_from_last;
1584 	u_int8_t hdr[60];
1585 	u_int8_t *opt;
1586 	int copyback = 0;
1587 	int got_ts = 0;
1588 	size_t startoff;
1589 
1590 	KASSERT((src->scrub || dst->scrub),
1591 	    ("%s: src->scrub && dst->scrub!", __func__));
1592 
1593 	/*
1594 	 * Enforce the minimum TTL seen for this connection.  Negate a common
1595 	 * technique to evade an intrusion detection system and confuse
1596 	 * firewall state code.
1597 	 */
1598 	switch (pd->af) {
1599 #ifdef INET
1600 	case AF_INET: {
1601 		if (src->scrub) {
1602 			struct ip *h = mtod(m, struct ip *);
1603 			if (h->ip_ttl > src->scrub->pfss_ttl)
1604 				src->scrub->pfss_ttl = h->ip_ttl;
1605 			h->ip_ttl = src->scrub->pfss_ttl;
1606 		}
1607 		break;
1608 	}
1609 #endif /* INET */
1610 #ifdef INET6
1611 	case AF_INET6: {
1612 		if (src->scrub) {
1613 			struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
1614 			if (h->ip6_hlim > src->scrub->pfss_ttl)
1615 				src->scrub->pfss_ttl = h->ip6_hlim;
1616 			h->ip6_hlim = src->scrub->pfss_ttl;
1617 		}
1618 		break;
1619 	}
1620 #endif /* INET6 */
1621 	}
1622 
1623 	if (th->th_off > (sizeof(struct tcphdr) >> 2) &&
1624 	    ((src->scrub && (src->scrub->pfss_flags & PFSS_TIMESTAMP)) ||
1625 	    (dst->scrub && (dst->scrub->pfss_flags & PFSS_TIMESTAMP))) &&
1626 	    pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
1627 		/* Diddle with TCP options */
1628 		int hlen;
1629 		opt = hdr + sizeof(struct tcphdr);
1630 		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
1631 		while (hlen >= TCPOLEN_TIMESTAMP) {
1632 			startoff = opt - (hdr + sizeof(struct tcphdr));
1633 			switch (*opt) {
1634 			case TCPOPT_EOL:	/* FALLTHROUGH */
1635 			case TCPOPT_NOP:
1636 				opt++;
1637 				hlen--;
1638 				break;
1639 			case TCPOPT_TIMESTAMP:
1640 				/* Modulate the timestamps.  Can be used for
1641 				 * NAT detection, OS uptime determination or
1642 				 * reboot detection.
1643 				 */
1644 
1645 				if (got_ts) {
1646 					/* Huh?  Multiple timestamps!? */
1647 					if (V_pf_status.debug >= PF_DEBUG_MISC) {
1648 						DPFPRINTF(("multiple TS??\n"));
1649 						pf_print_state(state);
1650 						printf("\n");
1651 					}
1652 					REASON_SET(reason, PFRES_TS);
1653 					return (PF_DROP);
1654 				}
1655 				if (opt[1] >= TCPOLEN_TIMESTAMP) {
1656 					memcpy(&tsval, &opt[2],
1657 					    sizeof(u_int32_t));
1658 					if (tsval && src->scrub &&
1659 					    (src->scrub->pfss_flags &
1660 					    PFSS_TIMESTAMP)) {
1661 						tsval = ntohl(tsval);
1662 						pf_patch_32_unaligned(m,
1663 						    &th->th_sum,
1664 						    &opt[2],
1665 						    htonl(tsval +
1666 						    src->scrub->pfss_ts_mod),
1667 						    PF_ALGNMNT(startoff),
1668 						    0);
1669 						copyback = 1;
1670 					}
1671 
1672 					/* Modulate TS reply iff valid (!0) */
1673 					memcpy(&tsecr, &opt[6],
1674 					    sizeof(u_int32_t));
1675 					if (tsecr && dst->scrub &&
1676 					    (dst->scrub->pfss_flags &
1677 					    PFSS_TIMESTAMP)) {
1678 						tsecr = ntohl(tsecr)
1679 						    - dst->scrub->pfss_ts_mod;
1680 						pf_patch_32_unaligned(m,
1681 						    &th->th_sum,
1682 						    &opt[6],
1683 						    htonl(tsecr),
1684 						    PF_ALGNMNT(startoff),
1685 						    0);
1686 						copyback = 1;
1687 					}
1688 					got_ts = 1;
1689 				}
1690 				/* FALLTHROUGH */
1691 			default:
1692 				hlen -= MAX(opt[1], 2);
1693 				opt += MAX(opt[1], 2);
1694 				break;
1695 			}
1696 		}
1697 		if (copyback) {
1698 			/* Copyback the options, caller copys back header */
1699 			*writeback = 1;
1700 			m_copyback(m, off + sizeof(struct tcphdr),
1701 			    (th->th_off << 2) - sizeof(struct tcphdr), hdr +
1702 			    sizeof(struct tcphdr));
1703 		}
1704 	}
1705 
1706 	/*
1707 	 * Must invalidate PAWS checks on connections idle for too long.
1708 	 * The fastest allowed timestamp clock is 1ms.  That turns out to
1709 	 * be about 24 days before it wraps.  XXX Right now our lowerbound
1710 	 * TS echo check only works for the first 12 days of a connection
1711 	 * when the TS has exhausted half its 32bit space
1712 	 */
1713 #define TS_MAX_IDLE	(24*24*60*60)
1714 #define TS_MAX_CONN	(12*24*60*60)	/* XXX remove when better tsecr check */
1715 
1716 	getmicrouptime(&uptime);
1717 	if (src->scrub && (src->scrub->pfss_flags & PFSS_PAWS) &&
1718 	    (uptime.tv_sec - src->scrub->pfss_last.tv_sec > TS_MAX_IDLE ||
1719 	    time_uptime - state->creation > TS_MAX_CONN))  {
1720 		if (V_pf_status.debug >= PF_DEBUG_MISC) {
1721 			DPFPRINTF(("src idled out of PAWS\n"));
1722 			pf_print_state(state);
1723 			printf("\n");
1724 		}
1725 		src->scrub->pfss_flags = (src->scrub->pfss_flags & ~PFSS_PAWS)
1726 		    | PFSS_PAWS_IDLED;
1727 	}
1728 	if (dst->scrub && (dst->scrub->pfss_flags & PFSS_PAWS) &&
1729 	    uptime.tv_sec - dst->scrub->pfss_last.tv_sec > TS_MAX_IDLE) {
1730 		if (V_pf_status.debug >= PF_DEBUG_MISC) {
1731 			DPFPRINTF(("dst idled out of PAWS\n"));
1732 			pf_print_state(state);
1733 			printf("\n");
1734 		}
1735 		dst->scrub->pfss_flags = (dst->scrub->pfss_flags & ~PFSS_PAWS)
1736 		    | PFSS_PAWS_IDLED;
1737 	}
1738 
1739 	if (got_ts && src->scrub && dst->scrub &&
1740 	    (src->scrub->pfss_flags & PFSS_PAWS) &&
1741 	    (dst->scrub->pfss_flags & PFSS_PAWS)) {
1742 		/* Validate that the timestamps are "in-window".
1743 		 * RFC1323 describes TCP Timestamp options that allow
1744 		 * measurement of RTT (round trip time) and PAWS
1745 		 * (protection against wrapped sequence numbers).  PAWS
1746 		 * gives us a set of rules for rejecting packets on
1747 		 * long fat pipes (packets that were somehow delayed
1748 		 * in transit longer than the time it took to send the
1749 		 * full TCP sequence space of 4Gb).  We can use these
1750 		 * rules and infer a few others that will let us treat
1751 		 * the 32bit timestamp and the 32bit echoed timestamp
1752 		 * as sequence numbers to prevent a blind attacker from
1753 		 * inserting packets into a connection.
1754 		 *
1755 		 * RFC1323 tells us:
1756 		 *  - The timestamp on this packet must be greater than
1757 		 *    or equal to the last value echoed by the other
1758 		 *    endpoint.  The RFC says those will be discarded
1759 		 *    since it is a dup that has already been acked.
1760 		 *    This gives us a lowerbound on the timestamp.
1761 		 *        timestamp >= other last echoed timestamp
1762 		 *  - The timestamp will be less than or equal to
1763 		 *    the last timestamp plus the time between the
1764 		 *    last packet and now.  The RFC defines the max
1765 		 *    clock rate as 1ms.  We will allow clocks to be
1766 		 *    up to 10% fast and will allow a total difference
1767 		 *    or 30 seconds due to a route change.  And this
1768 		 *    gives us an upperbound on the timestamp.
1769 		 *        timestamp <= last timestamp + max ticks
1770 		 *    We have to be careful here.  Windows will send an
1771 		 *    initial timestamp of zero and then initialize it
1772 		 *    to a random value after the 3whs; presumably to
1773 		 *    avoid a DoS by having to call an expensive RNG
1774 		 *    during a SYN flood.  Proof MS has at least one
1775 		 *    good security geek.
1776 		 *
1777 		 *  - The TCP timestamp option must also echo the other
1778 		 *    endpoints timestamp.  The timestamp echoed is the
1779 		 *    one carried on the earliest unacknowledged segment
1780 		 *    on the left edge of the sequence window.  The RFC
1781 		 *    states that the host will reject any echoed
1782 		 *    timestamps that were larger than any ever sent.
1783 		 *    This gives us an upperbound on the TS echo.
1784 		 *        tescr <= largest_tsval
1785 		 *  - The lowerbound on the TS echo is a little more
1786 		 *    tricky to determine.  The other endpoint's echoed
1787 		 *    values will not decrease.  But there may be
1788 		 *    network conditions that re-order packets and
1789 		 *    cause our view of them to decrease.  For now the
1790 		 *    only lowerbound we can safely determine is that
1791 		 *    the TS echo will never be less than the original
1792 		 *    TS.  XXX There is probably a better lowerbound.
1793 		 *    Remove TS_MAX_CONN with better lowerbound check.
1794 		 *        tescr >= other original TS
1795 		 *
1796 		 * It is also important to note that the fastest
1797 		 * timestamp clock of 1ms will wrap its 32bit space in
1798 		 * 24 days.  So we just disable TS checking after 24
1799 		 * days of idle time.  We actually must use a 12d
1800 		 * connection limit until we can come up with a better
1801 		 * lowerbound to the TS echo check.
1802 		 */
1803 		struct timeval delta_ts;
1804 		int ts_fudge;
1805 
1806 		/*
1807 		 * PFTM_TS_DIFF is how many seconds of leeway to allow
1808 		 * a host's timestamp.  This can happen if the previous
1809 		 * packet got delayed in transit for much longer than
1810 		 * this packet.
1811 		 */
1812 		if ((ts_fudge = state->rule.ptr->timeout[PFTM_TS_DIFF]) == 0)
1813 			ts_fudge = V_pf_default_rule.timeout[PFTM_TS_DIFF];
1814 
1815 		/* Calculate max ticks since the last timestamp */
1816 #define TS_MAXFREQ	1100		/* RFC max TS freq of 1Khz + 10% skew */
1817 #define TS_MICROSECS	1000000		/* microseconds per second */
1818 		delta_ts = uptime;
1819 		timevalsub(&delta_ts, &src->scrub->pfss_last);
1820 		tsval_from_last = (delta_ts.tv_sec + ts_fudge) * TS_MAXFREQ;
1821 		tsval_from_last += delta_ts.tv_usec / (TS_MICROSECS/TS_MAXFREQ);
1822 
1823 		if ((src->state >= TCPS_ESTABLISHED &&
1824 		    dst->state >= TCPS_ESTABLISHED) &&
1825 		    (SEQ_LT(tsval, dst->scrub->pfss_tsecr) ||
1826 		    SEQ_GT(tsval, src->scrub->pfss_tsval + tsval_from_last) ||
1827 		    (tsecr && (SEQ_GT(tsecr, dst->scrub->pfss_tsval) ||
1828 		    SEQ_LT(tsecr, dst->scrub->pfss_tsval0))))) {
1829 			/* Bad RFC1323 implementation or an insertion attack.
1830 			 *
1831 			 * - Solaris 2.6 and 2.7 are known to send another ACK
1832 			 *   after the FIN,FIN|ACK,ACK closing that carries
1833 			 *   an old timestamp.
1834 			 */
1835 
1836 			DPFPRINTF(("Timestamp failed %c%c%c%c\n",
1837 			    SEQ_LT(tsval, dst->scrub->pfss_tsecr) ? '0' : ' ',
1838 			    SEQ_GT(tsval, src->scrub->pfss_tsval +
1839 			    tsval_from_last) ? '1' : ' ',
1840 			    SEQ_GT(tsecr, dst->scrub->pfss_tsval) ? '2' : ' ',
1841 			    SEQ_LT(tsecr, dst->scrub->pfss_tsval0)? '3' : ' '));
1842 			DPFPRINTF((" tsval: %u  tsecr: %u  +ticks: %u  "
1843 			    "idle: %jus %lums\n",
1844 			    tsval, tsecr, tsval_from_last,
1845 			    (uintmax_t)delta_ts.tv_sec,
1846 			    delta_ts.tv_usec / 1000));
1847 			DPFPRINTF((" src->tsval: %u  tsecr: %u\n",
1848 			    src->scrub->pfss_tsval, src->scrub->pfss_tsecr));
1849 			DPFPRINTF((" dst->tsval: %u  tsecr: %u  tsval0: %u"
1850 			    "\n", dst->scrub->pfss_tsval,
1851 			    dst->scrub->pfss_tsecr, dst->scrub->pfss_tsval0));
1852 			if (V_pf_status.debug >= PF_DEBUG_MISC) {
1853 				pf_print_state(state);
1854 				pf_print_flags(th->th_flags);
1855 				printf("\n");
1856 			}
1857 			REASON_SET(reason, PFRES_TS);
1858 			return (PF_DROP);
1859 		}
1860 
1861 		/* XXX I'd really like to require tsecr but it's optional */
1862 
1863 	} else if (!got_ts && (th->th_flags & TH_RST) == 0 &&
1864 	    ((src->state == TCPS_ESTABLISHED && dst->state == TCPS_ESTABLISHED)
1865 	    || pd->p_len > 0 || (th->th_flags & TH_SYN)) &&
1866 	    src->scrub && dst->scrub &&
1867 	    (src->scrub->pfss_flags & PFSS_PAWS) &&
1868 	    (dst->scrub->pfss_flags & PFSS_PAWS)) {
1869 		/* Didn't send a timestamp.  Timestamps aren't really useful
1870 		 * when:
1871 		 *  - connection opening or closing (often not even sent).
1872 		 *    but we must not let an attacker to put a FIN on a
1873 		 *    data packet to sneak it through our ESTABLISHED check.
1874 		 *  - on a TCP reset.  RFC suggests not even looking at TS.
1875 		 *  - on an empty ACK.  The TS will not be echoed so it will
1876 		 *    probably not help keep the RTT calculation in sync and
1877 		 *    there isn't as much danger when the sequence numbers
1878 		 *    got wrapped.  So some stacks don't include TS on empty
1879 		 *    ACKs :-(
1880 		 *
1881 		 * To minimize the disruption to mostly RFC1323 conformant
1882 		 * stacks, we will only require timestamps on data packets.
1883 		 *
1884 		 * And what do ya know, we cannot require timestamps on data
1885 		 * packets.  There appear to be devices that do legitimate
1886 		 * TCP connection hijacking.  There are HTTP devices that allow
1887 		 * a 3whs (with timestamps) and then buffer the HTTP request.
1888 		 * If the intermediate device has the HTTP response cache, it
1889 		 * will spoof the response but not bother timestamping its
1890 		 * packets.  So we can look for the presence of a timestamp in
1891 		 * the first data packet and if there, require it in all future
1892 		 * packets.
1893 		 */
1894 
1895 		if (pd->p_len > 0 && (src->scrub->pfss_flags & PFSS_DATA_TS)) {
1896 			/*
1897 			 * Hey!  Someone tried to sneak a packet in.  Or the
1898 			 * stack changed its RFC1323 behavior?!?!
1899 			 */
1900 			if (V_pf_status.debug >= PF_DEBUG_MISC) {
1901 				DPFPRINTF(("Did not receive expected RFC1323 "
1902 				    "timestamp\n"));
1903 				pf_print_state(state);
1904 				pf_print_flags(th->th_flags);
1905 				printf("\n");
1906 			}
1907 			REASON_SET(reason, PFRES_TS);
1908 			return (PF_DROP);
1909 		}
1910 	}
1911 
1912 	/*
1913 	 * We will note if a host sends his data packets with or without
1914 	 * timestamps.  And require all data packets to contain a timestamp
1915 	 * if the first does.  PAWS implicitly requires that all data packets be
1916 	 * timestamped.  But I think there are middle-man devices that hijack
1917 	 * TCP streams immediately after the 3whs and don't timestamp their
1918 	 * packets (seen in a WWW accelerator or cache).
1919 	 */
1920 	if (pd->p_len > 0 && src->scrub && (src->scrub->pfss_flags &
1921 	    (PFSS_TIMESTAMP|PFSS_DATA_TS|PFSS_DATA_NOTS)) == PFSS_TIMESTAMP) {
1922 		if (got_ts)
1923 			src->scrub->pfss_flags |= PFSS_DATA_TS;
1924 		else {
1925 			src->scrub->pfss_flags |= PFSS_DATA_NOTS;
1926 			if (V_pf_status.debug >= PF_DEBUG_MISC && dst->scrub &&
1927 			    (dst->scrub->pfss_flags & PFSS_TIMESTAMP)) {
1928 				/* Don't warn if other host rejected RFC1323 */
1929 				DPFPRINTF(("Broken RFC1323 stack did not "
1930 				    "timestamp data packet. Disabled PAWS "
1931 				    "security.\n"));
1932 				pf_print_state(state);
1933 				pf_print_flags(th->th_flags);
1934 				printf("\n");
1935 			}
1936 		}
1937 	}
1938 
1939 	/*
1940 	 * Update PAWS values
1941 	 */
1942 	if (got_ts && src->scrub && PFSS_TIMESTAMP == (src->scrub->pfss_flags &
1943 	    (PFSS_PAWS_IDLED|PFSS_TIMESTAMP))) {
1944 		getmicrouptime(&src->scrub->pfss_last);
1945 		if (SEQ_GEQ(tsval, src->scrub->pfss_tsval) ||
1946 		    (src->scrub->pfss_flags & PFSS_PAWS) == 0)
1947 			src->scrub->pfss_tsval = tsval;
1948 
1949 		if (tsecr) {
1950 			if (SEQ_GEQ(tsecr, src->scrub->pfss_tsecr) ||
1951 			    (src->scrub->pfss_flags & PFSS_PAWS) == 0)
1952 				src->scrub->pfss_tsecr = tsecr;
1953 
1954 			if ((src->scrub->pfss_flags & PFSS_PAWS) == 0 &&
1955 			    (SEQ_LT(tsval, src->scrub->pfss_tsval0) ||
1956 			    src->scrub->pfss_tsval0 == 0)) {
1957 				/* tsval0 MUST be the lowest timestamp */
1958 				src->scrub->pfss_tsval0 = tsval;
1959 			}
1960 
1961 			/* Only fully initialized after a TS gets echoed */
1962 			if ((src->scrub->pfss_flags & PFSS_PAWS) == 0)
1963 				src->scrub->pfss_flags |= PFSS_PAWS;
1964 		}
1965 	}
1966 
1967 	/* I have a dream....  TCP segment reassembly.... */
1968 	return (0);
1969 }
1970 
1971 int
1972 pf_normalize_mss(struct mbuf *m, int off, struct pf_pdesc *pd)
1973 {
1974 	struct tcphdr	*th = &pd->hdr.tcp;
1975 	u_int16_t	*mss;
1976 	int		 thoff;
1977 	int		 opt, cnt, optlen = 0;
1978 	u_char		 opts[TCP_MAXOLEN];
1979 	u_char		*optp = opts;
1980 	size_t		 startoff;
1981 
1982 	thoff = th->th_off << 2;
1983 	cnt = thoff - sizeof(struct tcphdr);
1984 
1985 	if (cnt > 0 && !pf_pull_hdr(m, off + sizeof(*th), opts, cnt,
1986 	    NULL, NULL, pd->af))
1987 		return (0);
1988 
1989 	for (; cnt > 0; cnt -= optlen, optp += optlen) {
1990 		startoff = optp - opts;
1991 		opt = optp[0];
1992 		if (opt == TCPOPT_EOL)
1993 			break;
1994 		if (opt == TCPOPT_NOP)
1995 			optlen = 1;
1996 		else {
1997 			if (cnt < 2)
1998 				break;
1999 			optlen = optp[1];
2000 			if (optlen < 2 || optlen > cnt)
2001 				break;
2002 		}
2003 		switch (opt) {
2004 		case TCPOPT_MAXSEG:
2005 			mss = (u_int16_t *)(optp + 2);
2006 			if ((ntohs(*mss)) > pd->act.max_mss) {
2007 				pf_patch_16_unaligned(m,
2008 				    &th->th_sum,
2009 				    mss, htons(pd->act.max_mss),
2010 				    PF_ALGNMNT(startoff),
2011 				    0);
2012 				m_copyback(m, off + sizeof(*th),
2013 				    thoff - sizeof(*th), opts);
2014 				m_copyback(m, off, sizeof(*th), (caddr_t)th);
2015 			}
2016 			break;
2017 		default:
2018 			break;
2019 		}
2020 	}
2021 
2022 	return (0);
2023 }
2024 
2025 static int
2026 pf_scan_sctp(struct mbuf *m, int ipoff, int off, struct pf_pdesc *pd)
2027 {
2028 	struct sctp_chunkhdr ch = { };
2029 	int chunk_off = sizeof(struct sctphdr);
2030 	int chunk_start;
2031 
2032 	while (off + chunk_off < pd->tot_len) {
2033 		if (!pf_pull_hdr(m, off + chunk_off, &ch, sizeof(ch), NULL,
2034 		    NULL, pd->af))
2035 			return (PF_DROP);
2036 
2037 		/* Length includes the header, this must be at least 4. */
2038 		if (ntohs(ch.chunk_length) < 4)
2039 			return (PF_DROP);
2040 
2041 		chunk_start = chunk_off;
2042 		chunk_off += roundup(ntohs(ch.chunk_length), 4);
2043 
2044 		switch (ch.chunk_type) {
2045 		case SCTP_INITIATION: {
2046 			struct sctp_init_chunk init;
2047 
2048 			if (!pf_pull_hdr(m, off + chunk_start, &init,
2049 			    sizeof(init), NULL, NULL, pd->af))
2050 				return (PF_DROP);
2051 
2052 			/*
2053 			 * RFC 9620, Section 3.3.2, "The Initiate Tag is allowed to have
2054 			 * any value except 0."
2055 			 */
2056 			if (init.init.initiate_tag == 0)
2057 				return (PF_DROP);
2058 			if (init.init.num_inbound_streams == 0)
2059 				return (PF_DROP);
2060 			if (init.init.num_outbound_streams == 0)
2061 				return (PF_DROP);
2062 			if (ntohl(init.init.a_rwnd) < SCTP_MIN_RWND)
2063 				return (PF_DROP);
2064 
2065 			/*
2066 			 * RFC 9260, Section 3.1, INIT chunks MUST have zero
2067 			 * verification tag.
2068 			 */
2069 			if (pd->hdr.sctp.v_tag != 0)
2070 				return (PF_DROP);
2071 
2072 			pd->sctp_initiate_tag = init.init.initiate_tag;
2073 
2074 			pd->sctp_flags |= PFDESC_SCTP_INIT;
2075 			break;
2076 		}
2077 		case SCTP_INITIATION_ACK:
2078 			pd->sctp_flags |= PFDESC_SCTP_INIT_ACK;
2079 			break;
2080 		case SCTP_ABORT_ASSOCIATION:
2081 			pd->sctp_flags |= PFDESC_SCTP_ABORT;
2082 			break;
2083 		case SCTP_SHUTDOWN:
2084 		case SCTP_SHUTDOWN_ACK:
2085 			pd->sctp_flags |= PFDESC_SCTP_SHUTDOWN;
2086 			break;
2087 		case SCTP_SHUTDOWN_COMPLETE:
2088 			pd->sctp_flags |= PFDESC_SCTP_SHUTDOWN_COMPLETE;
2089 			break;
2090 		case SCTP_COOKIE_ECHO:
2091 		case SCTP_COOKIE_ACK:
2092 			pd->sctp_flags |= PFDESC_SCTP_COOKIE;
2093 			break;
2094 		case SCTP_DATA:
2095 			pd->sctp_flags |= PFDESC_SCTP_DATA;
2096 			break;
2097 		default:
2098 			pd->sctp_flags |= PFDESC_SCTP_OTHER;
2099 			break;
2100 		}
2101 	}
2102 
2103 	/* Validate chunk lengths vs. packet length. */
2104 	if (off + chunk_off != pd->tot_len)
2105 		return (PF_DROP);
2106 
2107 	/*
2108 	 * INIT, INIT_ACK or SHUTDOWN_COMPLETE chunks must always be the only
2109 	 * one in a packet.
2110 	 */
2111 	if ((pd->sctp_flags & PFDESC_SCTP_INIT) &&
2112 	    (pd->sctp_flags & ~PFDESC_SCTP_INIT))
2113 		return (PF_DROP);
2114 	if ((pd->sctp_flags & PFDESC_SCTP_INIT_ACK) &&
2115 	    (pd->sctp_flags & ~PFDESC_SCTP_INIT_ACK))
2116 		return (PF_DROP);
2117 	if ((pd->sctp_flags & PFDESC_SCTP_SHUTDOWN_COMPLETE) &&
2118 	    (pd->sctp_flags & ~PFDESC_SCTP_SHUTDOWN_COMPLETE))
2119 		return (PF_DROP);
2120 
2121 	return (PF_PASS);
2122 }
2123 
2124 int
2125 pf_normalize_sctp(int dir, struct pfi_kkif *kif, struct mbuf *m, int ipoff,
2126     int off, void *h, struct pf_pdesc *pd)
2127 {
2128 	struct pf_krule	*r, *rm = NULL;
2129 	struct sctphdr	*sh = &pd->hdr.sctp;
2130 	u_short		 reason;
2131 	sa_family_t	 af = pd->af;
2132 	int		 srs;
2133 
2134 	PF_RULES_RASSERT();
2135 
2136 	/* Unconditionally scan the SCTP packet, because we need to look for
2137 	 * things like shutdown and asconf chunks. */
2138 	if (pf_scan_sctp(m, ipoff, off, pd) != PF_PASS)
2139 		goto sctp_drop;
2140 
2141 	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
2142 	/* Check if there any scrub rules. Lack of scrub rules means enforced
2143 	 * packet normalization operation just like in OpenBSD. */
2144 	srs = (r != NULL);
2145 	while (r != NULL) {
2146 		pf_counter_u64_add(&r->evaluations, 1);
2147 		if (pfi_kkif_match(r->kif, kif) == r->ifnot)
2148 			r = r->skip[PF_SKIP_IFP].ptr;
2149 		else if (r->direction && r->direction != dir)
2150 			r = r->skip[PF_SKIP_DIR].ptr;
2151 		else if (r->af && r->af != af)
2152 			r = r->skip[PF_SKIP_AF].ptr;
2153 		else if (r->proto && r->proto != pd->proto)
2154 			r = r->skip[PF_SKIP_PROTO].ptr;
2155 		else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
2156 		    r->src.neg, kif, M_GETFIB(m)))
2157 			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
2158 		else if (r->src.port_op && !pf_match_port(r->src.port_op,
2159 			    r->src.port[0], r->src.port[1], sh->src_port))
2160 			r = r->skip[PF_SKIP_SRC_PORT].ptr;
2161 		else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
2162 		    r->dst.neg, NULL, M_GETFIB(m)))
2163 			r = r->skip[PF_SKIP_DST_ADDR].ptr;
2164 		else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
2165 			    r->dst.port[0], r->dst.port[1], sh->dest_port))
2166 			r = r->skip[PF_SKIP_DST_PORT].ptr;
2167 		else {
2168 			rm = r;
2169 			break;
2170 		}
2171 	}
2172 
2173 	if (srs) {
2174 		/* With scrub rules present SCTP normalization happens only
2175 		 * if one of rules has matched and it's not a "no scrub" rule */
2176 		if (rm == NULL || rm->action == PF_NOSCRUB)
2177 			return (PF_PASS);
2178 
2179 		pf_counter_u64_critical_enter();
2180 		pf_counter_u64_add_protected(&r->packets[dir == PF_OUT], 1);
2181 		pf_counter_u64_add_protected(&r->bytes[dir == PF_OUT], pd->tot_len);
2182 		pf_counter_u64_critical_exit();
2183 	}
2184 
2185 	/* Verify we're a multiple of 4 bytes long */
2186 	if ((pd->tot_len - off - sizeof(struct sctphdr)) % 4)
2187 		goto sctp_drop;
2188 
2189 	/* INIT chunk needs to be the only chunk */
2190 	if (pd->sctp_flags & PFDESC_SCTP_INIT)
2191 		if (pd->sctp_flags & ~PFDESC_SCTP_INIT)
2192 			goto sctp_drop;
2193 
2194 	return (PF_PASS);
2195 
2196 sctp_drop:
2197 	REASON_SET(&reason, PFRES_NORM);
2198 	if (rm != NULL && r->log)
2199 		PFLOG_PACKET(kif, m, AF_INET, reason, r, NULL, NULL, pd,
2200 		    1);
2201 
2202 	return (PF_DROP);
2203 }
2204 
2205 #ifdef INET
2206 void
2207 pf_scrub_ip(struct mbuf **m0, struct pf_pdesc *pd)
2208 {
2209 	struct mbuf		*m = *m0;
2210 	struct ip		*h = mtod(m, struct ip *);
2211 
2212 	/* Clear IP_DF if no-df was requested */
2213 	if (pd->act.flags & PFSTATE_NODF && h->ip_off & htons(IP_DF)) {
2214 		u_int16_t ip_off = h->ip_off;
2215 
2216 		h->ip_off &= htons(~IP_DF);
2217 		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
2218 	}
2219 
2220 	/* Enforce a minimum ttl, may cause endless packet loops */
2221 	if (pd->act.min_ttl && h->ip_ttl < pd->act.min_ttl) {
2222 		u_int16_t ip_ttl = h->ip_ttl;
2223 
2224 		h->ip_ttl = pd->act.min_ttl;
2225 		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_ttl, h->ip_ttl, 0);
2226 	}
2227 
2228 	/* Enforce tos */
2229 	if (pd->act.flags & PFSTATE_SETTOS) {
2230 		u_int16_t	ov, nv;
2231 
2232 		ov = *(u_int16_t *)h;
2233 		h->ip_tos = pd->act.set_tos | (h->ip_tos & IPTOS_ECN_MASK);
2234 		nv = *(u_int16_t *)h;
2235 
2236 		h->ip_sum = pf_cksum_fixup(h->ip_sum, ov, nv, 0);
2237 	}
2238 
2239 	/* random-id, but not for fragments */
2240 	if (pd->act.flags & PFSTATE_RANDOMID && !(h->ip_off & ~htons(IP_DF))) {
2241 		uint16_t ip_id = h->ip_id;
2242 
2243 		ip_fillid(h);
2244 		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_id, h->ip_id, 0);
2245 	}
2246 }
2247 #endif /* INET */
2248 
2249 #ifdef INET6
2250 void
2251 pf_scrub_ip6(struct mbuf **m0, struct pf_pdesc *pd)
2252 {
2253 	struct mbuf		*m = *m0;
2254 	struct ip6_hdr		*h = mtod(m, struct ip6_hdr *);
2255 
2256 	/* Enforce a minimum ttl, may cause endless packet loops */
2257 	if (pd->act.min_ttl && h->ip6_hlim < pd->act.min_ttl)
2258 		h->ip6_hlim = pd->act.min_ttl;
2259 
2260 	/* Enforce tos. Set traffic class bits */
2261 	if (pd->act.flags & PFSTATE_SETTOS) {
2262 		h->ip6_flow &= IPV6_FLOWLABEL_MASK | IPV6_VERSION_MASK;
2263 		h->ip6_flow |= htonl((pd->act.set_tos | IPV6_ECN(h)) << 20);
2264 	}
2265 }
2266 #endif
2267