/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright 2001 Niels Provos <provos@citi.umich.edu>
 * Copyright 2011 Alexander Bluhm <bluhm@openbsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *	$OpenBSD: pf_norm.c,v 1.114 2009/01/29 14:11:45 henning Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_pf.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/refcount.h>
#include <sys/rwlock.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/vnet.h>
#include <net/pfvar.h>
#include <net/if_pflog.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>

#ifdef INET6
#include <netinet/ip6.h>
#endif /* INET6 */

struct pf_frent {
	TAILQ_ENTRY(pf_frent)	fr_next;
	struct mbuf	*fe_m;
	uint16_t	fe_hdrlen;	/* ipv4 header length with ip options;
					   ipv6: base, extension and fragment
					   header length */
	uint16_t	fe_extoff;	/* last extension header offset or 0 */
	uint16_t	fe_len;		/* fragment length */
	uint16_t	fe_off;		/* fragment offset */
	uint16_t	fe_mff;		/* more fragment flag */
};

struct pf_fragment_cmp {
	struct pf_addr	frc_src;
	struct pf_addr	frc_dst;
	uint32_t	frc_id;
	sa_family_t	frc_af;
	uint8_t		frc_proto;
};

struct pf_fragment {
	struct pf_fragment_cmp	fr_key;
#define fr_src	fr_key.frc_src
#define fr_dst	fr_key.frc_dst
#define fr_id	fr_key.frc_id
#define fr_af	fr_key.frc_af
#define fr_proto	fr_key.frc_proto

	RB_ENTRY(pf_fragment) fr_entry;
	TAILQ_ENTRY(pf_fragment) frag_next;
	uint32_t	fr_timeout;
	uint16_t	fr_maxlen;	/* maximum length of single fragment */
	TAILQ_HEAD(pf_fragq, pf_frent) fr_queue;
};

struct pf_fragment_tag {
	uint16_t	ft_hdrlen;	/* header length of reassembled pkt */
	uint16_t	ft_extoff;	/* last extension header offset or 0 */
	uint16_t	ft_maxlen;	/* maximum fragment payload length */
	uint32_t	ft_id;		/* fragment id */
};

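/*
 * A single global mutex protects all fragment reassembly state.  Note
 * that the queues, trees and zones below are per-VNET, while the lock
 * itself is shared by every VNET.
 */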
static struct mtx pf_frag_mtx;
MTX_SYSINIT(pf_frag_mtx, &pf_frag_mtx, "pf fragments", MTX_DEF);
#define PF_FRAG_LOCK()		mtx_lock(&pf_frag_mtx)
#define PF_FRAG_UNLOCK()	mtx_unlock(&pf_frag_mtx)
#define PF_FRAG_ASSERT()	mtx_assert(&pf_frag_mtx, MA_OWNED)

VNET_DEFINE(uma_zone_t, pf_state_scrub_z);	/* XXX: shared with pfsync */

static VNET_DEFINE(uma_zone_t, pf_frent_z);
#define	V_pf_frent_z	VNET(pf_frent_z)
static VNET_DEFINE(uma_zone_t, pf_frag_z);
#define	V_pf_frag_z	VNET(pf_frag_z)

TAILQ_HEAD(pf_fragqueue, pf_fragment);
TAILQ_HEAD(pf_cachequeue, pf_fragment);
static VNET_DEFINE(struct pf_fragqueue,	pf_fragqueue);
#define	V_pf_fragqueue			VNET(pf_fragqueue)
RB_HEAD(pf_frag_tree, pf_fragment);
static VNET_DEFINE(struct pf_frag_tree,	pf_frag_tree);
#define	V_pf_frag_tree			VNET(pf_frag_tree)
static int		 pf_frag_compare(struct pf_fragment *,
			    struct pf_fragment *);
static RB_PROTOTYPE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
static RB_GENERATE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);

static void	pf_flush_fragments(void);
static void	pf_free_fragment(struct pf_fragment *);
static void	pf_remove_fragment(struct pf_fragment *);
static int	pf_normalize_tcpopt(struct pf_rule *, struct mbuf *,
		    struct tcphdr *, int, sa_family_t);
static struct pf_frent *pf_create_fragment(u_short *);
static struct pf_fragment *pf_find_fragment(struct pf_fragment_cmp *key,
		    struct pf_frag_tree *tree);
static struct pf_fragment *pf_fillup_fragment(struct pf_fragment_cmp *,
		    struct pf_frent *, u_short *);
static int	pf_isfull_fragment(struct pf_fragment *);
static struct mbuf *pf_join_fragment(struct pf_fragment *);
#ifdef INET
static void	pf_scrub_ip(struct mbuf **, uint32_t, uint8_t, uint8_t);
static int	pf_reassemble(struct mbuf **, struct ip *, int, u_short *);
#endif	/* INET */
#ifdef INET6
static int	pf_reassemble6(struct mbuf **, struct ip6_hdr *,
		    struct ip6_frag *, uint16_t, uint16_t, u_short *);
static void	pf_scrub_ip6(struct mbuf **, uint8_t);
#endif	/* INET6 */

#define	DPFPRINTF(x) do {				\
	if (V_pf_status.debug >= PF_DEBUG_MISC) {	\
		printf("%s: ", __func__);		\
		printf x ;				\
	}						\
} while (0)

#ifdef INET
static void
pf_ip2key(struct ip *ip, int dir, struct pf_fragment_cmp *key)
{

	key->frc_src.v4 = ip->ip_src;
	key->frc_dst.v4 = ip->ip_dst;
	key->frc_af = AF_INET;
	key->frc_proto = ip->ip_p;
	key->frc_id = ip->ip_id;
}
#endif	/* INET */

void
pf_normalize_init(void)
{

	V_pf_frag_z = uma_zcreate("pf frags", sizeof(struct pf_fragment),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	V_pf_frent_z = uma_zcreate("pf frag entries", sizeof(struct pf_frent),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	V_pf_state_scrub_z = uma_zcreate("pf state scrubs",
	    sizeof(struct pf_state_scrub), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);

	V_pf_limits[PF_LIMIT_FRAGS].zone = V_pf_frent_z;
	V_pf_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT;
	uma_zone_set_max(V_pf_frent_z, PFFRAG_FRENT_HIWAT);
	uma_zone_set_warning(V_pf_frent_z, "PF frag entries limit reached");

	TAILQ_INIT(&V_pf_fragqueue);
}

void
pf_normalize_cleanup(void)
{

	uma_zdestroy(V_pf_state_scrub_z);
	uma_zdestroy(V_pf_frent_z);
	uma_zdestroy(V_pf_frag_z);
}

static int
pf_frag_compare(struct pf_fragment *a, struct pf_fragment *b)
{
	int	diff;

	if ((diff = a->fr_id - b->fr_id) != 0)
		return (diff);
	if ((diff = a->fr_proto - b->fr_proto) != 0)
		return (diff);
	if ((diff = a->fr_af - b->fr_af) != 0)
		return (diff);
	if ((diff = pf_addr_cmp(&a->fr_src, &b->fr_src, a->fr_af)) != 0)
		return (diff);
	if ((diff = pf_addr_cmp(&a->fr_dst, &b->fr_dst, a->fr_af)) != 0)
		return (diff);
	return (0);
}

void
pf_purge_expired_fragments(void)
{
	u_int32_t	expire = time_uptime -
			    V_pf_default_rule.timeout[PFTM_FRAG];

	pf_purge_fragments(expire);
}

void
pf_purge_fragments(uint32_t expire)
{
	struct pf_fragment	*frag;

	PF_FRAG_LOCK();
	while ((frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue)) != NULL) {
		if (frag->fr_timeout > expire)
			break;

		DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
		pf_free_fragment(frag);
	}

	PF_FRAG_UNLOCK();
}

/*
 * Try to flush old fragments to make space for new ones
 */
static void
pf_flush_fragments(void)
{
	struct pf_fragment	*frag;
	int			 goal;

	PF_FRAG_ASSERT();

	goal = uma_zone_get_cur(V_pf_frent_z) * 9 / 10;
	DPFPRINTF(("trying to free %d frag entries\n", goal));
	while (goal < uma_zone_get_cur(V_pf_frent_z)) {
		frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue);
		if (frag)
			pf_free_fragment(frag);
		else
			break;
	}
}

/* Frees the fragments and all associated entries */
static void
pf_free_fragment(struct pf_fragment *frag)
{
	struct pf_frent		*frent;

	PF_FRAG_ASSERT();

	/* Free all fragments */
	for (frent = TAILQ_FIRST(&frag->fr_queue); frent;
	    frent = TAILQ_FIRST(&frag->fr_queue)) {
		TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);

		m_freem(frent->fe_m);
		uma_zfree(V_pf_frent_z, frent);
	}

	pf_remove_fragment(frag);
}

static struct pf_fragment *
pf_find_fragment(struct pf_fragment_cmp *key, struct pf_frag_tree *tree)
{
	struct pf_fragment	*frag;

	PF_FRAG_ASSERT();

	frag = RB_FIND(pf_frag_tree, tree, (struct pf_fragment *)key);
	if (frag != NULL) {
		/* XXX Are we sure we want to update the timeout? */
		frag->fr_timeout = time_uptime;
		TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);
		TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next);
	}

	return (frag);
}

/* Removes a fragment from the fragment queue and frees the fragment */
static void
pf_remove_fragment(struct pf_fragment *frag)
{

	PF_FRAG_ASSERT();

	RB_REMOVE(pf_frag_tree, &V_pf_frag_tree, frag);
	TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);
	uma_zfree(V_pf_frag_z, frag);
}

static struct pf_frent *
pf_create_fragment(u_short *reason)
{
	struct pf_frent *frent;

	PF_FRAG_ASSERT();

	frent = uma_zalloc(V_pf_frent_z, M_NOWAIT);
	if (frent == NULL) {
		pf_flush_fragments();
		frent = uma_zalloc(V_pf_frent_z, M_NOWAIT);
		if (frent == NULL) {
			REASON_SET(reason, PFRES_MEMORY);
			return (NULL);
		}
	}

	return (frent);
}

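/*
 * Insert a fragment entry into the reassembly queue identified by "key",
 * creating the queue if it does not exist yet.  Enforces the alignment
 * and length invariants and resolves overlaps; on failure the entry is
 * freed and NULL is returned.
 */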
static struct pf_fragment *
pf_fillup_fragment(struct pf_fragment_cmp *key, struct pf_frent *frent,
		u_short *reason)
{
	struct pf_frent		*after, *next, *prev;
	struct pf_fragment	*frag;
	uint16_t		total;

	PF_FRAG_ASSERT();

	/* No empty fragments. */
	if (frent->fe_len == 0) {
		DPFPRINTF(("bad fragment: len 0"));
		goto bad_fragment;
	}

	/* All fragments are 8 byte aligned. */
	if (frent->fe_mff && (frent->fe_len & 0x7)) {
		DPFPRINTF(("bad fragment: mff and len %d", frent->fe_len));
		goto bad_fragment;
	}

	/* Respect maximum length, IP_MAXPACKET == IPV6_MAXPACKET. */
	if (frent->fe_off + frent->fe_len > IP_MAXPACKET) {
		DPFPRINTF(("bad fragment: max packet %d",
		    frent->fe_off + frent->fe_len));
		goto bad_fragment;
	}

	DPFPRINTF((key->frc_af == AF_INET ?
	    "reass frag %d @ %d-%d" : "reass frag %#08x @ %d-%d",
	    key->frc_id, frent->fe_off, frent->fe_off + frent->fe_len));

	/* Fully buffer all of the fragments in this fragment queue. */
	frag = pf_find_fragment(key, &V_pf_frag_tree);

	/* Create a new reassembly queue for this packet. */
	if (frag == NULL) {
		frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
		if (frag == NULL) {
			pf_flush_fragments();
			frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
			if (frag == NULL) {
				REASON_SET(reason, PFRES_MEMORY);
				goto drop_fragment;
			}
		}

		*(struct pf_fragment_cmp *)frag = *key;
		frag->fr_timeout = time_uptime;
		frag->fr_maxlen = frent->fe_len;
		TAILQ_INIT(&frag->fr_queue);

		RB_INSERT(pf_frag_tree, &V_pf_frag_tree, frag);
		TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next);

		/* We do not have a previous fragment. */
		TAILQ_INSERT_HEAD(&frag->fr_queue, frent, fr_next);

		return (frag);
	}

	KASSERT(!TAILQ_EMPTY(&frag->fr_queue), ("!TAILQ_EMPTY()->fr_queue"));

	/* Remember maximum fragment len for refragmentation. */
	if (frent->fe_len > frag->fr_maxlen)
		frag->fr_maxlen = frent->fe_len;

	/* Maximum data we have seen already. */
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
		TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;

	/* Non-terminal fragments must have the more-fragments flag set. */
	if (frent->fe_off + frent->fe_len < total && !frent->fe_mff)
		goto bad_fragment;

	/* Check if we saw the last fragment already. */
	if (!TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_mff) {
		if (frent->fe_off + frent->fe_len > total ||
		    (frent->fe_off + frent->fe_len == total && frent->fe_mff))
			goto bad_fragment;
	} else {
		if (frent->fe_off + frent->fe_len == total && !frent->fe_mff)
			goto bad_fragment;
	}

	/* Find a fragment after the current one. */
	prev = NULL;
	TAILQ_FOREACH(after, &frag->fr_queue, fr_next) {
		if (after->fe_off > frent->fe_off)
			break;
		prev = after;
	}

	KASSERT(prev != NULL || after != NULL,
	    ("prev != NULL || after != NULL"));

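	/*
	 * Overlap policy: a head overlap is resolved in favor of the
	 * already queued fragment by trimming the front of the new one
	 * (precut); a tail overlap is resolved in favor of the new
	 * fragment by trimming, or completely dropping, the queued
	 * fragments it covers (aftercut).
	 */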
	if (prev != NULL && prev->fe_off + prev->fe_len > frent->fe_off) {
		uint16_t precut;

		precut = prev->fe_off + prev->fe_len - frent->fe_off;
		if (precut >= frent->fe_len)
			goto bad_fragment;
		DPFPRINTF(("overlap -%d", precut));
		m_adj(frent->fe_m, precut);
		frent->fe_off += precut;
		frent->fe_len -= precut;
	}

	for (; after != NULL && frent->fe_off + frent->fe_len > after->fe_off;
	    after = next) {
		uint16_t aftercut;

		aftercut = frent->fe_off + frent->fe_len - after->fe_off;
		DPFPRINTF(("adjust overlap %d", aftercut));
		if (aftercut < after->fe_len) {
			m_adj(after->fe_m, aftercut);
			after->fe_off += aftercut;
			after->fe_len -= aftercut;
			break;
		}

		/* This fragment is completely overlapped, lose it. */
		next = TAILQ_NEXT(after, fr_next);
		m_freem(after->fe_m);
		TAILQ_REMOVE(&frag->fr_queue, after, fr_next);
		uma_zfree(V_pf_frent_z, after);
	}

	if (prev == NULL)
		TAILQ_INSERT_HEAD(&frag->fr_queue, frent, fr_next);
	else
		TAILQ_INSERT_AFTER(&frag->fr_queue, prev, frent, fr_next);

	return (frag);

bad_fragment:
	REASON_SET(reason, PFRES_FRAG);
drop_fragment:
	uma_zfree(V_pf_frent_z, frent);
	return (NULL);
}

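/*
 * A queue is complete once the terminal fragment (fe_mff clear) has been
 * queued and the queued ranges are contiguous from offset 0; returns 1
 * in that case, 0 otherwise.
 */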
static int
pf_isfull_fragment(struct pf_fragment *frag)
{
	struct pf_frent	*frent, *next;
	uint16_t off, total;

	/* Check if we are completely reassembled */
	if (TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_mff)
		return (0);

	/* Maximum data we have seen already */
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
		TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;

	/* Check if we have all the data */
	off = 0;
	for (frent = TAILQ_FIRST(&frag->fr_queue); frent; frent = next) {
		next = TAILQ_NEXT(frent, fr_next);

		off += frent->fe_len;
		if (off < total && (next == NULL || next->fe_off != off)) {
			DPFPRINTF(("missing fragment at %d, next %d, total %d",
			    off, next == NULL ? -1 : next->fe_off, total));
			return (0);
		}
	}
	DPFPRINTF(("%d < %d?", off, total));
	if (off < total)
		return (0);
	KASSERT(off == total, ("off == total"));

	return (1);
}

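/*
 * Concatenate the fragments into a single mbuf chain: the first entry
 * keeps its header, every later entry has its header and any trailing
 * slack trimmed before being appended.  The fragment state itself is
 * freed here.
 */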
static struct mbuf *
pf_join_fragment(struct pf_fragment *frag)
{
	struct mbuf *m, *m2;
	struct pf_frent	*frent, *next;

	frent = TAILQ_FIRST(&frag->fr_queue);
	next = TAILQ_NEXT(frent, fr_next);

	m = frent->fe_m;
	m_adj(m, (frent->fe_hdrlen + frent->fe_len) - m->m_pkthdr.len);
	uma_zfree(V_pf_frent_z, frent);
	for (frent = next; frent != NULL; frent = next) {
		next = TAILQ_NEXT(frent, fr_next);

		m2 = frent->fe_m;
		/* Strip off ip header. */
		m_adj(m2, frent->fe_hdrlen);
		/* Strip off any trailing bytes. */
		m_adj(m2, frent->fe_len - m2->m_pkthdr.len);

		uma_zfree(V_pf_frent_z, frent);
		m_cat(m, m2);
	}

	/* Remove from fragment queue. */
	pf_remove_fragment(frag);

	return (m);
}

#ifdef INET
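/*
 * Reassemble an IPv4 fragment.  Unlike pf_reassemble6(), the caller is
 * expected to hold PF_FRAG_LOCK across this call; pf_normalize_ip()
 * takes it around the invocation below.
 */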
static int
pf_reassemble(struct mbuf **m0, struct ip *ip, int dir, u_short *reason)
{
	struct mbuf		*m = *m0;
	struct pf_frent		*frent;
	struct pf_fragment	*frag;
	struct pf_fragment_cmp	key;
	uint16_t		total, hdrlen;

	/* Get an entry for the fragment queue */
	if ((frent = pf_create_fragment(reason)) == NULL)
		return (PF_DROP);

	frent->fe_m = m;
	frent->fe_hdrlen = ip->ip_hl << 2;
	frent->fe_extoff = 0;
	frent->fe_len = ntohs(ip->ip_len) - (ip->ip_hl << 2);
	frent->fe_off = (ntohs(ip->ip_off) & IP_OFFMASK) << 3;
	frent->fe_mff = ntohs(ip->ip_off) & IP_MF;

	pf_ip2key(ip, dir, &key);

	if ((frag = pf_fillup_fragment(&key, frent, reason)) == NULL)
		return (PF_DROP);

	/* The mbuf is part of the fragment entry, no direct free or access */
	m = *m0 = NULL;

	if (!pf_isfull_fragment(frag))
		return (PF_PASS);  /* drop because *m0 is NULL, no error */

	/* We have all the data */
	frent = TAILQ_FIRST(&frag->fr_queue);
	KASSERT(frent != NULL, ("frent != NULL"));
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
		TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
	hdrlen = frent->fe_hdrlen;

	m = *m0 = pf_join_fragment(frag);
	frag = NULL;

	if (m->m_flags & M_PKTHDR) {
		int plen = 0;
		for (m = *m0; m; m = m->m_next)
			plen += m->m_len;
		m = *m0;
		m->m_pkthdr.len = plen;
	}

	ip = mtod(m, struct ip *);
	ip->ip_len = htons(hdrlen + total);
	ip->ip_off &= ~(IP_MF|IP_OFFMASK);

	if (hdrlen + total > IP_MAXPACKET) {
		DPFPRINTF(("drop: too big: %d", total));
		ip->ip_len = 0;
		REASON_SET(reason, PFRES_SHORT);
		/* PF_DROP requires a valid mbuf *m0 in pf_test() */
		return (PF_DROP);
	}

	DPFPRINTF(("complete: %p(%d)\n", m, ntohs(ip->ip_len)));
	return (PF_PASS);
}
#endif	/* INET */

#ifdef INET6
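/*
 * Reassemble an IPv6 fragment.  This path takes PF_FRAG_LOCK internally.
 * On success the fragment header is deleted and a PF_REASSEMBLED m_tag
 * carrying a struct pf_fragment_tag is prepended, so that
 * pf_refragment6() can restore the original fragmentation on output.
 */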
static int
pf_reassemble6(struct mbuf **m0, struct ip6_hdr *ip6, struct ip6_frag *fraghdr,
    uint16_t hdrlen, uint16_t extoff, u_short *reason)
{
	struct mbuf		*m = *m0;
	struct pf_frent		*frent;
	struct pf_fragment	*frag;
	struct pf_fragment_cmp	 key;
	struct m_tag		*mtag;
	struct pf_fragment_tag	*ftag;
	int			 off;
	uint32_t		 frag_id;
	uint16_t		 total, maxlen;
	uint8_t			 proto;

	PF_FRAG_LOCK();

	/* Get an entry for the fragment queue. */
	if ((frent = pf_create_fragment(reason)) == NULL) {
		PF_FRAG_UNLOCK();
		return (PF_DROP);
	}

	frent->fe_m = m;
	frent->fe_hdrlen = hdrlen;
	frent->fe_extoff = extoff;
	frent->fe_len = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - hdrlen;
	frent->fe_off = ntohs(fraghdr->ip6f_offlg & IP6F_OFF_MASK);
	frent->fe_mff = fraghdr->ip6f_offlg & IP6F_MORE_FRAG;

	key.frc_src.v6 = ip6->ip6_src;
	key.frc_dst.v6 = ip6->ip6_dst;
	key.frc_af = AF_INET6;
	/* Only the first fragment's protocol is relevant. */
	key.frc_proto = 0;
	key.frc_id = fraghdr->ip6f_ident;

	if ((frag = pf_fillup_fragment(&key, frent, reason)) == NULL) {
		PF_FRAG_UNLOCK();
		return (PF_DROP);
	}

	/* The mbuf is part of the fragment entry, no direct free or access. */
	m = *m0 = NULL;

	if (!pf_isfull_fragment(frag)) {
		PF_FRAG_UNLOCK();
		return (PF_PASS);  /* Drop because *m0 is NULL, no error. */
	}

	/* We have all the data. */
	extoff = frent->fe_extoff;
	maxlen = frag->fr_maxlen;
	frag_id = frag->fr_id;
	frent = TAILQ_FIRST(&frag->fr_queue);
	KASSERT(frent != NULL, ("frent != NULL"));
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
		TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
	hdrlen = frent->fe_hdrlen - sizeof(struct ip6_frag);

	m = *m0 = pf_join_fragment(frag);
	frag = NULL;

	PF_FRAG_UNLOCK();

	/* Take protocol from first fragment header. */
	m = m_getptr(m, hdrlen + offsetof(struct ip6_frag, ip6f_nxt), &off);
	KASSERT(m, ("%s: short mbuf chain", __func__));
	proto = *(mtod(m, caddr_t) + off);
	m = *m0;

	/* Delete frag6 header */
	if (ip6_deletefraghdr(m, hdrlen, M_NOWAIT) != 0)
		goto fail;

	if (m->m_flags & M_PKTHDR) {
		int plen = 0;
		for (m = *m0; m; m = m->m_next)
			plen += m->m_len;
		m = *m0;
		m->m_pkthdr.len = plen;
	}

	if ((mtag = m_tag_get(PF_REASSEMBLED, sizeof(struct pf_fragment_tag),
	    M_NOWAIT)) == NULL)
		goto fail;
	ftag = (struct pf_fragment_tag *)(mtag + 1);
	ftag->ft_hdrlen = hdrlen;
	ftag->ft_extoff = extoff;
	ftag->ft_maxlen = maxlen;
	ftag->ft_id = frag_id;
	m_tag_prepend(m, mtag);

	ip6 = mtod(m, struct ip6_hdr *);
	ip6->ip6_plen = htons(hdrlen - sizeof(struct ip6_hdr) + total);
	if (extoff) {
		/* Write protocol into next field of last extension header. */
		m = m_getptr(m, extoff + offsetof(struct ip6_ext, ip6e_nxt),
		    &off);
		KASSERT(m, ("%s: short mbuf chain", __func__));
		*(mtod(m, char *) + off) = proto;
		m = *m0;
	} else
		ip6->ip6_nxt = proto;

	if (hdrlen - sizeof(struct ip6_hdr) + total > IPV6_MAXPACKET) {
		DPFPRINTF(("drop: too big: %d", total));
		ip6->ip6_plen = 0;
		REASON_SET(reason, PFRES_SHORT);
		/* PF_DROP requires a valid mbuf *m0 in pf_test6(). */
		return (PF_DROP);
	}

	DPFPRINTF(("complete: %p(%d)", m, ntohs(ip6->ip6_plen)));
	return (PF_PASS);

fail:
	REASON_SET(reason, PFRES_MEMORY);
	/* PF_DROP requires a valid mbuf *m0 in pf_test6(), will free later. */
	return (PF_DROP);
}
#endif	/* INET6 */

#ifdef INET6
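/*
 * pf reassembles IPv6 fragments so that the filter sees complete packets,
 * but a forwarding node must not emit larger fragments than the original
 * sender did.  Re-fragment the packet on output using the largest
 * fragment payload recorded at reassembly time (rounded down to a
 * multiple of 8) and the original fragment id.
 */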
int
pf_refragment6(struct ifnet *ifp, struct mbuf **m0, struct m_tag *mtag)
{
	struct mbuf		*m = *m0, *t;
	struct pf_fragment_tag	*ftag = (struct pf_fragment_tag *)(mtag + 1);
	struct pf_pdesc		 pd;
	uint32_t		 frag_id;
	uint16_t		 hdrlen, extoff, maxlen;
	uint8_t			 proto;
	int			 error, action;

	hdrlen = ftag->ft_hdrlen;
	extoff = ftag->ft_extoff;
	maxlen = ftag->ft_maxlen;
	frag_id = ftag->ft_id;
	m_tag_delete(m, mtag);
	mtag = NULL;
	ftag = NULL;

	if (extoff) {
		int off;

		/* Use protocol from next field of last extension header */
		m = m_getptr(m, extoff + offsetof(struct ip6_ext, ip6e_nxt),
		    &off);
		KASSERT((m != NULL), ("pf_refragment6: short mbuf chain"));
		proto = *(mtod(m, caddr_t) + off);
		*(mtod(m, char *) + off) = IPPROTO_FRAGMENT;
		m = *m0;
	} else {
		struct ip6_hdr *hdr;

		hdr = mtod(m, struct ip6_hdr *);
		proto = hdr->ip6_nxt;
		hdr->ip6_nxt = IPPROTO_FRAGMENT;
	}

	/* The MTU must be a multiple of 8 bytes, or we risk doing the
	 * fragmentation wrong. */
	maxlen = maxlen & ~7;

	/*
	 * Maxlen may be less than 8 if there was only a single
	 * fragment.  As it was fragmented before, add a fragment
	 * header also for a single fragment.  If total or maxlen
	 * is less than 8, ip6_fragment() will return EMSGSIZE and
	 * we drop the packet.
	 */
	error = ip6_fragment(ifp, m, hdrlen, proto, maxlen, frag_id);
	m = (*m0)->m_nextpkt;
	(*m0)->m_nextpkt = NULL;
	if (error == 0) {
		/* The first mbuf contains the unfragmented packet. */
		m_freem(*m0);
		*m0 = NULL;
		action = PF_PASS;
	} else {
		/* Drop expects an mbuf to free. */
		DPFPRINTF(("refragment error %d", error));
		action = PF_DROP;
	}
	for (t = m; m; m = t) {
		t = m->m_nextpkt;
		m->m_nextpkt = NULL;
		m->m_flags |= M_SKIP_FIREWALL;
		memset(&pd, 0, sizeof(pd));
		pd.pf_mtag = pf_find_mtag(m);
		if (error == 0)
			ip6_forward(m, 0);
		else
			m_freem(m);
	}

	return (action);
}
#endif /* INET6 */

#ifdef INET
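/*
 * Normalize an IPv4 packet according to the first matching scrub rule,
 * reassembling fragments as a side effect.  As a hypothetical example
 * (not taken from this file), a pf.conf line such as
 *
 *	scrub in all no-df random-id min-ttl 64 max-mss 1440 \
 *	    fragment reassemble
 *
 * would exercise most of the paths below.
 */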
int
pf_normalize_ip(struct mbuf **m0, int dir, struct pfi_kif *kif, u_short *reason,
    struct pf_pdesc *pd)
{
	struct mbuf		*m = *m0;
	struct pf_rule		*r;
	struct ip		*h = mtod(m, struct ip *);
	int			 mff = (ntohs(h->ip_off) & IP_MF);
	int			 hlen = h->ip_hl << 2;
	u_int16_t		 fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
	u_int16_t		 max;
	int			 ip_len;
	int			 ip_off;
	int			 tag = -1;
	int			 verdict;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	while (r != NULL) {
		r->evaluations++;
		if (pfi_kif_match(r->kif, kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != AF_INET)
			r = r->skip[PF_SKIP_AF].ptr;
		else if (r->proto && r->proto != h->ip_p)
			r = r->skip[PF_SKIP_PROTO].ptr;
		else if (PF_MISMATCHAW(&r->src.addr,
		    (struct pf_addr *)&h->ip_src.s_addr, AF_INET,
		    r->src.neg, kif, M_GETFIB(m)))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr,
		    (struct pf_addr *)&h->ip_dst.s_addr, AF_INET,
		    r->dst.neg, NULL, M_GETFIB(m)))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else if (r->match_tag && !pf_match_tag(m, r, &tag,
		    pd->pf_mtag ? pd->pf_mtag->tag : 0))
			r = TAILQ_NEXT(r, entries);
		else
			break;
	}

	if (r == NULL || r->action == PF_NOSCRUB)
		return (PF_PASS);
	else {
		r->packets[dir == PF_OUT]++;
		r->bytes[dir == PF_OUT] += pd->tot_len;
	}

	/* Check for illegal packets */
	if (hlen < (int)sizeof(struct ip)) {
		REASON_SET(reason, PFRES_NORM);
		goto drop;
	}

	if (hlen > ntohs(h->ip_len)) {
		REASON_SET(reason, PFRES_NORM);
		goto drop;
	}

	/* Clear IP_DF if the rule uses the no-df option */
	if (r->rule_flag & PFRULE_NODF && h->ip_off & htons(IP_DF)) {
		u_int16_t ip_off = h->ip_off;

		h->ip_off &= htons(~IP_DF);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
	}

	/* We will need other tests here */
	if (!fragoff && !mff)
		goto no_fragment;

	/* We're dealing with a fragment now. Don't allow fragments
	 * with IP_DF to enter the cache. If the flag was cleared by
	 * no-df above, fine. Otherwise drop it.
	 */
	if (h->ip_off & htons(IP_DF)) {
		DPFPRINTF(("IP_DF\n"));
		goto bad;
	}

	ip_len = ntohs(h->ip_len) - hlen;
	ip_off = (ntohs(h->ip_off) & IP_OFFMASK) << 3;

	/* All fragments are 8 byte aligned */
	if (mff && (ip_len & 0x7)) {
		DPFPRINTF(("mff and %d\n", ip_len));
		goto bad;
	}

	/* Respect maximum length */
	if (fragoff + ip_len > IP_MAXPACKET) {
		DPFPRINTF(("max packet %d\n", fragoff + ip_len));
		goto bad;
	}
	max = fragoff + ip_len;

	/* Fully buffer all of the fragments.
	 * Might return a completely reassembled mbuf, or NULL */
	PF_FRAG_LOCK();
	DPFPRINTF(("reass frag %d @ %d-%d\n", h->ip_id, fragoff, max));
	verdict = pf_reassemble(m0, h, dir, reason);
	PF_FRAG_UNLOCK();

	if (verdict != PF_PASS)
		return (PF_DROP);

	m = *m0;
	if (m == NULL)
		return (PF_DROP);

	h = mtod(m, struct ip *);

 no_fragment:
	/* At this point, only IP_DF is allowed in ip_off */
	if (h->ip_off & ~htons(IP_DF)) {
		u_int16_t ip_off = h->ip_off;

		h->ip_off &= htons(IP_DF);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
	}

	pf_scrub_ip(&m, r->rule_flag, r->min_ttl, r->set_tos);

	return (PF_PASS);

 bad:
	DPFPRINTF(("dropping bad fragment\n"));
	REASON_SET(reason, PFRES_FRAG);
 drop:
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET, dir, *reason, r, NULL, NULL, pd,
		    1);

	return (PF_DROP);
}
#endif	/* INET */

#ifdef INET6
int
pf_normalize_ip6(struct mbuf **m0, int dir, struct pfi_kif *kif,
    u_short *reason, struct pf_pdesc *pd)
{
	struct mbuf		*m = *m0;
	struct pf_rule		*r;
	struct ip6_hdr		*h = mtod(m, struct ip6_hdr *);
	int			 extoff;
	int			 off;
	struct ip6_ext		 ext;
	struct ip6_opt		 opt;
	struct ip6_opt_jumbo	 jumbo;
	struct ip6_frag		 frag;
	u_int32_t		 jumbolen = 0, plen;
	int			 optend;
	int			 ooff;
	u_int8_t		 proto;
	int			 terminal;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	while (r != NULL) {
		r->evaluations++;
		if (pfi_kif_match(r->kif, kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != AF_INET6)
			r = r->skip[PF_SKIP_AF].ptr;
#if 0 /* header chain! */
		else if (r->proto && r->proto != h->ip6_nxt)
			r = r->skip[PF_SKIP_PROTO].ptr;
#endif
		else if (PF_MISMATCHAW(&r->src.addr,
		    (struct pf_addr *)&h->ip6_src, AF_INET6,
		    r->src.neg, kif, M_GETFIB(m)))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr,
		    (struct pf_addr *)&h->ip6_dst, AF_INET6,
		    r->dst.neg, NULL, M_GETFIB(m)))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else
			break;
	}

	if (r == NULL || r->action == PF_NOSCRUB)
		return (PF_PASS);
	else {
		r->packets[dir == PF_OUT]++;
		r->bytes[dir == PF_OUT] += pd->tot_len;
	}

	/* Check for illegal packets */
	if (sizeof(struct ip6_hdr) + IPV6_MAXPACKET < m->m_pkthdr.len)
		goto drop;

	extoff = 0;
	off = sizeof(struct ip6_hdr);
	proto = h->ip6_nxt;
	terminal = 0;
	do {
		switch (proto) {
		case IPPROTO_FRAGMENT:
			goto fragment;
			break;
		case IPPROTO_AH:
		case IPPROTO_ROUTING:
		case IPPROTO_DSTOPTS:
			if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
			    NULL, AF_INET6))
				goto shortpkt;
			extoff = off;
			if (proto == IPPROTO_AH)
				off += (ext.ip6e_len + 2) * 4;
			else
				off += (ext.ip6e_len + 1) * 8;
			proto = ext.ip6e_nxt;
			break;
		case IPPROTO_HOPOPTS:
			if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
			    NULL, AF_INET6))
				goto shortpkt;
			extoff = off;
			optend = off + (ext.ip6e_len + 1) * 8;
			ooff = off + sizeof(ext);
			do {
				if (!pf_pull_hdr(m, ooff, &opt.ip6o_type,
				    sizeof(opt.ip6o_type), NULL, NULL,
				    AF_INET6))
					goto shortpkt;
				if (opt.ip6o_type == IP6OPT_PAD1) {
					ooff++;
					continue;
				}
				if (!pf_pull_hdr(m, ooff, &opt, sizeof(opt),
				    NULL, NULL, AF_INET6))
					goto shortpkt;
				if (ooff + sizeof(opt) + opt.ip6o_len > optend)
					goto drop;
				switch (opt.ip6o_type) {
				case IP6OPT_JUMBO:
					if (h->ip6_plen != 0)
						goto drop;
					if (!pf_pull_hdr(m, ooff, &jumbo,
					    sizeof(jumbo), NULL, NULL,
					    AF_INET6))
						goto shortpkt;
					memcpy(&jumbolen, jumbo.ip6oj_jumbo_len,
					    sizeof(jumbolen));
					jumbolen = ntohl(jumbolen);
					if (jumbolen <= IPV6_MAXPACKET)
						goto drop;
					if (sizeof(struct ip6_hdr) + jumbolen !=
					    m->m_pkthdr.len)
						goto drop;
					break;
				default:
					break;
				}
				ooff += sizeof(opt) + opt.ip6o_len;
			} while (ooff < optend);

			off = optend;
			proto = ext.ip6e_nxt;
			break;
		default:
			terminal = 1;
			break;
		}
	} while (!terminal);

	/* jumbo payload option must be present, or plen > 0 */
	if (ntohs(h->ip6_plen) == 0)
		plen = jumbolen;
	else
		plen = ntohs(h->ip6_plen);
	if (plen == 0)
		goto drop;
	if (sizeof(struct ip6_hdr) + plen > m->m_pkthdr.len)
		goto shortpkt;

	pf_scrub_ip6(&m, r->min_ttl);

	return (PF_PASS);

 fragment:
	/* Jumbo payload packets cannot be fragmented. */
	plen = ntohs(h->ip6_plen);
	if (plen == 0 || jumbolen)
		goto drop;
	if (sizeof(struct ip6_hdr) + plen > m->m_pkthdr.len)
		goto shortpkt;

	if (!pf_pull_hdr(m, off, &frag, sizeof(frag), NULL, NULL, AF_INET6))
		goto shortpkt;

	/* Offset now points to data portion. */
	off += sizeof(frag);

	/* Returns PF_DROP; or PF_PASS with *m0 either NULL or the
	 * completely reassembled mbuf. */
	if (pf_reassemble6(m0, h, &frag, off, extoff, reason) != PF_PASS)
		return (PF_DROP);
	m = *m0;
	if (m == NULL)
		return (PF_DROP);

	pd->flags |= PFDESC_IP_REAS;
	return (PF_PASS);

 shortpkt:
	REASON_SET(reason, PFRES_SHORT);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET6, dir, *reason, r, NULL, NULL, pd,
		    1);
	return (PF_DROP);

 drop:
	REASON_SET(reason, PFRES_NORM);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET6, dir, *reason, r, NULL, NULL, pd,
		    1);
	return (PF_DROP);
}
#endif /* INET6 */

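/*
 * Sanitize a TCP segment according to the first matching scrub rule:
 * reject illegal flag combinations, clear the reserved header bits and
 * a stray urgent pointer, and optionally clamp the MSS option.  Every
 * rewrite is folded into the checksum incrementally.
 */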
int
pf_normalize_tcp(int dir, struct pfi_kif *kif, struct mbuf *m, int ipoff,
    int off, void *h, struct pf_pdesc *pd)
{
	struct pf_rule	*r, *rm = NULL;
	struct tcphdr	*th = pd->hdr.tcp;
	int		 rewrite = 0;
	u_short		 reason;
	u_int8_t	 flags;
	sa_family_t	 af = pd->af;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	while (r != NULL) {
		r->evaluations++;
		if (pfi_kif_match(r->kif, kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != af)
			r = r->skip[PF_SKIP_AF].ptr;
		else if (r->proto && r->proto != pd->proto)
			r = r->skip[PF_SKIP_PROTO].ptr;
		else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
		    r->src.neg, kif, M_GETFIB(m)))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (r->src.port_op && !pf_match_port(r->src.port_op,
			    r->src.port[0], r->src.port[1], th->th_sport))
			r = r->skip[PF_SKIP_SRC_PORT].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
		    r->dst.neg, NULL, M_GETFIB(m)))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
			    r->dst.port[0], r->dst.port[1], th->th_dport))
			r = r->skip[PF_SKIP_DST_PORT].ptr;
		else if (r->os_fingerprint != PF_OSFP_ANY && !pf_osfp_match(
			    pf_osfp_fingerprint(pd, m, off, th),
			    r->os_fingerprint))
			r = TAILQ_NEXT(r, entries);
		else {
			rm = r;
			break;
		}
	}

	if (rm == NULL || rm->action == PF_NOSCRUB)
		return (PF_PASS);
	else {
		r->packets[dir == PF_OUT]++;
		r->bytes[dir == PF_OUT] += pd->tot_len;
	}

	if (rm->rule_flag & PFRULE_REASSEMBLE_TCP)
		pd->flags |= PFDESC_TCP_NORM;

	flags = th->th_flags;
	if (flags & TH_SYN) {
		/* Illegal packet */
		if (flags & TH_RST)
			goto tcp_drop;

		if (flags & TH_FIN)
			goto tcp_drop;
	} else {
		/* Illegal packet */
		if (!(flags & (TH_ACK|TH_RST)))
			goto tcp_drop;
	}

	if (!(flags & TH_ACK)) {
		/* These flags are only valid if ACK is set */
		if ((flags & TH_FIN) || (flags & TH_PUSH) || (flags & TH_URG))
			goto tcp_drop;
	}

	/* Check for illegal header length */
	if (th->th_off < (sizeof(struct tcphdr) >> 2))
		goto tcp_drop;

	/* If flags changed, or reserved data set, then adjust */
	if (flags != th->th_flags || th->th_x2 != 0) {
		u_int16_t	ov, nv;

		ov = *(u_int16_t *)(&th->th_ack + 1);
		th->th_flags = flags;
		th->th_x2 = 0;
		nv = *(u_int16_t *)(&th->th_ack + 1);

		th->th_sum = pf_proto_cksum_fixup(m, th->th_sum, ov, nv, 0);
		rewrite = 1;
	}

	/* Remove urgent pointer, if TH_URG is not set */
	if (!(flags & TH_URG) && th->th_urp) {
		th->th_sum = pf_proto_cksum_fixup(m, th->th_sum, th->th_urp,
		    0, 0);
		th->th_urp = 0;
		rewrite = 1;
	}

	/* Process options */
	if (r->max_mss && pf_normalize_tcpopt(r, m, th, off, pd->af))
		rewrite = 1;

	/* copy back packet headers if we sanitized */
	if (rewrite)
		m_copyback(m, off, sizeof(*th), (caddr_t)th);

	return (PF_PASS);

 tcp_drop:
	REASON_SET(&reason, PFRES_NORM);
	if (rm != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET, dir, reason, r, NULL, NULL, pd,
		    1);
	return (PF_DROP);
}

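/*
 * Allocate and seed the per-peer scrub state: record the initial TTL
 * and, on a SYN carrying a timestamp option, a random per-connection
 * modulator (pfss_ts_mod) that is later added to every outgoing tsval
 * by pf_normalize_tcp_stateful().
 */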
int
pf_normalize_tcp_init(struct mbuf *m, int off, struct pf_pdesc *pd,
    struct tcphdr *th, struct pf_state_peer *src, struct pf_state_peer *dst)
{
	u_int32_t tsval, tsecr;
	u_int8_t hdr[60];
	u_int8_t *opt;

	KASSERT((src->scrub == NULL),
	    ("pf_normalize_tcp_init: src->scrub != NULL"));

	src->scrub = uma_zalloc(V_pf_state_scrub_z, M_ZERO | M_NOWAIT);
	if (src->scrub == NULL)
		return (1);

	switch (pd->af) {
#ifdef INET
	case AF_INET: {
		struct ip *h = mtod(m, struct ip *);
		src->scrub->pfss_ttl = h->ip_ttl;
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
		src->scrub->pfss_ttl = h->ip6_hlim;
		break;
	}
#endif /* INET6 */
	}


	/*
	 * All normalizations below are only begun if we see the start of
	 * the connections.  They must all set an enabled bit in pfss_flags
	 */
	if ((th->th_flags & TH_SYN) == 0)
		return (0);


	if (th->th_off > (sizeof(struct tcphdr) >> 2) && src->scrub &&
	    pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
		/* Diddle with TCP options */
		int hlen;
		opt = hdr + sizeof(struct tcphdr);
		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
		while (hlen >= TCPOLEN_TIMESTAMP) {
			switch (*opt) {
			case TCPOPT_EOL:	/* FALLTHROUGH */
			case TCPOPT_NOP:
				opt++;
				hlen--;
				break;
			case TCPOPT_TIMESTAMP:
				if (opt[1] >= TCPOLEN_TIMESTAMP) {
					src->scrub->pfss_flags |=
					    PFSS_TIMESTAMP;
					src->scrub->pfss_ts_mod =
					    htonl(arc4random());

					/* note PFSS_PAWS not set yet */
					memcpy(&tsval, &opt[2],
					    sizeof(u_int32_t));
					memcpy(&tsecr, &opt[6],
					    sizeof(u_int32_t));
					src->scrub->pfss_tsval0 = ntohl(tsval);
					src->scrub->pfss_tsval = ntohl(tsval);
					src->scrub->pfss_tsecr = ntohl(tsecr);
					getmicrouptime(&src->scrub->pfss_last);
				}
				/* FALLTHROUGH */
			default:
				hlen -= MAX(opt[1], 2);
				opt += MAX(opt[1], 2);
				break;
			}
		}
	}

	return (0);
}

void
pf_normalize_tcp_cleanup(struct pf_state *state)
{
	if (state->src.scrub)
		uma_zfree(V_pf_state_scrub_z, state->src.scrub);
	if (state->dst.scrub)
		uma_zfree(V_pf_state_scrub_z, state->dst.scrub);

	/* Someday... flush the TCP segment reassembly descriptors. */
}

int
pf_normalize_tcp_stateful(struct mbuf *m, int off, struct pf_pdesc *pd,
    u_short *reason, struct tcphdr *th, struct pf_state *state,
    struct pf_state_peer *src, struct pf_state_peer *dst, int *writeback)
{
	struct timeval uptime;
	u_int32_t tsval, tsecr;
	u_int tsval_from_last;
	u_int8_t hdr[60];
	u_int8_t *opt;
	int copyback = 0;
	int got_ts = 0;

	KASSERT((src->scrub || dst->scrub),
	    ("%s: neither src->scrub nor dst->scrub set", __func__));

	/*
	 * Enforce the minimum TTL seen for this connection.  Negate a common
	 * technique to evade an intrusion detection system and confuse
	 * firewall state code.
	 */
	switch (pd->af) {
#ifdef INET
	case AF_INET: {
		if (src->scrub) {
			struct ip *h = mtod(m, struct ip *);
			if (h->ip_ttl > src->scrub->pfss_ttl)
				src->scrub->pfss_ttl = h->ip_ttl;
			h->ip_ttl = src->scrub->pfss_ttl;
		}
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		if (src->scrub) {
			struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
			if (h->ip6_hlim > src->scrub->pfss_ttl)
				src->scrub->pfss_ttl = h->ip6_hlim;
			h->ip6_hlim = src->scrub->pfss_ttl;
		}
		break;
	}
#endif /* INET6 */
	}

	if (th->th_off > (sizeof(struct tcphdr) >> 2) &&
	    ((src->scrub && (src->scrub->pfss_flags & PFSS_TIMESTAMP)) ||
	    (dst->scrub && (dst->scrub->pfss_flags & PFSS_TIMESTAMP))) &&
	    pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
		/* Diddle with TCP options */
		int hlen;
		opt = hdr + sizeof(struct tcphdr);
		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
		while (hlen >= TCPOLEN_TIMESTAMP) {
			switch (*opt) {
			case TCPOPT_EOL:	/* FALLTHROUGH */
			case TCPOPT_NOP:
				opt++;
				hlen--;
				break;
			case TCPOPT_TIMESTAMP:
				/* Modulate the timestamps.  Can be used for
				 * NAT detection, OS uptime determination or
				 * reboot detection.
				 */

				if (got_ts) {
					/* Huh?  Multiple timestamps!? */
					if (V_pf_status.debug >= PF_DEBUG_MISC) {
						DPFPRINTF(("multiple TS??"));
						pf_print_state(state);
						printf("\n");
					}
					REASON_SET(reason, PFRES_TS);
					return (PF_DROP);
				}
				if (opt[1] >= TCPOLEN_TIMESTAMP) {
					memcpy(&tsval, &opt[2],
					    sizeof(u_int32_t));
					if (tsval && src->scrub &&
					    (src->scrub->pfss_flags &
					    PFSS_TIMESTAMP)) {
						tsval = ntohl(tsval);
						pf_change_proto_a(m, &opt[2],
						    &th->th_sum,
						    htonl(tsval +
						    src->scrub->pfss_ts_mod),
						    0);
						copyback = 1;
					}

					/* Modulate TS reply iff valid (!0) */
					memcpy(&tsecr, &opt[6],
					    sizeof(u_int32_t));
					if (tsecr && dst->scrub &&
					    (dst->scrub->pfss_flags &
					    PFSS_TIMESTAMP)) {
						tsecr = ntohl(tsecr)
						    - dst->scrub->pfss_ts_mod;
						pf_change_proto_a(m, &opt[6],
						    &th->th_sum, htonl(tsecr),
						    0);
						copyback = 1;
					}
					got_ts = 1;
				}
				/* FALLTHROUGH */
			default:
				hlen -= MAX(opt[1], 2);
				opt += MAX(opt[1], 2);
				break;
			}
		}
		if (copyback) {
			/* Copy back the options; the caller copies back
			 * the header */
			*writeback = 1;
			m_copyback(m, off + sizeof(struct tcphdr),
			    (th->th_off << 2) - sizeof(struct tcphdr), hdr +
			    sizeof(struct tcphdr));
		}
	}


	/*
	 * Must invalidate PAWS checks on connections idle for too long.
	 * The fastest allowed timestamp clock is 1ms.  That turns out to
	 * be about 24 days before it wraps.  XXX Right now our lowerbound
	 * TS echo check only works for the first 12 days of a connection
	 * when the TS has exhausted half its 32bit space
	 */
#define TS_MAX_IDLE	(24*24*60*60)
#define TS_MAX_CONN	(12*24*60*60)	/* XXX remove when better tsecr check */

	getmicrouptime(&uptime);
	if (src->scrub && (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (uptime.tv_sec - src->scrub->pfss_last.tv_sec > TS_MAX_IDLE ||
	    time_uptime - state->creation > TS_MAX_CONN))  {
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			DPFPRINTF(("src idled out of PAWS\n"));
			pf_print_state(state);
			printf("\n");
		}
		src->scrub->pfss_flags = (src->scrub->pfss_flags & ~PFSS_PAWS)
		    | PFSS_PAWS_IDLED;
	}
	if (dst->scrub && (dst->scrub->pfss_flags & PFSS_PAWS) &&
	    uptime.tv_sec - dst->scrub->pfss_last.tv_sec > TS_MAX_IDLE) {
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			DPFPRINTF(("dst idled out of PAWS\n"));
			pf_print_state(state);
			printf("\n");
		}
		dst->scrub->pfss_flags = (dst->scrub->pfss_flags & ~PFSS_PAWS)
		    | PFSS_PAWS_IDLED;
	}

	if (got_ts && src->scrub && dst->scrub &&
	    (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (dst->scrub->pfss_flags & PFSS_PAWS)) {
		/* Validate that the timestamps are "in-window".
		 * RFC1323 describes TCP Timestamp options that allow
		 * measurement of RTT (round trip time) and PAWS
		 * (protection against wrapped sequence numbers).  PAWS
		 * gives us a set of rules for rejecting packets on
		 * long fat pipes (packets that were somehow delayed
		 * in transit longer than the time it took to send the
		 * full TCP sequence space of 4Gb).  We can use these
		 * rules and infer a few others that will let us treat
		 * the 32bit timestamp and the 32bit echoed timestamp
		 * as sequence numbers to prevent a blind attacker from
		 * inserting packets into a connection.
		 *
		 * RFC1323 tells us:
		 *  - The timestamp on this packet must be greater than
		 *    or equal to the last value echoed by the other
		 *    endpoint.  The RFC says those will be discarded
		 *    since it is a dup that has already been acked.
		 *    This gives us a lowerbound on the timestamp.
		 *        timestamp >= other last echoed timestamp
		 *  - The timestamp will be less than or equal to
		 *    the last timestamp plus the time between the
		 *    last packet and now.  The RFC defines the max
		 *    clock rate as one tick per millisecond.  We will
		 *    allow clocks to be up to 10% fast and will allow
		 *    a total difference of 30 seconds due to a route
		 *    change.  And this gives us an upperbound on the
		 *    timestamp.
		 *        timestamp <= last timestamp + max ticks
		 *    We have to be careful here.  Windows will send an
		 *    initial timestamp of zero and then initialize it
		 *    to a random value after the 3whs; presumably to
		 *    avoid a DoS by having to call an expensive RNG
		 *    during a SYN flood.  Proof MS has at least one
		 *    good security geek.
		 *
		 *  - The TCP timestamp option must also echo the other
		 *    endpoint's timestamp.  The timestamp echoed is the
		 *    one carried on the earliest unacknowledged segment
		 *    on the left edge of the sequence window.  The RFC
		 *    states that the host will reject any echoed
		 *    timestamps that were larger than any ever sent.
		 *    This gives us an upperbound on the TS echo.
		 *        tsecr <= largest_tsval
		 *  - The lowerbound on the TS echo is a little more
		 *    tricky to determine.  The other endpoint's echoed
		 *    values will not decrease.  But there may be
		 *    network conditions that re-order packets and
		 *    cause our view of them to decrease.  For now the
		 *    only lowerbound we can safely determine is that
		 *    the TS echo will never be less than the original
		 *    TS.  XXX There is probably a better lowerbound.
		 *    Remove TS_MAX_CONN with better lowerbound check.
		 *        tsecr >= other original TS
		 *
		 * It is also important to note that the fastest
		 * timestamp clock of 1ms will wrap its 32bit space in
		 * 24 days.  So we just disable TS checking after 24
		 * days of idle time.  We actually must use a 12d
		 * connection limit until we can come up with a better
		 * lowerbound to the TS echo check.
		 */
		struct timeval delta_ts;
		int ts_fudge;


		/*
		 * PFTM_TS_DIFF is how many seconds of leeway to allow
		 * a host's timestamp.  This can happen if the previous
		 * packet got delayed in transit for much longer than
		 * this packet.
		 */
		if ((ts_fudge = state->rule.ptr->timeout[PFTM_TS_DIFF]) == 0)
			ts_fudge = V_pf_default_rule.timeout[PFTM_TS_DIFF];

		/* Calculate max ticks since the last timestamp */
#define TS_MAXFREQ	1100		/* RFC max TS freq of 1Khz + 10% skew */
#define TS_MICROSECS	1000000		/* microseconds per second */
		delta_ts = uptime;
		timevalsub(&delta_ts, &src->scrub->pfss_last);
		tsval_from_last = (delta_ts.tv_sec + ts_fudge) * TS_MAXFREQ;
		tsval_from_last += delta_ts.tv_usec / (TS_MICROSECS/TS_MAXFREQ);

		if ((src->state >= TCPS_ESTABLISHED &&
		    dst->state >= TCPS_ESTABLISHED) &&
		    (SEQ_LT(tsval, dst->scrub->pfss_tsecr) ||
		    SEQ_GT(tsval, src->scrub->pfss_tsval + tsval_from_last) ||
		    (tsecr && (SEQ_GT(tsecr, dst->scrub->pfss_tsval) ||
		    SEQ_LT(tsecr, dst->scrub->pfss_tsval0))))) {
			/* Bad RFC1323 implementation or an insertion attack.
			 *
			 * - Solaris 2.6 and 2.7 are known to send another ACK
			 *   after the FIN,FIN|ACK,ACK closing that carries
			 *   an old timestamp.
			 */

			DPFPRINTF(("Timestamp failed %c%c%c%c\n",
			    SEQ_LT(tsval, dst->scrub->pfss_tsecr) ? '0' : ' ',
			    SEQ_GT(tsval, src->scrub->pfss_tsval +
			    tsval_from_last) ? '1' : ' ',
			    SEQ_GT(tsecr, dst->scrub->pfss_tsval) ? '2' : ' ',
			    SEQ_LT(tsecr, dst->scrub->pfss_tsval0)? '3' : ' '));
			DPFPRINTF((" tsval: %u  tsecr: %u  +ticks: %u  "
			    "idle: %jus %lums\n",
			    tsval, tsecr, tsval_from_last,
			    (uintmax_t)delta_ts.tv_sec,
			    delta_ts.tv_usec / 1000));
			DPFPRINTF((" src->tsval: %u  tsecr: %u\n",
			    src->scrub->pfss_tsval, src->scrub->pfss_tsecr));
			DPFPRINTF((" dst->tsval: %u  tsecr: %u  tsval0: %u"
			    "\n", dst->scrub->pfss_tsval,
			    dst->scrub->pfss_tsecr, dst->scrub->pfss_tsval0));
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
				pf_print_state(state);
				pf_print_flags(th->th_flags);
				printf("\n");
			}
			REASON_SET(reason, PFRES_TS);
			return (PF_DROP);
		}

		/* XXX I'd really like to require tsecr but it's optional */

	} else if (!got_ts && (th->th_flags & TH_RST) == 0 &&
	    ((src->state == TCPS_ESTABLISHED && dst->state == TCPS_ESTABLISHED)
	    || pd->p_len > 0 || (th->th_flags & TH_SYN)) &&
	    src->scrub && dst->scrub &&
	    (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (dst->scrub->pfss_flags & PFSS_PAWS)) {
		/* Didn't send a timestamp.  Timestamps aren't really useful
		 * when:
		 *  - the connection is opening or closing (the option is
		 *    often not even sent then).  But we must not let an
		 *    attacker put a FIN on a data packet to sneak it
		 *    through our ESTABLISHED check.
		 *  - on a TCP reset.  The RFC suggests not even looking
		 *    at the TS.
		 *  - on an empty ACK.  The TS will not be echoed so it will
		 *    probably not help keep the RTT calculation in sync and
		 *    there isn't as much danger when the sequence numbers
		 *    get wrapped.  So some stacks don't include a TS on
		 *    empty ACKs :-(
		 *
		 * To minimize the disruption to mostly RFC1323 conformant
		 * stacks, we will only require timestamps on data packets.
		 *
		 * And what do ya know, we cannot require timestamps on data
		 * packets.  There appear to be devices that do legitimate
		 * TCP connection hijacking.  There are HTTP devices that allow
		 * a 3whs (with timestamps) and then buffer the HTTP request.
		 * If the intermediate device has the HTTP response cache, it
		 * will spoof the response but not bother timestamping its
		 * packets.  So we can look for the presence of a timestamp in
		 * the first data packet and if there, require it in all future
		 * packets.
		 */

		if (pd->p_len > 0 && (src->scrub->pfss_flags & PFSS_DATA_TS)) {
			/*
			 * Hey!  Someone tried to sneak a packet in.  Or the
			 * stack changed its RFC1323 behavior?!?!
			 */
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
				DPFPRINTF(("Did not receive expected RFC1323 "
				    "timestamp\n"));
				pf_print_state(state);
				pf_print_flags(th->th_flags);
				printf("\n");
			}
			REASON_SET(reason, PFRES_TS);
			return (PF_DROP);
		}
	}


	/*
	 * We will note whether a host sends its data packets with or
	 * without timestamps, and require all data packets to contain a
	 * timestamp if the first one does.  PAWS implicitly requires that
	 * all data packets be timestamped.  But I think there are
	 * middle-man devices that hijack TCP streams immediately after
	 * the 3whs and don't timestamp their packets (seen in a WWW
	 * accelerator or cache).
	 */
	if (pd->p_len > 0 && src->scrub && (src->scrub->pfss_flags &
	    (PFSS_TIMESTAMP|PFSS_DATA_TS|PFSS_DATA_NOTS)) == PFSS_TIMESTAMP) {
		if (got_ts)
			src->scrub->pfss_flags |= PFSS_DATA_TS;
		else {
			src->scrub->pfss_flags |= PFSS_DATA_NOTS;
			if (V_pf_status.debug >= PF_DEBUG_MISC && dst->scrub &&
			    (dst->scrub->pfss_flags & PFSS_TIMESTAMP)) {
				/* Don't warn if other host rejected RFC1323 */
				DPFPRINTF(("Broken RFC1323 stack did not "
				    "timestamp data packet. Disabled PAWS "
				    "security.\n"));
				pf_print_state(state);
				pf_print_flags(th->th_flags);
				printf("\n");
			}
		}
	}


	/*
	 * Update PAWS values
	 */
	if (got_ts && src->scrub && PFSS_TIMESTAMP == (src->scrub->pfss_flags &
	    (PFSS_PAWS_IDLED|PFSS_TIMESTAMP))) {
		getmicrouptime(&src->scrub->pfss_last);
		if (SEQ_GEQ(tsval, src->scrub->pfss_tsval) ||
		    (src->scrub->pfss_flags & PFSS_PAWS) == 0)
			src->scrub->pfss_tsval = tsval;

		if (tsecr) {
			if (SEQ_GEQ(tsecr, src->scrub->pfss_tsecr) ||
			    (src->scrub->pfss_flags & PFSS_PAWS) == 0)
				src->scrub->pfss_tsecr = tsecr;

			if ((src->scrub->pfss_flags & PFSS_PAWS) == 0 &&
			    (SEQ_LT(tsval, src->scrub->pfss_tsval0) ||
			    src->scrub->pfss_tsval0 == 0)) {
				/* tsval0 MUST be the lowest timestamp */
				src->scrub->pfss_tsval0 = tsval;
			}

			/* Only fully initialized after a TS gets echoed */
			if ((src->scrub->pfss_flags & PFSS_PAWS) == 0)
				src->scrub->pfss_flags |= PFSS_PAWS;
		}
	}

	/* I have a dream....  TCP segment reassembly.... */
	return (0);
}

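/*
 * Walk the TCP options and clamp any MSS option that advertises more
 * than the rule's max-mss, fixing the checksum incrementally; returns 1
 * if the options were rewritten.
 */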
static int
pf_normalize_tcpopt(struct pf_rule *r, struct mbuf *m, struct tcphdr *th,
    int off, sa_family_t af)
{
	u_int16_t	*mss;
	int		 thoff;
	int		 opt, cnt, optlen = 0;
	int		 rewrite = 0;
	u_char		 opts[TCP_MAXOLEN];
	u_char		*optp = opts;

	thoff = th->th_off << 2;
	cnt = thoff - sizeof(struct tcphdr);

	if (cnt > 0 && !pf_pull_hdr(m, off + sizeof(*th), opts, cnt,
	    NULL, NULL, af))
		return (rewrite);

	for (; cnt > 0; cnt -= optlen, optp += optlen) {
		opt = optp[0];
		if (opt == TCPOPT_EOL)
			break;
		if (opt == TCPOPT_NOP)
			optlen = 1;
		else {
			if (cnt < 2)
				break;
			optlen = optp[1];
			if (optlen < 2 || optlen > cnt)
				break;
		}
		switch (opt) {
		case TCPOPT_MAXSEG:
			mss = (u_int16_t *)(optp + 2);
			if ((ntohs(*mss)) > r->max_mss) {
				th->th_sum = pf_proto_cksum_fixup(m,
				    th->th_sum, *mss, htons(r->max_mss), 0);
				*mss = htons(r->max_mss);
				rewrite = 1;
			}
			break;
		default:
			break;
		}
	}

	if (rewrite)
		m_copyback(m, off + sizeof(*th), thoff - sizeof(*th), opts);

	return (rewrite);
}

#ifdef INET
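/*
 * In-place IPv4 header scrub.  Every field rewrite below is paired with
 * an incremental (RFC 1624 style) checksum fixup via pf_cksum_fixup(),
 * so the header checksum never has to be recomputed from scratch.
 */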
static void
pf_scrub_ip(struct mbuf **m0, u_int32_t flags, u_int8_t min_ttl, u_int8_t tos)
{
	struct mbuf		*m = *m0;
	struct ip		*h = mtod(m, struct ip *);

	/* Clear IP_DF if no-df was requested */
	if (flags & PFRULE_NODF && h->ip_off & htons(IP_DF)) {
		u_int16_t ip_off = h->ip_off;

		h->ip_off &= htons(~IP_DF);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
	}

	/* Enforce a minimum ttl, may cause endless packet loops */
	if (min_ttl && h->ip_ttl < min_ttl) {
		u_int16_t ip_ttl = h->ip_ttl;

		h->ip_ttl = min_ttl;
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_ttl, h->ip_ttl, 0);
	}

	/* Enforce tos */
	if (flags & PFRULE_SET_TOS) {
		u_int16_t	ov, nv;

		ov = *(u_int16_t *)h;
		h->ip_tos = tos | (h->ip_tos & IPTOS_ECN_MASK);
		nv = *(u_int16_t *)h;

		h->ip_sum = pf_cksum_fixup(h->ip_sum, ov, nv, 0);
	}

	/* random-id, but not for fragments */
	if (flags & PFRULE_RANDOMID && !(h->ip_off & ~htons(IP_DF))) {
		uint16_t ip_id = h->ip_id;

		ip_fillid(h);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_id, h->ip_id, 0);
	}
}
#endif /* INET */

#ifdef INET6
static void
pf_scrub_ip6(struct mbuf **m0, u_int8_t min_ttl)
{
	struct mbuf		*m = *m0;
	struct ip6_hdr		*h = mtod(m, struct ip6_hdr *);

	/* Enforce a minimum ttl, may cause endless packet loops */
	if (min_ttl && h->ip6_hlim < min_ttl)
		h->ip6_hlim = min_ttl;
}
#endif /* INET6 */