xref: /freebsd/sys/netpfil/pf/pf_norm.c (revision 70703aa922b41faedfd72211633884bb580ceeac)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright 2001 Niels Provos <provos@citi.umich.edu>
5  * Copyright 2011-2018 Alexander Bluhm <bluhm@openbsd.org>
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  *
28  *	$OpenBSD: pf_norm.c,v 1.114 2009/01/29 14:11:45 henning Exp $
29  */
30 
31 #include <sys/cdefs.h>
32 #include "opt_inet.h"
33 #include "opt_inet6.h"
34 #include "opt_pf.h"
35 
36 #include <sys/param.h>
37 #include <sys/kernel.h>
38 #include <sys/lock.h>
39 #include <sys/mbuf.h>
40 #include <sys/mutex.h>
41 #include <sys/refcount.h>
42 #include <sys/socket.h>
43 
44 #include <net/if.h>
45 #include <net/if_var.h>
46 #include <net/vnet.h>
47 #include <net/pfvar.h>
48 #include <net/if_pflog.h>
49 
50 #include <netinet/in.h>
51 #include <netinet/ip.h>
52 #include <netinet/ip_var.h>
53 #include <netinet6/in6_var.h>
54 #include <netinet6/nd6.h>
55 #include <netinet6/ip6_var.h>
56 #include <netinet6/scope6_var.h>
57 #include <netinet/tcp.h>
58 #include <netinet/tcp_fsm.h>
59 #include <netinet/tcp_seq.h>
60 #include <netinet/sctp_constants.h>
61 #include <netinet/sctp_header.h>
62 
63 #ifdef INET6
64 #include <netinet/ip6.h>
65 #endif /* INET6 */
66 
67 struct pf_frent {
68 	TAILQ_ENTRY(pf_frent)	fr_next;
69 	struct mbuf	*fe_m;
70 	uint16_t	fe_hdrlen;	/* ipv4 header length with ip options
71 					   ipv6, extension, fragment header */
72 	uint16_t	fe_extoff;	/* last extension header offset or 0 */
73 	uint16_t	fe_len;		/* fragment length */
74 	uint16_t	fe_off;		/* fragment offset */
75 	uint16_t	fe_mff;		/* more fragment flag */
76 };
77 
78 struct pf_fragment_cmp {
79 	struct pf_addr	frc_src;
80 	struct pf_addr	frc_dst;
81 	uint32_t	frc_id;
82 	sa_family_t	frc_af;
83 	uint8_t		frc_proto;
84 };
85 
86 struct pf_fragment {
87 	struct pf_fragment_cmp	fr_key;
88 #define fr_src	fr_key.frc_src
89 #define fr_dst	fr_key.frc_dst
90 #define fr_id	fr_key.frc_id
91 #define fr_af	fr_key.frc_af
92 #define fr_proto	fr_key.frc_proto
93 
94 	/* pointers to queue element */
95 	struct pf_frent	*fr_firstoff[PF_FRAG_ENTRY_POINTS];
96 	/* count entries between pointers */
97 	uint8_t	fr_entries[PF_FRAG_ENTRY_POINTS];
98 	RB_ENTRY(pf_fragment) fr_entry;
99 	TAILQ_ENTRY(pf_fragment) frag_next;
100 	uint32_t	fr_timeout;
101 	TAILQ_HEAD(pf_fragq, pf_frent) fr_queue;
102 	uint16_t	fr_maxlen;	/* maximum length of single fragment */
103 	uint16_t	fr_holes;	/* number of holes in the queue */
104 };
105 
106 VNET_DEFINE_STATIC(struct mtx, pf_frag_mtx);
107 #define V_pf_frag_mtx		VNET(pf_frag_mtx)
108 #define PF_FRAG_LOCK()		mtx_lock(&V_pf_frag_mtx)
109 #define PF_FRAG_UNLOCK()	mtx_unlock(&V_pf_frag_mtx)
110 #define PF_FRAG_ASSERT()	mtx_assert(&V_pf_frag_mtx, MA_OWNED)
111 
112 VNET_DEFINE(uma_zone_t, pf_state_scrub_z);	/* XXX: shared with pfsync */
113 
114 VNET_DEFINE_STATIC(uma_zone_t, pf_frent_z);
115 #define	V_pf_frent_z	VNET(pf_frent_z)
116 VNET_DEFINE_STATIC(uma_zone_t, pf_frag_z);
117 #define	V_pf_frag_z	VNET(pf_frag_z)
118 
119 TAILQ_HEAD(pf_fragqueue, pf_fragment);
120 TAILQ_HEAD(pf_cachequeue, pf_fragment);
121 VNET_DEFINE_STATIC(struct pf_fragqueue,	pf_fragqueue);
122 #define	V_pf_fragqueue			VNET(pf_fragqueue)
123 RB_HEAD(pf_frag_tree, pf_fragment);
124 VNET_DEFINE_STATIC(struct pf_frag_tree,	pf_frag_tree);
125 #define	V_pf_frag_tree			VNET(pf_frag_tree)
126 static int		 pf_frag_compare(struct pf_fragment *,
127 			    struct pf_fragment *);
128 static RB_PROTOTYPE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
129 static RB_GENERATE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
130 
131 static void	pf_flush_fragments(void);
132 static void	pf_free_fragment(struct pf_fragment *);
133 
134 static struct pf_frent *pf_create_fragment(u_short *);
135 static int	pf_frent_holes(struct pf_frent *frent);
136 static struct pf_fragment *pf_find_fragment(struct pf_fragment_cmp *key,
137 		    struct pf_frag_tree *tree);
138 static inline int	pf_frent_index(struct pf_frent *);
139 static int	pf_frent_insert(struct pf_fragment *,
140 			    struct pf_frent *, struct pf_frent *);
141 void			pf_frent_remove(struct pf_fragment *,
142 			    struct pf_frent *);
143 struct pf_frent		*pf_frent_previous(struct pf_fragment *,
144 			    struct pf_frent *);
145 static struct pf_fragment *pf_fillup_fragment(struct pf_fragment_cmp *,
146 		    struct pf_frent *, u_short *);
147 static struct mbuf *pf_join_fragment(struct pf_fragment *);
148 #ifdef INET
149 static int	pf_reassemble(struct mbuf **, int, u_short *);
150 #endif	/* INET */
151 #ifdef INET6
152 static int	pf_reassemble6(struct mbuf **,
153 		    struct ip6_frag *, uint16_t, uint16_t, u_short *);
154 #endif	/* INET6 */
155 
156 #define	DPFPRINTF(x) do {				\
157 	if (V_pf_status.debug >= PF_DEBUG_MISC) {	\
158 		printf("%s: ", __func__);		\
159 		printf x ;				\
160 	}						\
161 } while (0)
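
/*
 * Usage sketch: the double parentheses pass the whole argument list as a
 * single macro parameter, a pre-C99 idiom for variadic debug macros.  A
 * call such as
 *
 *	DPFPRINTF(("bad fragment: len %d\n", frent->fe_len));
 *
 * expands, when V_pf_status.debug >= PF_DEBUG_MISC, to
 *
 *	printf("%s: ", __func__);
 *	printf("bad fragment: len %d\n", frent->fe_len);
 */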
162 
163 #ifdef INET
164 static void
165 pf_ip2key(struct ip *ip, int dir, struct pf_fragment_cmp *key)
166 {
167 
168 	key->frc_src.v4 = ip->ip_src;
169 	key->frc_dst.v4 = ip->ip_dst;
170 	key->frc_af = AF_INET;
171 	key->frc_proto = ip->ip_p;
172 	key->frc_id = ip->ip_id;
173 }
174 #endif	/* INET */
175 
176 void
177 pf_normalize_init(void)
178 {
179 
180 	V_pf_frag_z = uma_zcreate("pf frags", sizeof(struct pf_fragment),
181 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
182 	V_pf_frent_z = uma_zcreate("pf frag entries", sizeof(struct pf_frent),
183 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
184 	V_pf_state_scrub_z = uma_zcreate("pf state scrubs",
185 	    sizeof(struct pf_state_scrub),  NULL, NULL, NULL, NULL,
186 	    UMA_ALIGN_PTR, 0);
187 
188 	mtx_init(&V_pf_frag_mtx, "pf fragments", NULL, MTX_DEF);
189 
190 	V_pf_limits[PF_LIMIT_FRAGS].zone = V_pf_frent_z;
191 	V_pf_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT;
192 	uma_zone_set_max(V_pf_frent_z, PFFRAG_FRENT_HIWAT);
193 	uma_zone_set_warning(V_pf_frent_z, "PF frag entries limit reached");
194 
195 	TAILQ_INIT(&V_pf_fragqueue);
196 }
197 
198 void
199 pf_normalize_cleanup(void)
200 {
201 
202 	uma_zdestroy(V_pf_state_scrub_z);
203 	uma_zdestroy(V_pf_frent_z);
204 	uma_zdestroy(V_pf_frag_z);
205 
206 	mtx_destroy(&V_pf_frag_mtx);
207 }
208 
209 static int
210 pf_frag_compare(struct pf_fragment *a, struct pf_fragment *b)
211 {
212 	int	diff;
213 
214 	if ((diff = a->fr_id - b->fr_id) != 0)
215 		return (diff);
216 	if ((diff = a->fr_proto - b->fr_proto) != 0)
217 		return (diff);
218 	if ((diff = a->fr_af - b->fr_af) != 0)
219 		return (diff);
220 	if ((diff = pf_addr_cmp(&a->fr_src, &b->fr_src, a->fr_af)) != 0)
221 		return (diff);
222 	if ((diff = pf_addr_cmp(&a->fr_dst, &b->fr_dst, a->fr_af)) != 0)
223 		return (diff);
224 	return (0);
225 }
226 
227 void
228 pf_purge_expired_fragments(void)
229 {
230 	u_int32_t	expire = time_uptime -
231 			    V_pf_default_rule.timeout[PFTM_FRAG];
232 
233 	pf_purge_fragments(expire);
234 }
235 
236 void
237 pf_purge_fragments(uint32_t expire)
238 {
239 	struct pf_fragment	*frag;
240 
241 	PF_FRAG_LOCK();
242 	while ((frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue)) != NULL) {
243 		if (frag->fr_timeout > expire)
244 			break;
245 
246 		DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
247 		pf_free_fragment(frag);
248 	}
249 
250 	PF_FRAG_UNLOCK();
251 }
252 
253 /*
254  * Try to flush old fragments to make space for new ones
255  */
256 static void
257 pf_flush_fragments(void)
258 {
259 	struct pf_fragment	*frag;
260 	int			 goal;
261 
262 	PF_FRAG_ASSERT();
263 
264 	goal = uma_zone_get_cur(V_pf_frent_z) * 9 / 10;
265 	DPFPRINTF(("trying to free %d frag entries\n", goal));
266 	while (goal < uma_zone_get_cur(V_pf_frent_z)) {
267 		frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue);
268 		if (frag)
269 			pf_free_fragment(frag);
270 		else
271 			break;
272 	}
273 }
274 
275 /*
276  * Remove a fragment from the fragment queue, free its fragment entries,
277  * and free the fragment itself.
278  */
279 static void
280 pf_free_fragment(struct pf_fragment *frag)
281 {
282 	struct pf_frent		*frent;
283 
284 	PF_FRAG_ASSERT();
285 
286 	RB_REMOVE(pf_frag_tree, &V_pf_frag_tree, frag);
287 	TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);
288 
289 	/* Free all fragment entries */
290 	while ((frent = TAILQ_FIRST(&frag->fr_queue)) != NULL) {
291 		TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);
292 
293 		m_freem(frent->fe_m);
294 		uma_zfree(V_pf_frent_z, frent);
295 	}
296 
297 	uma_zfree(V_pf_frag_z, frag);
298 }
299 
300 static struct pf_fragment *
301 pf_find_fragment(struct pf_fragment_cmp *key, struct pf_frag_tree *tree)
302 {
303 	struct pf_fragment	*frag;
304 
305 	PF_FRAG_ASSERT();
306 
307 	frag = RB_FIND(pf_frag_tree, tree, (struct pf_fragment *)key);
308 	if (frag != NULL) {
309 		TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);
310 		TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next);
311 	}
312 
313 	return (frag);
314 }
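
/*
 * The (struct pf_fragment *)key cast above works because fr_key is the
 * first member of struct pf_fragment, so a bare pf_fragment_cmp shares its
 * layout prefix and pf_frag_compare() only dereferences fr_key fields.  A
 * minimal lookup sketch, with hypothetical id/protocol values and excluded
 * from the build:
 */
#if 0
static struct pf_fragment *
pf_find_fragment_example(struct pf_addr *src, struct pf_addr *dst)
{
	struct pf_fragment_cmp	 key;
	struct pf_fragment	*frag;

	memset(&key, 0, sizeof(key));
	key.frc_src = *src;
	key.frc_dst = *dst;
	key.frc_id = 0x1234;		/* hypothetical IP id */
	key.frc_af = AF_INET;
	key.frc_proto = IPPROTO_UDP;	/* hypothetical protocol */

	PF_FRAG_LOCK();
	frag = pf_find_fragment(&key, &V_pf_frag_tree);
	/* A real caller would use frag before dropping the lock. */
	PF_FRAG_UNLOCK();

	return (frag);
}
#endif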
315 
316 static struct pf_frent *
317 pf_create_fragment(u_short *reason)
318 {
319 	struct pf_frent *frent;
320 
321 	PF_FRAG_ASSERT();
322 
323 	frent = uma_zalloc(V_pf_frent_z, M_NOWAIT);
324 	if (frent == NULL) {
325 		pf_flush_fragments();
326 		frent = uma_zalloc(V_pf_frent_z, M_NOWAIT);
327 		if (frent == NULL) {
328 			REASON_SET(reason, PFRES_MEMORY);
329 			return (NULL);
330 		}
331 	}
332 
333 	return (frent);
334 }
335 
336 /*
337  * Calculate the additional holes that were created in the fragment
338  * queue by inserting this fragment.  A fragment in the middle
339  * creates one more hole by splitting.  For each connected side,
340  * it loses one hole.
341  * Fragment entry must be in the queue when calling this function.
342  */
343 static int
344 pf_frent_holes(struct pf_frent *frent)
345 {
346 	struct pf_frent *prev = TAILQ_PREV(frent, pf_fragq, fr_next);
347 	struct pf_frent *next = TAILQ_NEXT(frent, fr_next);
348 	int holes = 1;
349 
350 	if (prev == NULL) {
351 		if (frent->fe_off == 0)
352 			holes--;
353 	} else {
354 		KASSERT(frent->fe_off != 0, ("frent->fe_off != 0"));
355 		if (frent->fe_off == prev->fe_off + prev->fe_len)
356 			holes--;
357 	}
358 	if (next == NULL) {
359 		if (!frent->fe_mff)
360 			holes--;
361 	} else {
362 		KASSERT(frent->fe_mff, ("frent->fe_mff"));
363 		if (next->fe_off == frent->fe_off + frent->fe_len)
364 			holes--;
365 	}
366 	return holes;
367 }
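
/*
 * Worked example (offsets in octets): pf_fillup_fragment() starts a new
 * queue with fr_holes = 1.  Inserting [0,8) with mff set closes the left
 * edge (prev == NULL, fe_off == 0), a net change of 0, so fr_holes stays 1
 * while the tail is open.  Inserting [16,24) with mff connects to neither
 * neighbor: +1, fr_holes == 2.  Inserting [8,16) connects on both sides:
 * 1 - 1 - 1 == -1, fr_holes == 1.  A final [24,32) without mff connects to
 * [16,24) and closes the tail: -1 again, fr_holes == 0 and the datagram is
 * complete.
 */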
368 
369 static inline int
370 pf_frent_index(struct pf_frent *frent)
371 {
372 	/*
373 	 * We have an array of 16 entry points to the queue.  A full size
374 	 * 65535 octet IP packet can have 8192 fragments.  So the queue
375 	 * traversal length is at most 512 and at most 16 entry points are
376 	 * checked.  We need 128 additional bytes on a 64 bit architecture.
377 	 */
378 	CTASSERT(((u_int16_t)0xffff &~ 7) / (0x10000 / PF_FRAG_ENTRY_POINTS) ==
379 	    16 - 1);
380 	CTASSERT(((u_int16_t)0xffff >> 3) / PF_FRAG_ENTRY_POINTS == 512 - 1);
381 
382 	return frent->fe_off / (0x10000 / PF_FRAG_ENTRY_POINTS);
383 }
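
/*
 * Worked example: with PF_FRAG_ENTRY_POINTS == 16, each entry point covers
 * 0x10000 / 16 == 4096 octets of offset space.  Offsets 0 through 4095 map
 * to index 0, offset 4096 maps to index 1, and the largest possible
 * offset, 0xfff8 (8191 << 3), maps to index 15.
 */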
384 
385 static int
386 pf_frent_insert(struct pf_fragment *frag, struct pf_frent *frent,
387     struct pf_frent *prev)
388 {
389 	int index;
390 
391 	CTASSERT(PF_FRAG_ENTRY_LIMIT <= 0xff);
392 
393 	/*
394 	 * A packet has at most 65536 octets.  With 16 entry points, each one
395 	 * spans 4096 octets.  We limit these to 64 fragments each, which
396 	 * means on average every fragment must have at least 64 octets.
397 	 */
398 	index = pf_frent_index(frent);
399 	if (frag->fr_entries[index] >= PF_FRAG_ENTRY_LIMIT)
400 		return ENOBUFS;
401 	frag->fr_entries[index]++;
402 
403 	if (prev == NULL) {
404 		TAILQ_INSERT_HEAD(&frag->fr_queue, frent, fr_next);
405 	} else {
406 		KASSERT(prev->fe_off + prev->fe_len <= frent->fe_off,
407 		    ("overlapping fragment"));
408 		TAILQ_INSERT_AFTER(&frag->fr_queue, prev, frent, fr_next);
409 	}
410 
411 	if (frag->fr_firstoff[index] == NULL) {
412 		KASSERT(prev == NULL || pf_frent_index(prev) < index,
413 		    ("prev == NULL || pf_frent_index(pref) < index"));
414 		frag->fr_firstoff[index] = frent;
415 	} else {
416 		if (frent->fe_off < frag->fr_firstoff[index]->fe_off) {
417 			KASSERT(prev == NULL || pf_frent_index(prev) < index,
418 			    ("prev == NULL || pf_frent_index(pref) < index"));
419 			frag->fr_firstoff[index] = frent;
420 		} else {
421 			KASSERT(prev != NULL, ("prev != NULL"));
422 			KASSERT(pf_frent_index(prev) == index,
423 			    ("pf_frent_index(prev) == index"));
424 		}
425 	}
426 
427 	frag->fr_holes += pf_frent_holes(frent);
428 
429 	return 0;
430 }
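
/*
 * Worked example of the limit: 16 entry points times a PF_FRAG_ENTRY_LIMIT
 * of 64 (per the comment above) caps a queue at 1024 fragments, so a 65536
 * octet packet must average at least 64 octets per fragment; a flood of
 * tiny fragments hits ENOBUFS long before the queue can degenerate into a
 * long linear search.
 */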
431 
432 void
433 pf_frent_remove(struct pf_fragment *frag, struct pf_frent *frent)
434 {
435 #ifdef INVARIANTS
436 	struct pf_frent *prev = TAILQ_PREV(frent, pf_fragq, fr_next);
437 #endif
438 	struct pf_frent *next = TAILQ_NEXT(frent, fr_next);
439 	int index;
440 
441 	frag->fr_holes -= pf_frent_holes(frent);
442 
443 	index = pf_frent_index(frent);
444 	KASSERT(frag->fr_firstoff[index] != NULL, ("frent not found"));
445 	if (frag->fr_firstoff[index]->fe_off == frent->fe_off) {
446 		if (next == NULL) {
447 			frag->fr_firstoff[index] = NULL;
448 		} else {
449 			KASSERT(frent->fe_off + frent->fe_len <= next->fe_off,
450 			    ("overlapping fragment"));
451 			if (pf_frent_index(next) == index) {
452 				frag->fr_firstoff[index] = next;
453 			} else {
454 				frag->fr_firstoff[index] = NULL;
455 			}
456 		}
457 	} else {
458 		KASSERT(frag->fr_firstoff[index]->fe_off < frent->fe_off,
459 		    ("frag->fr_firstoff[index]->fe_off < frent->fe_off"));
460 		KASSERT(prev != NULL, ("prev != NULL"));
461 		KASSERT(prev->fe_off + prev->fe_len <= frent->fe_off,
462 		    ("overlapping fragment"));
463 		KASSERT(pf_frent_index(prev) == index,
464 		    ("pf_frent_index(prev) == index"));
465 	}
466 
467 	TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);
468 
469 	KASSERT(frag->fr_entries[index] > 0, ("No fragments remaining"));
470 	frag->fr_entries[index]--;
471 }
472 
473 struct pf_frent *
474 pf_frent_previous(struct pf_fragment *frag, struct pf_frent *frent)
475 {
476 	struct pf_frent *prev, *next;
477 	int index;
478 
479 	/*
480 	 * If there are no fragments after frent, take the final one.  Assume
481 	 * that the global queue is not empty.
482 	 */
483 	prev = TAILQ_LAST(&frag->fr_queue, pf_fragq);
484 	KASSERT(prev != NULL, ("prev != NULL"));
485 	if (prev->fe_off <= frent->fe_off)
486 		return prev;
487 	/*
488 	 * We want to find a fragment entry that is before frent, but still
489 	 * close to it.  Find the first fragment entry that is in the same
490 	 * entry point or in the first entry point after that.  As we have
491 	 * already checked that there are entries behind frent, this will
492 	 * succeed.
493 	 */
494 	for (index = pf_frent_index(frent); index < PF_FRAG_ENTRY_POINTS;
495 	    index++) {
496 		prev = frag->fr_firstoff[index];
497 		if (prev != NULL)
498 			break;
499 	}
500 	KASSERT(prev != NULL, ("prev != NULL"));
501 	/*
502 	 * In prev we may have a fragment from the same entry point that is
503 	 * before frent, or one that is just one position behind frent.
504 	 * In the latter case, we go back one step and have the predecessor.
505 	 * There may be none if the new fragment will be the first one.
506 	 */
507 	if (prev->fe_off > frent->fe_off) {
508 		prev = TAILQ_PREV(prev, pf_fragq, fr_next);
509 		if (prev == NULL)
510 			return NULL;
511 		KASSERT(prev->fe_off <= frent->fe_off,
512 		    ("prev->fe_off <= frent->fe_off"));
513 		return prev;
514 	}
515 	/*
516 	 * Here prev is the first fragment of the entry point.  The offset
517 	 * of frent is behind it.  Find the closest previous fragment.
518 	 */
519 	for (next = TAILQ_NEXT(prev, fr_next); next != NULL;
520 	    next = TAILQ_NEXT(next, fr_next)) {
521 		if (next->fe_off > frent->fe_off)
522 			break;
523 		prev = next;
524 	}
525 	return prev;
526 }
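
/*
 * Worked example: suppose the queue holds fragments at offsets 0, 4096 and
 * 12288, and frent arrives at offset 8192 (index 2).  The last queued
 * fragment (12288) is not <= 8192, so the quick path fails.  The index
 * scan starts at index 2: fr_firstoff[2] is NULL (nothing lies in
 * [8192,12288)), and fr_firstoff[3] yields the 12288 fragment.  Its offset
 * is greater than frent's, so one TAILQ_PREV() step lands on 4096, the
 * true predecessor.
 */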
527 
528 static struct pf_fragment *
529 pf_fillup_fragment(struct pf_fragment_cmp *key, struct pf_frent *frent,
530     u_short *reason)
531 {
532 	struct pf_frent		*after, *next, *prev;
533 	struct pf_fragment	*frag;
534 	uint16_t		total;
535 
536 	PF_FRAG_ASSERT();
537 
538 	/* No empty fragments. */
539 	if (frent->fe_len == 0) {
540 		DPFPRINTF(("bad fragment: len 0\n"));
541 		goto bad_fragment;
542 	}
543 
544 	/* All fragments are 8 byte aligned. */
545 	if (frent->fe_mff && (frent->fe_len & 0x7)) {
546 		DPFPRINTF(("bad fragment: mff and len %d\n", frent->fe_len));
547 		goto bad_fragment;
548 	}
549 
550 	/* Respect maximum length, IP_MAXPACKET == IPV6_MAXPACKET. */
551 	if (frent->fe_off + frent->fe_len > IP_MAXPACKET) {
552 		DPFPRINTF(("bad fragment: max packet %d\n",
553 		    frent->fe_off + frent->fe_len));
554 		goto bad_fragment;
555 	}
556 
557 	DPFPRINTF((key->frc_af == AF_INET ?
558 	    "reass frag %d @ %d-%d\n" : "reass frag %#08x @ %d-%d\n",
559 	    key->frc_id, frent->fe_off, frent->fe_off + frent->fe_len));
560 
561 	/* Fully buffer all of the fragments in this fragment queue. */
562 	frag = pf_find_fragment(key, &V_pf_frag_tree);
563 
564 	/* Create a new reassembly queue for this packet. */
565 	if (frag == NULL) {
566 		frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
567 		if (frag == NULL) {
568 			pf_flush_fragments();
569 			frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
570 			if (frag == NULL) {
571 				REASON_SET(reason, PFRES_MEMORY);
572 				goto drop_fragment;
573 			}
574 		}
575 
576 		*(struct pf_fragment_cmp *)frag = *key;
577 		memset(frag->fr_firstoff, 0, sizeof(frag->fr_firstoff));
578 		memset(frag->fr_entries, 0, sizeof(frag->fr_entries));
579 		frag->fr_timeout = time_uptime;
580 		TAILQ_INIT(&frag->fr_queue);
581 		frag->fr_maxlen = frent->fe_len;
582 		frag->fr_holes = 1;
583 
584 		RB_INSERT(pf_frag_tree, &V_pf_frag_tree, frag);
585 		TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next);
586 
587 		/* We do not have a previous fragment, cannot fail. */
588 		pf_frent_insert(frag, frent, NULL);
589 
590 		return (frag);
591 	}
592 
593 	KASSERT(!TAILQ_EMPTY(&frag->fr_queue), ("!TAILQ_EMPTY(&frag->fr_queue)"));
594 
595 	/* Remember maximum fragment len for refragmentation. */
596 	if (frent->fe_len > frag->fr_maxlen)
597 		frag->fr_maxlen = frent->fe_len;
598 
599 	/* Maximum data we have seen already. */
600 	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
601 		TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
602 
603 	/* Non-terminal fragments must have the more-fragments flag set. */
604 	if (frent->fe_off + frent->fe_len < total && !frent->fe_mff)
605 		goto bad_fragment;
606 
607 	/* Check if we saw the last fragment already. */
608 	if (!TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_mff) {
609 		if (frent->fe_off + frent->fe_len > total ||
610 		    (frent->fe_off + frent->fe_len == total && frent->fe_mff))
611 			goto bad_fragment;
612 	} else {
613 		if (frent->fe_off + frent->fe_len == total && !frent->fe_mff)
614 			goto bad_fragment;
615 	}
616 
617 	/* Find neighbors for newly inserted fragment */
618 	prev = pf_frent_previous(frag, frent);
619 	if (prev == NULL) {
620 		after = TAILQ_FIRST(&frag->fr_queue);
621 		KASSERT(after != NULL, ("after != NULL"));
622 	} else {
623 		after = TAILQ_NEXT(prev, fr_next);
624 	}
625 
626 	if (prev != NULL && prev->fe_off + prev->fe_len > frent->fe_off) {
627 		uint16_t precut;
628 
629 		if (frag->fr_af == AF_INET6)
630 			goto free_fragment;
631 
632 		precut = prev->fe_off + prev->fe_len - frent->fe_off;
633 		if (precut >= frent->fe_len) {
634 			DPFPRINTF(("new frag overlapped\n"));
635 			goto drop_fragment;
636 		}
637 		DPFPRINTF(("frag head overlap %d\n", precut));
638 		m_adj(frent->fe_m, precut);
639 		frent->fe_off += precut;
640 		frent->fe_len -= precut;
641 	}
642 
643 	for (; after != NULL && frent->fe_off + frent->fe_len > after->fe_off;
644 	    after = next) {
645 		uint16_t aftercut;
646 
647 		aftercut = frent->fe_off + frent->fe_len - after->fe_off;
648 		if (aftercut < after->fe_len) {
649 			DPFPRINTF(("frag tail overlap %d\n", aftercut));
650 			m_adj(after->fe_m, aftercut);
651 			/* Fragment may switch queue as fe_off changes */
652 			pf_frent_remove(frag, after);
653 			after->fe_off += aftercut;
654 			after->fe_len -= aftercut;
655 			/* Insert into correct queue */
656 			if (pf_frent_insert(frag, after, prev)) {
657 				DPFPRINTF(("fragment requeue limit exceeded\n"));
658 				m_freem(after->fe_m);
659 				uma_zfree(V_pf_frent_z, after);
660 				/* There is no way to recover */
661 				goto free_fragment;
662 			}
663 			break;
664 		}
665 
666 		/* This fragment is completely overlapped, lose it. */
667 		DPFPRINTF(("old frag overlapped\n"));
668 		next = TAILQ_NEXT(after, fr_next);
669 		pf_frent_remove(frag, after);
670 		m_freem(after->fe_m);
671 		uma_zfree(V_pf_frent_z, after);
672 	}
673 
674 	/* If part of the queue gets too long, there is no way to recover. */
675 	if (pf_frent_insert(frag, frent, prev)) {
676 		DPFPRINTF(("fragment queue limit exceeded\n"));
677 		goto bad_fragment;
678 	}
679 
680 	return (frag);
681 
682 free_fragment:
683 	/*
684 	 * RFC 5722, Errata 3089:  When reassembling an IPv6 datagram, if one
685 	 * or more of its constituent fragments is determined to be an overlapping
686 	 * fragment, the entire datagram (and any constituent fragments) MUST
687 	 * be silently discarded.
688 	 */
689 	DPFPRINTF(("flush overlapping fragments\n"));
690 	pf_free_fragment(frag);
691 
692 bad_fragment:
693 	REASON_SET(reason, PFRES_FRAG);
694 drop_fragment:
695 	uma_zfree(V_pf_frent_z, frent);
696 	return (NULL);
697 }
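
/*
 * Worked overlap example (IPv4 only; IPv6 overlaps free the whole queue
 * per RFC 5722 above): if prev covers [0,24) and frent arrives as [16,40),
 * precut = 0 + 24 - 16 = 8, so m_adj() strips 8 leading octets and frent
 * becomes [24,40).  Conversely, if frent covers [24,48) and the following
 * fragment sits at [40,56), aftercut = 24 + 24 - 40 = 8 and the old
 * fragment is trimmed to [48,56), possibly moving to another entry-point
 * queue as its offset grows.
 */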
698 
699 static struct mbuf *
700 pf_join_fragment(struct pf_fragment *frag)
701 {
702 	struct mbuf *m, *m2;
703 	struct pf_frent	*frent;
704 
705 	frent = TAILQ_FIRST(&frag->fr_queue);
706 	TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);
707 
708 	m = frent->fe_m;
709 	if ((frent->fe_hdrlen + frent->fe_len) < m->m_pkthdr.len)
710 		m_adj(m, (frent->fe_hdrlen + frent->fe_len) - m->m_pkthdr.len);
711 	uma_zfree(V_pf_frent_z, frent);
712 	while ((frent = TAILQ_FIRST(&frag->fr_queue)) != NULL) {
713 		TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);
714 
715 		m2 = frent->fe_m;
716 		/* Strip off ip header. */
717 		m_adj(m2, frent->fe_hdrlen);
718 		/* Strip off any trailing bytes. */
719 		if (frent->fe_len < m2->m_pkthdr.len)
720 			m_adj(m2, frent->fe_len - m2->m_pkthdr.len);
721 
722 		uma_zfree(V_pf_frent_z, frent);
723 		m_cat(m, m2);
724 	}
725 
726 	/* Remove from fragment queue. */
727 	pf_free_fragment(frag);
728 
729 	return (m);
730 }
731 
732 #ifdef INET
733 static int
734 pf_reassemble(struct mbuf **m0, int dir, u_short *reason)
735 {
736 	struct mbuf		*m = *m0;
737 	struct ip		*ip = mtod(m, struct ip *);
738 	struct pf_frent		*frent;
739 	struct pf_fragment	*frag;
740 	struct m_tag		*mtag;
741 	struct pf_fragment_tag	*ftag;
742 	struct pf_fragment_cmp	key;
743 	uint16_t		total, hdrlen;
744 	uint32_t		 frag_id;
745 	uint16_t		 maxlen;
746 
747 	/* Get an entry for the fragment queue */
748 	if ((frent = pf_create_fragment(reason)) == NULL)
749 		return (PF_DROP);
750 
751 	frent->fe_m = m;
752 	frent->fe_hdrlen = ip->ip_hl << 2;
753 	frent->fe_extoff = 0;
754 	frent->fe_len = ntohs(ip->ip_len) - (ip->ip_hl << 2);
755 	frent->fe_off = (ntohs(ip->ip_off) & IP_OFFMASK) << 3;
756 	frent->fe_mff = ntohs(ip->ip_off) & IP_MF;
757 
758 	pf_ip2key(ip, dir, &key);
759 
760 	if ((frag = pf_fillup_fragment(&key, frent, reason)) == NULL)
761 		return (PF_DROP);
762 
763 	/* The mbuf is part of the fragment entry, no direct free or access */
764 	m = *m0 = NULL;
765 
766 	if (frag->fr_holes) {
767 		DPFPRINTF(("frag %d, holes %d\n", frag->fr_id, frag->fr_holes));
768 		return (PF_PASS);  /* drop because *m0 is NULL, no error */
769 	}
770 
771 	/* We have all the data */
772 	frent = TAILQ_FIRST(&frag->fr_queue);
773 	KASSERT(frent != NULL, ("frent != NULL"));
774 	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
775 		TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
776 	hdrlen = frent->fe_hdrlen;
777 
778 	maxlen = frag->fr_maxlen;
779 	frag_id = frag->fr_id;
780 	m = *m0 = pf_join_fragment(frag);
781 	frag = NULL;
782 
783 	if (m->m_flags & M_PKTHDR) {
784 		int plen = 0;
785 		for (m = *m0; m; m = m->m_next)
786 			plen += m->m_len;
787 		m = *m0;
788 		m->m_pkthdr.len = plen;
789 	}
790 
791 	if ((mtag = m_tag_get(PACKET_TAG_PF_REASSEMBLED,
792 	    sizeof(struct pf_fragment_tag), M_NOWAIT)) == NULL) {
793 		REASON_SET(reason, PFRES_SHORT);
794 		/* PF_DROP requires a valid mbuf *m0 in pf_test() */
795 		return (PF_DROP);
796 	}
797 	ftag = (struct pf_fragment_tag *)(mtag + 1);
798 	ftag->ft_hdrlen = hdrlen;
799 	ftag->ft_extoff = 0;
800 	ftag->ft_maxlen = maxlen;
801 	ftag->ft_id = frag_id;
802 	m_tag_prepend(m, mtag);
803 
804 	ip = mtod(m, struct ip *);
805 	ip->ip_sum = pf_cksum_fixup(ip->ip_sum, ip->ip_len,
806 	    htons(hdrlen + total), 0);
807 	ip->ip_len = htons(hdrlen + total);
808 	ip->ip_sum = pf_cksum_fixup(ip->ip_sum, ip->ip_off,
809 	    ip->ip_off & ~(IP_MF|IP_OFFMASK), 0);
810 	ip->ip_off &= ~(IP_MF|IP_OFFMASK);
811 
812 	if (hdrlen + total > IP_MAXPACKET) {
813 		DPFPRINTF(("drop: too big: %d\n", total));
814 		ip->ip_len = 0;
815 		REASON_SET(reason, PFRES_SHORT);
816 		/* PF_DROP requires a valid mbuf *m0 in pf_test() */
817 		return (PF_DROP);
818 	}
819 
820 	DPFPRINTF(("complete: %p(%d)\n", m, ntohs(ip->ip_len)));
821 	return (PF_PASS);
822 }
823 #endif	/* INET */
824 
825 #ifdef INET6
826 static int
827 pf_reassemble6(struct mbuf **m0, struct ip6_frag *fraghdr,
828     uint16_t hdrlen, uint16_t extoff, u_short *reason)
829 {
830 	struct mbuf		*m = *m0;
831 	struct ip6_hdr		*ip6 = mtod(m, struct ip6_hdr *);
832 	struct pf_frent		*frent;
833 	struct pf_fragment	*frag;
834 	struct pf_fragment_cmp	 key;
835 	struct m_tag		*mtag;
836 	struct pf_fragment_tag	*ftag;
837 	int			 off;
838 	uint32_t		 frag_id;
839 	uint16_t		 total, maxlen;
840 	uint8_t			 proto;
841 
842 	PF_FRAG_LOCK();
843 
844 	/* Get an entry for the fragment queue. */
845 	if ((frent = pf_create_fragment(reason)) == NULL) {
846 		PF_FRAG_UNLOCK();
847 		return (PF_DROP);
848 	}
849 
850 	frent->fe_m = m;
851 	frent->fe_hdrlen = hdrlen;
852 	frent->fe_extoff = extoff;
853 	frent->fe_len = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - hdrlen;
854 	frent->fe_off = ntohs(fraghdr->ip6f_offlg & IP6F_OFF_MASK);
855 	frent->fe_mff = fraghdr->ip6f_offlg & IP6F_MORE_FRAG;
856 
857 	key.frc_src.v6 = ip6->ip6_src;
858 	key.frc_dst.v6 = ip6->ip6_dst;
859 	key.frc_af = AF_INET6;
860 	/* Only the first fragment's protocol is relevant. */
861 	key.frc_proto = 0;
862 	key.frc_id = fraghdr->ip6f_ident;
863 
864 	if ((frag = pf_fillup_fragment(&key, frent, reason)) == NULL) {
865 		PF_FRAG_UNLOCK();
866 		return (PF_DROP);
867 	}
868 
869 	/* The mbuf is part of the fragment entry, no direct free or access. */
870 	m = *m0 = NULL;
871 
872 	if (frag->fr_holes) {
873 		DPFPRINTF(("frag %d, holes %d\n", frag->fr_id,
874 		    frag->fr_holes));
875 		PF_FRAG_UNLOCK();
876 		return (PF_PASS);  /* Drop because *m0 is NULL, no error. */
877 	}
878 
879 	/* We have all the data. */
880 	frent = TAILQ_FIRST(&frag->fr_queue);
881 	KASSERT(frent != NULL, ("frent != NULL"));
882 	extoff = frent->fe_extoff;
883 	maxlen = frag->fr_maxlen;
884 	frag_id = frag->fr_id;
885 	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
886 		TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
887 	hdrlen = frent->fe_hdrlen - sizeof(struct ip6_frag);
888 
889 	m = *m0 = pf_join_fragment(frag);
890 	frag = NULL;
891 
892 	PF_FRAG_UNLOCK();
893 
894 	/* Take protocol from first fragment header. */
895 	m = m_getptr(m, hdrlen + offsetof(struct ip6_frag, ip6f_nxt), &off);
896 	KASSERT(m, ("%s: short mbuf chain", __func__));
897 	proto = *(mtod(m, uint8_t *) + off);
898 	m = *m0;
899 
900 	/* Delete frag6 header */
901 	if (ip6_deletefraghdr(m, hdrlen, M_NOWAIT) != 0)
902 		goto fail;
903 
904 	if (m->m_flags & M_PKTHDR) {
905 		int plen = 0;
906 		for (m = *m0; m; m = m->m_next)
907 			plen += m->m_len;
908 		m = *m0;
909 		m->m_pkthdr.len = plen;
910 	}
911 
912 	if ((mtag = m_tag_get(PACKET_TAG_PF_REASSEMBLED,
913 	    sizeof(struct pf_fragment_tag), M_NOWAIT)) == NULL)
914 		goto fail;
915 	ftag = (struct pf_fragment_tag *)(mtag + 1);
916 	ftag->ft_hdrlen = hdrlen;
917 	ftag->ft_extoff = extoff;
918 	ftag->ft_maxlen = maxlen;
919 	ftag->ft_id = frag_id;
920 	m_tag_prepend(m, mtag);
921 
922 	ip6 = mtod(m, struct ip6_hdr *);
923 	ip6->ip6_plen = htons(hdrlen - sizeof(struct ip6_hdr) + total);
924 	if (extoff) {
925 		/* Write protocol into next field of last extension header. */
926 		m = m_getptr(m, extoff + offsetof(struct ip6_ext, ip6e_nxt),
927 		    &off);
928 		KASSERT(m, ("%s: short mbuf chain", __func__));
929 		*(mtod(m, char *) + off) = proto;
930 		m = *m0;
931 	} else
932 		ip6->ip6_nxt = proto;
933 
934 	if (hdrlen - sizeof(struct ip6_hdr) + total > IPV6_MAXPACKET) {
935 		DPFPRINTF(("drop: too big: %d\n", total));
936 		ip6->ip6_plen = 0;
937 		REASON_SET(reason, PFRES_SHORT);
938 		/* PF_DROP requires a valid mbuf *m0 in pf_test6(). */
939 		return (PF_DROP);
940 	}
941 
942 	DPFPRINTF(("complete: %p(%d)\n", m, ntohs(ip6->ip6_plen)));
943 	return (PF_PASS);
944 
945 fail:
946 	REASON_SET(reason, PFRES_MEMORY);
947 	/* PF_DROP requires a valid mbuf *m0 in pf_test6(), will free later. */
948 	return (PF_DROP);
949 }
950 #endif	/* INET6 */
951 
952 #ifdef INET6
953 int
954 pf_max_frag_size(struct mbuf *m)
955 {
956 	struct m_tag *tag;
957 	struct pf_fragment_tag *ftag;
958 
959 	tag = m_tag_find(m, PACKET_TAG_PF_REASSEMBLED, NULL);
960 	if (tag == NULL)
961 		return (m->m_pkthdr.len);
962 
963 	ftag = (struct pf_fragment_tag *)(tag + 1);
964 
965 	return (ftag->ft_maxlen);
966 }
967 
968 int
969 pf_refragment6(struct ifnet *ifp, struct mbuf **m0, struct m_tag *mtag,
970     struct ifnet *rt, bool forward)
971 {
972 	struct mbuf		*m = *m0, *t;
973 	struct ip6_hdr		*hdr;
974 	struct pf_fragment_tag	*ftag = (struct pf_fragment_tag *)(mtag + 1);
975 	struct pf_pdesc		 pd;
976 	uint32_t		 frag_id;
977 	uint16_t		 hdrlen, extoff, maxlen;
978 	uint8_t			 proto;
979 	int			 error, action;
980 
981 	hdrlen = ftag->ft_hdrlen;
982 	extoff = ftag->ft_extoff;
983 	maxlen = ftag->ft_maxlen;
984 	frag_id = ftag->ft_id;
985 	m_tag_delete(m, mtag);
986 	mtag = NULL;
987 	ftag = NULL;
988 
989 	if (extoff) {
990 		int off;
991 
992 		/* Use protocol from next field of last extension header */
993 		m = m_getptr(m, extoff + offsetof(struct ip6_ext, ip6e_nxt),
994 		    &off);
995 		KASSERT((m != NULL), ("pf_refragment6: short mbuf chain"));
996 		proto = *(mtod(m, uint8_t *) + off);
997 		*(mtod(m, char *) + off) = IPPROTO_FRAGMENT;
998 		m = *m0;
999 	} else {
1000 		hdr = mtod(m, struct ip6_hdr *);
1001 		proto = hdr->ip6_nxt;
1002 		hdr->ip6_nxt = IPPROTO_FRAGMENT;
1003 	}
1004 
1005 	/* In case of link-local traffic we'll need a scope set. */
1006 	hdr = mtod(m, struct ip6_hdr *);
1007 
1008 	in6_setscope(&hdr->ip6_src, ifp, NULL);
1009 	in6_setscope(&hdr->ip6_dst, ifp, NULL);
1010 
1011 	/* The MTU must be a multiple of 8 bytes, or we risk doing the
1012 	 * fragmentation wrong. */
1013 	maxlen = maxlen & ~7;
1014 
1015 	/*
1016 	 * Maxlen may be less than 8 if there was only a single
1017 	 * fragment.  As it was fragmented before, add a fragment
1018 	 * header also for a single fragment.  If total or maxlen
1019 	 * is less than 8, ip6_fragment() will return EMSGSIZE and
1020 	 * we drop the packet.
1021 	 */
1022 	error = ip6_fragment(ifp, m, hdrlen, proto, maxlen, frag_id);
1023 	m = (*m0)->m_nextpkt;
1024 	(*m0)->m_nextpkt = NULL;
1025 	if (error == 0) {
1026 		/* The first mbuf contains the unfragmented packet. */
1027 		m_freem(*m0);
1028 		*m0 = NULL;
1029 		action = PF_PASS;
1030 	} else {
1031 		/* Drop expects an mbuf to free. */
1032 		DPFPRINTF(("refragment error %d\n", error));
1033 		action = PF_DROP;
1034 	}
1035 	for (; m; m = t) {
1036 		t = m->m_nextpkt;
1037 		m->m_nextpkt = NULL;
1038 		m->m_flags |= M_SKIP_FIREWALL;
1039 		memset(&pd, 0, sizeof(pd));
1040 		pd.pf_mtag = pf_find_mtag(m);
1041 		if (error != 0) {
1042 			m_freem(m);
1043 			continue;
1044 		}
1045 		if (rt != NULL) {
1046 			struct sockaddr_in6	dst;
1047 			hdr = mtod(m, struct ip6_hdr *);
1048 
1049 			bzero(&dst, sizeof(dst));
1050 			dst.sin6_family = AF_INET6;
1051 			dst.sin6_len = sizeof(dst);
1052 			dst.sin6_addr = hdr->ip6_dst;
1053 
1054 			nd6_output_ifp(rt, rt, m, &dst, NULL);
1055 		} else if (forward) {
1056 			MPASS(m->m_pkthdr.rcvif != NULL);
1057 			ip6_forward(m, 0);
1058 		} else {
1059 			(void)ip6_output(m, NULL, NULL, 0, NULL, NULL,
1060 			    NULL);
1061 		}
1062 	}
1063 
1064 	return (action);
1065 }
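
/*
 * Worked example for the "maxlen & ~7" rounding above: IPv6 fragment
 * offsets are expressed in 8-octet units, so if the largest original
 * fragment carried 1460 octets of payload, refragmentation uses
 * 1460 & ~7 == 1456.  A single original fragment shorter than 8 octets
 * rounds down to 0, in which case ip6_fragment() fails with EMSGSIZE and
 * the packet is dropped as described above.
 */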
1066 #endif /* INET6 */
1067 
1068 #ifdef INET
1069 int
1070 pf_normalize_ip(u_short *reason, struct pf_pdesc *pd)
1071 {
1072 	struct pf_krule		*r;
1073 	struct ip		*h = mtod(pd->m, struct ip *);
1074 	int			 mff = (ntohs(h->ip_off) & IP_MF);
1075 	int			 hlen = h->ip_hl << 2;
1076 	u_int16_t		 fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
1077 	u_int16_t		 max;
1078 	int			 ip_len;
1079 	int			 tag = -1;
1080 	int			 verdict;
1081 	bool			 scrub_compat;
1082 
1083 	PF_RULES_RASSERT();
1084 
1085 	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
1086 	/*
1087 	 * Check if there are any scrub rules, matching or not.
1088 	 * Lack of scrub rules means:
1089 	 *  - enforced packet normalization operation just like in OpenBSD
1090 	 *  - fragment reassembly depends on V_pf_status.reass
1091 	 * With scrub rules:
1092 	 *  - packet normalization is performed if there is a matching scrub rule
1093 	 *  - fragment reassembly is performed if the matching rule has no
1094 	 *    PFRULE_FRAGMENT_NOREASS flag
1095 	 */
1096 	scrub_compat = (r != NULL);
1097 	while (r != NULL) {
1098 		pf_counter_u64_add(&r->evaluations, 1);
1099 		if (pfi_kkif_match(r->kif, pd->kif) == r->ifnot)
1100 			r = r->skip[PF_SKIP_IFP];
1101 		else if (r->direction && r->direction != pd->dir)
1102 			r = r->skip[PF_SKIP_DIR];
1103 		else if (r->af && r->af != AF_INET)
1104 			r = r->skip[PF_SKIP_AF];
1105 		else if (r->proto && r->proto != h->ip_p)
1106 			r = r->skip[PF_SKIP_PROTO];
1107 		else if (PF_MISMATCHAW(&r->src.addr,
1108 		    (struct pf_addr *)&h->ip_src.s_addr, AF_INET,
1109 		    r->src.neg, pd->kif, M_GETFIB(pd->m)))
1110 			r = r->skip[PF_SKIP_SRC_ADDR];
1111 		else if (PF_MISMATCHAW(&r->dst.addr,
1112 		    (struct pf_addr *)&h->ip_dst.s_addr, AF_INET,
1113 		    r->dst.neg, NULL, M_GETFIB(pd->m)))
1114 			r = r->skip[PF_SKIP_DST_ADDR];
1115 		else if (r->match_tag && !pf_match_tag(pd->m, r, &tag,
1116 		    pd->pf_mtag ? pd->pf_mtag->tag : 0))
1117 			r = TAILQ_NEXT(r, entries);
1118 		else
1119 			break;
1120 	}
1121 
1122 	if (scrub_compat) {
1123 		/* With scrub rules present IPv4 normalization happens only
1124 		 * if one of the rules has matched and it's not a "no scrub" rule */
1125 		if (r == NULL || r->action == PF_NOSCRUB)
1126 			return (PF_PASS);
1127 
1128 		pf_counter_u64_critical_enter();
1129 		pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1);
1130 		pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT], pd->tot_len);
1131 		pf_counter_u64_critical_exit();
1132 		pf_rule_to_actions(r, &pd->act);
1133 	}
1134 
1135 	/* Check for illegal packets */
1136 	if (hlen < (int)sizeof(struct ip)) {
1137 		REASON_SET(reason, PFRES_NORM);
1138 		goto drop;
1139 	}
1140 
1141 	if (hlen > ntohs(h->ip_len)) {
1142 		REASON_SET(reason, PFRES_NORM);
1143 		goto drop;
1144 	}
1145 
1146 	/* Clear IP_DF if the rule uses the no-df option or we're in no-df mode */
1147 	if (((!scrub_compat && V_pf_status.reass & PF_REASS_NODF) ||
1148 	    (r != NULL && r->rule_flag & PFRULE_NODF)) &&
1149 	    (h->ip_off & htons(IP_DF))
1150 	) {
1151 		u_int16_t ip_off = h->ip_off;
1152 
1153 		h->ip_off &= htons(~IP_DF);
1154 		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
1155 	}
1156 
1157 	/* We will need other tests here */
1158 	if (!fragoff && !mff)
1159 		goto no_fragment;
1160 
1161 	/* We're dealing with a fragment now. Don't allow fragments
1162 	 * with IP_DF to enter the cache. If the flag was cleared by
1163 	 * no-df above, fine. Otherwise drop it.
1164 	 */
1165 	if (h->ip_off & htons(IP_DF)) {
1166 		DPFPRINTF(("IP_DF\n"));
1167 		goto bad;
1168 	}
1169 
1170 	ip_len = ntohs(h->ip_len) - hlen;
1171 
1172 	/* All fragments are 8 byte aligned */
1173 	if (mff && (ip_len & 0x7)) {
1174 		DPFPRINTF(("mff and %d\n", ip_len));
1175 		goto bad;
1176 	}
1177 
1178 	/* Respect maximum length */
1179 	if (fragoff + ip_len > IP_MAXPACKET) {
1180 		DPFPRINTF(("max packet %d\n", fragoff + ip_len));
1181 		goto bad;
1182 	}
1183 
1184 	if ((!scrub_compat && V_pf_status.reass) ||
1185 	    (r != NULL && !(r->rule_flag & PFRULE_FRAGMENT_NOREASS))
1186 	) {
1187 		max = fragoff + ip_len;
1188 
1189 		/* Fully buffer all of the fragments
1190 		 * Might return a completely reassembled mbuf, or NULL */
1191 		PF_FRAG_LOCK();
1192 		DPFPRINTF(("reass frag %d @ %d-%d\n", h->ip_id, fragoff, max));
1193 		verdict = pf_reassemble(&pd->m, pd->dir, reason);
1194 		PF_FRAG_UNLOCK();
1195 
1196 		if (verdict != PF_PASS)
1197 			return (PF_DROP);
1198 
1199 		if (pd->m == NULL)
1200 			return (PF_DROP);
1201 
1202 		h = mtod(pd->m, struct ip *);
1203 		pd->tot_len = ntohs(h->ip_len);
1204 
1205  no_fragment:
1206 		/* At this point, only IP_DF is allowed in ip_off */
1207 		if (h->ip_off & ~htons(IP_DF)) {
1208 			u_int16_t ip_off = h->ip_off;
1209 
1210 			h->ip_off &= htons(IP_DF);
1211 			h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
1212 		}
1213 	}
1214 
1215 	return (PF_PASS);
1216 
1217  bad:
1218 	DPFPRINTF(("dropping bad fragment\n"));
1219 	REASON_SET(reason, PFRES_FRAG);
1220  drop:
1221 	if (r != NULL && r->log)
1222 		PFLOG_PACKET(PF_DROP, *reason, r, NULL, NULL, pd, 1, NULL);
1223 
1224 	return (PF_DROP);
1225 }
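
/*
 * The pf_cksum_fixup() calls above apply RFC 1624 style incremental
 * updates: when a 16-bit header word changes, the ones-complement checksum
 * is patched rather than recomputed over the whole header.  A sketch of
 * the arithmetic, as a hypothetical standalone helper excluded from the
 * build (pf's real pf_cksum_fixup(), defined elsewhere, additionally
 * special-cases UDP's optional checksum):
 */
#if 0
static u_int16_t
cksum_fixup_sketch(u_int16_t cksum, u_int16_t old, u_int16_t new)
{
	u_int32_t l;

	/* Patch the ones-complement sum: add back old, subtract new. */
	l = cksum + old - new;
	/* Fold the carry back into the low 16 bits. */
	l = (l >> 16) + (l & 65535);
	l = l & 65535;

	return (l);
}
#endif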
1226 #endif
1227 
1228 #ifdef INET6
1229 int
1230 pf_normalize_ip6(int off, u_short *reason,
1231     struct pf_pdesc *pd)
1232 {
1233 	struct pf_krule		*r;
1234 	struct ip6_hdr		*h;
1235 	struct ip6_frag		 frag;
1236 	bool			 scrub_compat;
1237 
1238 	PF_RULES_RASSERT();
1239 
1240 	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
1241 	/*
1242 	 * Check if there are any scrub rules, matching or not.
1243 	 * Lack of scrub rules means:
1244 	 *  - enforced packet normalization operation just like in OpenBSD
1245 	 * With scrub rules:
1246 	 *  - packet normalization is performed if there is a matching scrub rule
1247 	 * XXX: Fragment reassembly always performed for IPv6!
1248 	 */
1249 	scrub_compat = (r != NULL);
1250 	while (r != NULL) {
1251 		pf_counter_u64_add(&r->evaluations, 1);
1252 		if (pfi_kkif_match(r->kif, pd->kif) == r->ifnot)
1253 			r = r->skip[PF_SKIP_IFP];
1254 		else if (r->direction && r->direction != pd->dir)
1255 			r = r->skip[PF_SKIP_DIR];
1256 		else if (r->af && r->af != AF_INET6)
1257 			r = r->skip[PF_SKIP_AF];
1258 		else if (r->proto && r->proto != pd->proto)
1259 			r = r->skip[PF_SKIP_PROTO];
1260 		else if (PF_MISMATCHAW(&r->src.addr,
1261 		    (struct pf_addr *)&pd->src, AF_INET6,
1262 		    r->src.neg, pd->kif, M_GETFIB(pd->m)))
1263 			r = r->skip[PF_SKIP_SRC_ADDR];
1264 		else if (PF_MISMATCHAW(&r->dst.addr,
1265 		    (struct pf_addr *)&pd->dst, AF_INET6,
1266 		    r->dst.neg, NULL, M_GETFIB(pd->m)))
1267 			r = r->skip[PF_SKIP_DST_ADDR];
1268 		else
1269 			break;
1270 	}
1271 
1272 	if (scrub_compat) {
1273 		/* With scrub rules present IPv6 normalization happens only
1274 		 * if one of the rules has matched and it's not a "no scrub" rule */
1275 		if (r == NULL || r->action == PF_NOSCRUB)
1276 			return (PF_PASS);
1277 
1278 		pf_counter_u64_critical_enter();
1279 		pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1);
1280 		pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT], pd->tot_len);
1281 		pf_counter_u64_critical_exit();
1282 		pf_rule_to_actions(r, &pd->act);
1283 	}
1284 
1285 	if (!pf_pull_hdr(pd->m, off, &frag, sizeof(frag), NULL, reason, AF_INET6))
1286 		return (PF_DROP);
1287 
1288 	/* Offset now points to data portion. */
1289 	off += sizeof(frag);
1290 
1291 	if (pd->virtual_proto == PF_VPROTO_FRAGMENT) {
1292 		/* Either returns PF_DROP, or *m0 is NULL, or *m0 holds the
1293 		 * completely reassembled mbuf. */
1294 		if (pf_reassemble6(&pd->m, &frag, off, pd->extoff, reason) != PF_PASS)
1295 			return (PF_DROP);
1296 		if (pd->m == NULL)
1297 			return (PF_DROP);
1298 		h = mtod(pd->m, struct ip6_hdr *);
1299 		pd->tot_len = ntohs(h->ip6_plen) + sizeof(struct ip6_hdr);
1300 	}
1301 
1302 	return (PF_PASS);
1303 }
1304 #endif /* INET6 */
1305 
1306 int
1307 pf_normalize_tcp(struct pf_pdesc *pd)
1308 {
1309 	struct pf_krule	*r, *rm = NULL;
1310 	struct tcphdr	*th = &pd->hdr.tcp;
1311 	int		 rewrite = 0;
1312 	u_short		 reason;
1313 	u_int16_t	 flags;
1314 	sa_family_t	 af = pd->af;
1315 	int		 srs;
1316 
1317 	PF_RULES_RASSERT();
1318 
1319 	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
1320 	/* Check if there are any scrub rules. Lack of scrub rules means enforced
1321 	 * packet normalization operation just like in OpenBSD. */
1322 	srs = (r != NULL);
1323 	while (r != NULL) {
1324 		pf_counter_u64_add(&r->evaluations, 1);
1325 		if (pfi_kkif_match(r->kif, pd->kif) == r->ifnot)
1326 			r = r->skip[PF_SKIP_IFP];
1327 		else if (r->direction && r->direction != pd->dir)
1328 			r = r->skip[PF_SKIP_DIR];
1329 		else if (r->af && r->af != af)
1330 			r = r->skip[PF_SKIP_AF];
1331 		else if (r->proto && r->proto != pd->proto)
1332 			r = r->skip[PF_SKIP_PROTO];
1333 		else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
1334 		    r->src.neg, pd->kif, M_GETFIB(pd->m)))
1335 			r = r->skip[PF_SKIP_SRC_ADDR];
1336 		else if (r->src.port_op && !pf_match_port(r->src.port_op,
1337 			    r->src.port[0], r->src.port[1], th->th_sport))
1338 			r = r->skip[PF_SKIP_SRC_PORT];
1339 		else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
1340 		    r->dst.neg, NULL, M_GETFIB(pd->m)))
1341 			r = r->skip[PF_SKIP_DST_ADDR];
1342 		else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
1343 			    r->dst.port[0], r->dst.port[1], th->th_dport))
1344 			r = r->skip[PF_SKIP_DST_PORT];
1345 		else if (r->os_fingerprint != PF_OSFP_ANY && !pf_osfp_match(
1346 			    pf_osfp_fingerprint(pd, th),
1347 			    r->os_fingerprint))
1348 			r = TAILQ_NEXT(r, entries);
1349 		else {
1350 			rm = r;
1351 			break;
1352 		}
1353 	}
1354 
1355 	if (srs) {
1356 		/* With scrub rules present TCP normalization happens only
1357 		 * if one of the rules has matched and it's not a "no scrub" rule */
1358 		if (rm == NULL || rm->action == PF_NOSCRUB)
1359 			return (PF_PASS);
1360 
1361 		pf_counter_u64_critical_enter();
1362 		pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1);
1363 		pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT], pd->tot_len);
1364 		pf_counter_u64_critical_exit();
1365 		pf_rule_to_actions(rm, &pd->act);
1366 	}
1367 
1368 	if (rm && rm->rule_flag & PFRULE_REASSEMBLE_TCP)
1369 		pd->flags |= PFDESC_TCP_NORM;
1370 
1371 	flags = tcp_get_flags(th);
1372 	if (flags & TH_SYN) {
1373 		/* Illegal packet */
1374 		if (flags & TH_RST)
1375 			goto tcp_drop;
1376 
1377 		if (flags & TH_FIN)
1378 			goto tcp_drop;
1379 	} else {
1380 		/* Illegal packet */
1381 		if (!(flags & (TH_ACK|TH_RST)))
1382 			goto tcp_drop;
1383 	}
1384 
1385 	if (!(flags & TH_ACK)) {
1386 		/* These flags are only valid if ACK is set */
1387 		if ((flags & TH_FIN) || (flags & TH_PUSH) || (flags & TH_URG))
1388 			goto tcp_drop;
1389 	}
1390 
1391 	/* Check for illegal header length */
1392 	if (th->th_off < (sizeof(struct tcphdr) >> 2))
1393 		goto tcp_drop;
1394 
1395 	/* If flags changed, or reserved data set, then adjust */
1396 	if (flags != tcp_get_flags(th) ||
1397 	    (tcp_get_flags(th) & (TH_RES1|TH_RES2|TH_RES3)) != 0) {
1398 		u_int16_t	ov, nv;
1399 
1400 		ov = *(u_int16_t *)(&th->th_ack + 1);
1401 		flags &= ~(TH_RES1 | TH_RES2 | TH_RES3);
1402 		tcp_set_flags(th, flags);
1403 		nv = *(u_int16_t *)(&th->th_ack + 1);
1404 
1405 		th->th_sum = pf_proto_cksum_fixup(pd->m, th->th_sum, ov, nv, 0);
1406 		rewrite = 1;
1407 	}
1408 
1409 	/* Remove urgent pointer, if TH_URG is not set */
1410 	if (!(flags & TH_URG) && th->th_urp) {
1411 		th->th_sum = pf_proto_cksum_fixup(pd->m, th->th_sum, th->th_urp,
1412 		    0, 0);
1413 		th->th_urp = 0;
1414 		rewrite = 1;
1415 	}
1416 
1417 	/* copy back packet headers if we sanitized */
1418 	if (rewrite)
1419 		m_copyback(pd->m, pd->off, sizeof(*th), (caddr_t)th);
1420 
1421 	return (PF_PASS);
1422 
1423  tcp_drop:
1424 	REASON_SET(&reason, PFRES_NORM);
1425 	if (rm != NULL && rm->log)
1426 		PFLOG_PACKET(PF_DROP, reason, rm, NULL, NULL, pd, 1, NULL);
1427 	return (PF_DROP);
1428 }
1429 
1430 int
1431 pf_normalize_tcp_init(struct pf_pdesc *pd, struct tcphdr *th,
1432     struct pf_state_peer *src, struct pf_state_peer *dst)
1433 {
1434 	u_int32_t tsval, tsecr;
1435 	u_int8_t hdr[60];
1436 	u_int8_t *opt;
1437 
1438 	KASSERT((src->scrub == NULL),
1439 	    ("pf_normalize_tcp_init: src->scrub != NULL"));
1440 
1441 	src->scrub = uma_zalloc(V_pf_state_scrub_z, M_ZERO | M_NOWAIT);
1442 	if (src->scrub == NULL)
1443 		return (1);
1444 
1445 	switch (pd->af) {
1446 #ifdef INET
1447 	case AF_INET: {
1448 		struct ip *h = mtod(pd->m, struct ip *);
1449 		src->scrub->pfss_ttl = h->ip_ttl;
1450 		break;
1451 	}
1452 #endif /* INET */
1453 #ifdef INET6
1454 	case AF_INET6: {
1455 		struct ip6_hdr *h = mtod(pd->m, struct ip6_hdr *);
1456 		src->scrub->pfss_ttl = h->ip6_hlim;
1457 		break;
1458 	}
1459 #endif /* INET6 */
1460 	}
1461 
1462 	/*
1463 	 * All normalizations below are only begun if we see the start of
1464 	 * the connection.  They must all set an enabled bit in pfss_flags.
1465 	 */
1466 	if ((tcp_get_flags(th) & TH_SYN) == 0)
1467 		return (0);
1468 
1469 	if (th->th_off > (sizeof(struct tcphdr) >> 2) && src->scrub &&
1470 	    pf_pull_hdr(pd->m, pd->off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
1471 		/* Diddle with TCP options */
1472 		int hlen;
1473 		opt = hdr + sizeof(struct tcphdr);
1474 		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
1475 		while (hlen >= TCPOLEN_TIMESTAMP) {
1476 			switch (*opt) {
1477 			case TCPOPT_EOL:	/* FALLTHROUGH */
1478 			case TCPOPT_NOP:
1479 				opt++;
1480 				hlen--;
1481 				break;
1482 			case TCPOPT_TIMESTAMP:
1483 				if (opt[1] >= TCPOLEN_TIMESTAMP) {
1484 					src->scrub->pfss_flags |=
1485 					    PFSS_TIMESTAMP;
1486 					src->scrub->pfss_ts_mod =
1487 					    htonl(arc4random());
1488 
1489 					/* note PFSS_PAWS not set yet */
1490 					memcpy(&tsval, &opt[2],
1491 					    sizeof(u_int32_t));
1492 					memcpy(&tsecr, &opt[6],
1493 					    sizeof(u_int32_t));
1494 					src->scrub->pfss_tsval0 = ntohl(tsval);
1495 					src->scrub->pfss_tsval = ntohl(tsval);
1496 					src->scrub->pfss_tsecr = ntohl(tsecr);
1497 					getmicrouptime(&src->scrub->pfss_last);
1498 				}
1499 				/* FALLTHROUGH */
1500 			default:
1501 				hlen -= MAX(opt[1], 2);
1502 				opt += MAX(opt[1], 2);
1503 				break;
1504 			}
1505 		}
1506 	}
1507 
1508 	return (0);
1509 }
1510 
1511 void
1512 pf_normalize_tcp_cleanup(struct pf_kstate *state)
1513 {
1514 	/* XXX Note: this also cleans up SCTP. */
1515 	uma_zfree(V_pf_state_scrub_z, state->src.scrub);
1516 	uma_zfree(V_pf_state_scrub_z, state->dst.scrub);
1517 
1518 	/* Someday... flush the TCP segment reassembly descriptors. */
1519 }
1520 int
1521 pf_normalize_sctp_init(struct pf_pdesc *pd, struct pf_state_peer *src,
1522     struct pf_state_peer *dst)
1523 {
1524 	src->scrub = uma_zalloc(V_pf_state_scrub_z, M_ZERO | M_NOWAIT);
1525 	if (src->scrub == NULL)
1526 		return (1);
1527 
1528 	dst->scrub = uma_zalloc(V_pf_state_scrub_z, M_ZERO | M_NOWAIT);
1529 	if (dst->scrub == NULL) {
1530 		uma_zfree(V_pf_state_scrub_z, src->scrub);
1531 		return (1);
1532 	}
1533 
1534 	dst->scrub->pfss_v_tag = pd->sctp_initiate_tag;
1535 
1536 	return (0);
1537 }
1538 
1539 int
1540 pf_normalize_tcp_stateful(struct pf_pdesc *pd,
1541     u_short *reason, struct tcphdr *th, struct pf_kstate *state,
1542     struct pf_state_peer *src, struct pf_state_peer *dst, int *writeback)
1543 {
1544 	struct timeval uptime;
1545 	u_int32_t tsval, tsecr;
1546 	u_int tsval_from_last;
1547 	u_int8_t hdr[60];
1548 	u_int8_t *opt;
1549 	int copyback = 0;
1550 	int got_ts = 0;
1551 	size_t startoff;
1552 
1553 	KASSERT((src->scrub || dst->scrub),
1554 	    ("%s: neither src->scrub nor dst->scrub set", __func__));
1555 
1556 	/*
1557 	 * Enforce the minimum TTL seen for this connection.  Negate a common
1558 	 * technique to evade an intrusion detection system and confuse
1559 	 * firewall state code.
1560 	 */
1561 	switch (pd->af) {
1562 #ifdef INET
1563 	case AF_INET: {
1564 		if (src->scrub) {
1565 			struct ip *h = mtod(pd->m, struct ip *);
1566 			if (h->ip_ttl > src->scrub->pfss_ttl)
1567 				src->scrub->pfss_ttl = h->ip_ttl;
1568 			h->ip_ttl = src->scrub->pfss_ttl;
1569 		}
1570 		break;
1571 	}
1572 #endif /* INET */
1573 #ifdef INET6
1574 	case AF_INET6: {
1575 		if (src->scrub) {
1576 			struct ip6_hdr *h = mtod(pd->m, struct ip6_hdr *);
1577 			if (h->ip6_hlim > src->scrub->pfss_ttl)
1578 				src->scrub->pfss_ttl = h->ip6_hlim;
1579 			h->ip6_hlim = src->scrub->pfss_ttl;
1580 		}
1581 		break;
1582 	}
1583 #endif /* INET6 */
1584 	}
1585 
1586 	if (th->th_off > (sizeof(struct tcphdr) >> 2) &&
1587 	    ((src->scrub && (src->scrub->pfss_flags & PFSS_TIMESTAMP)) ||
1588 	    (dst->scrub && (dst->scrub->pfss_flags & PFSS_TIMESTAMP))) &&
1589 	    pf_pull_hdr(pd->m, pd->off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
1590 		/* Diddle with TCP options */
1591 		int hlen;
1592 		opt = hdr + sizeof(struct tcphdr);
1593 		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
1594 		while (hlen >= TCPOLEN_TIMESTAMP) {
1595 			startoff = opt - (hdr + sizeof(struct tcphdr));
1596 			switch (*opt) {
1597 			case TCPOPT_EOL:	/* FALLTHROUGH */
1598 			case TCPOPT_NOP:
1599 				opt++;
1600 				hlen--;
1601 				break;
1602 			case TCPOPT_TIMESTAMP:
1603 				/* Modulate the timestamps.  Can be used for
1604 				 * NAT detection, OS uptime determination or
1605 				 * reboot detection.
1606 				 */
1607 
1608 				if (got_ts) {
1609 					/* Huh?  Multiple timestamps!? */
1610 					if (V_pf_status.debug >= PF_DEBUG_MISC) {
1611 						DPFPRINTF(("multiple TS??\n"));
1612 						pf_print_state(state);
1613 						printf("\n");
1614 					}
1615 					REASON_SET(reason, PFRES_TS);
1616 					return (PF_DROP);
1617 				}
1618 				if (opt[1] >= TCPOLEN_TIMESTAMP) {
1619 					memcpy(&tsval, &opt[2],
1620 					    sizeof(u_int32_t));
1621 					if (tsval && src->scrub &&
1622 					    (src->scrub->pfss_flags &
1623 					    PFSS_TIMESTAMP)) {
1624 						tsval = ntohl(tsval);
1625 						pf_patch_32_unaligned(pd->m,
1626 						    &th->th_sum,
1627 						    &opt[2],
1628 						    htonl(tsval +
1629 						    src->scrub->pfss_ts_mod),
1630 						    PF_ALGNMNT(startoff),
1631 						    0);
1632 						copyback = 1;
1633 					}
1634 
1635 					/* Modulate TS reply iff valid (!0) */
1636 					memcpy(&tsecr, &opt[6],
1637 					    sizeof(u_int32_t));
1638 					if (tsecr && dst->scrub &&
1639 					    (dst->scrub->pfss_flags &
1640 					    PFSS_TIMESTAMP)) {
1641 						tsecr = ntohl(tsecr)
1642 						    - dst->scrub->pfss_ts_mod;
1643 						pf_patch_32_unaligned(pd->m,
1644 						    &th->th_sum,
1645 						    &opt[6],
1646 						    htonl(tsecr),
1647 						    PF_ALGNMNT(startoff),
1648 						    0);
1649 						copyback = 1;
1650 					}
1651 					got_ts = 1;
1652 				}
1653 				/* FALLTHROUGH */
1654 			default:
1655 				hlen -= MAX(opt[1], 2);
1656 				opt += MAX(opt[1], 2);
1657 				break;
1658 			}
1659 		}
1660 		if (copyback) {
1661 			/* Copy back the options; the caller copies back the header */
1662 			*writeback = 1;
1663 			m_copyback(pd->m, pd->off + sizeof(struct tcphdr),
1664 			    (th->th_off << 2) - sizeof(struct tcphdr), hdr +
1665 			    sizeof(struct tcphdr));
1666 		}
1667 	}
1668 
1669 	/*
1670 	 * Must invalidate PAWS checks on connections idle for too long.
1671 	 * The fastest allowed timestamp clock is 1ms.  That turns out to
1672 	 * be about 24 days before it wraps.  XXX Right now our lowerbound
1673 	 * TS echo check only works for the first 12 days of a connection
1674 	 * when the TS has exhausted half its 32bit space
1675 	 */
1676 #define TS_MAX_IDLE	(24*24*60*60)
1677 #define TS_MAX_CONN	(12*24*60*60)	/* XXX remove when better tsecr check */
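
/*
 * Worked numbers behind the constants: at the fastest allowed timestamp
 * clock of 1ms, a 32-bit value wraps after 2^32 ms, about 49.7 days, so
 * half the space is consumed after roughly 24.8 days.  TS_MAX_IDLE is 24
 * days expressed in seconds (24*24*60*60 == 2073600), and TS_MAX_CONN is
 * the 12-day connection cap the weak lowerbound tsecr check requires until
 * a better bound exists.
 */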
1678 
1679 	getmicrouptime(&uptime);
1680 	if (src->scrub && (src->scrub->pfss_flags & PFSS_PAWS) &&
1681 	    (uptime.tv_sec - src->scrub->pfss_last.tv_sec > TS_MAX_IDLE ||
1682 	    time_uptime - (state->creation / 1000) > TS_MAX_CONN))  {
1683 		if (V_pf_status.debug >= PF_DEBUG_MISC) {
1684 			DPFPRINTF(("src idled out of PAWS\n"));
1685 			pf_print_state(state);
1686 			printf("\n");
1687 		}
1688 		src->scrub->pfss_flags = (src->scrub->pfss_flags & ~PFSS_PAWS)
1689 		    | PFSS_PAWS_IDLED;
1690 	}
1691 	if (dst->scrub && (dst->scrub->pfss_flags & PFSS_PAWS) &&
1692 	    uptime.tv_sec - dst->scrub->pfss_last.tv_sec > TS_MAX_IDLE) {
1693 		if (V_pf_status.debug >= PF_DEBUG_MISC) {
1694 			DPFPRINTF(("dst idled out of PAWS\n"));
1695 			pf_print_state(state);
1696 			printf("\n");
1697 		}
1698 		dst->scrub->pfss_flags = (dst->scrub->pfss_flags & ~PFSS_PAWS)
1699 		    | PFSS_PAWS_IDLED;
1700 	}
1701 
1702 	if (got_ts && src->scrub && dst->scrub &&
1703 	    (src->scrub->pfss_flags & PFSS_PAWS) &&
1704 	    (dst->scrub->pfss_flags & PFSS_PAWS)) {
1705 		/* Validate that the timestamps are "in-window".
1706 		 * RFC1323 describes TCP Timestamp options that allow
1707 		 * measurement of RTT (round trip time) and PAWS
1708 		 * (protection against wrapped sequence numbers).  PAWS
1709 		 * gives us a set of rules for rejecting packets on
1710 		 * long fat pipes (packets that were somehow delayed
1711 		 * in transit longer than the time it took to send the
1712 		 * full TCP sequence space of 4Gb).  We can use these
1713 		 * rules and infer a few others that will let us treat
1714 		 * the 32bit timestamp and the 32bit echoed timestamp
1715 		 * as sequence numbers to prevent a blind attacker from
1716 		 * inserting packets into a connection.
1717 		 *
1718 		 * RFC1323 tells us:
1719 		 *  - The timestamp on this packet must be greater than
1720 		 *    or equal to the last value echoed by the other
1721 		 *    endpoint.  The RFC says those will be discarded
1722 		 *    since it is a dup that has already been acked.
1723 		 *    This gives us a lowerbound on the timestamp.
1724 		 *        timestamp >= other last echoed timestamp
1725 		 *  - The timestamp will be less than or equal to
1726 		 *    the last timestamp plus the time between the
1727 		 *    last packet and now.  The RFC defines the max
1728 		 *    clock rate as 1ms.  We will allow clocks to be
1729 		 *    up to 10% fast and will allow a total difference
1730 		 *    of 30 seconds due to a route change.  And this
1731 		 *    gives us an upperbound on the timestamp.
1732 		 *        timestamp <= last timestamp + max ticks
1733 		 *    We have to be careful here.  Windows will send an
1734 		 *    initial timestamp of zero and then initialize it
1735 		 *    to a random value after the 3whs; presumably to
1736 		 *    avoid a DoS by having to call an expensive RNG
1737 		 *    during a SYN flood.  Proof MS has at least one
1738 		 *    good security geek.
1739 		 *
1740 		 *  - The TCP timestamp option must also echo the other
1741 		 *    endpoint's timestamp.  The timestamp echoed is the
1742 		 *    one carried on the earliest unacknowledged segment
1743 		 *    on the left edge of the sequence window.  The RFC
1744 		 *    states that the host will reject any echoed
1745 		 *    timestamps that were larger than any ever sent.
1746 		 *    This gives us an upperbound on the TS echo.
1747 		 *        tsecr <= largest_tsval
1748 		 *  - The lowerbound on the TS echo is a little more
1749 		 *    tricky to determine.  The other endpoint's echoed
1750 		 *    values will not decrease.  But there may be
1751 		 *    network conditions that re-order packets and
1752 		 *    cause our view of them to decrease.  For now the
1753 		 *    only lowerbound we can safely determine is that
1754 		 *    the TS echo will never be less than the original
1755 		 *    TS.  XXX There is probably a better lowerbound.
1756 		 *    Remove TS_MAX_CONN with better lowerbound check.
1757 		 *        tsecr >= other original TS
1758 		 *
1759 		 * It is also important to note that the fastest
1760 		 * timestamp clock of 1ms will wrap its 32bit space in
1761 		 * 24 days.  So we just disable TS checking after 24
1762 		 * days of idle time.  We actually must use a 12d
1763 		 * connection limit until we can come up with a better
1764 		 * lowerbound to the TS echo check.
1765 		 */
1766 		struct timeval delta_ts;
1767 		int ts_fudge;
1768 
1769 		/*
1770 		 * PFTM_TS_DIFF is how many seconds of leeway to allow
1771 		 * a host's timestamp.  This can happen if the previous
1772 		 * packet got delayed in transit for much longer than
1773 		 * this packet.
1774 		 */
1775 		if ((ts_fudge = state->rule->timeout[PFTM_TS_DIFF]) == 0)
1776 			ts_fudge = V_pf_default_rule.timeout[PFTM_TS_DIFF];
1777 
1778 		/* Calculate max ticks since the last timestamp */
1779 #define TS_MAXFREQ	1100		/* RFC max TS freq of 1Khz + 10% skew */
1780 #define TS_MICROSECS	1000000		/* microseconds per second */
1781 		delta_ts = uptime;
1782 		timevalsub(&delta_ts, &src->scrub->pfss_last);
1783 		tsval_from_last = (delta_ts.tv_sec + ts_fudge) * TS_MAXFREQ;
1784 		tsval_from_last += delta_ts.tv_usec / (TS_MICROSECS/TS_MAXFREQ);
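		/*
		 * Worked example of the bound just computed: with a 30s
		 * ts_fudge and 2.5s of idle time, delta_ts = {2, 500000} and
		 * tsval_from_last = (2 + 30) * 1100 + 500000 / (1000000/1100)
		 * = 35200 + 550 = 35750 ticks of allowed advance.
		 */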
1785 
1786 		if ((src->state >= TCPS_ESTABLISHED &&
1787 		    dst->state >= TCPS_ESTABLISHED) &&
1788 		    (SEQ_LT(tsval, dst->scrub->pfss_tsecr) ||
1789 		    SEQ_GT(tsval, src->scrub->pfss_tsval + tsval_from_last) ||
1790 		    (tsecr && (SEQ_GT(tsecr, dst->scrub->pfss_tsval) ||
1791 		    SEQ_LT(tsecr, dst->scrub->pfss_tsval0))))) {
1792 			/* Bad RFC1323 implementation or an insertion attack.
1793 			 *
1794 			 * - Solaris 2.6 and 2.7 are known to send another ACK
1795 			 *   after the FIN,FIN|ACK,ACK closing that carries
1796 			 *   an old timestamp.
1797 			 */
1798 
1799 			DPFPRINTF(("Timestamp failed %c%c%c%c\n",
1800 			    SEQ_LT(tsval, dst->scrub->pfss_tsecr) ? '0' : ' ',
1801 			    SEQ_GT(tsval, src->scrub->pfss_tsval +
1802 			    tsval_from_last) ? '1' : ' ',
1803 			    SEQ_GT(tsecr, dst->scrub->pfss_tsval) ? '2' : ' ',
1804 			    SEQ_LT(tsecr, dst->scrub->pfss_tsval0)? '3' : ' '));
1805 			DPFPRINTF((" tsval: %u  tsecr: %u  +ticks: %u  "
1806 			    "idle: %jus %lums\n",
1807 			    tsval, tsecr, tsval_from_last,
1808 			    (uintmax_t)delta_ts.tv_sec,
1809 			    delta_ts.tv_usec / 1000));
1810 			DPFPRINTF((" src->tsval: %u  tsecr: %u\n",
1811 			    src->scrub->pfss_tsval, src->scrub->pfss_tsecr));
1812 			DPFPRINTF((" dst->tsval: %u  tsecr: %u  tsval0: %u"
1813 			    "\n", dst->scrub->pfss_tsval,
1814 			    dst->scrub->pfss_tsecr, dst->scrub->pfss_tsval0));
1815 			if (V_pf_status.debug >= PF_DEBUG_MISC) {
1816 				pf_print_state(state);
1817 				pf_print_flags(tcp_get_flags(th));
1818 				printf("\n");
1819 			}
1820 			REASON_SET(reason, PFRES_TS);
1821 			return (PF_DROP);
1822 		}
1823 
1824 		/* XXX I'd really like to require tsecr but it's optional */
1825 
1826 	} else if (!got_ts && (tcp_get_flags(th) & TH_RST) == 0 &&
1827 	    ((src->state == TCPS_ESTABLISHED && dst->state == TCPS_ESTABLISHED)
1828 	    || pd->p_len > 0 || (tcp_get_flags(th) & TH_SYN)) &&
1829 	    src->scrub && dst->scrub &&
1830 	    (src->scrub->pfss_flags & PFSS_PAWS) &&
1831 	    (dst->scrub->pfss_flags & PFSS_PAWS)) {
1832 		/* Didn't send a timestamp.  Timestamps aren't really useful
1833 		 * when:
1834 		 *  - connection opening or closing (often not even sent).
1835 		 *    but we must not let an attacker put a FIN on a
1836 		 *    data packet to sneak it through our ESTABLISHED check.
1837 		 *  - on a TCP reset.  RFC suggests not even looking at TS.
1838 		 *  - on an empty ACK.  The TS will not be echoed so it will
1839 		 *    probably not help keep the RTT calculation in sync and
1840 		 *    there isn't as much danger when the sequence numbers
1841 		 *    got wrapped.  So some stacks don't include TS on empty
1842 		 *    ACKs :-(
1843 		 *
1844 		 * To minimize the disruption to mostly RFC1323 conformant
1845 		 * stacks, we will only require timestamps on data packets.
1846 		 *
1847 		 * And what do ya know, we cannot require timestamps on data
1848 		 * packets.  There appear to be devices that do legitimate
1849 		 * TCP connection hijacking.  There are HTTP devices that allow
1850 		 * a 3whs (with timestamps) and then buffer the HTTP request.
1851 		 * If the intermediate device has the HTTP response cache, it
1852 		 * will spoof the response but not bother timestamping its
1853 		 * packets.  So we can look for the presence of a timestamp in
1854 		 * the first data packet and if there, require it in all future
1855 		 * packets.
1856 		 */
1857 
1858 		if (pd->p_len > 0 && (src->scrub->pfss_flags & PFSS_DATA_TS)) {
1859 			/*
1860 			 * Hey!  Someone tried to sneak a packet in.  Or the
1861 			 * stack changed its RFC1323 behavior?!?!
1862 			 */
1863 			if (V_pf_status.debug >= PF_DEBUG_MISC) {
1864 				DPFPRINTF(("Did not receive expected RFC1323 "
1865 				    "timestamp\n"));
1866 				pf_print_state(state);
1867 				pf_print_flags(tcp_get_flags(th));
1868 				printf("\n");
1869 			}
1870 			REASON_SET(reason, PFRES_TS);
1871 			return (PF_DROP);
1872 		}
1873 	}
1874 
1875 	/*
1876 	 * We will note whether a host sends its data packets with or without
1877 	 * timestamps, and require all data packets to contain a timestamp
1878 	 * if the first does.  PAWS implicitly requires that all data packets be
1879 	 * timestamped.  But I think there are middle-man devices that hijack
1880 	 * TCP streams immediately after the 3whs and don't timestamp their
1881 	 * packets (seen in a WWW accelerator or cache).
1882 	 */
1883 	if (pd->p_len > 0 && src->scrub && (src->scrub->pfss_flags &
1884 	    (PFSS_TIMESTAMP|PFSS_DATA_TS|PFSS_DATA_NOTS)) == PFSS_TIMESTAMP) {
1885 		if (got_ts)
1886 			src->scrub->pfss_flags |= PFSS_DATA_TS;
1887 		else {
1888 			src->scrub->pfss_flags |= PFSS_DATA_NOTS;
1889 			if (V_pf_status.debug >= PF_DEBUG_MISC && dst->scrub &&
1890 			    (dst->scrub->pfss_flags & PFSS_TIMESTAMP)) {
1891 				/* Don't warn if other host rejected RFC1323 */
1892 				DPFPRINTF(("Broken RFC1323 stack did not "
1893 				    "timestamp data packet. Disabled PAWS "
1894 				    "security.\n"));
1895 				pf_print_state(state);
1896 				pf_print_flags(tcp_get_flags(th));
1897 				printf("\n");
1898 			}
1899 		}
1900 	}
1901 
1902 	/*
1903 	 * Update PAWS values
1904 	 */
1905 	if (got_ts && src->scrub && PFSS_TIMESTAMP == (src->scrub->pfss_flags &
1906 	    (PFSS_PAWS_IDLED|PFSS_TIMESTAMP))) {
1907 		getmicrouptime(&src->scrub->pfss_last);
1908 		if (SEQ_GEQ(tsval, src->scrub->pfss_tsval) ||
1909 		    (src->scrub->pfss_flags & PFSS_PAWS) == 0)
1910 			src->scrub->pfss_tsval = tsval;
1911 
1912 		if (tsecr) {
1913 			if (SEQ_GEQ(tsecr, src->scrub->pfss_tsecr) ||
1914 			    (src->scrub->pfss_flags & PFSS_PAWS) == 0)
1915 				src->scrub->pfss_tsecr = tsecr;
1916 
1917 			if ((src->scrub->pfss_flags & PFSS_PAWS) == 0 &&
1918 			    (SEQ_LT(tsval, src->scrub->pfss_tsval0) ||
1919 			    src->scrub->pfss_tsval0 == 0)) {
1920 				/* tsval0 MUST be the lowest timestamp */
1921 				src->scrub->pfss_tsval0 = tsval;
1922 			}
1923 
1924 			/* Only fully initialized after a TS gets echoed */
1925 			if ((src->scrub->pfss_flags & PFSS_PAWS) == 0)
1926 				src->scrub->pfss_flags |= PFSS_PAWS;
1927 		}
1928 	}
1929 
1930 	/* I have a dream....  TCP segment reassembly.... */
1931 	return (0);
1932 }
1933 
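/*
 * Clamp the TCP MSS option to the configured max-mss: walk the TCP options
 * and, when an MSS larger than pd->act.max_mss is found, patch the value and
 * the checksum and copy the options and header back into the mbuf.
 */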
1934 int
1935 pf_normalize_mss(struct pf_pdesc *pd)
1936 {
1937 	struct tcphdr	*th = &pd->hdr.tcp;
1938 	u_int16_t	*mss;
1939 	int		 thoff;
1940 	int		 opt, cnt, optlen = 0;
1941 	u_char		 opts[TCP_MAXOLEN];
1942 	u_char		*optp = opts;
1943 	size_t		 startoff;
1944 
1945 	thoff = th->th_off << 2;
1946 	cnt = thoff - sizeof(struct tcphdr);
1947 
1948 	if (cnt <= 0 || cnt > MAX_TCPOPTLEN || !pf_pull_hdr(pd->m,
1949 	    pd->off + sizeof(*th), opts, cnt, NULL, NULL, pd->af))
1950 		return (0);
1951 
1952 	for (; cnt > 0; cnt -= optlen, optp += optlen) {
1953 		startoff = optp - opts;
1954 		opt = optp[0];
1955 		if (opt == TCPOPT_EOL)
1956 			break;
1957 		if (opt == TCPOPT_NOP)
1958 			optlen = 1;
1959 		else {
1960 			if (cnt < 2)
1961 				break;
1962 			optlen = optp[1];
1963 			if (optlen < 2 || optlen > cnt)
1964 				break;
1965 		}
1966 		switch (opt) {
1967 		case TCPOPT_MAXSEG:
1968 			mss = (u_int16_t *)(optp + 2);
1969 			if ((ntohs(*mss)) > pd->act.max_mss) {
1970 				pf_patch_16_unaligned(pd->m,
1971 				    &th->th_sum,
1972 				    mss, htons(pd->act.max_mss),
1973 				    PF_ALGNMNT(startoff),
1974 				    0);
1975 				m_copyback(pd->m, pd->off + sizeof(*th),
1976 				    thoff - sizeof(*th), opts);
1977 				m_copyback(pd->m, pd->off, sizeof(*th), (caddr_t)th);
1978 			}
1979 			break;
1980 		default:
1981 			break;
1982 		}
1983 	}
1984 
1985 	return (0);
1986 }
1987 
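/*
 * Walk every chunk in an SCTP packet, record the chunk types seen in
 * pd->sctp_flags and sanity check INIT/INIT_ACK parameters.  Malformed
 * chunk lengths, a bad INIT or forbidden chunk bundling (e.g. DATA
 * bundled with ABORT) result in PF_DROP.
 */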
1988 int
1989 pf_scan_sctp(struct pf_pdesc *pd)
1990 {
1991 	struct sctp_chunkhdr ch = { };
1992 	int chunk_off = sizeof(struct sctphdr);
1993 	int chunk_start;
1994 	int ret;
1995 
1996 	while (pd->off + chunk_off < pd->tot_len) {
1997 		if (!pf_pull_hdr(pd->m, pd->off + chunk_off, &ch, sizeof(ch), NULL,
1998 		    NULL, pd->af))
1999 			return (PF_DROP);
2000 
2001 		/* Length includes the header, this must be at least 4. */
2002 		/* The length includes the header, so it must be at least 4. */
2003 			return (PF_DROP);
2004 
2005 		chunk_start = chunk_off;
2006 		chunk_off += roundup(ntohs(ch.chunk_length), 4);
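		/*
		 * Chunks are padded to 4 byte boundaries; e.g. a chunk_length
		 * of 17 advances chunk_off by 20.
		 */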
2007 
2008 		switch (ch.chunk_type) {
2009 		case SCTP_INITIATION:
2010 		case SCTP_INITIATION_ACK: {
2011 			struct sctp_init_chunk init;
2012 
2013 			if (!pf_pull_hdr(pd->m, pd->off + chunk_start, &init,
2014 			    sizeof(init), NULL, NULL, pd->af))
2015 				return (PF_DROP);
2016 
2017 			/*
2018 			 * RFC 9260, Section 3.3.2, "The Initiate Tag is allowed to have
2019 			 * any value except 0."
2020 			 */
2021 			if (init.init.initiate_tag == 0)
2022 				return (PF_DROP);
2023 			if (init.init.num_inbound_streams == 0)
2024 				return (PF_DROP);
2025 			if (init.init.num_outbound_streams == 0)
2026 				return (PF_DROP);
2027 			if (ntohl(init.init.a_rwnd) < SCTP_MIN_RWND)
2028 				return (PF_DROP);
2029 
2030 			/*
2031 			 * RFC 9260, Section 3.1, INIT chunks MUST have zero
2032 			 * verification tag.
2033 			 */
2034 			if (ch.chunk_type == SCTP_INITIATION &&
2035 			    pd->hdr.sctp.v_tag != 0)
2036 				return (PF_DROP);
2037 
2038 			pd->sctp_initiate_tag = init.init.initiate_tag;
2039 
2040 			if (ch.chunk_type == SCTP_INITIATION)
2041 				pd->sctp_flags |= PFDESC_SCTP_INIT;
2042 			else
2043 				pd->sctp_flags |= PFDESC_SCTP_INIT_ACK;
2044 
2045 			ret = pf_multihome_scan_init(pd->off + chunk_start,
2046 			    ntohs(init.ch.chunk_length), pd);
2047 			if (ret != PF_PASS)
2048 				return (ret);
2049 
2050 			break;
2051 		}
2052 		case SCTP_ABORT_ASSOCIATION:
2053 			pd->sctp_flags |= PFDESC_SCTP_ABORT;
2054 			break;
2055 		case SCTP_SHUTDOWN:
2056 		case SCTP_SHUTDOWN_ACK:
2057 			pd->sctp_flags |= PFDESC_SCTP_SHUTDOWN;
2058 			break;
2059 		case SCTP_SHUTDOWN_COMPLETE:
2060 			pd->sctp_flags |= PFDESC_SCTP_SHUTDOWN_COMPLETE;
2061 			break;
2062 		case SCTP_COOKIE_ECHO:
2063 			pd->sctp_flags |= PFDESC_SCTP_COOKIE;
2064 			break;
2065 		case SCTP_COOKIE_ACK:
2066 			pd->sctp_flags |= PFDESC_SCTP_COOKIE_ACK;
2067 			break;
2068 		case SCTP_DATA:
2069 			pd->sctp_flags |= PFDESC_SCTP_DATA;
2070 			break;
2071 		case SCTP_HEARTBEAT_REQUEST:
2072 			pd->sctp_flags |= PFDESC_SCTP_HEARTBEAT;
2073 			break;
2074 		case SCTP_HEARTBEAT_ACK:
2075 			pd->sctp_flags |= PFDESC_SCTP_HEARTBEAT_ACK;
2076 			break;
2077 		case SCTP_ASCONF:
2078 			pd->sctp_flags |= PFDESC_SCTP_ASCONF;
2079 
2080 			ret = pf_multihome_scan_asconf(pd->off + chunk_start,
2081 			    ntohs(ch.chunk_length), pd);
2082 			if (ret != PF_PASS)
2083 				return (ret);
2084 			break;
2085 		default:
2086 			pd->sctp_flags |= PFDESC_SCTP_OTHER;
2087 			break;
2088 		}
2089 	}
2090 
2091 	/* Validate chunk lengths vs. packet length. */
2092 	if (pd->off + chunk_off != pd->tot_len)
2093 		return (PF_DROP);
2094 
2095 	/*
2096 	 * INIT, INIT_ACK or SHUTDOWN_COMPLETE chunks must always be the only
2097 	 * one in a packet.
2098 	 */
2099 	if ((pd->sctp_flags & PFDESC_SCTP_INIT) &&
2100 	    (pd->sctp_flags & ~PFDESC_SCTP_INIT))
2101 		return (PF_DROP);
2102 	if ((pd->sctp_flags & PFDESC_SCTP_INIT_ACK) &&
2103 	    (pd->sctp_flags & ~PFDESC_SCTP_INIT_ACK))
2104 		return (PF_DROP);
2105 	if ((pd->sctp_flags & PFDESC_SCTP_SHUTDOWN_COMPLETE) &&
2106 	    (pd->sctp_flags & ~PFDESC_SCTP_SHUTDOWN_COMPLETE))
2107 		return (PF_DROP);
2108 	if ((pd->sctp_flags & PFDESC_SCTP_ABORT) &&
2109 	    (pd->sctp_flags & PFDESC_SCTP_DATA)) {
2110 		/*
2111 		 * RFC4960 3.3.7: DATA chunks MUST NOT be
2112 		 * bundled with ABORT.
2113 		 */
2114 		return (PF_DROP);
2115 	}
2116 
2117 	return (PF_PASS);
2118 }
2119 
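/*
 * Apply scrub rules to an SCTP packet.  Without any scrub rules loaded,
 * normalization is enforced unconditionally; with rules present it only
 * happens for a matching rule that is not "no scrub".
 */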
2120 int
2121 pf_normalize_sctp(struct pf_pdesc *pd)
2122 {
2123 	struct pf_krule	*r, *rm = NULL;
2124 	struct sctphdr	*sh = &pd->hdr.sctp;
2125 	u_short		 reason;
2126 	sa_family_t	 af = pd->af;
2127 	int		 srs;
2128 
2129 	PF_RULES_RASSERT();
2130 
2131 	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
2132 	/* Check if there are any scrub rules. Lack of scrub rules means that
2133 	 * packet normalization is enforced, just like in OpenBSD. */
2134 	srs = (r != NULL);
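	/*
	 * Rule evaluation uses pf's skip steps: on a mismatch, jump straight
	 * to the next rule that differs in the mismatching field instead of
	 * evaluating every rule in sequence.
	 */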
2135 	while (r != NULL) {
2136 		pf_counter_u64_add(&r->evaluations, 1);
2137 		if (pfi_kkif_match(r->kif, pd->kif) == r->ifnot)
2138 			r = r->skip[PF_SKIP_IFP];
2139 		else if (r->direction && r->direction != pd->dir)
2140 			r = r->skip[PF_SKIP_DIR];
2141 		else if (r->af && r->af != af)
2142 			r = r->skip[PF_SKIP_AF];
2143 		else if (r->proto && r->proto != pd->proto)
2144 			r = r->skip[PF_SKIP_PROTO];
2145 		else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
2146 		    r->src.neg, pd->kif, M_GETFIB(pd->m)))
2147 			r = r->skip[PF_SKIP_SRC_ADDR];
2148 		else if (r->src.port_op && !pf_match_port(r->src.port_op,
2149 			    r->src.port[0], r->src.port[1], sh->src_port))
2150 			r = r->skip[PF_SKIP_SRC_PORT];
2151 		else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
2152 		    r->dst.neg, NULL, M_GETFIB(pd->m)))
2153 			r = r->skip[PF_SKIP_DST_ADDR];
2154 		else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
2155 			    r->dst.port[0], r->dst.port[1], sh->dest_port))
2156 			r = r->skip[PF_SKIP_DST_PORT];
2157 		else {
2158 			rm = r;
2159 			break;
2160 		}
2161 	}
2162 
2163 	if (srs) {
2164 		/* With scrub rules present, SCTP normalization happens only if
2165 		 * one of the rules has matched and it is not a "no scrub" rule. */
2166 		if (rm == NULL || rm->action == PF_NOSCRUB)
2167 			return (PF_PASS);
2168 
2169 		pf_counter_u64_critical_enter();
2170 		pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1);
2171 		pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT], pd->tot_len);
2172 		pf_counter_u64_critical_exit();
2173 	}
2174 
2175 	/* Verify the chunk data is a multiple of 4 bytes long */
2176 	if ((pd->tot_len - pd->off - sizeof(struct sctphdr)) % 4)
2177 		goto sctp_drop;
2178 
2179 	/* INIT chunk needs to be the only chunk */
2180 	if (pd->sctp_flags & PFDESC_SCTP_INIT)
2181 		if (pd->sctp_flags & ~PFDESC_SCTP_INIT)
2182 			goto sctp_drop;
2183 
2184 	return (PF_PASS);
2185 
2186 sctp_drop:
2187 	REASON_SET(&reason, PFRES_NORM);
2188 	if (rm != NULL && r->log)
2189 		PFLOG_PACKET(PF_DROP, reason, r, NULL, NULL, pd,
2190 		    1, NULL);
2191 
2192 	return (PF_DROP);
2193 }
2194 
2195 #if defined(INET) || defined(INET6)
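/*
 * Apply per-packet normalizations requested by the matching rule: clear
 * IP_DF (no-df), enforce a minimum TTL, set the TOS/traffic class and
 * randomize the IPv4 ID.
 */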
2196 void
2197 pf_scrub(struct pf_pdesc *pd)
2198 {
2199 
2200 	struct ip		*h = mtod(pd->m, struct ip *);
2201 #ifdef INET6
2202 	struct ip6_hdr		*h6 = mtod(pd->m, struct ip6_hdr *);
2203 #endif
2204 
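	/*
	 * The pf_cksum_fixup() calls below incrementally adjust the IPv4
	 * header checksum for each modified 16bit word instead of
	 * recomputing it from scratch.
	 */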
2205 	/* Clear IP_DF if no-df was requested */
2206 	if (pd->af == AF_INET && pd->act.flags & PFSTATE_NODF &&
2207 	    h->ip_off & htons(IP_DF))
2208 	{
2209 		u_int16_t ip_off = h->ip_off;
2210 
2211 		h->ip_off &= htons(~IP_DF);
2212 		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
2213 	}
2214 
2215 	/* Enforce a minimum TTL; this may cause endless packet loops */
2216 	if (pd->af == AF_INET && pd->act.min_ttl &&
2217 	    h->ip_ttl < pd->act.min_ttl) {
2218 		u_int16_t ip_ttl = h->ip_ttl;
2219 
2220 		h->ip_ttl = pd->act.min_ttl;
2221 		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_ttl, h->ip_ttl, 0);
2222 	}
2223 #ifdef INET6
2224 	/* Enforce a minimum TTL; this may cause endless packet loops */
2225 	if (pd->af == AF_INET6 && pd->act.min_ttl &&
2226 	    h6->ip6_hlim < pd->act.min_ttl)
2227 		h6->ip6_hlim = pd->act.min_ttl;
2228 #endif
2229 	/* Enforce tos */
2230 	if (pd->act.flags & PFSTATE_SETTOS) {
2231 		switch (pd->af) {
2232 		case AF_INET: {
2233 			u_int16_t	ov, nv;
2234 
2235 			ov = *(u_int16_t *)h;
2236 			h->ip_tos = pd->act.set_tos | (h->ip_tos & IPTOS_ECN_MASK);
2237 			nv = *(u_int16_t *)h;
2238 
2239 			h->ip_sum = pf_cksum_fixup(h->ip_sum, ov, nv, 0);
2240 			break;
2241 		}
2242 #ifdef INET6
2243 		case AF_INET6:
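			/*
			 * ip6_flow holds version (4 bits), traffic class
			 * (8 bits) and flow label (20 bits): keep version and
			 * flow label, then shift the new TOS plus the
			 * preserved ECN bits up past the 20 bit flow label.
			 */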
2244 			h6->ip6_flow &= IPV6_FLOWLABEL_MASK | IPV6_VERSION_MASK;
2245 			h6->ip6_flow |= htonl((pd->act.set_tos | IPV6_ECN(h6)) << 20);
2246 			break;
2247 #endif
2248 		}
2249 	}
2250 
2251 	/* random-id, but not for fragments */
2252 #ifdef INET
2253 	if (pd->af == AF_INET &&
2254 	    pd->act.flags & PFSTATE_RANDOMID && !(h->ip_off & ~htons(IP_DF))) {
2255 		uint16_t ip_id = h->ip_id;
2256 
2257 		ip_fillid(h, V_ip_random_id);
2258 		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_id, h->ip_id, 0);
2259 	}
2260 #endif
2261 }
2262 #endif
2263