/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright 2001 Niels Provos <provos@citi.umich.edu>
 * Copyright 2011-2018 Alexander Bluhm <bluhm@openbsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *	$OpenBSD: pf_norm.c,v 1.114 2009/01/29 14:11:45 henning Exp $
 */

#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_pf.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/refcount.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_private.h>
#include <net/vnet.h>
#include <net/pfvar.h>
#include <net/if_pflog.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet6/in6_var.h>
#include <netinet6/nd6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/scope6_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/sctp_constants.h>
#include <netinet/sctp_header.h>

#ifdef INET6
#include <netinet/ip6.h>
#endif /* INET6 */

struct pf_frent {
	TAILQ_ENTRY(pf_frent)	fr_next;
	struct mbuf	*fe_m;
	uint16_t	fe_hdrlen;	/* ipv4 header length with ip options
					   ipv6, extension, fragment header */
	uint16_t	fe_extoff;	/* last extension header offset or 0 */
	uint16_t	fe_len;		/* fragment length */
	uint16_t	fe_off;		/* fragment offset */
	uint16_t	fe_mff;		/* more fragment flag */
};

RB_HEAD(pf_frag_tree, pf_fragment);
struct pf_frnode {
	struct pf_addr	fn_src;		/* ip source address */
	struct pf_addr	fn_dst;		/* ip destination address */
	sa_family_t	fn_af;		/* address family */
	u_int8_t	fn_proto;	/* protocol for fragments in fn_tree */
	u_int32_t	fn_fragments;	/* number of entries in fn_tree */

	RB_ENTRY(pf_frnode) fn_entry;
	struct pf_frag_tree fn_tree;	/* matching fragments, lookup by id */
};

struct pf_fragment {
	uint32_t	fr_id;		/* fragment id for reassemble */

	/* pointers to queue element */
	struct pf_frent	*fr_firstoff[PF_FRAG_ENTRY_POINTS];
	/* count entries between pointers */
	uint8_t		fr_entries[PF_FRAG_ENTRY_POINTS];
	RB_ENTRY(pf_fragment) fr_entry;
	TAILQ_ENTRY(pf_fragment) frag_next;
	uint32_t	fr_timeout;
	TAILQ_HEAD(pf_fragq, pf_frent) fr_queue;
	uint16_t	fr_maxlen;	/* maximum length of single fragment */
	u_int16_t	fr_holes;	/* number of holes in the queue */
	struct pf_frnode *fr_node;	/* ip src/dst/proto/af for fragments */
};
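
/*
 * Fragment state is kept at two levels: a pf_frnode is looked up by
 * src/dst address, address family and protocol in a global red-black
 * tree, and each node carries its own tree of pf_fragment reassembly
 * queues keyed by fragment id.  Within a pf_fragment the entries sit
 * in fr_queue sorted by offset.
 */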

VNET_DEFINE_STATIC(struct mtx, pf_frag_mtx);
#define	V_pf_frag_mtx		VNET(pf_frag_mtx)
#define	PF_FRAG_LOCK()		mtx_lock(&V_pf_frag_mtx)
#define	PF_FRAG_UNLOCK()	mtx_unlock(&V_pf_frag_mtx)
#define	PF_FRAG_ASSERT()	mtx_assert(&V_pf_frag_mtx, MA_OWNED)

VNET_DEFINE(uma_zone_t, pf_state_scrub_z);	/* XXX: shared with pfsync */

VNET_DEFINE_STATIC(uma_zone_t, pf_frent_z);
#define	V_pf_frent_z	VNET(pf_frent_z)
VNET_DEFINE_STATIC(uma_zone_t, pf_frnode_z);
#define	V_pf_frnode_z	VNET(pf_frnode_z)
VNET_DEFINE_STATIC(uma_zone_t, pf_frag_z);
#define	V_pf_frag_z	VNET(pf_frag_z)

TAILQ_HEAD(pf_fragqueue, pf_fragment);
TAILQ_HEAD(pf_cachequeue, pf_fragment);
RB_HEAD(pf_frnode_tree, pf_frnode);
VNET_DEFINE_STATIC(struct pf_fragqueue, pf_fragqueue);
#define	V_pf_fragqueue	VNET(pf_fragqueue)
static __inline int	pf_frnode_compare(struct pf_frnode *,
			    struct pf_frnode *);
VNET_DEFINE_STATIC(struct pf_frnode_tree, pf_frnode_tree);
#define	V_pf_frnode_tree	VNET(pf_frnode_tree)
RB_PROTOTYPE(pf_frnode_tree, pf_frnode, fn_entry, pf_frnode_compare);
RB_GENERATE(pf_frnode_tree, pf_frnode, fn_entry, pf_frnode_compare);

static int		pf_frag_compare(struct pf_fragment *,
			    struct pf_fragment *);
static RB_PROTOTYPE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
static RB_GENERATE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);

static void	pf_flush_fragments(void);
static void	pf_free_fragment(struct pf_fragment *);

static struct pf_frent *pf_create_fragment(u_short *);
static int	pf_frent_holes(struct pf_frent *frent);
static struct pf_fragment *pf_find_fragment(struct pf_frnode *, u_int32_t);
static inline int	pf_frent_index(struct pf_frent *);
static int	pf_frent_insert(struct pf_fragment *,
		    struct pf_frent *, struct pf_frent *);
void		pf_frent_remove(struct pf_fragment *,
		    struct pf_frent *);
struct pf_frent *pf_frent_previous(struct pf_fragment *,
		    struct pf_frent *);
static struct pf_fragment *pf_fillup_fragment(struct pf_frnode *, u_int32_t,
		    struct pf_frent *, u_short *);
static struct mbuf *pf_join_fragment(struct pf_fragment *);
#ifdef INET
static int	pf_reassemble(struct mbuf **, u_short *);
#endif	/* INET */
#ifdef INET6
static int	pf_reassemble6(struct mbuf **,
		    struct ip6_frag *, uint16_t, uint16_t, u_short *);
#endif	/* INET6 */

#ifdef INET
static void
pf_ip2key(struct ip *ip, struct pf_frnode *key)
{

	key->fn_src.v4 = ip->ip_src;
	key->fn_dst.v4 = ip->ip_dst;
	key->fn_af = AF_INET;
	key->fn_proto = ip->ip_p;
}
#endif /* INET */

void
pf_normalize_init(void)
{

	V_pf_frag_z = uma_zcreate("pf frags", sizeof(struct pf_fragment),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	V_pf_frnode_z = uma_zcreate("pf fragment node",
	    sizeof(struct pf_frnode), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	V_pf_frent_z = uma_zcreate("pf frag entries", sizeof(struct pf_frent),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	V_pf_state_scrub_z = uma_zcreate("pf state scrubs",
	    sizeof(struct pf_state_scrub), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);

	mtx_init(&V_pf_frag_mtx, "pf fragments", NULL, MTX_DEF);

	V_pf_limits[PF_LIMIT_FRAGS].zone = V_pf_frent_z;
	V_pf_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT;
	uma_zone_set_max(V_pf_frent_z, PFFRAG_FRENT_HIWAT);
	uma_zone_set_warning(V_pf_frent_z, "PF frag entries limit reached");

	TAILQ_INIT(&V_pf_fragqueue);
}

void
pf_normalize_cleanup(void)
{

	uma_zdestroy(V_pf_state_scrub_z);
	uma_zdestroy(V_pf_frent_z);
	uma_zdestroy(V_pf_frnode_z);
	uma_zdestroy(V_pf_frag_z);

	mtx_destroy(&V_pf_frag_mtx);
}

static int
pf_frnode_compare(struct pf_frnode *a, struct pf_frnode *b)
{
	int	diff;

	if ((diff = a->fn_proto - b->fn_proto) != 0)
		return (diff);
	if ((diff = a->fn_af - b->fn_af) != 0)
		return (diff);
	if ((diff = pf_addr_cmp(&a->fn_src, &b->fn_src, a->fn_af)) != 0)
		return (diff);
	if ((diff = pf_addr_cmp(&a->fn_dst, &b->fn_dst, a->fn_af)) != 0)
		return (diff);
	return (0);
}

static __inline int
pf_frag_compare(struct pf_fragment *a, struct pf_fragment *b)
{
	int	diff;

	if ((diff = a->fr_id - b->fr_id) != 0)
		return (diff);

	return (0);
}

void
pf_purge_expired_fragments(void)
{
	u_int32_t	expire = time_uptime -
			    V_pf_default_rule.timeout[PFTM_FRAG];

	pf_purge_fragments(expire);
}

void
pf_purge_fragments(uint32_t expire)
{
	struct pf_fragment	*frag;

	PF_FRAG_LOCK();
	while ((frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue)) != NULL) {
		if (frag->fr_timeout > expire)
			break;

		DPFPRINTF(PF_DEBUG_MISC, "expiring %d(%p)",
		    frag->fr_id, frag);
		pf_free_fragment(frag);
	}

	PF_FRAG_UNLOCK();
}

/*
 * Try to flush old fragments to make space for new ones
 */
static void
pf_flush_fragments(void)
{
	struct pf_fragment	*frag;
	int			 goal;

	PF_FRAG_ASSERT();

	goal = uma_zone_get_cur(V_pf_frent_z) * 9 / 10;
	DPFPRINTF(PF_DEBUG_MISC, "trying to free %d frag entries", goal);
	while (goal < uma_zone_get_cur(V_pf_frent_z)) {
		frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue);
		if (frag)
			pf_free_fragment(frag);
		else
			break;
	}
}

/*
 * Remove a fragment from the fragment queue, free its fragment entries,
 * and free the fragment itself.
 */
static void
pf_free_fragment(struct pf_fragment *frag)
{
	struct pf_frent		*frent;
	struct pf_frnode	*frnode;

	PF_FRAG_ASSERT();

	frnode = frag->fr_node;
	RB_REMOVE(pf_frag_tree, &frnode->fn_tree, frag);
	MPASS(frnode->fn_fragments >= 1);
	frnode->fn_fragments--;
	if (frnode->fn_fragments == 0) {
		MPASS(RB_EMPTY(&frnode->fn_tree));
		RB_REMOVE(pf_frnode_tree, &V_pf_frnode_tree, frnode);
		uma_zfree(V_pf_frnode_z, frnode);
	}

	TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);

	/* Free all fragment entries */
	while ((frent = TAILQ_FIRST(&frag->fr_queue)) != NULL) {
		TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);

		m_freem(frent->fe_m);
		uma_zfree(V_pf_frent_z, frent);
	}

	uma_zfree(V_pf_frag_z, frag);
}

static struct pf_fragment *
pf_find_fragment(struct pf_frnode *key, uint32_t id)
{
	struct pf_fragment	*frag, idkey;
	struct pf_frnode	*frnode;

	PF_FRAG_ASSERT();

	frnode = RB_FIND(pf_frnode_tree, &V_pf_frnode_tree, key);
	if (frnode == NULL)
		return (NULL);
	MPASS(frnode->fn_fragments >= 1);
	idkey.fr_id = id;
	frag = RB_FIND(pf_frag_tree, &frnode->fn_tree, &idkey);
	if (frag == NULL)
		return (NULL);
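	/*
	 * Move the fragment to the head of the global queue; the queue
	 * is kept in LRU order so that pf_purge_fragments() can expire
	 * stale entries from the tail.
	 */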
	TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);
	TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next);

	return (frag);
}

static struct pf_frent *
pf_create_fragment(u_short *reason)
{
	struct pf_frent *frent;

	PF_FRAG_ASSERT();

	frent = uma_zalloc(V_pf_frent_z, M_NOWAIT);
	if (frent == NULL) {
		pf_flush_fragments();
		frent = uma_zalloc(V_pf_frent_z, M_NOWAIT);
		if (frent == NULL) {
			REASON_SET(reason, PFRES_MEMORY);
			return (NULL);
		}
	}

	return (frent);
}

/*
 * Calculate the additional holes that were created in the fragment
 * queue by inserting this fragment.  A fragment in the middle
 * creates one more hole by splitting.  For each connected side,
 * it loses one hole.
 * Fragment entry must be in the queue when calling this function.
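 *
 * Example: if the queue holds [0,8) and [16,24), both with MFF set,
 * there are two holes.  Inserting [8,16) with MFF set connects both
 * sides, so this function returns 1 - 1 - 1 = -1 and fr_holes drops
 * from 2 to 1.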
 */
static int
pf_frent_holes(struct pf_frent *frent)
{
	struct pf_frent *prev = TAILQ_PREV(frent, pf_fragq, fr_next);
	struct pf_frent *next = TAILQ_NEXT(frent, fr_next);
	int holes = 1;

	if (prev == NULL) {
		if (frent->fe_off == 0)
			holes--;
	} else {
		KASSERT(frent->fe_off != 0, ("frent->fe_off != 0"));
		if (frent->fe_off == prev->fe_off + prev->fe_len)
			holes--;
	}
	if (next == NULL) {
		if (!frent->fe_mff)
			holes--;
	} else {
		KASSERT(frent->fe_mff, ("frent->fe_mff"));
		if (next->fe_off == frent->fe_off + frent->fe_len)
			holes--;
	}
	return holes;
}

static inline int
pf_frent_index(struct pf_frent *frent)
{
	/*
	 * We have an array of 16 entry points to the queue.  A full size
	 * 65535 octet IP packet can have 8192 fragments.  So the queue
	 * traversal length is at most 512 and at most 16 entry points are
	 * checked.  We need 128 additional bytes on a 64 bit architecture.
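	 *
	 * For example, with PF_FRAG_ENTRY_POINTS == 16 each entry point
	 * spans 0x1000 octets, so a fragment at offset 0x2345 belongs to
	 * entry point 2.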
	 */
	CTASSERT(((u_int16_t)0xffff &~ 7) / (0x10000 / PF_FRAG_ENTRY_POINTS) ==
	    16 - 1);
	CTASSERT(((u_int16_t)0xffff >> 3) / PF_FRAG_ENTRY_POINTS == 512 - 1);

	return frent->fe_off / (0x10000 / PF_FRAG_ENTRY_POINTS);
}

static int
pf_frent_insert(struct pf_fragment *frag, struct pf_frent *frent,
    struct pf_frent *prev)
{
	int index;

	CTASSERT(PF_FRAG_ENTRY_LIMIT <= 0xff);

	/*
	 * A packet has at most 65536 octets.  With 16 entry points, each one
	 * spans 4096 octets.  We limit these to 64 fragments each, which
	 * means on average every fragment must have at least 64 octets.
	 */
	index = pf_frent_index(frent);
	if (frag->fr_entries[index] >= PF_FRAG_ENTRY_LIMIT)
		return ENOBUFS;
	frag->fr_entries[index]++;

	if (prev == NULL) {
		TAILQ_INSERT_HEAD(&frag->fr_queue, frent, fr_next);
	} else {
		KASSERT(prev->fe_off + prev->fe_len <= frent->fe_off,
		    ("overlapping fragment"));
		TAILQ_INSERT_AFTER(&frag->fr_queue, prev, frent, fr_next);
	}

	if (frag->fr_firstoff[index] == NULL) {
		KASSERT(prev == NULL || pf_frent_index(prev) < index,
		    ("prev == NULL || pf_frent_index(prev) < index"));
		frag->fr_firstoff[index] = frent;
	} else {
		if (frent->fe_off < frag->fr_firstoff[index]->fe_off) {
			KASSERT(prev == NULL || pf_frent_index(prev) < index,
			    ("prev == NULL || pf_frent_index(prev) < index"));
			frag->fr_firstoff[index] = frent;
		} else {
			KASSERT(prev != NULL, ("prev != NULL"));
			KASSERT(pf_frent_index(prev) == index,
			    ("pf_frent_index(prev) == index"));
		}
	}

	frag->fr_holes += pf_frent_holes(frent);

	return 0;
}

void
pf_frent_remove(struct pf_fragment *frag, struct pf_frent *frent)
{
#ifdef INVARIANTS
	struct pf_frent *prev = TAILQ_PREV(frent, pf_fragq, fr_next);
#endif /* INVARIANTS */
	struct pf_frent *next = TAILQ_NEXT(frent, fr_next);
	int index;

	frag->fr_holes -= pf_frent_holes(frent);

	index = pf_frent_index(frent);
	KASSERT(frag->fr_firstoff[index] != NULL, ("frent not found"));
	if (frag->fr_firstoff[index]->fe_off == frent->fe_off) {
		if (next == NULL) {
			frag->fr_firstoff[index] = NULL;
		} else {
			KASSERT(frent->fe_off + frent->fe_len <= next->fe_off,
			    ("overlapping fragment"));
			if (pf_frent_index(next) == index) {
				frag->fr_firstoff[index] = next;
			} else {
				frag->fr_firstoff[index] = NULL;
			}
		}
	} else {
		KASSERT(frag->fr_firstoff[index]->fe_off < frent->fe_off,
		    ("frag->fr_firstoff[index]->fe_off < frent->fe_off"));
		KASSERT(prev != NULL, ("prev != NULL"));
		KASSERT(prev->fe_off + prev->fe_len <= frent->fe_off,
		    ("overlapping fragment"));
		KASSERT(pf_frent_index(prev) == index,
		    ("pf_frent_index(prev) == index"));
	}

	TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);

	KASSERT(frag->fr_entries[index] > 0, ("No fragments remaining"));
	frag->fr_entries[index]--;
}

struct pf_frent *
pf_frent_previous(struct pf_fragment *frag, struct pf_frent *frent)
{
	struct pf_frent *prev, *next;
	int index;

	/*
	 * If there are no fragments after frent, take the final one.  Assume
	 * that the global queue is not empty.
	 */
	prev = TAILQ_LAST(&frag->fr_queue, pf_fragq);
	KASSERT(prev != NULL, ("prev != NULL"));
	if (prev->fe_off <= frent->fe_off)
		return prev;
	/*
	 * We want to find a fragment entry that is before frent, but still
	 * close to it.  Find the first fragment entry that is in the same
	 * entry point or in the first entry point after that.  As we have
	 * already checked that there are entries behind frent, this will
	 * succeed.
	 */
	for (index = pf_frent_index(frent); index < PF_FRAG_ENTRY_POINTS;
	    index++) {
		prev = frag->fr_firstoff[index];
		if (prev != NULL)
			break;
	}
	KASSERT(prev != NULL, ("prev != NULL"));
	/*
	 * In prev we may have a fragment from the same entry point that is
	 * before frent, or one that is just one position behind frent.
	 * In the latter case, we go back one step and have the predecessor.
	 * There may be none if the new fragment will be the first one.
	 */
	if (prev->fe_off > frent->fe_off) {
		prev = TAILQ_PREV(prev, pf_fragq, fr_next);
		if (prev == NULL)
			return NULL;
		KASSERT(prev->fe_off <= frent->fe_off,
		    ("prev->fe_off <= frent->fe_off"));
		return prev;
	}
	/*
	 * In prev is the first fragment of the entry point.  The offset
	 * of frent is behind it.  Find the closest previous fragment.
	 */
	for (next = TAILQ_NEXT(prev, fr_next); next != NULL;
	    next = TAILQ_NEXT(next, fr_next)) {
		if (next->fe_off > frent->fe_off)
			break;
		prev = next;
	}
	return prev;
}

static struct pf_fragment *
pf_fillup_fragment(struct pf_frnode *key, uint32_t id,
    struct pf_frent *frent, u_short *reason)
{
	struct pf_frent		*after, *next, *prev;
	struct pf_fragment	*frag;
	struct pf_frnode	*frnode;
	uint16_t		 total;

	PF_FRAG_ASSERT();

	/* No empty fragments. */
	if (frent->fe_len == 0) {
		DPFPRINTF(PF_DEBUG_MISC, "bad fragment: len 0");
		goto bad_fragment;
	}

	/* All fragments are 8 byte aligned. */
	if (frent->fe_mff && (frent->fe_len & 0x7)) {
		DPFPRINTF(PF_DEBUG_MISC, "bad fragment: mff and len %d",
		    frent->fe_len);
		goto bad_fragment;
	}

	/* Respect maximum length, IP_MAXPACKET == IPV6_MAXPACKET. */
	if (frent->fe_off + frent->fe_len > IP_MAXPACKET) {
		DPFPRINTF(PF_DEBUG_MISC, "bad fragment: max packet %d",
		    frent->fe_off + frent->fe_len);
		goto bad_fragment;
	}

	if (key->fn_af == AF_INET)
		DPFPRINTF(PF_DEBUG_MISC, "reass frag %d @ %d-%d",
		    id, frent->fe_off, frent->fe_off + frent->fe_len);
	else
		DPFPRINTF(PF_DEBUG_MISC, "reass frag %#08x @ %d-%d",
		    id, frent->fe_off, frent->fe_off + frent->fe_len);

	/* Fully buffer all of the fragments in this fragment queue. */
	frag = pf_find_fragment(key, id);

	/* Create a new reassembly queue for this packet. */
	if (frag == NULL) {
		frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
		if (frag == NULL) {
			pf_flush_fragments();
			frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
			if (frag == NULL) {
				REASON_SET(reason, PFRES_MEMORY);
				goto drop_fragment;
			}
		}

		frnode = RB_FIND(pf_frnode_tree, &V_pf_frnode_tree, key);
		if (frnode == NULL) {
			frnode = uma_zalloc(V_pf_frnode_z, M_NOWAIT);
			if (frnode == NULL) {
				pf_flush_fragments();
				frnode = uma_zalloc(V_pf_frnode_z, M_NOWAIT);
				if (frnode == NULL) {
					REASON_SET(reason, PFRES_MEMORY);
					uma_zfree(V_pf_frag_z, frag);
					goto drop_fragment;
				}
			}
			*frnode = *key;
			RB_INIT(&frnode->fn_tree);
			frnode->fn_fragments = 0;
		}
		memset(frag->fr_firstoff, 0, sizeof(frag->fr_firstoff));
		memset(frag->fr_entries, 0, sizeof(frag->fr_entries));
		frag->fr_timeout = time_uptime;
		TAILQ_INIT(&frag->fr_queue);
		frag->fr_maxlen = frent->fe_len;
		frag->fr_holes = 1;

		frag->fr_id = id;
		frag->fr_node = frnode;
		/* RB_INSERT cannot fail as pf_find_fragment() found nothing */
		RB_INSERT(pf_frag_tree, &frnode->fn_tree, frag);
		frnode->fn_fragments++;
		if (frnode->fn_fragments == 1)
			RB_INSERT(pf_frnode_tree, &V_pf_frnode_tree, frnode);

		TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next);

		/* We do not have a previous fragment, cannot fail. */
		pf_frent_insert(frag, frent, NULL);

		return (frag);
	}

	KASSERT(!TAILQ_EMPTY(&frag->fr_queue),
	    ("!TAILQ_EMPTY(&frag->fr_queue)"));
	MPASS(frag->fr_node);

	/* Remember maximum fragment len for refragmentation. */
	if (frent->fe_len > frag->fr_maxlen)
		frag->fr_maxlen = frent->fe_len;

	/* Maximum data we have seen already. */
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
	    TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;

	/* Non-terminal fragments must have the more-fragments flag set. */
	if (frent->fe_off + frent->fe_len < total && !frent->fe_mff)
		goto free_ipv6_fragment;

	/* Check if we saw the last fragment already. */
	if (!TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_mff) {
		if (frent->fe_off + frent->fe_len > total ||
		    (frent->fe_off + frent->fe_len == total && frent->fe_mff))
			goto free_ipv6_fragment;
	} else {
		if (frent->fe_off + frent->fe_len == total && !frent->fe_mff)
			goto free_ipv6_fragment;
	}

	/* Find neighbors for newly inserted fragment */
	prev = pf_frent_previous(frag, frent);
	if (prev == NULL) {
		after = TAILQ_FIRST(&frag->fr_queue);
		KASSERT(after != NULL, ("after != NULL"));
	} else {
		after = TAILQ_NEXT(prev, fr_next);
	}

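	/*
	 * Handle overlaps with the preceding fragment: for IPv4 the head
	 * of the new fragment is trimmed, while any IPv6 overlap discards
	 * the whole reassembly queue (see free_fragment below).
	 */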
	if (prev != NULL && prev->fe_off + prev->fe_len > frent->fe_off) {
		uint16_t precut;

		if (frag->fr_node->fn_af == AF_INET6)
			goto free_fragment;

		precut = prev->fe_off + prev->fe_len - frent->fe_off;
		if (precut >= frent->fe_len) {
			DPFPRINTF(PF_DEBUG_MISC, "new frag overlapped");
			goto drop_fragment;
		}
		DPFPRINTF(PF_DEBUG_MISC, "frag head overlap %d", precut);
		m_adj(frent->fe_m, precut);
		frent->fe_off += precut;
		frent->fe_len -= precut;
	}

	for (; after != NULL && frent->fe_off + frent->fe_len > after->fe_off;
	    after = next) {
		uint16_t aftercut;

		aftercut = frent->fe_off + frent->fe_len - after->fe_off;
		if (aftercut < after->fe_len) {
			DPFPRINTF(PF_DEBUG_MISC, "frag tail overlap %d",
			    aftercut);
			m_adj(after->fe_m, aftercut);
			/* Fragment may switch queue as fe_off changes */
			pf_frent_remove(frag, after);
			after->fe_off += aftercut;
			after->fe_len -= aftercut;
			/* Insert into correct queue */
			if (pf_frent_insert(frag, after, prev)) {
				DPFPRINTF(PF_DEBUG_MISC,
				    "fragment requeue limit exceeded");
				m_freem(after->fe_m);
				uma_zfree(V_pf_frent_z, after);
				/* There is no way to recover */
				goto free_fragment;
			}
			break;
		}

		/* This fragment is completely overlapped, lose it. */
		DPFPRINTF(PF_DEBUG_MISC, "old frag overlapped");
		next = TAILQ_NEXT(after, fr_next);
		pf_frent_remove(frag, after);
		m_freem(after->fe_m);
		uma_zfree(V_pf_frent_z, after);
	}

	/* If part of the queue gets too long, there is no way to recover. */
	if (pf_frent_insert(frag, frent, prev)) {
		DPFPRINTF(PF_DEBUG_MISC, "fragment queue limit exceeded");
		goto bad_fragment;
	}

	return (frag);

free_ipv6_fragment:
	if (frag->fr_node->fn_af == AF_INET)
		goto bad_fragment;
free_fragment:
	/*
	 * RFC 5722, Errata 3089: When reassembling an IPv6 datagram, if one
	 * or more of its constituent fragments is determined to be an
	 * overlapping fragment, the entire datagram (and any constituent
	 * fragments) MUST be silently discarded.
	 */
	DPFPRINTF(PF_DEBUG_MISC, "flush overlapping fragments");
	pf_free_fragment(frag);

bad_fragment:
	REASON_SET(reason, PFRES_FRAG);
drop_fragment:
	uma_zfree(V_pf_frent_z, frent);
	return (NULL);
}

static struct mbuf *
pf_join_fragment(struct pf_fragment *frag)
{
	struct mbuf *m, *m2;
	struct pf_frent	*frent;

	frent = TAILQ_FIRST(&frag->fr_queue);
	TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);

	m = frent->fe_m;
	if ((frent->fe_hdrlen + frent->fe_len) < m->m_pkthdr.len)
		m_adj(m, (frent->fe_hdrlen + frent->fe_len) - m->m_pkthdr.len);
	uma_zfree(V_pf_frent_z, frent);
	while ((frent = TAILQ_FIRST(&frag->fr_queue)) != NULL) {
		TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);

		m2 = frent->fe_m;
		/* Strip off ip header. */
		m_adj(m2, frent->fe_hdrlen);
		/* Strip off any trailing bytes. */
		if (frent->fe_len < m2->m_pkthdr.len)
			m_adj(m2, frent->fe_len - m2->m_pkthdr.len);

		uma_zfree(V_pf_frent_z, frent);
		m_cat(m, m2);
	}

	/* Remove from fragment queue. */
	pf_free_fragment(frag);

	return (m);
}

#ifdef INET
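/*
 * Reassemble a complete IPv4 packet.  Unlike pf_reassemble6(), the
 * caller is expected to hold the fragment queue lock already (see
 * pf_normalize_ip()).
 */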
static int
pf_reassemble(struct mbuf **m0, u_short *reason)
{
	struct mbuf		*m = *m0;
	struct ip		*ip = mtod(m, struct ip *);
	struct pf_frent		*frent;
	struct pf_fragment	*frag;
	struct m_tag		*mtag;
	struct pf_fragment_tag	*ftag;
	struct pf_frnode	 key;
	uint16_t		 total, hdrlen;
	uint32_t		 frag_id;
	uint16_t		 maxlen;

	/* Get an entry for the fragment queue */
	if ((frent = pf_create_fragment(reason)) == NULL)
		return (PF_DROP);

	frent->fe_m = m;
	frent->fe_hdrlen = ip->ip_hl << 2;
	frent->fe_extoff = 0;
	frent->fe_len = ntohs(ip->ip_len) - (ip->ip_hl << 2);
	frent->fe_off = (ntohs(ip->ip_off) & IP_OFFMASK) << 3;
	frent->fe_mff = ntohs(ip->ip_off) & IP_MF;

	pf_ip2key(ip, &key);

	if ((frag = pf_fillup_fragment(&key, ip->ip_id, frent, reason)) == NULL)
		return (PF_DROP);

	/* The mbuf is part of the fragment entry, no direct free or access */
	m = *m0 = NULL;

	if (frag->fr_holes) {
		DPFPRINTF(PF_DEBUG_MISC, "frag %d, holes %d",
		    frag->fr_id, frag->fr_holes);
		return (PF_PASS);	/* drop because *m0 is NULL, no error */
	}

	/* We have all the data */
	frent = TAILQ_FIRST(&frag->fr_queue);
	KASSERT(frent != NULL, ("frent != NULL"));
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
	    TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
	hdrlen = frent->fe_hdrlen;

	maxlen = frag->fr_maxlen;
	frag_id = frag->fr_id;
	m = *m0 = pf_join_fragment(frag);
	frag = NULL;

	if (m->m_flags & M_PKTHDR) {
		int plen = 0;
		for (m = *m0; m; m = m->m_next)
			plen += m->m_len;
		m = *m0;
		m->m_pkthdr.len = plen;
	}

	if ((mtag = m_tag_get(PACKET_TAG_PF_REASSEMBLED,
	    sizeof(struct pf_fragment_tag), M_NOWAIT)) == NULL) {
		REASON_SET(reason, PFRES_SHORT);
		/* PF_DROP requires a valid mbuf *m0 in pf_test() */
		return (PF_DROP);
	}
	ftag = (struct pf_fragment_tag *)(mtag + 1);
	ftag->ft_hdrlen = hdrlen;
	ftag->ft_extoff = 0;
	ftag->ft_maxlen = maxlen;
	ftag->ft_id = frag_id;
	m_tag_prepend(m, mtag);

	ip = mtod(m, struct ip *);
	ip->ip_sum = pf_cksum_fixup(ip->ip_sum, ip->ip_len,
	    htons(hdrlen + total), 0);
	ip->ip_len = htons(hdrlen + total);
	ip->ip_sum = pf_cksum_fixup(ip->ip_sum, ip->ip_off,
	    ip->ip_off & ~(IP_MF|IP_OFFMASK), 0);
	ip->ip_off &= ~(IP_MF|IP_OFFMASK);

	if (hdrlen + total > IP_MAXPACKET) {
		DPFPRINTF(PF_DEBUG_MISC, "drop: too big: %d", total);
		ip->ip_len = 0;
		REASON_SET(reason, PFRES_SHORT);
		/* PF_DROP requires a valid mbuf *m0 in pf_test() */
		return (PF_DROP);
	}

	DPFPRINTF(PF_DEBUG_MISC, "complete: %p(%d)", m, ntohs(ip->ip_len));
	return (PF_PASS);
}
#endif /* INET */

#ifdef INET6
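/*
 * Reassemble a complete IPv6 packet.  "hdrlen" covers everything up to
 * and including the fragment header, "extoff" is the offset of the last
 * extension header before it, or 0.  This function takes the fragment
 * queue lock itself, unlike pf_reassemble().
 */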
static int
pf_reassemble6(struct mbuf **m0, struct ip6_frag *fraghdr,
    uint16_t hdrlen, uint16_t extoff, u_short *reason)
{
	struct mbuf		*m = *m0;
	struct ip6_hdr		*ip6 = mtod(m, struct ip6_hdr *);
	struct pf_frent		*frent;
	struct pf_fragment	*frag;
	struct pf_frnode	 key;
	struct m_tag		*mtag;
	struct pf_fragment_tag	*ftag;
	int			 off;
	uint32_t		 frag_id;
	uint16_t		 total, maxlen;
	uint8_t			 proto;

	PF_FRAG_LOCK();

	/* Get an entry for the fragment queue. */
	if ((frent = pf_create_fragment(reason)) == NULL) {
		PF_FRAG_UNLOCK();
		return (PF_DROP);
	}

	frent->fe_m = m;
	frent->fe_hdrlen = hdrlen;
	frent->fe_extoff = extoff;
	frent->fe_len = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - hdrlen;
	frent->fe_off = ntohs(fraghdr->ip6f_offlg & IP6F_OFF_MASK);
	frent->fe_mff = fraghdr->ip6f_offlg & IP6F_MORE_FRAG;

	key.fn_src.v6 = ip6->ip6_src;
	key.fn_dst.v6 = ip6->ip6_dst;
	key.fn_af = AF_INET6;
	/* Only the first fragment's protocol is relevant. */
	key.fn_proto = 0;

	if ((frag = pf_fillup_fragment(&key, fraghdr->ip6f_ident, frent,
	    reason)) == NULL) {
		PF_FRAG_UNLOCK();
		return (PF_DROP);
	}

	/* The mbuf is part of the fragment entry, no direct free or access. */
	m = *m0 = NULL;

	if (frag->fr_holes) {
		DPFPRINTF(PF_DEBUG_MISC, "frag %d, holes %d", frag->fr_id,
		    frag->fr_holes);
		PF_FRAG_UNLOCK();
		return (PF_PASS);	/* Drop because *m0 is NULL, no error. */
	}

	/* We have all the data. */
	frent = TAILQ_FIRST(&frag->fr_queue);
	KASSERT(frent != NULL, ("frent != NULL"));
	extoff = frent->fe_extoff;
	maxlen = frag->fr_maxlen;
	frag_id = frag->fr_id;
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
	    TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
	hdrlen = frent->fe_hdrlen - sizeof(struct ip6_frag);

	m = *m0 = pf_join_fragment(frag);
	frag = NULL;

	PF_FRAG_UNLOCK();

	/* Take protocol from first fragment header. */
	m = m_getptr(m, hdrlen + offsetof(struct ip6_frag, ip6f_nxt), &off);
	KASSERT(m, ("%s: short mbuf chain", __func__));
	proto = *(mtod(m, uint8_t *) + off);
	m = *m0;

	/* Delete frag6 header */
	if (ip6_deletefraghdr(m, hdrlen, M_NOWAIT) != 0)
		goto fail;

	if (m->m_flags & M_PKTHDR) {
		int plen = 0;
		for (m = *m0; m; m = m->m_next)
			plen += m->m_len;
		m = *m0;
		m->m_pkthdr.len = plen;
	}

	if ((mtag = m_tag_get(PACKET_TAG_PF_REASSEMBLED,
	    sizeof(struct pf_fragment_tag), M_NOWAIT)) == NULL)
		goto fail;
	ftag = (struct pf_fragment_tag *)(mtag + 1);
	ftag->ft_hdrlen = hdrlen;
	ftag->ft_extoff = extoff;
	ftag->ft_maxlen = maxlen;
	ftag->ft_id = frag_id;
	m_tag_prepend(m, mtag);

	ip6 = mtod(m, struct ip6_hdr *);
	ip6->ip6_plen = htons(hdrlen - sizeof(struct ip6_hdr) + total);
	if (extoff) {
		/* Write protocol into next field of last extension header. */
		m = m_getptr(m, extoff + offsetof(struct ip6_ext, ip6e_nxt),
		    &off);
		KASSERT(m, ("%s: short mbuf chain", __func__));
		*(mtod(m, char *) + off) = proto;
		m = *m0;
	} else
		ip6->ip6_nxt = proto;

	if (hdrlen - sizeof(struct ip6_hdr) + total > IPV6_MAXPACKET) {
		DPFPRINTF(PF_DEBUG_MISC, "drop: too big: %d", total);
		ip6->ip6_plen = 0;
		REASON_SET(reason, PFRES_SHORT);
		/* PF_DROP requires a valid mbuf *m0 in pf_test6(). */
		return (PF_DROP);
	}

	DPFPRINTF(PF_DEBUG_MISC, "complete: %p(%d)", m,
	    ntohs(ip6->ip6_plen));
	return (PF_PASS);

fail:
	REASON_SET(reason, PFRES_MEMORY);
	/* PF_DROP requires a valid mbuf *m0 in pf_test6(), will free later. */
	return (PF_DROP);
}
#endif /* INET6 */

#ifdef INET6
int
pf_max_frag_size(struct mbuf *m)
{
	struct m_tag *tag;
	struct pf_fragment_tag *ftag;

	tag = m_tag_find(m, PACKET_TAG_PF_REASSEMBLED, NULL);
	if (tag == NULL)
		return (m->m_pkthdr.len);

	ftag = (struct pf_fragment_tag *)(tag + 1);

	return (ftag->ft_maxlen);
}

int
pf_refragment6(struct ifnet *ifp, struct mbuf **m0, struct m_tag *mtag,
    struct ifnet *rt, bool forward)
{
	struct mbuf		*m = *m0, *t;
	struct ip6_hdr		*hdr;
	struct pf_fragment_tag	*ftag = (struct pf_fragment_tag *)(mtag + 1);
	struct pf_pdesc		 pd;
	uint32_t		 frag_id;
	uint16_t		 hdrlen, extoff, maxlen;
	uint8_t			 proto;
	int			 error, action;

	hdrlen = ftag->ft_hdrlen;
	extoff = ftag->ft_extoff;
	maxlen = ftag->ft_maxlen;
	frag_id = ftag->ft_id;
	m_tag_delete(m, mtag);
	mtag = NULL;
	ftag = NULL;

	if (extoff) {
		int off;

		/* Use protocol from next field of last extension header */
		m = m_getptr(m, extoff + offsetof(struct ip6_ext, ip6e_nxt),
		    &off);
		KASSERT((m != NULL), ("pf_refragment6: short mbuf chain"));
		proto = *(mtod(m, uint8_t *) + off);
		*(mtod(m, char *) + off) = IPPROTO_FRAGMENT;
		m = *m0;
	} else {
		hdr = mtod(m, struct ip6_hdr *);
		proto = hdr->ip6_nxt;
		hdr->ip6_nxt = IPPROTO_FRAGMENT;
	}

	/* In case of link-local traffic we'll need a scope set. */
	hdr = mtod(m, struct ip6_hdr *);

	in6_setscope(&hdr->ip6_src, ifp, NULL);
	in6_setscope(&hdr->ip6_dst, ifp, NULL);

	/* The MTU must be a multiple of 8 bytes, or we risk doing the
	 * fragmentation wrong. */
	maxlen = maxlen & ~7;

	/*
	 * Maxlen may be less than 8 if there was only a single
	 * fragment.  As it was fragmented before, add a fragment
	 * header also for a single fragment.  If total or maxlen
	 * is less than 8, ip6_fragment() will return EMSGSIZE and
	 * we drop the packet.
	 */
	error = ip6_fragment(ifp, m, hdrlen, proto, maxlen, frag_id);
	m = (*m0)->m_nextpkt;
	(*m0)->m_nextpkt = NULL;
	if (error == 0) {
		/* The first mbuf contains the unfragmented packet. */
		m_freem(*m0);
		*m0 = NULL;
		action = PF_PASS;
	} else {
		/* Drop expects an mbuf to free. */
		DPFPRINTF(PF_DEBUG_MISC, "refragment error %d", error);
		action = PF_DROP;
	}
	for (; m; m = t) {
		t = m->m_nextpkt;
		m->m_nextpkt = NULL;
		m->m_flags |= M_SKIP_FIREWALL;
		memset(&pd, 0, sizeof(pd));
		pd.pf_mtag = pf_find_mtag(m);
		if (error != 0) {
			m_freem(m);
			continue;
		}
		if (rt != NULL) {
			struct sockaddr_in6 dst;
			hdr = mtod(m, struct ip6_hdr *);

			bzero(&dst, sizeof(dst));
			dst.sin6_family = AF_INET6;
			dst.sin6_len = sizeof(dst);
			dst.sin6_addr = hdr->ip6_dst;

			if (m->m_pkthdr.len <= if_getmtu(ifp)) {
				nd6_output_ifp(rt, rt, m, &dst, NULL);
			} else {
				in6_ifstat_inc(ifp, ifs6_in_toobig);
				icmp6_error(m, ICMP6_PACKET_TOO_BIG, 0,
				    if_getmtu(ifp));
			}
		} else if (forward) {
			MPASS(m->m_pkthdr.rcvif != NULL);
			ip6_forward(m, 0);
		} else {
			(void)ip6_output(m, NULL, NULL, 0, NULL, NULL,
			    NULL);
		}
	}

	return (action);
}
#endif /* INET6 */

#ifdef INET
int
pf_normalize_ip(u_short *reason, struct pf_pdesc *pd)
{
	struct pf_krule		*r;
	struct ip		*h = mtod(pd->m, struct ip *);
	int			 mff = (ntohs(h->ip_off) & IP_MF);
	int			 hlen = h->ip_hl << 2;
	u_int16_t		 fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
	u_int16_t		 max;
	int			 ip_len;
	int			 tag = -1;
	int			 verdict;
	bool			 scrub_compat;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	/*
	 * Check if there are any scrub rules, matching or not.
	 * Lack of scrub rules means:
	 *  - packet normalization is enforced, just like in OpenBSD
	 *  - fragment reassembly depends on V_pf_status.reass
	 * With scrub rules:
	 *  - packet normalization is performed if there is a matching
	 *    scrub rule
	 *  - fragment reassembly is performed if the matching rule has no
	 *    PFRULE_FRAGMENT_NOREASS flag
	 */
	scrub_compat = (r != NULL);
	while (r != NULL) {
		pf_counter_u64_add(&r->evaluations, 1);
		if (pfi_kkif_match(r->kif, pd->kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP];
		else if (r->direction && r->direction != pd->dir)
			r = r->skip[PF_SKIP_DIR];
		else if (r->af && r->af != AF_INET)
			r = r->skip[PF_SKIP_AF];
		else if (r->proto && r->proto != h->ip_p)
			r = r->skip[PF_SKIP_PROTO];
		else if (PF_MISMATCHAW(&r->src.addr,
		    (struct pf_addr *)&h->ip_src.s_addr, AF_INET,
		    r->src.neg, pd->kif, M_GETFIB(pd->m)))
			r = r->skip[PF_SKIP_SRC_ADDR];
		else if (PF_MISMATCHAW(&r->dst.addr,
		    (struct pf_addr *)&h->ip_dst.s_addr, AF_INET,
		    r->dst.neg, NULL, M_GETFIB(pd->m)))
			r = r->skip[PF_SKIP_DST_ADDR];
		else if (r->match_tag && !pf_match_tag(pd->m, r, &tag,
		    pd->pf_mtag ? pd->pf_mtag->tag : 0))
			r = TAILQ_NEXT(r, entries);
		else
			break;
	}

	if (scrub_compat) {
		/* With scrub rules present IPv4 normalization happens only
		 * if one of the rules has matched and it's not a "no scrub"
		 * rule */
		if (r == NULL || r->action == PF_NOSCRUB)
			return (PF_PASS);

		pf_counter_u64_critical_enter();
		pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1);
		pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT], pd->tot_len);
		pf_counter_u64_critical_exit();
		pf_rule_to_actions(r, &pd->act);
	}

	/* Check for illegal packets */
	if (hlen < (int)sizeof(struct ip)) {
		REASON_SET(reason, PFRES_NORM);
		goto drop;
	}

	if (hlen > ntohs(h->ip_len)) {
		REASON_SET(reason, PFRES_NORM);
		goto drop;
	}

	/* Clear IP_DF if the rule uses the no-df option or we're in no-df mode */
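	/* pf_cksum_fixup() adjusts the header checksum incrementally from
	 * the old and new 16-bit words, so flipping the DF bit does not
	 * require recomputing the whole checksum. */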
	if (((!scrub_compat && V_pf_status.reass & PF_REASS_NODF) ||
	    (r != NULL && r->rule_flag & PFRULE_NODF)) &&
	    (h->ip_off & htons(IP_DF))) {
		u_int16_t ip_off = h->ip_off;

		h->ip_off &= htons(~IP_DF);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
	}

	/* We will need other tests here */
	if (!fragoff && !mff)
		goto no_fragment;

	/* We're dealing with a fragment now.  Don't allow fragments
	 * with IP_DF to enter the cache.  If the flag was cleared by
	 * no-df above, fine.  Otherwise drop it.
	 */
	if (h->ip_off & htons(IP_DF)) {
		DPFPRINTF(PF_DEBUG_MISC, "IP_DF");
		goto bad;
	}

	ip_len = ntohs(h->ip_len) - hlen;

	/* All fragments are 8 byte aligned */
	if (mff && (ip_len & 0x7)) {
		DPFPRINTF(PF_DEBUG_MISC, "mff and %d", ip_len);
		goto bad;
	}

	/* Respect maximum length */
	if (fragoff + ip_len > IP_MAXPACKET) {
		DPFPRINTF(PF_DEBUG_MISC, "max packet %d", fragoff + ip_len);
		goto bad;
	}

	if ((!scrub_compat && V_pf_status.reass) ||
	    (r != NULL && !(r->rule_flag & PFRULE_FRAGMENT_NOREASS))) {
		max = fragoff + ip_len;

		/* Fully buffer all of the fragments
		 * Might return a completely reassembled mbuf, or NULL */
		PF_FRAG_LOCK();
		DPFPRINTF(PF_DEBUG_MISC, "reass frag %d @ %d-%d",
		    h->ip_id, fragoff, max);
		verdict = pf_reassemble(&pd->m, reason);
		PF_FRAG_UNLOCK();

		if (verdict != PF_PASS)
			return (PF_DROP);

		if (pd->m == NULL)
			return (PF_DROP);

		h = mtod(pd->m, struct ip *);
		pd->tot_len = ntohs(h->ip_len);

no_fragment:
		/* At this point, only IP_DF is allowed in ip_off */
		if (h->ip_off & ~htons(IP_DF)) {
			u_int16_t ip_off = h->ip_off;

			h->ip_off &= htons(IP_DF);
			h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
		}
	}

	return (PF_PASS);

bad:
	DPFPRINTF(PF_DEBUG_MISC, "dropping bad fragment");
	REASON_SET(reason, PFRES_FRAG);
drop:
	if (r != NULL && r->log)
		PFLOG_PACKET(PF_DROP, *reason, r, NULL, NULL, pd, 1, NULL);

	return (PF_DROP);
}
#endif /* INET */

#ifdef INET6
int
pf_normalize_ip6(int off, u_short *reason,
    struct pf_pdesc *pd)
{
	struct pf_krule		*r;
	struct ip6_hdr		*h;
	struct ip6_frag		 frag;
	bool			 scrub_compat;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	/*
	 * Check if there are any scrub rules, matching or not.
	 * Lack of scrub rules means:
	 *  - packet normalization is enforced, just like in OpenBSD
	 * With scrub rules:
	 *  - packet normalization is performed if there is a matching
	 *    scrub rule
	 * XXX: Fragment reassembly is always performed for IPv6!
	 */
	scrub_compat = (r != NULL);
	while (r != NULL) {
		pf_counter_u64_add(&r->evaluations, 1);
		if (pfi_kkif_match(r->kif, pd->kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP];
		else if (r->direction && r->direction != pd->dir)
			r = r->skip[PF_SKIP_DIR];
		else if (r->af && r->af != AF_INET6)
			r = r->skip[PF_SKIP_AF];
		else if (r->proto && r->proto != pd->proto)
			r = r->skip[PF_SKIP_PROTO];
		else if (PF_MISMATCHAW(&r->src.addr,
		    (struct pf_addr *)&pd->src, AF_INET6,
		    r->src.neg, pd->kif, M_GETFIB(pd->m)))
			r = r->skip[PF_SKIP_SRC_ADDR];
		else if (PF_MISMATCHAW(&r->dst.addr,
		    (struct pf_addr *)&pd->dst, AF_INET6,
		    r->dst.neg, NULL, M_GETFIB(pd->m)))
			r = r->skip[PF_SKIP_DST_ADDR];
		else
			break;
	}

	if (scrub_compat) {
		/* With scrub rules present IPv6 normalization happens only
		 * if one of the rules has matched and it's not a "no scrub"
		 * rule */
		if (r == NULL || r->action == PF_NOSCRUB)
			return (PF_PASS);

		pf_counter_u64_critical_enter();
		pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1);
		pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT], pd->tot_len);
		pf_counter_u64_critical_exit();
		pf_rule_to_actions(r, &pd->act);
	}

	if (!pf_pull_hdr(pd->m, off, &frag, sizeof(frag), NULL, reason,
	    AF_INET6))
		return (PF_DROP);

	/* Offset now points to data portion. */
	off += sizeof(frag);

	if (pd->virtual_proto == PF_VPROTO_FRAGMENT) {
		/* Returns PF_DROP or *m0 is NULL or completely reassembled
		 * mbuf. */
		if (pf_reassemble6(&pd->m, &frag, off, pd->extoff, reason) !=
		    PF_PASS)
			return (PF_DROP);
		if (pd->m == NULL)
			return (PF_DROP);
		h = mtod(pd->m, struct ip6_hdr *);
		pd->tot_len = ntohs(h->ip6_plen) + sizeof(struct ip6_hdr);
	}

	return (PF_PASS);
}
#endif /* INET6 */

int
pf_normalize_tcp(struct pf_pdesc *pd)
{
	struct pf_krule	*r, *rm = NULL;
	struct tcphdr	*th = &pd->hdr.tcp;
	int		 rewrite = 0;
	u_short		 reason;
	u_int16_t	 flags;
	sa_family_t	 af = pd->af;
	int		 srs;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	/* Check if there are any scrub rules.  Lack of scrub rules means
	 * enforced packet normalization operation just like in OpenBSD. */
	srs = (r != NULL);
	while (r != NULL) {
		pf_counter_u64_add(&r->evaluations, 1);
		if (pfi_kkif_match(r->kif, pd->kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP];
		else if (r->direction && r->direction != pd->dir)
			r = r->skip[PF_SKIP_DIR];
		else if (r->af && r->af != af)
			r = r->skip[PF_SKIP_AF];
		else if (r->proto && r->proto != pd->proto)
			r = r->skip[PF_SKIP_PROTO];
		else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
		    r->src.neg, pd->kif, M_GETFIB(pd->m)))
			r = r->skip[PF_SKIP_SRC_ADDR];
		else if (r->src.port_op && !pf_match_port(r->src.port_op,
		    r->src.port[0], r->src.port[1], th->th_sport))
			r = r->skip[PF_SKIP_SRC_PORT];
		else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
		    r->dst.neg, NULL, M_GETFIB(pd->m)))
			r = r->skip[PF_SKIP_DST_ADDR];
		else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
		    r->dst.port[0], r->dst.port[1], th->th_dport))
			r = r->skip[PF_SKIP_DST_PORT];
		else if (r->os_fingerprint != PF_OSFP_ANY && !pf_osfp_match(
		    pf_osfp_fingerprint(pd, th),
		    r->os_fingerprint))
			r = TAILQ_NEXT(r, entries);
		else {
			rm = r;
			break;
		}
	}

	if (srs) {
		/* With scrub rules present TCP normalization happens only
		 * if one of the rules has matched and it's not a "no scrub"
		 * rule */
		if (rm == NULL || rm->action == PF_NOSCRUB)
			return (PF_PASS);

		pf_counter_u64_critical_enter();
		pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1);
		pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT], pd->tot_len);
		pf_counter_u64_critical_exit();
		pf_rule_to_actions(rm, &pd->act);
	}

	if (rm && rm->rule_flag & PFRULE_REASSEMBLE_TCP)
		pd->flags |= PFDESC_TCP_NORM;

	flags = tcp_get_flags(th);
	if (flags & TH_SYN) {
		/* Illegal packet */
		if (flags & TH_RST)
			goto tcp_drop;

		if (flags & TH_FIN)
			goto tcp_drop;
	} else {
		/* Illegal packet */
		if (!(flags & (TH_ACK|TH_RST)))
			goto tcp_drop;
	}

	if (!(flags & TH_ACK)) {
		/* These flags are only valid if ACK is set */
		if ((flags & TH_FIN) || (flags & TH_PUSH) || (flags & TH_URG))
			goto tcp_drop;
	}

	/* Check for illegal header length */
	if (th->th_off < (sizeof(struct tcphdr) >> 2))
		goto tcp_drop;

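	/*
	 * The rewrite below patches the 16-bit word that holds th_off,
	 * the reserved bits and the flags in one go, then repairs the
	 * TCP checksum incrementally from the old and new values.
	 */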
	/* If flags changed, or reserved data set, then adjust */
	if (flags != tcp_get_flags(th) ||
	    (tcp_get_flags(th) & (TH_RES1|TH_RES2|TH_RES3)) != 0) {
		u_int16_t	ov, nv;

		ov = *(u_int16_t *)(&th->th_ack + 1);
		flags &= ~(TH_RES1 | TH_RES2 | TH_RES3);
		tcp_set_flags(th, flags);
		nv = *(u_int16_t *)(&th->th_ack + 1);

		th->th_sum = pf_proto_cksum_fixup(pd->m, th->th_sum, ov, nv, 0);
		rewrite = 1;
	}

	/* Remove urgent pointer, if TH_URG is not set */
	if (!(flags & TH_URG) && th->th_urp) {
		th->th_sum = pf_proto_cksum_fixup(pd->m, th->th_sum, th->th_urp,
		    0, 0);
		th->th_urp = 0;
		rewrite = 1;
	}

	/* copy back packet headers if we sanitized */
	if (rewrite)
		m_copyback(pd->m, pd->off, sizeof(*th), (caddr_t)th);

	return (PF_PASS);

tcp_drop:
	REASON_SET(&reason, PFRES_NORM);
	if (rm != NULL && rm->log)
		PFLOG_PACKET(PF_DROP, reason, rm, NULL, NULL, pd, 1, NULL);
	return (PF_DROP);
}

int
pf_normalize_tcp_init(struct pf_pdesc *pd, struct tcphdr *th,
    struct pf_state_peer *src)
{
	u_int32_t tsval, tsecr;
	int olen;
	uint8_t opts[MAX_TCPOPTLEN], *opt;

	KASSERT((src->scrub == NULL),
	    ("pf_normalize_tcp_init: src->scrub != NULL"));

	src->scrub = uma_zalloc(V_pf_state_scrub_z, M_ZERO | M_NOWAIT);
	if (src->scrub == NULL)
		return (1);

	switch (pd->af) {
#ifdef INET
	case AF_INET: {
		struct ip *h = mtod(pd->m, struct ip *);
		src->scrub->pfss_ttl = h->ip_ttl;
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		struct ip6_hdr *h = mtod(pd->m, struct ip6_hdr *);
		src->scrub->pfss_ttl = h->ip6_hlim;
		break;
	}
#endif /* INET6 */
	default:
		unhandled_af(pd->af);
	}

	/*
	 * All normalizations below are only begun if we see the start of
	 * the connection.  They must all set an enabled bit in pfss_flags.
	 */
	if ((tcp_get_flags(th) & TH_SYN) == 0)
		return (0);

	olen = (th->th_off << 2) - sizeof(*th);
	if (olen < TCPOLEN_TIMESTAMP || !pf_pull_hdr(pd->m,
	    pd->off + sizeof(*th), opts, olen, NULL, NULL, pd->af))
		return (0);

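	/*
	 * Scan the SYN's option list; if a timestamp option is present,
	 * enable timestamp modulation with a random per-connection offset
	 * and record the initial values for the later PAWS checks.
	 */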
	opt = opts;
	while ((opt = pf_find_tcpopt(opt, opts, olen,
	    TCPOPT_TIMESTAMP, TCPOLEN_TIMESTAMP)) != NULL) {
		src->scrub->pfss_flags |= PFSS_TIMESTAMP;
		src->scrub->pfss_ts_mod = arc4random();
		/* note PFSS_PAWS not set yet */
		memcpy(&tsval, &opt[2], sizeof(u_int32_t));
		memcpy(&tsecr, &opt[6], sizeof(u_int32_t));
		src->scrub->pfss_tsval0 = ntohl(tsval);
		src->scrub->pfss_tsval = ntohl(tsval);
		src->scrub->pfss_tsecr = ntohl(tsecr);
		getmicrouptime(&src->scrub->pfss_last);

		opt += opt[1];
	}

	return (0);
}

void
pf_normalize_tcp_cleanup(struct pf_kstate *state)
{
	/* XXX Note: this also cleans up SCTP. */
	uma_zfree(V_pf_state_scrub_z, state->src.scrub);
	uma_zfree(V_pf_state_scrub_z, state->dst.scrub);

	/* Someday... flush the TCP segment reassembly descriptors. */
}

int
pf_normalize_sctp_init(struct pf_pdesc *pd, struct pf_state_peer *src,
    struct pf_state_peer *dst)
{
	src->scrub = uma_zalloc(V_pf_state_scrub_z, M_ZERO | M_NOWAIT);
	if (src->scrub == NULL)
		return (1);

	dst->scrub = uma_zalloc(V_pf_state_scrub_z, M_ZERO | M_NOWAIT);
	if (dst->scrub == NULL) {
		uma_zfree(V_pf_state_scrub_z, src->scrub);
		src->scrub = NULL;
		return (1);
	}

	dst->scrub->pfss_v_tag = pd->sctp_initiate_tag;

	return (0);
}
1592
1593 int
pf_normalize_tcp_stateful(struct pf_pdesc * pd,u_short * reason,struct tcphdr * th,struct pf_kstate * state,struct pf_state_peer * src,struct pf_state_peer * dst,int * writeback)1594 pf_normalize_tcp_stateful(struct pf_pdesc *pd,
1595 u_short *reason, struct tcphdr *th, struct pf_kstate *state,
1596 struct pf_state_peer *src, struct pf_state_peer *dst, int *writeback)
1597 {
1598 struct timeval uptime;
1599 u_int tsval_from_last;
1600 uint32_t tsval, tsecr;
1601 int copyback = 0;
1602 int got_ts = 0;
1603 int olen;
1604 uint8_t opts[MAX_TCPOPTLEN], *opt;
1605
1606 KASSERT((src->scrub || dst->scrub),
1607 ("%s: src->scrub && dst->scrub!", __func__));
1608
1609 /*
1610 * Enforce the minimum TTL seen for this connection. Negate a common
1611 * technique to evade an intrusion detection system and confuse
1612 * firewall state code.
1613 */
1614 switch (pd->af) {
1615 #ifdef INET
1616 case AF_INET: {
1617 if (src->scrub) {
1618 struct ip *h = mtod(pd->m, struct ip *);
1619 if (h->ip_ttl > src->scrub->pfss_ttl)
1620 src->scrub->pfss_ttl = h->ip_ttl;
1621 h->ip_ttl = src->scrub->pfss_ttl;
1622 }
1623 break;
1624 }
1625 #endif /* INET */
1626 #ifdef INET6
1627 case AF_INET6: {
1628 if (src->scrub) {
1629 struct ip6_hdr *h = mtod(pd->m, struct ip6_hdr *);
1630 if (h->ip6_hlim > src->scrub->pfss_ttl)
1631 src->scrub->pfss_ttl = h->ip6_hlim;
1632 h->ip6_hlim = src->scrub->pfss_ttl;
1633 }
1634 break;
1635 }
1636 #endif /* INET6 */
1637 default:
1638 unhandled_af(pd->af);
1639 }
1640
1641 olen = (th->th_off << 2) - sizeof(*th);
1642
1643 if (olen >= TCPOLEN_TIMESTAMP &&
1644 ((src->scrub && (src->scrub->pfss_flags & PFSS_TIMESTAMP)) ||
1645 (dst->scrub && (dst->scrub->pfss_flags & PFSS_TIMESTAMP))) &&
1646 pf_pull_hdr(pd->m, pd->off + sizeof(*th), opts, olen, NULL, NULL, pd->af)) {
1647 /* Modulate the timestamps. Can be used for NAT detection, OS
1648 * uptime determination or reboot detection.
1649 */
1650 opt = opts;
1651 while ((opt = pf_find_tcpopt(opt, opts, olen,
1652 TCPOPT_TIMESTAMP, TCPOLEN_TIMESTAMP)) != NULL) {
1653 uint8_t *ts = opt + 2;
1654 uint8_t *tsr = opt + 6;
1655
1656 if (got_ts) {
1657 /* Huh? Multiple timestamps!? */
1658 if (V_pf_status.debug >= PF_DEBUG_MISC) {
1659 printf("pf: %s: multiple TS??", __func__);
1660 pf_print_state(state);
1661 printf("\n");
1662 }
1663 REASON_SET(reason, PFRES_TS);
1664 return (PF_DROP);
1665 }
1666
1667 memcpy(&tsval, ts, sizeof(u_int32_t));
1668 memcpy(&tsecr, tsr, sizeof(u_int32_t));
1669
1670 /* modulate TS */
1671 if (tsval && src->scrub &&
1672 (src->scrub->pfss_flags & PFSS_TIMESTAMP)) {
1673 /* tsval used further on */
1674 tsval = ntohl(tsval);
1675 pf_patch_32(pd,
1676 ts, htonl(tsval + src->scrub->pfss_ts_mod),
1677 PF_ALGNMNT(ts - opts));
1678 copyback = 1;
1679 }
1680
1681 /* modulate TS reply if any (!0) */
1682 if (tsecr && dst->scrub &&
1683 (dst->scrub->pfss_flags & PFSS_TIMESTAMP)) {
1684 /* tsecr used further on */
1685 tsecr = ntohl(tsecr) - dst->scrub->pfss_ts_mod;
1686 pf_patch_32(pd, tsr, htonl(tsecr),
1687 PF_ALGNMNT(tsr - opts));
1688 copyback = 1;
1689 }
1690
1691 got_ts = 1;
1692 opt += opt[1];
1693 }
1694
1695 if (copyback) {
1696 /* Copyback the options, caller copys back header */
1697 *writeback = 1;
1698 m_copyback(pd->m, pd->off + sizeof(*th), olen, opts);
1699 }
1700 }
1701
1702 /*
1703 * Must invalidate PAWS checks on connections idle for too long.
1704 * The fastest allowed timestamp clock is 1ms. That turns out to
1705 * be about 24 days before it wraps. XXX Right now our lowerbound
1706 * TS echo check only works for the first 12 days of a connection
1707 * when the TS has exhausted half its 32bit space
1708 */
1709 #define TS_MAX_IDLE (24*24*60*60)
1710 #define TS_MAX_CONN (12*24*60*60) /* XXX remove when better tsecr check */

	getmicrouptime(&uptime);
	if (src->scrub && (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (uptime.tv_sec - src->scrub->pfss_last.tv_sec > TS_MAX_IDLE ||
	    time_uptime - (state->creation / 1000) > TS_MAX_CONN)) {
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			DPFPRINTF(PF_DEBUG_MISC, "src idled out of PAWS");
			pf_print_state(state);
			printf("\n");
		}
		src->scrub->pfss_flags = (src->scrub->pfss_flags & ~PFSS_PAWS)
		    | PFSS_PAWS_IDLED;
	}
	if (dst->scrub && (dst->scrub->pfss_flags & PFSS_PAWS) &&
	    uptime.tv_sec - dst->scrub->pfss_last.tv_sec > TS_MAX_IDLE) {
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			DPFPRINTF(PF_DEBUG_MISC, "dst idled out of PAWS");
			pf_print_state(state);
			printf("\n");
		}
		dst->scrub->pfss_flags = (dst->scrub->pfss_flags & ~PFSS_PAWS)
		    | PFSS_PAWS_IDLED;
	}

	if (got_ts && src->scrub && dst->scrub &&
	    (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (dst->scrub->pfss_flags & PFSS_PAWS)) {
		/* Validate that the timestamps are "in-window".
		 * RFC1323 describes TCP Timestamp options that allow
		 * measurement of RTT (round trip time) and PAWS
		 * (protection against wrapped sequence numbers).  PAWS
		 * gives us a set of rules for rejecting packets on
		 * long fat pipes (packets that were somehow delayed
		 * in transit longer than the time it took to send the
		 * full TCP sequence space of 4 GB).  We can use these
		 * rules and infer a few others that will let us treat
		 * the 32bit timestamp and the 32bit echoed timestamp
		 * as sequence numbers to prevent a blind attacker from
		 * inserting packets into a connection.
		 *
		 * RFC1323 tells us:
		 *  - The timestamp on this packet must be greater than
		 *    or equal to the last value echoed by the other
		 *    endpoint.  The RFC says those will be discarded
		 *    since it is a dup that has already been acked.
		 *    This gives us a lowerbound on the timestamp.
		 *        timestamp >= other last echoed timestamp
		 *  - The timestamp will be less than or equal to
		 *    the last timestamp plus the time between the
		 *    last packet and now.  The RFC defines the max
		 *    clock rate as 1ms.  We will allow clocks to be
		 *    up to 10% fast and will allow a total difference
		 *    of 30 seconds due to a route change.  And this
		 *    gives us an upperbound on the timestamp.
		 *        timestamp <= last timestamp + max ticks
		 *    We have to be careful here.  Windows will send an
		 *    initial timestamp of zero and then initialize it
		 *    to a random value after the 3whs; presumably to
		 *    avoid a DoS by having to call an expensive RNG
		 *    during a SYN flood.  Proof MS has at least one
		 *    good security geek.
		 *
		 *  - The TCP timestamp option must also echo the other
		 *    endpoint's timestamp.  The timestamp echoed is the
		 *    one carried on the earliest unacknowledged segment
		 *    on the left edge of the sequence window.  The RFC
		 *    states that the host will reject any echoed
		 *    timestamps that were larger than any ever sent.
		 *    This gives us an upperbound on the TS echo.
		 *        tsecr <= largest_tsval
		 *  - The lowerbound on the TS echo is a little more
		 *    tricky to determine.  The other endpoint's echoed
		 *    values will not decrease.  But there may be
		 *    network conditions that re-order packets and
		 *    cause our view of them to decrease.  For now the
		 *    only lowerbound we can safely determine is that
		 *    the TS echo will never be less than the original
		 *    TS.  XXX There is probably a better lowerbound.
		 *    Remove TS_MAX_CONN with better lowerbound check.
		 *        tsecr >= other original TS
		 *
		 * It is also important to note that the fastest
		 * timestamp clock of 1ms will wrap its 32bit space in
		 * 24 days.  So we just disable TS checking after 24
		 * days of idle time.  We actually must use a 12d
		 * connection limit until we can come up with a better
		 * lowerbound to the TS echo check.
		 */
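
		/*
		 * Editor's sketch: the in-window tests below lean on the
		 * serial number arithmetic of SEQ_LT()/SEQ_GT() from
		 * <netinet/tcp_seq.h>, which compares 32-bit values by the
		 * sign of their difference so comparisons stay correct
		 * across wraparound.  Illustration only, never compiled:
		 */
#if 0
		/* 0xfffffff0 precedes 0x10 once the counter wraps ... */
		KASSERT(SEQ_LT(0xfffffff0U, 0x00000010U), ("wrapped compare"));
		/* ... while a plain unsigned compare says the opposite. */
		KASSERT(0xfffffff0U > 0x00000010U, ("unsigned compare"));
#endif
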
		struct timeval delta_ts;
		int ts_fudge;

		/*
		 * PFTM_TS_DIFF is how many seconds of leeway to allow
		 * a host's timestamp.  This can happen if the previous
		 * packet got delayed in transit for much longer than
		 * this packet.
		 */
		if ((ts_fudge = state->rule->timeout[PFTM_TS_DIFF]) == 0)
			ts_fudge = V_pf_default_rule.timeout[PFTM_TS_DIFF];

		/* Calculate max ticks since the last timestamp */
#define TS_MAXFREQ	1100		/* RFC max TS freq of 1Khz + 10% skew */
#define TS_MICROSECS	1000000		/* microseconds per second */
		delta_ts = uptime;
		timevalsub(&delta_ts, &src->scrub->pfss_last);
		tsval_from_last = (delta_ts.tv_sec + ts_fudge) * TS_MAXFREQ;
		tsval_from_last += delta_ts.tv_usec / (TS_MICROSECS/TS_MAXFREQ);
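
		/*
		 * Editor's worked example (assumed values): with a ts_fudge
		 * of 30 s and a packet arriving 2.5 s after the previous one
		 * (delta_ts = {2, 500000}), the budget is
		 * (2 + 30) * 1100 = 35200 ticks, plus
		 * 500000 / (1000000 / 1100) = 550 ticks for the fractional
		 * second, i.e. tsval_from_last = 35750.
		 */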

		if ((src->state >= TCPS_ESTABLISHED &&
		    dst->state >= TCPS_ESTABLISHED) &&
		    (SEQ_LT(tsval, dst->scrub->pfss_tsecr) ||
		    SEQ_GT(tsval, src->scrub->pfss_tsval + tsval_from_last) ||
		    (tsecr && (SEQ_GT(tsecr, dst->scrub->pfss_tsval) ||
		    SEQ_LT(tsecr, dst->scrub->pfss_tsval0))))) {
			/* Bad RFC1323 implementation or an insertion attack.
			 *
			 * - Solaris 2.6 and 2.7 are known to send another ACK
			 *   after the FIN,FIN|ACK,ACK closing that carries
			 *   an old timestamp.
			 */

			DPFPRINTF(PF_DEBUG_MISC, "Timestamp failed %c%c%c%c",
			    SEQ_LT(tsval, dst->scrub->pfss_tsecr) ? '0' : ' ',
			    SEQ_GT(tsval, src->scrub->pfss_tsval +
			    tsval_from_last) ? '1' : ' ',
			    SEQ_GT(tsecr, dst->scrub->pfss_tsval) ? '2' : ' ',
			    SEQ_LT(tsecr, dst->scrub->pfss_tsval0) ? '3' : ' ');
			DPFPRINTF(PF_DEBUG_MISC, " tsval: %u tsecr: %u +ticks: "
			    "%u idle: %jus %lums",
			    tsval, tsecr, tsval_from_last,
			    (uintmax_t)delta_ts.tv_sec,
			    delta_ts.tv_usec / 1000);
			DPFPRINTF(PF_DEBUG_MISC, " src->tsval: %u tsecr: %u",
			    src->scrub->pfss_tsval, src->scrub->pfss_tsecr);
			DPFPRINTF(PF_DEBUG_MISC, " dst->tsval: %u tsecr: %u "
			    "tsval0: %u", dst->scrub->pfss_tsval,
			    dst->scrub->pfss_tsecr, dst->scrub->pfss_tsval0);
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
				pf_print_state(state);
				pf_print_flags(tcp_get_flags(th));
				printf("\n");
			}
			REASON_SET(reason, PFRES_TS);
			return (PF_DROP);
		}

		/* XXX I'd really like to require tsecr but it's optional */

	} else if (!got_ts && (tcp_get_flags(th) & TH_RST) == 0 &&
	    ((src->state == TCPS_ESTABLISHED && dst->state == TCPS_ESTABLISHED)
	    || pd->p_len > 0 || (tcp_get_flags(th) & TH_SYN)) &&
	    src->scrub && dst->scrub &&
	    (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (dst->scrub->pfss_flags & PFSS_PAWS)) {
		/* Didn't send a timestamp.  Timestamps aren't really useful
		 * when:
		 *  - connection opening or closing (often not even sent).
		 *    but we must not let an attacker put a FIN on a
		 *    data packet to sneak it through our ESTABLISHED check.
		 *  - on a TCP reset.  RFC suggests not even looking at TS.
		 *  - on an empty ACK.  The TS will not be echoed so it will
		 *    probably not help keep the RTT calculation in sync and
		 *    there isn't as much danger when the sequence numbers
		 *    got wrapped.  So some stacks don't include TS on empty
		 *    ACKs :-(
		 *
		 * To minimize the disruption to mostly RFC1323 conformant
		 * stacks, we will only require timestamps on data packets.
		 *
		 * And what do ya know, we cannot require timestamps on data
		 * packets.  There appear to be devices that do legitimate
		 * TCP connection hijacking.  There are HTTP devices that allow
		 * a 3whs (with timestamps) and then buffer the HTTP request.
		 * If the intermediate device has the HTTP response cached, it
		 * will spoof the response but not bother timestamping its
		 * packets.  So we can look for the presence of a timestamp in
		 * the first data packet and if there, require it in all future
		 * packets.
		 */

		if (pd->p_len > 0 && (src->scrub->pfss_flags & PFSS_DATA_TS)) {
			/*
			 * Hey!  Someone tried to sneak a packet in.  Or the
			 * stack changed its RFC1323 behavior?!?!
			 */
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
				DPFPRINTF(PF_DEBUG_MISC, "Did not receive expected "
				    "RFC1323 timestamp");
				pf_print_state(state);
				pf_print_flags(tcp_get_flags(th));
				printf("\n");
			}
			REASON_SET(reason, PFRES_TS);
			return (PF_DROP);
		}
	}

	/*
	 * We will note whether a host sends its data packets with or without
	 * timestamps, and require all data packets to contain a timestamp if
	 * the first one does.  PAWS implicitly requires that all data packets
	 * be timestamped.  But there appear to be middle-man devices that
	 * hijack TCP streams immediately after the 3whs and don't timestamp
	 * their packets (seen in a WWW accelerator or cache).
	 */
	if (pd->p_len > 0 && src->scrub && (src->scrub->pfss_flags &
	    (PFSS_TIMESTAMP|PFSS_DATA_TS|PFSS_DATA_NOTS)) == PFSS_TIMESTAMP) {
		if (got_ts)
			src->scrub->pfss_flags |= PFSS_DATA_TS;
		else {
			src->scrub->pfss_flags |= PFSS_DATA_NOTS;
			if (V_pf_status.debug >= PF_DEBUG_MISC && dst->scrub &&
			    (dst->scrub->pfss_flags & PFSS_TIMESTAMP)) {
				/* Don't warn if other host rejected RFC1323 */
				DPFPRINTF(PF_DEBUG_MISC, "Broken RFC1323 stack did "
				    "not timestamp data packet. Disabled PAWS "
				    "security.");
				pf_print_state(state);
				pf_print_flags(tcp_get_flags(th));
				printf("\n");
			}
		}
	}

	/*
	 * Update PAWS values
	 */
	if (got_ts && src->scrub && PFSS_TIMESTAMP == (src->scrub->pfss_flags &
	    (PFSS_PAWS_IDLED|PFSS_TIMESTAMP))) {
		getmicrouptime(&src->scrub->pfss_last);
		if (SEQ_GEQ(tsval, src->scrub->pfss_tsval) ||
		    (src->scrub->pfss_flags & PFSS_PAWS) == 0)
			src->scrub->pfss_tsval = tsval;

		if (tsecr) {
			if (SEQ_GEQ(tsecr, src->scrub->pfss_tsecr) ||
			    (src->scrub->pfss_flags & PFSS_PAWS) == 0)
				src->scrub->pfss_tsecr = tsecr;

			if ((src->scrub->pfss_flags & PFSS_PAWS) == 0 &&
			    (SEQ_LT(tsval, src->scrub->pfss_tsval0) ||
			    src->scrub->pfss_tsval0 == 0)) {
				/* tsval0 MUST be the lowest timestamp */
				src->scrub->pfss_tsval0 = tsval;
			}

			/* Only fully initialized after a TS gets echoed */
			if ((src->scrub->pfss_flags & PFSS_PAWS) == 0)
				src->scrub->pfss_flags |= PFSS_PAWS;
		}
	}
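
	/*
	 * Editor's worked trace of the latch above (illustrative, assumed
	 * peer behavior): a Windows-style peer may send tsval 0 on its SYN
	 * and switch to a random value after the 3whs.  Until a nonzero
	 * tsecr is seen from this side, PFSS_PAWS stays clear and
	 * pfss_tsval/pfss_tsval0 simply track the values observed; once a
	 * timestamp has been echoed, PFSS_PAWS is set and the stored values
	 * only advance in SEQ order from then on.
	 */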

	/* I have a dream....  TCP segment reassembly.... */
	return (0);
}

int
pf_normalize_mss(struct pf_pdesc *pd)
{
	int olen, optsoff;
	uint8_t opts[MAX_TCPOPTLEN], *opt;

	olen = (pd->hdr.tcp.th_off << 2) - sizeof(struct tcphdr);
	optsoff = pd->off + sizeof(struct tcphdr);
	if (olen < TCPOLEN_MAXSEG ||
	    !pf_pull_hdr(pd->m, optsoff, opts, olen, NULL, NULL, pd->af))
		return (0);

	opt = opts;
	while ((opt = pf_find_tcpopt(opt, opts, olen,
	    TCPOPT_MAXSEG, TCPOLEN_MAXSEG)) != NULL) {
		uint16_t mss;
		uint8_t *mssp = opt + 2;
		memcpy(&mss, mssp, sizeof(mss));
		if (ntohs(mss) > pd->act.max_mss) {
			size_t mssoffopts = mssp - opts;
			pf_patch_16(pd, &mss,
			    htons(pd->act.max_mss), PF_ALGNMNT(mssoffopts));
			m_copyback(pd->m, optsoff + mssoffopts,
			    sizeof(mss), (caddr_t)&mss);
			m_copyback(pd->m, pd->off,
			    sizeof(struct tcphdr), (caddr_t)&pd->hdr.tcp);
		}

		opt += opt[1];
	}

	return (0);
}
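
/*
 * Editor's usage note: pf_normalize_mss() implements the max-mss scrub
 * option.  A pf.conf rule along these lines (illustrative example, not
 * taken from this source) would clamp the MSS advertised in SYN segments
 * to fit a PPPoE path:
 *
 *	scrub on pppoe0 max-mss 1440
 *
 * Any TCPOPT_MAXSEG option larger than the configured value is rewritten
 * in place and the TCP checksum fixed up via pf_patch_16().
 */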

int
pf_scan_sctp(struct pf_pdesc *pd)
{
	struct sctp_chunkhdr ch = { };
	int chunk_off = sizeof(struct sctphdr);
	int chunk_start;
	int ret;

	while (pd->off + chunk_off < pd->tot_len) {
		if (!pf_pull_hdr(pd->m, pd->off + chunk_off, &ch, sizeof(ch), NULL,
		    NULL, pd->af))
			return (PF_DROP);

		/* Length includes the header, this must be at least 4. */
		if (ntohs(ch.chunk_length) < 4)
			return (PF_DROP);

		chunk_start = chunk_off;
		chunk_off += roundup(ntohs(ch.chunk_length), 4);
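
		/*
		 * Editor's worked example: chunks are padded to 4-byte
		 * boundaries (RFC 9260, Section 3.2), so a chunk_length of
		 * 22 advances chunk_off by roundup(22, 4) == 24, while the
		 * unpadded chunk_start is what gets handed to the per-chunk
		 * parsers below.
		 */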

		switch (ch.chunk_type) {
		case SCTP_INITIATION:
		case SCTP_INITIATION_ACK: {
			struct sctp_init_chunk init;

			if (!pf_pull_hdr(pd->m, pd->off + chunk_start, &init,
			    sizeof(init), NULL, NULL, pd->af))
				return (PF_DROP);

			/*
			 * RFC 9260, Section 3.3.2, "The Initiate Tag is
			 * allowed to have any value except 0."
			 */
			if (init.init.initiate_tag == 0)
				return (PF_DROP);
			if (init.init.num_inbound_streams == 0)
				return (PF_DROP);
			if (init.init.num_outbound_streams == 0)
				return (PF_DROP);
			if (ntohl(init.init.a_rwnd) < SCTP_MIN_RWND)
				return (PF_DROP);

			/*
			 * RFC 9260, Section 3.1: INIT chunks MUST have a zero
			 * verification tag.
			 */
			if (ch.chunk_type == SCTP_INITIATION &&
			    pd->hdr.sctp.v_tag != 0)
				return (PF_DROP);

			pd->sctp_initiate_tag = init.init.initiate_tag;

			if (ch.chunk_type == SCTP_INITIATION)
				pd->sctp_flags |= PFDESC_SCTP_INIT;
			else
				pd->sctp_flags |= PFDESC_SCTP_INIT_ACK;

			ret = pf_multihome_scan_init(pd->off + chunk_start,
			    ntohs(init.ch.chunk_length), pd);
			if (ret != PF_PASS)
				return (ret);

			break;
		}
		case SCTP_ABORT_ASSOCIATION:
			pd->sctp_flags |= PFDESC_SCTP_ABORT;
			break;
		case SCTP_SHUTDOWN:
		case SCTP_SHUTDOWN_ACK:
			pd->sctp_flags |= PFDESC_SCTP_SHUTDOWN;
			break;
		case SCTP_SHUTDOWN_COMPLETE:
			pd->sctp_flags |= PFDESC_SCTP_SHUTDOWN_COMPLETE;
			break;
		case SCTP_COOKIE_ECHO:
			pd->sctp_flags |= PFDESC_SCTP_COOKIE;
			break;
		case SCTP_COOKIE_ACK:
			pd->sctp_flags |= PFDESC_SCTP_COOKIE_ACK;
			break;
		case SCTP_DATA:
			pd->sctp_flags |= PFDESC_SCTP_DATA;
			break;
		case SCTP_HEARTBEAT_REQUEST:
			pd->sctp_flags |= PFDESC_SCTP_HEARTBEAT;
			break;
		case SCTP_HEARTBEAT_ACK:
			pd->sctp_flags |= PFDESC_SCTP_HEARTBEAT_ACK;
			break;
		case SCTP_ASCONF:
			pd->sctp_flags |= PFDESC_SCTP_ASCONF;

			ret = pf_multihome_scan_asconf(pd->off + chunk_start,
			    ntohs(ch.chunk_length), pd);
			if (ret != PF_PASS)
				return (ret);
			break;
		default:
			pd->sctp_flags |= PFDESC_SCTP_OTHER;
			break;
		}
	}

	/* Validate chunk lengths vs. packet length. */
	if (pd->off + chunk_off != pd->tot_len)
		return (PF_DROP);

	/*
	 * INIT, INIT_ACK or SHUTDOWN_COMPLETE chunks must always be the only
	 * chunk in a packet.
	 */
	if ((pd->sctp_flags & PFDESC_SCTP_INIT) &&
	    (pd->sctp_flags & ~PFDESC_SCTP_INIT))
		return (PF_DROP);
	if ((pd->sctp_flags & PFDESC_SCTP_INIT_ACK) &&
	    (pd->sctp_flags & ~PFDESC_SCTP_INIT_ACK))
		return (PF_DROP);
	if ((pd->sctp_flags & PFDESC_SCTP_SHUTDOWN_COMPLETE) &&
	    (pd->sctp_flags & ~PFDESC_SCTP_SHUTDOWN_COMPLETE))
		return (PF_DROP);
	if ((pd->sctp_flags & PFDESC_SCTP_ABORT) &&
	    (pd->sctp_flags & PFDESC_SCTP_DATA)) {
		/*
		 * RFC 4960, Section 3.3.7: DATA chunks MUST NOT be
		 * bundled with ABORT.
		 */
		return (PF_DROP);
	}

	return (PF_PASS);
}

int
pf_normalize_sctp(struct pf_pdesc *pd)
{
	struct pf_krule *r, *rm = NULL;
	struct sctphdr *sh = &pd->hdr.sctp;
	u_short reason;
	sa_family_t af = pd->af;
	int srs;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	/* Check if there are any scrub rules.  Lack of scrub rules means
	 * packet normalization is enforced, just like in OpenBSD. */
	srs = (r != NULL);
	while (r != NULL) {
		pf_counter_u64_add(&r->evaluations, 1);
		if (pfi_kkif_match(r->kif, pd->kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP];
		else if (r->direction && r->direction != pd->dir)
			r = r->skip[PF_SKIP_DIR];
		else if (r->af && r->af != af)
			r = r->skip[PF_SKIP_AF];
		else if (r->proto && r->proto != pd->proto)
			r = r->skip[PF_SKIP_PROTO];
		else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
		    r->src.neg, pd->kif, M_GETFIB(pd->m)))
			r = r->skip[PF_SKIP_SRC_ADDR];
		else if (r->src.port_op && !pf_match_port(r->src.port_op,
		    r->src.port[0], r->src.port[1], sh->src_port))
			r = r->skip[PF_SKIP_SRC_PORT];
		else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
		    r->dst.neg, NULL, M_GETFIB(pd->m)))
			r = r->skip[PF_SKIP_DST_ADDR];
		else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
		    r->dst.port[0], r->dst.port[1], sh->dest_port))
			r = r->skip[PF_SKIP_DST_PORT];
		else {
			rm = r;
			break;
		}
	}

	if (srs) {
		/* With scrub rules present, SCTP normalization happens only
		 * if one of the rules has matched and it is not a "no scrub"
		 * rule. */
		if (rm == NULL || rm->action == PF_NOSCRUB)
			return (PF_PASS);

		pf_counter_u64_critical_enter();
		pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1);
		pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT], pd->tot_len);
		pf_counter_u64_critical_exit();
	}

	/* Verify we're a multiple of 4 bytes long */
	if ((pd->tot_len - pd->off - sizeof(struct sctphdr)) % 4)
		goto sctp_drop;

	/* INIT chunk needs to be the only chunk */
	if (pd->sctp_flags & PFDESC_SCTP_INIT)
		if (pd->sctp_flags & ~PFDESC_SCTP_INIT)
			goto sctp_drop;

	return (PF_PASS);

sctp_drop:
	REASON_SET(&reason, PFRES_NORM);
	if (rm != NULL && r->log)
		PFLOG_PACKET(PF_DROP, reason, r, NULL, NULL, pd,
		    1, NULL);

	return (PF_DROP);
}

#if defined(INET) || defined(INET6)
void
pf_scrub(struct pf_pdesc *pd)
{

	struct ip *h = mtod(pd->m, struct ip *);
#ifdef INET6
	struct ip6_hdr *h6 = mtod(pd->m, struct ip6_hdr *);
#endif /* INET6 */

	/* Clear IP_DF if no-df was requested */
	if (pd->af == AF_INET && pd->act.flags & PFSTATE_NODF &&
	    h->ip_off & htons(IP_DF))
	{
		u_int16_t ip_off = h->ip_off;

		h->ip_off &= htons(~IP_DF);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
	}
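
	/*
	 * Editor's worked example of the incremental fixup (RFC 1624,
	 * Eq. 3: HC' = ~(~HC + ~m + m')): clearing DF changes the
	 * frag/offset field from m = 0x4000 to m' = 0x0000.  With an old
	 * checksum HC = 0xb1e6: ~HC = 0x4e19, ~m = 0xbfff, and
	 * 0x4e19 + 0xbfff = 0x10e18, which folds to 0x0e19, so
	 * HC' = ~0x0e19 = 0xf1e6.  (Values chosen for illustration.)
	 */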

	/* Enforce a minimum ttl, may cause endless packet loops */
	if (pd->af == AF_INET && pd->act.min_ttl &&
	    h->ip_ttl < pd->act.min_ttl) {
		u_int16_t ip_ttl = h->ip_ttl;

		h->ip_ttl = pd->act.min_ttl;
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_ttl, h->ip_ttl, 0);
	}
#ifdef INET6
	/* Enforce a minimum ttl, may cause endless packet loops */
	if (pd->af == AF_INET6 && pd->act.min_ttl &&
	    h6->ip6_hlim < pd->act.min_ttl)
		h6->ip6_hlim = pd->act.min_ttl;
#endif /* INET6 */
	/* Enforce tos */
	if (pd->act.flags & PFSTATE_SETTOS) {
		switch (pd->af) {
		case AF_INET: {
			u_int16_t ov, nv;

			ov = *(u_int16_t *)h;
			h->ip_tos = pd->act.set_tos | (h->ip_tos & IPTOS_ECN_MASK);
			nv = *(u_int16_t *)h;

			h->ip_sum = pf_cksum_fixup(h->ip_sum, ov, nv, 0);
			break;
		}
#ifdef INET6
		case AF_INET6:
			h6->ip6_flow &= IPV6_FLOWLABEL_MASK | IPV6_VERSION_MASK;
			h6->ip6_flow |= htonl((pd->act.set_tos | IPV6_ECN(h6)) << 20);
			break;
#endif /* INET6 */
		}
	}
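
	/*
	 * Editor's worked example for set-tos (assumed values): with
	 * set_tos = 0x48 (DSCP AF21) and an incoming ip_tos of 0x01
	 * (ECT(1)), the two ECN bits are preserved via IPTOS_ECN_MASK
	 * (0x03), giving a new ip_tos of 0x49; only the DSCP portion of
	 * the byte is rewritten.
	 */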

	/* random-id, but not for fragments */
#ifdef INET
	if (pd->af == AF_INET &&
	    pd->act.flags & PFSTATE_RANDOMID && !(h->ip_off & ~htons(IP_DF))) {
		uint16_t ip_id = h->ip_id;

		ip_fillid(h, V_ip_random_id);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_id, h->ip_id, 0);
	}
#endif /* INET */
}
#endif /* INET || INET6 */