1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright 2001 Niels Provos <provos@citi.umich.edu>
5 * Copyright 2011-2018 Alexander Bluhm <bluhm@openbsd.org>
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * $OpenBSD: pf_norm.c,v 1.114 2009/01/29 14:11:45 henning Exp $
29 */
30
31 #include <sys/cdefs.h>
32 #include "opt_inet.h"
33 #include "opt_inet6.h"
34 #include "opt_pf.h"
35
36 #include <sys/param.h>
37 #include <sys/kernel.h>
38 #include <sys/lock.h>
39 #include <sys/mbuf.h>
40 #include <sys/mutex.h>
41 #include <sys/refcount.h>
42 #include <sys/socket.h>
43
44 #include <net/if.h>
45 #include <net/if_var.h>
46 #include <net/if_private.h>
47 #include <net/vnet.h>
48 #include <net/pfvar.h>
49 #include <net/if_pflog.h>
50
51 #include <netinet/in.h>
52 #include <netinet/ip.h>
53 #include <netinet/ip_var.h>
54 #include <netinet6/in6_var.h>
55 #include <netinet6/nd6.h>
56 #include <netinet6/ip6_var.h>
57 #include <netinet6/scope6_var.h>
58 #include <netinet/tcp.h>
59 #include <netinet/tcp_fsm.h>
60 #include <netinet/tcp_seq.h>
61 #include <netinet/sctp_constants.h>
62 #include <netinet/sctp_header.h>
63
64 #ifdef INET6
65 #include <netinet/ip6.h>
66 #endif /* INET6 */
67
68 struct pf_frent {
69 TAILQ_ENTRY(pf_frent) fr_next;
70 struct mbuf *fe_m;
71 uint16_t fe_hdrlen; /* ipv4: header length incl. ip options;
72 ipv6: headers up to and incl. the fragment header */
73 uint16_t fe_extoff; /* last extension header offset or 0 */
74 uint16_t fe_len; /* fragment length */
75 uint16_t fe_off; /* fragment offset */
76 uint16_t fe_mff; /* more fragment flag */
77 };
78
79 RB_HEAD(pf_frag_tree, pf_fragment);
80 struct pf_frnode {
81 struct pf_addr fn_src; /* ip source address */
82 struct pf_addr fn_dst; /* ip destination address */
83 sa_family_t fn_af; /* address family */
84 u_int8_t fn_proto; /* protocol for fragments in fn_tree */
85 u_int32_t fn_fragments; /* number of entries in fn_tree */
86
87 RB_ENTRY(pf_frnode) fn_entry;
88 struct pf_frag_tree fn_tree; /* matching fragments, lookup by id */
89 };
90
91 struct pf_fragment {
92 uint32_t fr_id; /* fragment id for reassemble */
93
94 /* pointers to queue element */
95 struct pf_frent *fr_firstoff[PF_FRAG_ENTRY_POINTS];
96 /* count entries between pointers */
97 uint8_t fr_entries[PF_FRAG_ENTRY_POINTS];
98 RB_ENTRY(pf_fragment) fr_entry;
99 TAILQ_ENTRY(pf_fragment) frag_next;
100 uint32_t fr_timeout;
101 TAILQ_HEAD(pf_fragq, pf_frent) fr_queue;
102 uint16_t fr_maxlen; /* maximum length of single fragment */
103 u_int16_t fr_holes; /* number of holes in the queue */
104 struct pf_frnode *fr_node; /* ip src/dst/proto/af for fragments */
105 };
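/*
 * Lookup sketch (derived from the declarations above, not verbatim from
 * upstream): fragments are tracked on two levels. A pf_frnode is keyed
 * by the fields all fragments of one datagram share (fn_src, fn_dst,
 * fn_af, fn_proto) and carries an RB tree of pf_fragment keyed by
 * fr_id. Finding the queue for an incoming fragment is thus two tree
 * lookups: (src, dst, af, proto) -> frnode, then fragment id -> frag.
 */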
106
107 VNET_DEFINE_STATIC(struct mtx, pf_frag_mtx);
108 #define V_pf_frag_mtx VNET(pf_frag_mtx)
109 #define PF_FRAG_LOCK() mtx_lock(&V_pf_frag_mtx)
110 #define PF_FRAG_UNLOCK() mtx_unlock(&V_pf_frag_mtx)
111 #define PF_FRAG_ASSERT() mtx_assert(&V_pf_frag_mtx, MA_OWNED)
112
113 VNET_DEFINE(uma_zone_t, pf_state_scrub_z); /* XXX: shared with pfsync */
114
115 VNET_DEFINE_STATIC(uma_zone_t, pf_frent_z);
116 #define V_pf_frent_z VNET(pf_frent_z)
117 VNET_DEFINE_STATIC(uma_zone_t, pf_frnode_z);
118 #define V_pf_frnode_z VNET(pf_frnode_z)
119 VNET_DEFINE_STATIC(uma_zone_t, pf_frag_z);
120 #define V_pf_frag_z VNET(pf_frag_z)
121
122 TAILQ_HEAD(pf_fragqueue, pf_fragment);
123 TAILQ_HEAD(pf_cachequeue, pf_fragment);
124 RB_HEAD(pf_frnode_tree, pf_frnode);
125 VNET_DEFINE_STATIC(struct pf_fragqueue, pf_fragqueue);
126 #define V_pf_fragqueue VNET(pf_fragqueue)
127 static __inline int pf_frnode_compare(struct pf_frnode *,
128 struct pf_frnode *);
129 VNET_DEFINE_STATIC(struct pf_frnode_tree, pf_frnode_tree);
130 #define V_pf_frnode_tree VNET(pf_frnode_tree)
131 RB_PROTOTYPE(pf_frnode_tree, pf_frnode, fn_entry, pf_frnode_compare);
132 RB_GENERATE(pf_frnode_tree, pf_frnode, fn_entry, pf_frnode_compare);
133
134 static int pf_frag_compare(struct pf_fragment *,
135 struct pf_fragment *);
136 static RB_PROTOTYPE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
137 static RB_GENERATE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
138
139 static void pf_flush_fragments(void);
140 static void pf_free_fragment(struct pf_fragment *);
141
142 static struct pf_frent *pf_create_fragment(u_short *);
143 static int pf_frent_holes(struct pf_frent *frent);
144 static struct pf_fragment *pf_find_fragment(struct pf_frnode *, u_int32_t);
145 static inline int pf_frent_index(struct pf_frent *);
146 static int pf_frent_insert(struct pf_fragment *,
147 struct pf_frent *, struct pf_frent *);
148 void pf_frent_remove(struct pf_fragment *,
149 struct pf_frent *);
150 struct pf_frent *pf_frent_previous(struct pf_fragment *,
151 struct pf_frent *);
152 static struct pf_fragment *pf_fillup_fragment(struct pf_frnode *, u_int32_t,
153 struct pf_frent *, u_short *);
154 static struct mbuf *pf_join_fragment(struct pf_fragment *);
155 #ifdef INET
156 static int pf_reassemble(struct mbuf **, u_short *);
157 #endif /* INET */
158 #ifdef INET6
159 static int pf_reassemble6(struct mbuf **,
160 struct ip6_frag *, uint16_t, uint16_t, u_short *);
161 #endif /* INET6 */
162
163 #define DPFPRINTF(x) do { \
164 if (V_pf_status.debug >= PF_DEBUG_MISC) { \
165 printf("%s: ", __func__); \
166 printf x ; \
167 } \
168 } while(0)
169
170 #ifdef INET
171 static void
172 pf_ip2key(struct ip *ip, struct pf_frnode *key)
173 {
174
175 key->fn_src.v4 = ip->ip_src;
176 key->fn_dst.v4 = ip->ip_dst;
177 key->fn_af = AF_INET;
178 key->fn_proto = ip->ip_p;
179 }
180 #endif /* INET */
181
182 void
183 pf_normalize_init(void)
184 {
185
186 V_pf_frag_z = uma_zcreate("pf frags", sizeof(struct pf_fragment),
187 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
188 V_pf_frnode_z = uma_zcreate("pf fragment node",
189 sizeof(struct pf_frnode), NULL, NULL, NULL, NULL,
190 UMA_ALIGN_PTR, 0);
191 V_pf_frent_z = uma_zcreate("pf frag entries", sizeof(struct pf_frent),
192 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
193 V_pf_state_scrub_z = uma_zcreate("pf state scrubs",
194 sizeof(struct pf_state_scrub), NULL, NULL, NULL, NULL,
195 UMA_ALIGN_PTR, 0);
196
197 mtx_init(&V_pf_frag_mtx, "pf fragments", NULL, MTX_DEF);
198
199 V_pf_limits[PF_LIMIT_FRAGS].zone = V_pf_frent_z;
200 V_pf_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT;
201 uma_zone_set_max(V_pf_frent_z, PFFRAG_FRENT_HIWAT);
202 uma_zone_set_warning(V_pf_frent_z, "PF frag entries limit reached");
203
204 TAILQ_INIT(&V_pf_fragqueue);
205 }
206
207 void
208 pf_normalize_cleanup(void)
209 {
210
211 uma_zdestroy(V_pf_state_scrub_z);
212 uma_zdestroy(V_pf_frent_z);
213 uma_zdestroy(V_pf_frnode_z);
214 uma_zdestroy(V_pf_frag_z);
215
216 mtx_destroy(&V_pf_frag_mtx);
217 }
218
219 static int
220 pf_frnode_compare(struct pf_frnode *a, struct pf_frnode *b)
221 {
222 int diff;
223
224 if ((diff = a->fn_proto - b->fn_proto) != 0)
225 return (diff);
226 if ((diff = a->fn_af - b->fn_af) != 0)
227 return (diff);
228 if ((diff = pf_addr_cmp(&a->fn_src, &b->fn_src, a->fn_af)) != 0)
229 return (diff);
230 if ((diff = pf_addr_cmp(&a->fn_dst, &b->fn_dst, a->fn_af)) != 0)
231 return (diff);
232 return (0);
233 }
234
235 static __inline int
236 pf_frag_compare(struct pf_fragment *a, struct pf_fragment *b)
237 {
238 int diff;
239
240 if ((diff = a->fr_id - b->fr_id) != 0)
241 return (diff);
242
243 return (0);
244 }
245
246 void
247 pf_purge_expired_fragments(void)
248 {
249 u_int32_t expire = time_uptime -
250 V_pf_default_rule.timeout[PFTM_FRAG];
251
252 pf_purge_fragments(expire);
253 }
254
255 void
256 pf_purge_fragments(uint32_t expire)
257 {
258 struct pf_fragment *frag;
259
260 PF_FRAG_LOCK();
261 while ((frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue)) != NULL) {
262 if (frag->fr_timeout > expire)
263 break;
264
265 DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
266 pf_free_fragment(frag);
267 }
268
269 PF_FRAG_UNLOCK();
270 }
271
272 /*
273 * Try to flush old fragments to make space for new ones
274 */
275 static void
276 pf_flush_fragments(void)
277 {
278 struct pf_fragment *frag;
279 int goal;
280
281 PF_FRAG_ASSERT();
282
283 goal = uma_zone_get_cur(V_pf_frent_z) * 9 / 10;
284 DPFPRINTF(("trying to free %d frag entriess\n", goal));
285 while (goal < uma_zone_get_cur(V_pf_frent_z)) {
286 frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue);
287 if (frag)
288 pf_free_fragment(frag);
289 else
290 break;
291 }
292 }
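/*
 * Example (illustrative): with 1000 entries in V_pf_frent_z the goal
 * becomes 900, and whole fragment queues are freed oldest-first until
 * the zone usage drops below that goal.
 */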
293
294 /*
295 * Remove a fragment from the fragment queue, free its fragment entries,
296 * and free the fragment itself.
297 */
298 static void
299 pf_free_fragment(struct pf_fragment *frag)
300 {
301 struct pf_frent *frent;
302 struct pf_frnode *frnode;
303
304 PF_FRAG_ASSERT();
305
306 frnode = frag->fr_node;
307 RB_REMOVE(pf_frag_tree, &frnode->fn_tree, frag);
308 MPASS(frnode->fn_fragments >= 1);
309 frnode->fn_fragments--;
310 if (frnode->fn_fragments == 0) {
311 MPASS(RB_EMPTY(&frnode->fn_tree));
312 RB_REMOVE(pf_frnode_tree, &V_pf_frnode_tree, frnode);
313 uma_zfree(V_pf_frnode_z, frnode);
314 }
315
316 TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);
317
318 /* Free all fragment entries */
319 while ((frent = TAILQ_FIRST(&frag->fr_queue)) != NULL) {
320 TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);
321
322 m_freem(frent->fe_m);
323 uma_zfree(V_pf_frent_z, frent);
324 }
325
326 uma_zfree(V_pf_frag_z, frag);
327 }
328
329 static struct pf_fragment *
330 pf_find_fragment(struct pf_frnode *key, uint32_t id)
331 {
332 struct pf_fragment *frag, idkey;
333 struct pf_frnode *frnode;
334
335 PF_FRAG_ASSERT();
336
337 frnode = RB_FIND(pf_frnode_tree, &V_pf_frnode_tree, key);
338 if (frnode == NULL)
339 return (NULL);
340 MPASS(frnode->fn_fragments >= 1);
341 idkey.fr_id = id;
342 frag = RB_FIND(pf_frag_tree, &frnode->fn_tree, &idkey);
343 if (frag == NULL)
344 return (NULL);
345 TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);
346 TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next);
347
348 return (frag);
349 }
350
351 static struct pf_frent *
352 pf_create_fragment(u_short *reason)
353 {
354 struct pf_frent *frent;
355
356 PF_FRAG_ASSERT();
357
358 frent = uma_zalloc(V_pf_frent_z, M_NOWAIT);
359 if (frent == NULL) {
360 pf_flush_fragments();
361 frent = uma_zalloc(V_pf_frent_z, M_NOWAIT);
362 if (frent == NULL) {
363 REASON_SET(reason, PFRES_MEMORY);
364 return (NULL);
365 }
366 }
367
368 return (frent);
369 }
370
371 /*
372 * Calculate the additional holes that were created in the fragment
373 * queue by inserting this fragment. A fragment in the middle
374 * creates one more hole by splitting. For each connected side,
375 * it loses one hole.
376 * Fragment entry must be in the queue when calling this function.
377 */
378 static int
379 pf_frent_holes(struct pf_frent *frent)
380 {
381 struct pf_frent *prev = TAILQ_PREV(frent, pf_fragq, fr_next);
382 struct pf_frent *next = TAILQ_NEXT(frent, fr_next);
383 int holes = 1;
384
385 if (prev == NULL) {
386 if (frent->fe_off == 0)
387 holes--;
388 } else {
389 KASSERT(frent->fe_off != 0, ("frent->fe_off != 0"));
390 if (frent->fe_off == prev->fe_off + prev->fe_len)
391 holes--;
392 }
393 if (next == NULL) {
394 if (!frent->fe_mff)
395 holes--;
396 } else {
397 KASSERT(frent->fe_mff, ("frent->fe_mff"));
398 if (next->fe_off == frent->fe_off + frent->fe_len)
399 holes--;
400 }
401 return holes;
402 }
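/*
 * Worked example (offsets in octets, hypothetical 4440 octet payload in
 * three 1480 octet fragments): a fresh queue starts with fr_holes == 1.
 * Inserting [0,1480) with mff set contributes 0 (off == 0 cancels the
 * split). Inserting [2960,4440) without mff also contributes 0 (it is
 * terminal but connects to neither neighbor), so fr_holes stays 1.
 * Inserting [1480,2960) connects both sides: 1 - 1 - 1 == -1, and
 * fr_holes drops to 0, meaning the datagram is complete.
 */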
403
404 static inline int
405 pf_frent_index(struct pf_frent *frent)
406 {
407 /*
408 * We have an array of 16 entry points to the queue. A full size
409 * 65535 octet IP packet can have 8192 fragments. So the queue
410 * traversal length is at most 512 and at most 16 entry points are
411 * checked. We need 128 additional bytes on a 64 bit architecture.
412 */
413 CTASSERT(((u_int16_t)0xffff &~ 7) / (0x10000 / PF_FRAG_ENTRY_POINTS) ==
414 16 - 1);
415 CTASSERT(((u_int16_t)0xffff >> 3) / PF_FRAG_ENTRY_POINTS == 512 - 1);
416
417 return frent->fe_off / (0x10000 / PF_FRAG_ENTRY_POINTS);
418 }
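/*
 * Example, assuming PF_FRAG_ENTRY_POINTS == 16 as the comment above
 * states: each entry point covers 0x10000 / 16 == 4096 octets of offset
 * space, so a fragment at octet offset 12000 maps to index
 * 12000 / 4096 == 2 and the largest aligned offset 65528 (0xffff & ~7)
 * maps to index 15.
 */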
419
420 static int
421 pf_frent_insert(struct pf_fragment *frag, struct pf_frent *frent,
422 struct pf_frent *prev)
423 {
424 int index;
425
426 CTASSERT(PF_FRAG_ENTRY_LIMIT <= 0xff);
427
428 /*
429 * A packet has at most 65536 octets. With 16 entry points, each one
430 * spans 4096 octets. We limit these to 64 fragments each, which
431 * means on average every fragment must have at least 64 octets.
432 */
433 index = pf_frent_index(frent);
434 if (frag->fr_entries[index] >= PF_FRAG_ENTRY_LIMIT)
435 return ENOBUFS;
436 frag->fr_entries[index]++;
437
438 if (prev == NULL) {
439 TAILQ_INSERT_HEAD(&frag->fr_queue, frent, fr_next);
440 } else {
441 KASSERT(prev->fe_off + prev->fe_len <= frent->fe_off,
442 ("overlapping fragment"));
443 TAILQ_INSERT_AFTER(&frag->fr_queue, prev, frent, fr_next);
444 }
445
446 if (frag->fr_firstoff[index] == NULL) {
447 KASSERT(prev == NULL || pf_frent_index(prev) < index,
448 ("prev == NULL || pf_frent_index(pref) < index"));
449 frag->fr_firstoff[index] = frent;
450 } else {
451 if (frent->fe_off < frag->fr_firstoff[index]->fe_off) {
452 KASSERT(prev == NULL || pf_frent_index(prev) < index,
453 ("prev == NULL || pf_frent_index(pref) < index"));
454 frag->fr_firstoff[index] = frent;
455 } else {
456 KASSERT(prev != NULL, ("prev != NULL"));
457 KASSERT(pf_frent_index(prev) == index,
458 ("pf_frent_index(prev) == index"));
459 }
460 }
461
462 frag->fr_holes += pf_frent_holes(frent);
463
464 return 0;
465 }
466
467 void
468 pf_frent_remove(struct pf_fragment *frag, struct pf_frent *frent)
469 {
470 #ifdef INVARIANTS
471 struct pf_frent *prev = TAILQ_PREV(frent, pf_fragq, fr_next);
472 #endif /* INVARIANTS */
473 struct pf_frent *next = TAILQ_NEXT(frent, fr_next);
474 int index;
475
476 frag->fr_holes -= pf_frent_holes(frent);
477
478 index = pf_frent_index(frent);
479 KASSERT(frag->fr_firstoff[index] != NULL, ("frent not found"));
480 if (frag->fr_firstoff[index]->fe_off == frent->fe_off) {
481 if (next == NULL) {
482 frag->fr_firstoff[index] = NULL;
483 } else {
484 KASSERT(frent->fe_off + frent->fe_len <= next->fe_off,
485 ("overlapping fragment"));
486 if (pf_frent_index(next) == index) {
487 frag->fr_firstoff[index] = next;
488 } else {
489 frag->fr_firstoff[index] = NULL;
490 }
491 }
492 } else {
493 KASSERT(frag->fr_firstoff[index]->fe_off < frent->fe_off,
494 ("frag->fr_firstoff[index]->fe_off < frent->fe_off"));
495 KASSERT(prev != NULL, ("prev != NULL"));
496 KASSERT(prev->fe_off + prev->fe_len <= frent->fe_off,
497 ("overlapping fragment"));
498 KASSERT(pf_frent_index(prev) == index,
499 ("pf_frent_index(prev) == index"));
500 }
501
502 TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);
503
504 KASSERT(frag->fr_entries[index] > 0, ("No fragments remaining"));
505 frag->fr_entries[index]--;
506 }
507
508 struct pf_frent *
509 pf_frent_previous(struct pf_fragment *frag, struct pf_frent *frent)
510 {
511 struct pf_frent *prev, *next;
512 int index;
513
514 /*
515 * If there are no fragments after frent, take the final one. Assume
516 * that the global queue is not empty.
517 */
518 prev = TAILQ_LAST(&frag->fr_queue, pf_fragq);
519 KASSERT(prev != NULL, ("prev != NULL"));
520 if (prev->fe_off <= frent->fe_off)
521 return prev;
522 /*
523 * We want to find a fragment entry that is before frent, but still
524 * close to it. Find the first fragment entry that is in the same
525 * entry point or in the first entry point after that. As we have
526 * already checked that there are entries behind frent, this will
527 * succeed.
528 */
529 for (index = pf_frent_index(frent); index < PF_FRAG_ENTRY_POINTS;
530 index++) {
531 prev = frag->fr_firstoff[index];
532 if (prev != NULL)
533 break;
534 }
535 KASSERT(prev != NULL, ("prev != NULL"));
536 /*
537 * In prev we may have a fragment from the same entry point that is
538 * before frent, or one that is just one position behind frent.
539 * In the latter case, we go back one step and have the predecessor.
540 * There may be none if the new fragment will be the first one.
541 */
542 if (prev->fe_off > frent->fe_off) {
543 prev = TAILQ_PREV(prev, pf_fragq, fr_next);
544 if (prev == NULL)
545 return NULL;
546 KASSERT(prev->fe_off <= frent->fe_off,
547 ("prev->fe_off <= frent->fe_off"));
548 return prev;
549 }
550 /*
551 * Now prev holds the first fragment of the entry point. The offset
552 * of frent is behind it. Find the closest previous fragment.
553 */
554 for (next = TAILQ_NEXT(prev, fr_next); next != NULL;
555 next = TAILQ_NEXT(next, fr_next)) {
556 if (next->fe_off > frent->fe_off)
557 break;
558 prev = next;
559 }
560 return prev;
561 }
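/*
 * Example (hypothetical offsets): the queue holds fragments at 0, 7000
 * and 9000; a new frent at 8500 has index 8500 / 4096 == 2.
 * fr_firstoff[2] is the fragment at 9000, whose offset is greater than
 * 8500, so one TAILQ_PREV() step yields the fragment at 7000, the true
 * predecessor.
 */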
562
563 static struct pf_fragment *
564 pf_fillup_fragment(struct pf_frnode *key, uint32_t id,
565 struct pf_frent *frent, u_short *reason)
566 {
567 struct pf_frent *after, *next, *prev;
568 struct pf_fragment *frag;
569 struct pf_frnode *frnode;
570 uint16_t total;
571
572 PF_FRAG_ASSERT();
573
574 /* No empty fragments. */
575 if (frent->fe_len == 0) {
576 DPFPRINTF(("bad fragment: len 0\n"));
577 goto bad_fragment;
578 }
579
580 /* All fragments are 8 byte aligned. */
581 if (frent->fe_mff && (frent->fe_len & 0x7)) {
582 DPFPRINTF(("bad fragment: mff and len %d\n", frent->fe_len));
583 goto bad_fragment;
584 }
585
586 /* Respect maximum length, IP_MAXPACKET == IPV6_MAXPACKET. */
587 if (frent->fe_off + frent->fe_len > IP_MAXPACKET) {
588 DPFPRINTF(("bad fragment: max packet %d\n",
589 frent->fe_off + frent->fe_len));
590 goto bad_fragment;
591 }
592
593 DPFPRINTF((key->fn_af == AF_INET ?
594 "reass frag %d @ %d-%d\n" : "reass frag %#08x @ %d-%d\n",
595 id, frent->fe_off, frent->fe_off + frent->fe_len));
596
597 /* Fully buffer all of the fragments in this fragment queue. */
598 frag = pf_find_fragment(key, id);
599
600 /* Create a new reassembly queue for this packet. */
601 if (frag == NULL) {
602 frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
603 if (frag == NULL) {
604 pf_flush_fragments();
605 frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
606 if (frag == NULL) {
607 REASON_SET(reason, PFRES_MEMORY);
608 goto drop_fragment;
609 }
610 }
611
612 frnode = RB_FIND(pf_frnode_tree, &V_pf_frnode_tree, key);
613 if (frnode == NULL) {
614 frnode = uma_zalloc(V_pf_frnode_z, M_NOWAIT);
615 if (frnode == NULL) {
616 pf_flush_fragments();
617 frnode = uma_zalloc(V_pf_frnode_z, M_NOWAIT);
618 if (frnode == NULL) {
619 REASON_SET(reason, PFRES_MEMORY);
620 uma_zfree(V_pf_frag_z, frag);
621 goto drop_fragment;
622 }
623 }
624 *frnode = *key;
625 RB_INIT(&frnode->fn_tree);
626 frnode->fn_fragments = 0;
627 }
628 memset(frag->fr_firstoff, 0, sizeof(frag->fr_firstoff));
629 memset(frag->fr_entries, 0, sizeof(frag->fr_entries));
630 frag->fr_timeout = time_uptime;
631 TAILQ_INIT(&frag->fr_queue);
632 frag->fr_maxlen = frent->fe_len;
633 frag->fr_holes = 1;
634
635 frag->fr_id = id;
636 frag->fr_node = frnode;
637 /* RB_INSERT cannot fail as pf_find_fragment() found nothing */
638 RB_INSERT(pf_frag_tree, &frnode->fn_tree, frag);
639 frnode->fn_fragments++;
640 if (frnode->fn_fragments == 1)
641 RB_INSERT(pf_frnode_tree, &V_pf_frnode_tree, frnode);
642
643 TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next);
644
645 /* We do not have a previous fragment, cannot fail. */
646 pf_frent_insert(frag, frent, NULL);
647
648 return (frag);
649 }
650
651 KASSERT(!TAILQ_EMPTY(&frag->fr_queue), ("!TAILQ_EMPTY(&frag->fr_queue)"));
652 MPASS(frag->fr_node);
653
654 /* Remember maximum fragment len for refragmentation. */
655 if (frent->fe_len > frag->fr_maxlen)
656 frag->fr_maxlen = frent->fe_len;
657
658 /* Maximum data we have seen already. */
659 total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
660 TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
661
662 /* Non-terminal fragments must have the more-fragments flag set. */
663 if (frent->fe_off + frent->fe_len < total && !frent->fe_mff)
664 goto free_ipv6_fragment;
665
666 /* Check if we saw the last fragment already. */
667 if (!TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_mff) {
668 if (frent->fe_off + frent->fe_len > total ||
669 (frent->fe_off + frent->fe_len == total && frent->fe_mff))
670 goto free_ipv6_fragment;
671 } else {
672 if (frent->fe_off + frent->fe_len == total && !frent->fe_mff)
673 goto free_ipv6_fragment;
674 }
675
676 /* Find neighbors for newly inserted fragment */
677 prev = pf_frent_previous(frag, frent);
678 if (prev == NULL) {
679 after = TAILQ_FIRST(&frag->fr_queue);
680 KASSERT(after != NULL, ("after != NULL"));
681 } else {
682 after = TAILQ_NEXT(prev, fr_next);
683 }
684
685 if (prev != NULL && prev->fe_off + prev->fe_len > frent->fe_off) {
686 uint16_t precut;
687
688 if (frag->fr_node->fn_af == AF_INET6)
689 goto free_fragment;
690
691 precut = prev->fe_off + prev->fe_len - frent->fe_off;
692 if (precut >= frent->fe_len) {
693 DPFPRINTF(("new frag overlapped\n"));
694 goto drop_fragment;
695 }
696 DPFPRINTF(("frag head overlap %d\n", precut));
697 m_adj(frent->fe_m, precut);
698 frent->fe_off += precut;
699 frent->fe_len -= precut;
700 }
701
702 for (; after != NULL && frent->fe_off + frent->fe_len > after->fe_off;
703 after = next) {
704 uint16_t aftercut;
705
706 aftercut = frent->fe_off + frent->fe_len - after->fe_off;
707 if (aftercut < after->fe_len) {
708 DPFPRINTF(("frag tail overlap %d", aftercut));
709 m_adj(after->fe_m, aftercut);
710 /* Fragment may switch queue as fe_off changes */
711 pf_frent_remove(frag, after);
712 after->fe_off += aftercut;
713 after->fe_len -= aftercut;
714 /* Insert into correct queue */
715 if (pf_frent_insert(frag, after, prev)) {
716 DPFPRINTF(("fragment requeue limit exceeded"));
717 m_freem(after->fe_m);
718 uma_zfree(V_pf_frent_z, after);
719 /* There is no way to recover */
720 goto free_fragment;
721 }
722 break;
723 }
724
725 /* This fragment is completely overlapped, lose it. */
726 DPFPRINTF(("old frag overlapped\n"));
727 next = TAILQ_NEXT(after, fr_next);
728 pf_frent_remove(frag, after);
729 m_freem(after->fe_m);
730 uma_zfree(V_pf_frent_z, after);
731 }
732
733 /* If part of the queue gets too long, there is no way to recover. */
734 if (pf_frent_insert(frag, frent, prev)) {
735 DPFPRINTF(("fragment queue limit exceeded\n"));
736 goto bad_fragment;
737 }
738
739 return (frag);
740
741 free_ipv6_fragment:
742 if (frag->fr_node->fn_af == AF_INET)
743 goto bad_fragment;
744 free_fragment:
745 /*
746 * RFC 5722, Errata 3089: When reassembling an IPv6 datagram, if one
747 * or more of its constituent fragments is determined to be an overlapping
748 * fragment, the entire datagram (and any constituent fragments) MUST
749 * be silently discarded.
750 */
751 DPFPRINTF(("flush overlapping fragments\n"));
752 pf_free_fragment(frag);
753
754 bad_fragment:
755 REASON_SET(reason, PFRES_FRAG);
756 drop_fragment:
757 uma_zfree(V_pf_frent_z, frent);
758 return (NULL);
759 }
760
761 static struct mbuf *
762 pf_join_fragment(struct pf_fragment *frag)
763 {
764 struct mbuf *m, *m2;
765 struct pf_frent *frent;
766
767 frent = TAILQ_FIRST(&frag->fr_queue);
768 TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);
769
770 m = frent->fe_m;
771 if ((frent->fe_hdrlen + frent->fe_len) < m->m_pkthdr.len)
772 m_adj(m, (frent->fe_hdrlen + frent->fe_len) - m->m_pkthdr.len);
773 uma_zfree(V_pf_frent_z, frent);
774 while ((frent = TAILQ_FIRST(&frag->fr_queue)) != NULL) {
775 TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);
776
777 m2 = frent->fe_m;
778 /* Strip off ip header. */
779 m_adj(m2, frent->fe_hdrlen);
780 /* Strip off any trailing bytes. */
781 if (frent->fe_len < m2->m_pkthdr.len)
782 m_adj(m2, frent->fe_len - m2->m_pkthdr.len);
783
784 uma_zfree(V_pf_frent_z, frent);
785 m_cat(m, m2);
786 }
787
788 /* Remove from fragment queue. */
789 pf_free_fragment(frag);
790
791 return (m);
792 }
793
794 #ifdef INET
795 static int
796 pf_reassemble(struct mbuf **m0, u_short *reason)
797 {
798 struct mbuf *m = *m0;
799 struct ip *ip = mtod(m, struct ip *);
800 struct pf_frent *frent;
801 struct pf_fragment *frag;
802 struct m_tag *mtag;
803 struct pf_fragment_tag *ftag;
804 struct pf_frnode key;
805 uint16_t total, hdrlen;
806 uint32_t frag_id;
807 uint16_t maxlen;
808
809 /* Get an entry for the fragment queue */
810 if ((frent = pf_create_fragment(reason)) == NULL)
811 return (PF_DROP);
812
813 frent->fe_m = m;
814 frent->fe_hdrlen = ip->ip_hl << 2;
815 frent->fe_extoff = 0;
816 frent->fe_len = ntohs(ip->ip_len) - (ip->ip_hl << 2);
817 frent->fe_off = (ntohs(ip->ip_off) & IP_OFFMASK) << 3;
818 frent->fe_mff = ntohs(ip->ip_off) & IP_MF;
819
820 pf_ip2key(ip, &key);
821
822 if ((frag = pf_fillup_fragment(&key, ip->ip_id, frent, reason)) == NULL)
823 return (PF_DROP);
824
825 /* The mbuf is part of the fragment entry, no direct free or access */
826 m = *m0 = NULL;
827
828 if (frag->fr_holes) {
829 DPFPRINTF(("frag %d, holes %d\n", frag->fr_id, frag->fr_holes));
830 return (PF_PASS); /* drop because *m0 is NULL, no error */
831 }
832
833 /* We have all the data */
834 frent = TAILQ_FIRST(&frag->fr_queue);
835 KASSERT(frent != NULL, ("frent != NULL"));
836 total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
837 TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
838 hdrlen = frent->fe_hdrlen;
839
840 maxlen = frag->fr_maxlen;
841 frag_id = frag->fr_id;
842 m = *m0 = pf_join_fragment(frag);
843 frag = NULL;
844
845 if (m->m_flags & M_PKTHDR) {
846 int plen = 0;
847 for (m = *m0; m; m = m->m_next)
848 plen += m->m_len;
849 m = *m0;
850 m->m_pkthdr.len = plen;
851 }
852
853 if ((mtag = m_tag_get(PACKET_TAG_PF_REASSEMBLED,
854 sizeof(struct pf_fragment_tag), M_NOWAIT)) == NULL) {
855 REASON_SET(reason, PFRES_SHORT);
856 /* PF_DROP requires a valid mbuf *m0 in pf_test() */
857 return (PF_DROP);
858 }
859 ftag = (struct pf_fragment_tag *)(mtag + 1);
860 ftag->ft_hdrlen = hdrlen;
861 ftag->ft_extoff = 0;
862 ftag->ft_maxlen = maxlen;
863 ftag->ft_id = frag_id;
864 m_tag_prepend(m, mtag);
865
866 ip = mtod(m, struct ip *);
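/*
 * pf_cksum_fixup() updates ip_sum incrementally in the style of
 * RFC 1624: the old 16-bit word is subtracted from the ones'
 * complement sum and the new one added back, so the checksum stays
 * valid without recomputing it over the whole header.
 */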
867 ip->ip_sum = pf_cksum_fixup(ip->ip_sum, ip->ip_len,
868 htons(hdrlen + total), 0);
869 ip->ip_len = htons(hdrlen + total);
870 ip->ip_sum = pf_cksum_fixup(ip->ip_sum, ip->ip_off,
871 ip->ip_off & ~(IP_MF|IP_OFFMASK), 0);
872 ip->ip_off &= ~(IP_MF|IP_OFFMASK);
873
874 if (hdrlen + total > IP_MAXPACKET) {
875 DPFPRINTF(("drop: too big: %d\n", total));
876 ip->ip_len = 0;
877 REASON_SET(reason, PFRES_SHORT);
878 /* PF_DROP requires a valid mbuf *m0 in pf_test() */
879 return (PF_DROP);
880 }
881
882 DPFPRINTF(("complete: %p(%d)\n", m, ntohs(ip->ip_len)));
883 return (PF_PASS);
884 }
885 #endif /* INET */
886
887 #ifdef INET6
888 static int
889 pf_reassemble6(struct mbuf **m0, struct ip6_frag *fraghdr,
890 uint16_t hdrlen, uint16_t extoff, u_short *reason)
891 {
892 struct mbuf *m = *m0;
893 struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
894 struct pf_frent *frent;
895 struct pf_fragment *frag;
896 struct pf_frnode key;
897 struct m_tag *mtag;
898 struct pf_fragment_tag *ftag;
899 int off;
900 uint32_t frag_id;
901 uint16_t total, maxlen;
902 uint8_t proto;
903
904 PF_FRAG_LOCK();
905
906 /* Get an entry for the fragment queue. */
907 if ((frent = pf_create_fragment(reason)) == NULL) {
908 PF_FRAG_UNLOCK();
909 return (PF_DROP);
910 }
911
912 frent->fe_m = m;
913 frent->fe_hdrlen = hdrlen;
914 frent->fe_extoff = extoff;
915 frent->fe_len = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - hdrlen;
916 frent->fe_off = ntohs(fraghdr->ip6f_offlg & IP6F_OFF_MASK);
917 frent->fe_mff = fraghdr->ip6f_offlg & IP6F_MORE_FRAG;
918
919 key.fn_src.v6 = ip6->ip6_src;
920 key.fn_dst.v6 = ip6->ip6_dst;
921 key.fn_af = AF_INET6;
922 /* Only the first fragment's protocol is relevant. */
923 key.fn_proto = 0;
924
925 if ((frag = pf_fillup_fragment(&key, fraghdr->ip6f_ident, frent, reason)) == NULL) {
926 PF_FRAG_UNLOCK();
927 return (PF_DROP);
928 }
929
930 /* The mbuf is part of the fragment entry, no direct free or access. */
931 m = *m0 = NULL;
932
933 if (frag->fr_holes) {
934 DPFPRINTF(("frag %d, holes %d\n", frag->fr_id,
935 frag->fr_holes));
936 PF_FRAG_UNLOCK();
937 return (PF_PASS); /* Drop because *m0 is NULL, no error. */
938 }
939
940 /* We have all the data. */
941 frent = TAILQ_FIRST(&frag->fr_queue);
942 KASSERT(frent != NULL, ("frent != NULL"));
943 extoff = frent->fe_extoff;
944 maxlen = frag->fr_maxlen;
945 frag_id = frag->fr_id;
946 total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
947 TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
948 hdrlen = frent->fe_hdrlen - sizeof(struct ip6_frag);
949
950 m = *m0 = pf_join_fragment(frag);
951 frag = NULL;
952
953 PF_FRAG_UNLOCK();
954
955 /* Take protocol from first fragment header. */
956 m = m_getptr(m, hdrlen + offsetof(struct ip6_frag, ip6f_nxt), &off);
957 KASSERT(m, ("%s: short mbuf chain", __func__));
958 proto = *(mtod(m, uint8_t *) + off);
959 m = *m0;
960
961 /* Delete frag6 header */
962 if (ip6_deletefraghdr(m, hdrlen, M_NOWAIT) != 0)
963 goto fail;
964
965 if (m->m_flags & M_PKTHDR) {
966 int plen = 0;
967 for (m = *m0; m; m = m->m_next)
968 plen += m->m_len;
969 m = *m0;
970 m->m_pkthdr.len = plen;
971 }
972
973 if ((mtag = m_tag_get(PACKET_TAG_PF_REASSEMBLED,
974 sizeof(struct pf_fragment_tag), M_NOWAIT)) == NULL)
975 goto fail;
976 ftag = (struct pf_fragment_tag *)(mtag + 1);
977 ftag->ft_hdrlen = hdrlen;
978 ftag->ft_extoff = extoff;
979 ftag->ft_maxlen = maxlen;
980 ftag->ft_id = frag_id;
981 m_tag_prepend(m, mtag);
982
983 ip6 = mtod(m, struct ip6_hdr *);
984 ip6->ip6_plen = htons(hdrlen - sizeof(struct ip6_hdr) + total);
985 if (extoff) {
986 /* Write protocol into next field of last extension header. */
987 m = m_getptr(m, extoff + offsetof(struct ip6_ext, ip6e_nxt),
988 &off);
989 KASSERT(m, ("%s: short mbuf chain", __func__));
990 *(mtod(m, char *) + off) = proto;
991 m = *m0;
992 } else
993 ip6->ip6_nxt = proto;
994
995 if (hdrlen - sizeof(struct ip6_hdr) + total > IPV6_MAXPACKET) {
996 DPFPRINTF(("drop: too big: %d\n", total));
997 ip6->ip6_plen = 0;
998 REASON_SET(reason, PFRES_SHORT);
999 /* PF_DROP requires a valid mbuf *m0 in pf_test6(). */
1000 return (PF_DROP);
1001 }
1002
1003 DPFPRINTF(("complete: %p(%d)\n", m, ntohs(ip6->ip6_plen)));
1004 return (PF_PASS);
1005
1006 fail:
1007 REASON_SET(reason, PFRES_MEMORY);
1008 /* PF_DROP requires a valid mbuf *m0 in pf_test6(), will free later. */
1009 return (PF_DROP);
1010 }
1011 #endif /* INET6 */
1012
1013 #ifdef INET6
1014 int
1015 pf_max_frag_size(struct mbuf *m)
1016 {
1017 struct m_tag *tag;
1018 struct pf_fragment_tag *ftag;
1019
1020 tag = m_tag_find(m, PACKET_TAG_PF_REASSEMBLED, NULL);
1021 if (tag == NULL)
1022 return (m->m_pkthdr.len);
1023
1024 ftag = (struct pf_fragment_tag *)(tag + 1);
1025
1026 return (ftag->ft_maxlen);
1027 }
1028
1029 int
1030 pf_refragment6(struct ifnet *ifp, struct mbuf **m0, struct m_tag *mtag,
1031 struct ifnet *rt, bool forward)
1032 {
1033 struct mbuf *m = *m0, *t;
1034 struct ip6_hdr *hdr;
1035 struct pf_fragment_tag *ftag = (struct pf_fragment_tag *)(mtag + 1);
1036 struct pf_pdesc pd;
1037 uint32_t frag_id;
1038 uint16_t hdrlen, extoff, maxlen;
1039 uint8_t proto;
1040 int error, action;
1041
1042 hdrlen = ftag->ft_hdrlen;
1043 extoff = ftag->ft_extoff;
1044 maxlen = ftag->ft_maxlen;
1045 frag_id = ftag->ft_id;
1046 m_tag_delete(m, mtag);
1047 mtag = NULL;
1048 ftag = NULL;
1049
1050 if (extoff) {
1051 int off;
1052
1053 /* Use protocol from next field of last extension header */
1054 m = m_getptr(m, extoff + offsetof(struct ip6_ext, ip6e_nxt),
1055 &off);
1056 KASSERT((m != NULL), ("pf_refragment6: short mbuf chain"));
1057 proto = *(mtod(m, uint8_t *) + off);
1058 *(mtod(m, char *) + off) = IPPROTO_FRAGMENT;
1059 m = *m0;
1060 } else {
1061 hdr = mtod(m, struct ip6_hdr *);
1062 proto = hdr->ip6_nxt;
1063 hdr->ip6_nxt = IPPROTO_FRAGMENT;
1064 }
1065
1066 /* In case of link-local traffic we'll need a scope set. */
1067 hdr = mtod(m, struct ip6_hdr *);
1068
1069 in6_setscope(&hdr->ip6_src, ifp, NULL);
1070 in6_setscope(&hdr->ip6_dst, ifp, NULL);
1071
1072 /* The MTU must be a multiple of 8 bytes, or we risk doing the
1073 * fragmentation wrong. */
1074 maxlen = maxlen & ~7;
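/* e.g. a largest seen fragment of 1460 octets yields a 1456 octet limit */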
1075
1076 /*
1077 * Maxlen may be less than 8 if there was only a single
1078 * fragment. As it was fragmented before, add a fragment
1079 * header also for a single fragment. If total or maxlen
1080 * is less than 8, ip6_fragment() will return EMSGSIZE and
1081 * we drop the packet.
1082 */
1083 error = ip6_fragment(ifp, m, hdrlen, proto, maxlen, frag_id);
1084 m = (*m0)->m_nextpkt;
1085 (*m0)->m_nextpkt = NULL;
1086 if (error == 0) {
1087 /* The first mbuf contains the unfragmented packet. */
1088 m_freem(*m0);
1089 *m0 = NULL;
1090 action = PF_PASS;
1091 } else {
1092 /* Drop expects an mbuf to free. */
1093 DPFPRINTF(("refragment error %d\n", error));
1094 action = PF_DROP;
1095 }
1096 for (; m; m = t) {
1097 t = m->m_nextpkt;
1098 m->m_nextpkt = NULL;
1099 m->m_flags |= M_SKIP_FIREWALL;
1100 memset(&pd, 0, sizeof(pd));
1101 pd.pf_mtag = pf_find_mtag(m);
1102 if (error != 0) {
1103 m_freem(m);
1104 continue;
1105 }
1106 if (rt != NULL) {
1107 struct sockaddr_in6 dst;
1108 hdr = mtod(m, struct ip6_hdr *);
1109
1110 bzero(&dst, sizeof(dst));
1111 dst.sin6_family = AF_INET6;
1112 dst.sin6_len = sizeof(dst);
1113 dst.sin6_addr = hdr->ip6_dst;
1114
1115 if (m->m_pkthdr.len <= if_getmtu(ifp)) {
1116 nd6_output_ifp(rt, rt, m, &dst, NULL);
1117 } else {
1118 in6_ifstat_inc(ifp, ifs6_in_toobig);
1119 icmp6_error(m, ICMP6_PACKET_TOO_BIG, 0,
1120 if_getmtu(ifp));
1121 }
1122 } else if (forward) {
1123 MPASS(m->m_pkthdr.rcvif != NULL);
1124 ip6_forward(m, 0);
1125 } else {
1126 (void)ip6_output(m, NULL, NULL, 0, NULL, NULL,
1127 NULL);
1128 }
1129 }
1130
1131 return (action);
1132 }
1133 #endif /* INET6 */
1134
1135 #ifdef INET
1136 int
1137 pf_normalize_ip(u_short *reason, struct pf_pdesc *pd)
1138 {
1139 struct pf_krule *r;
1140 struct ip *h = mtod(pd->m, struct ip *);
1141 int mff = (ntohs(h->ip_off) & IP_MF);
1142 int hlen = h->ip_hl << 2;
1143 u_int16_t fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
1144 u_int16_t max;
1145 int ip_len;
1146 int tag = -1;
1147 int verdict;
1148 bool scrub_compat;
1149
1150 PF_RULES_RASSERT();
1151
1152 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
1153 /*
1154 * Check if there are any scrub rules, matching or not.
1155 * Lack of scrub rules means:
1156 * - packet normalization is enforced, just like in OpenBSD
1157 * - fragment reassembly depends on V_pf_status.reass
1158 * With scrub rules:
1159 * - packet normalization is performed if there is a matching scrub rule
1160 * - fragment reassembly is performed if the matching rule has no
1161 * PFRULE_FRAGMENT_NOREASS flag
1162 */
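/*
 * Illustration (pf.conf syntax, an assumed example ruleset, not part of
 * this file): the rule-based path corresponds to lines such as
 * "scrub in all fragment reassemble" or "no scrub in all", while the
 * rule-less path is governed by "set reassemble yes|no".
 */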
1163 scrub_compat = (r != NULL);
1164 while (r != NULL) {
1165 pf_counter_u64_add(&r->evaluations, 1);
1166 if (pfi_kkif_match(r->kif, pd->kif) == r->ifnot)
1167 r = r->skip[PF_SKIP_IFP];
1168 else if (r->direction && r->direction != pd->dir)
1169 r = r->skip[PF_SKIP_DIR];
1170 else if (r->af && r->af != AF_INET)
1171 r = r->skip[PF_SKIP_AF];
1172 else if (r->proto && r->proto != h->ip_p)
1173 r = r->skip[PF_SKIP_PROTO];
1174 else if (PF_MISMATCHAW(&r->src.addr,
1175 (struct pf_addr *)&h->ip_src.s_addr, AF_INET,
1176 r->src.neg, pd->kif, M_GETFIB(pd->m)))
1177 r = r->skip[PF_SKIP_SRC_ADDR];
1178 else if (PF_MISMATCHAW(&r->dst.addr,
1179 (struct pf_addr *)&h->ip_dst.s_addr, AF_INET,
1180 r->dst.neg, NULL, M_GETFIB(pd->m)))
1181 r = r->skip[PF_SKIP_DST_ADDR];
1182 else if (r->match_tag && !pf_match_tag(pd->m, r, &tag,
1183 pd->pf_mtag ? pd->pf_mtag->tag : 0))
1184 r = TAILQ_NEXT(r, entries);
1185 else
1186 break;
1187 }
1188
1189 if (scrub_compat) {
1190 /* With scrub rules present IPv4 normalization happens only
1191 * if one of the rules has matched and it's not a "no scrub" rule */
1192 if (r == NULL || r->action == PF_NOSCRUB)
1193 return (PF_PASS);
1194
1195 pf_counter_u64_critical_enter();
1196 pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1);
1197 pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT], pd->tot_len);
1198 pf_counter_u64_critical_exit();
1199 pf_rule_to_actions(r, &pd->act);
1200 }
1201
1202 /* Check for illegal packets */
1203 if (hlen < (int)sizeof(struct ip)) {
1204 REASON_SET(reason, PFRES_NORM);
1205 goto drop;
1206 }
1207
1208 if (hlen > ntohs(h->ip_len)) {
1209 REASON_SET(reason, PFRES_NORM);
1210 goto drop;
1211 }
1212
1213 /* Clear IP_DF if the rule uses the no-df option or we're in no-df mode */
1214 if (((!scrub_compat && V_pf_status.reass & PF_REASS_NODF) ||
1215 (r != NULL && r->rule_flag & PFRULE_NODF)) &&
1216 (h->ip_off & htons(IP_DF))
1217 ) {
1218 u_int16_t ip_off = h->ip_off;
1219
1220 h->ip_off &= htons(~IP_DF);
1221 h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
1222 }
1223
1224 /* We will need other tests here */
1225 if (!fragoff && !mff)
1226 goto no_fragment;
1227
1228 /* We're dealing with a fragment now. Don't allow fragments
1229 * with IP_DF to enter the cache. If the flag was cleared by
1230 * no-df above, fine. Otherwise drop it.
1231 */
1232 if (h->ip_off & htons(IP_DF)) {
1233 DPFPRINTF(("IP_DF\n"));
1234 goto bad;
1235 }
1236
1237 ip_len = ntohs(h->ip_len) - hlen;
1238
1239 /* All fragments are 8 byte aligned */
1240 if (mff && (ip_len & 0x7)) {
1241 DPFPRINTF(("mff and %d\n", ip_len));
1242 goto bad;
1243 }
1244
1245 /* Respect maximum length */
1246 if (fragoff + ip_len > IP_MAXPACKET) {
1247 DPFPRINTF(("max packet %d\n", fragoff + ip_len));
1248 goto bad;
1249 }
1250
1251 if ((!scrub_compat && V_pf_status.reass) ||
1252 (r != NULL && !(r->rule_flag & PFRULE_FRAGMENT_NOREASS))
1253 ) {
1254 max = fragoff + ip_len;
1255
1256 /* Fully buffer all of the fragments
1257 * Might return a completely reassembled mbuf, or NULL */
1258 PF_FRAG_LOCK();
1259 DPFPRINTF(("reass frag %d @ %d-%d\n", h->ip_id, fragoff, max));
1260 verdict = pf_reassemble(&pd->m, reason);
1261 PF_FRAG_UNLOCK();
1262
1263 if (verdict != PF_PASS)
1264 return (PF_DROP);
1265
1266 if (pd->m == NULL)
1267 return (PF_DROP);
1268
1269 h = mtod(pd->m, struct ip *);
1270 pd->tot_len = ntohs(h->ip_len);
1271
1272 no_fragment:
1273 /* At this point, only IP_DF is allowed in ip_off */
1274 if (h->ip_off & ~htons(IP_DF)) {
1275 u_int16_t ip_off = h->ip_off;
1276
1277 h->ip_off &= htons(IP_DF);
1278 h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
1279 }
1280 }
1281
1282 return (PF_PASS);
1283
1284 bad:
1285 DPFPRINTF(("dropping bad fragment\n"));
1286 REASON_SET(reason, PFRES_FRAG);
1287 drop:
1288 if (r != NULL && r->log)
1289 PFLOG_PACKET(PF_DROP, *reason, r, NULL, NULL, pd, 1, NULL);
1290
1291 return (PF_DROP);
1292 }
1293 #endif
1294
1295 #ifdef INET6
1296 int
1297 pf_normalize_ip6(int off, u_short *reason,
1298 struct pf_pdesc *pd)
1299 {
1300 struct pf_krule *r;
1301 struct ip6_hdr *h;
1302 struct ip6_frag frag;
1303 bool scrub_compat;
1304
1305 PF_RULES_RASSERT();
1306
1307 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
1308 /*
1309 * Check if there are any scrub rules, matching or not.
1310 * Lack of scrub rules means:
1311 * - packet normalization is enforced, just like in OpenBSD
1312 * With scrub rules:
1313 * - packet normalization is performed if there is a matching scrub rule
1314 * XXX: Fragment reassembly always performed for IPv6!
1315 */
1316 scrub_compat = (r != NULL);
1317 while (r != NULL) {
1318 pf_counter_u64_add(&r->evaluations, 1);
1319 if (pfi_kkif_match(r->kif, pd->kif) == r->ifnot)
1320 r = r->skip[PF_SKIP_IFP];
1321 else if (r->direction && r->direction != pd->dir)
1322 r = r->skip[PF_SKIP_DIR];
1323 else if (r->af && r->af != AF_INET6)
1324 r = r->skip[PF_SKIP_AF];
1325 else if (r->proto && r->proto != pd->proto)
1326 r = r->skip[PF_SKIP_PROTO];
1327 else if (PF_MISMATCHAW(&r->src.addr,
1328 (struct pf_addr *)&pd->src, AF_INET6,
1329 r->src.neg, pd->kif, M_GETFIB(pd->m)))
1330 r = r->skip[PF_SKIP_SRC_ADDR];
1331 else if (PF_MISMATCHAW(&r->dst.addr,
1332 (struct pf_addr *)&pd->dst, AF_INET6,
1333 r->dst.neg, NULL, M_GETFIB(pd->m)))
1334 r = r->skip[PF_SKIP_DST_ADDR];
1335 else
1336 break;
1337 }
1338
1339 if (scrub_compat) {
1340 /* With scrub rules present IPv6 normalization happens only
1341 * if one of the rules has matched and it's not a "no scrub" rule */
1342 if (r == NULL || r->action == PF_NOSCRUB)
1343 return (PF_PASS);
1344
1345 pf_counter_u64_critical_enter();
1346 pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1);
1347 pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT], pd->tot_len);
1348 pf_counter_u64_critical_exit();
1349 pf_rule_to_actions(r, &pd->act);
1350 }
1351
1352 if (!pf_pull_hdr(pd->m, off, &frag, sizeof(frag), NULL, reason, AF_INET6))
1353 return (PF_DROP);
1354
1355 /* Offset now points to data portion. */
1356 off += sizeof(frag);
1357
1358 if (pd->virtual_proto == PF_VPROTO_FRAGMENT) {
1359 /* Returns PF_DROP or *m0 is NULL or completely reassembled
1360 * mbuf. */
1361 if (pf_reassemble6(&pd->m, &frag, off, pd->extoff, reason) != PF_PASS)
1362 return (PF_DROP);
1363 if (pd->m == NULL)
1364 return (PF_DROP);
1365 h = mtod(pd->m, struct ip6_hdr *);
1366 pd->tot_len = ntohs(h->ip6_plen) + sizeof(struct ip6_hdr);
1367 }
1368
1369 return (PF_PASS);
1370 }
1371 #endif /* INET6 */
1372
1373 int
1374 pf_normalize_tcp(struct pf_pdesc *pd)
1375 {
1376 struct pf_krule *r, *rm = NULL;
1377 struct tcphdr *th = &pd->hdr.tcp;
1378 int rewrite = 0;
1379 u_short reason;
1380 u_int16_t flags;
1381 sa_family_t af = pd->af;
1382 int srs;
1383
1384 PF_RULES_RASSERT();
1385
1386 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
1387 /* Check if there are any scrub rules. Lack of scrub rules means enforced
1388 * packet normalization operation just like in OpenBSD. */
1389 srs = (r != NULL);
1390 while (r != NULL) {
1391 pf_counter_u64_add(&r->evaluations, 1);
1392 if (pfi_kkif_match(r->kif, pd->kif) == r->ifnot)
1393 r = r->skip[PF_SKIP_IFP];
1394 else if (r->direction && r->direction != pd->dir)
1395 r = r->skip[PF_SKIP_DIR];
1396 else if (r->af && r->af != af)
1397 r = r->skip[PF_SKIP_AF];
1398 else if (r->proto && r->proto != pd->proto)
1399 r = r->skip[PF_SKIP_PROTO];
1400 else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
1401 r->src.neg, pd->kif, M_GETFIB(pd->m)))
1402 r = r->skip[PF_SKIP_SRC_ADDR];
1403 else if (r->src.port_op && !pf_match_port(r->src.port_op,
1404 r->src.port[0], r->src.port[1], th->th_sport))
1405 r = r->skip[PF_SKIP_SRC_PORT];
1406 else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
1407 r->dst.neg, NULL, M_GETFIB(pd->m)))
1408 r = r->skip[PF_SKIP_DST_ADDR];
1409 else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
1410 r->dst.port[0], r->dst.port[1], th->th_dport))
1411 r = r->skip[PF_SKIP_DST_PORT];
1412 else if (r->os_fingerprint != PF_OSFP_ANY && !pf_osfp_match(
1413 pf_osfp_fingerprint(pd, th),
1414 r->os_fingerprint))
1415 r = TAILQ_NEXT(r, entries);
1416 else {
1417 rm = r;
1418 break;
1419 }
1420 }
1421
1422 if (srs) {
1423 /* With scrub rules present TCP normalization happens only
1424 * if one of the rules has matched and it's not a "no scrub" rule */
1425 if (rm == NULL || rm->action == PF_NOSCRUB)
1426 return (PF_PASS);
1427
1428 pf_counter_u64_critical_enter();
1429 pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1);
1430 pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT], pd->tot_len);
1431 pf_counter_u64_critical_exit();
1432 pf_rule_to_actions(rm, &pd->act);
1433 }
1434
1435 if (rm && rm->rule_flag & PFRULE_REASSEMBLE_TCP)
1436 pd->flags |= PFDESC_TCP_NORM;
1437
1438 flags = tcp_get_flags(th);
1439 if (flags & TH_SYN) {
1440 /* Illegal packet */
1441 if (flags & TH_RST)
1442 goto tcp_drop;
1443
1444 if (flags & TH_FIN)
1445 goto tcp_drop;
1446 } else {
1447 /* Illegal packet */
1448 if (!(flags & (TH_ACK|TH_RST)))
1449 goto tcp_drop;
1450 }
1451
1452 if (!(flags & TH_ACK)) {
1453 /* These flags are only valid if ACK is set */
1454 if ((flags & TH_FIN) || (flags & TH_PUSH) || (flags & TH_URG))
1455 goto tcp_drop;
1456 }
1457
1458 /* Check for illegal header length */
1459 if (th->th_off < (sizeof(struct tcphdr) >> 2))
1460 goto tcp_drop;
1461
1462 /* If flags changed, or reserved data set, then adjust */
1463 if (flags != tcp_get_flags(th) ||
1464 (tcp_get_flags(th) & (TH_RES1|TH_RES2|TH_RES3)) != 0) {
1465 u_int16_t ov, nv;
1466
1467 ov = *(u_int16_t *)(&th->th_ack + 1);
1468 flags &= ~(TH_RES1 | TH_RES2 | TH_RES3);
1469 tcp_set_flags(th, flags);
1470 nv = *(u_int16_t *)(&th->th_ack + 1);
1471
1472 th->th_sum = pf_proto_cksum_fixup(pd->m, th->th_sum, ov, nv, 0);
1473 rewrite = 1;
1474 }
1475
1476 /* Remove urgent pointer, if TH_URG is not set */
1477 if (!(flags & TH_URG) && th->th_urp) {
1478 th->th_sum = pf_proto_cksum_fixup(pd->m, th->th_sum, th->th_urp,
1479 0, 0);
1480 th->th_urp = 0;
1481 rewrite = 1;
1482 }
1483
1484 /* copy back packet headers if we sanitized */
1485 if (rewrite)
1486 m_copyback(pd->m, pd->off, sizeof(*th), (caddr_t)th);
1487
1488 return (PF_PASS);
1489
1490 tcp_drop:
1491 REASON_SET(&reason, PFRES_NORM);
1492 if (rm != NULL && rm->log)
1493 PFLOG_PACKET(PF_DROP, reason, rm, NULL, NULL, pd, 1, NULL);
1494 return (PF_DROP);
1495 }
1496
1497 int
1498 pf_normalize_tcp_init(struct pf_pdesc *pd, struct tcphdr *th,
1499 struct pf_state_peer *src)
1500 {
1501 u_int32_t tsval, tsecr;
1502 int olen;
1503 uint8_t opts[MAX_TCPOPTLEN], *opt;
1504
1505 KASSERT((src->scrub == NULL),
1506 ("pf_normalize_tcp_init: src->scrub != NULL"));
1507
1508 src->scrub = uma_zalloc(V_pf_state_scrub_z, M_ZERO | M_NOWAIT);
1509 if (src->scrub == NULL)
1510 return (1);
1511
1512 switch (pd->af) {
1513 #ifdef INET
1514 case AF_INET: {
1515 struct ip *h = mtod(pd->m, struct ip *);
1516 src->scrub->pfss_ttl = h->ip_ttl;
1517 break;
1518 }
1519 #endif /* INET */
1520 #ifdef INET6
1521 case AF_INET6: {
1522 struct ip6_hdr *h = mtod(pd->m, struct ip6_hdr *);
1523 src->scrub->pfss_ttl = h->ip6_hlim;
1524 break;
1525 }
1526 #endif /* INET6 */
1527 default:
1528 unhandled_af(pd->af);
1529 }
1530
1531 /*
1532 * All normalizations below are only begun if we see the start of
1533 * the connection. They must all set an enabled bit in pfss_flags
1534 */
1535 if ((tcp_get_flags(th) & TH_SYN) == 0)
1536 return (0);
1537
1538 olen = (th->th_off << 2) - sizeof(*th);
1539 if (olen < TCPOLEN_TIMESTAMP || !pf_pull_hdr(pd->m,
1540 pd->off + sizeof(*th), opts, olen, NULL, NULL, pd->af))
1541 return (0);
1542
1543 opt = opts;
1544 while ((opt = pf_find_tcpopt(opt, opts, olen,
1545 TCPOPT_TIMESTAMP, TCPOLEN_TIMESTAMP)) != NULL) {
1546 src->scrub->pfss_flags |= PFSS_TIMESTAMP;
1547 src->scrub->pfss_ts_mod = arc4random();
1548 /* note PFSS_PAWS not set yet */
1549 memcpy(&tsval, &opt[2], sizeof(u_int32_t));
1550 memcpy(&tsecr, &opt[6], sizeof(u_int32_t));
1551 src->scrub->pfss_tsval0 = ntohl(tsval);
1552 src->scrub->pfss_tsval = ntohl(tsval);
1553 src->scrub->pfss_tsecr = ntohl(tsecr);
1554 getmicrouptime(&src->scrub->pfss_last);
1555
1556 opt += opt[1];
1557 }
1558
1559 return (0);
1560 }
1561
1562 void
1563 pf_normalize_tcp_cleanup(struct pf_kstate *state)
1564 {
1565 /* XXX Note: this also cleans up SCTP. */
1566 uma_zfree(V_pf_state_scrub_z, state->src.scrub);
1567 uma_zfree(V_pf_state_scrub_z, state->dst.scrub);
1568
1569 /* Someday... flush the TCP segment reassembly descriptors. */
1570 }
1571 int
1572 pf_normalize_sctp_init(struct pf_pdesc *pd, struct pf_state_peer *src,
1573 struct pf_state_peer *dst)
1574 {
1575 src->scrub = uma_zalloc(V_pf_state_scrub_z, M_ZERO | M_NOWAIT);
1576 if (src->scrub == NULL)
1577 return (1);
1578
1579 dst->scrub = uma_zalloc(V_pf_state_scrub_z, M_ZERO | M_NOWAIT);
1580 if (dst->scrub == NULL) {
1581 uma_zfree(V_pf_state_scrub_z, src->scrub);
1582 return (1);
1583 }
1584
1585 dst->scrub->pfss_v_tag = pd->sctp_initiate_tag;
1586
1587 return (0);
1588 }
1589
1590 int
1591 pf_normalize_tcp_stateful(struct pf_pdesc *pd,
1592 u_short *reason, struct tcphdr *th, struct pf_kstate *state,
1593 struct pf_state_peer *src, struct pf_state_peer *dst, int *writeback)
1594 {
1595 struct timeval uptime;
1596 u_int tsval_from_last;
1597 uint32_t tsval, tsecr;
1598 int copyback = 0;
1599 int got_ts = 0;
1600 int olen;
1601 uint8_t opts[MAX_TCPOPTLEN], *opt;
1602
1603 KASSERT((src->scrub || dst->scrub),
1604 ("%s: src->scrub && dst->scrub!", __func__));
1605
1606 /*
1607 * Enforce the minimum TTL seen for this connection. Negate a common
1608 * technique to evade an intrusion detection system and confuse
1609 * firewall state code.
1610 */
1611 switch (pd->af) {
1612 #ifdef INET
1613 case AF_INET: {
1614 if (src->scrub) {
1615 struct ip *h = mtod(pd->m, struct ip *);
1616 if (h->ip_ttl > src->scrub->pfss_ttl)
1617 src->scrub->pfss_ttl = h->ip_ttl;
1618 h->ip_ttl = src->scrub->pfss_ttl;
1619 }
1620 break;
1621 }
1622 #endif /* INET */
1623 #ifdef INET6
1624 case AF_INET6: {
1625 if (src->scrub) {
1626 struct ip6_hdr *h = mtod(pd->m, struct ip6_hdr *);
1627 if (h->ip6_hlim > src->scrub->pfss_ttl)
1628 src->scrub->pfss_ttl = h->ip6_hlim;
1629 h->ip6_hlim = src->scrub->pfss_ttl;
1630 }
1631 break;
1632 }
1633 #endif /* INET6 */
1634 default:
1635 unhandled_af(pd->af);
1636 }
1637
1638 olen = (th->th_off << 2) - sizeof(*th);
1639
1640 if (olen >= TCPOLEN_TIMESTAMP &&
1641 ((src->scrub && (src->scrub->pfss_flags & PFSS_TIMESTAMP)) ||
1642 (dst->scrub && (dst->scrub->pfss_flags & PFSS_TIMESTAMP))) &&
1643 pf_pull_hdr(pd->m, pd->off + sizeof(*th), opts, olen, NULL, NULL, pd->af)) {
1644 /* Modulate the timestamps. Can be used for NAT detection, OS
1645 * uptime determination or reboot detection.
1646 */
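/*
 * Sketch of the arithmetic below: each peer gets a random pfss_ts_mod
 * in pf_normalize_tcp_init(). A tsval is rewritten on the wire to
 * (tsval + src->scrub->pfss_ts_mod) mod 2^32, and the echoed tsecr has
 * dst->scrub->pfss_ts_mod subtracted again, so both endpoints observe
 * consistent values while their real timestamp clocks stay hidden.
 */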
1647 opt = opts;
1648 while ((opt = pf_find_tcpopt(opt, opts, olen,
1649 TCPOPT_TIMESTAMP, TCPOLEN_TIMESTAMP)) != NULL) {
1650 uint8_t *ts = opt + 2;
1651 uint8_t *tsr = opt + 6;
1652
1653 if (got_ts) {
1654 /* Huh? Multiple timestamps!? */
1655 if (V_pf_status.debug >= PF_DEBUG_MISC) {
1656 printf("pf: %s: multiple TS??", __func__);
1657 pf_print_state(state);
1658 printf("\n");
1659 }
1660 REASON_SET(reason, PFRES_TS);
1661 return (PF_DROP);
1662 }
1663
1664 memcpy(&tsval, ts, sizeof(u_int32_t));
1665 memcpy(&tsecr, tsr, sizeof(u_int32_t));
1666
1667 /* modulate TS */
1668 if (tsval && src->scrub &&
1669 (src->scrub->pfss_flags & PFSS_TIMESTAMP)) {
1670 /* tsval used further on */
1671 tsval = ntohl(tsval);
1672 pf_patch_32(pd,
1673 ts, htonl(tsval + src->scrub->pfss_ts_mod),
1674 PF_ALGNMNT(ts - opts));
1675 copyback = 1;
1676 }
1677
1678 /* modulate TS reply if any (!0) */
1679 if (tsecr && dst->scrub &&
1680 (dst->scrub->pfss_flags & PFSS_TIMESTAMP)) {
1681 /* tsecr used further on */
1682 tsecr = ntohl(tsecr) - dst->scrub->pfss_ts_mod;
1683 pf_patch_32(pd, tsr, htonl(tsecr),
1684 PF_ALGNMNT(tsr - opts));
1685 copyback = 1;
1686 }
1687
1688 got_ts = 1;
1689 opt += opt[1];
1690 }
1691
1692 if (copyback) {
1693 /* Copyback the options; the caller copies back the header */
1694 *writeback = 1;
1695 m_copyback(pd->m, pd->off + sizeof(*th), olen, opts);
1696 }
1697 }
1698
1699 /*
1700 * Must invalidate PAWS checks on connections idle for too long.
1701 * The fastest allowed timestamp clock is 1ms. That turns out to
1702 * be about 24 days before it wraps. XXX Right now our lowerbound
1703 * TS echo check only works for the first 12 days of a connection,
1704 * i.e. until the TS has exhausted half its 32bit space
1705 */
1706 #define TS_MAX_IDLE (24*24*60*60)
1707 #define TS_MAX_CONN (12*24*60*60) /* XXX remove when better tsecr check */
1708
1709 getmicrouptime(&uptime);
1710 if (src->scrub && (src->scrub->pfss_flags & PFSS_PAWS) &&
1711 (uptime.tv_sec - src->scrub->pfss_last.tv_sec > TS_MAX_IDLE ||
1712 time_uptime - (state->creation / 1000) > TS_MAX_CONN)) {
1713 if (V_pf_status.debug >= PF_DEBUG_MISC) {
1714 DPFPRINTF(("src idled out of PAWS\n"));
1715 pf_print_state(state);
1716 printf("\n");
1717 }
1718 src->scrub->pfss_flags = (src->scrub->pfss_flags & ~PFSS_PAWS)
1719 | PFSS_PAWS_IDLED;
1720 }
1721 if (dst->scrub && (dst->scrub->pfss_flags & PFSS_PAWS) &&
1722 uptime.tv_sec - dst->scrub->pfss_last.tv_sec > TS_MAX_IDLE) {
1723 if (V_pf_status.debug >= PF_DEBUG_MISC) {
1724 DPFPRINTF(("dst idled out of PAWS\n"));
1725 pf_print_state(state);
1726 printf("\n");
1727 }
1728 dst->scrub->pfss_flags = (dst->scrub->pfss_flags & ~PFSS_PAWS)
1729 | PFSS_PAWS_IDLED;
1730 }
1731
1732 if (got_ts && src->scrub && dst->scrub &&
1733 (src->scrub->pfss_flags & PFSS_PAWS) &&
1734 (dst->scrub->pfss_flags & PFSS_PAWS)) {
1735 /* Validate that the timestamps are "in-window".
1736 * RFC1323 describes TCP Timestamp options that allow
1737 * measurement of RTT (round trip time) and PAWS
1738 * (protection against wrapped sequence numbers). PAWS
1739 * gives us a set of rules for rejecting packets on
1740 * long fat pipes (packets that were somehow delayed
1741 * in transit longer than the time it took to send the
1742 * full TCP sequence space of 4Gb). We can use these
1743 * rules and infer a few others that will let us treat
1744 * the 32bit timestamp and the 32bit echoed timestamp
1745 * as sequence numbers to prevent a blind attacker from
1746 * inserting packets into a connection.
1747 *
1748 * RFC1323 tells us:
1749 * - The timestamp on this packet must be greater than
1750 * or equal to the last value echoed by the other
1751 * endpoint. The RFC says those will be discarded
1752 * since it is a dup that has already been acked.
1753 * This gives us a lowerbound on the timestamp.
1754 * timestamp >= other last echoed timestamp
1755 * - The timestamp will be less than or equal to
1756 * the last timestamp plus the time between the
1757 * last packet and now. The RFC defines the max
1758 * clock rate as 1ms. We will allow clocks to be
1759 * up to 10% fast and will allow a total difference
1760 * of 30 seconds due to a route change. And this
1761 * gives us an upperbound on the timestamp.
1762 * timestamp <= last timestamp + max ticks
1763 * We have to be careful here. Windows will send an
1764 * initial timestamp of zero and then initialize it
1765 * to a random value after the 3whs; presumably to
1766 * avoid a DoS by having to call an expensive RNG
1767 * during a SYN flood. Proof MS has at least one
1768 * good security geek.
1769 *
1770 * - The TCP timestamp option must also echo the other
1771 * endpoints timestamp. The timestamp echoed is the
1772 * one carried on the earliest unacknowledged segment
1773 * on the left edge of the sequence window. The RFC
1774 * states that the host will reject any echoed
1775 * timestamps that were larger than any ever sent.
1776 * This gives us an upperbound on the TS echo.
1777 * tsecr <= largest_tsval
1778 * - The lowerbound on the TS echo is a little more
1779 * tricky to determine. The other endpoint's echoed
1780 * values will not decrease. But there may be
1781 * network conditions that re-order packets and
1782 * cause our view of them to decrease. For now the
1783 * only lowerbound we can safely determine is that
1784 * the TS echo will never be less than the original
1785 * TS. XXX There is probably a better lowerbound.
1786 * Remove TS_MAX_CONN with better lowerbound check.
1787 * tsecr >= other original TS
1788 *
1789 * It is also important to note that the fastest
1790 * timestamp clock of 1ms will wrap its 32bit space in
1791 * 24 days. So we just disable TS checking after 24
1792 * days of idle time. We actually must use a 12-day
1793 * connection limit until we can come up with a better
1794 * lowerbound to the TS echo check.
1795 */
1796 struct timeval delta_ts;
1797 int ts_fudge;
1798
1799 /*
1800 * PFTM_TS_DIFF is how many seconds of leeway to allow
1801 * a host's timestamp. This can happen if the previous
1802 * packet got delayed in transit for much longer than
1803 * this packet.
1804 */
1805 if ((ts_fudge = state->rule->timeout[PFTM_TS_DIFF]) == 0)
1806 ts_fudge = V_pf_default_rule.timeout[PFTM_TS_DIFF];
1807
1808 /* Calculate max ticks since the last timestamp */
1809 #define TS_MAXFREQ 1100 /* RFC max TS freq of 1kHz + 10% skew */
1810 #define TS_MICROSECS 1000000 /* microseconds per second */
1811 delta_ts = uptime;
1812 timevalsub(&delta_ts, &src->scrub->pfss_last);
1813 tsval_from_last = (delta_ts.tv_sec + ts_fudge) * TS_MAXFREQ;
1814 tsval_from_last += delta_ts.tv_usec / (TS_MICROSECS/TS_MAXFREQ);
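/*
 * Illustrative example, assuming the usual 30 second PFTM_TS_DIFF
 * default for ts_fudge: a packet arriving 2.5s after the previous
 * one (tv_sec = 2, tv_usec = 500000) gives
 * tsval_from_last = (2 + 30) * 1100 + 500000 / (1000000 / 1100)
 *                 = 35200 + 550 = 35750 ticks
 * as the most the peer's clock may have advanced since pfss_tsval
 * was recorded.
 */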
1815
1816 if ((src->state >= TCPS_ESTABLISHED &&
1817 dst->state >= TCPS_ESTABLISHED) &&
1818 (SEQ_LT(tsval, dst->scrub->pfss_tsecr) ||
1819 SEQ_GT(tsval, src->scrub->pfss_tsval + tsval_from_last) ||
1820 (tsecr && (SEQ_GT(tsecr, dst->scrub->pfss_tsval) ||
1821 SEQ_LT(tsecr, dst->scrub->pfss_tsval0))))) {
1822 /* Bad RFC1323 implementation or an insertion attack.
1823 *
1824 * - Solaris 2.6 and 2.7 are known to send another ACK
1825 * after the FIN,FIN|ACK,ACK closing that carries
1826 * an old timestamp.
1827 */
1828
1829 DPFPRINTF(("Timestamp failed %c%c%c%c\n",
1830 SEQ_LT(tsval, dst->scrub->pfss_tsecr) ? '0' : ' ',
1831 SEQ_GT(tsval, src->scrub->pfss_tsval +
1832 tsval_from_last) ? '1' : ' ',
1833 SEQ_GT(tsecr, dst->scrub->pfss_tsval) ? '2' : ' ',
1834 SEQ_LT(tsecr, dst->scrub->pfss_tsval0)? '3' : ' '));
1835 DPFPRINTF((" tsval: %u tsecr: %u +ticks: %u "
1836 "idle: %jus %lums\n",
1837 tsval, tsecr, tsval_from_last,
1838 (uintmax_t)delta_ts.tv_sec,
1839 delta_ts.tv_usec / 1000));
1840 DPFPRINTF((" src->tsval: %u tsecr: %u\n",
1841 src->scrub->pfss_tsval, src->scrub->pfss_tsecr));
1842 DPFPRINTF((" dst->tsval: %u tsecr: %u tsval0: %u"
1843 "\n", dst->scrub->pfss_tsval,
1844 dst->scrub->pfss_tsecr, dst->scrub->pfss_tsval0));
1845 if (V_pf_status.debug >= PF_DEBUG_MISC) {
1846 pf_print_state(state);
1847 pf_print_flags(tcp_get_flags(th));
1848 printf("\n");
1849 }
1850 REASON_SET(reason, PFRES_TS);
1851 return (PF_DROP);
1852 }
1853
1854 /* XXX I'd really like to require tsecr but it's optional */
1855
1856 } else if (!got_ts && (tcp_get_flags(th) & TH_RST) == 0 &&
1857 ((src->state == TCPS_ESTABLISHED && dst->state == TCPS_ESTABLISHED)
1858 || pd->p_len > 0 || (tcp_get_flags(th) & TH_SYN)) &&
1859 src->scrub && dst->scrub &&
1860 (src->scrub->pfss_flags & PFSS_PAWS) &&
1861 (dst->scrub->pfss_flags & PFSS_PAWS)) {
1862 /* Didn't send a timestamp. Timestamps aren't really useful
1863 * when:
1864 * - connection opening or closing (often not even sent).
1865 * but we must not let an attacker put a FIN on a
1866 * data packet to sneak it through our ESTABLISHED check.
1867 * - on a TCP reset. RFC suggests not even looking at TS.
1868 * - on an empty ACK. The TS will not be echoed so it will
1869 * probably not help keep the RTT calculation in sync and
1870 * there isn't as much danger when the sequence numbers
1871 * got wrapped. So some stacks don't include TS on empty
1872 * ACKs :-(
1873 *
1874 * To minimize the disruption to mostly RFC1323 conformant
1875 * stacks, we will only require timestamps on data packets.
1876 *
1877 * And what do ya know, we cannot require timestamps on data
1878 * packets. There appear to be devices that do legitimate
1879 * TCP connection hijacking. There are HTTP devices that allow
1880 * a 3whs (with timestamps) and then buffer the HTTP request.
1881 * If the intermediate device has the HTTP response cache, it
1882 * will spoof the response but not bother timestamping its
1883 * packets. So we can look for the presence of a timestamp in
1884 * the first data packet and if there, require it in all future
1885 * packets.
1886 */
1887
1888 if (pd->p_len > 0 && (src->scrub->pfss_flags & PFSS_DATA_TS)) {
1889 /*
1890 * Hey! Someone tried to sneak a packet in. Or the
1891 * stack changed its RFC1323 behavior?!?!
1892 */
1893 if (V_pf_status.debug >= PF_DEBUG_MISC) {
1894 DPFPRINTF(("Did not receive expected RFC1323 "
1895 "timestamp\n"));
1896 pf_print_state(state);
1897 pf_print_flags(tcp_get_flags(th));
1898 printf("\n");
1899 }
1900 REASON_SET(reason, PFRES_TS);
1901 return (PF_DROP);
1902 }
1903 }
1904
1905 /*
1906 * We note whether a host sends its data packets with or without
1907 * timestamps, and require all data packets to contain a timestamp
1908 * if the first one does. PAWS implicitly requires that all data packets be
1909 * timestamped. But I think there are middle-man devices that hijack
1910 * TCP streams immediately after the 3whs and don't timestamp their
1911 * packets (seen in a WWW accelerator or cache).
1912 */
1913 if (pd->p_len > 0 && src->scrub && (src->scrub->pfss_flags &
1914 (PFSS_TIMESTAMP|PFSS_DATA_TS|PFSS_DATA_NOTS)) == PFSS_TIMESTAMP) {
1915 if (got_ts)
1916 src->scrub->pfss_flags |= PFSS_DATA_TS;
1917 else {
1918 src->scrub->pfss_flags |= PFSS_DATA_NOTS;
1919 if (V_pf_status.debug >= PF_DEBUG_MISC && dst->scrub &&
1920 (dst->scrub->pfss_flags & PFSS_TIMESTAMP)) {
1921 /* Don't warn if other host rejected RFC1323 */
1922 DPFPRINTF(("Broken RFC1323 stack did not "
1923 "timestamp data packet. Disabled PAWS "
1924 "security.\n"));
1925 pf_print_state(state);
1926 pf_print_flags(tcp_get_flags(th));
1927 printf("\n");
1928 }
1929 }
1930 }
1931
1932 /*
1933 * Update PAWS values
1934 */
1935 if (got_ts && src->scrub && PFSS_TIMESTAMP == (src->scrub->pfss_flags &
1936 (PFSS_PAWS_IDLED|PFSS_TIMESTAMP))) {
1937 getmicrouptime(&src->scrub->pfss_last);
1938 if (SEQ_GEQ(tsval, src->scrub->pfss_tsval) ||
1939 (src->scrub->pfss_flags & PFSS_PAWS) == 0)
1940 src->scrub->pfss_tsval = tsval;
1941
1942 if (tsecr) {
1943 if (SEQ_GEQ(tsecr, src->scrub->pfss_tsecr) ||
1944 (src->scrub->pfss_flags & PFSS_PAWS) == 0)
1945 src->scrub->pfss_tsecr = tsecr;
1946
1947 if ((src->scrub->pfss_flags & PFSS_PAWS) == 0 &&
1948 (SEQ_LT(tsval, src->scrub->pfss_tsval0) ||
1949 src->scrub->pfss_tsval0 == 0)) {
1950 /* tsval0 MUST be the lowest timestamp */
1951 src->scrub->pfss_tsval0 = tsval;
1952 }
1953
1954 /* Only fully initialized after a TS gets echoed */
1955 if ((src->scrub->pfss_flags & PFSS_PAWS) == 0)
1956 src->scrub->pfss_flags |= PFSS_PAWS;
1957 }
1958 }
1959
1960 /* I have a dream.... TCP segment reassembly.... */
1961 return (0);
1962 }
1963
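/*
 * Clamp the TCP MSS option to the configured maximum (the "max-mss"
 * scrub option, e.g. "scrub in all max-mss 1440" in pf.conf).  Any
 * segment advertising a larger MSS has the option rewritten in place
 * via pf_patch_16() and the result copied back into the mbuf.
 */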
1964 int
1965 pf_normalize_mss(struct pf_pdesc *pd)
1966 {
1967 int olen, optsoff;
1968 uint8_t opts[MAX_TCPOPTLEN], *opt;
1969
1970 olen = (pd->hdr.tcp.th_off << 2) - sizeof(struct tcphdr);
1971 optsoff = pd->off + sizeof(struct tcphdr);
1972 if (olen < TCPOLEN_MAXSEG ||
1973 !pf_pull_hdr(pd->m, optsoff, opts, olen, NULL, NULL, pd->af))
1974 return (0);
1975
1976 opt = opts;
1977 while ((opt = pf_find_tcpopt(opt, opts, olen,
1978 TCPOPT_MAXSEG, TCPOLEN_MAXSEG)) != NULL) {
1979 uint16_t mss;
1980 uint8_t *mssp = opt + 2;
1981 memcpy(&mss, mssp, sizeof(mss));
1982 if (ntohs(mss) > pd->act.max_mss) {
1983 size_t mssoffopts = mssp - opts;
1984 pf_patch_16(pd, &mss,
1985 htons(pd->act.max_mss), PF_ALGNMNT(mssoffopts));
1986 m_copyback(pd->m, optsoff + mssoffopts,
1987 sizeof(mss), (caddr_t)&mss);
1988 m_copyback(pd->m, pd->off,
1989 sizeof(struct tcphdr), (caddr_t)&pd->hdr.tcp);
1990 }
1991
1992 opt += opt[1];
1993 }
1994
1995 return (0);
1996 }
1997
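/*
 * Walk the SCTP chunk list once, recording every chunk type seen in
 * pd->sctp_flags and applying basic sanity checks: a minimum chunk
 * length of 4, the INIT/INIT_ACK invariants from RFC 9260 and the
 * multihoming parameter scans.  Returns PF_PASS or PF_DROP.
 */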
1998 int
1999 pf_scan_sctp(struct pf_pdesc *pd)
2000 {
2001 struct sctp_chunkhdr ch = { };
2002 int chunk_off = sizeof(struct sctphdr);
2003 int chunk_start;
2004 int ret;
2005
2006 while (pd->off + chunk_off < pd->tot_len) {
2007 if (!pf_pull_hdr(pd->m, pd->off + chunk_off, &ch, sizeof(ch), NULL,
2008 NULL, pd->af))
2009 return (PF_DROP);
2010
2011 /* The length includes the header, so it must be at least 4. */
2012 if (ntohs(ch.chunk_length) < 4)
2013 return (PF_DROP);
2014
2015 chunk_start = chunk_off;
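/* Advance past this chunk; chunks are padded out to a multiple
 * of 4 bytes on the wire (RFC 9260, Section 3.2). */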
2016 chunk_off += roundup(ntohs(ch.chunk_length), 4);
2017
2018 switch (ch.chunk_type) {
2019 case SCTP_INITIATION:
2020 case SCTP_INITIATION_ACK: {
2021 struct sctp_init_chunk init;
2022
2023 if (!pf_pull_hdr(pd->m, pd->off + chunk_start, &init,
2024 sizeof(init), NULL, NULL, pd->af))
2025 return (PF_DROP);
2026
2027 /*
2028 * RFC 9260, Section 3.3.2, "The Initiate Tag is allowed to have
2029 * any value except 0."
2030 */
2031 if (init.init.initiate_tag == 0)
2032 return (PF_DROP);
2033 if (init.init.num_inbound_streams == 0)
2034 return (PF_DROP);
2035 if (init.init.num_outbound_streams == 0)
2036 return (PF_DROP);
2037 if (ntohl(init.init.a_rwnd) < SCTP_MIN_RWND)
2038 return (PF_DROP);
2039
2040 /*
2041 * RFC 9260, Section 3.1: INIT chunks MUST have a zero
2042 * verification tag.
2043 */
2044 if (ch.chunk_type == SCTP_INITIATION &&
2045 pd->hdr.sctp.v_tag != 0)
2046 return (PF_DROP);
2047
2048 pd->sctp_initiate_tag = init.init.initiate_tag;
2049
2050 if (ch.chunk_type == SCTP_INITIATION)
2051 pd->sctp_flags |= PFDESC_SCTP_INIT;
2052 else
2053 pd->sctp_flags |= PFDESC_SCTP_INIT_ACK;
2054
2055 ret = pf_multihome_scan_init(pd->off + chunk_start,
2056 ntohs(init.ch.chunk_length), pd);
2057 if (ret != PF_PASS)
2058 return (ret);
2059
2060 break;
2061 }
2062 case SCTP_ABORT_ASSOCIATION:
2063 pd->sctp_flags |= PFDESC_SCTP_ABORT;
2064 break;
2065 case SCTP_SHUTDOWN:
2066 case SCTP_SHUTDOWN_ACK:
2067 pd->sctp_flags |= PFDESC_SCTP_SHUTDOWN;
2068 break;
2069 case SCTP_SHUTDOWN_COMPLETE:
2070 pd->sctp_flags |= PFDESC_SCTP_SHUTDOWN_COMPLETE;
2071 break;
2072 case SCTP_COOKIE_ECHO:
2073 pd->sctp_flags |= PFDESC_SCTP_COOKIE;
2074 break;
2075 case SCTP_COOKIE_ACK:
2076 pd->sctp_flags |= PFDESC_SCTP_COOKIE_ACK;
2077 break;
2078 case SCTP_DATA:
2079 pd->sctp_flags |= PFDESC_SCTP_DATA;
2080 break;
2081 case SCTP_HEARTBEAT_REQUEST:
2082 pd->sctp_flags |= PFDESC_SCTP_HEARTBEAT;
2083 break;
2084 case SCTP_HEARTBEAT_ACK:
2085 pd->sctp_flags |= PFDESC_SCTP_HEARTBEAT_ACK;
2086 break;
2087 case SCTP_ASCONF:
2088 pd->sctp_flags |= PFDESC_SCTP_ASCONF;
2089
2090 ret = pf_multihome_scan_asconf(pd->off + chunk_start,
2091 ntohs(ch.chunk_length), pd);
2092 if (ret != PF_PASS)
2093 return (ret);
2094 break;
2095 default:
2096 pd->sctp_flags |= PFDESC_SCTP_OTHER;
2097 break;
2098 }
2099 }
2100
2101 /* Validate chunk lengths vs. packet length. */
2102 if (pd->off + chunk_off != pd->tot_len)
2103 return (PF_DROP);
2104
2105 /*
2106 * An INIT, INIT_ACK or SHUTDOWN_COMPLETE chunk must always be the
2107 * only chunk in a packet.
2108 */
2109 if ((pd->sctp_flags & PFDESC_SCTP_INIT) &&
2110 (pd->sctp_flags & ~PFDESC_SCTP_INIT))
2111 return (PF_DROP);
2112 if ((pd->sctp_flags & PFDESC_SCTP_INIT_ACK) &&
2113 (pd->sctp_flags & ~PFDESC_SCTP_INIT_ACK))
2114 return (PF_DROP);
2115 if ((pd->sctp_flags & PFDESC_SCTP_SHUTDOWN_COMPLETE) &&
2116 (pd->sctp_flags & ~PFDESC_SCTP_SHUTDOWN_COMPLETE))
2117 return (PF_DROP);
2118 if ((pd->sctp_flags & PFDESC_SCTP_ABORT) &&
2119 (pd->sctp_flags & PFDESC_SCTP_DATA)) {
2120 /*
2121 * RFC4960 3.3.7: DATA chunks MUST NOT be
2122 * bundled with ABORT.
2123 */
2124 return (PF_DROP);
2125 }
2126
2127 return (PF_PASS);
2128 }
2129
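/*
 * Match the packet against the scrub ruleset and, unless a "no scrub"
 * rule matched, enforce SCTP packet-level sanity: the chunk area must
 * be a multiple of 4 bytes long and an INIT chunk must travel alone.
 */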
2130 int
2131 pf_normalize_sctp(struct pf_pdesc *pd)
2132 {
2133 struct pf_krule *r, *rm = NULL;
2134 struct sctphdr *sh = &pd->hdr.sctp;
2135 u_short reason;
2136 sa_family_t af = pd->af;
2137 int srs;
2138
2139 PF_RULES_RASSERT();
2140
2141 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
2142 /* Check if there are any scrub rules. Lack of scrub rules means enforced
2143 * packet normalization, just like in OpenBSD. */
2144 srs = (r != NULL);
2145 while (r != NULL) {
2146 pf_counter_u64_add(&r->evaluations, 1);
2147 if (pfi_kkif_match(r->kif, pd->kif) == r->ifnot)
2148 r = r->skip[PF_SKIP_IFP];
2149 else if (r->direction && r->direction != pd->dir)
2150 r = r->skip[PF_SKIP_DIR];
2151 else if (r->af && r->af != af)
2152 r = r->skip[PF_SKIP_AF];
2153 else if (r->proto && r->proto != pd->proto)
2154 r = r->skip[PF_SKIP_PROTO];
2155 else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
2156 r->src.neg, pd->kif, M_GETFIB(pd->m)))
2157 r = r->skip[PF_SKIP_SRC_ADDR];
2158 else if (r->src.port_op && !pf_match_port(r->src.port_op,
2159 r->src.port[0], r->src.port[1], sh->src_port))
2160 r = r->skip[PF_SKIP_SRC_PORT];
2161 else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
2162 r->dst.neg, NULL, M_GETFIB(pd->m)))
2163 r = r->skip[PF_SKIP_DST_ADDR];
2164 else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
2165 r->dst.port[0], r->dst.port[1], sh->dest_port))
2166 r = r->skip[PF_SKIP_DST_PORT];
2167 else {
2168 rm = r;
2169 break;
2170 }
2171 }
2172
2173 if (srs) {
2174 /* With scrub rules present, SCTP normalization happens only
2175 * if one of the rules has matched and it's not a "no scrub" rule */
2176 if (rm == NULL || rm->action == PF_NOSCRUB)
2177 return (PF_PASS);
2178
2179 pf_counter_u64_critical_enter();
2180 pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1);
2181 pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT], pd->tot_len);
2182 pf_counter_u64_critical_exit();
2183 }
2184
2185 /* Verify the chunk area is a multiple of 4 bytes long */
2186 if ((pd->tot_len - pd->off - sizeof(struct sctphdr)) % 4)
2187 goto sctp_drop;
2188
2189 /* INIT chunk needs to be the only chunk */
2190 if (pd->sctp_flags & PFDESC_SCTP_INIT)
2191 if (pd->sctp_flags & ~PFDESC_SCTP_INIT)
2192 goto sctp_drop;
2193
2194 return (PF_PASS);
2195
2196 sctp_drop:
2197 REASON_SET(&reason, PFRES_NORM);
2198 if (rm != NULL && r->log)
2199 PFLOG_PACKET(PF_DROP, reason, r, NULL, NULL, pd,
2200 1, NULL);
2201
2202 return (PF_DROP);
2203 }
2204
2205 #if defined(INET) || defined(INET6)
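/*
 * Apply the per-state scrub actions to the IP header: clear IP_DF for
 * "no-df", raise the TTL/hop limit to "min-ttl", overwrite the TOS or
 * traffic class for "set-tos" and randomize the IPv4 ID for
 * "random-id", fixing up the IPv4 header checksum after each change.
 */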
2206 void
2207 pf_scrub(struct pf_pdesc *pd)
2208 {
2209
2210 struct ip *h = mtod(pd->m, struct ip *);
2211 #ifdef INET6
2212 struct ip6_hdr *h6 = mtod(pd->m, struct ip6_hdr *);
2213 #endif /* INET6 */
2214
2215 /* Clear IP_DF if no-df was requested */
2216 if (pd->af == AF_INET && pd->act.flags & PFSTATE_NODF &&
2217 h->ip_off & htons(IP_DF))
2218 {
2219 u_int16_t ip_off = h->ip_off;
2220
2221 h->ip_off &= htons(~IP_DF);
2222 h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
2223 }
2224
2225 /* Enforce a minimum ttl; raising it may cause endless packet loops */
2226 if (pd->af == AF_INET && pd->act.min_ttl &&
2227 h->ip_ttl < pd->act.min_ttl) {
2228 u_int16_t ip_ttl = h->ip_ttl;
2229
2230 h->ip_ttl = pd->act.min_ttl;
2231 h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_ttl, h->ip_ttl, 0);
2232 }
2233 #ifdef INET6
2234 /* Enforce a minimum ttl; raising it may cause endless packet loops */
2235 if (pd->af == AF_INET6 && pd->act.min_ttl &&
2236 h6->ip6_hlim < pd->act.min_ttl)
2237 h6->ip6_hlim = pd->act.min_ttl;
2238 #endif /* INET6 */
2239 /* Enforce tos */
2240 if (pd->act.flags & PFSTATE_SETTOS) {
2241 switch (pd->af) {
2242 case AF_INET: {
2243 u_int16_t ov, nv;
2244
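/* ip_tos shares its 16-bit checksum word with ip_v/ip_hl, so
 * snapshot that whole word before and after the rewrite for
 * pf_cksum_fixup(). */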
2245 ov = *(u_int16_t *)h;
2246 h->ip_tos = pd->act.set_tos | (h->ip_tos & IPTOS_ECN_MASK);
2247 nv = *(u_int16_t *)h;
2248
2249 h->ip_sum = pf_cksum_fixup(h->ip_sum, ov, nv, 0);
2250 break;
2251 }
2252 #ifdef INET6
2253 case AF_INET6:
2254 h6->ip6_flow &= IPV6_FLOWLABEL_MASK | IPV6_VERSION_MASK;
2255 h6->ip6_flow |= htonl((pd->act.set_tos | IPV6_ECN(h6)) << 20);
2256 break;
2257 #endif /* INET6 */
2258 }
2259 }
2260
2261 /* random-id, but not for fragments */
2262 #ifdef INET
2263 if (pd->af == AF_INET &&
2264 pd->act.flags & PFSTATE_RANDOMID && !(h->ip_off & ~htons(IP_DF))) {
2265 uint16_t ip_id = h->ip_id;
2266
2267 ip_fillid(h, V_ip_random_id);
2268 h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_id, h->ip_id, 0);
2269 }
2270 #endif /* INET */
2271 }
2272 #endif /* INET || INET6 */
2273