/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright 2001 Niels Provos <provos@citi.umich.edu>
 * Copyright 2011-2018 Alexander Bluhm <bluhm@openbsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $OpenBSD: pf_norm.c,v 1.114 2009/01/29 14:11:45 henning Exp $
 */

#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_pf.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/refcount.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/vnet.h>
#include <net/pfvar.h>
#include <net/if_pflog.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet6/in6_var.h>
#include <netinet6/nd6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/scope6_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/sctp_constants.h>
#include <netinet/sctp_header.h>

#ifdef INET6
#include <netinet/ip6.h>
#endif /* INET6 */
struct pf_frent {
	TAILQ_ENTRY(pf_frent) fr_next;
	struct mbuf	*fe_m;
	uint16_t	fe_hdrlen;	/* ipv4: header length with ip options;
					   ipv6: incl. extension, fragment hdrs */
	uint16_t	fe_extoff;	/* last extension header offset or 0 */
	uint16_t	fe_len;		/* fragment length */
	uint16_t	fe_off;		/* fragment offset */
	uint16_t	fe_mff;		/* more fragment flag */
};
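
/*
 * Example (illustrative): the second fragment of a 4000 octet IPv4
 * payload split on 1480 octet boundaries would be queued with
 * fe_off = 1480, fe_len = 1480 and fe_mff set.  fe_off and fe_len
 * count payload octets only; the header is tracked in fe_hdrlen.
 */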

struct pf_fragment_cmp {
	struct pf_addr	frc_src;
	struct pf_addr	frc_dst;
	uint32_t	frc_id;
	sa_family_t	frc_af;
	uint8_t		frc_proto;
};

struct pf_fragment {
	struct pf_fragment_cmp	fr_key;
#define fr_src	fr_key.frc_src
#define fr_dst	fr_key.frc_dst
#define fr_id	fr_key.frc_id
#define fr_af	fr_key.frc_af
#define fr_proto	fr_key.frc_proto

	/* pointers to queue element */
	struct pf_frent	*fr_firstoff[PF_FRAG_ENTRY_POINTS];
	/* count entries between pointers */
	uint8_t		fr_entries[PF_FRAG_ENTRY_POINTS];
	RB_ENTRY(pf_fragment) fr_entry;
	TAILQ_ENTRY(pf_fragment) frag_next;
	uint32_t	fr_timeout;
	TAILQ_HEAD(pf_fragq, pf_frent) fr_queue;
	uint16_t	fr_maxlen;	/* maximum length of single fragment */
	u_int16_t	fr_holes;	/* number of holes in the queue */
};
VNET_DEFINE_STATIC(struct mtx, pf_frag_mtx);
#define V_pf_frag_mtx		VNET(pf_frag_mtx)
#define PF_FRAG_LOCK()		mtx_lock(&V_pf_frag_mtx)
#define PF_FRAG_UNLOCK()	mtx_unlock(&V_pf_frag_mtx)
#define PF_FRAG_ASSERT()	mtx_assert(&V_pf_frag_mtx, MA_OWNED)

VNET_DEFINE(uma_zone_t, pf_state_scrub_z);	/* XXX: shared with pfsync */

VNET_DEFINE_STATIC(uma_zone_t, pf_frent_z);
#define V_pf_frent_z	VNET(pf_frent_z)
VNET_DEFINE_STATIC(uma_zone_t, pf_frag_z);
#define V_pf_frag_z	VNET(pf_frag_z)

TAILQ_HEAD(pf_fragqueue, pf_fragment);
TAILQ_HEAD(pf_cachequeue, pf_fragment);
VNET_DEFINE_STATIC(struct pf_fragqueue, pf_fragqueue);
#define V_pf_fragqueue	VNET(pf_fragqueue)
RB_HEAD(pf_frag_tree, pf_fragment);
VNET_DEFINE_STATIC(struct pf_frag_tree, pf_frag_tree);
#define V_pf_frag_tree	VNET(pf_frag_tree)
static int	pf_frag_compare(struct pf_fragment *,
		    struct pf_fragment *);
static RB_PROTOTYPE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
static RB_GENERATE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);

static void	pf_flush_fragments(void);
static void	pf_free_fragment(struct pf_fragment *);

static struct pf_frent *pf_create_fragment(u_short *);
static int	pf_frent_holes(struct pf_frent *frent);
static struct pf_fragment *pf_find_fragment(struct pf_fragment_cmp *key,
		    struct pf_frag_tree *tree);
static inline int	pf_frent_index(struct pf_frent *);
static int	pf_frent_insert(struct pf_fragment *,
		    struct pf_frent *, struct pf_frent *);
void		pf_frent_remove(struct pf_fragment *,
		    struct pf_frent *);
struct pf_frent *pf_frent_previous(struct pf_fragment *,
		    struct pf_frent *);
static struct pf_fragment *pf_fillup_fragment(struct pf_fragment_cmp *,
		    struct pf_frent *, u_short *);
static struct mbuf *pf_join_fragment(struct pf_fragment *);
#ifdef INET
static int	pf_reassemble(struct mbuf **, int, u_short *);
#endif /* INET */
#ifdef INET6
static int	pf_reassemble6(struct mbuf **,
		    struct ip6_frag *, uint16_t, uint16_t, u_short *);
#endif /* INET6 */
#define DPFPRINTF(x) do {				\
	if (V_pf_status.debug >= PF_DEBUG_MISC) {	\
		printf("%s: ", __func__);		\
		printf x ;				\
	}						\
} while (0)
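
/*
 * The doubled parentheses at a call site carry a complete printf()
 * argument list, e.g. DPFPRINTF(("frag %d, holes %d\n", id, holes));.
 */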

#ifdef INET
static void
pf_ip2key(struct ip *ip, int dir, struct pf_fragment_cmp *key)
{

	key->frc_src.v4 = ip->ip_src;
	key->frc_dst.v4 = ip->ip_dst;
	key->frc_af = AF_INET;
	key->frc_proto = ip->ip_p;
	key->frc_id = ip->ip_id;
}
#endif /* INET */

void
pf_normalize_init(void)
{

	V_pf_frag_z = uma_zcreate("pf frags", sizeof(struct pf_fragment),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	V_pf_frent_z = uma_zcreate("pf frag entries", sizeof(struct pf_frent),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	V_pf_state_scrub_z = uma_zcreate("pf state scrubs",
	    sizeof(struct pf_state_scrub), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);

	mtx_init(&V_pf_frag_mtx, "pf fragments", NULL, MTX_DEF);

	V_pf_limits[PF_LIMIT_FRAGS].zone = V_pf_frent_z;
	V_pf_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT;
	uma_zone_set_max(V_pf_frent_z, PFFRAG_FRENT_HIWAT);
	uma_zone_set_warning(V_pf_frent_z, "PF frag entries limit reached");

	TAILQ_INIT(&V_pf_fragqueue);
}

void
pf_normalize_cleanup(void)
{

	uma_zdestroy(V_pf_state_scrub_z);
	uma_zdestroy(V_pf_frent_z);
	uma_zdestroy(V_pf_frag_z);

	mtx_destroy(&V_pf_frag_mtx);
}

static int
pf_frag_compare(struct pf_fragment *a, struct pf_fragment *b)
{
	int	diff;

	if ((diff = a->fr_id - b->fr_id) != 0)
		return (diff);
	if ((diff = a->fr_proto - b->fr_proto) != 0)
		return (diff);
	if ((diff = a->fr_af - b->fr_af) != 0)
		return (diff);
	if ((diff = pf_addr_cmp(&a->fr_src, &b->fr_src, a->fr_af)) != 0)
		return (diff);
	if ((diff = pf_addr_cmp(&a->fr_dst, &b->fr_dst, a->fr_af)) != 0)
		return (diff);
	return (0);
}

void
pf_purge_expired_fragments(void)
{
	u_int32_t	expire = time_uptime -
	    V_pf_default_rule.timeout[PFTM_FRAG];

	pf_purge_fragments(expire);
}

void
pf_purge_fragments(uint32_t expire)
{
	struct pf_fragment	*frag;

	PF_FRAG_LOCK();
	while ((frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue)) != NULL) {
		if (frag->fr_timeout > expire)
			break;

		DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
		pf_free_fragment(frag);
	}

	PF_FRAG_UNLOCK();
}

/*
 * Try to flush old fragments to make space for new ones
 */
static void
pf_flush_fragments(void)
{
	struct pf_fragment	*frag;
	int			 goal;

	PF_FRAG_ASSERT();

	goal = uma_zone_get_cur(V_pf_frent_z) * 9 / 10;
	DPFPRINTF(("trying to free %d frag entries\n", goal));
	while (goal < uma_zone_get_cur(V_pf_frent_z)) {
		frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue);
		if (frag)
			pf_free_fragment(frag);
		else
			break;
	}
}

/*
 * Remove a fragment from the fragment queue, free its fragment entries,
 * and free the fragment itself.
 */
static void
pf_free_fragment(struct pf_fragment *frag)
{
	struct pf_frent	*frent;

	PF_FRAG_ASSERT();

	RB_REMOVE(pf_frag_tree, &V_pf_frag_tree, frag);
	TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);

	/* Free all fragment entries */
	while ((frent = TAILQ_FIRST(&frag->fr_queue)) != NULL) {
		TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);

		m_freem(frent->fe_m);
		uma_zfree(V_pf_frent_z, frent);
	}

	uma_zfree(V_pf_frag_z, frag);
}

static struct pf_fragment *
pf_find_fragment(struct pf_fragment_cmp *key, struct pf_frag_tree *tree)
{
	struct pf_fragment	*frag;

	PF_FRAG_ASSERT();

	frag = RB_FIND(pf_frag_tree, tree, (struct pf_fragment *)key);
	if (frag != NULL) {
		TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);
		TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next);
	}

	return (frag);
}

static struct pf_frent *
pf_create_fragment(u_short *reason)
{
	struct pf_frent *frent;

	PF_FRAG_ASSERT();

	frent = uma_zalloc(V_pf_frent_z, M_NOWAIT);
	if (frent == NULL) {
		pf_flush_fragments();
		frent = uma_zalloc(V_pf_frent_z, M_NOWAIT);
		if (frent == NULL) {
			REASON_SET(reason, PFRES_MEMORY);
			return (NULL);
		}
	}

	return (frent);
}

/*
 * Calculate the additional holes that were created in the fragment
 * queue by inserting this fragment.  A fragment in the middle
 * creates one more hole by splitting.  For each connected side,
 * it loses one hole.
 * Fragment entry must be in the queue when calling this function.
 */
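/*
 * Example: if the queue holds a single fragment [0, 1480) with fe_mff
 * set, fr_holes is 1 (the gap behind it).  Inserting [2960, 4440) with
 * fe_mff set connects to neither side: prev ends at 1480 != 2960, and
 * there is no next but fe_mff is set.  The function returns 1 and
 * fr_holes becomes 2, one gap on each side of the new fragment.
 */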
static int
pf_frent_holes(struct pf_frent *frent)
{
	struct pf_frent *prev = TAILQ_PREV(frent, pf_fragq, fr_next);
	struct pf_frent *next = TAILQ_NEXT(frent, fr_next);
	int holes = 1;

	if (prev == NULL) {
		if (frent->fe_off == 0)
			holes--;
	} else {
		KASSERT(frent->fe_off != 0, ("frent->fe_off != 0"));
		if (frent->fe_off == prev->fe_off + prev->fe_len)
			holes--;
	}
	if (next == NULL) {
		if (!frent->fe_mff)
			holes--;
	} else {
		KASSERT(frent->fe_mff, ("frent->fe_mff"));
		if (next->fe_off == frent->fe_off + frent->fe_len)
			holes--;
	}
	return holes;
}

static inline int
pf_frent_index(struct pf_frent *frent)
{
	/*
	 * We have an array of 16 entry points to the queue.  A full size
	 * 65535 octet IP packet can have 8192 fragments.  So the queue
	 * traversal length is at most 512 and at most 16 entry points are
	 * checked.  We need 128 additional bytes on a 64 bit architecture.
	 */
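	/*
	 * Example: with PF_FRAG_ENTRY_POINTS == 16, each entry point
	 * covers 0x10000 / 16 == 4096 octets, so fe_off 0..4095 maps to
	 * index 0, fe_off 4096 to index 1, and the largest possible
	 * offset, 65528, to index 15.
	 */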
	CTASSERT(((u_int16_t)0xffff &~ 7) / (0x10000 / PF_FRAG_ENTRY_POINTS) ==
	    16 - 1);
	CTASSERT(((u_int16_t)0xffff >> 3) / PF_FRAG_ENTRY_POINTS == 512 - 1);

	return frent->fe_off / (0x10000 / PF_FRAG_ENTRY_POINTS);
}

static int
pf_frent_insert(struct pf_fragment *frag, struct pf_frent *frent,
    struct pf_frent *prev)
{
	int index;

	CTASSERT(PF_FRAG_ENTRY_LIMIT <= 0xff);

	/*
	 * A packet has at most 65536 octets.  With 16 entry points, each one
	 * spans 4096 octets.  We limit these to 64 fragments each, which
	 * means on average every fragment must have at least 64 octets.
	 */
	index = pf_frent_index(frent);
	if (frag->fr_entries[index] >= PF_FRAG_ENTRY_LIMIT)
		return ENOBUFS;
	frag->fr_entries[index]++;

	if (prev == NULL) {
		TAILQ_INSERT_HEAD(&frag->fr_queue, frent, fr_next);
	} else {
		KASSERT(prev->fe_off + prev->fe_len <= frent->fe_off,
		    ("overlapping fragment"));
		TAILQ_INSERT_AFTER(&frag->fr_queue, prev, frent, fr_next);
	}

	if (frag->fr_firstoff[index] == NULL) {
		KASSERT(prev == NULL || pf_frent_index(prev) < index,
		    ("prev == NULL || pf_frent_index(prev) < index"));
		frag->fr_firstoff[index] = frent;
	} else {
		if (frent->fe_off < frag->fr_firstoff[index]->fe_off) {
			KASSERT(prev == NULL || pf_frent_index(prev) < index,
			    ("prev == NULL || pf_frent_index(prev) < index"));
			frag->fr_firstoff[index] = frent;
		} else {
			KASSERT(prev != NULL, ("prev != NULL"));
			KASSERT(pf_frent_index(prev) == index,
			    ("pf_frent_index(prev) == index"));
		}
	}

	frag->fr_holes += pf_frent_holes(frent);

	return 0;
}

void
pf_frent_remove(struct pf_fragment *frag, struct pf_frent *frent)
{
#ifdef INVARIANTS
	struct pf_frent *prev = TAILQ_PREV(frent, pf_fragq, fr_next);
#endif
	struct pf_frent *next = TAILQ_NEXT(frent, fr_next);
	int index;

	frag->fr_holes -= pf_frent_holes(frent);

	index = pf_frent_index(frent);
	KASSERT(frag->fr_firstoff[index] != NULL, ("frent not found"));
	if (frag->fr_firstoff[index]->fe_off == frent->fe_off) {
		if (next == NULL) {
			frag->fr_firstoff[index] = NULL;
		} else {
			KASSERT(frent->fe_off + frent->fe_len <= next->fe_off,
			    ("overlapping fragment"));
			if (pf_frent_index(next) == index) {
				frag->fr_firstoff[index] = next;
			} else {
				frag->fr_firstoff[index] = NULL;
			}
		}
	} else {
		KASSERT(frag->fr_firstoff[index]->fe_off < frent->fe_off,
		    ("frag->fr_firstoff[index]->fe_off < frent->fe_off"));
		KASSERT(prev != NULL, ("prev != NULL"));
		KASSERT(prev->fe_off + prev->fe_len <= frent->fe_off,
		    ("overlapping fragment"));
		KASSERT(pf_frent_index(prev) == index,
		    ("pf_frent_index(prev) == index"));
	}

	TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);

	KASSERT(frag->fr_entries[index] > 0, ("No fragments remaining"));
	frag->fr_entries[index]--;
}

struct pf_frent *
pf_frent_previous(struct pf_fragment *frag, struct pf_frent *frent)
{
	struct pf_frent *prev, *next;
	int index;

	/*
	 * If there are no fragments after frent, take the final one.  Assume
	 * that the fragment queue is not empty.
	 */
	prev = TAILQ_LAST(&frag->fr_queue, pf_fragq);
	KASSERT(prev != NULL, ("prev != NULL"));
	if (prev->fe_off <= frent->fe_off)
		return prev;
	/*
	 * We want to find a fragment entry that is before frent, but still
	 * close to it.  Find the first fragment entry that is in the same
	 * entry point or in the first entry point after that.  As we have
	 * already checked that there are entries behind frent, this will
	 * succeed.
	 */
	for (index = pf_frent_index(frent); index < PF_FRAG_ENTRY_POINTS;
	    index++) {
		prev = frag->fr_firstoff[index];
		if (prev != NULL)
			break;
	}
	KASSERT(prev != NULL, ("prev != NULL"));
	/*
	 * In prev we may have a fragment from the same entry point that is
	 * before frent, or one that is just one position behind frent.
	 * In the latter case, we go back one step and have the predecessor.
	 * There may be none if the new fragment will be the first one.
	 */
	if (prev->fe_off > frent->fe_off) {
		prev = TAILQ_PREV(prev, pf_fragq, fr_next);
		if (prev == NULL)
			return NULL;
		KASSERT(prev->fe_off <= frent->fe_off,
		    ("prev->fe_off <= frent->fe_off"));
		return prev;
	}
	/*
	 * In prev is the first fragment of the entry point.  The offset
	 * of frent is behind it.  Find the closest previous fragment.
	 */
	for (next = TAILQ_NEXT(prev, fr_next); next != NULL;
	    next = TAILQ_NEXT(next, fr_next)) {
		if (next->fe_off > frent->fe_off)
			break;
		prev = next;
	}
	return prev;
}

static struct pf_fragment *
pf_fillup_fragment(struct pf_fragment_cmp *key, struct pf_frent *frent,
    u_short *reason)
{
	struct pf_frent		*after, *next, *prev;
	struct pf_fragment	*frag;
	uint16_t		 total;

	PF_FRAG_ASSERT();

	/* No empty fragments. */
	if (frent->fe_len == 0) {
		DPFPRINTF(("bad fragment: len 0\n"));
		goto bad_fragment;
	}

	/* All fragments are 8 byte aligned. */
	if (frent->fe_mff && (frent->fe_len & 0x7)) {
		DPFPRINTF(("bad fragment: mff and len %d\n", frent->fe_len));
		goto bad_fragment;
	}

	/* Respect maximum length, IP_MAXPACKET == IPV6_MAXPACKET. */
	if (frent->fe_off + frent->fe_len > IP_MAXPACKET) {
		DPFPRINTF(("bad fragment: max packet %d\n",
		    frent->fe_off + frent->fe_len));
		goto bad_fragment;
	}

	DPFPRINTF((key->frc_af == AF_INET ?
	    "reass frag %d @ %d-%d\n" : "reass frag %#08x @ %d-%d\n",
	    key->frc_id, frent->fe_off, frent->fe_off + frent->fe_len));

	/* Fully buffer all of the fragments in this fragment queue. */
	frag = pf_find_fragment(key, &V_pf_frag_tree);

	/* Create a new reassembly queue for this packet. */
	if (frag == NULL) {
		frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
		if (frag == NULL) {
			pf_flush_fragments();
			frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
			if (frag == NULL) {
				REASON_SET(reason, PFRES_MEMORY);
				goto drop_fragment;
			}
		}

		*(struct pf_fragment_cmp *)frag = *key;
		memset(frag->fr_firstoff, 0, sizeof(frag->fr_firstoff));
		memset(frag->fr_entries, 0, sizeof(frag->fr_entries));
		frag->fr_timeout = time_uptime;
		TAILQ_INIT(&frag->fr_queue);
		frag->fr_maxlen = frent->fe_len;
		frag->fr_holes = 1;

		RB_INSERT(pf_frag_tree, &V_pf_frag_tree, frag);
		TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next);

		/* We do not have a previous fragment, cannot fail. */
		pf_frent_insert(frag, frent, NULL);

		return (frag);
	}

	KASSERT(!TAILQ_EMPTY(&frag->fr_queue),
	    ("!TAILQ_EMPTY(&frag->fr_queue)"));

	/* Remember maximum fragment len for refragmentation. */
	if (frent->fe_len > frag->fr_maxlen)
		frag->fr_maxlen = frent->fe_len;

	/* Maximum data we have seen already. */
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
	    TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;

	/* Non-terminal fragments must have the more-fragments flag. */
	if (frent->fe_off + frent->fe_len < total && !frent->fe_mff)
		goto bad_fragment;

	/* Check if we saw the last fragment already. */
	if (!TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_mff) {
		if (frent->fe_off + frent->fe_len > total ||
		    (frent->fe_off + frent->fe_len == total && frent->fe_mff))
			goto bad_fragment;
	} else {
		if (frent->fe_off + frent->fe_len == total && !frent->fe_mff)
			goto bad_fragment;
	}

	/* Find neighbors for newly inserted fragment */
	prev = pf_frent_previous(frag, frent);
	if (prev == NULL) {
		after = TAILQ_FIRST(&frag->fr_queue);
		KASSERT(after != NULL, ("after != NULL"));
	} else {
		after = TAILQ_NEXT(prev, fr_next);
	}

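	/*
	 * Worked example of the overlap trimming below: with
	 * prev = [0, 1500) and frent = [1480, 2960), precut is 20 and
	 * frent shrinks to [1500, 2960).  Conversely, if frent ends at
	 * 2980 inside after = [2960, 4440), aftercut is 20 and after is
	 * trimmed to [2980, 4440).
	 */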
	if (prev != NULL && prev->fe_off + prev->fe_len > frent->fe_off) {
		uint16_t precut;

		if (frag->fr_af == AF_INET6)
			goto free_fragment;

		precut = prev->fe_off + prev->fe_len - frent->fe_off;
		if (precut >= frent->fe_len) {
			DPFPRINTF(("new frag overlapped\n"));
			goto drop_fragment;
		}
		DPFPRINTF(("frag head overlap %d\n", precut));
		m_adj(frent->fe_m, precut);
		frent->fe_off += precut;
		frent->fe_len -= precut;
	}

	for (; after != NULL && frent->fe_off + frent->fe_len > after->fe_off;
	    after = next) {
		uint16_t aftercut;

		aftercut = frent->fe_off + frent->fe_len - after->fe_off;
		if (aftercut < after->fe_len) {
			DPFPRINTF(("frag tail overlap %d\n", aftercut));
			m_adj(after->fe_m, aftercut);
			/* Fragment may switch queue as fe_off changes */
			pf_frent_remove(frag, after);
			after->fe_off += aftercut;
			after->fe_len -= aftercut;
			/* Insert into correct queue */
			if (pf_frent_insert(frag, after, prev)) {
				DPFPRINTF(("fragment requeue limit exceeded\n"));
				m_freem(after->fe_m);
				uma_zfree(V_pf_frent_z, after);
				/* There is no way to recover */
				goto free_fragment;
			}
			break;
		}

		/* This fragment is completely overlapped, lose it. */
		DPFPRINTF(("old frag overlapped\n"));
		next = TAILQ_NEXT(after, fr_next);
		pf_frent_remove(frag, after);
		m_freem(after->fe_m);
		uma_zfree(V_pf_frent_z, after);
	}

	/* If part of the queue gets too long, there is no way to recover. */
	if (pf_frent_insert(frag, frent, prev)) {
		DPFPRINTF(("fragment queue limit exceeded\n"));
		goto bad_fragment;
	}

	return (frag);

free_fragment:
	/*
	 * RFC 5722, Errata 3089: When reassembling an IPv6 datagram, if one
	 * or more of its constituent fragments is determined to be an
	 * overlapping fragment, the entire datagram (and any constituent
	 * fragments) MUST be silently discarded.
	 */
	DPFPRINTF(("flush overlapping fragments\n"));
	pf_free_fragment(frag);

bad_fragment:
	REASON_SET(reason, PFRES_FRAG);
drop_fragment:
	uma_zfree(V_pf_frent_z, frent);
	return (NULL);
}

static struct mbuf *
pf_join_fragment(struct pf_fragment *frag)
{
	struct mbuf *m, *m2;
	struct pf_frent	*frent;

	frent = TAILQ_FIRST(&frag->fr_queue);
	TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);

	m = frent->fe_m;
	/* Strip trailing bytes; a negative m_adj() count trims the tail. */
	m_adj(m, (frent->fe_hdrlen + frent->fe_len) - m->m_pkthdr.len);
	uma_zfree(V_pf_frent_z, frent);
	while ((frent = TAILQ_FIRST(&frag->fr_queue)) != NULL) {
		TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);

		m2 = frent->fe_m;
		/* Strip off ip header. */
		m_adj(m2, frent->fe_hdrlen);
		/* Strip off any trailing bytes. */
		m_adj(m2, frent->fe_len - m2->m_pkthdr.len);

		uma_zfree(V_pf_frent_z, frent);
		m_cat(m, m2);
	}

	/* Remove from fragment queue. */
	pf_free_fragment(frag);

	return (m);
}

#ifdef INET
static int
pf_reassemble(struct mbuf **m0, int dir, u_short *reason)
{
	struct mbuf		*m = *m0;
	struct ip		*ip = mtod(m, struct ip *);
	struct pf_frent		*frent;
	struct pf_fragment	*frag;
	struct m_tag		*mtag;
	struct pf_fragment_tag	*ftag;
	struct pf_fragment_cmp	 key;
	uint16_t		 total, hdrlen;
	uint32_t		 frag_id;
	uint16_t		 maxlen;

	/* Get an entry for the fragment queue */
	if ((frent = pf_create_fragment(reason)) == NULL)
		return (PF_DROP);

	frent->fe_m = m;
	frent->fe_hdrlen = ip->ip_hl << 2;
	frent->fe_extoff = 0;
	frent->fe_len = ntohs(ip->ip_len) - (ip->ip_hl << 2);
	frent->fe_off = (ntohs(ip->ip_off) & IP_OFFMASK) << 3;
	frent->fe_mff = ntohs(ip->ip_off) & IP_MF;

	pf_ip2key(ip, dir, &key);

	if ((frag = pf_fillup_fragment(&key, frent, reason)) == NULL)
		return (PF_DROP);

	/* The mbuf is part of the fragment entry, no direct free or access */
	m = *m0 = NULL;

	if (frag->fr_holes) {
		DPFPRINTF(("frag %d, holes %d\n", frag->fr_id, frag->fr_holes));
		return (PF_PASS);	/* drop because *m0 is NULL, no error */
	}

	/* We have all the data */
	frent = TAILQ_FIRST(&frag->fr_queue);
	KASSERT(frent != NULL, ("frent != NULL"));
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
	    TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
	hdrlen = frent->fe_hdrlen;

	maxlen = frag->fr_maxlen;
	frag_id = frag->fr_id;
	m = *m0 = pf_join_fragment(frag);
	frag = NULL;

	if (m->m_flags & M_PKTHDR) {
		int plen = 0;
		for (m = *m0; m; m = m->m_next)
			plen += m->m_len;
		m = *m0;
		m->m_pkthdr.len = plen;
	}

	if ((mtag = m_tag_get(PACKET_TAG_PF_REASSEMBLED,
	    sizeof(struct pf_fragment_tag), M_NOWAIT)) == NULL) {
		REASON_SET(reason, PFRES_SHORT);
		/* PF_DROP requires a valid mbuf *m0 in pf_test() */
		return (PF_DROP);
	}
	ftag = (struct pf_fragment_tag *)(mtag + 1);
	ftag->ft_hdrlen = hdrlen;
	ftag->ft_extoff = 0;
	ftag->ft_maxlen = maxlen;
	ftag->ft_id = frag_id;
	m_tag_prepend(m, mtag);

	ip = mtod(m, struct ip *);
	ip->ip_sum = pf_cksum_fixup(ip->ip_sum, ip->ip_len,
	    htons(hdrlen + total), 0);
	ip->ip_len = htons(hdrlen + total);
	ip->ip_sum = pf_cksum_fixup(ip->ip_sum, ip->ip_off,
	    ip->ip_off & ~(IP_MF|IP_OFFMASK), 0);
	ip->ip_off &= ~(IP_MF|IP_OFFMASK);

	if (hdrlen + total > IP_MAXPACKET) {
		DPFPRINTF(("drop: too big: %d\n", total));
		ip->ip_len = 0;
		REASON_SET(reason, PFRES_SHORT);
		/* PF_DROP requires a valid mbuf *m0 in pf_test() */
		return (PF_DROP);
	}

	DPFPRINTF(("complete: %p(%d)\n", m, ntohs(ip->ip_len)));
	return (PF_PASS);
}
#endif /* INET */

#ifdef INET6
static int
pf_reassemble6(struct mbuf **m0, struct ip6_frag *fraghdr,
    uint16_t hdrlen, uint16_t extoff, u_short *reason)
{
	struct mbuf		*m = *m0;
	struct ip6_hdr		*ip6 = mtod(m, struct ip6_hdr *);
	struct pf_frent		*frent;
	struct pf_fragment	*frag;
	struct pf_fragment_cmp	 key;
	struct m_tag		*mtag;
	struct pf_fragment_tag	*ftag;
	int			 off;
	uint32_t		 frag_id;
	uint16_t		 total, maxlen;
	uint8_t			 proto;

	PF_FRAG_LOCK();

	/* Get an entry for the fragment queue. */
	if ((frent = pf_create_fragment(reason)) == NULL) {
		PF_FRAG_UNLOCK();
		return (PF_DROP);
	}

	frent->fe_m = m;
	frent->fe_hdrlen = hdrlen;
	frent->fe_extoff = extoff;
	frent->fe_len = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - hdrlen;
	frent->fe_off = ntohs(fraghdr->ip6f_offlg & IP6F_OFF_MASK);
	frent->fe_mff = fraghdr->ip6f_offlg & IP6F_MORE_FRAG;

	key.frc_src.v6 = ip6->ip6_src;
	key.frc_dst.v6 = ip6->ip6_dst;
	key.frc_af = AF_INET6;
	/* Only the first fragment's protocol is relevant. */
	key.frc_proto = 0;
	key.frc_id = fraghdr->ip6f_ident;

	if ((frag = pf_fillup_fragment(&key, frent, reason)) == NULL) {
		PF_FRAG_UNLOCK();
		return (PF_DROP);
	}

	/* The mbuf is part of the fragment entry, no direct free or access. */
	m = *m0 = NULL;

	if (frag->fr_holes) {
		DPFPRINTF(("frag %d, holes %d\n", frag->fr_id,
		    frag->fr_holes));
		PF_FRAG_UNLOCK();
		return (PF_PASS);	/* Drop because *m0 is NULL, no error. */
	}

	/* We have all the data. */
	frent = TAILQ_FIRST(&frag->fr_queue);
	KASSERT(frent != NULL, ("frent != NULL"));
	extoff = frent->fe_extoff;
	maxlen = frag->fr_maxlen;
	frag_id = frag->fr_id;
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
	    TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
	hdrlen = frent->fe_hdrlen - sizeof(struct ip6_frag);

	m = *m0 = pf_join_fragment(frag);
	frag = NULL;

	PF_FRAG_UNLOCK();

	/* Take protocol from first fragment header. */
	m = m_getptr(m, hdrlen + offsetof(struct ip6_frag, ip6f_nxt), &off);
	KASSERT(m, ("%s: short mbuf chain", __func__));
	proto = *(mtod(m, uint8_t *) + off);
	m = *m0;

	/* Delete frag6 header */
	if (ip6_deletefraghdr(m, hdrlen, M_NOWAIT) != 0)
		goto fail;

	if (m->m_flags & M_PKTHDR) {
		int plen = 0;
		for (m = *m0; m; m = m->m_next)
			plen += m->m_len;
		m = *m0;
		m->m_pkthdr.len = plen;
	}

	if ((mtag = m_tag_get(PACKET_TAG_PF_REASSEMBLED,
	    sizeof(struct pf_fragment_tag), M_NOWAIT)) == NULL)
		goto fail;
	ftag = (struct pf_fragment_tag *)(mtag + 1);
	ftag->ft_hdrlen = hdrlen;
	ftag->ft_extoff = extoff;
	ftag->ft_maxlen = maxlen;
	ftag->ft_id = frag_id;
	m_tag_prepend(m, mtag);

	ip6 = mtod(m, struct ip6_hdr *);
	ip6->ip6_plen = htons(hdrlen - sizeof(struct ip6_hdr) + total);
	if (extoff) {
		/* Write protocol into next field of last extension header. */
		m = m_getptr(m, extoff + offsetof(struct ip6_ext, ip6e_nxt),
		    &off);
		KASSERT(m, ("%s: short mbuf chain", __func__));
		*(mtod(m, char *) + off) = proto;
		m = *m0;
	} else
		ip6->ip6_nxt = proto;

	if (hdrlen - sizeof(struct ip6_hdr) + total > IPV6_MAXPACKET) {
		DPFPRINTF(("drop: too big: %d\n", total));
		ip6->ip6_plen = 0;
		REASON_SET(reason, PFRES_SHORT);
		/* PF_DROP requires a valid mbuf *m0 in pf_test6(). */
		return (PF_DROP);
	}

	DPFPRINTF(("complete: %p(%d)\n", m, ntohs(ip6->ip6_plen)));
	return (PF_PASS);

fail:
	REASON_SET(reason, PFRES_MEMORY);
	/* PF_DROP requires a valid mbuf *m0 in pf_test6(), will free later. */
	return (PF_DROP);
}
#endif /* INET6 */

#ifdef INET6
int
pf_max_frag_size(struct mbuf *m)
{
	struct m_tag *tag;
	struct pf_fragment_tag *ftag;

	tag = m_tag_find(m, PACKET_TAG_PF_REASSEMBLED, NULL);
	if (tag == NULL)
		return (m->m_pkthdr.len);

	ftag = (struct pf_fragment_tag *)(tag + 1);

	return (ftag->ft_maxlen);
}

int
pf_refragment6(struct ifnet *ifp, struct mbuf **m0, struct m_tag *mtag,
    struct ifnet *rt, bool forward)
{
	struct mbuf		*m = *m0, *t;
	struct ip6_hdr		*hdr;
	struct pf_fragment_tag	*ftag = (struct pf_fragment_tag *)(mtag + 1);
	struct pf_pdesc		 pd;
	uint32_t		 frag_id;
	uint16_t		 hdrlen, extoff, maxlen;
	uint8_t			 proto;
	int			 error, action;

	hdrlen = ftag->ft_hdrlen;
	extoff = ftag->ft_extoff;
	maxlen = ftag->ft_maxlen;
	frag_id = ftag->ft_id;
	m_tag_delete(m, mtag);
	mtag = NULL;
	ftag = NULL;

	if (extoff) {
		int off;

		/* Use protocol from next field of last extension header */
		m = m_getptr(m, extoff + offsetof(struct ip6_ext, ip6e_nxt),
		    &off);
		KASSERT((m != NULL), ("pf_refragment6: short mbuf chain"));
		proto = *(mtod(m, uint8_t *) + off);
		*(mtod(m, char *) + off) = IPPROTO_FRAGMENT;
		m = *m0;
	} else {
		hdr = mtod(m, struct ip6_hdr *);
		proto = hdr->ip6_nxt;
		hdr->ip6_nxt = IPPROTO_FRAGMENT;
	}

	/* In case of link-local traffic we'll need a scope set. */
	hdr = mtod(m, struct ip6_hdr *);

	in6_setscope(&hdr->ip6_src, ifp, NULL);
	in6_setscope(&hdr->ip6_dst, ifp, NULL);

	/* The MTU must be a multiple of 8 bytes, or we risk doing the
	 * fragmentation wrong. */
	maxlen = maxlen & ~7;

	/*
	 * Maxlen may be less than 8 if there was only a single
	 * fragment.  As it was fragmented before, add a fragment
	 * header also for a single fragment.  If total or maxlen
	 * is less than 8, ip6_fragment() will return EMSGSIZE and
	 * we drop the packet.
	 */
	error = ip6_fragment(ifp, m, hdrlen, proto, maxlen, frag_id);
	m = (*m0)->m_nextpkt;
	(*m0)->m_nextpkt = NULL;
	if (error == 0) {
		/* The first mbuf contains the unfragmented packet. */
		m_freem(*m0);
		*m0 = NULL;
		action = PF_PASS;
	} else {
		/* Drop expects an mbuf to free. */
		DPFPRINTF(("refragment error %d\n", error));
		action = PF_DROP;
	}
	for (; m; m = t) {
		t = m->m_nextpkt;
		m->m_nextpkt = NULL;
		m->m_flags |= M_SKIP_FIREWALL;
		memset(&pd, 0, sizeof(pd));
		pd.pf_mtag = pf_find_mtag(m);
		if (error != 0) {
			m_freem(m);
			continue;
		}
		if (rt != NULL) {
			struct sockaddr_in6 dst;
			hdr = mtod(m, struct ip6_hdr *);

			bzero(&dst, sizeof(dst));
			dst.sin6_family = AF_INET6;
			dst.sin6_len = sizeof(dst);
			dst.sin6_addr = hdr->ip6_dst;

			nd6_output_ifp(rt, rt, m, &dst, NULL);
		} else if (forward) {
			MPASS(m->m_pkthdr.rcvif != NULL);
			ip6_forward(m, 0);
		} else {
			(void)ip6_output(m, NULL, NULL, 0, NULL, NULL,
			    NULL);
		}
	}

	return (action);
}
#endif /* INET6 */

#ifdef INET
int
pf_normalize_ip(u_short *reason, struct pf_pdesc *pd)
{
	struct pf_krule		*r;
	struct ip		*h = mtod(pd->m, struct ip *);
	int			 mff = (ntohs(h->ip_off) & IP_MF);
	int			 hlen = h->ip_hl << 2;
	u_int16_t		 fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
	u_int16_t		 max;
	int			 ip_len;
	int			 tag = -1;
	int			 verdict;
	bool			 scrub_compat;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	/*
	 * Check if there are any scrub rules, matching or not.
	 * Lack of scrub rules means:
	 *  - enforced packet normalization operation just like in OpenBSD
	 *  - fragment reassembly depends on V_pf_status.reass
	 * With scrub rules:
	 *  - packet normalization is performed if there is a matching scrub rule
	 *  - fragment reassembly is performed if the matching rule has no
	 *    PFRULE_FRAGMENT_NOREASS flag
	 */
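	/*
	 * For illustration, a scrub rule selecting that path could be
	 * written in pf.conf as "scrub in all fragment reassemble no-df",
	 * while the no-rule path is governed by "set reassemble yes".
	 */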
	scrub_compat = (r != NULL);
	while (r != NULL) {
		pf_counter_u64_add(&r->evaluations, 1);
		if (pfi_kkif_match(r->kif, pd->kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP];
		else if (r->direction && r->direction != pd->dir)
			r = r->skip[PF_SKIP_DIR];
		else if (r->af && r->af != AF_INET)
			r = r->skip[PF_SKIP_AF];
		else if (r->proto && r->proto != h->ip_p)
			r = r->skip[PF_SKIP_PROTO];
		else if (PF_MISMATCHAW(&r->src.addr,
		    (struct pf_addr *)&h->ip_src.s_addr, AF_INET,
		    r->src.neg, pd->kif, M_GETFIB(pd->m)))
			r = r->skip[PF_SKIP_SRC_ADDR];
		else if (PF_MISMATCHAW(&r->dst.addr,
		    (struct pf_addr *)&h->ip_dst.s_addr, AF_INET,
		    r->dst.neg, NULL, M_GETFIB(pd->m)))
			r = r->skip[PF_SKIP_DST_ADDR];
		else if (r->match_tag && !pf_match_tag(pd->m, r, &tag,
		    pd->pf_mtag ? pd->pf_mtag->tag : 0))
			r = TAILQ_NEXT(r, entries);
		else
			break;
	}

	if (scrub_compat) {
		/* With scrub rules present IPv4 normalization happens only
		 * if one of the rules has matched and it's not a "no scrub"
		 * rule */
		if (r == NULL || r->action == PF_NOSCRUB)
			return (PF_PASS);

		pf_counter_u64_critical_enter();
		pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1);
		pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT], pd->tot_len);
		pf_counter_u64_critical_exit();
		pf_rule_to_actions(r, &pd->act);
	}

	/* Check for illegal packets */
	if (hlen < (int)sizeof(struct ip)) {
		REASON_SET(reason, PFRES_NORM);
		goto drop;
	}

	if (hlen > ntohs(h->ip_len)) {
		REASON_SET(reason, PFRES_NORM);
		goto drop;
	}

	/* Clear IP_DF if the rule uses the no-df option or we're in no-df mode */
	if (((!scrub_compat && V_pf_status.reass & PF_REASS_NODF) ||
	    (r != NULL && r->rule_flag & PFRULE_NODF)) &&
	    (h->ip_off & htons(IP_DF))
	    ) {
		u_int16_t ip_off = h->ip_off;

		h->ip_off &= htons(~IP_DF);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
	}

	/* We will need other tests here */
	if (!fragoff && !mff)
		goto no_fragment;

	/* We're dealing with a fragment now.  Don't allow fragments
	 * with IP_DF to enter the cache.  If the flag was cleared by
	 * no-df above, fine.  Otherwise drop it.
	 */
	if (h->ip_off & htons(IP_DF)) {
		DPFPRINTF(("IP_DF\n"));
		goto bad;
	}

	ip_len = ntohs(h->ip_len) - hlen;

	/* All fragments are 8 byte aligned */
	if (mff && (ip_len & 0x7)) {
		DPFPRINTF(("mff and %d\n", ip_len));
		goto bad;
	}

	/* Respect maximum length */
	if (fragoff + ip_len > IP_MAXPACKET) {
		DPFPRINTF(("max packet %d\n", fragoff + ip_len));
		goto bad;
	}

	if ((!scrub_compat && V_pf_status.reass) ||
	    (r != NULL && !(r->rule_flag & PFRULE_FRAGMENT_NOREASS))
	    ) {
		max = fragoff + ip_len;

		/* Fully buffer all of the fragments
		 * Might return a completely reassembled mbuf, or NULL */
		PF_FRAG_LOCK();
		DPFPRINTF(("reass frag %d @ %d-%d\n", h->ip_id, fragoff, max));
		verdict = pf_reassemble(&pd->m, pd->dir, reason);
		PF_FRAG_UNLOCK();

		if (verdict != PF_PASS)
			return (PF_DROP);

		if (pd->m == NULL)
			return (PF_DROP);

		h = mtod(pd->m, struct ip *);
		pd->tot_len = ntohs(h->ip_len);

no_fragment:
		/* At this point, only IP_DF is allowed in ip_off */
		if (h->ip_off & ~htons(IP_DF)) {
			u_int16_t ip_off = h->ip_off;

			h->ip_off &= htons(IP_DF);
			h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
		}
	}

	return (PF_PASS);

bad:
	DPFPRINTF(("dropping bad fragment\n"));
	REASON_SET(reason, PFRES_FRAG);
drop:
	if (r != NULL && r->log)
		PFLOG_PACKET(PF_DROP, *reason, r, NULL, NULL, pd, 1);

	return (PF_DROP);
}
#endif /* INET */

#ifdef INET6
int
pf_normalize_ip6(int off, u_short *reason, struct pf_pdesc *pd)
{
	struct pf_krule		*r;
	struct ip6_hdr		*h;
	struct ip6_frag		 frag;
	bool			 scrub_compat;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	/*
	 * Check if there are any scrub rules, matching or not.
	 * Lack of scrub rules means:
	 *  - enforced packet normalization operation just like in OpenBSD
	 * With scrub rules:
	 *  - packet normalization is performed if there is a matching scrub rule
	 * XXX: Fragment reassembly always performed for IPv6!
	 */
	scrub_compat = (r != NULL);
	while (r != NULL) {
		pf_counter_u64_add(&r->evaluations, 1);
		if (pfi_kkif_match(r->kif, pd->kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP];
		else if (r->direction && r->direction != pd->dir)
			r = r->skip[PF_SKIP_DIR];
		else if (r->af && r->af != AF_INET6)
			r = r->skip[PF_SKIP_AF];
		else if (r->proto && r->proto != pd->proto)
			r = r->skip[PF_SKIP_PROTO];
		else if (PF_MISMATCHAW(&r->src.addr,
		    (struct pf_addr *)&pd->src, AF_INET6,
		    r->src.neg, pd->kif, M_GETFIB(pd->m)))
			r = r->skip[PF_SKIP_SRC_ADDR];
		else if (PF_MISMATCHAW(&r->dst.addr,
		    (struct pf_addr *)&pd->dst, AF_INET6,
		    r->dst.neg, NULL, M_GETFIB(pd->m)))
			r = r->skip[PF_SKIP_DST_ADDR];
		else
			break;
	}

	if (scrub_compat) {
		/* With scrub rules present IPv6 normalization happens only
		 * if one of the rules has matched and it's not a "no scrub"
		 * rule */
		if (r == NULL || r->action == PF_NOSCRUB)
			return (PF_PASS);

		pf_counter_u64_critical_enter();
		pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1);
		pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT], pd->tot_len);
		pf_counter_u64_critical_exit();
		pf_rule_to_actions(r, &pd->act);
	}

	if (!pf_pull_hdr(pd->m, off, &frag, sizeof(frag), NULL, reason, AF_INET6))
		return (PF_DROP);

	/* Offset now points to data portion. */
	off += sizeof(frag);

	if (pd->virtual_proto == PF_VPROTO_FRAGMENT) {
		/* pf_reassemble6() returns PF_DROP, or PF_PASS with pd->m
		 * either NULL or pointing at the completely reassembled
		 * mbuf. */
		if (pf_reassemble6(&pd->m, &frag, off, pd->extoff, reason) != PF_PASS)
			return (PF_DROP);
		if (pd->m == NULL)
			return (PF_DROP);
		h = mtod(pd->m, struct ip6_hdr *);
		pd->tot_len = ntohs(h->ip6_plen) + sizeof(struct ip6_hdr);
	}

	return (PF_PASS);
}
#endif /* INET6 */

int
pf_normalize_tcp(struct pf_pdesc *pd)
{
	struct pf_krule	*r, *rm = NULL;
	struct tcphdr	*th = &pd->hdr.tcp;
	int		 rewrite = 0;
	u_short		 reason;
	u_int16_t	 flags;
	sa_family_t	 af = pd->af;
	int		 srs;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	/* Check if there are any scrub rules.  Lack of scrub rules means
	 * enforced packet normalization operation just like in OpenBSD. */
	srs = (r != NULL);
	while (r != NULL) {
		pf_counter_u64_add(&r->evaluations, 1);
		if (pfi_kkif_match(r->kif, pd->kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP];
		else if (r->direction && r->direction != pd->dir)
			r = r->skip[PF_SKIP_DIR];
		else if (r->af && r->af != af)
			r = r->skip[PF_SKIP_AF];
		else if (r->proto && r->proto != pd->proto)
			r = r->skip[PF_SKIP_PROTO];
		else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
		    r->src.neg, pd->kif, M_GETFIB(pd->m)))
			r = r->skip[PF_SKIP_SRC_ADDR];
		else if (r->src.port_op && !pf_match_port(r->src.port_op,
		    r->src.port[0], r->src.port[1], th->th_sport))
			r = r->skip[PF_SKIP_SRC_PORT];
		else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
		    r->dst.neg, NULL, M_GETFIB(pd->m)))
			r = r->skip[PF_SKIP_DST_ADDR];
		else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
		    r->dst.port[0], r->dst.port[1], th->th_dport))
			r = r->skip[PF_SKIP_DST_PORT];
		else if (r->os_fingerprint != PF_OSFP_ANY && !pf_osfp_match(
		    pf_osfp_fingerprint(pd, th),
		    r->os_fingerprint))
			r = TAILQ_NEXT(r, entries);
		else {
			rm = r;
			break;
		}
	}

	if (srs) {
		/* With scrub rules present TCP normalization happens only
		 * if one of the rules has matched and it's not a "no scrub"
		 * rule */
		if (rm == NULL || rm->action == PF_NOSCRUB)
			return (PF_PASS);

		pf_counter_u64_critical_enter();
		pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1);
		pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT], pd->tot_len);
		pf_counter_u64_critical_exit();
		pf_rule_to_actions(rm, &pd->act);
	}

	if (rm && rm->rule_flag & PFRULE_REASSEMBLE_TCP)
		pd->flags |= PFDESC_TCP_NORM;

	flags = tcp_get_flags(th);
	if (flags & TH_SYN) {
		/* Illegal packet */
		if (flags & TH_RST)
			goto tcp_drop;

		if (flags & TH_FIN)
			goto tcp_drop;
	} else {
		/* Illegal packet */
		if (!(flags & (TH_ACK|TH_RST)))
			goto tcp_drop;
	}

	if (!(flags & TH_ACK)) {
		/* These flags are only valid if ACK is set */
		if ((flags & TH_FIN) || (flags & TH_PUSH) || (flags & TH_URG))
			goto tcp_drop;
	}

	/* Check for illegal header length */
	if (th->th_off < (sizeof(struct tcphdr) >> 2))
		goto tcp_drop;

	/* If flags changed, or reserved data set, then adjust */
	if (flags != tcp_get_flags(th) ||
	    (tcp_get_flags(th) & (TH_RES1|TH_RES2|TH_RES3)) != 0) {
		u_int16_t	ov, nv;

		ov = *(u_int16_t *)(&th->th_ack + 1);
		flags &= ~(TH_RES1 | TH_RES2 | TH_RES3);
		tcp_set_flags(th, flags);
		nv = *(u_int16_t *)(&th->th_ack + 1);

		th->th_sum = pf_proto_cksum_fixup(pd->m, th->th_sum, ov, nv, 0);
		rewrite = 1;
	}

	/* Remove urgent pointer, if TH_URG is not set */
	if (!(flags & TH_URG) && th->th_urp) {
		th->th_sum = pf_proto_cksum_fixup(pd->m, th->th_sum, th->th_urp,
		    0, 0);
		th->th_urp = 0;
		rewrite = 1;
	}

	/* copy back packet headers if we sanitized */
	if (rewrite)
		m_copyback(pd->m, pd->off, sizeof(*th), (caddr_t)th);

	return (PF_PASS);

tcp_drop:
	REASON_SET(&reason, PFRES_NORM);
	if (rm != NULL && r->log)
		PFLOG_PACKET(PF_DROP, reason, r, NULL, NULL, pd, 1);
	return (PF_DROP);
}

int
pf_normalize_tcp_init(struct pf_pdesc *pd, struct tcphdr *th,
    struct pf_state_peer *src, struct pf_state_peer *dst)
{
	u_int32_t tsval, tsecr;
	u_int8_t hdr[60];
	u_int8_t *opt;

	KASSERT((src->scrub == NULL),
	    ("pf_normalize_tcp_init: src->scrub != NULL"));

	src->scrub = uma_zalloc(V_pf_state_scrub_z, M_ZERO | M_NOWAIT);
	if (src->scrub == NULL)
		return (1);

	switch (pd->af) {
#ifdef INET
	case AF_INET: {
		struct ip *h = mtod(pd->m, struct ip *);
		src->scrub->pfss_ttl = h->ip_ttl;
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		struct ip6_hdr *h = mtod(pd->m, struct ip6_hdr *);
		src->scrub->pfss_ttl = h->ip6_hlim;
		break;
	}
#endif /* INET6 */
	}

	/*
	 * All normalizations below are only begun if we see the start of
	 * the connections.  They must all set an enabled bit in pfss_flags
	 */
	if ((tcp_get_flags(th) & TH_SYN) == 0)
		return (0);

	if (th->th_off > (sizeof(struct tcphdr) >> 2) && src->scrub &&
	    pf_pull_hdr(pd->m, pd->off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
		/* Diddle with TCP options */
		int hlen;
		opt = hdr + sizeof(struct tcphdr);
		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
		while (hlen >= TCPOLEN_TIMESTAMP) {
			switch (*opt) {
			case TCPOPT_EOL:	/* FALLTHROUGH */
			case TCPOPT_NOP:
				opt++;
				hlen--;
				break;
			case TCPOPT_TIMESTAMP:
				if (opt[1] >= TCPOLEN_TIMESTAMP) {
					src->scrub->pfss_flags |=
					    PFSS_TIMESTAMP;
					src->scrub->pfss_ts_mod =
					    htonl(arc4random());

					/* note PFSS_PAWS not set yet */
					memcpy(&tsval, &opt[2],
					    sizeof(u_int32_t));
					memcpy(&tsecr, &opt[6],
					    sizeof(u_int32_t));
					src->scrub->pfss_tsval0 = ntohl(tsval);
					src->scrub->pfss_tsval = ntohl(tsval);
					src->scrub->pfss_tsecr = ntohl(tsecr);
					getmicrouptime(&src->scrub->pfss_last);
				}
				/* FALLTHROUGH */
			default:
				hlen -= MAX(opt[1], 2);
				opt += MAX(opt[1], 2);
				break;
			}
		}
	}

	return (0);
}

void
pf_normalize_tcp_cleanup(struct pf_kstate *state)
{
	/* XXX Note: this also cleans up SCTP. */
	uma_zfree(V_pf_state_scrub_z, state->src.scrub);
	uma_zfree(V_pf_state_scrub_z, state->dst.scrub);

	/* Someday... flush the TCP segment reassembly descriptors. */
}

int
pf_normalize_sctp_init(struct pf_pdesc *pd, struct pf_state_peer *src,
    struct pf_state_peer *dst)
{
	src->scrub = uma_zalloc(V_pf_state_scrub_z, M_ZERO | M_NOWAIT);
	if (src->scrub == NULL)
		return (1);

	dst->scrub = uma_zalloc(V_pf_state_scrub_z, M_ZERO | M_NOWAIT);
	if (dst->scrub == NULL) {
		uma_zfree(V_pf_state_scrub_z, src->scrub);
		return (1);
	}

	dst->scrub->pfss_v_tag = pd->sctp_initiate_tag;

	return (0);
}

int
pf_normalize_tcp_stateful(struct pf_pdesc *pd,
    u_short *reason, struct tcphdr *th, struct pf_kstate *state,
    struct pf_state_peer *src, struct pf_state_peer *dst, int *writeback)
{
	struct timeval uptime;
	u_int32_t tsval, tsecr;
	u_int tsval_from_last;
	u_int8_t hdr[60];
	u_int8_t *opt;
	int copyback = 0;
	int got_ts = 0;
	size_t startoff;

	KASSERT((src->scrub || dst->scrub),
	    ("%s: neither src->scrub nor dst->scrub set", __func__));

	/*
	 * Enforce the minimum TTL seen for this connection.  Negate a common
	 * technique to evade an intrusion detection system and confuse
	 * firewall state code.
	 */
	switch (pd->af) {
#ifdef INET
	case AF_INET: {
		if (src->scrub) {
			struct ip *h = mtod(pd->m, struct ip *);
			if (h->ip_ttl > src->scrub->pfss_ttl)
				src->scrub->pfss_ttl = h->ip_ttl;
			h->ip_ttl = src->scrub->pfss_ttl;
		}
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		if (src->scrub) {
			struct ip6_hdr *h = mtod(pd->m, struct ip6_hdr *);
			if (h->ip6_hlim > src->scrub->pfss_ttl)
				src->scrub->pfss_ttl = h->ip6_hlim;
			h->ip6_hlim = src->scrub->pfss_ttl;
		}
		break;
	}
#endif /* INET6 */
	}

	if (th->th_off > (sizeof(struct tcphdr) >> 2) &&
	    ((src->scrub && (src->scrub->pfss_flags & PFSS_TIMESTAMP)) ||
	    (dst->scrub && (dst->scrub->pfss_flags & PFSS_TIMESTAMP))) &&
	    pf_pull_hdr(pd->m, pd->off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
		/* Diddle with TCP options */
		int hlen;
		opt = hdr + sizeof(struct tcphdr);
		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
		while (hlen >= TCPOLEN_TIMESTAMP) {
			startoff = opt - (hdr + sizeof(struct tcphdr));
			switch (*opt) {
			case TCPOPT_EOL:	/* FALLTHROUGH */
			case TCPOPT_NOP:
				opt++;
				hlen--;
				break;
			case TCPOPT_TIMESTAMP:
				/* Modulate the timestamps.  Can be used for
				 * NAT detection, OS uptime determination or
				 * reboot detection.
				 */
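				/*
				 * Example: each peer's tsval is bumped by
				 * its own modulator pfss_ts_mod, so t goes
				 * out as t + m; the other side echoes
				 * t + m, and the dst modulator is
				 * subtracted from tsecr below to recover t.
				 */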

				if (got_ts) {
					/* Huh?  Multiple timestamps!? */
					if (V_pf_status.debug >= PF_DEBUG_MISC) {
						DPFPRINTF(("multiple TS??\n"));
						pf_print_state(state);
						printf("\n");
					}
					REASON_SET(reason, PFRES_TS);
					return (PF_DROP);
				}
				if (opt[1] >= TCPOLEN_TIMESTAMP) {
					memcpy(&tsval, &opt[2],
					    sizeof(u_int32_t));
					if (tsval && src->scrub &&
					    (src->scrub->pfss_flags &
					    PFSS_TIMESTAMP)) {
						tsval = ntohl(tsval);
						pf_patch_32_unaligned(pd->m,
						    &th->th_sum,
						    &opt[2],
						    htonl(tsval +
						    src->scrub->pfss_ts_mod),
						    PF_ALGNMNT(startoff),
						    0);
						copyback = 1;
					}

					/* Modulate TS reply iff valid (!0) */
					memcpy(&tsecr, &opt[6],
					    sizeof(u_int32_t));
					if (tsecr && dst->scrub &&
					    (dst->scrub->pfss_flags &
					    PFSS_TIMESTAMP)) {
						tsecr = ntohl(tsecr)
						    - dst->scrub->pfss_ts_mod;
						pf_patch_32_unaligned(pd->m,
						    &th->th_sum,
						    &opt[6],
						    htonl(tsecr),
						    PF_ALGNMNT(startoff),
						    0);
						copyback = 1;
					}
					got_ts = 1;
				}
				/* FALLTHROUGH */
			default:
				hlen -= MAX(opt[1], 2);
				opt += MAX(opt[1], 2);
				break;
			}
		}
		if (copyback) {
			/* Copyback the options, caller copies back header */
			*writeback = 1;
			m_copyback(pd->m, pd->off + sizeof(struct tcphdr),
			    (th->th_off << 2) - sizeof(struct tcphdr), hdr +
			    sizeof(struct tcphdr));
		}
	}

	/*
	 * Must invalidate PAWS checks on connections idle for too long.
	 * The fastest allowed timestamp clock is 1ms.  That turns out to
	 * be about 24 days before it wraps.  XXX Right now our lowerbound
	 * TS echo check only works for the first 12 days of a connection
	 * when the TS has exhausted half its 32bit space
	 */
#define TS_MAX_IDLE	(24*24*60*60)
#define TS_MAX_CONN	(12*24*60*60)	/* XXX remove when better tsecr check */

	getmicrouptime(&uptime);
	if (src->scrub && (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (uptime.tv_sec - src->scrub->pfss_last.tv_sec > TS_MAX_IDLE ||
	    time_uptime - (state->creation / 1000) > TS_MAX_CONN)) {
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			DPFPRINTF(("src idled out of PAWS\n"));
			pf_print_state(state);
			printf("\n");
		}
		src->scrub->pfss_flags = (src->scrub->pfss_flags & ~PFSS_PAWS)
		    | PFSS_PAWS_IDLED;
	}
	if (dst->scrub && (dst->scrub->pfss_flags & PFSS_PAWS) &&
	    uptime.tv_sec - dst->scrub->pfss_last.tv_sec > TS_MAX_IDLE) {
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			DPFPRINTF(("dst idled out of PAWS\n"));
			pf_print_state(state);
			printf("\n");
		}
		dst->scrub->pfss_flags = (dst->scrub->pfss_flags & ~PFSS_PAWS)
		    | PFSS_PAWS_IDLED;
	}

	if (got_ts && src->scrub && dst->scrub &&
	    (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (dst->scrub->pfss_flags & PFSS_PAWS)) {
		/* Validate that the timestamps are "in-window".
		 * RFC1323 describes TCP Timestamp options that allow
		 * measurement of RTT (round trip time) and PAWS
		 * (protection against wrapped sequence numbers).  PAWS
		 * gives us a set of rules for rejecting packets on
		 * long fat pipes (packets that were somehow delayed
		 * in transit longer than the time it took to send the
		 * full TCP sequence space of 4Gb).  We can use these
		 * rules and infer a few others that will let us treat
		 * the 32bit timestamp and the 32bit echoed timestamp
		 * as sequence numbers to prevent a blind attacker from
		 * inserting packets into a connection.
		 *
		 * RFC1323 tells us:
		 *  - The timestamp on this packet must be greater than
		 *    or equal to the last value echoed by the other
		 *    endpoint.  The RFC says those will be discarded
		 *    since it is a dup that has already been acked.
		 *    This gives us a lowerbound on the timestamp.
		 *        timestamp >= other last echoed timestamp
		 *  - The timestamp will be less than or equal to
		 *    the last timestamp plus the time between the
		 *    last packet and now.  The RFC defines the max
		 *    clock rate as 1ms.  We will allow clocks to be
		 *    up to 10% fast and will allow a total difference
		 *    of 30 seconds due to a route change.  And this
		 *    gives us an upperbound on the timestamp.
		 *        timestamp <= last timestamp + max ticks
		 *    We have to be careful here.  Windows will send an
		 *    initial timestamp of zero and then initialize it
		 *    to a random value after the 3whs; presumably to
		 *    avoid a DoS by having to call an expensive RNG
		 *    during a SYN flood.  Proof MS has at least one
		 *    good security geek.
		 *
		 *  - The TCP timestamp option must also echo the other
		 *    endpoint's timestamp.  The timestamp echoed is the
		 *    one carried on the earliest unacknowledged segment
		 *    on the left edge of the sequence window.  The RFC
		 *    states that the host will reject any echoed
		 *    timestamps that were larger than any ever sent.
		 *    This gives us an upperbound on the TS echo.
		 *        tsecr <= largest_tsval
		 *  - The lowerbound on the TS echo is a little more
		 *    tricky to determine.  The other endpoint's echoed
		 *    values will not decrease.  But there may be
		 *    network conditions that re-order packets and
		 *    cause our view of them to decrease.  For now the
		 *    only lowerbound we can safely determine is that
		 *    the TS echo will never be less than the original
		 *    TS.  XXX There is probably a better lowerbound.
		 *    Remove TS_MAX_CONN with better lowerbound check.
		 *        tsecr >= other original TS
		 *
		 * It is also important to note that the fastest
		 * timestamp clock of 1ms will wrap its 32bit space in
		 * 24 days.  So we just disable TS checking after 24
		 * days of idle time.  We actually must use a 12d
		 * connection limit until we can come up with a better
		 * lowerbound to the TS echo check.
		 */
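		/*
		 * In short, once both peers are established a packet
		 * carrying timestamps passes only if
		 *     dst->pfss_tsecr <= tsval <= src->pfss_tsval + max ticks
		 * and, when tsecr is non-zero,
		 *     dst->pfss_tsval0 <= tsecr <= dst->pfss_tsval
		 * which is what the sequence comparisons below enforce.
		 */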
		struct timeval delta_ts;
		int ts_fudge;

		/*
		 * PFTM_TS_DIFF is how many seconds of leeway to allow
		 * a host's timestamp.  This can happen if the previous
		 * packet got delayed in transit for much longer than
		 * this packet.
		 */
		if ((ts_fudge = state->rule->timeout[PFTM_TS_DIFF]) == 0)
			ts_fudge = V_pf_default_rule.timeout[PFTM_TS_DIFF];

		/* Calculate max ticks since the last timestamp */
#define TS_MAXFREQ	1100		/* RFC max TS freq of 1 kHz + 10% skew */
#define TS_MICROSECS	1000000		/* microseconds per second */
		delta_ts = uptime;
		timevalsub(&delta_ts, &src->scrub->pfss_last);
		tsval_from_last = (delta_ts.tv_sec + ts_fudge) * TS_MAXFREQ;
		tsval_from_last += delta_ts.tv_usec / (TS_MICROSECS/TS_MAXFREQ);
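		/*
		 * Illustrative arithmetic (not taken from the code): with
		 * the 30-second fudge assumed in the comment above and a
		 * packet arriving 10 seconds after the previous one,
		 * tsval_from_last = (10 + 30) * 1100 = 44000 ticks, plus
		 * tv_usec / 909 for the sub-second part.  A peer ticking at
		 * the fastest allowed rate plus 10% skew cannot legitimately
		 * have advanced its timestamp by more than that.
		 */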
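		/*
		 * The check below applies the bounds derived above, in the
		 * same order as the '0'..'3' markers in the debug output:
		 * (0) tsval must not be older than the last value we echoed,
		 * (1) tsval must not be further ahead than the elapsed idle
		 * time allows, and, if a TS echo is present, (2) tsecr must
		 * not exceed any tsval the other side has sent, (3) nor fall
		 * below its original one.
		 */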
		if ((src->state >= TCPS_ESTABLISHED &&
		    dst->state >= TCPS_ESTABLISHED) &&
		    (SEQ_LT(tsval, dst->scrub->pfss_tsecr) ||
		    SEQ_GT(tsval, src->scrub->pfss_tsval + tsval_from_last) ||
		    (tsecr && (SEQ_GT(tsecr, dst->scrub->pfss_tsval) ||
		    SEQ_LT(tsecr, dst->scrub->pfss_tsval0))))) {
			/* Bad RFC1323 implementation or an insertion attack.
			 *
			 * - Solaris 2.6 and 2.7 are known to send another ACK
			 *   after the FIN,FIN|ACK,ACK closing that carries
			 *   an old timestamp.
			 */

			DPFPRINTF(("Timestamp failed %c%c%c%c\n",
			    SEQ_LT(tsval, dst->scrub->pfss_tsecr) ? '0' : ' ',
			    SEQ_GT(tsval, src->scrub->pfss_tsval +
			    tsval_from_last) ? '1' : ' ',
			    SEQ_GT(tsecr, dst->scrub->pfss_tsval) ? '2' : ' ',
			    SEQ_LT(tsecr, dst->scrub->pfss_tsval0) ? '3' : ' '));
			DPFPRINTF((" tsval: %u tsecr: %u +ticks: %u "
			    "idle: %jus %lums\n",
			    tsval, tsecr, tsval_from_last,
			    (uintmax_t)delta_ts.tv_sec,
			    delta_ts.tv_usec / 1000));
			DPFPRINTF((" src->tsval: %u tsecr: %u\n",
			    src->scrub->pfss_tsval, src->scrub->pfss_tsecr));
			DPFPRINTF((" dst->tsval: %u tsecr: %u tsval0: %u\n",
			    dst->scrub->pfss_tsval,
			    dst->scrub->pfss_tsecr, dst->scrub->pfss_tsval0));
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
				pf_print_state(state);
				pf_print_flags(tcp_get_flags(th));
				printf("\n");
			}
			REASON_SET(reason, PFRES_TS);
			return (PF_DROP);
		}

		/* XXX I'd really like to require tsecr but it's optional */

	} else if (!got_ts && (tcp_get_flags(th) & TH_RST) == 0 &&
	    ((src->state == TCPS_ESTABLISHED && dst->state == TCPS_ESTABLISHED)
	    || pd->p_len > 0 || (tcp_get_flags(th) & TH_SYN)) &&
	    src->scrub && dst->scrub &&
	    (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (dst->scrub->pfss_flags & PFSS_PAWS)) {
		/* Didn't send a timestamp.  Timestamps aren't really useful
		 * when:
		 * - connection opening or closing (often not even sent).
		 *   But we must not let an attacker put a FIN on a
		 *   data packet to sneak it through our ESTABLISHED check.
		 * - on a TCP reset.  The RFC suggests not even looking at
		 *   the TS.
		 * - on an empty ACK.  The TS will not be echoed so it will
		 *   probably not help keep the RTT calculation in sync and
		 *   there isn't as much danger when the sequence numbers
		 *   wrap.  So some stacks don't include TS on empty
		 *   ACKs :-(
		 *
		 * To minimize the disruption to mostly RFC1323 conformant
		 * stacks, we will only require timestamps on data packets.
		 *
		 * And what do ya know, we cannot require timestamps on data
		 * packets.  There appear to be devices that do legitimate
		 * TCP connection hijacking.  There are HTTP devices that
		 * allow a 3whs (with timestamps) and then buffer the HTTP
		 * request.  If the intermediate device has the HTTP response
		 * cached, it will spoof the response but not bother
		 * timestamping its packets.  So we can look for the presence
		 * of a timestamp in the first data packet and, if there,
		 * require it in all future packets.
		 */

		if (pd->p_len > 0 && (src->scrub->pfss_flags & PFSS_DATA_TS)) {
			/*
			 * Hey!  Someone tried to sneak a packet in.  Or the
			 * stack changed its RFC1323 behavior?!?!
			 */
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
				DPFPRINTF(("Did not receive expected RFC1323 "
				    "timestamp\n"));
				pf_print_state(state);
				pf_print_flags(tcp_get_flags(th));
				printf("\n");
			}
			REASON_SET(reason, PFRES_TS);
			return (PF_DROP);
		}
	}

	/*
	 * We will note whether a host sends its data packets with or
	 * without timestamps, and require all subsequent data packets to
	 * contain a timestamp if the first one does.  PAWS implicitly
	 * requires that all data packets be timestamped.  But I think there
	 * are middle-man devices that hijack TCP streams immediately after
	 * the 3whs and don't timestamp their packets (seen in a WWW
	 * accelerator or cache).
	 */
	if (pd->p_len > 0 && src->scrub && (src->scrub->pfss_flags &
	    (PFSS_TIMESTAMP|PFSS_DATA_TS|PFSS_DATA_NOTS)) == PFSS_TIMESTAMP) {
		if (got_ts)
			src->scrub->pfss_flags |= PFSS_DATA_TS;
		else {
			src->scrub->pfss_flags |= PFSS_DATA_NOTS;
			if (V_pf_status.debug >= PF_DEBUG_MISC && dst->scrub &&
			    (dst->scrub->pfss_flags & PFSS_TIMESTAMP)) {
				/* Don't warn if other host rejected RFC1323 */
				DPFPRINTF(("Broken RFC1323 stack did not "
				    "timestamp data packet.  Disabled PAWS "
				    "security.\n"));
				pf_print_state(state);
				pf_print_flags(tcp_get_flags(th));
				printf("\n");
			}
		}
	}

	/*
	 * Update PAWS values
	 */
	if (got_ts && src->scrub && PFSS_TIMESTAMP == (src->scrub->pfss_flags &
	    (PFSS_PAWS_IDLED|PFSS_TIMESTAMP))) {
		getmicrouptime(&src->scrub->pfss_last);
		if (SEQ_GEQ(tsval, src->scrub->pfss_tsval) ||
		    (src->scrub->pfss_flags & PFSS_PAWS) == 0)
			src->scrub->pfss_tsval = tsval;

		if (tsecr) {
			if (SEQ_GEQ(tsecr, src->scrub->pfss_tsecr) ||
			    (src->scrub->pfss_flags & PFSS_PAWS) == 0)
				src->scrub->pfss_tsecr = tsecr;

			if ((src->scrub->pfss_flags & PFSS_PAWS) == 0 &&
			    (SEQ_LT(tsval, src->scrub->pfss_tsval0) ||
			    src->scrub->pfss_tsval0 == 0)) {
				/* tsval0 MUST be the lowest timestamp */
				src->scrub->pfss_tsval0 = tsval;
			}

			/* Only fully initialized after a TS gets echoed */
			if ((src->scrub->pfss_flags & PFSS_PAWS) == 0)
				src->scrub->pfss_flags |= PFSS_PAWS;
		}
	}

	/* I have a dream....  TCP segment reassembly.... */
	return (0);
}

int
pf_normalize_mss(struct pf_pdesc *pd)
{
	struct tcphdr *th = &pd->hdr.tcp;
	u_int16_t *mss;
	int thoff;
	int opt, cnt, optlen = 0;
	u_char opts[TCP_MAXOLEN];
	u_char *optp = opts;
	size_t startoff;

	thoff = th->th_off << 2;
	cnt = thoff - sizeof(struct tcphdr);

	if (cnt <= 0 || cnt > MAX_TCPOPTLEN || !pf_pull_hdr(pd->m,
	    pd->off + sizeof(*th), opts, cnt, NULL, NULL, pd->af))
		return (0);
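	/*
	 * Walk the TCP options.  Options are encoded as (kind, length,
	 * data) with the length byte covering the kind and length bytes
	 * themselves; NOP and EOL are single-byte options with no length
	 * byte, and EOL terminates the list.
	 */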
	for (; cnt > 0; cnt -= optlen, optp += optlen) {
		startoff = optp - opts;
		opt = optp[0];
		if (opt == TCPOPT_EOL)
			break;
		if (opt == TCPOPT_NOP)
			optlen = 1;
		else {
			if (cnt < 2)
				break;
			optlen = optp[1];
			if (optlen < 2 || optlen > cnt)
				break;
		}
		switch (opt) {
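		/*
		 * Clamp an advertised MSS that exceeds max-mss: the option
		 * value is rewritten in place with an incremental TCP
		 * checksum patch, then the option block and header are
		 * copied back into the mbuf chain.
		 */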
		case TCPOPT_MAXSEG:
			mss = (u_int16_t *)(optp + 2);
			if ((ntohs(*mss)) > pd->act.max_mss) {
				pf_patch_16_unaligned(pd->m,
				    &th->th_sum,
				    mss, htons(pd->act.max_mss),
				    PF_ALGNMNT(startoff),
				    0);
				m_copyback(pd->m, pd->off + sizeof(*th),
				    thoff - sizeof(*th), opts);
				m_copyback(pd->m, pd->off, sizeof(*th),
				    (caddr_t)th);
			}
			break;
		default:
			break;
		}
	}

	return (0);
}

int
pf_scan_sctp(struct pf_pdesc *pd)
{
	struct sctp_chunkhdr ch = { };
	int chunk_off = sizeof(struct sctphdr);
	int chunk_start;
	int ret;
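	/*
	 * An SCTP packet is the common header followed by a sequence of
	 * chunks.  Each chunk starts with a (type, flags, length) header
	 * and is padded to a multiple of 4 bytes; chunk_length counts the
	 * header but not the padding, hence the roundup() below.
	 */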
	while (pd->off + chunk_off < pd->tot_len) {
		if (!pf_pull_hdr(pd->m, pd->off + chunk_off, &ch, sizeof(ch),
		    NULL, NULL, pd->af))
			return (PF_DROP);

		/* The length includes the chunk header, so it must be at
		 * least 4. */
		if (ntohs(ch.chunk_length) < 4)
			return (PF_DROP);

		chunk_start = chunk_off;
		chunk_off += roundup(ntohs(ch.chunk_length), 4);

		switch (ch.chunk_type) {
		case SCTP_INITIATION:
		case SCTP_INITIATION_ACK: {
			struct sctp_init_chunk init;

			if (!pf_pull_hdr(pd->m, pd->off + chunk_start, &init,
			    sizeof(init), NULL, NULL, pd->af))
				return (PF_DROP);

			/*
			 * RFC 9260, Section 3.3.2: "The Initiate Tag is
			 * allowed to have any value except 0."
			 */
			if (init.init.initiate_tag == 0)
				return (PF_DROP);
			if (init.init.num_inbound_streams == 0)
				return (PF_DROP);
			if (init.init.num_outbound_streams == 0)
				return (PF_DROP);
			if (ntohl(init.init.a_rwnd) < SCTP_MIN_RWND)
				return (PF_DROP);

			/*
			 * RFC 9260, Section 3.1: INIT chunks MUST have a
			 * zero verification tag.
			 */
			if (ch.chunk_type == SCTP_INITIATION &&
			    pd->hdr.sctp.v_tag != 0)
				return (PF_DROP);

			pd->sctp_initiate_tag = init.init.initiate_tag;

			if (ch.chunk_type == SCTP_INITIATION)
				pd->sctp_flags |= PFDESC_SCTP_INIT;
			else
				pd->sctp_flags |= PFDESC_SCTP_INIT_ACK;

			ret = pf_multihome_scan_init(pd->off + chunk_start,
			    ntohs(init.ch.chunk_length), pd);
			if (ret != PF_PASS)
				return (ret);

			break;
		}
		case SCTP_ABORT_ASSOCIATION:
			pd->sctp_flags |= PFDESC_SCTP_ABORT;
			break;
		case SCTP_SHUTDOWN:
		case SCTP_SHUTDOWN_ACK:
			pd->sctp_flags |= PFDESC_SCTP_SHUTDOWN;
			break;
		case SCTP_SHUTDOWN_COMPLETE:
			pd->sctp_flags |= PFDESC_SCTP_SHUTDOWN_COMPLETE;
			break;
		case SCTP_COOKIE_ECHO:
			pd->sctp_flags |= PFDESC_SCTP_COOKIE;
			break;
		case SCTP_COOKIE_ACK:
			pd->sctp_flags |= PFDESC_SCTP_COOKIE_ACK;
			break;
		case SCTP_DATA:
			pd->sctp_flags |= PFDESC_SCTP_DATA;
			break;
		case SCTP_HEARTBEAT_REQUEST:
			pd->sctp_flags |= PFDESC_SCTP_HEARTBEAT;
			break;
		case SCTP_HEARTBEAT_ACK:
			pd->sctp_flags |= PFDESC_SCTP_HEARTBEAT_ACK;
			break;
		case SCTP_ASCONF:
			pd->sctp_flags |= PFDESC_SCTP_ASCONF;

			ret = pf_multihome_scan_asconf(pd->off + chunk_start,
			    ntohs(ch.chunk_length), pd);
			if (ret != PF_PASS)
				return (ret);
			break;
		default:
			pd->sctp_flags |= PFDESC_SCTP_OTHER;
			break;
		}
	}

	/* Validate chunk lengths vs. packet length. */
	if (pd->off + chunk_off != pd->tot_len)
		return (PF_DROP);

	/*
	 * INIT, INIT_ACK and SHUTDOWN_COMPLETE chunks must always be the
	 * only chunk in a packet.
	 */
	if ((pd->sctp_flags & PFDESC_SCTP_INIT) &&
	    (pd->sctp_flags & ~PFDESC_SCTP_INIT))
		return (PF_DROP);
	if ((pd->sctp_flags & PFDESC_SCTP_INIT_ACK) &&
	    (pd->sctp_flags & ~PFDESC_SCTP_INIT_ACK))
		return (PF_DROP);
	if ((pd->sctp_flags & PFDESC_SCTP_SHUTDOWN_COMPLETE) &&
	    (pd->sctp_flags & ~PFDESC_SCTP_SHUTDOWN_COMPLETE))
		return (PF_DROP);
	if ((pd->sctp_flags & PFDESC_SCTP_ABORT) &&
	    (pd->sctp_flags & PFDESC_SCTP_DATA)) {
		/*
		 * RFC 4960, Section 3.3.7: DATA chunks MUST NOT be
		 * bundled with ABORT.
		 */
		return (PF_DROP);
	}

	return (PF_PASS);
}

int
pf_normalize_sctp(struct pf_pdesc *pd)
{
	struct pf_krule *r, *rm = NULL;
	struct sctphdr *sh = &pd->hdr.sctp;
	u_short reason;
	sa_family_t af = pd->af;
	int srs;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	/* Check if there are any scrub rules.  The absence of scrub rules
	 * means packet normalization is enforced, just as in OpenBSD. */
	srs = (r != NULL);
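	/* Walk the scrub rules; the precomputed skip pointers jump past
	 * consecutive rules that would fail the same criterion, so
	 * mismatches are cheap. */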
	while (r != NULL) {
		pf_counter_u64_add(&r->evaluations, 1);
		if (pfi_kkif_match(r->kif, pd->kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP];
		else if (r->direction && r->direction != pd->dir)
			r = r->skip[PF_SKIP_DIR];
		else if (r->af && r->af != af)
			r = r->skip[PF_SKIP_AF];
		else if (r->proto && r->proto != pd->proto)
			r = r->skip[PF_SKIP_PROTO];
		else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
		    r->src.neg, pd->kif, M_GETFIB(pd->m)))
			r = r->skip[PF_SKIP_SRC_ADDR];
		else if (r->src.port_op && !pf_match_port(r->src.port_op,
		    r->src.port[0], r->src.port[1], sh->src_port))
			r = r->skip[PF_SKIP_SRC_PORT];
		else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
		    r->dst.neg, NULL, M_GETFIB(pd->m)))
			r = r->skip[PF_SKIP_DST_ADDR];
		else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
		    r->dst.port[0], r->dst.port[1], sh->dest_port))
			r = r->skip[PF_SKIP_DST_PORT];
		else {
			rm = r;
			break;
		}
	}

	if (srs) {
		/* With scrub rules present, SCTP normalization happens only
		 * if one of the rules has matched and it is not a "no scrub"
		 * rule. */
		if (rm == NULL || rm->action == PF_NOSCRUB)
			return (PF_PASS);

		pf_counter_u64_critical_enter();
		pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT],
		    1);
		pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT],
		    pd->tot_len);
		pf_counter_u64_critical_exit();
	}

	/* Verify that the chunk area is a multiple of 4 bytes long. */
	if ((pd->tot_len - pd->off - sizeof(struct sctphdr)) % 4)
		goto sctp_drop;

	/* An INIT chunk needs to be the only chunk. */
	if (pd->sctp_flags & PFDESC_SCTP_INIT)
		if (pd->sctp_flags & ~PFDESC_SCTP_INIT)
			goto sctp_drop;

	return (PF_PASS);

sctp_drop:
	REASON_SET(&reason, PFRES_NORM);
	if (rm != NULL && r->log)
		PFLOG_PACKET(PF_DROP, reason, r, NULL, NULL, pd, 1);

	return (PF_DROP);
}

#if defined(INET) || defined(INET6)
void
pf_scrub(struct pf_pdesc *pd)
{
	struct ip *h = mtod(pd->m, struct ip *);
#ifdef INET6
	struct ip6_hdr *h6 = mtod(pd->m, struct ip6_hdr *);
#endif
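	/*
	 * Each IPv4 header rewrite below adjusts ip_sum incrementally with
	 * pf_cksum_fixup() over the changed 16-bit word (cf. RFC 1624),
	 * rather than recomputing the checksum over the whole header.
	 */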
	/* Clear IP_DF if no-df was requested */
	if (pd->af == AF_INET && pd->act.flags & PFSTATE_NODF &&
	    h->ip_off & htons(IP_DF)) {
		u_int16_t ip_off = h->ip_off;

		h->ip_off &= htons(~IP_DF);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
	}

	/* Enforce a minimum ttl, may cause endless packet loops */
	if (pd->af == AF_INET && pd->act.min_ttl &&
	    h->ip_ttl < pd->act.min_ttl) {
		u_int16_t ip_ttl = h->ip_ttl;

		h->ip_ttl = pd->act.min_ttl;
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_ttl, h->ip_ttl, 0);
	}
#ifdef INET6
	/* Enforce a minimum hop limit, may cause endless packet loops */
	if (pd->af == AF_INET6 && pd->act.min_ttl &&
	    h6->ip6_hlim < pd->act.min_ttl)
		h6->ip6_hlim = pd->act.min_ttl;
#endif
	/* Enforce tos */
	if (pd->act.flags & PFSTATE_SETTOS) {
		switch (pd->af) {
		case AF_INET: {
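			/*
			 * The checksum fixup operates on the first 16-bit
			 * word of the header (version/ihl plus TOS), so
			 * ov/nv capture it before and after the rewrite.
			 * Only the DSCP bits change; the two ECN bits are
			 * preserved.
			 */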
			u_int16_t ov, nv;

			ov = *(u_int16_t *)h;
			h->ip_tos = pd->act.set_tos | (h->ip_tos &
			    IPTOS_ECN_MASK);
			nv = *(u_int16_t *)h;

			h->ip_sum = pf_cksum_fixup(h->ip_sum, ov, nv, 0);
			break;
		}
#ifdef INET6
		case AF_INET6:
			h6->ip6_flow &= IPV6_FLOWLABEL_MASK |
			    IPV6_VERSION_MASK;
			h6->ip6_flow |= htonl((pd->act.set_tos |
			    IPV6_ECN(h6)) << 20);
			break;
#endif
		}
	}

	/* random-id, but not for fragments */
#ifdef INET
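	/*
	 * ip_off with everything but DF clear means a zero fragment offset
	 * and no MF flag, i.e. an unfragmented packet; only those get a
	 * fresh random IP id.
	 */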
	if (pd->af == AF_INET &&
	    pd->act.flags & PFSTATE_RANDOMID && !(h->ip_off & ~htons(IP_DF))) {
		uint16_t ip_id = h->ip_id;

		ip_fillid(h);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_id, h->ip_id, 0);
	}
#endif
}
#endif