/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright 2001 Niels Provos <provos@citi.umich.edu>
 * Copyright 2011-2018 Alexander Bluhm <bluhm@openbsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *	$OpenBSD: pf_norm.c,v 1.114 2009/01/29 14:11:45 henning Exp $
 */

#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_pf.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/refcount.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_private.h>
#include <net/vnet.h>
#include <net/pfvar.h>
#include <net/if_pflog.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet6/in6_var.h>
#include <netinet6/nd6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/scope6_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/sctp_constants.h>
#include <netinet/sctp_header.h>

#ifdef INET6
#include <netinet/ip6.h>
#endif /* INET6 */

struct pf_frent {
	TAILQ_ENTRY(pf_frent) fr_next;
	struct mbuf	*fe_m;
	uint16_t	fe_hdrlen;	/* ipv4 header length with ip options
					   ipv6, extension, fragment header */
	uint16_t	fe_extoff;	/* last extension header offset or 0 */
	uint16_t	fe_len;		/* fragment length */
	uint16_t	fe_off;		/* fragment offset */
	uint16_t	fe_mff;		/* more fragment flag */
};

struct pf_fragment_cmp {
	struct pf_addr	frc_src;
	struct pf_addr	frc_dst;
	uint32_t	frc_id;
	sa_family_t	frc_af;
	uint8_t		frc_proto;
};

struct pf_fragment {
	struct pf_fragment_cmp	fr_key;
#define fr_src	fr_key.frc_src
#define fr_dst	fr_key.frc_dst
#define fr_id	fr_key.frc_id
#define fr_af	fr_key.frc_af
#define fr_proto	fr_key.frc_proto

	/* pointers to queue element */
	struct pf_frent	*fr_firstoff[PF_FRAG_ENTRY_POINTS];
	/* count entries between pointers */
	uint8_t		fr_entries[PF_FRAG_ENTRY_POINTS];
	RB_ENTRY(pf_fragment) fr_entry;
	TAILQ_ENTRY(pf_fragment) frag_next;
	uint32_t	fr_timeout;
	TAILQ_HEAD(pf_fragq, pf_frent) fr_queue;
	uint16_t	fr_maxlen;	/* maximum length of single fragment */
	u_int16_t	fr_holes;	/* number of holes in the queue */
};

VNET_DEFINE_STATIC(struct mtx, pf_frag_mtx);
#define V_pf_frag_mtx		VNET(pf_frag_mtx)
#define PF_FRAG_LOCK()		mtx_lock(&V_pf_frag_mtx)
#define PF_FRAG_UNLOCK()	mtx_unlock(&V_pf_frag_mtx)
#define PF_FRAG_ASSERT()	mtx_assert(&V_pf_frag_mtx, MA_OWNED)

VNET_DEFINE(uma_zone_t, pf_state_scrub_z);	/* XXX: shared with pfsync */

VNET_DEFINE_STATIC(uma_zone_t, pf_frent_z);
#define	V_pf_frent_z	VNET(pf_frent_z)
VNET_DEFINE_STATIC(uma_zone_t, pf_frag_z);
#define	V_pf_frag_z	VNET(pf_frag_z)

TAILQ_HEAD(pf_fragqueue, pf_fragment);
TAILQ_HEAD(pf_cachequeue, pf_fragment);
VNET_DEFINE_STATIC(struct pf_fragqueue, pf_fragqueue);
#define	V_pf_fragqueue	VNET(pf_fragqueue)
RB_HEAD(pf_frag_tree, pf_fragment);
VNET_DEFINE_STATIC(struct pf_frag_tree, pf_frag_tree);
#define	V_pf_frag_tree	VNET(pf_frag_tree)
static int	pf_frag_compare(struct pf_fragment *,
		    struct pf_fragment *);
static RB_PROTOTYPE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
static RB_GENERATE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);

static void	pf_flush_fragments(void);
static void	pf_free_fragment(struct pf_fragment *);

static struct pf_frent *pf_create_fragment(u_short *);
static int	pf_frent_holes(struct pf_frent *frent);
static struct pf_fragment *pf_find_fragment(struct pf_fragment_cmp *key,
		    struct pf_frag_tree *tree);
static inline int	pf_frent_index(struct pf_frent *);
static int	pf_frent_insert(struct pf_fragment *,
		    struct pf_frent *, struct pf_frent *);
void		pf_frent_remove(struct pf_fragment *,
		    struct pf_frent *);
struct pf_frent *pf_frent_previous(struct pf_fragment *,
		    struct pf_frent *);
static struct pf_fragment *pf_fillup_fragment(struct pf_fragment_cmp *,
		    struct pf_frent *, u_short *);
static struct mbuf *pf_join_fragment(struct pf_fragment *);
#ifdef INET
static int	pf_reassemble(struct mbuf **, int, u_short *);
#endif	/* INET */
#ifdef INET6
static int	pf_reassemble6(struct mbuf **,
		    struct ip6_frag *, uint16_t, uint16_t, u_short *);
#endif	/* INET6 */

#define	DPFPRINTF(x) do {				\
	if (V_pf_status.debug >= PF_DEBUG_MISC) {	\
		printf("%s: ", __func__);		\
		printf x ;				\
	}						\
} while(0)
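
/*
 * Illustrative usage note (not in the original sources): the doubled
 * parentheses pass a complete printf() argument list through the single
 * macro parameter, e.g.
 *
 *	DPFPRINTF(("frag %d, holes %d\n", frag->fr_id, frag->fr_holes));
 */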

#ifdef INET
static void
pf_ip2key(struct ip *ip, int dir, struct pf_fragment_cmp *key)
{

	key->frc_src.v4 = ip->ip_src;
	key->frc_dst.v4 = ip->ip_dst;
	key->frc_af = AF_INET;
	key->frc_proto = ip->ip_p;
	key->frc_id = ip->ip_id;
}
#endif	/* INET */

void
pf_normalize_init(void)
{

	V_pf_frag_z = uma_zcreate("pf frags", sizeof(struct pf_fragment),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	V_pf_frent_z = uma_zcreate("pf frag entries", sizeof(struct pf_frent),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	V_pf_state_scrub_z = uma_zcreate("pf state scrubs",
	    sizeof(struct pf_state_scrub), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);

	mtx_init(&V_pf_frag_mtx, "pf fragments", NULL, MTX_DEF);

	V_pf_limits[PF_LIMIT_FRAGS].zone = V_pf_frent_z;
	V_pf_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT;
	uma_zone_set_max(V_pf_frent_z, PFFRAG_FRENT_HIWAT);
	uma_zone_set_warning(V_pf_frent_z, "PF frag entries limit reached");

	TAILQ_INIT(&V_pf_fragqueue);
}

void
pf_normalize_cleanup(void)
{

	uma_zdestroy(V_pf_state_scrub_z);
	uma_zdestroy(V_pf_frent_z);
	uma_zdestroy(V_pf_frag_z);

	mtx_destroy(&V_pf_frag_mtx);
}

static int
pf_frag_compare(struct pf_fragment *a, struct pf_fragment *b)
{
	int	diff;

	if ((diff = a->fr_id - b->fr_id) != 0)
		return (diff);
	if ((diff = a->fr_proto - b->fr_proto) != 0)
		return (diff);
	if ((diff = a->fr_af - b->fr_af) != 0)
		return (diff);
	if ((diff = pf_addr_cmp(&a->fr_src, &b->fr_src, a->fr_af)) != 0)
		return (diff);
	if ((diff = pf_addr_cmp(&a->fr_dst, &b->fr_dst, a->fr_af)) != 0)
		return (diff);
	return (0);
}

void
pf_purge_expired_fragments(void)
{
	u_int32_t	expire = time_uptime -
	    V_pf_default_rule.timeout[PFTM_FRAG];

	pf_purge_fragments(expire);
}

void
pf_purge_fragments(uint32_t expire)
{
	struct pf_fragment	*frag;

	PF_FRAG_LOCK();
	while ((frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue)) != NULL) {
		if (frag->fr_timeout > expire)
			break;

		DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
		pf_free_fragment(frag);
	}

	PF_FRAG_UNLOCK();
}

/*
 * Try to flush old fragments to make space for new ones
 */
static void
pf_flush_fragments(void)
{
	struct pf_fragment	*frag;
	int			 goal;

	PF_FRAG_ASSERT();

	goal = uma_zone_get_cur(V_pf_frent_z) * 9 / 10;
	DPFPRINTF(("trying to free %d frag entries\n", goal));
	while (goal < uma_zone_get_cur(V_pf_frent_z)) {
		frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue);
		if (frag)
			pf_free_fragment(frag);
		else
			break;
	}
}

/*
 * Remove a fragment from the fragment queue, free its fragment entries,
 * and free the fragment itself.
 */
static void
pf_free_fragment(struct pf_fragment *frag)
{
	struct pf_frent	*frent;

	PF_FRAG_ASSERT();

	RB_REMOVE(pf_frag_tree, &V_pf_frag_tree, frag);
	TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);

	/* Free all fragment entries */
	while ((frent = TAILQ_FIRST(&frag->fr_queue)) != NULL) {
		TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);

		m_freem(frent->fe_m);
		uma_zfree(V_pf_frent_z, frent);
	}

	uma_zfree(V_pf_frag_z, frag);
}

static struct pf_fragment *
pf_find_fragment(struct pf_fragment_cmp *key, struct pf_frag_tree *tree)
{
	struct pf_fragment	*frag;

	PF_FRAG_ASSERT();

	frag = RB_FIND(pf_frag_tree, tree, (struct pf_fragment *)key);
	if (frag != NULL) {
		TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);
		TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next);
	}

	return (frag);
}

static struct pf_frent *
pf_create_fragment(u_short *reason)
{
	struct pf_frent	*frent;

	PF_FRAG_ASSERT();

	frent = uma_zalloc(V_pf_frent_z, M_NOWAIT);
	if (frent == NULL) {
		pf_flush_fragments();
		frent = uma_zalloc(V_pf_frent_z, M_NOWAIT);
		if (frent == NULL) {
			REASON_SET(reason, PFRES_MEMORY);
			return (NULL);
		}
	}

	return (frent);
}

/*
 * Calculate the additional holes that were created in the fragment
 * queue by inserting this fragment. A fragment in the middle
 * creates one more hole by splitting. For each connected side,
 * it loses one hole.
 * Fragment entry must be in the queue when calling this function.
 */
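/*
 * Worked example (illustrative): with fragments [0,8) and [24,32) queued,
 * the datagram has two holes. Inserting [8,16) starts at holes = 1, loses
 * one hole because it attaches to its predecessor (8 == 0 + 8), and keeps
 * the remaining gap towards [24,32), so the total is unchanged. Inserting
 * [8,24) instead would connect on both sides and return -1, closing a hole.
 */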
static int
pf_frent_holes(struct pf_frent *frent)
{
	struct pf_frent *prev = TAILQ_PREV(frent, pf_fragq, fr_next);
	struct pf_frent *next = TAILQ_NEXT(frent, fr_next);
	int holes = 1;

	if (prev == NULL) {
		if (frent->fe_off == 0)
			holes--;
	} else {
		KASSERT(frent->fe_off != 0, ("frent->fe_off != 0"));
		if (frent->fe_off == prev->fe_off + prev->fe_len)
			holes--;
	}
	if (next == NULL) {
		if (!frent->fe_mff)
			holes--;
	} else {
		KASSERT(frent->fe_mff, ("frent->fe_mff"));
		if (next->fe_off == frent->fe_off + frent->fe_len)
			holes--;
	}
	return holes;
}

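/*
 * Example (illustrative) for pf_frent_index() below: with
 * PF_FRAG_ENTRY_POINTS == 16, each entry point covers 0x10000 / 16 == 4096
 * octets, so a fragment at offset 5000 maps to index 5000 / 4096 == 1.
 */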
static inline int
pf_frent_index(struct pf_frent *frent)
{
	/*
	 * We have an array of 16 entry points to the queue. A full size
	 * 65535 octet IP packet can have 8192 fragments. So the queue
	 * traversal length is at most 512 and at most 16 entry points are
	 * checked. We need 128 additional bytes on a 64 bit architecture.
	 */
	CTASSERT(((u_int16_t)0xffff &~ 7) / (0x10000 / PF_FRAG_ENTRY_POINTS) ==
	    16 - 1);
	CTASSERT(((u_int16_t)0xffff >> 3) / PF_FRAG_ENTRY_POINTS == 512 - 1);

	return frent->fe_off / (0x10000 / PF_FRAG_ENTRY_POINTS);
}

static int
pf_frent_insert(struct pf_fragment *frag, struct pf_frent *frent,
    struct pf_frent *prev)
{
	int	index;

	CTASSERT(PF_FRAG_ENTRY_LIMIT <= 0xff);

	/*
	 * A packet has at most 65536 octets. With 16 entry points, each one
	 * spans 4096 octets. We limit these to 64 fragments each, which
	 * means on average every fragment must have at least 64 octets.
	 */
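	/*
	 * Worked numbers (illustrative, assuming PF_FRAG_ENTRY_POINTS == 16
	 * and PF_FRAG_ENTRY_LIMIT == 64): 65536 / 16 == 4096 octets per
	 * entry point, and 4096 / 64 == 64 octets of minimum average
	 * fragment size before the limit below rejects the insert.
	 */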
	index = pf_frent_index(frent);
	if (frag->fr_entries[index] >= PF_FRAG_ENTRY_LIMIT)
		return ENOBUFS;
	frag->fr_entries[index]++;

	if (prev == NULL) {
		TAILQ_INSERT_HEAD(&frag->fr_queue, frent, fr_next);
	} else {
		KASSERT(prev->fe_off + prev->fe_len <= frent->fe_off,
		    ("overlapping fragment"));
		TAILQ_INSERT_AFTER(&frag->fr_queue, prev, frent, fr_next);
	}

	if (frag->fr_firstoff[index] == NULL) {
		KASSERT(prev == NULL || pf_frent_index(prev) < index,
		    ("prev == NULL || pf_frent_index(prev) < index"));
		frag->fr_firstoff[index] = frent;
	} else {
		if (frent->fe_off < frag->fr_firstoff[index]->fe_off) {
			KASSERT(prev == NULL || pf_frent_index(prev) < index,
			    ("prev == NULL || pf_frent_index(prev) < index"));
			frag->fr_firstoff[index] = frent;
		} else {
			KASSERT(prev != NULL, ("prev != NULL"));
			KASSERT(pf_frent_index(prev) == index,
			    ("pf_frent_index(prev) == index"));
		}
	}

	frag->fr_holes += pf_frent_holes(frent);

	return 0;
}

void
pf_frent_remove(struct pf_fragment *frag, struct pf_frent *frent)
{
#ifdef INVARIANTS
	struct pf_frent *prev = TAILQ_PREV(frent, pf_fragq, fr_next);
#endif	/* INVARIANTS */
	struct pf_frent *next = TAILQ_NEXT(frent, fr_next);
	int index;

	frag->fr_holes -= pf_frent_holes(frent);

	index = pf_frent_index(frent);
	KASSERT(frag->fr_firstoff[index] != NULL, ("frent not found"));
	if (frag->fr_firstoff[index]->fe_off == frent->fe_off) {
		if (next == NULL) {
			frag->fr_firstoff[index] = NULL;
		} else {
			KASSERT(frent->fe_off + frent->fe_len <= next->fe_off,
			    ("overlapping fragment"));
			if (pf_frent_index(next) == index) {
				frag->fr_firstoff[index] = next;
			} else {
				frag->fr_firstoff[index] = NULL;
			}
		}
	} else {
		KASSERT(frag->fr_firstoff[index]->fe_off < frent->fe_off,
		    ("frag->fr_firstoff[index]->fe_off < frent->fe_off"));
		KASSERT(prev != NULL, ("prev != NULL"));
		KASSERT(prev->fe_off + prev->fe_len <= frent->fe_off,
		    ("overlapping fragment"));
		KASSERT(pf_frent_index(prev) == index,
		    ("pf_frent_index(prev) == index"));
	}

	TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);

	KASSERT(frag->fr_entries[index] > 0, ("No fragments remaining"));
	frag->fr_entries[index]--;
}

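/*
 * Illustrative walk (assuming 4096 octet entry points): to find the
 * predecessor of a fragment at offset 5000, the search starts at
 * fr_firstoff[1], falls forward to the first non-empty entry point if that
 * one is empty, steps back once if its first entry already lies beyond
 * 5000, and otherwise walks the list forward to the last entry whose
 * offset is still <= 5000.
 */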
struct pf_frent *
pf_frent_previous(struct pf_fragment *frag, struct pf_frent *frent)
{
	struct pf_frent *prev, *next;
	int index;

	/*
	 * If there are no fragments after frent, take the final one. Assume
	 * that the global queue is not empty.
	 */
	prev = TAILQ_LAST(&frag->fr_queue, pf_fragq);
	KASSERT(prev != NULL, ("prev != NULL"));
	if (prev->fe_off <= frent->fe_off)
		return prev;
	/*
	 * We want to find a fragment entry that is before frent, but still
	 * close to it. Find the first fragment entry that is in the same
	 * entry point or in the first entry point after that. As we have
	 * already checked that there are entries behind frent, this will
	 * succeed.
	 */
	for (index = pf_frent_index(frent); index < PF_FRAG_ENTRY_POINTS;
	    index++) {
		prev = frag->fr_firstoff[index];
		if (prev != NULL)
			break;
	}
	KASSERT(prev != NULL, ("prev != NULL"));
	/*
	 * In prev we may have a fragment from the same entry point that is
	 * before frent, or one that is just one position behind frent.
	 * In the latter case, we go back one step and have the predecessor.
	 * There may be none if the new fragment will be the first one.
	 */
	if (prev->fe_off > frent->fe_off) {
		prev = TAILQ_PREV(prev, pf_fragq, fr_next);
		if (prev == NULL)
			return NULL;
		KASSERT(prev->fe_off <= frent->fe_off,
		    ("prev->fe_off <= frent->fe_off"));
		return prev;
	}
	/*
	 * prev now holds the first fragment of the entry point, and frent's
	 * offset lies beyond it. Find the closest previous fragment.
	 */
	for (next = TAILQ_NEXT(prev, fr_next); next != NULL;
	    next = TAILQ_NEXT(next, fr_next)) {
		if (next->fe_off > frent->fe_off)
			break;
		prev = next;
	}
	return prev;
}

static struct pf_fragment *
pf_fillup_fragment(struct pf_fragment_cmp *key, struct pf_frent *frent,
    u_short *reason)
{
	struct pf_frent		*after, *next, *prev;
	struct pf_fragment	*frag;
	uint16_t		 total;

	PF_FRAG_ASSERT();

	/* No empty fragments. */
	if (frent->fe_len == 0) {
		DPFPRINTF(("bad fragment: len 0\n"));
		goto bad_fragment;
	}

	/* All fragments are 8 byte aligned. */
	if (frent->fe_mff && (frent->fe_len & 0x7)) {
		DPFPRINTF(("bad fragment: mff and len %d\n", frent->fe_len));
		goto bad_fragment;
	}

	/* Respect maximum length, IP_MAXPACKET == IPV6_MAXPACKET. */
	if (frent->fe_off + frent->fe_len > IP_MAXPACKET) {
		DPFPRINTF(("bad fragment: max packet %d\n",
		    frent->fe_off + frent->fe_len));
		goto bad_fragment;
	}

	DPFPRINTF((key->frc_af == AF_INET ?
	    "reass frag %d @ %d-%d\n" : "reass frag %#08x @ %d-%d\n",
	    key->frc_id, frent->fe_off, frent->fe_off + frent->fe_len));

	/* Fully buffer all of the fragments in this fragment queue. */
	frag = pf_find_fragment(key, &V_pf_frag_tree);

	/* Create a new reassembly queue for this packet. */
	if (frag == NULL) {
		frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
		if (frag == NULL) {
			pf_flush_fragments();
			frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
			if (frag == NULL) {
				REASON_SET(reason, PFRES_MEMORY);
				goto drop_fragment;
			}
		}

		*(struct pf_fragment_cmp *)frag = *key;
		memset(frag->fr_firstoff, 0, sizeof(frag->fr_firstoff));
		memset(frag->fr_entries, 0, sizeof(frag->fr_entries));
		frag->fr_timeout = time_uptime;
		TAILQ_INIT(&frag->fr_queue);
		frag->fr_maxlen = frent->fe_len;
		frag->fr_holes = 1;

		RB_INSERT(pf_frag_tree, &V_pf_frag_tree, frag);
		TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next);

		/* We do not have a previous fragment, cannot fail. */
		pf_frent_insert(frag, frent, NULL);

		return (frag);
	}

	KASSERT(!TAILQ_EMPTY(&frag->fr_queue), ("!TAILQ_EMPTY()->fr_queue"));

	/* Remember maximum fragment len for refragmentation. */
	if (frent->fe_len > frag->fr_maxlen)
		frag->fr_maxlen = frent->fe_len;

	/* Maximum data we have seen already. */
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
	    TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;

	/* Non-terminal fragments must have the more fragments flag. */
	if (frent->fe_off + frent->fe_len < total && !frent->fe_mff)
		goto bad_fragment;

	/* Check if we saw the last fragment already. */
	if (!TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_mff) {
		if (frent->fe_off + frent->fe_len > total ||
		    (frent->fe_off + frent->fe_len == total && frent->fe_mff))
			goto bad_fragment;
	} else {
		if (frent->fe_off + frent->fe_len == total && !frent->fe_mff)
			goto bad_fragment;
	}

	/* Find neighbors for newly inserted fragment */
	prev = pf_frent_previous(frag, frent);
	if (prev == NULL) {
		after = TAILQ_FIRST(&frag->fr_queue);
		KASSERT(after != NULL, ("after != NULL"));
	} else {
		after = TAILQ_NEXT(prev, fr_next);
	}

	if (prev != NULL && prev->fe_off + prev->fe_len > frent->fe_off) {
		uint16_t precut;

		if (frag->fr_af == AF_INET6)
			goto free_fragment;

		precut = prev->fe_off + prev->fe_len - frent->fe_off;
		if (precut >= frent->fe_len) {
			DPFPRINTF(("new frag overlapped\n"));
			goto drop_fragment;
		}
		DPFPRINTF(("frag head overlap %d\n", precut));
		m_adj(frent->fe_m, precut);
		frent->fe_off += precut;
		frent->fe_len -= precut;
	}

	for (; after != NULL && frent->fe_off + frent->fe_len > after->fe_off;
	    after = next) {
		uint16_t aftercut;

		aftercut = frent->fe_off + frent->fe_len - after->fe_off;
		if (aftercut < after->fe_len) {
			DPFPRINTF(("frag tail overlap %d\n", aftercut));
			m_adj(after->fe_m, aftercut);
			/* Fragment may switch queue as fe_off changes */
			pf_frent_remove(frag, after);
			after->fe_off += aftercut;
			after->fe_len -= aftercut;
			/* Insert into correct queue */
			if (pf_frent_insert(frag, after, prev)) {
				DPFPRINTF(
				    ("fragment requeue limit exceeded\n"));
				m_freem(after->fe_m);
				uma_zfree(V_pf_frent_z, after);
				/* There is no way to recover */
				goto free_fragment;
			}
			break;
		}

		/* This fragment is completely overlapped, lose it. */
		DPFPRINTF(("old frag overlapped\n"));
		next = TAILQ_NEXT(after, fr_next);
		pf_frent_remove(frag, after);
		m_freem(after->fe_m);
		uma_zfree(V_pf_frent_z, after);
	}

	/* If part of the queue gets too long, there is no way to recover. */
	if (pf_frent_insert(frag, frent, prev)) {
		DPFPRINTF(("fragment queue limit exceeded\n"));
		goto bad_fragment;
	}

	return (frag);

free_fragment:
	/*
	 * RFC 5722, Errata 3089: When reassembling an IPv6 datagram, if one
	 * or more of its constituent fragments is determined to be an
	 * overlapping fragment, the entire datagram (and any constituent
	 * fragments) MUST be silently discarded.
	 */
	DPFPRINTF(("flush overlapping fragments\n"));
	pf_free_fragment(frag);

bad_fragment:
	REASON_SET(reason, PFRES_FRAG);
drop_fragment:
	uma_zfree(V_pf_frent_z, frent);
	return (NULL);
}

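/*
 * Join all fragment entries into a single mbuf chain. Note that m_adj(9)
 * with a negative length trims bytes from the tail of the chain, which is
 * how per-fragment trailing bytes are removed below.
 */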
static struct mbuf *
pf_join_fragment(struct pf_fragment *frag)
{
	struct mbuf *m, *m2;
	struct pf_frent	*frent;

	frent = TAILQ_FIRST(&frag->fr_queue);
	TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);

	m = frent->fe_m;
	if ((frent->fe_hdrlen + frent->fe_len) < m->m_pkthdr.len)
		m_adj(m, (frent->fe_hdrlen + frent->fe_len) - m->m_pkthdr.len);
	uma_zfree(V_pf_frent_z, frent);
	while ((frent = TAILQ_FIRST(&frag->fr_queue)) != NULL) {
		TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);

		m2 = frent->fe_m;
		/* Strip off ip header. */
		m_adj(m2, frent->fe_hdrlen);
		/* Strip off any trailing bytes. */
		if (frent->fe_len < m2->m_pkthdr.len)
			m_adj(m2, frent->fe_len - m2->m_pkthdr.len);

		uma_zfree(V_pf_frent_z, frent);
		m_cat(m, m2);
	}

	/* Remove from fragment queue. */
	pf_free_fragment(frag);

	return (m);
}

#ifdef INET
static int
pf_reassemble(struct mbuf **m0, int dir, u_short *reason)
{
	struct mbuf		*m = *m0;
	struct ip		*ip = mtod(m, struct ip *);
	struct pf_frent		*frent;
	struct pf_fragment	*frag;
	struct m_tag		*mtag;
	struct pf_fragment_tag	*ftag;
	struct pf_fragment_cmp	 key;
	uint16_t		 total, hdrlen;
	uint32_t		 frag_id;
	uint16_t		 maxlen;

	/* Get an entry for the fragment queue */
	if ((frent = pf_create_fragment(reason)) == NULL)
		return (PF_DROP);

	frent->fe_m = m;
	frent->fe_hdrlen = ip->ip_hl << 2;
	frent->fe_extoff = 0;
	frent->fe_len = ntohs(ip->ip_len) - (ip->ip_hl << 2);
	frent->fe_off = (ntohs(ip->ip_off) & IP_OFFMASK) << 3;
	frent->fe_mff = ntohs(ip->ip_off) & IP_MF;

	pf_ip2key(ip, dir, &key);

	if ((frag = pf_fillup_fragment(&key, frent, reason)) == NULL)
		return (PF_DROP);

	/* The mbuf is part of the fragment entry, no direct free or access */
	m = *m0 = NULL;

	if (frag->fr_holes) {
		DPFPRINTF(("frag %d, holes %d\n", frag->fr_id, frag->fr_holes));
		return (PF_PASS); /* drop because *m0 is NULL, no error */
	}

	/* We have all the data */
	frent = TAILQ_FIRST(&frag->fr_queue);
	KASSERT(frent != NULL, ("frent != NULL"));
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
	    TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
	hdrlen = frent->fe_hdrlen;

	maxlen = frag->fr_maxlen;
	frag_id = frag->fr_id;
	m = *m0 = pf_join_fragment(frag);
	frag = NULL;

	if (m->m_flags & M_PKTHDR) {
		int plen = 0;
		for (m = *m0; m; m = m->m_next)
			plen += m->m_len;
		m = *m0;
		m->m_pkthdr.len = plen;
	}

	if ((mtag = m_tag_get(PACKET_TAG_PF_REASSEMBLED,
	    sizeof(struct pf_fragment_tag), M_NOWAIT)) == NULL) {
		REASON_SET(reason, PFRES_SHORT);
		/* PF_DROP requires a valid mbuf *m0 in pf_test() */
		return (PF_DROP);
	}
	ftag = (struct pf_fragment_tag *)(mtag + 1);
	ftag->ft_hdrlen = hdrlen;
	ftag->ft_extoff = 0;
	ftag->ft_maxlen = maxlen;
	ftag->ft_id = frag_id;
	m_tag_prepend(m, mtag);

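	/*
	 * Descriptive note: pf_cksum_fixup() applies the incremental
	 * internet checksum update of RFC 1624, so only the 16-bit words
	 * changed below (ip_len and ip_off) are folded into ip_sum instead
	 * of recomputing the whole header checksum.
	 */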
	ip = mtod(m, struct ip *);
	ip->ip_sum = pf_cksum_fixup(ip->ip_sum, ip->ip_len,
	    htons(hdrlen + total), 0);
	ip->ip_len = htons(hdrlen + total);
	ip->ip_sum = pf_cksum_fixup(ip->ip_sum, ip->ip_off,
	    ip->ip_off & ~(IP_MF|IP_OFFMASK), 0);
	ip->ip_off &= ~(IP_MF|IP_OFFMASK);

	if (hdrlen + total > IP_MAXPACKET) {
		DPFPRINTF(("drop: too big: %d\n", total));
		ip->ip_len = 0;
		REASON_SET(reason, PFRES_SHORT);
		/* PF_DROP requires a valid mbuf *m0 in pf_test() */
		return (PF_DROP);
	}

	DPFPRINTF(("complete: %p(%d)\n", m, ntohs(ip->ip_len)));
	return (PF_PASS);
}
#endif	/* INET */

#ifdef INET6
static int
pf_reassemble6(struct mbuf **m0, struct ip6_frag *fraghdr,
    uint16_t hdrlen, uint16_t extoff, u_short *reason)
{
	struct mbuf		*m = *m0;
	struct ip6_hdr		*ip6 = mtod(m, struct ip6_hdr *);
	struct pf_frent		*frent;
	struct pf_fragment	*frag;
	struct pf_fragment_cmp	 key;
	struct m_tag		*mtag;
	struct pf_fragment_tag	*ftag;
	int			 off;
	uint32_t		 frag_id;
	uint16_t		 total, maxlen;
	uint8_t			 proto;

	PF_FRAG_LOCK();

	/* Get an entry for the fragment queue. */
	if ((frent = pf_create_fragment(reason)) == NULL) {
		PF_FRAG_UNLOCK();
		return (PF_DROP);
	}

	frent->fe_m = m;
	frent->fe_hdrlen = hdrlen;
	frent->fe_extoff = extoff;
	frent->fe_len = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - hdrlen;
	frent->fe_off = ntohs(fraghdr->ip6f_offlg & IP6F_OFF_MASK);
	frent->fe_mff = fraghdr->ip6f_offlg & IP6F_MORE_FRAG;

	key.frc_src.v6 = ip6->ip6_src;
	key.frc_dst.v6 = ip6->ip6_dst;
	key.frc_af = AF_INET6;
	/* Only the first fragment's protocol is relevant. */
	key.frc_proto = 0;
	key.frc_id = fraghdr->ip6f_ident;

	if ((frag = pf_fillup_fragment(&key, frent, reason)) == NULL) {
		PF_FRAG_UNLOCK();
		return (PF_DROP);
	}

	/* The mbuf is part of the fragment entry, no direct free or access. */
	m = *m0 = NULL;

	if (frag->fr_holes) {
		DPFPRINTF(("frag %d, holes %d\n", frag->fr_id,
		    frag->fr_holes));
		PF_FRAG_UNLOCK();
		return (PF_PASS); /* Drop because *m0 is NULL, no error. */
	}

	/* We have all the data. */
	frent = TAILQ_FIRST(&frag->fr_queue);
	KASSERT(frent != NULL, ("frent != NULL"));
	extoff = frent->fe_extoff;
	maxlen = frag->fr_maxlen;
	frag_id = frag->fr_id;
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
	    TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
	hdrlen = frent->fe_hdrlen - sizeof(struct ip6_frag);

	m = *m0 = pf_join_fragment(frag);
	frag = NULL;

	PF_FRAG_UNLOCK();

	/* Take protocol from first fragment header. */
	m = m_getptr(m, hdrlen + offsetof(struct ip6_frag, ip6f_nxt), &off);
	KASSERT(m, ("%s: short mbuf chain", __func__));
	proto = *(mtod(m, uint8_t *) + off);
	m = *m0;

	/* Delete frag6 header */
	if (ip6_deletefraghdr(m, hdrlen, M_NOWAIT) != 0)
		goto fail;

	if (m->m_flags & M_PKTHDR) {
		int plen = 0;
		for (m = *m0; m; m = m->m_next)
			plen += m->m_len;
		m = *m0;
		m->m_pkthdr.len = plen;
	}

	if ((mtag = m_tag_get(PACKET_TAG_PF_REASSEMBLED,
	    sizeof(struct pf_fragment_tag), M_NOWAIT)) == NULL)
		goto fail;
	ftag = (struct pf_fragment_tag *)(mtag + 1);
	ftag->ft_hdrlen = hdrlen;
	ftag->ft_extoff = extoff;
	ftag->ft_maxlen = maxlen;
	ftag->ft_id = frag_id;
	m_tag_prepend(m, mtag);

	ip6 = mtod(m, struct ip6_hdr *);
	ip6->ip6_plen = htons(hdrlen - sizeof(struct ip6_hdr) + total);
	if (extoff) {
		/* Write protocol into next field of last extension header. */
		m = m_getptr(m, extoff + offsetof(struct ip6_ext, ip6e_nxt),
		    &off);
		KASSERT(m, ("%s: short mbuf chain", __func__));
		*(mtod(m, char *) + off) = proto;
		m = *m0;
	} else
		ip6->ip6_nxt = proto;

	if (hdrlen - sizeof(struct ip6_hdr) + total > IPV6_MAXPACKET) {
		DPFPRINTF(("drop: too big: %d\n", total));
		ip6->ip6_plen = 0;
		REASON_SET(reason, PFRES_SHORT);
		/* PF_DROP requires a valid mbuf *m0 in pf_test6(). */
		return (PF_DROP);
	}

	DPFPRINTF(("complete: %p(%d)\n", m, ntohs(ip6->ip6_plen)));
	return (PF_PASS);

fail:
	REASON_SET(reason, PFRES_MEMORY);
	/* PF_DROP requires a valid mbuf *m0 in pf_test6(), will free later. */
	return (PF_DROP);
}
#endif	/* INET6 */

#ifdef INET6
int
pf_max_frag_size(struct mbuf *m)
{
	struct m_tag *tag;
	struct pf_fragment_tag *ftag;

	tag = m_tag_find(m, PACKET_TAG_PF_REASSEMBLED, NULL);
	if (tag == NULL)
		return (m->m_pkthdr.len);

	ftag = (struct pf_fragment_tag *)(tag + 1);

	return (ftag->ft_maxlen);
}

int
pf_refragment6(struct ifnet *ifp, struct mbuf **m0, struct m_tag *mtag,
    struct ifnet *rt, bool forward)
{
	struct mbuf		*m = *m0, *t;
	struct ip6_hdr		*hdr;
	struct pf_fragment_tag	*ftag = (struct pf_fragment_tag *)(mtag + 1);
	struct pf_pdesc		 pd;
	uint32_t		 frag_id;
	uint16_t		 hdrlen, extoff, maxlen;
	uint8_t			 proto;
	int			 error, action;

	hdrlen = ftag->ft_hdrlen;
	extoff = ftag->ft_extoff;
	maxlen = ftag->ft_maxlen;
	frag_id = ftag->ft_id;
	m_tag_delete(m, mtag);
	mtag = NULL;
	ftag = NULL;

	if (extoff) {
		int off;

		/* Use protocol from next field of last extension header */
		m = m_getptr(m, extoff + offsetof(struct ip6_ext, ip6e_nxt),
		    &off);
		KASSERT((m != NULL), ("pf_refragment6: short mbuf chain"));
		proto = *(mtod(m, uint8_t *) + off);
		*(mtod(m, char *) + off) = IPPROTO_FRAGMENT;
		m = *m0;
	} else {
		hdr = mtod(m, struct ip6_hdr *);
		proto = hdr->ip6_nxt;
		hdr->ip6_nxt = IPPROTO_FRAGMENT;
	}

	/* In case of link-local traffic we'll need a scope set. */
	hdr = mtod(m, struct ip6_hdr *);

	in6_setscope(&hdr->ip6_src, ifp, NULL);
	in6_setscope(&hdr->ip6_dst, ifp, NULL);

	/* The MTU must be a multiple of 8 bytes, or we risk doing the
	 * fragmentation wrong. */
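	/* Illustrative: a recorded maximum of 1453 octets rounds down to 1448. */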
	maxlen = maxlen & ~7;

	/*
	 * Maxlen may be less than 8 if there was only a single
	 * fragment. As it was fragmented before, add a fragment
	 * header also for a single fragment. If total or maxlen
	 * is less than 8, ip6_fragment() will return EMSGSIZE and
	 * we drop the packet.
	 */
	error = ip6_fragment(ifp, m, hdrlen, proto, maxlen, frag_id);
	m = (*m0)->m_nextpkt;
	(*m0)->m_nextpkt = NULL;
	if (error == 0) {
		/* The first mbuf contains the unfragmented packet. */
		m_freem(*m0);
		*m0 = NULL;
		action = PF_PASS;
	} else {
		/* Drop expects an mbuf to free. */
		DPFPRINTF(("refragment error %d\n", error));
		action = PF_DROP;
	}
	for (; m; m = t) {
		t = m->m_nextpkt;
		m->m_nextpkt = NULL;
		m->m_flags |= M_SKIP_FIREWALL;
		memset(&pd, 0, sizeof(pd));
		pd.pf_mtag = pf_find_mtag(m);
		if (error != 0) {
			m_freem(m);
			continue;
		}
		if (rt != NULL) {
			struct sockaddr_in6	dst;
			hdr = mtod(m, struct ip6_hdr *);

			bzero(&dst, sizeof(dst));
			dst.sin6_family = AF_INET6;
			dst.sin6_len = sizeof(dst);
			dst.sin6_addr = hdr->ip6_dst;

			if (m->m_pkthdr.len <= if_getmtu(ifp)) {
				nd6_output_ifp(rt, rt, m, &dst, NULL);
			} else {
				in6_ifstat_inc(ifp, ifs6_in_toobig);
				icmp6_error(m, ICMP6_PACKET_TOO_BIG, 0,
				    if_getmtu(ifp));
			}
		} else if (forward) {
			MPASS(m->m_pkthdr.rcvif != NULL);
			ip6_forward(m, 0);
		} else {
			(void)ip6_output(m, NULL, NULL, 0, NULL, NULL,
			    NULL);
		}
	}

	return (action);
}
#endif	/* INET6 */

#ifdef INET
int
pf_normalize_ip(u_short *reason, struct pf_pdesc *pd)
{
	struct pf_krule		*r;
	struct ip		*h = mtod(pd->m, struct ip *);
	int			 mff = (ntohs(h->ip_off) & IP_MF);
	int			 hlen = h->ip_hl << 2;
	u_int16_t		 fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
	u_int16_t		 max;
	int			 ip_len;
	int			 tag = -1;
	int			 verdict;
	bool			 scrub_compat;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	/*
	 * Check if there are any scrub rules, matching or not.
	 * Lack of scrub rules means:
	 *  - enforced packet normalization operation just like in OpenBSD
	 *  - fragment reassembly depends on V_pf_status.reass
	 * With scrub rules:
	 *  - packet normalization is performed if there is a matching scrub rule
	 *  - fragment reassembly is performed if the matching rule has no
	 *    PFRULE_FRAGMENT_NOREASS flag
	 */
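	/*
	 * Illustrative pf.conf counterparts (assumed syntax, see pf.conf(5)):
	 *
	 *	set reassemble yes			# no-scrub-rules case
	 *	scrub in all fragment reassemble	# matching scrub rule
	 *	scrub in all fragment reassemble no-df	# additionally clears IP_DF
	 */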
	scrub_compat = (r != NULL);
	while (r != NULL) {
		pf_counter_u64_add(&r->evaluations, 1);
		if (pfi_kkif_match(r->kif, pd->kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP];
		else if (r->direction && r->direction != pd->dir)
			r = r->skip[PF_SKIP_DIR];
		else if (r->af && r->af != AF_INET)
			r = r->skip[PF_SKIP_AF];
		else if (r->proto && r->proto != h->ip_p)
			r = r->skip[PF_SKIP_PROTO];
		else if (PF_MISMATCHAW(&r->src.addr,
		    (struct pf_addr *)&h->ip_src.s_addr, AF_INET,
		    r->src.neg, pd->kif, M_GETFIB(pd->m)))
			r = r->skip[PF_SKIP_SRC_ADDR];
		else if (PF_MISMATCHAW(&r->dst.addr,
		    (struct pf_addr *)&h->ip_dst.s_addr, AF_INET,
		    r->dst.neg, NULL, M_GETFIB(pd->m)))
			r = r->skip[PF_SKIP_DST_ADDR];
		else if (r->match_tag && !pf_match_tag(pd->m, r, &tag,
		    pd->pf_mtag ? pd->pf_mtag->tag : 0))
			r = TAILQ_NEXT(r, entries);
		else
			break;
	}

	if (scrub_compat) {
		/* With scrub rules present IPv4 normalization happens only
		 * if one of the rules has matched and it's not a "no scrub"
		 * rule */
		if (r == NULL || r->action == PF_NOSCRUB)
			return (PF_PASS);

		pf_counter_u64_critical_enter();
		pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1);
		pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT], pd->tot_len);
		pf_counter_u64_critical_exit();
		pf_rule_to_actions(r, &pd->act);
	}

	/* Check for illegal packets */
	if (hlen < (int)sizeof(struct ip)) {
		REASON_SET(reason, PFRES_NORM);
		goto drop;
	}

	if (hlen > ntohs(h->ip_len)) {
		REASON_SET(reason, PFRES_NORM);
		goto drop;
	}

	/* Clear IP_DF if the rule uses the no-df option or we're in no-df mode */
	if (((!scrub_compat && V_pf_status.reass & PF_REASS_NODF) ||
	    (r != NULL && r->rule_flag & PFRULE_NODF)) &&
	    (h->ip_off & htons(IP_DF))
	    ) {
		u_int16_t ip_off = h->ip_off;

		h->ip_off &= htons(~IP_DF);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
	}

	/* We will need other tests here */
	if (!fragoff && !mff)
		goto no_fragment;

	/* We're dealing with a fragment now. Don't allow fragments
	 * with IP_DF to enter the cache. If the flag was cleared by
	 * no-df above, fine. Otherwise drop it.
	 */
	if (h->ip_off & htons(IP_DF)) {
		DPFPRINTF(("IP_DF\n"));
		goto bad;
	}

	ip_len = ntohs(h->ip_len) - hlen;

	/* All fragments are 8 byte aligned */
	if (mff && (ip_len & 0x7)) {
		DPFPRINTF(("mff and %d\n", ip_len));
		goto bad;
	}

	/* Respect maximum length */
	if (fragoff + ip_len > IP_MAXPACKET) {
		DPFPRINTF(("max packet %d\n", fragoff + ip_len));
		goto bad;
	}

	if ((!scrub_compat && V_pf_status.reass) ||
	    (r != NULL && !(r->rule_flag & PFRULE_FRAGMENT_NOREASS))
	    ) {
		max = fragoff + ip_len;

		/* Fully buffer all of the fragments
		 * Might return a completely reassembled mbuf, or NULL */
		PF_FRAG_LOCK();
		DPFPRINTF(("reass frag %d @ %d-%d\n", h->ip_id, fragoff, max));
		verdict = pf_reassemble(&pd->m, pd->dir, reason);
		PF_FRAG_UNLOCK();

		if (verdict != PF_PASS)
			return (PF_DROP);

		if (pd->m == NULL)
			return (PF_DROP);

		h = mtod(pd->m, struct ip *);
		pd->tot_len = ntohs(h->ip_len);

no_fragment:
		/* At this point, only IP_DF is allowed in ip_off */
		if (h->ip_off & ~htons(IP_DF)) {
			u_int16_t ip_off = h->ip_off;

			h->ip_off &= htons(IP_DF);
			h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
		}
	}

	return (PF_PASS);

bad:
	DPFPRINTF(("dropping bad fragment\n"));
	REASON_SET(reason, PFRES_FRAG);
drop:
	if (r != NULL && r->log)
		PFLOG_PACKET(PF_DROP, *reason, r, NULL, NULL, pd, 1, NULL);

	return (PF_DROP);
}
#endif

#ifdef INET6
int
pf_normalize_ip6(int off, u_short *reason,
    struct pf_pdesc *pd)
{
	struct pf_krule		*r;
	struct ip6_hdr		*h;
	struct ip6_frag		 frag;
	bool			 scrub_compat;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	/*
	 * Check if there are any scrub rules, matching or not.
	 * Lack of scrub rules means:
	 *  - enforced packet normalization operation just like in OpenBSD
	 * With scrub rules:
	 *  - packet normalization is performed if there is a matching scrub rule
	 * XXX: Fragment reassembly always performed for IPv6!
	 */
	scrub_compat = (r != NULL);
	while (r != NULL) {
		pf_counter_u64_add(&r->evaluations, 1);
		if (pfi_kkif_match(r->kif, pd->kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP];
		else if (r->direction && r->direction != pd->dir)
			r = r->skip[PF_SKIP_DIR];
		else if (r->af && r->af != AF_INET6)
			r = r->skip[PF_SKIP_AF];
		else if (r->proto && r->proto != pd->proto)
			r = r->skip[PF_SKIP_PROTO];
		else if (PF_MISMATCHAW(&r->src.addr,
		    (struct pf_addr *)&pd->src, AF_INET6,
		    r->src.neg, pd->kif, M_GETFIB(pd->m)))
			r = r->skip[PF_SKIP_SRC_ADDR];
		else if (PF_MISMATCHAW(&r->dst.addr,
		    (struct pf_addr *)&pd->dst, AF_INET6,
		    r->dst.neg, NULL, M_GETFIB(pd->m)))
			r = r->skip[PF_SKIP_DST_ADDR];
		else
			break;
	}

	if (scrub_compat) {
		/* With scrub rules present IPv6 normalization happens only
		 * if one of the rules has matched and it's not a "no scrub"
		 * rule */
		if (r == NULL || r->action == PF_NOSCRUB)
			return (PF_PASS);

		pf_counter_u64_critical_enter();
		pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1);
		pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT], pd->tot_len);
		pf_counter_u64_critical_exit();
		pf_rule_to_actions(r, &pd->act);
	}

	if (!pf_pull_hdr(pd->m, off, &frag, sizeof(frag), NULL, reason, AF_INET6))
		return (PF_DROP);

	/* Offset now points to data portion. */
	off += sizeof(frag);

	if (pd->virtual_proto == PF_VPROTO_FRAGMENT) {
		/* Returns PF_DROP or *m0 is NULL or completely reassembled
		 * mbuf. */
		if (pf_reassemble6(&pd->m, &frag, off, pd->extoff, reason) != PF_PASS)
			return (PF_DROP);
		if (pd->m == NULL)
			return (PF_DROP);
		h = mtod(pd->m, struct ip6_hdr *);
		pd->tot_len = ntohs(h->ip6_plen) + sizeof(struct ip6_hdr);
	}

	return (PF_PASS);
}
#endif	/* INET6 */

int
pf_normalize_tcp(struct pf_pdesc *pd)
{
	struct pf_krule	*r, *rm = NULL;
	struct tcphdr	*th = &pd->hdr.tcp;
	int		 rewrite = 0;
	u_short		 reason;
	u_int16_t	 flags;
	sa_family_t	 af = pd->af;
	int		 srs;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	/* Check if there are any scrub rules. Lack of scrub rules means
	 * enforced packet normalization operation just like in OpenBSD. */
	srs = (r != NULL);
	while (r != NULL) {
		pf_counter_u64_add(&r->evaluations, 1);
		if (pfi_kkif_match(r->kif, pd->kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP];
		else if (r->direction && r->direction != pd->dir)
			r = r->skip[PF_SKIP_DIR];
		else if (r->af && r->af != af)
			r = r->skip[PF_SKIP_AF];
		else if (r->proto && r->proto != pd->proto)
			r = r->skip[PF_SKIP_PROTO];
		else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
		    r->src.neg, pd->kif, M_GETFIB(pd->m)))
			r = r->skip[PF_SKIP_SRC_ADDR];
		else if (r->src.port_op && !pf_match_port(r->src.port_op,
		    r->src.port[0], r->src.port[1], th->th_sport))
			r = r->skip[PF_SKIP_SRC_PORT];
		else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
		    r->dst.neg, NULL, M_GETFIB(pd->m)))
			r = r->skip[PF_SKIP_DST_ADDR];
		else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
		    r->dst.port[0], r->dst.port[1], th->th_dport))
			r = r->skip[PF_SKIP_DST_PORT];
		else if (r->os_fingerprint != PF_OSFP_ANY && !pf_osfp_match(
		    pf_osfp_fingerprint(pd, th),
		    r->os_fingerprint))
			r = TAILQ_NEXT(r, entries);
		else {
			rm = r;
			break;
		}
	}

	if (srs) {
		/* With scrub rules present TCP normalization happens only
		 * if one of the rules has matched and it's not a "no scrub"
		 * rule */
		if (rm == NULL || rm->action == PF_NOSCRUB)
			return (PF_PASS);

		pf_counter_u64_critical_enter();
		pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1);
		pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT], pd->tot_len);
		pf_counter_u64_critical_exit();
		pf_rule_to_actions(rm, &pd->act);
	}

	if (rm && rm->rule_flag & PFRULE_REASSEMBLE_TCP)
		pd->flags |= PFDESC_TCP_NORM;

	flags = tcp_get_flags(th);
	if (flags & TH_SYN) {
		/* Illegal packet */
		if (flags & TH_RST)
			goto tcp_drop;

		if (flags & TH_FIN)
			goto tcp_drop;
	} else {
		/* Illegal packet */
		if (!(flags & (TH_ACK|TH_RST)))
			goto tcp_drop;
	}

	if (!(flags & TH_ACK)) {
		/* These flags are only valid if ACK is set */
		if ((flags & TH_FIN) || (flags & TH_PUSH) || (flags & TH_URG))
			goto tcp_drop;
	}

	/* Check for illegal header length */
	if (th->th_off < (sizeof(struct tcphdr) >> 2))
		goto tcp_drop;

	/* If flags changed, or reserved data set, then adjust */
	if (flags != tcp_get_flags(th) ||
	    (tcp_get_flags(th) & (TH_RES1|TH_RES2|TH_RES3)) != 0) {
		u_int16_t	ov, nv;

		ov = *(u_int16_t *)(&th->th_ack + 1);
		flags &= ~(TH_RES1 | TH_RES2 | TH_RES3);
		tcp_set_flags(th, flags);
		nv = *(u_int16_t *)(&th->th_ack + 1);

		th->th_sum = pf_proto_cksum_fixup(pd->m, th->th_sum, ov, nv, 0);
		rewrite = 1;
	}

	/* Remove urgent pointer, if TH_URG is not set */
	if (!(flags & TH_URG) && th->th_urp) {
		th->th_sum = pf_proto_cksum_fixup(pd->m, th->th_sum, th->th_urp,
		    0, 0);
		th->th_urp = 0;
		rewrite = 1;
	}

	/* copy back packet headers if we sanitized */
	if (rewrite)
		m_copyback(pd->m, pd->off, sizeof(*th), (caddr_t)th);

	return (PF_PASS);

tcp_drop:
	REASON_SET(&reason, PFRES_NORM);
	if (rm != NULL && r->log)
		PFLOG_PACKET(PF_DROP, reason, r, NULL, NULL, pd, 1, NULL);
	return (PF_DROP);
}

int
pf_normalize_tcp_init(struct pf_pdesc *pd, struct tcphdr *th,
    struct pf_state_peer *src)
{
	u_int32_t tsval, tsecr;
	u_int8_t hdr[60];
	u_int8_t *opt;

	KASSERT((src->scrub == NULL),
	    ("pf_normalize_tcp_init: src->scrub != NULL"));

	src->scrub = uma_zalloc(V_pf_state_scrub_z, M_ZERO | M_NOWAIT);
	if (src->scrub == NULL)
		return (1);

	switch (pd->af) {
#ifdef INET
	case AF_INET: {
		struct ip *h = mtod(pd->m, struct ip *);
		src->scrub->pfss_ttl = h->ip_ttl;
		break;
	}
#endif	/* INET */
#ifdef INET6
	case AF_INET6: {
		struct ip6_hdr *h = mtod(pd->m, struct ip6_hdr *);
		src->scrub->pfss_ttl = h->ip6_hlim;
		break;
	}
#endif	/* INET6 */
	default:
		unhandled_af(pd->af);
	}

	/*
	 * All normalizations below are only begun if we see the start of
	 * the connections. They must all set an enabled bit in pfss_flags
	 */
	if ((tcp_get_flags(th) & TH_SYN) == 0)
		return (0);

	if (th->th_off > (sizeof(struct tcphdr) >> 2) && src->scrub &&
	    pf_pull_hdr(pd->m, pd->off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
		/* Diddle with TCP options */
		int hlen;
		opt = hdr + sizeof(struct tcphdr);
		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
		while (hlen >= TCPOLEN_TIMESTAMP) {
			switch (*opt) {
			case TCPOPT_EOL:	/* FALLTHROUGH */
			case TCPOPT_NOP:
				opt++;
				hlen--;
				break;
			case TCPOPT_TIMESTAMP:
				if (opt[1] >= TCPOLEN_TIMESTAMP) {
					src->scrub->pfss_flags |=
					    PFSS_TIMESTAMP;
					src->scrub->pfss_ts_mod =
					    arc4random();

					/* note PFSS_PAWS not set yet */
					memcpy(&tsval, &opt[2],
					    sizeof(u_int32_t));
					memcpy(&tsecr, &opt[6],
					    sizeof(u_int32_t));
					src->scrub->pfss_tsval0 = ntohl(tsval);
					src->scrub->pfss_tsval = ntohl(tsval);
					src->scrub->pfss_tsecr = ntohl(tsecr);
					getmicrouptime(&src->scrub->pfss_last);
				}
				/* FALLTHROUGH */
			default:
				hlen -= MAX(opt[1], 2);
				opt += MAX(opt[1], 2);
				break;
			}
		}
	}

	return (0);
}

void
pf_normalize_tcp_cleanup(struct pf_kstate *state)
{
	/* XXX Note: this also cleans up SCTP. */
	uma_zfree(V_pf_state_scrub_z, state->src.scrub);
	uma_zfree(V_pf_state_scrub_z, state->dst.scrub);

	/* Someday... flush the TCP segment reassembly descriptors. */
}

int
pf_normalize_sctp_init(struct pf_pdesc *pd, struct pf_state_peer *src,
    struct pf_state_peer *dst)
{
	src->scrub = uma_zalloc(V_pf_state_scrub_z, M_ZERO | M_NOWAIT);
	if (src->scrub == NULL)
		return (1);

	dst->scrub = uma_zalloc(V_pf_state_scrub_z, M_ZERO | M_NOWAIT);
	if (dst->scrub == NULL) {
		uma_zfree(V_pf_state_scrub_z, src->scrub);
		return (1);
	}

	dst->scrub->pfss_v_tag = pd->sctp_initiate_tag;

	return (0);
}

int
pf_normalize_tcp_stateful(struct pf_pdesc *pd,
    u_short *reason, struct tcphdr *th, struct pf_kstate *state,
    struct pf_state_peer *src, struct pf_state_peer *dst, int *writeback)
{
	struct timeval uptime;
	u_int32_t tsval, tsecr;
	u_int tsval_from_last;
	u_int8_t hdr[60];
	u_int8_t *opt;
	int copyback = 0;
	int got_ts = 0;
	size_t startoff;

	KASSERT((src->scrub || dst->scrub),
	    ("%s: src->scrub && dst->scrub!", __func__));

	/*
	 * Enforce the minimum TTL seen for this connection. Negate a common
	 * technique to evade an intrusion detection system and confuse
	 * firewall state code.
	 */
	switch (pd->af) {
#ifdef INET
	case AF_INET: {
		if (src->scrub) {
			struct ip *h = mtod(pd->m, struct ip *);
			if (h->ip_ttl > src->scrub->pfss_ttl)
				src->scrub->pfss_ttl = h->ip_ttl;
			h->ip_ttl = src->scrub->pfss_ttl;
		}
		break;
	}
#endif	/* INET */
#ifdef INET6
	case AF_INET6: {
		if (src->scrub) {
			struct ip6_hdr *h = mtod(pd->m, struct ip6_hdr *);
			if (h->ip6_hlim > src->scrub->pfss_ttl)
				src->scrub->pfss_ttl = h->ip6_hlim;
			h->ip6_hlim = src->scrub->pfss_ttl;
		}
		break;
	}
#endif	/* INET6 */
	default:
		unhandled_af(pd->af);
	}

	if (th->th_off > (sizeof(struct tcphdr) >> 2) &&
	    ((src->scrub && (src->scrub->pfss_flags & PFSS_TIMESTAMP)) ||
	    (dst->scrub && (dst->scrub->pfss_flags & PFSS_TIMESTAMP))) &&
	    pf_pull_hdr(pd->m, pd->off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
		/* Diddle with TCP options */
		int hlen;
		opt = hdr + sizeof(struct tcphdr);
		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
		while (hlen >= TCPOLEN_TIMESTAMP) {
			startoff = opt - (hdr + sizeof(struct tcphdr));
			switch (*opt) {
			case TCPOPT_EOL:	/* FALLTHROUGH */
			case TCPOPT_NOP:
				opt++;
				hlen--;
				break;
			case TCPOPT_TIMESTAMP:
				/* Modulate the timestamps. Can be used for
				 * NAT detection, OS uptime determination or
				 * reboot detection.
				 */

				if (got_ts) {
					/* Huh? Multiple timestamps!? */
					if (V_pf_status.debug >= PF_DEBUG_MISC) {
						DPFPRINTF(("multiple TS??\n"));
						pf_print_state(state);
						printf("\n");
					}
					REASON_SET(reason, PFRES_TS);
					return (PF_DROP);
				}
				if (opt[1] >= TCPOLEN_TIMESTAMP) {
					memcpy(&tsval, &opt[2],
					    sizeof(u_int32_t));
					if (tsval && src->scrub &&
					    (src->scrub->pfss_flags &
					    PFSS_TIMESTAMP)) {
						tsval = ntohl(tsval);
						pf_patch_32(pd,
						    &opt[2],
						    htonl(tsval +
						    src->scrub->pfss_ts_mod),
						    PF_ALGNMNT(startoff));
						copyback = 1;
					}

					/* Modulate TS reply iff valid (!0) */
					memcpy(&tsecr, &opt[6],
					    sizeof(u_int32_t));
					if (tsecr && dst->scrub &&
					    (dst->scrub->pfss_flags &
					    PFSS_TIMESTAMP)) {
						tsecr = ntohl(tsecr)
						    - dst->scrub->pfss_ts_mod;
						pf_patch_32(pd,
						    &opt[6],
						    htonl(tsecr),
						    PF_ALGNMNT(startoff));
						copyback = 1;
					}
					got_ts = 1;
				}
				/* FALLTHROUGH */
			default:
				hlen -= MAX(opt[1], 2);
				opt += MAX(opt[1], 2);
				break;
			}
		}
		if (copyback) {
			/* Copyback the options, caller copies back header */
			*writeback = 1;
			m_copyback(pd->m, pd->off + sizeof(struct tcphdr),
			    (th->th_off << 2) - sizeof(struct tcphdr), hdr +
			    sizeof(struct tcphdr));
		}
	}

	/*
	 * Must invalidate PAWS checks on connections idle for too long.
	 * The fastest allowed timestamp clock is 1ms. That turns out to
	 * be about 24 days before it wraps. XXX Right now our lowerbound
	 * TS echo check only works for the first 12 days of a connection
	 * when the TS has exhausted half its 32bit space
	 */
#define TS_MAX_IDLE	(24*24*60*60)
#define TS_MAX_CONN	(12*24*60*60)	/* XXX remove when better tsecr check */
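
	/*
	 * Arithmetic behind the limits (illustrative): a 1ms timestamp clock
	 * wraps its 32 bit space after 2^32 ms ~= 49.7 days, and the
	 * SEQ_GT()/SEQ_LT() style comparisons below are only meaningful
	 * across half of that space, i.e. ~24.8 days, hence the 24 day idle
	 * cutoff; the tsecr lowerbound check covers only the first half of
	 * that usable window, hence the 12 day connection limit.
	 */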
1685
1686 getmicrouptime(&uptime);
1687 if (src->scrub && (src->scrub->pfss_flags & PFSS_PAWS) &&
1688 (uptime.tv_sec - src->scrub->pfss_last.tv_sec > TS_MAX_IDLE ||
1689 time_uptime - (state->creation / 1000) > TS_MAX_CONN)) {
1690 if (V_pf_status.debug >= PF_DEBUG_MISC) {
1691 DPFPRINTF(("src idled out of PAWS\n"));
1692 pf_print_state(state);
1693 printf("\n");
1694 }
1695 src->scrub->pfss_flags = (src->scrub->pfss_flags & ~PFSS_PAWS)
1696 | PFSS_PAWS_IDLED;
1697 }
1698 if (dst->scrub && (dst->scrub->pfss_flags & PFSS_PAWS) &&
1699 uptime.tv_sec - dst->scrub->pfss_last.tv_sec > TS_MAX_IDLE) {
1700 if (V_pf_status.debug >= PF_DEBUG_MISC) {
1701 DPFPRINTF(("dst idled out of PAWS\n"));
1702 pf_print_state(state);
1703 printf("\n");
1704 }
1705 dst->scrub->pfss_flags = (dst->scrub->pfss_flags & ~PFSS_PAWS)
1706 | PFSS_PAWS_IDLED;
1707 }
1708
1709 if (got_ts && src->scrub && dst->scrub &&
1710 (src->scrub->pfss_flags & PFSS_PAWS) &&
1711 (dst->scrub->pfss_flags & PFSS_PAWS)) {
1712 /* Validate that the timestamps are "in-window".
1713 * RFC1323 describes TCP Timestamp options that allow
1714 * measurement of RTT (round trip time) and PAWS
1715 * (protection against wrapped sequence numbers). PAWS
1716 * gives us a set of rules for rejecting packets on
1717 * long fat pipes (packets that were somehow delayed
1718 * in transit longer than the time it took to send the
1719 * full TCP sequence space of 4Gb). We can use these
1720 * rules and infer a few others that will let us treat
1721 * the 32bit timestamp and the 32bit echoed timestamp
1722 * as sequence numbers to prevent a blind attacker from
1723 * inserting packets into a connection.
1724 *
1725 * RFC1323 tells us:
1726 * - The timestamp on this packet must be greater than
1727 * or equal to the last value echoed by the other
1728 * endpoint. The RFC says those will be discarded
1729 * since it is a dup that has already been acked.
1730 * This gives us a lowerbound on the timestamp.
1731 * timestamp >= other last echoed timestamp
1732 * - The timestamp will be less than or equal to
1733 * the last timestamp plus the time between the
1734 * last packet and now. The RFC defines the max
1735 * clock rate as 1ms. We will allow clocks to be
1736 * up to 10% fast and will allow a total difference
1737 * or 30 seconds due to a route change. And this
1738 * gives us an upperbound on the timestamp.
1739 * timestamp <= last timestamp + max ticks
1740 * We have to be careful here. Windows will send an
1741 * initial timestamp of zero and then initialize it
1742 * to a random value after the 3whs; presumably to
1743 * avoid a DoS by having to call an expensive RNG
1744 * during a SYN flood. Proof MS has at least one
1745 * good security geek.
1746 *
1747 * - The TCP timestamp option must also echo the other
1748 * endpoints timestamp. The timestamp echoed is the
1749 * one carried on the earliest unacknowledged segment
1750 * on the left edge of the sequence window. The RFC
1751 * states that the host will reject any echoed
1752 * timestamps that were larger than any ever sent.
1753 * This gives us an upperbound on the TS echo.
1754 * tescr <= largest_tsval
1755 * - The lowerbound on the TS echo is a little more
1756 * tricky to determine. The other endpoint's echoed
1757 * values will not decrease. But there may be
1758 * network conditions that re-order packets and
1759 * cause our view of them to decrease. For now the
1760 * only lowerbound we can safely determine is that
1761 * the TS echo will never be less than the original
1762 * TS. XXX There is probably a better lowerbound.
1763 * Remove TS_MAX_CONN with better lowerbound check.
1764 * tescr >= other original TS
1765 *
1766 * It is also important to note that the fastest
1767 * timestamp clock of 1ms will wrap its 32bit space in
1768 * 24 days. So we just disable TS checking after 24
1769 * days of idle time. We actually must use a 12d
1770 * connection limit until we can come up with a better
1771 * lowerbound to the TS echo check.
1772 */
		struct timeval delta_ts;
		int ts_fudge;

		/*
		 * PFTM_TS_DIFF is how many seconds of leeway to allow
		 * in a host's timestamp. Such a difference can arise
		 * if the previous packet was delayed in transit for
		 * much longer than this packet.
		 */
		if ((ts_fudge = state->rule->timeout[PFTM_TS_DIFF]) == 0)
			ts_fudge = V_pf_default_rule.timeout[PFTM_TS_DIFF];

		/* Calculate max ticks since the last timestamp */
#define TS_MAXFREQ	1100		/* RFC max TS freq of 1kHz + 10% skew */
#define TS_MICROSECS	1000000		/* microseconds per second */
		delta_ts = uptime;
		timevalsub(&delta_ts, &src->scrub->pfss_last);
		tsval_from_last = (delta_ts.tv_sec + ts_fudge) * TS_MAXFREQ;
		tsval_from_last += delta_ts.tv_usec / (TS_MICROSECS/TS_MAXFREQ);
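		/*
		 * Illustrative arithmetic (hypothetical numbers): after
		 * 60 seconds of idle time with a 30 second fudge, the
		 * peer's clock may have advanced by at most
		 * (60 + 30) * 1100 = 99000 ticks, plus the sub-second
		 * remainder converted at one tick per
		 * TS_MICROSECS / TS_MAXFREQ (~909) microseconds.
		 */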

		if ((src->state >= TCPS_ESTABLISHED &&
		    dst->state >= TCPS_ESTABLISHED) &&
		    (SEQ_LT(tsval, dst->scrub->pfss_tsecr) ||
		    SEQ_GT(tsval, src->scrub->pfss_tsval + tsval_from_last) ||
		    (tsecr && (SEQ_GT(tsecr, dst->scrub->pfss_tsval) ||
		    SEQ_LT(tsecr, dst->scrub->pfss_tsval0))))) {
			/* Bad RFC1323 implementation or an insertion attack.
			 *
			 * - Solaris 2.6 and 2.7 are known to send another ACK
			 *   after the FIN,FIN|ACK,ACK closing that carries
			 *   an old timestamp.
			 */

			DPFPRINTF(("Timestamp failed %c%c%c%c\n",
			    SEQ_LT(tsval, dst->scrub->pfss_tsecr) ? '0' : ' ',
			    SEQ_GT(tsval, src->scrub->pfss_tsval +
			    tsval_from_last) ? '1' : ' ',
			    SEQ_GT(tsecr, dst->scrub->pfss_tsval) ? '2' : ' ',
			    SEQ_LT(tsecr, dst->scrub->pfss_tsval0) ? '3' : ' '));
			DPFPRINTF((" tsval: %u tsecr: %u +ticks: %u "
			    "idle: %jus %lums\n",
			    tsval, tsecr, tsval_from_last,
			    (uintmax_t)delta_ts.tv_sec,
			    delta_ts.tv_usec / 1000));
			DPFPRINTF((" src->tsval: %u tsecr: %u\n",
			    src->scrub->pfss_tsval, src->scrub->pfss_tsecr));
			DPFPRINTF((" dst->tsval: %u tsecr: %u tsval0: %u"
			    "\n", dst->scrub->pfss_tsval,
			    dst->scrub->pfss_tsecr, dst->scrub->pfss_tsval0));
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
				pf_print_state(state);
				pf_print_flags(tcp_get_flags(th));
				printf("\n");
			}
			REASON_SET(reason, PFRES_TS);
			return (PF_DROP);
		}

		/* XXX I'd really like to require tsecr but it's optional */

	} else if (!got_ts && (tcp_get_flags(th) & TH_RST) == 0 &&
	    ((src->state == TCPS_ESTABLISHED && dst->state == TCPS_ESTABLISHED)
	    || pd->p_len > 0 || (tcp_get_flags(th) & TH_SYN)) &&
	    src->scrub && dst->scrub &&
	    (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (dst->scrub->pfss_flags & PFSS_PAWS)) {
		/* Didn't send a timestamp. Timestamps aren't really useful
		 * when:
		 * - connection opening or closing (often not even sent).
		 *   But we must not let an attacker put a FIN on a
		 *   data packet to sneak it through our ESTABLISHED check.
		 * - on a TCP reset. The RFC suggests not even looking at TS.
		 * - on an empty ACK. The TS will not be echoed so it will
		 *   probably not help keep the RTT calculation in sync and
		 *   there isn't as much danger when the sequence numbers
		 *   have wrapped. So some stacks don't include TS on empty
		 *   ACKs :-(
		 *
		 * To minimize the disruption to mostly RFC1323 conformant
		 * stacks, we will only require timestamps on data packets.
		 *
		 * And what do ya know, we cannot require timestamps on data
		 * packets. There appear to be devices that do legitimate
		 * TCP connection hijacking. There are HTTP devices that allow
		 * a 3whs (with timestamps) and then buffer the HTTP request.
		 * If the intermediate device has the HTTP response cached, it
		 * will spoof the response but not bother timestamping its
		 * packets. So we can look for the presence of a timestamp in
		 * the first data packet and, if present, require it in all
		 * future packets.
		 */

		if (pd->p_len > 0 && (src->scrub->pfss_flags & PFSS_DATA_TS)) {
			/*
			 * Hey! Someone tried to sneak a packet in. Or the
			 * stack changed its RFC1323 behavior?!?!
			 */
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
				DPFPRINTF(("Did not receive expected RFC1323 "
				    "timestamp\n"));
				pf_print_state(state);
				pf_print_flags(tcp_get_flags(th));
				printf("\n");
			}
			REASON_SET(reason, PFRES_TS);
			return (PF_DROP);
		}
	}

	/*
	 * We will note if a host sends its data packets with or without
	 * timestamps, and require all data packets to contain a timestamp
	 * if the first one does. PAWS implicitly requires that all data
	 * packets be timestamped. But I think there are middle-man devices
	 * that hijack TCP streams immediately after the 3whs and don't
	 * timestamp their packets (seen in a WWW accelerator or cache).
	 */
	if (pd->p_len > 0 && src->scrub && (src->scrub->pfss_flags &
	    (PFSS_TIMESTAMP|PFSS_DATA_TS|PFSS_DATA_NOTS)) == PFSS_TIMESTAMP) {
		if (got_ts)
			src->scrub->pfss_flags |= PFSS_DATA_TS;
		else {
			src->scrub->pfss_flags |= PFSS_DATA_NOTS;
			if (V_pf_status.debug >= PF_DEBUG_MISC && dst->scrub &&
			    (dst->scrub->pfss_flags & PFSS_TIMESTAMP)) {
				/* Don't warn if other host rejected RFC1323 */
				DPFPRINTF(("Broken RFC1323 stack did not "
				    "timestamp data packet. Disabled PAWS "
				    "security.\n"));
				pf_print_state(state);
				pf_print_flags(tcp_get_flags(th));
				printf("\n");
			}
		}
	}

	/*
	 * Update PAWS values
	 */
	if (got_ts && src->scrub && PFSS_TIMESTAMP == (src->scrub->pfss_flags &
	    (PFSS_PAWS_IDLED|PFSS_TIMESTAMP))) {
		getmicrouptime(&src->scrub->pfss_last);
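		/*
		 * Track the most recent tsval and tsecr seen from this
		 * host, compared in sequence-number order so that clock
		 * wraparound is tolerated. Until PAWS is armed, any
		 * value is accepted in order to bootstrap the tracking.
		 */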
		if (SEQ_GEQ(tsval, src->scrub->pfss_tsval) ||
		    (src->scrub->pfss_flags & PFSS_PAWS) == 0)
			src->scrub->pfss_tsval = tsval;

		if (tsecr) {
			if (SEQ_GEQ(tsecr, src->scrub->pfss_tsecr) ||
			    (src->scrub->pfss_flags & PFSS_PAWS) == 0)
				src->scrub->pfss_tsecr = tsecr;

			if ((src->scrub->pfss_flags & PFSS_PAWS) == 0 &&
			    (SEQ_LT(tsval, src->scrub->pfss_tsval0) ||
			    src->scrub->pfss_tsval0 == 0)) {
				/* tsval0 MUST be the lowest timestamp */
				src->scrub->pfss_tsval0 = tsval;
			}

			/* Only fully initialized after a TS gets echoed */
			if ((src->scrub->pfss_flags & PFSS_PAWS) == 0)
				src->scrub->pfss_flags |= PFSS_PAWS;
		}
	}

	/* I have a dream.... TCP segment reassembly.... */
	return (0);
}

int
pf_normalize_mss(struct pf_pdesc *pd)
{
	struct tcphdr *th = &pd->hdr.tcp;
	u_int16_t *mss;
	int thoff;
	int opt, cnt, optlen = 0;
	u_char opts[TCP_MAXOLEN];
	u_char *optp = opts;
	size_t startoff;

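	/*
	 * th_off counts 32-bit words, so shifting left by two yields
	 * the TCP header length in bytes; cnt is then the number of
	 * option bytes following the fixed header.
	 */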
	thoff = th->th_off << 2;
	cnt = thoff - sizeof(struct tcphdr);

	if (cnt <= 0 || cnt > MAX_TCPOPTLEN || !pf_pull_hdr(pd->m,
	    pd->off + sizeof(*th), opts, cnt, NULL, NULL, pd->af))
		return (0);

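	/*
	 * Walk the TCP options: EOL terminates the list, NOP is a
	 * single byte, and every other option carries a kind byte and
	 * a length byte that covers the whole option.
	 */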
	for (; cnt > 0; cnt -= optlen, optp += optlen) {
		startoff = optp - opts;
		opt = optp[0];
		if (opt == TCPOPT_EOL)
			break;
		if (opt == TCPOPT_NOP)
			optlen = 1;
		else {
			if (cnt < 2)
				break;
			optlen = optp[1];
			if (optlen < 2 || optlen > cnt)
				break;
		}
		switch (opt) {
		case TCPOPT_MAXSEG:
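			/*
			 * The 16-bit MSS value follows the option's kind
			 * and length bytes; clamp it when it exceeds the
			 * configured max-mss.
			 */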
			mss = (u_int16_t *)(optp + 2);
			if (ntohs(*mss) > pd->act.max_mss) {
				pf_patch_16(pd,
				    mss, htons(pd->act.max_mss),
				    PF_ALGNMNT(startoff));
				m_copyback(pd->m, pd->off + sizeof(*th),
				    thoff - sizeof(*th), opts);
				m_copyback(pd->m, pd->off, sizeof(*th),
				    (caddr_t)th);
			}
			break;
		default:
			break;
		}
	}

	return (0);
}

int
pf_scan_sctp(struct pf_pdesc *pd)
{
	struct sctp_chunkhdr ch = { };
	int chunk_off = sizeof(struct sctphdr);
	int chunk_start;
	int ret;

	while (pd->off + chunk_off < pd->tot_len) {
		if (!pf_pull_hdr(pd->m, pd->off + chunk_off, &ch, sizeof(ch),
		    NULL, NULL, pd->af))
			return (PF_DROP);

		/* The length includes the header, so it must be at least 4. */
		if (ntohs(ch.chunk_length) < 4)
			return (PF_DROP);

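		/*
		 * Chunks are padded to a 4-byte boundary (RFC 9260,
		 * Section 3.2), so the next chunk starts at the
		 * rounded-up offset even when chunk_length itself is
		 * not a multiple of 4.
		 */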
		chunk_start = chunk_off;
		chunk_off += roundup(ntohs(ch.chunk_length), 4);

		switch (ch.chunk_type) {
		case SCTP_INITIATION:
		case SCTP_INITIATION_ACK: {
			struct sctp_init_chunk init;

			if (!pf_pull_hdr(pd->m, pd->off + chunk_start, &init,
			    sizeof(init), NULL, NULL, pd->af))
				return (PF_DROP);

			/*
			 * RFC 9260, Section 3.3.2, "The Initiate Tag is
			 * allowed to have any value except 0."
			 */
			if (init.init.initiate_tag == 0)
				return (PF_DROP);
			if (init.init.num_inbound_streams == 0)
				return (PF_DROP);
			if (init.init.num_outbound_streams == 0)
				return (PF_DROP);
			if (ntohl(init.init.a_rwnd) < SCTP_MIN_RWND)
				return (PF_DROP);

			/*
			 * RFC 9260, Section 3.1: INIT chunks MUST have a
			 * zero verification tag.
			 */
			if (ch.chunk_type == SCTP_INITIATION &&
			    pd->hdr.sctp.v_tag != 0)
				return (PF_DROP);

			pd->sctp_initiate_tag = init.init.initiate_tag;

			if (ch.chunk_type == SCTP_INITIATION)
				pd->sctp_flags |= PFDESC_SCTP_INIT;
			else
				pd->sctp_flags |= PFDESC_SCTP_INIT_ACK;

			ret = pf_multihome_scan_init(pd->off + chunk_start,
			    ntohs(init.ch.chunk_length), pd);
			if (ret != PF_PASS)
				return (ret);

			break;
		}
		case SCTP_ABORT_ASSOCIATION:
			pd->sctp_flags |= PFDESC_SCTP_ABORT;
			break;
		case SCTP_SHUTDOWN:
		case SCTP_SHUTDOWN_ACK:
			pd->sctp_flags |= PFDESC_SCTP_SHUTDOWN;
			break;
		case SCTP_SHUTDOWN_COMPLETE:
			pd->sctp_flags |= PFDESC_SCTP_SHUTDOWN_COMPLETE;
			break;
		case SCTP_COOKIE_ECHO:
			pd->sctp_flags |= PFDESC_SCTP_COOKIE;
			break;
		case SCTP_COOKIE_ACK:
			pd->sctp_flags |= PFDESC_SCTP_COOKIE_ACK;
			break;
		case SCTP_DATA:
			pd->sctp_flags |= PFDESC_SCTP_DATA;
			break;
		case SCTP_HEARTBEAT_REQUEST:
			pd->sctp_flags |= PFDESC_SCTP_HEARTBEAT;
			break;
		case SCTP_HEARTBEAT_ACK:
			pd->sctp_flags |= PFDESC_SCTP_HEARTBEAT_ACK;
			break;
		case SCTP_ASCONF:
			pd->sctp_flags |= PFDESC_SCTP_ASCONF;

			ret = pf_multihome_scan_asconf(pd->off + chunk_start,
			    ntohs(ch.chunk_length), pd);
			if (ret != PF_PASS)
				return (ret);
			break;
		default:
			pd->sctp_flags |= PFDESC_SCTP_OTHER;
			break;
		}
	}

	/* Validate chunk lengths vs. packet length. */
	if (pd->off + chunk_off != pd->tot_len)
		return (PF_DROP);

	/*
	 * An INIT, INIT_ACK or SHUTDOWN_COMPLETE chunk must always be the
	 * only chunk in a packet.
	 */
	if ((pd->sctp_flags & PFDESC_SCTP_INIT) &&
	    (pd->sctp_flags & ~PFDESC_SCTP_INIT))
		return (PF_DROP);
	if ((pd->sctp_flags & PFDESC_SCTP_INIT_ACK) &&
	    (pd->sctp_flags & ~PFDESC_SCTP_INIT_ACK))
		return (PF_DROP);
	if ((pd->sctp_flags & PFDESC_SCTP_SHUTDOWN_COMPLETE) &&
	    (pd->sctp_flags & ~PFDESC_SCTP_SHUTDOWN_COMPLETE))
		return (PF_DROP);
	if ((pd->sctp_flags & PFDESC_SCTP_ABORT) &&
	    (pd->sctp_flags & PFDESC_SCTP_DATA)) {
		/*
		 * RFC 4960, Section 3.3.7: DATA chunks MUST NOT be
		 * bundled with ABORT.
		 */
		return (PF_DROP);
	}

	return (PF_PASS);
}

int
pf_normalize_sctp(struct pf_pdesc *pd)
{
	struct pf_krule *r, *rm = NULL;
	struct sctphdr *sh = &pd->hdr.sctp;
	u_short reason;
	sa_family_t af = pd->af;
	int srs;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	/* Check if there are any scrub rules. The absence of scrub rules
	 * means packet normalization is enforced unconditionally, just as
	 * in OpenBSD. */
	srs = (r != NULL);
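	/*
	 * The skip[] pointers let the evaluation below jump over runs of
	 * consecutive rules that are known to fail the same criterion,
	 * instead of testing every rule in turn.
	 */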
	while (r != NULL) {
		pf_counter_u64_add(&r->evaluations, 1);
		if (pfi_kkif_match(r->kif, pd->kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP];
		else if (r->direction && r->direction != pd->dir)
			r = r->skip[PF_SKIP_DIR];
		else if (r->af && r->af != af)
			r = r->skip[PF_SKIP_AF];
		else if (r->proto && r->proto != pd->proto)
			r = r->skip[PF_SKIP_PROTO];
		else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
		    r->src.neg, pd->kif, M_GETFIB(pd->m)))
			r = r->skip[PF_SKIP_SRC_ADDR];
		else if (r->src.port_op && !pf_match_port(r->src.port_op,
		    r->src.port[0], r->src.port[1], sh->src_port))
			r = r->skip[PF_SKIP_SRC_PORT];
		else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
		    r->dst.neg, NULL, M_GETFIB(pd->m)))
			r = r->skip[PF_SKIP_DST_ADDR];
		else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
		    r->dst.port[0], r->dst.port[1], sh->dest_port))
			r = r->skip[PF_SKIP_DST_PORT];
		else {
			rm = r;
			break;
		}
	}

	if (srs) {
		/* With scrub rules present, SCTP normalization happens only
		 * if one of the rules has matched and it is not a "no scrub"
		 * rule. */
		if (rm == NULL || rm->action == PF_NOSCRUB)
			return (PF_PASS);

		pf_counter_u64_critical_enter();
		pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT],
		    1);
		pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT],
		    pd->tot_len);
		pf_counter_u64_critical_exit();
	}

	/* Verify that the chunk area is a multiple of 4 bytes long. */
	if ((pd->tot_len - pd->off - sizeof(struct sctphdr)) % 4)
		goto sctp_drop;

	/* An INIT chunk needs to be the only chunk. */
	if (pd->sctp_flags & PFDESC_SCTP_INIT)
		if (pd->sctp_flags & ~PFDESC_SCTP_INIT)
			goto sctp_drop;

	return (PF_PASS);

sctp_drop:
	REASON_SET(&reason, PFRES_NORM);
	if (rm != NULL && r->log)
		PFLOG_PACKET(PF_DROP, reason, r, NULL, NULL, pd,
		    1, NULL);

	return (PF_DROP);
}

#if defined(INET) || defined(INET6)
void
pf_scrub(struct pf_pdesc *pd)
{
	struct ip *h = mtod(pd->m, struct ip *);
#ifdef INET6
	struct ip6_hdr *h6 = mtod(pd->m, struct ip6_hdr *);
#endif /* INET6 */

	/* Clear IP_DF if no-df was requested */
	if (pd->af == AF_INET && pd->act.flags & PFSTATE_NODF &&
	    h->ip_off & htons(IP_DF)) {
		u_int16_t ip_off = h->ip_off;

		h->ip_off &= htons(~IP_DF);
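		/*
		 * pf_cksum_fixup() performs an incremental checksum update
		 * in the style of RFC 1624, folding the difference between
		 * the old and new 16-bit words into ip_sum so the header
		 * need not be re-checksummed from scratch.
		 */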
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
	}

	/* Enforce a minimum ttl, may cause endless packet loops */
	if (pd->af == AF_INET && pd->act.min_ttl &&
	    h->ip_ttl < pd->act.min_ttl) {
		u_int16_t ip_ttl = h->ip_ttl;

		h->ip_ttl = pd->act.min_ttl;
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_ttl, h->ip_ttl, 0);
	}
#ifdef INET6
	/* Enforce a minimum ttl, may cause endless packet loops */
	if (pd->af == AF_INET6 && pd->act.min_ttl &&
	    h6->ip6_hlim < pd->act.min_ttl)
		h6->ip6_hlim = pd->act.min_ttl;
#endif /* INET6 */
	/* Enforce tos */
	if (pd->act.flags & PFSTATE_SETTOS) {
		switch (pd->af) {
		case AF_INET: {
			u_int16_t ov, nv;

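			/*
			 * The TOS byte shares a 16-bit checksum word with
			 * the version/header-length byte, so the whole word
			 * is snapshotted before and after the rewrite for
			 * the incremental fixup.
			 */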
			ov = *(u_int16_t *)h;
			h->ip_tos = pd->act.set_tos | (h->ip_tos & IPTOS_ECN_MASK);
			nv = *(u_int16_t *)h;

			h->ip_sum = pf_cksum_fixup(h->ip_sum, ov, nv, 0);
			break;
		}
#ifdef INET6
		case AF_INET6:
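			/*
			 * The 8-bit traffic class occupies bits 20-27 of the
			 * ip6_flow word: mask out the old class, then shift
			 * the new TOS (with the ECN bits preserved) into
			 * place.
			 */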
			h6->ip6_flow &= IPV6_FLOWLABEL_MASK | IPV6_VERSION_MASK;
			h6->ip6_flow |= htonl((pd->act.set_tos | IPV6_ECN(h6)) << 20);
			break;
#endif /* INET6 */
		}
	}

	/* random-id, but not for fragments */
#ifdef INET
	if (pd->af == AF_INET &&
	    pd->act.flags & PFSTATE_RANDOMID && !(h->ip_off & ~htons(IP_DF))) {
		uint16_t ip_id = h->ip_id;

		ip_fillid(h, V_ip_random_id);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_id, h->ip_id, 0);
	}
#endif /* INET */
}
#endif /* INET || INET6 */