/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright 2001 Niels Provos <provos@citi.umich.edu>
 * Copyright 2011 Alexander Bluhm <bluhm@openbsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *	$OpenBSD: pf_norm.c,v 1.114 2009/01/29 14:11:45 henning Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_pf.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/refcount.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/vnet.h>
#include <net/pfvar.h>
#include <net/if_pflog.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>

#ifdef INET6
#include <netinet/ip6.h>
#endif /* INET6 */

struct pf_frent {
	TAILQ_ENTRY(pf_frent)	fr_next;
	struct mbuf	*fe_m;
	uint16_t	fe_hdrlen;	/* ipv4 header length with ip options
					   ipv6, extension, fragment header */
	uint16_t	fe_extoff;	/* last extension header offset or 0 */
	uint16_t	fe_len;		/* fragment length */
	uint16_t	fe_off;		/* fragment offset */
	uint16_t	fe_mff;		/* more fragment flag */
};

struct pf_fragment_cmp {
	struct pf_addr	frc_src;
	struct pf_addr	frc_dst;
	uint32_t	frc_id;
	sa_family_t	frc_af;
	uint8_t		frc_proto;
};

struct pf_fragment {
	struct pf_fragment_cmp	fr_key;
#define fr_src	fr_key.frc_src
#define fr_dst	fr_key.frc_dst
#define fr_id	fr_key.frc_id
#define fr_af	fr_key.frc_af
#define fr_proto	fr_key.frc_proto

	RB_ENTRY(pf_fragment) fr_entry;
	TAILQ_ENTRY(pf_fragment) frag_next;
	uint32_t	fr_timeout;
	uint16_t	fr_maxlen;	/* maximum length of single fragment */
	TAILQ_HEAD(pf_fragq, pf_frent) fr_queue;
};

struct pf_fragment_tag {
	uint16_t	ft_hdrlen;	/* header length of reassembled pkt */
	uint16_t	ft_extoff;	/* last extension header offset or 0 */
	uint16_t	ft_maxlen;	/* maximum fragment payload length */
	uint32_t	ft_id;		/* fragment id */
};
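/*
 * Reassembly overview (as implemented below): each pf_frent describes one
 * received fragment, its mbuf plus offset/length bookkeeping.  Fragments
 * that belong to the same datagram are kept sorted by offset on a
 * pf_fragment, which is keyed by (src, dst, id, af, proto) in a red-black
 * tree for lookup and is also linked into V_pf_fragqueue, ordered by last
 * use, for timeout-based purging.  For IPv6, a pf_fragment_tag is attached
 * to the reassembled packet so that pf_refragment6() can split it up again
 * on output.
 */
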
static struct mtx pf_frag_mtx;
MTX_SYSINIT(pf_frag_mtx, &pf_frag_mtx, "pf fragments", MTX_DEF);
#define PF_FRAG_LOCK()		mtx_lock(&pf_frag_mtx)
#define PF_FRAG_UNLOCK()	mtx_unlock(&pf_frag_mtx)
#define PF_FRAG_ASSERT()	mtx_assert(&pf_frag_mtx, MA_OWNED)

VNET_DEFINE(uma_zone_t, pf_state_scrub_z);	/* XXX: shared with pfsync */

static VNET_DEFINE(uma_zone_t, pf_frent_z);
#define	V_pf_frent_z	VNET(pf_frent_z)
static VNET_DEFINE(uma_zone_t, pf_frag_z);
#define	V_pf_frag_z	VNET(pf_frag_z)

TAILQ_HEAD(pf_fragqueue, pf_fragment);
TAILQ_HEAD(pf_cachequeue, pf_fragment);
static VNET_DEFINE(struct pf_fragqueue, pf_fragqueue);
#define	V_pf_fragqueue	VNET(pf_fragqueue)
RB_HEAD(pf_frag_tree, pf_fragment);
static VNET_DEFINE(struct pf_frag_tree, pf_frag_tree);
#define	V_pf_frag_tree	VNET(pf_frag_tree)
static int		 pf_frag_compare(struct pf_fragment *,
			    struct pf_fragment *);
static RB_PROTOTYPE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
static RB_GENERATE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);

static void	pf_flush_fragments(void);
static void	pf_free_fragment(struct pf_fragment *);
static void	pf_remove_fragment(struct pf_fragment *);
static int	pf_normalize_tcpopt(struct pf_rule *, struct mbuf *,
		    struct tcphdr *, int, sa_family_t);
static struct pf_frent *pf_create_fragment(u_short *);
static struct pf_fragment *pf_find_fragment(struct pf_fragment_cmp *key,
		    struct pf_frag_tree *tree);
static struct pf_fragment *pf_fillup_fragment(struct pf_fragment_cmp *,
		    struct pf_frent *, u_short *);
static int	pf_isfull_fragment(struct pf_fragment *);
static struct mbuf *pf_join_fragment(struct pf_fragment *);
#ifdef INET
static void	pf_scrub_ip(struct mbuf **, uint32_t, uint8_t, uint8_t);
static int	pf_reassemble(struct mbuf **, struct ip *, int, u_short *);
#endif	/* INET */
#ifdef INET6
static int	pf_reassemble6(struct mbuf **, struct ip6_hdr *,
		    struct ip6_frag *, uint16_t, uint16_t, u_short *);
static void	pf_scrub_ip6(struct mbuf **, uint8_t);
#endif	/* INET6 */

#define	DPFPRINTF(x) do {				\
	if (V_pf_status.debug >= PF_DEBUG_MISC) {	\
		printf("%s: ", __func__);		\
		printf x ;				\
	}						\
} while(0)

#ifdef INET
static void
pf_ip2key(struct ip *ip, int dir, struct pf_fragment_cmp *key)
{

	key->frc_src.v4 = ip->ip_src;
	key->frc_dst.v4 = ip->ip_dst;
	key->frc_af = AF_INET;
	key->frc_proto = ip->ip_p;
	key->frc_id = ip->ip_id;
}
#endif	/* INET */

void
pf_normalize_init(void)
{

	V_pf_frag_z = uma_zcreate("pf frags", sizeof(struct pf_fragment),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	V_pf_frent_z = uma_zcreate("pf frag entries", sizeof(struct pf_frent),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	V_pf_state_scrub_z = uma_zcreate("pf state scrubs",
	    sizeof(struct pf_state_scrub), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);

	V_pf_limits[PF_LIMIT_FRAGS].zone = V_pf_frent_z;
	V_pf_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT;
	uma_zone_set_max(V_pf_frent_z, PFFRAG_FRENT_HIWAT);
	uma_zone_set_warning(V_pf_frent_z, "PF frag entries limit reached");

	TAILQ_INIT(&V_pf_fragqueue);
}

void
pf_normalize_cleanup(void)
{

	uma_zdestroy(V_pf_state_scrub_z);
	uma_zdestroy(V_pf_frent_z);
	uma_zdestroy(V_pf_frag_z);
}
static int
pf_frag_compare(struct pf_fragment *a, struct pf_fragment *b)
{
	int	diff;

	if ((diff = a->fr_id - b->fr_id) != 0)
		return (diff);
	if ((diff = a->fr_proto - b->fr_proto) != 0)
		return (diff);
	if ((diff = a->fr_af - b->fr_af) != 0)
		return (diff);
	if ((diff = pf_addr_cmp(&a->fr_src, &b->fr_src, a->fr_af)) != 0)
		return (diff);
	if ((diff = pf_addr_cmp(&a->fr_dst, &b->fr_dst, a->fr_af)) != 0)
		return (diff);
	return (0);
}

void
pf_purge_expired_fragments(void)
{
	u_int32_t	expire = time_uptime -
			    V_pf_default_rule.timeout[PFTM_FRAG];

	pf_purge_fragments(expire);
}

void
pf_purge_fragments(uint32_t expire)
{
	struct pf_fragment	*frag;

	PF_FRAG_LOCK();
	while ((frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue)) != NULL) {
		if (frag->fr_timeout > expire)
			break;

		DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
		pf_free_fragment(frag);
	}

	PF_FRAG_UNLOCK();
}

/*
 * Try to flush old fragments to make space for new ones
 */
static void
pf_flush_fragments(void)
{
	struct pf_fragment	*frag;
	int			 goal;

	PF_FRAG_ASSERT();

	goal = uma_zone_get_cur(V_pf_frent_z) * 9 / 10;
	DPFPRINTF(("trying to free %d frag entries\n", goal));
	while (goal < uma_zone_get_cur(V_pf_frent_z)) {
		frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue);
		if (frag)
			pf_free_fragment(frag);
		else
			break;
	}
}

/* Frees the fragments and all associated entries */
static void
pf_free_fragment(struct pf_fragment *frag)
{
	struct pf_frent	*frent;

	PF_FRAG_ASSERT();

	/* Free all fragments */
	for (frent = TAILQ_FIRST(&frag->fr_queue); frent;
	    frent = TAILQ_FIRST(&frag->fr_queue)) {
		TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);

		m_freem(frent->fe_m);
		uma_zfree(V_pf_frent_z, frent);
	}

	pf_remove_fragment(frag);
}

static struct pf_fragment *
pf_find_fragment(struct pf_fragment_cmp *key, struct pf_frag_tree *tree)
{
	struct pf_fragment	*frag;

	PF_FRAG_ASSERT();

	frag = RB_FIND(pf_frag_tree, tree, (struct pf_fragment *)key);
	if (frag != NULL) {
		/* XXX Are we sure we want to update the timeout? */
		frag->fr_timeout = time_uptime;
		TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);
		TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next);
	}

	return (frag);
}

/* Removes a fragment from the fragment queue and frees the fragment */
static void
pf_remove_fragment(struct pf_fragment *frag)
{

	PF_FRAG_ASSERT();

	RB_REMOVE(pf_frag_tree, &V_pf_frag_tree, frag);
	TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);
	uma_zfree(V_pf_frag_z, frag);
}

static struct pf_frent *
pf_create_fragment(u_short *reason)
{
	struct pf_frent	*frent;

	PF_FRAG_ASSERT();

	frent = uma_zalloc(V_pf_frent_z, M_NOWAIT);
	if (frent == NULL) {
		pf_flush_fragments();
		frent = uma_zalloc(V_pf_frent_z, M_NOWAIT);
		if (frent == NULL) {
			REASON_SET(reason, PFRES_MEMORY);
			return (NULL);
		}
	}

	return (frent);
}
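/*
 * Insert a fragment into the per-datagram queue, creating the queue on
 * first use.  The queue is kept sorted by offset.  If the new fragment's
 * front overlaps its predecessor, the new fragment is trimmed; if its tail
 * overlaps queued successors, those successors are trimmed or dropped.
 * Malformed or inconsistent fragments are rejected with PFRES_FRAG.
 */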
static struct pf_fragment *
pf_fillup_fragment(struct pf_fragment_cmp *key, struct pf_frent *frent,
    u_short *reason)
{
	struct pf_frent		*after, *next, *prev;
	struct pf_fragment	*frag;
	uint16_t		 total;

	PF_FRAG_ASSERT();

	/* No empty fragments. */
	if (frent->fe_len == 0) {
		DPFPRINTF(("bad fragment: len 0"));
		goto bad_fragment;
	}

	/* All fragments are 8 byte aligned. */
	if (frent->fe_mff && (frent->fe_len & 0x7)) {
		DPFPRINTF(("bad fragment: mff and len %d", frent->fe_len));
		goto bad_fragment;
	}

	/* Respect maximum length, IP_MAXPACKET == IPV6_MAXPACKET. */
	if (frent->fe_off + frent->fe_len > IP_MAXPACKET) {
		DPFPRINTF(("bad fragment: max packet %d",
		    frent->fe_off + frent->fe_len));
		goto bad_fragment;
	}

	DPFPRINTF((key->frc_af == AF_INET ?
	    "reass frag %d @ %d-%d" : "reass frag %#08x @ %d-%d",
	    key->frc_id, frent->fe_off, frent->fe_off + frent->fe_len));

	/* Fully buffer all of the fragments in this fragment queue. */
	frag = pf_find_fragment(key, &V_pf_frag_tree);

	/* Create a new reassembly queue for this packet. */
	if (frag == NULL) {
		frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
		if (frag == NULL) {
			pf_flush_fragments();
			frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
			if (frag == NULL) {
				REASON_SET(reason, PFRES_MEMORY);
				goto drop_fragment;
			}
		}

		*(struct pf_fragment_cmp *)frag = *key;
		frag->fr_timeout = time_uptime;
		frag->fr_maxlen = frent->fe_len;
		TAILQ_INIT(&frag->fr_queue);

		RB_INSERT(pf_frag_tree, &V_pf_frag_tree, frag);
		TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next);

		/* We do not have a previous fragment. */
		TAILQ_INSERT_HEAD(&frag->fr_queue, frent, fr_next);

		return (frag);
	}

	KASSERT(!TAILQ_EMPTY(&frag->fr_queue), ("!TAILQ_EMPTY()->fr_queue"));

	/* Remember maximum fragment len for refragmentation. */
	if (frent->fe_len > frag->fr_maxlen)
		frag->fr_maxlen = frent->fe_len;

	/* Maximum data we have seen already. */
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
	    TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;

	/* Non terminal fragments must have more fragments flag. */
	if (frent->fe_off + frent->fe_len < total && !frent->fe_mff)
		goto bad_fragment;

	/* Check if we saw the last fragment already. */
	if (!TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_mff) {
		if (frent->fe_off + frent->fe_len > total ||
		    (frent->fe_off + frent->fe_len == total && frent->fe_mff))
			goto bad_fragment;
	} else {
		if (frent->fe_off + frent->fe_len == total && !frent->fe_mff)
			goto bad_fragment;
	}

	/* Find a fragment after the current one. */
	prev = NULL;
	TAILQ_FOREACH(after, &frag->fr_queue, fr_next) {
		if (after->fe_off > frent->fe_off)
			break;
		prev = after;
	}

	KASSERT(prev != NULL || after != NULL,
	    ("prev != NULL || after != NULL"));

	if (prev != NULL && prev->fe_off + prev->fe_len > frent->fe_off) {
		uint16_t	precut;

		precut = prev->fe_off + prev->fe_len - frent->fe_off;
		if (precut >= frent->fe_len)
			goto bad_fragment;
		DPFPRINTF(("overlap -%d", precut));
		m_adj(frent->fe_m, precut);
		frent->fe_off += precut;
		frent->fe_len -= precut;
	}

	for (; after != NULL && frent->fe_off + frent->fe_len > after->fe_off;
	    after = next) {
		uint16_t	aftercut;

		aftercut = frent->fe_off + frent->fe_len - after->fe_off;
		DPFPRINTF(("adjust overlap %d", aftercut));
		if (aftercut < after->fe_len) {
			m_adj(after->fe_m, aftercut);
			after->fe_off += aftercut;
			after->fe_len -= aftercut;
			break;
		}

		/* This fragment is completely overlapped, lose it. */
		next = TAILQ_NEXT(after, fr_next);
		m_freem(after->fe_m);
		TAILQ_REMOVE(&frag->fr_queue, after, fr_next);
		uma_zfree(V_pf_frent_z, after);
	}

	if (prev == NULL)
		TAILQ_INSERT_HEAD(&frag->fr_queue, frent, fr_next);
	else
		TAILQ_INSERT_AFTER(&frag->fr_queue, prev, frent, fr_next);

	return (frag);

bad_fragment:
	REASON_SET(reason, PFRES_FRAG);
drop_fragment:
	uma_zfree(V_pf_frent_z, frent);
	return (NULL);
}

static int
pf_isfull_fragment(struct pf_fragment *frag)
{
	struct pf_frent	*frent, *next;
	uint16_t	 off, total;

	/* Check if we are completely reassembled */
	if (TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_mff)
		return (0);

	/* Maximum data we have seen already */
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
	    TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;

	/* Check if we have all the data */
	off = 0;
	for (frent = TAILQ_FIRST(&frag->fr_queue); frent; frent = next) {
		next = TAILQ_NEXT(frent, fr_next);

		off += frent->fe_len;
		if (off < total && (next == NULL || next->fe_off != off)) {
			DPFPRINTF(("missing fragment at %d, next %d, total %d",
			    off, next == NULL ? -1 : next->fe_off, total));
			return (0);
		}
	}
	DPFPRINTF(("%d < %d?", off, total));
	if (off < total)
		return (0);
	KASSERT(off == total, ("off == total"));

	return (1);
}
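/*
 * Concatenate the fragments into one mbuf chain.  The first fragment keeps
 * its header; every following fragment has its header and any trailing
 * garbage trimmed off before being appended.  The queue entries are freed
 * and the pf_fragment itself is removed from the tree and timeout queue.
 */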
static struct mbuf *
pf_join_fragment(struct pf_fragment *frag)
{
	struct mbuf	*m, *m2;
	struct pf_frent	*frent, *next;

	frent = TAILQ_FIRST(&frag->fr_queue);
	next = TAILQ_NEXT(frent, fr_next);

	m = frent->fe_m;
	m_adj(m, (frent->fe_hdrlen + frent->fe_len) - m->m_pkthdr.len);
	uma_zfree(V_pf_frent_z, frent);
	for (frent = next; frent != NULL; frent = next) {
		next = TAILQ_NEXT(frent, fr_next);

		m2 = frent->fe_m;
		/* Strip off ip header. */
		m_adj(m2, frent->fe_hdrlen);
		/* Strip off any trailing bytes. */
		m_adj(m2, frent->fe_len - m2->m_pkthdr.len);

		uma_zfree(V_pf_frent_z, frent);
		m_cat(m, m2);
	}

	/* Remove from fragment queue. */
	pf_remove_fragment(frag);

	return (m);
}

#ifdef INET
static int
pf_reassemble(struct mbuf **m0, struct ip *ip, int dir, u_short *reason)
{
	struct mbuf		*m = *m0;
	struct pf_frent		*frent;
	struct pf_fragment	*frag;
	struct pf_fragment_cmp	 key;
	uint16_t		 total, hdrlen;

	/* Get an entry for the fragment queue */
	if ((frent = pf_create_fragment(reason)) == NULL)
		return (PF_DROP);

	frent->fe_m = m;
	frent->fe_hdrlen = ip->ip_hl << 2;
	frent->fe_extoff = 0;
	frent->fe_len = ntohs(ip->ip_len) - (ip->ip_hl << 2);
	frent->fe_off = (ntohs(ip->ip_off) & IP_OFFMASK) << 3;
	frent->fe_mff = ntohs(ip->ip_off) & IP_MF;

	pf_ip2key(ip, dir, &key);

	if ((frag = pf_fillup_fragment(&key, frent, reason)) == NULL)
		return (PF_DROP);

	/* The mbuf is part of the fragment entry, no direct free or access */
	m = *m0 = NULL;

	if (!pf_isfull_fragment(frag))
		return (PF_PASS);  /* drop because *m0 is NULL, no error */

	/* We have all the data */
	frent = TAILQ_FIRST(&frag->fr_queue);
	KASSERT(frent != NULL, ("frent != NULL"));
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
	    TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
	hdrlen = frent->fe_hdrlen;

	m = *m0 = pf_join_fragment(frag);
	frag = NULL;

	if (m->m_flags & M_PKTHDR) {
		int plen = 0;
		for (m = *m0; m; m = m->m_next)
			plen += m->m_len;
		m = *m0;
		m->m_pkthdr.len = plen;
	}

	ip = mtod(m, struct ip *);
	ip->ip_len = htons(hdrlen + total);
	ip->ip_off &= ~(IP_MF|IP_OFFMASK);

	if (hdrlen + total > IP_MAXPACKET) {
		DPFPRINTF(("drop: too big: %d", total));
		ip->ip_len = 0;
		REASON_SET(reason, PFRES_SHORT);
		/* PF_DROP requires a valid mbuf *m0 in pf_test() */
		return (PF_DROP);
	}

	DPFPRINTF(("complete: %p(%d)\n", m, ntohs(ip->ip_len)));
	return (PF_PASS);
}
#endif	/* INET */
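/*
 * IPv6 reassembly works like the IPv4 case, with two extras: the fragment
 * header is deleted from the joined packet, and an m_tag of type
 * PF_REASSEMBLED records the header length, last extension header offset,
 * largest fragment payload, and fragment id so that pf_refragment6() can
 * restore the original fragmentation on the outbound path.
 */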
#ifdef INET6
static int
pf_reassemble6(struct mbuf **m0, struct ip6_hdr *ip6, struct ip6_frag *fraghdr,
    uint16_t hdrlen, uint16_t extoff, u_short *reason)
{
	struct mbuf		*m = *m0;
	struct pf_frent		*frent;
	struct pf_fragment	*frag;
	struct pf_fragment_cmp	 key;
	struct m_tag		*mtag;
	struct pf_fragment_tag	*ftag;
	int			 off;
	uint32_t		 frag_id;
	uint16_t		 total, maxlen;
	uint8_t			 proto;

	PF_FRAG_LOCK();

	/* Get an entry for the fragment queue. */
	if ((frent = pf_create_fragment(reason)) == NULL) {
		PF_FRAG_UNLOCK();
		return (PF_DROP);
	}

	frent->fe_m = m;
	frent->fe_hdrlen = hdrlen;
	frent->fe_extoff = extoff;
	frent->fe_len = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - hdrlen;
	frent->fe_off = ntohs(fraghdr->ip6f_offlg & IP6F_OFF_MASK);
	frent->fe_mff = fraghdr->ip6f_offlg & IP6F_MORE_FRAG;

	key.frc_src.v6 = ip6->ip6_src;
	key.frc_dst.v6 = ip6->ip6_dst;
	key.frc_af = AF_INET6;
	/* Only the first fragment's protocol is relevant. */
	key.frc_proto = 0;
	key.frc_id = fraghdr->ip6f_ident;

	if ((frag = pf_fillup_fragment(&key, frent, reason)) == NULL) {
		PF_FRAG_UNLOCK();
		return (PF_DROP);
	}

	/* The mbuf is part of the fragment entry, no direct free or access. */
	m = *m0 = NULL;

	if (!pf_isfull_fragment(frag)) {
		PF_FRAG_UNLOCK();
		return (PF_PASS);  /* Drop because *m0 is NULL, no error. */
	}

	/* We have all the data. */
	extoff = frent->fe_extoff;
	maxlen = frag->fr_maxlen;
	frag_id = frag->fr_id;
	frent = TAILQ_FIRST(&frag->fr_queue);
	KASSERT(frent != NULL, ("frent != NULL"));
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
	    TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
	hdrlen = frent->fe_hdrlen - sizeof(struct ip6_frag);

	m = *m0 = pf_join_fragment(frag);
	frag = NULL;

	PF_FRAG_UNLOCK();

	/* Take protocol from first fragment header. */
	m = m_getptr(m, hdrlen + offsetof(struct ip6_frag, ip6f_nxt), &off);
	KASSERT(m, ("%s: short mbuf chain", __func__));
	proto = *(mtod(m, caddr_t) + off);
	m = *m0;

	/* Delete frag6 header */
	if (ip6_deletefraghdr(m, hdrlen, M_NOWAIT) != 0)
		goto fail;

	if (m->m_flags & M_PKTHDR) {
		int plen = 0;
		for (m = *m0; m; m = m->m_next)
			plen += m->m_len;
		m = *m0;
		m->m_pkthdr.len = plen;
	}

	if ((mtag = m_tag_get(PF_REASSEMBLED, sizeof(struct pf_fragment_tag),
	    M_NOWAIT)) == NULL)
		goto fail;
	ftag = (struct pf_fragment_tag *)(mtag + 1);
	ftag->ft_hdrlen = hdrlen;
	ftag->ft_extoff = extoff;
	ftag->ft_maxlen = maxlen;
	ftag->ft_id = frag_id;
	m_tag_prepend(m, mtag);

	ip6 = mtod(m, struct ip6_hdr *);
	ip6->ip6_plen = htons(hdrlen - sizeof(struct ip6_hdr) + total);
	if (extoff) {
		/* Write protocol into next field of last extension header. */
		m = m_getptr(m, extoff + offsetof(struct ip6_ext, ip6e_nxt),
		    &off);
		KASSERT(m, ("%s: short mbuf chain", __func__));
		*(mtod(m, char *) + off) = proto;
		m = *m0;
	} else
		ip6->ip6_nxt = proto;

	if (hdrlen - sizeof(struct ip6_hdr) + total > IPV6_MAXPACKET) {
		DPFPRINTF(("drop: too big: %d", total));
		ip6->ip6_plen = 0;
		REASON_SET(reason, PFRES_SHORT);
		/* PF_DROP requires a valid mbuf *m0 in pf_test6(). */
		return (PF_DROP);
	}

	DPFPRINTF(("complete: %p(%d)", m, ntohs(ip6->ip6_plen)));
	return (PF_PASS);

fail:
	REASON_SET(reason, PFRES_MEMORY);
	/* PF_DROP requires a valid mbuf *m0 in pf_test6(), will free later. */
	return (PF_DROP);
}
#endif	/* INET6 */
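/*
 * Split a previously reassembled IPv6 packet back into fragments before it
 * leaves, using the parameters saved in the PF_REASSEMBLED tag: the
 * fragment header is re-inserted and ip6_fragment() cuts the payload at
 * the largest fragment size observed on input, rounded down to a multiple
 * of 8 bytes.
 */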
#ifdef INET6
int
pf_refragment6(struct ifnet *ifp, struct mbuf **m0, struct m_tag *mtag)
{
	struct mbuf		*m = *m0, *t;
	struct pf_fragment_tag	*ftag = (struct pf_fragment_tag *)(mtag + 1);
	struct pf_pdesc		 pd;
	uint32_t		 frag_id;
	uint16_t		 hdrlen, extoff, maxlen;
	uint8_t			 proto;
	int			 error, action;

	hdrlen = ftag->ft_hdrlen;
	extoff = ftag->ft_extoff;
	maxlen = ftag->ft_maxlen;
	frag_id = ftag->ft_id;
	m_tag_delete(m, mtag);
	mtag = NULL;
	ftag = NULL;

	if (extoff) {
		int off;

		/* Use protocol from next field of last extension header */
		m = m_getptr(m, extoff + offsetof(struct ip6_ext, ip6e_nxt),
		    &off);
		KASSERT((m != NULL), ("pf_refragment6: short mbuf chain"));
		proto = *(mtod(m, caddr_t) + off);
		*(mtod(m, char *) + off) = IPPROTO_FRAGMENT;
		m = *m0;
	} else {
		struct ip6_hdr *hdr;

		hdr = mtod(m, struct ip6_hdr *);
		proto = hdr->ip6_nxt;
		hdr->ip6_nxt = IPPROTO_FRAGMENT;
	}

	/*
	 * The MTU must be a multiple of 8 bytes, or we risk doing the
	 * fragmentation wrong.
	 */
	maxlen = maxlen & ~7;

	/*
	 * Maxlen may be less than 8 if there was only a single
	 * fragment.  As it was fragmented before, add a fragment
	 * header also for a single fragment.  If total or maxlen
	 * is less than 8, ip6_fragment() will return EMSGSIZE and
	 * we drop the packet.
	 */
	error = ip6_fragment(ifp, m, hdrlen, proto, maxlen, frag_id);
	m = (*m0)->m_nextpkt;
	(*m0)->m_nextpkt = NULL;
	if (error == 0) {
		/* The first mbuf contains the unfragmented packet. */
		m_freem(*m0);
		*m0 = NULL;
		action = PF_PASS;
	} else {
		/* Drop expects an mbuf to free. */
		DPFPRINTF(("refragment error %d", error));
		action = PF_DROP;
	}
	for (t = m; m; m = t) {
		t = m->m_nextpkt;
		m->m_nextpkt = NULL;
		m->m_flags |= M_SKIP_FIREWALL;
		memset(&pd, 0, sizeof(pd));
		pd.pf_mtag = pf_find_mtag(m);
		if (error == 0)
			ip6_forward(m, 0);
		else
			m_freem(m);
	}

	return (action);
}
#endif /* INET6 */
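/*
 * Entry point for IPv4 scrubbing: match the packet against the scrub
 * ruleset, sanity-check the header, and feed fragments to the reassembly
 * code above.  A pf.conf ruleset exercising these paths might look, for
 * example, like:
 *
 *	scrub in all fragment reassemble
 *	scrub on em0 all no-df random-id min-ttl 64 max-mss 1440
 */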
#ifdef INET
int
pf_normalize_ip(struct mbuf **m0, int dir, struct pfi_kif *kif, u_short *reason,
    struct pf_pdesc *pd)
{
	struct mbuf	*m = *m0;
	struct pf_rule	*r;
	struct ip	*h = mtod(m, struct ip *);
	int		 mff = (ntohs(h->ip_off) & IP_MF);
	int		 hlen = h->ip_hl << 2;
	u_int16_t	 fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
	u_int16_t	 max;
	int		 ip_len;
	int		 ip_off;
	int		 tag = -1;
	int		 verdict;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	while (r != NULL) {
		r->evaluations++;
		if (pfi_kif_match(r->kif, kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != AF_INET)
			r = r->skip[PF_SKIP_AF].ptr;
		else if (r->proto && r->proto != h->ip_p)
			r = r->skip[PF_SKIP_PROTO].ptr;
		else if (PF_MISMATCHAW(&r->src.addr,
		    (struct pf_addr *)&h->ip_src.s_addr, AF_INET,
		    r->src.neg, kif, M_GETFIB(m)))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr,
		    (struct pf_addr *)&h->ip_dst.s_addr, AF_INET,
		    r->dst.neg, NULL, M_GETFIB(m)))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else if (r->match_tag && !pf_match_tag(m, r, &tag,
		    pd->pf_mtag ? pd->pf_mtag->tag : 0))
			r = TAILQ_NEXT(r, entries);
		else
			break;
	}

	if (r == NULL || r->action == PF_NOSCRUB)
		return (PF_PASS);
	else {
		r->packets[dir == PF_OUT]++;
		r->bytes[dir == PF_OUT] += pd->tot_len;
	}

	/* Check for illegal packets */
	if (hlen < (int)sizeof(struct ip)) {
		REASON_SET(reason, PFRES_NORM);
		goto drop;
	}

	if (hlen > ntohs(h->ip_len)) {
		REASON_SET(reason, PFRES_NORM);
		goto drop;
	}

	/* Clear IP_DF if the rule uses the no-df option */
	if (r->rule_flag & PFRULE_NODF && h->ip_off & htons(IP_DF)) {
		u_int16_t ip_off = h->ip_off;

		h->ip_off &= htons(~IP_DF);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
	}

	/* We will need other tests here */
	if (!fragoff && !mff)
		goto no_fragment;

	/*
	 * We're dealing with a fragment now.  Don't allow fragments
	 * with IP_DF to enter the cache.  If the flag was cleared by
	 * no-df above, fine.  Otherwise drop it.
	 */
	if (h->ip_off & htons(IP_DF)) {
		DPFPRINTF(("IP_DF\n"));
		goto bad;
	}

	ip_len = ntohs(h->ip_len) - hlen;
	ip_off = (ntohs(h->ip_off) & IP_OFFMASK) << 3;

	/* All fragments are 8 byte aligned */
	if (mff && (ip_len & 0x7)) {
		DPFPRINTF(("mff and %d\n", ip_len));
		goto bad;
	}

	/* Respect maximum length */
	if (fragoff + ip_len > IP_MAXPACKET) {
		DPFPRINTF(("max packet %d\n", fragoff + ip_len));
		goto bad;
	}
	max = fragoff + ip_len;

	/*
	 * Fully buffer all of the fragments.
	 * Might return a completely reassembled mbuf, or NULL.
	 */
	PF_FRAG_LOCK();
	DPFPRINTF(("reass frag %d @ %d-%d\n", h->ip_id, fragoff, max));
	verdict = pf_reassemble(m0, h, dir, reason);
	PF_FRAG_UNLOCK();

	if (verdict != PF_PASS)
		return (PF_DROP);

	m = *m0;
	if (m == NULL)
		return (PF_DROP);

	h = mtod(m, struct ip *);

no_fragment:
	/* At this point, only IP_DF is allowed in ip_off */
	if (h->ip_off & ~htons(IP_DF)) {
		u_int16_t ip_off = h->ip_off;

		h->ip_off &= htons(IP_DF);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
	}

	pf_scrub_ip(&m, r->rule_flag, r->min_ttl, r->set_tos);

	return (PF_PASS);

bad:
	DPFPRINTF(("dropping bad fragment\n"));
	REASON_SET(reason, PFRES_FRAG);
drop:
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET, dir, *reason, r, NULL, NULL, pd,
		    1);

	return (PF_DROP);
}
#endif
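/*
 * IPv6 scrubbing walks the extension header chain before deciding what to
 * do: hop-by-hop options are parsed so a jumbo payload length can be
 * validated, and a fragment header diverts the packet into
 * pf_reassemble6().
 */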
#ifdef INET6
int
pf_normalize_ip6(struct mbuf **m0, int dir, struct pfi_kif *kif,
    u_short *reason, struct pf_pdesc *pd)
{
	struct mbuf		*m = *m0;
	struct pf_rule		*r;
	struct ip6_hdr		*h = mtod(m, struct ip6_hdr *);
	int			 extoff;
	int			 off;
	struct ip6_ext		 ext;
	struct ip6_opt		 opt;
	struct ip6_opt_jumbo	 jumbo;
	struct ip6_frag		 frag;
	u_int32_t		 jumbolen = 0, plen;
	int			 optend;
	int			 ooff;
	u_int8_t		 proto;
	int			 terminal;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	while (r != NULL) {
		r->evaluations++;
		if (pfi_kif_match(r->kif, kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != AF_INET6)
			r = r->skip[PF_SKIP_AF].ptr;
#if 0 /* header chain! */
		else if (r->proto && r->proto != h->ip6_nxt)
			r = r->skip[PF_SKIP_PROTO].ptr;
#endif
		else if (PF_MISMATCHAW(&r->src.addr,
		    (struct pf_addr *)&h->ip6_src, AF_INET6,
		    r->src.neg, kif, M_GETFIB(m)))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr,
		    (struct pf_addr *)&h->ip6_dst, AF_INET6,
		    r->dst.neg, NULL, M_GETFIB(m)))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else
			break;
	}

	if (r == NULL || r->action == PF_NOSCRUB)
		return (PF_PASS);
	else {
		r->packets[dir == PF_OUT]++;
		r->bytes[dir == PF_OUT] += pd->tot_len;
	}

	/* Check for illegal packets */
	if (sizeof(struct ip6_hdr) + IPV6_MAXPACKET < m->m_pkthdr.len)
		goto drop;

	extoff = 0;
	off = sizeof(struct ip6_hdr);
	proto = h->ip6_nxt;
	terminal = 0;
	do {
		switch (proto) {
		case IPPROTO_FRAGMENT:
			goto fragment;
			break;
		case IPPROTO_AH:
		case IPPROTO_ROUTING:
		case IPPROTO_DSTOPTS:
			if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
			    NULL, AF_INET6))
				goto shortpkt;
			extoff = off;
			if (proto == IPPROTO_AH)
				off += (ext.ip6e_len + 2) * 4;
			else
				off += (ext.ip6e_len + 1) * 8;
			proto = ext.ip6e_nxt;
			break;
		case IPPROTO_HOPOPTS:
			if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
			    NULL, AF_INET6))
				goto shortpkt;
			extoff = off;
			optend = off + (ext.ip6e_len + 1) * 8;
			ooff = off + sizeof(ext);
			do {
				if (!pf_pull_hdr(m, ooff, &opt.ip6o_type,
				    sizeof(opt.ip6o_type), NULL, NULL,
				    AF_INET6))
					goto shortpkt;
				if (opt.ip6o_type == IP6OPT_PAD1) {
					ooff++;
					continue;
				}
				if (!pf_pull_hdr(m, ooff, &opt, sizeof(opt),
				    NULL, NULL, AF_INET6))
					goto shortpkt;
				if (ooff + sizeof(opt) + opt.ip6o_len > optend)
					goto drop;
				switch (opt.ip6o_type) {
				case IP6OPT_JUMBO:
					if (h->ip6_plen != 0)
						goto drop;
					if (!pf_pull_hdr(m, ooff, &jumbo,
					    sizeof(jumbo), NULL, NULL,
					    AF_INET6))
						goto shortpkt;
					memcpy(&jumbolen, jumbo.ip6oj_jumbo_len,
					    sizeof(jumbolen));
					jumbolen = ntohl(jumbolen);
					if (jumbolen <= IPV6_MAXPACKET)
						goto drop;
					if (sizeof(struct ip6_hdr) + jumbolen !=
					    m->m_pkthdr.len)
						goto drop;
					break;
				default:
					break;
				}
				ooff += sizeof(opt) + opt.ip6o_len;
			} while (ooff < optend);

			off = optend;
			proto = ext.ip6e_nxt;
			break;
		default:
			terminal = 1;
			break;
		}
	} while (!terminal);

	/* jumbo payload option must be present, or plen > 0 */
	if (ntohs(h->ip6_plen) == 0)
		plen = jumbolen;
	else
		plen = ntohs(h->ip6_plen);
	if (plen == 0)
		goto drop;
	if (sizeof(struct ip6_hdr) + plen > m->m_pkthdr.len)
		goto shortpkt;

	pf_scrub_ip6(&m, r->min_ttl);

	return (PF_PASS);

fragment:
	/* Jumbo payload packets cannot be fragmented. */
	plen = ntohs(h->ip6_plen);
	if (plen == 0 || jumbolen)
		goto drop;
	if (sizeof(struct ip6_hdr) + plen > m->m_pkthdr.len)
		goto shortpkt;

	if (!pf_pull_hdr(m, off, &frag, sizeof(frag), NULL, NULL, AF_INET6))
		goto shortpkt;

	/* Offset now points to data portion. */
	off += sizeof(frag);

	/* Returns PF_DROP or *m0 is NULL or completely reassembled mbuf. */
	if (pf_reassemble6(m0, h, &frag, off, extoff, reason) != PF_PASS)
		return (PF_DROP);
	m = *m0;
	if (m == NULL)
		return (PF_DROP);

	pd->flags |= PFDESC_IP_REAS;
	return (PF_PASS);

shortpkt:
	REASON_SET(reason, PFRES_SHORT);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET6, dir, *reason, r, NULL, NULL, pd,
		    1);
	return (PF_DROP);

drop:
	REASON_SET(reason, PFRES_NORM);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET6, dir, *reason, r, NULL, NULL, pd,
		    1);
	return (PF_DROP);
}
#endif	/* INET6 */
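/*
 * Stateless TCP normalization: drop segments with illegal flag
 * combinations (SYN with RST or FIN, neither ACK nor RST, FIN/PSH/URG
 * without ACK), clear the reserved header bits, zero the urgent pointer
 * when TH_URG is unset, and optionally clamp the MSS option, fixing up
 * the checksum for every change.
 */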
int
pf_normalize_tcp(int dir, struct pfi_kif *kif, struct mbuf *m, int ipoff,
    int off, void *h, struct pf_pdesc *pd)
{
	struct pf_rule	*r, *rm = NULL;
	struct tcphdr	*th = pd->hdr.tcp;
	int		 rewrite = 0;
	u_short		 reason;
	u_int8_t	 flags;
	sa_family_t	 af = pd->af;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	while (r != NULL) {
		r->evaluations++;
		if (pfi_kif_match(r->kif, kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != af)
			r = r->skip[PF_SKIP_AF].ptr;
		else if (r->proto && r->proto != pd->proto)
			r = r->skip[PF_SKIP_PROTO].ptr;
		else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
		    r->src.neg, kif, M_GETFIB(m)))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (r->src.port_op && !pf_match_port(r->src.port_op,
		    r->src.port[0], r->src.port[1], th->th_sport))
			r = r->skip[PF_SKIP_SRC_PORT].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
		    r->dst.neg, NULL, M_GETFIB(m)))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
		    r->dst.port[0], r->dst.port[1], th->th_dport))
			r = r->skip[PF_SKIP_DST_PORT].ptr;
		else if (r->os_fingerprint != PF_OSFP_ANY && !pf_osfp_match(
		    pf_osfp_fingerprint(pd, m, off, th),
		    r->os_fingerprint))
			r = TAILQ_NEXT(r, entries);
		else {
			rm = r;
			break;
		}
	}

	if (rm == NULL || rm->action == PF_NOSCRUB)
		return (PF_PASS);
	else {
		r->packets[dir == PF_OUT]++;
		r->bytes[dir == PF_OUT] += pd->tot_len;
	}

	if (rm->rule_flag & PFRULE_REASSEMBLE_TCP)
		pd->flags |= PFDESC_TCP_NORM;

	flags = th->th_flags;
	if (flags & TH_SYN) {
		/* Illegal packet */
		if (flags & TH_RST)
			goto tcp_drop;

		if (flags & TH_FIN)
			goto tcp_drop;
	} else {
		/* Illegal packet */
		if (!(flags & (TH_ACK|TH_RST)))
			goto tcp_drop;
	}

	if (!(flags & TH_ACK)) {
		/* These flags are only valid if ACK is set */
		if ((flags & TH_FIN) || (flags & TH_PUSH) || (flags & TH_URG))
			goto tcp_drop;
	}

	/* Check for illegal header length */
	if (th->th_off < (sizeof(struct tcphdr) >> 2))
		goto tcp_drop;

	/* If flags changed, or reserved data set, then adjust */
	if (flags != th->th_flags || th->th_x2 != 0) {
		u_int16_t	ov, nv;

		ov = *(u_int16_t *)(&th->th_ack + 1);
		th->th_flags = flags;
		th->th_x2 = 0;
		nv = *(u_int16_t *)(&th->th_ack + 1);

		th->th_sum = pf_proto_cksum_fixup(m, th->th_sum, ov, nv, 0);
		rewrite = 1;
	}

	/* Remove urgent pointer, if TH_URG is not set */
	if (!(flags & TH_URG) && th->th_urp) {
		th->th_sum = pf_proto_cksum_fixup(m, th->th_sum, th->th_urp,
		    0, 0);
		th->th_urp = 0;
		rewrite = 1;
	}

	/* Process options */
	if (r->max_mss && pf_normalize_tcpopt(r, m, th, off, pd->af))
		rewrite = 1;

	/* copy back packet headers if we sanitized */
	if (rewrite)
		m_copyback(m, off, sizeof(*th), (caddr_t)th);

	return (PF_PASS);

tcp_drop:
	REASON_SET(&reason, PFRES_NORM);
	if (rm != NULL && r->log)
		PFLOG_PACKET(kif, m, af, dir, reason, r, NULL, NULL, pd, 1);
	return (PF_DROP);
}
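/*
 * Allocate the per-peer scrub state and, on the initial SYN, record the
 * peer's TCP timestamp baseline (tsval0/tsval/tsecr) plus a random
 * modulator (pfss_ts_mod) that pf_normalize_tcp_stateful() later adds to
 * every timestamp, hiding the host's real timestamp clock.
 */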
int
pf_normalize_tcp_init(struct mbuf *m, int off, struct pf_pdesc *pd,
    struct tcphdr *th, struct pf_state_peer *src, struct pf_state_peer *dst)
{
	u_int32_t tsval, tsecr;
	u_int8_t hdr[60];
	u_int8_t *opt;

	KASSERT((src->scrub == NULL),
	    ("pf_normalize_tcp_init: src->scrub != NULL"));

	src->scrub = uma_zalloc(V_pf_state_scrub_z, M_ZERO | M_NOWAIT);
	if (src->scrub == NULL)
		return (1);

	switch (pd->af) {
#ifdef INET
	case AF_INET: {
		struct ip *h = mtod(m, struct ip *);
		src->scrub->pfss_ttl = h->ip_ttl;
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
		src->scrub->pfss_ttl = h->ip6_hlim;
		break;
	}
#endif /* INET6 */
	}

	/*
	 * All normalizations below are only begun if we see the start of
	 * the connection.  They must all set an enabled bit in pfss_flags.
	 */
	if ((th->th_flags & TH_SYN) == 0)
		return (0);

	if (th->th_off > (sizeof(struct tcphdr) >> 2) && src->scrub &&
	    pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
		/* Diddle with TCP options */
		int hlen;
		opt = hdr + sizeof(struct tcphdr);
		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
		while (hlen >= TCPOLEN_TIMESTAMP) {
			switch (*opt) {
			case TCPOPT_EOL:	/* FALLTHROUGH */
			case TCPOPT_NOP:
				opt++;
				hlen--;
				break;
			case TCPOPT_TIMESTAMP:
				if (opt[1] >= TCPOLEN_TIMESTAMP) {
					src->scrub->pfss_flags |=
					    PFSS_TIMESTAMP;
					src->scrub->pfss_ts_mod =
					    htonl(arc4random());

					/* note PFSS_PAWS not set yet */
					memcpy(&tsval, &opt[2],
					    sizeof(u_int32_t));
					memcpy(&tsecr, &opt[6],
					    sizeof(u_int32_t));
					src->scrub->pfss_tsval0 = ntohl(tsval);
					src->scrub->pfss_tsval = ntohl(tsval);
					src->scrub->pfss_tsecr = ntohl(tsecr);
					getmicrouptime(&src->scrub->pfss_last);
				}
				/* FALLTHROUGH */
			default:
				hlen -= MAX(opt[1], 2);
				opt += MAX(opt[1], 2);
				break;
			}
		}
	}

	return (0);
}

void
pf_normalize_tcp_cleanup(struct pf_state *state)
{
	if (state->src.scrub)
		uma_zfree(V_pf_state_scrub_z, state->src.scrub);
	if (state->dst.scrub)
		uma_zfree(V_pf_state_scrub_z, state->dst.scrub);

	/* Someday... flush the TCP segment reassembly descriptors. */
}
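/*
 * Per-state TCP normalization: enforce the minimum TTL seen on the
 * connection, modulate timestamps in both directions, and run the PAWS
 * checks explained in the long comment below, dropping segments whose
 * timestamps fall outside the computed window.
 */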
int
pf_normalize_tcp_stateful(struct mbuf *m, int off, struct pf_pdesc *pd,
    u_short *reason, struct tcphdr *th, struct pf_state *state,
    struct pf_state_peer *src, struct pf_state_peer *dst, int *writeback)
{
	struct timeval uptime;
	u_int32_t tsval, tsecr;
	u_int tsval_from_last;
	u_int8_t hdr[60];
	u_int8_t *opt;
	int copyback = 0;
	int got_ts = 0;

	KASSERT((src->scrub || dst->scrub),
	    ("%s: src->scrub && dst->scrub!", __func__));

	/*
	 * Enforce the minimum TTL seen for this connection.  Negate a common
	 * technique to evade an intrusion detection system and confuse
	 * firewall state code.
	 */
	switch (pd->af) {
#ifdef INET
	case AF_INET: {
		if (src->scrub) {
			struct ip *h = mtod(m, struct ip *);
			if (h->ip_ttl > src->scrub->pfss_ttl)
				src->scrub->pfss_ttl = h->ip_ttl;
			h->ip_ttl = src->scrub->pfss_ttl;
		}
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		if (src->scrub) {
			struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
			if (h->ip6_hlim > src->scrub->pfss_ttl)
				src->scrub->pfss_ttl = h->ip6_hlim;
			h->ip6_hlim = src->scrub->pfss_ttl;
		}
		break;
	}
#endif /* INET6 */
	}

	if (th->th_off > (sizeof(struct tcphdr) >> 2) &&
	    ((src->scrub && (src->scrub->pfss_flags & PFSS_TIMESTAMP)) ||
	    (dst->scrub && (dst->scrub->pfss_flags & PFSS_TIMESTAMP))) &&
	    pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
		/* Diddle with TCP options */
		int hlen;
		opt = hdr + sizeof(struct tcphdr);
		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
		while (hlen >= TCPOLEN_TIMESTAMP) {
			switch (*opt) {
			case TCPOPT_EOL:	/* FALLTHROUGH */
			case TCPOPT_NOP:
				opt++;
				hlen--;
				break;
			case TCPOPT_TIMESTAMP:
				/*
				 * Modulate the timestamps.  Can be used for
				 * NAT detection, OS uptime determination or
				 * reboot detection.
				 */

				if (got_ts) {
					/* Huh?  Multiple timestamps!? */
					if (V_pf_status.debug >= PF_DEBUG_MISC) {
						DPFPRINTF(("multiple TS??"));
						pf_print_state(state);
						printf("\n");
					}
					REASON_SET(reason, PFRES_TS);
					return (PF_DROP);
				}
				if (opt[1] >= TCPOLEN_TIMESTAMP) {
					memcpy(&tsval, &opt[2],
					    sizeof(u_int32_t));
					if (tsval && src->scrub &&
					    (src->scrub->pfss_flags &
					    PFSS_TIMESTAMP)) {
						tsval = ntohl(tsval);
						pf_change_proto_a(m, &opt[2],
						    &th->th_sum,
						    htonl(tsval +
						    src->scrub->pfss_ts_mod),
						    0);
						copyback = 1;
					}

					/* Modulate TS reply iff valid (!0) */
					memcpy(&tsecr, &opt[6],
					    sizeof(u_int32_t));
					if (tsecr && dst->scrub &&
					    (dst->scrub->pfss_flags &
					    PFSS_TIMESTAMP)) {
						tsecr = ntohl(tsecr)
						    - dst->scrub->pfss_ts_mod;
						pf_change_proto_a(m, &opt[6],
						    &th->th_sum, htonl(tsecr),
						    0);
						copyback = 1;
					}
					got_ts = 1;
				}
				/* FALLTHROUGH */
			default:
				hlen -= MAX(opt[1], 2);
				opt += MAX(opt[1], 2);
				break;
			}
		}
		if (copyback) {
			/* Copyback the options, caller copies back header */
			*writeback = 1;
			m_copyback(m, off + sizeof(struct tcphdr),
			    (th->th_off << 2) - sizeof(struct tcphdr), hdr +
			    sizeof(struct tcphdr));
		}
	}
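	/*
	 * The wrap arithmetic behind the limits below: at the fastest
	 * allowed clock of one tick per millisecond, half the 32-bit
	 * timestamp space (2^31 ms) is roughly 24.8 days, so SEQ-style
	 * comparisons stay meaningful for about 24 days of idle time and,
	 * lacking a tight lower bound on the echo, for 12 days of
	 * connection lifetime.
	 */
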
	/*
	 * Must invalidate PAWS checks on connections idle for too long.
	 * The fastest allowed timestamp clock is 1ms.  That turns out to
	 * be about 24 days before it wraps.  XXX Right now our lowerbound
	 * TS echo check only works for the first 12 days of a connection
	 * when the TS has exhausted half its 32bit space.
	 */
#define TS_MAX_IDLE	(24*24*60*60)
#define TS_MAX_CONN	(12*24*60*60)	/* XXX remove when better tsecr check */

	getmicrouptime(&uptime);
	if (src->scrub && (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (uptime.tv_sec - src->scrub->pfss_last.tv_sec > TS_MAX_IDLE ||
	    time_uptime - state->creation > TS_MAX_CONN)) {
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			DPFPRINTF(("src idled out of PAWS\n"));
			pf_print_state(state);
			printf("\n");
		}
		src->scrub->pfss_flags = (src->scrub->pfss_flags & ~PFSS_PAWS)
		    | PFSS_PAWS_IDLED;
	}
	if (dst->scrub && (dst->scrub->pfss_flags & PFSS_PAWS) &&
	    uptime.tv_sec - dst->scrub->pfss_last.tv_sec > TS_MAX_IDLE) {
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			DPFPRINTF(("dst idled out of PAWS\n"));
			pf_print_state(state);
			printf("\n");
		}
		dst->scrub->pfss_flags = (dst->scrub->pfss_flags & ~PFSS_PAWS)
		    | PFSS_PAWS_IDLED;
	}

	if (got_ts && src->scrub && dst->scrub &&
	    (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (dst->scrub->pfss_flags & PFSS_PAWS)) {
		/*
		 * Validate that the timestamps are "in-window".
		 * RFC1323 describes TCP Timestamp options that allow
		 * measurement of RTT (round trip time) and PAWS
		 * (protection against wrapped sequence numbers).  PAWS
		 * gives us a set of rules for rejecting packets on
		 * long fat pipes (packets that were somehow delayed
		 * in transit longer than the time it took to send the
		 * full TCP sequence space of 4Gb).  We can use these
		 * rules and infer a few others that will let us treat
		 * the 32bit timestamp and the 32bit echoed timestamp
		 * as sequence numbers to prevent a blind attacker from
		 * inserting packets into a connection.
		 *
		 * RFC1323 tells us:
		 *  - The timestamp on this packet must be greater than
		 *    or equal to the last value echoed by the other
		 *    endpoint.  The RFC says those will be discarded
		 *    since it is a dup that has already been acked.
		 *    This gives us a lowerbound on the timestamp.
		 *        timestamp >= other last echoed timestamp
		 *  - The timestamp will be less than or equal to
		 *    the last timestamp plus the time between the
		 *    last packet and now.  The RFC defines the max
		 *    clock rate as 1ms.  We will allow clocks to be
		 *    up to 10% fast and will allow a total difference
		 *    of 30 seconds due to a route change.  And this
		 *    gives us an upperbound on the timestamp.
		 *        timestamp <= last timestamp + max ticks
		 *    We have to be careful here.  Windows will send an
		 *    initial timestamp of zero and then initialize it
		 *    to a random value after the 3whs; presumably to
		 *    avoid a DoS by having to call an expensive RNG
		 *    during a SYN flood.  Proof MS has at least one
		 *    good security geek.
		 *
		 *  - The TCP timestamp option must also echo the other
		 *    endpoint's timestamp.  The timestamp echoed is the
		 *    one carried on the earliest unacknowledged segment
		 *    on the left edge of the sequence window.  The RFC
		 *    states that the host will reject any echoed
		 *    timestamps that were larger than any ever sent.
		 *    This gives us an upperbound on the TS echo.
		 *        tsecr <= largest_tsval
		 *  - The lowerbound on the TS echo is a little more
		 *    tricky to determine.  The other endpoint's echoed
		 *    values will not decrease.  But there may be
		 *    network conditions that re-order packets and
		 *    cause our view of them to decrease.  For now the
		 *    only lowerbound we can safely determine is that
		 *    the TS echo will never be less than the original
		 *    TS.  XXX There is probably a better lowerbound.
		 *    Remove TS_MAX_CONN with better lowerbound check.
		 *        tsecr >= other original TS
		 *
		 * It is also important to note that the fastest
		 * timestamp clock of 1ms will wrap its 32bit space in
		 * 24 days.  So we just disable TS checking after 24
		 * days of idle time.  We actually must use a 12d
		 * connection limit until we can come up with a better
		 * lowerbound to the TS echo check.
		 */
		struct timeval delta_ts;
		int ts_fudge;

		/*
		 * PFTM_TS_DIFF is how many seconds of leeway to allow
		 * a host's timestamp.  This can happen if the previous
		 * packet got delayed in transit for much longer than
		 * this packet.
		 */
		if ((ts_fudge = state->rule.ptr->timeout[PFTM_TS_DIFF]) == 0)
			ts_fudge = V_pf_default_rule.timeout[PFTM_TS_DIFF];

		/* Calculate max ticks since the last timestamp */
#define TS_MAXFREQ	1100		/* RFC max TS freq of 1Khz + 10% skew */
#define TS_MICROSECS	1000000		/* microseconds per second */
		delta_ts = uptime;
		timevalsub(&delta_ts, &src->scrub->pfss_last);
		tsval_from_last = (delta_ts.tv_sec + ts_fudge) * TS_MAXFREQ;
		tsval_from_last += delta_ts.tv_usec / (TS_MICROSECS/TS_MAXFREQ);

		if ((src->state >= TCPS_ESTABLISHED &&
		    dst->state >= TCPS_ESTABLISHED) &&
		    (SEQ_LT(tsval, dst->scrub->pfss_tsecr) ||
		    SEQ_GT(tsval, src->scrub->pfss_tsval + tsval_from_last) ||
		    (tsecr && (SEQ_GT(tsecr, dst->scrub->pfss_tsval) ||
		    SEQ_LT(tsecr, dst->scrub->pfss_tsval0))))) {
			/*
			 * Bad RFC1323 implementation or an insertion attack.
			 *
			 *  - Solaris 2.6 and 2.7 are known to send another ACK
			 *    after the FIN,FIN|ACK,ACK closing that carries
			 *    an old timestamp.
			 */

			DPFPRINTF(("Timestamp failed %c%c%c%c\n",
			    SEQ_LT(tsval, dst->scrub->pfss_tsecr) ? '0' : ' ',
			    SEQ_GT(tsval, src->scrub->pfss_tsval +
			    tsval_from_last) ? '1' : ' ',
			    SEQ_GT(tsecr, dst->scrub->pfss_tsval) ? '2' : ' ',
			    SEQ_LT(tsecr, dst->scrub->pfss_tsval0) ? '3' : ' '));
			DPFPRINTF((" tsval: %u  tsecr: %u  +ticks: %u  "
			    "idle: %jus %lums\n",
			    tsval, tsecr, tsval_from_last,
			    (uintmax_t)delta_ts.tv_sec,
			    delta_ts.tv_usec / 1000));
			DPFPRINTF((" src->tsval: %u  tsecr: %u\n",
			    src->scrub->pfss_tsval, src->scrub->pfss_tsecr));
			DPFPRINTF((" dst->tsval: %u  tsecr: %u  tsval0: %u"
			    "\n", dst->scrub->pfss_tsval,
			    dst->scrub->pfss_tsecr, dst->scrub->pfss_tsval0));
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
				pf_print_state(state);
				pf_print_flags(th->th_flags);
				printf("\n");
			}
			REASON_SET(reason, PFRES_TS);
			return (PF_DROP);
		}

		/* XXX I'd really like to require tsecr but it's optional */

	} else if (!got_ts && (th->th_flags & TH_RST) == 0 &&
	    ((src->state == TCPS_ESTABLISHED && dst->state == TCPS_ESTABLISHED)
	    || pd->p_len > 0 || (th->th_flags & TH_SYN)) &&
	    src->scrub && dst->scrub &&
	    (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (dst->scrub->pfss_flags & PFSS_PAWS)) {
		/*
		 * Didn't send a timestamp.  Timestamps aren't really useful
		 * when:
		 *  - connection opening or closing (often not even sent),
		 *    but we must not let an attacker put a FIN on a
		 *    data packet to sneak it through our ESTABLISHED check.
		 *  - on a TCP reset.  RFC suggests not even looking at TS.
		 *  - on an empty ACK.  The TS will not be echoed so it will
		 *    probably not help keep the RTT calculation in sync and
		 *    there isn't as much danger when the sequence numbers
		 *    got wrapped.  So some stacks don't include TS on empty
		 *    ACKs :-(
		 *
		 * To minimize the disruption to mostly RFC1323 conformant
		 * stacks, we will only require timestamps on data packets.
		 *
		 * And what do ya know, we cannot require timestamps on data
		 * packets.  There appear to be devices that do legitimate
		 * TCP connection hijacking.  There are HTTP devices that allow
		 * a 3whs (with timestamps) and then buffer the HTTP request.
		 * If the intermediate device has the HTTP response cache, it
		 * will spoof the response but not bother timestamping its
		 * packets.  So we can look for the presence of a timestamp in
		 * the first data packet and if there, require it in all future
		 * packets.
		 */

		if (pd->p_len > 0 && (src->scrub->pfss_flags & PFSS_DATA_TS)) {
			/*
			 * Hey!  Someone tried to sneak a packet in.  Or the
			 * stack changed its RFC1323 behavior?!?!
			 */
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
				DPFPRINTF(("Did not receive expected RFC1323 "
				    "timestamp\n"));
				pf_print_state(state);
				pf_print_flags(th->th_flags);
				printf("\n");
			}
			REASON_SET(reason, PFRES_TS);
			return (PF_DROP);
		}
	}

	/*
	 * We will note if a host sends his data packets with or without
	 * timestamps.  And require all data packets to contain a timestamp
	 * if the first does.  PAWS implicitly requires that all data packets
	 * be timestamped.  But I think there are middle-man devices that
	 * hijack TCP streams immediately after the 3whs and don't timestamp
	 * their packets (seen in a WWW accelerator or cache).
	 */
	if (pd->p_len > 0 && src->scrub && (src->scrub->pfss_flags &
	    (PFSS_TIMESTAMP|PFSS_DATA_TS|PFSS_DATA_NOTS)) == PFSS_TIMESTAMP) {
		if (got_ts)
			src->scrub->pfss_flags |= PFSS_DATA_TS;
		else {
			src->scrub->pfss_flags |= PFSS_DATA_NOTS;
			if (V_pf_status.debug >= PF_DEBUG_MISC && dst->scrub &&
			    (dst->scrub->pfss_flags & PFSS_TIMESTAMP)) {
				/* Don't warn if other host rejected RFC1323 */
				DPFPRINTF(("Broken RFC1323 stack did not "
				    "timestamp data packet. Disabled PAWS "
				    "security.\n"));
				pf_print_state(state);
				pf_print_flags(th->th_flags);
				printf("\n");
			}
		}
	}

	/*
	 * Update PAWS values
	 */
	if (got_ts && src->scrub && PFSS_TIMESTAMP == (src->scrub->pfss_flags &
	    (PFSS_PAWS_IDLED|PFSS_TIMESTAMP))) {
		getmicrouptime(&src->scrub->pfss_last);
		if (SEQ_GEQ(tsval, src->scrub->pfss_tsval) ||
		    (src->scrub->pfss_flags & PFSS_PAWS) == 0)
			src->scrub->pfss_tsval = tsval;

		if (tsecr) {
			if (SEQ_GEQ(tsecr, src->scrub->pfss_tsecr) ||
			    (src->scrub->pfss_flags & PFSS_PAWS) == 0)
				src->scrub->pfss_tsecr = tsecr;

			if ((src->scrub->pfss_flags & PFSS_PAWS) == 0 &&
			    (SEQ_LT(tsval, src->scrub->pfss_tsval0) ||
			    src->scrub->pfss_tsval0 == 0)) {
				/* tsval0 MUST be the lowest timestamp */
				src->scrub->pfss_tsval0 = tsval;
			}

			/* Only fully initialized after a TS gets echoed */
			if ((src->scrub->pfss_flags & PFSS_PAWS) == 0)
				src->scrub->pfss_flags |= PFSS_PAWS;
		}
	}

	/* I have a dream....  TCP segment reassembly.... */
	return (0);
}

static int
pf_normalize_tcpopt(struct pf_rule *r, struct mbuf *m, struct tcphdr *th,
    int off, sa_family_t af)
{
	u_int16_t	*mss;
	int		 thoff;
	int		 opt, cnt, optlen = 0;
	int		 rewrite = 0;
	u_char		 opts[TCP_MAXOLEN];
	u_char		*optp = opts;

	thoff = th->th_off << 2;
	cnt = thoff - sizeof(struct tcphdr);

	if (cnt > 0 && !pf_pull_hdr(m, off + sizeof(*th), opts, cnt,
	    NULL, NULL, af))
		return (rewrite);

	for (; cnt > 0; cnt -= optlen, optp += optlen) {
		opt = optp[0];
		if (opt == TCPOPT_EOL)
			break;
		if (opt == TCPOPT_NOP)
			optlen = 1;
		else {
			if (cnt < 2)
				break;
			optlen = optp[1];
			if (optlen < 2 || optlen > cnt)
				break;
		}
		switch (opt) {
		case TCPOPT_MAXSEG:
			mss = (u_int16_t *)(optp + 2);
			if ((ntohs(*mss)) > r->max_mss) {
				th->th_sum = pf_proto_cksum_fixup(m,
				    th->th_sum, *mss, htons(r->max_mss), 0);
				*mss = htons(r->max_mss);
				rewrite = 1;
			}
			break;
		default:
			break;
		}
	}

	if (rewrite)
		m_copyback(m, off + sizeof(*th), thoff - sizeof(*th), opts);

	return (rewrite);
}
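/*
 * Apply the per-rule scrub options to an IPv4 header: clear IP_DF (no-df),
 * raise the TTL to the configured minimum (min-ttl), rewrite the TOS byte
 * while preserving the ECN bits (set-tos), and replace the IP id of
 * non-fragmented packets (random-id).  Every rewrite is folded into the
 * header checksum with pf_cksum_fixup().
 */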
#ifdef INET
static void
pf_scrub_ip(struct mbuf **m0, u_int32_t flags, u_int8_t min_ttl, u_int8_t tos)
{
	struct mbuf	*m = *m0;
	struct ip	*h = mtod(m, struct ip *);

	/* Clear IP_DF if no-df was requested */
	if (flags & PFRULE_NODF && h->ip_off & htons(IP_DF)) {
		u_int16_t ip_off = h->ip_off;

		h->ip_off &= htons(~IP_DF);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
	}

	/* Enforce a minimum ttl, may cause endless packet loops */
	if (min_ttl && h->ip_ttl < min_ttl) {
		u_int16_t ip_ttl = h->ip_ttl;

		h->ip_ttl = min_ttl;
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_ttl, h->ip_ttl, 0);
	}

	/* Enforce tos */
	if (flags & PFRULE_SET_TOS) {
		u_int16_t	ov, nv;

		ov = *(u_int16_t *)h;
		h->ip_tos = tos | (h->ip_tos & IPTOS_ECN_MASK);
		nv = *(u_int16_t *)h;

		h->ip_sum = pf_cksum_fixup(h->ip_sum, ov, nv, 0);
	}

	/* random-id, but not for fragments */
	if (flags & PFRULE_RANDOMID && !(h->ip_off & ~htons(IP_DF))) {
		uint16_t ip_id = h->ip_id;

		ip_fillid(h);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_id, h->ip_id, 0);
	}
}
#endif /* INET */

#ifdef INET6
static void
pf_scrub_ip6(struct mbuf **m0, u_int8_t min_ttl)
{
	struct mbuf	*m = *m0;
	struct ip6_hdr	*h = mtod(m, struct ip6_hdr *);

	/* Enforce a minimum ttl, may cause endless packet loops */
	if (min_ttl && h->ip6_hlim < min_ttl)
		h->ip6_hlim = min_ttl;
}
#endif