/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright 2001 Niels Provos <provos@citi.umich.edu>
 * Copyright 2011 Alexander Bluhm <bluhm@openbsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *	$OpenBSD: pf_norm.c,v 1.114 2009/01/29 14:11:45 henning Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_pf.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/refcount.h>
#include <sys/rwlock.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/vnet.h>
#include <net/pfvar.h>
#include <net/if_pflog.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>

#ifdef INET6
#include <netinet/ip6.h>
#endif /* INET6 */

struct pf_frent {
	TAILQ_ENTRY(pf_frent)	fr_next;
	struct mbuf	*fe_m;
	uint16_t	fe_hdrlen;	/* ipv4 header length with ip options;
					   for ipv6 all headers up to and
					   incl. the fragment header */
	uint16_t	fe_extoff;	/* last extension header offset or 0 */
	uint16_t	fe_len;		/* fragment length */
	uint16_t	fe_off;		/* fragment offset */
	uint16_t	fe_mff;		/* more fragment flag */
};

struct pf_fragment_cmp {
	struct pf_addr	frc_src;
	struct pf_addr	frc_dst;
	uint32_t	frc_id;
	sa_family_t	frc_af;
	uint8_t		frc_proto;
};

struct pf_fragment {
	struct pf_fragment_cmp	fr_key;
#define fr_src	fr_key.frc_src
#define fr_dst	fr_key.frc_dst
#define fr_id	fr_key.frc_id
#define fr_af	fr_key.frc_af
#define fr_proto	fr_key.frc_proto

	RB_ENTRY(pf_fragment) fr_entry;
	TAILQ_ENTRY(pf_fragment) frag_next;
	uint32_t	fr_timeout;
	uint16_t	fr_maxlen;	/* maximum length of single fragment */
	TAILQ_HEAD(pf_fragq, pf_frent) fr_queue;
};

struct pf_fragment_tag {
	uint16_t	ft_hdrlen;	/* header length of reassembled pkt */
	uint16_t	ft_extoff;	/* last extension header offset or 0 */
	uint16_t	ft_maxlen;	/* maximum fragment payload length */
	uint32_t	ft_id;		/* fragment id */
};
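/*
 * Reassembly bookkeeping: every buffered fragment is held in a pf_frent
 * that keeps its mbuf plus the parsed header/offset/length fields.  The
 * frents of one original packet hang off a pf_fragment, sorted by offset
 * in fr_queue.  Each pf_fragment is reachable two ways: through the
 * red-black tree V_pf_frag_tree, keyed by the pf_fragment_cmp tuple
 * (src, dst, id, af, proto), for lookup, and through the list
 * V_pf_fragqueue, kept in LRU order, so expiry and flushing can always
 * reap the oldest entry from the tail.
 */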
static struct mtx pf_frag_mtx;
MTX_SYSINIT(pf_frag_mtx, &pf_frag_mtx, "pf fragments", MTX_DEF);
#define PF_FRAG_LOCK()		mtx_lock(&pf_frag_mtx)
#define PF_FRAG_UNLOCK()	mtx_unlock(&pf_frag_mtx)
#define PF_FRAG_ASSERT()	mtx_assert(&pf_frag_mtx, MA_OWNED)

VNET_DEFINE(uma_zone_t, pf_state_scrub_z);	/* XXX: shared with pfsync */

static VNET_DEFINE(uma_zone_t, pf_frent_z);
#define	V_pf_frent_z	VNET(pf_frent_z)
static VNET_DEFINE(uma_zone_t, pf_frag_z);
#define	V_pf_frag_z	VNET(pf_frag_z)

TAILQ_HEAD(pf_fragqueue, pf_fragment);
TAILQ_HEAD(pf_cachequeue, pf_fragment);
static VNET_DEFINE(struct pf_fragqueue, pf_fragqueue);
#define	V_pf_fragqueue	VNET(pf_fragqueue)
RB_HEAD(pf_frag_tree, pf_fragment);
static VNET_DEFINE(struct pf_frag_tree, pf_frag_tree);
#define	V_pf_frag_tree	VNET(pf_frag_tree)
static int		 pf_frag_compare(struct pf_fragment *,
			    struct pf_fragment *);
static RB_PROTOTYPE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
static RB_GENERATE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);

static void	pf_flush_fragments(void);
static void	pf_free_fragment(struct pf_fragment *);
static void	pf_remove_fragment(struct pf_fragment *);
static int	pf_normalize_tcpopt(struct pf_rule *, struct mbuf *,
		    struct tcphdr *, int, sa_family_t);
static struct pf_frent *pf_create_fragment(u_short *);
static struct pf_fragment *pf_find_fragment(struct pf_fragment_cmp *key,
		    struct pf_frag_tree *tree);
static struct pf_fragment *pf_fillup_fragment(struct pf_fragment_cmp *,
		    struct pf_frent *, u_short *);
static int	pf_isfull_fragment(struct pf_fragment *);
static struct mbuf *pf_join_fragment(struct pf_fragment *);
#ifdef INET
static void	pf_scrub_ip(struct mbuf **, uint32_t, uint8_t, uint8_t);
static int	pf_reassemble(struct mbuf **, struct ip *, int, u_short *);
#endif	/* INET */
#ifdef INET6
static int	pf_reassemble6(struct mbuf **, struct ip6_hdr *,
		    struct ip6_frag *, uint16_t, uint16_t, u_short *);
static void	pf_scrub_ip6(struct mbuf **, uint8_t);
#endif	/* INET6 */

#define	DPFPRINTF(x) do {				\
	if (V_pf_status.debug >= PF_DEBUG_MISC) {	\
		printf("%s: ", __func__);		\
		printf x ;				\
	}						\
} while (0)

#ifdef INET
static void
pf_ip2key(struct ip *ip, int dir, struct pf_fragment_cmp *key)
{

	key->frc_src.v4 = ip->ip_src;
	key->frc_dst.v4 = ip->ip_dst;
	key->frc_af = AF_INET;
	key->frc_proto = ip->ip_p;
	key->frc_id = ip->ip_id;
}
#endif	/* INET */

void
pf_normalize_init(void)
{

	V_pf_frag_z = uma_zcreate("pf frags", sizeof(struct pf_fragment),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	V_pf_frent_z = uma_zcreate("pf frag entries", sizeof(struct pf_frent),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	V_pf_state_scrub_z = uma_zcreate("pf state scrubs",
	    sizeof(struct pf_state_scrub), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);

	V_pf_limits[PF_LIMIT_FRAGS].zone = V_pf_frent_z;
	V_pf_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT;
	uma_zone_set_max(V_pf_frent_z, PFFRAG_FRENT_HIWAT);
	uma_zone_set_warning(V_pf_frent_z, "PF frag entries limit reached");

	TAILQ_INIT(&V_pf_fragqueue);
}

void
pf_normalize_cleanup(void)
{

	uma_zdestroy(V_pf_state_scrub_z);
	uma_zdestroy(V_pf_frent_z);
	uma_zdestroy(V_pf_frag_z);
}
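/*
 * Lookup key comparison for the fragment tree.  The cheap scalar fields
 * (id, proto, af) are compared before the full address comparison, so
 * most non-matching entries are rejected without touching the 128-bit
 * addresses.
 */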
static int
pf_frag_compare(struct pf_fragment *a, struct pf_fragment *b)
{
	int	diff;

	if ((diff = a->fr_id - b->fr_id) != 0)
		return (diff);
	if ((diff = a->fr_proto - b->fr_proto) != 0)
		return (diff);
	if ((diff = a->fr_af - b->fr_af) != 0)
		return (diff);
	if ((diff = pf_addr_cmp(&a->fr_src, &b->fr_src, a->fr_af)) != 0)
		return (diff);
	if ((diff = pf_addr_cmp(&a->fr_dst, &b->fr_dst, a->fr_af)) != 0)
		return (diff);
	return (0);
}

void
pf_purge_expired_fragments(void)
{
	struct pf_fragment	*frag;
	u_int32_t		 expire = time_uptime -
				    V_pf_default_rule.timeout[PFTM_FRAG];

	PF_FRAG_LOCK();
	while ((frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue)) != NULL) {
		if (frag->fr_timeout > expire)
			break;

		DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
		pf_free_fragment(frag);
	}

	PF_FRAG_UNLOCK();
}

/*
 * Try to flush old fragments to make space for new ones
 */
static void
pf_flush_fragments(void)
{
	struct pf_fragment	*frag;
	int			 goal;

	PF_FRAG_ASSERT();

	goal = uma_zone_get_cur(V_pf_frent_z) * 9 / 10;
	DPFPRINTF(("trying to free %d frag entries\n", goal));
	while (goal < uma_zone_get_cur(V_pf_frent_z)) {
		frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue);
		if (frag)
			pf_free_fragment(frag);
		else
			break;
	}
}

/* Frees the fragments and all associated entries */
static void
pf_free_fragment(struct pf_fragment *frag)
{
	struct pf_frent		*frent;

	PF_FRAG_ASSERT();

	/* Free all fragments */
	for (frent = TAILQ_FIRST(&frag->fr_queue); frent;
	    frent = TAILQ_FIRST(&frag->fr_queue)) {
		TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);

		m_freem(frent->fe_m);
		uma_zfree(V_pf_frent_z, frent);
	}

	pf_remove_fragment(frag);
}

static struct pf_fragment *
pf_find_fragment(struct pf_fragment_cmp *key, struct pf_frag_tree *tree)
{
	struct pf_fragment	*frag;

	PF_FRAG_ASSERT();

	frag = RB_FIND(pf_frag_tree, tree, (struct pf_fragment *)key);
	if (frag != NULL) {
		/* XXX Are we sure we want to update the timeout? */
		frag->fr_timeout = time_uptime;
		TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);
		TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next);
	}

	return (frag);
}

/* Removes a fragment from the fragment queue and frees the fragment */
static void
pf_remove_fragment(struct pf_fragment *frag)
{

	PF_FRAG_ASSERT();

	RB_REMOVE(pf_frag_tree, &V_pf_frag_tree, frag);
	TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);
	uma_zfree(V_pf_frag_z, frag);
}

static struct pf_frent *
pf_create_fragment(u_short *reason)
{
	struct pf_frent *frent;

	PF_FRAG_ASSERT();

	frent = uma_zalloc(V_pf_frent_z, M_NOWAIT);
	if (frent == NULL) {
		pf_flush_fragments();
		frent = uma_zalloc(V_pf_frent_z, M_NOWAIT);
		if (frent == NULL) {
			REASON_SET(reason, PFRES_MEMORY);
			return (NULL);
		}
	}

	return (frent);
}

static struct pf_fragment *
pf_fillup_fragment(struct pf_fragment_cmp *key, struct pf_frent *frent,
    u_short *reason)
{
	struct pf_frent		*after, *next, *prev;
	struct pf_fragment	*frag;
	uint16_t		 total;

	PF_FRAG_ASSERT();

	/* No empty fragments. */
	if (frent->fe_len == 0) {
		DPFPRINTF(("bad fragment: len 0"));
		goto bad_fragment;
	}
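	/*
	 * The wire format stores the fragment offset in units of 8 bytes,
	 * so every fragment except the last must carry a payload length
	 * that is a multiple of 8.  E.g. a 1500 byte MTU leaves room for
	 * 1480 payload bytes (185 * 8) after the 20 byte IPv4 header, so
	 * intermediate fragments stay aligned.
	 */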
	/* All fragments are 8 byte aligned. */
	if (frent->fe_mff && (frent->fe_len & 0x7)) {
		DPFPRINTF(("bad fragment: mff and len %d", frent->fe_len));
		goto bad_fragment;
	}

	/* Respect maximum length, IP_MAXPACKET == IPV6_MAXPACKET. */
	if (frent->fe_off + frent->fe_len > IP_MAXPACKET) {
		DPFPRINTF(("bad fragment: max packet %d",
		    frent->fe_off + frent->fe_len));
		goto bad_fragment;
	}

	DPFPRINTF((key->frc_af == AF_INET ?
	    "reass frag %d @ %d-%d" : "reass frag %#08x @ %d-%d",
	    key->frc_id, frent->fe_off, frent->fe_off + frent->fe_len));

	/* Fully buffer all of the fragments in this fragment queue. */
	frag = pf_find_fragment(key, &V_pf_frag_tree);

	/* Create a new reassembly queue for this packet. */
	if (frag == NULL) {
		frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
		if (frag == NULL) {
			pf_flush_fragments();
			frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
			if (frag == NULL) {
				REASON_SET(reason, PFRES_MEMORY);
				goto drop_fragment;
			}
		}

		*(struct pf_fragment_cmp *)frag = *key;
		frag->fr_timeout = time_uptime;
		frag->fr_maxlen = frent->fe_len;
		TAILQ_INIT(&frag->fr_queue);

		RB_INSERT(pf_frag_tree, &V_pf_frag_tree, frag);
		TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next);

		/* We do not have a previous fragment. */
		TAILQ_INSERT_HEAD(&frag->fr_queue, frent, fr_next);

		return (frag);
	}

	KASSERT(!TAILQ_EMPTY(&frag->fr_queue), ("!TAILQ_EMPTY()->fr_queue"));

	/* Remember maximum fragment len for refragmentation. */
	if (frent->fe_len > frag->fr_maxlen)
		frag->fr_maxlen = frent->fe_len;

	/* Maximum data we have seen already. */
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
	    TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;

	/* Non terminal fragments must have more fragments flag. */
	if (frent->fe_off + frent->fe_len < total && !frent->fe_mff)
		goto bad_fragment;

	/* Check if we saw the last fragment already. */
	if (!TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_mff) {
		if (frent->fe_off + frent->fe_len > total ||
		    (frent->fe_off + frent->fe_len == total && frent->fe_mff))
			goto bad_fragment;
	} else {
		if (frent->fe_off + frent->fe_len == total && !frent->fe_mff)
			goto bad_fragment;
	}

	/* Find a fragment after the current one. */
	prev = NULL;
	TAILQ_FOREACH(after, &frag->fr_queue, fr_next) {
		if (after->fe_off > frent->fe_off)
			break;
		prev = after;
	}

	KASSERT(prev != NULL || after != NULL,
	    ("prev != NULL || after != NULL"));

	if (prev != NULL && prev->fe_off + prev->fe_len > frent->fe_off) {
		uint16_t precut;

		precut = prev->fe_off + prev->fe_len - frent->fe_off;
		if (precut >= frent->fe_len)
			goto bad_fragment;
		DPFPRINTF(("overlap -%d", precut));
		m_adj(frent->fe_m, precut);
		frent->fe_off += precut;
		frent->fe_len -= precut;
	}
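	/*
	 * Trim or drop any existing entries that the tail of the new
	 * fragment overlaps.  Example: the new fragment covers bytes 0-31
	 * and an existing entry starts at offset 24 with 16 bytes;
	 * aftercut is 0 + 32 - 24 = 8, so 8 bytes are shaved off the
	 * front of that entry, which becomes offset 32, length 8.  An
	 * entry that the new fragment covers completely is freed instead.
	 */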
	for (; after != NULL && frent->fe_off + frent->fe_len > after->fe_off;
	    after = next) {
		uint16_t aftercut;

		aftercut = frent->fe_off + frent->fe_len - after->fe_off;
		DPFPRINTF(("adjust overlap %d", aftercut));
		if (aftercut < after->fe_len) {
			m_adj(after->fe_m, aftercut);
			after->fe_off += aftercut;
			after->fe_len -= aftercut;
			break;
		}

		/* This fragment is completely overlapped, lose it. */
		next = TAILQ_NEXT(after, fr_next);
		m_freem(after->fe_m);
		TAILQ_REMOVE(&frag->fr_queue, after, fr_next);
		uma_zfree(V_pf_frent_z, after);
	}

	if (prev == NULL)
		TAILQ_INSERT_HEAD(&frag->fr_queue, frent, fr_next);
	else
		TAILQ_INSERT_AFTER(&frag->fr_queue, prev, frent, fr_next);

	return (frag);

bad_fragment:
	REASON_SET(reason, PFRES_FRAG);
drop_fragment:
	uma_zfree(V_pf_frent_z, frent);
	return (NULL);
}

static int
pf_isfull_fragment(struct pf_fragment *frag)
{
	struct pf_frent	*frent, *next;
	uint16_t off, total;

	/* Check if we are completely reassembled */
	if (TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_mff)
		return (0);

	/* Maximum data we have seen already */
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
	    TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;

	/* Check if we have all the data */
	off = 0;
	for (frent = TAILQ_FIRST(&frag->fr_queue); frent; frent = next) {
		next = TAILQ_NEXT(frent, fr_next);

		off += frent->fe_len;
		if (off < total && (next == NULL || next->fe_off != off)) {
			DPFPRINTF(("missing fragment at %d, next %d, total %d",
			    off, next == NULL ? -1 : next->fe_off, total));
			return (0);
		}
	}
	DPFPRINTF(("%d < %d?", off, total));
	if (off < total)
		return (0);
	KASSERT(off == total, ("off == total"));

	return (1);
}
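/*
 * Concatenate the sorted, gap-free fragment chain into one packet.  The
 * first entry keeps its IP header; every later mbuf chain has its header
 * stripped with m_adj() and is linked on with m_cat(), which splices
 * mbufs without copying payload.  m_adj() with a non-positive length
 * trims the excess bytes from the tail of the chain.
 */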
static struct mbuf *
pf_join_fragment(struct pf_fragment *frag)
{
	struct mbuf *m, *m2;
	struct pf_frent	*frent, *next;

	frent = TAILQ_FIRST(&frag->fr_queue);
	next = TAILQ_NEXT(frent, fr_next);

	m = frent->fe_m;
	m_adj(m, (frent->fe_hdrlen + frent->fe_len) - m->m_pkthdr.len);
	uma_zfree(V_pf_frent_z, frent);
	for (frent = next; frent != NULL; frent = next) {
		next = TAILQ_NEXT(frent, fr_next);

		m2 = frent->fe_m;
		/* Strip off ip header. */
		m_adj(m2, frent->fe_hdrlen);
		/* Strip off any trailing bytes. */
		m_adj(m2, frent->fe_len - m2->m_pkthdr.len);

		uma_zfree(V_pf_frent_z, frent);
		m_cat(m, m2);
	}

	/* Remove from fragment queue. */
	pf_remove_fragment(frag);

	return (m);
}

#ifdef INET
static int
pf_reassemble(struct mbuf **m0, struct ip *ip, int dir, u_short *reason)
{
	struct mbuf		*m = *m0;
	struct pf_frent		*frent;
	struct pf_fragment	*frag;
	struct pf_fragment_cmp	 key;
	uint16_t		 total, hdrlen;

	/* Get an entry for the fragment queue */
	if ((frent = pf_create_fragment(reason)) == NULL)
		return (PF_DROP);

	frent->fe_m = m;
	frent->fe_hdrlen = ip->ip_hl << 2;
	frent->fe_extoff = 0;
	frent->fe_len = ntohs(ip->ip_len) - (ip->ip_hl << 2);
	frent->fe_off = (ntohs(ip->ip_off) & IP_OFFMASK) << 3;
	frent->fe_mff = ntohs(ip->ip_off) & IP_MF;

	pf_ip2key(ip, dir, &key);

	if ((frag = pf_fillup_fragment(&key, frent, reason)) == NULL)
		return (PF_DROP);

	/* The mbuf is part of the fragment entry, no direct free or access */
	m = *m0 = NULL;

	if (!pf_isfull_fragment(frag))
		return (PF_PASS);	/* drop because *m0 is NULL, no error */

	/* We have all the data */
	frent = TAILQ_FIRST(&frag->fr_queue);
	KASSERT(frent != NULL, ("frent != NULL"));
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
	    TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
	hdrlen = frent->fe_hdrlen;

	m = *m0 = pf_join_fragment(frag);
	frag = NULL;

	if (m->m_flags & M_PKTHDR) {
		int plen = 0;
		for (m = *m0; m; m = m->m_next)
			plen += m->m_len;
		m = *m0;
		m->m_pkthdr.len = plen;
	}

	ip = mtod(m, struct ip *);
	ip->ip_len = htons(hdrlen + total);
	ip->ip_off &= ~(IP_MF|IP_OFFMASK);

	if (hdrlen + total > IP_MAXPACKET) {
		DPFPRINTF(("drop: too big: %d", total));
		ip->ip_len = 0;
		REASON_SET(reason, PFRES_SHORT);
		/* PF_DROP requires a valid mbuf *m0 in pf_test() */
		return (PF_DROP);
	}

	DPFPRINTF(("complete: %p(%d)\n", m, ntohs(ip->ip_len)));
	return (PF_PASS);
}
#endif	/* INET */
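/*
 * The IPv6 variant differs from pf_reassemble() in one important way:
 * routers must not fragment IPv6 packets they forward, so the reassembled
 * packet cannot simply be sent on as one datagram.  Instead the mbuf gets
 * a PF_REASSEMBLED tag carrying the original header length, extension
 * header offset, fragment id and the largest fragment payload seen, and
 * pf_refragment6() uses that tag to cut the packet back up on output.
 */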
#ifdef INET6
static int
pf_reassemble6(struct mbuf **m0, struct ip6_hdr *ip6, struct ip6_frag *fraghdr,
    uint16_t hdrlen, uint16_t extoff, u_short *reason)
{
	struct mbuf		*m = *m0;
	struct pf_frent		*frent;
	struct pf_fragment	*frag;
	struct pf_fragment_cmp	 key;
	struct m_tag		*mtag;
	struct pf_fragment_tag	*ftag;
	int			 off;
	uint32_t		 frag_id;
	uint16_t		 total, maxlen;
	uint8_t			 proto;

	PF_FRAG_LOCK();

	/* Get an entry for the fragment queue. */
	if ((frent = pf_create_fragment(reason)) == NULL) {
		PF_FRAG_UNLOCK();
		return (PF_DROP);
	}

	frent->fe_m = m;
	frent->fe_hdrlen = hdrlen;
	frent->fe_extoff = extoff;
	frent->fe_len = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - hdrlen;
	frent->fe_off = ntohs(fraghdr->ip6f_offlg & IP6F_OFF_MASK);
	frent->fe_mff = fraghdr->ip6f_offlg & IP6F_MORE_FRAG;

	key.frc_src.v6 = ip6->ip6_src;
	key.frc_dst.v6 = ip6->ip6_dst;
	key.frc_af = AF_INET6;
	/* Only the first fragment's protocol is relevant. */
	key.frc_proto = 0;
	key.frc_id = fraghdr->ip6f_ident;

	if ((frag = pf_fillup_fragment(&key, frent, reason)) == NULL) {
		PF_FRAG_UNLOCK();
		return (PF_DROP);
	}

	/* The mbuf is part of the fragment entry, no direct free or access. */
	m = *m0 = NULL;

	if (!pf_isfull_fragment(frag)) {
		PF_FRAG_UNLOCK();
		return (PF_PASS);	/* Drop because *m0 is NULL, no error. */
	}

	/* We have all the data. */
	extoff = frent->fe_extoff;
	maxlen = frag->fr_maxlen;
	frag_id = frag->fr_id;
	frent = TAILQ_FIRST(&frag->fr_queue);
	KASSERT(frent != NULL, ("frent != NULL"));
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
	    TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
	hdrlen = frent->fe_hdrlen - sizeof(struct ip6_frag);

	m = *m0 = pf_join_fragment(frag);
	frag = NULL;

	PF_FRAG_UNLOCK();

	/* Take protocol from first fragment header. */
	m = m_getptr(m, hdrlen + offsetof(struct ip6_frag, ip6f_nxt), &off);
	KASSERT(m, ("%s: short mbuf chain", __func__));
	proto = *(mtod(m, caddr_t) + off);
	m = *m0;

	/* Delete frag6 header */
	if (ip6_deletefraghdr(m, hdrlen, M_NOWAIT) != 0)
		goto fail;

	if (m->m_flags & M_PKTHDR) {
		int plen = 0;
		for (m = *m0; m; m = m->m_next)
			plen += m->m_len;
		m = *m0;
		m->m_pkthdr.len = plen;
	}

	if ((mtag = m_tag_get(PF_REASSEMBLED, sizeof(struct pf_fragment_tag),
	    M_NOWAIT)) == NULL)
		goto fail;
	ftag = (struct pf_fragment_tag *)(mtag + 1);
	ftag->ft_hdrlen = hdrlen;
	ftag->ft_extoff = extoff;
	ftag->ft_maxlen = maxlen;
	ftag->ft_id = frag_id;
	m_tag_prepend(m, mtag);

	ip6 = mtod(m, struct ip6_hdr *);
	ip6->ip6_plen = htons(hdrlen - sizeof(struct ip6_hdr) + total);
	if (extoff) {
		/* Write protocol into next field of last extension header. */
		m = m_getptr(m, extoff + offsetof(struct ip6_ext, ip6e_nxt),
		    &off);
		KASSERT(m, ("%s: short mbuf chain", __func__));
		*(mtod(m, char *) + off) = proto;
		m = *m0;
	} else
		ip6->ip6_nxt = proto;

	if (hdrlen - sizeof(struct ip6_hdr) + total > IPV6_MAXPACKET) {
		DPFPRINTF(("drop: too big: %d", total));
		ip6->ip6_plen = 0;
		REASON_SET(reason, PFRES_SHORT);
		/* PF_DROP requires a valid mbuf *m0 in pf_test6(). */
		return (PF_DROP);
	}

	DPFPRINTF(("complete: %p(%d)", m, ntohs(ip6->ip6_plen)));
	return (PF_PASS);

fail:
	REASON_SET(reason, PFRES_MEMORY);
	/* PF_DROP requires a valid mbuf *m0 in pf_test6(), will free later. */
	return (PF_DROP);
}
#endif	/* INET6 */
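/*
 * Counterpart of the PF_REASSEMBLED tag above: re-fragment the packet on
 * the outbound path.  ip6_fragment() leaves the fragments linked through
 * m_nextpkt behind the original packet; each fragment is then marked
 * M_SKIP_FIREWALL (it was already filtered in reassembled form) and
 * handed to ip6_forward().
 */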
#ifdef INET6
int
pf_refragment6(struct ifnet *ifp, struct mbuf **m0, struct m_tag *mtag)
{
	struct mbuf		*m = *m0, *t;
	struct pf_fragment_tag	*ftag = (struct pf_fragment_tag *)(mtag + 1);
	struct pf_pdesc		 pd;
	uint32_t		 frag_id;
	uint16_t		 hdrlen, extoff, maxlen;
	uint8_t			 proto;
	int			 error, action;

	hdrlen = ftag->ft_hdrlen;
	extoff = ftag->ft_extoff;
	maxlen = ftag->ft_maxlen;
	frag_id = ftag->ft_id;
	m_tag_delete(m, mtag);
	mtag = NULL;
	ftag = NULL;

	if (extoff) {
		int off;

		/* Use protocol from next field of last extension header */
		m = m_getptr(m, extoff + offsetof(struct ip6_ext, ip6e_nxt),
		    &off);
		KASSERT((m != NULL), ("pf_refragment6: short mbuf chain"));
		proto = *(mtod(m, caddr_t) + off);
		*(mtod(m, char *) + off) = IPPROTO_FRAGMENT;
		m = *m0;
	} else {
		struct ip6_hdr *hdr;

		hdr = mtod(m, struct ip6_hdr *);
		proto = hdr->ip6_nxt;
		hdr->ip6_nxt = IPPROTO_FRAGMENT;
	}

	/* The MTU must be a multiple of 8 bytes, or we risk doing the
	 * fragmentation wrong. */
	maxlen = maxlen & ~7;

	/*
	 * Maxlen may be less than 8 if there was only a single
	 * fragment.  As it was fragmented before, add a fragment
	 * header also for a single fragment.  If total or maxlen
	 * is less than 8, ip6_fragment() will return EMSGSIZE and
	 * we drop the packet.
	 */
	error = ip6_fragment(ifp, m, hdrlen, proto, maxlen, frag_id);
	m = (*m0)->m_nextpkt;
	(*m0)->m_nextpkt = NULL;
	if (error == 0) {
		/* The first mbuf contains the unfragmented packet. */
		m_freem(*m0);
		*m0 = NULL;
		action = PF_PASS;
	} else {
		/* Drop expects an mbuf to free. */
		DPFPRINTF(("refragment error %d", error));
		action = PF_DROP;
	}
	for (t = m; m; m = t) {
		t = m->m_nextpkt;
		m->m_nextpkt = NULL;
		m->m_flags |= M_SKIP_FIREWALL;
		memset(&pd, 0, sizeof(pd));
		pd.pf_mtag = pf_find_mtag(m);
		if (error == 0)
			ip6_forward(m, 0);
		else
			m_freem(m);
	}

	return (action);
}
#endif /* INET6 */
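/*
 * The scrub rule loops below rely on pf's precomputed skip steps: when a
 * rule fails one criterion, r->skip[] points at the next rule that
 * differs in that field, so whole runs of rules sharing e.g. the same
 * interface or address are skipped in one step instead of being
 * evaluated one by one.
 */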
#ifdef INET
int
pf_normalize_ip(struct mbuf **m0, int dir, struct pfi_kif *kif, u_short *reason,
    struct pf_pdesc *pd)
{
	struct mbuf		*m = *m0;
	struct pf_rule		*r;
	struct ip		*h = mtod(m, struct ip *);
	int			 mff = (ntohs(h->ip_off) & IP_MF);
	int			 hlen = h->ip_hl << 2;
	u_int16_t		 fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
	u_int16_t		 max;
	int			 ip_len;
	int			 ip_off;
	int			 tag = -1;
	int			 verdict;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	while (r != NULL) {
		r->evaluations++;
		if (pfi_kif_match(r->kif, kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != AF_INET)
			r = r->skip[PF_SKIP_AF].ptr;
		else if (r->proto && r->proto != h->ip_p)
			r = r->skip[PF_SKIP_PROTO].ptr;
		else if (PF_MISMATCHAW(&r->src.addr,
		    (struct pf_addr *)&h->ip_src.s_addr, AF_INET,
		    r->src.neg, kif, M_GETFIB(m)))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr,
		    (struct pf_addr *)&h->ip_dst.s_addr, AF_INET,
		    r->dst.neg, NULL, M_GETFIB(m)))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else if (r->match_tag && !pf_match_tag(m, r, &tag,
		    pd->pf_mtag ? pd->pf_mtag->tag : 0))
			r = TAILQ_NEXT(r, entries);
		else
			break;
	}

	if (r == NULL || r->action == PF_NOSCRUB)
		return (PF_PASS);
	else {
		r->packets[dir == PF_OUT]++;
		r->bytes[dir == PF_OUT] += pd->tot_len;
	}

	/* Check for illegal packets */
	if (hlen < (int)sizeof(struct ip)) {
		REASON_SET(reason, PFRES_NORM);
		goto drop;
	}

	if (hlen > ntohs(h->ip_len)) {
		REASON_SET(reason, PFRES_NORM);
		goto drop;
	}

	/* Clear IP_DF if the rule uses the no-df option */
	if (r->rule_flag & PFRULE_NODF && h->ip_off & htons(IP_DF)) {
		u_int16_t ip_off = h->ip_off;

		h->ip_off &= htons(~IP_DF);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
	}

	/* We will need other tests here */
	if (!fragoff && !mff)
		goto no_fragment;

	/* We're dealing with a fragment now. Don't allow fragments
	 * with IP_DF to enter the cache. If the flag was cleared by
	 * no-df above, fine. Otherwise drop it.
	 */
	if (h->ip_off & htons(IP_DF)) {
		DPFPRINTF(("IP_DF\n"));
		goto bad;
	}

	ip_len = ntohs(h->ip_len) - hlen;
	ip_off = (ntohs(h->ip_off) & IP_OFFMASK) << 3;

	/* All fragments are 8 byte aligned */
	if (mff && (ip_len & 0x7)) {
		DPFPRINTF(("mff and %d\n", ip_len));
		goto bad;
	}

	/* Respect maximum length */
	if (fragoff + ip_len > IP_MAXPACKET) {
		DPFPRINTF(("max packet %d\n", fragoff + ip_len));
		goto bad;
	}
	max = fragoff + ip_len;

	/* Fully buffer all of the fragments
	 * Might return a completely reassembled mbuf, or NULL */
	PF_FRAG_LOCK();
	DPFPRINTF(("reass frag %d @ %d-%d\n", h->ip_id, fragoff, max));
	verdict = pf_reassemble(m0, h, dir, reason);
	PF_FRAG_UNLOCK();

	if (verdict != PF_PASS)
		return (PF_DROP);

	m = *m0;
	if (m == NULL)
		return (PF_DROP);

	h = mtod(m, struct ip *);

no_fragment:
	/* At this point, only IP_DF is allowed in ip_off */
	if (h->ip_off & ~htons(IP_DF)) {
		u_int16_t ip_off = h->ip_off;

		h->ip_off &= htons(IP_DF);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
	}

	pf_scrub_ip(&m, r->rule_flag, r->min_ttl, r->set_tos);

	return (PF_PASS);

bad:
	DPFPRINTF(("dropping bad fragment\n"));
	REASON_SET(reason, PFRES_FRAG);
drop:
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET, dir, *reason, r, NULL, NULL, pd,
		    1);

	return (PF_DROP);
}
#endif
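/*
 * Walking the IPv6 extension header chain requires two length encodings:
 * for AH, ip6e_len counts 32-bit words minus two, giving
 * (ip6e_len + 2) * 4 bytes (e.g. ip6e_len 4 -> a 24 byte AH), while the
 * other extension headers count 8-byte units minus one, giving
 * (ip6e_len + 1) * 8 bytes.
 */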
#ifdef INET6
int
pf_normalize_ip6(struct mbuf **m0, int dir, struct pfi_kif *kif,
    u_short *reason, struct pf_pdesc *pd)
{
	struct mbuf		*m = *m0;
	struct pf_rule		*r;
	struct ip6_hdr		*h = mtod(m, struct ip6_hdr *);
	int			 extoff;
	int			 off;
	struct ip6_ext		 ext;
	struct ip6_opt		 opt;
	struct ip6_opt_jumbo	 jumbo;
	struct ip6_frag		 frag;
	u_int32_t		 jumbolen = 0, plen;
	int			 optend;
	int			 ooff;
	u_int8_t		 proto;
	int			 terminal;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	while (r != NULL) {
		r->evaluations++;
		if (pfi_kif_match(r->kif, kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != AF_INET6)
			r = r->skip[PF_SKIP_AF].ptr;
#if 0 /* header chain! */
		else if (r->proto && r->proto != h->ip6_nxt)
			r = r->skip[PF_SKIP_PROTO].ptr;
#endif
		else if (PF_MISMATCHAW(&r->src.addr,
		    (struct pf_addr *)&h->ip6_src, AF_INET6,
		    r->src.neg, kif, M_GETFIB(m)))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr,
		    (struct pf_addr *)&h->ip6_dst, AF_INET6,
		    r->dst.neg, NULL, M_GETFIB(m)))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else
			break;
	}

	if (r == NULL || r->action == PF_NOSCRUB)
		return (PF_PASS);
	else {
		r->packets[dir == PF_OUT]++;
		r->bytes[dir == PF_OUT] += pd->tot_len;
	}

	/* Check for illegal packets */
	if (sizeof(struct ip6_hdr) + IPV6_MAXPACKET < m->m_pkthdr.len)
		goto drop;

	extoff = 0;
	off = sizeof(struct ip6_hdr);
	proto = h->ip6_nxt;
	terminal = 0;
	do {
		switch (proto) {
		case IPPROTO_FRAGMENT:
			goto fragment;
			break;
		case IPPROTO_AH:
		case IPPROTO_ROUTING:
		case IPPROTO_DSTOPTS:
			if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
			    NULL, AF_INET6))
				goto shortpkt;
			extoff = off;
			if (proto == IPPROTO_AH)
				off += (ext.ip6e_len + 2) * 4;
			else
				off += (ext.ip6e_len + 1) * 8;
			proto = ext.ip6e_nxt;
			break;
		case IPPROTO_HOPOPTS:
			if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
			    NULL, AF_INET6))
				goto shortpkt;
			extoff = off;
			optend = off + (ext.ip6e_len + 1) * 8;
			ooff = off + sizeof(ext);
			do {
				if (!pf_pull_hdr(m, ooff, &opt.ip6o_type,
				    sizeof(opt.ip6o_type), NULL, NULL,
				    AF_INET6))
					goto shortpkt;
				if (opt.ip6o_type == IP6OPT_PAD1) {
					ooff++;
					continue;
				}
				if (!pf_pull_hdr(m, ooff, &opt, sizeof(opt),
				    NULL, NULL, AF_INET6))
					goto shortpkt;
				if (ooff + sizeof(opt) + opt.ip6o_len > optend)
					goto drop;
				switch (opt.ip6o_type) {
				case IP6OPT_JUMBO:
					if (h->ip6_plen != 0)
						goto drop;
					if (!pf_pull_hdr(m, ooff, &jumbo,
					    sizeof(jumbo), NULL, NULL,
					    AF_INET6))
						goto shortpkt;
					memcpy(&jumbolen, jumbo.ip6oj_jumbo_len,
					    sizeof(jumbolen));
					jumbolen = ntohl(jumbolen);
					if (jumbolen <= IPV6_MAXPACKET)
						goto drop;
					if (sizeof(struct ip6_hdr) + jumbolen !=
					    m->m_pkthdr.len)
						goto drop;
					break;
				default:
					break;
				}
				ooff += sizeof(opt) + opt.ip6o_len;
			} while (ooff < optend);

			off = optend;
			proto = ext.ip6e_nxt;
			break;
		default:
			terminal = 1;
			break;
		}
	} while (!terminal);

	/* jumbo payload option must be present, or plen > 0 */
	if (ntohs(h->ip6_plen) == 0)
		plen = jumbolen;
	else
		plen = ntohs(h->ip6_plen);
	if (plen == 0)
		goto drop;
	if (sizeof(struct ip6_hdr) + plen > m->m_pkthdr.len)
		goto shortpkt;

	pf_scrub_ip6(&m, r->min_ttl);

	return (PF_PASS);

fragment:
	/* Jumbo payload packets cannot be fragmented. */
	plen = ntohs(h->ip6_plen);
	if (plen == 0 || jumbolen)
		goto drop;
	if (sizeof(struct ip6_hdr) + plen > m->m_pkthdr.len)
		goto shortpkt;

	if (!pf_pull_hdr(m, off, &frag, sizeof(frag), NULL, NULL, AF_INET6))
		goto shortpkt;

	/* Offset now points to data portion. */
	off += sizeof(frag);
	/* Returns PF_DROP or *m0 is NULL or completely reassembled mbuf. */
	if (pf_reassemble6(m0, h, &frag, off, extoff, reason) != PF_PASS)
		return (PF_DROP);
	m = *m0;
	if (m == NULL)
		return (PF_DROP);

	pd->flags |= PFDESC_IP_REAS;
	return (PF_PASS);

shortpkt:
	REASON_SET(reason, PFRES_SHORT);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET6, dir, *reason, r, NULL, NULL, pd,
		    1);
	return (PF_DROP);

drop:
	REASON_SET(reason, PFRES_NORM);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET6, dir, *reason, r, NULL, NULL, pd,
		    1);
	return (PF_DROP);
}
#endif /* INET6 */

int
pf_normalize_tcp(int dir, struct pfi_kif *kif, struct mbuf *m, int ipoff,
    int off, void *h, struct pf_pdesc *pd)
{
	struct pf_rule	*r, *rm = NULL;
	struct tcphdr	*th = pd->hdr.tcp;
	int		 rewrite = 0;
	u_short		 reason;
	u_int8_t	 flags;
	sa_family_t	 af = pd->af;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	while (r != NULL) {
		r->evaluations++;
		if (pfi_kif_match(r->kif, kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != af)
			r = r->skip[PF_SKIP_AF].ptr;
		else if (r->proto && r->proto != pd->proto)
			r = r->skip[PF_SKIP_PROTO].ptr;
		else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
		    r->src.neg, kif, M_GETFIB(m)))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (r->src.port_op && !pf_match_port(r->src.port_op,
		    r->src.port[0], r->src.port[1], th->th_sport))
			r = r->skip[PF_SKIP_SRC_PORT].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
		    r->dst.neg, NULL, M_GETFIB(m)))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
		    r->dst.port[0], r->dst.port[1], th->th_dport))
			r = r->skip[PF_SKIP_DST_PORT].ptr;
		else if (r->os_fingerprint != PF_OSFP_ANY && !pf_osfp_match(
		    pf_osfp_fingerprint(pd, m, off, th),
		    r->os_fingerprint))
			r = TAILQ_NEXT(r, entries);
		else {
			rm = r;
			break;
		}
	}

	if (rm == NULL || rm->action == PF_NOSCRUB)
		return (PF_PASS);
	else {
		r->packets[dir == PF_OUT]++;
		r->bytes[dir == PF_OUT] += pd->tot_len;
	}

	if (rm->rule_flag & PFRULE_REASSEMBLE_TCP)
		pd->flags |= PFDESC_TCP_NORM;

	flags = th->th_flags;
	if (flags & TH_SYN) {
		/* Illegal packet */
		if (flags & TH_RST)
			goto tcp_drop;

		if (flags & TH_FIN)
			goto tcp_drop;
	} else {
		/* Illegal packet */
		if (!(flags & (TH_ACK|TH_RST)))
			goto tcp_drop;
	}

	if (!(flags & TH_ACK)) {
		/* These flags are only valid if ACK is set */
		if ((flags & TH_FIN) || (flags & TH_PUSH) || (flags & TH_URG))
			goto tcp_drop;
	}

	/* Check for illegal header length */
	if (th->th_off < (sizeof(struct tcphdr) >> 2))
		goto tcp_drop;

	/* If flags changed, or reserved data set, then adjust */
	if (flags != th->th_flags || th->th_x2 != 0) {
		u_int16_t	ov, nv;

		ov = *(u_int16_t *)(&th->th_ack + 1);
		th->th_flags = flags;
		th->th_x2 = 0;
		nv = *(u_int16_t *)(&th->th_ack + 1);

		th->th_sum = pf_proto_cksum_fixup(m, th->th_sum, ov, nv, 0);
		rewrite = 1;
	}
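	/*
	 * The fixup above exploits the TCP header layout: th_off, th_x2
	 * and th_flags share the 16-bit word following th_ack, so reading
	 * that word before and after the change yields the old/new values
	 * for a single incremental checksum update instead of recomputing
	 * the whole checksum.
	 */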
	/* Remove urgent pointer, if TH_URG is not set */
	if (!(flags & TH_URG) && th->th_urp) {
		th->th_sum = pf_proto_cksum_fixup(m, th->th_sum, th->th_urp,
		    0, 0);
		th->th_urp = 0;
		rewrite = 1;
	}

	/* Process options */
	if (r->max_mss && pf_normalize_tcpopt(r, m, th, off, pd->af))
		rewrite = 1;

	/* copy back packet headers if we sanitized */
	if (rewrite)
		m_copyback(m, off, sizeof(*th), (caddr_t)th);

	return (PF_PASS);

tcp_drop:
	REASON_SET(&reason, PFRES_NORM);
	if (rm != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET, dir, reason, r, NULL, NULL, pd,
		    1);
	return (PF_DROP);
}

int
pf_normalize_tcp_init(struct mbuf *m, int off, struct pf_pdesc *pd,
    struct tcphdr *th, struct pf_state_peer *src, struct pf_state_peer *dst)
{
	u_int32_t tsval, tsecr;
	u_int8_t hdr[60];
	u_int8_t *opt;

	KASSERT((src->scrub == NULL),
	    ("pf_normalize_tcp_init: src->scrub != NULL"));

	src->scrub = uma_zalloc(V_pf_state_scrub_z, M_ZERO | M_NOWAIT);
	if (src->scrub == NULL)
		return (1);

	switch (pd->af) {
#ifdef INET
	case AF_INET: {
		struct ip *h = mtod(m, struct ip *);
		src->scrub->pfss_ttl = h->ip_ttl;
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
		src->scrub->pfss_ttl = h->ip6_hlim;
		break;
	}
#endif /* INET6 */
	}


	/*
	 * All normalizations below are only begun if we see the start of
	 * the connections.  They must all set an enabled bit in pfss_flags
	 */
	if ((th->th_flags & TH_SYN) == 0)
		return (0);


	if (th->th_off > (sizeof(struct tcphdr) >> 2) && src->scrub &&
	    pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
		/* Diddle with TCP options */
		int hlen;
		opt = hdr + sizeof(struct tcphdr);
		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
		while (hlen >= TCPOLEN_TIMESTAMP) {
			switch (*opt) {
			case TCPOPT_EOL:	/* FALLTHROUGH */
			case TCPOPT_NOP:
				opt++;
				hlen--;
				break;
			case TCPOPT_TIMESTAMP:
				if (opt[1] >= TCPOLEN_TIMESTAMP) {
					src->scrub->pfss_flags |=
					    PFSS_TIMESTAMP;
					src->scrub->pfss_ts_mod =
					    htonl(arc4random());

					/* note PFSS_PAWS not set yet */
					memcpy(&tsval, &opt[2],
					    sizeof(u_int32_t));
					memcpy(&tsecr, &opt[6],
					    sizeof(u_int32_t));
					src->scrub->pfss_tsval0 = ntohl(tsval);
					src->scrub->pfss_tsval = ntohl(tsval);
					src->scrub->pfss_tsecr = ntohl(tsecr);
					getmicrouptime(&src->scrub->pfss_last);
				}
				/* FALLTHROUGH */
			default:
				hlen -= MAX(opt[1], 2);
				opt += MAX(opt[1], 2);
				break;
			}
		}
	}

	return (0);
}
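/*
 * Timestamp modulation, set up above and applied per packet in
 * pf_normalize_tcp_stateful(): each peer gets a random 32-bit offset
 * (pfss_ts_mod) at connection start.  A packet's TS value is shifted by
 * its sender's offset, and the TS echo it carries is shifted back by the
 * other peer's offset, so both endpoints see consistent values while an
 * observer beyond the firewall cannot read the hosts' raw timestamp
 * clocks (often a direct uptime hint).
 */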
void
pf_normalize_tcp_cleanup(struct pf_state *state)
{
	if (state->src.scrub)
		uma_zfree(V_pf_state_scrub_z, state->src.scrub);
	if (state->dst.scrub)
		uma_zfree(V_pf_state_scrub_z, state->dst.scrub);

	/* Someday... flush the TCP segment reassembly descriptors. */
}

int
pf_normalize_tcp_stateful(struct mbuf *m, int off, struct pf_pdesc *pd,
    u_short *reason, struct tcphdr *th, struct pf_state *state,
    struct pf_state_peer *src, struct pf_state_peer *dst, int *writeback)
{
	struct timeval uptime;
	u_int32_t tsval, tsecr;
	u_int tsval_from_last;
	u_int8_t hdr[60];
	u_int8_t *opt;
	int copyback = 0;
	int got_ts = 0;

	KASSERT((src->scrub || dst->scrub),
	    ("%s: src->scrub && dst->scrub!", __func__));

	/*
	 * Enforce the minimum TTL seen for this connection.  Negate a common
	 * technique to evade an intrusion detection system and confuse
	 * firewall state code.
	 */
	switch (pd->af) {
#ifdef INET
	case AF_INET: {
		if (src->scrub) {
			struct ip *h = mtod(m, struct ip *);
			if (h->ip_ttl > src->scrub->pfss_ttl)
				src->scrub->pfss_ttl = h->ip_ttl;
			h->ip_ttl = src->scrub->pfss_ttl;
		}
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		if (src->scrub) {
			struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
			if (h->ip6_hlim > src->scrub->pfss_ttl)
				src->scrub->pfss_ttl = h->ip6_hlim;
			h->ip6_hlim = src->scrub->pfss_ttl;
		}
		break;
	}
#endif /* INET6 */
	}

	if (th->th_off > (sizeof(struct tcphdr) >> 2) &&
	    ((src->scrub && (src->scrub->pfss_flags & PFSS_TIMESTAMP)) ||
	    (dst->scrub && (dst->scrub->pfss_flags & PFSS_TIMESTAMP))) &&
	    pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
		/* Diddle with TCP options */
		int hlen;
		opt = hdr + sizeof(struct tcphdr);
		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
		while (hlen >= TCPOLEN_TIMESTAMP) {
			switch (*opt) {
			case TCPOPT_EOL:	/* FALLTHROUGH */
			case TCPOPT_NOP:
				opt++;
				hlen--;
				break;
			case TCPOPT_TIMESTAMP:
				/* Modulate the timestamps.  Can be used for
				 * NAT detection, OS uptime determination or
				 * reboot detection.
				 */

				if (got_ts) {
					/* Huh?  Multiple timestamps!? */
					if (V_pf_status.debug >= PF_DEBUG_MISC) {
						DPFPRINTF(("multiple TS??"));
						pf_print_state(state);
						printf("\n");
					}
					REASON_SET(reason, PFRES_TS);
					return (PF_DROP);
				}
				if (opt[1] >= TCPOLEN_TIMESTAMP) {
					memcpy(&tsval, &opt[2],
					    sizeof(u_int32_t));
					if (tsval && src->scrub &&
					    (src->scrub->pfss_flags &
					    PFSS_TIMESTAMP)) {
						tsval = ntohl(tsval);
						pf_change_proto_a(m, &opt[2],
						    &th->th_sum,
						    htonl(tsval +
						    src->scrub->pfss_ts_mod),
						    0);
						copyback = 1;
					}

					/* Modulate TS reply iff valid (!0) */
					memcpy(&tsecr, &opt[6],
					    sizeof(u_int32_t));
					if (tsecr && dst->scrub &&
					    (dst->scrub->pfss_flags &
					    PFSS_TIMESTAMP)) {
						tsecr = ntohl(tsecr)
						    - dst->scrub->pfss_ts_mod;
						pf_change_proto_a(m, &opt[6],
						    &th->th_sum, htonl(tsecr),
						    0);
						copyback = 1;
					}
					got_ts = 1;
				}
				/* FALLTHROUGH */
			default:
				hlen -= MAX(opt[1], 2);
				opt += MAX(opt[1], 2);
				break;
			}
		}
		if (copyback) {
			/* Copyback the options, caller copies back header */
			*writeback = 1;
			m_copyback(m, off + sizeof(struct tcphdr),
			    (th->th_off << 2) - sizeof(struct tcphdr), hdr +
			    sizeof(struct tcphdr));
		}
	}

	/*
	 * Must invalidate PAWS checks on connections idle for too long.
	 * The fastest allowed timestamp clock is 1ms.  That turns out to
	 * be about 24 days before it wraps.  XXX Right now our lowerbound
	 * TS echo check only works for the first 12 days of a connection
	 * when the TS has exhausted half its 32bit space
	 */
#define TS_MAX_IDLE	(24*24*60*60)
#define TS_MAX_CONN	(12*24*60*60)	/* XXX remove when better tsecr check */
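	/*
	 * The numbers above follow from the wrap arithmetic: at the
	 * maximum clock rate of 1ms per tick, half the 32-bit timestamp
	 * space is 2^31 ms, roughly 24.8 days; past that point the
	 * SEQ_LT/SEQ_GT comparisons become ambiguous.  The 12 day
	 * connection limit leaves the same margin for the weaker tsecr
	 * lowerbound check.
	 */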
	getmicrouptime(&uptime);
	if (src->scrub && (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (uptime.tv_sec - src->scrub->pfss_last.tv_sec > TS_MAX_IDLE ||
	    time_uptime - state->creation > TS_MAX_CONN)) {
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			DPFPRINTF(("src idled out of PAWS\n"));
			pf_print_state(state);
			printf("\n");
		}
		src->scrub->pfss_flags = (src->scrub->pfss_flags & ~PFSS_PAWS)
		    | PFSS_PAWS_IDLED;
	}
	if (dst->scrub && (dst->scrub->pfss_flags & PFSS_PAWS) &&
	    uptime.tv_sec - dst->scrub->pfss_last.tv_sec > TS_MAX_IDLE) {
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			DPFPRINTF(("dst idled out of PAWS\n"));
			pf_print_state(state);
			printf("\n");
		}
		dst->scrub->pfss_flags = (dst->scrub->pfss_flags & ~PFSS_PAWS)
		    | PFSS_PAWS_IDLED;
	}

	if (got_ts && src->scrub && dst->scrub &&
	    (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (dst->scrub->pfss_flags & PFSS_PAWS)) {
		/* Validate that the timestamps are "in-window".
		 * RFC1323 describes TCP Timestamp options that allow
		 * measurement of RTT (round trip time) and PAWS
		 * (protection against wrapped sequence numbers).  PAWS
		 * gives us a set of rules for rejecting packets on
		 * long fat pipes (packets that were somehow delayed
		 * in transit longer than the time it took to send the
		 * full TCP sequence space of 4Gb).  We can use these
		 * rules and infer a few others that will let us treat
		 * the 32bit timestamp and the 32bit echoed timestamp
		 * as sequence numbers to prevent a blind attacker from
		 * inserting packets into a connection.
		 *
		 * RFC1323 tells us:
		 *  - The timestamp on this packet must be greater than
		 *    or equal to the last value echoed by the other
		 *    endpoint.  The RFC says those will be discarded
		 *    since it is a dup that has already been acked.
		 *    This gives us a lowerbound on the timestamp.
		 *        timestamp >= other last echoed timestamp
		 *  - The timestamp will be less than or equal to
		 *    the last timestamp plus the time between the
		 *    last packet and now.  The RFC defines the max
		 *    clock rate as 1ms.  We will allow clocks to be
		 *    up to 10% fast and will allow a total difference
		 *    of 30 seconds due to a route change.  And this
		 *    gives us an upperbound on the timestamp.
		 *        timestamp <= last timestamp + max ticks
		 *    We have to be careful here.  Windows will send an
		 *    initial timestamp of zero and then initialize it
		 *    to a random value after the 3whs; presumably to
		 *    avoid a DoS by having to call an expensive RNG
		 *    during a SYN flood.  Proof MS has at least one
		 *    good security geek.
		 *
		 *  - The TCP timestamp option must also echo the other
		 *    endpoint's timestamp.  The timestamp echoed is the
		 *    one carried on the earliest unacknowledged segment
		 *    on the left edge of the sequence window.  The RFC
		 *    states that the host will reject any echoed
		 *    timestamps that were larger than any ever sent.
		 *    This gives us an upperbound on the TS echo.
		 *        tsecr <= largest_tsval
		 *  - The lowerbound on the TS echo is a little more
		 *    tricky to determine.  The other endpoint's echoed
		 *    values will not decrease.  But there may be
		 *    network conditions that re-order packets and
		 *    cause our view of them to decrease.  For now the
		 *    only lowerbound we can safely determine is that
		 *    the TS echo will never be less than the original
		 *    TS.  XXX There is probably a better lowerbound.
		 *    Remove TS_MAX_CONN with better lowerbound check.
		 *        tsecr >= other original TS
		 *
		 * It is also important to note that the fastest
		 * timestamp clock of 1ms will wrap its 32bit space in
		 * 24 days.  So we just disable TS checking after 24
		 * days of idle time.  We actually must use a 12d
		 * connection limit until we can come up with a better
		 * lowerbound to the TS echo check.
		 */
		struct timeval delta_ts;
		int ts_fudge;


		/*
		 * PFTM_TS_DIFF is how many seconds of leeway to allow
		 * a host's timestamp.  This can happen if the previous
		 * packet got delayed in transit for much longer than
		 * this packet.
		 */
		if ((ts_fudge = state->rule.ptr->timeout[PFTM_TS_DIFF]) == 0)
			ts_fudge = V_pf_default_rule.timeout[PFTM_TS_DIFF];

		/* Calculate max ticks since the last timestamp */
#define TS_MAXFREQ	1100		/* RFC max TS freq of 1kHz + 10% skew */
#define TS_MICROSECS	1000000		/* microseconds per second */
		delta_ts = uptime;
		timevalsub(&delta_ts, &src->scrub->pfss_last);
		tsval_from_last = (delta_ts.tv_sec + ts_fudge) * TS_MAXFREQ;
		tsval_from_last += delta_ts.tv_usec / (TS_MICROSECS/TS_MAXFREQ);
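		/*
		 * Example: with e.g. a 30 second fudge and a packet arriving
		 * 2.5 seconds after the previous one, tsval_from_last is
		 * (2 + 30) * 1100 + 500000 / 909 = 35750 ticks, the most
		 * the peer's timestamp may legitimately have advanced.
		 */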

		if ((src->state >= TCPS_ESTABLISHED &&
		    dst->state >= TCPS_ESTABLISHED) &&
		    (SEQ_LT(tsval, dst->scrub->pfss_tsecr) ||
		    SEQ_GT(tsval, src->scrub->pfss_tsval + tsval_from_last) ||
		    (tsecr && (SEQ_GT(tsecr, dst->scrub->pfss_tsval) ||
		    SEQ_LT(tsecr, dst->scrub->pfss_tsval0))))) {
			/* Bad RFC1323 implementation or an insertion attack.
			 *
			 * - Solaris 2.6 and 2.7 are known to send another ACK
			 *   after the FIN,FIN|ACK,ACK closing that carries
			 *   an old timestamp.
			 */

			DPFPRINTF(("Timestamp failed %c%c%c%c\n",
			    SEQ_LT(tsval, dst->scrub->pfss_tsecr) ? '0' : ' ',
			    SEQ_GT(tsval, src->scrub->pfss_tsval +
			    tsval_from_last) ? '1' : ' ',
			    SEQ_GT(tsecr, dst->scrub->pfss_tsval) ? '2' : ' ',
			    SEQ_LT(tsecr, dst->scrub->pfss_tsval0) ?
			    '3' : ' '));
			DPFPRINTF((" tsval: %u  tsecr: %u  +ticks: %u  "
			    "idle: %jus %lums\n",
			    tsval, tsecr, tsval_from_last,
			    (uintmax_t)delta_ts.tv_sec,
			    delta_ts.tv_usec / 1000));
			DPFPRINTF((" src->tsval: %u  tsecr: %u\n",
			    src->scrub->pfss_tsval, src->scrub->pfss_tsecr));
			DPFPRINTF((" dst->tsval: %u  tsecr: %u  tsval0: %u"
			    "\n", dst->scrub->pfss_tsval,
			    dst->scrub->pfss_tsecr, dst->scrub->pfss_tsval0));
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
				pf_print_state(state);
				pf_print_flags(th->th_flags);
				printf("\n");
			}
			REASON_SET(reason, PFRES_TS);
			return (PF_DROP);
		}

		/* XXX I'd really like to require tsecr but it's optional */

	} else if (!got_ts && (th->th_flags & TH_RST) == 0 &&
	    ((src->state == TCPS_ESTABLISHED && dst->state == TCPS_ESTABLISHED)
	    || pd->p_len > 0 || (th->th_flags & TH_SYN)) &&
	    src->scrub && dst->scrub &&
	    (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (dst->scrub->pfss_flags & PFSS_PAWS)) {
		/* Didn't send a timestamp.  Timestamps aren't really useful
		 * when:
		 *  - connection opening or closing (often not even sent).
		 *    but we must not let an attacker put a FIN on a
		 *    data packet to sneak it through our ESTABLISHED check.
		 *  - on a TCP reset.  RFC suggests not even looking at TS.
		 *  - on an empty ACK.  The TS will not be echoed so it will
		 *    probably not help keep the RTT calculation in sync and
		 *    there isn't as much danger when the sequence numbers
		 *    got wrapped.  So some stacks don't include TS on empty
		 *    ACKs :-(
		 *
		 * To minimize the disruption to mostly RFC1323 conformant
		 * stacks, we will only require timestamps on data packets.
		 *
		 * And what do ya know, we cannot require timestamps on data
		 * packets.  There appear to be devices that do legitimate
		 * TCP connection hijacking.  There are HTTP devices that allow
		 * a 3whs (with timestamps) and then buffer the HTTP request.
		 * If the intermediate device has the HTTP response cache, it
		 * will spoof the response but not bother timestamping its
		 * packets.  So we can look for the presence of a timestamp in
		 * the first data packet and if there, require it in all future
		 * packets.
		 */

		if (pd->p_len > 0 && (src->scrub->pfss_flags & PFSS_DATA_TS)) {
			/*
			 * Hey!  Someone tried to sneak a packet in.  Or the
			 * stack changed its RFC1323 behavior?!?!
			 */
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
				DPFPRINTF(("Did not receive expected RFC1323 "
				    "timestamp\n"));
				pf_print_state(state);
				pf_print_flags(th->th_flags);
				printf("\n");
			}
			REASON_SET(reason, PFRES_TS);
			return (PF_DROP);
		}
	}

	/*
	 * We will note if a host sends its data packets with or without
	 * timestamps.  And require all data packets to contain a timestamp
	 * if the first does.  PAWS implicitly requires that all data packets
	 * be timestamped.  But I think there are middle-man devices that
	 * hijack TCP streams immediately after the 3whs and don't timestamp
	 * their packets (seen in a WWW accelerator or cache).
	 */
	if (pd->p_len > 0 && src->scrub && (src->scrub->pfss_flags &
	    (PFSS_TIMESTAMP|PFSS_DATA_TS|PFSS_DATA_NOTS)) == PFSS_TIMESTAMP) {
		if (got_ts)
			src->scrub->pfss_flags |= PFSS_DATA_TS;
		else {
			src->scrub->pfss_flags |= PFSS_DATA_NOTS;
			if (V_pf_status.debug >= PF_DEBUG_MISC && dst->scrub &&
			    (dst->scrub->pfss_flags & PFSS_TIMESTAMP)) {
				/* Don't warn if other host rejected RFC1323 */
				DPFPRINTF(("Broken RFC1323 stack did not "
				    "timestamp data packet. Disabled PAWS "
				    "security.\n"));
				pf_print_state(state);
				pf_print_flags(th->th_flags);
				printf("\n");
			}
		}
	}


	/*
	 * Update PAWS values
	 */
	if (got_ts && src->scrub && PFSS_TIMESTAMP == (src->scrub->pfss_flags &
	    (PFSS_PAWS_IDLED|PFSS_TIMESTAMP))) {
		getmicrouptime(&src->scrub->pfss_last);
		if (SEQ_GEQ(tsval, src->scrub->pfss_tsval) ||
		    (src->scrub->pfss_flags & PFSS_PAWS) == 0)
			src->scrub->pfss_tsval = tsval;

		if (tsecr) {
			if (SEQ_GEQ(tsecr, src->scrub->pfss_tsecr) ||
			    (src->scrub->pfss_flags & PFSS_PAWS) == 0)
				src->scrub->pfss_tsecr = tsecr;

			if ((src->scrub->pfss_flags & PFSS_PAWS) == 0 &&
			    (SEQ_LT(tsval, src->scrub->pfss_tsval0) ||
			    src->scrub->pfss_tsval0 == 0)) {
				/* tsval0 MUST be the lowest timestamp */
				src->scrub->pfss_tsval0 = tsval;
			}

			/* Only fully initialized after a TS gets echoed */
			if ((src->scrub->pfss_flags & PFSS_PAWS) == 0)
				src->scrub->pfss_flags |= PFSS_PAWS;
		}
	}

	/* I have a dream....  TCP segment reassembly.... */
	return (0);
}

static int
pf_normalize_tcpopt(struct pf_rule *r, struct mbuf *m, struct tcphdr *th,
    int off, sa_family_t af)
{
	u_int16_t	*mss;
	int		 thoff;
	int		 opt, cnt, optlen = 0;
	int		 rewrite = 0;
	u_char		 opts[TCP_MAXOLEN];
	u_char		*optp = opts;

	thoff = th->th_off << 2;
	cnt = thoff - sizeof(struct tcphdr);

	if (cnt > 0 && !pf_pull_hdr(m, off + sizeof(*th), opts, cnt,
	    NULL, NULL, af))
		return (rewrite);

	for (; cnt > 0; cnt -= optlen, optp += optlen) {
		opt = optp[0];
		if (opt == TCPOPT_EOL)
			break;
		if (opt == TCPOPT_NOP)
			optlen = 1;
		else {
			if (cnt < 2)
				break;
			optlen = optp[1];
			if (optlen < 2 || optlen > cnt)
				break;
		}
		switch (opt) {
		case TCPOPT_MAXSEG:
			mss = (u_int16_t *)(optp + 2);
			if ((ntohs(*mss)) > r->max_mss) {
				th->th_sum = pf_proto_cksum_fixup(m,
				    th->th_sum, *mss, htons(r->max_mss), 0);
				*mss = htons(r->max_mss);
				rewrite = 1;
			}
			break;
		default:
			break;
		}
	}

	if (rewrite)
		m_copyback(m, off + sizeof(*th), thoff - sizeof(*th), opts);

	return (rewrite);
}
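/*
 * All header rewrites in this file update checksums incrementally in the
 * style of RFC 1624 (pf_cksum_fixup()/pf_proto_cksum_fixup()) rather than
 * recomputing them: only the old and new 16-bit values are folded into
 * the existing sum.  E.g. clamping an MSS of 1460 down to r->max_mss
 * above folds the pair (htons(1460), htons(r->max_mss)) into th_sum and
 * never touches the rest of the segment.
 */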
#ifdef INET
static void
pf_scrub_ip(struct mbuf **m0, u_int32_t flags, u_int8_t min_ttl, u_int8_t tos)
{
	struct mbuf		*m = *m0;
	struct ip		*h = mtod(m, struct ip *);

	/* Clear IP_DF if no-df was requested */
	if (flags & PFRULE_NODF && h->ip_off & htons(IP_DF)) {
		u_int16_t ip_off = h->ip_off;

		h->ip_off &= htons(~IP_DF);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
	}

	/* Enforce a minimum ttl, may cause endless packet loops */
	if (min_ttl && h->ip_ttl < min_ttl) {
		u_int16_t ip_ttl = h->ip_ttl;

		h->ip_ttl = min_ttl;
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_ttl, h->ip_ttl, 0);
	}

	/* Enforce tos */
	if (flags & PFRULE_SET_TOS) {
		u_int16_t	ov, nv;

		ov = *(u_int16_t *)h;
		h->ip_tos = tos | (h->ip_tos & IPTOS_ECN_MASK);
		nv = *(u_int16_t *)h;

		h->ip_sum = pf_cksum_fixup(h->ip_sum, ov, nv, 0);
	}

	/* random-id, but not for fragments */
	if (flags & PFRULE_RANDOMID && !(h->ip_off & ~htons(IP_DF))) {
		uint16_t ip_id = h->ip_id;

		ip_fillid(h);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_id, h->ip_id, 0);
	}
}
#endif /* INET */

#ifdef INET6
static void
pf_scrub_ip6(struct mbuf **m0, u_int8_t min_ttl)
{
	struct mbuf		*m = *m0;
	struct ip6_hdr		*h = mtod(m, struct ip6_hdr *);

	/* Enforce a minimum ttl, may cause endless packet loops */
	if (min_ttl && h->ip6_hlim < min_ttl)
		h->ip6_hlim = min_ttl;
}
#endif