1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright 2001 Niels Provos <provos@citi.umich.edu> 5 * Copyright 2011-2018 Alexander Bluhm <bluhm@openbsd.org> 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 * 28 * $OpenBSD: pf_norm.c,v 1.114 2009/01/29 14:11:45 henning Exp $ 29 */ 30 31 #include <sys/cdefs.h> 32 __FBSDID("$FreeBSD$"); 33 34 #include "opt_inet.h" 35 #include "opt_inet6.h" 36 #include "opt_pf.h" 37 38 #include <sys/param.h> 39 #include <sys/kernel.h> 40 #include <sys/lock.h> 41 #include <sys/mbuf.h> 42 #include <sys/mutex.h> 43 #include <sys/refcount.h> 44 #include <sys/socket.h> 45 46 #include <net/if.h> 47 #include <net/vnet.h> 48 #include <net/pfvar.h> 49 #include <net/if_pflog.h> 50 51 #include <netinet/in.h> 52 #include <netinet/ip.h> 53 #include <netinet/ip_var.h> 54 #include <netinet6/ip6_var.h> 55 #include <netinet/tcp.h> 56 #include <netinet/tcp_fsm.h> 57 #include <netinet/tcp_seq.h> 58 59 #ifdef INET6 60 #include <netinet/ip6.h> 61 #endif /* INET6 */ 62 63 struct pf_frent { 64 TAILQ_ENTRY(pf_frent) fr_next; 65 struct mbuf *fe_m; 66 uint16_t fe_hdrlen; /* ipv4 header length with ip options 67 ipv6, extension, fragment header */ 68 uint16_t fe_extoff; /* last extension header offset or 0 */ 69 uint16_t fe_len; /* fragment length */ 70 uint16_t fe_off; /* fragment offset */ 71 uint16_t fe_mff; /* more fragment flag */ 72 }; 73 74 struct pf_fragment_cmp { 75 struct pf_addr frc_src; 76 struct pf_addr frc_dst; 77 uint32_t frc_id; 78 sa_family_t frc_af; 79 uint8_t frc_proto; 80 }; 81 82 struct pf_fragment { 83 struct pf_fragment_cmp fr_key; 84 #define fr_src fr_key.frc_src 85 #define fr_dst fr_key.frc_dst 86 #define fr_id fr_key.frc_id 87 #define fr_af fr_key.frc_af 88 #define fr_proto fr_key.frc_proto 89 90 /* pointers to queue element */ 91 struct pf_frent *fr_firstoff[PF_FRAG_ENTRY_POINTS]; 92 /* count entries between pointers */ 93 uint8_t fr_entries[PF_FRAG_ENTRY_POINTS]; 94 RB_ENTRY(pf_fragment) fr_entry; 95 TAILQ_ENTRY(pf_fragment) frag_next; 96 uint32_t fr_timeout; 97 uint16_t fr_maxlen; /* maximum length of single fragment */ 98 u_int16_t fr_holes; /* number of holes in the queue */ 99 TAILQ_HEAD(pf_fragq, pf_frent) fr_queue; 100 }; 101 102 struct pf_fragment_tag { 103 uint16_t ft_hdrlen; /* header length of reassembled pkt */ 104 uint16_t ft_extoff; /* last extension header offset or 0 */ 105 uint16_t ft_maxlen; /* maximum fragment payload length */ 106 uint32_t ft_id; /* fragment id */ 107 }; 108 109 VNET_DEFINE_STATIC(struct mtx, pf_frag_mtx); 110 #define V_pf_frag_mtx VNET(pf_frag_mtx) 111 #define PF_FRAG_LOCK() mtx_lock(&V_pf_frag_mtx) 112 #define PF_FRAG_UNLOCK() mtx_unlock(&V_pf_frag_mtx) 113 #define PF_FRAG_ASSERT() mtx_assert(&V_pf_frag_mtx, MA_OWNED) 114 115 VNET_DEFINE(uma_zone_t, pf_state_scrub_z); /* XXX: shared with pfsync */ 116 117 VNET_DEFINE_STATIC(uma_zone_t, pf_frent_z); 118 #define V_pf_frent_z VNET(pf_frent_z) 119 VNET_DEFINE_STATIC(uma_zone_t, pf_frag_z); 120 #define V_pf_frag_z VNET(pf_frag_z) 121 122 TAILQ_HEAD(pf_fragqueue, pf_fragment); 123 TAILQ_HEAD(pf_cachequeue, pf_fragment); 124 VNET_DEFINE_STATIC(struct pf_fragqueue, pf_fragqueue); 125 #define V_pf_fragqueue VNET(pf_fragqueue) 126 RB_HEAD(pf_frag_tree, pf_fragment); 127 VNET_DEFINE_STATIC(struct pf_frag_tree, pf_frag_tree); 128 #define V_pf_frag_tree VNET(pf_frag_tree) 129 static int pf_frag_compare(struct pf_fragment *, 130 struct pf_fragment *); 131 static RB_PROTOTYPE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare); 132 static RB_GENERATE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare); 133 134 static void pf_flush_fragments(void); 135 static void pf_free_fragment(struct pf_fragment *); 136 static void pf_remove_fragment(struct pf_fragment *); 137 static int pf_normalize_tcpopt(struct pf_krule *, struct mbuf *, 138 struct tcphdr *, int, sa_family_t); 139 static struct pf_frent *pf_create_fragment(u_short *); 140 static int pf_frent_holes(struct pf_frent *frent); 141 static struct pf_fragment *pf_find_fragment(struct pf_fragment_cmp *key, 142 struct pf_frag_tree *tree); 143 static inline int pf_frent_index(struct pf_frent *); 144 static int pf_frent_insert(struct pf_fragment *, 145 struct pf_frent *, struct pf_frent *); 146 void pf_frent_remove(struct pf_fragment *, 147 struct pf_frent *); 148 struct pf_frent *pf_frent_previous(struct pf_fragment *, 149 struct pf_frent *); 150 static struct pf_fragment *pf_fillup_fragment(struct pf_fragment_cmp *, 151 struct pf_frent *, u_short *); 152 static struct mbuf *pf_join_fragment(struct pf_fragment *); 153 #ifdef INET 154 static void pf_scrub_ip(struct mbuf **, uint32_t, uint8_t, uint8_t); 155 static int pf_reassemble(struct mbuf **, struct ip *, int, u_short *); 156 #endif /* INET */ 157 #ifdef INET6 158 static int pf_reassemble6(struct mbuf **, struct ip6_hdr *, 159 struct ip6_frag *, uint16_t, uint16_t, u_short *); 160 static void pf_scrub_ip6(struct mbuf **, uint8_t); 161 #endif /* INET6 */ 162 163 #define DPFPRINTF(x) do { \ 164 if (V_pf_status.debug >= PF_DEBUG_MISC) { \ 165 printf("%s: ", __func__); \ 166 printf x ; \ 167 } \ 168 } while(0) 169 170 #ifdef INET 171 static void 172 pf_ip2key(struct ip *ip, int dir, struct pf_fragment_cmp *key) 173 { 174 175 key->frc_src.v4 = ip->ip_src; 176 key->frc_dst.v4 = ip->ip_dst; 177 key->frc_af = AF_INET; 178 key->frc_proto = ip->ip_p; 179 key->frc_id = ip->ip_id; 180 } 181 #endif /* INET */ 182 183 void 184 pf_normalize_init(void) 185 { 186 187 V_pf_frag_z = uma_zcreate("pf frags", sizeof(struct pf_fragment), 188 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 189 V_pf_frent_z = uma_zcreate("pf frag entries", sizeof(struct pf_frent), 190 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 191 V_pf_state_scrub_z = uma_zcreate("pf state scrubs", 192 sizeof(struct pf_state_scrub), NULL, NULL, NULL, NULL, 193 UMA_ALIGN_PTR, 0); 194 195 mtx_init(&V_pf_frag_mtx, "pf fragments", NULL, MTX_DEF); 196 197 V_pf_limits[PF_LIMIT_FRAGS].zone = V_pf_frent_z; 198 V_pf_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT; 199 uma_zone_set_max(V_pf_frent_z, PFFRAG_FRENT_HIWAT); 200 uma_zone_set_warning(V_pf_frent_z, "PF frag entries limit reached"); 201 202 TAILQ_INIT(&V_pf_fragqueue); 203 } 204 205 void 206 pf_normalize_cleanup(void) 207 { 208 209 uma_zdestroy(V_pf_state_scrub_z); 210 uma_zdestroy(V_pf_frent_z); 211 uma_zdestroy(V_pf_frag_z); 212 213 mtx_destroy(&V_pf_frag_mtx); 214 } 215 216 static int 217 pf_frag_compare(struct pf_fragment *a, struct pf_fragment *b) 218 { 219 int diff; 220 221 if ((diff = a->fr_id - b->fr_id) != 0) 222 return (diff); 223 if ((diff = a->fr_proto - b->fr_proto) != 0) 224 return (diff); 225 if ((diff = a->fr_af - b->fr_af) != 0) 226 return (diff); 227 if ((diff = pf_addr_cmp(&a->fr_src, &b->fr_src, a->fr_af)) != 0) 228 return (diff); 229 if ((diff = pf_addr_cmp(&a->fr_dst, &b->fr_dst, a->fr_af)) != 0) 230 return (diff); 231 return (0); 232 } 233 234 void 235 pf_purge_expired_fragments(void) 236 { 237 u_int32_t expire = time_uptime - 238 V_pf_default_rule.timeout[PFTM_FRAG]; 239 240 pf_purge_fragments(expire); 241 } 242 243 void 244 pf_purge_fragments(uint32_t expire) 245 { 246 struct pf_fragment *frag; 247 248 PF_FRAG_LOCK(); 249 while ((frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue)) != NULL) { 250 if (frag->fr_timeout > expire) 251 break; 252 253 DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag)); 254 pf_free_fragment(frag); 255 } 256 257 PF_FRAG_UNLOCK(); 258 } 259 260 /* 261 * Try to flush old fragments to make space for new ones 262 */ 263 static void 264 pf_flush_fragments(void) 265 { 266 struct pf_fragment *frag; 267 int goal; 268 269 PF_FRAG_ASSERT(); 270 271 goal = uma_zone_get_cur(V_pf_frent_z) * 9 / 10; 272 DPFPRINTF(("trying to free %d frag entriess\n", goal)); 273 while (goal < uma_zone_get_cur(V_pf_frent_z)) { 274 frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue); 275 if (frag) 276 pf_free_fragment(frag); 277 else 278 break; 279 } 280 } 281 282 /* Frees the fragments and all associated entries */ 283 static void 284 pf_free_fragment(struct pf_fragment *frag) 285 { 286 struct pf_frent *frent; 287 288 PF_FRAG_ASSERT(); 289 290 /* Free all fragments */ 291 for (frent = TAILQ_FIRST(&frag->fr_queue); frent; 292 frent = TAILQ_FIRST(&frag->fr_queue)) { 293 TAILQ_REMOVE(&frag->fr_queue, frent, fr_next); 294 295 m_freem(frent->fe_m); 296 uma_zfree(V_pf_frent_z, frent); 297 } 298 299 pf_remove_fragment(frag); 300 } 301 302 static struct pf_fragment * 303 pf_find_fragment(struct pf_fragment_cmp *key, struct pf_frag_tree *tree) 304 { 305 struct pf_fragment *frag; 306 307 PF_FRAG_ASSERT(); 308 309 frag = RB_FIND(pf_frag_tree, tree, (struct pf_fragment *)key); 310 if (frag != NULL) { 311 /* XXX Are we sure we want to update the timeout? */ 312 frag->fr_timeout = time_uptime; 313 TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next); 314 TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next); 315 } 316 317 return (frag); 318 } 319 320 /* Removes a fragment from the fragment queue and frees the fragment */ 321 static void 322 pf_remove_fragment(struct pf_fragment *frag) 323 { 324 325 PF_FRAG_ASSERT(); 326 KASSERT(frag, ("frag != NULL")); 327 328 RB_REMOVE(pf_frag_tree, &V_pf_frag_tree, frag); 329 TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next); 330 uma_zfree(V_pf_frag_z, frag); 331 } 332 333 static struct pf_frent * 334 pf_create_fragment(u_short *reason) 335 { 336 struct pf_frent *frent; 337 338 PF_FRAG_ASSERT(); 339 340 frent = uma_zalloc(V_pf_frent_z, M_NOWAIT); 341 if (frent == NULL) { 342 pf_flush_fragments(); 343 frent = uma_zalloc(V_pf_frent_z, M_NOWAIT); 344 if (frent == NULL) { 345 REASON_SET(reason, PFRES_MEMORY); 346 return (NULL); 347 } 348 } 349 350 return (frent); 351 } 352 353 /* 354 * Calculate the additional holes that were created in the fragment 355 * queue by inserting this fragment. A fragment in the middle 356 * creates one more hole by splitting. For each connected side, 357 * it loses one hole. 358 * Fragment entry must be in the queue when calling this function. 359 */ 360 static int 361 pf_frent_holes(struct pf_frent *frent) 362 { 363 struct pf_frent *prev = TAILQ_PREV(frent, pf_fragq, fr_next); 364 struct pf_frent *next = TAILQ_NEXT(frent, fr_next); 365 int holes = 1; 366 367 if (prev == NULL) { 368 if (frent->fe_off == 0) 369 holes--; 370 } else { 371 KASSERT(frent->fe_off != 0, ("frent->fe_off != 0")); 372 if (frent->fe_off == prev->fe_off + prev->fe_len) 373 holes--; 374 } 375 if (next == NULL) { 376 if (!frent->fe_mff) 377 holes--; 378 } else { 379 KASSERT(frent->fe_mff, ("frent->fe_mff")); 380 if (next->fe_off == frent->fe_off + frent->fe_len) 381 holes--; 382 } 383 return holes; 384 } 385 386 static inline int 387 pf_frent_index(struct pf_frent *frent) 388 { 389 /* 390 * We have an array of 16 entry points to the queue. A full size 391 * 65535 octet IP packet can have 8192 fragments. So the queue 392 * traversal length is at most 512 and at most 16 entry points are 393 * checked. We need 128 additional bytes on a 64 bit architecture. 394 */ 395 CTASSERT(((u_int16_t)0xffff &~ 7) / (0x10000 / PF_FRAG_ENTRY_POINTS) == 396 16 - 1); 397 CTASSERT(((u_int16_t)0xffff >> 3) / PF_FRAG_ENTRY_POINTS == 512 - 1); 398 399 return frent->fe_off / (0x10000 / PF_FRAG_ENTRY_POINTS); 400 } 401 402 static int 403 pf_frent_insert(struct pf_fragment *frag, struct pf_frent *frent, 404 struct pf_frent *prev) 405 { 406 int index; 407 408 CTASSERT(PF_FRAG_ENTRY_LIMIT <= 0xff); 409 410 /* 411 * A packet has at most 65536 octets. With 16 entry points, each one 412 * spawns 4096 octets. We limit these to 64 fragments each, which 413 * means on average every fragment must have at least 64 octets. 414 */ 415 index = pf_frent_index(frent); 416 if (frag->fr_entries[index] >= PF_FRAG_ENTRY_LIMIT) 417 return ENOBUFS; 418 frag->fr_entries[index]++; 419 420 if (prev == NULL) { 421 TAILQ_INSERT_HEAD(&frag->fr_queue, frent, fr_next); 422 } else { 423 KASSERT(prev->fe_off + prev->fe_len <= frent->fe_off, 424 ("overlapping fragment")); 425 TAILQ_INSERT_AFTER(&frag->fr_queue, prev, frent, fr_next); 426 } 427 428 if (frag->fr_firstoff[index] == NULL) { 429 KASSERT(prev == NULL || pf_frent_index(prev) < index, 430 ("prev == NULL || pf_frent_index(pref) < index")); 431 frag->fr_firstoff[index] = frent; 432 } else { 433 if (frent->fe_off < frag->fr_firstoff[index]->fe_off) { 434 KASSERT(prev == NULL || pf_frent_index(prev) < index, 435 ("prev == NULL || pf_frent_index(pref) < index")); 436 frag->fr_firstoff[index] = frent; 437 } else { 438 KASSERT(prev != NULL, ("prev != NULL")); 439 KASSERT(pf_frent_index(prev) == index, 440 ("pf_frent_index(prev) == index")); 441 } 442 } 443 444 frag->fr_holes += pf_frent_holes(frent); 445 446 return 0; 447 } 448 449 void 450 pf_frent_remove(struct pf_fragment *frag, struct pf_frent *frent) 451 { 452 #ifdef INVARIANTS 453 struct pf_frent *prev = TAILQ_PREV(frent, pf_fragq, fr_next); 454 #endif 455 struct pf_frent *next = TAILQ_NEXT(frent, fr_next); 456 int index; 457 458 frag->fr_holes -= pf_frent_holes(frent); 459 460 index = pf_frent_index(frent); 461 KASSERT(frag->fr_firstoff[index] != NULL, ("frent not found")); 462 if (frag->fr_firstoff[index]->fe_off == frent->fe_off) { 463 if (next == NULL) { 464 frag->fr_firstoff[index] = NULL; 465 } else { 466 KASSERT(frent->fe_off + frent->fe_len <= next->fe_off, 467 ("overlapping fragment")); 468 if (pf_frent_index(next) == index) { 469 frag->fr_firstoff[index] = next; 470 } else { 471 frag->fr_firstoff[index] = NULL; 472 } 473 } 474 } else { 475 KASSERT(frag->fr_firstoff[index]->fe_off < frent->fe_off, 476 ("frag->fr_firstoff[index]->fe_off < frent->fe_off")); 477 KASSERT(prev != NULL, ("prev != NULL")); 478 KASSERT(prev->fe_off + prev->fe_len <= frent->fe_off, 479 ("overlapping fragment")); 480 KASSERT(pf_frent_index(prev) == index, 481 ("pf_frent_index(prev) == index")); 482 } 483 484 TAILQ_REMOVE(&frag->fr_queue, frent, fr_next); 485 486 KASSERT(frag->fr_entries[index] > 0, ("No fragments remaining")); 487 frag->fr_entries[index]--; 488 } 489 490 struct pf_frent * 491 pf_frent_previous(struct pf_fragment *frag, struct pf_frent *frent) 492 { 493 struct pf_frent *prev, *next; 494 int index; 495 496 /* 497 * If there are no fragments after frag, take the final one. Assume 498 * that the global queue is not empty. 499 */ 500 prev = TAILQ_LAST(&frag->fr_queue, pf_fragq); 501 KASSERT(prev != NULL, ("prev != NULL")); 502 if (prev->fe_off <= frent->fe_off) 503 return prev; 504 /* 505 * We want to find a fragment entry that is before frag, but still 506 * close to it. Find the first fragment entry that is in the same 507 * entry point or in the first entry point after that. As we have 508 * already checked that there are entries behind frag, this will 509 * succeed. 510 */ 511 for (index = pf_frent_index(frent); index < PF_FRAG_ENTRY_POINTS; 512 index++) { 513 prev = frag->fr_firstoff[index]; 514 if (prev != NULL) 515 break; 516 } 517 KASSERT(prev != NULL, ("prev != NULL")); 518 /* 519 * In prev we may have a fragment from the same entry point that is 520 * before frent, or one that is just one position behind frent. 521 * In the latter case, we go back one step and have the predecessor. 522 * There may be none if the new fragment will be the first one. 523 */ 524 if (prev->fe_off > frent->fe_off) { 525 prev = TAILQ_PREV(prev, pf_fragq, fr_next); 526 if (prev == NULL) 527 return NULL; 528 KASSERT(prev->fe_off <= frent->fe_off, 529 ("prev->fe_off <= frent->fe_off")); 530 return prev; 531 } 532 /* 533 * In prev is the first fragment of the entry point. The offset 534 * of frag is behind it. Find the closest previous fragment. 535 */ 536 for (next = TAILQ_NEXT(prev, fr_next); next != NULL; 537 next = TAILQ_NEXT(next, fr_next)) { 538 if (next->fe_off > frent->fe_off) 539 break; 540 prev = next; 541 } 542 return prev; 543 } 544 545 static struct pf_fragment * 546 pf_fillup_fragment(struct pf_fragment_cmp *key, struct pf_frent *frent, 547 u_short *reason) 548 { 549 struct pf_frent *after, *next, *prev; 550 struct pf_fragment *frag; 551 uint16_t total; 552 int old_index, new_index; 553 554 PF_FRAG_ASSERT(); 555 556 /* No empty fragments. */ 557 if (frent->fe_len == 0) { 558 DPFPRINTF(("bad fragment: len 0\n")); 559 goto bad_fragment; 560 } 561 562 /* All fragments are 8 byte aligned. */ 563 if (frent->fe_mff && (frent->fe_len & 0x7)) { 564 DPFPRINTF(("bad fragment: mff and len %d\n", frent->fe_len)); 565 goto bad_fragment; 566 } 567 568 /* Respect maximum length, IP_MAXPACKET == IPV6_MAXPACKET. */ 569 if (frent->fe_off + frent->fe_len > IP_MAXPACKET) { 570 DPFPRINTF(("bad fragment: max packet %d\n", 571 frent->fe_off + frent->fe_len)); 572 goto bad_fragment; 573 } 574 575 DPFPRINTF((key->frc_af == AF_INET ? 576 "reass frag %d @ %d-%d\n" : "reass frag %#08x @ %d-%d\n", 577 key->frc_id, frent->fe_off, frent->fe_off + frent->fe_len)); 578 579 /* Fully buffer all of the fragments in this fragment queue. */ 580 frag = pf_find_fragment(key, &V_pf_frag_tree); 581 582 /* Create a new reassembly queue for this packet. */ 583 if (frag == NULL) { 584 frag = uma_zalloc(V_pf_frag_z, M_NOWAIT); 585 if (frag == NULL) { 586 pf_flush_fragments(); 587 frag = uma_zalloc(V_pf_frag_z, M_NOWAIT); 588 if (frag == NULL) { 589 REASON_SET(reason, PFRES_MEMORY); 590 goto drop_fragment; 591 } 592 } 593 594 *(struct pf_fragment_cmp *)frag = *key; 595 memset(frag->fr_firstoff, 0, sizeof(frag->fr_firstoff)); 596 memset(frag->fr_entries, 0, sizeof(frag->fr_entries)); 597 frag->fr_timeout = time_uptime; 598 frag->fr_maxlen = frent->fe_len; 599 frag->fr_holes = 1; 600 TAILQ_INIT(&frag->fr_queue); 601 602 RB_INSERT(pf_frag_tree, &V_pf_frag_tree, frag); 603 TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next); 604 605 /* We do not have a previous fragment, cannot fail. */ 606 pf_frent_insert(frag, frent, NULL); 607 608 return (frag); 609 } 610 611 KASSERT(!TAILQ_EMPTY(&frag->fr_queue), ("!TAILQ_EMPTY()->fr_queue")); 612 613 /* Remember maximum fragment len for refragmentation. */ 614 if (frent->fe_len > frag->fr_maxlen) 615 frag->fr_maxlen = frent->fe_len; 616 617 /* Maximum data we have seen already. */ 618 total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off + 619 TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len; 620 621 /* Non terminal fragments must have more fragments flag. */ 622 if (frent->fe_off + frent->fe_len < total && !frent->fe_mff) 623 goto bad_fragment; 624 625 /* Check if we saw the last fragment already. */ 626 if (!TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_mff) { 627 if (frent->fe_off + frent->fe_len > total || 628 (frent->fe_off + frent->fe_len == total && frent->fe_mff)) 629 goto bad_fragment; 630 } else { 631 if (frent->fe_off + frent->fe_len == total && !frent->fe_mff) 632 goto bad_fragment; 633 } 634 635 /* Find neighbors for newly inserted fragment */ 636 prev = pf_frent_previous(frag, frent); 637 if (prev == NULL) { 638 after = TAILQ_FIRST(&frag->fr_queue); 639 KASSERT(after != NULL, ("after != NULL")); 640 } else { 641 after = TAILQ_NEXT(prev, fr_next); 642 } 643 644 if (prev != NULL && prev->fe_off + prev->fe_len > frent->fe_off) { 645 uint16_t precut; 646 647 precut = prev->fe_off + prev->fe_len - frent->fe_off; 648 if (precut >= frent->fe_len) 649 goto bad_fragment; 650 DPFPRINTF(("overlap -%d\n", precut)); 651 m_adj(frent->fe_m, precut); 652 frent->fe_off += precut; 653 frent->fe_len -= precut; 654 } 655 656 for (; after != NULL && frent->fe_off + frent->fe_len > after->fe_off; 657 after = next) { 658 uint16_t aftercut; 659 660 aftercut = frent->fe_off + frent->fe_len - after->fe_off; 661 DPFPRINTF(("adjust overlap %d\n", aftercut)); 662 if (aftercut < after->fe_len) { 663 m_adj(after->fe_m, aftercut); 664 old_index = pf_frent_index(after); 665 after->fe_off += aftercut; 666 after->fe_len -= aftercut; 667 new_index = pf_frent_index(after); 668 if (old_index != new_index) { 669 DPFPRINTF(("frag index %d, new %d", 670 old_index, new_index)); 671 /* Fragment switched queue as fe_off changed */ 672 after->fe_off -= aftercut; 673 after->fe_len += aftercut; 674 /* Remove restored fragment from old queue */ 675 pf_frent_remove(frag, after); 676 after->fe_off += aftercut; 677 after->fe_len -= aftercut; 678 /* Insert into correct queue */ 679 if (pf_frent_insert(frag, after, prev)) { 680 DPFPRINTF( 681 ("fragment requeue limit exceeded")); 682 m_freem(after->fe_m); 683 uma_zfree(V_pf_frent_z, after); 684 /* There is not way to recover */ 685 goto bad_fragment; 686 } 687 } 688 break; 689 } 690 691 /* This fragment is completely overlapped, lose it. */ 692 next = TAILQ_NEXT(after, fr_next); 693 pf_frent_remove(frag, after); 694 m_freem(after->fe_m); 695 uma_zfree(V_pf_frent_z, after); 696 } 697 698 /* If part of the queue gets too long, there is not way to recover. */ 699 if (pf_frent_insert(frag, frent, prev)) { 700 DPFPRINTF(("fragment queue limit exceeded\n")); 701 goto bad_fragment; 702 } 703 704 return (frag); 705 706 bad_fragment: 707 REASON_SET(reason, PFRES_FRAG); 708 drop_fragment: 709 uma_zfree(V_pf_frent_z, frent); 710 return (NULL); 711 } 712 713 static struct mbuf * 714 pf_join_fragment(struct pf_fragment *frag) 715 { 716 struct mbuf *m, *m2; 717 struct pf_frent *frent, *next; 718 719 frent = TAILQ_FIRST(&frag->fr_queue); 720 next = TAILQ_NEXT(frent, fr_next); 721 722 m = frent->fe_m; 723 m_adj(m, (frent->fe_hdrlen + frent->fe_len) - m->m_pkthdr.len); 724 uma_zfree(V_pf_frent_z, frent); 725 for (frent = next; frent != NULL; frent = next) { 726 next = TAILQ_NEXT(frent, fr_next); 727 728 m2 = frent->fe_m; 729 /* Strip off ip header. */ 730 m_adj(m2, frent->fe_hdrlen); 731 /* Strip off any trailing bytes. */ 732 m_adj(m2, frent->fe_len - m2->m_pkthdr.len); 733 734 uma_zfree(V_pf_frent_z, frent); 735 m_cat(m, m2); 736 } 737 738 /* Remove from fragment queue. */ 739 pf_remove_fragment(frag); 740 741 return (m); 742 } 743 744 #ifdef INET 745 static int 746 pf_reassemble(struct mbuf **m0, struct ip *ip, int dir, u_short *reason) 747 { 748 struct mbuf *m = *m0; 749 struct pf_frent *frent; 750 struct pf_fragment *frag; 751 struct pf_fragment_cmp key; 752 uint16_t total, hdrlen; 753 754 /* Get an entry for the fragment queue */ 755 if ((frent = pf_create_fragment(reason)) == NULL) 756 return (PF_DROP); 757 758 frent->fe_m = m; 759 frent->fe_hdrlen = ip->ip_hl << 2; 760 frent->fe_extoff = 0; 761 frent->fe_len = ntohs(ip->ip_len) - (ip->ip_hl << 2); 762 frent->fe_off = (ntohs(ip->ip_off) & IP_OFFMASK) << 3; 763 frent->fe_mff = ntohs(ip->ip_off) & IP_MF; 764 765 pf_ip2key(ip, dir, &key); 766 767 if ((frag = pf_fillup_fragment(&key, frent, reason)) == NULL) 768 return (PF_DROP); 769 770 /* The mbuf is part of the fragment entry, no direct free or access */ 771 m = *m0 = NULL; 772 773 if (frag->fr_holes) { 774 DPFPRINTF(("frag %d, holes %d\n", frag->fr_id, frag->fr_holes)); 775 return (PF_PASS); /* drop because *m0 is NULL, no error */ 776 } 777 778 /* We have all the data */ 779 frent = TAILQ_FIRST(&frag->fr_queue); 780 KASSERT(frent != NULL, ("frent != NULL")); 781 total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off + 782 TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len; 783 hdrlen = frent->fe_hdrlen; 784 785 m = *m0 = pf_join_fragment(frag); 786 frag = NULL; 787 788 if (m->m_flags & M_PKTHDR) { 789 int plen = 0; 790 for (m = *m0; m; m = m->m_next) 791 plen += m->m_len; 792 m = *m0; 793 m->m_pkthdr.len = plen; 794 } 795 796 ip = mtod(m, struct ip *); 797 ip->ip_sum = pf_cksum_fixup(ip->ip_sum, ip->ip_len, 798 htons(hdrlen + total), 0); 799 ip->ip_len = htons(hdrlen + total); 800 ip->ip_sum = pf_cksum_fixup(ip->ip_sum, ip->ip_off, 801 ip->ip_off & ~(IP_MF|IP_OFFMASK), 0); 802 ip->ip_off &= ~(IP_MF|IP_OFFMASK); 803 804 if (hdrlen + total > IP_MAXPACKET) { 805 DPFPRINTF(("drop: too big: %d\n", total)); 806 ip->ip_len = 0; 807 REASON_SET(reason, PFRES_SHORT); 808 /* PF_DROP requires a valid mbuf *m0 in pf_test() */ 809 return (PF_DROP); 810 } 811 812 DPFPRINTF(("complete: %p(%d)\n", m, ntohs(ip->ip_len))); 813 return (PF_PASS); 814 } 815 #endif /* INET */ 816 817 #ifdef INET6 818 static int 819 pf_reassemble6(struct mbuf **m0, struct ip6_hdr *ip6, struct ip6_frag *fraghdr, 820 uint16_t hdrlen, uint16_t extoff, u_short *reason) 821 { 822 struct mbuf *m = *m0; 823 struct pf_frent *frent; 824 struct pf_fragment *frag; 825 struct pf_fragment_cmp key; 826 struct m_tag *mtag; 827 struct pf_fragment_tag *ftag; 828 int off; 829 uint32_t frag_id; 830 uint16_t total, maxlen; 831 uint8_t proto; 832 833 PF_FRAG_LOCK(); 834 835 /* Get an entry for the fragment queue. */ 836 if ((frent = pf_create_fragment(reason)) == NULL) { 837 PF_FRAG_UNLOCK(); 838 return (PF_DROP); 839 } 840 841 frent->fe_m = m; 842 frent->fe_hdrlen = hdrlen; 843 frent->fe_extoff = extoff; 844 frent->fe_len = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - hdrlen; 845 frent->fe_off = ntohs(fraghdr->ip6f_offlg & IP6F_OFF_MASK); 846 frent->fe_mff = fraghdr->ip6f_offlg & IP6F_MORE_FRAG; 847 848 key.frc_src.v6 = ip6->ip6_src; 849 key.frc_dst.v6 = ip6->ip6_dst; 850 key.frc_af = AF_INET6; 851 /* Only the first fragment's protocol is relevant. */ 852 key.frc_proto = 0; 853 key.frc_id = fraghdr->ip6f_ident; 854 855 if ((frag = pf_fillup_fragment(&key, frent, reason)) == NULL) { 856 PF_FRAG_UNLOCK(); 857 return (PF_DROP); 858 } 859 860 /* The mbuf is part of the fragment entry, no direct free or access. */ 861 m = *m0 = NULL; 862 863 if (frag->fr_holes) { 864 DPFPRINTF(("frag %d, holes %d\n", frag->fr_id, 865 frag->fr_holes)); 866 PF_FRAG_UNLOCK(); 867 return (PF_PASS); /* Drop because *m0 is NULL, no error. */ 868 } 869 870 /* We have all the data. */ 871 frent = TAILQ_FIRST(&frag->fr_queue); 872 KASSERT(frent != NULL, ("frent != NULL")); 873 extoff = frent->fe_extoff; 874 maxlen = frag->fr_maxlen; 875 frag_id = frag->fr_id; 876 total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off + 877 TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len; 878 hdrlen = frent->fe_hdrlen - sizeof(struct ip6_frag); 879 880 m = *m0 = pf_join_fragment(frag); 881 frag = NULL; 882 883 PF_FRAG_UNLOCK(); 884 885 /* Take protocol from first fragment header. */ 886 m = m_getptr(m, hdrlen + offsetof(struct ip6_frag, ip6f_nxt), &off); 887 KASSERT(m, ("%s: short mbuf chain", __func__)); 888 proto = *(mtod(m, caddr_t) + off); 889 m = *m0; 890 891 /* Delete frag6 header */ 892 if (ip6_deletefraghdr(m, hdrlen, M_NOWAIT) != 0) 893 goto fail; 894 895 if (m->m_flags & M_PKTHDR) { 896 int plen = 0; 897 for (m = *m0; m; m = m->m_next) 898 plen += m->m_len; 899 m = *m0; 900 m->m_pkthdr.len = plen; 901 } 902 903 if ((mtag = m_tag_get(PF_REASSEMBLED, sizeof(struct pf_fragment_tag), 904 M_NOWAIT)) == NULL) 905 goto fail; 906 ftag = (struct pf_fragment_tag *)(mtag + 1); 907 ftag->ft_hdrlen = hdrlen; 908 ftag->ft_extoff = extoff; 909 ftag->ft_maxlen = maxlen; 910 ftag->ft_id = frag_id; 911 m_tag_prepend(m, mtag); 912 913 ip6 = mtod(m, struct ip6_hdr *); 914 ip6->ip6_plen = htons(hdrlen - sizeof(struct ip6_hdr) + total); 915 if (extoff) { 916 /* Write protocol into next field of last extension header. */ 917 m = m_getptr(m, extoff + offsetof(struct ip6_ext, ip6e_nxt), 918 &off); 919 KASSERT(m, ("%s: short mbuf chain", __func__)); 920 *(mtod(m, char *) + off) = proto; 921 m = *m0; 922 } else 923 ip6->ip6_nxt = proto; 924 925 if (hdrlen - sizeof(struct ip6_hdr) + total > IPV6_MAXPACKET) { 926 DPFPRINTF(("drop: too big: %d\n", total)); 927 ip6->ip6_plen = 0; 928 REASON_SET(reason, PFRES_SHORT); 929 /* PF_DROP requires a valid mbuf *m0 in pf_test6(). */ 930 return (PF_DROP); 931 } 932 933 DPFPRINTF(("complete: %p(%d)\n", m, ntohs(ip6->ip6_plen))); 934 return (PF_PASS); 935 936 fail: 937 REASON_SET(reason, PFRES_MEMORY); 938 /* PF_DROP requires a valid mbuf *m0 in pf_test6(), will free later. */ 939 return (PF_DROP); 940 } 941 #endif /* INET6 */ 942 943 #ifdef INET6 944 int 945 pf_refragment6(struct ifnet *ifp, struct mbuf **m0, struct m_tag *mtag) 946 { 947 struct mbuf *m = *m0, *t; 948 struct pf_fragment_tag *ftag = (struct pf_fragment_tag *)(mtag + 1); 949 struct pf_pdesc pd; 950 uint32_t frag_id; 951 uint16_t hdrlen, extoff, maxlen; 952 uint8_t proto; 953 int error, action; 954 955 hdrlen = ftag->ft_hdrlen; 956 extoff = ftag->ft_extoff; 957 maxlen = ftag->ft_maxlen; 958 frag_id = ftag->ft_id; 959 m_tag_delete(m, mtag); 960 mtag = NULL; 961 ftag = NULL; 962 963 if (extoff) { 964 int off; 965 966 /* Use protocol from next field of last extension header */ 967 m = m_getptr(m, extoff + offsetof(struct ip6_ext, ip6e_nxt), 968 &off); 969 KASSERT((m != NULL), ("pf_refragment6: short mbuf chain")); 970 proto = *(mtod(m, caddr_t) + off); 971 *(mtod(m, char *) + off) = IPPROTO_FRAGMENT; 972 m = *m0; 973 } else { 974 struct ip6_hdr *hdr; 975 976 hdr = mtod(m, struct ip6_hdr *); 977 proto = hdr->ip6_nxt; 978 hdr->ip6_nxt = IPPROTO_FRAGMENT; 979 } 980 981 /* The MTU must be a multiple of 8 bytes, or we risk doing the 982 * fragmentation wrong. */ 983 maxlen = maxlen & ~7; 984 985 /* 986 * Maxlen may be less than 8 if there was only a single 987 * fragment. As it was fragmented before, add a fragment 988 * header also for a single fragment. If total or maxlen 989 * is less than 8, ip6_fragment() will return EMSGSIZE and 990 * we drop the packet. 991 */ 992 error = ip6_fragment(ifp, m, hdrlen, proto, maxlen, frag_id); 993 m = (*m0)->m_nextpkt; 994 (*m0)->m_nextpkt = NULL; 995 if (error == 0) { 996 /* The first mbuf contains the unfragmented packet. */ 997 m_freem(*m0); 998 *m0 = NULL; 999 action = PF_PASS; 1000 } else { 1001 /* Drop expects an mbuf to free. */ 1002 DPFPRINTF(("refragment error %d\n", error)); 1003 action = PF_DROP; 1004 } 1005 for (t = m; m; m = t) { 1006 t = m->m_nextpkt; 1007 m->m_nextpkt = NULL; 1008 m->m_flags |= M_SKIP_FIREWALL; 1009 memset(&pd, 0, sizeof(pd)); 1010 pd.pf_mtag = pf_find_mtag(m); 1011 if (error == 0) 1012 ip6_forward(m, 0); 1013 else 1014 m_freem(m); 1015 } 1016 1017 return (action); 1018 } 1019 #endif /* INET6 */ 1020 1021 #ifdef INET 1022 int 1023 pf_normalize_ip(struct mbuf **m0, int dir, struct pfi_kkif *kif, u_short *reason, 1024 struct pf_pdesc *pd) 1025 { 1026 struct mbuf *m = *m0; 1027 struct pf_krule *r; 1028 struct ip *h = mtod(m, struct ip *); 1029 int mff = (ntohs(h->ip_off) & IP_MF); 1030 int hlen = h->ip_hl << 2; 1031 u_int16_t fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3; 1032 u_int16_t max; 1033 int ip_len; 1034 int ip_off; 1035 int tag = -1; 1036 int verdict; 1037 1038 PF_RULES_RASSERT(); 1039 1040 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr); 1041 while (r != NULL) { 1042 pf_counter_u64_add(&r->evaluations, 1); 1043 if (pfi_kkif_match(r->kif, kif) == r->ifnot) 1044 r = r->skip[PF_SKIP_IFP].ptr; 1045 else if (r->direction && r->direction != dir) 1046 r = r->skip[PF_SKIP_DIR].ptr; 1047 else if (r->af && r->af != AF_INET) 1048 r = r->skip[PF_SKIP_AF].ptr; 1049 else if (r->proto && r->proto != h->ip_p) 1050 r = r->skip[PF_SKIP_PROTO].ptr; 1051 else if (PF_MISMATCHAW(&r->src.addr, 1052 (struct pf_addr *)&h->ip_src.s_addr, AF_INET, 1053 r->src.neg, kif, M_GETFIB(m))) 1054 r = r->skip[PF_SKIP_SRC_ADDR].ptr; 1055 else if (PF_MISMATCHAW(&r->dst.addr, 1056 (struct pf_addr *)&h->ip_dst.s_addr, AF_INET, 1057 r->dst.neg, NULL, M_GETFIB(m))) 1058 r = r->skip[PF_SKIP_DST_ADDR].ptr; 1059 else if (r->match_tag && !pf_match_tag(m, r, &tag, 1060 pd->pf_mtag ? pd->pf_mtag->tag : 0)) 1061 r = TAILQ_NEXT(r, entries); 1062 else 1063 break; 1064 } 1065 1066 if (r == NULL || r->action == PF_NOSCRUB) 1067 return (PF_PASS); 1068 1069 pf_counter_u64_critical_enter(); 1070 pf_counter_u64_add_protected(&r->packets[dir == PF_OUT], 1); 1071 pf_counter_u64_add_protected(&r->bytes[dir == PF_OUT], pd->tot_len); 1072 pf_counter_u64_critical_exit(); 1073 1074 /* Check for illegal packets */ 1075 if (hlen < (int)sizeof(struct ip)) { 1076 REASON_SET(reason, PFRES_NORM); 1077 goto drop; 1078 } 1079 1080 if (hlen > ntohs(h->ip_len)) { 1081 REASON_SET(reason, PFRES_NORM); 1082 goto drop; 1083 } 1084 1085 /* Clear IP_DF if the rule uses the no-df option */ 1086 if (r->rule_flag & PFRULE_NODF && h->ip_off & htons(IP_DF)) { 1087 u_int16_t ip_off = h->ip_off; 1088 1089 h->ip_off &= htons(~IP_DF); 1090 h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0); 1091 } 1092 1093 /* We will need other tests here */ 1094 if (!fragoff && !mff) 1095 goto no_fragment; 1096 1097 /* We're dealing with a fragment now. Don't allow fragments 1098 * with IP_DF to enter the cache. If the flag was cleared by 1099 * no-df above, fine. Otherwise drop it. 1100 */ 1101 if (h->ip_off & htons(IP_DF)) { 1102 DPFPRINTF(("IP_DF\n")); 1103 goto bad; 1104 } 1105 1106 ip_len = ntohs(h->ip_len) - hlen; 1107 ip_off = (ntohs(h->ip_off) & IP_OFFMASK) << 3; 1108 1109 /* All fragments are 8 byte aligned */ 1110 if (mff && (ip_len & 0x7)) { 1111 DPFPRINTF(("mff and %d\n", ip_len)); 1112 goto bad; 1113 } 1114 1115 /* Respect maximum length */ 1116 if (fragoff + ip_len > IP_MAXPACKET) { 1117 DPFPRINTF(("max packet %d\n", fragoff + ip_len)); 1118 goto bad; 1119 } 1120 max = fragoff + ip_len; 1121 1122 /* Fully buffer all of the fragments 1123 * Might return a completely reassembled mbuf, or NULL */ 1124 PF_FRAG_LOCK(); 1125 DPFPRINTF(("reass frag %d @ %d-%d\n", h->ip_id, fragoff, max)); 1126 verdict = pf_reassemble(m0, h, dir, reason); 1127 PF_FRAG_UNLOCK(); 1128 1129 if (verdict != PF_PASS) 1130 return (PF_DROP); 1131 1132 m = *m0; 1133 if (m == NULL) 1134 return (PF_DROP); 1135 1136 h = mtod(m, struct ip *); 1137 1138 no_fragment: 1139 /* At this point, only IP_DF is allowed in ip_off */ 1140 if (h->ip_off & ~htons(IP_DF)) { 1141 u_int16_t ip_off = h->ip_off; 1142 1143 h->ip_off &= htons(IP_DF); 1144 h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0); 1145 } 1146 1147 pf_scrub_ip(&m, r->rule_flag, r->min_ttl, r->set_tos); 1148 1149 return (PF_PASS); 1150 1151 bad: 1152 DPFPRINTF(("dropping bad fragment\n")); 1153 REASON_SET(reason, PFRES_FRAG); 1154 drop: 1155 if (r != NULL && r->log) 1156 PFLOG_PACKET(kif, m, AF_INET, dir, *reason, r, NULL, NULL, pd, 1157 1); 1158 1159 return (PF_DROP); 1160 } 1161 #endif 1162 1163 #ifdef INET6 1164 int 1165 pf_normalize_ip6(struct mbuf **m0, int dir, struct pfi_kkif *kif, 1166 u_short *reason, struct pf_pdesc *pd) 1167 { 1168 struct mbuf *m = *m0; 1169 struct pf_krule *r; 1170 struct ip6_hdr *h = mtod(m, struct ip6_hdr *); 1171 int extoff; 1172 int off; 1173 struct ip6_ext ext; 1174 struct ip6_opt opt; 1175 struct ip6_frag frag; 1176 u_int32_t plen; 1177 int optend; 1178 int ooff; 1179 u_int8_t proto; 1180 int terminal; 1181 1182 PF_RULES_RASSERT(); 1183 1184 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr); 1185 while (r != NULL) { 1186 pf_counter_u64_add(&r->evaluations, 1); 1187 if (pfi_kkif_match(r->kif, kif) == r->ifnot) 1188 r = r->skip[PF_SKIP_IFP].ptr; 1189 else if (r->direction && r->direction != dir) 1190 r = r->skip[PF_SKIP_DIR].ptr; 1191 else if (r->af && r->af != AF_INET6) 1192 r = r->skip[PF_SKIP_AF].ptr; 1193 #if 0 /* header chain! */ 1194 else if (r->proto && r->proto != h->ip6_nxt) 1195 r = r->skip[PF_SKIP_PROTO].ptr; 1196 #endif 1197 else if (PF_MISMATCHAW(&r->src.addr, 1198 (struct pf_addr *)&h->ip6_src, AF_INET6, 1199 r->src.neg, kif, M_GETFIB(m))) 1200 r = r->skip[PF_SKIP_SRC_ADDR].ptr; 1201 else if (PF_MISMATCHAW(&r->dst.addr, 1202 (struct pf_addr *)&h->ip6_dst, AF_INET6, 1203 r->dst.neg, NULL, M_GETFIB(m))) 1204 r = r->skip[PF_SKIP_DST_ADDR].ptr; 1205 else 1206 break; 1207 } 1208 1209 if (r == NULL || r->action == PF_NOSCRUB) 1210 return (PF_PASS); 1211 1212 pf_counter_u64_critical_enter(); 1213 pf_counter_u64_add_protected(&r->packets[dir == PF_OUT], 1); 1214 pf_counter_u64_add_protected(&r->bytes[dir == PF_OUT], pd->tot_len); 1215 pf_counter_u64_critical_exit(); 1216 1217 /* Check for illegal packets */ 1218 if (sizeof(struct ip6_hdr) + IPV6_MAXPACKET < m->m_pkthdr.len) 1219 goto drop; 1220 1221 plen = ntohs(h->ip6_plen); 1222 /* jumbo payload option not supported */ 1223 if (plen == 0) 1224 goto drop; 1225 1226 extoff = 0; 1227 off = sizeof(struct ip6_hdr); 1228 proto = h->ip6_nxt; 1229 terminal = 0; 1230 do { 1231 switch (proto) { 1232 case IPPROTO_FRAGMENT: 1233 goto fragment; 1234 break; 1235 case IPPROTO_AH: 1236 case IPPROTO_ROUTING: 1237 case IPPROTO_DSTOPTS: 1238 if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL, 1239 NULL, AF_INET6)) 1240 goto shortpkt; 1241 extoff = off; 1242 if (proto == IPPROTO_AH) 1243 off += (ext.ip6e_len + 2) * 4; 1244 else 1245 off += (ext.ip6e_len + 1) * 8; 1246 proto = ext.ip6e_nxt; 1247 break; 1248 case IPPROTO_HOPOPTS: 1249 if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL, 1250 NULL, AF_INET6)) 1251 goto shortpkt; 1252 extoff = off; 1253 optend = off + (ext.ip6e_len + 1) * 8; 1254 ooff = off + sizeof(ext); 1255 do { 1256 if (!pf_pull_hdr(m, ooff, &opt.ip6o_type, 1257 sizeof(opt.ip6o_type), NULL, NULL, 1258 AF_INET6)) 1259 goto shortpkt; 1260 if (opt.ip6o_type == IP6OPT_PAD1) { 1261 ooff++; 1262 continue; 1263 } 1264 if (!pf_pull_hdr(m, ooff, &opt, sizeof(opt), 1265 NULL, NULL, AF_INET6)) 1266 goto shortpkt; 1267 if (ooff + sizeof(opt) + opt.ip6o_len > optend) 1268 goto drop; 1269 if (opt.ip6o_type == IP6OPT_JUMBO) 1270 goto drop; 1271 ooff += sizeof(opt) + opt.ip6o_len; 1272 } while (ooff < optend); 1273 1274 off = optend; 1275 proto = ext.ip6e_nxt; 1276 break; 1277 default: 1278 terminal = 1; 1279 break; 1280 } 1281 } while (!terminal); 1282 1283 if (sizeof(struct ip6_hdr) + plen > m->m_pkthdr.len) 1284 goto shortpkt; 1285 1286 pf_scrub_ip6(&m, r->min_ttl); 1287 1288 return (PF_PASS); 1289 1290 fragment: 1291 if (sizeof(struct ip6_hdr) + plen > m->m_pkthdr.len) 1292 goto shortpkt; 1293 1294 if (!pf_pull_hdr(m, off, &frag, sizeof(frag), NULL, NULL, AF_INET6)) 1295 goto shortpkt; 1296 1297 /* Offset now points to data portion. */ 1298 off += sizeof(frag); 1299 1300 /* Returns PF_DROP or *m0 is NULL or completely reassembled mbuf. */ 1301 if (pf_reassemble6(m0, h, &frag, off, extoff, reason) != PF_PASS) 1302 return (PF_DROP); 1303 m = *m0; 1304 if (m == NULL) 1305 return (PF_DROP); 1306 1307 pd->flags |= PFDESC_IP_REAS; 1308 return (PF_PASS); 1309 1310 shortpkt: 1311 REASON_SET(reason, PFRES_SHORT); 1312 if (r != NULL && r->log) 1313 PFLOG_PACKET(kif, m, AF_INET6, dir, *reason, r, NULL, NULL, pd, 1314 1); 1315 return (PF_DROP); 1316 1317 drop: 1318 REASON_SET(reason, PFRES_NORM); 1319 if (r != NULL && r->log) 1320 PFLOG_PACKET(kif, m, AF_INET6, dir, *reason, r, NULL, NULL, pd, 1321 1); 1322 return (PF_DROP); 1323 } 1324 #endif /* INET6 */ 1325 1326 int 1327 pf_normalize_tcp(int dir, struct pfi_kkif *kif, struct mbuf *m, int ipoff, 1328 int off, void *h, struct pf_pdesc *pd) 1329 { 1330 struct pf_krule *r, *rm = NULL; 1331 struct tcphdr *th = &pd->hdr.tcp; 1332 int rewrite = 0; 1333 u_short reason; 1334 u_int8_t flags; 1335 sa_family_t af = pd->af; 1336 1337 PF_RULES_RASSERT(); 1338 1339 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr); 1340 while (r != NULL) { 1341 pf_counter_u64_add(&r->evaluations, 1); 1342 if (pfi_kkif_match(r->kif, kif) == r->ifnot) 1343 r = r->skip[PF_SKIP_IFP].ptr; 1344 else if (r->direction && r->direction != dir) 1345 r = r->skip[PF_SKIP_DIR].ptr; 1346 else if (r->af && r->af != af) 1347 r = r->skip[PF_SKIP_AF].ptr; 1348 else if (r->proto && r->proto != pd->proto) 1349 r = r->skip[PF_SKIP_PROTO].ptr; 1350 else if (PF_MISMATCHAW(&r->src.addr, pd->src, af, 1351 r->src.neg, kif, M_GETFIB(m))) 1352 r = r->skip[PF_SKIP_SRC_ADDR].ptr; 1353 else if (r->src.port_op && !pf_match_port(r->src.port_op, 1354 r->src.port[0], r->src.port[1], th->th_sport)) 1355 r = r->skip[PF_SKIP_SRC_PORT].ptr; 1356 else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af, 1357 r->dst.neg, NULL, M_GETFIB(m))) 1358 r = r->skip[PF_SKIP_DST_ADDR].ptr; 1359 else if (r->dst.port_op && !pf_match_port(r->dst.port_op, 1360 r->dst.port[0], r->dst.port[1], th->th_dport)) 1361 r = r->skip[PF_SKIP_DST_PORT].ptr; 1362 else if (r->os_fingerprint != PF_OSFP_ANY && !pf_osfp_match( 1363 pf_osfp_fingerprint(pd, m, off, th), 1364 r->os_fingerprint)) 1365 r = TAILQ_NEXT(r, entries); 1366 else { 1367 rm = r; 1368 break; 1369 } 1370 } 1371 1372 if (rm == NULL || rm->action == PF_NOSCRUB) 1373 return (PF_PASS); 1374 1375 pf_counter_u64_critical_enter(); 1376 pf_counter_u64_add_protected(&r->packets[dir == PF_OUT], 1); 1377 pf_counter_u64_add_protected(&r->bytes[dir == PF_OUT], pd->tot_len); 1378 pf_counter_u64_critical_exit(); 1379 1380 if (rm->rule_flag & PFRULE_REASSEMBLE_TCP) 1381 pd->flags |= PFDESC_TCP_NORM; 1382 1383 flags = th->th_flags; 1384 if (flags & TH_SYN) { 1385 /* Illegal packet */ 1386 if (flags & TH_RST) 1387 goto tcp_drop; 1388 1389 if (flags & TH_FIN) 1390 goto tcp_drop; 1391 } else { 1392 /* Illegal packet */ 1393 if (!(flags & (TH_ACK|TH_RST))) 1394 goto tcp_drop; 1395 } 1396 1397 if (!(flags & TH_ACK)) { 1398 /* These flags are only valid if ACK is set */ 1399 if ((flags & TH_FIN) || (flags & TH_PUSH) || (flags & TH_URG)) 1400 goto tcp_drop; 1401 } 1402 1403 /* Check for illegal header length */ 1404 if (th->th_off < (sizeof(struct tcphdr) >> 2)) 1405 goto tcp_drop; 1406 1407 /* If flags changed, or reserved data set, then adjust */ 1408 if (flags != th->th_flags || th->th_x2 != 0) { 1409 u_int16_t ov, nv; 1410 1411 ov = *(u_int16_t *)(&th->th_ack + 1); 1412 th->th_flags = flags; 1413 th->th_x2 = 0; 1414 nv = *(u_int16_t *)(&th->th_ack + 1); 1415 1416 th->th_sum = pf_proto_cksum_fixup(m, th->th_sum, ov, nv, 0); 1417 rewrite = 1; 1418 } 1419 1420 /* Remove urgent pointer, if TH_URG is not set */ 1421 if (!(flags & TH_URG) && th->th_urp) { 1422 th->th_sum = pf_proto_cksum_fixup(m, th->th_sum, th->th_urp, 1423 0, 0); 1424 th->th_urp = 0; 1425 rewrite = 1; 1426 } 1427 1428 /* Process options */ 1429 if (r->max_mss && pf_normalize_tcpopt(r, m, th, off, pd->af)) 1430 rewrite = 1; 1431 1432 /* copy back packet headers if we sanitized */ 1433 if (rewrite) 1434 m_copyback(m, off, sizeof(*th), (caddr_t)th); 1435 1436 return (PF_PASS); 1437 1438 tcp_drop: 1439 REASON_SET(&reason, PFRES_NORM); 1440 if (rm != NULL && r->log) 1441 PFLOG_PACKET(kif, m, AF_INET, dir, reason, r, NULL, NULL, pd, 1442 1); 1443 return (PF_DROP); 1444 } 1445 1446 int 1447 pf_normalize_tcp_init(struct mbuf *m, int off, struct pf_pdesc *pd, 1448 struct tcphdr *th, struct pf_state_peer *src, struct pf_state_peer *dst) 1449 { 1450 u_int32_t tsval, tsecr; 1451 u_int8_t hdr[60]; 1452 u_int8_t *opt; 1453 1454 KASSERT((src->scrub == NULL), 1455 ("pf_normalize_tcp_init: src->scrub != NULL")); 1456 1457 src->scrub = uma_zalloc(V_pf_state_scrub_z, M_ZERO | M_NOWAIT); 1458 if (src->scrub == NULL) 1459 return (1); 1460 1461 switch (pd->af) { 1462 #ifdef INET 1463 case AF_INET: { 1464 struct ip *h = mtod(m, struct ip *); 1465 src->scrub->pfss_ttl = h->ip_ttl; 1466 break; 1467 } 1468 #endif /* INET */ 1469 #ifdef INET6 1470 case AF_INET6: { 1471 struct ip6_hdr *h = mtod(m, struct ip6_hdr *); 1472 src->scrub->pfss_ttl = h->ip6_hlim; 1473 break; 1474 } 1475 #endif /* INET6 */ 1476 } 1477 1478 /* 1479 * All normalizations below are only begun if we see the start of 1480 * the connections. They must all set an enabled bit in pfss_flags 1481 */ 1482 if ((th->th_flags & TH_SYN) == 0) 1483 return (0); 1484 1485 if (th->th_off > (sizeof(struct tcphdr) >> 2) && src->scrub && 1486 pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) { 1487 /* Diddle with TCP options */ 1488 int hlen; 1489 opt = hdr + sizeof(struct tcphdr); 1490 hlen = (th->th_off << 2) - sizeof(struct tcphdr); 1491 while (hlen >= TCPOLEN_TIMESTAMP) { 1492 switch (*opt) { 1493 case TCPOPT_EOL: /* FALLTHROUGH */ 1494 case TCPOPT_NOP: 1495 opt++; 1496 hlen--; 1497 break; 1498 case TCPOPT_TIMESTAMP: 1499 if (opt[1] >= TCPOLEN_TIMESTAMP) { 1500 src->scrub->pfss_flags |= 1501 PFSS_TIMESTAMP; 1502 src->scrub->pfss_ts_mod = 1503 htonl(arc4random()); 1504 1505 /* note PFSS_PAWS not set yet */ 1506 memcpy(&tsval, &opt[2], 1507 sizeof(u_int32_t)); 1508 memcpy(&tsecr, &opt[6], 1509 sizeof(u_int32_t)); 1510 src->scrub->pfss_tsval0 = ntohl(tsval); 1511 src->scrub->pfss_tsval = ntohl(tsval); 1512 src->scrub->pfss_tsecr = ntohl(tsecr); 1513 getmicrouptime(&src->scrub->pfss_last); 1514 } 1515 /* FALLTHROUGH */ 1516 default: 1517 hlen -= MAX(opt[1], 2); 1518 opt += MAX(opt[1], 2); 1519 break; 1520 } 1521 } 1522 } 1523 1524 return (0); 1525 } 1526 1527 void 1528 pf_normalize_tcp_cleanup(struct pf_kstate *state) 1529 { 1530 if (state->src.scrub) 1531 uma_zfree(V_pf_state_scrub_z, state->src.scrub); 1532 if (state->dst.scrub) 1533 uma_zfree(V_pf_state_scrub_z, state->dst.scrub); 1534 1535 /* Someday... flush the TCP segment reassembly descriptors. */ 1536 } 1537 1538 int 1539 pf_normalize_tcp_stateful(struct mbuf *m, int off, struct pf_pdesc *pd, 1540 u_short *reason, struct tcphdr *th, struct pf_kstate *state, 1541 struct pf_state_peer *src, struct pf_state_peer *dst, int *writeback) 1542 { 1543 struct timeval uptime; 1544 u_int32_t tsval, tsecr; 1545 u_int tsval_from_last; 1546 u_int8_t hdr[60]; 1547 u_int8_t *opt; 1548 int copyback = 0; 1549 int got_ts = 0; 1550 size_t startoff; 1551 1552 KASSERT((src->scrub || dst->scrub), 1553 ("%s: src->scrub && dst->scrub!", __func__)); 1554 1555 /* 1556 * Enforce the minimum TTL seen for this connection. Negate a common 1557 * technique to evade an intrusion detection system and confuse 1558 * firewall state code. 1559 */ 1560 switch (pd->af) { 1561 #ifdef INET 1562 case AF_INET: { 1563 if (src->scrub) { 1564 struct ip *h = mtod(m, struct ip *); 1565 if (h->ip_ttl > src->scrub->pfss_ttl) 1566 src->scrub->pfss_ttl = h->ip_ttl; 1567 h->ip_ttl = src->scrub->pfss_ttl; 1568 } 1569 break; 1570 } 1571 #endif /* INET */ 1572 #ifdef INET6 1573 case AF_INET6: { 1574 if (src->scrub) { 1575 struct ip6_hdr *h = mtod(m, struct ip6_hdr *); 1576 if (h->ip6_hlim > src->scrub->pfss_ttl) 1577 src->scrub->pfss_ttl = h->ip6_hlim; 1578 h->ip6_hlim = src->scrub->pfss_ttl; 1579 } 1580 break; 1581 } 1582 #endif /* INET6 */ 1583 } 1584 1585 if (th->th_off > (sizeof(struct tcphdr) >> 2) && 1586 ((src->scrub && (src->scrub->pfss_flags & PFSS_TIMESTAMP)) || 1587 (dst->scrub && (dst->scrub->pfss_flags & PFSS_TIMESTAMP))) && 1588 pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) { 1589 /* Diddle with TCP options */ 1590 int hlen; 1591 opt = hdr + sizeof(struct tcphdr); 1592 hlen = (th->th_off << 2) - sizeof(struct tcphdr); 1593 while (hlen >= TCPOLEN_TIMESTAMP) { 1594 startoff = opt - (hdr + sizeof(struct tcphdr)); 1595 switch (*opt) { 1596 case TCPOPT_EOL: /* FALLTHROUGH */ 1597 case TCPOPT_NOP: 1598 opt++; 1599 hlen--; 1600 break; 1601 case TCPOPT_TIMESTAMP: 1602 /* Modulate the timestamps. Can be used for 1603 * NAT detection, OS uptime determination or 1604 * reboot detection. 1605 */ 1606 1607 if (got_ts) { 1608 /* Huh? Multiple timestamps!? */ 1609 if (V_pf_status.debug >= PF_DEBUG_MISC) { 1610 DPFPRINTF(("multiple TS??\n")); 1611 pf_print_state(state); 1612 printf("\n"); 1613 } 1614 REASON_SET(reason, PFRES_TS); 1615 return (PF_DROP); 1616 } 1617 if (opt[1] >= TCPOLEN_TIMESTAMP) { 1618 memcpy(&tsval, &opt[2], 1619 sizeof(u_int32_t)); 1620 if (tsval && src->scrub && 1621 (src->scrub->pfss_flags & 1622 PFSS_TIMESTAMP)) { 1623 tsval = ntohl(tsval); 1624 pf_patch_32_unaligned(m, 1625 &th->th_sum, 1626 &opt[2], 1627 htonl(tsval + 1628 src->scrub->pfss_ts_mod), 1629 PF_ALGNMNT(startoff), 1630 0); 1631 copyback = 1; 1632 } 1633 1634 /* Modulate TS reply iff valid (!0) */ 1635 memcpy(&tsecr, &opt[6], 1636 sizeof(u_int32_t)); 1637 if (tsecr && dst->scrub && 1638 (dst->scrub->pfss_flags & 1639 PFSS_TIMESTAMP)) { 1640 tsecr = ntohl(tsecr) 1641 - dst->scrub->pfss_ts_mod; 1642 pf_patch_32_unaligned(m, 1643 &th->th_sum, 1644 &opt[6], 1645 htonl(tsecr), 1646 PF_ALGNMNT(startoff), 1647 0); 1648 copyback = 1; 1649 } 1650 got_ts = 1; 1651 } 1652 /* FALLTHROUGH */ 1653 default: 1654 hlen -= MAX(opt[1], 2); 1655 opt += MAX(opt[1], 2); 1656 break; 1657 } 1658 } 1659 if (copyback) { 1660 /* Copyback the options, caller copys back header */ 1661 *writeback = 1; 1662 m_copyback(m, off + sizeof(struct tcphdr), 1663 (th->th_off << 2) - sizeof(struct tcphdr), hdr + 1664 sizeof(struct tcphdr)); 1665 } 1666 } 1667 1668 /* 1669 * Must invalidate PAWS checks on connections idle for too long. 1670 * The fastest allowed timestamp clock is 1ms. That turns out to 1671 * be about 24 days before it wraps. XXX Right now our lowerbound 1672 * TS echo check only works for the first 12 days of a connection 1673 * when the TS has exhausted half its 32bit space 1674 */ 1675 #define TS_MAX_IDLE (24*24*60*60) 1676 #define TS_MAX_CONN (12*24*60*60) /* XXX remove when better tsecr check */ 1677 1678 getmicrouptime(&uptime); 1679 if (src->scrub && (src->scrub->pfss_flags & PFSS_PAWS) && 1680 (uptime.tv_sec - src->scrub->pfss_last.tv_sec > TS_MAX_IDLE || 1681 time_uptime - state->creation > TS_MAX_CONN)) { 1682 if (V_pf_status.debug >= PF_DEBUG_MISC) { 1683 DPFPRINTF(("src idled out of PAWS\n")); 1684 pf_print_state(state); 1685 printf("\n"); 1686 } 1687 src->scrub->pfss_flags = (src->scrub->pfss_flags & ~PFSS_PAWS) 1688 | PFSS_PAWS_IDLED; 1689 } 1690 if (dst->scrub && (dst->scrub->pfss_flags & PFSS_PAWS) && 1691 uptime.tv_sec - dst->scrub->pfss_last.tv_sec > TS_MAX_IDLE) { 1692 if (V_pf_status.debug >= PF_DEBUG_MISC) { 1693 DPFPRINTF(("dst idled out of PAWS\n")); 1694 pf_print_state(state); 1695 printf("\n"); 1696 } 1697 dst->scrub->pfss_flags = (dst->scrub->pfss_flags & ~PFSS_PAWS) 1698 | PFSS_PAWS_IDLED; 1699 } 1700 1701 if (got_ts && src->scrub && dst->scrub && 1702 (src->scrub->pfss_flags & PFSS_PAWS) && 1703 (dst->scrub->pfss_flags & PFSS_PAWS)) { 1704 /* Validate that the timestamps are "in-window". 1705 * RFC1323 describes TCP Timestamp options that allow 1706 * measurement of RTT (round trip time) and PAWS 1707 * (protection against wrapped sequence numbers). PAWS 1708 * gives us a set of rules for rejecting packets on 1709 * long fat pipes (packets that were somehow delayed 1710 * in transit longer than the time it took to send the 1711 * full TCP sequence space of 4Gb). We can use these 1712 * rules and infer a few others that will let us treat 1713 * the 32bit timestamp and the 32bit echoed timestamp 1714 * as sequence numbers to prevent a blind attacker from 1715 * inserting packets into a connection. 1716 * 1717 * RFC1323 tells us: 1718 * - The timestamp on this packet must be greater than 1719 * or equal to the last value echoed by the other 1720 * endpoint. The RFC says those will be discarded 1721 * since it is a dup that has already been acked. 1722 * This gives us a lowerbound on the timestamp. 1723 * timestamp >= other last echoed timestamp 1724 * - The timestamp will be less than or equal to 1725 * the last timestamp plus the time between the 1726 * last packet and now. The RFC defines the max 1727 * clock rate as 1ms. We will allow clocks to be 1728 * up to 10% fast and will allow a total difference 1729 * or 30 seconds due to a route change. And this 1730 * gives us an upperbound on the timestamp. 1731 * timestamp <= last timestamp + max ticks 1732 * We have to be careful here. Windows will send an 1733 * initial timestamp of zero and then initialize it 1734 * to a random value after the 3whs; presumably to 1735 * avoid a DoS by having to call an expensive RNG 1736 * during a SYN flood. Proof MS has at least one 1737 * good security geek. 1738 * 1739 * - The TCP timestamp option must also echo the other 1740 * endpoints timestamp. The timestamp echoed is the 1741 * one carried on the earliest unacknowledged segment 1742 * on the left edge of the sequence window. The RFC 1743 * states that the host will reject any echoed 1744 * timestamps that were larger than any ever sent. 1745 * This gives us an upperbound on the TS echo. 1746 * tescr <= largest_tsval 1747 * - The lowerbound on the TS echo is a little more 1748 * tricky to determine. The other endpoint's echoed 1749 * values will not decrease. But there may be 1750 * network conditions that re-order packets and 1751 * cause our view of them to decrease. For now the 1752 * only lowerbound we can safely determine is that 1753 * the TS echo will never be less than the original 1754 * TS. XXX There is probably a better lowerbound. 1755 * Remove TS_MAX_CONN with better lowerbound check. 1756 * tescr >= other original TS 1757 * 1758 * It is also important to note that the fastest 1759 * timestamp clock of 1ms will wrap its 32bit space in 1760 * 24 days. So we just disable TS checking after 24 1761 * days of idle time. We actually must use a 12d 1762 * connection limit until we can come up with a better 1763 * lowerbound to the TS echo check. 1764 */ 1765 struct timeval delta_ts; 1766 int ts_fudge; 1767 1768 /* 1769 * PFTM_TS_DIFF is how many seconds of leeway to allow 1770 * a host's timestamp. This can happen if the previous 1771 * packet got delayed in transit for much longer than 1772 * this packet. 1773 */ 1774 if ((ts_fudge = state->rule.ptr->timeout[PFTM_TS_DIFF]) == 0) 1775 ts_fudge = V_pf_default_rule.timeout[PFTM_TS_DIFF]; 1776 1777 /* Calculate max ticks since the last timestamp */ 1778 #define TS_MAXFREQ 1100 /* RFC max TS freq of 1Khz + 10% skew */ 1779 #define TS_MICROSECS 1000000 /* microseconds per second */ 1780 delta_ts = uptime; 1781 timevalsub(&delta_ts, &src->scrub->pfss_last); 1782 tsval_from_last = (delta_ts.tv_sec + ts_fudge) * TS_MAXFREQ; 1783 tsval_from_last += delta_ts.tv_usec / (TS_MICROSECS/TS_MAXFREQ); 1784 1785 if ((src->state >= TCPS_ESTABLISHED && 1786 dst->state >= TCPS_ESTABLISHED) && 1787 (SEQ_LT(tsval, dst->scrub->pfss_tsecr) || 1788 SEQ_GT(tsval, src->scrub->pfss_tsval + tsval_from_last) || 1789 (tsecr && (SEQ_GT(tsecr, dst->scrub->pfss_tsval) || 1790 SEQ_LT(tsecr, dst->scrub->pfss_tsval0))))) { 1791 /* Bad RFC1323 implementation or an insertion attack. 1792 * 1793 * - Solaris 2.6 and 2.7 are known to send another ACK 1794 * after the FIN,FIN|ACK,ACK closing that carries 1795 * an old timestamp. 1796 */ 1797 1798 DPFPRINTF(("Timestamp failed %c%c%c%c\n", 1799 SEQ_LT(tsval, dst->scrub->pfss_tsecr) ? '0' : ' ', 1800 SEQ_GT(tsval, src->scrub->pfss_tsval + 1801 tsval_from_last) ? '1' : ' ', 1802 SEQ_GT(tsecr, dst->scrub->pfss_tsval) ? '2' : ' ', 1803 SEQ_LT(tsecr, dst->scrub->pfss_tsval0)? '3' : ' ')); 1804 DPFPRINTF((" tsval: %u tsecr: %u +ticks: %u " 1805 "idle: %jus %lums\n", 1806 tsval, tsecr, tsval_from_last, 1807 (uintmax_t)delta_ts.tv_sec, 1808 delta_ts.tv_usec / 1000)); 1809 DPFPRINTF((" src->tsval: %u tsecr: %u\n", 1810 src->scrub->pfss_tsval, src->scrub->pfss_tsecr)); 1811 DPFPRINTF((" dst->tsval: %u tsecr: %u tsval0: %u" 1812 "\n", dst->scrub->pfss_tsval, 1813 dst->scrub->pfss_tsecr, dst->scrub->pfss_tsval0)); 1814 if (V_pf_status.debug >= PF_DEBUG_MISC) { 1815 pf_print_state(state); 1816 pf_print_flags(th->th_flags); 1817 printf("\n"); 1818 } 1819 REASON_SET(reason, PFRES_TS); 1820 return (PF_DROP); 1821 } 1822 1823 /* XXX I'd really like to require tsecr but it's optional */ 1824 1825 } else if (!got_ts && (th->th_flags & TH_RST) == 0 && 1826 ((src->state == TCPS_ESTABLISHED && dst->state == TCPS_ESTABLISHED) 1827 || pd->p_len > 0 || (th->th_flags & TH_SYN)) && 1828 src->scrub && dst->scrub && 1829 (src->scrub->pfss_flags & PFSS_PAWS) && 1830 (dst->scrub->pfss_flags & PFSS_PAWS)) { 1831 /* Didn't send a timestamp. Timestamps aren't really useful 1832 * when: 1833 * - connection opening or closing (often not even sent). 1834 * but we must not let an attacker to put a FIN on a 1835 * data packet to sneak it through our ESTABLISHED check. 1836 * - on a TCP reset. RFC suggests not even looking at TS. 1837 * - on an empty ACK. The TS will not be echoed so it will 1838 * probably not help keep the RTT calculation in sync and 1839 * there isn't as much danger when the sequence numbers 1840 * got wrapped. So some stacks don't include TS on empty 1841 * ACKs :-( 1842 * 1843 * To minimize the disruption to mostly RFC1323 conformant 1844 * stacks, we will only require timestamps on data packets. 1845 * 1846 * And what do ya know, we cannot require timestamps on data 1847 * packets. There appear to be devices that do legitimate 1848 * TCP connection hijacking. There are HTTP devices that allow 1849 * a 3whs (with timestamps) and then buffer the HTTP request. 1850 * If the intermediate device has the HTTP response cache, it 1851 * will spoof the response but not bother timestamping its 1852 * packets. So we can look for the presence of a timestamp in 1853 * the first data packet and if there, require it in all future 1854 * packets. 1855 */ 1856 1857 if (pd->p_len > 0 && (src->scrub->pfss_flags & PFSS_DATA_TS)) { 1858 /* 1859 * Hey! Someone tried to sneak a packet in. Or the 1860 * stack changed its RFC1323 behavior?!?! 1861 */ 1862 if (V_pf_status.debug >= PF_DEBUG_MISC) { 1863 DPFPRINTF(("Did not receive expected RFC1323 " 1864 "timestamp\n")); 1865 pf_print_state(state); 1866 pf_print_flags(th->th_flags); 1867 printf("\n"); 1868 } 1869 REASON_SET(reason, PFRES_TS); 1870 return (PF_DROP); 1871 } 1872 } 1873 1874 /* 1875 * We will note if a host sends his data packets with or without 1876 * timestamps. And require all data packets to contain a timestamp 1877 * if the first does. PAWS implicitly requires that all data packets be 1878 * timestamped. But I think there are middle-man devices that hijack 1879 * TCP streams immediately after the 3whs and don't timestamp their 1880 * packets (seen in a WWW accelerator or cache). 1881 */ 1882 if (pd->p_len > 0 && src->scrub && (src->scrub->pfss_flags & 1883 (PFSS_TIMESTAMP|PFSS_DATA_TS|PFSS_DATA_NOTS)) == PFSS_TIMESTAMP) { 1884 if (got_ts) 1885 src->scrub->pfss_flags |= PFSS_DATA_TS; 1886 else { 1887 src->scrub->pfss_flags |= PFSS_DATA_NOTS; 1888 if (V_pf_status.debug >= PF_DEBUG_MISC && dst->scrub && 1889 (dst->scrub->pfss_flags & PFSS_TIMESTAMP)) { 1890 /* Don't warn if other host rejected RFC1323 */ 1891 DPFPRINTF(("Broken RFC1323 stack did not " 1892 "timestamp data packet. Disabled PAWS " 1893 "security.\n")); 1894 pf_print_state(state); 1895 pf_print_flags(th->th_flags); 1896 printf("\n"); 1897 } 1898 } 1899 } 1900 1901 /* 1902 * Update PAWS values 1903 */ 1904 if (got_ts && src->scrub && PFSS_TIMESTAMP == (src->scrub->pfss_flags & 1905 (PFSS_PAWS_IDLED|PFSS_TIMESTAMP))) { 1906 getmicrouptime(&src->scrub->pfss_last); 1907 if (SEQ_GEQ(tsval, src->scrub->pfss_tsval) || 1908 (src->scrub->pfss_flags & PFSS_PAWS) == 0) 1909 src->scrub->pfss_tsval = tsval; 1910 1911 if (tsecr) { 1912 if (SEQ_GEQ(tsecr, src->scrub->pfss_tsecr) || 1913 (src->scrub->pfss_flags & PFSS_PAWS) == 0) 1914 src->scrub->pfss_tsecr = tsecr; 1915 1916 if ((src->scrub->pfss_flags & PFSS_PAWS) == 0 && 1917 (SEQ_LT(tsval, src->scrub->pfss_tsval0) || 1918 src->scrub->pfss_tsval0 == 0)) { 1919 /* tsval0 MUST be the lowest timestamp */ 1920 src->scrub->pfss_tsval0 = tsval; 1921 } 1922 1923 /* Only fully initialized after a TS gets echoed */ 1924 if ((src->scrub->pfss_flags & PFSS_PAWS) == 0) 1925 src->scrub->pfss_flags |= PFSS_PAWS; 1926 } 1927 } 1928 1929 /* I have a dream.... TCP segment reassembly.... */ 1930 return (0); 1931 } 1932 1933 static int 1934 pf_normalize_tcpopt(struct pf_krule *r, struct mbuf *m, struct tcphdr *th, 1935 int off, sa_family_t af) 1936 { 1937 u_int16_t *mss; 1938 int thoff; 1939 int opt, cnt, optlen = 0; 1940 int rewrite = 0; 1941 u_char opts[TCP_MAXOLEN]; 1942 u_char *optp = opts; 1943 size_t startoff; 1944 1945 thoff = th->th_off << 2; 1946 cnt = thoff - sizeof(struct tcphdr); 1947 1948 if (cnt > 0 && !pf_pull_hdr(m, off + sizeof(*th), opts, cnt, 1949 NULL, NULL, af)) 1950 return (rewrite); 1951 1952 for (; cnt > 0; cnt -= optlen, optp += optlen) { 1953 startoff = optp - opts; 1954 opt = optp[0]; 1955 if (opt == TCPOPT_EOL) 1956 break; 1957 if (opt == TCPOPT_NOP) 1958 optlen = 1; 1959 else { 1960 if (cnt < 2) 1961 break; 1962 optlen = optp[1]; 1963 if (optlen < 2 || optlen > cnt) 1964 break; 1965 } 1966 switch (opt) { 1967 case TCPOPT_MAXSEG: 1968 mss = (u_int16_t *)(optp + 2); 1969 if ((ntohs(*mss)) > r->max_mss) { 1970 pf_patch_16_unaligned(m, 1971 &th->th_sum, 1972 mss, htons(r->max_mss), 1973 PF_ALGNMNT(startoff), 1974 0); 1975 rewrite = 1; 1976 } 1977 break; 1978 default: 1979 break; 1980 } 1981 } 1982 1983 if (rewrite) 1984 m_copyback(m, off + sizeof(*th), thoff - sizeof(*th), opts); 1985 1986 return (rewrite); 1987 } 1988 1989 #ifdef INET 1990 static void 1991 pf_scrub_ip(struct mbuf **m0, u_int32_t flags, u_int8_t min_ttl, u_int8_t tos) 1992 { 1993 struct mbuf *m = *m0; 1994 struct ip *h = mtod(m, struct ip *); 1995 1996 /* Clear IP_DF if no-df was requested */ 1997 if (flags & PFRULE_NODF && h->ip_off & htons(IP_DF)) { 1998 u_int16_t ip_off = h->ip_off; 1999 2000 h->ip_off &= htons(~IP_DF); 2001 h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0); 2002 } 2003 2004 /* Enforce a minimum ttl, may cause endless packet loops */ 2005 if (min_ttl && h->ip_ttl < min_ttl) { 2006 u_int16_t ip_ttl = h->ip_ttl; 2007 2008 h->ip_ttl = min_ttl; 2009 h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_ttl, h->ip_ttl, 0); 2010 } 2011 2012 /* Enforce tos */ 2013 if (flags & PFRULE_SET_TOS) { 2014 u_int16_t ov, nv; 2015 2016 ov = *(u_int16_t *)h; 2017 h->ip_tos = tos | (h->ip_tos & IPTOS_ECN_MASK); 2018 nv = *(u_int16_t *)h; 2019 2020 h->ip_sum = pf_cksum_fixup(h->ip_sum, ov, nv, 0); 2021 } 2022 2023 /* random-id, but not for fragments */ 2024 if (flags & PFRULE_RANDOMID && !(h->ip_off & ~htons(IP_DF))) { 2025 uint16_t ip_id = h->ip_id; 2026 2027 ip_fillid(h); 2028 h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_id, h->ip_id, 0); 2029 } 2030 } 2031 #endif /* INET */ 2032 2033 #ifdef INET6 2034 static void 2035 pf_scrub_ip6(struct mbuf **m0, u_int8_t min_ttl) 2036 { 2037 struct mbuf *m = *m0; 2038 struct ip6_hdr *h = mtod(m, struct ip6_hdr *); 2039 2040 /* Enforce a minimum ttl, may cause endless packet loops */ 2041 if (min_ttl && h->ip6_hlim < min_ttl) 2042 h->ip6_hlim = min_ttl; 2043 } 2044 #endif 2045