/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright 2001 Niels Provos <provos@citi.umich.edu>
 * Copyright 2011-2018 Alexander Bluhm <bluhm@openbsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *	$OpenBSD: pf_norm.c,v 1.114 2009/01/29 14:11:45 henning Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_pf.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/refcount.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/vnet.h>
#include <net/pfvar.h>
#include <net/if_pflog.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <netinet6/scope6_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>

#ifdef INET6
#include <netinet/ip6.h>
#endif /* INET6 */

struct pf_frent {
	TAILQ_ENTRY(pf_frent)	fr_next;
	struct mbuf	*fe_m;
	uint16_t	fe_hdrlen;	/* ipv4 header length with ip options
					   ipv6, extension, fragment header */
	uint16_t	fe_extoff;	/* last extension header offset or 0 */
	uint16_t	fe_len;		/* fragment length */
	uint16_t	fe_off;		/* fragment offset */
	uint16_t	fe_mff;		/* more fragment flag */
};

struct pf_fragment_cmp {
	struct pf_addr	frc_src;
	struct pf_addr	frc_dst;
	uint32_t	frc_id;
	sa_family_t	frc_af;
	uint8_t		frc_proto;
};

struct pf_fragment {
	struct pf_fragment_cmp	fr_key;
#define fr_src	fr_key.frc_src
#define fr_dst	fr_key.frc_dst
#define fr_id	fr_key.frc_id
#define fr_af	fr_key.frc_af
#define fr_proto	fr_key.frc_proto

	/* pointers to queue element */
	struct pf_frent	*fr_firstoff[PF_FRAG_ENTRY_POINTS];
	/* count entries between pointers */
	uint8_t		fr_entries[PF_FRAG_ENTRY_POINTS];
	RB_ENTRY(pf_fragment) fr_entry;
	TAILQ_ENTRY(pf_fragment) frag_next;
	uint32_t	fr_timeout;
	uint16_t	fr_maxlen;	/* maximum length of single fragment */
	u_int16_t	fr_holes;	/* number of holes in the queue */
	TAILQ_HEAD(pf_fragq, pf_frent) fr_queue;
};
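/*
 * Illustrative sketch of the structure above: for a queue holding
 * fragments at offsets 0-1479, 1480-2959 and 4440-5919 (all with the
 * more-fragments flag set), fr_queue lists the three entries in offset
 * order, fr_holes is 2 (the gap at 2960-4439 plus the open tail), and
 * fr_firstoff[] points at the first entry falling into each 4096-octet
 * slice of the offset space, so lookups need not walk the whole list.
 */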
struct pf_fragment_tag {
	uint16_t	ft_hdrlen;	/* header length of reassembled pkt */
	uint16_t	ft_extoff;	/* last extension header offset or 0 */
	uint16_t	ft_maxlen;	/* maximum fragment payload length */
	uint32_t	ft_id;		/* fragment id */
};

VNET_DEFINE_STATIC(struct mtx, pf_frag_mtx);
#define V_pf_frag_mtx		VNET(pf_frag_mtx)
#define PF_FRAG_LOCK()		mtx_lock(&V_pf_frag_mtx)
#define PF_FRAG_UNLOCK()	mtx_unlock(&V_pf_frag_mtx)
#define PF_FRAG_ASSERT()	mtx_assert(&V_pf_frag_mtx, MA_OWNED)

VNET_DEFINE(uma_zone_t, pf_state_scrub_z);	/* XXX: shared with pfsync */

VNET_DEFINE_STATIC(uma_zone_t, pf_frent_z);
#define	V_pf_frent_z	VNET(pf_frent_z)
VNET_DEFINE_STATIC(uma_zone_t, pf_frag_z);
#define	V_pf_frag_z	VNET(pf_frag_z)

TAILQ_HEAD(pf_fragqueue, pf_fragment);
TAILQ_HEAD(pf_cachequeue, pf_fragment);
VNET_DEFINE_STATIC(struct pf_fragqueue, pf_fragqueue);
#define	V_pf_fragqueue		VNET(pf_fragqueue)
RB_HEAD(pf_frag_tree, pf_fragment);
VNET_DEFINE_STATIC(struct pf_frag_tree, pf_frag_tree);
#define	V_pf_frag_tree		VNET(pf_frag_tree)
static int		 pf_frag_compare(struct pf_fragment *,
			    struct pf_fragment *);
static RB_PROTOTYPE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
static RB_GENERATE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);

static void	pf_flush_fragments(void);
static void	pf_free_fragment(struct pf_fragment *);
static void	pf_remove_fragment(struct pf_fragment *);

static struct pf_frent *pf_create_fragment(u_short *);
static int	pf_frent_holes(struct pf_frent *frent);
static struct pf_fragment *pf_find_fragment(struct pf_fragment_cmp *key,
		    struct pf_frag_tree *tree);
static inline int	pf_frent_index(struct pf_frent *);
static int	pf_frent_insert(struct pf_fragment *, struct pf_frent *,
		    struct pf_frent *);
void		pf_frent_remove(struct pf_fragment *, struct pf_frent *);
struct pf_frent *pf_frent_previous(struct pf_fragment *, struct pf_frent *);
static struct pf_fragment *pf_fillup_fragment(struct pf_fragment_cmp *,
		    struct pf_frent *, u_short *);
static struct mbuf *pf_join_fragment(struct pf_fragment *);
#ifdef INET
static int	pf_reassemble(struct mbuf **, struct ip *, int, u_short *);
#endif	/* INET */
#ifdef INET6
static int	pf_reassemble6(struct mbuf **, struct ip6_hdr *,
		    struct ip6_frag *, uint16_t, uint16_t, u_short *);
#endif	/* INET6 */

#define	DPFPRINTF(x) do {				\
	if (V_pf_status.debug >= PF_DEBUG_MISC) {	\
		printf("%s: ", __func__);		\
		printf x ;				\
	}						\
} while(0)

#ifdef INET
static void
pf_ip2key(struct ip *ip, int dir, struct pf_fragment_cmp *key)
{

	key->frc_src.v4 = ip->ip_src;
	key->frc_dst.v4 = ip->ip_dst;
	key->frc_af = AF_INET;
	key->frc_proto = ip->ip_p;
	key->frc_id = ip->ip_id;
}
#endif	/* INET */
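/*
 * Fragments belong to the same reassembly queue only if the
 * (src, dst, id, af, proto) tuple filled in above matches; this is the
 * classic RFC 791 reassembly key.  pf_frag_compare() orders the
 * red-black tree of queues by exactly these fields.
 */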
void
pf_normalize_init(void)
{

	V_pf_frag_z = uma_zcreate("pf frags", sizeof(struct pf_fragment),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	V_pf_frent_z = uma_zcreate("pf frag entries", sizeof(struct pf_frent),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	V_pf_state_scrub_z = uma_zcreate("pf state scrubs",
	    sizeof(struct pf_state_scrub), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);

	mtx_init(&V_pf_frag_mtx, "pf fragments", NULL, MTX_DEF);

	V_pf_limits[PF_LIMIT_FRAGS].zone = V_pf_frent_z;
	V_pf_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT;
	uma_zone_set_max(V_pf_frent_z, PFFRAG_FRENT_HIWAT);
	uma_zone_set_warning(V_pf_frent_z, "PF frag entries limit reached");

	TAILQ_INIT(&V_pf_fragqueue);
}

void
pf_normalize_cleanup(void)
{

	uma_zdestroy(V_pf_state_scrub_z);
	uma_zdestroy(V_pf_frent_z);
	uma_zdestroy(V_pf_frag_z);

	mtx_destroy(&V_pf_frag_mtx);
}

static int
pf_frag_compare(struct pf_fragment *a, struct pf_fragment *b)
{
	int	diff;

	if ((diff = a->fr_id - b->fr_id) != 0)
		return (diff);
	if ((diff = a->fr_proto - b->fr_proto) != 0)
		return (diff);
	if ((diff = a->fr_af - b->fr_af) != 0)
		return (diff);
	if ((diff = pf_addr_cmp(&a->fr_src, &b->fr_src, a->fr_af)) != 0)
		return (diff);
	if ((diff = pf_addr_cmp(&a->fr_dst, &b->fr_dst, a->fr_af)) != 0)
		return (diff);
	return (0);
}

void
pf_purge_expired_fragments(void)
{
	u_int32_t	expire = time_uptime -
			    V_pf_default_rule.timeout[PFTM_FRAG];

	pf_purge_fragments(expire);
}

void
pf_purge_fragments(uint32_t expire)
{
	struct pf_fragment	*frag;

	PF_FRAG_LOCK();
	while ((frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue)) != NULL) {
		if (frag->fr_timeout > expire)
			break;

		DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
		pf_free_fragment(frag);
	}

	PF_FRAG_UNLOCK();
}

/*
 * Try to flush old fragments to make space for new ones
 */
static void
pf_flush_fragments(void)
{
	struct pf_fragment	*frag;
	int			 goal;

	PF_FRAG_ASSERT();

	goal = uma_zone_get_cur(V_pf_frent_z) * 9 / 10;
	DPFPRINTF(("trying to free %d frag entries\n", goal));
	while (goal < uma_zone_get_cur(V_pf_frent_z)) {
		frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue);
		if (frag)
			pf_free_fragment(frag);
		else
			break;
	}
}
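/*
 * Example of the flush goal above: with 1000 fragment entries in use,
 * goal is 900, so whole queues are freed least-recently-used first
 * until at least 10% of the entries are reclaimed or no queues remain.
 */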
/* Frees the fragments and all associated entries */
static void
pf_free_fragment(struct pf_fragment *frag)
{
	struct pf_frent		*frent;

	PF_FRAG_ASSERT();

	/* Free all fragments */
	for (frent = TAILQ_FIRST(&frag->fr_queue); frent;
	    frent = TAILQ_FIRST(&frag->fr_queue)) {
		TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);

		m_freem(frent->fe_m);
		uma_zfree(V_pf_frent_z, frent);
	}

	pf_remove_fragment(frag);
}

static struct pf_fragment *
pf_find_fragment(struct pf_fragment_cmp *key, struct pf_frag_tree *tree)
{
	struct pf_fragment	*frag;

	PF_FRAG_ASSERT();

	frag = RB_FIND(pf_frag_tree, tree, (struct pf_fragment *)key);
	if (frag != NULL) {
		/* XXX Are we sure we want to update the timeout? */
		frag->fr_timeout = time_uptime;
		TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);
		TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next);
	}

	return (frag);
}

/* Removes a fragment from the fragment queue and frees the fragment */
static void
pf_remove_fragment(struct pf_fragment *frag)
{

	PF_FRAG_ASSERT();
	KASSERT(frag, ("frag != NULL"));

	RB_REMOVE(pf_frag_tree, &V_pf_frag_tree, frag);
	TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);
	uma_zfree(V_pf_frag_z, frag);
}

static struct pf_frent *
pf_create_fragment(u_short *reason)
{
	struct pf_frent *frent;

	PF_FRAG_ASSERT();

	frent = uma_zalloc(V_pf_frent_z, M_NOWAIT);
	if (frent == NULL) {
		pf_flush_fragments();
		frent = uma_zalloc(V_pf_frent_z, M_NOWAIT);
		if (frent == NULL) {
			REASON_SET(reason, PFRES_MEMORY);
			return (NULL);
		}
	}

	return (frent);
}

/*
 * Calculate the additional holes that were created in the fragment
 * queue by inserting this fragment.  A fragment in the middle
 * creates one more hole by splitting.  For each connected side,
 * it loses one hole.
 * Fragment entry must be in the queue when calling this function.
 */
static int
pf_frent_holes(struct pf_frent *frent)
{
	struct pf_frent *prev = TAILQ_PREV(frent, pf_fragq, fr_next);
	struct pf_frent *next = TAILQ_NEXT(frent, fr_next);
	int holes = 1;

	if (prev == NULL) {
		if (frent->fe_off == 0)
			holes--;
	} else {
		KASSERT(frent->fe_off != 0, ("frent->fe_off != 0"));
		if (frent->fe_off == prev->fe_off + prev->fe_len)
			holes--;
	}
	if (next == NULL) {
		if (!frent->fe_mff)
			holes--;
	} else {
		KASSERT(frent->fe_mff, ("frent->fe_mff"));
		if (next->fe_off == frent->fe_off + frent->fe_len)
			holes--;
	}
	return holes;
}

static inline int
pf_frent_index(struct pf_frent *frent)
{
	/*
	 * We have an array of 16 entry points to the queue.  A full size
	 * 65535 octet IP packet can have 8192 fragments.  So the queue
	 * traversal length is at most 512 and at most 16 entry points are
	 * checked.  We need 128 additional bytes on a 64 bit architecture.
	 */
	CTASSERT(((u_int16_t)0xffff &~ 7) / (0x10000 / PF_FRAG_ENTRY_POINTS) ==
	    16 - 1);
	CTASSERT(((u_int16_t)0xffff >> 3) / PF_FRAG_ENTRY_POINTS == 512 - 1);

	return frent->fe_off / (0x10000 / PF_FRAG_ENTRY_POINTS);
}
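/*
 * Worked example for pf_frent_index(): with PF_FRAG_ENTRY_POINTS == 16,
 * each entry point covers 0x10000 / 16 == 4096 octets of offset space.
 * A fragment at offset 1480 maps to index 0, one at offset 4440 to
 * index 1, and the largest possible offset 65528 to index 15.
 */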
static int
pf_frent_insert(struct pf_fragment *frag, struct pf_frent *frent,
    struct pf_frent *prev)
{
	int index;

	CTASSERT(PF_FRAG_ENTRY_LIMIT <= 0xff);

	/*
	 * A packet has at most 65536 octets.  With 16 entry points, each one
	 * spans 4096 octets.  We limit these to 64 fragments each, which
	 * means on average every fragment must have at least 64 octets.
	 */
	index = pf_frent_index(frent);
	if (frag->fr_entries[index] >= PF_FRAG_ENTRY_LIMIT)
		return ENOBUFS;
	frag->fr_entries[index]++;

	if (prev == NULL) {
		TAILQ_INSERT_HEAD(&frag->fr_queue, frent, fr_next);
	} else {
		KASSERT(prev->fe_off + prev->fe_len <= frent->fe_off,
		    ("overlapping fragment"));
		TAILQ_INSERT_AFTER(&frag->fr_queue, prev, frent, fr_next);
	}

	if (frag->fr_firstoff[index] == NULL) {
		KASSERT(prev == NULL || pf_frent_index(prev) < index,
		    ("prev == NULL || pf_frent_index(prev) < index"));
		frag->fr_firstoff[index] = frent;
	} else {
		if (frent->fe_off < frag->fr_firstoff[index]->fe_off) {
			KASSERT(prev == NULL || pf_frent_index(prev) < index,
			    ("prev == NULL || pf_frent_index(prev) < index"));
			frag->fr_firstoff[index] = frent;
		} else {
			KASSERT(prev != NULL, ("prev != NULL"));
			KASSERT(pf_frent_index(prev) == index,
			    ("pf_frent_index(prev) == index"));
		}
	}

	frag->fr_holes += pf_frent_holes(frent);

	return 0;
}

void
pf_frent_remove(struct pf_fragment *frag, struct pf_frent *frent)
{
#ifdef INVARIANTS
	struct pf_frent *prev = TAILQ_PREV(frent, pf_fragq, fr_next);
#endif
	struct pf_frent *next = TAILQ_NEXT(frent, fr_next);
	int index;

	frag->fr_holes -= pf_frent_holes(frent);

	index = pf_frent_index(frent);
	KASSERT(frag->fr_firstoff[index] != NULL, ("frent not found"));
	if (frag->fr_firstoff[index]->fe_off == frent->fe_off) {
		if (next == NULL) {
			frag->fr_firstoff[index] = NULL;
		} else {
			KASSERT(frent->fe_off + frent->fe_len <= next->fe_off,
			    ("overlapping fragment"));
			if (pf_frent_index(next) == index) {
				frag->fr_firstoff[index] = next;
			} else {
				frag->fr_firstoff[index] = NULL;
			}
		}
	} else {
		KASSERT(frag->fr_firstoff[index]->fe_off < frent->fe_off,
		    ("frag->fr_firstoff[index]->fe_off < frent->fe_off"));
		KASSERT(prev != NULL, ("prev != NULL"));
		KASSERT(prev->fe_off + prev->fe_len <= frent->fe_off,
		    ("overlapping fragment"));
		KASSERT(pf_frent_index(prev) == index,
		    ("pf_frent_index(prev) == index"));
	}

	TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);

	KASSERT(frag->fr_entries[index] > 0, ("No fragments remaining"));
	frag->fr_entries[index]--;
}
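/*
 * Hole accounting sketch: a queue holding 0-1479 and 2960-4439 (both
 * with mff set) has fr_holes == 2, one before offset 2960 and one after
 * 4439.  Inserting 1480-2959 connects both sides, so pf_frent_holes()
 * returns -1 and pf_frent_insert() brings fr_holes back to 1.
 * Reassembly only proceeds once fr_holes reaches 0.
 */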
struct pf_frent *
pf_frent_previous(struct pf_fragment *frag, struct pf_frent *frent)
{
	struct pf_frent *prev, *next;
	int index;

	/*
	 * If there are no fragments after frag, take the final one.  Assume
	 * that the global queue is not empty.
	 */
	prev = TAILQ_LAST(&frag->fr_queue, pf_fragq);
	KASSERT(prev != NULL, ("prev != NULL"));
	if (prev->fe_off <= frent->fe_off)
		return prev;
	/*
	 * We want to find a fragment entry that is before frag, but still
	 * close to it.  Find the first fragment entry that is in the same
	 * entry point or in the first entry point after that.  As we have
	 * already checked that there are entries behind frag, this will
	 * succeed.
	 */
	for (index = pf_frent_index(frent); index < PF_FRAG_ENTRY_POINTS;
	    index++) {
		prev = frag->fr_firstoff[index];
		if (prev != NULL)
			break;
	}
	KASSERT(prev != NULL, ("prev != NULL"));
	/*
	 * In prev we may have a fragment from the same entry point that is
	 * before frent, or one that is just one position behind frent.
	 * In the latter case, we go back one step and have the predecessor.
	 * There may be none if the new fragment will be the first one.
	 */
	if (prev->fe_off > frent->fe_off) {
		prev = TAILQ_PREV(prev, pf_fragq, fr_next);
		if (prev == NULL)
			return NULL;
		KASSERT(prev->fe_off <= frent->fe_off,
		    ("prev->fe_off <= frent->fe_off"));
		return prev;
	}
	/*
	 * In prev is the first fragment of the entry point.  The offset
	 * of frag is behind it.  Find the closest previous fragment.
	 */
	for (next = TAILQ_NEXT(prev, fr_next); next != NULL;
	    next = TAILQ_NEXT(next, fr_next)) {
		if (next->fe_off > frent->fe_off)
			break;
		prev = next;
	}
	return prev;
}
static struct pf_fragment *
pf_fillup_fragment(struct pf_fragment_cmp *key, struct pf_frent *frent,
    u_short *reason)
{
	struct pf_frent		*after, *next, *prev;
	struct pf_fragment	*frag;
	uint16_t		 total;
	int			 old_index, new_index;

	PF_FRAG_ASSERT();

	/* No empty fragments. */
	if (frent->fe_len == 0) {
		DPFPRINTF(("bad fragment: len 0\n"));
		goto bad_fragment;
	}

	/* All fragments are 8 byte aligned. */
	if (frent->fe_mff && (frent->fe_len & 0x7)) {
		DPFPRINTF(("bad fragment: mff and len %d\n", frent->fe_len));
		goto bad_fragment;
	}

	/* Respect maximum length, IP_MAXPACKET == IPV6_MAXPACKET. */
	if (frent->fe_off + frent->fe_len > IP_MAXPACKET) {
		DPFPRINTF(("bad fragment: max packet %d\n",
		    frent->fe_off + frent->fe_len));
		goto bad_fragment;
	}

	DPFPRINTF((key->frc_af == AF_INET ?
	    "reass frag %d @ %d-%d\n" : "reass frag %#08x @ %d-%d\n",
	    key->frc_id, frent->fe_off, frent->fe_off + frent->fe_len));

	/* Fully buffer all of the fragments in this fragment queue. */
	frag = pf_find_fragment(key, &V_pf_frag_tree);

	/* Create a new reassembly queue for this packet. */
	if (frag == NULL) {
		frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
		if (frag == NULL) {
			pf_flush_fragments();
			frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
			if (frag == NULL) {
				REASON_SET(reason, PFRES_MEMORY);
				goto drop_fragment;
			}
		}

		*(struct pf_fragment_cmp *)frag = *key;
		memset(frag->fr_firstoff, 0, sizeof(frag->fr_firstoff));
		memset(frag->fr_entries, 0, sizeof(frag->fr_entries));
		frag->fr_timeout = time_uptime;
		frag->fr_maxlen = frent->fe_len;
		frag->fr_holes = 1;
		TAILQ_INIT(&frag->fr_queue);

		RB_INSERT(pf_frag_tree, &V_pf_frag_tree, frag);
		TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next);

		/* We do not have a previous fragment, cannot fail. */
		pf_frent_insert(frag, frent, NULL);

		return (frag);
	}

	KASSERT(!TAILQ_EMPTY(&frag->fr_queue), ("!TAILQ_EMPTY()->fr_queue"));

	/* Remember maximum fragment len for refragmentation. */
	if (frent->fe_len > frag->fr_maxlen)
		frag->fr_maxlen = frent->fe_len;

	/* Maximum data we have seen already. */
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
	    TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;

	/* Non terminal fragments must have more fragments flag. */
	if (frent->fe_off + frent->fe_len < total && !frent->fe_mff)
		goto bad_fragment;

	/* Check if we saw the last fragment already. */
	if (!TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_mff) {
		if (frent->fe_off + frent->fe_len > total ||
		    (frent->fe_off + frent->fe_len == total && frent->fe_mff))
			goto bad_fragment;
	} else {
		if (frent->fe_off + frent->fe_len == total && !frent->fe_mff)
			goto bad_fragment;
	}

	/* Find neighbors for newly inserted fragment */
	prev = pf_frent_previous(frag, frent);
	if (prev == NULL) {
		after = TAILQ_FIRST(&frag->fr_queue);
		KASSERT(after != NULL, ("after != NULL"));
	} else {
		after = TAILQ_NEXT(prev, fr_next);
	}

	if (prev != NULL && prev->fe_off + prev->fe_len > frent->fe_off) {
		uint16_t precut;

		precut = prev->fe_off + prev->fe_len - frent->fe_off;
		if (precut >= frent->fe_len)
			goto bad_fragment;
		DPFPRINTF(("overlap -%d\n", precut));
		m_adj(frent->fe_m, precut);
		frent->fe_off += precut;
		frent->fe_len -= precut;
	}

	for (; after != NULL && frent->fe_off + frent->fe_len > after->fe_off;
	    after = next) {
		uint16_t aftercut;

		aftercut = frent->fe_off + frent->fe_len - after->fe_off;
		DPFPRINTF(("adjust overlap %d\n", aftercut));
		if (aftercut < after->fe_len) {
			m_adj(after->fe_m, aftercut);
			old_index = pf_frent_index(after);
			after->fe_off += aftercut;
			after->fe_len -= aftercut;
			new_index = pf_frent_index(after);
			if (old_index != new_index) {
				DPFPRINTF(("frag index %d, new %d",
				    old_index, new_index));
				/* Fragment switched queue as fe_off changed */
				after->fe_off -= aftercut;
				after->fe_len += aftercut;
				/* Remove restored fragment from old queue */
				pf_frent_remove(frag, after);
				after->fe_off += aftercut;
				after->fe_len -= aftercut;
				/* Insert into correct queue */
				if (pf_frent_insert(frag, after, prev)) {
					DPFPRINTF(
					    ("fragment requeue limit exceeded"));
					m_freem(after->fe_m);
					uma_zfree(V_pf_frent_z, after);
					/* There is no way to recover */
					goto bad_fragment;
				}
			}
			break;
		}

		/* This fragment is completely overlapped, lose it. */
		next = TAILQ_NEXT(after, fr_next);
		pf_frent_remove(frag, after);
		m_freem(after->fe_m);
		uma_zfree(V_pf_frent_z, after);
	}

	/* If part of the queue gets too long, there is no way to recover. */
	if (pf_frent_insert(frag, frent, prev)) {
		DPFPRINTF(("fragment queue limit exceeded\n"));
		goto bad_fragment;
	}

	return (frag);

bad_fragment:
	REASON_SET(reason, PFRES_FRAG);
drop_fragment:
	uma_zfree(V_pf_frent_z, frent);
	return (NULL);
}
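/*
 * Overlap trimming sketch: if 0-1479 is queued and a new fragment
 * 1472-2951 arrives, precut is 1480 - 1472 = 8, so eight octets are
 * trimmed from the front of the new mbuf and the entry is inserted as
 * 1480-2951.  In the other direction, the aftercut loop trims the head
 * of, or entirely drops, queued fragments that the new one overlaps.
 */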
static struct mbuf *
pf_join_fragment(struct pf_fragment *frag)
{
	struct mbuf *m, *m2;
	struct pf_frent	*frent, *next;

	frent = TAILQ_FIRST(&frag->fr_queue);
	next = TAILQ_NEXT(frent, fr_next);

	m = frent->fe_m;
	m_adj(m, (frent->fe_hdrlen + frent->fe_len) - m->m_pkthdr.len);
	uma_zfree(V_pf_frent_z, frent);
	for (frent = next; frent != NULL; frent = next) {
		next = TAILQ_NEXT(frent, fr_next);

		m2 = frent->fe_m;
		/* Strip off ip header. */
		m_adj(m2, frent->fe_hdrlen);
		/* Strip off any trailing bytes. */
		m_adj(m2, frent->fe_len - m2->m_pkthdr.len);

		uma_zfree(V_pf_frent_z, frent);
		m_cat(m, m2);
	}

	/* Remove from fragment queue. */
	pf_remove_fragment(frag);

	return (m);
}

#ifdef INET
static int
pf_reassemble(struct mbuf **m0, struct ip *ip, int dir, u_short *reason)
{
	struct mbuf		*m = *m0;
	struct pf_frent		*frent;
	struct pf_fragment	*frag;
	struct pf_fragment_cmp	 key;
	uint16_t		 total, hdrlen;

	/* Get an entry for the fragment queue */
	if ((frent = pf_create_fragment(reason)) == NULL)
		return (PF_DROP);

	frent->fe_m = m;
	frent->fe_hdrlen = ip->ip_hl << 2;
	frent->fe_extoff = 0;
	frent->fe_len = ntohs(ip->ip_len) - (ip->ip_hl << 2);
	frent->fe_off = (ntohs(ip->ip_off) & IP_OFFMASK) << 3;
	frent->fe_mff = ntohs(ip->ip_off) & IP_MF;

	pf_ip2key(ip, dir, &key);

	if ((frag = pf_fillup_fragment(&key, frent, reason)) == NULL)
		return (PF_DROP);

	/* The mbuf is part of the fragment entry, no direct free or access */
	m = *m0 = NULL;

	if (frag->fr_holes) {
		DPFPRINTF(("frag %d, holes %d\n", frag->fr_id,
		    frag->fr_holes));
		return (PF_PASS);  /* drop because *m0 is NULL, no error */
	}

	/* We have all the data */
	frent = TAILQ_FIRST(&frag->fr_queue);
	KASSERT(frent != NULL, ("frent != NULL"));
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
	    TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
	hdrlen = frent->fe_hdrlen;

	m = *m0 = pf_join_fragment(frag);
	frag = NULL;

	if (m->m_flags & M_PKTHDR) {
		int plen = 0;
		for (m = *m0; m; m = m->m_next)
			plen += m->m_len;
		m = *m0;
		m->m_pkthdr.len = plen;
	}

	ip = mtod(m, struct ip *);
	ip->ip_sum = pf_cksum_fixup(ip->ip_sum, ip->ip_len,
	    htons(hdrlen + total), 0);
	ip->ip_len = htons(hdrlen + total);
	ip->ip_sum = pf_cksum_fixup(ip->ip_sum, ip->ip_off,
	    ip->ip_off & ~(IP_MF|IP_OFFMASK), 0);
	ip->ip_off &= ~(IP_MF|IP_OFFMASK);

	if (hdrlen + total > IP_MAXPACKET) {
		DPFPRINTF(("drop: too big: %d\n", total));
		ip->ip_len = 0;
		REASON_SET(reason, PFRES_SHORT);
		/* PF_DROP requires a valid mbuf *m0 in pf_test() */
		return (PF_DROP);
	}

	DPFPRINTF(("complete: %p(%d)\n", m, ntohs(ip->ip_len)));
	return (PF_PASS);
}
#endif	/* INET */
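/*
 * The pf_cksum_fixup() calls in pf_reassemble() update ip_sum
 * incrementally in the style of RFC 1624 rather than recomputing it:
 * for each rewritten 16-bit header word, the old value is removed from
 * and the new value folded into the ones-complement sum.  Clearing
 * IP_MF and the offset bits and setting the final ip_len therefore
 * leave a correct header checksum without touching the payload.
 */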
#ifdef INET6
static int
pf_reassemble6(struct mbuf **m0, struct ip6_hdr *ip6, struct ip6_frag *fraghdr,
    uint16_t hdrlen, uint16_t extoff, u_short *reason)
{
	struct mbuf		*m = *m0;
	struct pf_frent		*frent;
	struct pf_fragment	*frag;
	struct pf_fragment_cmp	 key;
	struct m_tag		*mtag;
	struct pf_fragment_tag	*ftag;
	int			 off;
	uint32_t		 frag_id;
	uint16_t		 total, maxlen;
	uint8_t			 proto;

	PF_FRAG_LOCK();

	/* Get an entry for the fragment queue. */
	if ((frent = pf_create_fragment(reason)) == NULL) {
		PF_FRAG_UNLOCK();
		return (PF_DROP);
	}

	frent->fe_m = m;
	frent->fe_hdrlen = hdrlen;
	frent->fe_extoff = extoff;
	frent->fe_len = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - hdrlen;
	frent->fe_off = ntohs(fraghdr->ip6f_offlg & IP6F_OFF_MASK);
	frent->fe_mff = fraghdr->ip6f_offlg & IP6F_MORE_FRAG;

	key.frc_src.v6 = ip6->ip6_src;
	key.frc_dst.v6 = ip6->ip6_dst;
	key.frc_af = AF_INET6;
	/* Only the first fragment's protocol is relevant. */
	key.frc_proto = 0;
	key.frc_id = fraghdr->ip6f_ident;

	if ((frag = pf_fillup_fragment(&key, frent, reason)) == NULL) {
		PF_FRAG_UNLOCK();
		return (PF_DROP);
	}

	/* The mbuf is part of the fragment entry, no direct free or access. */
	m = *m0 = NULL;

	if (frag->fr_holes) {
		DPFPRINTF(("frag %d, holes %d\n", frag->fr_id,
		    frag->fr_holes));
		PF_FRAG_UNLOCK();
		return (PF_PASS);  /* Drop because *m0 is NULL, no error. */
	}

	/* We have all the data. */
	frent = TAILQ_FIRST(&frag->fr_queue);
	KASSERT(frent != NULL, ("frent != NULL"));
	extoff = frent->fe_extoff;
	maxlen = frag->fr_maxlen;
	frag_id = frag->fr_id;
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
	    TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
	hdrlen = frent->fe_hdrlen - sizeof(struct ip6_frag);

	m = *m0 = pf_join_fragment(frag);
	frag = NULL;

	PF_FRAG_UNLOCK();

	/* Take protocol from first fragment header. */
	m = m_getptr(m, hdrlen + offsetof(struct ip6_frag, ip6f_nxt), &off);
	KASSERT(m, ("%s: short mbuf chain", __func__));
	proto = *(mtod(m, caddr_t) + off);
	m = *m0;

	/* Delete frag6 header */
	if (ip6_deletefraghdr(m, hdrlen, M_NOWAIT) != 0)
		goto fail;

	if (m->m_flags & M_PKTHDR) {
		int plen = 0;
		for (m = *m0; m; m = m->m_next)
			plen += m->m_len;
		m = *m0;
		m->m_pkthdr.len = plen;
	}

	if ((mtag = m_tag_get(PF_REASSEMBLED, sizeof(struct pf_fragment_tag),
	    M_NOWAIT)) == NULL)
		goto fail;
	ftag = (struct pf_fragment_tag *)(mtag + 1);
	ftag->ft_hdrlen = hdrlen;
	ftag->ft_extoff = extoff;
	ftag->ft_maxlen = maxlen;
	ftag->ft_id = frag_id;
	m_tag_prepend(m, mtag);

	ip6 = mtod(m, struct ip6_hdr *);
	ip6->ip6_plen = htons(hdrlen - sizeof(struct ip6_hdr) + total);
	if (extoff) {
		/* Write protocol into next field of last extension header. */
		m = m_getptr(m, extoff + offsetof(struct ip6_ext, ip6e_nxt),
		    &off);
		KASSERT(m, ("%s: short mbuf chain", __func__));
		*(mtod(m, char *) + off) = proto;
		m = *m0;
	} else
		ip6->ip6_nxt = proto;

	if (hdrlen - sizeof(struct ip6_hdr) + total > IPV6_MAXPACKET) {
		DPFPRINTF(("drop: too big: %d\n", total));
		ip6->ip6_plen = 0;
		REASON_SET(reason, PFRES_SHORT);
		/* PF_DROP requires a valid mbuf *m0 in pf_test6(). */
		return (PF_DROP);
	}

	DPFPRINTF(("complete: %p(%d)\n", m, ntohs(ip6->ip6_plen)));
	return (PF_PASS);

fail:
	REASON_SET(reason, PFRES_MEMORY);
	/* PF_DROP requires a valid mbuf *m0 in pf_test6(), will free later. */
	return (PF_DROP);
}
#endif	/* INET6 */
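/*
 * The PF_REASSEMBLED tag attached above is the contract with
 * pf_refragment6(): hdrlen, extoff, maxlen and the original fragment
 * id are carried across pf_test6() so that, after the verdict is
 * applied to the reassembled packet, it can be re-fragmented on output
 * using the largest fragment size seen on input.
 */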
#ifdef INET6
int
pf_refragment6(struct ifnet *ifp, struct mbuf **m0, struct m_tag *mtag,
    bool forward)
{
	struct mbuf		*m = *m0, *t;
	struct ip6_hdr		*hdr;
	struct pf_fragment_tag	*ftag = (struct pf_fragment_tag *)(mtag + 1);
	struct pf_pdesc		 pd;
	uint32_t		 frag_id;
	uint16_t		 hdrlen, extoff, maxlen;
	uint8_t			 proto;
	int			 error, action;

	hdrlen = ftag->ft_hdrlen;
	extoff = ftag->ft_extoff;
	maxlen = ftag->ft_maxlen;
	frag_id = ftag->ft_id;
	m_tag_delete(m, mtag);
	mtag = NULL;
	ftag = NULL;

	if (extoff) {
		int off;

		/* Use protocol from next field of last extension header */
		m = m_getptr(m, extoff + offsetof(struct ip6_ext, ip6e_nxt),
		    &off);
		KASSERT((m != NULL), ("pf_refragment6: short mbuf chain"));
		proto = *(mtod(m, caddr_t) + off);
		*(mtod(m, char *) + off) = IPPROTO_FRAGMENT;
		m = *m0;
	} else {
		hdr = mtod(m, struct ip6_hdr *);
		proto = hdr->ip6_nxt;
		hdr->ip6_nxt = IPPROTO_FRAGMENT;
	}

	/* In case of link-local traffic we'll need a scope set. */
	hdr = mtod(m, struct ip6_hdr *);

	in6_setscope(&hdr->ip6_src, ifp, NULL);
	in6_setscope(&hdr->ip6_dst, ifp, NULL);

	/* The MTU must be a multiple of 8 bytes, or we risk doing the
	 * fragmentation wrong. */
	maxlen = maxlen & ~7;

	/*
	 * Maxlen may be less than 8 if there was only a single
	 * fragment.  As it was fragmented before, add a fragment
	 * header also for a single fragment.  If total or maxlen
	 * is less than 8, ip6_fragment() will return EMSGSIZE and
	 * we drop the packet.
	 */
	error = ip6_fragment(ifp, m, hdrlen, proto, maxlen, frag_id);
	m = (*m0)->m_nextpkt;
	(*m0)->m_nextpkt = NULL;
	if (error == 0) {
		/* The first mbuf contains the unfragmented packet. */
		m_freem(*m0);
		*m0 = NULL;
		action = PF_PASS;
	} else {
		/* Drop expects an mbuf to free. */
		DPFPRINTF(("refragment error %d\n", error));
		action = PF_DROP;
	}
	for (; m; m = t) {
		t = m->m_nextpkt;
		m->m_nextpkt = NULL;
		m->m_flags |= M_SKIP_FIREWALL;
		memset(&pd, 0, sizeof(pd));
		pd.pf_mtag = pf_find_mtag(m);
		if (error == 0)
			if (forward) {
				MPASS(m->m_pkthdr.rcvif != NULL);
				ip6_forward(m, 0);
			} else {
				(void)ip6_output(m, NULL, NULL, 0, NULL, NULL,
				    NULL);
			}
		else
			m_freem(m);
	}

	return (action);
}
#endif /* INET6 */
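/*
 * Example of the alignment fixup in pf_refragment6(): every non-final
 * IPv6 fragment must carry a payload that is a multiple of 8 octets
 * (RFC 8200), so a recorded maxlen of 1453 is rounded down to 1448 by
 * maxlen & ~7 before being passed to ip6_fragment().
 */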
#ifdef INET
int
pf_normalize_ip(struct mbuf **m0, int dir, struct pfi_kkif *kif, u_short *reason,
    struct pf_pdesc *pd)
{
	struct mbuf		*m = *m0;
	struct pf_krule		*r;
	struct ip		*h = mtod(m, struct ip *);
	int			 mff = (ntohs(h->ip_off) & IP_MF);
	int			 hlen = h->ip_hl << 2;
	u_int16_t		 fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
	u_int16_t		 max;
	int			 ip_len;
	int			 tag = -1;
	int			 verdict;
	int			 srs;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	/* Check if there are any scrub rules. Lack of scrub rules means
	 * packet normalization is enforced, just like in OpenBSD. */
	srs = (r != NULL);
	while (r != NULL) {
		pf_counter_u64_add(&r->evaluations, 1);
		if (pfi_kkif_match(r->kif, kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != AF_INET)
			r = r->skip[PF_SKIP_AF].ptr;
		else if (r->proto && r->proto != h->ip_p)
			r = r->skip[PF_SKIP_PROTO].ptr;
		else if (PF_MISMATCHAW(&r->src.addr,
		    (struct pf_addr *)&h->ip_src.s_addr, AF_INET,
		    r->src.neg, kif, M_GETFIB(m)))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr,
		    (struct pf_addr *)&h->ip_dst.s_addr, AF_INET,
		    r->dst.neg, NULL, M_GETFIB(m)))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else if (r->match_tag && !pf_match_tag(m, r, &tag,
		    pd->pf_mtag ? pd->pf_mtag->tag : 0))
			r = TAILQ_NEXT(r, entries);
		else
			break;
	}

	if (srs) {
		/* With scrub rules present IPv4 normalization happens only
		 * if one of the rules has matched and it's not a "no scrub"
		 * rule */
		if (r == NULL || r->action == PF_NOSCRUB)
			return (PF_PASS);

		pf_counter_u64_critical_enter();
		pf_counter_u64_add_protected(&r->packets[dir == PF_OUT], 1);
		pf_counter_u64_add_protected(&r->bytes[dir == PF_OUT], pd->tot_len);
		pf_counter_u64_critical_exit();
	} else if ((!V_pf_status.reass && (h->ip_off & htons(IP_MF | IP_OFFMASK)))) {
		/* With no scrub rules IPv4 fragment reassembly depends on the
		 * global switch. Fragments can be dropped early if reassembly
		 * is disabled. */
		REASON_SET(reason, PFRES_NORM);
		goto drop;
	}

	/* Check for illegal packets */
	if (hlen < (int)sizeof(struct ip)) {
		REASON_SET(reason, PFRES_NORM);
		goto drop;
	}

	if (hlen > ntohs(h->ip_len)) {
		REASON_SET(reason, PFRES_NORM);
		goto drop;
	}

	/* Clear IP_DF if the rule uses the no-df option or we're in no-df mode */
	if ((((r && r->rule_flag & PFRULE_NODF) ||
	    (V_pf_status.reass & PF_REASS_NODF)) &&
	    h->ip_off & htons(IP_DF))) {
		u_int16_t ip_off = h->ip_off;

		h->ip_off &= htons(~IP_DF);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
	}

	/* We will need other tests here */
	if (!fragoff && !mff)
		goto no_fragment;

	/* We're dealing with a fragment now. Don't allow fragments
	 * with IP_DF to enter the cache. If the flag was cleared by
	 * no-df above, fine. Otherwise drop it.
	 */
	if (h->ip_off & htons(IP_DF)) {
		DPFPRINTF(("IP_DF\n"));
		goto bad;
	}

	ip_len = ntohs(h->ip_len) - hlen;

	/* All fragments are 8 byte aligned */
	if (mff && (ip_len & 0x7)) {
		DPFPRINTF(("mff and %d\n", ip_len));
		goto bad;
	}

	/* Respect maximum length */
	if (fragoff + ip_len > IP_MAXPACKET) {
		DPFPRINTF(("max packet %d\n", fragoff + ip_len));
		goto bad;
	}

	if (r == NULL || !(r->rule_flag & PFRULE_FRAGMENT_NOREASS)) {
		max = fragoff + ip_len;

		/* Fully buffer all of the fragments
		 * Might return a completely reassembled mbuf, or NULL */
		PF_FRAG_LOCK();
		DPFPRINTF(("reass frag %d @ %d-%d\n", h->ip_id, fragoff, max));
		verdict = pf_reassemble(m0, h, dir, reason);
		PF_FRAG_UNLOCK();

		if (verdict != PF_PASS)
			return (PF_DROP);

		m = *m0;
		if (m == NULL)
			return (PF_DROP);

		h = mtod(m, struct ip *);

no_fragment:
		/* At this point, only IP_DF is allowed in ip_off */
		if (h->ip_off & ~htons(IP_DF)) {
			u_int16_t ip_off = h->ip_off;

			h->ip_off &= htons(IP_DF);
			h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
		}
	}
	if (r != NULL) {
		int scrub_flags = pf_rule_to_scrub_flags(r->rule_flag);
		pf_scrub_ip(&m, scrub_flags, r->min_ttl, r->set_tos);
	}

	return (PF_PASS);

bad:
	DPFPRINTF(("dropping bad fragment\n"));
	REASON_SET(reason, PFRES_FRAG);
drop:
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET, dir, *reason, r, NULL, NULL, pd,
		    1);

	return (PF_DROP);
}
#endif
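/*
 * Unlike the IPv4 path, pf_normalize_ip6() must walk the extension
 * header chain (hop-by-hop, routing, destination options, AH) to find
 * a fragment header, remembering the offset of the last extension
 * header in extoff so the reassembled packet's next-header field can
 * be patched later.
 */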
#ifdef INET6
int
pf_normalize_ip6(struct mbuf **m0, int dir, struct pfi_kkif *kif,
    u_short *reason, struct pf_pdesc *pd)
{
	struct mbuf		*m = *m0;
	struct pf_krule		*r;
	struct ip6_hdr		*h = mtod(m, struct ip6_hdr *);
	int			 extoff;
	int			 off;
	struct ip6_ext		 ext;
	struct ip6_opt		 opt;
	struct ip6_frag		 frag;
	u_int32_t		 plen;
	int			 optend;
	int			 ooff;
	u_int8_t		 proto;
	int			 terminal;
	int			 srs;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	/* Check if there are any scrub rules. Lack of scrub rules means
	 * packet normalization is enforced, just like in OpenBSD. */
	srs = (r != NULL);
	while (r != NULL) {
		pf_counter_u64_add(&r->evaluations, 1);
		if (pfi_kkif_match(r->kif, kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != AF_INET6)
			r = r->skip[PF_SKIP_AF].ptr;
#if 0 /* header chain! */
		else if (r->proto && r->proto != h->ip6_nxt)
			r = r->skip[PF_SKIP_PROTO].ptr;
#endif
		else if (PF_MISMATCHAW(&r->src.addr,
		    (struct pf_addr *)&h->ip6_src, AF_INET6,
		    r->src.neg, kif, M_GETFIB(m)))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr,
		    (struct pf_addr *)&h->ip6_dst, AF_INET6,
		    r->dst.neg, NULL, M_GETFIB(m)))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else
			break;
	}

	if (srs) {
		/* With scrub rules present IPv6 normalization happens only
		 * if one of the rules has matched and it's not a "no scrub"
		 * rule */
		if (r == NULL || r->action == PF_NOSCRUB)
			return (PF_PASS);

		pf_counter_u64_critical_enter();
		pf_counter_u64_add_protected(&r->packets[dir == PF_OUT], 1);
		pf_counter_u64_add_protected(&r->bytes[dir == PF_OUT], pd->tot_len);
		pf_counter_u64_critical_exit();
	}

	/* Check for illegal packets */
	if (sizeof(struct ip6_hdr) + IPV6_MAXPACKET < m->m_pkthdr.len)
		goto drop;

	plen = ntohs(h->ip6_plen);
	/* jumbo payload option not supported */
	if (plen == 0)
		goto drop;

	extoff = 0;
	off = sizeof(struct ip6_hdr);
	proto = h->ip6_nxt;
	terminal = 0;
	do {
		switch (proto) {
		case IPPROTO_FRAGMENT:
			goto fragment;
			break;
		case IPPROTO_AH:
		case IPPROTO_ROUTING:
		case IPPROTO_DSTOPTS:
			if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
			    NULL, AF_INET6))
				goto shortpkt;
			extoff = off;
			if (proto == IPPROTO_AH)
				off += (ext.ip6e_len + 2) * 4;
			else
				off += (ext.ip6e_len + 1) * 8;
			proto = ext.ip6e_nxt;
			break;
		case IPPROTO_HOPOPTS:
			if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
			    NULL, AF_INET6))
				goto shortpkt;
			extoff = off;
			optend = off + (ext.ip6e_len + 1) * 8;
			ooff = off + sizeof(ext);
			do {
				if (!pf_pull_hdr(m, ooff, &opt.ip6o_type,
				    sizeof(opt.ip6o_type), NULL, NULL,
				    AF_INET6))
					goto shortpkt;
				if (opt.ip6o_type == IP6OPT_PAD1) {
					ooff++;
					continue;
				}
				if (!pf_pull_hdr(m, ooff, &opt, sizeof(opt),
				    NULL, NULL, AF_INET6))
					goto shortpkt;
				if (ooff + sizeof(opt) + opt.ip6o_len > optend)
					goto drop;
				if (opt.ip6o_type == IP6OPT_JUMBO)
					goto drop;
				ooff += sizeof(opt) + opt.ip6o_len;
			} while (ooff < optend);

			off = optend;
			proto = ext.ip6e_nxt;
			break;
		default:
			terminal = 1;
			break;
		}
	} while (!terminal);
	if (sizeof(struct ip6_hdr) + plen > m->m_pkthdr.len)
		goto shortpkt;

	if (r != NULL) {
		int scrub_flags = pf_rule_to_scrub_flags(r->rule_flag);
		pf_scrub_ip6(&m, scrub_flags, r->min_ttl, r->set_tos);
	}

	return (PF_PASS);

fragment:
	if (sizeof(struct ip6_hdr) + plen > m->m_pkthdr.len)
		goto shortpkt;

	if (!pf_pull_hdr(m, off, &frag, sizeof(frag), NULL, NULL, AF_INET6))
		goto shortpkt;

	/* Offset now points to data portion. */
	off += sizeof(frag);

	/* Returns PF_DROP or *m0 is NULL or completely reassembled mbuf. */
	if (pf_reassemble6(m0, h, &frag, off, extoff, reason) != PF_PASS)
		return (PF_DROP);
	m = *m0;
	if (m == NULL)
		return (PF_DROP);

	pd->flags |= PFDESC_IP_REAS;
	return (PF_PASS);

shortpkt:
	REASON_SET(reason, PFRES_SHORT);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET6, dir, *reason, r, NULL, NULL, pd,
		    1);
	return (PF_DROP);

drop:
	REASON_SET(reason, PFRES_NORM);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET6, dir, *reason, r, NULL, NULL, pd,
		    1);
	return (PF_DROP);
}
#endif /* INET6 */
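/*
 * TCP flag sanity enforced by pf_normalize_tcp(): SYN may not be
 * combined with RST or FIN, a segment without SYN must carry ACK or
 * RST, and FIN/PUSH/URG are only meaningful together with ACK.
 * Violations are dropped with PFRES_NORM.
 */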
int
pf_normalize_tcp(int dir, struct pfi_kkif *kif, struct mbuf *m, int ipoff,
    int off, void *h, struct pf_pdesc *pd)
{
	struct pf_krule	*r, *rm = NULL;
	struct tcphdr	*th = &pd->hdr.tcp;
	int		 rewrite = 0;
	u_short		 reason;
	u_int8_t	 flags;
	sa_family_t	 af = pd->af;
	int		 srs;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	/* Check if there are any scrub rules. Lack of scrub rules means
	 * packet normalization is enforced, just like in OpenBSD. */
	srs = (r != NULL);
	while (r != NULL) {
		pf_counter_u64_add(&r->evaluations, 1);
		if (pfi_kkif_match(r->kif, kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != af)
			r = r->skip[PF_SKIP_AF].ptr;
		else if (r->proto && r->proto != pd->proto)
			r = r->skip[PF_SKIP_PROTO].ptr;
		else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
		    r->src.neg, kif, M_GETFIB(m)))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (r->src.port_op && !pf_match_port(r->src.port_op,
		    r->src.port[0], r->src.port[1], th->th_sport))
			r = r->skip[PF_SKIP_SRC_PORT].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
		    r->dst.neg, NULL, M_GETFIB(m)))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
		    r->dst.port[0], r->dst.port[1], th->th_dport))
			r = r->skip[PF_SKIP_DST_PORT].ptr;
		else if (r->os_fingerprint != PF_OSFP_ANY && !pf_osfp_match(
		    pf_osfp_fingerprint(pd, m, off, th),
		    r->os_fingerprint))
			r = TAILQ_NEXT(r, entries);
		else {
			rm = r;
			break;
		}
	}

	if (srs) {
		/* With scrub rules present TCP normalization happens only
		 * if one of the rules has matched and it's not a "no scrub"
		 * rule */
		if (rm == NULL || rm->action == PF_NOSCRUB)
			return (PF_PASS);

		pf_counter_u64_critical_enter();
		pf_counter_u64_add_protected(&r->packets[dir == PF_OUT], 1);
		pf_counter_u64_add_protected(&r->bytes[dir == PF_OUT], pd->tot_len);
		pf_counter_u64_critical_exit();
	}

	if (rm && rm->rule_flag & PFRULE_REASSEMBLE_TCP)
		pd->flags |= PFDESC_TCP_NORM;

	flags = th->th_flags;
	if (flags & TH_SYN) {
		/* Illegal packet */
		if (flags & TH_RST)
			goto tcp_drop;

		if (flags & TH_FIN)
			goto tcp_drop;
	} else {
		/* Illegal packet */
		if (!(flags & (TH_ACK|TH_RST)))
			goto tcp_drop;
	}

	if (!(flags & TH_ACK)) {
		/* These flags are only valid if ACK is set */
		if ((flags & TH_FIN) || (flags & TH_PUSH) || (flags & TH_URG))
			goto tcp_drop;
	}

	/* Check for illegal header length */
	if (th->th_off < (sizeof(struct tcphdr) >> 2))
		goto tcp_drop;

	/* If flags changed, or reserved data set, then adjust */
	if (flags != th->th_flags || th->th_x2 != 0) {
		u_int16_t	ov, nv;

		ov = *(u_int16_t *)(&th->th_ack + 1);
		th->th_flags = flags;
		th->th_x2 = 0;
		nv = *(u_int16_t *)(&th->th_ack + 1);

		th->th_sum = pf_proto_cksum_fixup(m, th->th_sum, ov, nv, 0);
		rewrite = 1;
	}

	/* Remove urgent pointer, if TH_URG is not set */
	if (!(flags & TH_URG) && th->th_urp) {
		th->th_sum = pf_proto_cksum_fixup(m, th->th_sum, th->th_urp,
		    0, 0);
		th->th_urp = 0;
		rewrite = 1;
	}

	/* Set MSS for old-style scrub rules.
	 * The function performs its own copyback. */
	if (rm != NULL && rm->max_mss)
		pf_normalize_mss(m, off, pd, rm->max_mss);

	/* copy back packet headers if we sanitized */
	if (rewrite)
		m_copyback(m, off, sizeof(*th), (caddr_t)th);

	return (PF_PASS);

tcp_drop:
	REASON_SET(&reason, PFRES_NORM);
	if (rm != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET, dir, reason, r, NULL, NULL, pd,
		    1);
	return (PF_DROP);
}
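/*
 * A note on the flags rewrite above: th_off, th_x2 and th_flags share
 * the 16-bit word that follows th_ack, so reading that word before and
 * after the rewrite (ov/nv) lets pf_proto_cksum_fixup() patch the TCP
 * checksum with a single incremental update; clearing the reserved
 * bits changes only that one word of the header.
 */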
int
pf_normalize_tcp_init(struct mbuf *m, int off, struct pf_pdesc *pd,
    struct tcphdr *th, struct pf_state_peer *src, struct pf_state_peer *dst)
{
	u_int32_t tsval, tsecr;
	u_int8_t hdr[60];
	u_int8_t *opt;

	KASSERT((src->scrub == NULL),
	    ("pf_normalize_tcp_init: src->scrub != NULL"));

	src->scrub = uma_zalloc(V_pf_state_scrub_z, M_ZERO | M_NOWAIT);
	if (src->scrub == NULL)
		return (1);

	switch (pd->af) {
#ifdef INET
	case AF_INET: {
		struct ip *h = mtod(m, struct ip *);
		src->scrub->pfss_ttl = h->ip_ttl;
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
		src->scrub->pfss_ttl = h->ip6_hlim;
		break;
	}
#endif /* INET6 */
	}

	/*
	 * All normalizations below are only begun if we see the start of
	 * the connections.  They must all set an enabled bit in pfss_flags
	 */
	if ((th->th_flags & TH_SYN) == 0)
		return (0);

	if (th->th_off > (sizeof(struct tcphdr) >> 2) && src->scrub &&
	    pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
		/* Diddle with TCP options */
		int hlen;
		opt = hdr + sizeof(struct tcphdr);
		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
		while (hlen >= TCPOLEN_TIMESTAMP) {
			switch (*opt) {
			case TCPOPT_EOL:	/* FALLTHROUGH */
			case TCPOPT_NOP:
				opt++;
				hlen--;
				break;
			case TCPOPT_TIMESTAMP:
				if (opt[1] >= TCPOLEN_TIMESTAMP) {
					src->scrub->pfss_flags |=
					    PFSS_TIMESTAMP;
					src->scrub->pfss_ts_mod =
					    htonl(arc4random());

					/* note PFSS_PAWS not set yet */
					memcpy(&tsval, &opt[2],
					    sizeof(u_int32_t));
					memcpy(&tsecr, &opt[6],
					    sizeof(u_int32_t));
					src->scrub->pfss_tsval0 = ntohl(tsval);
					src->scrub->pfss_tsval = ntohl(tsval);
					src->scrub->pfss_tsecr = ntohl(tsecr);
					getmicrouptime(&src->scrub->pfss_last);
				}
				/* FALLTHROUGH */
			default:
				hlen -= MAX(opt[1], 2);
				opt += MAX(opt[1], 2);
				break;
			}
		}
	}

	return (0);
}
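/*
 * Timestamp modulation sketch: pfss_ts_mod is a per-connection random
 * offset picked above on the initial SYN.  pf_normalize_tcp_stateful()
 * adds it to every TSval sent by this peer and subtracts it from the
 * TSecr echoed back, hiding the host's raw timestamp clock (and thus
 * its uptime) without disturbing RTT measurement or PAWS.
 */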
void
pf_normalize_tcp_cleanup(struct pf_kstate *state)
{
	uma_zfree(V_pf_state_scrub_z, state->src.scrub);
	uma_zfree(V_pf_state_scrub_z, state->dst.scrub);

	/* Someday... flush the TCP segment reassembly descriptors. */
}

int
pf_normalize_tcp_stateful(struct mbuf *m, int off, struct pf_pdesc *pd,
    u_short *reason, struct tcphdr *th, struct pf_kstate *state,
    struct pf_state_peer *src, struct pf_state_peer *dst, int *writeback)
{
	struct timeval uptime;
	u_int32_t tsval, tsecr;
	u_int tsval_from_last;
	u_int8_t hdr[60];
	u_int8_t *opt;
	int copyback = 0;
	int got_ts = 0;
	size_t startoff;

	KASSERT((src->scrub || dst->scrub),
	    ("%s: src->scrub && dst->scrub!", __func__));

	/*
	 * Enforce the minimum TTL seen for this connection.  Negate a common
	 * technique to evade an intrusion detection system and confuse
	 * firewall state code.
	 */
	switch (pd->af) {
#ifdef INET
	case AF_INET: {
		if (src->scrub) {
			struct ip *h = mtod(m, struct ip *);
			if (h->ip_ttl > src->scrub->pfss_ttl)
				src->scrub->pfss_ttl = h->ip_ttl;
			h->ip_ttl = src->scrub->pfss_ttl;
		}
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		if (src->scrub) {
			struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
			if (h->ip6_hlim > src->scrub->pfss_ttl)
				src->scrub->pfss_ttl = h->ip6_hlim;
			h->ip6_hlim = src->scrub->pfss_ttl;
		}
		break;
	}
#endif /* INET6 */
	}

	if (th->th_off > (sizeof(struct tcphdr) >> 2) &&
	    ((src->scrub && (src->scrub->pfss_flags & PFSS_TIMESTAMP)) ||
	    (dst->scrub && (dst->scrub->pfss_flags & PFSS_TIMESTAMP))) &&
	    pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
		/* Diddle with TCP options */
		int hlen;
		opt = hdr + sizeof(struct tcphdr);
		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
		while (hlen >= TCPOLEN_TIMESTAMP) {
			startoff = opt - (hdr + sizeof(struct tcphdr));
			switch (*opt) {
			case TCPOPT_EOL:	/* FALLTHROUGH */
			case TCPOPT_NOP:
				opt++;
				hlen--;
				break;
			case TCPOPT_TIMESTAMP:
				/* Modulate the timestamps.  Can be used for
				 * NAT detection, OS uptime determination or
				 * reboot detection.
				 */

				if (got_ts) {
					/* Huh?  Multiple timestamps!? */
					if (V_pf_status.debug >= PF_DEBUG_MISC) {
						DPFPRINTF(("multiple TS??\n"));
						pf_print_state(state);
						printf("\n");
					}
					REASON_SET(reason, PFRES_TS);
					return (PF_DROP);
				}
				if (opt[1] >= TCPOLEN_TIMESTAMP) {
					memcpy(&tsval, &opt[2],
					    sizeof(u_int32_t));
					if (tsval && src->scrub &&
					    (src->scrub->pfss_flags &
					    PFSS_TIMESTAMP)) {
						tsval = ntohl(tsval);
						pf_patch_32_unaligned(m,
						    &th->th_sum,
						    &opt[2],
						    htonl(tsval +
						    src->scrub->pfss_ts_mod),
						    PF_ALGNMNT(startoff),
						    0);
						copyback = 1;
					}

					/* Modulate TS reply iff valid (!0) */
					memcpy(&tsecr, &opt[6],
					    sizeof(u_int32_t));
					if (tsecr && dst->scrub &&
					    (dst->scrub->pfss_flags &
					    PFSS_TIMESTAMP)) {
						tsecr = ntohl(tsecr)
						    - dst->scrub->pfss_ts_mod;
						pf_patch_32_unaligned(m,
						    &th->th_sum,
						    &opt[6],
						    htonl(tsecr),
						    PF_ALGNMNT(startoff),
						    0);
						copyback = 1;
					}
					got_ts = 1;
				}
				/* FALLTHROUGH */
			default:
				hlen -= MAX(opt[1], 2);
				opt += MAX(opt[1], 2);
				break;
			}
		}
		if (copyback) {
			/* Copyback the options, caller copies back header */
			*writeback = 1;
			m_copyback(m, off + sizeof(struct tcphdr),
			    (th->th_off << 2) - sizeof(struct tcphdr), hdr +
			    sizeof(struct tcphdr));
		}
	}
	/*
	 * Must invalidate PAWS checks on connections idle for too long.
	 * The fastest allowed timestamp clock is 1ms.  That turns out to
	 * be about 24 days before it wraps.  XXX Right now our lowerbound
	 * TS echo check only works for the first 12 days of a connection
	 * when the TS has exhausted half its 32bit space
	 */
#define TS_MAX_IDLE	(24*24*60*60)
#define TS_MAX_CONN	(12*24*60*60)	/* XXX remove when better tsecr check */

	getmicrouptime(&uptime);
	if (src->scrub && (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (uptime.tv_sec - src->scrub->pfss_last.tv_sec > TS_MAX_IDLE ||
	    time_uptime - state->creation > TS_MAX_CONN)) {
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			DPFPRINTF(("src idled out of PAWS\n"));
			pf_print_state(state);
			printf("\n");
		}
		src->scrub->pfss_flags = (src->scrub->pfss_flags & ~PFSS_PAWS)
		    | PFSS_PAWS_IDLED;
	}
	if (dst->scrub && (dst->scrub->pfss_flags & PFSS_PAWS) &&
	    uptime.tv_sec - dst->scrub->pfss_last.tv_sec > TS_MAX_IDLE) {
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			DPFPRINTF(("dst idled out of PAWS\n"));
			pf_print_state(state);
			printf("\n");
		}
		dst->scrub->pfss_flags = (dst->scrub->pfss_flags & ~PFSS_PAWS)
		    | PFSS_PAWS_IDLED;
	}

	if (got_ts && src->scrub && dst->scrub &&
	    (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (dst->scrub->pfss_flags & PFSS_PAWS)) {
		/* Validate that the timestamps are "in-window".
		 * RFC1323 describes TCP Timestamp options that allow
		 * measurement of RTT (round trip time) and PAWS
		 * (protection against wrapped sequence numbers).  PAWS
		 * gives us a set of rules for rejecting packets on
		 * long fat pipes (packets that were somehow delayed
		 * in transit longer than the time it took to send the
		 * full TCP sequence space of 4Gb).  We can use these
		 * rules and infer a few others that will let us treat
		 * the 32bit timestamp and the 32bit echoed timestamp
		 * as sequence numbers to prevent a blind attacker from
		 * inserting packets into a connection.
		 *
		 * RFC1323 tells us:
		 *  - The timestamp on this packet must be greater than
		 *    or equal to the last value echoed by the other
		 *    endpoint.  The RFC says those will be discarded
		 *    since it is a dup that has already been acked.
		 *    This gives us a lowerbound on the timestamp.
		 *        timestamp >= other last echoed timestamp
		 *  - The timestamp will be less than or equal to
		 *    the last timestamp plus the time between the
		 *    last packet and now.  The RFC defines the max
		 *    clock rate as 1ms.  We will allow clocks to be
		 *    up to 10% fast and will allow a total difference
		 *    of 30 seconds due to a route change.  And this
		 *    gives us an upperbound on the timestamp.
		 *        timestamp <= last timestamp + max ticks
		 *    We have to be careful here.  Windows will send an
		 *    initial timestamp of zero and then initialize it
		 *    to a random value after the 3whs; presumably to
		 *    avoid a DoS by having to call an expensive RNG
		 *    during a SYN flood.  Proof MS has at least one
		 *    good security geek.
		 *
		 *  - The TCP timestamp option must also echo the other
		 *    endpoint's timestamp.  The timestamp echoed is the
		 *    one carried on the earliest unacknowledged segment
		 *    on the left edge of the sequence window.  The RFC
		 *    states that the host will reject any echoed
		 *    timestamps that were larger than any ever sent.
		 *    This gives us an upperbound on the TS echo.
		 *        tsecr <= largest_tsval
		 *  - The lowerbound on the TS echo is a little more
		 *    tricky to determine.  The other endpoint's echoed
		 *    values will not decrease.  But there may be
		 *    network conditions that re-order packets and
		 *    cause our view of them to decrease.  For now the
		 *    only lowerbound we can safely determine is that
		 *    the TS echo will never be less than the original
		 *    TS.  XXX There is probably a better lowerbound.
		 *    Remove TS_MAX_CONN with better lowerbound check.
		 *        tsecr >= other original TS
		 *
		 * It is also important to note that the fastest
		 * timestamp clock of 1ms will wrap its 32bit space in
		 * 24 days.  So we just disable TS checking after 24
		 * days of idle time.  We actually must use a 12d
		 * connection limit until we can come up with a better
		 * lowerbound to the TS echo check.
		 */
		struct timeval delta_ts;
		int ts_fudge;

		/*
		 * PFTM_TS_DIFF is how many seconds of leeway to allow
		 * a host's timestamp.  This can happen if the previous
		 * packet got delayed in transit for much longer than
		 * this packet.
		 */
		if ((ts_fudge = state->rule.ptr->timeout[PFTM_TS_DIFF]) == 0)
			ts_fudge = V_pf_default_rule.timeout[PFTM_TS_DIFF];

		/* Calculate max ticks since the last timestamp */
#define TS_MAXFREQ	1100		/* RFC max TS freq of 1Khz + 10% skew */
#define TS_MICROSECS	1000000		/* microseconds per second */
		delta_ts = uptime;
		timevalsub(&delta_ts, &src->scrub->pfss_last);
		tsval_from_last = (delta_ts.tv_sec + ts_fudge) * TS_MAXFREQ;
		tsval_from_last += delta_ts.tv_usec / (TS_MICROSECS/TS_MAXFREQ);

		if ((src->state >= TCPS_ESTABLISHED &&
		    dst->state >= TCPS_ESTABLISHED) &&
		    (SEQ_LT(tsval, dst->scrub->pfss_tsecr) ||
		    SEQ_GT(tsval, src->scrub->pfss_tsval + tsval_from_last) ||
		    (tsecr && (SEQ_GT(tsecr, dst->scrub->pfss_tsval) ||
		    SEQ_LT(tsecr, dst->scrub->pfss_tsval0))))) {
			/* Bad RFC1323 implementation or an insertion attack.
			 *
			 * - Solaris 2.6 and 2.7 are known to send another ACK
			 *   after the FIN,FIN|ACK,ACK closing that carries
			 *   an old timestamp.
			 */

			DPFPRINTF(("Timestamp failed %c%c%c%c\n",
			    SEQ_LT(tsval, dst->scrub->pfss_tsecr) ? '0' : ' ',
			    SEQ_GT(tsval, src->scrub->pfss_tsval +
			    tsval_from_last) ? '1' : ' ',
			    SEQ_GT(tsecr, dst->scrub->pfss_tsval) ? '2' : ' ',
			    SEQ_LT(tsecr, dst->scrub->pfss_tsval0)? '3' : ' '));
			DPFPRINTF((" tsval: %u  tsecr: %u  +ticks: %u  "
			    "idle: %jus %lums\n",
			    tsval, tsecr, tsval_from_last,
			    (uintmax_t)delta_ts.tv_sec,
			    delta_ts.tv_usec / 1000));
			DPFPRINTF((" src->tsval: %u  tsecr: %u\n",
			    src->scrub->pfss_tsval, src->scrub->pfss_tsecr));
			DPFPRINTF((" dst->tsval: %u  tsecr: %u  tsval0: %u"
			    "\n", dst->scrub->pfss_tsval,
			    dst->scrub->pfss_tsecr, dst->scrub->pfss_tsval0));
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
				pf_print_state(state);
				pf_print_flags(th->th_flags);
				printf("\n");
			}
			REASON_SET(reason, PFRES_TS);
			return (PF_DROP);
		}

		/* XXX I'd really like to require tsecr but it's optional */

	} else if (!got_ts && (th->th_flags & TH_RST) == 0 &&
	    ((src->state == TCPS_ESTABLISHED && dst->state == TCPS_ESTABLISHED)
	    || pd->p_len > 0 || (th->th_flags & TH_SYN)) &&
	    src->scrub && dst->scrub &&
	    (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (dst->scrub->pfss_flags & PFSS_PAWS)) {
		/* Didn't send a timestamp.  Timestamps aren't really useful
		 * when:
		 *  - connection opening or closing (often not even sent).
		 *    but we must not let an attacker put a FIN on a
		 *    data packet to sneak it through our ESTABLISHED check.
		 *  - on a TCP reset.  RFC suggests not even looking at TS.
		 *  - on an empty ACK.  The TS will not be echoed so it will
		 *    probably not help keep the RTT calculation in sync and
		 *    there isn't as much danger when the sequence numbers
		 *    got wrapped.  So some stacks don't include TS on empty
		 *    ACKs :-(
		 *
		 * To minimize the disruption to mostly RFC1323 conformant
		 * stacks, we will only require timestamps on data packets.
		 *
		 * And what do ya know, we cannot require timestamps on data
		 * packets.  There appear to be devices that do legitimate
		 * TCP connection hijacking.  There are HTTP devices that allow
		 * a 3whs (with timestamps) and then buffer the HTTP request.
		 * If the intermediate device has the HTTP response cache, it
		 * will spoof the response but not bother timestamping its
		 * packets.  So we can look for the presence of a timestamp in
		 * the first data packet and if there, require it in all future
		 * packets.
		 */
		if (pd->p_len > 0 && (src->scrub->pfss_flags & PFSS_DATA_TS)) {
			/*
			 * Hey!  Someone tried to sneak a packet in.  Or the
			 * stack changed its RFC1323 behavior?!?!
			 */
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
				DPFPRINTF(("Did not receive expected RFC1323 "
				    "timestamp\n"));
				pf_print_state(state);
				pf_print_flags(th->th_flags);
				printf("\n");
			}
			REASON_SET(reason, PFRES_TS);
			return (PF_DROP);
		}
	}

	/*
	 * We will note whether a host sends its data packets with or without
	 * timestamps, and require all data packets to contain a timestamp if
	 * the first one does.  PAWS implicitly requires that all data packets
	 * be timestamped.  But I think there are middle-man devices that
	 * hijack TCP streams immediately after the 3whs and don't timestamp
	 * their packets (seen in a WWW accelerator or cache).
	 */
	if (pd->p_len > 0 && src->scrub && (src->scrub->pfss_flags &
	    (PFSS_TIMESTAMP|PFSS_DATA_TS|PFSS_DATA_NOTS)) == PFSS_TIMESTAMP) {
		if (got_ts)
			src->scrub->pfss_flags |= PFSS_DATA_TS;
		else {
			src->scrub->pfss_flags |= PFSS_DATA_NOTS;
			if (V_pf_status.debug >= PF_DEBUG_MISC && dst->scrub &&
			    (dst->scrub->pfss_flags & PFSS_TIMESTAMP)) {
				/* Don't warn if other host rejected RFC1323 */
				DPFPRINTF(("Broken RFC1323 stack did not "
				    "timestamp data packet.  Disabled PAWS "
				    "security.\n"));
				pf_print_state(state);
				pf_print_flags(th->th_flags);
				printf("\n");
			}
		}
	}

	/*
	 * Update PAWS values
	 */
	if (got_ts && src->scrub && PFSS_TIMESTAMP == (src->scrub->pfss_flags &
	    (PFSS_PAWS_IDLED|PFSS_TIMESTAMP))) {
		getmicrouptime(&src->scrub->pfss_last);
		if (SEQ_GEQ(tsval, src->scrub->pfss_tsval) ||
		    (src->scrub->pfss_flags & PFSS_PAWS) == 0)
			src->scrub->pfss_tsval = tsval;

		if (tsecr) {
			if (SEQ_GEQ(tsecr, src->scrub->pfss_tsecr) ||
			    (src->scrub->pfss_flags & PFSS_PAWS) == 0)
				src->scrub->pfss_tsecr = tsecr;

			if ((src->scrub->pfss_flags & PFSS_PAWS) == 0 &&
			    (SEQ_LT(tsval, src->scrub->pfss_tsval0) ||
			    src->scrub->pfss_tsval0 == 0)) {
				/* tsval0 MUST be the lowest timestamp */
				src->scrub->pfss_tsval0 = tsval;
			}

			/* Only fully initialized after a TS gets echoed */
			if ((src->scrub->pfss_flags & PFSS_PAWS) == 0)
				src->scrub->pfss_flags |= PFSS_PAWS;
		}
	}
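
	/*
	 * Note on the update above: pfss_tsval0 records the first (lowest)
	 * tsval seen from this host and is only written while PFSS_PAWS is
	 * still unset, i.e. until the host has echoed a timestamp; after
	 * that, pfss_tsval and pfss_tsecr only move forward (modulo the
	 * 32bit wraparound handled by the SEQ macros).
	 */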

	/* I have a dream....  TCP segment reassembly.... */
	return (0);
}

int
pf_normalize_mss(struct mbuf *m, int off, struct pf_pdesc *pd, u_int16_t maxmss)
{
	struct tcphdr *th = &pd->hdr.tcp;
	u_int16_t *mss;
	int thoff;
	int opt, cnt, optlen = 0;
	u_char opts[TCP_MAXOLEN];
	u_char *optp = opts;
	size_t startoff;

	thoff = th->th_off << 2;
	cnt = thoff - sizeof(struct tcphdr);

	if (cnt > 0 && !pf_pull_hdr(m, off + sizeof(*th), opts, cnt,
	    NULL, NULL, pd->af))
		return (0);

	for (; cnt > 0; cnt -= optlen, optp += optlen) {
		startoff = optp - opts;
		opt = optp[0];
		if (opt == TCPOPT_EOL)
			break;
		if (opt == TCPOPT_NOP)
			optlen = 1;
		else {
			if (cnt < 2)
				break;
			optlen = optp[1];
			if (optlen < 2 || optlen > cnt)
				break;
		}
		switch (opt) {
		case TCPOPT_MAXSEG:
			mss = (u_int16_t *)(optp + 2);
			if ((ntohs(*mss)) > maxmss) {
				pf_patch_16_unaligned(m,
				    &th->th_sum,
				    mss, htons(maxmss),
				    PF_ALGNMNT(startoff),
				    0);
				m_copyback(m, off + sizeof(*th),
				    thoff - sizeof(*th), opts);
				m_copyback(m, off, sizeof(*th), (caddr_t)th);
			}
			break;
		default:
			break;
		}
	}

	return (0);
}

u_int16_t
pf_rule_to_scrub_flags(u_int32_t rule_flags)
{
	/*
	 * Translate pf_krule->rule_flag to pf_krule->scrub_flags.
	 * The pf_scrub_ip functions have been adapted to the new style of
	 * pass rules but they might get called if old scrub rules are used.
	 */
	u_int16_t scrub_flags = 0;

	if (rule_flags & PFRULE_SET_TOS)
		scrub_flags |= PFSTATE_SETTOS;
	if (rule_flags & PFRULE_RANDOMID)
		scrub_flags |= PFSTATE_RANDOMID;

	return (scrub_flags);
}

#ifdef INET
void
pf_scrub_ip(struct mbuf **m0, u_int32_t flags, u_int8_t min_ttl, u_int8_t tos)
{
	struct mbuf *m = *m0;
	struct ip *h = mtod(m, struct ip *);

	/* Clear IP_DF if no-df was requested */
	if (flags & PFSTATE_NODF && h->ip_off & htons(IP_DF)) {
		u_int16_t ip_off = h->ip_off;

		h->ip_off &= htons(~IP_DF);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
	}

	/* Enforce a minimum ttl, may cause endless packet loops */
	if (min_ttl && h->ip_ttl < min_ttl) {
		u_int16_t ip_ttl = h->ip_ttl;

		h->ip_ttl = min_ttl;
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_ttl, h->ip_ttl, 0);
	}

	/* Enforce tos */
	if (flags & PFSTATE_SETTOS) {
		u_int16_t ov, nv;

		/*
		 * ip_tos shares its 16bit checksum word with ip_v/ip_hl,
		 * so snapshot that whole word before and after the change.
		 */
		ov = *(u_int16_t *)h;
		h->ip_tos = tos | (h->ip_tos & IPTOS_ECN_MASK);
		nv = *(u_int16_t *)h;

		h->ip_sum = pf_cksum_fixup(h->ip_sum, ov, nv, 0);
	}

	/* random-id, but not for fragments (offset and MF must be zero) */
	if (flags & PFSTATE_RANDOMID && !(h->ip_off & ~htons(IP_DF))) {
		uint16_t ip_id = h->ip_id;

		ip_fillid(h);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_id, h->ip_id, 0);
	}
}
#endif /* INET */

#ifdef INET6
void
pf_scrub_ip6(struct mbuf **m0, u_int32_t flags, u_int8_t min_ttl, u_int8_t tos)
{
	struct mbuf *m = *m0;
	struct ip6_hdr *h = mtod(m, struct ip6_hdr *);

	/* Enforce a minimum hop limit, may cause endless packet loops */
	if (min_ttl && h->ip6_hlim < min_ttl)
		h->ip6_hlim = min_ttl;
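
	/*
	 * Unlike the IPv4 path above, no checksum fixup is needed here:
	 * IPv6 has no header checksum, and neither the hop limit nor the
	 * traffic class is covered by the TCP/UDP pseudo-header sum.
	 */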

	/* Enforce tos.  Set the traffic class bits */
	if (flags & PFSTATE_SETTOS) {
		h->ip6_flow &= IPV6_FLOWLABEL_MASK | IPV6_VERSION_MASK;
		h->ip6_flow |= htonl((tos | IPV6_ECN(h)) << 20);
	}
}
#endif /* INET6 */