/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright 2001 Niels Provos <provos@citi.umich.edu>
 * Copyright 2011-2018 Alexander Bluhm <bluhm@openbsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *	$OpenBSD: pf_norm.c,v 1.114 2009/01/29 14:11:45 henning Exp $
 */

#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_pf.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/refcount.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/vnet.h>
#include <net/pfvar.h>
#include <net/if_pflog.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <netinet6/scope6_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/sctp_constants.h>
#include <netinet/sctp_header.h>

#ifdef INET6
#include <netinet/ip6.h>
#endif /* INET6 */

struct pf_frent {
	TAILQ_ENTRY(pf_frent)	fr_next;
	struct mbuf	*fe_m;
	uint16_t	fe_hdrlen;	/* ipv4 header length with ip options
					   ipv6, extension, fragment header */
	uint16_t	fe_extoff;	/* last extension header offset or 0 */
	uint16_t	fe_len;		/* fragment length */
	uint16_t	fe_off;		/* fragment offset */
	uint16_t	fe_mff;		/* more fragment flag */
};

struct pf_fragment_cmp {
	struct pf_addr	frc_src;
	struct pf_addr	frc_dst;
	uint32_t	frc_id;
	sa_family_t	frc_af;
	uint8_t		frc_proto;
};

struct pf_fragment {
	struct pf_fragment_cmp	fr_key;
#define fr_src	fr_key.frc_src
#define fr_dst	fr_key.frc_dst
#define fr_id	fr_key.frc_id
#define fr_af	fr_key.frc_af
#define fr_proto	fr_key.frc_proto

	/* pointers to queue element */
	struct pf_frent	*fr_firstoff[PF_FRAG_ENTRY_POINTS];
	/* count entries between pointers */
	uint8_t		fr_entries[PF_FRAG_ENTRY_POINTS];
	RB_ENTRY(pf_fragment) fr_entry;
	TAILQ_ENTRY(pf_fragment) frag_next;
	uint32_t	fr_timeout;
	uint16_t	fr_maxlen;	/* maximum length of single fragment */
	u_int16_t	fr_holes;	/* number of holes in the queue */
	TAILQ_HEAD(pf_fragq, pf_frent) fr_queue;
};
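/*
 * Illustration (not part of the original source): with PF_FRAG_ENTRY_POINTS
 * defined as 16 in pfvar.h (the value the CTASSERTs below assume), the 64k
 * offset space is split into 16 buckets of 4096 octets.  fr_firstoff[i]
 * caches the first queued fragment whose offset falls into bucket i and
 * fr_entries[i] counts the fragments in that bucket; a fragment at offset
 * 12000 lands in bucket 12000 / 4096 = 2.  This lets insertion start close
 * to the right position instead of walking fr_queue from its head.
 */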
struct pf_fragment_tag {
	uint16_t	ft_hdrlen;	/* header length of reassembled pkt */
	uint16_t	ft_extoff;	/* last extension header offset or 0 */
	uint16_t	ft_maxlen;	/* maximum fragment payload length */
	uint32_t	ft_id;		/* fragment id */
};

VNET_DEFINE_STATIC(struct mtx, pf_frag_mtx);
#define V_pf_frag_mtx		VNET(pf_frag_mtx)
#define PF_FRAG_LOCK()		mtx_lock(&V_pf_frag_mtx)
#define PF_FRAG_UNLOCK()	mtx_unlock(&V_pf_frag_mtx)
#define PF_FRAG_ASSERT()	mtx_assert(&V_pf_frag_mtx, MA_OWNED)

VNET_DEFINE(uma_zone_t, pf_state_scrub_z);	/* XXX: shared with pfsync */

VNET_DEFINE_STATIC(uma_zone_t, pf_frent_z);
#define	V_pf_frent_z	VNET(pf_frent_z)
VNET_DEFINE_STATIC(uma_zone_t, pf_frag_z);
#define	V_pf_frag_z	VNET(pf_frag_z)

TAILQ_HEAD(pf_fragqueue, pf_fragment);
TAILQ_HEAD(pf_cachequeue, pf_fragment);
VNET_DEFINE_STATIC(struct pf_fragqueue, pf_fragqueue);
#define	V_pf_fragqueue	VNET(pf_fragqueue)
RB_HEAD(pf_frag_tree, pf_fragment);
VNET_DEFINE_STATIC(struct pf_frag_tree, pf_frag_tree);
#define	V_pf_frag_tree	VNET(pf_frag_tree)
static int		 pf_frag_compare(struct pf_fragment *,
			    struct pf_fragment *);
static RB_PROTOTYPE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
static RB_GENERATE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);

static void	pf_flush_fragments(void);
static void	pf_free_fragment(struct pf_fragment *);
static void	pf_remove_fragment(struct pf_fragment *);

static struct pf_frent *pf_create_fragment(u_short *);
static int	pf_frent_holes(struct pf_frent *frent);
static struct pf_fragment *pf_find_fragment(struct pf_fragment_cmp *key,
		    struct pf_frag_tree *tree);
static inline int	pf_frent_index(struct pf_frent *);
static int	pf_frent_insert(struct pf_fragment *,
		    struct pf_frent *, struct pf_frent *);
void		pf_frent_remove(struct pf_fragment *,
		    struct pf_frent *);
struct pf_frent	*pf_frent_previous(struct pf_fragment *,
		    struct pf_frent *);
static struct pf_fragment *pf_fillup_fragment(struct pf_fragment_cmp *,
		    struct pf_frent *, u_short *);
static struct mbuf *pf_join_fragment(struct pf_fragment *);
#ifdef INET
static int	pf_reassemble(struct mbuf **, int, u_short *);
#endif	/* INET */
#ifdef INET6
static int	pf_reassemble6(struct mbuf **,
		    struct ip6_frag *, uint16_t, uint16_t, u_short *);
#endif	/* INET6 */

#define	DPFPRINTF(x) do {				\
	if (V_pf_status.debug >= PF_DEBUG_MISC) {	\
		printf("%s: ", __func__);		\
		printf x ;				\
	}						\
} while(0)

#ifdef INET
static void
pf_ip2key(struct ip *ip, int dir, struct pf_fragment_cmp *key)
{

	key->frc_src.v4 = ip->ip_src;
	key->frc_dst.v4 = ip->ip_dst;
	key->frc_af = AF_INET;
	key->frc_proto = ip->ip_p;
	key->frc_id = ip->ip_id;
}
#endif	/* INET */

void
pf_normalize_init(void)
{

	V_pf_frag_z = uma_zcreate("pf frags", sizeof(struct pf_fragment),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	V_pf_frent_z = uma_zcreate("pf frag entries", sizeof(struct pf_frent),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	V_pf_state_scrub_z = uma_zcreate("pf state scrubs",
	    sizeof(struct pf_state_scrub), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);

	mtx_init(&V_pf_frag_mtx, "pf fragments", NULL, MTX_DEF);

	V_pf_limits[PF_LIMIT_FRAGS].zone = V_pf_frent_z;
	V_pf_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT;
	uma_zone_set_max(V_pf_frent_z, PFFRAG_FRENT_HIWAT);
	uma_zone_set_warning(V_pf_frent_z, "PF frag entries limit reached");

	TAILQ_INIT(&V_pf_fragqueue);
}

void
pf_normalize_cleanup(void)
{

	uma_zdestroy(V_pf_state_scrub_z);
	uma_zdestroy(V_pf_frent_z);
	uma_zdestroy(V_pf_frag_z);

	mtx_destroy(&V_pf_frag_mtx);
}

static int
pf_frag_compare(struct pf_fragment *a, struct pf_fragment *b)
{
	int	diff;

	if ((diff = a->fr_id - b->fr_id) != 0)
		return (diff);
	if ((diff = a->fr_proto - b->fr_proto) != 0)
		return (diff);
	if ((diff = a->fr_af - b->fr_af) != 0)
		return (diff);
	if ((diff = pf_addr_cmp(&a->fr_src, &b->fr_src, a->fr_af)) != 0)
		return (diff);
	if ((diff = pf_addr_cmp(&a->fr_dst, &b->fr_dst, a->fr_af)) != 0)
		return (diff);
	return (0);
}

void
pf_purge_expired_fragments(void)
{
	u_int32_t	expire = time_uptime -
	    V_pf_default_rule.timeout[PFTM_FRAG];

	pf_purge_fragments(expire);
}

void
pf_purge_fragments(uint32_t expire)
{
	struct pf_fragment	*frag;

	PF_FRAG_LOCK();
	while ((frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue)) != NULL) {
		if (frag->fr_timeout > expire)
			break;

		DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
		pf_free_fragment(frag);
	}

	PF_FRAG_UNLOCK();
}

/*
 * Try to flush old fragments to make space for new ones
 */
static void
pf_flush_fragments(void)
{
	struct pf_fragment	*frag;
	int			 goal;

	PF_FRAG_ASSERT();

	goal = uma_zone_get_cur(V_pf_frent_z) * 9 / 10;
	DPFPRINTF(("trying to free %d frag entries\n", goal));
	while (goal < uma_zone_get_cur(V_pf_frent_z)) {
		frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue);
		if (frag)
			pf_free_fragment(frag);
		else
			break;
	}
}
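/*
 * Worked example (illustrative): if the frent zone currently holds 5000
 * entries, the goal above becomes 5000 * 9 / 10 = 4500.  The loop then
 * frees the least recently used fragment queue (the tail of
 * V_pf_fragqueue) until the zone drops to the goal, i.e. roughly 10% of
 * the entries are reclaimed per flush.
 */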
/* Frees the fragments and all associated entries */
static void
pf_free_fragment(struct pf_fragment *frag)
{
	struct pf_frent	*frent;

	PF_FRAG_ASSERT();

	/* Free all fragments */
	for (frent = TAILQ_FIRST(&frag->fr_queue); frent;
	    frent = TAILQ_FIRST(&frag->fr_queue)) {
		TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);

		m_freem(frent->fe_m);
		uma_zfree(V_pf_frent_z, frent);
	}

	pf_remove_fragment(frag);
}

static struct pf_fragment *
pf_find_fragment(struct pf_fragment_cmp *key, struct pf_frag_tree *tree)
{
	struct pf_fragment	*frag;

	PF_FRAG_ASSERT();

	frag = RB_FIND(pf_frag_tree, tree, (struct pf_fragment *)key);
	if (frag != NULL) {
		/* XXX Are we sure we want to update the timeout? */
		frag->fr_timeout = time_uptime;
		TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);
		TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next);
	}

	return (frag);
}

/* Removes a fragment from the fragment queue and frees the fragment */
static void
pf_remove_fragment(struct pf_fragment *frag)
{

	PF_FRAG_ASSERT();
	KASSERT(frag, ("frag != NULL"));

	RB_REMOVE(pf_frag_tree, &V_pf_frag_tree, frag);
	TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);
	uma_zfree(V_pf_frag_z, frag);
}

static struct pf_frent *
pf_create_fragment(u_short *reason)
{
	struct pf_frent *frent;

	PF_FRAG_ASSERT();

	frent = uma_zalloc(V_pf_frent_z, M_NOWAIT);
	if (frent == NULL) {
		pf_flush_fragments();
		frent = uma_zalloc(V_pf_frent_z, M_NOWAIT);
		if (frent == NULL) {
			REASON_SET(reason, PFRES_MEMORY);
			return (NULL);
		}
	}

	return (frent);
}

/*
 * Calculate the additional holes that were created in the fragment
 * queue by inserting this fragment.  A fragment in the middle
 * creates one more hole by splitting.  For each connected side,
 * it loses one hole.
 * Fragment entry must be in the queue when calling this function.
 */
static int
pf_frent_holes(struct pf_frent *frent)
{
	struct pf_frent *prev = TAILQ_PREV(frent, pf_fragq, fr_next);
	struct pf_frent *next = TAILQ_NEXT(frent, fr_next);
	int holes = 1;

	if (prev == NULL) {
		if (frent->fe_off == 0)
			holes--;
	} else {
		KASSERT(frent->fe_off != 0, ("frent->fe_off != 0"));
		if (frent->fe_off == prev->fe_off + prev->fe_len)
			holes--;
	}
	if (next == NULL) {
		if (!frent->fe_mff)
			holes--;
	} else {
		KASSERT(frent->fe_mff, ("frent->fe_mff"));
		if (next->fe_off == frent->fe_off + frent->fe_len)
			holes--;
	}
	return holes;
}

static inline int
pf_frent_index(struct pf_frent *frent)
{
	/*
	 * We have an array of 16 entry points to the queue.  A full size
	 * 65535 octet IP packet can have 8192 fragments.  So the queue
	 * traversal length is at most 512 and at most 16 entry points are
	 * checked.  We need 128 additional bytes on a 64 bit architecture.
	 */
	CTASSERT(((u_int16_t)0xffff &~ 7) / (0x10000 / PF_FRAG_ENTRY_POINTS) ==
	    16 - 1);
	CTASSERT(((u_int16_t)0xffff >> 3) / PF_FRAG_ENTRY_POINTS == 512 - 1);

	return frent->fe_off / (0x10000 / PF_FRAG_ENTRY_POINTS);
}
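/*
 * Worked example (illustrative): with PF_FRAG_ENTRY_POINTS == 16, each
 * bucket spans 0x10000 / 16 = 4096 octets.  Offset 0 maps to index 0,
 * offset 4095 still to index 0, offset 4096 to index 1, and the highest
 * possible fragment offset, 0xffff & ~7 = 65528, to index 15.  The
 * CTASSERTs above pin down exactly these bounds: the largest index is 15
 * and one bucket covers at most 512 minimum-sized (8 octet) fragments.
 */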
static int
pf_frent_insert(struct pf_fragment *frag, struct pf_frent *frent,
    struct pf_frent *prev)
{
	int	index;

	CTASSERT(PF_FRAG_ENTRY_LIMIT <= 0xff);

	/*
	 * A packet has at most 65536 octets.  With 16 entry points, each one
	 * spans 4096 octets.  We limit these to 64 fragments each, which
	 * means on average every fragment must have at least 64 octets.
	 */
	index = pf_frent_index(frent);
	if (frag->fr_entries[index] >= PF_FRAG_ENTRY_LIMIT)
		return ENOBUFS;
	frag->fr_entries[index]++;

	if (prev == NULL) {
		TAILQ_INSERT_HEAD(&frag->fr_queue, frent, fr_next);
	} else {
		KASSERT(prev->fe_off + prev->fe_len <= frent->fe_off,
		    ("overlapping fragment"));
		TAILQ_INSERT_AFTER(&frag->fr_queue, prev, frent, fr_next);
	}

	if (frag->fr_firstoff[index] == NULL) {
		KASSERT(prev == NULL || pf_frent_index(prev) < index,
		    ("prev == NULL || pf_frent_index(prev) < index"));
		frag->fr_firstoff[index] = frent;
	} else {
		if (frent->fe_off < frag->fr_firstoff[index]->fe_off) {
			KASSERT(prev == NULL || pf_frent_index(prev) < index,
			    ("prev == NULL || pf_frent_index(prev) < index"));
			frag->fr_firstoff[index] = frent;
		} else {
			KASSERT(prev != NULL, ("prev != NULL"));
			KASSERT(pf_frent_index(prev) == index,
			    ("pf_frent_index(prev) == index"));
		}
	}

	frag->fr_holes += pf_frent_holes(frent);

	return 0;
}

void
pf_frent_remove(struct pf_fragment *frag, struct pf_frent *frent)
{
#ifdef INVARIANTS
	struct pf_frent *prev = TAILQ_PREV(frent, pf_fragq, fr_next);
#endif
	struct pf_frent *next = TAILQ_NEXT(frent, fr_next);
	int index;

	frag->fr_holes -= pf_frent_holes(frent);

	index = pf_frent_index(frent);
	KASSERT(frag->fr_firstoff[index] != NULL, ("frent not found"));
	if (frag->fr_firstoff[index]->fe_off == frent->fe_off) {
		if (next == NULL) {
			frag->fr_firstoff[index] = NULL;
		} else {
			KASSERT(frent->fe_off + frent->fe_len <= next->fe_off,
			    ("overlapping fragment"));
			if (pf_frent_index(next) == index) {
				frag->fr_firstoff[index] = next;
			} else {
				frag->fr_firstoff[index] = NULL;
			}
		}
	} else {
		KASSERT(frag->fr_firstoff[index]->fe_off < frent->fe_off,
		    ("frag->fr_firstoff[index]->fe_off < frent->fe_off"));
		KASSERT(prev != NULL, ("prev != NULL"));
		KASSERT(prev->fe_off + prev->fe_len <= frent->fe_off,
		    ("overlapping fragment"));
		KASSERT(pf_frent_index(prev) == index,
		    ("pf_frent_index(prev) == index"));
	}

	TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);

	KASSERT(frag->fr_entries[index] > 0, ("No fragments remaining"));
	frag->fr_entries[index]--;
}
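/*
 * Hole accounting, worked example (illustrative): a fresh queue starts
 * with fr_holes == 1, one hole covering the whole packet.  The first
 * fragment at offset 0 (with more-fragments set) connects on the left
 * only, so pf_frent_holes() returns 1 - 1 = 0 and fr_holes stays 1.  A
 * middle fragment touching neither neighbor splits a hole in two and
 * returns +1.  Only when all fragments are adjacent and the last one has
 * fe_mff clear does fr_holes reach 0, signalling a complete packet.
 */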
struct pf_frent *
pf_frent_previous(struct pf_fragment *frag, struct pf_frent *frent)
{
	struct pf_frent *prev, *next;
	int index;

	/*
	 * If there are no fragments after frag, take the final one.  Assume
	 * that the global queue is not empty.
	 */
	prev = TAILQ_LAST(&frag->fr_queue, pf_fragq);
	KASSERT(prev != NULL, ("prev != NULL"));
	if (prev->fe_off <= frent->fe_off)
		return prev;
	/*
	 * We want to find a fragment entry that is before frag, but still
	 * close to it.  Find the first fragment entry that is in the same
	 * entry point or in the first entry point after that.  As we have
	 * already checked that there are entries behind frag, this will
	 * succeed.
	 */
	for (index = pf_frent_index(frent); index < PF_FRAG_ENTRY_POINTS;
	    index++) {
		prev = frag->fr_firstoff[index];
		if (prev != NULL)
			break;
	}
	KASSERT(prev != NULL, ("prev != NULL"));
	/*
	 * In prev we may have a fragment from the same entry point that is
	 * before frent, or one that is just one position behind frent.
	 * In the latter case, we go back one step and have the predecessor.
	 * There may be none if the new fragment will be the first one.
	 */
	if (prev->fe_off > frent->fe_off) {
		prev = TAILQ_PREV(prev, pf_fragq, fr_next);
		if (prev == NULL)
			return NULL;
		KASSERT(prev->fe_off <= frent->fe_off,
		    ("prev->fe_off <= frent->fe_off"));
		return prev;
	}
	/*
	 * In prev is the first fragment of the entry point.  The offset
	 * of frag is behind it.  Find the closest previous fragment.
	 */
	for (next = TAILQ_NEXT(prev, fr_next); next != NULL;
	    next = TAILQ_NEXT(next, fr_next)) {
		if (next->fe_off > frent->fe_off)
			break;
		prev = next;
	}
	return prev;
}
static struct pf_fragment *
pf_fillup_fragment(struct pf_fragment_cmp *key, struct pf_frent *frent,
    u_short *reason)
{
	struct pf_frent		*after, *next, *prev;
	struct pf_fragment	*frag;
	uint16_t		 total;
	int			 old_index, new_index;

	PF_FRAG_ASSERT();

	/* No empty fragments. */
	if (frent->fe_len == 0) {
		DPFPRINTF(("bad fragment: len 0\n"));
		goto bad_fragment;
	}

	/* All fragments are 8 byte aligned. */
	if (frent->fe_mff && (frent->fe_len & 0x7)) {
		DPFPRINTF(("bad fragment: mff and len %d\n", frent->fe_len));
		goto bad_fragment;
	}

	/* Respect maximum length, IP_MAXPACKET == IPV6_MAXPACKET. */
	if (frent->fe_off + frent->fe_len > IP_MAXPACKET) {
		DPFPRINTF(("bad fragment: max packet %d\n",
		    frent->fe_off + frent->fe_len));
		goto bad_fragment;
	}

	DPFPRINTF((key->frc_af == AF_INET ?
	    "reass frag %d @ %d-%d\n" : "reass frag %#08x @ %d-%d\n",
	    key->frc_id, frent->fe_off, frent->fe_off + frent->fe_len));

	/* Fully buffer all of the fragments in this fragment queue. */
	frag = pf_find_fragment(key, &V_pf_frag_tree);

	/* Create a new reassembly queue for this packet. */
	if (frag == NULL) {
		frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
		if (frag == NULL) {
			pf_flush_fragments();
			frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
			if (frag == NULL) {
				REASON_SET(reason, PFRES_MEMORY);
				goto drop_fragment;
			}
		}

		*(struct pf_fragment_cmp *)frag = *key;
		memset(frag->fr_firstoff, 0, sizeof(frag->fr_firstoff));
		memset(frag->fr_entries, 0, sizeof(frag->fr_entries));
		frag->fr_timeout = time_uptime;
		frag->fr_maxlen = frent->fe_len;
		frag->fr_holes = 1;
		TAILQ_INIT(&frag->fr_queue);

		RB_INSERT(pf_frag_tree, &V_pf_frag_tree, frag);
		TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next);

		/* We do not have a previous fragment, cannot fail. */
		pf_frent_insert(frag, frent, NULL);

		return (frag);
	}

	KASSERT(!TAILQ_EMPTY(&frag->fr_queue), ("!TAILQ_EMPTY()->fr_queue"));

	/* Remember maximum fragment len for refragmentation. */
	if (frent->fe_len > frag->fr_maxlen)
		frag->fr_maxlen = frent->fe_len;

	/* Maximum data we have seen already. */
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
	    TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;

	/* Non terminal fragments must have more fragments flag. */
	if (frent->fe_off + frent->fe_len < total && !frent->fe_mff)
		goto bad_fragment;

	/* Check if we saw the last fragment already. */
	if (!TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_mff) {
		if (frent->fe_off + frent->fe_len > total ||
		    (frent->fe_off + frent->fe_len == total && frent->fe_mff))
			goto bad_fragment;
	} else {
		if (frent->fe_off + frent->fe_len == total && !frent->fe_mff)
			goto bad_fragment;
	}

	/* Find neighbors for newly inserted fragment */
	prev = pf_frent_previous(frag, frent);
	if (prev == NULL) {
		after = TAILQ_FIRST(&frag->fr_queue);
		KASSERT(after != NULL, ("after != NULL"));
	} else {
		after = TAILQ_NEXT(prev, fr_next);
	}

	if (prev != NULL && prev->fe_off + prev->fe_len > frent->fe_off) {
		uint16_t precut;

		precut = prev->fe_off + prev->fe_len - frent->fe_off;
		if (precut >= frent->fe_len)
			goto bad_fragment;
		DPFPRINTF(("overlap -%d\n", precut));
		m_adj(frent->fe_m, precut);
		frent->fe_off += precut;
		frent->fe_len -= precut;
	}

	for (; after != NULL && frent->fe_off + frent->fe_len > after->fe_off;
	    after = next) {
		uint16_t aftercut;

		aftercut = frent->fe_off + frent->fe_len - after->fe_off;
		DPFPRINTF(("adjust overlap %d\n", aftercut));
		if (aftercut < after->fe_len) {
			m_adj(after->fe_m, aftercut);
			old_index = pf_frent_index(after);
			after->fe_off += aftercut;
			after->fe_len -= aftercut;
			new_index = pf_frent_index(after);
			if (old_index != new_index) {
				DPFPRINTF(("frag index %d, new %d",
				    old_index, new_index));
				/* Fragment switched queue as fe_off changed */
				after->fe_off -= aftercut;
				after->fe_len += aftercut;
				/* Remove restored fragment from old queue */
				pf_frent_remove(frag, after);
				after->fe_off += aftercut;
				after->fe_len -= aftercut;
				/* Insert into correct queue */
				if (pf_frent_insert(frag, after, prev)) {
					DPFPRINTF(
					    ("fragment requeue limit exceeded"));
					m_freem(after->fe_m);
					uma_zfree(V_pf_frent_z, after);
					/* There is no way to recover */
					goto bad_fragment;
				}
			}
			break;
		}

		/* This fragment is completely overlapped, lose it. */
		next = TAILQ_NEXT(after, fr_next);
		pf_frent_remove(frag, after);
		m_freem(after->fe_m);
		uma_zfree(V_pf_frent_z, after);
	}

	/* If part of the queue gets too long, there is no way to recover. */
	if (pf_frent_insert(frag, frent, prev)) {
		DPFPRINTF(("fragment queue limit exceeded\n"));
		goto bad_fragment;
	}

	return (frag);

bad_fragment:
	REASON_SET(reason, PFRES_FRAG);
drop_fragment:
	uma_zfree(V_pf_frent_z, frent);
	return (NULL);
}
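/*
 * Overlap handling, worked example (illustrative): suppose the queue
 * already holds a fragment covering octets 0-1479 and a new fragment
 * arrives claiming 1400-2879.  The precut is 1480 - 1400 = 80, so the
 * first 80 octets of the new mbuf are trimmed and the entry becomes
 * 1480-2879: data that is already queued always wins.  Conversely, a
 * queued fragment completely covered by the new one is dropped in the
 * aftercut loop above.
 */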
static struct mbuf *
pf_join_fragment(struct pf_fragment *frag)
{
	struct mbuf *m, *m2;
	struct pf_frent	*frent, *next;

	frent = TAILQ_FIRST(&frag->fr_queue);
	next = TAILQ_NEXT(frent, fr_next);

	m = frent->fe_m;
	m_adj(m, (frent->fe_hdrlen + frent->fe_len) - m->m_pkthdr.len);
	uma_zfree(V_pf_frent_z, frent);
	for (frent = next; frent != NULL; frent = next) {
		next = TAILQ_NEXT(frent, fr_next);

		m2 = frent->fe_m;
		/* Strip off ip header. */
		m_adj(m2, frent->fe_hdrlen);
		/* Strip off any trailing bytes. */
		m_adj(m2, frent->fe_len - m2->m_pkthdr.len);

		uma_zfree(V_pf_frent_z, frent);
		m_cat(m, m2);
	}

	/* Remove from fragment queue. */
	pf_remove_fragment(frag);

	return (m);
}

#ifdef INET
static int
pf_reassemble(struct mbuf **m0, int dir, u_short *reason)
{
	struct mbuf		*m = *m0;
	struct ip		*ip = mtod(m, struct ip *);
	struct pf_frent		*frent;
	struct pf_fragment	*frag;
	struct pf_fragment_cmp	 key;
	uint16_t		 total, hdrlen;

	/* Get an entry for the fragment queue */
	if ((frent = pf_create_fragment(reason)) == NULL)
		return (PF_DROP);

	frent->fe_m = m;
	frent->fe_hdrlen = ip->ip_hl << 2;
	frent->fe_extoff = 0;
	frent->fe_len = ntohs(ip->ip_len) - (ip->ip_hl << 2);
	frent->fe_off = (ntohs(ip->ip_off) & IP_OFFMASK) << 3;
	frent->fe_mff = ntohs(ip->ip_off) & IP_MF;

	pf_ip2key(ip, dir, &key);

	if ((frag = pf_fillup_fragment(&key, frent, reason)) == NULL)
		return (PF_DROP);

	/* The mbuf is part of the fragment entry, no direct free or access */
	m = *m0 = NULL;

	if (frag->fr_holes) {
		DPFPRINTF(("frag %d, holes %d\n", frag->fr_id, frag->fr_holes));
		return (PF_PASS);	/* drop because *m0 is NULL, no error */
	}

	/* We have all the data */
	frent = TAILQ_FIRST(&frag->fr_queue);
	KASSERT(frent != NULL, ("frent != NULL"));
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
	    TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
	hdrlen = frent->fe_hdrlen;

	m = *m0 = pf_join_fragment(frag);
	frag = NULL;

	if (m->m_flags & M_PKTHDR) {
		int plen = 0;
		for (m = *m0; m; m = m->m_next)
			plen += m->m_len;
		m = *m0;
		m->m_pkthdr.len = plen;
	}

	ip = mtod(m, struct ip *);
	ip->ip_sum = pf_cksum_fixup(ip->ip_sum, ip->ip_len,
	    htons(hdrlen + total), 0);
	ip->ip_len = htons(hdrlen + total);
	ip->ip_sum = pf_cksum_fixup(ip->ip_sum, ip->ip_off,
	    ip->ip_off & ~(IP_MF|IP_OFFMASK), 0);
	ip->ip_off &= ~(IP_MF|IP_OFFMASK);

	if (hdrlen + total > IP_MAXPACKET) {
		DPFPRINTF(("drop: too big: %d\n", total));
		ip->ip_len = 0;
		REASON_SET(reason, PFRES_SHORT);
		/* PF_DROP requires a valid mbuf *m0 in pf_test() */
		return (PF_DROP);
	}

	DPFPRINTF(("complete: %p(%d)\n", m, ntohs(ip->ip_len)));
	return (PF_PASS);
}
#endif	/* INET */
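/*
 * Note on the header fixups in pf_reassemble() (illustrative numbers):
 * rather than recomputing the IP checksum, pf_cksum_fixup() applies the
 * RFC 1624 incremental update HC' = ~(~HC + ~m + m') per changed 16-bit
 * field.  E.g. when the first fragment's ip_len of 1500 is replaced by a
 * reassembled length of 4020 and ip_off loses its MF bit, only those two
 * old/new pairs are folded into the existing sum.
 */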
#ifdef INET6
static int
pf_reassemble6(struct mbuf **m0, struct ip6_frag *fraghdr,
    uint16_t hdrlen, uint16_t extoff, u_short *reason)
{
	struct mbuf		*m = *m0;
	struct ip6_hdr		*ip6 = mtod(m, struct ip6_hdr *);
	struct pf_frent		*frent;
	struct pf_fragment	*frag;
	struct pf_fragment_cmp	 key;
	struct m_tag		*mtag;
	struct pf_fragment_tag	*ftag;
	int			 off;
	uint32_t		 frag_id;
	uint16_t		 total, maxlen;
	uint8_t			 proto;

	PF_FRAG_LOCK();

	/* Get an entry for the fragment queue. */
	if ((frent = pf_create_fragment(reason)) == NULL) {
		PF_FRAG_UNLOCK();
		return (PF_DROP);
	}

	frent->fe_m = m;
	frent->fe_hdrlen = hdrlen;
	frent->fe_extoff = extoff;
	frent->fe_len = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - hdrlen;
	frent->fe_off = ntohs(fraghdr->ip6f_offlg & IP6F_OFF_MASK);
	frent->fe_mff = fraghdr->ip6f_offlg & IP6F_MORE_FRAG;

	key.frc_src.v6 = ip6->ip6_src;
	key.frc_dst.v6 = ip6->ip6_dst;
	key.frc_af = AF_INET6;
	/* Only the first fragment's protocol is relevant. */
	key.frc_proto = 0;
	key.frc_id = fraghdr->ip6f_ident;

	if ((frag = pf_fillup_fragment(&key, frent, reason)) == NULL) {
		PF_FRAG_UNLOCK();
		return (PF_DROP);
	}

	/* The mbuf is part of the fragment entry, no direct free or access. */
	m = *m0 = NULL;

	if (frag->fr_holes) {
		DPFPRINTF(("frag %d, holes %d\n", frag->fr_id,
		    frag->fr_holes));
		PF_FRAG_UNLOCK();
		return (PF_PASS);	/* Drop because *m0 is NULL, no error. */
	}

	/* We have all the data. */
	frent = TAILQ_FIRST(&frag->fr_queue);
	KASSERT(frent != NULL, ("frent != NULL"));
	extoff = frent->fe_extoff;
	maxlen = frag->fr_maxlen;
	frag_id = frag->fr_id;
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
	    TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
	hdrlen = frent->fe_hdrlen - sizeof(struct ip6_frag);

	m = *m0 = pf_join_fragment(frag);
	frag = NULL;

	PF_FRAG_UNLOCK();

	/* Take protocol from first fragment header. */
	m = m_getptr(m, hdrlen + offsetof(struct ip6_frag, ip6f_nxt), &off);
	KASSERT(m, ("%s: short mbuf chain", __func__));
	proto = *(mtod(m, uint8_t *) + off);
	m = *m0;

	/* Delete frag6 header */
	if (ip6_deletefraghdr(m, hdrlen, M_NOWAIT) != 0)
		goto fail;

	if (m->m_flags & M_PKTHDR) {
		int plen = 0;
		for (m = *m0; m; m = m->m_next)
			plen += m->m_len;
		m = *m0;
		m->m_pkthdr.len = plen;
	}

	if ((mtag = m_tag_get(PACKET_TAG_PF_REASSEMBLED,
	    sizeof(struct pf_fragment_tag), M_NOWAIT)) == NULL)
		goto fail;
	ftag = (struct pf_fragment_tag *)(mtag + 1);
	ftag->ft_hdrlen = hdrlen;
	ftag->ft_extoff = extoff;
	ftag->ft_maxlen = maxlen;
	ftag->ft_id = frag_id;
	m_tag_prepend(m, mtag);

	ip6 = mtod(m, struct ip6_hdr *);
	ip6->ip6_plen = htons(hdrlen - sizeof(struct ip6_hdr) + total);
	if (extoff) {
		/* Write protocol into next field of last extension header. */
		m = m_getptr(m, extoff + offsetof(struct ip6_ext, ip6e_nxt),
		    &off);
		KASSERT(m, ("%s: short mbuf chain", __func__));
		*(mtod(m, char *) + off) = proto;
		m = *m0;
	} else
		ip6->ip6_nxt = proto;

	if (hdrlen - sizeof(struct ip6_hdr) + total > IPV6_MAXPACKET) {
		DPFPRINTF(("drop: too big: %d\n", total));
		ip6->ip6_plen = 0;
		REASON_SET(reason, PFRES_SHORT);
		/* PF_DROP requires a valid mbuf *m0 in pf_test6(). */
		return (PF_DROP);
	}

	DPFPRINTF(("complete: %p(%d)\n", m, ntohs(ip6->ip6_plen)));
	return (PF_PASS);

fail:
	REASON_SET(reason, PFRES_MEMORY);
	/* PF_DROP requires a valid mbuf *m0 in pf_test6(), will free later. */
	return (PF_DROP);
}
#endif	/* INET6 */
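/*
 * Why the mbuf tag (summary, inferred from the code above): IPv6 routers
 * must not fragment in the network, so once pf has reassembled and
 * filtered the packet it has to be refragmented before it leaves.  The
 * pf_fragment_tag prepended above carries what pf_refragment6() below
 * needs: the unfragmentable header length, the offset of the last
 * extension header, the largest fragment payload seen (reused as the new
 * fragment size), and the original fragment id.
 */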
#ifdef INET6
int
pf_max_frag_size(struct mbuf *m)
{
	struct m_tag *tag;
	struct pf_fragment_tag *ftag;

	tag = m_tag_find(m, PACKET_TAG_PF_REASSEMBLED, NULL);
	if (tag == NULL)
		return (m->m_pkthdr.len);

	ftag = (struct pf_fragment_tag *)(tag + 1);

	return (ftag->ft_maxlen);
}

int
pf_refragment6(struct ifnet *ifp, struct mbuf **m0, struct m_tag *mtag,
    bool forward)
{
	struct mbuf		*m = *m0, *t;
	struct ip6_hdr		*hdr;
	struct pf_fragment_tag	*ftag = (struct pf_fragment_tag *)(mtag + 1);
	struct pf_pdesc		 pd;
	uint32_t		 frag_id;
	uint16_t		 hdrlen, extoff, maxlen;
	uint8_t			 proto;
	int			 error, action;

	hdrlen = ftag->ft_hdrlen;
	extoff = ftag->ft_extoff;
	maxlen = ftag->ft_maxlen;
	frag_id = ftag->ft_id;
	m_tag_delete(m, mtag);
	mtag = NULL;
	ftag = NULL;

	if (extoff) {
		int off;

		/* Use protocol from next field of last extension header */
		m = m_getptr(m, extoff + offsetof(struct ip6_ext, ip6e_nxt),
		    &off);
		KASSERT((m != NULL), ("pf_refragment6: short mbuf chain"));
		proto = *(mtod(m, uint8_t *) + off);
		*(mtod(m, char *) + off) = IPPROTO_FRAGMENT;
		m = *m0;
	} else {
		hdr = mtod(m, struct ip6_hdr *);
		proto = hdr->ip6_nxt;
		hdr->ip6_nxt = IPPROTO_FRAGMENT;
	}

	/* In case of link-local traffic we'll need a scope set. */
	hdr = mtod(m, struct ip6_hdr *);

	in6_setscope(&hdr->ip6_src, ifp, NULL);
	in6_setscope(&hdr->ip6_dst, ifp, NULL);

	/* The MTU must be a multiple of 8 bytes, or we risk doing the
	 * fragmentation wrong. */
	maxlen = maxlen & ~7;

	/*
	 * Maxlen may be less than 8 if there was only a single
	 * fragment.  As it was fragmented before, add a fragment
	 * header also for a single fragment.  If total or maxlen
	 * is less than 8, ip6_fragment() will return EMSGSIZE and
	 * we drop the packet.
	 */
	error = ip6_fragment(ifp, m, hdrlen, proto, maxlen, frag_id);
	m = (*m0)->m_nextpkt;
	(*m0)->m_nextpkt = NULL;
	if (error == 0) {
		/* The first mbuf contains the unfragmented packet. */
		m_freem(*m0);
		*m0 = NULL;
		action = PF_PASS;
	} else {
		/* Drop expects an mbuf to free. */
		DPFPRINTF(("refragment error %d\n", error));
		action = PF_DROP;
	}
	for (; m; m = t) {
		t = m->m_nextpkt;
		m->m_nextpkt = NULL;
		m->m_flags |= M_SKIP_FIREWALL;
		memset(&pd, 0, sizeof(pd));
		pd.pf_mtag = pf_find_mtag(m);
		if (error == 0)
			if (forward) {
				MPASS(m->m_pkthdr.rcvif != NULL);
				ip6_forward(m, 0);
			} else {
				(void)ip6_output(m, NULL, NULL, 0, NULL, NULL,
				    NULL);
			}
		else
			m_freem(m);
	}

	return (action);
}
#endif /* INET6 */
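/*
 * Worked example for the refragmentation size (illustrative): if the
 * largest original fragment carried 1452 payload octets, the rounding in
 * pf_refragment6() yields 1452 & ~7 = 1448, the largest multiple of 8
 * not exceeding what the original path demonstrably carried.
 * ip6_fragment() then emits fragments of at most 1448 payload octets, so
 * the rebuilt fragments never exceed the sender's observed size.
 */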
#ifdef INET
int
pf_normalize_ip(struct mbuf **m0, u_short *reason,
    struct pf_pdesc *pd)
{
	struct pf_krule	*r;
	struct ip	*h = mtod(*m0, struct ip *);
	int		 mff = (ntohs(h->ip_off) & IP_MF);
	int		 hlen = h->ip_hl << 2;
	u_int16_t	 fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
	u_int16_t	 max;
	int		 ip_len;
	int		 tag = -1;
	int		 verdict;
	bool		 scrub_compat;

	PF_RULES_RASSERT();

	MPASS(pd->m == *m0);

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	/*
	 * Check if there are any scrub rules, matching or not.
	 * Lack of scrub rules means:
	 *  - enforced packet normalization operation just like in OpenBSD
	 *  - fragment reassembly depends on V_pf_status.reass
	 * With scrub rules:
	 *  - packet normalization is performed if there is a matching scrub rule
	 *  - fragment reassembly is performed if the matching rule has no
	 *    PFRULE_FRAGMENT_NOREASS flag
	 */
	scrub_compat = (r != NULL);
	while (r != NULL) {
		pf_counter_u64_add(&r->evaluations, 1);
		if (pfi_kkif_match(r->kif, pd->kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP];
		else if (r->direction && r->direction != pd->dir)
			r = r->skip[PF_SKIP_DIR];
		else if (r->af && r->af != AF_INET)
			r = r->skip[PF_SKIP_AF];
		else if (r->proto && r->proto != h->ip_p)
			r = r->skip[PF_SKIP_PROTO];
		else if (PF_MISMATCHAW(&r->src.addr,
		    (struct pf_addr *)&h->ip_src.s_addr, AF_INET,
		    r->src.neg, pd->kif, M_GETFIB(pd->m)))
			r = r->skip[PF_SKIP_SRC_ADDR];
		else if (PF_MISMATCHAW(&r->dst.addr,
		    (struct pf_addr *)&h->ip_dst.s_addr, AF_INET,
		    r->dst.neg, NULL, M_GETFIB(pd->m)))
			r = r->skip[PF_SKIP_DST_ADDR];
		else if (r->match_tag && !pf_match_tag(pd->m, r, &tag,
		    pd->pf_mtag ? pd->pf_mtag->tag : 0))
			r = TAILQ_NEXT(r, entries);
		else
			break;
	}

	if (scrub_compat) {
		/* With scrub rules present IPv4 normalization happens only
		 * if one of the rules has matched and it's not a "no scrub" rule */
		if (r == NULL || r->action == PF_NOSCRUB)
			return (PF_PASS);

		pf_counter_u64_critical_enter();
		pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1);
		pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT], pd->tot_len);
		pf_counter_u64_critical_exit();
		pf_rule_to_actions(r, &pd->act);
	}

	/* Check for illegal packets */
	if (hlen < (int)sizeof(struct ip)) {
		REASON_SET(reason, PFRES_NORM);
		goto drop;
	}

	if (hlen > ntohs(h->ip_len)) {
		REASON_SET(reason, PFRES_NORM);
		goto drop;
	}

	/* Clear IP_DF if the rule uses the no-df option or we're in no-df mode */
	if (((!scrub_compat && V_pf_status.reass & PF_REASS_NODF) ||
	    (r != NULL && r->rule_flag & PFRULE_NODF)) &&
	    (h->ip_off & htons(IP_DF))
	) {
		u_int16_t ip_off = h->ip_off;

		h->ip_off &= htons(~IP_DF);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
	}

	/* We will need other tests here */
	if (!fragoff && !mff)
		goto no_fragment;

	/* We're dealing with a fragment now. Don't allow fragments
	 * with IP_DF to enter the cache. If the flag was cleared by
	 * no-df above, fine. Otherwise drop it.
	 */
	if (h->ip_off & htons(IP_DF)) {
		DPFPRINTF(("IP_DF\n"));
		goto bad;
	}

	ip_len = ntohs(h->ip_len) - hlen;

	/* All fragments are 8 byte aligned */
	if (mff && (ip_len & 0x7)) {
		DPFPRINTF(("mff and %d\n", ip_len));
		goto bad;
	}

	/* Respect maximum length */
	if (fragoff + ip_len > IP_MAXPACKET) {
		DPFPRINTF(("max packet %d\n", fragoff + ip_len));
		goto bad;
	}

	if ((!scrub_compat && V_pf_status.reass) ||
	    (r != NULL && !(r->rule_flag & PFRULE_FRAGMENT_NOREASS))
	) {
		max = fragoff + ip_len;

		/* Fully buffer all of the fragments
		 * Might return a completely reassembled mbuf, or NULL */
		PF_FRAG_LOCK();
		DPFPRINTF(("reass frag %d @ %d-%d\n", h->ip_id, fragoff, max));
		verdict = pf_reassemble(m0, pd->dir, reason);
		PF_FRAG_UNLOCK();

		if (verdict != PF_PASS)
			return (PF_DROP);

		pd->m = *m0;
		if (pd->m == NULL)
			return (PF_DROP);

		h = mtod(pd->m, struct ip *);

no_fragment:
		/* At this point, only IP_DF is allowed in ip_off */
		if (h->ip_off & ~htons(IP_DF)) {
			u_int16_t ip_off = h->ip_off;

			h->ip_off &= htons(IP_DF);
			h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
		}
	}

	return (PF_PASS);

bad:
	DPFPRINTF(("dropping bad fragment\n"));
	REASON_SET(reason, PFRES_FRAG);
drop:
	if (r != NULL && r->log)
		PFLOG_PACKET(PF_DROP, *reason, r, NULL, NULL, pd, 1);

	return (PF_DROP);
}
#endif

#ifdef INET6
int
pf_normalize_ip6(struct mbuf **m0, int off, u_short *reason,
    struct pf_pdesc *pd)
{
	struct pf_krule	*r;
	struct ip6_frag	 frag;
	bool		 scrub_compat;

	PF_RULES_RASSERT();

	pd->m = *m0;

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	/*
	 * Check if there are any scrub rules, matching or not.
	 * Lack of scrub rules means:
	 *  - enforced packet normalization operation just like in OpenBSD
	 * With scrub rules:
	 *  - packet normalization is performed if there is a matching scrub rule
	 * XXX: Fragment reassembly always performed for IPv6!
	 */
	scrub_compat = (r != NULL);
	while (r != NULL) {
		pf_counter_u64_add(&r->evaluations, 1);
		if (pfi_kkif_match(r->kif, pd->kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP];
		else if (r->direction && r->direction != pd->dir)
			r = r->skip[PF_SKIP_DIR];
		else if (r->af && r->af != AF_INET6)
			r = r->skip[PF_SKIP_AF];
		else if (r->proto && r->proto != pd->proto)
			r = r->skip[PF_SKIP_PROTO];
		else if (PF_MISMATCHAW(&r->src.addr,
		    (struct pf_addr *)&pd->src, AF_INET6,
		    r->src.neg, pd->kif, M_GETFIB(pd->m)))
			r = r->skip[PF_SKIP_SRC_ADDR];
		else if (PF_MISMATCHAW(&r->dst.addr,
		    (struct pf_addr *)&pd->dst, AF_INET6,
		    r->dst.neg, NULL, M_GETFIB(pd->m)))
			r = r->skip[PF_SKIP_DST_ADDR];
		else
			break;
	}

	if (scrub_compat) {
		/* With scrub rules present IPv6 normalization happens only
		 * if one of the rules has matched and it's not a "no scrub" rule */
		if (r == NULL || r->action == PF_NOSCRUB)
			return (PF_PASS);

		pf_counter_u64_critical_enter();
		pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1);
		pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT], pd->tot_len);
		pf_counter_u64_critical_exit();
		pf_rule_to_actions(r, &pd->act);
	}

	if (!pf_pull_hdr(pd->m, off, &frag, sizeof(frag), NULL, reason, AF_INET6))
		return (PF_DROP);

	/* Offset now points to data portion. */
	off += sizeof(frag);

	if (pd->virtual_proto == PF_VPROTO_FRAGMENT) {
		/* Returns PF_DROP or *m0 is NULL or completely reassembled
		 * mbuf. */
		if (pf_reassemble6(m0, &frag, off, pd->extoff, reason) != PF_PASS)
			return (PF_DROP);
		pd->m = *m0;
		if (pd->m == NULL)
			return (PF_DROP);
	}

	return (PF_PASS);
}
#endif /* INET6 */

int
pf_normalize_tcp(struct pf_pdesc *pd)
{
	struct pf_krule	*r, *rm = NULL;
	struct tcphdr	*th = &pd->hdr.tcp;
	int		 rewrite = 0;
	u_short		 reason;
	u_int16_t	 flags;
	sa_family_t	 af = pd->af;
	int		 srs;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	/* Check if there are any scrub rules.  Lack of scrub rules means
	 * enforced packet normalization operation just like in OpenBSD. */
	srs = (r != NULL);
	while (r != NULL) {
		pf_counter_u64_add(&r->evaluations, 1);
		if (pfi_kkif_match(r->kif, pd->kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP];
		else if (r->direction && r->direction != pd->dir)
			r = r->skip[PF_SKIP_DIR];
		else if (r->af && r->af != af)
			r = r->skip[PF_SKIP_AF];
		else if (r->proto && r->proto != pd->proto)
			r = r->skip[PF_SKIP_PROTO];
		else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
		    r->src.neg, pd->kif, M_GETFIB(pd->m)))
			r = r->skip[PF_SKIP_SRC_ADDR];
		else if (r->src.port_op && !pf_match_port(r->src.port_op,
		    r->src.port[0], r->src.port[1], th->th_sport))
			r = r->skip[PF_SKIP_SRC_PORT];
		else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
		    r->dst.neg, NULL, M_GETFIB(pd->m)))
			r = r->skip[PF_SKIP_DST_ADDR];
		else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
		    r->dst.port[0], r->dst.port[1], th->th_dport))
			r = r->skip[PF_SKIP_DST_PORT];
		else if (r->os_fingerprint != PF_OSFP_ANY && !pf_osfp_match(
		    pf_osfp_fingerprint(pd, th),
		    r->os_fingerprint))
			r = TAILQ_NEXT(r, entries);
		else {
			rm = r;
			break;
		}
	}

	if (srs) {
		/* With scrub rules present TCP normalization happens only
		 * if one of the rules has matched and it's not a "no scrub" rule */
		if (rm == NULL || rm->action == PF_NOSCRUB)
			return (PF_PASS);

		pf_counter_u64_critical_enter();
		pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1);
		pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT], pd->tot_len);
		pf_counter_u64_critical_exit();
		pf_rule_to_actions(rm, &pd->act);
	}

	if (rm && rm->rule_flag & PFRULE_REASSEMBLE_TCP)
		pd->flags |= PFDESC_TCP_NORM;

	flags = tcp_get_flags(th);
	if (flags & TH_SYN) {
		/* Illegal packet */
		if (flags & TH_RST)
			goto tcp_drop;

		if (flags & TH_FIN)
			goto tcp_drop;
	} else {
		/* Illegal packet */
		if (!(flags & (TH_ACK|TH_RST)))
			goto tcp_drop;
	}

	if (!(flags & TH_ACK)) {
		/* These flags are only valid if ACK is set */
		if ((flags & TH_FIN) || (flags & TH_PUSH) || (flags & TH_URG))
			goto tcp_drop;
	}

	/* Check for illegal header length */
	if (th->th_off < (sizeof(struct tcphdr) >> 2))
		goto tcp_drop;

	/* If flags changed, or reserved data set, then adjust */
	if (flags != tcp_get_flags(th) ||
	    (tcp_get_flags(th) & (TH_RES1|TH_RES2|TH_RES3)) != 0) {
		u_int16_t	ov, nv;

		ov = *(u_int16_t *)(&th->th_ack + 1);
		flags &= ~(TH_RES1 | TH_RES2 | TH_RES3);
		tcp_set_flags(th, flags);
		nv = *(u_int16_t *)(&th->th_ack + 1);

		th->th_sum = pf_proto_cksum_fixup(pd->m, th->th_sum, ov, nv, 0);
		rewrite = 1;
	}

	/* Remove urgent pointer, if TH_URG is not set */
	if (!(flags & TH_URG) && th->th_urp) {
		th->th_sum = pf_proto_cksum_fixup(pd->m, th->th_sum, th->th_urp,
		    0, 0);
		th->th_urp = 0;
		rewrite = 1;
	}

	/* copy back packet headers if we sanitized */
	if (rewrite)
		m_copyback(pd->m, pd->off, sizeof(*th), (caddr_t)th);

	return (PF_PASS);

tcp_drop:
	REASON_SET(&reason, PFRES_NORM);
	if (rm != NULL && r->log)
		PFLOG_PACKET(PF_DROP, reason, r, NULL, NULL, pd, 1);
	return (PF_DROP);
}
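/*
 * Examples of what the checks in pf_normalize_tcp() catch (illustrative):
 * SYN|FIN and SYN|RST combinations are dropped outright, as are segments
 * with neither ACK nor RST, or with FIN, PSH or URG but no ACK; such
 * combinations are staples of OS-fingerprinting scans.  Junk in the
 * reserved header bits is not fatal: the bits are cleared and the TCP
 * checksum incrementally fixed up before the header is copied back.
 */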
int
pf_normalize_tcp_init(struct pf_pdesc *pd, struct tcphdr *th,
    struct pf_state_peer *src, struct pf_state_peer *dst)
{
	u_int32_t tsval, tsecr;
	u_int8_t hdr[60];
	u_int8_t *opt;

	KASSERT((src->scrub == NULL),
	    ("pf_normalize_tcp_init: src->scrub != NULL"));

	src->scrub = uma_zalloc(V_pf_state_scrub_z, M_ZERO | M_NOWAIT);
	if (src->scrub == NULL)
		return (1);

	switch (pd->af) {
#ifdef INET
	case AF_INET: {
		struct ip *h = mtod(pd->m, struct ip *);
		src->scrub->pfss_ttl = h->ip_ttl;
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		struct ip6_hdr *h = mtod(pd->m, struct ip6_hdr *);
		src->scrub->pfss_ttl = h->ip6_hlim;
		break;
	}
#endif /* INET6 */
	}

	/*
	 * All normalizations below are only begun if we see the start of
	 * the connection.  They must all set an enabled bit in pfss_flags
	 */
	if ((th->th_flags & TH_SYN) == 0)
		return (0);

	if (th->th_off > (sizeof(struct tcphdr) >> 2) && src->scrub &&
	    pf_pull_hdr(pd->m, pd->off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
		/* Diddle with TCP options */
		int hlen;
		opt = hdr + sizeof(struct tcphdr);
		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
		while (hlen >= TCPOLEN_TIMESTAMP) {
			switch (*opt) {
			case TCPOPT_EOL:	/* FALLTHROUGH */
			case TCPOPT_NOP:
				opt++;
				hlen--;
				break;
			case TCPOPT_TIMESTAMP:
				if (opt[1] >= TCPOLEN_TIMESTAMP) {
					src->scrub->pfss_flags |=
					    PFSS_TIMESTAMP;
					src->scrub->pfss_ts_mod =
					    htonl(arc4random());

					/* note PFSS_PAWS not set yet */
					memcpy(&tsval, &opt[2],
					    sizeof(u_int32_t));
					memcpy(&tsecr, &opt[6],
					    sizeof(u_int32_t));
					src->scrub->pfss_tsval0 = ntohl(tsval);
					src->scrub->pfss_tsval = ntohl(tsval);
					src->scrub->pfss_tsecr = ntohl(tsecr);
					getmicrouptime(&src->scrub->pfss_last);
				}
				/* FALLTHROUGH */
			default:
				hlen -= MAX(opt[1], 2);
				opt += MAX(opt[1], 2);
				break;
			}
		}
	}

	return (0);
}
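/*
 * Timestamp modulation, worked example (illustrative numbers): suppose
 * the SYN above carried tsval 1000 and arc4random() produced a
 * pfss_ts_mod of 500000.  pf_normalize_tcp_stateful() below rewrites
 * every timestamp this peer sends as tsval + 500000 (1001000, 1001010,
 * ...) and shifts the echoed tsecr back down by the same constant.  The
 * connection is unaffected, but an observer can no longer read the raw
 * timestamp clock, which would otherwise leak uptime and help count
 * hosts behind a NAT.
 */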
void
pf_normalize_tcp_cleanup(struct pf_kstate *state)
{
	/* XXX Note: this also cleans up SCTP. */
	uma_zfree(V_pf_state_scrub_z, state->src.scrub);
	uma_zfree(V_pf_state_scrub_z, state->dst.scrub);

	/* Someday... flush the TCP segment reassembly descriptors. */
}

int
pf_normalize_sctp_init(struct pf_pdesc *pd, struct pf_state_peer *src,
    struct pf_state_peer *dst)
{
	src->scrub = uma_zalloc(V_pf_state_scrub_z, M_ZERO | M_NOWAIT);
	if (src->scrub == NULL)
		return (1);

	dst->scrub = uma_zalloc(V_pf_state_scrub_z, M_ZERO | M_NOWAIT);
	if (dst->scrub == NULL) {
		uma_zfree(V_pf_state_scrub_z, src->scrub);
		return (1);
	}

	dst->scrub->pfss_v_tag = pd->sctp_initiate_tag;

	return (0);
}

int
pf_normalize_tcp_stateful(struct pf_pdesc *pd,
    u_short *reason, struct tcphdr *th, struct pf_kstate *state,
    struct pf_state_peer *src, struct pf_state_peer *dst, int *writeback)
{
	struct timeval uptime;
	u_int32_t tsval, tsecr;
	u_int tsval_from_last;
	u_int8_t hdr[60];
	u_int8_t *opt;
	int copyback = 0;
	int got_ts = 0;
	size_t startoff;

	KASSERT((src->scrub || dst->scrub),
	    ("%s: src->scrub && dst->scrub!", __func__));

	/*
	 * Enforce the minimum TTL seen for this connection.  Negate a common
	 * technique to evade an intrusion detection system and confuse
	 * firewall state code.
	 */
	switch (pd->af) {
#ifdef INET
	case AF_INET: {
		if (src->scrub) {
			struct ip *h = mtod(pd->m, struct ip *);
			if (h->ip_ttl > src->scrub->pfss_ttl)
				src->scrub->pfss_ttl = h->ip_ttl;
			h->ip_ttl = src->scrub->pfss_ttl;
		}
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		if (src->scrub) {
			struct ip6_hdr *h = mtod(pd->m, struct ip6_hdr *);
			if (h->ip6_hlim > src->scrub->pfss_ttl)
				src->scrub->pfss_ttl = h->ip6_hlim;
			h->ip6_hlim = src->scrub->pfss_ttl;
		}
		break;
	}
#endif /* INET6 */
	}

	if (th->th_off > (sizeof(struct tcphdr) >> 2) &&
	    ((src->scrub && (src->scrub->pfss_flags & PFSS_TIMESTAMP)) ||
	    (dst->scrub && (dst->scrub->pfss_flags & PFSS_TIMESTAMP))) &&
	    pf_pull_hdr(pd->m, pd->off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
		/* Diddle with TCP options */
		int hlen;
		opt = hdr + sizeof(struct tcphdr);
		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
		while (hlen >= TCPOLEN_TIMESTAMP) {
			startoff = opt - (hdr + sizeof(struct tcphdr));
			switch (*opt) {
			case TCPOPT_EOL:	/* FALLTHROUGH */
			case TCPOPT_NOP:
				opt++;
				hlen--;
				break;
			case TCPOPT_TIMESTAMP:
				/* Modulate the timestamps.  Can be used for
				 * NAT detection, OS uptime determination or
				 * reboot detection.
				 */

				if (got_ts) {
					/* Huh?  Multiple timestamps!? */
					if (V_pf_status.debug >= PF_DEBUG_MISC) {
						DPFPRINTF(("multiple TS??\n"));
						pf_print_state(state);
						printf("\n");
					}
					REASON_SET(reason, PFRES_TS);
					return (PF_DROP);
				}
				if (opt[1] >= TCPOLEN_TIMESTAMP) {
					memcpy(&tsval, &opt[2],
					    sizeof(u_int32_t));
					if (tsval && src->scrub &&
					    (src->scrub->pfss_flags &
					    PFSS_TIMESTAMP)) {
						tsval = ntohl(tsval);
						pf_patch_32_unaligned(pd->m,
						    &th->th_sum,
						    &opt[2],
						    htonl(tsval +
						    src->scrub->pfss_ts_mod),
						    PF_ALGNMNT(startoff),
						    0);
						copyback = 1;
					}

					/* Modulate TS reply iff valid (!0) */
					memcpy(&tsecr, &opt[6],
					    sizeof(u_int32_t));
					if (tsecr && dst->scrub &&
					    (dst->scrub->pfss_flags &
					    PFSS_TIMESTAMP)) {
						tsecr = ntohl(tsecr)
						    - dst->scrub->pfss_ts_mod;
						pf_patch_32_unaligned(pd->m,
						    &th->th_sum,
						    &opt[6],
						    htonl(tsecr),
						    PF_ALGNMNT(startoff),
						    0);
						copyback = 1;
					}
					got_ts = 1;
				}
				/* FALLTHROUGH */
			default:
				hlen -= MAX(opt[1], 2);
				opt += MAX(opt[1], 2);
				break;
			}
		}
		if (copyback) {
			/* Copyback the options, caller copies back header */
			*writeback = 1;
			m_copyback(pd->m, pd->off + sizeof(struct tcphdr),
			    (th->th_off << 2) - sizeof(struct tcphdr), hdr +
			    sizeof(struct tcphdr));
		}
	}

	/*
	 * Must invalidate PAWS checks on connections idle for too long.
	 * The fastest allowed timestamp clock is 1ms.  That turns out to
	 * be about 24 days before it wraps.  XXX Right now our lowerbound
	 * TS echo check only works for the first 12 days of a connection
	 * when the TS has exhausted half its 32bit space
	 */
#define TS_MAX_IDLE	(24*24*60*60)
#define TS_MAX_CONN	(12*24*60*60)	/* XXX remove when better tsecr check */
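	/*
	 * Arithmetic behind these limits (illustrative): a 1 kHz timestamp
	 * clock wraps its 32-bit space after 2^32 ms, about 49.7 days, so
	 * half the space is consumed after roughly 24.8 days; TS_MAX_IDLE
	 * rounds that down to 24 days of idle time.  TS_MAX_CONN is half of
	 * that again because the weak lower bound on the echoed timestamp
	 * only holds while less than half the space has passed since the
	 * connection's first timestamp.
	 */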
	getmicrouptime(&uptime);
	if (src->scrub && (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (uptime.tv_sec - src->scrub->pfss_last.tv_sec > TS_MAX_IDLE ||
	    time_uptime - (state->creation / 1000) > TS_MAX_CONN)) {
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			DPFPRINTF(("src idled out of PAWS\n"));
			pf_print_state(state);
			printf("\n");
		}
		src->scrub->pfss_flags = (src->scrub->pfss_flags & ~PFSS_PAWS)
		    | PFSS_PAWS_IDLED;
	}
	if (dst->scrub && (dst->scrub->pfss_flags & PFSS_PAWS) &&
	    uptime.tv_sec - dst->scrub->pfss_last.tv_sec > TS_MAX_IDLE) {
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			DPFPRINTF(("dst idled out of PAWS\n"));
			pf_print_state(state);
			printf("\n");
		}
		dst->scrub->pfss_flags = (dst->scrub->pfss_flags & ~PFSS_PAWS)
		    | PFSS_PAWS_IDLED;
	}

	if (got_ts && src->scrub && dst->scrub &&
	    (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (dst->scrub->pfss_flags & PFSS_PAWS)) {
		/* Validate that the timestamps are "in-window".
		 * RFC1323 describes TCP Timestamp options that allow
		 * measurement of RTT (round trip time) and PAWS
		 * (protection against wrapped sequence numbers).  PAWS
		 * gives us a set of rules for rejecting packets on
		 * long fat pipes (packets that were somehow delayed
		 * in transit longer than the time it took to send the
		 * full TCP sequence space of 4Gb).  We can use these
		 * rules and infer a few others that will let us treat
		 * the 32bit timestamp and the 32bit echoed timestamp
		 * as sequence numbers to prevent a blind attacker from
		 * inserting packets into a connection.
		 *
		 * RFC1323 tells us:
		 *  - The timestamp on this packet must be greater than
		 *    or equal to the last value echoed by the other
		 *    endpoint.  The RFC says those will be discarded
		 *    since it is a dup that has already been acked.
		 *    This gives us a lowerbound on the timestamp.
		 *        timestamp >= other last echoed timestamp
		 *  - The timestamp will be less than or equal to
		 *    the last timestamp plus the time between the
		 *    last packet and now.  The RFC defines the max
		 *    clock rate as 1ms.  We will allow clocks to be
		 *    up to 10% fast and will allow a total difference
		 *    of 30 seconds due to a route change.  And this
		 *    gives us an upperbound on the timestamp.
		 *        timestamp <= last timestamp + max ticks
		 *    We have to be careful here.  Windows will send an
		 *    initial timestamp of zero and then initialize it
		 *    to a random value after the 3whs; presumably to
		 *    avoid a DoS by having to call an expensive RNG
		 *    during a SYN flood.  Proof MS has at least one
		 *    good security geek.
		 *
		 *  - The TCP timestamp option must also echo the other
		 *    endpoints timestamp.  The timestamp echoed is the
		 *    one carried on the earliest unacknowledged segment
		 *    on the left edge of the sequence window.  The RFC
		 *    states that the host will reject any echoed
		 *    timestamps that were larger than any ever sent.
		 *    This gives us an upperbound on the TS echo.
		 *        tsecr <= largest_tsval
		 *  - The lowerbound on the TS echo is a little more
		 *    tricky to determine.  The other endpoint's echoed
		 *    values will not decrease.  But there may be
		 *    network conditions that re-order packets and
		 *    cause our view of them to decrease.  For now the
		 *    only lowerbound we can safely determine is that
		 *    the TS echo will never be less than the original
		 *    TS.  XXX There is probably a better lowerbound.
		 *    Remove TS_MAX_CONN with better lowerbound check.
		 *        tsecr >= other original TS
		 *
		 * It is also important to note that the fastest
		 * timestamp clock of 1ms will wrap its 32bit space in
		 * 24 days.  So we just disable TS checking after 24
		 * days of idle time.  We actually must use a 12d
		 * connection limit until we can come up with a better
		 * lowerbound to the TS echo check.
		 */
		struct timeval delta_ts;
		int ts_fudge;

		/*
		 * PFTM_TS_DIFF is how many seconds of leeway to allow
		 * a host's timestamp.  This can happen if the previous
		 * packet got delayed in transit for much longer than
		 * this packet.
		 */
		if ((ts_fudge = state->rule->timeout[PFTM_TS_DIFF]) == 0)
			ts_fudge = V_pf_default_rule.timeout[PFTM_TS_DIFF];

		/* Calculate max ticks since the last timestamp */
#define TS_MAXFREQ	1100		/* RFC max TS freq of 1Khz + 10% skew */
#define TS_MICROSECS	1000000		/* microseconds per second */
		delta_ts = uptime;
		timevalsub(&delta_ts, &src->scrub->pfss_last);
		tsval_from_last = (delta_ts.tv_sec + ts_fudge) * TS_MAXFREQ;
		tsval_from_last += delta_ts.tv_usec / (TS_MICROSECS/TS_MAXFREQ);

		if ((src->state >= TCPS_ESTABLISHED &&
		    dst->state >= TCPS_ESTABLISHED) &&
		    (SEQ_LT(tsval, dst->scrub->pfss_tsecr) ||
		    SEQ_GT(tsval, src->scrub->pfss_tsval + tsval_from_last) ||
		    (tsecr && (SEQ_GT(tsecr, dst->scrub->pfss_tsval) ||
		    SEQ_LT(tsecr, dst->scrub->pfss_tsval0))))) {
			/* Bad RFC1323 implementation or an insertion attack.
			 *
			 * - Solaris 2.6 and 2.7 are known to send another ACK
			 *   after the FIN,FIN|ACK,ACK closing that carries
			 *   an old timestamp.
			 */

			DPFPRINTF(("Timestamp failed %c%c%c%c\n",
			    SEQ_LT(tsval, dst->scrub->pfss_tsecr) ? '0' : ' ',
			    SEQ_GT(tsval, src->scrub->pfss_tsval +
			    tsval_from_last) ? '1' : ' ',
			    SEQ_GT(tsecr, dst->scrub->pfss_tsval) ? '2' : ' ',
			    SEQ_LT(tsecr, dst->scrub->pfss_tsval0)? '3' : ' '));
			DPFPRINTF((" tsval: %u  tsecr: %u  +ticks: %u  "
			    "idle: %jus %lums\n",
			    tsval, tsecr, tsval_from_last,
			    (uintmax_t)delta_ts.tv_sec,
			    delta_ts.tv_usec / 1000));
			DPFPRINTF((" src->tsval: %u  tsecr: %u\n",
			    src->scrub->pfss_tsval, src->scrub->pfss_tsecr));
			DPFPRINTF((" dst->tsval: %u  tsecr: %u  tsval0: %u"
			    "\n", dst->scrub->pfss_tsval,
			    dst->scrub->pfss_tsecr, dst->scrub->pfss_tsval0));
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
				pf_print_state(state);
				pf_print_flags(th->th_flags);
				printf("\n");
			}
			REASON_SET(reason, PFRES_TS);
			return (PF_DROP);
		}

		/* XXX I'd really like to require tsecr but it's optional */

	} else if (!got_ts && (th->th_flags & TH_RST) == 0 &&
	    ((src->state == TCPS_ESTABLISHED && dst->state == TCPS_ESTABLISHED)
	    || pd->p_len > 0 || (th->th_flags & TH_SYN)) &&
	    src->scrub && dst->scrub &&
	    (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (dst->scrub->pfss_flags & PFSS_PAWS)) {
		/* Didn't send a timestamp.  Timestamps aren't really useful
		 * when:
		 *  - connection opening or closing (often not even sent).
		 *    but we must not let an attacker put a FIN on a
		 *    data packet to sneak it through our ESTABLISHED check.
		 *  - on a TCP reset.  RFC suggests not even looking at TS.
		 *  - on an empty ACK.  The TS will not be echoed so it will
		 *    probably not help keep the RTT calculation in sync and
		 *    there isn't as much danger when the sequence numbers
		 *    got wrapped.  So some stacks don't include TS on empty
		 *    ACKs :-(
		 *
		 * To minimize the disruption to mostly RFC1323 conformant
		 * stacks, we will only require timestamps on data packets.
		 *
		 * And what do ya know, we cannot require timestamps on data
		 * packets.  There appear to be devices that do legitimate
		 * TCP connection hijacking.  There are HTTP devices that allow
		 * a 3whs (with timestamps) and then buffer the HTTP request.
		 * If the intermediate device has the HTTP response cache, it
		 * will spoof the response but not bother timestamping its
		 * packets.  So we can look for the presence of a timestamp in
		 * the first data packet and if there, require it in all future
		 * packets.
		 */

		if (pd->p_len > 0 && (src->scrub->pfss_flags & PFSS_DATA_TS)) {
			/*
			 * Hey!  Someone tried to sneak a packet in.  Or the
			 * stack changed its RFC1323 behavior?!?!
			 */
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
				DPFPRINTF(("Did not receive expected RFC1323 "
				    "timestamp\n"));
				pf_print_state(state);
				pf_print_flags(th->th_flags);
				printf("\n");
			}
			REASON_SET(reason, PFRES_TS);
			return (PF_DROP);
		}
	}

	/*
	 * We will note if a host sends its data packets with or without
	 * timestamps.  And require all data packets to contain a timestamp
	 * if the first does.  PAWS implicitly requires that all data packets be
	 * timestamped.  But I think there are middle-man devices that hijack
	 * TCP streams immediately after the 3whs and don't timestamp their
	 * packets (seen in a WWW accelerator or cache).
	 */
	if (pd->p_len > 0 && src->scrub && (src->scrub->pfss_flags &
	    (PFSS_TIMESTAMP|PFSS_DATA_TS|PFSS_DATA_NOTS)) == PFSS_TIMESTAMP) {
		if (got_ts)
			src->scrub->pfss_flags |= PFSS_DATA_TS;
		else {
			src->scrub->pfss_flags |= PFSS_DATA_NOTS;
			if (V_pf_status.debug >= PF_DEBUG_MISC && dst->scrub &&
			    (dst->scrub->pfss_flags & PFSS_TIMESTAMP)) {
				/* Don't warn if other host rejected RFC1323 */
				DPFPRINTF(("Broken RFC1323 stack did not "
				    "timestamp data packet. Disabled PAWS "

	/*
	 * Update PAWS values
	 */
	if (got_ts && src->scrub && PFSS_TIMESTAMP == (src->scrub->pfss_flags &
	    (PFSS_PAWS_IDLED|PFSS_TIMESTAMP))) {
		getmicrouptime(&src->scrub->pfss_last);
		if (SEQ_GEQ(tsval, src->scrub->pfss_tsval) ||
		    (src->scrub->pfss_flags & PFSS_PAWS) == 0)
			src->scrub->pfss_tsval = tsval;

		if (tsecr) {
			if (SEQ_GEQ(tsecr, src->scrub->pfss_tsecr) ||
			    (src->scrub->pfss_flags & PFSS_PAWS) == 0)
				src->scrub->pfss_tsecr = tsecr;

			if ((src->scrub->pfss_flags & PFSS_PAWS) == 0 &&
			    (SEQ_LT(tsval, src->scrub->pfss_tsval0) ||
			    src->scrub->pfss_tsval0 == 0)) {
				/* tsval0 MUST be the lowest timestamp */
				src->scrub->pfss_tsval0 = tsval;
			}

			/* Only fully initialized after a TS gets echoed */
			if ((src->scrub->pfss_flags & PFSS_PAWS) == 0)
				src->scrub->pfss_flags |= PFSS_PAWS;
		}
	}

	/* I have a dream.... TCP segment reassembly.... */
	return (0);
}
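
/*
 * Editor's note: pf_normalize_mss() below walks the TCP options of a
 * segment and clamps the MSS option to the configured maximum. As an
 * illustration, with a "max-mss 1440" scrub option a SYN advertising an
 * MSS of 1460 is rewritten in place to 1440 and the TCP checksum is
 * patched incrementally; values at or below the limit pass unmodified.
 */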
2003 */ 2004 if (init.init.initiate_tag == 0) 2005 return (PF_DROP); 2006 if (init.init.num_inbound_streams == 0) 2007 return (PF_DROP); 2008 if (init.init.num_outbound_streams == 0) 2009 return (PF_DROP); 2010 if (ntohl(init.init.a_rwnd) < SCTP_MIN_RWND) 2011 return (PF_DROP); 2012 2013 /* 2014 * RFC 9260, Section 3.1, INIT chunks MUST have zero 2015 * verification tag. 2016 */ 2017 if (ch.chunk_type == SCTP_INITIATION && 2018 pd->hdr.sctp.v_tag != 0) 2019 return (PF_DROP); 2020 2021 pd->sctp_initiate_tag = init.init.initiate_tag; 2022 2023 if (ch.chunk_type == SCTP_INITIATION) 2024 pd->sctp_flags |= PFDESC_SCTP_INIT; 2025 else 2026 pd->sctp_flags |= PFDESC_SCTP_INIT_ACK; 2027 2028 ret = pf_multihome_scan_init(pd->off + chunk_start, 2029 ntohs(init.ch.chunk_length), pd); 2030 if (ret != PF_PASS) 2031 return (ret); 2032 2033 break; 2034 } 2035 case SCTP_ABORT_ASSOCIATION: 2036 pd->sctp_flags |= PFDESC_SCTP_ABORT; 2037 break; 2038 case SCTP_SHUTDOWN: 2039 case SCTP_SHUTDOWN_ACK: 2040 pd->sctp_flags |= PFDESC_SCTP_SHUTDOWN; 2041 break; 2042 case SCTP_SHUTDOWN_COMPLETE: 2043 pd->sctp_flags |= PFDESC_SCTP_SHUTDOWN_COMPLETE; 2044 break; 2045 case SCTP_COOKIE_ECHO: 2046 pd->sctp_flags |= PFDESC_SCTP_COOKIE; 2047 break; 2048 case SCTP_COOKIE_ACK: 2049 pd->sctp_flags |= PFDESC_SCTP_COOKIE_ACK; 2050 break; 2051 case SCTP_DATA: 2052 pd->sctp_flags |= PFDESC_SCTP_DATA; 2053 break; 2054 case SCTP_HEARTBEAT_REQUEST: 2055 pd->sctp_flags |= PFDESC_SCTP_HEARTBEAT; 2056 break; 2057 case SCTP_HEARTBEAT_ACK: 2058 pd->sctp_flags |= PFDESC_SCTP_HEARTBEAT_ACK; 2059 break; 2060 case SCTP_ASCONF: 2061 pd->sctp_flags |= PFDESC_SCTP_ASCONF; 2062 2063 ret = pf_multihome_scan_asconf(pd->off + chunk_start, 2064 ntohs(ch.chunk_length), pd); 2065 if (ret != PF_PASS) 2066 return (ret); 2067 break; 2068 default: 2069 pd->sctp_flags |= PFDESC_SCTP_OTHER; 2070 break; 2071 } 2072 } 2073 2074 /* Validate chunk lengths vs. packet length. */ 2075 if (pd->off + chunk_off != pd->tot_len) 2076 return (PF_DROP); 2077 2078 /* 2079 * INIT, INIT_ACK or SHUTDOWN_COMPLETE chunks must always be the only 2080 * one in a packet. 2081 */ 2082 if ((pd->sctp_flags & PFDESC_SCTP_INIT) && 2083 (pd->sctp_flags & ~PFDESC_SCTP_INIT)) 2084 return (PF_DROP); 2085 if ((pd->sctp_flags & PFDESC_SCTP_INIT_ACK) && 2086 (pd->sctp_flags & ~PFDESC_SCTP_INIT_ACK)) 2087 return (PF_DROP); 2088 if ((pd->sctp_flags & PFDESC_SCTP_SHUTDOWN_COMPLETE) && 2089 (pd->sctp_flags & ~PFDESC_SCTP_SHUTDOWN_COMPLETE)) 2090 return (PF_DROP); 2091 2092 return (PF_PASS); 2093 } 2094 2095 int 2096 pf_normalize_sctp(struct pf_pdesc *pd) 2097 { 2098 struct pf_krule *r, *rm = NULL; 2099 struct sctphdr *sh = &pd->hdr.sctp; 2100 u_short reason; 2101 sa_family_t af = pd->af; 2102 int srs; 2103 2104 PF_RULES_RASSERT(); 2105 2106 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr); 2107 /* Check if there any scrub rules. Lack of scrub rules means enforced 2108 * packet normalization operation just like in OpenBSD. 

int
pf_normalize_sctp(struct pf_pdesc *pd)
{
	struct pf_krule	*r, *rm = NULL;
	struct sctphdr	*sh = &pd->hdr.sctp;
	u_short		 reason;
	sa_family_t	 af = pd->af;
	int		 srs;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	/* Check if there are any scrub rules. Lack of scrub rules means
	 * packet normalization is enforced, just like in OpenBSD. */
	srs = (r != NULL);
	while (r != NULL) {
		pf_counter_u64_add(&r->evaluations, 1);
		if (pfi_kkif_match(r->kif, pd->kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP];
		else if (r->direction && r->direction != pd->dir)
			r = r->skip[PF_SKIP_DIR];
		else if (r->af && r->af != af)
			r = r->skip[PF_SKIP_AF];
		else if (r->proto && r->proto != pd->proto)
			r = r->skip[PF_SKIP_PROTO];
		else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
		    r->src.neg, pd->kif, M_GETFIB(pd->m)))
			r = r->skip[PF_SKIP_SRC_ADDR];
		else if (r->src.port_op && !pf_match_port(r->src.port_op,
		    r->src.port[0], r->src.port[1], sh->src_port))
			r = r->skip[PF_SKIP_SRC_PORT];
		else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
		    r->dst.neg, NULL, M_GETFIB(pd->m)))
			r = r->skip[PF_SKIP_DST_ADDR];
		else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
		    r->dst.port[0], r->dst.port[1], sh->dest_port))
			r = r->skip[PF_SKIP_DST_PORT];
		else {
			rm = r;
			break;
		}
	}

	if (srs) {
		/* With scrub rules present, SCTP normalization happens only
		 * if one of the rules has matched and it is not a "no scrub"
		 * rule. */
		if (rm == NULL || rm->action == PF_NOSCRUB)
			return (PF_PASS);

		pf_counter_u64_critical_enter();
		pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1);
		pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT],
		    pd->tot_len);
		pf_counter_u64_critical_exit();
	}

	/* Verify we're a multiple of 4 bytes long */
	if ((pd->tot_len - pd->off - sizeof(struct sctphdr)) % 4)
		goto sctp_drop;

	/* INIT chunk needs to be the only chunk */
	if (pd->sctp_flags & PFDESC_SCTP_INIT)
		if (pd->sctp_flags & ~PFDESC_SCTP_INIT)
			goto sctp_drop;

	return (PF_PASS);

sctp_drop:
	REASON_SET(&reason, PFRES_NORM);
	if (rm != NULL && r->log)
		PFLOG_PACKET(PF_DROP, reason, r, NULL, NULL, pd,
		    1);

	return (PF_DROP);
}

#if defined(INET) || defined(INET6)
void
pf_scrub(struct pf_pdesc *pd)
{

	struct ip		*h = mtod(pd->m, struct ip *);
#ifdef INET6
	struct ip6_hdr		*h6 = mtod(pd->m, struct ip6_hdr *);
#endif

	/* Clear IP_DF if no-df was requested */
	if (pd->af == AF_INET && pd->act.flags & PFSTATE_NODF &&
	    h->ip_off & htons(IP_DF))
	{
		u_int16_t ip_off = h->ip_off;

		h->ip_off &= htons(~IP_DF);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
	}

	/* Enforce a minimum ttl, may cause endless packet loops */
	if (pd->af == AF_INET && pd->act.min_ttl &&
	    h->ip_ttl < pd->act.min_ttl) {
		u_int16_t ip_ttl = h->ip_ttl;

		h->ip_ttl = pd->act.min_ttl;
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_ttl, h->ip_ttl, 0);
	}
#ifdef INET6
	/* Enforce a minimum ttl, may cause endless packet loops */
	if (pd->af == AF_INET6 && pd->act.min_ttl &&
	    h6->ip6_hlim < pd->act.min_ttl)
		h6->ip6_hlim = pd->act.min_ttl;
#endif
	/* Enforce tos */
	if (pd->act.flags & PFSTATE_SETTOS) {
		switch (pd->af) {
		case AF_INET: {
			u_int16_t	ov, nv;

			ov = *(u_int16_t *)h;
			h->ip_tos = pd->act.set_tos | (h->ip_tos & IPTOS_ECN_MASK);
			nv = *(u_int16_t *)h;

			h->ip_sum = pf_cksum_fixup(h->ip_sum, ov, nv, 0);
			break;
		}
#ifdef INET6
		case AF_INET6:
			h6->ip6_flow &= IPV6_FLOWLABEL_MASK | IPV6_VERSION_MASK;
			h6->ip6_flow |= htonl((pd->act.set_tos |
			    IPV6_ECN(h6)) << 20);
			break;
#endif
		}
	}
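
	/*
	 * Editor's illustration of the tos rewrite above: "set-tos 0xb8"
	 * (DSCP EF) applied to a packet carrying ECT(0) (ECN bits 0x02)
	 * yields a tos byte of 0xba; the two ECN bits always survive the
	 * rewrite. On IPv6 the traffic class occupies bits 20-27 of the
	 * host-order ip6_flow word, hence the << 20 shift.
	 */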

	/* random-id, but not for fragments */
#ifdef INET
	if (pd->af == AF_INET &&
	    pd->act.flags & PFSTATE_RANDOMID && !(h->ip_off & ~htons(IP_DF))) {
		uint16_t ip_id = h->ip_id;

		ip_fillid(h);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_id, h->ip_id, 0);
	}
#endif
}
#endif
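
/*
 * Editor's usage sketch: the normalizations above are driven from
 * pf.conf. An illustrative (not authoritative) scrub line exercising
 * these paths might read:
 *
 *	scrub in all fragment reassemble no-df random-id \
 *	    min-ttl 64 max-mss 1440 set-tos lowdelay reassemble tcp
 *
 * no-df, min-ttl, set-tos and random-id are applied in pf_scrub(),
 * max-mss drives pf_normalize_mss(), and reassemble tcp enables the
 * TCP timestamp/PAWS tracking earlier in this file.
 */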