/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright 2001 Niels Provos <provos@citi.umich.edu>
 * Copyright 2011-2018 Alexander Bluhm <bluhm@openbsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *	$OpenBSD: pf_norm.c,v 1.114 2009/01/29 14:11:45 henning Exp $
 */

#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_pf.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/refcount.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/vnet.h>
#include <net/pfvar.h>
#include <net/if_pflog.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet6/in6_var.h>
#include <netinet6/nd6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/scope6_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/sctp_constants.h>
#include <netinet/sctp_header.h>

#ifdef INET6
#include <netinet/ip6.h>
#endif /* INET6 */

struct pf_frent {
	TAILQ_ENTRY(pf_frent)	fr_next;
	struct mbuf	*fe_m;
	uint16_t	fe_hdrlen;	/* ipv4 header length with ip options
					   ipv6, extension, fragment header */
	uint16_t	fe_extoff;	/* last extension header offset or 0 */
	uint16_t	fe_len;		/* fragment length */
	uint16_t	fe_off;		/* fragment offset */
	uint16_t	fe_mff;		/* more fragment flag */
};

struct pf_fragment_cmp {
	struct pf_addr	frc_src;
	struct pf_addr	frc_dst;
	uint32_t	frc_id;
	sa_family_t	frc_af;
	uint8_t		frc_proto;
};

struct pf_fragment {
	struct pf_fragment_cmp	fr_key;
#define fr_src	fr_key.frc_src
#define fr_dst	fr_key.frc_dst
#define fr_id	fr_key.frc_id
#define fr_af	fr_key.frc_af
#define fr_proto	fr_key.frc_proto

	/* pointers to queue element */
	struct pf_frent	*fr_firstoff[PF_FRAG_ENTRY_POINTS];
	/* count entries between pointers */
	uint8_t		fr_entries[PF_FRAG_ENTRY_POINTS];
	RB_ENTRY(pf_fragment) fr_entry;
	TAILQ_ENTRY(pf_fragment) frag_next;
	uint32_t	fr_timeout;
	uint16_t	fr_maxlen;	/* maximum length of single fragment */
	u_int16_t	fr_holes;	/* number of holes in the queue */
	TAILQ_HEAD(pf_fragq, pf_frent) fr_queue;
};

struct pf_fragment_tag {
	uint16_t	ft_hdrlen;	/* header length of reassembled pkt */
	uint16_t	ft_extoff;	/* last extension header offset or 0 */
	uint16_t	ft_maxlen;	/* maximum fragment payload length */
	uint32_t	ft_id;		/* fragment id */
};

VNET_DEFINE_STATIC(struct mtx, pf_frag_mtx);
#define V_pf_frag_mtx		VNET(pf_frag_mtx)
#define PF_FRAG_LOCK()		mtx_lock(&V_pf_frag_mtx)
#define PF_FRAG_UNLOCK()	mtx_unlock(&V_pf_frag_mtx)
#define PF_FRAG_ASSERT()	mtx_assert(&V_pf_frag_mtx, MA_OWNED)

VNET_DEFINE(uma_zone_t, pf_state_scrub_z);	/* XXX: shared with pfsync */

VNET_DEFINE_STATIC(uma_zone_t, pf_frent_z);
#define	V_pf_frent_z	VNET(pf_frent_z)
VNET_DEFINE_STATIC(uma_zone_t, pf_frag_z);
#define	V_pf_frag_z	VNET(pf_frag_z)

TAILQ_HEAD(pf_fragqueue, pf_fragment);
TAILQ_HEAD(pf_cachequeue, pf_fragment);
VNET_DEFINE_STATIC(struct pf_fragqueue, pf_fragqueue);
#define	V_pf_fragqueue	VNET(pf_fragqueue)
RB_HEAD(pf_frag_tree, pf_fragment);
VNET_DEFINE_STATIC(struct pf_frag_tree, pf_frag_tree);
#define	V_pf_frag_tree	VNET(pf_frag_tree)
static int		 pf_frag_compare(struct pf_fragment *,
			    struct pf_fragment *);
static RB_PROTOTYPE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
static RB_GENERATE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);

static void	pf_flush_fragments(void);
static void	pf_free_fragment(struct pf_fragment *);
static void	pf_remove_fragment(struct pf_fragment *);

static struct pf_frent *pf_create_fragment(u_short *);
static int	pf_frent_holes(struct pf_frent *frent);
static struct pf_fragment *pf_find_fragment(struct pf_fragment_cmp *key,
		    struct pf_frag_tree *tree);
static inline int	pf_frent_index(struct pf_frent *);
static int	pf_frent_insert(struct pf_fragment *,
		    struct pf_frent *, struct pf_frent *);
void		pf_frent_remove(struct pf_fragment *,
		    struct pf_frent *);
struct pf_frent	*pf_frent_previous(struct pf_fragment *,
		    struct pf_frent *);
static struct pf_fragment *pf_fillup_fragment(struct pf_fragment_cmp *,
		    struct pf_frent *, u_short *);
static struct mbuf *pf_join_fragment(struct pf_fragment *);
#ifdef INET
static int	pf_reassemble(struct mbuf **, int, u_short *);
#endif	/* INET */
#ifdef INET6
static int	pf_reassemble6(struct mbuf **,
		    struct ip6_frag *, uint16_t, uint16_t, u_short *);
#endif	/* INET6 */

#define	DPFPRINTF(x) do {				\
	if (V_pf_status.debug >= PF_DEBUG_MISC) {	\
		printf("%s: ", __func__);		\
		printf x ;				\
	}						\
} while(0)

#ifdef INET
static void
pf_ip2key(struct ip *ip, int dir, struct pf_fragment_cmp *key)
{

	key->frc_src.v4 = ip->ip_src;
	key->frc_dst.v4 = ip->ip_dst;
	key->frc_af = AF_INET;
	key->frc_proto = ip->ip_p;
	key->frc_id = ip->ip_id;
}
#endif	/* INET */

void
pf_normalize_init(void)
{

	V_pf_frag_z = uma_zcreate("pf frags", sizeof(struct pf_fragment),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	V_pf_frent_z = uma_zcreate("pf frag entries", sizeof(struct pf_frent),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	V_pf_state_scrub_z = uma_zcreate("pf state scrubs",
	    sizeof(struct pf_state_scrub), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);

	mtx_init(&V_pf_frag_mtx, "pf fragments", NULL, MTX_DEF);

	V_pf_limits[PF_LIMIT_FRAGS].zone = V_pf_frent_z;
	V_pf_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT;
	uma_zone_set_max(V_pf_frent_z, PFFRAG_FRENT_HIWAT);
	uma_zone_set_warning(V_pf_frent_z, "PF frag entries limit reached");

	TAILQ_INIT(&V_pf_fragqueue);
}

void
pf_normalize_cleanup(void)
{

	uma_zdestroy(V_pf_state_scrub_z);
	uma_zdestroy(V_pf_frent_z);
	uma_zdestroy(V_pf_frag_z);

	mtx_destroy(&V_pf_frag_mtx);
}

static int
pf_frag_compare(struct pf_fragment *a, struct pf_fragment *b)
{
	int	diff;

	if ((diff = a->fr_id - b->fr_id) != 0)
		return (diff);
	if ((diff = a->fr_proto - b->fr_proto) != 0)
		return (diff);
	if ((diff = a->fr_af - b->fr_af) != 0)
		return (diff);
	if ((diff = pf_addr_cmp(&a->fr_src, &b->fr_src, a->fr_af)) != 0)
		return (diff);
	if ((diff = pf_addr_cmp(&a->fr_dst, &b->fr_dst, a->fr_af)) != 0)
		return (diff);
	return (0);
}

void
pf_purge_expired_fragments(void)
{
	u_int32_t	expire = time_uptime -
			    V_pf_default_rule.timeout[PFTM_FRAG];

	pf_purge_fragments(expire);
}

void
pf_purge_fragments(uint32_t expire)
{
	struct pf_fragment	*frag;

	PF_FRAG_LOCK();
	while ((frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue)) != NULL) {
		if (frag->fr_timeout > expire)
			break;

		DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
		pf_free_fragment(frag);
	}

	PF_FRAG_UNLOCK();
}

/*
 * Try to flush old fragments to make space for new ones
 */
static void
pf_flush_fragments(void)
{
	struct pf_fragment	*frag;
	int			 goal;

	PF_FRAG_ASSERT();

	goal = uma_zone_get_cur(V_pf_frent_z) * 9 / 10;
	DPFPRINTF(("trying to free %d frag entries\n", goal));
	while (goal < uma_zone_get_cur(V_pf_frent_z)) {
		frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue);
		if (frag)
			pf_free_fragment(frag);
		else
			break;
	}
}

/* Frees the fragments and all associated entries */
static void
pf_free_fragment(struct pf_fragment *frag)
{
	struct pf_frent	*frent;

	PF_FRAG_ASSERT();

	/* Free all fragments */
	for (frent = TAILQ_FIRST(&frag->fr_queue); frent;
	    frent = TAILQ_FIRST(&frag->fr_queue)) {
		TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);

		m_freem(frent->fe_m);
		uma_zfree(V_pf_frent_z, frent);
	}

	pf_remove_fragment(frag);
}

static struct pf_fragment *
pf_find_fragment(struct pf_fragment_cmp *key, struct pf_frag_tree *tree)
{
	struct pf_fragment	*frag;

	PF_FRAG_ASSERT();

	frag = RB_FIND(pf_frag_tree, tree, (struct pf_fragment *)key);
	if (frag != NULL) {
		/* XXX Are we sure we want to update the timeout? */
		frag->fr_timeout = time_uptime;
		TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);
		TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next);
	}

	return (frag);
}

/* Removes a fragment from the fragment queue and frees the fragment */
static void
pf_remove_fragment(struct pf_fragment *frag)
{

	PF_FRAG_ASSERT();
	KASSERT(frag, ("frag != NULL"));

	RB_REMOVE(pf_frag_tree, &V_pf_frag_tree, frag);
	TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);
	uma_zfree(V_pf_frag_z, frag);
}

static struct pf_frent *
pf_create_fragment(u_short *reason)
{
	struct pf_frent	*frent;

	PF_FRAG_ASSERT();

	frent = uma_zalloc(V_pf_frent_z, M_NOWAIT);
	if (frent == NULL) {
		pf_flush_fragments();
		frent = uma_zalloc(V_pf_frent_z, M_NOWAIT);
		if (frent == NULL) {
			REASON_SET(reason, PFRES_MEMORY);
			return (NULL);
		}
	}

	return (frent);
}

/*
 * Calculate the additional holes that were created in the fragment
 * queue by inserting this fragment.  A fragment in the middle
 * creates one more hole by splitting.  For each connected side,
 * it loses one hole.
 * Fragment entry must be in the queue when calling this function.
 */
static int
pf_frent_holes(struct pf_frent *frent)
{
	struct pf_frent *prev = TAILQ_PREV(frent, pf_fragq, fr_next);
	struct pf_frent *next = TAILQ_NEXT(frent, fr_next);
	int holes = 1;

	if (prev == NULL) {
		if (frent->fe_off == 0)
			holes--;
	} else {
		KASSERT(frent->fe_off != 0, ("frent->fe_off != 0"));
		if (frent->fe_off == prev->fe_off + prev->fe_len)
			holes--;
	}
	if (next == NULL) {
		if (!frent->fe_mff)
			holes--;
	} else {
		KASSERT(frent->fe_mff, ("frent->fe_mff"));
		if (next->fe_off == frent->fe_off + frent->fe_len)
			holes--;
	}
	return holes;
}
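
/*
 * Worked example for the hole accounting above (editor's illustration,
 * offsets chosen arbitrarily): a 4440 octet payload arrives as three
 * fragments covering octets 0-1479 (mff set), 2960-4439 (mff clear) and
 * 1480-2959 (mff set), in that order.  The first insert is connected at
 * the front (fe_off == 0) but open at the back, so pf_frent_holes()
 * returns 0 and fr_holes keeps its initial value of 1.  The second is
 * adjacent to nothing, but terminates the packet (!fe_mff), so it also
 * returns 0: still one hole, octets 1480-2959.  The final insert
 * connects on both sides and returns -1, bringing fr_holes to 0, which
 * is the signal in pf_reassemble() that all data has arrived.
 */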

static inline int
pf_frent_index(struct pf_frent *frent)
{
	/*
	 * We have an array of 16 entry points to the queue.  A full size
	 * 65535 octet IP packet can have 8192 fragments.  So the queue
	 * traversal length is at most 512 and at most 16 entry points are
	 * checked.  We need 128 additional bytes on a 64 bit architecture.
	 */
	CTASSERT(((u_int16_t)0xffff &~ 7) / (0x10000 / PF_FRAG_ENTRY_POINTS) ==
	    16 - 1);
	CTASSERT(((u_int16_t)0xffff >> 3) / PF_FRAG_ENTRY_POINTS == 512 - 1);

	return frent->fe_off / (0x10000 / PF_FRAG_ENTRY_POINTS);
}

static int
pf_frent_insert(struct pf_fragment *frag, struct pf_frent *frent,
    struct pf_frent *prev)
{
	int index;

	CTASSERT(PF_FRAG_ENTRY_LIMIT <= 0xff);

	/*
	 * A packet has at most 65536 octets.  With 16 entry points, each
	 * one spans 4096 octets.  We limit these to 64 fragments each,
	 * which means on average every fragment must have at least 64
	 * octets.
	 */
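	/*
	 * Editor's illustration of the arithmetic: with 16 entry points
	 * each bucket covers 0x10000 / 16 = 4096 octets of offset space,
	 * and PF_FRAG_ENTRY_LIMIT caps each bucket at 64 entries.  A
	 * flood of minimum-size 8-octet fragments would need
	 * 4096 / 8 = 512 entries in a single bucket and is rejected with
	 * ENOBUFS long before that, while any packet whose fragments
	 * average at least 4096 / 64 = 64 octets still fits.
	 */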
	index = pf_frent_index(frent);
	if (frag->fr_entries[index] >= PF_FRAG_ENTRY_LIMIT)
		return ENOBUFS;
	frag->fr_entries[index]++;

	if (prev == NULL) {
		TAILQ_INSERT_HEAD(&frag->fr_queue, frent, fr_next);
	} else {
		KASSERT(prev->fe_off + prev->fe_len <= frent->fe_off,
		    ("overlapping fragment"));
		TAILQ_INSERT_AFTER(&frag->fr_queue, prev, frent, fr_next);
	}

	if (frag->fr_firstoff[index] == NULL) {
		KASSERT(prev == NULL || pf_frent_index(prev) < index,
		    ("prev == NULL || pf_frent_index(prev) < index"));
		frag->fr_firstoff[index] = frent;
	} else {
		if (frent->fe_off < frag->fr_firstoff[index]->fe_off) {
			KASSERT(prev == NULL || pf_frent_index(prev) < index,
			    ("prev == NULL || pf_frent_index(prev) < index"));
			frag->fr_firstoff[index] = frent;
		} else {
			KASSERT(prev != NULL, ("prev != NULL"));
			KASSERT(pf_frent_index(prev) == index,
			    ("pf_frent_index(prev) == index"));
		}
	}

	frag->fr_holes += pf_frent_holes(frent);

	return 0;
}

void
pf_frent_remove(struct pf_fragment *frag, struct pf_frent *frent)
{
#ifdef INVARIANTS
	struct pf_frent *prev = TAILQ_PREV(frent, pf_fragq, fr_next);
#endif
	struct pf_frent *next = TAILQ_NEXT(frent, fr_next);
	int index;

	frag->fr_holes -= pf_frent_holes(frent);

	index = pf_frent_index(frent);
	KASSERT(frag->fr_firstoff[index] != NULL, ("frent not found"));
	if (frag->fr_firstoff[index]->fe_off == frent->fe_off) {
		if (next == NULL) {
			frag->fr_firstoff[index] = NULL;
		} else {
			KASSERT(frent->fe_off + frent->fe_len <= next->fe_off,
			    ("overlapping fragment"));
			if (pf_frent_index(next) == index) {
				frag->fr_firstoff[index] = next;
			} else {
				frag->fr_firstoff[index] = NULL;
			}
		}
	} else {
		KASSERT(frag->fr_firstoff[index]->fe_off < frent->fe_off,
		    ("frag->fr_firstoff[index]->fe_off < frent->fe_off"));
		KASSERT(prev != NULL, ("prev != NULL"));
		KASSERT(prev->fe_off + prev->fe_len <= frent->fe_off,
		    ("overlapping fragment"));
		KASSERT(pf_frent_index(prev) == index,
		    ("pf_frent_index(prev) == index"));
	}

	TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);

	KASSERT(frag->fr_entries[index] > 0, ("No fragments remaining"));
	frag->fr_entries[index]--;
}

struct pf_frent *
pf_frent_previous(struct pf_fragment *frag, struct pf_frent *frent)
{
	struct pf_frent *prev, *next;
	int index;

	/*
	 * If there are no fragments after frag, take the final one.  Assume
	 * that the global queue is not empty.
	 */
	prev = TAILQ_LAST(&frag->fr_queue, pf_fragq);
	KASSERT(prev != NULL, ("prev != NULL"));
	if (prev->fe_off <= frent->fe_off)
		return prev;
	/*
	 * We want to find a fragment entry that is before frag, but still
	 * close to it.  Find the first fragment entry that is in the same
	 * entry point or in the first entry point after that.  As we have
	 * already checked that there are entries behind frag, this will
	 * succeed.
	 */
	for (index = pf_frent_index(frent); index < PF_FRAG_ENTRY_POINTS;
	    index++) {
		prev = frag->fr_firstoff[index];
		if (prev != NULL)
			break;
	}
	KASSERT(prev != NULL, ("prev != NULL"));
	/*
	 * In prev we may have a fragment from the same entry point that is
	 * before frent, or one that is just one position behind frent.
	 * In the latter case, we go back one step and have the predecessor.
	 * There may be none if the new fragment will be the first one.
	 */
	if (prev->fe_off > frent->fe_off) {
		prev = TAILQ_PREV(prev, pf_fragq, fr_next);
		if (prev == NULL)
			return NULL;
		KASSERT(prev->fe_off <= frent->fe_off,
		    ("prev->fe_off <= frent->fe_off"));
		return prev;
	}
	/*
	 * In prev is the first fragment of the entry point.  The offset
	 * of frag is behind it.  Find the closest previous fragment.
	 */
	for (next = TAILQ_NEXT(prev, fr_next); next != NULL;
	    next = TAILQ_NEXT(next, fr_next)) {
		if (next->fe_off > frent->fe_off)
			break;
		prev = next;
	}
	return prev;
}
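
/*
 * Worked trace of pf_frent_previous() (editor's illustration): with
 * fragments queued at offsets 100 and 8200, finding the predecessor of
 * a new fragment at offset 4200 starts at entry point 4200 / 4096 = 1,
 * which is empty, and hits the entry-point pointer for index 2 (the
 * fragment at 8200).  Since 8200 > 4200, the code steps back one list
 * position and correctly returns the fragment at offset 100, without
 * ever walking the queue from its head.
 */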

static struct pf_fragment *
pf_fillup_fragment(struct pf_fragment_cmp *key, struct pf_frent *frent,
    u_short *reason)
{
	struct pf_frent		*after, *next, *prev;
	struct pf_fragment	*frag;
	uint16_t		 total;
	int			 old_index, new_index;

	PF_FRAG_ASSERT();

	/* No empty fragments. */
	if (frent->fe_len == 0) {
		DPFPRINTF(("bad fragment: len 0\n"));
		goto bad_fragment;
	}

	/* All fragments are 8 byte aligned. */
	if (frent->fe_mff && (frent->fe_len & 0x7)) {
		DPFPRINTF(("bad fragment: mff and len %d\n", frent->fe_len));
		goto bad_fragment;
	}

	/* Respect maximum length, IP_MAXPACKET == IPV6_MAXPACKET. */
	if (frent->fe_off + frent->fe_len > IP_MAXPACKET) {
		DPFPRINTF(("bad fragment: max packet %d\n",
		    frent->fe_off + frent->fe_len));
		goto bad_fragment;
	}

	DPFPRINTF((key->frc_af == AF_INET ?
	    "reass frag %d @ %d-%d\n" : "reass frag %#08x @ %d-%d\n",
	    key->frc_id, frent->fe_off, frent->fe_off + frent->fe_len));

	/* Fully buffer all of the fragments in this fragment queue. */
	frag = pf_find_fragment(key, &V_pf_frag_tree);

	/* Create a new reassembly queue for this packet. */
	if (frag == NULL) {
		frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
		if (frag == NULL) {
			pf_flush_fragments();
			frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
			if (frag == NULL) {
				REASON_SET(reason, PFRES_MEMORY);
				goto drop_fragment;
			}
		}

		*(struct pf_fragment_cmp *)frag = *key;
		memset(frag->fr_firstoff, 0, sizeof(frag->fr_firstoff));
		memset(frag->fr_entries, 0, sizeof(frag->fr_entries));
		frag->fr_timeout = time_uptime;
		frag->fr_maxlen = frent->fe_len;
		frag->fr_holes = 1;
		TAILQ_INIT(&frag->fr_queue);

		RB_INSERT(pf_frag_tree, &V_pf_frag_tree, frag);
		TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next);

		/* We do not have a previous fragment, cannot fail. */
		pf_frent_insert(frag, frent, NULL);

		return (frag);
	}

	KASSERT(!TAILQ_EMPTY(&frag->fr_queue), ("!TAILQ_EMPTY()->fr_queue"));

	/* Remember maximum fragment len for refragmentation. */
	if (frent->fe_len > frag->fr_maxlen)
		frag->fr_maxlen = frent->fe_len;

	/* Maximum data we have seen already. */
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
	    TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;

	/* Non terminal fragments must have more fragments flag. */
	if (frent->fe_off + frent->fe_len < total && !frent->fe_mff)
		goto bad_fragment;
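
	/*
	 * Editor's illustration of the consistency checks that follow: if
	 * the final fragment has already been seen, say the queue ends at
	 * total == 4440 without mff, then a newcomer may neither extend
	 * the packet past 4440 nor claim with mff that more data follows
	 * at 4440.  Conversely, while the end is still unknown, a fragment
	 * ending exactly at total must carry mff, because the fragment
	 * already queued at that position does.
	 */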
	/* Check if we saw the last fragment already. */
	if (!TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_mff) {
		if (frent->fe_off + frent->fe_len > total ||
		    (frent->fe_off + frent->fe_len == total && frent->fe_mff))
			goto bad_fragment;
	} else {
		if (frent->fe_off + frent->fe_len == total && !frent->fe_mff)
			goto bad_fragment;
	}

	/* Find neighbors for newly inserted fragment */
	prev = pf_frent_previous(frag, frent);
	if (prev == NULL) {
		after = TAILQ_FIRST(&frag->fr_queue);
		KASSERT(after != NULL, ("after != NULL"));
	} else {
		after = TAILQ_NEXT(prev, fr_next);
	}

	if (prev != NULL && prev->fe_off + prev->fe_len > frent->fe_off) {
		uint16_t precut;

		precut = prev->fe_off + prev->fe_len - frent->fe_off;
		if (precut >= frent->fe_len)
			goto bad_fragment;
		DPFPRINTF(("overlap -%d\n", precut));
		m_adj(frent->fe_m, precut);
		frent->fe_off += precut;
		frent->fe_len -= precut;
	}

	for (; after != NULL && frent->fe_off + frent->fe_len > after->fe_off;
	    after = next) {
		uint16_t aftercut;

		aftercut = frent->fe_off + frent->fe_len - after->fe_off;
		DPFPRINTF(("adjust overlap %d\n", aftercut));
		if (aftercut < after->fe_len) {
			m_adj(after->fe_m, aftercut);
			old_index = pf_frent_index(after);
			after->fe_off += aftercut;
			after->fe_len -= aftercut;
			new_index = pf_frent_index(after);
			if (old_index != new_index) {
				DPFPRINTF(("frag index %d, new %d\n",
				    old_index, new_index));
				/* Fragment switched queue as fe_off changed */
				after->fe_off -= aftercut;
				after->fe_len += aftercut;
				/* Remove restored fragment from old queue */
				pf_frent_remove(frag, after);
				after->fe_off += aftercut;
				after->fe_len -= aftercut;
				/* Insert into correct queue */
				if (pf_frent_insert(frag, after, prev)) {
					DPFPRINTF(
					    ("fragment requeue limit exceeded\n"));
					m_freem(after->fe_m);
					uma_zfree(V_pf_frent_z, after);
					/* There is no way to recover */
					goto bad_fragment;
				}
			}
			break;
		}

		/* This fragment is completely overlapped, lose it. */
		next = TAILQ_NEXT(after, fr_next);
		pf_frent_remove(frag, after);
		m_freem(after->fe_m);
		uma_zfree(V_pf_frent_z, after);
	}

	/* If part of the queue gets too long, there is no way to recover. */
	if (pf_frent_insert(frag, frent, prev)) {
		DPFPRINTF(("fragment queue limit exceeded\n"));
		goto bad_fragment;
	}

	return (frag);

bad_fragment:
	REASON_SET(reason, PFRES_FRAG);
drop_fragment:
	uma_zfree(V_pf_frent_z, frent);
	return (NULL);
}
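
/*
 * Worked overlap example for the trimming logic above (editor's
 * illustration): with a fragment covering 0-1479 queued, a new fragment
 * claiming 1472-2959 overlaps its predecessor by
 * precut = 1480 - 1472 = 8 octets, so m_adj() strips 8 octets from its
 * front and it becomes 1480-2959.  Had it instead run into an existing
 * successor, aftercut octets would be shaved off the front of that
 * successor, and a successor that is covered completely is dropped: on
 * a front overlap the older data wins, on a rear overlap the new
 * fragment's data replaces the start of its successor.
 */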

static struct mbuf *
pf_join_fragment(struct pf_fragment *frag)
{
	struct mbuf	*m, *m2;
	struct pf_frent	*frent, *next;

	frent = TAILQ_FIRST(&frag->fr_queue);
	next = TAILQ_NEXT(frent, fr_next);

	m = frent->fe_m;
	m_adj(m, (frent->fe_hdrlen + frent->fe_len) - m->m_pkthdr.len);
	uma_zfree(V_pf_frent_z, frent);
	for (frent = next; frent != NULL; frent = next) {
		next = TAILQ_NEXT(frent, fr_next);

		m2 = frent->fe_m;
		/* Strip off ip header. */
		m_adj(m2, frent->fe_hdrlen);
		/* Strip off any trailing bytes. */
		m_adj(m2, frent->fe_len - m2->m_pkthdr.len);

		uma_zfree(V_pf_frent_z, frent);
		m_cat(m, m2);
	}

	/* Remove from fragment queue. */
	pf_remove_fragment(frag);

	return (m);
}

#ifdef INET
static int
pf_reassemble(struct mbuf **m0, int dir, u_short *reason)
{
	struct mbuf		*m = *m0;
	struct ip		*ip = mtod(m, struct ip *);
	struct pf_frent		*frent;
	struct pf_fragment	*frag;
	struct pf_fragment_cmp	 key;
	uint16_t		 total, hdrlen;

	/* Get an entry for the fragment queue */
	if ((frent = pf_create_fragment(reason)) == NULL)
		return (PF_DROP);

	frent->fe_m = m;
	frent->fe_hdrlen = ip->ip_hl << 2;
	frent->fe_extoff = 0;
	frent->fe_len = ntohs(ip->ip_len) - (ip->ip_hl << 2);
	frent->fe_off = (ntohs(ip->ip_off) & IP_OFFMASK) << 3;
	frent->fe_mff = ntohs(ip->ip_off) & IP_MF;

	pf_ip2key(ip, dir, &key);

	if ((frag = pf_fillup_fragment(&key, frent, reason)) == NULL)
		return (PF_DROP);

	/* The mbuf is part of the fragment entry, no direct free or access */
	m = *m0 = NULL;

	if (frag->fr_holes) {
		DPFPRINTF(("frag %d, holes %d\n", frag->fr_id, frag->fr_holes));
		return (PF_PASS);	/* drop because *m0 is NULL, no error */
	}

	/* We have all the data */
	frent = TAILQ_FIRST(&frag->fr_queue);
	KASSERT(frent != NULL, ("frent != NULL"));
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
	    TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
	hdrlen = frent->fe_hdrlen;

	m = *m0 = pf_join_fragment(frag);
	frag = NULL;

	if (m->m_flags & M_PKTHDR) {
		int plen = 0;
		for (m = *m0; m; m = m->m_next)
			plen += m->m_len;
		m = *m0;
		m->m_pkthdr.len = plen;
	}

	ip = mtod(m, struct ip *);
	ip->ip_sum = pf_cksum_fixup(ip->ip_sum, ip->ip_len,
	    htons(hdrlen + total), 0);
	ip->ip_len = htons(hdrlen + total);
	ip->ip_sum = pf_cksum_fixup(ip->ip_sum, ip->ip_off,
	    ip->ip_off & ~(IP_MF|IP_OFFMASK), 0);
	ip->ip_off &= ~(IP_MF|IP_OFFMASK);

	if (hdrlen + total > IP_MAXPACKET) {
		DPFPRINTF(("drop: too big: %d\n", total));
		ip->ip_len = 0;
		REASON_SET(reason, PFRES_SHORT);
		/* PF_DROP requires a valid mbuf *m0 in pf_test() */
		return (PF_DROP);
	}

	DPFPRINTF(("complete: %p(%d)\n", m, ntohs(ip->ip_len)));
	return (PF_PASS);
}
#endif	/* INET */
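
/*
 * Note on the header fixup above (editor's addition): rather than
 * recomputing the IPv4 header checksum from scratch, pf_cksum_fixup()
 * folds in only the words that changed, per the incremental update
 * technique of RFC 1624, once for the rewritten ip_len and once for
 * ip_off after IP_MF and the offset bits are cleared.  The convention
 * of returning PF_PASS with *m0 set to NULL tells the caller that the
 * fragment was consumed into the queue and no packet should proceed yet.
 */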

#ifdef INET6
static int
pf_reassemble6(struct mbuf **m0, struct ip6_frag *fraghdr,
    uint16_t hdrlen, uint16_t extoff, u_short *reason)
{
	struct mbuf		*m = *m0;
	struct ip6_hdr		*ip6 = mtod(m, struct ip6_hdr *);
	struct pf_frent		*frent;
	struct pf_fragment	*frag;
	struct pf_fragment_cmp	 key;
	struct m_tag		*mtag;
	struct pf_fragment_tag	*ftag;
	int			 off;
	uint32_t		 frag_id;
	uint16_t		 total, maxlen;
	uint8_t			 proto;

	PF_FRAG_LOCK();

	/* Get an entry for the fragment queue. */
	if ((frent = pf_create_fragment(reason)) == NULL) {
		PF_FRAG_UNLOCK();
		return (PF_DROP);
	}

	frent->fe_m = m;
	frent->fe_hdrlen = hdrlen;
	frent->fe_extoff = extoff;
	frent->fe_len = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - hdrlen;
	frent->fe_off = ntohs(fraghdr->ip6f_offlg & IP6F_OFF_MASK);
	frent->fe_mff = fraghdr->ip6f_offlg & IP6F_MORE_FRAG;

	key.frc_src.v6 = ip6->ip6_src;
	key.frc_dst.v6 = ip6->ip6_dst;
	key.frc_af = AF_INET6;
	/* Only the first fragment's protocol is relevant. */
	key.frc_proto = 0;
	key.frc_id = fraghdr->ip6f_ident;

	if ((frag = pf_fillup_fragment(&key, frent, reason)) == NULL) {
		PF_FRAG_UNLOCK();
		return (PF_DROP);
	}

	/* The mbuf is part of the fragment entry, no direct free or access. */
	m = *m0 = NULL;

	if (frag->fr_holes) {
		DPFPRINTF(("frag %d, holes %d\n", frag->fr_id,
		    frag->fr_holes));
		PF_FRAG_UNLOCK();
		return (PF_PASS);	/* Drop because *m0 is NULL, no error. */
	}

	/* We have all the data. */
	frent = TAILQ_FIRST(&frag->fr_queue);
	KASSERT(frent != NULL, ("frent != NULL"));
	extoff = frent->fe_extoff;
	maxlen = frag->fr_maxlen;
	frag_id = frag->fr_id;
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
	    TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
	hdrlen = frent->fe_hdrlen - sizeof(struct ip6_frag);

	m = *m0 = pf_join_fragment(frag);
	frag = NULL;

	PF_FRAG_UNLOCK();

	/* Take protocol from first fragment header. */
	m = m_getptr(m, hdrlen + offsetof(struct ip6_frag, ip6f_nxt), &off);
	KASSERT(m, ("%s: short mbuf chain", __func__));
	proto = *(mtod(m, uint8_t *) + off);
	m = *m0;

	/* Delete frag6 header */
	if (ip6_deletefraghdr(m, hdrlen, M_NOWAIT) != 0)
		goto fail;

	if (m->m_flags & M_PKTHDR) {
		int plen = 0;
		for (m = *m0; m; m = m->m_next)
			plen += m->m_len;
		m = *m0;
		m->m_pkthdr.len = plen;
	}

	if ((mtag = m_tag_get(PACKET_TAG_PF_REASSEMBLED,
	    sizeof(struct pf_fragment_tag), M_NOWAIT)) == NULL)
		goto fail;
	ftag = (struct pf_fragment_tag *)(mtag + 1);
	ftag->ft_hdrlen = hdrlen;
	ftag->ft_extoff = extoff;
	ftag->ft_maxlen = maxlen;
	ftag->ft_id = frag_id;
	m_tag_prepend(m, mtag);

	ip6 = mtod(m, struct ip6_hdr *);
	ip6->ip6_plen = htons(hdrlen - sizeof(struct ip6_hdr) + total);
	if (extoff) {
		/* Write protocol into next field of last extension header. */
		m = m_getptr(m, extoff + offsetof(struct ip6_ext, ip6e_nxt),
		    &off);
		KASSERT(m, ("%s: short mbuf chain", __func__));
		*(mtod(m, char *) + off) = proto;
		m = *m0;
	} else
		ip6->ip6_nxt = proto;

	if (hdrlen - sizeof(struct ip6_hdr) + total > IPV6_MAXPACKET) {
		DPFPRINTF(("drop: too big: %d\n", total));
		ip6->ip6_plen = 0;
		REASON_SET(reason, PFRES_SHORT);
		/* PF_DROP requires a valid mbuf *m0 in pf_test6(). */
		return (PF_DROP);
	}

	DPFPRINTF(("complete: %p(%d)\n", m, ntohs(ip6->ip6_plen)));
	return (PF_PASS);

fail:
	REASON_SET(reason, PFRES_MEMORY);
	/* PF_DROP requires a valid mbuf *m0 in pf_test6(), will free later. */
	return (PF_DROP);
}
#endif	/* INET6 */
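
/*
 * Editor's note on the reassembly tag: pf_reassemble6() records the
 * original header length, last extension header offset, largest
 * fragment payload and fragment id in a PACKET_TAG_PF_REASSEMBLED
 * m_tag.  After filtering, pf_max_frag_size() and pf_refragment6()
 * below read that tag back so the packet can be split again into
 * fragments no larger than the ones that actually arrived, keeping the
 * reassembly transparent to the end hosts.
 */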

#ifdef INET6
int
pf_max_frag_size(struct mbuf *m)
{
	struct m_tag		*tag;
	struct pf_fragment_tag	*ftag;

	tag = m_tag_find(m, PACKET_TAG_PF_REASSEMBLED, NULL);
	if (tag == NULL)
		return (m->m_pkthdr.len);

	ftag = (struct pf_fragment_tag *)(tag + 1);

	return (ftag->ft_maxlen);
}

int
pf_refragment6(struct ifnet *ifp, struct mbuf **m0, struct m_tag *mtag,
    struct ifnet *rt, bool forward)
{
	struct mbuf		*m = *m0, *t;
	struct ip6_hdr		*hdr;
	struct pf_fragment_tag	*ftag = (struct pf_fragment_tag *)(mtag + 1);
	struct pf_pdesc		 pd;
	uint32_t		 frag_id;
	uint16_t		 hdrlen, extoff, maxlen;
	uint8_t			 proto;
	int			 error, action;

	hdrlen = ftag->ft_hdrlen;
	extoff = ftag->ft_extoff;
	maxlen = ftag->ft_maxlen;
	frag_id = ftag->ft_id;
	m_tag_delete(m, mtag);
	mtag = NULL;
	ftag = NULL;

	if (extoff) {
		int off;

		/* Use protocol from next field of last extension header */
		m = m_getptr(m, extoff + offsetof(struct ip6_ext, ip6e_nxt),
		    &off);
		KASSERT((m != NULL), ("pf_refragment6: short mbuf chain"));
		proto = *(mtod(m, uint8_t *) + off);
		*(mtod(m, char *) + off) = IPPROTO_FRAGMENT;
		m = *m0;
	} else {
		hdr = mtod(m, struct ip6_hdr *);
		proto = hdr->ip6_nxt;
		hdr->ip6_nxt = IPPROTO_FRAGMENT;
	}

	/* In case of link-local traffic we'll need a scope set. */
	hdr = mtod(m, struct ip6_hdr *);

	in6_setscope(&hdr->ip6_src, ifp, NULL);
	in6_setscope(&hdr->ip6_dst, ifp, NULL);

	/* The MTU must be a multiple of 8 bytes, or we risk doing the
	 * fragmentation wrong. */
	maxlen = maxlen & ~7;

	/*
	 * Maxlen may be less than 8 if there was only a single
	 * fragment.  As it was fragmented before, add a fragment
	 * header also for a single fragment.  If total or maxlen
	 * is less than 8, ip6_fragment() will return EMSGSIZE and
	 * we drop the packet.
	 */
	error = ip6_fragment(ifp, m, hdrlen, proto, maxlen, frag_id);
	m = (*m0)->m_nextpkt;
	(*m0)->m_nextpkt = NULL;
	if (error == 0) {
		/* The first mbuf contains the unfragmented packet. */
		m_freem(*m0);
		*m0 = NULL;
		action = PF_PASS;
	} else {
		/* Drop expects an mbuf to free. */
		DPFPRINTF(("refragment error %d\n", error));
		action = PF_DROP;
	}
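
	/*
	 * Editor's note: ip6_fragment() leaves the fragments chained via
	 * m_nextpkt.  The loop below unlinks each one and hands it to one
	 * of three paths: nd6_output_ifp() when a route-to interface was
	 * given, ip6_forward() for forwarded traffic, or ip6_output() for
	 * locally generated packets.  M_SKIP_FIREWALL keeps the fragments
	 * from being run through pf again on the way out.
	 */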
	for (; m; m = t) {
		t = m->m_nextpkt;
		m->m_nextpkt = NULL;
		m->m_flags |= M_SKIP_FIREWALL;
		memset(&pd, 0, sizeof(pd));
		pd.pf_mtag = pf_find_mtag(m);
		if (error != 0) {
			m_freem(m);
			continue;
		}
		if (rt != NULL) {
			struct sockaddr_in6 dst;
			hdr = mtod(m, struct ip6_hdr *);

			bzero(&dst, sizeof(dst));
			dst.sin6_family = AF_INET6;
			dst.sin6_len = sizeof(dst);
			dst.sin6_addr = hdr->ip6_dst;

			nd6_output_ifp(rt, rt, m, &dst, NULL);
		} else if (forward) {
			MPASS(m->m_pkthdr.rcvif != NULL);
			ip6_forward(m, 0);
		} else {
			(void)ip6_output(m, NULL, NULL, 0, NULL, NULL,
			    NULL);
		}
	}

	return (action);
}
#endif /* INET6 */

#ifdef INET
int
pf_normalize_ip(struct mbuf **m0, u_short *reason,
    struct pf_pdesc *pd)
{
	struct pf_krule	*r;
	struct ip	*h = mtod(*m0, struct ip *);
	int		 mff = (ntohs(h->ip_off) & IP_MF);
	int		 hlen = h->ip_hl << 2;
	u_int16_t	 fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
	u_int16_t	 max;
	int		 ip_len;
	int		 tag = -1;
	int		 verdict;
	bool		 scrub_compat;

	PF_RULES_RASSERT();

	MPASS(pd->m == *m0);

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	/*
	 * Check if there are any scrub rules, matching or not.
	 * Lack of scrub rules means:
	 *  - enforced packet normalization operation just like in OpenBSD
	 *  - fragment reassembly depends on V_pf_status.reass
	 * With scrub rules:
	 *  - packet normalization is performed if there is a matching scrub rule
	 *  - fragment reassembly is performed if the matching rule has no
	 *    PFRULE_FRAGMENT_NOREASS flag
	 */
	scrub_compat = (r != NULL);
	while (r != NULL) {
		pf_counter_u64_add(&r->evaluations, 1);
		if (pfi_kkif_match(r->kif, pd->kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP];
		else if (r->direction && r->direction != pd->dir)
			r = r->skip[PF_SKIP_DIR];
		else if (r->af && r->af != AF_INET)
			r = r->skip[PF_SKIP_AF];
		else if (r->proto && r->proto != h->ip_p)
			r = r->skip[PF_SKIP_PROTO];
		else if (PF_MISMATCHAW(&r->src.addr,
		    (struct pf_addr *)&h->ip_src.s_addr, AF_INET,
		    r->src.neg, pd->kif, M_GETFIB(pd->m)))
			r = r->skip[PF_SKIP_SRC_ADDR];
		else if (PF_MISMATCHAW(&r->dst.addr,
		    (struct pf_addr *)&h->ip_dst.s_addr, AF_INET,
		    r->dst.neg, NULL, M_GETFIB(pd->m)))
			r = r->skip[PF_SKIP_DST_ADDR];
		else if (r->match_tag && !pf_match_tag(pd->m, r, &tag,
		    pd->pf_mtag ? pd->pf_mtag->tag : 0))
			r = TAILQ_NEXT(r, entries);
		else
			break;
	}
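
	/*
	 * Editor's note on the loop above: the r->skip[] pointers are pf's
	 * usual skip-step shortcut.  When a criterion such as the address
	 * family fails, evaluation jumps directly past every consecutive
	 * rule sharing that same criterion instead of testing them one by
	 * one, so a long scrub ruleset is scanned in far fewer steps.
	 */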

	if (scrub_compat) {
		/* With scrub rules present IPv4 normalization happens only
		 * if one of the rules has matched and it's not a "no scrub"
		 * rule */
		if (r == NULL || r->action == PF_NOSCRUB)
			return (PF_PASS);

		pf_counter_u64_critical_enter();
		pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1);
		pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT], pd->tot_len);
		pf_counter_u64_critical_exit();
		pf_rule_to_actions(r, &pd->act);
	}

	/* Check for illegal packets */
	if (hlen < (int)sizeof(struct ip)) {
		REASON_SET(reason, PFRES_NORM);
		goto drop;
	}

	if (hlen > ntohs(h->ip_len)) {
		REASON_SET(reason, PFRES_NORM);
		goto drop;
	}

	/* Clear IP_DF if the rule uses the no-df option or we're in no-df mode */
	if (((!scrub_compat && V_pf_status.reass & PF_REASS_NODF) ||
	    (r != NULL && r->rule_flag & PFRULE_NODF)) &&
	    (h->ip_off & htons(IP_DF))
	) {
		u_int16_t ip_off = h->ip_off;

		h->ip_off &= htons(~IP_DF);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
	}

	/* We will need other tests here */
	if (!fragoff && !mff)
		goto no_fragment;

	/* We're dealing with a fragment now. Don't allow fragments
	 * with IP_DF to enter the cache. If the flag was cleared by
	 * no-df above, fine. Otherwise drop it.
	 */
	if (h->ip_off & htons(IP_DF)) {
		DPFPRINTF(("IP_DF\n"));
		goto bad;
	}

	ip_len = ntohs(h->ip_len) - hlen;

	/* All fragments are 8 byte aligned */
	if (mff && (ip_len & 0x7)) {
		DPFPRINTF(("mff and %d\n", ip_len));
		goto bad;
	}

	/* Respect maximum length */
	if (fragoff + ip_len > IP_MAXPACKET) {
		DPFPRINTF(("max packet %d\n", fragoff + ip_len));
		goto bad;
	}

	if ((!scrub_compat && V_pf_status.reass) ||
	    (r != NULL && !(r->rule_flag & PFRULE_FRAGMENT_NOREASS))
	) {
		max = fragoff + ip_len;

		/* Fully buffer all of the fragments
		 * Might return a completely reassembled mbuf, or NULL */
		PF_FRAG_LOCK();
		DPFPRINTF(("reass frag %d @ %d-%d\n", h->ip_id, fragoff, max));
		verdict = pf_reassemble(m0, pd->dir, reason);
		PF_FRAG_UNLOCK();

		if (verdict != PF_PASS)
			return (PF_DROP);

		pd->m = *m0;
		if (pd->m == NULL)
			return (PF_DROP);

		h = mtod(pd->m, struct ip *);

no_fragment:
		/* At this point, only IP_DF is allowed in ip_off */
		if (h->ip_off & ~htons(IP_DF)) {
			u_int16_t ip_off = h->ip_off;

			h->ip_off &= htons(IP_DF);
			h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
		}
	}

	return (PF_PASS);

bad:
	DPFPRINTF(("dropping bad fragment\n"));
	REASON_SET(reason, PFRES_FRAG);
drop:
	if (r != NULL && r->log)
		PFLOG_PACKET(PF_DROP, *reason, r, NULL, NULL, pd, 1);

	return (PF_DROP);
}
#endif
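
/*
 * Editor's note: IPv6 needs a separate normalization path because the
 * base header carries no fragmentation state at all; there is no DF bit
 * to clear and no offset field to sanity-check.  Fragment offset, id
 * and the more-fragments flag all live in a dedicated extension header,
 * which pf_normalize_ip6() pulls out below before deciding whether to
 * reassemble.
 */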

#ifdef INET6
int
pf_normalize_ip6(struct mbuf **m0, int off, u_short *reason,
    struct pf_pdesc *pd)
{
	struct pf_krule	*r;
	struct ip6_frag	 frag;
	bool		 scrub_compat;

	PF_RULES_RASSERT();

	pd->m = *m0;

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	/*
	 * Check if there are any scrub rules, matching or not.
	 * Lack of scrub rules means:
	 *  - enforced packet normalization operation just like in OpenBSD
	 * With scrub rules:
	 *  - packet normalization is performed if there is a matching scrub rule
	 * XXX: Fragment reassembly always performed for IPv6!
	 */
	scrub_compat = (r != NULL);
	while (r != NULL) {
		pf_counter_u64_add(&r->evaluations, 1);
		if (pfi_kkif_match(r->kif, pd->kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP];
		else if (r->direction && r->direction != pd->dir)
			r = r->skip[PF_SKIP_DIR];
		else if (r->af && r->af != AF_INET6)
			r = r->skip[PF_SKIP_AF];
		else if (r->proto && r->proto != pd->proto)
			r = r->skip[PF_SKIP_PROTO];
		else if (PF_MISMATCHAW(&r->src.addr,
		    (struct pf_addr *)&pd->src, AF_INET6,
		    r->src.neg, pd->kif, M_GETFIB(pd->m)))
			r = r->skip[PF_SKIP_SRC_ADDR];
		else if (PF_MISMATCHAW(&r->dst.addr,
		    (struct pf_addr *)&pd->dst, AF_INET6,
		    r->dst.neg, NULL, M_GETFIB(pd->m)))
			r = r->skip[PF_SKIP_DST_ADDR];
		else
			break;
	}

	if (scrub_compat) {
		/* With scrub rules present IPv6 normalization happens only
		 * if one of the rules has matched and it's not a "no scrub"
		 * rule */
		if (r == NULL || r->action == PF_NOSCRUB)
			return (PF_PASS);

		pf_counter_u64_critical_enter();
		pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1);
		pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT], pd->tot_len);
		pf_counter_u64_critical_exit();
		pf_rule_to_actions(r, &pd->act);
	}

	if (!pf_pull_hdr(pd->m, off, &frag, sizeof(frag), NULL, reason, AF_INET6))
		return (PF_DROP);

	/* Offset now points to data portion. */
	off += sizeof(frag);

	if (pd->virtual_proto == PF_VPROTO_FRAGMENT) {
		/* Returns PF_DROP or *m0 is NULL or completely reassembled
		 * mbuf. */
		if (pf_reassemble6(m0, &frag, off, pd->extoff, reason) != PF_PASS)
			return (PF_DROP);
		pd->m = *m0;
		if (pd->m == NULL)
			return (PF_DROP);
	}

	return (PF_PASS);
}
#endif /* INET6 */
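
/*
 * Editor's summary of the TCP flag sanitization below: a SYN may carry
 * neither RST nor FIN; a segment without SYN must carry at least one of
 * ACK or RST; and FIN, PUSH and URG are only meaningful together with
 * ACK.  For example, the nmap "Xmas" probe (FIN|PSH|URG with no ACK)
 * trips both of the latter rules and is dropped before it can reach
 * state tracking.
 */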

int
pf_normalize_tcp(struct pf_pdesc *pd)
{
	struct pf_krule	*r, *rm = NULL;
	struct tcphdr	*th = &pd->hdr.tcp;
	int		 rewrite = 0;
	u_short		 reason;
	u_int16_t	 flags;
	sa_family_t	 af = pd->af;
	int		 srs;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	/* Check if there are any scrub rules. Lack of scrub rules means
	 * enforced packet normalization operation just like in OpenBSD. */
	srs = (r != NULL);
	while (r != NULL) {
		pf_counter_u64_add(&r->evaluations, 1);
		if (pfi_kkif_match(r->kif, pd->kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP];
		else if (r->direction && r->direction != pd->dir)
			r = r->skip[PF_SKIP_DIR];
		else if (r->af && r->af != af)
			r = r->skip[PF_SKIP_AF];
		else if (r->proto && r->proto != pd->proto)
			r = r->skip[PF_SKIP_PROTO];
		else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
		    r->src.neg, pd->kif, M_GETFIB(pd->m)))
			r = r->skip[PF_SKIP_SRC_ADDR];
		else if (r->src.port_op && !pf_match_port(r->src.port_op,
		    r->src.port[0], r->src.port[1], th->th_sport))
			r = r->skip[PF_SKIP_SRC_PORT];
		else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
		    r->dst.neg, NULL, M_GETFIB(pd->m)))
			r = r->skip[PF_SKIP_DST_ADDR];
		else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
		    r->dst.port[0], r->dst.port[1], th->th_dport))
			r = r->skip[PF_SKIP_DST_PORT];
		else if (r->os_fingerprint != PF_OSFP_ANY && !pf_osfp_match(
		    pf_osfp_fingerprint(pd, th),
		    r->os_fingerprint))
			r = TAILQ_NEXT(r, entries);
		else {
			rm = r;
			break;
		}
	}

	if (srs) {
		/* With scrub rules present TCP normalization happens only
		 * if one of the rules has matched and it's not a "no scrub"
		 * rule */
		if (rm == NULL || rm->action == PF_NOSCRUB)
			return (PF_PASS);

		pf_counter_u64_critical_enter();
		pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1);
		pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT], pd->tot_len);
		pf_counter_u64_critical_exit();
		pf_rule_to_actions(rm, &pd->act);
	}

	if (rm && rm->rule_flag & PFRULE_REASSEMBLE_TCP)
		pd->flags |= PFDESC_TCP_NORM;

	flags = tcp_get_flags(th);
	if (flags & TH_SYN) {
		/* Illegal packet */
		if (flags & TH_RST)
			goto tcp_drop;

		if (flags & TH_FIN)
			goto tcp_drop;
	} else {
		/* Illegal packet */
		if (!(flags & (TH_ACK|TH_RST)))
			goto tcp_drop;
	}

	if (!(flags & TH_ACK)) {
		/* These flags are only valid if ACK is set */
		if ((flags & TH_FIN) || (flags & TH_PUSH) || (flags & TH_URG))
			goto tcp_drop;
	}

	/* Check for illegal header length */
	if (th->th_off < (sizeof(struct tcphdr) >> 2))
		goto tcp_drop;

	/* If flags changed, or reserved data set, then adjust */
	if (flags != tcp_get_flags(th) ||
	    (tcp_get_flags(th) & (TH_RES1|TH_RES2|TH_RES3)) != 0) {
		u_int16_t ov, nv;

		ov = *(u_int16_t *)(&th->th_ack + 1);
		flags &= ~(TH_RES1 | TH_RES2 | TH_RES3);
		tcp_set_flags(th, flags);
		nv = *(u_int16_t *)(&th->th_ack + 1);

		th->th_sum = pf_proto_cksum_fixup(pd->m, th->th_sum, ov, nv, 0);
		rewrite = 1;
	}

	/* Remove urgent pointer, if TH_URG is not set */
	if (!(flags & TH_URG) && th->th_urp) {
		th->th_sum = pf_proto_cksum_fixup(pd->m, th->th_sum, th->th_urp,
		    0, 0);
		th->th_urp = 0;
		rewrite = 1;
	}

	/* copy back packet headers if we sanitized */
	if (rewrite)
		m_copyback(pd->m, pd->off, sizeof(*th), (caddr_t)th);

	return (PF_PASS);

tcp_drop:
	REASON_SET(&reason, PFRES_NORM);
	if (rm != NULL && r->log)
		PFLOG_PACKET(PF_DROP, reason, r, NULL, NULL, pd, 1);
	return (PF_DROP);
}
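
/*
 * Editor's note on the ov/nv trick above: the 16-bit word starting
 * right after th_ack holds the data offset, the reserved bits and the
 * flag bits.  Snapshotting that word before and after clearing the
 * reserved bits lets a single incremental pf_proto_cksum_fixup() call
 * repair the TCP checksum, instead of recomputing it over the whole
 * segment.
 */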

int
pf_normalize_tcp_init(struct pf_pdesc *pd, struct tcphdr *th,
    struct pf_state_peer *src, struct pf_state_peer *dst)
{
	u_int32_t tsval, tsecr;
	u_int8_t hdr[60];
	u_int8_t *opt;

	KASSERT((src->scrub == NULL),
	    ("pf_normalize_tcp_init: src->scrub != NULL"));

	src->scrub = uma_zalloc(V_pf_state_scrub_z, M_ZERO | M_NOWAIT);
	if (src->scrub == NULL)
		return (1);

	switch (pd->af) {
#ifdef INET
	case AF_INET: {
		struct ip *h = mtod(pd->m, struct ip *);
		src->scrub->pfss_ttl = h->ip_ttl;
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		struct ip6_hdr *h = mtod(pd->m, struct ip6_hdr *);
		src->scrub->pfss_ttl = h->ip6_hlim;
		break;
	}
#endif /* INET6 */
	}

	/*
	 * All normalizations below are only begun if we see the start of
	 * the connection.  They must all set an enabled bit in pfss_flags
	 */
	if ((tcp_get_flags(th) & TH_SYN) == 0)
		return (0);

	if (th->th_off > (sizeof(struct tcphdr) >> 2) && src->scrub &&
	    pf_pull_hdr(pd->m, pd->off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
		/* Diddle with TCP options */
		int hlen;
		opt = hdr + sizeof(struct tcphdr);
		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
		while (hlen >= TCPOLEN_TIMESTAMP) {
			switch (*opt) {
			case TCPOPT_EOL:	/* FALLTHROUGH */
			case TCPOPT_NOP:
				opt++;
				hlen--;
				break;
			case TCPOPT_TIMESTAMP:
				if (opt[1] >= TCPOLEN_TIMESTAMP) {
					src->scrub->pfss_flags |=
					    PFSS_TIMESTAMP;
					src->scrub->pfss_ts_mod =
					    htonl(arc4random());

					/* note PFSS_PAWS not set yet */
					memcpy(&tsval, &opt[2],
					    sizeof(u_int32_t));
					memcpy(&tsecr, &opt[6],
					    sizeof(u_int32_t));
					src->scrub->pfss_tsval0 = ntohl(tsval);
					src->scrub->pfss_tsval = ntohl(tsval);
					src->scrub->pfss_tsecr = ntohl(tsecr);
					getmicrouptime(&src->scrub->pfss_last);
				}
				/* FALLTHROUGH */
			default:
				hlen -= MAX(opt[1], 2);
				opt += MAX(opt[1], 2);
				break;
			}
		}
	}

	return (0);
}

void
pf_normalize_tcp_cleanup(struct pf_kstate *state)
{
	/* XXX Note: this also cleans up SCTP. */
	uma_zfree(V_pf_state_scrub_z, state->src.scrub);
	uma_zfree(V_pf_state_scrub_z, state->dst.scrub);

	/* Someday... flush the TCP segment reassembly descriptors. */
}

int
pf_normalize_sctp_init(struct pf_pdesc *pd, struct pf_state_peer *src,
    struct pf_state_peer *dst)
{
	src->scrub = uma_zalloc(V_pf_state_scrub_z, M_ZERO | M_NOWAIT);
	if (src->scrub == NULL)
		return (1);

	dst->scrub = uma_zalloc(V_pf_state_scrub_z, M_ZERO | M_NOWAIT);
	if (dst->scrub == NULL) {
		uma_zfree(V_pf_state_scrub_z, src->scrub);
		return (1);
	}

	dst->scrub->pfss_v_tag = pd->sctp_initiate_tag;

	return (0);
}
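
/*
 * Editor's illustration of the timestamp modulation set up in
 * pf_normalize_tcp_init(): each side gets a random 32-bit pfss_ts_mod.
 * If a host sends TSval 1000 and its ts_mod is 5000, the wire carries
 * 6000; when the peer echoes 6000, the stateful code below subtracts
 * the same ts_mod before the segment is passed back in.  The host's
 * real timestamp clock, which can reveal uptime, never crosses the
 * firewall unmodified.
 */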
1557 */ 1558 switch (pd->af) { 1559 #ifdef INET 1560 case AF_INET: { 1561 if (src->scrub) { 1562 struct ip *h = mtod(pd->m, struct ip *); 1563 if (h->ip_ttl > src->scrub->pfss_ttl) 1564 src->scrub->pfss_ttl = h->ip_ttl; 1565 h->ip_ttl = src->scrub->pfss_ttl; 1566 } 1567 break; 1568 } 1569 #endif /* INET */ 1570 #ifdef INET6 1571 case AF_INET6: { 1572 if (src->scrub) { 1573 struct ip6_hdr *h = mtod(pd->m, struct ip6_hdr *); 1574 if (h->ip6_hlim > src->scrub->pfss_ttl) 1575 src->scrub->pfss_ttl = h->ip6_hlim; 1576 h->ip6_hlim = src->scrub->pfss_ttl; 1577 } 1578 break; 1579 } 1580 #endif /* INET6 */ 1581 } 1582 1583 if (th->th_off > (sizeof(struct tcphdr) >> 2) && 1584 ((src->scrub && (src->scrub->pfss_flags & PFSS_TIMESTAMP)) || 1585 (dst->scrub && (dst->scrub->pfss_flags & PFSS_TIMESTAMP))) && 1586 pf_pull_hdr(pd->m, pd->off, hdr, th->th_off << 2, NULL, NULL, pd->af)) { 1587 /* Diddle with TCP options */ 1588 int hlen; 1589 opt = hdr + sizeof(struct tcphdr); 1590 hlen = (th->th_off << 2) - sizeof(struct tcphdr); 1591 while (hlen >= TCPOLEN_TIMESTAMP) { 1592 startoff = opt - (hdr + sizeof(struct tcphdr)); 1593 switch (*opt) { 1594 case TCPOPT_EOL: /* FALLTHROUGH */ 1595 case TCPOPT_NOP: 1596 opt++; 1597 hlen--; 1598 break; 1599 case TCPOPT_TIMESTAMP: 1600 /* Modulate the timestamps. Can be used for 1601 * NAT detection, OS uptime determination or 1602 * reboot detection. 1603 */ 1604 1605 if (got_ts) { 1606 /* Huh? Multiple timestamps!? */ 1607 if (V_pf_status.debug >= PF_DEBUG_MISC) { 1608 DPFPRINTF(("multiple TS??\n")); 1609 pf_print_state(state); 1610 printf("\n"); 1611 } 1612 REASON_SET(reason, PFRES_TS); 1613 return (PF_DROP); 1614 } 1615 if (opt[1] >= TCPOLEN_TIMESTAMP) { 1616 memcpy(&tsval, &opt[2], 1617 sizeof(u_int32_t)); 1618 if (tsval && src->scrub && 1619 (src->scrub->pfss_flags & 1620 PFSS_TIMESTAMP)) { 1621 tsval = ntohl(tsval); 1622 pf_patch_32_unaligned(pd->m, 1623 &th->th_sum, 1624 &opt[2], 1625 htonl(tsval + 1626 src->scrub->pfss_ts_mod), 1627 PF_ALGNMNT(startoff), 1628 0); 1629 copyback = 1; 1630 } 1631 1632 /* Modulate TS reply iff valid (!0) */ 1633 memcpy(&tsecr, &opt[6], 1634 sizeof(u_int32_t)); 1635 if (tsecr && dst->scrub && 1636 (dst->scrub->pfss_flags & 1637 PFSS_TIMESTAMP)) { 1638 tsecr = ntohl(tsecr) 1639 - dst->scrub->pfss_ts_mod; 1640 pf_patch_32_unaligned(pd->m, 1641 &th->th_sum, 1642 &opt[6], 1643 htonl(tsecr), 1644 PF_ALGNMNT(startoff), 1645 0); 1646 copyback = 1; 1647 } 1648 got_ts = 1; 1649 } 1650 /* FALLTHROUGH */ 1651 default: 1652 hlen -= MAX(opt[1], 2); 1653 opt += MAX(opt[1], 2); 1654 break; 1655 } 1656 } 1657 if (copyback) { 1658 /* Copyback the options, caller copys back header */ 1659 *writeback = 1; 1660 m_copyback(pd->m, pd->off + sizeof(struct tcphdr), 1661 (th->th_off << 2) - sizeof(struct tcphdr), hdr + 1662 sizeof(struct tcphdr)); 1663 } 1664 } 1665 1666 /* 1667 * Must invalidate PAWS checks on connections idle for too long. 1668 * The fastest allowed timestamp clock is 1ms. That turns out to 1669 * be about 24 days before it wraps. 
XXX Right now our lowerbound 1670 * TS echo check only works for the first 12 days of a connection 1671 * when the TS has exhausted half its 32bit space 1672 */ 1673 #define TS_MAX_IDLE (24*24*60*60) 1674 #define TS_MAX_CONN (12*24*60*60) /* XXX remove when better tsecr check */ 1675 1676 getmicrouptime(&uptime); 1677 if (src->scrub && (src->scrub->pfss_flags & PFSS_PAWS) && 1678 (uptime.tv_sec - src->scrub->pfss_last.tv_sec > TS_MAX_IDLE || 1679 time_uptime - (state->creation / 1000) > TS_MAX_CONN)) { 1680 if (V_pf_status.debug >= PF_DEBUG_MISC) { 1681 DPFPRINTF(("src idled out of PAWS\n")); 1682 pf_print_state(state); 1683 printf("\n"); 1684 } 1685 src->scrub->pfss_flags = (src->scrub->pfss_flags & ~PFSS_PAWS) 1686 | PFSS_PAWS_IDLED; 1687 } 1688 if (dst->scrub && (dst->scrub->pfss_flags & PFSS_PAWS) && 1689 uptime.tv_sec - dst->scrub->pfss_last.tv_sec > TS_MAX_IDLE) { 1690 if (V_pf_status.debug >= PF_DEBUG_MISC) { 1691 DPFPRINTF(("dst idled out of PAWS\n")); 1692 pf_print_state(state); 1693 printf("\n"); 1694 } 1695 dst->scrub->pfss_flags = (dst->scrub->pfss_flags & ~PFSS_PAWS) 1696 | PFSS_PAWS_IDLED; 1697 } 1698 1699 if (got_ts && src->scrub && dst->scrub && 1700 (src->scrub->pfss_flags & PFSS_PAWS) && 1701 (dst->scrub->pfss_flags & PFSS_PAWS)) { 1702 /* Validate that the timestamps are "in-window". 1703 * RFC1323 describes TCP Timestamp options that allow 1704 * measurement of RTT (round trip time) and PAWS 1705 * (protection against wrapped sequence numbers). PAWS 1706 * gives us a set of rules for rejecting packets on 1707 * long fat pipes (packets that were somehow delayed 1708 * in transit longer than the time it took to send the 1709 * full TCP sequence space of 4Gb). We can use these 1710 * rules and infer a few others that will let us treat 1711 * the 32bit timestamp and the 32bit echoed timestamp 1712 * as sequence numbers to prevent a blind attacker from 1713 * inserting packets into a connection. 1714 * 1715 * RFC1323 tells us: 1716 * - The timestamp on this packet must be greater than 1717 * or equal to the last value echoed by the other 1718 * endpoint. The RFC says those will be discarded 1719 * since it is a dup that has already been acked. 1720 * This gives us a lowerbound on the timestamp. 1721 * timestamp >= other last echoed timestamp 1722 * - The timestamp will be less than or equal to 1723 * the last timestamp plus the time between the 1724 * last packet and now. The RFC defines the max 1725 * clock rate as 1ms. We will allow clocks to be 1726 * up to 10% fast and will allow a total difference 1727 * or 30 seconds due to a route change. And this 1728 * gives us an upperbound on the timestamp. 1729 * timestamp <= last timestamp + max ticks 1730 * We have to be careful here. Windows will send an 1731 * initial timestamp of zero and then initialize it 1732 * to a random value after the 3whs; presumably to 1733 * avoid a DoS by having to call an expensive RNG 1734 * during a SYN flood. Proof MS has at least one 1735 * good security geek. 1736 * 1737 * - The TCP timestamp option must also echo the other 1738 * endpoints timestamp. The timestamp echoed is the 1739 * one carried on the earliest unacknowledged segment 1740 * on the left edge of the sequence window. The RFC 1741 * states that the host will reject any echoed 1742 * timestamps that were larger than any ever sent. 1743 * This gives us an upperbound on the TS echo. 1744 * tescr <= largest_tsval 1745 * - The lowerbound on the TS echo is a little more 1746 * tricky to determine. 
		struct timeval delta_ts;
		int ts_fudge;

		/*
		 * PFTM_TS_DIFF is how many seconds of leeway to allow
		 * a host's timestamp.  This can happen if the previous
		 * packet got delayed in transit for much longer than
		 * this packet.
		 */
		if ((ts_fudge = state->rule->timeout[PFTM_TS_DIFF]) == 0)
			ts_fudge = V_pf_default_rule.timeout[PFTM_TS_DIFF];

		/* Calculate max ticks since the last timestamp */
#define TS_MAXFREQ	1100		/* RFC max TS freq of 1 kHz + 10% skew */
#define TS_MICROSECS	1000000		/* microseconds per second */
		delta_ts = uptime;
		timevalsub(&delta_ts, &src->scrub->pfss_last);
		tsval_from_last = (delta_ts.tv_sec + ts_fudge) * TS_MAXFREQ;
		tsval_from_last += delta_ts.tv_usec / (TS_MICROSECS/TS_MAXFREQ);
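
		/*
		 * Editor's example with concrete numbers: after 10 s of
		 * silence and a 30 s fudge, the peer's clock may have
		 * advanced by at most (10 + 30) * 1100 = 44000 ticks, so
		 * a TSval more than 44000 above the last one seen is
		 * treated as out of window by the check below.
		 */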
1832 * but we must not let an attacker to put a FIN on a 1833 * data packet to sneak it through our ESTABLISHED check. 1834 * - on a TCP reset. RFC suggests not even looking at TS. 1835 * - on an empty ACK. The TS will not be echoed so it will 1836 * probably not help keep the RTT calculation in sync and 1837 * there isn't as much danger when the sequence numbers 1838 * got wrapped. So some stacks don't include TS on empty 1839 * ACKs :-( 1840 * 1841 * To minimize the disruption to mostly RFC1323 conformant 1842 * stacks, we will only require timestamps on data packets. 1843 * 1844 * And what do ya know, we cannot require timestamps on data 1845 * packets. There appear to be devices that do legitimate 1846 * TCP connection hijacking. There are HTTP devices that allow 1847 * a 3whs (with timestamps) and then buffer the HTTP request. 1848 * If the intermediate device has the HTTP response cache, it 1849 * will spoof the response but not bother timestamping its 1850 * packets. So we can look for the presence of a timestamp in 1851 * the first data packet and if there, require it in all future 1852 * packets. 1853 */ 1854 1855 if (pd->p_len > 0 && (src->scrub->pfss_flags & PFSS_DATA_TS)) { 1856 /* 1857 * Hey! Someone tried to sneak a packet in. Or the 1858 * stack changed its RFC1323 behavior?!?! 1859 */ 1860 if (V_pf_status.debug >= PF_DEBUG_MISC) { 1861 DPFPRINTF(("Did not receive expected RFC1323 " 1862 "timestamp\n")); 1863 pf_print_state(state); 1864 pf_print_flags(tcp_get_flags(th)); 1865 printf("\n"); 1866 } 1867 REASON_SET(reason, PFRES_TS); 1868 return (PF_DROP); 1869 } 1870 } 1871 1872 /* 1873 * We will note if a host sends his data packets with or without 1874 * timestamps. And require all data packets to contain a timestamp 1875 * if the first does. PAWS implicitly requires that all data packets be 1876 * timestamped. But I think there are middle-man devices that hijack 1877 * TCP streams immediately after the 3whs and don't timestamp their 1878 * packets (seen in a WWW accelerator or cache). 1879 */ 1880 if (pd->p_len > 0 && src->scrub && (src->scrub->pfss_flags & 1881 (PFSS_TIMESTAMP|PFSS_DATA_TS|PFSS_DATA_NOTS)) == PFSS_TIMESTAMP) { 1882 if (got_ts) 1883 src->scrub->pfss_flags |= PFSS_DATA_TS; 1884 else { 1885 src->scrub->pfss_flags |= PFSS_DATA_NOTS; 1886 if (V_pf_status.debug >= PF_DEBUG_MISC && dst->scrub && 1887 (dst->scrub->pfss_flags & PFSS_TIMESTAMP)) { 1888 /* Don't warn if other host rejected RFC1323 */ 1889 DPFPRINTF(("Broken RFC1323 stack did not " 1890 "timestamp data packet. 
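
/*
 * Illustrative sketch, not part of the build (hence the #if 0): how the
 * "maximum plausible timestamp advance" bound computed above behaves.
 * EX_TS_MAXFREQ/EX_TS_MICROSECS mirror TS_MAXFREQ/TS_MICROSECS; all
 * ex_-prefixed names are hypothetical, not pf API.
 */
#if 0
#include <stdint.h>
#include <stdio.h>
#include <sys/time.h>

#define EX_TS_MAXFREQ	1100		/* 1 kHz RFC limit + 10% skew */
#define EX_TS_MICROSECS	1000000		/* microseconds per second */

/*
 * Upper bound on how far a peer's TS clock may plausibly have advanced
 * after `idle` time, with `ts_fudge` seconds of leeway.  A received
 * tsval above pfss_tsval + this bound fails the PAWS check.
 */
static uint32_t
ex_max_ts_advance(struct timeval idle, int ts_fudge)
{
	uint32_t ticks;

	ticks = (uint32_t)(idle.tv_sec + ts_fudge) * EX_TS_MAXFREQ;
	ticks += (uint32_t)(idle.tv_usec / (EX_TS_MICROSECS / EX_TS_MAXFREQ));
	return (ticks);
}

int
main(void)
{
	struct timeval idle = { .tv_sec = 60, .tv_usec = 500000 };

	/* 60.5s idle + 10s fudge: (70 * 1100) + 550 = 77550 ticks. */
	printf("max advance: %u\n", ex_max_ts_advance(idle, 10));
	return (0);
}
#endif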

int
pf_normalize_mss(struct pf_pdesc *pd)
{
	struct tcphdr *th = &pd->hdr.tcp;
	u_int16_t *mss;
	int thoff;
	int opt, cnt, optlen = 0;
	u_char opts[TCP_MAXOLEN];
	u_char *optp = opts;
	size_t startoff;

	thoff = th->th_off << 2;
	cnt = thoff - sizeof(struct tcphdr);

	if (cnt > 0 && !pf_pull_hdr(pd->m, pd->off + sizeof(*th), opts, cnt,
	    NULL, NULL, pd->af))
		return (0);

	for (; cnt > 0; cnt -= optlen, optp += optlen) {
		startoff = optp - opts;
		opt = optp[0];
		if (opt == TCPOPT_EOL)
			break;
		if (opt == TCPOPT_NOP)
			optlen = 1;
		else {
			if (cnt < 2)
				break;
			optlen = optp[1];
			if (optlen < 2 || optlen > cnt)
				break;
		}
		switch (opt) {
		case TCPOPT_MAXSEG:
			mss = (u_int16_t *)(optp + 2);
			if ((ntohs(*mss)) > pd->act.max_mss) {
				pf_patch_16_unaligned(pd->m,
				    &th->th_sum,
				    mss, htons(pd->act.max_mss),
				    PF_ALGNMNT(startoff),
				    0);
				m_copyback(pd->m, pd->off + sizeof(*th),
				    thoff - sizeof(*th), opts);
				m_copyback(pd->m, pd->off, sizeof(*th),
				    (caddr_t)th);
			}
			break;
		default:
			break;
		}
	}

	return (0);
}
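
/*
 * Illustrative sketch, not part of the build (hence the #if 0): the
 * option walk pf_normalize_mss() performs, restated on a plain byte
 * buffer.  It skips the mbuf plumbing and checksum patching; ex_/EX_
 * names are hypothetical.  Unlike the kernel code, this sketch also
 * insists on the MSS option being exactly 4 bytes before touching it.
 */
#if 0
#include <stdint.h>

#define EX_TCPOPT_EOL		0
#define EX_TCPOPT_NOP		1
#define EX_TCPOPT_MAXSEG	2

/*
 * Clamp a too-large MSS option in opts[0..cnt) to max_mss.  Returns 1
 * if the buffer was modified.  Option values are big-endian on the
 * wire, hence the manual byte handling.
 */
static int
ex_clamp_mss(uint8_t *opts, int cnt, uint16_t max_mss)
{
	uint8_t *optp = opts;
	int opt, optlen;

	for (; cnt > 0; cnt -= optlen, optp += optlen) {
		opt = optp[0];
		if (opt == EX_TCPOPT_EOL)
			break;
		if (opt == EX_TCPOPT_NOP) {
			optlen = 1;
			continue;
		}
		if (cnt < 2)
			break;
		optlen = optp[1];
		if (optlen < 2 || optlen > cnt)
			break;
		if (opt == EX_TCPOPT_MAXSEG && optlen == 4) {
			uint16_t mss = (uint16_t)((optp[2] << 8) | optp[3]);

			if (mss > max_mss) {
				optp[2] = (uint8_t)(max_mss >> 8);
				optp[3] = (uint8_t)(max_mss & 0xff);
				return (1);
			}
		}
	}
	return (0);
}

int
main(void)
{
	/* NOP, NOP, MSS = 1460 (0x05b4); clamp to 1400. */
	uint8_t opts[] = { 1, 1, 2, 4, 0x05, 0xb4 };

	return (!ex_clamp_mss(opts, (int)sizeof(opts), 1400));
}
#endif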

int
pf_scan_sctp(struct pf_pdesc *pd)
{
	struct sctp_chunkhdr ch = { };
	int chunk_off = sizeof(struct sctphdr);
	int chunk_start;
	int ret;

	while (pd->off + chunk_off < pd->tot_len) {
		if (!pf_pull_hdr(pd->m, pd->off + chunk_off, &ch, sizeof(ch),
		    NULL, NULL, pd->af))
			return (PF_DROP);

		/* Length includes the header; it must be at least 4. */
		if (ntohs(ch.chunk_length) < 4)
			return (PF_DROP);

		chunk_start = chunk_off;
		chunk_off += roundup(ntohs(ch.chunk_length), 4);

		switch (ch.chunk_type) {
		case SCTP_INITIATION:
		case SCTP_INITIATION_ACK: {
			struct sctp_init_chunk init;

			if (!pf_pull_hdr(pd->m, pd->off + chunk_start, &init,
			    sizeof(init), NULL, NULL, pd->af))
				return (PF_DROP);

			/*
			 * RFC 9260, Section 3.3.2, "The Initiate Tag is
			 * allowed to have any value except 0."
			 */
			if (init.init.initiate_tag == 0)
				return (PF_DROP);
			if (init.init.num_inbound_streams == 0)
				return (PF_DROP);
			if (init.init.num_outbound_streams == 0)
				return (PF_DROP);
			if (ntohl(init.init.a_rwnd) < SCTP_MIN_RWND)
				return (PF_DROP);

			/*
			 * RFC 9260, Section 3.1: INIT chunks MUST have a zero
			 * verification tag.
			 */
			if (ch.chunk_type == SCTP_INITIATION &&
			    pd->hdr.sctp.v_tag != 0)
				return (PF_DROP);

			pd->sctp_initiate_tag = init.init.initiate_tag;

			if (ch.chunk_type == SCTP_INITIATION)
				pd->sctp_flags |= PFDESC_SCTP_INIT;
			else
				pd->sctp_flags |= PFDESC_SCTP_INIT_ACK;

			ret = pf_multihome_scan_init(pd->off + chunk_start,
			    ntohs(init.ch.chunk_length), pd);
			if (ret != PF_PASS)
				return (ret);

			break;
		}
		case SCTP_ABORT_ASSOCIATION:
			pd->sctp_flags |= PFDESC_SCTP_ABORT;
			break;
		case SCTP_SHUTDOWN:
		case SCTP_SHUTDOWN_ACK:
			pd->sctp_flags |= PFDESC_SCTP_SHUTDOWN;
			break;
		case SCTP_SHUTDOWN_COMPLETE:
			pd->sctp_flags |= PFDESC_SCTP_SHUTDOWN_COMPLETE;
			break;
		case SCTP_COOKIE_ECHO:
			pd->sctp_flags |= PFDESC_SCTP_COOKIE;
			break;
		case SCTP_COOKIE_ACK:
			pd->sctp_flags |= PFDESC_SCTP_COOKIE_ACK;
			break;
		case SCTP_DATA:
			pd->sctp_flags |= PFDESC_SCTP_DATA;
			break;
		case SCTP_HEARTBEAT_REQUEST:
			pd->sctp_flags |= PFDESC_SCTP_HEARTBEAT;
			break;
		case SCTP_HEARTBEAT_ACK:
			pd->sctp_flags |= PFDESC_SCTP_HEARTBEAT_ACK;
			break;
		case SCTP_ASCONF:
			pd->sctp_flags |= PFDESC_SCTP_ASCONF;

			ret = pf_multihome_scan_asconf(pd->off + chunk_start,
			    ntohs(ch.chunk_length), pd);
			if (ret != PF_PASS)
				return (ret);
			break;
		default:
			pd->sctp_flags |= PFDESC_SCTP_OTHER;
			break;
		}
	}

	/* Validate chunk lengths vs. packet length. */
	if (pd->off + chunk_off != pd->tot_len)
		return (PF_DROP);

	/*
	 * INIT, INIT_ACK or SHUTDOWN_COMPLETE chunks must always be the
	 * only chunk in a packet.
	 */
	if ((pd->sctp_flags & PFDESC_SCTP_INIT) &&
	    (pd->sctp_flags & ~PFDESC_SCTP_INIT))
		return (PF_DROP);
	if ((pd->sctp_flags & PFDESC_SCTP_INIT_ACK) &&
	    (pd->sctp_flags & ~PFDESC_SCTP_INIT_ACK))
		return (PF_DROP);
	if ((pd->sctp_flags & PFDESC_SCTP_SHUTDOWN_COMPLETE) &&
	    (pd->sctp_flags & ~PFDESC_SCTP_SHUTDOWN_COMPLETE))
		return (PF_DROP);

	return (PF_PASS);
}
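
/*
 * Illustrative sketch, not part of the build (hence the #if 0): the two
 * framing invariants pf_scan_sctp() enforces, restated over a flat
 * buffer.  All ex_/EX_ names are hypothetical, not pf API.
 */
#if 0
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define EX_INIT	0x0001		/* stand-ins for PFDESC_SCTP_* flags */
#define EX_DATA	0x0002

/*
 * Chunks must each claim at least their 4-byte header and, after
 * 4-byte padding, must tile the packet exactly; anything else is
 * dropped by the kernel code above.
 */
static int
ex_sctp_chunks_ok(const uint8_t *buf, size_t len)
{
	size_t off = 0;

	while (off < len) {
		uint16_t clen;

		if (len - off < 4)	/* truncated chunk header */
			return (0);
		clen = (uint16_t)((buf[off + 2] << 8) | buf[off + 3]);
		if (clen < 4)		/* length includes the header */
			return (0);
		off += ((size_t)clen + 3) & ~(size_t)3; /* roundup(clen, 4) */
	}
	return (off == len);
}

int
main(void)
{
	/* One well-formed chunk: type 4, flags 0, length 8 (4 data). */
	const uint8_t pkt[] = { 4, 0, 0, 8, 0xde, 0xad, 0xbe, 0xef };
	int flags;

	assert(ex_sctp_chunks_ok(pkt, sizeof(pkt)));

	/*
	 * The "INIT must be the only chunk" rule is a bitmask identity:
	 * (flags & F) && (flags & ~F) holds exactly when F is set
	 * together with any other flag.
	 */
	flags = EX_INIT | EX_DATA;
	assert((flags & EX_INIT) && (flags & ~EX_INIT));	/* drop */
	flags = EX_INIT;
	assert(!((flags & EX_INIT) && (flags & ~EX_INIT)));	/* pass */
	return (0);
}
#endif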

int
pf_normalize_sctp(struct pf_pdesc *pd)
{
	struct pf_krule *r, *rm = NULL;
	struct sctphdr *sh = &pd->hdr.sctp;
	u_short reason;
	sa_family_t af = pd->af;
	int srs;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	/* Check if there are any scrub rules.  The absence of scrub rules
	 * means packet normalization is enforced, just like in OpenBSD. */
	srs = (r != NULL);
	while (r != NULL) {
		pf_counter_u64_add(&r->evaluations, 1);
		if (pfi_kkif_match(r->kif, pd->kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP];
		else if (r->direction && r->direction != pd->dir)
			r = r->skip[PF_SKIP_DIR];
		else if (r->af && r->af != af)
			r = r->skip[PF_SKIP_AF];
		else if (r->proto && r->proto != pd->proto)
			r = r->skip[PF_SKIP_PROTO];
		else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
		    r->src.neg, pd->kif, M_GETFIB(pd->m)))
			r = r->skip[PF_SKIP_SRC_ADDR];
		else if (r->src.port_op && !pf_match_port(r->src.port_op,
		    r->src.port[0], r->src.port[1], sh->src_port))
			r = r->skip[PF_SKIP_SRC_PORT];
		else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
		    r->dst.neg, NULL, M_GETFIB(pd->m)))
			r = r->skip[PF_SKIP_DST_ADDR];
		else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
		    r->dst.port[0], r->dst.port[1], sh->dest_port))
			r = r->skip[PF_SKIP_DST_PORT];
		else {
			rm = r;
			break;
		}
	}

	if (srs) {
		/* With scrub rules present, SCTP normalization happens only
		 * if one of the rules has matched and it is not a
		 * "no scrub" rule. */
		if (rm == NULL || rm->action == PF_NOSCRUB)
			return (PF_PASS);

		pf_counter_u64_critical_enter();
		pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1);
		pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT],
		    pd->tot_len);
		pf_counter_u64_critical_exit();
	}

	/* Verify we're a multiple of 4 bytes long */
	if ((pd->tot_len - pd->off - sizeof(struct sctphdr)) % 4)
		goto sctp_drop;

	/* INIT chunk needs to be the only chunk */
	if (pd->sctp_flags & PFDESC_SCTP_INIT)
		if (pd->sctp_flags & ~PFDESC_SCTP_INIT)
			goto sctp_drop;

	return (PF_PASS);

sctp_drop:
	REASON_SET(&reason, PFRES_NORM);
	if (rm != NULL && r->log)
		PFLOG_PACKET(PF_DROP, reason, r, NULL, NULL, pd, 1);

	return (PF_DROP);
}
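
/*
 * Illustrative sketch, not part of the build (hence the #if 0): the
 * r->skip[] shortcut used by the evaluation loop above.  pf precomputes,
 * for each rule and each criterion, the next rule whose criterion
 * differs, so a failed comparison can skip a whole run of rules that
 * would fail the same way.  This minimal single-criterion model uses
 * hypothetical ex_ names, not pf's data structures.
 */
#if 0
#include <stddef.h>

struct ex_rule {
	int proto;			/* 0 means "any" */
	struct ex_rule *next;		/* plain list order */
	struct ex_rule *skip_proto;	/* next rule with a different proto */
};

/* Return the first rule whose proto criterion accepts `proto`. */
static struct ex_rule *
ex_first_match(struct ex_rule *r, int proto)
{
	while (r != NULL) {
		if (r->proto && r->proto != proto)
			r = r->skip_proto;	/* skip the identical run */
		else
			return (r);
	}
	return (NULL);
}
#endif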

#if defined(INET) || defined(INET6)
void
pf_scrub(struct pf_pdesc *pd)
{
	struct ip *h = mtod(pd->m, struct ip *);
#ifdef INET6
	struct ip6_hdr *h6 = mtod(pd->m, struct ip6_hdr *);
#endif

	/* Clear IP_DF if no-df was requested */
	if (pd->af == AF_INET && pd->act.flags & PFSTATE_NODF &&
	    h->ip_off & htons(IP_DF)) {
		u_int16_t ip_off = h->ip_off;

		h->ip_off &= htons(~IP_DF);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
	}

	/* Enforce a minimum ttl; may cause endless packet loops */
	if (pd->af == AF_INET && pd->act.min_ttl &&
	    h->ip_ttl < pd->act.min_ttl) {
		u_int16_t ip_ttl = h->ip_ttl;

		h->ip_ttl = pd->act.min_ttl;
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_ttl, h->ip_ttl, 0);
	}
#ifdef INET6
	/* Enforce a minimum ttl; may cause endless packet loops */
	if (pd->af == AF_INET6 && pd->act.min_ttl &&
	    h6->ip6_hlim < pd->act.min_ttl)
		h6->ip6_hlim = pd->act.min_ttl;
#endif
	/* Enforce tos */
	if (pd->act.flags & PFSTATE_SETTOS) {
		switch (pd->af) {
		case AF_INET: {
			u_int16_t ov, nv;

			ov = *(u_int16_t *)h;
			h->ip_tos = pd->act.set_tos | (h->ip_tos & IPTOS_ECN_MASK);
			nv = *(u_int16_t *)h;

			h->ip_sum = pf_cksum_fixup(h->ip_sum, ov, nv, 0);
			break;
		}
#ifdef INET6
		case AF_INET6:
			h6->ip6_flow &= IPV6_FLOWLABEL_MASK | IPV6_VERSION_MASK;
			h6->ip6_flow |= htonl((pd->act.set_tos |
			    IPV6_ECN(h6)) << 20);
			break;
#endif
		}
	}

	/* random-id, but not for fragments */
#ifdef INET
	if (pd->af == AF_INET &&
	    pd->act.flags & PFSTATE_RANDOMID && !(h->ip_off & ~htons(IP_DF))) {
		uint16_t ip_id = h->ip_id;

		ip_fillid(h);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_id, h->ip_id, 0);
	}
#endif
}
#endif
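
/*
 * Illustrative sketch, not part of the build (hence the #if 0): the
 * RFC 1624 incremental checksum update behind the pf_cksum_fixup()
 * calls in pf_scrub() above.  ex_cksum_fixup is a hypothetical
 * stand-in and omits pf's UDP zero-checksum special case.
 */
#if 0
#include <assert.h>
#include <stdint.h>

/*
 * Incrementally update a 16-bit ones'-complement checksum when one
 * 16-bit field changes from `old` to `new` (RFC 1624), instead of
 * recomputing the sum over the whole header.
 */
static uint16_t
ex_cksum_fixup(uint16_t cksum, uint16_t old, uint16_t new)
{
	uint32_t l = cksum + old - new;

	l = (l >> 16) + (l & 0xffff);	/* fold the carry/borrow */
	return ((uint16_t)(l & 0xffff));
}

int
main(void)
{
	/*
	 * Clearing IP_DF (0x4000) changes ip_off from 0x4000 to 0x0000;
	 * the checksum must absorb exactly that difference.
	 */
	uint16_t sum = 0x1234;
	uint16_t fixed = ex_cksum_fixup(sum, 0x4000, 0x0000);

	/* Applying the reverse change restores the original sum. */
	assert(ex_cksum_fixup(fixed, 0x0000, 0x4000) == sum);
	return (0);
}
#endif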