/*-
 * Copyright 2001 Niels Provos <provos@citi.umich.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *	$OpenBSD: pf_norm.c,v 1.114 2009/01/29 14:11:45 henning Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_pf.h"

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/refcount.h>
#include <sys/rwlock.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/vnet.h>
#include <net/pfvar.h>
#include <net/if_pflog.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>

#ifdef INET6
#include <netinet/ip6.h>
#endif /* INET6 */

struct pf_frent {
	LIST_ENTRY(pf_frent) fr_next;
	union {
		struct {
			struct ip	*_fr_ip;
			struct mbuf	*_fr_m;
		} _frag;
		struct {
			uint16_t	_fr_off;
			uint16_t	_fr_end;
		} _cache;
	} _u;
};
#define	fr_ip	_u._frag._fr_ip
#define	fr_m	_u._frag._fr_m
#define	fr_off	_u._cache._fr_off
#define	fr_end	_u._cache._fr_end
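/*
 * A pf_fragment describes one IPv4 datagram being tracked.  It is used in
 * two modes: the buffering mode keeps the actual fragment mbufs queued for
 * full reassembly, while the non-buffering cache mode (PFFRAG_NOBUFFER)
 * only records the byte ranges already seen, so duplicates and overlaps
 * can be detected without holding on to the packets themselves.
 */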
struct pf_fragment {
	RB_ENTRY(pf_fragment) fr_entry;
	TAILQ_ENTRY(pf_fragment) frag_next;
	struct in_addr	fr_src;
	struct in_addr	fr_dst;
	u_int8_t	fr_p;		/* protocol of this fragment */
	u_int8_t	fr_flags;	/* status flags */
#define PFFRAG_SEENLAST	0x0001		/* Seen the last fragment for this */
#define PFFRAG_NOBUFFER	0x0002		/* Non-buffering fragment cache */
#define PFFRAG_DROP	0x0004		/* Drop all fragments */
#define BUFFER_FRAGMENTS(fr)	(!((fr)->fr_flags & PFFRAG_NOBUFFER))
	u_int16_t	fr_id;		/* fragment id for reassemble */
	u_int16_t	fr_max;		/* fragment data max */
	u_int32_t	fr_timeout;
	LIST_HEAD(, pf_frent) fr_queue;
};

static struct mtx pf_frag_mtx;
#define PF_FRAG_LOCK()		mtx_lock(&pf_frag_mtx)
#define PF_FRAG_UNLOCK()	mtx_unlock(&pf_frag_mtx)
#define PF_FRAG_ASSERT()	mtx_assert(&pf_frag_mtx, MA_OWNED)

VNET_DEFINE(uma_zone_t, pf_state_scrub_z);	/* XXX: shared with pfsync */

static VNET_DEFINE(uma_zone_t, pf_frent_z);
#define	V_pf_frent_z	VNET(pf_frent_z)
static VNET_DEFINE(uma_zone_t, pf_frag_z);
#define	V_pf_frag_z	VNET(pf_frag_z)

TAILQ_HEAD(pf_fragqueue, pf_fragment);
TAILQ_HEAD(pf_cachequeue, pf_fragment);
static VNET_DEFINE(struct pf_fragqueue, pf_fragqueue);
#define	V_pf_fragqueue		VNET(pf_fragqueue)
static VNET_DEFINE(struct pf_cachequeue, pf_cachequeue);
#define	V_pf_cachequeue		VNET(pf_cachequeue)
RB_HEAD(pf_frag_tree, pf_fragment);
static VNET_DEFINE(struct pf_frag_tree, pf_frag_tree);
#define	V_pf_frag_tree		VNET(pf_frag_tree)
static VNET_DEFINE(struct pf_frag_tree, pf_cache_tree);
#define	V_pf_cache_tree		VNET(pf_cache_tree)
static int		 pf_frag_compare(struct pf_fragment *,
			    struct pf_fragment *);
static RB_PROTOTYPE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
static RB_GENERATE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);

/* Private prototypes */
static void		 pf_free_fragment(struct pf_fragment *);
static void		 pf_remove_fragment(struct pf_fragment *);
static int		 pf_normalize_tcpopt(struct pf_rule *, struct mbuf *,
			    struct tcphdr *, int, sa_family_t);
#ifdef INET
static void		 pf_ip2key(struct pf_fragment *, struct ip *);
static void		 pf_scrub_ip(struct mbuf **, u_int32_t, u_int8_t,
			    u_int8_t);
static void		 pf_flush_fragments(void);
static struct pf_fragment *pf_find_fragment(struct ip *,
			    struct pf_frag_tree *);
static struct mbuf	*pf_reassemble(struct mbuf **, struct pf_fragment **,
			    struct pf_frent *, int);
static struct mbuf	*pf_fragcache(struct mbuf **, struct ip *,
			    struct pf_fragment **, int, int, int *);
#endif /* INET */
#ifdef INET6
static void		 pf_scrub_ip6(struct mbuf **, u_int8_t);
#endif

#define	DPFPRINTF(x) do {				\
	if (V_pf_status.debug >= PF_DEBUG_MISC) {	\
		printf("%s: ", __func__);		\
		printf x ;				\
	}						\
} while (0)

void
pf_normalize_init(void)
{

	V_pf_frag_z = uma_zcreate("pf frags", sizeof(struct pf_fragment),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	V_pf_frent_z = uma_zcreate("pf frag entries", sizeof(struct pf_frent),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	V_pf_state_scrub_z = uma_zcreate("pf state scrubs",
	    sizeof(struct pf_state_scrub), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);

	V_pf_limits[PF_LIMIT_FRAGS].zone = V_pf_frent_z;
	V_pf_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT;
	uma_zone_set_max(V_pf_frent_z, PFFRAG_FRENT_HIWAT);
	uma_zone_set_warning(V_pf_frent_z, "PF frag entries limit reached");

	mtx_init(&pf_frag_mtx, "pf fragments", NULL, MTX_DEF);

	TAILQ_INIT(&V_pf_fragqueue);
	TAILQ_INIT(&V_pf_cachequeue);
}

void
pf_normalize_cleanup(void)
{

	uma_zdestroy(V_pf_state_scrub_z);
	uma_zdestroy(V_pf_frent_z);
	uma_zdestroy(V_pf_frag_z);

	mtx_destroy(&pf_frag_mtx);
}

static int
pf_frag_compare(struct pf_fragment *a, struct pf_fragment *b)
{
	int	diff;

	if ((diff = a->fr_id - b->fr_id))
		return (diff);
	else if ((diff = a->fr_p - b->fr_p))
		return (diff);
	else if (a->fr_src.s_addr < b->fr_src.s_addr)
		return (-1);
	else if (a->fr_src.s_addr > b->fr_src.s_addr)
		return (1);
	else if (a->fr_dst.s_addr < b->fr_dst.s_addr)
		return (-1);
	else if (a->fr_dst.s_addr > b->fr_dst.s_addr)
		return (1);
	return (0);
}
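/*
 * Both queues are kept in LRU order: pf_find_fragment() moves an entry to
 * the head on every hit, so expiration can simply walk each queue from the
 * tail and stop at the first entry that is still fresh.
 */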
void
pf_purge_expired_fragments(void)
{
	struct pf_fragment	*frag;
	u_int32_t		 expire = time_uptime -
				    V_pf_default_rule.timeout[PFTM_FRAG];

	PF_FRAG_LOCK();
	while ((frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue)) != NULL) {
		KASSERT((BUFFER_FRAGMENTS(frag)),
		    ("BUFFER_FRAGMENTS(frag) == 0: %s", __FUNCTION__));
		if (frag->fr_timeout > expire)
			break;

		DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
		pf_free_fragment(frag);
	}

	while ((frag = TAILQ_LAST(&V_pf_cachequeue, pf_cachequeue)) != NULL) {
		KASSERT((!BUFFER_FRAGMENTS(frag)),
		    ("BUFFER_FRAGMENTS(frag) != 0: %s", __FUNCTION__));
		if (frag->fr_timeout > expire)
			break;

		DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
		pf_free_fragment(frag);
		KASSERT((TAILQ_EMPTY(&V_pf_cachequeue) ||
		    TAILQ_LAST(&V_pf_cachequeue, pf_cachequeue) != frag),
		    ("!(TAILQ_EMPTY() || TAILQ_LAST() == frag): %s",
		    __FUNCTION__));
	}
	PF_FRAG_UNLOCK();
}

#ifdef INET
/*
 * Try to flush old fragments to make space for new ones
 */
static void
pf_flush_fragments(void)
{
	struct pf_fragment	*frag, *cache;
	int			 goal;

	PF_FRAG_ASSERT();

	goal = uma_zone_get_cur(V_pf_frent_z) * 9 / 10;
	DPFPRINTF(("trying to free %d frag entries\n", goal));
	while (goal < uma_zone_get_cur(V_pf_frent_z)) {
		frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue);
		if (frag)
			pf_free_fragment(frag);
		cache = TAILQ_LAST(&V_pf_cachequeue, pf_cachequeue);
		if (cache)
			pf_free_fragment(cache);
		if (frag == NULL && cache == NULL)
			break;
	}
}
#endif /* INET */

/* Frees the fragments and all associated entries */
static void
pf_free_fragment(struct pf_fragment *frag)
{
	struct pf_frent		*frent;

	PF_FRAG_ASSERT();

	/* Free all fragments */
	if (BUFFER_FRAGMENTS(frag)) {
		for (frent = LIST_FIRST(&frag->fr_queue); frent;
		    frent = LIST_FIRST(&frag->fr_queue)) {
			LIST_REMOVE(frent, fr_next);

			m_freem(frent->fr_m);
			uma_zfree(V_pf_frent_z, frent);
		}
	} else {
		for (frent = LIST_FIRST(&frag->fr_queue); frent;
		    frent = LIST_FIRST(&frag->fr_queue)) {
			LIST_REMOVE(frent, fr_next);

			KASSERT((LIST_EMPTY(&frag->fr_queue) ||
			    LIST_FIRST(&frag->fr_queue)->fr_off >
			    frent->fr_end),
			    ("! (LIST_EMPTY() || LIST_FIRST()->fr_off >"
			    " frent->fr_end): %s", __func__));

			uma_zfree(V_pf_frent_z, frent);
		}
	}

	pf_remove_fragment(frag);
}

#ifdef INET
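/*
 * Build the red-black tree lookup key for a datagram; the fields set here
 * must match what pf_frag_compare() examines: IP id, protocol, and the
 * source and destination addresses.
 */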
static void
pf_ip2key(struct pf_fragment *key, struct ip *ip)
{
	key->fr_p = ip->ip_p;
	key->fr_id = ip->ip_id;
	key->fr_src.s_addr = ip->ip_src.s_addr;
	key->fr_dst.s_addr = ip->ip_dst.s_addr;
}

static struct pf_fragment *
pf_find_fragment(struct ip *ip, struct pf_frag_tree *tree)
{
	struct pf_fragment	 key;
	struct pf_fragment	*frag;

	PF_FRAG_ASSERT();

	pf_ip2key(&key, ip);

	frag = RB_FIND(pf_frag_tree, tree, &key);
	if (frag != NULL) {
		/* XXX Are we sure we want to update the timeout? */
		frag->fr_timeout = time_uptime;
		if (BUFFER_FRAGMENTS(frag)) {
			TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);
			TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next);
		} else {
			TAILQ_REMOVE(&V_pf_cachequeue, frag, frag_next);
			TAILQ_INSERT_HEAD(&V_pf_cachequeue, frag, frag_next);
		}
	}

	return (frag);
}
#endif /* INET */

/* Removes a fragment from the fragment queue and frees the fragment */
static void
pf_remove_fragment(struct pf_fragment *frag)
{

	PF_FRAG_ASSERT();

	if (BUFFER_FRAGMENTS(frag)) {
		RB_REMOVE(pf_frag_tree, &V_pf_frag_tree, frag);
		TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);
		uma_zfree(V_pf_frag_z, frag);
	} else {
		RB_REMOVE(pf_frag_tree, &V_pf_cache_tree, frag);
		TAILQ_REMOVE(&V_pf_cachequeue, frag, frag_next);
		uma_zfree(V_pf_frag_z, frag);
	}
}

#ifdef INET
#define FR_IP_OFF(fr)	((ntohs((fr)->fr_ip->ip_off) & IP_OFFMASK) << 3)
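/*
 * Buffering reassembly: insert the fragment into the descriptor's queue,
 * sorted by offset, trimming any bytes that overlap fragments already
 * queued.  Once the last fragment has been seen and the queue is
 * contiguous, the mbuf chains are concatenated into a single packet and
 * the descriptor is released.  Returns the reassembled mbuf, or NULL if
 * the datagram is still incomplete (or had to be dropped).
 */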
static struct mbuf *
pf_reassemble(struct mbuf **m0, struct pf_fragment **frag,
    struct pf_frent *frent, int mff)
{
	struct mbuf	*m = *m0, *m2;
	struct pf_frent	*frea, *next;
	struct pf_frent	*frep = NULL;
	struct ip	*ip = frent->fr_ip;
	int		 hlen = ip->ip_hl << 2;
	u_int16_t	 off = (ntohs(ip->ip_off) & IP_OFFMASK) << 3;
	u_int16_t	 ip_len = ntohs(ip->ip_len) - ip->ip_hl * 4;
	u_int16_t	 max = ip_len + off;

	PF_FRAG_ASSERT();
	KASSERT((*frag == NULL || BUFFER_FRAGMENTS(*frag)),
	    ("! (*frag == NULL || BUFFER_FRAGMENTS(*frag)): %s", __FUNCTION__));

	/* Strip off ip header */
	m->m_data += hlen;
	m->m_len -= hlen;

	/* Create a new reassembly queue for this packet */
	if (*frag == NULL) {
		*frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
		if (*frag == NULL) {
			pf_flush_fragments();
			*frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
			if (*frag == NULL)
				goto drop_fragment;
		}

		(*frag)->fr_flags = 0;
		(*frag)->fr_max = 0;
		(*frag)->fr_src = frent->fr_ip->ip_src;
		(*frag)->fr_dst = frent->fr_ip->ip_dst;
		(*frag)->fr_p = frent->fr_ip->ip_p;
		(*frag)->fr_id = frent->fr_ip->ip_id;
		(*frag)->fr_timeout = time_uptime;
		LIST_INIT(&(*frag)->fr_queue);

		RB_INSERT(pf_frag_tree, &V_pf_frag_tree, *frag);
		TAILQ_INSERT_HEAD(&V_pf_fragqueue, *frag, frag_next);

		/* We do not have a previous fragment */
		frep = NULL;
		goto insert;
	}

	/*
	 * Find a fragment after the current one:
	 *  - off contains the real shifted offset.
	 */
	LIST_FOREACH(frea, &(*frag)->fr_queue, fr_next) {
		if (FR_IP_OFF(frea) > off)
			break;
		frep = frea;
	}

	KASSERT((frep != NULL || frea != NULL),
	    ("!(frep != NULL || frea != NULL): %s", __FUNCTION__));

	if (frep != NULL &&
	    FR_IP_OFF(frep) + ntohs(frep->fr_ip->ip_len) - frep->fr_ip->ip_hl *
	    4 > off)
	{
		u_int16_t	precut;

		precut = FR_IP_OFF(frep) + ntohs(frep->fr_ip->ip_len) -
		    frep->fr_ip->ip_hl * 4 - off;
		if (precut >= ip_len)
			goto drop_fragment;
		m_adj(frent->fr_m, precut);
		DPFPRINTF(("overlap -%d\n", precut));
		/* Enforce 8 byte boundaries */
		ip->ip_off = htons(ntohs(ip->ip_off) + (precut >> 3));
		off = (ntohs(ip->ip_off) & IP_OFFMASK) << 3;
		ip_len -= precut;
		ip->ip_len = htons(ip_len);
	}

	for (; frea != NULL && ip_len + off > FR_IP_OFF(frea);
	    frea = next)
	{
		u_int16_t	aftercut;

		aftercut = ip_len + off - FR_IP_OFF(frea);
		DPFPRINTF(("adjust overlap %d\n", aftercut));
		if (aftercut < ntohs(frea->fr_ip->ip_len) - frea->fr_ip->ip_hl
		    * 4)
		{
			frea->fr_ip->ip_len =
			    htons(ntohs(frea->fr_ip->ip_len) - aftercut);
			frea->fr_ip->ip_off = htons(ntohs(frea->fr_ip->ip_off) +
			    (aftercut >> 3));
			m_adj(frea->fr_m, aftercut);
			break;
		}

		/* This fragment is completely overlapped, lose it */
		next = LIST_NEXT(frea, fr_next);
		m_freem(frea->fr_m);
		LIST_REMOVE(frea, fr_next);
		uma_zfree(V_pf_frent_z, frea);
	}

insert:
	/* Update maximum data size */
	if ((*frag)->fr_max < max)
		(*frag)->fr_max = max;
	/* This is the last segment */
	if (!mff)
		(*frag)->fr_flags |= PFFRAG_SEENLAST;

	if (frep == NULL)
		LIST_INSERT_HEAD(&(*frag)->fr_queue, frent, fr_next);
	else
		LIST_INSERT_AFTER(frep, frent, fr_next);
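	/*
	 * Reassembly is complete only once the final (!MF) fragment has
	 * been queued and the walk below finds no hole between offset 0
	 * and fr_max.
	 */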
	/* Check if we are completely reassembled */
	if (!((*frag)->fr_flags & PFFRAG_SEENLAST))
		return (NULL);

	/* Check if we have all the data */
	off = 0;
	for (frep = LIST_FIRST(&(*frag)->fr_queue); frep; frep = next) {
		next = LIST_NEXT(frep, fr_next);

		off += ntohs(frep->fr_ip->ip_len) - frep->fr_ip->ip_hl * 4;
		if (off < (*frag)->fr_max &&
		    (next == NULL || FR_IP_OFF(next) != off))
		{
			DPFPRINTF(("missing fragment at %d, next %d, max %d\n",
			    off, next == NULL ? -1 : FR_IP_OFF(next),
			    (*frag)->fr_max));
			return (NULL);
		}
	}
	DPFPRINTF(("%d < %d?\n", off, (*frag)->fr_max));
	if (off < (*frag)->fr_max)
		return (NULL);

	/* We have all the data */
	frent = LIST_FIRST(&(*frag)->fr_queue);
	KASSERT((frent != NULL), ("frent == NULL: %s", __FUNCTION__));
	if ((frent->fr_ip->ip_hl << 2) + off > IP_MAXPACKET) {
		DPFPRINTF(("drop: too big: %d\n", off));
		pf_free_fragment(*frag);
		*frag = NULL;
		return (NULL);
	}
	next = LIST_NEXT(frent, fr_next);

	/* Magic from ip_input */
	ip = frent->fr_ip;
	m = frent->fr_m;
	m2 = m->m_next;
	m->m_next = NULL;
	m_cat(m, m2);
	uma_zfree(V_pf_frent_z, frent);
	for (frent = next; frent != NULL; frent = next) {
		next = LIST_NEXT(frent, fr_next);

		m2 = frent->fr_m;
		uma_zfree(V_pf_frent_z, frent);
		m->m_pkthdr.csum_flags &= m2->m_pkthdr.csum_flags;
		m->m_pkthdr.csum_data += m2->m_pkthdr.csum_data;
		m_cat(m, m2);
	}

	while (m->m_pkthdr.csum_data & 0xffff0000)
		m->m_pkthdr.csum_data = (m->m_pkthdr.csum_data & 0xffff) +
		    (m->m_pkthdr.csum_data >> 16);
	ip->ip_src = (*frag)->fr_src;
	ip->ip_dst = (*frag)->fr_dst;

	/* Remove from fragment queue */
	pf_remove_fragment(*frag);
	*frag = NULL;

	hlen = ip->ip_hl << 2;
	ip->ip_len = htons(off + hlen);
	m->m_len += hlen;
	m->m_data -= hlen;

	/* some debugging cruft by sklower, below, will go away soon */
	/* XXX this should be done elsewhere */
	if (m->m_flags & M_PKTHDR) {
		int plen = 0;
		for (m2 = m; m2; m2 = m2->m_next)
			plen += m2->m_len;
		m->m_pkthdr.len = plen;
	}

	DPFPRINTF(("complete: %p(%d)\n", m, ntohs(ip->ip_len)));
	return (m);

drop_fragment:
	/* Oops - fail safe - drop packet */
	uma_zfree(V_pf_frent_z, frent);
	m_freem(m);
	return (NULL);
}
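/*
 * Non-buffering fragment cache: instead of queueing the mbufs, only the
 * byte ranges already seen are recorded.  Fragments pass through (possibly
 * trimmed so they no longer overlap earlier ones), exact duplicates are
 * dropped, and with the 'drop' flag set any overlap condemns the whole
 * datagram via PFFRAG_DROP.
 */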
static struct mbuf *
pf_fragcache(struct mbuf **m0, struct ip *h, struct pf_fragment **frag, int mff,
    int drop, int *nomem)
{
	struct mbuf	*m = *m0;
	struct pf_frent	*frp, *fra, *cur = NULL;
	int		 ip_len = ntohs(h->ip_len) - (h->ip_hl << 2);
	u_int16_t	 off = ntohs(h->ip_off) << 3;
	u_int16_t	 max = ip_len + off;
	int		 hosed = 0;

	PF_FRAG_ASSERT();
	KASSERT((*frag == NULL || !BUFFER_FRAGMENTS(*frag)),
	    ("!(*frag == NULL || !BUFFER_FRAGMENTS(*frag)): %s", __FUNCTION__));

	/* Create a new range queue for this packet */
	if (*frag == NULL) {
		*frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
		if (*frag == NULL) {
			pf_flush_fragments();
			*frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
			if (*frag == NULL)
				goto no_mem;
		}

		/* Get an entry for the queue */
		cur = uma_zalloc(V_pf_frent_z, M_NOWAIT);
		if (cur == NULL) {
			uma_zfree(V_pf_frag_z, *frag);
			*frag = NULL;
			goto no_mem;
		}

		(*frag)->fr_flags = PFFRAG_NOBUFFER;
		(*frag)->fr_max = 0;
		(*frag)->fr_src = h->ip_src;
		(*frag)->fr_dst = h->ip_dst;
		(*frag)->fr_p = h->ip_p;
		(*frag)->fr_id = h->ip_id;
		(*frag)->fr_timeout = time_uptime;

		cur->fr_off = off;
		cur->fr_end = max;
		LIST_INIT(&(*frag)->fr_queue);
		LIST_INSERT_HEAD(&(*frag)->fr_queue, cur, fr_next);

		RB_INSERT(pf_frag_tree, &V_pf_cache_tree, *frag);
		TAILQ_INSERT_HEAD(&V_pf_cachequeue, *frag, frag_next);

		DPFPRINTF(("fragcache[%d]: new %d-%d\n", h->ip_id, off, max));

		goto pass;
	}

	/*
	 * Find a fragment after the current one:
	 *  - off contains the real shifted offset.
	 */
	frp = NULL;
	LIST_FOREACH(fra, &(*frag)->fr_queue, fr_next) {
		if (fra->fr_off > off)
			break;
		frp = fra;
	}

	KASSERT((frp != NULL || fra != NULL),
	    ("!(frp != NULL || fra != NULL): %s", __FUNCTION__));

	if (frp != NULL) {
		int	precut;

		precut = frp->fr_end - off;
		if (precut >= ip_len) {
			/* Fragment is entirely a duplicate */
			DPFPRINTF(("fragcache[%d]: dead (%d-%d) %d-%d\n",
			    h->ip_id, frp->fr_off, frp->fr_end, off, max));
			goto drop_fragment;
		}
		if (precut == 0) {
			/* They are adjacent.  Fixup cache entry */
			DPFPRINTF(("fragcache[%d]: adjacent (%d-%d) %d-%d\n",
			    h->ip_id, frp->fr_off, frp->fr_end, off, max));
			frp->fr_end = max;
		} else if (precut > 0) {
			/* The first part of this payload overlaps with a
			 * fragment that has already been passed.
			 * Need to trim off the first part of the payload.
			 * But to do so easily, we need to create another
			 * mbuf to throw the original header into.
			 */

			DPFPRINTF(("fragcache[%d]: chop %d (%d-%d) %d-%d\n",
			    h->ip_id, precut, frp->fr_off, frp->fr_end, off,
			    max));

			off += precut;
			max -= precut;
			/* Update the previous frag to encompass this one */
			frp->fr_end = max;

			if (!drop) {
				/* XXX Optimization opportunity
				 * This is a very heavy way to trim the payload.
				 * we could do it much faster by diddling mbuf
				 * internals but that would be even less legible
				 * than this mbuf magic.  For my next trick,
				 * I'll pull a rabbit out of my laptop.
				 */
				*m0 = m_dup(m, M_NOWAIT);
				if (*m0 == NULL)
					goto no_mem;
				/* From KAME Project : We have missed this! */
				m_adj(*m0, (h->ip_hl << 2) -
				    (*m0)->m_pkthdr.len);

				KASSERT(((*m0)->m_next == NULL),
				    ("(*m0)->m_next != NULL: %s",
				    __FUNCTION__));
				m_adj(m, precut + (h->ip_hl << 2));
				m_cat(*m0, m);
				m = *m0;
				if (m->m_flags & M_PKTHDR) {
					int plen = 0;
					struct mbuf *t;
					for (t = m; t; t = t->m_next)
						plen += t->m_len;
					m->m_pkthdr.len = plen;
				}

				h = mtod(m, struct ip *);

				KASSERT(((int)m->m_len ==
				    ntohs(h->ip_len) - precut),
				    ("m->m_len != ntohs(h->ip_len) - precut: %s",
				    __FUNCTION__));
				h->ip_off = htons(ntohs(h->ip_off) +
				    (precut >> 3));
				h->ip_len = htons(ntohs(h->ip_len) - precut);
			} else {
				hosed++;
			}
		} else {
			/* There is a gap between fragments */

			DPFPRINTF(("fragcache[%d]: gap %d (%d-%d) %d-%d\n",
			    h->ip_id, -precut, frp->fr_off, frp->fr_end, off,
			    max));

			cur = uma_zalloc(V_pf_frent_z, M_NOWAIT);
			if (cur == NULL)
				goto no_mem;

			cur->fr_off = off;
			cur->fr_end = max;
			LIST_INSERT_AFTER(frp, cur, fr_next);
		}
	}
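	/*
	 * Now handle overlap with the following cache entry: trim the tail
	 * of this fragment if it runs past fra->fr_off, then merge adjacent
	 * range descriptors back together.
	 */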
	if (fra != NULL) {
		int	aftercut;
		int	merge = 0;

		aftercut = max - fra->fr_off;
		if (aftercut == 0) {
			/* Adjacent fragments */
			DPFPRINTF(("fragcache[%d]: adjacent %d-%d (%d-%d)\n",
			    h->ip_id, off, max, fra->fr_off, fra->fr_end));
			fra->fr_off = off;
			merge = 1;
		} else if (aftercut > 0) {
			/* Need to chop off the tail of this fragment */
			DPFPRINTF(("fragcache[%d]: chop %d %d-%d (%d-%d)\n",
			    h->ip_id, aftercut, off, max, fra->fr_off,
			    fra->fr_end));
			fra->fr_off = off;
			max -= aftercut;

			merge = 1;

			if (!drop) {
				m_adj(m, -aftercut);
				if (m->m_flags & M_PKTHDR) {
					int plen = 0;
					struct mbuf *t;
					for (t = m; t; t = t->m_next)
						plen += t->m_len;
					m->m_pkthdr.len = plen;
				}
				h = mtod(m, struct ip *);
				KASSERT(((int)m->m_len == ntohs(h->ip_len) - aftercut),
				    ("m->m_len != ntohs(h->ip_len) - aftercut: %s",
				    __FUNCTION__));
				h->ip_len = htons(ntohs(h->ip_len) - aftercut);
			} else {
				hosed++;
			}
		} else if (frp == NULL) {
			/* There is a gap between fragments */
			DPFPRINTF(("fragcache[%d]: gap %d %d-%d (%d-%d)\n",
			    h->ip_id, -aftercut, off, max, fra->fr_off,
			    fra->fr_end));

			cur = uma_zalloc(V_pf_frent_z, M_NOWAIT);
			if (cur == NULL)
				goto no_mem;

			cur->fr_off = off;
			cur->fr_end = max;
			LIST_INSERT_BEFORE(fra, cur, fr_next);
		}

		/* Need to glue together two separate fragment descriptors */
		if (merge) {
			if (cur && fra->fr_off <= cur->fr_end) {
				/* Need to merge in a previous 'cur' */
				DPFPRINTF(("fragcache[%d]: adjacent(merge "
				    "%d-%d) %d-%d (%d-%d)\n",
				    h->ip_id, cur->fr_off, cur->fr_end, off,
				    max, fra->fr_off, fra->fr_end));
				fra->fr_off = cur->fr_off;
				LIST_REMOVE(cur, fr_next);
				uma_zfree(V_pf_frent_z, cur);
				cur = NULL;

			} else if (frp && fra->fr_off <= frp->fr_end) {
				/* Need to merge in a modified 'frp' */
				KASSERT((cur == NULL), ("cur != NULL: %s",
				    __FUNCTION__));
				DPFPRINTF(("fragcache[%d]: adjacent(merge "
				    "%d-%d) %d-%d (%d-%d)\n",
				    h->ip_id, frp->fr_off, frp->fr_end, off,
				    max, fra->fr_off, fra->fr_end));
				fra->fr_off = frp->fr_off;
				LIST_REMOVE(frp, fr_next);
				uma_zfree(V_pf_frent_z, frp);
				frp = NULL;

			}
		}
	}

	if (hosed) {
		/*
		 * We must keep tracking the overall fragment even when
		 * we're going to drop it anyway so that we know when to
		 * free the overall descriptor.  Thus we drop the frag late.
		 */
		goto drop_fragment;
	}

pass:
	/* Update maximum data size */
	if ((*frag)->fr_max < max)
		(*frag)->fr_max = max;

	/* This is the last segment */
	if (!mff)
		(*frag)->fr_flags |= PFFRAG_SEENLAST;

	/* Check if we are completely reassembled */
	if (((*frag)->fr_flags & PFFRAG_SEENLAST) &&
	    LIST_FIRST(&(*frag)->fr_queue)->fr_off == 0 &&
	    LIST_FIRST(&(*frag)->fr_queue)->fr_end == (*frag)->fr_max) {
		/* Remove from fragment queue */
		DPFPRINTF(("fragcache[%d]: done 0-%d\n", h->ip_id,
		    (*frag)->fr_max));
		pf_free_fragment(*frag);
		*frag = NULL;
	}

	return (m);

no_mem:
	*nomem = 1;

	/* Still need to pay attention to !IP_MF */
	if (!mff && *frag != NULL)
		(*frag)->fr_flags |= PFFRAG_SEENLAST;

	m_freem(m);
	return (NULL);

drop_fragment:

	/* Still need to pay attention to !IP_MF */
	if (!mff && *frag != NULL)
		(*frag)->fr_flags |= PFFRAG_SEENLAST;

	if (drop) {
		/* This fragment has been deemed bad.  Don't reassemble. */
		if (((*frag)->fr_flags & PFFRAG_DROP) == 0)
			DPFPRINTF(("fragcache[%d]: dropping overall fragment\n",
			    h->ip_id));
		(*frag)->fr_flags |= PFFRAG_DROP;
	}

	m_freem(m);
	return (NULL);
}
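/*
 * Normalize an IPv4 packet: match it against the scrub ruleset, sanity
 * check the header, and either fully reassemble fragments or run them
 * through the non-buffering cache, depending on the matching rule's
 * fragment options.
 */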
int
pf_normalize_ip(struct mbuf **m0, int dir, struct pfi_kif *kif, u_short *reason,
    struct pf_pdesc *pd)
{
	struct mbuf		*m = *m0;
	struct pf_rule		*r;
	struct pf_frent		*frent;
	struct pf_fragment	*frag = NULL;
	struct ip		*h = mtod(m, struct ip *);
	int			 mff = (ntohs(h->ip_off) & IP_MF);
	int			 hlen = h->ip_hl << 2;
	u_int16_t		 fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
	u_int16_t		 max;
	int			 ip_len;
	int			 ip_off;
	int			 tag = -1;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	while (r != NULL) {
		r->evaluations++;
		if (pfi_kif_match(r->kif, kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != AF_INET)
			r = r->skip[PF_SKIP_AF].ptr;
		else if (r->proto && r->proto != h->ip_p)
			r = r->skip[PF_SKIP_PROTO].ptr;
		else if (PF_MISMATCHAW(&r->src.addr,
		    (struct pf_addr *)&h->ip_src.s_addr, AF_INET,
		    r->src.neg, kif, M_GETFIB(m)))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr,
		    (struct pf_addr *)&h->ip_dst.s_addr, AF_INET,
		    r->dst.neg, NULL, M_GETFIB(m)))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else if (r->match_tag && !pf_match_tag(m, r, &tag,
		    pd->pf_mtag ? pd->pf_mtag->tag : 0))
			r = TAILQ_NEXT(r, entries);
		else
			break;
	}

	if (r == NULL || r->action == PF_NOSCRUB)
		return (PF_PASS);
	else {
		r->packets[dir == PF_OUT]++;
		r->bytes[dir == PF_OUT] += pd->tot_len;
	}

	/* Check for illegal packets */
	if (hlen < (int)sizeof(struct ip))
		goto drop;

	if (hlen > ntohs(h->ip_len))
		goto drop;

	/* Clear IP_DF if the rule uses the no-df option */
	if (r->rule_flag & PFRULE_NODF && h->ip_off & htons(IP_DF)) {
		u_int16_t ip_off = h->ip_off;

		h->ip_off &= htons(~IP_DF);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
	}

	/* We will need other tests here */
	if (!fragoff && !mff)
		goto no_fragment;

	/* We're dealing with a fragment now.  Don't allow fragments
	 * with IP_DF to enter the cache.  If the flag was cleared by
	 * no-df above, fine.  Otherwise drop it.
	 */
	if (h->ip_off & htons(IP_DF)) {
		DPFPRINTF(("IP_DF\n"));
		goto bad;
	}

	ip_len = ntohs(h->ip_len) - hlen;
	ip_off = (ntohs(h->ip_off) & IP_OFFMASK) << 3;

	/* All fragments are 8 byte aligned */
	if (mff && (ip_len & 0x7)) {
		DPFPRINTF(("mff and %d\n", ip_len));
		goto bad;
	}

	/* Respect maximum length */
	if (fragoff + ip_len > IP_MAXPACKET) {
		DPFPRINTF(("max packet %d\n", fragoff + ip_len));
		goto bad;
	}
	max = fragoff + ip_len;
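	/*
	 * Without fragment crop or drop options, buffer and reassemble
	 * fully; otherwise fall through to the non-buffering fragment cache.
	 */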
	if ((r->rule_flag & (PFRULE_FRAGCROP|PFRULE_FRAGDROP)) == 0) {

		/* Fully buffer all of the fragments */
		PF_FRAG_LOCK();
		frag = pf_find_fragment(h, &V_pf_frag_tree);

		/* Check if we saw the last fragment already */
		if (frag != NULL && (frag->fr_flags & PFFRAG_SEENLAST) &&
		    max > frag->fr_max)
			goto bad;

		/* Get an entry for the fragment queue */
		frent = uma_zalloc(V_pf_frent_z, M_NOWAIT);
		if (frent == NULL) {
			PF_FRAG_UNLOCK();
			REASON_SET(reason, PFRES_MEMORY);
			return (PF_DROP);
		}
		frent->fr_ip = h;
		frent->fr_m = m;

		/* Might return a completely reassembled mbuf, or NULL */
		DPFPRINTF(("reass frag %d @ %d-%d\n", h->ip_id, fragoff, max));
		*m0 = m = pf_reassemble(m0, &frag, frent, mff);
		PF_FRAG_UNLOCK();

		if (m == NULL)
			return (PF_DROP);

		if (frag != NULL && (frag->fr_flags & PFFRAG_DROP))
			goto drop;

		h = mtod(m, struct ip *);
	} else {
		/* non-buffering fragment cache (drops or masks overlaps) */
		int	nomem = 0;

		if (dir == PF_OUT && pd->pf_mtag &&
		    pd->pf_mtag->flags & PF_TAG_FRAGCACHE) {
			/*
			 * Already passed the fragment cache in the
			 * input direction.  If we continued, it would
			 * appear to be a dup and would be dropped.
			 */
			goto fragment_pass;
		}

		PF_FRAG_LOCK();
		frag = pf_find_fragment(h, &V_pf_cache_tree);

		/* Check if we saw the last fragment already */
		if (frag != NULL && (frag->fr_flags & PFFRAG_SEENLAST) &&
		    max > frag->fr_max) {
			if (r->rule_flag & PFRULE_FRAGDROP)
				frag->fr_flags |= PFFRAG_DROP;
			goto bad;
		}

		*m0 = m = pf_fragcache(m0, h, &frag, mff,
		    (r->rule_flag & PFRULE_FRAGDROP) ? 1 : 0, &nomem);
		PF_FRAG_UNLOCK();
		if (m == NULL) {
			if (nomem)
				goto no_mem;
			goto drop;
		}

		if (dir == PF_IN) {
			/* Use mtag from copied and trimmed mbuf chain. */
			pd->pf_mtag = pf_get_mtag(m);
			if (pd->pf_mtag == NULL) {
				m_freem(m);
				*m0 = NULL;
				goto no_mem;
			}
			pd->pf_mtag->flags |= PF_TAG_FRAGCACHE;
		}

		if (frag != NULL && (frag->fr_flags & PFFRAG_DROP))
			goto drop;
		goto fragment_pass;
	}

no_fragment:
	/* At this point, only IP_DF is allowed in ip_off */
	if (h->ip_off & ~htons(IP_DF)) {
		u_int16_t ip_off = h->ip_off;

		h->ip_off &= htons(IP_DF);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
	}

	/* not missing a return here */

fragment_pass:
	pf_scrub_ip(&m, r->rule_flag, r->min_ttl, r->set_tos);

	if ((r->rule_flag & (PFRULE_FRAGCROP|PFRULE_FRAGDROP)) == 0)
		pd->flags |= PFDESC_IP_REAS;
	return (PF_PASS);

no_mem:
	REASON_SET(reason, PFRES_MEMORY);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET, dir, *reason, r, NULL, NULL, pd,
		    1);
	return (PF_DROP);

drop:
	REASON_SET(reason, PFRES_NORM);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET, dir, *reason, r, NULL, NULL, pd,
		    1);
	return (PF_DROP);

bad:
	DPFPRINTF(("dropping bad fragment\n"));

	/* Free associated fragments */
	if (frag != NULL) {
		pf_free_fragment(frag);
		PF_FRAG_UNLOCK();
	}

	REASON_SET(reason, PFRES_FRAG);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET, dir, *reason, r, NULL, NULL, pd,
		    1);

	return (PF_DROP);
}
#endif
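/*
 * Normalize an IPv6 packet: walk the extension header chain to validate it
 * (including any jumbo payload option) before letting the packet through.
 * Full IPv6 reassembly is not implemented here; fragment headers only get
 * a basic offset/length sanity check.
 */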
#ifdef INET6
int
pf_normalize_ip6(struct mbuf **m0, int dir, struct pfi_kif *kif,
    u_short *reason, struct pf_pdesc *pd)
{
	struct mbuf		*m = *m0;
	struct pf_rule		*r;
	struct ip6_hdr		*h = mtod(m, struct ip6_hdr *);
	int			 off;
	struct ip6_ext		 ext;
	struct ip6_opt		 opt;
	struct ip6_opt_jumbo	 jumbo;
	struct ip6_frag		 frag;
	u_int32_t		 jumbolen = 0, plen;
	u_int16_t		 fragoff = 0;
	int			 optend;
	int			 ooff;
	u_int8_t		 proto;
	int			 terminal;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	while (r != NULL) {
		r->evaluations++;
		if (pfi_kif_match(r->kif, kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != AF_INET6)
			r = r->skip[PF_SKIP_AF].ptr;
#if 0 /* header chain! */
		else if (r->proto && r->proto != h->ip6_nxt)
			r = r->skip[PF_SKIP_PROTO].ptr;
#endif
		else if (PF_MISMATCHAW(&r->src.addr,
		    (struct pf_addr *)&h->ip6_src, AF_INET6,
		    r->src.neg, kif, M_GETFIB(m)))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr,
		    (struct pf_addr *)&h->ip6_dst, AF_INET6,
		    r->dst.neg, NULL, M_GETFIB(m)))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else
			break;
	}

	if (r == NULL || r->action == PF_NOSCRUB)
		return (PF_PASS);
	else {
		r->packets[dir == PF_OUT]++;
		r->bytes[dir == PF_OUT] += pd->tot_len;
	}

	/* Check for illegal packets */
	if (sizeof(struct ip6_hdr) + IPV6_MAXPACKET < m->m_pkthdr.len)
		goto drop;
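	/*
	 * Walk the extension header chain until a terminal (upper-layer)
	 * header is found, validating the length of each header and of any
	 * hop-by-hop options along the way.
	 */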
	off = sizeof(struct ip6_hdr);
	proto = h->ip6_nxt;
	terminal = 0;
	do {
		switch (proto) {
		case IPPROTO_FRAGMENT:
			goto fragment;
			break;
		case IPPROTO_AH:
		case IPPROTO_ROUTING:
		case IPPROTO_DSTOPTS:
			if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
			    NULL, AF_INET6))
				goto shortpkt;
			if (proto == IPPROTO_AH)
				off += (ext.ip6e_len + 2) * 4;
			else
				off += (ext.ip6e_len + 1) * 8;
			proto = ext.ip6e_nxt;
			break;
		case IPPROTO_HOPOPTS:
			if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
			    NULL, AF_INET6))
				goto shortpkt;
			optend = off + (ext.ip6e_len + 1) * 8;
			ooff = off + sizeof(ext);
			do {
				if (!pf_pull_hdr(m, ooff, &opt.ip6o_type,
				    sizeof(opt.ip6o_type), NULL, NULL,
				    AF_INET6))
					goto shortpkt;
				if (opt.ip6o_type == IP6OPT_PAD1) {
					ooff++;
					continue;
				}
				if (!pf_pull_hdr(m, ooff, &opt, sizeof(opt),
				    NULL, NULL, AF_INET6))
					goto shortpkt;
				if (ooff + sizeof(opt) + opt.ip6o_len > optend)
					goto drop;
				switch (opt.ip6o_type) {
				case IP6OPT_JUMBO:
					if (h->ip6_plen != 0)
						goto drop;
					if (!pf_pull_hdr(m, ooff, &jumbo,
					    sizeof(jumbo), NULL, NULL,
					    AF_INET6))
						goto shortpkt;
					memcpy(&jumbolen, jumbo.ip6oj_jumbo_len,
					    sizeof(jumbolen));
					jumbolen = ntohl(jumbolen);
					if (jumbolen <= IPV6_MAXPACKET)
						goto drop;
					if (sizeof(struct ip6_hdr) + jumbolen !=
					    m->m_pkthdr.len)
						goto drop;
					break;
				default:
					break;
				}
				ooff += sizeof(opt) + opt.ip6o_len;
			} while (ooff < optend);

			off = optend;
			proto = ext.ip6e_nxt;
			break;
		default:
			terminal = 1;
			break;
		}
	} while (!terminal);

	/* jumbo payload option must be present, or plen > 0 */
	if (ntohs(h->ip6_plen) == 0)
		plen = jumbolen;
	else
		plen = ntohs(h->ip6_plen);
	if (plen == 0)
		goto drop;
	if (sizeof(struct ip6_hdr) + plen > m->m_pkthdr.len)
		goto shortpkt;

	pf_scrub_ip6(&m, r->min_ttl);

	return (PF_PASS);

fragment:
	if (ntohs(h->ip6_plen) == 0 || jumbolen)
		goto drop;
	plen = ntohs(h->ip6_plen);

	if (!pf_pull_hdr(m, off, &frag, sizeof(frag), NULL, NULL, AF_INET6))
		goto shortpkt;
	fragoff = ntohs(frag.ip6f_offlg & IP6F_OFF_MASK);
	if (fragoff + (plen - off - sizeof(frag)) > IPV6_MAXPACKET)
		goto badfrag;

	/* do something about it */
	/* remember to set pd->flags |= PFDESC_IP_REAS */
	return (PF_PASS);

shortpkt:
	REASON_SET(reason, PFRES_SHORT);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET6, dir, *reason, r, NULL, NULL, pd,
		    1);
	return (PF_DROP);

drop:
	REASON_SET(reason, PFRES_NORM);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET6, dir, *reason, r, NULL, NULL, pd,
		    1);
	return (PF_DROP);

badfrag:
	REASON_SET(reason, PFRES_FRAG);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET6, dir, *reason, r, NULL, NULL, pd,
		    1);
	return (PF_DROP);
}
#endif /* INET6 */
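/*
 * Normalize a TCP segment against the scrub ruleset: reject impossible
 * flag combinations, clear reserved header bits and stale urgent pointers,
 * and clamp the MSS option if the rule asks for it.
 */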
int
pf_normalize_tcp(int dir, struct pfi_kif *kif, struct mbuf *m, int ipoff,
    int off, void *h, struct pf_pdesc *pd)
{
	struct pf_rule	*r, *rm = NULL;
	struct tcphdr	*th = pd->hdr.tcp;
	int		 rewrite = 0;
	u_short		 reason;
	u_int8_t	 flags;
	sa_family_t	 af = pd->af;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	while (r != NULL) {
		r->evaluations++;
		if (pfi_kif_match(r->kif, kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != af)
			r = r->skip[PF_SKIP_AF].ptr;
		else if (r->proto && r->proto != pd->proto)
			r = r->skip[PF_SKIP_PROTO].ptr;
		else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
		    r->src.neg, kif, M_GETFIB(m)))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (r->src.port_op && !pf_match_port(r->src.port_op,
		    r->src.port[0], r->src.port[1], th->th_sport))
			r = r->skip[PF_SKIP_SRC_PORT].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
		    r->dst.neg, NULL, M_GETFIB(m)))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
		    r->dst.port[0], r->dst.port[1], th->th_dport))
			r = r->skip[PF_SKIP_DST_PORT].ptr;
		else if (r->os_fingerprint != PF_OSFP_ANY && !pf_osfp_match(
		    pf_osfp_fingerprint(pd, m, off, th),
		    r->os_fingerprint))
			r = TAILQ_NEXT(r, entries);
		else {
			rm = r;
			break;
		}
	}

	if (rm == NULL || rm->action == PF_NOSCRUB)
		return (PF_PASS);
	else {
		r->packets[dir == PF_OUT]++;
		r->bytes[dir == PF_OUT] += pd->tot_len;
	}

	if (rm->rule_flag & PFRULE_REASSEMBLE_TCP)
		pd->flags |= PFDESC_TCP_NORM;

	flags = th->th_flags;
	if (flags & TH_SYN) {
		/* Illegal packet */
		if (flags & TH_RST)
			goto tcp_drop;

		if (flags & TH_FIN)
			flags &= ~TH_FIN;
	} else {
		/* Illegal packet */
		if (!(flags & (TH_ACK|TH_RST)))
			goto tcp_drop;
	}

	if (!(flags & TH_ACK)) {
		/* These flags are only valid if ACK is set */
		if ((flags & TH_FIN) || (flags & TH_PUSH) || (flags & TH_URG))
			goto tcp_drop;
	}

	/* Check for illegal header length */
	if (th->th_off < (sizeof(struct tcphdr) >> 2))
		goto tcp_drop;

	/* If flags changed, or reserved data set, then adjust */
	if (flags != th->th_flags || th->th_x2 != 0) {
		u_int16_t	ov, nv;

		ov = *(u_int16_t *)(&th->th_ack + 1);
		th->th_flags = flags;
		th->th_x2 = 0;
		nv = *(u_int16_t *)(&th->th_ack + 1);

		th->th_sum = pf_cksum_fixup(th->th_sum, ov, nv, 0);
		rewrite = 1;
	}

	/* Remove urgent pointer, if TH_URG is not set */
	if (!(flags & TH_URG) && th->th_urp) {
		th->th_sum = pf_cksum_fixup(th->th_sum, th->th_urp, 0, 0);
		th->th_urp = 0;
		rewrite = 1;
	}

	/* Process options */
	if (r->max_mss && pf_normalize_tcpopt(r, m, th, off, pd->af))
		rewrite = 1;

	/* copy back packet headers if we sanitized */
	if (rewrite)
		m_copyback(m, off, sizeof(*th), (caddr_t)th);

	return (PF_PASS);

tcp_drop:
	REASON_SET(&reason, PFRES_NORM);
	if (rm != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET, dir, reason, r, NULL, NULL, pd,
		    1);
	return (PF_DROP);
}
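/*
 * Set up the per-peer scrub state for a new connection: record the initial
 * TTL and, if the SYN carries a timestamp option, seed the values used
 * later for timestamp modulation and PAWS enforcement.
 */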
int
pf_normalize_tcp_init(struct mbuf *m, int off, struct pf_pdesc *pd,
    struct tcphdr *th, struct pf_state_peer *src, struct pf_state_peer *dst)
{
	u_int32_t tsval, tsecr;
	u_int8_t hdr[60];
	u_int8_t *opt;

	KASSERT((src->scrub == NULL),
	    ("pf_normalize_tcp_init: src->scrub != NULL"));

	src->scrub = uma_zalloc(V_pf_state_scrub_z, M_ZERO | M_NOWAIT);
	if (src->scrub == NULL)
		return (1);

	switch (pd->af) {
#ifdef INET
	case AF_INET: {
		struct ip *h = mtod(m, struct ip *);
		src->scrub->pfss_ttl = h->ip_ttl;
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
		src->scrub->pfss_ttl = h->ip6_hlim;
		break;
	}
#endif /* INET6 */
	}

	/*
	 * All normalizations below are only begun if we see the start of
	 * the connection.  They must all set an enabled bit in pfss_flags.
	 */
	if ((th->th_flags & TH_SYN) == 0)
		return (0);

	if (th->th_off > (sizeof(struct tcphdr) >> 2) && src->scrub &&
	    pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
		/* Diddle with TCP options */
		int hlen;
		opt = hdr + sizeof(struct tcphdr);
		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
		while (hlen >= TCPOLEN_TIMESTAMP) {
			switch (*opt) {
			case TCPOPT_EOL:	/* FALLTHROUGH */
			case TCPOPT_NOP:
				opt++;
				hlen--;
				break;
			case TCPOPT_TIMESTAMP:
				if (opt[1] >= TCPOLEN_TIMESTAMP) {
					src->scrub->pfss_flags |=
					    PFSS_TIMESTAMP;
					src->scrub->pfss_ts_mod =
					    htonl(arc4random());

					/* note PFSS_PAWS not set yet */
					memcpy(&tsval, &opt[2],
					    sizeof(u_int32_t));
					memcpy(&tsecr, &opt[6],
					    sizeof(u_int32_t));
					src->scrub->pfss_tsval0 = ntohl(tsval);
					src->scrub->pfss_tsval = ntohl(tsval);
					src->scrub->pfss_tsecr = ntohl(tsecr);
					getmicrouptime(&src->scrub->pfss_last);
				}
				/* FALLTHROUGH */
			default:
				hlen -= MAX(opt[1], 2);
				opt += MAX(opt[1], 2);
				break;
			}
		}
	}

	return (0);
}

void
pf_normalize_tcp_cleanup(struct pf_state *state)
{
	if (state->src.scrub)
		uma_zfree(V_pf_state_scrub_z, state->src.scrub);
	if (state->dst.scrub)
		uma_zfree(V_pf_state_scrub_z, state->dst.scrub);

	/* Someday... flush the TCP segment reassembly descriptors. */
}
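/*
 * Per-packet stateful TCP normalization: enforce the minimum TTL seen on
 * the connection, modulate timestamp values, and apply PAWS-style checks
 * to reject segments whose timestamps fall outside the valid window.
 */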
int
pf_normalize_tcp_stateful(struct mbuf *m, int off, struct pf_pdesc *pd,
    u_short *reason, struct tcphdr *th, struct pf_state *state,
    struct pf_state_peer *src, struct pf_state_peer *dst, int *writeback)
{
	struct timeval	 uptime;
	u_int32_t	 tsval, tsecr;
	u_int		 tsval_from_last;
	u_int8_t	 hdr[60];
	u_int8_t	*opt;
	int		 copyback = 0;
	int		 got_ts = 0;

	KASSERT((src->scrub || dst->scrub),
	    ("%s: src->scrub && dst->scrub!", __func__));

	/*
	 * Enforce the minimum TTL seen for this connection.  Negate a common
	 * technique to evade an intrusion detection system and confuse
	 * firewall state code.
	 */
	switch (pd->af) {
#ifdef INET
	case AF_INET: {
		if (src->scrub) {
			struct ip *h = mtod(m, struct ip *);
			if (h->ip_ttl > src->scrub->pfss_ttl)
				src->scrub->pfss_ttl = h->ip_ttl;
			h->ip_ttl = src->scrub->pfss_ttl;
		}
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		if (src->scrub) {
			struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
			if (h->ip6_hlim > src->scrub->pfss_ttl)
				src->scrub->pfss_ttl = h->ip6_hlim;
			h->ip6_hlim = src->scrub->pfss_ttl;
		}
		break;
	}
#endif /* INET6 */
	}

	if (th->th_off > (sizeof(struct tcphdr) >> 2) &&
	    ((src->scrub && (src->scrub->pfss_flags & PFSS_TIMESTAMP)) ||
	    (dst->scrub && (dst->scrub->pfss_flags & PFSS_TIMESTAMP))) &&
	    pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
		/* Diddle with TCP options */
		int hlen;
		opt = hdr + sizeof(struct tcphdr);
		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
		while (hlen >= TCPOLEN_TIMESTAMP) {
			switch (*opt) {
			case TCPOPT_EOL:	/* FALLTHROUGH */
			case TCPOPT_NOP:
				opt++;
				hlen--;
				break;
			case TCPOPT_TIMESTAMP:
				/* Modulate the timestamps.  Can be used for
				 * NAT detection, OS uptime determination or
				 * reboot detection.
				 */

				if (got_ts) {
					/* Huh?  Multiple timestamps!? */
					if (V_pf_status.debug >= PF_DEBUG_MISC) {
						DPFPRINTF(("multiple TS??"));
						pf_print_state(state);
						printf("\n");
					}
					REASON_SET(reason, PFRES_TS);
					return (PF_DROP);
				}
				if (opt[1] >= TCPOLEN_TIMESTAMP) {
					memcpy(&tsval, &opt[2],
					    sizeof(u_int32_t));
					if (tsval && src->scrub &&
					    (src->scrub->pfss_flags &
					    PFSS_TIMESTAMP)) {
						tsval = ntohl(tsval);
						pf_change_a(&opt[2],
						    &th->th_sum,
						    htonl(tsval +
						    src->scrub->pfss_ts_mod),
						    0);
						copyback = 1;
					}

					/* Modulate TS reply iff valid (!0) */
					memcpy(&tsecr, &opt[6],
					    sizeof(u_int32_t));
					if (tsecr && dst->scrub &&
					    (dst->scrub->pfss_flags &
					    PFSS_TIMESTAMP)) {
						tsecr = ntohl(tsecr)
						    - dst->scrub->pfss_ts_mod;
						pf_change_a(&opt[6],
						    &th->th_sum, htonl(tsecr),
						    0);
						copyback = 1;
					}
					got_ts = 1;
				}
				/* FALLTHROUGH */
			default:
				hlen -= MAX(opt[1], 2);
				opt += MAX(opt[1], 2);
				break;
			}
		}
		if (copyback) {
			/* Copyback the options, caller copies back header */
			*writeback = 1;
			m_copyback(m, off + sizeof(struct tcphdr),
			    (th->th_off << 2) - sizeof(struct tcphdr), hdr +
			    sizeof(struct tcphdr));
		}
	}

	/*
	 * Must invalidate PAWS checks on connections idle for too long.
	 * The fastest allowed timestamp clock is 1ms.  That turns out to
	 * be about 24 days before it wraps.  XXX Right now our lowerbound
	 * TS echo check only works for the first 12 days of a connection
	 * when the TS has exhausted half its 32bit space.
	 */
#define TS_MAX_IDLE	(24*24*60*60)
#define TS_MAX_CONN	(12*24*60*60)	/* XXX remove when better tsecr check */
	getmicrouptime(&uptime);
	if (src->scrub && (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (uptime.tv_sec - src->scrub->pfss_last.tv_sec > TS_MAX_IDLE ||
	    time_uptime - state->creation > TS_MAX_CONN)) {
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			DPFPRINTF(("src idled out of PAWS\n"));
			pf_print_state(state);
			printf("\n");
		}
		src->scrub->pfss_flags = (src->scrub->pfss_flags & ~PFSS_PAWS)
		    | PFSS_PAWS_IDLED;
	}
	if (dst->scrub && (dst->scrub->pfss_flags & PFSS_PAWS) &&
	    uptime.tv_sec - dst->scrub->pfss_last.tv_sec > TS_MAX_IDLE) {
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			DPFPRINTF(("dst idled out of PAWS\n"));
			pf_print_state(state);
			printf("\n");
		}
		dst->scrub->pfss_flags = (dst->scrub->pfss_flags & ~PFSS_PAWS)
		    | PFSS_PAWS_IDLED;
	}

	if (got_ts && src->scrub && dst->scrub &&
	    (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (dst->scrub->pfss_flags & PFSS_PAWS)) {
		/* Validate that the timestamps are "in-window".
		 * RFC1323 describes TCP Timestamp options that allow
		 * measurement of RTT (round trip time) and PAWS
		 * (protection against wrapped sequence numbers).  PAWS
		 * gives us a set of rules for rejecting packets on
		 * long fat pipes (packets that were somehow delayed
		 * in transit longer than the time it took to send the
		 * full TCP sequence space of 4Gb).  We can use these
		 * rules and infer a few others that will let us treat
		 * the 32bit timestamp and the 32bit echoed timestamp
		 * as sequence numbers to prevent a blind attacker from
		 * inserting packets into a connection.
		 *
		 * RFC1323 tells us:
		 *  - The timestamp on this packet must be greater than
		 *    or equal to the last value echoed by the other
		 *    endpoint.  The RFC says those will be discarded
		 *    since it is a dup that has already been acked.
		 *    This gives us a lowerbound on the timestamp.
		 *        timestamp >= other last echoed timestamp
		 *  - The timestamp will be less than or equal to
		 *    the last timestamp plus the time between the
		 *    last packet and now.  The RFC defines the max
		 *    clock rate as 1ms.  We will allow clocks to be
		 *    up to 10% fast and will allow a total difference
		 *    of 30 seconds due to a route change.  And this
		 *    gives us an upperbound on the timestamp.
		 *        timestamp <= last timestamp + max ticks
		 *    We have to be careful here.  Windows will send an
		 *    initial timestamp of zero and then initialize it
		 *    to a random value after the 3whs; presumably to
		 *    avoid a DoS by having to call an expensive RNG
		 *    during a SYN flood.  Proof MS has at least one
		 *    good security geek.
		 *
		 *  - The TCP timestamp option must also echo the other
		 *    endpoints timestamp.  The timestamp echoed is the
		 *    one carried on the earliest unacknowledged segment
		 *    on the left edge of the sequence window.  The RFC
		 *    states that the host will reject any echoed
		 *    timestamps that were larger than any ever sent.
		 *    This gives us an upperbound on the TS echo.
		 *        tsecr <= largest_tsval
		 *  - The lowerbound on the TS echo is a little more
		 *    tricky to determine.  The other endpoint's echoed
		 *    values will not decrease.  But there may be
		 *    network conditions that re-order packets and
		 *    cause our view of them to decrease.  For now the
		 *    only lowerbound we can safely determine is that
		 *    the TS echo will never be less than the original
		 *    TS.  XXX There is probably a better lowerbound.
		 *    Remove TS_MAX_CONN with better lowerbound check.
		 *        tsecr >= other original TS
		 *
		 * It is also important to note that the fastest
		 * timestamp clock of 1ms will wrap its 32bit space in
		 * 24 days.  So we just disable TS checking after 24
		 * days of idle time.  We actually must use a 12d
		 * connection limit until we can come up with a better
		 * lowerbound to the TS echo check.
		 */
		struct timeval	delta_ts;
		int		ts_fudge;

		/*
		 * PFTM_TS_DIFF is how many seconds of leeway to allow
		 * a host's timestamp.  This can happen if the previous
		 * packet got delayed in transit for much longer than
		 * this packet.
		 */
		if ((ts_fudge = state->rule.ptr->timeout[PFTM_TS_DIFF]) == 0)
			ts_fudge = V_pf_default_rule.timeout[PFTM_TS_DIFF];
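		/*
		 * With a maximum timestamp clock of TS_MAXFREQ ticks per
		 * second, the most the peer's tsval may legitimately have
		 * advanced is TS_MAXFREQ * (idle seconds + fudge), plus the
		 * equivalent for the leftover microseconds.
		 */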
		/* Calculate max ticks since the last timestamp */
#define TS_MAXFREQ	1100		/* RFC max TS freq of 1Khz + 10% skew */
#define TS_MICROSECS	1000000		/* microseconds per second */
		delta_ts = uptime;
		timevalsub(&delta_ts, &src->scrub->pfss_last);
		tsval_from_last = (delta_ts.tv_sec + ts_fudge) * TS_MAXFREQ;
		tsval_from_last += delta_ts.tv_usec / (TS_MICROSECS/TS_MAXFREQ);

		if ((src->state >= TCPS_ESTABLISHED &&
		    dst->state >= TCPS_ESTABLISHED) &&
		    (SEQ_LT(tsval, dst->scrub->pfss_tsecr) ||
		    SEQ_GT(tsval, src->scrub->pfss_tsval + tsval_from_last) ||
		    (tsecr && (SEQ_GT(tsecr, dst->scrub->pfss_tsval) ||
		    SEQ_LT(tsecr, dst->scrub->pfss_tsval0))))) {
			/* Bad RFC1323 implementation or an insertion attack.
			 *
			 * - Solaris 2.6 and 2.7 are known to send another ACK
			 *   after the FIN,FIN|ACK,ACK closing that carries
			 *   an old timestamp.
			 */

			DPFPRINTF(("Timestamp failed %c%c%c%c\n",
			    SEQ_LT(tsval, dst->scrub->pfss_tsecr) ? '0' : ' ',
			    SEQ_GT(tsval, src->scrub->pfss_tsval +
			    tsval_from_last) ? '1' : ' ',
			    SEQ_GT(tsecr, dst->scrub->pfss_tsval) ? '2' : ' ',
			    SEQ_LT(tsecr, dst->scrub->pfss_tsval0)? '3' : ' '));
			DPFPRINTF((" tsval: %u  tsecr: %u  +ticks: %u  "
			    "idle: %jus %lums\n",
			    tsval, tsecr, tsval_from_last,
			    (uintmax_t)delta_ts.tv_sec,
			    delta_ts.tv_usec / 1000));
			DPFPRINTF((" src->tsval: %u  tsecr: %u\n",
			    src->scrub->pfss_tsval, src->scrub->pfss_tsecr));
			DPFPRINTF((" dst->tsval: %u  tsecr: %u  tsval0: %u"
			    "\n", dst->scrub->pfss_tsval,
			    dst->scrub->pfss_tsecr, dst->scrub->pfss_tsval0));
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
				pf_print_state(state);
				pf_print_flags(th->th_flags);
				printf("\n");
			}
			REASON_SET(reason, PFRES_TS);
			return (PF_DROP);
		}

		/* XXX I'd really like to require tsecr but it's optional */
	} else if (!got_ts && (th->th_flags & TH_RST) == 0 &&
	    ((src->state == TCPS_ESTABLISHED && dst->state == TCPS_ESTABLISHED)
	    || pd->p_len > 0 || (th->th_flags & TH_SYN)) &&
	    src->scrub && dst->scrub &&
	    (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (dst->scrub->pfss_flags & PFSS_PAWS)) {
		/* Didn't send a timestamp.  Timestamps aren't really useful
		 * when:
		 *  - connection opening or closing (often not even sent),
		 *    but we must not let an attacker put a FIN on a
		 *    data packet to sneak it through our ESTABLISHED check.
		 *  - on a TCP reset.  RFC suggests not even looking at TS.
		 *  - on an empty ACK.  The TS will not be echoed so it will
		 *    probably not help keep the RTT calculation in sync and
		 *    there isn't as much danger when the sequence numbers
		 *    got wrapped.  So some stacks don't include TS on empty
		 *    ACKs :-(
		 *
		 * To minimize the disruption to mostly RFC1323 conformant
		 * stacks, we will only require timestamps on data packets.
		 *
		 * And what do ya know, we cannot require timestamps on data
		 * packets.  There appear to be devices that do legitimate
		 * TCP connection hijacking.  There are HTTP devices that allow
		 * a 3whs (with timestamps) and then buffer the HTTP request.
		 * If the intermediate device has the HTTP response cache, it
		 * will spoof the response but not bother timestamping its
		 * packets.  So we can look for the presence of a timestamp in
		 * the first data packet and if there, require it in all future
		 * packets.
		 */

		if (pd->p_len > 0 && (src->scrub->pfss_flags & PFSS_DATA_TS)) {
			/*
			 * Hey!  Someone tried to sneak a packet in.  Or the
			 * stack changed its RFC1323 behavior?!?!
			 */
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
				DPFPRINTF(("Did not receive expected RFC1323 "
				    "timestamp\n"));
				pf_print_state(state);
				pf_print_flags(th->th_flags);
				printf("\n");
			}
			REASON_SET(reason, PFRES_TS);
			return (PF_DROP);
		}
	}

	/*
	 * We will note if a host sends his data packets with or without
	 * timestamps.  And require all data packets to contain a timestamp
	 * if the first does.  PAWS implicitly requires that all data packets
	 * be timestamped.  But I think there are middle-man devices that
	 * hijack TCP streams immediately after the 3whs and don't timestamp
	 * their packets (seen in a WWW accelerator or cache).
	 */
	if (pd->p_len > 0 && src->scrub && (src->scrub->pfss_flags &
	    (PFSS_TIMESTAMP|PFSS_DATA_TS|PFSS_DATA_NOTS)) == PFSS_TIMESTAMP) {
		if (got_ts)
			src->scrub->pfss_flags |= PFSS_DATA_TS;
		else {
			src->scrub->pfss_flags |= PFSS_DATA_NOTS;
			if (V_pf_status.debug >= PF_DEBUG_MISC && dst->scrub &&
			    (dst->scrub->pfss_flags & PFSS_TIMESTAMP)) {
				/* Don't warn if other host rejected RFC1323 */
				DPFPRINTF(("Broken RFC1323 stack did not "
				    "timestamp data packet.  Disabled PAWS "
				    "security.\n"));
				pf_print_state(state);
				pf_print_flags(th->th_flags);
				printf("\n");
			}
		}
	}

	/*
	 * Update PAWS values
	 */
	if (got_ts && src->scrub && PFSS_TIMESTAMP == (src->scrub->pfss_flags &
	    (PFSS_PAWS_IDLED|PFSS_TIMESTAMP))) {
		getmicrouptime(&src->scrub->pfss_last);
		if (SEQ_GEQ(tsval, src->scrub->pfss_tsval) ||
		    (src->scrub->pfss_flags & PFSS_PAWS) == 0)
			src->scrub->pfss_tsval = tsval;

		if (tsecr) {
			if (SEQ_GEQ(tsecr, src->scrub->pfss_tsecr) ||
			    (src->scrub->pfss_flags & PFSS_PAWS) == 0)
				src->scrub->pfss_tsecr = tsecr;

			if ((src->scrub->pfss_flags & PFSS_PAWS) == 0 &&
			    (SEQ_LT(tsval, src->scrub->pfss_tsval0) ||
			    src->scrub->pfss_tsval0 == 0)) {
				/* tsval0 MUST be the lowest timestamp */
				src->scrub->pfss_tsval0 = tsval;
			}

			/* Only fully initialized after a TS gets echoed */
			if ((src->scrub->pfss_flags & PFSS_PAWS) == 0)
				src->scrub->pfss_flags |= PFSS_PAWS;
		}
	}

	/* I have a dream....  TCP segment reassembly.... */
	return (0);
}
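/*
 * Walk the TCP options and clamp the MSS option down to the rule's max-mss
 * value, fixing up the checksum and copying the options back if anything
 * changed.
 */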
static int
pf_normalize_tcpopt(struct pf_rule *r, struct mbuf *m, struct tcphdr *th,
    int off, sa_family_t af)
{
	u_int16_t	*mss;
	int		 thoff;
	int		 opt, cnt, optlen = 0;
	int		 rewrite = 0;
	u_char		 opts[TCP_MAXOLEN];
	u_char		*optp = opts;

	thoff = th->th_off << 2;
	cnt = thoff - sizeof(struct tcphdr);

	if (cnt > 0 && !pf_pull_hdr(m, off + sizeof(*th), opts, cnt,
	    NULL, NULL, af))
		return (rewrite);

	for (; cnt > 0; cnt -= optlen, optp += optlen) {
		opt = optp[0];
		if (opt == TCPOPT_EOL)
			break;
		if (opt == TCPOPT_NOP)
			optlen = 1;
		else {
			if (cnt < 2)
				break;
			optlen = optp[1];
			if (optlen < 2 || optlen > cnt)
				break;
		}
		switch (opt) {
		case TCPOPT_MAXSEG:
			mss = (u_int16_t *)(optp + 2);
			if ((ntohs(*mss)) > r->max_mss) {
				th->th_sum = pf_cksum_fixup(th->th_sum,
				    *mss, htons(r->max_mss), 0);
				*mss = htons(r->max_mss);
				rewrite = 1;
			}
			break;
		default:
			break;
		}
	}

	if (rewrite)
		m_copyback(m, off + sizeof(*th), thoff - sizeof(*th), opts);

	return (rewrite);
}

#ifdef INET
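/*
 * Stateless IPv4 header scrub: clear IP_DF for no-df rules, enforce a
 * minimum TTL, overwrite the TOS byte when set-tos is given, and replace
 * the IP id for random-id rules, fixing up the header checksum after each
 * change.
 */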
static void
pf_scrub_ip(struct mbuf **m0, u_int32_t flags, u_int8_t min_ttl, u_int8_t tos)
{
	struct mbuf		*m = *m0;
	struct ip		*h = mtod(m, struct ip *);

	/* Clear IP_DF if no-df was requested */
	if (flags & PFRULE_NODF && h->ip_off & htons(IP_DF)) {
		u_int16_t ip_off = h->ip_off;

		h->ip_off &= htons(~IP_DF);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
	}

	/* Enforce a minimum ttl, may cause endless packet loops */
	if (min_ttl && h->ip_ttl < min_ttl) {
		u_int16_t ip_ttl = h->ip_ttl;

		h->ip_ttl = min_ttl;
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_ttl, h->ip_ttl, 0);
	}

	/* Enforce tos */
	if (flags & PFRULE_SET_TOS) {
		u_int16_t	ov, nv;

		ov = *(u_int16_t *)h;
		h->ip_tos = tos;
		nv = *(u_int16_t *)h;

		h->ip_sum = pf_cksum_fixup(h->ip_sum, ov, nv, 0);
	}

	/* random-id, but not for fragments */
	if (flags & PFRULE_RANDOMID && !(h->ip_off & ~htons(IP_DF))) {
		u_int16_t ip_id = h->ip_id;

		h->ip_id = ip_randomid();
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_id, h->ip_id, 0);
	}
}
#endif /* INET */

#ifdef INET6
static void
pf_scrub_ip6(struct mbuf **m0, u_int8_t min_ttl)
{
	struct mbuf		*m = *m0;
	struct ip6_hdr		*h = mtod(m, struct ip6_hdr *);

	/* Enforce a minimum ttl, may cause endless packet loops */
	if (min_ttl && h->ip6_hlim < min_ttl)
		h->ip6_hlim = min_ttl;
}
#endif