/*-
 * Copyright 2001 Niels Provos <provos@citi.umich.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *	$OpenBSD: pf_norm.c,v 1.114 2009/01/29 14:11:45 henning Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_pf.h"

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/refcount.h>
#include <sys/rwlock.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/vnet.h>
#include <net/pfvar.h>
#include <net/if_pflog.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>

#ifdef INET6
#include <netinet/ip6.h>
#endif /* INET6 */

struct pf_frent {
	LIST_ENTRY(pf_frent)	fr_next;
	union {
		struct {
			struct ip	*_fr_ip;
			struct mbuf	*_fr_m;
		} _frag;
		struct {
			uint16_t	_fr_off;
			uint16_t	_fr_end;
		} _cache;
	} _u;
};
#define	fr_ip	_u._frag._fr_ip
#define	fr_m	_u._frag._fr_m
#define	fr_off	_u._cache._fr_off
#define	fr_end	_u._cache._fr_end

struct pf_fragment {
	RB_ENTRY(pf_fragment)	 fr_entry;
	TAILQ_ENTRY(pf_fragment) frag_next;
	struct in_addr	fr_src;
	struct in_addr	fr_dst;
	u_int8_t	fr_p;		/* protocol of this fragment */
	u_int8_t	fr_flags;	/* status flags */
#define PFFRAG_SEENLAST	0x0001		/* Seen the last fragment for this */
#define PFFRAG_NOBUFFER	0x0002		/* Non-buffering fragment cache */
#define PFFRAG_DROP	0x0004		/* Drop all fragments */
#define BUFFER_FRAGMENTS(fr)	(!((fr)->fr_flags & PFFRAG_NOBUFFER))
	u_int16_t	fr_id;		/* fragment id for reassemble */
	u_int16_t	fr_max;		/* fragment data max */
	u_int32_t	fr_timeout;
	LIST_HEAD(, pf_frent) fr_queue;
};

static struct mtx pf_frag_mtx;
#define PF_FRAG_LOCK()		mtx_lock(&pf_frag_mtx)
#define PF_FRAG_UNLOCK()	mtx_unlock(&pf_frag_mtx)
#define PF_FRAG_ASSERT()	mtx_assert(&pf_frag_mtx, MA_OWNED)

VNET_DEFINE(uma_zone_t, pf_state_scrub_z);	/* XXX: shared with pfsync */

static VNET_DEFINE(uma_zone_t, pf_frent_z);
#define	V_pf_frent_z	VNET(pf_frent_z)
static VNET_DEFINE(uma_zone_t, pf_frag_z);
#define	V_pf_frag_z	VNET(pf_frag_z)
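
/*
 * Reassembly state lives in two parallel structures: an RB tree (keyed on
 * fragment id, protocol and addresses, see pf_frag_compare()) for lookup,
 * and a TAILQ kept in LRU order so that expiry and flushing can start from
 * the oldest entry at the tail.
 */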

TAILQ_HEAD(pf_fragqueue, pf_fragment);
TAILQ_HEAD(pf_cachequeue, pf_fragment);
static VNET_DEFINE(struct pf_fragqueue, pf_fragqueue);
#define	V_pf_fragqueue	VNET(pf_fragqueue)
static VNET_DEFINE(struct pf_cachequeue, pf_cachequeue);
#define	V_pf_cachequeue	VNET(pf_cachequeue)
RB_HEAD(pf_frag_tree, pf_fragment);
static VNET_DEFINE(struct pf_frag_tree, pf_frag_tree);
#define	V_pf_frag_tree	VNET(pf_frag_tree)
static VNET_DEFINE(struct pf_frag_tree, pf_cache_tree);
#define	V_pf_cache_tree	VNET(pf_cache_tree)
static int		 pf_frag_compare(struct pf_fragment *,
			    struct pf_fragment *);
static RB_PROTOTYPE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
static RB_GENERATE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);

/* Private prototypes */
static void		 pf_free_fragment(struct pf_fragment *);
static void		 pf_remove_fragment(struct pf_fragment *);
static int		 pf_normalize_tcpopt(struct pf_rule *, struct mbuf *,
			    struct tcphdr *, int, sa_family_t);
#ifdef INET
static void		 pf_ip2key(struct pf_fragment *, struct ip *);
static void		 pf_scrub_ip(struct mbuf **, u_int32_t, u_int8_t,
			    u_int8_t);
static void		 pf_flush_fragments(void);
static struct pf_fragment *pf_find_fragment(struct ip *,
			    struct pf_frag_tree *);
static struct mbuf	*pf_reassemble(struct mbuf **, struct pf_fragment **,
			    struct pf_frent *, int);
static struct mbuf	*pf_fragcache(struct mbuf **, struct ip *,
			    struct pf_fragment **, int, int, int *);
#endif /* INET */
#ifdef INET6
static void		 pf_scrub_ip6(struct mbuf **, u_int8_t);
#endif

#define	DPFPRINTF(x) do {				\
	if (V_pf_status.debug >= PF_DEBUG_MISC) {	\
		printf("%s: ", __func__);		\
		printf x ;				\
	}						\
} while(0)

void
pf_normalize_init(void)
{

	V_pf_frag_z = uma_zcreate("pf frags", sizeof(struct pf_fragment),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	V_pf_frent_z = uma_zcreate("pf frag entries", sizeof(struct pf_frent),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	V_pf_state_scrub_z = uma_zcreate("pf state scrubs",
	    sizeof(struct pf_state_scrub), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);

	V_pf_limits[PF_LIMIT_FRAGS].zone = V_pf_frent_z;
	V_pf_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT;
	uma_zone_set_max(V_pf_frent_z, PFFRAG_FRENT_HIWAT);
	uma_zone_set_warning(V_pf_frent_z, "PF frag entries limit reached");

	mtx_init(&pf_frag_mtx, "pf fragments", NULL, MTX_DEF);

	TAILQ_INIT(&V_pf_fragqueue);
	TAILQ_INIT(&V_pf_cachequeue);
}

void
pf_normalize_cleanup(void)
{

	uma_zdestroy(V_pf_state_scrub_z);
	uma_zdestroy(V_pf_frent_z);
	uma_zdestroy(V_pf_frag_z);

	mtx_destroy(&pf_frag_mtx);
}

static int
pf_frag_compare(struct pf_fragment *a, struct pf_fragment *b)
{
	int	diff;

	if ((diff = a->fr_id - b->fr_id))
		return (diff);
	else if ((diff = a->fr_p - b->fr_p))
		return (diff);
	else if (a->fr_src.s_addr < b->fr_src.s_addr)
		return (-1);
	else if (a->fr_src.s_addr > b->fr_src.s_addr)
		return (1);
	else if (a->fr_dst.s_addr < b->fr_dst.s_addr)
		return (-1);
	else if (a->fr_dst.s_addr > b->fr_dst.s_addr)
		return (1);
	return (0);
}

void
pf_purge_expired_fragments(void)
{
	struct pf_fragment	*frag;
	u_int32_t		 expire = time_uptime -
				    V_pf_default_rule.timeout[PFTM_FRAG];

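	/*
	 * Both queues are kept in LRU order (pf_find_fragment() moves a hit
	 * to the head), so scanning from the tail visits the oldest entries
	 * first and can stop at the first one not yet expired.
	 */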
	PF_FRAG_LOCK();
	while ((frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue)) != NULL) {
		KASSERT((BUFFER_FRAGMENTS(frag)),
		    ("BUFFER_FRAGMENTS(frag) == 0: %s", __FUNCTION__));
		if (frag->fr_timeout > expire)
			break;

		DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
		pf_free_fragment(frag);
	}

	while ((frag = TAILQ_LAST(&V_pf_cachequeue, pf_cachequeue)) != NULL) {
		KASSERT((!BUFFER_FRAGMENTS(frag)),
		    ("BUFFER_FRAGMENTS(frag) != 0: %s", __FUNCTION__));
		if (frag->fr_timeout > expire)
			break;

		DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
		pf_free_fragment(frag);
		KASSERT((TAILQ_EMPTY(&V_pf_cachequeue) ||
		    TAILQ_LAST(&V_pf_cachequeue, pf_cachequeue) != frag),
		    ("!(TAILQ_EMPTY() || TAILQ_LAST() == frag): %s",
		    __FUNCTION__));
	}
	PF_FRAG_UNLOCK();
}

#ifdef INET
/*
 * Try to flush old fragments to make space for new ones
 */
static void
pf_flush_fragments(void)
{
	struct pf_fragment	*frag, *cache;
	int			 goal;

	PF_FRAG_ASSERT();

	goal = uma_zone_get_cur(V_pf_frent_z) * 9 / 10;
	DPFPRINTF(("trying to free %d frag entries\n", goal));
	while (goal < uma_zone_get_cur(V_pf_frent_z)) {
		frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue);
		if (frag)
			pf_free_fragment(frag);
		cache = TAILQ_LAST(&V_pf_cachequeue, pf_cachequeue);
		if (cache)
			pf_free_fragment(cache);
		if (frag == NULL && cache == NULL)
			break;
	}
}
#endif /* INET */

/* Frees the fragments and all associated entries */
static void
pf_free_fragment(struct pf_fragment *frag)
{
	struct pf_frent	*frent;

	PF_FRAG_ASSERT();

	/* Free all fragments */
	if (BUFFER_FRAGMENTS(frag)) {
		for (frent = LIST_FIRST(&frag->fr_queue); frent;
		    frent = LIST_FIRST(&frag->fr_queue)) {
			LIST_REMOVE(frent, fr_next);

			m_freem(frent->fr_m);
			uma_zfree(V_pf_frent_z, frent);
		}
	} else {
		for (frent = LIST_FIRST(&frag->fr_queue); frent;
		    frent = LIST_FIRST(&frag->fr_queue)) {
			LIST_REMOVE(frent, fr_next);

			KASSERT((LIST_EMPTY(&frag->fr_queue) ||
			    LIST_FIRST(&frag->fr_queue)->fr_off >
			    frent->fr_end),
			    ("! (LIST_EMPTY() || LIST_FIRST()->fr_off >"
			    " frent->fr_end): %s", __func__));

			uma_zfree(V_pf_frent_z, frent);
		}
	}

	pf_remove_fragment(frag);
}

#ifdef INET
static void
pf_ip2key(struct pf_fragment *key, struct ip *ip)
{
	key->fr_p = ip->ip_p;
	key->fr_id = ip->ip_id;
	key->fr_src.s_addr = ip->ip_src.s_addr;
	key->fr_dst.s_addr = ip->ip_dst.s_addr;
}
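
/*
 * pf_find_fragment() below looks up the tree with a throwaway key built by
 * pf_ip2key(); the fields set there must stay in sync with the comparison
 * order in pf_frag_compare(): fragment id, protocol, then addresses.
 */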
static struct pf_fragment *
pf_find_fragment(struct ip *ip, struct pf_frag_tree *tree)
{
	struct pf_fragment	 key;
	struct pf_fragment	*frag;

	PF_FRAG_ASSERT();

	pf_ip2key(&key, ip);

	frag = RB_FIND(pf_frag_tree, tree, &key);
	if (frag != NULL) {
		/* XXX Are we sure we want to update the timeout? */
		frag->fr_timeout = time_uptime;
		if (BUFFER_FRAGMENTS(frag)) {
			TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);
			TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next);
		} else {
			TAILQ_REMOVE(&V_pf_cachequeue, frag, frag_next);
			TAILQ_INSERT_HEAD(&V_pf_cachequeue, frag, frag_next);
		}
	}

	return (frag);
}
#endif /* INET */

/* Removes a fragment from the fragment queue and frees the fragment */
static void
pf_remove_fragment(struct pf_fragment *frag)
{

	PF_FRAG_ASSERT();

	if (BUFFER_FRAGMENTS(frag)) {
		RB_REMOVE(pf_frag_tree, &V_pf_frag_tree, frag);
		TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);
		uma_zfree(V_pf_frag_z, frag);
	} else {
		RB_REMOVE(pf_frag_tree, &V_pf_cache_tree, frag);
		TAILQ_REMOVE(&V_pf_cachequeue, frag, frag_next);
		uma_zfree(V_pf_frag_z, frag);
	}
}

#ifdef INET
#define FR_IP_OFF(fr)	((ntohs((fr)->fr_ip->ip_off) & IP_OFFMASK) << 3)
static struct mbuf *
pf_reassemble(struct mbuf **m0, struct pf_fragment **frag,
    struct pf_frent *frent, int mff)
{
	struct mbuf	*m = *m0, *m2;
	struct pf_frent	*frea, *next;
	struct pf_frent	*frep = NULL;
	struct ip	*ip = frent->fr_ip;
	int		 hlen = ip->ip_hl << 2;
	u_int16_t	 off = (ntohs(ip->ip_off) & IP_OFFMASK) << 3;
	u_int16_t	 ip_len = ntohs(ip->ip_len) - ip->ip_hl * 4;
	u_int16_t	 max = ip_len + off;

	PF_FRAG_ASSERT();
	KASSERT((*frag == NULL || BUFFER_FRAGMENTS(*frag)),
	    ("! (*frag == NULL || BUFFER_FRAGMENTS(*frag)): %s", __FUNCTION__));

	/* Strip off ip header */
	m->m_data += hlen;
	m->m_len -= hlen;

	/* Create a new reassembly queue for this packet */
	if (*frag == NULL) {
		*frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
		if (*frag == NULL) {
			pf_flush_fragments();
			*frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
			if (*frag == NULL)
				goto drop_fragment;
		}

		(*frag)->fr_flags = 0;
		(*frag)->fr_max = 0;
		(*frag)->fr_src = frent->fr_ip->ip_src;
		(*frag)->fr_dst = frent->fr_ip->ip_dst;
		(*frag)->fr_p = frent->fr_ip->ip_p;
		(*frag)->fr_id = frent->fr_ip->ip_id;
		(*frag)->fr_timeout = time_uptime;
		LIST_INIT(&(*frag)->fr_queue);

		RB_INSERT(pf_frag_tree, &V_pf_frag_tree, *frag);
		TAILQ_INSERT_HEAD(&V_pf_fragqueue, *frag, frag_next);

		/* We do not have a previous fragment */
		frep = NULL;
		goto insert;
	}

	/*
	 * Find a fragment after the current one:
	 *  - off contains the real shifted offset.
	 */
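	/*
	 * After this scan, frep points at the last fragment starting at or
	 * before off (NULL if none) and frea at the first one after it
	 * (NULL if the new fragment sorts last).
	 */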
	LIST_FOREACH(frea, &(*frag)->fr_queue, fr_next) {
		if (FR_IP_OFF(frea) > off)
			break;
		frep = frea;
	}

	KASSERT((frep != NULL || frea != NULL),
	    ("!(frep != NULL || frea != NULL): %s", __FUNCTION__));

	if (frep != NULL &&
	    FR_IP_OFF(frep) + ntohs(frep->fr_ip->ip_len) - frep->fr_ip->ip_hl *
	    4 > off)
	{
		u_int16_t	precut;

		precut = FR_IP_OFF(frep) + ntohs(frep->fr_ip->ip_len) -
		    frep->fr_ip->ip_hl * 4 - off;
		if (precut >= ip_len)
			goto drop_fragment;
		m_adj(frent->fr_m, precut);
		DPFPRINTF(("overlap -%d\n", precut));
		/* Enforce 8 byte boundaries */
		ip->ip_off = htons(ntohs(ip->ip_off) + (precut >> 3));
		off = (ntohs(ip->ip_off) & IP_OFFMASK) << 3;
		ip_len -= precut;
		ip->ip_len = htons(ip_len);
	}

	for (; frea != NULL && ip_len + off > FR_IP_OFF(frea);
	    frea = next)
	{
		u_int16_t	aftercut;

		aftercut = ip_len + off - FR_IP_OFF(frea);
		DPFPRINTF(("adjust overlap %d\n", aftercut));
		if (aftercut < ntohs(frea->fr_ip->ip_len) - frea->fr_ip->ip_hl
		    * 4)
		{
			frea->fr_ip->ip_len =
			    htons(ntohs(frea->fr_ip->ip_len) - aftercut);
			frea->fr_ip->ip_off = htons(ntohs(frea->fr_ip->ip_off) +
			    (aftercut >> 3));
			m_adj(frea->fr_m, aftercut);
			break;
		}

		/* This fragment is completely overlapped, lose it */
		next = LIST_NEXT(frea, fr_next);
		m_freem(frea->fr_m);
		LIST_REMOVE(frea, fr_next);
		uma_zfree(V_pf_frent_z, frea);
	}

insert:
	/* Update maximum data size */
	if ((*frag)->fr_max < max)
		(*frag)->fr_max = max;
	/* This is the last segment */
	if (!mff)
		(*frag)->fr_flags |= PFFRAG_SEENLAST;

	if (frep == NULL)
		LIST_INSERT_HEAD(&(*frag)->fr_queue, frent, fr_next);
	else
		LIST_INSERT_AFTER(frep, frent, fr_next);

	/* Check if we are completely reassembled */
	if (!((*frag)->fr_flags & PFFRAG_SEENLAST))
		return (NULL);

	/* Check if we have all the data */
	off = 0;
	for (frep = LIST_FIRST(&(*frag)->fr_queue); frep; frep = next) {
		next = LIST_NEXT(frep, fr_next);

		off += ntohs(frep->fr_ip->ip_len) - frep->fr_ip->ip_hl * 4;
		if (off < (*frag)->fr_max &&
		    (next == NULL || FR_IP_OFF(next) != off))
		{
			DPFPRINTF(("missing fragment at %d, next %d, max %d\n",
			    off, next == NULL ? -1 : FR_IP_OFF(next),
			    (*frag)->fr_max));
			return (NULL);
		}
	}
	DPFPRINTF(("%d < %d?\n", off, (*frag)->fr_max));
	if (off < (*frag)->fr_max)
		return (NULL);

	/* We have all the data */
	frent = LIST_FIRST(&(*frag)->fr_queue);
	KASSERT((frent != NULL), ("frent == NULL: %s", __FUNCTION__));
	if ((frent->fr_ip->ip_hl << 2) + off > IP_MAXPACKET) {
		DPFPRINTF(("drop: too big: %d\n", off));
		pf_free_fragment(*frag);
		*frag = NULL;
		return (NULL);
	}
	next = LIST_NEXT(frent, fr_next);

	/* Magic from ip_input */
	ip = frent->fr_ip;
	m = frent->fr_m;
	m2 = m->m_next;
	m->m_next = NULL;
	m_cat(m, m2);
	uma_zfree(V_pf_frent_z, frent);
	for (frent = next; frent != NULL; frent = next) {
		next = LIST_NEXT(frent, fr_next);

		m2 = frent->fr_m;
		uma_zfree(V_pf_frent_z, frent);
		m->m_pkthdr.csum_flags &= m2->m_pkthdr.csum_flags;
		m->m_pkthdr.csum_data += m2->m_pkthdr.csum_data;
		m_cat(m, m2);
	}

	while (m->m_pkthdr.csum_data & 0xffff0000)
		m->m_pkthdr.csum_data = (m->m_pkthdr.csum_data & 0xffff) +
		    (m->m_pkthdr.csum_data >> 16);
	ip->ip_src = (*frag)->fr_src;
	ip->ip_dst = (*frag)->fr_dst;

	/* Remove from fragment queue */
	pf_remove_fragment(*frag);
	*frag = NULL;

	hlen = ip->ip_hl << 2;
	ip->ip_len = htons(off + hlen);
	m->m_len += hlen;
	m->m_data -= hlen;

	/* some debugging cruft by sklower, below, will go away soon */
	/* XXX this should be done elsewhere */
	if (m->m_flags & M_PKTHDR) {
		int plen = 0;
		for (m2 = m; m2; m2 = m2->m_next)
			plen += m2->m_len;
		m->m_pkthdr.len = plen;
	}

	DPFPRINTF(("complete: %p(%d)\n", m, ntohs(ip->ip_len)));
	return (m);

drop_fragment:
	/* Oops - fail safe - drop packet */
	uma_zfree(V_pf_frent_z, frent);
	m_freem(m);
	return (NULL);
}

static struct mbuf *
pf_fragcache(struct mbuf **m0, struct ip *h, struct pf_fragment **frag, int mff,
    int drop, int *nomem)
{
	struct mbuf	*m = *m0;
	struct pf_frent	*frp, *fra, *cur = NULL;
	int		 ip_len = ntohs(h->ip_len) - (h->ip_hl << 2);
	u_int16_t	 off = ntohs(h->ip_off) << 3;
	u_int16_t	 max = ip_len + off;
	int		 hosed = 0;

	PF_FRAG_ASSERT();
	KASSERT((*frag == NULL || !BUFFER_FRAGMENTS(*frag)),
	    ("!(*frag == NULL || !BUFFER_FRAGMENTS(*frag)): %s", __FUNCTION__));

	/* Create a new range queue for this packet */
	if (*frag == NULL) {
		*frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
		if (*frag == NULL) {
			pf_flush_fragments();
			*frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
			if (*frag == NULL)
				goto no_mem;
		}

		/* Get an entry for the queue */
		cur = uma_zalloc(V_pf_frent_z, M_NOWAIT);
		if (cur == NULL) {
			uma_zfree(V_pf_frag_z, *frag);
			*frag = NULL;
			goto no_mem;
		}

		(*frag)->fr_flags = PFFRAG_NOBUFFER;
		(*frag)->fr_max = 0;
		(*frag)->fr_src = h->ip_src;
		(*frag)->fr_dst = h->ip_dst;
		(*frag)->fr_p = h->ip_p;
		(*frag)->fr_id = h->ip_id;
		(*frag)->fr_timeout = time_uptime;

		cur->fr_off = off;
		cur->fr_end = max;
		LIST_INIT(&(*frag)->fr_queue);
		LIST_INSERT_HEAD(&(*frag)->fr_queue, cur, fr_next);

		RB_INSERT(pf_frag_tree, &V_pf_cache_tree, *frag);
		TAILQ_INSERT_HEAD(&V_pf_cachequeue, *frag, frag_next);

		DPFPRINTF(("fragcache[%d]: new %d-%d\n", h->ip_id, off, max));

		goto pass;
	}

	/*
	 * Find a fragment after the current one:
	 *  - off contains the real shifted offset.
	 */
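	/*
	 * The cache keeps only (offset, end) byte ranges, not the packet
	 * data itself. frp ends up as the last range starting at or before
	 * off, fra as the first range after it.
	 */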
	frp = NULL;
	LIST_FOREACH(fra, &(*frag)->fr_queue, fr_next) {
		if (fra->fr_off > off)
			break;
		frp = fra;
	}

	KASSERT((frp != NULL || fra != NULL),
	    ("!(frp != NULL || fra != NULL): %s", __FUNCTION__));

	if (frp != NULL) {
		int	precut;

		precut = frp->fr_end - off;
		if (precut >= ip_len) {
			/* Fragment is entirely a duplicate */
			DPFPRINTF(("fragcache[%d]: dead (%d-%d) %d-%d\n",
			    h->ip_id, frp->fr_off, frp->fr_end, off, max));
			goto drop_fragment;
		}
		if (precut == 0) {
			/* They are adjacent. Fixup cache entry */
			DPFPRINTF(("fragcache[%d]: adjacent (%d-%d) %d-%d\n",
			    h->ip_id, frp->fr_off, frp->fr_end, off, max));
			frp->fr_end = max;
		} else if (precut > 0) {
			/*
			 * The first part of this payload overlaps with a
			 * fragment that has already been passed.
			 * Need to trim off the first part of the payload.
			 * But to do so easily, we need to create another
			 * mbuf to throw the original header into.
			 */

			DPFPRINTF(("fragcache[%d]: chop %d (%d-%d) %d-%d\n",
			    h->ip_id, precut, frp->fr_off, frp->fr_end, off,
			    max));

			off += precut;
			max -= precut;
			/* Update the previous frag to encompass this one */
			frp->fr_end = max;

			if (!drop) {
				/*
				 * XXX Optimization opportunity
				 * This is a very heavy way to trim the payload.
				 * we could do it much faster by diddling mbuf
				 * internals but that would be even less legible
				 * than this mbuf magic. For my next trick,
				 * I'll pull a rabbit out of my laptop.
				 */
				*m0 = m_dup(m, M_NOWAIT);
				if (*m0 == NULL)
					goto no_mem;
				/* From KAME Project : We have missed this! */
				m_adj(*m0, (h->ip_hl << 2) -
				    (*m0)->m_pkthdr.len);

				KASSERT(((*m0)->m_next == NULL),
				    ("(*m0)->m_next != NULL: %s",
				    __FUNCTION__));
				m_adj(m, precut + (h->ip_hl << 2));
				m_cat(*m0, m);
				m = *m0;
				if (m->m_flags & M_PKTHDR) {
					int plen = 0;
					struct mbuf *t;
					for (t = m; t; t = t->m_next)
						plen += t->m_len;
					m->m_pkthdr.len = plen;
				}


				h = mtod(m, struct ip *);

				KASSERT(((int)m->m_len ==
				    ntohs(h->ip_len) - precut),
				    ("m->m_len != ntohs(h->ip_len) - precut: %s",
				    __FUNCTION__));
				h->ip_off = htons(ntohs(h->ip_off) +
				    (precut >> 3));
				h->ip_len = htons(ntohs(h->ip_len) - precut);
			} else {
				hosed++;
			}
		} else {
			/* There is a gap between fragments */

			DPFPRINTF(("fragcache[%d]: gap %d (%d-%d) %d-%d\n",
			    h->ip_id, -precut, frp->fr_off, frp->fr_end, off,
			    max));

			cur = uma_zalloc(V_pf_frent_z, M_NOWAIT);
			if (cur == NULL)
				goto no_mem;

			cur->fr_off = off;
			cur->fr_end = max;
			LIST_INSERT_AFTER(frp, cur, fr_next);
		}
	}

	if (fra != NULL) {
		int	aftercut;
		int	merge = 0;

		aftercut = max - fra->fr_off;
		if (aftercut == 0) {
			/* Adjacent fragments */
			DPFPRINTF(("fragcache[%d]: adjacent %d-%d (%d-%d)\n",
			    h->ip_id, off, max, fra->fr_off, fra->fr_end));
			fra->fr_off = off;
			merge = 1;
		} else if (aftercut > 0) {
			/* Need to chop off the tail of this fragment */
			DPFPRINTF(("fragcache[%d]: chop %d %d-%d (%d-%d)\n",
			    h->ip_id, aftercut, off, max, fra->fr_off,
			    fra->fr_end));
			fra->fr_off = off;
			max -= aftercut;

			merge = 1;

			if (!drop) {
				m_adj(m, -aftercut);
				if (m->m_flags & M_PKTHDR) {
					int plen = 0;
					struct mbuf *t;
					for (t = m; t; t = t->m_next)
						plen += t->m_len;
					m->m_pkthdr.len = plen;
				}
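				/*
				 * The chain was trimmed above, so reload
				 * the header pointer before patching ip_len.
				 */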
				h = mtod(m, struct ip *);
				KASSERT(((int)m->m_len == ntohs(h->ip_len) - aftercut),
				    ("m->m_len != ntohs(h->ip_len) - aftercut: %s",
				    __FUNCTION__));
				h->ip_len = htons(ntohs(h->ip_len) - aftercut);
			} else {
				hosed++;
			}
		} else if (frp == NULL) {
			/* There is a gap between fragments */
			DPFPRINTF(("fragcache[%d]: gap %d %d-%d (%d-%d)\n",
			    h->ip_id, -aftercut, off, max, fra->fr_off,
			    fra->fr_end));

			cur = uma_zalloc(V_pf_frent_z, M_NOWAIT);
			if (cur == NULL)
				goto no_mem;

			cur->fr_off = off;
			cur->fr_end = max;
			LIST_INSERT_BEFORE(fra, cur, fr_next);
		}


		/* Need to glue together two separate fragment descriptors */
		if (merge) {
			if (cur && fra->fr_off <= cur->fr_end) {
				/* Need to merge in a previous 'cur' */
				DPFPRINTF(("fragcache[%d]: adjacent(merge "
				    "%d-%d) %d-%d (%d-%d)\n",
				    h->ip_id, cur->fr_off, cur->fr_end, off,
				    max, fra->fr_off, fra->fr_end));
				fra->fr_off = cur->fr_off;
				LIST_REMOVE(cur, fr_next);
				uma_zfree(V_pf_frent_z, cur);
				cur = NULL;

			} else if (frp && fra->fr_off <= frp->fr_end) {
				/* Need to merge in a modified 'frp' */
				KASSERT((cur == NULL), ("cur != NULL: %s",
				    __FUNCTION__));
				DPFPRINTF(("fragcache[%d]: adjacent(merge "
				    "%d-%d) %d-%d (%d-%d)\n",
				    h->ip_id, frp->fr_off, frp->fr_end, off,
				    max, fra->fr_off, fra->fr_end));
				fra->fr_off = frp->fr_off;
				LIST_REMOVE(frp, fr_next);
				uma_zfree(V_pf_frent_z, frp);
				frp = NULL;

			}
		}
	}

	if (hosed) {
		/*
		 * We must keep tracking the overall fragment even when
		 * we're going to drop it anyway so that we know when to
		 * free the overall descriptor. Thus we drop the frag late.
		 */
		goto drop_fragment;
	}


pass:
	/* Update maximum data size */
	if ((*frag)->fr_max < max)
		(*frag)->fr_max = max;

	/* This is the last segment */
	if (!mff)
		(*frag)->fr_flags |= PFFRAG_SEENLAST;

	/* Check if we are completely reassembled */
	if (((*frag)->fr_flags & PFFRAG_SEENLAST) &&
	    LIST_FIRST(&(*frag)->fr_queue)->fr_off == 0 &&
	    LIST_FIRST(&(*frag)->fr_queue)->fr_end == (*frag)->fr_max) {
		/* Remove from fragment queue */
		DPFPRINTF(("fragcache[%d]: done 0-%d\n", h->ip_id,
		    (*frag)->fr_max));
		pf_free_fragment(*frag);
		*frag = NULL;
	}

	return (m);

no_mem:
	*nomem = 1;

	/* Still need to pay attention to !IP_MF */
	if (!mff && *frag != NULL)
		(*frag)->fr_flags |= PFFRAG_SEENLAST;

	m_freem(m);
	return (NULL);

drop_fragment:

	/* Still need to pay attention to !IP_MF */
	if (!mff && *frag != NULL)
		(*frag)->fr_flags |= PFFRAG_SEENLAST;

	if (drop) {
		/* This fragment has been deemed bad. Don't reass */
		if (((*frag)->fr_flags & PFFRAG_DROP) == 0)
			DPFPRINTF(("fragcache[%d]: dropping overall fragment\n",
			    h->ip_id));
		(*frag)->fr_flags |= PFFRAG_DROP;
	}

	m_freem(m);
	return (NULL);
}

int
pf_normalize_ip(struct mbuf **m0, int dir, struct pfi_kif *kif, u_short *reason,
    struct pf_pdesc *pd)
{
	struct mbuf		*m = *m0;
	struct pf_rule		*r;
	struct pf_frent		*frent;
	struct pf_fragment	*frag = NULL;
	struct ip		*h = mtod(m, struct ip *);
	int			 mff = (ntohs(h->ip_off) & IP_MF);
	int			 hlen = h->ip_hl << 2;
	u_int16_t		 fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
	u_int16_t		 max;
	int			 ip_len;
	int			 ip_off;
	int			 tag = -1;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	while (r != NULL) {
		r->evaluations++;
		if (pfi_kif_match(r->kif, kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != AF_INET)
			r = r->skip[PF_SKIP_AF].ptr;
		else if (r->proto && r->proto != h->ip_p)
			r = r->skip[PF_SKIP_PROTO].ptr;
		else if (PF_MISMATCHAW(&r->src.addr,
		    (struct pf_addr *)&h->ip_src.s_addr, AF_INET,
		    r->src.neg, kif, M_GETFIB(m)))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr,
		    (struct pf_addr *)&h->ip_dst.s_addr, AF_INET,
		    r->dst.neg, NULL, M_GETFIB(m)))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else if (r->match_tag && !pf_match_tag(m, r, &tag,
		    pd->pf_mtag ? pd->pf_mtag->tag : 0))
			r = TAILQ_NEXT(r, entries);
		else
			break;
	}

	if (r == NULL || r->action == PF_NOSCRUB)
		return (PF_PASS);
	else {
		r->packets[dir == PF_OUT]++;
		r->bytes[dir == PF_OUT] += pd->tot_len;
	}

	/* Check for illegal packets */
	if (hlen < (int)sizeof(struct ip))
		goto drop;

	if (hlen > ntohs(h->ip_len))
		goto drop;

	/* Clear IP_DF if the rule uses the no-df option */
	if (r->rule_flag & PFRULE_NODF && h->ip_off & htons(IP_DF)) {
		u_int16_t ip_off = h->ip_off;

		h->ip_off &= htons(~IP_DF);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
	}

	/* We will need other tests here */
	if (!fragoff && !mff)
		goto no_fragment;

	/*
	 * We're dealing with a fragment now. Don't allow fragments
	 * with IP_DF to enter the cache. If the flag was cleared by
	 * no-df above, fine. Otherwise drop it.
	 */
	if (h->ip_off & htons(IP_DF)) {
		DPFPRINTF(("IP_DF\n"));
		goto bad;
	}

	ip_len = ntohs(h->ip_len) - hlen;
	ip_off = (ntohs(h->ip_off) & IP_OFFMASK) << 3;

	/* All fragments are 8 byte aligned */
	if (mff && (ip_len & 0x7)) {
		DPFPRINTF(("mff and %d\n", ip_len));
		goto bad;
	}

	/* Respect maximum length */
	if (fragoff + ip_len > IP_MAXPACKET) {
		DPFPRINTF(("max packet %d\n", fragoff + ip_len));
		goto bad;
	}
	max = fragoff + ip_len;

	if ((r->rule_flag & (PFRULE_FRAGCROP|PFRULE_FRAGDROP)) == 0) {

		/* Fully buffer all of the fragments */
		PF_FRAG_LOCK();
		frag = pf_find_fragment(h, &V_pf_frag_tree);

		/* Check if we saw the last fragment already */
		if (frag != NULL && (frag->fr_flags & PFFRAG_SEENLAST) &&
		    max > frag->fr_max)
			goto bad;

		/* Get an entry for the fragment queue */
		frent = uma_zalloc(V_pf_frent_z, M_NOWAIT);
		if (frent == NULL) {
			PF_FRAG_UNLOCK();
			REASON_SET(reason, PFRES_MEMORY);
			return (PF_DROP);
		}
		frent->fr_ip = h;
		frent->fr_m = m;

		/* Might return a completely reassembled mbuf, or NULL */
		DPFPRINTF(("reass frag %d @ %d-%d\n", h->ip_id, fragoff, max));
		*m0 = m = pf_reassemble(m0, &frag, frent, mff);
		PF_FRAG_UNLOCK();

		if (m == NULL)
			return (PF_DROP);

		/* use mtag from concatenated mbuf chain */
		pd->pf_mtag = pf_find_mtag(m);
#ifdef DIAGNOSTIC
		if (pd->pf_mtag == NULL) {
			printf("%s: pf_find_mtag returned NULL(1)\n", __func__);
			if ((pd->pf_mtag = pf_get_mtag(m)) == NULL) {
				m_freem(m);
				*m0 = NULL;
				goto no_mem;
			}
		}
#endif
		if (frag != NULL && (frag->fr_flags & PFFRAG_DROP))
			goto drop;

		h = mtod(m, struct ip *);
	} else {
		/* non-buffering fragment cache (drops or masks overlaps) */
		int	nomem = 0;

		if (dir == PF_OUT && pd->pf_mtag->flags & PF_TAG_FRAGCACHE) {
			/*
			 * Already passed the fragment cache in the
			 * input direction. If we continued, it would
			 * appear to be a dup and would be dropped.
			 */
			goto fragment_pass;
		}

		PF_FRAG_LOCK();
		frag = pf_find_fragment(h, &V_pf_cache_tree);

		/* Check if we saw the last fragment already */
		if (frag != NULL && (frag->fr_flags & PFFRAG_SEENLAST) &&
		    max > frag->fr_max) {
			if (r->rule_flag & PFRULE_FRAGDROP)
				frag->fr_flags |= PFFRAG_DROP;
			goto bad;
		}

		*m0 = m = pf_fragcache(m0, h, &frag, mff,
		    (r->rule_flag & PFRULE_FRAGDROP) ? 1 : 0, &nomem);
		PF_FRAG_UNLOCK();
		if (m == NULL) {
			if (nomem)
				goto no_mem;
			goto drop;
		}

		/* use mtag from copied and trimmed mbuf chain */
		pd->pf_mtag = pf_find_mtag(m);
#ifdef DIAGNOSTIC
		if (pd->pf_mtag == NULL) {
			printf("%s: pf_find_mtag returned NULL(2)\n", __func__);
			if ((pd->pf_mtag = pf_get_mtag(m)) == NULL) {
				m_freem(m);
				*m0 = NULL;
				goto no_mem;
			}
		}
#endif
		if (dir == PF_IN)
			pd->pf_mtag->flags |= PF_TAG_FRAGCACHE;

		if (frag != NULL && (frag->fr_flags & PFFRAG_DROP))
			goto drop;
		goto fragment_pass;
	}

no_fragment:
	/* At this point, only IP_DF is allowed in ip_off */
	if (h->ip_off & ~htons(IP_DF)) {
		u_int16_t ip_off = h->ip_off;

		h->ip_off &= htons(IP_DF);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
	}

	/* not missing a return here */

fragment_pass:
	pf_scrub_ip(&m, r->rule_flag, r->min_ttl, r->set_tos);

	if ((r->rule_flag & (PFRULE_FRAGCROP|PFRULE_FRAGDROP)) == 0)
		pd->flags |= PFDESC_IP_REAS;
	return (PF_PASS);

no_mem:
	REASON_SET(reason, PFRES_MEMORY);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET, dir, *reason, r, NULL, NULL, pd,
		    1);
	return (PF_DROP);

drop:
	REASON_SET(reason, PFRES_NORM);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET, dir, *reason, r, NULL, NULL, pd,
		    1);
	return (PF_DROP);

bad:
	DPFPRINTF(("dropping bad fragment\n"));

	/* Free associated fragments */
	if (frag != NULL) {
		pf_free_fragment(frag);
		PF_FRAG_UNLOCK();
	}

	REASON_SET(reason, PFRES_FRAG);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET, dir, *reason, r, NULL, NULL, pd,
		    1);

	return (PF_DROP);
}
#endif

#ifdef INET6
int
pf_normalize_ip6(struct mbuf **m0, int dir, struct pfi_kif *kif,
    u_short *reason, struct pf_pdesc *pd)
{
	struct mbuf		*m = *m0;
	struct pf_rule		*r;
	struct ip6_hdr		*h = mtod(m, struct ip6_hdr *);
	int			 off;
	struct ip6_ext		 ext;
	struct ip6_opt		 opt;
	struct ip6_opt_jumbo	 jumbo;
	struct ip6_frag		 frag;
	u_int32_t		 jumbolen = 0, plen;
	u_int16_t		 fragoff = 0;
	int			 optend;
	int			 ooff;
	u_int8_t		 proto;
	int			 terminal;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	while (r != NULL) {
		r->evaluations++;
		if (pfi_kif_match(r->kif, kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != AF_INET6)
			r = r->skip[PF_SKIP_AF].ptr;
#if 0 /* header chain! */
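		/*
		 * Matching r->proto against ip6_nxt would be wrong here:
		 * ip6_nxt may name an extension header rather than the final
		 * protocol, so this check stays disabled until the header
		 * chain is walked.
		 */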
		else if (r->proto && r->proto != h->ip6_nxt)
			r = r->skip[PF_SKIP_PROTO].ptr;
#endif
		else if (PF_MISMATCHAW(&r->src.addr,
		    (struct pf_addr *)&h->ip6_src, AF_INET6,
		    r->src.neg, kif, M_GETFIB(m)))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr,
		    (struct pf_addr *)&h->ip6_dst, AF_INET6,
		    r->dst.neg, NULL, M_GETFIB(m)))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else
			break;
	}

	if (r == NULL || r->action == PF_NOSCRUB)
		return (PF_PASS);
	else {
		r->packets[dir == PF_OUT]++;
		r->bytes[dir == PF_OUT] += pd->tot_len;
	}

	/* Check for illegal packets */
	if (sizeof(struct ip6_hdr) + IPV6_MAXPACKET < m->m_pkthdr.len)
		goto drop;

	off = sizeof(struct ip6_hdr);
	proto = h->ip6_nxt;
	terminal = 0;
	do {
		switch (proto) {
		case IPPROTO_FRAGMENT:
			goto fragment;
			break;
		case IPPROTO_AH:
		case IPPROTO_ROUTING:
		case IPPROTO_DSTOPTS:
			if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
			    NULL, AF_INET6))
				goto shortpkt;
			if (proto == IPPROTO_AH)
				off += (ext.ip6e_len + 2) * 4;
			else
				off += (ext.ip6e_len + 1) * 8;
			proto = ext.ip6e_nxt;
			break;
		case IPPROTO_HOPOPTS:
			if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
			    NULL, AF_INET6))
				goto shortpkt;
			optend = off + (ext.ip6e_len + 1) * 8;
			ooff = off + sizeof(ext);
			do {
				if (!pf_pull_hdr(m, ooff, &opt.ip6o_type,
				    sizeof(opt.ip6o_type), NULL, NULL,
				    AF_INET6))
					goto shortpkt;
				if (opt.ip6o_type == IP6OPT_PAD1) {
					ooff++;
					continue;
				}
				if (!pf_pull_hdr(m, ooff, &opt, sizeof(opt),
				    NULL, NULL, AF_INET6))
					goto shortpkt;
				if (ooff + sizeof(opt) + opt.ip6o_len > optend)
					goto drop;
				switch (opt.ip6o_type) {
				case IP6OPT_JUMBO:
					if (h->ip6_plen != 0)
						goto drop;
					if (!pf_pull_hdr(m, ooff, &jumbo,
					    sizeof(jumbo), NULL, NULL,
					    AF_INET6))
						goto shortpkt;
					memcpy(&jumbolen, jumbo.ip6oj_jumbo_len,
					    sizeof(jumbolen));
					jumbolen = ntohl(jumbolen);
					if (jumbolen <= IPV6_MAXPACKET)
						goto drop;
					if (sizeof(struct ip6_hdr) + jumbolen !=
					    m->m_pkthdr.len)
						goto drop;
					break;
				default:
					break;
				}
				ooff += sizeof(opt) + opt.ip6o_len;
			} while (ooff < optend);

			off = optend;
			proto = ext.ip6e_nxt;
			break;
		default:
			terminal = 1;
			break;
		}
	} while (!terminal);

	/* jumbo payload option must be present, or plen > 0 */
	if (ntohs(h->ip6_plen) == 0)
		plen = jumbolen;
	else
		plen = ntohs(h->ip6_plen);
	if (plen == 0)
		goto drop;
	if (sizeof(struct ip6_hdr) + plen > m->m_pkthdr.len)
		goto shortpkt;

	pf_scrub_ip6(&m, r->min_ttl);

	return (PF_PASS);

fragment:
	if (ntohs(h->ip6_plen) == 0 || jumbolen)
		goto drop;
	plen = ntohs(h->ip6_plen);

	if (!pf_pull_hdr(m, off, &frag, sizeof(frag), NULL, NULL, AF_INET6))
		goto shortpkt;
	fragoff = ntohs(frag.ip6f_offlg & IP6F_OFF_MASK);
	if (fragoff + (plen - off - sizeof(frag)) > IPV6_MAXPACKET)
		goto badfrag;

	/* do something about it */
	/* remember to set pd->flags |= PFDESC_IP_REAS */
	return (PF_PASS);

shortpkt:
	REASON_SET(reason, PFRES_SHORT);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET6, dir, *reason, r, NULL, NULL, pd,
		    1);
	return (PF_DROP);

drop:
	REASON_SET(reason, PFRES_NORM);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET6, dir, *reason, r, NULL, NULL, pd,
		    1);
	return (PF_DROP);

badfrag:
	REASON_SET(reason, PFRES_FRAG);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET6, dir, *reason, r, NULL, NULL, pd,
		    1);
	return (PF_DROP);
}
#endif /* INET6 */

int
pf_normalize_tcp(int dir, struct pfi_kif *kif, struct mbuf *m, int ipoff,
    int off, void *h, struct pf_pdesc *pd)
{
	struct pf_rule	*r, *rm = NULL;
	struct tcphdr	*th = pd->hdr.tcp;
	int		 rewrite = 0;
	u_short		 reason;
	u_int8_t	 flags;
	sa_family_t	 af = pd->af;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	while (r != NULL) {
		r->evaluations++;
		if (pfi_kif_match(r->kif, kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != af)
			r = r->skip[PF_SKIP_AF].ptr;
		else if (r->proto && r->proto != pd->proto)
			r = r->skip[PF_SKIP_PROTO].ptr;
		else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
		    r->src.neg, kif, M_GETFIB(m)))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (r->src.port_op && !pf_match_port(r->src.port_op,
		    r->src.port[0], r->src.port[1], th->th_sport))
			r = r->skip[PF_SKIP_SRC_PORT].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
		    r->dst.neg, NULL, M_GETFIB(m)))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
		    r->dst.port[0], r->dst.port[1], th->th_dport))
			r = r->skip[PF_SKIP_DST_PORT].ptr;
		else if (r->os_fingerprint != PF_OSFP_ANY && !pf_osfp_match(
		    pf_osfp_fingerprint(pd, m, off, th),
		    r->os_fingerprint))
			r = TAILQ_NEXT(r, entries);
		else {
			rm = r;
			break;
		}
	}

	if (rm == NULL || rm->action == PF_NOSCRUB)
		return (PF_PASS);
	else {
		r->packets[dir == PF_OUT]++;
		r->bytes[dir == PF_OUT] += pd->tot_len;
	}

	if (rm->rule_flag & PFRULE_REASSEMBLE_TCP)
		pd->flags |= PFDESC_TCP_NORM;

	flags = th->th_flags;
	if (flags & TH_SYN) {
		/* Illegal packet */
		if (flags & TH_RST)
			goto tcp_drop;

		if (flags & TH_FIN)
			flags &= ~TH_FIN;
	} else {
		/* Illegal packet */
		if (!(flags & (TH_ACK|TH_RST)))
			goto tcp_drop;
	}

	if (!(flags & TH_ACK)) {
		/* These flags are only valid if ACK is set */
		if ((flags & TH_FIN) || (flags & TH_PUSH) || (flags & TH_URG))
			goto tcp_drop;
	}

	/* Check for illegal header length */
	if (th->th_off < (sizeof(struct tcphdr) >> 2))
		goto tcp_drop;

	/* If flags changed, or reserved data set, then adjust */
	if (flags != th->th_flags || th->th_x2 != 0) {
		u_int16_t	ov, nv;

		ov = *(u_int16_t *)(&th->th_ack + 1);
		th->th_flags = flags;
		th->th_x2 = 0;
		nv = *(u_int16_t *)(&th->th_ack + 1);

		th->th_sum = pf_cksum_fixup(th->th_sum, ov, nv, 0);
		rewrite = 1;
	}

	/* Remove urgent pointer, if TH_URG is not set */
	if (!(flags & TH_URG) && th->th_urp) {
		th->th_sum = pf_cksum_fixup(th->th_sum, th->th_urp, 0, 0);
		th->th_urp = 0;
		rewrite = 1;
	}

	/* Process options */
	if (r->max_mss && pf_normalize_tcpopt(r, m, th, off, pd->af))
		rewrite = 1;

	/* copy back packet headers if we sanitized */
	if (rewrite)
		m_copyback(m, off, sizeof(*th), (caddr_t)th);

	return (PF_PASS);

tcp_drop:
	REASON_SET(&reason, PFRES_NORM);
	if (rm != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET, dir, reason, r, NULL, NULL, pd,
		    1);
	return (PF_DROP);
}

int
pf_normalize_tcp_init(struct mbuf *m, int off, struct pf_pdesc *pd,
    struct tcphdr *th, struct pf_state_peer *src, struct pf_state_peer *dst)
{
	u_int32_t tsval, tsecr;
	u_int8_t hdr[60];
	u_int8_t *opt;

	KASSERT((src->scrub == NULL),
	    ("pf_normalize_tcp_init: src->scrub != NULL"));

	src->scrub = uma_zalloc(V_pf_state_scrub_z, M_ZERO | M_NOWAIT);
	if (src->scrub == NULL)
		return (1);

	switch (pd->af) {
#ifdef INET
	case AF_INET: {
		struct ip *h = mtod(m, struct ip *);
		src->scrub->pfss_ttl = h->ip_ttl;
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
		src->scrub->pfss_ttl = h->ip6_hlim;
		break;
	}
#endif /* INET6 */
	}


	/*
	 * All normalizations below are only begun if we see the start of
	 * the connection. They must all set an enabled bit in pfss_flags
	 */
	if ((th->th_flags & TH_SYN) == 0)
		return (0);


	if (th->th_off > (sizeof(struct tcphdr) >> 2) && src->scrub &&
	    pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
		/* Diddle with TCP options */
		int hlen;
		opt = hdr + sizeof(struct tcphdr);
		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
		while (hlen >= TCPOLEN_TIMESTAMP) {
			switch (*opt) {
			case TCPOPT_EOL:	/* FALLTHROUGH */
			case TCPOPT_NOP:
				opt++;
				hlen--;
				break;
			case TCPOPT_TIMESTAMP:
				if (opt[1] >= TCPOLEN_TIMESTAMP) {
					src->scrub->pfss_flags |=
					    PFSS_TIMESTAMP;
					src->scrub->pfss_ts_mod =
					    htonl(arc4random());

					/* note PFSS_PAWS not set yet */
					memcpy(&tsval, &opt[2],
					    sizeof(u_int32_t));
					memcpy(&tsecr, &opt[6],
					    sizeof(u_int32_t));
					src->scrub->pfss_tsval0 = ntohl(tsval);
					src->scrub->pfss_tsval = ntohl(tsval);
					src->scrub->pfss_tsecr = ntohl(tsecr);
					getmicrouptime(&src->scrub->pfss_last);
				}
				/* FALLTHROUGH */
			default:
				hlen -= MAX(opt[1], 2);
				opt += MAX(opt[1], 2);
				break;
			}
		}
	}

	return (0);
}

void
pf_normalize_tcp_cleanup(struct pf_state *state)
{
	if (state->src.scrub)
		uma_zfree(V_pf_state_scrub_z, state->src.scrub);
	if (state->dst.scrub)
		uma_zfree(V_pf_state_scrub_z, state->dst.scrub);

	/* Someday... flush the TCP segment reassembly descriptors. */
}

int
pf_normalize_tcp_stateful(struct mbuf *m, int off, struct pf_pdesc *pd,
    u_short *reason, struct tcphdr *th, struct pf_state *state,
    struct pf_state_peer *src, struct pf_state_peer *dst, int *writeback)
{
	struct timeval uptime;
	u_int32_t tsval, tsecr;
	u_int tsval_from_last;
	u_int8_t hdr[60];
	u_int8_t *opt;
	int copyback = 0;
	int got_ts = 0;

	KASSERT((src->scrub || dst->scrub),
	    ("%s: src->scrub && dst->scrub!", __func__));

	/*
	 * Enforce the minimum TTL seen for this connection. Negate a common
	 * technique to evade an intrusion detection system and confuse
	 * firewall state code.
	 */
	switch (pd->af) {
#ifdef INET
	case AF_INET: {
		if (src->scrub) {
			struct ip *h = mtod(m, struct ip *);
			if (h->ip_ttl > src->scrub->pfss_ttl)
				src->scrub->pfss_ttl = h->ip_ttl;
			h->ip_ttl = src->scrub->pfss_ttl;
		}
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		if (src->scrub) {
			struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
			if (h->ip6_hlim > src->scrub->pfss_ttl)
				src->scrub->pfss_ttl = h->ip6_hlim;
			h->ip6_hlim = src->scrub->pfss_ttl;
		}
		break;
	}
#endif /* INET6 */
	}

	if (th->th_off > (sizeof(struct tcphdr) >> 2) &&
	    ((src->scrub && (src->scrub->pfss_flags & PFSS_TIMESTAMP)) ||
	    (dst->scrub && (dst->scrub->pfss_flags & PFSS_TIMESTAMP))) &&
	    pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
		/* Diddle with TCP options */
		int hlen;
		opt = hdr + sizeof(struct tcphdr);
		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
		while (hlen >= TCPOLEN_TIMESTAMP) {
			switch (*opt) {
			case TCPOPT_EOL:	/* FALLTHROUGH */
			case TCPOPT_NOP:
				opt++;
				hlen--;
				break;
			case TCPOPT_TIMESTAMP:
				/*
				 * Modulate the timestamps. Can be used for
				 * NAT detection, OS uptime determination or
				 * reboot detection.
				 */

				if (got_ts) {
					/* Huh? Multiple timestamps!? */
					if (V_pf_status.debug >= PF_DEBUG_MISC) {
						DPFPRINTF(("multiple TS??"));
						pf_print_state(state);
						printf("\n");
					}
					REASON_SET(reason, PFRES_TS);
					return (PF_DROP);
				}
				if (opt[1] >= TCPOLEN_TIMESTAMP) {
					memcpy(&tsval, &opt[2],
					    sizeof(u_int32_t));
					if (tsval && src->scrub &&
					    (src->scrub->pfss_flags &
					    PFSS_TIMESTAMP)) {
						tsval = ntohl(tsval);
						pf_change_a(&opt[2],
						    &th->th_sum,
						    htonl(tsval +
						    src->scrub->pfss_ts_mod),
						    0);
						copyback = 1;
					}

					/* Modulate TS reply iff valid (!0) */
					memcpy(&tsecr, &opt[6],
					    sizeof(u_int32_t));
					if (tsecr && dst->scrub &&
					    (dst->scrub->pfss_flags &
					    PFSS_TIMESTAMP)) {
						tsecr = ntohl(tsecr)
						    - dst->scrub->pfss_ts_mod;
						pf_change_a(&opt[6],
						    &th->th_sum, htonl(tsecr),
						    0);
						copyback = 1;
					}
					got_ts = 1;
				}
				/* FALLTHROUGH */
			default:
				hlen -= MAX(opt[1], 2);
				opt += MAX(opt[1], 2);
				break;
			}
		}
		if (copyback) {
			/* Copyback the options, caller copies back header */
			*writeback = 1;
			m_copyback(m, off + sizeof(struct tcphdr),
			    (th->th_off << 2) - sizeof(struct tcphdr), hdr +
			    sizeof(struct tcphdr));
		}
	}


	/*
	 * Must invalidate PAWS checks on connections idle for too long.
	 * The fastest allowed timestamp clock is 1ms. That turns out to
	 * be about 24 days before it wraps. XXX Right now our lowerbound
	 * TS echo check only works for the first 12 days of a connection
	 * when the TS has exhausted half its 32bit space
	 */
#define TS_MAX_IDLE	(24*24*60*60)
#define TS_MAX_CONN	(12*24*60*60)	/* XXX remove when better tsecr check */

	getmicrouptime(&uptime);
	if (src->scrub && (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (uptime.tv_sec - src->scrub->pfss_last.tv_sec > TS_MAX_IDLE ||
	    time_uptime - state->creation > TS_MAX_CONN)) {
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			DPFPRINTF(("src idled out of PAWS\n"));
			pf_print_state(state);
			printf("\n");
		}
		src->scrub->pfss_flags = (src->scrub->pfss_flags & ~PFSS_PAWS)
		    | PFSS_PAWS_IDLED;
	}
	if (dst->scrub && (dst->scrub->pfss_flags & PFSS_PAWS) &&
	    uptime.tv_sec - dst->scrub->pfss_last.tv_sec > TS_MAX_IDLE) {
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			DPFPRINTF(("dst idled out of PAWS\n"));
			pf_print_state(state);
			printf("\n");
		}
		dst->scrub->pfss_flags = (dst->scrub->pfss_flags & ~PFSS_PAWS)
		    | PFSS_PAWS_IDLED;
	}

	if (got_ts && src->scrub && dst->scrub &&
	    (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (dst->scrub->pfss_flags & PFSS_PAWS)) {
		/*
		 * Validate that the timestamps are "in-window".
		 * RFC1323 describes TCP Timestamp options that allow
		 * measurement of RTT (round trip time) and PAWS
		 * (protection against wrapped sequence numbers). PAWS
		 * gives us a set of rules for rejecting packets on
		 * long fat pipes (packets that were somehow delayed
		 * in transit longer than the time it took to send the
		 * full TCP sequence space of 4Gb). We can use these
		 * rules and infer a few others that will let us treat
		 * the 32bit timestamp and the 32bit echoed timestamp
		 * as sequence numbers to prevent a blind attacker from
		 * inserting packets into a connection.
		 *
		 * RFC1323 tells us:
		 *  - The timestamp on this packet must be greater than
		 *    or equal to the last value echoed by the other
		 *    endpoint. The RFC says those will be discarded
		 *    since it is a dup that has already been acked.
		 *    This gives us a lowerbound on the timestamp.
		 *        timestamp >= other last echoed timestamp
		 *  - The timestamp will be less than or equal to
		 *    the last timestamp plus the time between the
		 *    last packet and now. The RFC defines the max
		 *    clock rate as 1ms. We will allow clocks to be
		 *    up to 10% fast and will allow a total difference
		 *    of 30 seconds due to a route change. And this
		 *    gives us an upperbound on the timestamp.
		 *        timestamp <= last timestamp + max ticks
		 *    We have to be careful here. Windows will send an
		 *    initial timestamp of zero and then initialize it
		 *    to a random value after the 3whs; presumably to
		 *    avoid a DoS by having to call an expensive RNG
		 *    during a SYN flood. Proof MS has at least one
		 *    good security geek.
		 *
		 *  - The TCP timestamp option must also echo the other
		 *    endpoint's timestamp. The timestamp echoed is the
		 *    one carried on the earliest unacknowledged segment
		 *    on the left edge of the sequence window. The RFC
		 *    states that the host will reject any echoed
		 *    timestamps that were larger than any ever sent.
		 *    This gives us an upperbound on the TS echo.
		 *        tsecr <= largest_tsval
		 *  - The lowerbound on the TS echo is a little more
		 *    tricky to determine. The other endpoint's echoed
		 *    values will not decrease. But there may be
		 *    network conditions that re-order packets and
		 *    cause our view of them to decrease. For now the
		 *    only lowerbound we can safely determine is that
		 *    the TS echo will never be less than the original
		 *    TS. XXX There is probably a better lowerbound.
		 *    Remove TS_MAX_CONN with better lowerbound check.
		 *        tsecr >= other original TS
		 *
		 * It is also important to note that the fastest
		 * timestamp clock of 1ms will wrap its 32bit space in
		 * 24 days. So we just disable TS checking after 24
		 * days of idle time. We actually must use a 12d
		 * connection limit until we can come up with a better
		 * lowerbound to the TS echo check.
		 */
		struct timeval delta_ts;
		int ts_fudge;


		/*
		 * PFTM_TS_DIFF is how many seconds of leeway to allow
		 * a host's timestamp. This can happen if the previous
		 * packet got delayed in transit for much longer than
		 * this packet.
		 */
		if ((ts_fudge = state->rule.ptr->timeout[PFTM_TS_DIFF]) == 0)
			ts_fudge = V_pf_default_rule.timeout[PFTM_TS_DIFF];

		/* Calculate max ticks since the last timestamp */
#define TS_MAXFREQ	1100		/* RFC max TS freq of 1Khz + 10% skew */
#define TS_MICROSECS	1000000		/* microseconds per second */
		delta_ts = uptime;
		timevalsub(&delta_ts, &src->scrub->pfss_last);
		tsval_from_last = (delta_ts.tv_sec + ts_fudge) * TS_MAXFREQ;
		tsval_from_last += delta_ts.tv_usec / (TS_MICROSECS/TS_MAXFREQ);

		if ((src->state >= TCPS_ESTABLISHED &&
		    dst->state >= TCPS_ESTABLISHED) &&
		    (SEQ_LT(tsval, dst->scrub->pfss_tsecr) ||
		    SEQ_GT(tsval, src->scrub->pfss_tsval + tsval_from_last) ||
		    (tsecr && (SEQ_GT(tsecr, dst->scrub->pfss_tsval) ||
		    SEQ_LT(tsecr, dst->scrub->pfss_tsval0))))) {
			/*
			 * Bad RFC1323 implementation or an insertion attack.
			 *
			 *  - Solaris 2.6 and 2.7 are known to send another ACK
			 *    after the FIN,FIN|ACK,ACK closing that carries
			 *    an old timestamp.
			 */

			DPFPRINTF(("Timestamp failed %c%c%c%c\n",
			    SEQ_LT(tsval, dst->scrub->pfss_tsecr) ? '0' : ' ',
			    SEQ_GT(tsval, src->scrub->pfss_tsval +
			    tsval_from_last) ? '1' : ' ',
			    SEQ_GT(tsecr, dst->scrub->pfss_tsval) ? '2' : ' ',
			    SEQ_LT(tsecr, dst->scrub->pfss_tsval0)? '3' : ' '));
			DPFPRINTF((" tsval: %u tsecr: %u +ticks: %u "
			    "idle: %jus %lums\n",
			    tsval, tsecr, tsval_from_last,
			    (uintmax_t)delta_ts.tv_sec,
			    delta_ts.tv_usec / 1000));
			DPFPRINTF((" src->tsval: %u tsecr: %u\n",
			    src->scrub->pfss_tsval, src->scrub->pfss_tsecr));
			DPFPRINTF((" dst->tsval: %u tsecr: %u tsval0: %u"
			    "\n", dst->scrub->pfss_tsval,
			    dst->scrub->pfss_tsecr, dst->scrub->pfss_tsval0));
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
				pf_print_state(state);
				pf_print_flags(th->th_flags);
				printf("\n");
			}
			REASON_SET(reason, PFRES_TS);
			return (PF_DROP);
		}

		/* XXX I'd really like to require tsecr but it's optional */

	} else if (!got_ts && (th->th_flags & TH_RST) == 0 &&
	    ((src->state == TCPS_ESTABLISHED && dst->state == TCPS_ESTABLISHED)
	    || pd->p_len > 0 || (th->th_flags & TH_SYN)) &&
	    src->scrub && dst->scrub &&
	    (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (dst->scrub->pfss_flags & PFSS_PAWS)) {
		/*
		 * Didn't send a timestamp. Timestamps aren't really useful
		 * when:
		 *  - connection opening or closing (often not even sent).
		 *    but we must not let an attacker put a FIN on a
		 *    data packet to sneak it through our ESTABLISHED check.
		 *  - on a TCP reset. RFC suggests not even looking at TS.
		 *  - on an empty ACK. The TS will not be echoed so it will
		 *    probably not help keep the RTT calculation in sync and
		 *    there isn't as much danger when the sequence numbers
		 *    got wrapped. So some stacks don't include TS on empty
		 *    ACKs :-(
		 *
		 * To minimize the disruption to mostly RFC1323 conformant
		 * stacks, we will only require timestamps on data packets.
		 *
		 * And what do ya know, we cannot require timestamps on data
		 * packets. There appear to be devices that do legitimate
		 * TCP connection hijacking. There are HTTP devices that allow
		 * a 3whs (with timestamps) and then buffer the HTTP request.
		 * If the intermediate device has the HTTP response cache, it
		 * will spoof the response but not bother timestamping its
		 * packets. So we can look for the presence of a timestamp in
		 * the first data packet and if there, require it in all future
		 * packets.
		 */

		if (pd->p_len > 0 && (src->scrub->pfss_flags & PFSS_DATA_TS)) {
			/*
			 * Hey! Someone tried to sneak a packet in. Or the
			 * stack changed its RFC1323 behavior?!?!
			 */
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
				DPFPRINTF(("Did not receive expected RFC1323 "
				    "timestamp\n"));
				pf_print_state(state);
				pf_print_flags(th->th_flags);
				printf("\n");
			}
			REASON_SET(reason, PFRES_TS);
			return (PF_DROP);
		}
	}


	/*
	 * We will note if a host sends his data packets with or without
	 * timestamps. And require all data packets to contain a timestamp
	 * if the first does. PAWS implicitly requires that all data packets be
	 * timestamped. But I think there are middle-man devices that hijack
	 * TCP streams immediately after the 3whs and don't timestamp their
	 * packets (seen in a WWW accelerator or cache).
	 */
	if (pd->p_len > 0 && src->scrub && (src->scrub->pfss_flags &
	    (PFSS_TIMESTAMP|PFSS_DATA_TS|PFSS_DATA_NOTS)) == PFSS_TIMESTAMP) {
		if (got_ts)
			src->scrub->pfss_flags |= PFSS_DATA_TS;
		else {
			src->scrub->pfss_flags |= PFSS_DATA_NOTS;
			if (V_pf_status.debug >= PF_DEBUG_MISC && dst->scrub &&
			    (dst->scrub->pfss_flags & PFSS_TIMESTAMP)) {
				/* Don't warn if other host rejected RFC1323 */
				DPFPRINTF(("Broken RFC1323 stack did not "
				    "timestamp data packet. Disabled PAWS "
				    "security.\n"));
				pf_print_state(state);
				pf_print_flags(th->th_flags);
				printf("\n");
			}
		}
	}


	/*
	 * Update PAWS values
	 */
	if (got_ts && src->scrub && PFSS_TIMESTAMP == (src->scrub->pfss_flags &
	    (PFSS_PAWS_IDLED|PFSS_TIMESTAMP))) {
		getmicrouptime(&src->scrub->pfss_last);
		if (SEQ_GEQ(tsval, src->scrub->pfss_tsval) ||
		    (src->scrub->pfss_flags & PFSS_PAWS) == 0)
			src->scrub->pfss_tsval = tsval;

		if (tsecr) {
			if (SEQ_GEQ(tsecr, src->scrub->pfss_tsecr) ||
			    (src->scrub->pfss_flags & PFSS_PAWS) == 0)
				src->scrub->pfss_tsecr = tsecr;

			if ((src->scrub->pfss_flags & PFSS_PAWS) == 0 &&
			    (SEQ_LT(tsval, src->scrub->pfss_tsval0) ||
			    src->scrub->pfss_tsval0 == 0)) {
				/* tsval0 MUST be the lowest timestamp */
				src->scrub->pfss_tsval0 = tsval;
			}

			/* Only fully initialized after a TS gets echoed */
			if ((src->scrub->pfss_flags & PFSS_PAWS) == 0)
				src->scrub->pfss_flags |= PFSS_PAWS;
		}
	}

	/* I have a dream.... TCP segment reassembly.... */
	return (0);
}

static int
pf_normalize_tcpopt(struct pf_rule *r, struct mbuf *m, struct tcphdr *th,
    int off, sa_family_t af)
{
	u_int16_t	*mss;
	int		 thoff;
	int		 opt, cnt, optlen = 0;
	int		 rewrite = 0;
	u_char		 opts[TCP_MAXOLEN];
	u_char		*optp = opts;

	thoff = th->th_off << 2;
	cnt = thoff - sizeof(struct tcphdr);

	if (cnt > 0 && !pf_pull_hdr(m, off + sizeof(*th), opts, cnt,
	    NULL, NULL, af))
		return (rewrite);

	for (; cnt > 0; cnt -= optlen, optp += optlen) {
		opt = optp[0];
		if (opt == TCPOPT_EOL)
			break;
		if (opt == TCPOPT_NOP)
			optlen = 1;
		else {
			if (cnt < 2)
				break;
			optlen = optp[1];
			if (optlen < 2 || optlen > cnt)
				break;
		}
		switch (opt) {
		case TCPOPT_MAXSEG:
			mss = (u_int16_t *)(optp + 2);
			if ((ntohs(*mss)) > r->max_mss) {
				th->th_sum = pf_cksum_fixup(th->th_sum,
				    *mss, htons(r->max_mss), 0);
				*mss = htons(r->max_mss);
				rewrite = 1;
			}
			break;
		default:
			break;
		}
	}

	if (rewrite)
		m_copyback(m, off + sizeof(*th), thoff - sizeof(*th), opts);

	return (rewrite);
}

#ifdef INET
static void
pf_scrub_ip(struct mbuf **m0, u_int32_t flags, u_int8_t min_ttl, u_int8_t tos)
{
	struct mbuf	*m = *m0;
	struct ip	*h = mtod(m, struct ip *);

	/* Clear IP_DF if no-df was requested */
	if (flags & PFRULE_NODF && h->ip_off & htons(IP_DF)) {
		u_int16_t ip_off = h->ip_off;

		h->ip_off &= htons(~IP_DF);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
	}

	/* Enforce a minimum ttl, may cause endless packet loops */
	if (min_ttl && h->ip_ttl < min_ttl) {
		u_int16_t ip_ttl = h->ip_ttl;

		h->ip_ttl = min_ttl;
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_ttl, h->ip_ttl, 0);
	}

	/* Enforce tos */
	if (flags & PFRULE_SET_TOS) {
		u_int16_t	ov, nv;

		ov = *(u_int16_t *)h;
		h->ip_tos = tos;
		nv = *(u_int16_t *)h;

		h->ip_sum = pf_cksum_fixup(h->ip_sum, ov, nv, 0);
	}

	/* random-id, but not for fragments */
	if (flags & PFRULE_RANDOMID && !(h->ip_off & ~htons(IP_DF))) {
		u_int16_t ip_id = h->ip_id;

		h->ip_id = ip_randomid();
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_id, h->ip_id, 0);
	}
}
#endif /* INET */
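
/*
 * Unlike pf_scrub_ip() above, no checksum fixups are needed here: the
 * IPv6 header carries no checksum of its own.
 */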
#ifdef INET6
static void
pf_scrub_ip6(struct mbuf **m0, u_int8_t min_ttl)
{
	struct mbuf	*m = *m0;
	struct ip6_hdr	*h = mtod(m, struct ip6_hdr *);

	/* Enforce a minimum ttl, may cause endless packet loops */
	if (min_ttl && h->ip6_hlim < min_ttl)
		h->ip6_hlim = min_ttl;
}
#endif