/*-
 * Copyright (c) 2015 Gleb Smirnoff <glebius@FreeBSD.org>
 * Copyright (c) 2015 Adrian Chadd <adrian@FreeBSD.org>
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ip_input.c	8.2 (Berkeley) 1/4/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_rss.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/hash.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>

#include <net/rss_config.h>
#include <net/netisr.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/in_rss.h>
#ifdef MAC
#include <security/mac/mac_framework.h>
#endif

SYSCTL_DECL(_net_inet_ip);
/*
 * Reassembly headers are stored in hash buckets.
 */
#define	IPREASS_NHASH_LOG2	10
#define	IPREASS_NHASH		(1 << IPREASS_NHASH_LOG2)
#define	IPREASS_HMASK		(IPREASS_NHASH - 1)

struct ipqbucket {
	TAILQ_HEAD(ipqhead, ipq) head;
	struct mtx		 lock;
	int			 count;
};

VNET_DEFINE_STATIC(struct ipqbucket, ipq[IPREASS_NHASH]);
#define	V_ipq		VNET(ipq)
VNET_DEFINE_STATIC(uint32_t, ipq_hashseed);
#define	V_ipq_hashseed	VNET(ipq_hashseed)

#define	IPQ_LOCK(i)	mtx_lock(&V_ipq[i].lock)
#define	IPQ_TRYLOCK(i)	mtx_trylock(&V_ipq[i].lock)
#define	IPQ_UNLOCK(i)	mtx_unlock(&V_ipq[i].lock)
#define	IPQ_LOCK_ASSERT(i)	mtx_assert(&V_ipq[i].lock, MA_OWNED)

VNET_DEFINE_STATIC(int, ipreass_maxbucketsize);
#define	V_ipreass_maxbucketsize	VNET(ipreass_maxbucketsize)

void		ipreass_init(void);
void		ipreass_drain(void);
void		ipreass_slowtimo(void);
#ifdef VIMAGE
void		ipreass_destroy(void);
#endif
static int	sysctl_maxfragpackets(SYSCTL_HANDLER_ARGS);
static int	sysctl_maxfragbucketsize(SYSCTL_HANDLER_ARGS);
static void	ipreass_zone_change(void *);
static void	ipreass_drain_tomax(void);
static void	ipq_free(struct ipqbucket *, struct ipq *);
static struct ipq * ipq_reuse(int);

static inline void
ipq_timeout(struct ipqbucket *bucket, struct ipq *fp)
{

	IPSTAT_ADD(ips_fragtimeout, fp->ipq_nfrags);
	ipq_free(bucket, fp);
}

static inline void
ipq_drop(struct ipqbucket *bucket, struct ipq *fp)
{

	IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags);
	ipq_free(bucket, fp);
}

/*
 * By default, limit the number of IP fragments across all reassembly
 * queues to 1/32 of the total number of mbuf clusters.
 *
 * Limit the total number of reassembly queues per VNET to the
 * IP fragment limit, but ensure the limit will not allow any bucket
 * to grow above 100 items.  (The bucket limit is
 * IP_MAXFRAGPACKETS / (IPREASS_NHASH / 2), so the 50 is the correct
 * multiplier to reach a 100-item limit.)
 * The 100-item limit was chosen as brief testing seems to show that
 * this produces "reasonable" performance on some subset of systems
 * under DoS attack.
 */
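/*
 * Worked example of the arithmetic above (illustrative only):
 * IPREASS_NHASH is 1 << 10 = 1024 buckets, so IP_MAXFRAGPACKETS is
 * capped at 1024 * 50 = 51200 entries, giving a per-bucket limit of
 * 51200 / (1024 / 2) = 100 entries.  On machines with fewer mbuf
 * clusters, nmbclusters / 32 becomes the binding limit instead and
 * the bucket limit scales down proportionally.
 */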
#define	IP_MAXFRAGS		(nmbclusters / 32)
#define	IP_MAXFRAGPACKETS	(imin(IP_MAXFRAGS, IPREASS_NHASH * 50))

static int		maxfrags;
static volatile u_int	nfrags;
SYSCTL_INT(_net_inet_ip, OID_AUTO, maxfrags, CTLFLAG_RW,
    &maxfrags, 0,
    "Maximum number of IPv4 fragments allowed across all reassembly queues");
SYSCTL_UINT(_net_inet_ip, OID_AUTO, curfrags, CTLFLAG_RD,
    __DEVOLATILE(u_int *, &nfrags), 0,
    "Current number of IPv4 fragments across all reassembly queues");

VNET_DEFINE_STATIC(uma_zone_t, ipq_zone);
#define	V_ipq_zone	VNET(ipq_zone)
SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragpackets, CTLFLAG_VNET |
    CTLTYPE_INT | CTLFLAG_RW, NULL, 0, sysctl_maxfragpackets, "I",
    "Maximum number of IPv4 fragment reassembly queue entries");
SYSCTL_UMA_CUR(_net_inet_ip, OID_AUTO, fragpackets, CTLFLAG_VNET,
    &VNET_NAME(ipq_zone),
    "Current number of IPv4 fragment reassembly queue entries");

VNET_DEFINE_STATIC(int, noreass);
#define	V_noreass	VNET(noreass)

VNET_DEFINE_STATIC(int, maxfragsperpacket);
#define	V_maxfragsperpacket	VNET(maxfragsperpacket)
SYSCTL_INT(_net_inet_ip, OID_AUTO, maxfragsperpacket, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(maxfragsperpacket), 0,
    "Maximum number of IPv4 fragments allowed per packet");
SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragbucketsize,
    CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0,
    sysctl_maxfragbucketsize, "I",
    "Maximum number of IPv4 fragment reassembly queue entries per bucket");

/*
 * Take an incoming datagram fragment and try to reassemble it into a
 * whole datagram.  If the argument is the first fragment or one
 * in between, the function will return NULL and store the mbuf
 * in the fragment chain.  If the argument is the last fragment,
 * the packet will be reassembled and the pointer to the new
 * mbuf returned for further processing.  Only m_tags attached
 * to the first packet/fragment are preserved.
 * The IP header is *NOT* adjusted out of iplen.
 */
#define	M_IP_FRAG	M_PROTO9
struct mbuf *
ip_reass(struct mbuf *m)
{
	struct ip *ip;
	struct mbuf *p, *q, *nq, *t;
	struct ipq *fp;
	struct ipqhead *head;
	int i, hlen, next, tmpmax;
	u_int8_t ecn, ecn0;
	uint32_t hash, hashkey[3];
#ifdef RSS
	uint32_t rss_hash, rss_type;
#endif

	/*
	 * If reassembly is disabled or maxfragsperpacket is 0,
	 * never accept fragments.
	 * Also drop the packet if it would exceed the maximum
	 * number of fragments.
	 */
	tmpmax = maxfrags;
	if (V_noreass == 1 || V_maxfragsperpacket == 0 ||
	    (tmpmax >= 0 && atomic_load_int(&nfrags) >= (u_int)tmpmax)) {
		IPSTAT_INC(ips_fragments);
		IPSTAT_INC(ips_fragdropped);
		m_freem(m);
		return (NULL);
	}

	ip = mtod(m, struct ip *);
	hlen = ip->ip_hl << 2;

	/*
	 * Adjust ip_len to not reflect the header and
	 * convert the offset of this fragment to bytes.
	 */
	ip->ip_len = htons(ntohs(ip->ip_len) - hlen);
	/*
	 * Make sure that fragments have a data length
	 * that's a non-zero multiple of 8 bytes, unless
	 * this is the last fragment.
	 */
	if (ip->ip_len == htons(0) ||
	    ((ip->ip_off & htons(IP_MF)) && (ntohs(ip->ip_len) & 0x7) != 0)) {
		IPSTAT_INC(ips_toosmall); /* XXX */
		IPSTAT_INC(ips_fragdropped);
		m_freem(m);
		return (NULL);
	}
	if (ip->ip_off & htons(IP_MF))
		m->m_flags |= M_IP_FRAG;
	else
		m->m_flags &= ~M_IP_FRAG;
	ip->ip_off = htons(ntohs(ip->ip_off) << 3);
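
	/*
	 * Illustrative note: ip_off is carried on the wire in 8-byte
	 * units, which is why it is shifted into bytes above.  For
	 * example, with a 1500-byte MTU and a 20-byte header, each
	 * non-final fragment carries 1480 bytes of data (1480 = 185 * 8),
	 * satisfying the multiple-of-8 rule checked just before.
	 */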

	/*
	 * Make sure the fragment lies within a packet of valid size.
	 */
	if (ntohs(ip->ip_len) + ntohs(ip->ip_off) > IP_MAXPACKET) {
		IPSTAT_INC(ips_toolong);
		IPSTAT_INC(ips_fragdropped);
		m_freem(m);
		return (NULL);
	}

	/*
	 * Attempt reassembly; if it succeeds, proceed.
	 * ip_reass() will return a different mbuf.
	 */
	IPSTAT_INC(ips_fragments);
	m->m_pkthdr.PH_loc.ptr = ip;

	/*
	 * Presence of header sizes in mbufs
	 * would confuse code below.
	 */
	m->m_data += hlen;
	m->m_len -= hlen;

	hashkey[0] = ip->ip_src.s_addr;
	hashkey[1] = ip->ip_dst.s_addr;
	hashkey[2] = (uint32_t)ip->ip_p << 16;
	hashkey[2] += ip->ip_id;
	hash = jenkins_hash32(hashkey, nitems(hashkey), V_ipq_hashseed);
	hash &= IPREASS_HMASK;
	head = &V_ipq[hash].head;
	IPQ_LOCK(hash);

	/*
	 * Look for a queue of fragments
	 * of this datagram.
	 */
	TAILQ_FOREACH(fp, head, ipq_list)
		if (ip->ip_id == fp->ipq_id &&
		    ip->ip_src.s_addr == fp->ipq_src.s_addr &&
		    ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
#ifdef MAC
		    mac_ipq_match(m, fp) &&
#endif
		    ip->ip_p == fp->ipq_p)
			break;
	/*
	 * If this is the first fragment to arrive, create a reassembly queue.
	 */
	if (fp == NULL) {
		if (V_ipq[hash].count < V_ipreass_maxbucketsize)
			fp = uma_zalloc(V_ipq_zone, M_NOWAIT);
		if (fp == NULL)
			fp = ipq_reuse(hash);
		if (fp == NULL)
			goto dropfrag;
#ifdef MAC
		if (mac_ipq_init(fp, M_NOWAIT) != 0) {
			uma_zfree(V_ipq_zone, fp);
			fp = NULL;
			goto dropfrag;
		}
		mac_ipq_create(m, fp);
#endif
		TAILQ_INSERT_HEAD(head, fp, ipq_list);
		V_ipq[hash].count++;
		fp->ipq_nfrags = 1;
		atomic_add_int(&nfrags, 1);
		fp->ipq_ttl = IPFRAGTTL;
		fp->ipq_p = ip->ip_p;
		fp->ipq_id = ip->ip_id;
		fp->ipq_src = ip->ip_src;
		fp->ipq_dst = ip->ip_dst;
		fp->ipq_frags = m;
		if (m->m_flags & M_IP_FRAG)
			fp->ipq_maxoff = -1;
		else
			fp->ipq_maxoff = ntohs(ip->ip_off) + ntohs(ip->ip_len);
		m->m_nextpkt = NULL;
		goto done;
	} else {
		/*
		 * If we already saw the last fragment, make sure
		 * this fragment's offset looks sane.  Otherwise, if
		 * this is the last fragment, record its endpoint.
		 */
		if (fp->ipq_maxoff > 0) {
			i = ntohs(ip->ip_off) + ntohs(ip->ip_len);
			if (((m->m_flags & M_IP_FRAG) && i >= fp->ipq_maxoff) ||
			    ((m->m_flags & M_IP_FRAG) == 0 &&
			    i != fp->ipq_maxoff)) {
				fp = NULL;
				goto dropfrag;
			}
		} else if ((m->m_flags & M_IP_FRAG) == 0)
			fp->ipq_maxoff = ntohs(ip->ip_off) + ntohs(ip->ip_len);
		fp->ipq_nfrags++;
		atomic_add_int(&nfrags, 1);
#ifdef MAC
		mac_ipq_update(m, fp);
#endif
	}

#define GETIP(m)	((struct ip*)((m)->m_pkthdr.PH_loc.ptr))
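
	/*
	 * Background for the ECN checks below (a summary of RFC 3168
	 * section 5.3, included here for reference): if any fragment of
	 * a datagram is marked CE, the reassembled datagram must remain
	 * CE; a datagram whose fragments mix not-ECT with any ECT or CE
	 * codepoint is malformed and must be dropped.
	 */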
	/*
	 * Handle ECN by comparing this segment with the first one;
	 * if CE is set, do not lose CE.
	 * Drop the fragment if CE and not-ECT are mixed for the same packet.
	 */
	ecn = ip->ip_tos & IPTOS_ECN_MASK;
	ecn0 = GETIP(fp->ipq_frags)->ip_tos & IPTOS_ECN_MASK;
	if (ecn == IPTOS_ECN_CE) {
		if (ecn0 == IPTOS_ECN_NOTECT)
			goto dropfrag;
		if (ecn0 != IPTOS_ECN_CE)
			GETIP(fp->ipq_frags)->ip_tos |= IPTOS_ECN_CE;
	}
	if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT)
		goto dropfrag;

	/*
	 * Find a segment which begins after this one does.
	 */
	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
		if (ntohs(GETIP(q)->ip_off) > ntohs(ip->ip_off))
			break;

	/*
	 * If there is a preceding segment, it may provide some of
	 * our data already.  If so, drop the data from the incoming
	 * segment.  If it provides all of our data, drop us, otherwise
	 * stick the new segment in the proper place.
	 *
	 * If some of the data is dropped from the preceding
	 * segment, then its checksum is invalidated.
	 */
	if (p) {
		i = ntohs(GETIP(p)->ip_off) + ntohs(GETIP(p)->ip_len) -
		    ntohs(ip->ip_off);
		if (i > 0) {
			if (i >= ntohs(ip->ip_len))
				goto dropfrag;
			m_adj(m, i);
			m->m_pkthdr.csum_flags = 0;
			ip->ip_off = htons(ntohs(ip->ip_off) + i);
			ip->ip_len = htons(ntohs(ip->ip_len) - i);
		}
		m->m_nextpkt = p->m_nextpkt;
		p->m_nextpkt = m;
	} else {
		m->m_nextpkt = fp->ipq_frags;
		fp->ipq_frags = m;
	}

	/*
	 * While we overlap succeeding segments, trim them or,
	 * if they are completely covered, dequeue them.
	 */
	for (; q != NULL && ntohs(ip->ip_off) + ntohs(ip->ip_len) >
	    ntohs(GETIP(q)->ip_off); q = nq) {
		i = (ntohs(ip->ip_off) + ntohs(ip->ip_len)) -
		    ntohs(GETIP(q)->ip_off);
		if (i < ntohs(GETIP(q)->ip_len)) {
			GETIP(q)->ip_len = htons(ntohs(GETIP(q)->ip_len) - i);
			GETIP(q)->ip_off = htons(ntohs(GETIP(q)->ip_off) + i);
			m_adj(q, i);
			q->m_pkthdr.csum_flags = 0;
			break;
		}
		nq = q->m_nextpkt;
		m->m_nextpkt = nq;
		IPSTAT_INC(ips_fragdropped);
		fp->ipq_nfrags--;
		atomic_subtract_int(&nfrags, 1);
		m_freem(q);
	}

	/*
	 * Check for complete reassembly and perform frag per packet
	 * limiting.
	 *
	 * Frag limiting is performed here so that the nth frag has
	 * a chance to complete the packet before we drop the packet.
	 * As a result, n+1 frags are actually allowed per packet, but
	 * only n will ever be stored.  (n = maxfragsperpacket.)
	 */
	next = 0;
	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) {
		if (ntohs(GETIP(q)->ip_off) != next) {
			if (fp->ipq_nfrags > V_maxfragsperpacket)
				ipq_drop(&V_ipq[hash], fp);
			goto done;
		}
		next += ntohs(GETIP(q)->ip_len);
	}
	/* Make sure the last packet didn't have the IP_MF flag. */
	if (p->m_flags & M_IP_FRAG) {
		if (fp->ipq_nfrags > V_maxfragsperpacket)
			ipq_drop(&V_ipq[hash], fp);
		goto done;
	}

	/*
	 * Reassembly is complete.  Make sure the packet is a sane size.
	 */
	q = fp->ipq_frags;
	ip = GETIP(q);
	if (next + (ip->ip_hl << 2) > IP_MAXPACKET) {
		IPSTAT_INC(ips_toolong);
		ipq_drop(&V_ipq[hash], fp);
		goto done;
	}
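
	/*
	 * Note on the concatenation below (illustrative): the partial
	 * checksum from each fragment is summed into csum_data as a
	 * 32-bit value, and the carries are folded back in a single
	 * pass afterwards ("end-around carry").  For example, summing
	 * 0xffff + 0xffff gives 0x1fffe, which folds to
	 * 0xfffe + 0x1 = 0xffff.
	 */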
	/*
	 * Concatenate fragments.
	 */
	m = q;
	t = m->m_next;
	m->m_next = NULL;
	m_cat(m, t);
	nq = q->m_nextpkt;
	q->m_nextpkt = NULL;
	for (q = nq; q != NULL; q = nq) {
		nq = q->m_nextpkt;
		q->m_nextpkt = NULL;
		m->m_pkthdr.csum_flags &= q->m_pkthdr.csum_flags;
		m->m_pkthdr.csum_data += q->m_pkthdr.csum_data;
		m_demote_pkthdr(q);
		m_cat(m, q);
	}
	/*
	 * In order to do checksumming faster we do 'end-around carry' here
	 * (and not in the for{} loop), though it implies we are not going to
	 * reassemble more than 64k fragments.
	 */
	while (m->m_pkthdr.csum_data & 0xffff0000)
		m->m_pkthdr.csum_data = (m->m_pkthdr.csum_data & 0xffff) +
		    (m->m_pkthdr.csum_data >> 16);
	atomic_subtract_int(&nfrags, fp->ipq_nfrags);
#ifdef MAC
	mac_ipq_reassemble(fp, m);
	mac_ipq_destroy(fp);
#endif

	/*
	 * Create the header for the new IP packet by modifying the header
	 * of the first packet; dequeue and discard the fragment reassembly
	 * header.  Make the header visible.
	 */
	ip->ip_len = htons((ip->ip_hl << 2) + next);
	ip->ip_src = fp->ipq_src;
	ip->ip_dst = fp->ipq_dst;
	TAILQ_REMOVE(head, fp, ipq_list);
	V_ipq[hash].count--;
	uma_zfree(V_ipq_zone, fp);
	m->m_len += (ip->ip_hl << 2);
	m->m_data -= (ip->ip_hl << 2);
	/* some debugging cruft by sklower, below, will go away soon */
	if (m->m_flags & M_PKTHDR)	/* XXX this should be done elsewhere */
		m_fixhdr(m);
	IPSTAT_INC(ips_reassembled);
	IPQ_UNLOCK(hash);

#ifdef RSS
	/*
	 * Query the RSS layer for the flowid / flowtype for the
	 * mbuf payload.
	 *
	 * For now, just assume we have to calculate a new one.
	 * Later on we should check to see if the assigned flowid matches
	 * what RSS wants for the given IP protocol and if so, just keep it.
	 *
	 * We then queue into the relevant netisr so it can be dispatched
	 * to the correct CPU.
	 *
	 * Note - this may return 1, which means the flowid in the mbuf
	 * is correct for the configured RSS hash types and can be used.
	 */
	if (rss_mbuf_software_hash_v4(m, 0, &rss_hash, &rss_type) == 0) {
		m->m_pkthdr.flowid = rss_hash;
		M_HASHTYPE_SET(m, rss_type);
	}

	/*
	 * Queue/dispatch for reprocessing.
	 *
	 * Note: this is much slower than just handling the frame in the
	 * current receive context.  It's likely worth investigating
	 * why this is.
	 */
	netisr_dispatch(NETISR_IP_DIRECT, m);
	return (NULL);
#endif

	/* Handle in-line. */
	return (m);

dropfrag:
	IPSTAT_INC(ips_fragdropped);
	if (fp != NULL) {
		fp->ipq_nfrags--;
		atomic_subtract_int(&nfrags, 1);
	}
	m_freem(m);
done:
	IPQ_UNLOCK(hash);
	return (NULL);

#undef GETIP
}
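
/*
 * Locking note (a summary of the design above): each hash bucket has
 * its own mutex, and the mutexes are initialized with MTX_DUPOK below
 * because ipq_reuse() may hold two bucket locks at once while it
 * scavenges a queue from another bucket; it uses trylock there to
 * avoid deadlocking against another thread scanning in the opposite
 * direction.
 */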

/*
 * Initialize IP reassembly structures.
 */
void
ipreass_init(void)
{
	int max;

	for (int i = 0; i < IPREASS_NHASH; i++) {
		TAILQ_INIT(&V_ipq[i].head);
		mtx_init(&V_ipq[i].lock, "IP reassembly", NULL,
		    MTX_DEF | MTX_DUPOK);
		V_ipq[i].count = 0;
	}
	V_ipq_hashseed = arc4random();
	V_maxfragsperpacket = 16;
	V_ipq_zone = uma_zcreate("ipq", sizeof(struct ipq), NULL, NULL, NULL,
	    NULL, UMA_ALIGN_PTR, 0);
	max = IP_MAXFRAGPACKETS;
	max = uma_zone_set_max(V_ipq_zone, max);
	V_ipreass_maxbucketsize = imax(max / (IPREASS_NHASH / 2), 1);

	if (IS_DEFAULT_VNET(curvnet)) {
		maxfrags = IP_MAXFRAGS;
		EVENTHANDLER_REGISTER(nmbclusters_change, ipreass_zone_change,
		    NULL, EVENTHANDLER_PRI_ANY);
	}
}

/*
 * If a timer expires on a reassembly queue, discard it.
 */
void
ipreass_slowtimo(void)
{
	struct ipq *fp, *tmp;

	for (int i = 0; i < IPREASS_NHASH; i++) {
		IPQ_LOCK(i);
		TAILQ_FOREACH_SAFE(fp, &V_ipq[i].head, ipq_list, tmp)
			if (--fp->ipq_ttl == 0)
				ipq_timeout(&V_ipq[i], fp);
		IPQ_UNLOCK(i);
	}
}

/*
 * Drain off all datagram fragments.
 */
void
ipreass_drain(void)
{

	for (int i = 0; i < IPREASS_NHASH; i++) {
		IPQ_LOCK(i);
		while (!TAILQ_EMPTY(&V_ipq[i].head))
			ipq_drop(&V_ipq[i], TAILQ_FIRST(&V_ipq[i].head));
		KASSERT(V_ipq[i].count == 0,
		    ("%s: V_ipq[%d] count %d (V_ipq=%p)", __func__, i,
		    V_ipq[i].count, V_ipq));
		IPQ_UNLOCK(i);
	}
}

#ifdef VIMAGE
/*
 * Destroy IP reassembly structures.
 */
void
ipreass_destroy(void)
{

	ipreass_drain();
	uma_zdestroy(V_ipq_zone);
	for (int i = 0; i < IPREASS_NHASH; i++)
		mtx_destroy(&V_ipq[i].lock);
}
#endif

/*
 * After maxnipq has been updated, propagate the change to UMA.  The UMA zone
 * max has slightly different semantics than the sysctl, for historical
 * reasons.
 */
static void
ipreass_drain_tomax(void)
{
	struct ipq *fp;
	int target;

	/*
	 * Make sure each bucket is under the new limit.  If
	 * necessary, drop enough of the oldest elements from
	 * each bucket to get under the new limit.
	 */
	for (int i = 0; i < IPREASS_NHASH; i++) {
		IPQ_LOCK(i);
		while (V_ipq[i].count > V_ipreass_maxbucketsize &&
		    (fp = TAILQ_LAST(&V_ipq[i].head, ipqhead)) != NULL)
			ipq_timeout(&V_ipq[i], fp);
		IPQ_UNLOCK(i);
	}

	/*
	 * If we are over the maximum number of fragments,
	 * drain off enough to get down to the new limit,
	 * stripping off the last elements on queues.  Every
	 * run we strip the oldest element from each bucket.
	 */
	target = uma_zone_get_max(V_ipq_zone);
	while (uma_zone_get_cur(V_ipq_zone) > target) {
		for (int i = 0; i < IPREASS_NHASH; i++) {
			IPQ_LOCK(i);
			fp = TAILQ_LAST(&V_ipq[i].head, ipqhead);
			if (fp != NULL)
				ipq_timeout(&V_ipq[i], fp);
			IPQ_UNLOCK(i);
		}
	}
}

static void
ipreass_zone_change(void *tag)
{
	VNET_ITERATOR_DECL(vnet_iter);
	int max;

	maxfrags = IP_MAXFRAGS;
	max = IP_MAXFRAGPACKETS;
	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		max = uma_zone_set_max(V_ipq_zone, max);
		V_ipreass_maxbucketsize = imax(max / (IPREASS_NHASH / 2), 1);
		ipreass_drain_tomax();
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
}
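
/*
 * Summary of the sysctl semantics implemented below (for reference):
 *   > 0   cap the UMA zone at that many reassembly queue entries
 *   == 0  disable reassembly entirely (V_noreass is set)
 *   == -1 remove the limit (zone max 0, bucket size INT_MAX)
 */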
/*
 * Change the limit on the UMA zone, or disable fragment allocation
 * entirely.  Since 0 and -1 are special values here, we need our own
 * handler instead of sysctl_handle_uma_zone_max().
 */
static int
sysctl_maxfragpackets(SYSCTL_HANDLER_ARGS)
{
	int error, max;

	if (V_noreass == 0) {
		max = uma_zone_get_max(V_ipq_zone);
		if (max == 0)
			max = -1;
	} else
		max = 0;
	error = sysctl_handle_int(oidp, &max, 0, req);
	if (error || !req->newptr)
		return (error);
	if (max > 0) {
		/*
		 * XXXRW: Might be a good idea to sanity check the argument
		 * and place an extreme upper bound.
		 */
		max = uma_zone_set_max(V_ipq_zone, max);
		V_ipreass_maxbucketsize = imax(max / (IPREASS_NHASH / 2), 1);
		ipreass_drain_tomax();
		V_noreass = 0;
	} else if (max == 0) {
		V_noreass = 1;
		ipreass_drain();
	} else if (max == -1) {
		V_noreass = 0;
		uma_zone_set_max(V_ipq_zone, 0);
		V_ipreass_maxbucketsize = INT_MAX;
	} else
		return (EINVAL);
	return (0);
}

/*
 * Look for an old fragment queue header that can be reused.  Try to
 * reuse a header from the currently locked hash bucket first.
 */
static struct ipq *
ipq_reuse(int start)
{
	struct ipq *fp;
	int bucket, i;

	IPQ_LOCK_ASSERT(start);

	for (i = 0; i < IPREASS_NHASH; i++) {
		bucket = (start + i) % IPREASS_NHASH;
		if (bucket != start && IPQ_TRYLOCK(bucket) == 0)
			continue;
		fp = TAILQ_LAST(&V_ipq[bucket].head, ipqhead);
		if (fp) {
			struct mbuf *m;

			IPSTAT_ADD(ips_fragtimeout, fp->ipq_nfrags);
			atomic_subtract_int(&nfrags, fp->ipq_nfrags);
			while (fp->ipq_frags) {
				m = fp->ipq_frags;
				fp->ipq_frags = m->m_nextpkt;
				m_freem(m);
			}
			TAILQ_REMOVE(&V_ipq[bucket].head, fp, ipq_list);
			V_ipq[bucket].count--;
			if (bucket != start)
				IPQ_UNLOCK(bucket);
			break;
		}
		if (bucket != start)
			IPQ_UNLOCK(bucket);
	}
	IPQ_LOCK_ASSERT(start);
	return (fp);
}

/*
 * Free a fragment reassembly header and all associated datagrams.
 */
static void
ipq_free(struct ipqbucket *bucket, struct ipq *fp)
{
	struct mbuf *q;

	atomic_subtract_int(&nfrags, fp->ipq_nfrags);
	while (fp->ipq_frags) {
		q = fp->ipq_frags;
		fp->ipq_frags = q->m_nextpkt;
		m_freem(q);
	}
	TAILQ_REMOVE(&bucket->head, fp, ipq_list);
	bucket->count--;
	uma_zfree(V_ipq_zone, fp);
}

/*
 * Get or set the maximum number of reassembly queues per bucket.
 */
static int
sysctl_maxfragbucketsize(SYSCTL_HANDLER_ARGS)
{
	int error, max;

	max = V_ipreass_maxbucketsize;
	error = sysctl_handle_int(oidp, &max, 0, req);
	if (error || !req->newptr)
		return (error);
	if (max <= 0)
		return (EINVAL);
	V_ipreass_maxbucketsize = max;
	ipreass_drain_tomax();
	return (0);
}