/*-
 * Copyright (c) 2015 Gleb Smirnoff <glebius@FreeBSD.org>
 * Copyright (c) 2015 Adrian Chadd <adrian@FreeBSD.org>
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ip_input.c	8.2 (Berkeley) 1/4/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_rss.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/hash.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>

#include <net/rss_config.h>
#include <net/netisr.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/in_rss.h>
#ifdef MAC
#include <security/mac/mac_framework.h>
#endif

SYSCTL_DECL(_net_inet_ip);
/*
 * Reassembly headers are stored in hash buckets.
 */
#define	IPREASS_NHASH_LOG2	10
#define	IPREASS_NHASH		(1 << IPREASS_NHASH_LOG2)
#define	IPREASS_HMASK		(IPREASS_NHASH - 1)

struct ipqbucket {
	TAILQ_HEAD(ipqhead, ipq) head;
	struct mtx		 lock;
	int			 count;
};

VNET_DEFINE_STATIC(struct ipqbucket, ipq[IPREASS_NHASH]);
#define	V_ipq		VNET(ipq)
VNET_DEFINE_STATIC(uint32_t, ipq_hashseed);
#define	V_ipq_hashseed	VNET(ipq_hashseed)

#define	IPQ_LOCK(i)	mtx_lock(&V_ipq[i].lock)
#define	IPQ_TRYLOCK(i)	mtx_trylock(&V_ipq[i].lock)
#define	IPQ_UNLOCK(i)	mtx_unlock(&V_ipq[i].lock)
#define	IPQ_LOCK_ASSERT(i)	mtx_assert(&V_ipq[i].lock, MA_OWNED)

VNET_DEFINE_STATIC(int, ipreass_maxbucketsize);
#define	V_ipreass_maxbucketsize	VNET(ipreass_maxbucketsize)

void		ipreass_init(void);
void		ipreass_drain(void);
void		ipreass_slowtimo(void);
#ifdef VIMAGE
void		ipreass_destroy(void);
#endif
static int	sysctl_maxfragpackets(SYSCTL_HANDLER_ARGS);
static int	sysctl_maxfragbucketsize(SYSCTL_HANDLER_ARGS);
static void	ipreass_zone_change(void *);
static void	ipreass_drain_tomax(void);
static void	ipq_free(struct ipqbucket *, struct ipq *);
static struct ipq *ipq_reuse(int);

static inline void
ipq_timeout(struct ipqbucket *bucket, struct ipq *fp)
{

	IPSTAT_ADD(ips_fragtimeout, fp->ipq_nfrags);
	ipq_free(bucket, fp);
}

static inline void
ipq_drop(struct ipqbucket *bucket, struct ipq *fp)
{

	IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags);
	ipq_free(bucket, fp);
}

/*
 * By default, limit the number of IP fragments across all reassembly
 * queues to 1/32 of the total number of mbuf clusters.
 *
 * Limit the total number of reassembly queues per VNET to the
 * IP fragment limit, but ensure the limit will not allow any bucket
 * to grow above 100 items.  (The bucket limit is
 * IP_MAXFRAGPACKETS / (IPREASS_NHASH / 2), so the 50 is the correct
 * multiplier to reach a 100-item limit.)
 * The 100-item limit was chosen as brief testing seems to show that
 * this produces "reasonable" performance on some subset of systems
 * under DoS attack.
 */
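/*
 * A worked example of the arithmetic above, using the constants from
 * this file (illustrative only): with IPREASS_NHASH_LOG2 == 10 there
 * are 1024 buckets, IP_MAXFRAGPACKETS is capped at 1024 * 50 == 51200
 * entries, and the per-bucket limit becomes 51200 / (1024 / 2) == 100.
 */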
#define	IP_MAXFRAGS		(nmbclusters / 32)
#define	IP_MAXFRAGPACKETS	(imin(IP_MAXFRAGS, IPREASS_NHASH * 50))

static int		maxfrags;
static volatile u_int	nfrags;
SYSCTL_INT(_net_inet_ip, OID_AUTO, maxfrags, CTLFLAG_RW,
    &maxfrags, 0,
    "Maximum number of IPv4 fragments allowed across all reassembly queues");
SYSCTL_UINT(_net_inet_ip, OID_AUTO, curfrags, CTLFLAG_RD,
    __DEVOLATILE(u_int *, &nfrags), 0,
    "Current number of IPv4 fragments across all reassembly queues");

VNET_DEFINE_STATIC(uma_zone_t, ipq_zone);
#define	V_ipq_zone	VNET(ipq_zone)
SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragpackets, CTLFLAG_VNET |
    CTLTYPE_INT | CTLFLAG_RW, NULL, 0, sysctl_maxfragpackets, "I",
    "Maximum number of IPv4 fragment reassembly queue entries");
SYSCTL_UMA_CUR(_net_inet_ip, OID_AUTO, fragpackets, CTLFLAG_VNET,
    &VNET_NAME(ipq_zone),
    "Current number of IPv4 fragment reassembly queue entries");

VNET_DEFINE_STATIC(int, noreass);
#define	V_noreass	VNET(noreass)

VNET_DEFINE_STATIC(int, maxfragsperpacket);
#define	V_maxfragsperpacket	VNET(maxfragsperpacket)
SYSCTL_INT(_net_inet_ip, OID_AUTO, maxfragsperpacket, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(maxfragsperpacket), 0,
    "Maximum number of IPv4 fragments allowed per packet");
SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragbucketsize,
    CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0,
    sysctl_maxfragbucketsize, "I",
    "Maximum number of IPv4 fragment reassembly queue entries per bucket");

/*
 * Take an incoming datagram fragment and try to reassemble it into a
 * whole datagram.  If the argument is the first fragment or one in
 * between, the function will return NULL and store the mbuf in the
 * fragment chain.  If the argument is the last fragment, the packet
 * will be reassembled and the pointer to the new mbuf returned for
 * further processing.  Only m_tags attached to the first packet/fragment
 * are preserved.
 * The IP header is *NOT* adjusted out of iplen.
 */
#define	M_IP_FRAG	M_PROTO9
struct mbuf *
ip_reass(struct mbuf *m)
{
	struct ip *ip;
	struct mbuf *p, *q, *nq, *t;
	struct ipq *fp;
	struct ipqhead *head;
	int i, hlen, next, tmpmax;
	u_int8_t ecn, ecn0;
	uint32_t hash, hashkey[3];
#ifdef	RSS
	uint32_t rss_hash, rss_type;
#endif

	/*
	 * If reassembly is disabled or maxfragsperpacket is 0, never
	 * accept fragments.  Also, drop the packet if it would exceed
	 * the maximum number of fragments.
	 */
	tmpmax = maxfrags;
	if (V_noreass == 1 || V_maxfragsperpacket == 0 ||
	    (tmpmax >= 0 && atomic_load_int(&nfrags) >= (u_int)tmpmax)) {
		IPSTAT_INC(ips_fragments);
		IPSTAT_INC(ips_fragdropped);
		m_freem(m);
		return (NULL);
	}

	ip = mtod(m, struct ip *);
	hlen = ip->ip_hl << 2;

	/*
	 * Adjust ip_len to not reflect the header; the fragment offset
	 * is converted to bytes below.
	 */
	ip->ip_len = htons(ntohs(ip->ip_len) - hlen);
	/*
	 * Make sure that fragments have a data length
	 * that's a non-zero multiple of 8 bytes, unless
	 * this is the last fragment.
	 */
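	/*
	 * Illustrative numbers, not taken from this file: the fragment
	 * offset field counts 8-byte units, which is why every fragment
	 * except the last must carry a multiple of 8 bytes of payload.
	 * E.g. on a 1500-byte MTU link a full-sized fragment carries
	 * 1480 bytes (1480 % 8 == 0), and the following fragment's
	 * offset is 185 units == 1480 bytes.
	 */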
	if (ip->ip_len == htons(0) ||
	    ((ip->ip_off & htons(IP_MF)) && (ntohs(ip->ip_len) & 0x7) != 0)) {
		IPSTAT_INC(ips_toosmall); /* XXX */
		IPSTAT_INC(ips_fragdropped);
		m_freem(m);
		return (NULL);
	}
	if (ip->ip_off & htons(IP_MF))
		m->m_flags |= M_IP_FRAG;
	else
		m->m_flags &= ~M_IP_FRAG;
	ip->ip_off = htons(ntohs(ip->ip_off) << 3);

	/*
	 * Make sure the fragment lies within a packet of valid size.
	 */
	if (ntohs(ip->ip_len) + ntohs(ip->ip_off) > IP_MAXPACKET) {
		IPSTAT_INC(ips_toolong);
		IPSTAT_INC(ips_fragdropped);
		m_freem(m);
		return (NULL);
	}

	/*
	 * Attempt reassembly; if it succeeds, proceed.
	 * ip_reass() will return a different mbuf.
	 */
	IPSTAT_INC(ips_fragments);
	m->m_pkthdr.PH_loc.ptr = ip;

	/*
	 * Presence of header sizes in mbufs
	 * would confuse code below.
	 */
	m->m_data += hlen;
	m->m_len -= hlen;

	hashkey[0] = ip->ip_src.s_addr;
	hashkey[1] = ip->ip_dst.s_addr;
	hashkey[2] = (uint32_t)ip->ip_p << 16;
	hashkey[2] += ip->ip_id;
	hash = jenkins_hash32(hashkey, nitems(hashkey), V_ipq_hashseed);
	hash &= IPREASS_HMASK;
	head = &V_ipq[hash].head;
	IPQ_LOCK(hash);

	/*
	 * Look for a queue of fragments of this datagram.
	 */
	TAILQ_FOREACH(fp, head, ipq_list)
		if (ip->ip_id == fp->ipq_id &&
		    ip->ip_src.s_addr == fp->ipq_src.s_addr &&
		    ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
#ifdef MAC
		    mac_ipq_match(m, fp) &&
#endif
		    ip->ip_p == fp->ipq_p)
			break;
	/*
	 * If this is the first fragment to arrive, create a reassembly
	 * queue.
	 */
	if (fp == NULL) {
		if (V_ipq[hash].count < V_ipreass_maxbucketsize)
			fp = uma_zalloc(V_ipq_zone, M_NOWAIT);
		if (fp == NULL)
			fp = ipq_reuse(hash);
		if (fp == NULL)
			goto dropfrag;
#ifdef MAC
		if (mac_ipq_init(fp, M_NOWAIT) != 0) {
			uma_zfree(V_ipq_zone, fp);
			fp = NULL;
			goto dropfrag;
		}
		mac_ipq_create(m, fp);
#endif
		TAILQ_INSERT_HEAD(head, fp, ipq_list);
		V_ipq[hash].count++;
		fp->ipq_nfrags = 1;
		atomic_add_int(&nfrags, 1);
		fp->ipq_ttl = IPFRAGTTL;
		fp->ipq_p = ip->ip_p;
		fp->ipq_id = ip->ip_id;
		fp->ipq_src = ip->ip_src;
		fp->ipq_dst = ip->ip_dst;
		fp->ipq_frags = m;
		if (m->m_flags & M_IP_FRAG)
			fp->ipq_maxoff = -1;
		else
			fp->ipq_maxoff = ntohs(ip->ip_off) + ntohs(ip->ip_len);
		m->m_nextpkt = NULL;
		goto done;
	} else {
		/*
		 * If we already saw the last fragment, make sure
		 * this fragment's offset looks sane.  Otherwise, if
		 * this is the last fragment, record its endpoint.
		 */
		if (fp->ipq_maxoff > 0) {
			i = ntohs(ip->ip_off) + ntohs(ip->ip_len);
			if (((m->m_flags & M_IP_FRAG) && i >= fp->ipq_maxoff) ||
			    ((m->m_flags & M_IP_FRAG) == 0 &&
			    i != fp->ipq_maxoff)) {
				fp = NULL;
				goto dropfrag;
			}
		} else if ((m->m_flags & M_IP_FRAG) == 0)
			fp->ipq_maxoff = ntohs(ip->ip_off) + ntohs(ip->ip_len);
		fp->ipq_nfrags++;
		atomic_add_int(&nfrags, 1);
#ifdef MAC
		mac_ipq_update(m, fp);
#endif
	}

#define GETIP(m)	((struct ip*)((m)->m_pkthdr.PH_loc.ptr))

	/*
	 * Handle ECN by comparing this segment with the first one;
	 * if CE is set, do not lose CE.
	 * Drop if CE and not-ECT are mixed for the same packet.
	 */
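	/*
	 * The checks below, written out as a table (ecn is this
	 * fragment, ecn0 the first one; a descriptive summary of the
	 * code, not new rules):
	 *
	 *	ecn \ ecn0	NOTECT	ECT(0)/ECT(1)	CE
	 *	NOTECT		accept	drop		drop
	 *	ECT(0)/ECT(1)	accept	accept		accept
	 *	CE		drop	accept + set CE	accept
	 */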
	ecn = ip->ip_tos & IPTOS_ECN_MASK;
	ecn0 = GETIP(fp->ipq_frags)->ip_tos & IPTOS_ECN_MASK;
	if (ecn == IPTOS_ECN_CE) {
		if (ecn0 == IPTOS_ECN_NOTECT)
			goto dropfrag;
		if (ecn0 != IPTOS_ECN_CE)
			GETIP(fp->ipq_frags)->ip_tos |= IPTOS_ECN_CE;
	}
	if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT)
		goto dropfrag;

	/*
	 * Find a segment which begins after this one does.
	 */
	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
		if (ntohs(GETIP(q)->ip_off) > ntohs(ip->ip_off))
			break;

	/*
	 * If there is a preceding segment, it may provide some of
	 * our data already.  If so, drop the data from the incoming
	 * segment.  If it provides all of our data, drop us; otherwise,
	 * stick the new segment in the proper place.
	 *
	 * If some of the data is dropped from the preceding
	 * segment, then its checksum is invalidated.
	 */
	if (p) {
		i = ntohs(GETIP(p)->ip_off) + ntohs(GETIP(p)->ip_len) -
		    ntohs(ip->ip_off);
		if (i > 0) {
			if (i >= ntohs(ip->ip_len))
				goto dropfrag;
			m_adj(m, i);
			m->m_pkthdr.csum_flags = 0;
			ip->ip_off = htons(ntohs(ip->ip_off) + i);
			ip->ip_len = htons(ntohs(ip->ip_len) - i);
		}
		m->m_nextpkt = p->m_nextpkt;
		p->m_nextpkt = m;
	} else {
		m->m_nextpkt = fp->ipq_frags;
		fp->ipq_frags = m;
	}

	/*
	 * While we overlap succeeding segments, trim them or, if they
	 * are completely covered, dequeue them.
	 */
	for (; q != NULL && ntohs(ip->ip_off) + ntohs(ip->ip_len) >
	    ntohs(GETIP(q)->ip_off); q = nq) {
		i = (ntohs(ip->ip_off) + ntohs(ip->ip_len)) -
		    ntohs(GETIP(q)->ip_off);
		if (i < ntohs(GETIP(q)->ip_len)) {
			GETIP(q)->ip_len = htons(ntohs(GETIP(q)->ip_len) - i);
			GETIP(q)->ip_off = htons(ntohs(GETIP(q)->ip_off) + i);
			m_adj(q, i);
			q->m_pkthdr.csum_flags = 0;
			break;
		}
		nq = q->m_nextpkt;
		m->m_nextpkt = nq;
		IPSTAT_INC(ips_fragdropped);
		fp->ipq_nfrags--;
		atomic_subtract_int(&nfrags, 1);
		m_freem(q);
	}

	/*
	 * Check for complete reassembly and perform frag-per-packet
	 * limiting.
	 *
	 * Frag limiting is performed here so that the nth frag has
	 * a chance to complete the packet before we drop the packet.
	 * As a result, n+1 frags are actually allowed per packet, but
	 * only n will ever be stored.  (n = maxfragsperpacket.)
	 */
	next = 0;
	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) {
		if (ntohs(GETIP(q)->ip_off) != next) {
			if (fp->ipq_nfrags > V_maxfragsperpacket)
				ipq_drop(&V_ipq[hash], fp);
			goto done;
		}
		next += ntohs(GETIP(q)->ip_len);
	}
	/* Make sure the last packet didn't have the IP_MF flag. */
	if (p->m_flags & M_IP_FRAG) {
		if (fp->ipq_nfrags > V_maxfragsperpacket)
			ipq_drop(&V_ipq[hash], fp);
		goto done;
	}

	/*
	 * Reassembly is complete.  Make sure the packet is a sane size.
	 */
	q = fp->ipq_frags;
	ip = GETIP(q);
	if (next + (ip->ip_hl << 2) > IP_MAXPACKET) {
		IPSTAT_INC(ips_toolong);
		ipq_drop(&V_ipq[hash], fp);
		goto done;
	}

	/*
	 * Concatenate fragments.
	 */
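	/*
	 * A note on the checksum handling below (an observation, not new
	 * behavior): each fragment's m_pkthdr.csum_data holds a 16-bit
	 * one's-complement partial sum; the loop adds them as plain
	 * 32-bit integers, and the carries are folded back in afterwards,
	 * e.g. 0x0001fffe folds to 0x0001 + 0xfffe == 0xffff.
	 */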
	m = q;
	t = m->m_next;
	m->m_next = NULL;
	m_cat(m, t);
	nq = q->m_nextpkt;
	q->m_nextpkt = NULL;
	for (q = nq; q != NULL; q = nq) {
		nq = q->m_nextpkt;
		q->m_nextpkt = NULL;
		m->m_pkthdr.csum_flags &= q->m_pkthdr.csum_flags;
		m->m_pkthdr.csum_data += q->m_pkthdr.csum_data;
		m_demote_pkthdr(q);
		m_cat(m, q);
	}
	/*
	 * In order to do checksumming faster, we do the 'end-around
	 * carry' here rather than in the for loop above, though this
	 * implies we are not going to reassemble more than 64k fragments.
	 */
	while (m->m_pkthdr.csum_data & 0xffff0000)
		m->m_pkthdr.csum_data = (m->m_pkthdr.csum_data & 0xffff) +
		    (m->m_pkthdr.csum_data >> 16);
	atomic_subtract_int(&nfrags, fp->ipq_nfrags);
#ifdef MAC
	mac_ipq_reassemble(fp, m);
	mac_ipq_destroy(fp);
#endif

	/*
	 * Create the header for the new IP packet by modifying the header
	 * of the first packet; dequeue and discard the fragment reassembly
	 * header.  Make the header visible.
	 */
	ip->ip_len = htons((ip->ip_hl << 2) + next);
	ip->ip_src = fp->ipq_src;
	ip->ip_dst = fp->ipq_dst;
	TAILQ_REMOVE(head, fp, ipq_list);
	V_ipq[hash].count--;
	uma_zfree(V_ipq_zone, fp);
	m->m_len += (ip->ip_hl << 2);
	m->m_data -= (ip->ip_hl << 2);
	/* some debugging cruft by sklower, below, will go away soon */
	if (m->m_flags & M_PKTHDR)	/* XXX this should be done elsewhere */
		m_fixhdr(m);
	IPSTAT_INC(ips_reassembled);
	IPQ_UNLOCK(hash);

#ifdef	RSS
	/*
	 * Query the RSS layer for the flowid / flowtype for the
	 * mbuf payload.
	 *
	 * For now, just assume we have to calculate a new one.
	 * Later on we should check to see if the assigned flowid matches
	 * what RSS wants for the given IP protocol and, if so, just keep it.
	 *
	 * We then queue into the relevant netisr so it can be dispatched
	 * to the correct CPU.
	 *
	 * Note - this may return 1, which means the flowid in the mbuf
	 * is correct for the configured RSS hash types and can be used.
	 */
	if (rss_mbuf_software_hash_v4(m, 0, &rss_hash, &rss_type) == 0) {
		m->m_pkthdr.flowid = rss_hash;
		M_HASHTYPE_SET(m, rss_type);
	}

	/*
	 * Queue/dispatch for reprocessing.
	 *
	 * Note: this is much slower than just handling the frame in the
	 * current receive context.  It's likely worth investigating
	 * why this is.
	 */
	netisr_dispatch(NETISR_IP_DIRECT, m);
	return (NULL);
#endif

	/* Handle in-line. */
	return (m);

dropfrag:
	IPSTAT_INC(ips_fragdropped);
	if (fp != NULL) {
		fp->ipq_nfrags--;
		atomic_subtract_int(&nfrags, 1);
	}
	m_freem(m);
done:
	IPQ_UNLOCK(hash);
	return (NULL);

#undef GETIP
}
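/*
 * A locking note summarizing ip_reass() above (descriptive only): all
 * queue manipulation happens under the per-bucket mutex taken by
 * IPQ_LOCK(hash), while the counter 'nfrags' spans all buckets and
 * VNETs and is therefore maintained with atomic operations instead.
 */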
/*
 * Initialize IP reassembly structures.
 */
void
ipreass_init(void)
{
	int max;

	for (int i = 0; i < IPREASS_NHASH; i++) {
		TAILQ_INIT(&V_ipq[i].head);
		mtx_init(&V_ipq[i].lock, "IP reassembly", NULL,
		    MTX_DEF | MTX_DUPOK);
		V_ipq[i].count = 0;
	}
	V_ipq_hashseed = arc4random();
	V_maxfragsperpacket = 16;
	V_ipq_zone = uma_zcreate("ipq", sizeof(struct ipq), NULL, NULL, NULL,
	    NULL, UMA_ALIGN_PTR, 0);
	max = IP_MAXFRAGPACKETS;
	max = uma_zone_set_max(V_ipq_zone, max);
	V_ipreass_maxbucketsize = imax(max / (IPREASS_NHASH / 2), 1);

	if (IS_DEFAULT_VNET(curvnet)) {
		maxfrags = IP_MAXFRAGS;
		EVENTHANDLER_REGISTER(nmbclusters_change, ipreass_zone_change,
		    NULL, EVENTHANDLER_PRI_ANY);
	}
}

/*
 * If a timer expires on a reassembly queue, discard it.
 */
void
ipreass_slowtimo(void)
{
	struct ipq *fp, *tmp;

	for (int i = 0; i < IPREASS_NHASH; i++) {
		IPQ_LOCK(i);
		TAILQ_FOREACH_SAFE(fp, &V_ipq[i].head, ipq_list, tmp)
			if (--fp->ipq_ttl == 0)
				ipq_timeout(&V_ipq[i], fp);
		IPQ_UNLOCK(i);
	}
}

/*
 * Drain off all datagram fragments.
 */
void
ipreass_drain(void)
{

	for (int i = 0; i < IPREASS_NHASH; i++) {
		IPQ_LOCK(i);
		while (!TAILQ_EMPTY(&V_ipq[i].head))
			ipq_drop(&V_ipq[i], TAILQ_FIRST(&V_ipq[i].head));
		KASSERT(V_ipq[i].count == 0,
		    ("%s: V_ipq[%d] count %d (V_ipq=%p)", __func__, i,
		    V_ipq[i].count, V_ipq));
		IPQ_UNLOCK(i);
	}
}

#ifdef VIMAGE
/*
 * Destroy IP reassembly structures.
 */
void
ipreass_destroy(void)
{

	ipreass_drain();
	uma_zdestroy(V_ipq_zone);
	for (int i = 0; i < IPREASS_NHASH; i++)
		mtx_destroy(&V_ipq[i].lock);
}
#endif

/*
 * After maxnipq has been updated, propagate the change to UMA.  The UMA zone
 * max has slightly different semantics than the sysctl, for historical
 * reasons.
 */
static void
ipreass_drain_tomax(void)
{
	struct ipq *fp;
	int target;

	/*
	 * Make sure each bucket is under the new limit.  If
	 * necessary, drop enough of the oldest elements from
	 * each bucket to get under the new limit.
	 */
	for (int i = 0; i < IPREASS_NHASH; i++) {
		IPQ_LOCK(i);
		while (V_ipq[i].count > V_ipreass_maxbucketsize &&
		    (fp = TAILQ_LAST(&V_ipq[i].head, ipqhead)) != NULL)
			ipq_timeout(&V_ipq[i], fp);
		IPQ_UNLOCK(i);
	}

	/*
	 * If we are over the maximum number of fragments,
	 * drain off enough to get down to the new limit,
	 * stripping off the last elements on queues.  Every
	 * run we strip the oldest element from each bucket.
	 */
	target = uma_zone_get_max(V_ipq_zone);
	while (uma_zone_get_cur(V_ipq_zone) > target) {
		for (int i = 0; i < IPREASS_NHASH; i++) {
			IPQ_LOCK(i);
			fp = TAILQ_LAST(&V_ipq[i].head, ipqhead);
			if (fp != NULL)
				ipq_timeout(&V_ipq[i], fp);
			IPQ_UNLOCK(i);
		}
	}
}

static void
ipreass_zone_change(void *tag)
{
	VNET_ITERATOR_DECL(vnet_iter);
	int max;

	maxfrags = IP_MAXFRAGS;
	max = IP_MAXFRAGPACKETS;
	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		max = uma_zone_set_max(V_ipq_zone, max);
		V_ipreass_maxbucketsize = imax(max / (IPREASS_NHASH / 2), 1);
		ipreass_drain_tomax();
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
}
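/*
 * Usage sketch for the handler below, assuming the standard sysctl(8)
 * utility (not part of this file):
 *
 *	sysctl net.inet.ip.maxfragpackets=2048	# cap queue entries
 *	sysctl net.inet.ip.maxfragpackets=0	# disable reassembly
 *	sysctl net.inet.ip.maxfragpackets=-1	# remove the limit
 */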
/*
 * Change the limit on the UMA zone, or disable fragment allocation
 * entirely.  Since 0 and -1 are special values here, we need our own
 * handler, instead of sysctl_handle_uma_zone_max().
 */
static int
sysctl_maxfragpackets(SYSCTL_HANDLER_ARGS)
{
	int error, max;

	if (V_noreass == 0) {
		max = uma_zone_get_max(V_ipq_zone);
		if (max == 0)
			max = -1;
	} else
		max = 0;
	error = sysctl_handle_int(oidp, &max, 0, req);
	if (error || !req->newptr)
		return (error);
	if (max > 0) {
		/*
		 * XXXRW: Might be a good idea to sanity check the argument
		 * and place an extreme upper bound.
		 */
		max = uma_zone_set_max(V_ipq_zone, max);
		V_ipreass_maxbucketsize = imax(max / (IPREASS_NHASH / 2), 1);
		ipreass_drain_tomax();
		V_noreass = 0;
	} else if (max == 0) {
		V_noreass = 1;
		ipreass_drain();
	} else if (max == -1) {
		V_noreass = 0;
		uma_zone_set_max(V_ipq_zone, 0);
		V_ipreass_maxbucketsize = INT_MAX;
	} else
		return (EINVAL);
	return (0);
}

/*
 * Search for an old fragment queue header that can be reused, trying
 * first the currently locked hash bucket.
 */
static struct ipq *
ipq_reuse(int start)
{
	struct ipq *fp;
	int bucket, i;

	IPQ_LOCK_ASSERT(start);

	for (i = 0; i < IPREASS_NHASH; i++) {
		bucket = (start + i) % IPREASS_NHASH;
		if (bucket != start && IPQ_TRYLOCK(bucket) == 0)
			continue;
		fp = TAILQ_LAST(&V_ipq[bucket].head, ipqhead);
		if (fp != NULL) {
			struct mbuf *m;

			IPSTAT_ADD(ips_fragtimeout, fp->ipq_nfrags);
			atomic_subtract_int(&nfrags, fp->ipq_nfrags);
			while (fp->ipq_frags) {
				m = fp->ipq_frags;
				fp->ipq_frags = m->m_nextpkt;
				m_freem(m);
			}
			TAILQ_REMOVE(&V_ipq[bucket].head, fp, ipq_list);
			V_ipq[bucket].count--;
			if (bucket != start)
				IPQ_UNLOCK(bucket);
			break;
		}
		if (bucket != start)
			IPQ_UNLOCK(bucket);
	}
	IPQ_LOCK_ASSERT(start);
	return (fp);
}

/*
 * Free a fragment reassembly header and all associated datagrams.
 */
static void
ipq_free(struct ipqbucket *bucket, struct ipq *fp)
{
	struct mbuf *q;

	atomic_subtract_int(&nfrags, fp->ipq_nfrags);
	while (fp->ipq_frags) {
		q = fp->ipq_frags;
		fp->ipq_frags = q->m_nextpkt;
		m_freem(q);
	}
	TAILQ_REMOVE(&bucket->head, fp, ipq_list);
	bucket->count--;
	uma_zfree(V_ipq_zone, fp);
}

/*
 * Get or set the maximum number of reassembly queues per bucket.
 */
static int
sysctl_maxfragbucketsize(SYSCTL_HANDLER_ARGS)
{
	int error, max;

	max = V_ipreass_maxbucketsize;
	error = sysctl_handle_int(oidp, &max, 0, req);
	if (error || !req->newptr)
		return (error);
	if (max <= 0)
		return (EINVAL);
	V_ipreass_maxbucketsize = max;
	ipreass_drain_tomax();
	return (0);
}