/*-
 * Copyright (c) 2015 Gleb Smirnoff <glebius@FreeBSD.org>
 * Copyright (c) 2015 Adrian Chadd <adrian@FreeBSD.org>
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ip_input.c	8.2 (Berkeley) 1/4/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_rss.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/hash.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/rss_config.h>
#include <net/netisr.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/in_rss.h>
#ifdef MAC
#include <security/mac/mac_framework.h>
#endif

SYSCTL_DECL(_net_inet_ip);

/*
 * Reassembly headers are stored in hash buckets.
 */
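/*
 * With IPREASS_NHASH_LOG2 == 10 this gives 1024 buckets; ip_reass()
 * below selects a bucket by masking a per-VNET-seeded Jenkins hash of
 * the fragment's (src, dst, proto, id) tuple with IPREASS_HMASK.
 */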
#define	IPREASS_NHASH_LOG2	10
#define	IPREASS_NHASH		(1 << IPREASS_NHASH_LOG2)
#define	IPREASS_HMASK		(IPREASS_NHASH - 1)

struct ipqbucket {
	TAILQ_HEAD(ipqhead, ipq) head;
	struct mtx		 lock;
	int			 count;
};

VNET_DEFINE_STATIC(struct ipqbucket, ipq[IPREASS_NHASH]);
#define	V_ipq		VNET(ipq)
VNET_DEFINE_STATIC(uint32_t, ipq_hashseed);
#define	V_ipq_hashseed	VNET(ipq_hashseed)

#define	IPQ_LOCK(i)	mtx_lock(&V_ipq[i].lock)
#define	IPQ_TRYLOCK(i)	mtx_trylock(&V_ipq[i].lock)
#define	IPQ_UNLOCK(i)	mtx_unlock(&V_ipq[i].lock)
#define	IPQ_LOCK_ASSERT(i)	mtx_assert(&V_ipq[i].lock, MA_OWNED)

VNET_DEFINE_STATIC(int, ipreass_maxbucketsize);
#define	V_ipreass_maxbucketsize	VNET(ipreass_maxbucketsize)

void		ipreass_init(void);
void		ipreass_drain(void);
void		ipreass_slowtimo(void);
#ifdef VIMAGE
void		ipreass_destroy(void);
#endif
static int	sysctl_maxfragpackets(SYSCTL_HANDLER_ARGS);
static int	sysctl_maxfragbucketsize(SYSCTL_HANDLER_ARGS);
static void	ipreass_zone_change(void *);
static void	ipreass_drain_tomax(void);
static void	ipq_free(struct ipqbucket *, struct ipq *);
static struct ipq *ipq_reuse(int);

static inline void
ipq_timeout(struct ipqbucket *bucket, struct ipq *fp)
{

	IPSTAT_ADD(ips_fragtimeout, fp->ipq_nfrags);
	ipq_free(bucket, fp);
}

static inline void
ipq_drop(struct ipqbucket *bucket, struct ipq *fp)
{

	IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags);
	ipq_free(bucket, fp);
}

/*
 * By default, limit the number of IP fragments across all reassembly
 * queues to 1/32 of the total number of mbuf clusters.
 *
 * Limit the total number of reassembly queues per VNET to the
 * IP fragment limit, but ensure the limit will not allow any bucket
 * to grow above 100 items.  (The bucket limit is
 * IP_MAXFRAGPACKETS / (IPREASS_NHASH / 2), so the 50 is the correct
 * multiplier to reach a 100-item limit.)
 * The 100-item limit was chosen as brief testing seems to show that
 * this produces "reasonable" performance on some subset of systems
 * under DoS attack.
 */
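/*
 * Worked example of the arithmetic above: IP_MAXFRAGPACKETS caps out
 * at IPREASS_NHASH * 50 = 51200 entries, and ipreass_init() divides
 * that by IPREASS_NHASH / 2 = 512, yielding the 100-item bucket limit.
 */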
#define	IP_MAXFRAGS		(nmbclusters / 32)
#define	IP_MAXFRAGPACKETS	(imin(IP_MAXFRAGS, IPREASS_NHASH * 50))

static int		maxfrags;
static volatile u_int	nfrags;
SYSCTL_INT(_net_inet_ip, OID_AUTO, maxfrags, CTLFLAG_RW,
    &maxfrags, 0,
    "Maximum number of IPv4 fragments allowed across all reassembly queues");
SYSCTL_UINT(_net_inet_ip, OID_AUTO, curfrags, CTLFLAG_RD,
    __DEVOLATILE(u_int *, &nfrags), 0,
    "Current number of IPv4 fragments across all reassembly queues");

VNET_DEFINE_STATIC(uma_zone_t, ipq_zone);
#define	V_ipq_zone	VNET(ipq_zone)
SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragpackets, CTLFLAG_VNET |
    CTLTYPE_INT | CTLFLAG_RW, NULL, 0, sysctl_maxfragpackets, "I",
    "Maximum number of IPv4 fragment reassembly queue entries");
SYSCTL_UMA_CUR(_net_inet_ip, OID_AUTO, fragpackets, CTLFLAG_VNET,
    &VNET_NAME(ipq_zone),
    "Current number of IPv4 fragment reassembly queue entries");

VNET_DEFINE_STATIC(int, noreass);
#define	V_noreass	VNET(noreass)

VNET_DEFINE_STATIC(int, maxfragsperpacket);
#define	V_maxfragsperpacket	VNET(maxfragsperpacket)
SYSCTL_INT(_net_inet_ip, OID_AUTO, maxfragsperpacket, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(maxfragsperpacket), 0,
    "Maximum number of IPv4 fragments allowed per packet");
SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragbucketsize,
    CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0,
    sysctl_maxfragbucketsize, "I",
    "Maximum number of IPv4 fragment reassembly queue entries per bucket");

/*
 * Take an incoming datagram fragment and try to reassemble it into a
 * whole datagram.  If the argument is the first fragment or one
 * in between, the function will return NULL and store the mbuf
 * in the fragment chain.  If the argument is the last fragment,
 * the packet will be reassembled and the pointer to the new
 * mbuf returned for further processing.  Only m_tags attached
 * to the first packet/fragment are preserved.
 * The IP header is *NOT* adjusted out of iplen.
 */
#define	M_IP_FRAG	M_PROTO9
struct mbuf *
ip_reass(struct mbuf *m)
{
	struct ip *ip;
	struct mbuf *p, *q, *nq, *t;
	struct ipq *fp;
	struct ipqhead *head;
	int i, hlen, next, tmpmax;
	u_int8_t ecn, ecn0;
	uint32_t hash, hashkey[3];
#ifdef RSS
	uint32_t rss_hash, rss_type;
#endif

	/*
	 * If reassembly is disabled or maxfragsperpacket is 0, never
	 * accept fragments.  Also drop the packet if it would push the
	 * global count of fragments above the maximum.
	 */
	tmpmax = maxfrags;
	if (V_noreass == 1 || V_maxfragsperpacket == 0 ||
	    (tmpmax >= 0 && atomic_load_int(&nfrags) >= (u_int)tmpmax)) {
		IPSTAT_INC(ips_fragments);
		IPSTAT_INC(ips_fragdropped);
		m_freem(m);
		return (NULL);
	}

	ip = mtod(m, struct ip *);
	hlen = ip->ip_hl << 2;

	/*
	 * Adjust ip_len to not reflect header,
	 * convert offset of this to bytes.
	 */
	ip->ip_len = htons(ntohs(ip->ip_len) - hlen);
	/*
	 * Make sure that fragments have a data length
	 * that's a non-zero multiple of 8 bytes, unless
	 * this is the last fragment.
	 */
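	/*
	 * (The fragment offset field on the wire counts 8-byte units,
	 * so a non-final fragment whose payload is not a multiple of 8
	 * could never line up with the fragment that follows it.)
	 */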
	if (ip->ip_len == htons(0) ||
	    ((ip->ip_off & htons(IP_MF)) && (ntohs(ip->ip_len) & 0x7) != 0)) {
		IPSTAT_INC(ips_toosmall); /* XXX */
		IPSTAT_INC(ips_fragdropped);
		m_freem(m);
		return (NULL);
	}
	if (ip->ip_off & htons(IP_MF))
		m->m_flags |= M_IP_FRAG;
	else
		m->m_flags &= ~M_IP_FRAG;
	ip->ip_off = htons(ntohs(ip->ip_off) << 3);

	/*
	 * Make sure the fragment lies within a packet of valid size.
	 */
	if (ntohs(ip->ip_len) + ntohs(ip->ip_off) > IP_MAXPACKET) {
		IPSTAT_INC(ips_toolong);
		IPSTAT_INC(ips_fragdropped);
		m_freem(m);
		return (NULL);
	}

	/*
	 * Attempt reassembly; if it succeeds, proceed.
	 * ip_reass() will return a different mbuf.
	 */
	IPSTAT_INC(ips_fragments);
	m->m_pkthdr.PH_loc.ptr = ip;

	/*
	 * Presence of header sizes in mbufs
	 * would confuse code below.
	 */
	m->m_data += hlen;
	m->m_len -= hlen;

	hashkey[0] = ip->ip_src.s_addr;
	hashkey[1] = ip->ip_dst.s_addr;
	hashkey[2] = (uint32_t)ip->ip_p << 16;
	hashkey[2] += ip->ip_id;
	hash = jenkins_hash32(hashkey, nitems(hashkey), V_ipq_hashseed);
	hash &= IPREASS_HMASK;
	head = &V_ipq[hash].head;
	IPQ_LOCK(hash);

	/*
	 * Look for queue of fragments
	 * of this datagram.
	 */
	TAILQ_FOREACH(fp, head, ipq_list)
		if (ip->ip_id == fp->ipq_id &&
		    ip->ip_src.s_addr == fp->ipq_src.s_addr &&
		    ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
#ifdef MAC
		    mac_ipq_match(m, fp) &&
#endif
		    ip->ip_p == fp->ipq_p)
			break;
	/*
	 * If first fragment to arrive, create a reassembly queue.
	 */
	if (fp == NULL) {
		if (V_ipq[hash].count < V_ipreass_maxbucketsize)
			fp = uma_zalloc(V_ipq_zone, M_NOWAIT);
		if (fp == NULL)
			fp = ipq_reuse(hash);
		if (fp == NULL)
			goto dropfrag;
#ifdef MAC
		if (mac_ipq_init(fp, M_NOWAIT) != 0) {
			uma_zfree(V_ipq_zone, fp);
			fp = NULL;
			goto dropfrag;
		}
		mac_ipq_create(m, fp);
#endif
		TAILQ_INSERT_HEAD(head, fp, ipq_list);
		V_ipq[hash].count++;
		fp->ipq_nfrags = 1;
		atomic_add_int(&nfrags, 1);
		fp->ipq_ttl = IPFRAGTTL;
		fp->ipq_p = ip->ip_p;
		fp->ipq_id = ip->ip_id;
		fp->ipq_src = ip->ip_src;
		fp->ipq_dst = ip->ip_dst;
		fp->ipq_frags = m;
		if (m->m_flags & M_IP_FRAG)
			fp->ipq_maxoff = -1;
		else
			fp->ipq_maxoff = ntohs(ip->ip_off) + ntohs(ip->ip_len);
		m->m_nextpkt = NULL;
		goto done;
	} else {
		/*
		 * If we already saw the last fragment, make sure
		 * this fragment's offset looks sane.  Otherwise, if
		 * this is the last fragment, record its endpoint.
		 */
		if (fp->ipq_maxoff > 0) {
			i = ntohs(ip->ip_off) + ntohs(ip->ip_len);
			if (((m->m_flags & M_IP_FRAG) && i >= fp->ipq_maxoff) ||
			    ((m->m_flags & M_IP_FRAG) == 0 &&
			    i != fp->ipq_maxoff)) {
				fp = NULL;
				goto dropfrag;
			}
		} else if ((m->m_flags & M_IP_FRAG) == 0)
			fp->ipq_maxoff = ntohs(ip->ip_off) + ntohs(ip->ip_len);
		fp->ipq_nfrags++;
		atomic_add_int(&nfrags, 1);
#ifdef MAC
		mac_ipq_update(m, fp);
#endif
	}

#define	GETIP(m)	((struct ip*)((m)->m_pkthdr.PH_loc.ptr))

	/*
	 * Handle ECN by comparing this segment with the first one;
	 * if CE is set, do not lose CE.
	 * Drop if CE and not-ECT are mixed for the same packet.
	 */
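	/*
	 * That is, the valid combinations reduce to:
	 *   new CE      + first not-ECT  -> drop
	 *   new CE      + first ECT      -> set CE on the first fragment
	 *   new not-ECT + first ECT/CE   -> drop
	 */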
	ecn = ip->ip_tos & IPTOS_ECN_MASK;
	ecn0 = GETIP(fp->ipq_frags)->ip_tos & IPTOS_ECN_MASK;
	if (ecn == IPTOS_ECN_CE) {
		if (ecn0 == IPTOS_ECN_NOTECT)
			goto dropfrag;
		if (ecn0 != IPTOS_ECN_CE)
			GETIP(fp->ipq_frags)->ip_tos |= IPTOS_ECN_CE;
	}
	if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT)
		goto dropfrag;

	/*
	 * Find a segment which begins after this one does.
	 */
	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
		if (ntohs(GETIP(q)->ip_off) > ntohs(ip->ip_off))
			break;

	/*
	 * If there is a preceding segment, it may provide some of
	 * our data already.  If so, drop the data from the incoming
	 * segment.  If it provides all of our data, drop us, otherwise
	 * stick new segment in the proper place.
	 *
	 * If some of the data is dropped from the preceding
	 * segment, then its checksum is invalidated.
	 */
	if (p) {
		i = ntohs(GETIP(p)->ip_off) + ntohs(GETIP(p)->ip_len) -
		    ntohs(ip->ip_off);
		if (i > 0) {
			if (i >= ntohs(ip->ip_len))
				goto dropfrag;
			m_adj(m, i);
			m->m_pkthdr.csum_flags = 0;
			ip->ip_off = htons(ntohs(ip->ip_off) + i);
			ip->ip_len = htons(ntohs(ip->ip_len) - i);
		}
		m->m_nextpkt = p->m_nextpkt;
		p->m_nextpkt = m;
	} else {
		m->m_nextpkt = fp->ipq_frags;
		fp->ipq_frags = m;
	}

	/*
	 * While we overlap succeeding segments trim them or,
	 * if they are completely covered, dequeue them.
	 */
	for (; q != NULL && ntohs(ip->ip_off) + ntohs(ip->ip_len) >
	    ntohs(GETIP(q)->ip_off); q = nq) {
		i = (ntohs(ip->ip_off) + ntohs(ip->ip_len)) -
		    ntohs(GETIP(q)->ip_off);
		if (i < ntohs(GETIP(q)->ip_len)) {
			GETIP(q)->ip_len = htons(ntohs(GETIP(q)->ip_len) - i);
			GETIP(q)->ip_off = htons(ntohs(GETIP(q)->ip_off) + i);
			m_adj(q, i);
			q->m_pkthdr.csum_flags = 0;
			break;
		}
		nq = q->m_nextpkt;
		m->m_nextpkt = nq;
		IPSTAT_INC(ips_fragdropped);
		fp->ipq_nfrags--;
		atomic_subtract_int(&nfrags, 1);
		m_freem(q);
	}

	/*
	 * Check for complete reassembly and perform frag per packet
	 * limiting.
	 *
	 * Frag limiting is performed here so that the nth frag has
	 * a chance to complete the packet before we drop the packet.
	 * As a result, n+1 frags are actually allowed per packet, but
	 * only n will ever be stored.  (n = maxfragsperpacket.)
	 */
	next = 0;
	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) {
		if (ntohs(GETIP(q)->ip_off) != next) {
			if (fp->ipq_nfrags > V_maxfragsperpacket)
				ipq_drop(&V_ipq[hash], fp);
			goto done;
		}
		next += ntohs(GETIP(q)->ip_len);
	}
	/* Make sure the last packet didn't have the IP_MF flag. */
	if (p->m_flags & M_IP_FRAG) {
		if (fp->ipq_nfrags > V_maxfragsperpacket)
			ipq_drop(&V_ipq[hash], fp);
		goto done;
	}

	/*
	 * Reassembly is complete.  Make sure the packet is a sane size.
	 */
	q = fp->ipq_frags;
	ip = GETIP(q);
	if (next + (ip->ip_hl << 2) > IP_MAXPACKET) {
		IPSTAT_INC(ips_toolong);
		ipq_drop(&V_ipq[hash], fp);
		goto done;
	}

	/*
	 * Concatenate fragments.
	 */
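	/*
	 * The first fragment's mbuf keeps the packet header; hardware
	 * checksum flags are intersected and the partial csum_data
	 * values summed across all fragments, then folded below.
	 */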
	m = q;
	t = m->m_next;
	m->m_next = NULL;
	m_cat(m, t);
	nq = q->m_nextpkt;
	q->m_nextpkt = NULL;
	for (q = nq; q != NULL; q = nq) {
		nq = q->m_nextpkt;
		q->m_nextpkt = NULL;
		m->m_pkthdr.csum_flags &= q->m_pkthdr.csum_flags;
		m->m_pkthdr.csum_data += q->m_pkthdr.csum_data;
		m_demote_pkthdr(q);
		m_cat(m, q);
	}
	/*
	 * In order to do checksumming faster we do 'end-around carry' here
	 * (and not in for{} loop), though it implies we are not going to
	 * reassemble more than 64k fragments.
	 */
	while (m->m_pkthdr.csum_data & 0xffff0000)
		m->m_pkthdr.csum_data = (m->m_pkthdr.csum_data & 0xffff) +
		    (m->m_pkthdr.csum_data >> 16);
	atomic_subtract_int(&nfrags, fp->ipq_nfrags);
#ifdef MAC
	mac_ipq_reassemble(fp, m);
	mac_ipq_destroy(fp);
#endif

	/*
	 * Create header for new ip packet by modifying header of first
	 * packet; dequeue and discard fragment reassembly header.
	 * Make header visible.
	 */
	ip->ip_len = htons((ip->ip_hl << 2) + next);
	ip->ip_src = fp->ipq_src;
	ip->ip_dst = fp->ipq_dst;
	TAILQ_REMOVE(head, fp, ipq_list);
	V_ipq[hash].count--;
	uma_zfree(V_ipq_zone, fp);
	m->m_len += (ip->ip_hl << 2);
	m->m_data -= (ip->ip_hl << 2);
	/* some debugging cruft by sklower, below, will go away soon */
	if (m->m_flags & M_PKTHDR)	/* XXX this should be done elsewhere */
		m_fixhdr(m);
	IPSTAT_INC(ips_reassembled);
	IPQ_UNLOCK(hash);

#ifdef RSS
	/*
	 * Query the RSS layer for the flowid / flowtype for the
	 * mbuf payload.
	 *
	 * For now, just assume we have to calculate a new one.
	 * Later on we should check to see if the assigned flowid matches
	 * what RSS wants for the given IP protocol and if so, just keep it.
	 *
	 * We then queue into the relevant netisr so it can be dispatched
	 * to the correct CPU.
	 *
	 * Note - this may return 1, which means the flowid in the mbuf
	 * is correct for the configured RSS hash types and can be used.
	 */
	if (rss_mbuf_software_hash_v4(m, 0, &rss_hash, &rss_type) == 0) {
		m->m_pkthdr.flowid = rss_hash;
		M_HASHTYPE_SET(m, rss_type);
	}

	/*
	 * Queue/dispatch for reprocessing.
	 *
	 * Note: this is much slower than just handling the frame in the
	 * current receive context.  It's likely worth investigating
	 * why this is.
	 */
	netisr_dispatch(NETISR_IP_DIRECT, m);
	return (NULL);
#endif

	/* Handle in-line */
	return (m);

dropfrag:
	IPSTAT_INC(ips_fragdropped);
	if (fp != NULL) {
		fp->ipq_nfrags--;
		atomic_subtract_int(&nfrags, 1);
	}
	m_freem(m);
done:
	IPQ_UNLOCK(hash);
	return (NULL);

#undef GETIP
}

/*
 * Initialize IP reassembly structures.
 */
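/*
 * Note that all bucket mutexes share one lock name; MTX_DUPOK below
 * keeps WITNESS quiet when ipq_reuse() holds two bucket locks at once
 * while stealing an entry from a neighboring bucket.
 */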
void
ipreass_init(void)
{
	int max;

	for (int i = 0; i < IPREASS_NHASH; i++) {
		TAILQ_INIT(&V_ipq[i].head);
		mtx_init(&V_ipq[i].lock, "IP reassembly", NULL,
		    MTX_DEF | MTX_DUPOK);
		V_ipq[i].count = 0;
	}
	V_ipq_hashseed = arc4random();
	V_maxfragsperpacket = 16;
	V_ipq_zone = uma_zcreate("ipq", sizeof(struct ipq), NULL, NULL, NULL,
	    NULL, UMA_ALIGN_PTR, 0);
	max = IP_MAXFRAGPACKETS;
	max = uma_zone_set_max(V_ipq_zone, max);
	V_ipreass_maxbucketsize = imax(max / (IPREASS_NHASH / 2), 1);

	if (IS_DEFAULT_VNET(curvnet)) {
		maxfrags = IP_MAXFRAGS;
		EVENTHANDLER_REGISTER(nmbclusters_change, ipreass_zone_change,
		    NULL, EVENTHANDLER_PRI_ANY);
	}
}

/*
 * If a timer expires on a reassembly queue, discard it.
 */
void
ipreass_slowtimo(void)
{
	struct ipq *fp, *tmp;

	for (int i = 0; i < IPREASS_NHASH; i++) {
		IPQ_LOCK(i);
		TAILQ_FOREACH_SAFE(fp, &V_ipq[i].head, ipq_list, tmp)
			if (--fp->ipq_ttl == 0)
				ipq_timeout(&V_ipq[i], fp);
		IPQ_UNLOCK(i);
	}
}

/*
 * Drain off all datagram fragments.
 */
void
ipreass_drain(void)
{

	for (int i = 0; i < IPREASS_NHASH; i++) {
		IPQ_LOCK(i);
		while (!TAILQ_EMPTY(&V_ipq[i].head))
			ipq_drop(&V_ipq[i], TAILQ_FIRST(&V_ipq[i].head));
		KASSERT(V_ipq[i].count == 0,
		    ("%s: V_ipq[%d] count %d (V_ipq=%p)", __func__, i,
		    V_ipq[i].count, V_ipq));
		IPQ_UNLOCK(i);
	}
}

/*
 * Drain off all datagram fragments belonging to
 * the given network interface.
 */
static void
ipreass_cleanup(void *arg __unused, struct ifnet *ifp)
{
	struct ipq *fp, *temp;
	struct mbuf *m;
	int i;

	KASSERT(ifp != NULL, ("%s: ifp is NULL", __func__));

	CURVNET_SET_QUIET(ifp->if_vnet);
	for (i = 0; i < IPREASS_NHASH; i++) {
		IPQ_LOCK(i);
		/* Scan fragment list. */
		TAILQ_FOREACH_SAFE(fp, &V_ipq[i].head, ipq_list, temp) {
			for (m = fp->ipq_frags; m != NULL; m = m->m_nextpkt) {
				if (m->m_pkthdr.rcvif == ifp) {
					ipq_drop(&V_ipq[i], fp);
					break;
				}
			}
		}
		IPQ_UNLOCK(i);
	}
	CURVNET_RESTORE();
}
EVENTHANDLER_DEFINE(ifnet_departure_event, ipreass_cleanup, NULL, 0);

#ifdef VIMAGE
/*
 * Destroy IP reassembly structures.
 */
void
ipreass_destroy(void)
{

	ipreass_drain();
	uma_zdestroy(V_ipq_zone);
	for (int i = 0; i < IPREASS_NHASH; i++)
		mtx_destroy(&V_ipq[i].lock);
}
#endif

/*
 * After the reassembly queue limit has been updated, propagate the
 * change to UMA.  The UMA zone max has slightly different semantics
 * than the sysctl, for historical reasons.
 */
static void
ipreass_drain_tomax(void)
{
	struct ipq *fp;
	int target;

	/*
	 * Make sure each bucket is under the new limit.  If
	 * necessary, drop enough of the oldest elements from
	 * each bucket to get under the new limit.
	 */
	for (int i = 0; i < IPREASS_NHASH; i++) {
		IPQ_LOCK(i);
		while (V_ipq[i].count > V_ipreass_maxbucketsize &&
		    (fp = TAILQ_LAST(&V_ipq[i].head, ipqhead)) != NULL)
			ipq_timeout(&V_ipq[i], fp);
		IPQ_UNLOCK(i);
	}

	/*
	 * If we are over the maximum number of fragments,
	 * drain off enough to get down to the new limit,
	 * stripping off the last elements on queues.  Every
	 * run we strip the oldest element from each bucket.
	 */
	target = uma_zone_get_max(V_ipq_zone);
	while (uma_zone_get_cur(V_ipq_zone) > target) {
		for (int i = 0; i < IPREASS_NHASH; i++) {
			IPQ_LOCK(i);
			fp = TAILQ_LAST(&V_ipq[i].head, ipqhead);
			if (fp != NULL)
				ipq_timeout(&V_ipq[i], fp);
			IPQ_UNLOCK(i);
		}
	}
}

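/*
 * Eventhandler for nmbclusters_change: recompute the global fragment
 * limit and re-propagate the zone and bucket limits into every VNET.
 */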
static void
ipreass_zone_change(void *tag)
{
	VNET_ITERATOR_DECL(vnet_iter);
	int max;

	maxfrags = IP_MAXFRAGS;
	max = IP_MAXFRAGPACKETS;
	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		max = uma_zone_set_max(V_ipq_zone, max);
		V_ipreass_maxbucketsize = imax(max / (IPREASS_NHASH / 2), 1);
		ipreass_drain_tomax();
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
}

/*
 * Change the limit on the UMA zone, or disable fragment reassembly
 * entirely.  Since 0 and -1 are special values here, we need our own
 * handler instead of sysctl_handle_uma_zone_max().
 */
static int
sysctl_maxfragpackets(SYSCTL_HANDLER_ARGS)
{
	int error, max;

	if (V_noreass == 0) {
		max = uma_zone_get_max(V_ipq_zone);
		if (max == 0)
			max = -1;
	} else
		max = 0;
	error = sysctl_handle_int(oidp, &max, 0, req);
	if (error || !req->newptr)
		return (error);
	if (max > 0) {
		/*
		 * XXXRW: Might be a good idea to sanity check the argument
		 * and place an extreme upper bound.
		 */
		max = uma_zone_set_max(V_ipq_zone, max);
		V_ipreass_maxbucketsize = imax(max / (IPREASS_NHASH / 2), 1);
		ipreass_drain_tomax();
		V_noreass = 0;
	} else if (max == 0) {
		V_noreass = 1;
		ipreass_drain();
	} else if (max == -1) {
		V_noreass = 0;
		uma_zone_set_max(V_ipq_zone, 0);
		V_ipreass_maxbucketsize = INT_MAX;
	} else
		return (EINVAL);
	return (0);
}

/*
 * Look for an old fragment queue header that can be reused.  Try to
 * reuse a header from the currently locked hash bucket first; failing
 * that, steal one from another bucket that can be locked without
 * blocking.
 */
static struct ipq *
ipq_reuse(int start)
{
	struct ipq *fp;
	int bucket, i;

	IPQ_LOCK_ASSERT(start);

	for (i = 0; i < IPREASS_NHASH; i++) {
		bucket = (start + i) % IPREASS_NHASH;
		if (bucket != start && IPQ_TRYLOCK(bucket) == 0)
			continue;
		fp = TAILQ_LAST(&V_ipq[bucket].head, ipqhead);
		if (fp) {
			struct mbuf *m;

			IPSTAT_ADD(ips_fragtimeout, fp->ipq_nfrags);
			atomic_subtract_int(&nfrags, fp->ipq_nfrags);
			while (fp->ipq_frags) {
				m = fp->ipq_frags;
				fp->ipq_frags = m->m_nextpkt;
				m_freem(m);
			}
			TAILQ_REMOVE(&V_ipq[bucket].head, fp, ipq_list);
			V_ipq[bucket].count--;
			if (bucket != start)
				IPQ_UNLOCK(bucket);
			break;
		}
		if (bucket != start)
			IPQ_UNLOCK(bucket);
	}
	IPQ_LOCK_ASSERT(start);
	return (fp);
}

/*
 * Free a fragment reassembly header and all associated datagrams.
 */
static void
ipq_free(struct ipqbucket *bucket, struct ipq *fp)
{
	struct mbuf *q;

	atomic_subtract_int(&nfrags, fp->ipq_nfrags);
	while (fp->ipq_frags) {
		q = fp->ipq_frags;
		fp->ipq_frags = q->m_nextpkt;
		m_freem(q);
	}
	TAILQ_REMOVE(&bucket->head, fp, ipq_list);
	bucket->count--;
	uma_zfree(V_ipq_zone, fp);
}

/*
 * Get or set the maximum number of reassembly queues per bucket.
 */
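/*
 * Lowering the limit takes effect immediately: ipreass_drain_tomax()
 * trims every bucket down to the new size.
 */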
static int
sysctl_maxfragbucketsize(SYSCTL_HANDLER_ARGS)
{
	int error, max;

	max = V_ipreass_maxbucketsize;
	error = sysctl_handle_int(oidp, &max, 0, req);
	if (error || !req->newptr)
		return (error);
	if (max <= 0)
		return (EINVAL);
	V_ipreass_maxbucketsize = max;
	ipreass_drain_tomax();
	return (0);
}