/*-
 * Copyright (c) 2015 Gleb Smirnoff <glebius@FreeBSD.org>
 * Copyright (c) 2015 Adrian Chadd <adrian@FreeBSD.org>
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ip_input.c	8.2 (Berkeley) 1/4/94
 */

#include <sys/cdefs.h>
#include "opt_rss.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/hash.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_private.h>
#include <net/rss_config.h>
#include <net/netisr.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/in_rss.h>
#ifdef MAC
#include <security/mac/mac_framework.h>
#endif

SYSCTL_DECL(_net_inet_ip);

/*
 * Reassembly headers are stored in hash buckets.
 */
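/*
 * Each bucket carries its own mutex, expiry callout and entry count
 * (see struct ipqbucket below), so fragments that hash to different
 * buckets can be processed concurrently without a single global lock.
 */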
#define	IPREASS_NHASH_LOG2	10
#define	IPREASS_NHASH		(1 << IPREASS_NHASH_LOG2)
#define	IPREASS_HMASK		(V_ipq_hashsize - 1)

struct ipqbucket {
	TAILQ_HEAD(ipqhead, ipq) head;
	struct mtx		lock;
	struct callout		timer;
#ifdef VIMAGE
	struct vnet		*vnet;
#endif
	int			count;
};

VNET_DEFINE_STATIC(struct ipqbucket *, ipq);
#define	V_ipq		VNET(ipq)
VNET_DEFINE_STATIC(uint32_t, ipq_hashseed);
#define	V_ipq_hashseed	VNET(ipq_hashseed)
VNET_DEFINE_STATIC(uint32_t, ipq_hashsize);
#define	V_ipq_hashsize	VNET(ipq_hashsize)

#define	IPQ_LOCK(i)	mtx_lock(&V_ipq[i].lock)
#define	IPQ_TRYLOCK(i)	mtx_trylock(&V_ipq[i].lock)
#define	IPQ_UNLOCK(i)	mtx_unlock(&V_ipq[i].lock)
#define	IPQ_LOCK_ASSERT(i)	mtx_assert(&V_ipq[i].lock, MA_OWNED)
#define	IPQ_BUCKET_LOCK_ASSERT(b)	mtx_assert(&(b)->lock, MA_OWNED)

VNET_DEFINE_STATIC(int, ipreass_maxbucketsize);
#define	V_ipreass_maxbucketsize	VNET(ipreass_maxbucketsize)

void		ipreass_init(void);
void		ipreass_vnet_init(void);
#ifdef VIMAGE
void		ipreass_destroy(void);
#endif
static int	sysctl_maxfragpackets(SYSCTL_HANDLER_ARGS);
static int	sysctl_maxfragbucketsize(SYSCTL_HANDLER_ARGS);
static int	sysctl_fragttl(SYSCTL_HANDLER_ARGS);
static void	ipreass_zone_change(void *);
static void	ipreass_drain_tomax(void);
static void	ipq_free(struct ipqbucket *, struct ipq *);
static struct ipq * ipq_reuse(int);
static void	ipreass_callout(void *);
static void	ipreass_reschedule(struct ipqbucket *);

static inline void
ipq_timeout(struct ipqbucket *bucket, struct ipq *fp)
{

	IPSTAT_ADD(ips_fragtimeout, fp->ipq_nfrags);
	ipq_free(bucket, fp);
}

static inline void
ipq_drop(struct ipqbucket *bucket, struct ipq *fp)
{

	IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags);
	ipq_free(bucket, fp);
	ipreass_reschedule(bucket);
}

/*
 * By default, limit the number of IP fragments across all reassembly
 * queues to 1/32 of the total number of mbuf clusters.
 *
 * Limit the total number of reassembly queues per VNET to the
 * IP fragment limit, but ensure the limit will not allow any bucket
 * to grow above 100 items.  (The bucket limit is
 * IP_MAXFRAGPACKETS / (V_ipq_hashsize / 2), so the 50 is the correct
 * multiplier to reach a 100-item limit.)
 * The 100-item limit was chosen as brief testing seems to show that
 * this produces "reasonable" performance on some subset of systems
 * under DoS attack.
 */
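/*
 * Worked example of the limits above, assuming the default hash size of
 * IPREASS_NHASH (1024) buckets and that the nmbclusters term does not
 * dominate: IP_MAXFRAGPACKETS is capped at 1024 * 50 = 51200 queue
 * entries, and ipreass_vnet_init() derives the per-bucket limit as
 * 51200 / (1024 / 2) = 100 entries.
 */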
#define	IP_MAXFRAGS		(nmbclusters / 32)
#define	IP_MAXFRAGPACKETS	(imin(IP_MAXFRAGS, V_ipq_hashsize * 50))

static int		maxfrags;
static u_int __exclusive_cache_line	nfrags;
SYSCTL_INT(_net_inet_ip, OID_AUTO, maxfrags, CTLFLAG_RW,
    &maxfrags, 0,
    "Maximum number of IPv4 fragments allowed across all reassembly queues");
SYSCTL_UINT(_net_inet_ip, OID_AUTO, curfrags, CTLFLAG_RD,
    &nfrags, 0,
    "Current number of IPv4 fragments across all reassembly queues");

VNET_DEFINE_STATIC(uma_zone_t, ipq_zone);
#define	V_ipq_zone	VNET(ipq_zone)

SYSCTL_UINT(_net_inet_ip, OID_AUTO, reass_hashsize,
    CTLFLAG_VNET | CTLFLAG_RDTUN, &VNET_NAME(ipq_hashsize), 0,
    "Size of IP fragment reassembly hashtable");

SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragpackets,
    CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    NULL, 0, sysctl_maxfragpackets, "I",
    "Maximum number of IPv4 fragment reassembly queue entries");
SYSCTL_UMA_CUR(_net_inet_ip, OID_AUTO, fragpackets, CTLFLAG_VNET,
    &VNET_NAME(ipq_zone),
    "Current number of IPv4 fragment reassembly queue entries");

VNET_DEFINE_STATIC(int, noreass);
#define	V_noreass	VNET(noreass)

VNET_DEFINE_STATIC(int, maxfragsperpacket);
#define	V_maxfragsperpacket	VNET(maxfragsperpacket)
SYSCTL_INT(_net_inet_ip, OID_AUTO, maxfragsperpacket, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(maxfragsperpacket), 0,
    "Maximum number of IPv4 fragments allowed per packet");
SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragbucketsize,
    CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0,
    sysctl_maxfragbucketsize, "I",
    "Maximum number of IPv4 fragment reassembly queue entries per bucket");

VNET_DEFINE_STATIC(u_int, ipfragttl) = 30;
#define	V_ipfragttl	VNET(ipfragttl)
SYSCTL_PROC(_net_inet_ip, OID_AUTO, fragttl, CTLTYPE_INT | CTLFLAG_RW |
    CTLFLAG_MPSAFE | CTLFLAG_VNET, NULL, 0, sysctl_fragttl, "IU",
    "IP fragment life time on reassembly queue (seconds)");

/*
 * Take an incoming datagram fragment and try to reassemble it into a
 * whole datagram.  If the argument is the first fragment or one
 * in between, the function will return NULL and store the mbuf
 * in the fragment chain.  If the argument is the last fragment,
 * the packet will be reassembled and the pointer to the new
 * mbuf returned for further processing.  Only m_tags attached
 * to the first packet/fragment are preserved.
 * The IP header is *NOT* adjusted out of iplen.
 */
#define	M_IP_FRAG	M_PROTO9
struct mbuf *
ip_reass(struct mbuf *m)
{
	struct ip *ip;
	struct mbuf *p, *q, *nq, *t;
	struct ipq *fp;
	struct ifnet *srcifp;
	struct ipqhead *head;
	int i, hlen, next, tmpmax;
	u_int8_t ecn, ecn0;
	uint32_t hash, hashkey[3];
#ifdef RSS
	uint32_t rss_hash, rss_type;
#endif

	/*
	 * If reassembly is disabled or maxfragsperpacket is 0,
	 * never accept fragments.
	 * Also, drop the packet if it would exceed the maximum
	 * number of fragments.
	 */
	tmpmax = maxfrags;
	if (V_noreass == 1 || V_maxfragsperpacket == 0 ||
	    (tmpmax >= 0 && atomic_load_int(&nfrags) >= (u_int)tmpmax)) {
		IPSTAT_INC(ips_fragments);
		IPSTAT_INC(ips_fragdropped);
		m_freem(m);
		return (NULL);
	}

	ip = mtod(m, struct ip *);
	hlen = ip->ip_hl << 2;

	/*
	 * Adjust ip_len to not reflect the header and convert
	 * the offset of this fragment to bytes.
	 */
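	/*
	 * For illustration: the on-wire fragment offset counts 8-byte
	 * units, so an ip_off field of 185 means the fragment starts at
	 * byte 185 * 8 = 1480 of the original datagram.  The left shift
	 * below performs this conversion once, so all later comparisons
	 * can be done in bytes.
	 */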
	ip->ip_len = htons(ntohs(ip->ip_len) - hlen);
	/*
	 * Make sure that fragments have a data length
	 * that's a non-zero multiple of 8 bytes, unless
	 * this is the last fragment.
	 */
	if (ip->ip_len == htons(0) ||
	    ((ip->ip_off & htons(IP_MF)) && (ntohs(ip->ip_len) & 0x7) != 0)) {
		IPSTAT_INC(ips_toosmall); /* XXX */
		IPSTAT_INC(ips_fragdropped);
		m_freem(m);
		return (NULL);
	}
	if (ip->ip_off & htons(IP_MF))
		m->m_flags |= M_IP_FRAG;
	else
		m->m_flags &= ~M_IP_FRAG;
	ip->ip_off = htons(ntohs(ip->ip_off) << 3);

	/*
	 * Make sure the fragment lies within a packet of valid size.
	 */
	if (ntohs(ip->ip_len) + ntohs(ip->ip_off) > IP_MAXPACKET) {
		IPSTAT_INC(ips_toolong);
		IPSTAT_INC(ips_fragdropped);
		m_freem(m);
		return (NULL);
	}

	/*
	 * Store the receive network interface pointer for later.
	 */
	srcifp = m->m_pkthdr.rcvif;

	/*
	 * Attempt reassembly; if it succeeds, proceed.
	 * ip_reass() will return a different mbuf.
	 */
	IPSTAT_INC(ips_fragments);
	m->m_pkthdr.PH_loc.ptr = ip;

	/*
	 * The presence of header sizes in the mbufs
	 * would confuse the code below.
	 */
	m->m_data += hlen;
	m->m_len -= hlen;

	hashkey[0] = ip->ip_src.s_addr;
	hashkey[1] = ip->ip_dst.s_addr;
	hashkey[2] = (uint32_t)ip->ip_p << 16;
	hashkey[2] += ip->ip_id;
	hash = jenkins_hash32(hashkey, nitems(hashkey), V_ipq_hashseed);
	hash &= IPREASS_HMASK;
	head = &V_ipq[hash].head;
	IPQ_LOCK(hash);

	/*
	 * Look for a queue of fragments
	 * of this datagram.
	 */
	TAILQ_FOREACH(fp, head, ipq_list)
		if (ip->ip_id == fp->ipq_id &&
		    ip->ip_src.s_addr == fp->ipq_src.s_addr &&
		    ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
#ifdef MAC
		    mac_ipq_match(m, fp) &&
#endif
		    ip->ip_p == fp->ipq_p)
			break;
	/*
	 * If this is the first fragment to arrive, create a reassembly queue.
	 */
	if (fp == NULL) {
		if (V_ipq[hash].count < V_ipreass_maxbucketsize)
			fp = uma_zalloc(V_ipq_zone, M_NOWAIT);
		if (fp == NULL)
			fp = ipq_reuse(hash);
		if (fp == NULL)
			goto dropfrag;
#ifdef MAC
		if (mac_ipq_init(fp, M_NOWAIT) != 0) {
			uma_zfree(V_ipq_zone, fp);
			fp = NULL;
			goto dropfrag;
		}
		mac_ipq_create(m, fp);
#endif
		TAILQ_INSERT_HEAD(head, fp, ipq_list);
		V_ipq[hash].count++;
		fp->ipq_nfrags = 1;
		atomic_add_int(&nfrags, 1);
		fp->ipq_expire = time_uptime + V_ipfragttl;
		fp->ipq_p = ip->ip_p;
		fp->ipq_id = ip->ip_id;
		fp->ipq_src = ip->ip_src;
		fp->ipq_dst = ip->ip_dst;
		fp->ipq_frags = m;
		if (m->m_flags & M_IP_FRAG)
			fp->ipq_maxoff = -1;
		else
			fp->ipq_maxoff = ntohs(ip->ip_off) + ntohs(ip->ip_len);
		m->m_nextpkt = NULL;
		if (fp == TAILQ_LAST(head, ipqhead))
			callout_reset_sbt(&V_ipq[hash].timer,
			    SBT_1S * V_ipfragttl, SBT_1S, ipreass_callout,
			    &V_ipq[hash], 0);
		else
			MPASS(callout_active(&V_ipq[hash].timer));
		goto done;
	} else {
		/*
		 * If we already saw the last fragment, make sure
		 * this fragment's offset looks sane.  Otherwise, if
		 * this is the last fragment, record its endpoint.
		 */
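		/*
		 * Note: fp->ipq_maxoff is -1 until the fragment without
		 * IP_MF arrives and thereafter holds the end offset of
		 * the datagram, so the checks below reject fragments
		 * that reach or pass the recorded end as well as a
		 * second "last" fragment with a different endpoint.
		 */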
		if (fp->ipq_maxoff > 0) {
			i = ntohs(ip->ip_off) + ntohs(ip->ip_len);
			if (((m->m_flags & M_IP_FRAG) && i >= fp->ipq_maxoff) ||
			    ((m->m_flags & M_IP_FRAG) == 0 &&
			    i != fp->ipq_maxoff)) {
				fp = NULL;
				goto dropfrag;
			}
		} else if ((m->m_flags & M_IP_FRAG) == 0)
			fp->ipq_maxoff = ntohs(ip->ip_off) + ntohs(ip->ip_len);
		fp->ipq_nfrags++;
		atomic_add_int(&nfrags, 1);
#ifdef MAC
		mac_ipq_update(m, fp);
#endif
	}

#define	GETIP(m)	((struct ip*)((m)->m_pkthdr.PH_loc.ptr))

	/*
	 * Handle ECN by comparing this segment with the first one;
	 * if CE is set, do not lose CE.
	 * Drop the fragment if CE and not-ECT are mixed for the same packet.
	 */
	ecn = ip->ip_tos & IPTOS_ECN_MASK;
	ecn0 = GETIP(fp->ipq_frags)->ip_tos & IPTOS_ECN_MASK;
	if (ecn == IPTOS_ECN_CE) {
		if (ecn0 == IPTOS_ECN_NOTECT)
			goto dropfrag;
		if (ecn0 != IPTOS_ECN_CE)
			GETIP(fp->ipq_frags)->ip_tos |= IPTOS_ECN_CE;
	}
	if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT)
		goto dropfrag;

	/*
	 * Find a segment which begins after this one does.
	 */
	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
		if (ntohs(GETIP(q)->ip_off) > ntohs(ip->ip_off))
			break;

	/*
	 * If there is a preceding segment, it may provide some of
	 * our data already.  If so, drop the data from the incoming
	 * segment.  If it provides all of our data, drop us; otherwise,
	 * stick the new segment in the proper place.
	 *
	 * If some of the data is dropped from the preceding
	 * segment, then its checksum is invalidated.
	 */
	if (p) {
		i = ntohs(GETIP(p)->ip_off) + ntohs(GETIP(p)->ip_len) -
		    ntohs(ip->ip_off);
		if (i > 0) {
			if (i >= ntohs(ip->ip_len))
				goto dropfrag;
			m_adj(m, i);
			m->m_pkthdr.csum_flags = 0;
			ip->ip_off = htons(ntohs(ip->ip_off) + i);
			ip->ip_len = htons(ntohs(ip->ip_len) - i);
		}
		m->m_nextpkt = p->m_nextpkt;
		p->m_nextpkt = m;
	} else {
		m->m_nextpkt = fp->ipq_frags;
		fp->ipq_frags = m;
	}

	/*
	 * While we overlap succeeding segments trim them or,
	 * if they are completely covered, dequeue them.
	 */
	for (; q != NULL && ntohs(ip->ip_off) + ntohs(ip->ip_len) >
	    ntohs(GETIP(q)->ip_off); q = nq) {
		i = (ntohs(ip->ip_off) + ntohs(ip->ip_len)) -
		    ntohs(GETIP(q)->ip_off);
		if (i < ntohs(GETIP(q)->ip_len)) {
			GETIP(q)->ip_len = htons(ntohs(GETIP(q)->ip_len) - i);
			GETIP(q)->ip_off = htons(ntohs(GETIP(q)->ip_off) + i);
			m_adj(q, i);
			q->m_pkthdr.csum_flags = 0;
			break;
		}
		nq = q->m_nextpkt;
		m->m_nextpkt = nq;
		IPSTAT_INC(ips_fragdropped);
		fp->ipq_nfrags--;
		atomic_subtract_int(&nfrags, 1);
		m_freem(q);
	}

	/*
	 * Check for complete reassembly and perform frag per packet
	 * limiting.
	 *
	 * Frag limiting is performed here so that the nth frag has
	 * a chance to complete the packet before we drop the packet.
	 * As a result, n+1 frags are actually allowed per packet, but
	 * only n will ever be stored.  (n = maxfragsperpacket.)
	 */
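	/*
	 * The scan below walks the sorted fragment chain and checks that
	 * each fragment starts exactly where the previous one ended
	 * (offset 0, then 0 + len, and so on); any hole means the
	 * datagram is still incomplete.
	 */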
	next = 0;
	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) {
		if (ntohs(GETIP(q)->ip_off) != next) {
			if (fp->ipq_nfrags > V_maxfragsperpacket)
				ipq_drop(&V_ipq[hash], fp);
			goto done;
		}
		next += ntohs(GETIP(q)->ip_len);
	}
	/* Make sure the last fragment didn't have the IP_MF flag set. */
	if (p->m_flags & M_IP_FRAG) {
		if (fp->ipq_nfrags > V_maxfragsperpacket)
			ipq_drop(&V_ipq[hash], fp);
		goto done;
	}

	/*
	 * Reassembly is complete.  Make sure the packet is a sane size.
	 */
	q = fp->ipq_frags;
	ip = GETIP(q);
	if (next + (ip->ip_hl << 2) > IP_MAXPACKET) {
		IPSTAT_INC(ips_toolong);
		ipq_drop(&V_ipq[hash], fp);
		goto done;
	}

	/*
	 * Concatenate fragments.
	 */
	m = q;
	t = m->m_next;
	m->m_next = NULL;
	m_cat(m, t);
	nq = q->m_nextpkt;
	q->m_nextpkt = NULL;
	for (q = nq; q != NULL; q = nq) {
		nq = q->m_nextpkt;
		q->m_nextpkt = NULL;
		m->m_pkthdr.csum_flags &= q->m_pkthdr.csum_flags;
		m->m_pkthdr.csum_data += q->m_pkthdr.csum_data;
		m_demote_pkthdr(q);
		m_cat(m, q);
	}
	/*
	 * In order to do checksumming faster, we do the 'end-around carry'
	 * here (and not in the for() loop above), though it implies we are
	 * not going to reassemble more than 64k of fragments.
	 */
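	/*
	 * Worked example of the fold below: accumulating the per-fragment
	 * 16-bit partial checksums can overflow into the upper bits, e.g.
	 * csum_data == 0x1fffe; folding yields 0xfffe + 0x1 == 0xffff,
	 * the same value in ones-complement arithmetic.
	 */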
	while (m->m_pkthdr.csum_data & 0xffff0000)
		m->m_pkthdr.csum_data = (m->m_pkthdr.csum_data & 0xffff) +
		    (m->m_pkthdr.csum_data >> 16);
	atomic_subtract_int(&nfrags, fp->ipq_nfrags);
#ifdef MAC
	mac_ipq_reassemble(fp, m);
	mac_ipq_destroy(fp);
#endif

	/*
	 * Create the header for the new IP packet by modifying the header
	 * of the first fragment; dequeue and discard the fragment
	 * reassembly header.  Make the header visible.
	 */
	ip->ip_len = htons((ip->ip_hl << 2) + next);
	ip->ip_src = fp->ipq_src;
	ip->ip_dst = fp->ipq_dst;
	TAILQ_REMOVE(head, fp, ipq_list);
	V_ipq[hash].count--;
	uma_zfree(V_ipq_zone, fp);
	m->m_len += (ip->ip_hl << 2);
	m->m_data -= (ip->ip_hl << 2);
	/* some debugging cruft by sklower, below, will go away soon */
	if (m->m_flags & M_PKTHDR) {	/* XXX this should be done elsewhere */
		m_fixhdr(m);
		/* set a valid receive interface pointer */
		m->m_pkthdr.rcvif = srcifp;
	}
	IPSTAT_INC(ips_reassembled);
	ipreass_reschedule(&V_ipq[hash]);
	IPQ_UNLOCK(hash);

#ifdef RSS
	/*
	 * Query the RSS layer for the flowid / flowtype of the
	 * mbuf payload.
	 *
	 * For now, just assume we have to calculate a new one.
	 * Later on we should check to see if the assigned flowid matches
	 * what RSS wants for the given IP protocol and if so, just keep it.
	 *
	 * We then queue into the relevant netisr so it can be dispatched
	 * to the correct CPU.
	 *
	 * Note - this may return 1, which means the flowid in the mbuf
	 * is correct for the configured RSS hash types and can be used.
	 */
	if (rss_mbuf_software_hash_v4(m, 0, &rss_hash, &rss_type) == 0) {
		m->m_pkthdr.flowid = rss_hash;
		M_HASHTYPE_SET(m, rss_type);
	}

	/*
	 * Queue/dispatch for reprocessing.
	 *
	 * Note: this is much slower than just handling the frame in the
	 * current receive context.  It's likely worth investigating
	 * why this is.
	 */
	netisr_dispatch(NETISR_IP_DIRECT, m);
	return (NULL);
#endif

	/* Handle in-line. */
	return (m);

dropfrag:
	IPSTAT_INC(ips_fragdropped);
	if (fp != NULL) {
		fp->ipq_nfrags--;
		atomic_subtract_int(&nfrags, 1);
	}
	m_freem(m);
done:
	IPQ_UNLOCK(hash);
	return (NULL);

#undef GETIP
}

/*
 * The timer expired on a bucket.
 * There should be at least one ipq to be timed out.
 */
static void
ipreass_callout(void *arg)
{
	struct ipqbucket *bucket = arg;
	struct ipq *fp;

	IPQ_BUCKET_LOCK_ASSERT(bucket);
	MPASS(atomic_load_int(&nfrags) > 0);

	CURVNET_SET(bucket->vnet);
	fp = TAILQ_LAST(&bucket->head, ipqhead);
	KASSERT(fp != NULL && fp->ipq_expire <= time_uptime,
	    ("%s: stray callout on bucket %p, %ju < %ju", __func__, bucket,
	    fp ? (uintmax_t)fp->ipq_expire : 0, (uintmax_t)time_uptime));

	while (fp != NULL && fp->ipq_expire <= time_uptime) {
		ipq_timeout(bucket, fp);
		fp = TAILQ_LAST(&bucket->head, ipqhead);
	}
	ipreass_reschedule(bucket);
	CURVNET_RESTORE();
}

static void
ipreass_reschedule(struct ipqbucket *bucket)
{
	struct ipq *fp;

	IPQ_BUCKET_LOCK_ASSERT(bucket);

	if ((fp = TAILQ_LAST(&bucket->head, ipqhead)) != NULL) {
		time_t t;

		/* Protect against a time_uptime tick. */
		t = fp->ipq_expire - time_uptime;
		t = (t > 0) ? t : 1;
		callout_reset_sbt(&bucket->timer, SBT_1S * t, SBT_1S,
		    ipreass_callout, bucket, 0);
	} else
		callout_stop(&bucket->timer);
}

static void
ipreass_drain_vnet(void)
{
	u_int dropped = 0;

	for (int i = 0; i < V_ipq_hashsize; i++) {
		bool resched;

		IPQ_LOCK(i);
		resched = !TAILQ_EMPTY(&V_ipq[i].head);
		while (!TAILQ_EMPTY(&V_ipq[i].head)) {
			struct ipq *fp = TAILQ_FIRST(&V_ipq[i].head);

			dropped += fp->ipq_nfrags;
			ipq_free(&V_ipq[i], fp);
		}
		if (resched)
			ipreass_reschedule(&V_ipq[i]);
		KASSERT(V_ipq[i].count == 0,
		    ("%s: V_ipq[%d] count %d (V_ipq=%p)", __func__, i,
		    V_ipq[i].count, V_ipq));
		IPQ_UNLOCK(i);
	}
	IPSTAT_ADD(ips_fragdropped, dropped);
}

/*
 * Drain off all datagram fragments.
 */
static void
ipreass_drain(void)
{
	VNET_ITERATOR_DECL(vnet_iter);

	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		ipreass_drain_vnet();
		CURVNET_RESTORE();
	}
}
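/*
 * Note: ipreass_init() below registers ipreass_drain() as a vm_lowmem
 * and mbuf_lowmem event handler, so memory pressure flushes every
 * reassembly queue in every vnet.
 */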
/*
 * Initialize IP reassembly structures.
 */
MALLOC_DEFINE(M_IPREASS_HASH, "IP reass", "IP packet reassembly hash headers");
void
ipreass_vnet_init(void)
{
	int max;

	V_ipq_hashsize = IPREASS_NHASH;
	TUNABLE_INT_FETCH("net.inet.ip.reass_hashsize", &V_ipq_hashsize);
	V_ipq = malloc(sizeof(struct ipqbucket) * V_ipq_hashsize,
	    M_IPREASS_HASH, M_WAITOK);

	for (int i = 0; i < V_ipq_hashsize; i++) {
		TAILQ_INIT(&V_ipq[i].head);
		mtx_init(&V_ipq[i].lock, "IP reassembly", NULL,
		    MTX_DEF | MTX_DUPOK | MTX_NEW);
		callout_init_mtx(&V_ipq[i].timer, &V_ipq[i].lock, 0);
		V_ipq[i].count = 0;
#ifdef VIMAGE
		V_ipq[i].vnet = curvnet;
#endif
	}
	V_ipq_hashseed = arc4random();
	V_maxfragsperpacket = 16;
	V_ipq_zone = uma_zcreate("ipq", sizeof(struct ipq), NULL, NULL, NULL,
	    NULL, UMA_ALIGN_PTR, 0);
	max = IP_MAXFRAGPACKETS;
	max = uma_zone_set_max(V_ipq_zone, max);
	V_ipreass_maxbucketsize = imax(max / (V_ipq_hashsize / 2), 1);
}

void
ipreass_init(void)
{

	maxfrags = IP_MAXFRAGS;
	EVENTHANDLER_REGISTER(nmbclusters_change, ipreass_zone_change,
	    NULL, EVENTHANDLER_PRI_ANY);
	EVENTHANDLER_REGISTER(vm_lowmem, ipreass_drain, NULL,
	    LOWMEM_PRI_DEFAULT);
	EVENTHANDLER_REGISTER(mbuf_lowmem, ipreass_drain, NULL,
	    LOWMEM_PRI_DEFAULT);
}

/*
 * Drain off all datagram fragments belonging to
 * the given network interface.
 */
static void
ipreass_cleanup(void *arg __unused, struct ifnet *ifp)
{
	struct ipq *fp, *temp;
	struct mbuf *m;
	int i;

	KASSERT(ifp != NULL, ("%s: ifp is NULL", __func__));

	CURVNET_SET_QUIET(ifp->if_vnet);

	/*
	 * Skip processing if IPv4 reassembly is not initialized or
	 * has been torn down by ipreass_destroy().
	 */
	if (V_ipq_zone == NULL) {
		CURVNET_RESTORE();
		return;
	}

	for (i = 0; i < V_ipq_hashsize; i++) {
		IPQ_LOCK(i);
		/* Scan fragment list. */
		TAILQ_FOREACH_SAFE(fp, &V_ipq[i].head, ipq_list, temp) {
			for (m = fp->ipq_frags; m != NULL; m = m->m_nextpkt) {
				/* clear the no-longer-valid rcvif pointer */
				if (m->m_pkthdr.rcvif == ifp)
					m->m_pkthdr.rcvif = NULL;
			}
		}
		IPQ_UNLOCK(i);
	}
	CURVNET_RESTORE();
}
EVENTHANDLER_DEFINE(ifnet_departure_event, ipreass_cleanup, NULL, 0);

#ifdef VIMAGE
/*
 * Destroy IP reassembly structures.
 */
void
ipreass_destroy(void)
{

	ipreass_drain_vnet();
	uma_zdestroy(V_ipq_zone);
	V_ipq_zone = NULL;
	for (int i = 0; i < V_ipq_hashsize; i++)
		mtx_destroy(&V_ipq[i].lock);
	free(V_ipq, M_IPREASS_HASH);
}
#endif

/*
 * After maxnipq has been updated, propagate the change to UMA.  The UMA zone
 * max has slightly different semantics than the sysctl, for historical
 * reasons.
 */
static void
ipreass_drain_tomax(void)
{
	struct ipq *fp;
	int target;

	/*
	 * Make sure each bucket is under the new limit.  If
	 * necessary, drop enough of the oldest elements from
	 * each bucket to get under the new limit.
	 */
	for (int i = 0; i < V_ipq_hashsize; i++) {
		IPQ_LOCK(i);
		while (V_ipq[i].count > V_ipreass_maxbucketsize &&
		    (fp = TAILQ_LAST(&V_ipq[i].head, ipqhead)) != NULL)
			ipq_timeout(&V_ipq[i], fp);
		ipreass_reschedule(&V_ipq[i]);
		IPQ_UNLOCK(i);
	}
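	/*
	 * New reassembly queues are inserted at the head of a bucket
	 * (TAILQ_INSERT_HEAD in ip_reass()), so TAILQ_LAST is always the
	 * oldest entry; eviction here, in ipreass_callout() and in
	 * ipq_reuse() therefore always takes the tail.
	 */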
	/*
	 * If we are over the maximum number of fragments,
	 * drain off enough to get down to the new limit,
	 * stripping off the last elements on the queues.  Every
	 * run we strip the oldest element from each bucket.
	 */
	target = uma_zone_get_max(V_ipq_zone);
	while (uma_zone_get_cur(V_ipq_zone) > target) {
		for (int i = 0; i < V_ipq_hashsize; i++) {
			IPQ_LOCK(i);
			fp = TAILQ_LAST(&V_ipq[i].head, ipqhead);
			if (fp != NULL) {
				ipq_timeout(&V_ipq[i], fp);
				ipreass_reschedule(&V_ipq[i]);
			}
			IPQ_UNLOCK(i);
		}
	}
}

static void
ipreass_zone_change(void *tag)
{
	VNET_ITERATOR_DECL(vnet_iter);
	int max;

	maxfrags = IP_MAXFRAGS;
	max = IP_MAXFRAGPACKETS;
	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		max = uma_zone_set_max(V_ipq_zone, max);
		V_ipreass_maxbucketsize = imax(max / (V_ipq_hashsize / 2), 1);
		ipreass_drain_tomax();
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
}

/*
 * Change the limit on the UMA zone, or disable fragment allocation
 * entirely.  Since 0 and -1 are special values here, we need our own
 * handler, instead of sysctl_handle_uma_zone_max().
 */
static int
sysctl_maxfragpackets(SYSCTL_HANDLER_ARGS)
{
	int error, max;

	if (V_noreass == 0) {
		max = uma_zone_get_max(V_ipq_zone);
		if (max == 0)
			max = -1;
	} else
		max = 0;
	error = sysctl_handle_int(oidp, &max, 0, req);
	if (error || !req->newptr)
		return (error);
	if (max > 0) {
		/*
		 * XXXRW: Might be a good idea to sanity check the argument
		 * and place an extreme upper bound.
		 */
		max = uma_zone_set_max(V_ipq_zone, max);
		V_ipreass_maxbucketsize = imax(max / (V_ipq_hashsize / 2), 1);
		ipreass_drain_tomax();
		V_noreass = 0;
	} else if (max == 0) {
		V_noreass = 1;
		ipreass_drain();
	} else if (max == -1) {
		V_noreass = 0;
		uma_zone_set_max(V_ipq_zone, 0);
		V_ipreass_maxbucketsize = INT_MAX;
	} else
		return (EINVAL);
	return (0);
}

/*
 * Look for an old fragment queue header that can be reused.  Try
 * first to reuse a header from the currently locked hash bucket.
 */
static struct ipq *
ipq_reuse(int start)
{
	struct ipq *fp;
	int bucket, i;

	IPQ_LOCK_ASSERT(start);

	for (i = 0; i < V_ipq_hashsize; i++) {
		bucket = (start + i) % V_ipq_hashsize;
		if (bucket != start && IPQ_TRYLOCK(bucket) == 0)
			continue;
		fp = TAILQ_LAST(&V_ipq[bucket].head, ipqhead);
		if (fp) {
			struct mbuf *m;

			IPSTAT_ADD(ips_fragtimeout, fp->ipq_nfrags);
			atomic_subtract_int(&nfrags, fp->ipq_nfrags);
			while (fp->ipq_frags) {
				m = fp->ipq_frags;
				fp->ipq_frags = m->m_nextpkt;
				m_freem(m);
			}
			TAILQ_REMOVE(&V_ipq[bucket].head, fp, ipq_list);
			V_ipq[bucket].count--;
			ipreass_reschedule(&V_ipq[bucket]);
			if (bucket != start)
				IPQ_UNLOCK(bucket);
			break;
		}
		if (bucket != start)
			IPQ_UNLOCK(bucket);
	}
	IPQ_LOCK_ASSERT(start);
	return (fp);
}

/*
 * Free a fragment reassembly header and all associated datagrams.
 */
static void
ipq_free(struct ipqbucket *bucket, struct ipq *fp)
{
	struct mbuf *q;

	atomic_subtract_int(&nfrags, fp->ipq_nfrags);
	while (fp->ipq_frags) {
		q = fp->ipq_frags;
		fp->ipq_frags = q->m_nextpkt;
		m_freem(q);
	}
	TAILQ_REMOVE(&bucket->head, fp, ipq_list);
	bucket->count--;
	uma_zfree(V_ipq_zone, fp);
}
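/*
 * Example tuning of the sysctls defined in this file from userland
 * (values are illustrative only):
 *
 *	sysctl net.inet.ip.maxfragpackets=2048    # cap reassembly queues
 *	sysctl net.inet.ip.maxfragsperpacket=32   # fragments per datagram
 *	sysctl net.inet.ip.fragttl=15             # queue lifetime, seconds
 *
 * Setting maxfragpackets to 0 disables reassembly entirely; -1 removes
 * the limit (see sysctl_maxfragpackets() above).
 */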
/*
 * Get or set the maximum number of reassembly queues per bucket.
 */
static int
sysctl_maxfragbucketsize(SYSCTL_HANDLER_ARGS)
{
	int error, max;

	max = V_ipreass_maxbucketsize;
	error = sysctl_handle_int(oidp, &max, 0, req);
	if (error || !req->newptr)
		return (error);
	if (max <= 0)
		return (EINVAL);
	V_ipreass_maxbucketsize = max;
	ipreass_drain_tomax();
	return (0);
}

/*
 * Get or set the IP fragment time to live.
 */
static int
sysctl_fragttl(SYSCTL_HANDLER_ARGS)
{
	u_int ttl;
	int error;

	ttl = V_ipfragttl;
	error = sysctl_handle_int(oidp, &ttl, 0, req);
	if (error || !req->newptr)
		return (error);

	if (ttl < 1 || ttl > MAXTTL)
		return (EINVAL);

	atomic_store_int(&V_ipfragttl, ttl);
	return (0);
}
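/*
 * Note: MAXTTL is 255, so net.inet.ip.fragttl accepts 1..255 seconds;
 * the store above uses atomic_store_int() so that readers in ip_reass(),
 * which sample V_ipfragttl without the sysctl lock, never observe a
 * torn value.
 */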