/*-
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_param.h"
#include "opt_mbuf_stress_test.h"
#include "opt_mbuf_profiling.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/uio.h>
#include <sys/sdt.h>

SDT_PROVIDER_DEFINE(mbuf);

SDT_PROBE_DEFINE5_XLATE(mbuf, , , m__init,
    "struct mbuf *", "mbufinfo_t *",
    "uint32_t", "uint32_t",
    "uint16_t", "uint16_t",
    "uint32_t", "uint32_t",
    "uint32_t", "uint32_t");

SDT_PROBE_DEFINE3_XLATE(mbuf, , , m__gethdr,
    "uint32_t", "uint32_t",
    "uint16_t", "uint16_t",
    "struct mbuf *", "mbufinfo_t *");

SDT_PROBE_DEFINE3_XLATE(mbuf, , , m__get,
    "uint32_t", "uint32_t",
    "uint16_t", "uint16_t",
    "struct mbuf *", "mbufinfo_t *");

SDT_PROBE_DEFINE4_XLATE(mbuf, , , m__getcl,
    "uint32_t", "uint32_t",
    "uint16_t", "uint16_t",
    "uint32_t", "uint32_t",
    "struct mbuf *", "mbufinfo_t *");

SDT_PROBE_DEFINE3_XLATE(mbuf, , , m__clget,
    "struct mbuf *", "mbufinfo_t *",
    "uint32_t", "uint32_t",
    "uint32_t", "uint32_t");

SDT_PROBE_DEFINE4_XLATE(mbuf, , , m__cljget,
    "struct mbuf *", "mbufinfo_t *",
    "uint32_t", "uint32_t",
    "uint32_t", "uint32_t",
    "void*", "void*");

SDT_PROBE_DEFINE(mbuf, , , m__cljset);

SDT_PROBE_DEFINE1_XLATE(mbuf, , , m__free,
    "struct mbuf *", "mbufinfo_t *");

SDT_PROBE_DEFINE1_XLATE(mbuf, , , m__freem,
    "struct mbuf *", "mbufinfo_t *");

#include <security/mac/mac_framework.h>

int	max_linkhdr;
int	max_protohdr;
int	max_hdr;
int	max_datalen;
#ifdef MBUF_STRESS_TEST
int	m_defragpackets;
int	m_defragbytes;
int	m_defraguseless;
int	m_defragfailure;
int	m_defragrandomfailures;
#endif

/*
 * sysctl(8) exported objects
 */
SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RD,
    &max_linkhdr, 0, "Size of largest link layer header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RD,
    &max_protohdr, 0, "Size of largest protocol layer header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RD,
    &max_hdr, 0, "Size of largest link plus protocol header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RD,
    &max_datalen, 0, "Minimum space left in mbuf after max_hdr");
#ifdef MBUF_STRESS_TEST
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
    &m_defragpackets, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
    &m_defragbytes, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
    &m_defraguseless, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
    &m_defragfailure, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
    &m_defragrandomfailures, 0, "");
#endif

/*
 * Ensure the correct size of various mbuf parameters.  It could be off due
 * to compiler-induced padding and alignment artifacts.
 */
CTASSERT(MSIZE - offsetof(struct mbuf, m_dat) == MLEN);
CTASSERT(MSIZE - offsetof(struct mbuf, m_pktdat) == MHLEN);

/*
 * mbuf data storage should be 64-bit aligned regardless of architectural
 * pointer size; check this is the case with and without a packet header.
 */
CTASSERT(offsetof(struct mbuf, m_dat) % 8 == 0);
CTASSERT(offsetof(struct mbuf, m_pktdat) % 8 == 0);

/*
 * While the specific values here don't matter too much (i.e., +/- a few
 * words), we do want to ensure that changes to these values are carefully
 * reasoned about and properly documented.  This is especially the case as
 * network-protocol and device-driver modules encode these layouts, and must
 * be recompiled if the structures change.  Check these values at compile time
 * against the ones documented in comments in mbuf.h.
 *
 * NB: Possibly they should be documented there via #define's and not just
 * comments.
 */
#if defined(__LP64__)
CTASSERT(offsetof(struct mbuf, m_dat) == 32);
CTASSERT(sizeof(struct pkthdr) == 56);
CTASSERT(sizeof(struct m_ext) == 48);
#else
CTASSERT(offsetof(struct mbuf, m_dat) == 24);
CTASSERT(sizeof(struct pkthdr) == 48);
CTASSERT(sizeof(struct m_ext) == 28);
#endif

/*
 * Assert that the queue(3) macros produce code of the same size as an old
 * plain pointer does.
 */
#ifdef INVARIANTS
static struct mbuf m_assertbuf;
CTASSERT(sizeof(m_assertbuf.m_slist) == sizeof(m_assertbuf.m_next));
CTASSERT(sizeof(m_assertbuf.m_stailq) == sizeof(m_assertbuf.m_next));
CTASSERT(sizeof(m_assertbuf.m_slistpkt) == sizeof(m_assertbuf.m_nextpkt));
CTASSERT(sizeof(m_assertbuf.m_stailqpkt) == sizeof(m_assertbuf.m_nextpkt));
#endif

/*
 * Attach the cluster from *m to *n, set up m_ext in *n
 * and bump the refcount of the cluster.
 */
void
mb_dupcl(struct mbuf *n, struct mbuf *m)
{
	volatile u_int *refcnt;

	KASSERT(m->m_flags & M_EXT, ("%s: M_EXT not set on %p", __func__, m));
	KASSERT(!(n->m_flags & M_EXT), ("%s: M_EXT set on %p", __func__, n));

	n->m_ext = m->m_ext;
	n->m_flags |= M_EXT;
	n->m_flags |= m->m_flags & M_RDONLY;

	/* See if this is the mbuf that holds the embedded refcount. */
	if (m->m_ext.ext_flags & EXT_FLAG_EMBREF) {
		refcnt = n->m_ext.ext_cnt = &m->m_ext.ext_count;
		n->m_ext.ext_flags &= ~EXT_FLAG_EMBREF;
	} else {
		KASSERT(m->m_ext.ext_cnt != NULL,
		    ("%s: no refcounting pointer on %p", __func__, m));
		refcnt = m->m_ext.ext_cnt;
	}

	if (*refcnt == 1)
		*refcnt += 1;
	else
		atomic_add_int(refcnt, 1);
}

void
m_demote_pkthdr(struct mbuf *m)
{

	M_ASSERTPKTHDR(m);

	m_tag_delete_chain(m, NULL);
	m->m_flags &= ~M_PKTHDR;
	bzero(&m->m_pkthdr, sizeof(struct pkthdr));
}

/*
 * Clean up mbuf (chain) from any tags and packet headers.
 * If "all" is set then the first mbuf in the chain will be
 * cleaned too.
 */
void
m_demote(struct mbuf *m0, int all, int flags)
{
	struct mbuf *m;

	for (m = all ? m0 : m0->m_next; m != NULL; m = m->m_next) {
		KASSERT(m->m_nextpkt == NULL, ("%s: m_nextpkt in m %p, m0 %p",
		    __func__, m, m0));
		if (m->m_flags & M_PKTHDR)
			m_demote_pkthdr(m);
		m->m_flags = m->m_flags & (M_EXT | M_RDONLY | M_NOFREE | flags);
	}
}
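
/*
 * Usage sketch (hypothetical caller, not part of this file): a protocol
 * that wants to reuse a received packet chain as anonymous storage could
 * strip the packet header and tags from every mbuf, including the first:
 *
 *	m_demote(m0, 1, 0);
 *
 * Passing all == 0 instead would leave the leading mbuf's pkthdr intact.
 */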

/*
 * Sanity checks on mbuf (chain) for use in KASSERT() and general
 * debugging.
 * Returns 1 if all tests pass; otherwise it panics (or prints a
 * message) as soon as a test fails.
 * "sanitize": 0 to run M_SANITY_ACTION on a failed test, 1 to garble
 * the offending fields so they blow up later.
 */
int
m_sanity(struct mbuf *m0, int sanitize)
{
	struct mbuf *m;
	caddr_t a, b;
	int pktlen = 0;

#ifdef INVARIANTS
#define	M_SANITY_ACTION(s)	panic("mbuf %p: " s, m)
#else
#define	M_SANITY_ACTION(s)	printf("mbuf %p: " s, m)
#endif

	for (m = m0; m != NULL; m = m->m_next) {
		/*
		 * Basic pointer checks.  If any of these fails then some
		 * unrelated kernel memory before or after us is trashed.
		 * No way to recover from that.
		 */
		a = M_START(m);
		b = a + M_SIZE(m);
		if ((caddr_t)m->m_data < a)
			M_SANITY_ACTION("m_data outside mbuf data range left");
		if ((caddr_t)m->m_data > b)
			M_SANITY_ACTION("m_data outside mbuf data range right");
		if ((caddr_t)m->m_data + m->m_len > b)
			M_SANITY_ACTION("m_data + m_len exceeds mbuf space");

		/* m->m_nextpkt may only be set on first mbuf in chain. */
		if (m != m0 && m->m_nextpkt != NULL) {
			if (sanitize) {
				m_freem(m->m_nextpkt);
				m->m_nextpkt = (struct mbuf *)0xDEADC0DE;
			} else
				M_SANITY_ACTION("m->m_nextpkt on in-chain mbuf");
		}

		/* packet length (not mbuf length!) calculation */
		if (m0->m_flags & M_PKTHDR)
			pktlen += m->m_len;

		/* m_tags may only be attached to first mbuf in chain. */
		if (m != m0 && m->m_flags & M_PKTHDR &&
		    !SLIST_EMPTY(&m->m_pkthdr.tags)) {
			if (sanitize) {
				m_tag_delete_chain(m, NULL);
				/* put in 0xDEADC0DE perhaps? */
			} else
				M_SANITY_ACTION("m_tags on in-chain mbuf");
		}

		/* M_PKTHDR may only be set on first mbuf in chain */
		if (m != m0 && m->m_flags & M_PKTHDR) {
			if (sanitize) {
				bzero(&m->m_pkthdr, sizeof(m->m_pkthdr));
				m->m_flags &= ~M_PKTHDR;
				/* put in 0xDEADC0DE and leave hdr flag in */
			} else
				M_SANITY_ACTION("M_PKTHDR on in-chain mbuf");
		}
	}
	m = m0;
	if (pktlen && pktlen != m->m_pkthdr.len) {
		if (sanitize)
			m->m_pkthdr.len = 0;
		else
			M_SANITY_ACTION("m_pkthdr.len != mbuf chain length");
	}
	return 1;

#undef	M_SANITY_ACTION
}

/*
 * Non-inlined part of m_init().
 */
int
m_pkthdr_init(struct mbuf *m, int how)
{
#ifdef MAC
	int error;
#endif
	m->m_data = m->m_pktdat;
	bzero(&m->m_pkthdr, sizeof(m->m_pkthdr));
#ifdef MAC
	/* If the label init fails, fail the alloc */
	error = mac_mbuf_init(m, how);
	if (error)
		return (error);
#endif

	return (0);
}

/*
 * "Move" mbuf pkthdr from "from" to "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 */
void
m_move_pkthdr(struct mbuf *to, struct mbuf *from)
{

#if 0
	/* see below for why these are not enabled */
	M_ASSERTPKTHDR(to);
	/* Note: with MAC, this may not be a good assertion. */
	KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags),
	    ("m_move_pkthdr: to has tags"));
#endif
#ifdef MAC
	/*
	 * XXXMAC: It could be this should also occur for non-MAC?
	 */
	if (to->m_flags & M_PKTHDR)
		m_tag_delete_chain(to, NULL);
#endif
	to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
	if ((to->m_flags & M_EXT) == 0)
		to->m_data = to->m_pktdat;
	to->m_pkthdr = from->m_pkthdr;		/* especially tags */
	SLIST_INIT(&from->m_pkthdr.tags);	/* purge tags from src */
	from->m_flags &= ~M_PKTHDR;
}

/*
 * Duplicate "from"'s mbuf pkthdr in "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 * In particular, this does a deep copy of the packet tags.
 */
int
m_dup_pkthdr(struct mbuf *to, const struct mbuf *from, int how)
{

#if 0
	/*
	 * The mbuf allocator only initializes the pkthdr
	 * when the mbuf is allocated with m_gethdr(). Many users
	 * (e.g. m_copy*, m_prepend) use m_get() and then
	 * smash the pkthdr as needed causing these
	 * assertions to trip.  For now just disable them.
	 */
	M_ASSERTPKTHDR(to);
	/* Note: with MAC, this may not be a good assertion. */
	KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags), ("m_dup_pkthdr: to has tags"));
#endif
	MBUF_CHECKSLEEP(how);
#ifdef MAC
	if (to->m_flags & M_PKTHDR)
		m_tag_delete_chain(to, NULL);
#endif
	to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
	if ((to->m_flags & M_EXT) == 0)
		to->m_data = to->m_pktdat;
	to->m_pkthdr = from->m_pkthdr;
	SLIST_INIT(&to->m_pkthdr.tags);
	return (m_tag_copy_chain(to, from, how));
}
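
/*
 * Usage sketch (hypothetical, not from this file): when replacing the
 * leading mbuf of a packet, the header must follow the data.  Note that
 * m_dup_pkthdr() returns 1 on success and 0 on failure:
 *
 *	if (!m_dup_pkthdr(n, m, M_NOWAIT)) {
 *		m_free(n);
 *		return (ENOBUFS);	(packet tag copy failed)
 *	}
 */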

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(struct mbuf *m, int len, int how)
{
	struct mbuf *mn;

	if (m->m_flags & M_PKTHDR)
		mn = m_gethdr(how, m->m_type);
	else
		mn = m_get(how, m->m_type);
	if (mn == NULL) {
		m_freem(m);
		return (NULL);
	}
	if (m->m_flags & M_PKTHDR)
		m_move_pkthdr(mn, m);
	mn->m_next = m;
	m = mn;
	if (len < M_SIZE(m))
		M_ALIGN(m, len);
	m->m_len = len;
	return (m);
}

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_WAITOK/M_NOWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
struct mbuf *
m_copym(struct mbuf *m, int off0, int len, int wait)
{
	struct mbuf *n, **np;
	int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	KASSERT(off >= 0, ("m_copym, negative off %d", off));
	KASSERT(len >= 0, ("m_copym, negative len %d", len));
	MBUF_CHECKSLEEP(wait);
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == NULL) {
			KASSERT(len == M_COPYALL,
			    ("m_copym, length > size of mbuf chain"));
			break;
		}
		if (copyhdr)
			n = m_gethdr(wait, m->m_type);
		else
			n = m_get(wait, m->m_type);
		*np = n;
		if (n == NULL)
			goto nospace;
		if (copyhdr) {
			if (!m_dup_pkthdr(n, m, wait))
				goto nospace;
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data + off;
			mb_dupcl(n, m);
		} else
			bcopy(mtod(m, caddr_t) + off, mtod(n, caddr_t),
			    (u_int)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}

	return (top);
nospace:
	m_freem(top);
	return (NULL);
}
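
/*
 * Usage sketch (hypothetical, not from this file): a zero-copy,
 * read-only duplicate of an entire packet, e.g. for retransmission:
 *
 *	n = m_copym(m, 0, M_COPYALL, M_NOWAIT);
 *	if (n == NULL)
 *		return (ENOBUFS);
 *
 * Cluster data is shared, so the result must be treated as read-only;
 * check M_WRITABLE() before modifying it in place.
 */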

/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 * Preserve alignment of the first mbuf so if the creator has left
 * some room at the beginning (e.g. for inserting protocol headers)
 * the copies still have the room available.
 */
struct mbuf *
m_copypacket(struct mbuf *m, int how)
{
	struct mbuf *top, *n, *o;

	MBUF_CHECKSLEEP(how);
	n = m_get(how, m->m_type);
	top = n;
	if (n == NULL)
		goto nospace;

	if (!m_dup_pkthdr(n, m, how))
		goto nospace;
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data;
		mb_dupcl(n, m);
	} else {
		n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		o = m_get(how, m->m_type);
		if (o == NULL)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data;
			mb_dupcl(n, m);
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return top;
nospace:
	m_freem(top);
	return (NULL);
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
{
	u_int count;

	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}
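
/*
 * Usage sketch (hypothetical, not from this file): extracting a header
 * that may be split across mbufs into a flat buffer on the stack:
 *
 *	struct ip iphdr;
 *
 *	m_copydata(m, 0, sizeof(iphdr), (caddr_t)&iphdr);
 *
 * Unlike m_pullup(), this copies out of the chain and never frees or
 * reallocates it.
 */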

/*
 * Copy a packet header mbuf chain into a completely new chain, including
 * copying any mbuf clusters.  Use this instead of m_copypacket() when
 * you need a writable copy of an mbuf chain.
 */
struct mbuf *
m_dup(const struct mbuf *m, int how)
{
	struct mbuf **p, *top = NULL;
	int remain, moff, nsize;

	MBUF_CHECKSLEEP(how);
	/* Sanity check */
	if (m == NULL)
		return (NULL);
	M_ASSERTPKTHDR(m);

	/* While there's more data, get a new mbuf, tack it on, and fill it */
	remain = m->m_pkthdr.len;
	moff = 0;
	p = &top;
	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
		struct mbuf *n;

		/* Get the next new mbuf */
		if (remain >= MINCLSIZE) {
			n = m_getcl(how, m->m_type, 0);
			nsize = MCLBYTES;
		} else {
			n = m_get(how, m->m_type);
			nsize = MLEN;
		}
		if (n == NULL)
			goto nospace;

		if (top == NULL) {		/* First one, must be PKTHDR */
			if (!m_dup_pkthdr(n, m, how)) {
				m_free(n);
				goto nospace;
			}
			if ((n->m_flags & M_EXT) == 0)
				nsize = MHLEN;
			n->m_flags &= ~M_RDONLY;
		}
		n->m_len = 0;

		/* Link it into the new chain */
		*p = n;
		p = &n->m_next;

		/* Copy data from original mbuf(s) into new mbuf */
		while (n->m_len < nsize && m != NULL) {
			int chunk = min(nsize - n->m_len, m->m_len - moff);

			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
			moff += chunk;
			n->m_len += chunk;
			remain -= chunk;
			if (moff == m->m_len) {
				m = m->m_next;
				moff = 0;
			}
		}

		/* Check correct total mbuf length */
		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
		    ("%s: bogus m_pkthdr.len", __func__));
	}
	return (top);

nospace:
	m_freem(top);
	return (NULL);
}

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(struct mbuf *m, struct mbuf *n)
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (!M_WRITABLE(m) ||
		    M_TRAILINGSPACE(m) < n->m_len) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

/*
 * Concatenate two pkthdr mbuf chains.
 */
void
m_catpkt(struct mbuf *m, struct mbuf *n)
{

	M_ASSERTPKTHDR(m);
	M_ASSERTPKTHDR(n);

	m->m_pkthdr.len += n->m_pkthdr.len;
	m_demote(n, 1, 0);

	m_cat(m, n);
}
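
/*
 * Usage sketch (hypothetical, not from this file): gluing a trailer
 * chain onto a packet while keeping the header length honest:
 *
 *	m_catpkt(m, n);		(n's pkthdr is demoted and absorbed)
 *
 * Plain m_cat() would splice the data but leave m->m_pkthdr.len stale,
 * which is why m_catpkt() exists for pkthdr-bearing chains.
 */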

void
m_adj(struct mbuf *mp, int req_len)
{
	int len = req_len;
	struct mbuf *m;
	int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		if (mp->m_flags & M_PKTHDR)
			mp->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				if (m->m_next != NULL) {
					m_freem(m->m_next);
					m->m_next = NULL;
				}
				break;
			}
			count -= m->m_len;
		}
	}
}

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod will work
 * for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns NULL on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
struct mbuf *
m_pullup(struct mbuf *n, int len)
{
	struct mbuf *m;
	int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		m = m_get(M_NOWAIT, n->m_type);
		if (m == NULL)
			goto bad;
		if (n->m_flags & M_PKTHDR)
			m_move_pkthdr(m, n);
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	return (NULL);
}
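
/*
 * Usage sketch (hypothetical, not from this file): the classic pattern
 * before dereferencing a protocol header with mtod():
 *
 *	if (m->m_len < sizeof(struct ip) &&
 *	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
 *		return;			(chain was already freed)
 *	ip = mtod(m, struct ip *);
 *
 * On failure the whole chain has been freed, so the caller must not
 * touch the old pointer.
 */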

/*
 * Like m_pullup(), except a new mbuf is always allocated, and we allow
 * the amount of empty space before the data in the new mbuf to be specified
 * (in the event that the caller expects to prepend later).
 */
struct mbuf *
m_copyup(struct mbuf *n, int len, int dstoff)
{
	struct mbuf *m;
	int count, space;

	if (len > (MHLEN - dstoff))
		goto bad;
	m = m_get(M_NOWAIT, n->m_type);
	if (m == NULL)
		goto bad;
	if (n->m_flags & M_PKTHDR)
		m_move_pkthdr(m, n);
	m->m_data += dstoff;
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t),
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	return (NULL);
}

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 *
 * Note that the resulting mbufs might be read-only, because the new
 * mbuf can end up sharing an mbuf cluster with the original mbuf if
 * the "breaking point" happens to lie within a cluster mbuf. Use the
 * M_WRITABLE() macro to check for this case.
 */
struct mbuf *
m_split(struct mbuf *m0, int len0, int wait)
{
	struct mbuf *m, *n;
	u_int len = len0, remain;

	MBUF_CHECKSLEEP(wait);
	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == NULL)
		return (NULL);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR && remain == 0) {
		n = m_gethdr(wait, m0->m_type);
		if (n == NULL)
			return (NULL);
		n->m_next = m->m_next;
		m->m_next = NULL;
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		return (n);
	} else if (m0->m_flags & M_PKTHDR) {
		n = m_gethdr(wait, m0->m_type);
		if (n == NULL)
			return (NULL);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			M_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == NULL) {
				(void) m_free(n);
				return (NULL);
			} else {
				n->m_len = 0;
				return (n);
			}
		} else
			M_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = NULL;
		return (n);
	} else {
		n = m_get(wait, m->m_type);
		if (n == NULL)
			return (NULL);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data + len;
		mb_dupcl(n, m);
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = NULL;
	return (n);
}
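
/*
 * Usage sketch (hypothetical, not from this file): separating a payload
 * from a fixed-size header, e.g. during reassembly or record parsing:
 *
 *	tail = m_split(m0, hdrlen, M_NOWAIT);
 *	if (tail == NULL)
 *		return (ENOBUFS);	(m0 is restored; caller keeps it)
 *
 * Afterwards m0 holds the first hdrlen bytes and tail holds the rest;
 * both may share clusters, so check M_WRITABLE() before writing.
 */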

/*
 * Routine to copy from device local memory into mbufs.
 * Note that `off' argument is offset into first mbuf of target chain from
 * which to begin copying the data to.
 */
struct mbuf *
m_devget(char *buf, int totlen, int off, struct ifnet *ifp,
    void (*copy)(char *from, caddr_t to, u_int len))
{
	struct mbuf *m;
	struct mbuf *top = NULL, **mp = &top;
	int len;

	if (off < 0 || off > MHLEN)
		return (NULL);

	while (totlen > 0) {
		if (top == NULL) {	/* First one, must be PKTHDR */
			if (totlen + off >= MINCLSIZE) {
				m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
				len = MCLBYTES;
			} else {
				m = m_gethdr(M_NOWAIT, MT_DATA);
				len = MHLEN;

				/* Place initial small packet/header at end of mbuf */
				if (m && totlen + off + max_linkhdr <= MLEN) {
					m->m_data += max_linkhdr;
					len -= max_linkhdr;
				}
			}
			if (m == NULL)
				return NULL;
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = totlen;
		} else {
			if (totlen + off >= MINCLSIZE) {
				m = m_getcl(M_NOWAIT, MT_DATA, 0);
				len = MCLBYTES;
			} else {
				m = m_get(M_NOWAIT, MT_DATA);
				len = MLEN;
			}
			if (m == NULL) {
				m_freem(top);
				return NULL;
			}
		}
		if (off) {
			m->m_data += off;
			len -= off;
			off = 0;
		}
		m->m_len = len = min(totlen, len);
		if (copy)
			copy(buf, mtod(m, caddr_t), (u_int)len);
		else
			bcopy(buf, mtod(m, caddr_t), (u_int)len);
		buf += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
	}
	return (top);
}

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(struct mbuf *m0, int off, int len, c_caddr_t cp)
{
	int mlen;
	struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == NULL)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == NULL) {
			n = m_get(M_NOWAIT, m->m_type);
			if (n == NULL)
				goto out;
			bzero(mtod(n, caddr_t), MLEN);
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		if (m->m_next == NULL && (len > m->m_len - off)) {
			m->m_len += min(len - (m->m_len - off),
			    M_TRAILINGSPACE(m));
		}
		mlen = min(m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (u_int)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == NULL) {
			n = m_get(M_NOWAIT, m->m_type);
			if (n == NULL)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}
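
/*
 * Usage sketch (hypothetical, not from this file): overwriting a field
 * in place, e.g. patching a recomputed checksum at a known offset:
 *
 *	uint16_t sum;
 *
 *	... (compute sum, and a hypothetical offset ckoff) ...
 *	m_copyback(m0, ckoff, sizeof(sum), (c_caddr_t)&sum);
 *
 * The chain is extended with new mbufs as needed; if that allocation
 * fails the copy is silently truncated, so critical callers should
 * verify m_length(m0, NULL) afterwards.
 */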

/*
 * Append the specified data to the indicated mbuf chain.
 * Extend the mbuf chain if the new data does not fit in
 * existing space.
 *
 * Return 1 if able to complete the job; otherwise 0.
 */
int
m_append(struct mbuf *m0, int len, c_caddr_t cp)
{
	struct mbuf *m, *n;
	int remainder, space;

	for (m = m0; m->m_next != NULL; m = m->m_next)
		;
	remainder = len;
	space = M_TRAILINGSPACE(m);
	if (space > 0) {
		/*
		 * Copy into available space.
		 */
		if (space > remainder)
			space = remainder;
		bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
		m->m_len += space;
		cp += space, remainder -= space;
	}
	while (remainder > 0) {
		/*
		 * Allocate a new mbuf; could check space
		 * and allocate a cluster instead.
		 */
		n = m_get(M_NOWAIT, m->m_type);
		if (n == NULL)
			break;
		n->m_len = min(MLEN, remainder);
		bcopy(cp, mtod(n, caddr_t), n->m_len);
		cp += n->m_len, remainder -= n->m_len;
		m->m_next = n;
		m = n;
	}
	if (m0->m_flags & M_PKTHDR)
		m0->m_pkthdr.len += len - remainder;
	return (remainder == 0);
}

/*
 * Apply function f to the data in an mbuf chain starting "off" bytes from
 * the beginning, continuing for "len" bytes.
 */
int
m_apply(struct mbuf *m, int off, int len,
    int (*f)(void *, void *, u_int), void *arg)
{
	u_int count;
	int rval;

	KASSERT(off >= 0, ("m_apply, negative off %d", off));
	KASSERT(len >= 0, ("m_apply, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
		count = min(m->m_len - off, len);
		rval = (*f)(arg, mtod(m, caddr_t) + off, count);
		if (rval)
			return (rval);
		len -= count;
		off = 0;
		m = m->m_next;
	}
	return (0);
}

/*
 * Return a pointer to mbuf/offset of location in mbuf chain.
 */
struct mbuf *
m_getptr(struct mbuf *m, int loc, int *off)
{

	while (loc >= 0) {
		/* Normal end of search. */
		if (m->m_len > loc) {
			*off = loc;
			return (m);
		} else {
			loc -= m->m_len;
			if (m->m_next == NULL) {
				if (loc == 0) {
					/* Point at the end of valid data. */
					*off = m->m_len;
					return (m);
				}
				return (NULL);
			}
			m = m->m_next;
		}
	}
	return (NULL);
}

void
m_print(const struct mbuf *m, int maxlen)
{
	int len;
	int pdata;
	const struct mbuf *m2;

	if (m == NULL) {
		printf("mbuf: %p\n", m);
		return;
	}

	if (m->m_flags & M_PKTHDR)
		len = m->m_pkthdr.len;
	else
		len = -1;
	m2 = m;
	while (m2 != NULL && (len == -1 || len)) {
		pdata = m2->m_len;
		if (maxlen != -1 && pdata > maxlen)
			pdata = maxlen;
		printf("mbuf: %p len: %d, next: %p, %b%s", m2, m2->m_len,
		    m2->m_next, m2->m_flags, "\20\20freelist\17skipfw"
		    "\11proto5\10proto4\7proto3\6proto2\5proto1\4rdonly"
		    "\3eor\2pkthdr\1ext", pdata ? "" : "\n");
		if (pdata)
			printf(", %*D\n", pdata, (u_char *)m2->m_data, "-");
		if (len != -1)
			len -= m2->m_len;
		m2 = m2->m_next;
	}
	if (len > 0)
		printf("%d bytes unaccounted for.\n", len);
	return;
}

u_int
m_fixhdr(struct mbuf *m0)
{
	u_int len;

	len = m_length(m0, NULL);
	m0->m_pkthdr.len = len;
	return (len);
}

u_int
m_length(struct mbuf *m0, struct mbuf **last)
{
	struct mbuf *m;
	u_int len;

	len = 0;
	for (m = m0; m != NULL; m = m->m_next) {
		len += m->m_len;
		if (m->m_next == NULL)
			break;
	}
	if (last != NULL)
		*last = m;
	return (len);
}

/*
 * Defragment an mbuf chain, returning the shortest possible
 * chain of mbufs and clusters.  If allocation fails and
 * this cannot be completed, NULL will be returned, but
 * the passed in chain will be unchanged.  Upon success,
 * the original chain will be freed, and the new chain
 * will be returned.
 *
 * If a non-packet header is passed in, the original
 * mbuf (chain?) will be returned unharmed.
 */
struct mbuf *
m_defrag(struct mbuf *m0, int how)
{
	struct mbuf *m_new = NULL, *m_final = NULL;
	int progress = 0, length;

	MBUF_CHECKSLEEP(how);
	if (!(m0->m_flags & M_PKTHDR))
		return (m0);

	m_fixhdr(m0); /* Needed sanity check */

#ifdef MBUF_STRESS_TEST
	if (m_defragrandomfailures) {
		int temp = arc4random() & 0xff;
		if (temp == 0xba)
			goto nospace;
	}
#endif

	if (m0->m_pkthdr.len > MHLEN)
		m_final = m_getcl(how, MT_DATA, M_PKTHDR);
	else
		m_final = m_gethdr(how, MT_DATA);

	if (m_final == NULL)
		goto nospace;

	if (m_dup_pkthdr(m_final, m0, how) == 0)
		goto nospace;

	m_new = m_final;

	while (progress < m0->m_pkthdr.len) {
		length = m0->m_pkthdr.len - progress;
		if (length > MCLBYTES)
			length = MCLBYTES;

		if (m_new == NULL) {
			if (length > MLEN)
				m_new = m_getcl(how, MT_DATA, 0);
			else
				m_new = m_get(how, MT_DATA);
			if (m_new == NULL)
				goto nospace;
		}

		m_copydata(m0, progress, length, mtod(m_new, caddr_t));
		progress += length;
		m_new->m_len = length;
		if (m_new != m_final)
			m_cat(m_final, m_new);
		m_new = NULL;
	}
#ifdef MBUF_STRESS_TEST
	if (m0->m_next == NULL)
		m_defraguseless++;
#endif
	m_freem(m0);
	m0 = m_final;
#ifdef MBUF_STRESS_TEST
	m_defragpackets++;
	m_defragbytes += m0->m_pkthdr.len;
#endif
	return (m0);
nospace:
#ifdef MBUF_STRESS_TEST
	m_defragfailure++;
#endif
	if (m_final)
		m_freem(m_final);
	return (NULL);
}

/*
 * Defragment an mbuf chain, returning at most maxfrags separate
 * mbufs+clusters.  If this is not possible NULL is returned and
 * the original mbuf chain is left in its present (potentially
 * modified) state.  We use two techniques: collapsing consecutive
 * mbufs and replacing consecutive mbufs by a cluster.
 *
 * NB: this should really be named m_defrag but that name is taken
 */
struct mbuf *
m_collapse(struct mbuf *m0, int how, int maxfrags)
{
	struct mbuf *m, *n, *n2, **prev;
	u_int curfrags;

	/*
	 * Calculate the current number of frags.
	 */
	curfrags = 0;
	for (m = m0; m != NULL; m = m->m_next)
		curfrags++;
	/*
	 * First, try to collapse mbufs.  Note that we always collapse
	 * towards the front so we don't need to deal with moving the
	 * pkthdr.  This may be suboptimal if the first mbuf has much
	 * less data than the following.
	 */
	m = m0;
again:
	for (;;) {
		n = m->m_next;
		if (n == NULL)
			break;
		if (M_WRITABLE(m) &&
		    n->m_len < M_TRAILINGSPACE(m)) {
			bcopy(mtod(n, void *), mtod(m, char *) + m->m_len,
			    n->m_len);
			m->m_len += n->m_len;
			m->m_next = n->m_next;
			m_free(n);
			if (--curfrags <= maxfrags)
				return m0;
		} else
			m = n;
	}
	KASSERT(maxfrags > 1,
	    ("maxfrags %u, but normal collapse failed", maxfrags));
	/*
	 * Collapse consecutive mbufs to a cluster.
	 */
	prev = &m0->m_next;		/* NB: not the first mbuf */
	while ((n = *prev) != NULL) {
		if ((n2 = n->m_next) != NULL &&
		    n->m_len + n2->m_len < MCLBYTES) {
			m = m_getcl(how, MT_DATA, 0);
			if (m == NULL)
				goto bad;
			bcopy(mtod(n, void *), mtod(m, void *), n->m_len);
			bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len,
			    n2->m_len);
			m->m_len = n->m_len + n2->m_len;
			m->m_next = n2->m_next;
			*prev = m;
			m_free(n);
			m_free(n2);
			if (--curfrags <= maxfrags)	/* +1 cl -2 mbufs */
				return m0;
			/*
			 * Still not there, try the normal collapse
			 * again before we allocate another cluster.
			 */
			goto again;
		}
		prev = &n->m_next;
	}
	/*
	 * No place where we can collapse to a cluster; punt.
	 * This can occur if, for example, you request 2 frags
	 * but the packet requires that both be clusters (we
	 * never reallocate the first mbuf to avoid moving the
	 * packet header).
	 */
bad:
	return NULL;
}
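
/*
 * Usage sketches (hypothetical, not from this file): the common consumer
 * of both routines above is a transmit path whose DMA engine supports
 * only a limited number of segments.  With m_defrag(), compact fully and
 * retry after a mapping fails with EFBIG:
 *
 *	if (error == EFBIG) {
 *		struct mbuf *n;
 *
 *		n = m_defrag(m, M_NOWAIT);
 *		if (n == NULL) {
 *			m_freem(m);
 *			return (ENOBUFS);
 *		}
 *		m = n;		(original chain was freed by m_defrag)
 *	}
 *
 * With m_collapse(), ask only for the hardware's segment limit
 * (MY_MAX_SEGS is a placeholder for that limit):
 *
 *	n = m_collapse(m, M_NOWAIT, MY_MAX_SEGS);
 *	if (n == NULL) {
 *		m_freem(m);	(chain may be modified but is still valid)
 *		return (ENOBUFS);
 *	}
 *	m = n;
 *
 * Note that unlike m_defrag(), m_collapse() does not free the original
 * chain on failure, so the caller still owns it.
 */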
1433 * 1434 * Possible length values: 1435 * 0 no fragmentation will occur 1436 * > 0 each fragment will be of the specified length 1437 * -1 each fragment will be the same random value in length 1438 * -2 each fragment's length will be entirely random 1439 * (Random values range from 1 to 256) 1440 */ 1441 struct mbuf * 1442 m_fragment(struct mbuf *m0, int how, int length) 1443 { 1444 struct mbuf *m_new = NULL, *m_final = NULL; 1445 int progress = 0; 1446 1447 if (!(m0->m_flags & M_PKTHDR)) 1448 return (m0); 1449 1450 if ((length == 0) || (length < -2)) 1451 return (m0); 1452 1453 m_fixhdr(m0); /* Needed sanity check */ 1454 1455 m_final = m_getcl(how, MT_DATA, M_PKTHDR); 1456 1457 if (m_final == NULL) 1458 goto nospace; 1459 1460 if (m_dup_pkthdr(m_final, m0, how) == 0) 1461 goto nospace; 1462 1463 m_new = m_final; 1464 1465 if (length == -1) 1466 length = 1 + (arc4random() & 255); 1467 1468 while (progress < m0->m_pkthdr.len) { 1469 int fraglen; 1470 1471 if (length > 0) 1472 fraglen = length; 1473 else 1474 fraglen = 1 + (arc4random() & 255); 1475 if (fraglen > m0->m_pkthdr.len - progress) 1476 fraglen = m0->m_pkthdr.len - progress; 1477 1478 if (fraglen > MCLBYTES) 1479 fraglen = MCLBYTES; 1480 1481 if (m_new == NULL) { 1482 m_new = m_getcl(how, MT_DATA, 0); 1483 if (m_new == NULL) 1484 goto nospace; 1485 } 1486 1487 m_copydata(m0, progress, fraglen, mtod(m_new, caddr_t)); 1488 progress += fraglen; 1489 m_new->m_len = fraglen; 1490 if (m_new != m_final) 1491 m_cat(m_final, m_new); 1492 m_new = NULL; 1493 } 1494 m_freem(m0); 1495 m0 = m_final; 1496 return (m0); 1497 nospace: 1498 if (m_final) 1499 m_freem(m_final); 1500 /* Return the original chain on failure */ 1501 return (m0); 1502 } 1503 1504 #endif 1505 1506 /* 1507 * Copy the contents of uio into a properly sized mbuf chain. 1508 */ 1509 struct mbuf * 1510 m_uiotombuf(struct uio *uio, int how, int len, int align, int flags) 1511 { 1512 struct mbuf *m, *mb; 1513 int error, length; 1514 ssize_t total; 1515 int progress = 0; 1516 1517 /* 1518 * len can be zero or an arbitrary large value bound by 1519 * the total data supplied by the uio. 1520 */ 1521 if (len > 0) 1522 total = min(uio->uio_resid, len); 1523 else 1524 total = uio->uio_resid; 1525 1526 /* 1527 * The smallest unit returned by m_getm2() is a single mbuf 1528 * with pkthdr. We can't align past it. 1529 */ 1530 if (align >= MHLEN) 1531 return (NULL); 1532 1533 /* 1534 * Give us the full allocation or nothing. 1535 * If len is zero return the smallest empty mbuf. 1536 */ 1537 m = m_getm2(NULL, max(total + align, 1), how, MT_DATA, flags); 1538 if (m == NULL) 1539 return (NULL); 1540 m->m_data += align; 1541 1542 /* Fill all mbufs with uio data and update header information. */ 1543 for (mb = m; mb != NULL; mb = mb->m_next) { 1544 length = min(M_TRAILINGSPACE(mb), total - progress); 1545 1546 error = uiomove(mtod(mb, void *), length, uio); 1547 if (error) { 1548 m_freem(m); 1549 return (NULL); 1550 } 1551 1552 mb->m_len = length; 1553 progress += length; 1554 if (flags & M_PKTHDR) 1555 m->m_pkthdr.len += length; 1556 } 1557 KASSERT(progress == total, ("%s: progress != total", __func__)); 1558 1559 return (m); 1560 } 1561 1562 /* 1563 * Copy an mbuf chain into a uio limited by len if set. 1564 */ 1565 int 1566 m_mbuftouio(struct uio *uio, struct mbuf *m, int len) 1567 { 1568 int error, length, total; 1569 int progress = 0; 1570 1571 if (len > 0) 1572 total = min(uio->uio_resid, len); 1573 else 1574 total = uio->uio_resid; 1575 1576 /* Fill the uio with data from the mbufs. 

/*
 * Copy an mbuf chain into a uio limited by len if set.
 */
int
m_mbuftouio(struct uio *uio, struct mbuf *m, int len)
{
	int error, length, total;
	int progress = 0;

	if (len > 0)
		total = min(uio->uio_resid, len);
	else
		total = uio->uio_resid;

	/* Fill the uio with data from the mbufs. */
	for (; m != NULL; m = m->m_next) {
		length = min(m->m_len, total - progress);

		error = uiomove(mtod(m, void *), length, uio);
		if (error)
			return (error);

		progress += length;
	}

	return (0);
}
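
/*
 * Usage sketch (hypothetical, not from this file): the receive-side
 * mirror of m_uiotombuf(), draining a chain into a user buffer:
 *
 *	error = m_mbuftouio(uio, m, 0);
 *	if (error == 0)
 *		m_freem(m);	(the chain itself is left untouched)
 *
 * A len of 0 means "as much as uio_resid allows".
 */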

/*
 * Create a writable copy of the mbuf chain.  While doing this
 * we compact the chain with a goal of producing a chain with
 * at most two mbufs.  The second mbuf in this chain is likely
 * to be a cluster.  The primary purpose of this work is to create
 * a writable packet for encryption, compression, etc.  The
 * secondary goal is to linearize the data so the data can be
 * passed to crypto hardware in the most efficient manner possible.
 */
struct mbuf *
m_unshare(struct mbuf *m0, int how)
{
	struct mbuf *m, *mprev;
	struct mbuf *n, *mfirst, *mlast;
	int len, off;

	mprev = NULL;
	for (m = m0; m != NULL; m = mprev->m_next) {
		/*
		 * Regular mbufs are ignored unless there's a cluster
		 * in front of it that we can use to coalesce.  We do
		 * the latter mainly so later clusters can be coalesced
		 * also w/o having to handle them specially (i.e. convert
		 * mbuf+cluster -> cluster).  This optimization is heavily
		 * influenced by the assumption that we're running over
		 * Ethernet where MCLBYTES is large enough that the max
		 * packet size will permit lots of coalescing into a
		 * single cluster.  This in turn permits efficient
		 * crypto operations, especially when using hardware.
		 */
		if ((m->m_flags & M_EXT) == 0) {
			if (mprev && (mprev->m_flags & M_EXT) &&
			    m->m_len <= M_TRAILINGSPACE(mprev)) {
				/* XXX: this ignores mbuf types */
				memcpy(mtod(mprev, caddr_t) + mprev->m_len,
				    mtod(m, caddr_t), m->m_len);
				mprev->m_len += m->m_len;
				mprev->m_next = m->m_next;	/* unlink from chain */
				m_free(m);			/* reclaim mbuf */
#if 0
				newipsecstat.ips_mbcoalesced++;
#endif
			} else {
				mprev = m;
			}
			continue;
		}
		/*
		 * Writable mbufs are left alone (for now).
		 */
		if (M_WRITABLE(m)) {
			mprev = m;
			continue;
		}

		/*
		 * Not writable, replace with a copy or coalesce with
		 * the previous mbuf if possible (since we have to copy
		 * it anyway, we try to reduce the number of mbufs and
		 * clusters so that future work is easier).
		 */
		KASSERT(m->m_flags & M_EXT, ("m_flags 0x%x", m->m_flags));
		/* NB: we only coalesce into a cluster or larger */
		if (mprev != NULL && (mprev->m_flags & M_EXT) &&
		    m->m_len <= M_TRAILINGSPACE(mprev)) {
			/* XXX: this ignores mbuf types */
			memcpy(mtod(mprev, caddr_t) + mprev->m_len,
			    mtod(m, caddr_t), m->m_len);
			mprev->m_len += m->m_len;
			mprev->m_next = m->m_next;	/* unlink from chain */
			m_free(m);			/* reclaim mbuf */
#if 0
			newipsecstat.ips_clcoalesced++;
#endif
			continue;
		}

		/*
		 * Allocate new space to hold the copy and copy the data.
		 * We deal with jumbo mbufs (i.e. m_len > MCLBYTES) by
		 * splitting them into clusters.  We could just malloc a
		 * buffer and make it external but too many device drivers
		 * don't know how to break up the non-contiguous memory when
		 * doing DMA.
		 */
		n = m_getcl(how, m->m_type, m->m_flags);
		if (n == NULL) {
			m_freem(m0);
			return (NULL);
		}
		if (m->m_flags & M_PKTHDR) {
			KASSERT(mprev == NULL, ("%s: m0 %p, m %p has M_PKTHDR",
			    __func__, m0, m));
			m_move_pkthdr(n, m);
		}
		len = m->m_len;
		off = 0;
		mfirst = n;
		mlast = NULL;
		for (;;) {
			int cc = min(len, MCLBYTES);
			memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off, cc);
			n->m_len = cc;
			if (mlast != NULL)
				mlast->m_next = n;
			mlast = n;
#if 0
			newipsecstat.ips_clcopied++;
#endif

			len -= cc;
			if (len <= 0)
				break;
			off += cc;

			n = m_getcl(how, m->m_type, m->m_flags);
			if (n == NULL) {
				m_freem(mfirst);
				m_freem(m0);
				return (NULL);
			}
		}
		n->m_next = m->m_next;
		if (mprev == NULL)
			m0 = mfirst;		/* new head of chain */
		else
			mprev->m_next = mfirst;	/* replace old mbuf */
		m_free(m);			/* release old mbuf */
		mprev = mfirst;
	}
	return (m0);
}
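
/*
 * Usage sketch (hypothetical, not from this file): preparing a packet
 * for in-place transformation, e.g. before handing it to an encrypting
 * transform that writes into the buffers:
 *
 *	m = m_unshare(m, M_NOWAIT);
 *	if (m == NULL)
 *		return (ENOBUFS);	(the original chain was freed)
 *
 * On success every byte of the returned chain is writable, so the
 * caller need not re-check M_WRITABLE().
 */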

#ifdef MBUF_PROFILING

#define	MP_BUCKETS 32	/* don't just change this as things may overflow.*/
struct mbufprofile {
	uintmax_t wasted[MP_BUCKETS];
	uintmax_t used[MP_BUCKETS];
	uintmax_t segments[MP_BUCKETS];
} mbprof;

#define	MP_MAXDIGITS 21	/* strlen("16,000,000,000,000,000,000") == 21 */
#define	MP_NUMLINES 6
#define	MP_NUMSPERLINE 16
#define	MP_EXTRABYTES 64	/* > strlen("used:\nwasted:\nsegments:\n") */
/* work out max space needed and add a bit of spare space too */
#define	MP_MAXLINE ((MP_MAXDIGITS+1) * MP_NUMSPERLINE)
#define	MP_BUFSIZE ((MP_MAXLINE * MP_NUMLINES) + 1 + MP_EXTRABYTES)

char mbprofbuf[MP_BUFSIZE];

void
m_profile(struct mbuf *m)
{
	int segments = 0;
	int used = 0;
	int wasted = 0;

	while (m) {
		segments++;
		used += m->m_len;
		if (m->m_flags & M_EXT) {
			wasted += MHLEN - sizeof(m->m_ext) +
			    m->m_ext.ext_size - m->m_len;
		} else {
			if (m->m_flags & M_PKTHDR)
				wasted += MHLEN - m->m_len;
			else
				wasted += MLEN - m->m_len;
		}
		m = m->m_next;
	}
	/* be paranoid.. it helps */
	if (segments > MP_BUCKETS - 1)
		segments = MP_BUCKETS - 1;
	if (used > 100000)
		used = 100000;
	if (wasted > 100000)
		wasted = 100000;
	/* store in the appropriate bucket */
	/* don't bother locking. if it's slightly off, so what? */
	mbprof.segments[segments]++;
	mbprof.used[fls(used)]++;
	mbprof.wasted[fls(wasted)]++;
}

static void
mbprof_textify(void)
{
	int offset;
	char *c;
	uint64_t *p;

	p = &mbprof.wasted[0];
	c = mbprofbuf;
	offset = snprintf(c, MP_MAXLINE + 10,
	    "wasted:\n"
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#ifdef BIG_ARRAY
	p = &mbprof.wasted[16];
	c += offset;
	offset = snprintf(c, MP_MAXLINE,
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#endif
	p = &mbprof.used[0];
	c += offset;
	offset = snprintf(c, MP_MAXLINE + 10,
	    "used:\n"
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#ifdef BIG_ARRAY
	p = &mbprof.used[16];
	c += offset;
	offset = snprintf(c, MP_MAXLINE,
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#endif
	p = &mbprof.segments[0];
	c += offset;
	offset = snprintf(c, MP_MAXLINE + 10,
	    "segments:\n"
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#ifdef BIG_ARRAY
	p = &mbprof.segments[16];
	c += offset;
	offset = snprintf(c, MP_MAXLINE,
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#endif
}

static int
mbprof_handler(SYSCTL_HANDLER_ARGS)
{
	int error;

	mbprof_textify();
	error = SYSCTL_OUT(req, mbprofbuf, strlen(mbprofbuf) + 1);
	return (error);
}

static int
mbprof_clr_handler(SYSCTL_HANDLER_ARGS)
{
	int clear, error;

	clear = 0;
	error = sysctl_handle_int(oidp, &clear, 0, req);
	if (error || !req->newptr)
		return (error);

	if (clear) {
		bzero(&mbprof, sizeof(mbprof));
	}

	return (error);
}

SYSCTL_PROC(_kern_ipc, OID_AUTO, mbufprofile, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, mbprof_handler, "A", "mbuf profiling statistics");

SYSCTL_PROC(_kern_ipc, OID_AUTO, mbufprofileclr, CTLTYPE_INT|CTLFLAG_RW,
    NULL, 0, mbprof_clr_handler, "I", "clear mbuf profiling statistics");
#endif
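
/*
 * Usage sketch (hypothetical, from userland): with MBUF_PROFILING
 * compiled in, the histograms above are read and reset via sysctl(8):
 *
 *	$ sysctl kern.ipc.mbufprofile
 *	$ sysctl kern.ipc.mbufprofileclr=1
 */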