/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_param.h"
#include "opt_mbuf_stress_test.h"
#include "opt_mbuf_profiling.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/uio.h>
#include <sys/vmmeter.h>
#include <sys/sdt.h>
#include <vm/vm.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>

SDT_PROBE_DEFINE5_XLATE(sdt, , , m__init,
    "struct mbuf *", "mbufinfo_t *",
    "uint32_t", "uint32_t",
    "uint16_t", "uint16_t",
    "uint32_t", "uint32_t",
    "uint32_t", "uint32_t");

SDT_PROBE_DEFINE3_XLATE(sdt, , , m__gethdr,
    "uint32_t", "uint32_t",
    "uint16_t", "uint16_t",
    "struct mbuf *", "mbufinfo_t *");

SDT_PROBE_DEFINE3_XLATE(sdt, , , m__get,
    "uint32_t", "uint32_t",
    "uint16_t", "uint16_t",
    "struct mbuf *", "mbufinfo_t *");

SDT_PROBE_DEFINE4_XLATE(sdt, , , m__getcl,
    "uint32_t", "uint32_t",
    "uint16_t", "uint16_t",
    "uint32_t", "uint32_t",
    "struct mbuf *", "mbufinfo_t *");

SDT_PROBE_DEFINE3_XLATE(sdt, , , m__clget,
    "struct mbuf *", "mbufinfo_t *",
    "uint32_t", "uint32_t",
    "uint32_t", "uint32_t");

SDT_PROBE_DEFINE4_XLATE(sdt, , , m__cljget,
    "struct mbuf *", "mbufinfo_t *",
    "uint32_t", "uint32_t",
    "uint32_t", "uint32_t",
    "void*", "void*");

SDT_PROBE_DEFINE(sdt, , , m__cljset);

SDT_PROBE_DEFINE1_XLATE(sdt, , , m__free,
    "struct mbuf *", "mbufinfo_t *");

SDT_PROBE_DEFINE1_XLATE(sdt, , , m__freem,
    "struct mbuf *", "mbufinfo_t *");

#include <security/mac/mac_framework.h>

int	max_linkhdr;
int	max_protohdr;
int	max_hdr;
int	max_datalen;
#ifdef MBUF_STRESS_TEST
int	m_defragpackets;
int	m_defragbytes;
int	m_defraguseless;
int	m_defragfailure;
int	m_defragrandomfailures;
#endif

/*
 * sysctl(8) exported objects
 */
SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RD,
    &max_linkhdr, 0, "Size of largest link layer header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RD,
    &max_protohdr, 0, "Size of largest protocol layer header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RD,
    &max_hdr, 0, "Size of largest link plus protocol header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RD,
    &max_datalen, 0, "Minimum space left in mbuf after max_hdr");
#ifdef MBUF_STRESS_TEST
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
    &m_defragpackets, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
    &m_defragbytes, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
    &m_defraguseless, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
    &m_defragfailure, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
    &m_defragrandomfailures, 0, "");
#endif

/*
 * Ensure the correct size of various mbuf parameters.  It could be off due
 * to compiler-induced padding and alignment artifacts.
 */
CTASSERT(MSIZE - offsetof(struct mbuf, m_dat) == MLEN);
CTASSERT(MSIZE - offsetof(struct mbuf, m_pktdat) == MHLEN);

/*
 * mbuf data storage should be 64-bit aligned regardless of architectural
 * pointer size; check this is the case with and without a packet header.
 */
CTASSERT(offsetof(struct mbuf, m_dat) % 8 == 0);
CTASSERT(offsetof(struct mbuf, m_pktdat) % 8 == 0);

/*
 * While the specific values here don't matter too much (i.e., +/- a few
 * words), we do want to ensure that changes to these values are carefully
 * reasoned about and properly documented.  This is especially the case as
 * network-protocol and device-driver modules encode these layouts, and must
 * be recompiled if the structures change.  Check these values at compile time
 * against the ones documented in comments in mbuf.h.
 *
 * NB: Possibly they should be documented there via #define's and not just
 * comments.
 */
#if defined(__LP64__)
CTASSERT(offsetof(struct mbuf, m_dat) == 32);
CTASSERT(sizeof(struct pkthdr) == 56);
CTASSERT(sizeof(struct m_ext) == 168);
#else
CTASSERT(offsetof(struct mbuf, m_dat) == 24);
CTASSERT(sizeof(struct pkthdr) == 48);
CTASSERT(sizeof(struct m_ext) == 184);
#endif

/*
 * Assert that the queue(3) macros produce code of the same size as an old
 * plain pointer does.
 */
#ifdef INVARIANTS
static struct mbuf __used m_assertbuf;
CTASSERT(sizeof(m_assertbuf.m_slist) == sizeof(m_assertbuf.m_next));
CTASSERT(sizeof(m_assertbuf.m_stailq) == sizeof(m_assertbuf.m_next));
CTASSERT(sizeof(m_assertbuf.m_slistpkt) == sizeof(m_assertbuf.m_nextpkt));
CTASSERT(sizeof(m_assertbuf.m_stailqpkt) == sizeof(m_assertbuf.m_nextpkt));
#endif
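
/*
 * Editorial note (not part of the original file): for most kinds of
 * external storage the reference count lives embedded in the mbuf that
 * first attached the cluster (EXT_FLAG_EMBREF).  mb_dupcl() below hands
 * each later sharer a pointer to that embedded counter, and bumps it
 * atomically only once the cluster is actually shared, so the common
 * unshared case avoids atomics entirely.
 */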

/*
 * Attach the cluster from *m to *n, set up m_ext in *n
 * and bump the refcount of the cluster.
 */
void
mb_dupcl(struct mbuf *n, struct mbuf *m)
{
	volatile u_int *refcnt;

	KASSERT(m->m_flags & M_EXT, ("%s: M_EXT not set on %p", __func__, m));
	KASSERT(!(n->m_flags & M_EXT), ("%s: M_EXT set on %p", __func__, n));

	/*
	 * Cache access optimization.  For most kinds of external
	 * storage we don't need a full copy of m_ext, since the
	 * holder of the 'ext_count' is responsible for carrying the
	 * free routine and its arguments.  The exception is EXT_EXTREF,
	 * where 'ext_cnt' doesn't point into the mbuf at all.
	 */
	if (m->m_ext.ext_type == EXT_EXTREF)
		bcopy(&m->m_ext, &n->m_ext, sizeof(struct m_ext));
	else if (m->m_ext.ext_type == EXT_PGS)
		bcopy(&m->m_ext_pgs, &n->m_ext_pgs,
		    sizeof(struct mbuf_ext_pgs));
	else
		bcopy(&m->m_ext, &n->m_ext, m_ext_copylen);
	n->m_flags |= M_EXT;
	n->m_flags |= m->m_flags & (M_RDONLY | M_NOMAP);

	/* See if this is the mbuf that holds the embedded refcount. */
	if (m->m_ext.ext_flags & EXT_FLAG_EMBREF) {
		refcnt = n->m_ext.ext_cnt = &m->m_ext.ext_count;
		n->m_ext.ext_flags &= ~EXT_FLAG_EMBREF;
	} else {
		KASSERT(m->m_ext.ext_cnt != NULL,
		    ("%s: no refcounting pointer on %p", __func__, m));
		refcnt = m->m_ext.ext_cnt;
	}

	if (*refcnt == 1)
		*refcnt += 1;
	else
		atomic_add_int(refcnt, 1);
}

void
m_demote_pkthdr(struct mbuf *m)
{

	M_ASSERTPKTHDR(m);

	m_tag_delete_chain(m, NULL);
	m->m_flags &= ~M_PKTHDR;
	bzero(&m->m_pkthdr, sizeof(struct pkthdr));
}

/*
 * Clean up mbuf (chain) from any tags and packet headers.
 * If "all" is set then the first mbuf in the chain will be
 * cleaned too.
 */
void
m_demote(struct mbuf *m0, int all, int flags)
{
	struct mbuf *m;

	for (m = all ? m0 : m0->m_next; m != NULL; m = m->m_next) {
		KASSERT(m->m_nextpkt == NULL, ("%s: m_nextpkt in m %p, m0 %p",
		    __func__, m, m0));
		if (m->m_flags & M_PKTHDR)
			m_demote_pkthdr(m);
		m->m_flags = m->m_flags & (M_EXT | M_RDONLY | M_NOFREE |
		    M_NOMAP | flags);
	}
}

/*
 * Sanity checks on mbuf (chain) for use in KASSERT() and general
 * debugging.
 * Returns 0 (or panics) when bad and 1 when all tests have passed.
 * sanitize: 0 to run M_SANITY_ACTION on failure, 1 to garble things
 * so they blow up later.
 */
int
m_sanity(struct mbuf *m0, int sanitize)
{
	struct mbuf *m;
	caddr_t a, b;
	int pktlen = 0;

#ifdef INVARIANTS
#define	M_SANITY_ACTION(s)	panic("mbuf %p: " s, m)
#else
#define	M_SANITY_ACTION(s)	printf("mbuf %p: " s, m)
#endif

	for (m = m0; m != NULL; m = m->m_next) {
		/*
		 * Basic pointer checks.  If any of these fails then some
		 * unrelated kernel memory before or after us is trashed.
		 * No way to recover from that.
		 */
		a = M_START(m);
		b = a + M_SIZE(m);
		if ((caddr_t)m->m_data < a)
			M_SANITY_ACTION("m_data outside mbuf data range left");
		if ((caddr_t)m->m_data > b)
			M_SANITY_ACTION("m_data outside mbuf data range right");
		if ((caddr_t)m->m_data + m->m_len > b)
			M_SANITY_ACTION("m_data + m_len exceeds mbuf space");

		/* m->m_nextpkt may only be set on first mbuf in chain. */
		if (m != m0 && m->m_nextpkt != NULL) {
			if (sanitize) {
				m_freem(m->m_nextpkt);
				m->m_nextpkt = (struct mbuf *)0xDEADC0DE;
			} else
				M_SANITY_ACTION("m->m_nextpkt on in-chain mbuf");
		}

		/* packet length (not mbuf length!) calculation */
		if (m0->m_flags & M_PKTHDR)
			pktlen += m->m_len;

		/* m_tags may only be attached to first mbuf in chain. */
		if (m != m0 && m->m_flags & M_PKTHDR &&
		    !SLIST_EMPTY(&m->m_pkthdr.tags)) {
			if (sanitize) {
				m_tag_delete_chain(m, NULL);
				/* put in 0xDEADC0DE perhaps? */
			} else
				M_SANITY_ACTION("m_tags on in-chain mbuf");
		}

		/* M_PKTHDR may only be set on first mbuf in chain */
		if (m != m0 && m->m_flags & M_PKTHDR) {
			if (sanitize) {
				bzero(&m->m_pkthdr, sizeof(m->m_pkthdr));
				m->m_flags &= ~M_PKTHDR;
				/* put in 0xDEADC0DE and leave hdr flag in */
			} else
				M_SANITY_ACTION("M_PKTHDR on in-chain mbuf");
		}
	}
	m = m0;
	if (pktlen && pktlen != m->m_pkthdr.len) {
		if (sanitize)
			m->m_pkthdr.len = 0;
		else
			M_SANITY_ACTION("m_pkthdr.len != mbuf chain length");
	}
	return (1);

#undef	M_SANITY_ACTION
}

/*
 * Non-inlined part of m_init().
 */
int
m_pkthdr_init(struct mbuf *m, int how)
{
#ifdef MAC
	int error;
#endif
	m->m_data = m->m_pktdat;
	bzero(&m->m_pkthdr, sizeof(m->m_pkthdr));
#ifdef NUMA
	m->m_pkthdr.numa_domain = M_NODOM;
#endif
#ifdef MAC
	/* If the label init fails, fail the alloc */
	error = mac_mbuf_init(m, how);
	if (error)
		return (error);
#endif

	return (0);
}

/*
 * "Move" mbuf pkthdr from "from" to "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 */
void
m_move_pkthdr(struct mbuf *to, struct mbuf *from)
{

#if 0
	/* see below for why these are not enabled */
	M_ASSERTPKTHDR(to);
	/* Note: with MAC, this may not be a good assertion. */
	KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags),
	    ("m_move_pkthdr: to has tags"));
#endif
#ifdef MAC
	/*
	 * XXXMAC: It could be this should also occur for non-MAC?
	 */
	if (to->m_flags & M_PKTHDR)
		m_tag_delete_chain(to, NULL);
#endif
	to->m_flags = (from->m_flags & M_COPYFLAGS) |
	    (to->m_flags & (M_EXT | M_NOMAP));
	if ((to->m_flags & M_EXT) == 0)
		to->m_data = to->m_pktdat;
	to->m_pkthdr = from->m_pkthdr;		/* especially tags */
	SLIST_INIT(&from->m_pkthdr.tags);	/* purge tags from src */
	from->m_flags &= ~M_PKTHDR;
	if (from->m_pkthdr.csum_flags & CSUM_SND_TAG) {
		from->m_pkthdr.csum_flags &= ~CSUM_SND_TAG;
		from->m_pkthdr.snd_tag = NULL;
	}
}

/*
 * Duplicate "from"'s mbuf pkthdr in "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 * In particular, this does a deep copy of the packet tags.
 */
int
m_dup_pkthdr(struct mbuf *to, const struct mbuf *from, int how)
{

#if 0
	/*
	 * The mbuf allocator only initializes the pkthdr
	 * when the mbuf is allocated with m_gethdr().  Many users
	 * (e.g. m_copy*, m_prepend) use m_get() and then
	 * smash the pkthdr as needed causing these
	 * assertions to trip.  For now just disable them.
	 */
	M_ASSERTPKTHDR(to);
	/* Note: with MAC, this may not be a good assertion. */
	KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags), ("m_dup_pkthdr: to has tags"));
#endif
	MBUF_CHECKSLEEP(how);
#ifdef MAC
	if (to->m_flags & M_PKTHDR)
		m_tag_delete_chain(to, NULL);
#endif
	to->m_flags = (from->m_flags & M_COPYFLAGS) |
	    (to->m_flags & (M_EXT | M_NOMAP));
	if ((to->m_flags & M_EXT) == 0)
		to->m_data = to->m_pktdat;
	to->m_pkthdr = from->m_pkthdr;
	if (from->m_pkthdr.csum_flags & CSUM_SND_TAG)
		m_snd_tag_ref(from->m_pkthdr.snd_tag);
	SLIST_INIT(&to->m_pkthdr.tags);
	return (m_tag_copy_chain(to, from, how));
}

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(struct mbuf *m, int len, int how)
{
	struct mbuf *mn;

	if (m->m_flags & M_PKTHDR)
		mn = m_gethdr(how, m->m_type);
	else
		mn = m_get(how, m->m_type);
	if (mn == NULL) {
		m_freem(m);
		return (NULL);
	}
	if (m->m_flags & M_PKTHDR)
		m_move_pkthdr(mn, m);
	mn->m_next = m;
	m = mn;
	if (len < M_SIZE(m))
		M_ALIGN(m, len);
	m->m_len = len;
	return (m);
}

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_WAITOK/M_NOWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
struct mbuf *
m_copym(struct mbuf *m, int off0, int len, int wait)
{
	struct mbuf *n, **np;
	int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	KASSERT(off >= 0, ("m_copym, negative off %d", off));
	KASSERT(len >= 0, ("m_copym, negative len %d", len));
	MBUF_CHECKSLEEP(wait);
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = NULL;
	while (len > 0) {
		if (m == NULL) {
			KASSERT(len == M_COPYALL,
			    ("m_copym, length > size of mbuf chain"));
			break;
		}
		if (copyhdr)
			n = m_gethdr(wait, m->m_type);
		else
			n = m_get(wait, m->m_type);
		*np = n;
		if (n == NULL)
			goto nospace;
		if (copyhdr) {
			if (!m_dup_pkthdr(n, m, wait))
				goto nospace;
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data + off;
			mb_dupcl(n, m);
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (u_int)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}

	return (top);
nospace:
	m_freem(top);
	return (NULL);
}
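
/*
 * Editorial example (not part of the original file): taking a cheap
 * read-only reference-counted copy of a whole packet, e.g. before
 * handing the original to a path that will consume it:
 *
 *	n = m_copym(m, 0, M_COPYALL, M_NOWAIT);
 *	if (n == NULL)
 *		return (ENOBUFS);	(m itself is left untouched)
 *
 * Cluster data in the copy is shared with the original, so check
 * M_WRITABLE() before modifying either chain in place.
 */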

/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 * Preserve alignment of the first mbuf so if the creator has left
 * some room at the beginning (e.g. for inserting protocol headers)
 * the copies still have the room available.
 */
struct mbuf *
m_copypacket(struct mbuf *m, int how)
{
	struct mbuf *top, *n, *o;

	MBUF_CHECKSLEEP(how);
	n = m_get(how, m->m_type);
	top = n;
	if (n == NULL)
		goto nospace;

	if (!m_dup_pkthdr(n, m, how))
		goto nospace;
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data;
		mb_dupcl(n, m);
	} else {
		n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		o = m_get(how, m->m_type);
		if (o == NULL)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data;
			mb_dupcl(n, m);
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return (top);
nospace:
	m_freem(top);
	return (NULL);
}

static void
m_copyfromunmapped(const struct mbuf *m, int off, int len, caddr_t cp)
{
	struct iovec iov;
	struct uio uio;
	int error;

	KASSERT(off >= 0, ("m_copyfromunmapped: negative off %d", off));
	KASSERT(len >= 0, ("m_copyfromunmapped: negative len %d", len));
	KASSERT(off < m->m_len,
	    ("m_copyfromunmapped: len exceeds mbuf length"));
	iov.iov_base = cp;
	iov.iov_len = len;
	uio.uio_resid = len;
	uio.uio_iov = &iov;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_iovcnt = 1;
	uio.uio_offset = 0;
	uio.uio_rw = UIO_READ;
	error = m_unmappedtouio(m, off, &uio, len);
	KASSERT(error == 0, ("m_unmappedtouio failed: off %d, len %d", off,
	    len));
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
{
	u_int count;

	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		if ((m->m_flags & M_NOMAP) != 0)
			m_copyfromunmapped(m, off, count, cp);
		else
			bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}
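
/*
 * Editorial example (not part of the original file): m_copydata() is
 * the safe way to read a header that may straddle mbufs when a
 * contiguous view via m_pullup() is not wanted:
 *
 *	struct tcphdr th;
 *
 *	m_copydata(m, hdroff, sizeof(th), (caddr_t)&th);
 *
 * "hdroff" is a hypothetical offset of the header within the chain.
 */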

/*
 * Copy a packet header mbuf chain into a completely new chain, including
 * copying any mbuf clusters.  Use this instead of m_copypacket() when
 * you need a writable copy of an mbuf chain.
 */
struct mbuf *
m_dup(const struct mbuf *m, int how)
{
	struct mbuf **p, *top = NULL;
	int remain, moff, nsize;

	MBUF_CHECKSLEEP(how);
	/* Sanity check */
	if (m == NULL)
		return (NULL);
	M_ASSERTPKTHDR(m);

	/* While there's more data, get a new mbuf, tack it on, and fill it */
	remain = m->m_pkthdr.len;
	moff = 0;
	p = &top;
	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
		struct mbuf *n;

		/* Get the next new mbuf */
		if (remain >= MINCLSIZE) {
			n = m_getcl(how, m->m_type, 0);
			nsize = MCLBYTES;
		} else {
			n = m_get(how, m->m_type);
			nsize = MLEN;
		}
		if (n == NULL)
			goto nospace;

		if (top == NULL) {		/* First one, must be PKTHDR */
			if (!m_dup_pkthdr(n, m, how)) {
				m_free(n);
				goto nospace;
			}
			if ((n->m_flags & M_EXT) == 0)
				nsize = MHLEN;
			n->m_flags &= ~M_RDONLY;
		}
		n->m_len = 0;

		/* Link it into the new chain */
		*p = n;
		p = &n->m_next;

		/* Copy data from original mbuf(s) into new mbuf */
		while (n->m_len < nsize && m != NULL) {
			int chunk = min(nsize - n->m_len, m->m_len - moff);

			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
			moff += chunk;
			n->m_len += chunk;
			remain -= chunk;
			if (moff == m->m_len) {
				m = m->m_next;
				moff = 0;
			}
		}

		/* Check correct total mbuf length */
		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
		    ("%s: bogus m_pkthdr.len", __func__));
	}
	return (top);

nospace:
	m_freem(top);
	return (NULL);
}

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(struct mbuf *m, struct mbuf *n)
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (!M_WRITABLE(m) ||
		    (n->m_flags & M_NOMAP) != 0 ||
		    M_TRAILINGSPACE(m) < n->m_len) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

/*
 * Concatenate two pkthdr mbuf chains.
 */
void
m_catpkt(struct mbuf *m, struct mbuf *n)
{

	M_ASSERTPKTHDR(m);
	M_ASSERTPKTHDR(n);

	m->m_pkthdr.len += n->m_pkthdr.len;
	m_demote(n, 1, 0);

	m_cat(m, n);
}

void
m_adj(struct mbuf *mp, int req_len)
{
	int len = req_len;
	struct mbuf *m;
	int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		if (mp->m_flags & M_PKTHDR)
			mp->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				if (m->m_next != NULL) {
					m_freem(m->m_next);
					m->m_next = NULL;
				}
				break;
			}
			count -= m->m_len;
		}
	}
}

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod will work
 * for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
struct mbuf *
m_pullup(struct mbuf *n, int len)
{
	struct mbuf *m;
	int count;
	int space;

	KASSERT((n->m_flags & M_NOMAP) == 0,
	    ("%s: unmapped mbuf %p", __func__, n));

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		m = m_get(M_NOWAIT, n->m_type);
		if (m == NULL)
			goto bad;
		if (n->m_flags & M_PKTHDR)
			m_move_pkthdr(m, n);
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	return (NULL);
}
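
/*
 * Editorial example (not part of the original file): the canonical
 * m_pullup() idiom in protocol input paths.  On failure the chain has
 * already been freed, so the caller must not touch it again:
 *
 *	if (m->m_len < sizeof(struct ip) &&
 *	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
 *		return;			(chain already freed)
 *	ip = mtod(m, struct ip *);
 */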

/*
 * Like m_pullup(), except a new mbuf is always allocated, and we allow
 * the amount of empty space before the data in the new mbuf to be specified
 * (in the event that the caller expects to prepend later).
 */
struct mbuf *
m_copyup(struct mbuf *n, int len, int dstoff)
{
	struct mbuf *m;
	int count, space;

	if (len > (MHLEN - dstoff))
		goto bad;
	m = m_get(M_NOWAIT, n->m_type);
	if (m == NULL)
		goto bad;
	if (n->m_flags & M_PKTHDR)
		m_move_pkthdr(m, n);
	m->m_data += dstoff;
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t),
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	return (NULL);
}

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 *
 * Note that the resulting mbufs might be read-only, because the new
 * mbuf can end up sharing an mbuf cluster with the original mbuf if
 * the "breaking point" happens to lie within a cluster mbuf. Use the
 * M_WRITABLE() macro to check for this case.
 */
struct mbuf *
m_split(struct mbuf *m0, int len0, int wait)
{
	struct mbuf *m, *n;
	u_int len = len0, remain;

	MBUF_CHECKSLEEP(wait);
	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == NULL)
		return (NULL);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR && remain == 0) {
		n = m_gethdr(wait, m0->m_type);
		if (n == NULL)
			return (NULL);
		n->m_next = m->m_next;
		m->m_next = NULL;
		if (m0->m_pkthdr.csum_flags & CSUM_SND_TAG) {
			n->m_pkthdr.snd_tag =
			    m_snd_tag_ref(m0->m_pkthdr.snd_tag);
			n->m_pkthdr.csum_flags |= CSUM_SND_TAG;
		} else
			n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		return (n);
	} else if (m0->m_flags & M_PKTHDR) {
		n = m_gethdr(wait, m0->m_type);
		if (n == NULL)
			return (NULL);
		if (m0->m_pkthdr.csum_flags & CSUM_SND_TAG) {
			n->m_pkthdr.snd_tag =
			    m_snd_tag_ref(m0->m_pkthdr.snd_tag);
			n->m_pkthdr.csum_flags |= CSUM_SND_TAG;
		} else
			n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			M_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == NULL) {
				(void) m_free(n);
				return (NULL);
			} else {
				n->m_len = 0;
				return (n);
			}
		} else
			M_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = NULL;
		return (n);
	} else {
		n = m_get(wait, m->m_type);
		if (n == NULL)
			return (NULL);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data + len;
		mb_dupcl(n, m);
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = NULL;
	return (n);
}
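
/*
 * Editorial example (not part of the original file): splitting a
 * record off the front of a chain, e.g. to handle the first "reclen"
 * bytes separately:
 *
 *	tail = m_split(m, reclen, M_NOWAIT);
 *	if (tail == NULL)
 *		handle the failure; "m" is still intact
 *
 * For a pkthdr chain both halves end up with a packet header and an
 * adjusted m_pkthdr.len; the halves may share a cluster across the
 * break, so check M_WRITABLE() before in-place modification.
 */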
/*
 * Routine to copy from device local memory into mbufs.
 * Note that `off' argument is offset into first mbuf of target chain from
 * which to begin copying the data to.
 */
struct mbuf *
m_devget(char *buf, int totlen, int off, struct ifnet *ifp,
    void (*copy)(char *from, caddr_t to, u_int len))
{
	struct mbuf *m;
	struct mbuf *top = NULL, **mp = &top;
	int len;

	if (off < 0 || off > MHLEN)
		return (NULL);

	while (totlen > 0) {
		if (top == NULL) {	/* First one, must be PKTHDR */
			if (totlen + off >= MINCLSIZE) {
				m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
				len = MCLBYTES;
			} else {
				m = m_gethdr(M_NOWAIT, MT_DATA);
				len = MHLEN;

				/* Place initial small packet/header at end of mbuf */
				if (m && totlen + off + max_linkhdr <= MHLEN) {
					m->m_data += max_linkhdr;
					len -= max_linkhdr;
				}
			}
			if (m == NULL)
				return (NULL);
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = totlen;
		} else {
			if (totlen + off >= MINCLSIZE) {
				m = m_getcl(M_NOWAIT, MT_DATA, 0);
				len = MCLBYTES;
			} else {
				m = m_get(M_NOWAIT, MT_DATA);
				len = MLEN;
			}
			if (m == NULL) {
				m_freem(top);
				return (NULL);
			}
		}
		if (off) {
			m->m_data += off;
			len -= off;
			off = 0;
		}
		m->m_len = len = min(totlen, len);
		if (copy)
			copy(buf, mtod(m, caddr_t), (u_int)len);
		else
			bcopy(buf, mtod(m, caddr_t), (u_int)len);
		buf += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
	}
	return (top);
}

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(struct mbuf *m0, int off, int len, c_caddr_t cp)
{
	int mlen;
	struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == NULL)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == NULL) {
			n = m_get(M_NOWAIT, m->m_type);
			if (n == NULL)
				goto out;
			bzero(mtod(n, caddr_t), MLEN);
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		if (m->m_next == NULL && (len > m->m_len - off)) {
			m->m_len += min(len - (m->m_len - off),
			    M_TRAILINGSPACE(m));
		}
		mlen = min(m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (u_int)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == NULL) {
			n = m_get(M_NOWAIT, m->m_type);
			if (n == NULL)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}

/*
 * Append the specified data to the indicated mbuf chain.
 * Extend the mbuf chain if the new data does not fit in
 * existing space.
 *
 * Return 1 if able to complete the job; otherwise 0.
 */
int
m_append(struct mbuf *m0, int len, c_caddr_t cp)
{
	struct mbuf *m, *n;
	int remainder, space;

	for (m = m0; m->m_next != NULL; m = m->m_next)
		;
	remainder = len;
	space = M_TRAILINGSPACE(m);
	if (space > 0) {
		/*
		 * Copy into available space.
		 */
		if (space > remainder)
			space = remainder;
		bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
		m->m_len += space;
		cp += space, remainder -= space;
	}
	while (remainder > 0) {
		/*
		 * Allocate a new mbuf; could check space
		 * and allocate a cluster instead.
		 */
		n = m_get(M_NOWAIT, m->m_type);
		if (n == NULL)
			break;
		n->m_len = min(MLEN, remainder);
		bcopy(cp, mtod(n, caddr_t), n->m_len);
		cp += n->m_len, remainder -= n->m_len;
		m->m_next = n;
		m = n;
	}
	if (m0->m_flags & M_PKTHDR)
		m0->m_pkthdr.len += len - remainder;
	return (remainder == 0);
}

/*
 * Apply function f to the data in an mbuf chain starting "off" bytes from
 * the beginning, continuing for "len" bytes.
 */
int
m_apply(struct mbuf *m, int off, int len,
    int (*f)(void *, void *, u_int), void *arg)
{
	u_int count;
	int rval;

	KASSERT(off >= 0, ("m_apply, negative off %d", off));
	KASSERT(len >= 0, ("m_apply, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_apply, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		rval = (*f)(arg, mtod(m, caddr_t) + off, count);
		if (rval)
			return (rval);
		len -= count;
		off = 0;
		m = m->m_next;
	}
	return (0);
}

/*
 * Return a pointer to mbuf/offset of location in mbuf chain.
 */
struct mbuf *
m_getptr(struct mbuf *m, int loc, int *off)
{

	while (loc >= 0) {
		/* Normal end of search. */
		if (m->m_len > loc) {
			*off = loc;
			return (m);
		} else {
			loc -= m->m_len;
			if (m->m_next == NULL) {
				if (loc == 0) {
					/* Point at the end of valid data. */
					*off = m->m_len;
					return (m);
				}
				return (NULL);
			}
			m = m->m_next;
		}
	}
	return (NULL);
}

void
m_print(const struct mbuf *m, int maxlen)
{
	int len;
	int pdata;
	const struct mbuf *m2;

	if (m == NULL) {
		printf("mbuf: %p\n", m);
		return;
	}

	if (m->m_flags & M_PKTHDR)
		len = m->m_pkthdr.len;
	else
		len = -1;
	m2 = m;
	while (m2 != NULL && (len == -1 || len)) {
		pdata = m2->m_len;
		if (maxlen != -1 && pdata > maxlen)
			pdata = maxlen;
		printf("mbuf: %p len: %d, next: %p, %b%s", m2, m2->m_len,
		    m2->m_next, m2->m_flags, "\20\20freelist\17skipfw"
		    "\11proto5\10proto4\7proto3\6proto2\5proto1\4rdonly"
		    "\3eor\2pkthdr\1ext", pdata ? "" : "\n");
		if (pdata)
			printf(", %*D\n", pdata, (u_char *)m2->m_data, "-");
		if (len != -1)
			len -= m2->m_len;
		m2 = m2->m_next;
	}
	if (len > 0)
		printf("%d bytes unaccounted for.\n", len);
	return;
}

u_int
m_fixhdr(struct mbuf *m0)
{
	u_int len;

	len = m_length(m0, NULL);
	m0->m_pkthdr.len = len;
	return (len);
}

u_int
m_length(struct mbuf *m0, struct mbuf **last)
{
	struct mbuf *m;
	u_int len;

	len = 0;
	for (m = m0; m != NULL; m = m->m_next) {
		len += m->m_len;
		if (m->m_next == NULL)
			break;
	}
	if (last != NULL)
		*last = m;
	return (len);
}

/*
 * Defragment an mbuf chain, returning the shortest possible
 * chain of mbufs and clusters.  If allocation fails and
 * this cannot be completed, NULL will be returned, but
 * the passed in chain will be unchanged.  Upon success,
 * the original chain will be freed, and the new chain
 * will be returned.
 *
 * If a non-packet header is passed in, the original
 * mbuf (chain?) will be returned unharmed.
 */
struct mbuf *
m_defrag(struct mbuf *m0, int how)
{
	struct mbuf *m_new = NULL, *m_final = NULL;
	int progress = 0, length;

	MBUF_CHECKSLEEP(how);
	if (!(m0->m_flags & M_PKTHDR))
		return (m0);

	m_fixhdr(m0); /* Needed sanity check */

#ifdef MBUF_STRESS_TEST
	if (m_defragrandomfailures) {
		int temp = arc4random() & 0xff;
		if (temp == 0xba)
			goto nospace;
	}
#endif

	if (m0->m_pkthdr.len > MHLEN)
		m_final = m_getcl(how, MT_DATA, M_PKTHDR);
	else
		m_final = m_gethdr(how, MT_DATA);

	if (m_final == NULL)
		goto nospace;

	if (m_dup_pkthdr(m_final, m0, how) == 0)
		goto nospace;

	m_new = m_final;

	while (progress < m0->m_pkthdr.len) {
		length = m0->m_pkthdr.len - progress;
		if (length > MCLBYTES)
			length = MCLBYTES;

		if (m_new == NULL) {
			if (length > MLEN)
				m_new = m_getcl(how, MT_DATA, 0);
			else
				m_new = m_get(how, MT_DATA);
			if (m_new == NULL)
				goto nospace;
		}

		m_copydata(m0, progress, length, mtod(m_new, caddr_t));
		progress += length;
		m_new->m_len = length;
		if (m_new != m_final)
			m_cat(m_final, m_new);
		m_new = NULL;
	}
#ifdef MBUF_STRESS_TEST
	if (m0->m_next == NULL)
		m_defraguseless++;
#endif
	m_freem(m0);
	m0 = m_final;
#ifdef MBUF_STRESS_TEST
	m_defragpackets++;
	m_defragbytes += m0->m_pkthdr.len;
#endif
	return (m0);
nospace:
#ifdef MBUF_STRESS_TEST
	m_defragfailure++;
#endif
	if (m_final)
		m_freem(m_final);
	return (NULL);
}

/*
 * Return the number of fragments an mbuf will use.  This is usually
 * used as a proxy for the number of scatter/gather elements needed by
 * a DMA engine to access an mbuf.  In general mapped mbufs are
 * assumed to be backed by physically contiguous buffers that only
 * need a single fragment.  Unmapped mbufs, on the other hand, can
 * span disjoint physical pages.
 */
static int
frags_per_mbuf(struct mbuf *m)
{
	struct mbuf_ext_pgs *ext_pgs;
	int frags;

	if ((m->m_flags & M_NOMAP) == 0)
		return (1);

	/*
	 * The header and trailer are counted as a single fragment
	 * each when present.
	 *
	 * XXX: This overestimates the number of fragments by assuming
	 * all the backing physical pages are disjoint.
	 */
	ext_pgs = &m->m_ext_pgs;
	frags = 0;
	if (ext_pgs->hdr_len != 0)
		frags++;
	frags += ext_pgs->npgs;
	if (ext_pgs->trail_len != 0)
		frags++;

	return (frags);
}

/*
 * Defragment an mbuf chain, returning at most maxfrags separate
 * mbufs+clusters.  If this is not possible NULL is returned and
 * the original mbuf chain is left in its present (potentially
 * modified) state.  We use two techniques: collapsing consecutive
 * mbufs and replacing consecutive mbufs by a cluster.
 *
 * NB: this should really be named m_defrag but that name is taken
 */
struct mbuf *
m_collapse(struct mbuf *m0, int how, int maxfrags)
{
	struct mbuf *m, *n, *n2, **prev;
	u_int curfrags;

	/*
	 * Calculate the current number of frags.
	 */
	curfrags = 0;
	for (m = m0; m != NULL; m = m->m_next)
		curfrags += frags_per_mbuf(m);
	/*
	 * First, try to collapse mbufs.  Note that we always collapse
	 * towards the front so we don't need to deal with moving the
	 * pkthdr.  This may be suboptimal if the first mbuf has much
	 * less data than the following.
	 */
	m = m0;
again:
	for (;;) {
		n = m->m_next;
		if (n == NULL)
			break;
		if (M_WRITABLE(m) &&
		    n->m_len < M_TRAILINGSPACE(m)) {
			m_copydata(n, 0, n->m_len,
			    mtod(m, char *) + m->m_len);
			m->m_len += n->m_len;
			m->m_next = n->m_next;
			curfrags -= frags_per_mbuf(n);
			m_free(n);
			if (curfrags <= maxfrags)
				return (m0);
		} else
			m = n;
	}
	KASSERT(maxfrags > 1,
	    ("maxfrags %u, but normal collapse failed", maxfrags));
	/*
	 * Collapse consecutive mbufs to a cluster.
	 */
	prev = &m0->m_next;		/* NB: not the first mbuf */
	while ((n = *prev) != NULL) {
		if ((n2 = n->m_next) != NULL &&
		    n->m_len + n2->m_len < MCLBYTES) {
			m = m_getcl(how, MT_DATA, 0);
			if (m == NULL)
				goto bad;
			m_copydata(n, 0, n->m_len, mtod(m, char *));
			m_copydata(n2, 0, n2->m_len,
			    mtod(m, char *) + n->m_len);
			m->m_len = n->m_len + n2->m_len;
			m->m_next = n2->m_next;
			*prev = m;
			curfrags += 1;		/* For the new cluster */
			curfrags -= frags_per_mbuf(n);
			curfrags -= frags_per_mbuf(n2);
			m_free(n);
			m_free(n2);
			if (curfrags <= maxfrags)
				return (m0);
			/*
			 * Still not there, try the normal collapse
			 * again before we allocate another cluster.
			 */
			goto again;
		}
		prev = &n->m_next;
	}
	/*
	 * No place where we can collapse to a cluster; punt.
	 * This can occur if, for example, you request 2 frags
	 * but the packet requires that both be clusters (we
	 * never reallocate the first mbuf to avoid moving the
	 * packet header).
	 */
bad:
	return (NULL);
}
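
/*
 * Editorial example (not part of the original file): a typical consumer
 * of m_collapse() is a driver transmit path whose DMA engine supports
 * only a limited number of scatter/gather segments:
 *
 *	error = bus_dmamap_load_mbuf_sg(tag, map, m, segs, &nsegs, 0);
 *	if (error == EFBIG) {
 *		n = m_collapse(m, M_NOWAIT, DRIVER_MAX_SEGS);
 *		if (n == NULL) {
 *			m_freem(m);
 *			return (ENOBUFS);
 *		}
 *		m = n;
 *		(retry the load with the collapsed chain)
 *	}
 *
 * DRIVER_MAX_SEGS is a hypothetical per-driver constant.
 */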

#ifdef MBUF_STRESS_TEST

/*
 * Fragment an mbuf chain.  There's no reason you'd ever want to do
 * this in normal usage, but it's great for stress testing various
 * mbuf consumers.
 *
 * If fragmentation is not possible, the original chain will be
 * returned.
 *
 * Possible length values:
 * 0	 no fragmentation will occur
 * > 0	 each fragment will be of the specified length
 * -1	 each fragment will be the same random value in length
 * -2	 each fragment's length will be entirely random
 *	 (Random values range from 1 to 256)
 */
struct mbuf *
m_fragment(struct mbuf *m0, int how, int length)
{
	struct mbuf *m_first, *m_last;
	int divisor = 255, progress = 0, fraglen;

	if (!(m0->m_flags & M_PKTHDR))
		return (m0);

	if (length == 0 || length < -2)
		return (m0);
	if (length > MCLBYTES)
		length = MCLBYTES;
	if (length < 0 && divisor > MCLBYTES)
		divisor = MCLBYTES;
	if (length == -1)
		length = 1 + (arc4random() % divisor);
	if (length > 0)
		fraglen = length;

	m_fixhdr(m0); /* Needed sanity check */

	m_first = m_getcl(how, MT_DATA, M_PKTHDR);
	if (m_first == NULL)
		goto nospace;

	if (m_dup_pkthdr(m_first, m0, how) == 0)
		goto nospace;

	m_last = m_first;

	while (progress < m0->m_pkthdr.len) {
		if (length == -2)
			fraglen = 1 + (arc4random() % divisor);
		if (fraglen > m0->m_pkthdr.len - progress)
			fraglen = m0->m_pkthdr.len - progress;

		if (progress != 0) {
			struct mbuf *m_new = m_getcl(how, MT_DATA, 0);
			if (m_new == NULL)
				goto nospace;

			m_last->m_next = m_new;
			m_last = m_new;
		}

		m_copydata(m0, progress, fraglen, mtod(m_last, caddr_t));
		progress += fraglen;
		m_last->m_len = fraglen;
	}
	m_freem(m0);
	m0 = m_first;
	return (m0);
nospace:
	if (m_first)
		m_freem(m_first);
	/* Return the original chain on failure */
	return (m0);
}

#endif

/*
 * Free pages from mbuf_ext_pgs, assuming they were allocated via
 * vm_page_alloc() and aren't associated with any object.  Complement
 * to allocator from m_uiotombuf_nomap().
 */
void
mb_free_mext_pgs(struct mbuf *m)
{
	struct mbuf_ext_pgs *ext_pgs;
	vm_page_t pg;

	MBUF_EXT_PGS_ASSERT(m);
	ext_pgs = &m->m_ext_pgs;
	for (int i = 0; i < ext_pgs->npgs; i++) {
		pg = PHYS_TO_VM_PAGE(ext_pgs->m_epg_pa[i]);
		vm_page_unwire_noq(pg);
		vm_page_free(pg);
	}
}

static struct mbuf *
m_uiotombuf_nomap(struct uio *uio, int how, int len, int maxseg, int flags)
{
	struct mbuf *m, *mb, *prev;
	struct mbuf_ext_pgs *pgs;
	vm_page_t pg_array[MBUF_PEXT_MAX_PGS];
	int error, length, i, needed;
	ssize_t total;
	int pflags = malloc2vm_flags(how) | VM_ALLOC_NOOBJ | VM_ALLOC_NODUMP |
	    VM_ALLOC_WIRED;

	/*
	 * len can be zero or an arbitrary large value bound by
	 * the total data supplied by the uio.
	 */
	if (len > 0)
		total = MIN(uio->uio_resid, len);
	else
		total = uio->uio_resid;

	if (maxseg == 0)
		maxseg = MBUF_PEXT_MAX_PGS * PAGE_SIZE;

	/*
	 * Allocate the pages
	 */
	m = NULL;
	MPASS((flags & M_PKTHDR) == 0);
	while (total > 0) {
		mb = mb_alloc_ext_pgs(how, mb_free_mext_pgs);
		if (mb == NULL)
			goto failed;
		if (m == NULL)
			m = mb;
		else
			prev->m_next = mb;
		prev = mb;
		pgs = &mb->m_ext_pgs;
		pgs->flags = MBUF_PEXT_FLAG_ANON;
		needed = length = MIN(maxseg, total);
		for (i = 0; needed > 0; i++, needed -= PAGE_SIZE) {
retry_page:
			pg_array[i] = vm_page_alloc(NULL, 0, pflags);
			if (pg_array[i] == NULL) {
				if (how & M_NOWAIT) {
					goto failed;
				} else {
					vm_wait(NULL);
					goto retry_page;
				}
			}
			pg_array[i]->flags &= ~PG_ZERO;
			pgs->m_epg_pa[i] = VM_PAGE_TO_PHYS(pg_array[i]);
			pgs->npgs++;
		}
		pgs->last_pg_len = length - PAGE_SIZE * (pgs->npgs - 1);
		MBUF_EXT_PGS_ASSERT_SANITY(pgs);
		total -= length;
		error = uiomove_fromphys(pg_array, 0, length, uio);
		if (error != 0)
			goto failed;
		mb->m_len = length;
		mb->m_ext.ext_size += PAGE_SIZE * pgs->npgs;
		if (flags & M_PKTHDR)
			m->m_pkthdr.len += length;
	}
	return (m);

failed:
	m_freem(m);
	return (NULL);
}

/*
 * Copy the contents of uio into a properly sized mbuf chain.
 */
struct mbuf *
m_uiotombuf(struct uio *uio, int how, int len, int align, int flags)
{
	struct mbuf *m, *mb;
	int error, length;
	ssize_t total;
	int progress = 0;

	if (flags & M_NOMAP)
		return (m_uiotombuf_nomap(uio, how, len, align, flags));

	/*
	 * len can be zero or an arbitrary large value bound by
	 * the total data supplied by the uio.
	 */
	if (len > 0)
		total = (uio->uio_resid < len) ? uio->uio_resid : len;
	else
		total = uio->uio_resid;

	/*
	 * The smallest unit returned by m_getm2() is a single mbuf
	 * with pkthdr.  We can't align past it.
	 */
	if (align >= MHLEN)
		return (NULL);

	/*
	 * Give us the full allocation or nothing.
	 * If len is zero return the smallest empty mbuf.
	 */
	m = m_getm2(NULL, max(total + align, 1), how, MT_DATA, flags);
	if (m == NULL)
		return (NULL);
	m->m_data += align;

	/* Fill all mbufs with uio data and update header information. */
	for (mb = m; mb != NULL; mb = mb->m_next) {
		length = min(M_TRAILINGSPACE(mb), total - progress);

		error = uiomove(mtod(mb, void *), length, uio);
		if (error) {
			m_freem(m);
			return (NULL);
		}

		mb->m_len = length;
		progress += length;
		if (flags & M_PKTHDR)
			m->m_pkthdr.len += length;
	}
	KASSERT(progress == total, ("%s: progress != total", __func__));

	return (m);
}
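
/*
 * Editorial example (not part of the original file): the socket send
 * path is the classic caller, turning user data into a packet chain
 * with room reserved in front for protocol headers, roughly:
 *
 *	m = m_uiotombuf(uio, M_WAITOK, space, max_hdr, M_PKTHDR);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *
 * "space" stands in for however many bytes the caller will take from
 * the uio in this pass.
 */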

/*
 * Copy data from an unmapped mbuf into a uio limited by len if set.
 */
int
m_unmappedtouio(const struct mbuf *m, int m_off, struct uio *uio, int len)
{
	struct mbuf_ext_pgs *ext_pgs;
	vm_page_t pg;
	int error, i, off, pglen, pgoff, seglen, segoff;

	MBUF_EXT_PGS_ASSERT(m);
	ext_pgs = __DECONST(void *, &m->m_ext_pgs);
	error = 0;

	/* Skip over any data removed from the front. */
	off = mtod(m, vm_offset_t);

	off += m_off;
	if (ext_pgs->hdr_len != 0) {
		if (off >= ext_pgs->hdr_len) {
			off -= ext_pgs->hdr_len;
		} else {
			seglen = ext_pgs->hdr_len - off;
			segoff = off;
			seglen = min(seglen, len);
			off = 0;
			len -= seglen;
			error = uiomove(&ext_pgs->m_epg_hdr[segoff], seglen,
			    uio);
		}
	}
	pgoff = ext_pgs->first_pg_off;
	for (i = 0; i < ext_pgs->npgs && error == 0 && len > 0; i++) {
		pglen = mbuf_ext_pg_len(ext_pgs, i, pgoff);
		if (off >= pglen) {
			off -= pglen;
			pgoff = 0;
			continue;
		}
		seglen = pglen - off;
		segoff = pgoff + off;
		off = 0;
		seglen = min(seglen, len);
		len -= seglen;
		pg = PHYS_TO_VM_PAGE(ext_pgs->m_epg_pa[i]);
		error = uiomove_fromphys(&pg, segoff, seglen, uio);
		pgoff = 0;
	}
	if (len != 0 && error == 0) {
		KASSERT((off + len) <= ext_pgs->trail_len,
		    ("off + len > trail (%d + %d > %d, m_off = %d)", off, len,
		    ext_pgs->trail_len, m_off));
		error = uiomove(&ext_pgs->m_epg_trail[off], len, uio);
	}
	return (error);
}

/*
 * Copy an mbuf chain into a uio limited by len if set.
 */
int
m_mbuftouio(struct uio *uio, const struct mbuf *m, int len)
{
	int error, length, total;
	int progress = 0;

	if (len > 0)
		total = min(uio->uio_resid, len);
	else
		total = uio->uio_resid;

	/* Fill the uio with data from the mbufs. */
	for (; m != NULL; m = m->m_next) {
		length = min(m->m_len, total - progress);

		if ((m->m_flags & M_NOMAP) != 0)
			error = m_unmappedtouio(m, 0, uio, length);
		else
			error = uiomove(mtod(m, void *), length, uio);
		if (error)
			return (error);

		progress += length;
	}

	return (0);
}
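
/*
 * Editorial note (not part of the original file): m_mbuftouio() is the
 * receive-side mirror of m_uiotombuf(); stream-socket receive code can
 * drain a chain into user space with, e.g.:
 *
 *	error = m_mbuftouio(uio, m, 0);		(0 = no length cap)
 */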

/*
 * Create a writable copy of the mbuf chain.  While doing this
 * we compact the chain with a goal of producing a chain with
 * at most two mbufs.  The second mbuf in this chain is likely
 * to be a cluster.  The primary purpose of this work is to create
 * a writable packet for encryption, compression, etc.  The
 * secondary goal is to linearize the data so the data can be
 * passed to crypto hardware in the most efficient manner possible.
 */
struct mbuf *
m_unshare(struct mbuf *m0, int how)
{
	struct mbuf *m, *mprev;
	struct mbuf *n, *mfirst, *mlast;
	int len, off;

	mprev = NULL;
	for (m = m0; m != NULL; m = mprev->m_next) {
		/*
		 * Regular mbufs are ignored unless there's a cluster
		 * in front of it that we can use to coalesce.  We do
		 * the latter mainly so later clusters can be coalesced
		 * also w/o having to handle them specially (i.e. convert
		 * mbuf+cluster -> cluster).  This optimization is heavily
		 * influenced by the assumption that we're running over
		 * Ethernet where MCLBYTES is large enough that the max
		 * packet size will permit lots of coalescing into a
		 * single cluster.  This in turn permits efficient
		 * crypto operations, especially when using hardware.
		 */
		if ((m->m_flags & M_EXT) == 0) {
			if (mprev && (mprev->m_flags & M_EXT) &&
			    m->m_len <= M_TRAILINGSPACE(mprev)) {
				/* XXX: this ignores mbuf types */
				memcpy(mtod(mprev, caddr_t) + mprev->m_len,
				    mtod(m, caddr_t), m->m_len);
				mprev->m_len += m->m_len;
				mprev->m_next = m->m_next;	/* unlink from chain */
				m_free(m);			/* reclaim mbuf */
			} else {
				mprev = m;
			}
			continue;
		}
		/*
		 * Writable mbufs are left alone (for now).
		 */
		if (M_WRITABLE(m)) {
			mprev = m;
			continue;
		}

		/*
		 * Not writable, replace with a copy or coalesce with
		 * the previous mbuf if possible (since we have to copy
		 * it anyway, we try to reduce the number of mbufs and
		 * clusters so that future work is easier).
		 */
		KASSERT(m->m_flags & M_EXT, ("m_flags 0x%x", m->m_flags));
		/* NB: we only coalesce into a cluster or larger */
		if (mprev != NULL && (mprev->m_flags & M_EXT) &&
		    m->m_len <= M_TRAILINGSPACE(mprev)) {
			/* XXX: this ignores mbuf types */
			memcpy(mtod(mprev, caddr_t) + mprev->m_len,
			    mtod(m, caddr_t), m->m_len);
			mprev->m_len += m->m_len;
			mprev->m_next = m->m_next;	/* unlink from chain */
			m_free(m);			/* reclaim mbuf */
			continue;
		}

		/*
		 * Allocate new space to hold the copy and copy the data.
		 * We deal with jumbo mbufs (i.e. m_len > MCLBYTES) by
		 * splitting them into clusters.  We could just malloc a
		 * buffer and make it external but too many device drivers
		 * don't know how to break up the non-contiguous memory when
		 * doing DMA.
		 */
		n = m_getcl(how, m->m_type, m->m_flags & M_COPYFLAGS);
		if (n == NULL) {
			m_freem(m0);
			return (NULL);
		}
		if (m->m_flags & M_PKTHDR) {
			KASSERT(mprev == NULL, ("%s: m0 %p, m %p has M_PKTHDR",
			    __func__, m0, m));
			m_move_pkthdr(n, m);
		}
		len = m->m_len;
		off = 0;
		mfirst = n;
		mlast = NULL;
		for (;;) {
			int cc = min(len, MCLBYTES);
			memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off, cc);
			n->m_len = cc;
			if (mlast != NULL)
				mlast->m_next = n;
			mlast = n;
#if 0
			newipsecstat.ips_clcopied++;
#endif

			len -= cc;
			if (len <= 0)
				break;
			off += cc;

			n = m_getcl(how, m->m_type, m->m_flags & M_COPYFLAGS);
			if (n == NULL) {
				m_freem(mfirst);
				m_freem(m0);
				return (NULL);
			}
		}
		n->m_next = m->m_next;
		if (mprev == NULL)
			m0 = mfirst;		/* new head of chain */
		else
			mprev->m_next = mfirst;	/* replace old mbuf */
		m_free(m);			/* release old mbuf */
		mprev = mfirst;
	}
	return (m0);
}

#ifdef MBUF_PROFILING

#define	MP_BUCKETS 32	/* don't just change this as things may overflow.*/
struct mbufprofile {
	uintmax_t wasted[MP_BUCKETS];
	uintmax_t used[MP_BUCKETS];
	uintmax_t segments[MP_BUCKETS];
} mbprof;

#define	MP_MAXDIGITS 21	/* strlen("16,000,000,000,000,000,000") == 21 */
#define	MP_NUMLINES 6
#define	MP_NUMSPERLINE 16
#define	MP_EXTRABYTES 64	/* > strlen("used:\nwasted:\nsegments:\n") */
/* work out max space needed and add a bit of spare space too */
#define	MP_MAXLINE ((MP_MAXDIGITS+1) * MP_NUMSPERLINE)
#define	MP_BUFSIZE ((MP_MAXLINE * MP_NUMLINES) + 1 + MP_EXTRABYTES)

char mbprofbuf[MP_BUFSIZE];

void
m_profile(struct mbuf *m)
{
	int segments = 0;
	int used = 0;
	int wasted = 0;

	while (m) {
		segments++;
		used += m->m_len;
		if (m->m_flags & M_EXT) {
			wasted += MHLEN - sizeof(m->m_ext) +
			    m->m_ext.ext_size - m->m_len;
		} else {
			if (m->m_flags & M_PKTHDR)
				wasted += MHLEN - m->m_len;
			else
				wasted += MLEN - m->m_len;
		}
		m = m->m_next;
	}
	/* be paranoid.. it helps */
	if (segments > MP_BUCKETS - 1)
		segments = MP_BUCKETS - 1;
	if (used > 100000)
		used = 100000;
	if (wasted > 100000)
		wasted = 100000;
	/* store in the appropriate bucket */
	/* don't bother locking.  if it's slightly off, so what? */
	mbprof.segments[segments]++;
	mbprof.used[fls(used)]++;
	mbprof.wasted[fls(wasted)]++;
}

static void
mbprof_textify(void)
{
	int offset;
	char *c;
	uint64_t *p;

	p = &mbprof.wasted[0];
	c = mbprofbuf;
	offset = snprintf(c, MP_MAXLINE + 10,
	    "wasted:\n"
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#ifdef BIG_ARRAY
	p = &mbprof.wasted[16];
	c += offset;
	offset = snprintf(c, MP_MAXLINE,
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#endif
	p = &mbprof.used[0];
	c += offset;
	offset = snprintf(c, MP_MAXLINE + 10,
	    "used:\n"
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#ifdef BIG_ARRAY
	p = &mbprof.used[16];
	c += offset;
	offset = snprintf(c, MP_MAXLINE,
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#endif
	p = &mbprof.segments[0];
	c += offset;
	offset = snprintf(c, MP_MAXLINE + 10,
	    "segments:\n"
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#ifdef BIG_ARRAY
	p = &mbprof.segments[16];
	c += offset;
	offset = snprintf(c, MP_MAXLINE,
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#endif
}

static int
mbprof_handler(SYSCTL_HANDLER_ARGS)
{
	int error;

	mbprof_textify();
	error = SYSCTL_OUT(req, mbprofbuf, strlen(mbprofbuf) + 1);
	return (error);
}

static int
mbprof_clr_handler(SYSCTL_HANDLER_ARGS)
{
	int clear, error;

	clear = 0;
	error = sysctl_handle_int(oidp, &clear, 0, req);
	if (error || !req->newptr)
		return (error);

	if (clear) {
		bzero(&mbprof, sizeof(mbprof));
	}

	return (error);
}

SYSCTL_PROC(_kern_ipc, OID_AUTO, mbufprofile,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, NULL, 0,
    mbprof_handler, "A",
    "mbuf profiling statistics");

SYSCTL_PROC(_kern_ipc, OID_AUTO, mbufprofileclr,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, NULL, 0,
    mbprof_clr_handler, "I",
    "clear mbuf profiling statistics");
#endif