/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_param.h"
#include "opt_mbuf_stress_test.h"
#include "opt_mbuf_profiling.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/uio.h>
#include <sys/vmmeter.h>
#include <sys/sdt.h>
#include <vm/vm.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>

SDT_PROBE_DEFINE5_XLATE(sdt, , , m__init,
    "struct mbuf *", "mbufinfo_t *",
    "uint32_t", "uint32_t",
    "uint16_t", "uint16_t",
    "uint32_t", "uint32_t",
    "uint32_t", "uint32_t");

SDT_PROBE_DEFINE3_XLATE(sdt, , , m__gethdr_raw,
    "uint32_t", "uint32_t",
    "uint16_t", "uint16_t",
    "struct mbuf *", "mbufinfo_t *");

SDT_PROBE_DEFINE3_XLATE(sdt, , , m__gethdr,
    "uint32_t", "uint32_t",
    "uint16_t", "uint16_t",
    "struct mbuf *", "mbufinfo_t *");

SDT_PROBE_DEFINE3_XLATE(sdt, , , m__get_raw,
    "uint32_t", "uint32_t",
    "uint16_t", "uint16_t",
    "struct mbuf *", "mbufinfo_t *");

SDT_PROBE_DEFINE3_XLATE(sdt, , , m__get,
    "uint32_t", "uint32_t",
    "uint16_t", "uint16_t",
    "struct mbuf *", "mbufinfo_t *");

SDT_PROBE_DEFINE4_XLATE(sdt, , , m__getcl,
    "uint32_t", "uint32_t",
    "uint16_t", "uint16_t",
    "uint32_t", "uint32_t",
    "struct mbuf *", "mbufinfo_t *");

SDT_PROBE_DEFINE5_XLATE(sdt, , , m__getjcl,
    "uint32_t", "uint32_t",
    "uint16_t", "uint16_t",
    "uint32_t", "uint32_t",
    "uint32_t", "uint32_t",
    "struct mbuf *", "mbufinfo_t *");

SDT_PROBE_DEFINE3_XLATE(sdt, , , m__clget,
    "struct mbuf *", "mbufinfo_t *",
    "uint32_t", "uint32_t",
    "uint32_t", "uint32_t");
101 "uint32_t", "uint32_t"); 102 103 SDT_PROBE_DEFINE4_XLATE(sdt, , , m__cljget, 104 "struct mbuf *", "mbufinfo_t *", 105 "uint32_t", "uint32_t", 106 "uint32_t", "uint32_t", 107 "void*", "void*"); 108 109 SDT_PROBE_DEFINE(sdt, , , m__cljset); 110 111 SDT_PROBE_DEFINE1_XLATE(sdt, , , m__free, 112 "struct mbuf *", "mbufinfo_t *"); 113 114 SDT_PROBE_DEFINE1_XLATE(sdt, , , m__freem, 115 "struct mbuf *", "mbufinfo_t *"); 116 117 #include <security/mac/mac_framework.h> 118 119 int max_linkhdr; 120 int max_protohdr; 121 int max_hdr; 122 int max_datalen; 123 #ifdef MBUF_STRESS_TEST 124 int m_defragpackets; 125 int m_defragbytes; 126 int m_defraguseless; 127 int m_defragfailure; 128 int m_defragrandomfailures; 129 #endif 130 131 /* 132 * sysctl(8) exported objects 133 */ 134 SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RD, 135 &max_linkhdr, 0, "Size of largest link layer header"); 136 SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RD, 137 &max_protohdr, 0, "Size of largest protocol layer header"); 138 SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RD, 139 &max_hdr, 0, "Size of largest link plus protocol header"); 140 SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RD, 141 &max_datalen, 0, "Minimum space left in mbuf after max_hdr"); 142 #ifdef MBUF_STRESS_TEST 143 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD, 144 &m_defragpackets, 0, ""); 145 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD, 146 &m_defragbytes, 0, ""); 147 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD, 148 &m_defraguseless, 0, ""); 149 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD, 150 &m_defragfailure, 0, ""); 151 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW, 152 &m_defragrandomfailures, 0, ""); 153 #endif 154 155 /* 156 * Ensure the correct size of various mbuf parameters. It could be off due 157 * to compiler-induced padding and alignment artifacts. 158 */ 159 CTASSERT(MSIZE - offsetof(struct mbuf, m_dat) == MLEN); 160 CTASSERT(MSIZE - offsetof(struct mbuf, m_pktdat) == MHLEN); 161 162 /* 163 * mbuf data storage should be 64-bit aligned regardless of architectural 164 * pointer size; check this is the case with and without a packet header. 165 */ 166 CTASSERT(offsetof(struct mbuf, m_dat) % 8 == 0); 167 CTASSERT(offsetof(struct mbuf, m_pktdat) % 8 == 0); 168 169 /* 170 * While the specific values here don't matter too much (i.e., +/- a few 171 * words), we do want to ensure that changes to these values are carefully 172 * reasoned about and properly documented. This is especially the case as 173 * network-protocol and device-driver modules encode these layouts, and must 174 * be recompiled if the structures change. Check these values at compile time 175 * against the ones documented in comments in mbuf.h. 176 * 177 * NB: Possibly they should be documented there via #define's and not just 178 * comments. 179 */ 180 #if defined(__LP64__) 181 CTASSERT(offsetof(struct mbuf, m_dat) == 32); 182 CTASSERT(sizeof(struct pkthdr) == 56); 183 CTASSERT(sizeof(struct m_ext) == 160); 184 #else 185 CTASSERT(offsetof(struct mbuf, m_dat) == 24); 186 CTASSERT(sizeof(struct pkthdr) == 48); 187 #if defined(__powerpc__) && defined(BOOKE) 188 /* PowerPC booke has 64-bit physical pointers. */ 189 CTASSERT(sizeof(struct m_ext) == 184); 190 #else 191 CTASSERT(sizeof(struct m_ext) == 180); 192 #endif 193 #endif 194 195 /* 196 * Assert that the queue(3) macros produce code of the same size as an old 197 * plain pointer does. 

/*
 * Assert that the queue(3) macros produce code of the same size as an old
 * plain pointer does.
 */
#ifdef INVARIANTS
static struct mbuf __used m_assertbuf;
CTASSERT(sizeof(m_assertbuf.m_slist) == sizeof(m_assertbuf.m_next));
CTASSERT(sizeof(m_assertbuf.m_stailq) == sizeof(m_assertbuf.m_next));
CTASSERT(sizeof(m_assertbuf.m_slistpkt) == sizeof(m_assertbuf.m_nextpkt));
CTASSERT(sizeof(m_assertbuf.m_stailqpkt) == sizeof(m_assertbuf.m_nextpkt));
#endif

/*
 * Attach the cluster from *m to *n, set up m_ext in *n
 * and bump the refcount of the cluster.
 */
void
mb_dupcl(struct mbuf *n, struct mbuf *m)
{
	volatile u_int *refcnt;

	KASSERT(m->m_flags & (M_EXT|M_EXTPG),
	    ("%s: M_EXT|M_EXTPG not set on %p", __func__, m));
	KASSERT(!(n->m_flags & (M_EXT|M_EXTPG)),
	    ("%s: M_EXT|M_EXTPG set on %p", __func__, n));

	/*
	 * Cache access optimization.
	 *
	 * o Regular M_EXT storage doesn't need full copy of m_ext, since
	 *   the holder of the 'ext_count' is responsible to carry the free
	 *   routine and its arguments.
	 * o M_EXTPG data is split between main part of mbuf and m_ext, the
	 *   main part is copied in full, the m_ext part is similar to M_EXT.
	 * o EXT_EXTREF, where 'ext_cnt' doesn't point into mbuf at all, is
	 *   special - it needs full copy of m_ext into each mbuf, since any
	 *   copy could end up as the last to free.
	 */
	if (m->m_flags & M_EXTPG) {
		bcopy(&m->m_epg_startcopy, &n->m_epg_startcopy,
		    __rangeof(struct mbuf, m_epg_startcopy, m_epg_endcopy));
		bcopy(&m->m_ext, &n->m_ext, m_epg_ext_copylen);
	} else if (m->m_ext.ext_type == EXT_EXTREF)
		bcopy(&m->m_ext, &n->m_ext, sizeof(struct m_ext));
	else
		bcopy(&m->m_ext, &n->m_ext, m_ext_copylen);

	n->m_flags |= m->m_flags & (M_RDONLY | M_EXT | M_EXTPG);

	/* See if this is the mbuf that holds the embedded refcount. */
	if (m->m_ext.ext_flags & EXT_FLAG_EMBREF) {
		refcnt = n->m_ext.ext_cnt = &m->m_ext.ext_count;
		n->m_ext.ext_flags &= ~EXT_FLAG_EMBREF;
	} else {
		KASSERT(m->m_ext.ext_cnt != NULL,
		    ("%s: no refcounting pointer on %p", __func__, m));
		refcnt = m->m_ext.ext_cnt;
	}

	if (*refcnt == 1)
		*refcnt += 1;
	else
		atomic_add_int(refcnt, 1);
}

void
m_demote_pkthdr(struct mbuf *m)
{

	M_ASSERTPKTHDR(m);

	m_tag_delete_chain(m, NULL);
	m->m_flags &= ~M_PKTHDR;
	bzero(&m->m_pkthdr, sizeof(struct pkthdr));
}

/*
 * Clean up mbuf (chain) from any tags and packet headers.
 * If "all" is set then the first mbuf in the chain will be
 * cleaned too.
 */
void
m_demote(struct mbuf *m0, int all, int flags)
{
	struct mbuf *m;

	for (m = all ? m0 : m0->m_next; m != NULL; m = m->m_next) {
		KASSERT(m->m_nextpkt == NULL, ("%s: m_nextpkt in m %p, m0 %p",
		    __func__, m, m0));
		if (m->m_flags & M_PKTHDR)
			m_demote_pkthdr(m);
		m->m_flags = m->m_flags & (M_EXT | M_RDONLY | M_NOFREE |
		    M_EXTPG | flags);
	}
}

/*
 * Sanity checks on mbuf (chain) for use in KASSERT() and general
 * debugging.
 * Returns 0 (or panics, under INVARIANTS) when a check fails and 1 when
 * all checks pass.
 * The "sanitize" argument selects the failure behavior: 0 to run
 * M_SANITY_ACTION, 1 to garble the offending data so it blows up later.
 */
int
m_sanity(struct mbuf *m0, int sanitize)
{
	struct mbuf *m;
	caddr_t a, b;
	int pktlen = 0;

#ifdef INVARIANTS
#define	M_SANITY_ACTION(s)	panic("mbuf %p: " s, m)
#else
#define	M_SANITY_ACTION(s)	printf("mbuf %p: " s, m)
#endif

	for (m = m0; m != NULL; m = m->m_next) {
		/*
		 * Basic pointer checks.  If any of these fails then some
		 * unrelated kernel memory before or after us is trashed.
		 * No way to recover from that.
		 */
		a = M_START(m);
		b = a + M_SIZE(m);
		if ((caddr_t)m->m_data < a)
			M_SANITY_ACTION("m_data outside mbuf data range left");
		if ((caddr_t)m->m_data > b)
			M_SANITY_ACTION("m_data outside mbuf data range right");
		if ((caddr_t)m->m_data + m->m_len > b)
			M_SANITY_ACTION("m_data + m_len exceeds mbuf space");

		/* m->m_nextpkt may only be set on first mbuf in chain. */
		if (m != m0 && m->m_nextpkt != NULL) {
			if (sanitize) {
				m_freem(m->m_nextpkt);
				m->m_nextpkt = (struct mbuf *)0xDEADC0DE;
			} else
				M_SANITY_ACTION("m->m_nextpkt on in-chain mbuf");
		}

		/* packet length (not mbuf length!) calculation */
		if (m0->m_flags & M_PKTHDR)
			pktlen += m->m_len;

		/* m_tags may only be attached to first mbuf in chain. */
		if (m != m0 && m->m_flags & M_PKTHDR &&
		    !SLIST_EMPTY(&m->m_pkthdr.tags)) {
			if (sanitize) {
				m_tag_delete_chain(m, NULL);
				/* put in 0xDEADC0DE perhaps? */
			} else
				M_SANITY_ACTION("m_tags on in-chain mbuf");
		}

		/* M_PKTHDR may only be set on first mbuf in chain */
		if (m != m0 && m->m_flags & M_PKTHDR) {
			if (sanitize) {
				bzero(&m->m_pkthdr, sizeof(m->m_pkthdr));
				m->m_flags &= ~M_PKTHDR;
				/* put in 0xDEADCODE and leave hdr flag in */
			} else
				M_SANITY_ACTION("M_PKTHDR on in-chain mbuf");
		}
	}
	m = m0;
	if (pktlen && pktlen != m->m_pkthdr.len) {
		if (sanitize)
			m->m_pkthdr.len = 0;
		else
			M_SANITY_ACTION("m_pkthdr.len != mbuf chain length");
	}
	return 1;

#undef	M_SANITY_ACTION
}

/*
 * Non-inlined part of m_init().
 */
int
m_pkthdr_init(struct mbuf *m, int how)
{
#ifdef MAC
	int error;
#endif

	m->m_data = m->m_pktdat;
	bzero(&m->m_pkthdr, sizeof(m->m_pkthdr));
#ifdef NUMA
	m->m_pkthdr.numa_domain = M_NODOM;
#endif
#ifdef MAC
	/* If the label init fails, fail the alloc */
	error = mac_mbuf_init(m, how);
	if (error)
		return (error);
#endif

	return (0);
}

/*
 * "Move" mbuf pkthdr from "from" to "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 */
void
m_move_pkthdr(struct mbuf *to, struct mbuf *from)
{

#if 0
	/* see below for why these are not enabled */
	M_ASSERTPKTHDR(to);
	/* Note: with MAC, this may not be a good assertion. */
	KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags),
	    ("m_move_pkthdr: to has tags"));
#endif
#ifdef MAC
	/*
	 * XXXMAC: It could be this should also occur for non-MAC?
	 */
	if (to->m_flags & M_PKTHDR)
		m_tag_delete_chain(to, NULL);
#endif
	to->m_flags = (from->m_flags & M_COPYFLAGS) |
	    (to->m_flags & (M_EXT | M_EXTPG));
	if ((to->m_flags & M_EXT) == 0)
		to->m_data = to->m_pktdat;
	to->m_pkthdr = from->m_pkthdr;		/* especially tags */
	SLIST_INIT(&from->m_pkthdr.tags);	/* purge tags from src */
	from->m_flags &= ~M_PKTHDR;
	if (from->m_pkthdr.csum_flags & CSUM_SND_TAG) {
		from->m_pkthdr.csum_flags &= ~CSUM_SND_TAG;
		from->m_pkthdr.snd_tag = NULL;
	}
}

/*
 * Duplicate "from"'s mbuf pkthdr in "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 * In particular, this does a deep copy of the packet tags.
 */
int
m_dup_pkthdr(struct mbuf *to, const struct mbuf *from, int how)
{

#if 0
	/*
	 * The mbuf allocator only initializes the pkthdr
	 * when the mbuf is allocated with m_gethdr(). Many users
	 * (e.g. m_copy*, m_prepend) use m_get() and then
	 * smash the pkthdr as needed causing these
	 * assertions to trip.  For now just disable them.
	 */
	M_ASSERTPKTHDR(to);
	/* Note: with MAC, this may not be a good assertion. */
	KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags), ("m_dup_pkthdr: to has tags"));
#endif
	MBUF_CHECKSLEEP(how);
#ifdef MAC
	if (to->m_flags & M_PKTHDR)
		m_tag_delete_chain(to, NULL);
#endif
	to->m_flags = (from->m_flags & M_COPYFLAGS) |
	    (to->m_flags & (M_EXT | M_EXTPG));
	if ((to->m_flags & M_EXT) == 0)
		to->m_data = to->m_pktdat;
	to->m_pkthdr = from->m_pkthdr;
	if (from->m_pkthdr.csum_flags & CSUM_SND_TAG)
		m_snd_tag_ref(from->m_pkthdr.snd_tag);
	SLIST_INIT(&to->m_pkthdr.tags);
	return (m_tag_copy_chain(to, from, how));
}

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(struct mbuf *m, int len, int how)
{
	struct mbuf *mn;

	if (m->m_flags & M_PKTHDR)
		mn = m_gethdr(how, m->m_type);
	else
		mn = m_get(how, m->m_type);
	if (mn == NULL) {
		m_freem(m);
		return (NULL);
	}
	if (m->m_flags & M_PKTHDR)
		m_move_pkthdr(mn, m);
	mn->m_next = m;
	m = mn;
	if (len < M_SIZE(m))
		M_ALIGN(m, len);
	m->m_len = len;
	return (m);
}
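
/*
 * Usage sketch (illustrative, not part of the original file): callers
 * normally reach m_prepend() through the M_PREPEND() macro, which only
 * takes this path when the first mbuf has no leading space.  A
 * hypothetical caller prepending an IP header might look like:
 *
 *	M_PREPEND(m, sizeof(struct ip), M_NOWAIT);
 *	if (m == NULL)
 *		return (ENOBUFS);	(the chain was already freed)
 *	ip = mtod(m, struct ip *);
 */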

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_WAITOK/M_NOWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
struct mbuf *
m_copym(struct mbuf *m, int off0, int len, int wait)
{
	struct mbuf *n, **np;
	int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	KASSERT(off >= 0, ("m_copym, negative off %d", off));
	KASSERT(len >= 0, ("m_copym, negative len %d", len));
	MBUF_CHECKSLEEP(wait);
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = NULL;
	while (len > 0) {
		if (m == NULL) {
			KASSERT(len == M_COPYALL,
			    ("m_copym, length > size of mbuf chain"));
			break;
		}
		if (copyhdr)
			n = m_gethdr(wait, m->m_type);
		else
			n = m_get(wait, m->m_type);
		*np = n;
		if (n == NULL)
			goto nospace;
		if (copyhdr) {
			if (!m_dup_pkthdr(n, m, wait))
				goto nospace;
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & (M_EXT|M_EXTPG)) {
			n->m_data = m->m_data + off;
			mb_dupcl(n, m);
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (u_int)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}

	return (top);
nospace:
	m_freem(top);
	return (NULL);
}
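
/*
 * Usage sketch (illustrative, not part of the original file): taking a
 * reference-counted copy of an entire packet, e.g. before handing it to
 * a tap consumer:
 *
 *	n = m_copym(m, 0, M_COPYALL, M_NOWAIT);
 *	if (n == NULL)
 *		...count a drop and carry on with m...
 *
 * Per the comment above, n shares clusters with m and is read-only;
 * use m_dup() or m_unshare() if the copy must be modified.
 */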

/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 * Preserve alignment of the first mbuf so if the creator has left
 * some room at the beginning (e.g. for inserting protocol headers)
 * the copies still have the room available.
 */
struct mbuf *
m_copypacket(struct mbuf *m, int how)
{
	struct mbuf *top, *n, *o;

	MBUF_CHECKSLEEP(how);
	n = m_get(how, m->m_type);
	top = n;
	if (n == NULL)
		goto nospace;

	if (!m_dup_pkthdr(n, m, how))
		goto nospace;
	n->m_len = m->m_len;
	if (m->m_flags & (M_EXT|M_EXTPG)) {
		n->m_data = m->m_data;
		mb_dupcl(n, m);
	} else {
		n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		o = m_get(how, m->m_type);
		if (o == NULL)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & (M_EXT|M_EXTPG)) {
			n->m_data = m->m_data;
			mb_dupcl(n, m);
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return (top);
nospace:
	m_freem(top);
	return (NULL);
}

static void
m_copyfromunmapped(const struct mbuf *m, int off, int len, caddr_t cp)
{
	struct iovec iov;
	struct uio uio;
	int error;

	KASSERT(off >= 0, ("m_copyfromunmapped: negative off %d", off));
	KASSERT(len >= 0, ("m_copyfromunmapped: negative len %d", len));
	KASSERT(off < m->m_len,
	    ("m_copyfromunmapped: len exceeds mbuf length"));
	iov.iov_base = cp;
	iov.iov_len = len;
	uio.uio_resid = len;
	uio.uio_iov = &iov;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_iovcnt = 1;
	uio.uio_offset = 0;
	uio.uio_rw = UIO_READ;
	error = m_unmapped_uiomove(m, off, &uio, len);
	KASSERT(error == 0, ("m_unmapped_uiomove failed: off %d, len %d", off,
	    len));
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
{
	u_int count;

	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		if ((m->m_flags & M_EXTPG) != 0)
			m_copyfromunmapped(m, off, count, cp);
		else
			bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}
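
/*
 * Usage sketch (illustrative, not part of the original file): protocols
 * commonly pull a fixed-size header out of a chain into a stack buffer
 * without caring how the chain happens to be split:
 *
 *	struct udphdr uh;
 *
 *	m_copydata(m, off, sizeof(uh), (caddr_t)&uh);
 *
 * The caller must guarantee that off + sizeof(uh) does not run past the
 * end of the chain; the KASSERTs above enforce this.
 */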

/*
 * Copy a packet header mbuf chain into a completely new chain, including
 * copying any mbuf clusters.  Use this instead of m_copypacket() when
 * you need a writable copy of an mbuf chain.
 */
struct mbuf *
m_dup(const struct mbuf *m, int how)
{
	struct mbuf **p, *top = NULL;
	int remain, moff, nsize;

	MBUF_CHECKSLEEP(how);
	/* Sanity check */
	if (m == NULL)
		return (NULL);
	M_ASSERTPKTHDR(m);

	/* While there's more data, get a new mbuf, tack it on, and fill it */
	remain = m->m_pkthdr.len;
	moff = 0;
	p = &top;
	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
		struct mbuf *n;

		/* Get the next new mbuf */
		if (remain >= MINCLSIZE) {
			n = m_getcl(how, m->m_type, 0);
			nsize = MCLBYTES;
		} else {
			n = m_get(how, m->m_type);
			nsize = MLEN;
		}
		if (n == NULL)
			goto nospace;

		if (top == NULL) {		/* First one, must be PKTHDR */
			if (!m_dup_pkthdr(n, m, how)) {
				m_free(n);
				goto nospace;
			}
			if ((n->m_flags & M_EXT) == 0)
				nsize = MHLEN;
			n->m_flags &= ~M_RDONLY;
		}
		n->m_len = 0;

		/* Link it into the new chain */
		*p = n;
		p = &n->m_next;

		/* Copy data from original mbuf(s) into new mbuf */
		while (n->m_len < nsize && m != NULL) {
			int chunk = min(nsize - n->m_len, m->m_len - moff);

			m_copydata(m, moff, chunk, n->m_data + n->m_len);
			moff += chunk;
			n->m_len += chunk;
			remain -= chunk;
			if (moff == m->m_len) {
				m = m->m_next;
				moff = 0;
			}
		}

		/* Check correct total mbuf length */
		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
		    ("%s: bogus m_pkthdr.len", __func__));
	}
	return (top);

nospace:
	m_freem(top);
	return (NULL);
}
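
/*
 * Usage sketch (illustrative, not part of the original file): where a
 * writable, independent deep copy is needed (m_copym()/m_copypacket()
 * copies share clusters read-only):
 *
 *	n = m_dup(m, M_NOWAIT);
 *	if (n == NULL)
 *		return (ENOBUFS);	(m itself is untouched)
 */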

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(struct mbuf *m, struct mbuf *n)
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (!M_WRITABLE(m) ||
		    (n->m_flags & M_EXTPG) != 0 ||
		    M_TRAILINGSPACE(m) < n->m_len) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

/*
 * Concatenate two pkthdr mbuf chains.
 */
void
m_catpkt(struct mbuf *m, struct mbuf *n)
{

	M_ASSERTPKTHDR(m);
	M_ASSERTPKTHDR(n);

	m->m_pkthdr.len += n->m_pkthdr.len;
	m_demote(n, 1, 0);

	m_cat(m, n);
}

void
m_adj(struct mbuf *mp, int req_len)
{
	int len = req_len;
	struct mbuf *m;
	int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		if (mp->m_flags & M_PKTHDR)
			mp->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				if (m->m_next != NULL) {
					m_freem(m->m_next);
					m->m_next = NULL;
				}
				break;
			}
			count -= m->m_len;
		}
	}
}

void
m_adj_decap(struct mbuf *mp, int len)
{
	uint8_t rsstype;

	m_adj(mp, len);
	if ((mp->m_flags & M_PKTHDR) != 0) {
		/*
		 * If flowid was calculated by card from the inner
		 * headers, move flowid to the decapsulated mbuf
		 * chain, otherwise clear.  This depends on the
		 * internals of m_adj, which keeps pkthdr as is, in
		 * particular not changing rsstype and flowid.
		 */
		rsstype = mp->m_pkthdr.rsstype;
		if ((rsstype & M_HASHTYPE_INNER) != 0) {
			M_HASHTYPE_SET(mp, rsstype & ~M_HASHTYPE_INNER);
		} else {
			M_HASHTYPE_CLEAR(mp);
		}
	}
}

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod will work
 * for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns NULL on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
struct mbuf *
m_pullup(struct mbuf *n, int len)
{
	struct mbuf *m;
	int count;
	int space;

	KASSERT((n->m_flags & M_EXTPG) == 0,
	    ("%s: unmapped mbuf %p", __func__, n));

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		m = m_get(M_NOWAIT, n->m_type);
		if (m == NULL)
			goto bad;
		if (n->m_flags & M_PKTHDR)
			m_move_pkthdr(m, n);
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	return (NULL);
}
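
/*
 * Usage sketch (illustrative, not part of the original file): the
 * canonical m_pullup() idiom in protocol input paths makes a header
 * contiguous before casting with mtod():
 *
 *	if (m->m_len < sizeof(struct ip) &&
 *	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
 *		return;			(m_pullup freed the chain)
 *	ip = mtod(m, struct ip *);
 */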

/*
 * Like m_pullup(), except a new mbuf is always allocated, and we allow
 * the amount of empty space before the data in the new mbuf to be specified
 * (in the event that the caller expects to prepend later).
 */
struct mbuf *
m_copyup(struct mbuf *n, int len, int dstoff)
{
	struct mbuf *m;
	int count, space;

	if (len > (MHLEN - dstoff))
		goto bad;
	m = m_get(M_NOWAIT, n->m_type);
	if (m == NULL)
		goto bad;
	if (n->m_flags & M_PKTHDR)
		m_move_pkthdr(m, n);
	m->m_data += dstoff;
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t),
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	return (NULL);
}

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 *
 * Note that the resulting mbufs might be read-only, because the new
 * mbuf can end up sharing an mbuf cluster with the original mbuf if
 * the "breaking point" happens to lie within a cluster mbuf.  Use the
 * M_WRITABLE() macro to check for this case.
 */
struct mbuf *
m_split(struct mbuf *m0, int len0, int wait)
{
	struct mbuf *m, *n;
	u_int len = len0, remain;

	MBUF_CHECKSLEEP(wait);
	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == NULL)
		return (NULL);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR && remain == 0) {
		n = m_gethdr(wait, m0->m_type);
		if (n == NULL)
			return (NULL);
		n->m_next = m->m_next;
		m->m_next = NULL;
		if (m0->m_pkthdr.csum_flags & CSUM_SND_TAG) {
			n->m_pkthdr.snd_tag =
			    m_snd_tag_ref(m0->m_pkthdr.snd_tag);
			n->m_pkthdr.csum_flags |= CSUM_SND_TAG;
		} else
			n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		return (n);
	} else if (m0->m_flags & M_PKTHDR) {
		n = m_gethdr(wait, m0->m_type);
		if (n == NULL)
			return (NULL);
		if (m0->m_pkthdr.csum_flags & CSUM_SND_TAG) {
			n->m_pkthdr.snd_tag =
			    m_snd_tag_ref(m0->m_pkthdr.snd_tag);
			n->m_pkthdr.csum_flags |= CSUM_SND_TAG;
		} else
			n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & (M_EXT|M_EXTPG))
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			M_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == NULL) {
				(void) m_free(n);
				return (NULL);
			} else {
				n->m_len = 0;
				return (n);
			}
		} else
			M_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = NULL;
		return (n);
	} else {
		n = m_get(wait, m->m_type);
		if (n == NULL)
			return (NULL);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & (M_EXT|M_EXTPG)) {
		n->m_data = m->m_data + len;
		mb_dupcl(n, m);
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = NULL;
	return (n);
}
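
/*
 * Usage sketch (illustrative, not part of the original file): splitting
 * a record header (a hypothetical hdrlen bytes) from its payload:
 *
 *	tail = m_split(m, hdrlen, M_NOWAIT);
 *	if (tail == NULL)
 *		...m is unchanged; handle the failure...
 *
 * On success m holds the first hdrlen bytes and tail the remainder;
 * per the comment above, check M_WRITABLE() before modifying either,
 * since the split point may fall inside a shared cluster.
 */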

/*
 * Routine to copy from device local memory into mbufs.
 * Note that the `off' argument is the offset into the first mbuf of the
 * target chain at which to begin copying the data.
 */
struct mbuf *
m_devget(char *buf, int totlen, int off, struct ifnet *ifp,
    void (*copy)(char *from, caddr_t to, u_int len))
{
	struct mbuf *m;
	struct mbuf *top = NULL, **mp = &top;
	int len;

	if (off < 0 || off > MHLEN)
		return (NULL);

	while (totlen > 0) {
		if (top == NULL) {	/* First one, must be PKTHDR */
			if (totlen + off >= MINCLSIZE) {
				m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
				len = MCLBYTES;
			} else {
				m = m_gethdr(M_NOWAIT, MT_DATA);
				len = MHLEN;

				/* Place initial small packet/header at end of mbuf */
				if (m && totlen + off + max_linkhdr <= MHLEN) {
					m->m_data += max_linkhdr;
					len -= max_linkhdr;
				}
			}
			if (m == NULL)
				return (NULL);
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = totlen;
		} else {
			if (totlen + off >= MINCLSIZE) {
				m = m_getcl(M_NOWAIT, MT_DATA, 0);
				len = MCLBYTES;
			} else {
				m = m_get(M_NOWAIT, MT_DATA);
				len = MLEN;
			}
			if (m == NULL) {
				m_freem(top);
				return (NULL);
			}
		}
		if (off) {
			m->m_data += off;
			len -= off;
			off = 0;
		}
		m->m_len = len = min(totlen, len);
		if (copy)
			copy(buf, mtod(m, caddr_t), (u_int)len);
		else
			bcopy(buf, mtod(m, caddr_t), (u_int)len);
		buf += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
	}
	return (top);
}

static void
m_copytounmapped(const struct mbuf *m, int off, int len, c_caddr_t cp)
{
	struct iovec iov;
	struct uio uio;
	int error;

	KASSERT(off >= 0, ("m_copytounmapped: negative off %d", off));
	KASSERT(len >= 0, ("m_copytounmapped: negative len %d", len));
	KASSERT(off < m->m_len, ("m_copytounmapped: len exceeds mbuf length"));
	iov.iov_base = __DECONST(caddr_t, cp);
	iov.iov_len = len;
	uio.uio_resid = len;
	uio.uio_iov = &iov;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_iovcnt = 1;
	uio.uio_offset = 0;
	uio.uio_rw = UIO_WRITE;
	error = m_unmapped_uiomove(m, off, &uio, len);
	KASSERT(error == 0, ("m_unmapped_uiomove failed: off %d, len %d", off,
	    len));
}

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(struct mbuf *m0, int off, int len, c_caddr_t cp)
{
	int mlen;
	struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == NULL)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == NULL) {
			n = m_get(M_NOWAIT, m->m_type);
			if (n == NULL)
				goto out;
			bzero(mtod(n, caddr_t), MLEN);
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		if (m->m_next == NULL && (len > m->m_len - off)) {
			m->m_len += min(len - (m->m_len - off),
			    M_TRAILINGSPACE(m));
		}
		mlen = min(m->m_len - off, len);
		if ((m->m_flags & M_EXTPG) != 0)
			m_copytounmapped(m, off, mlen, cp);
		else
			bcopy(cp, off + mtod(m, caddr_t), (u_int)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == NULL) {
			n = m_get(M_NOWAIT, m->m_type);
			if (n == NULL)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}
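
/*
 * Usage sketch (illustrative, not part of the original file; csum_off is
 * a hypothetical offset): overwriting a checksum field in place, wherever
 * it lands in the chain:
 *
 *	uint16_t csum;
 *
 *	csum = ...;
 *	m_copyback(m, csum_off, sizeof(csum), (c_caddr_t)&csum);
 *
 * Note the M_NOWAIT allocations above: on failure the write is silently
 * truncated, so callers that extend the chain should verify the result,
 * e.g. with m_length().
 */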

/*
 * Append the specified data to the indicated mbuf chain.
 * Extend the mbuf chain if the new data does not fit in
 * existing space.
 *
 * Return 1 if able to complete the job; otherwise 0.
 */
int
m_append(struct mbuf *m0, int len, c_caddr_t cp)
{
	struct mbuf *m, *n;
	int remainder, space;

	for (m = m0; m->m_next != NULL; m = m->m_next)
		;
	remainder = len;
	space = M_TRAILINGSPACE(m);
	if (space > 0) {
		/*
		 * Copy into available space.
		 */
		if (space > remainder)
			space = remainder;
		bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
		m->m_len += space;
		cp += space, remainder -= space;
	}
	while (remainder > 0) {
		/*
		 * Allocate a new mbuf; could check space
		 * and allocate a cluster instead.
		 */
		n = m_get(M_NOWAIT, m->m_type);
		if (n == NULL)
			break;
		n->m_len = min(MLEN, remainder);
		bcopy(cp, mtod(n, caddr_t), n->m_len);
		cp += n->m_len, remainder -= n->m_len;
		m->m_next = n;
		m = n;
	}
	if (m0->m_flags & M_PKTHDR)
		m0->m_pkthdr.len += len - remainder;
	return (remainder == 0);
}

static int
m_apply_extpg_one(struct mbuf *m, int off, int len,
    int (*f)(void *, void *, u_int), void *arg)
{
	void *p;
	u_int i, count, pgoff, pglen;
	int rval;

	KASSERT(PMAP_HAS_DMAP,
	    ("m_apply_extpg_one does not support unmapped mbufs"));
	off += mtod(m, vm_offset_t);
	if (off < m->m_epg_hdrlen) {
		count = min(m->m_epg_hdrlen - off, len);
		rval = f(arg, m->m_epg_hdr + off, count);
		if (rval)
			return (rval);
		len -= count;
		off = 0;
	} else
		off -= m->m_epg_hdrlen;
	pgoff = m->m_epg_1st_off;
	for (i = 0; i < m->m_epg_npgs && len > 0; i++) {
		pglen = m_epg_pagelen(m, i, pgoff);
		if (off < pglen) {
			count = min(pglen - off, len);
			p = (void *)PHYS_TO_DMAP(m->m_epg_pa[i] + pgoff + off);
			rval = f(arg, p, count);
			if (rval)
				return (rval);
			len -= count;
			off = 0;
		} else
			off -= pglen;
		pgoff = 0;
	}
	if (len > 0) {
		KASSERT(off < m->m_epg_trllen,
		    ("m_apply_extpg_one: offset beyond trailer"));
		KASSERT(len <= m->m_epg_trllen - off,
		    ("m_apply_extpg_one: length beyond trailer"));
		return (f(arg, m->m_epg_trail + off, len));
	}
	return (0);
}

/* Apply function f to the data in a single mbuf. */
static int
m_apply_one(struct mbuf *m, int off, int len,
    int (*f)(void *, void *, u_int), void *arg)
{
	if ((m->m_flags & M_EXTPG) != 0)
		return (m_apply_extpg_one(m, off, len, f, arg));
	else
		return (f(arg, mtod(m, caddr_t) + off, len));
}

/*
 * Apply function f to the data in an mbuf chain starting "off" bytes from
 * the beginning, continuing for "len" bytes.
 */
int
m_apply(struct mbuf *m, int off, int len,
    int (*f)(void *, void *, u_int), void *arg)
{
	u_int count;
	int rval;

	KASSERT(off >= 0, ("m_apply, negative off %d", off));
	KASSERT(len >= 0, ("m_apply, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
		count = min(m->m_len - off, len);
		rval = m_apply_one(m, off, count, f, arg);
		if (rval)
			return (rval);
		len -= count;
		off = 0;
		m = m->m_next;
	}
	return (0);
}
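
/*
 * Usage sketch (illustrative, not part of the original file): m_apply()
 * hands each contiguous piece of the chain to a callback, which is
 * convenient for checksums or digests over unmapped (M_EXTPG) chains.
 * A hypothetical byte-summing callback:
 *
 *	static int
 *	sum_cb(void *arg, void *data, u_int len)
 *	{
 *		uint32_t *sum = arg;
 *		u_char *p = data;
 *
 *		while (len-- > 0)
 *			*sum += *p++;
 *		return (0);		(non-zero aborts the walk)
 *	}
 *
 *	uint32_t sum = 0;
 *	(void)m_apply(m, 0, m->m_pkthdr.len, sum_cb, &sum);
 */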

/*
 * Return a pointer to mbuf/offset of location in mbuf chain.
 */
struct mbuf *
m_getptr(struct mbuf *m, int loc, int *off)
{

	while (loc >= 0) {
		/* Normal end of search. */
		if (m->m_len > loc) {
			*off = loc;
			return (m);
		} else {
			loc -= m->m_len;
			if (m->m_next == NULL) {
				if (loc == 0) {
					/* Point at the end of valid data. */
					*off = m->m_len;
					return (m);
				}
				return (NULL);
			}
			m = m->m_next;
		}
	}
	return (NULL);
}

void
m_print(const struct mbuf *m, int maxlen)
{
	int len;
	int pdata;
	const struct mbuf *m2;

	if (m == NULL) {
		printf("mbuf: %p\n", m);
		return;
	}

	if (m->m_flags & M_PKTHDR)
		len = m->m_pkthdr.len;
	else
		len = -1;
	m2 = m;
	while (m2 != NULL && (len == -1 || len)) {
		pdata = m2->m_len;
		if (maxlen != -1 && pdata > maxlen)
			pdata = maxlen;
		printf("mbuf: %p len: %d, next: %p, %b%s", m2, m2->m_len,
		    m2->m_next, m2->m_flags, "\20\20freelist\17skipfw"
		    "\11proto5\10proto4\7proto3\6proto2\5proto1\4rdonly"
		    "\3eor\2pkthdr\1ext", pdata ? "" : "\n");
		if (pdata)
			printf(", %*D\n", pdata, (u_char *)m2->m_data, "-");
		if (len != -1)
			len -= m2->m_len;
		m2 = m2->m_next;
	}
	if (len > 0)
		printf("%d bytes unaccounted for.\n", len);
	return;
}

u_int
m_fixhdr(struct mbuf *m0)
{
	u_int len;

	len = m_length(m0, NULL);
	m0->m_pkthdr.len = len;
	return (len);
}

u_int
m_length(struct mbuf *m0, struct mbuf **last)
{
	struct mbuf *m;
	u_int len;

	len = 0;
	for (m = m0; m != NULL; m = m->m_next) {
		len += m->m_len;
		if (m->m_next == NULL)
			break;
	}
	if (last != NULL)
		*last = m;
	return (len);
}
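
/*
 * Usage sketch (illustrative, not part of the original file): m_length()
 * is handy both for validating a chain and for locating its tail before
 * an append:
 *
 *	struct mbuf *last;
 *	u_int len;
 *
 *	len = m_length(m0, &last);
 *	KASSERT(len == m0->m_pkthdr.len, ("inconsistent pkthdr length"));
 */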

/*
 * Defragment an mbuf chain, returning the shortest possible
 * chain of mbufs and clusters.  If allocation fails and
 * this cannot be completed, NULL will be returned, but
 * the passed in chain will be unchanged.  Upon success,
 * the original chain will be freed, and the new chain
 * will be returned.
 *
 * If a chain without a packet header is passed in, the original
 * chain will be returned unharmed.
 */
struct mbuf *
m_defrag(struct mbuf *m0, int how)
{
	struct mbuf *m_new = NULL, *m_final = NULL;
	int progress = 0, length;

	MBUF_CHECKSLEEP(how);
	if (!(m0->m_flags & M_PKTHDR))
		return (m0);

	m_fixhdr(m0); /* Needed sanity check */

#ifdef MBUF_STRESS_TEST
	if (m_defragrandomfailures) {
		int temp = arc4random() & 0xff;
		if (temp == 0xba)
			goto nospace;
	}
#endif

	if (m0->m_pkthdr.len > MHLEN)
		m_final = m_getcl(how, MT_DATA, M_PKTHDR);
	else
		m_final = m_gethdr(how, MT_DATA);

	if (m_final == NULL)
		goto nospace;

	if (m_dup_pkthdr(m_final, m0, how) == 0)
		goto nospace;

	m_new = m_final;

	while (progress < m0->m_pkthdr.len) {
		length = m0->m_pkthdr.len - progress;
		if (length > MCLBYTES)
			length = MCLBYTES;

		if (m_new == NULL) {
			if (length > MLEN)
				m_new = m_getcl(how, MT_DATA, 0);
			else
				m_new = m_get(how, MT_DATA);
			if (m_new == NULL)
				goto nospace;
		}

		m_copydata(m0, progress, length, mtod(m_new, caddr_t));
		progress += length;
		m_new->m_len = length;
		if (m_new != m_final)
			m_cat(m_final, m_new);
		m_new = NULL;
	}
#ifdef MBUF_STRESS_TEST
	if (m0->m_next == NULL)
		m_defraguseless++;
#endif
	m_freem(m0);
	m0 = m_final;
#ifdef MBUF_STRESS_TEST
	m_defragpackets++;
	m_defragbytes += m0->m_pkthdr.len;
#endif
	return (m0);
nospace:
#ifdef MBUF_STRESS_TEST
	m_defragfailure++;
#endif
	if (m_final)
		m_freem(m_final);
	return (NULL);
}
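
/*
 * Usage sketch (illustrative, not part of the original file): network
 * drivers typically call m_defrag() after a DMA mapping attempt fails
 * with EFBIG because the chain has too many segments:
 *
 *	n = m_defrag(m, M_NOWAIT);
 *	if (n == NULL) {
 *		m_freem(m);		(m_defrag left m unchanged)
 *		return (ENOBUFS);
 *	}
 *	m = n;				(original chain consumed on success)
 */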

/*
 * Return the number of fragments an mbuf will use.  This is usually
 * used as a proxy for the number of scatter/gather elements needed by
 * a DMA engine to access an mbuf.  In general mapped mbufs are
 * assumed to be backed by physically contiguous buffers that only
 * need a single fragment.  Unmapped mbufs, on the other hand, can
 * span disjoint physical pages.
 */
static int
frags_per_mbuf(struct mbuf *m)
{
	int frags;

	if ((m->m_flags & M_EXTPG) == 0)
		return (1);

	/*
	 * The header and trailer are counted as a single fragment
	 * each when present.
	 *
	 * XXX: This overestimates the number of fragments by assuming
	 * all the backing physical pages are disjoint.
	 */
	frags = 0;
	if (m->m_epg_hdrlen != 0)
		frags++;
	frags += m->m_epg_npgs;
	if (m->m_epg_trllen != 0)
		frags++;

	return (frags);
}

/*
 * Defragment an mbuf chain, returning at most maxfrags separate
 * mbufs+clusters.  If this is not possible NULL is returned and
 * the original mbuf chain is left in its present (potentially
 * modified) state.  We use two techniques: collapsing consecutive
 * mbufs and replacing consecutive mbufs by a cluster.
 *
 * NB: this should really be named m_defrag but that name is taken
 */
struct mbuf *
m_collapse(struct mbuf *m0, int how, int maxfrags)
{
	struct mbuf *m, *n, *n2, **prev;
	u_int curfrags;

	/*
	 * Calculate the current number of frags.
	 */
	curfrags = 0;
	for (m = m0; m != NULL; m = m->m_next)
		curfrags += frags_per_mbuf(m);
	/*
	 * First, try to collapse mbufs.  Note that we always collapse
	 * towards the front so we don't need to deal with moving the
	 * pkthdr.  This may be suboptimal if the first mbuf has much
	 * less data than the following.
	 */
	m = m0;
again:
	for (;;) {
		n = m->m_next;
		if (n == NULL)
			break;
		if (M_WRITABLE(m) &&
		    n->m_len < M_TRAILINGSPACE(m)) {
			m_copydata(n, 0, n->m_len,
			    mtod(m, char *) + m->m_len);
			m->m_len += n->m_len;
			m->m_next = n->m_next;
			curfrags -= frags_per_mbuf(n);
			m_free(n);
			if (curfrags <= maxfrags)
				return (m0);
		} else
			m = n;
	}
	KASSERT(maxfrags > 1,
	    ("maxfrags %u, but normal collapse failed", maxfrags));
	/*
	 * Collapse consecutive mbufs to a cluster.
	 */
	prev = &m0->m_next;		/* NB: not the first mbuf */
	while ((n = *prev) != NULL) {
		if ((n2 = n->m_next) != NULL &&
		    n->m_len + n2->m_len < MCLBYTES) {
			m = m_getcl(how, MT_DATA, 0);
			if (m == NULL)
				goto bad;
			m_copydata(n, 0, n->m_len, mtod(m, char *));
			m_copydata(n2, 0, n2->m_len,
			    mtod(m, char *) + n->m_len);
			m->m_len = n->m_len + n2->m_len;
			m->m_next = n2->m_next;
			*prev = m;
			curfrags += 1;	/* For the new cluster */
			curfrags -= frags_per_mbuf(n);
			curfrags -= frags_per_mbuf(n2);
			m_free(n);
			m_free(n2);
			if (curfrags <= maxfrags)
				return (m0);
			/*
			 * Still not there, try the normal collapse
			 * again before we allocate another cluster.
			 */
			goto again;
		}
		prev = &n->m_next;
	}
	/*
	 * No place where we can collapse to a cluster; punt.
	 * This can occur if, for example, you request 2 frags
	 * but the packet requires that both be clusters (we
	 * never reallocate the first mbuf to avoid moving the
	 * packet header).
	 */
bad:
	return (NULL);
}
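
/*
 * Usage sketch (illustrative, not part of the original file): unlike
 * m_defrag(), m_collapse() stops as soon as the chain fits in maxfrags
 * segments, e.g. for a device limited to a hypothetical NIC_MAXSEGS
 * scatter/gather entries:
 *
 *	n = m_collapse(m, M_NOWAIT, NIC_MAXSEGS);
 *	if (n == NULL) {
 *		m_freem(m);
 *		return (ENOBUFS);
 *	}
 *	m = n;
 */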

#ifdef MBUF_STRESS_TEST

/*
 * Fragment an mbuf chain.  There's no reason you'd ever want to do
 * this in normal usage, but it's great for stress testing various
 * mbuf consumers.
 *
 * If fragmentation is not possible, the original chain will be
 * returned.
 *
 * Possible length values:
 * 0	 no fragmentation will occur
 * > 0	each fragment will be of the specified length
 * -1	each fragment will be the same random value in length
 * -2	each fragment's length will be entirely random
 * (Random values range from 1 to 255)
 */
struct mbuf *
m_fragment(struct mbuf *m0, int how, int length)
{
	struct mbuf *m_first, *m_last;
	int divisor = 255, progress = 0, fraglen;

	if (!(m0->m_flags & M_PKTHDR))
		return (m0);

	if (length == 0 || length < -2)
		return (m0);
	if (length > MCLBYTES)
		length = MCLBYTES;
	if (length < 0 && divisor > MCLBYTES)
		divisor = MCLBYTES;
	if (length == -1)
		length = 1 + (arc4random() % divisor);
	if (length > 0)
		fraglen = length;

	m_fixhdr(m0); /* Needed sanity check */

	m_first = m_getcl(how, MT_DATA, M_PKTHDR);
	if (m_first == NULL)
		goto nospace;

	if (m_dup_pkthdr(m_first, m0, how) == 0)
		goto nospace;

	m_last = m_first;

	while (progress < m0->m_pkthdr.len) {
		if (length == -2)
			fraglen = 1 + (arc4random() % divisor);
		if (fraglen > m0->m_pkthdr.len - progress)
			fraglen = m0->m_pkthdr.len - progress;

		if (progress != 0) {
			struct mbuf *m_new = m_getcl(how, MT_DATA, 0);
			if (m_new == NULL)
				goto nospace;

			m_last->m_next = m_new;
			m_last = m_new;
		}

		m_copydata(m0, progress, fraglen, mtod(m_last, caddr_t));
		progress += fraglen;
		m_last->m_len = fraglen;
	}
	m_freem(m0);
	m0 = m_first;
	return (m0);
nospace:
	if (m_first)
		m_freem(m_first);
	/* Return the original chain on failure */
	return (m0);
}

#endif

/*
 * Free pages from mbuf_ext_pgs, assuming they were allocated via
 * vm_page_alloc() and aren't associated with any object.  Complement
 * to allocator from m_uiotombuf_nomap().
 */
void
mb_free_mext_pgs(struct mbuf *m)
{
	vm_page_t pg;

	M_ASSERTEXTPG(m);
	for (int i = 0; i < m->m_epg_npgs; i++) {
		pg = PHYS_TO_VM_PAGE(m->m_epg_pa[i]);
		vm_page_unwire_noq(pg);
		vm_page_free(pg);
	}
}

static struct mbuf *
m_uiotombuf_nomap(struct uio *uio, int how, int len, int maxseg, int flags)
{
	struct mbuf *m, *mb, *prev;
	vm_page_t pg_array[MBUF_PEXT_MAX_PGS];
	int error, length, i, needed;
	ssize_t total;
	int pflags = malloc2vm_flags(how) | VM_ALLOC_NOOBJ | VM_ALLOC_NODUMP |
	    VM_ALLOC_WIRED;

	MPASS((flags & M_PKTHDR) == 0);
	MPASS((how & M_ZERO) == 0);

	/*
	 * len can be zero or an arbitrary large value bound by
	 * the total data supplied by the uio.
	 */
	if (len > 0)
		total = MIN(uio->uio_resid, len);
	else
		total = uio->uio_resid;

	if (maxseg == 0)
		maxseg = MBUF_PEXT_MAX_PGS * PAGE_SIZE;

	/*
	 * If total is zero, return an empty mbuf.  This can occur
	 * for TLS 1.0 connections which send empty fragments as
	 * a countermeasure against the known-IV weakness in CBC
	 * ciphersuites.
	 */
	if (__predict_false(total == 0)) {
		mb = mb_alloc_ext_pgs(how, mb_free_mext_pgs);
		if (mb == NULL)
			return (NULL);
		mb->m_epg_flags = EPG_FLAG_ANON;
		return (mb);
	}

	/*
	 * Allocate the pages
	 */
	m = NULL;
	while (total > 0) {
		mb = mb_alloc_ext_pgs(how, mb_free_mext_pgs);
		if (mb == NULL)
			goto failed;
		if (m == NULL)
			m = mb;
		else
			prev->m_next = mb;
		prev = mb;
		mb->m_epg_flags = EPG_FLAG_ANON;
		needed = length = MIN(maxseg, total);
		for (i = 0; needed > 0; i++, needed -= PAGE_SIZE) {
retry_page:
			pg_array[i] = vm_page_alloc(NULL, 0, pflags);
			if (pg_array[i] == NULL) {
				if (how & M_NOWAIT) {
					goto failed;
				} else {
					vm_wait(NULL);
					goto retry_page;
				}
			}
			mb->m_epg_pa[i] = VM_PAGE_TO_PHYS(pg_array[i]);
			mb->m_epg_npgs++;
		}
		mb->m_epg_last_len = length - PAGE_SIZE * (mb->m_epg_npgs - 1);
		MBUF_EXT_PGS_ASSERT_SANITY(mb);
		total -= length;
		error = uiomove_fromphys(pg_array, 0, length, uio);
		if (error != 0)
			goto failed;
		mb->m_len = length;
		mb->m_ext.ext_size += PAGE_SIZE * mb->m_epg_npgs;
		if (flags & M_PKTHDR)
			m->m_pkthdr.len += length;
	}
	return (m);

failed:
	m_freem(m);
	return (NULL);
}

/*
 * Copy the contents of uio into a properly sized mbuf chain.
 */
struct mbuf *
m_uiotombuf(struct uio *uio, int how, int len, int align, int flags)
{
	struct mbuf *m, *mb;
	int error, length;
	ssize_t total;
	int progress = 0;

	if (flags & M_EXTPG)
		return (m_uiotombuf_nomap(uio, how, len, align, flags));

	/*
	 * len can be zero or an arbitrary large value bound by
	 * the total data supplied by the uio.
	 */
	if (len > 0)
		total = (uio->uio_resid < len) ? uio->uio_resid : len;
	else
		total = uio->uio_resid;

	/*
	 * The smallest unit returned by m_getm2() is a single mbuf
	 * with pkthdr.  We can't align past it.
	 */
	if (align >= MHLEN)
		return (NULL);

	/*
	 * Give us the full allocation or nothing.
	 * If len is zero return the smallest empty mbuf.
	 */
	m = m_getm2(NULL, max(total + align, 1), how, MT_DATA, flags);
	if (m == NULL)
		return (NULL);
	m->m_data += align;

	/* Fill all mbufs with uio data and update header information. */
	for (mb = m; mb != NULL; mb = mb->m_next) {
		length = min(M_TRAILINGSPACE(mb), total - progress);

		error = uiomove(mtod(mb, void *), length, uio);
		if (error) {
			m_freem(m);
			return (NULL);
		}

		mb->m_len = length;
		progress += length;
		if (flags & M_PKTHDR)
			m->m_pkthdr.len += length;
	}
	KASSERT(progress == total, ("%s: progress != total", __func__));

	return (m);
}
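
/*
 * Usage sketch (illustrative, not part of the original file): socket code
 * builds an mbuf chain directly from user data held in a uio, reserving
 * room for protocol headers via the align argument:
 *
 *	m = m_uiotombuf(uio, M_WAITOK, 0, max_hdr, M_PKTHDR);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *
 * With len == 0 the chain is sized by uio->uio_resid alone.
 */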

/*
 * Copy data to/from an unmapped mbuf into a uio limited by len if set.
 */
int
m_unmapped_uiomove(const struct mbuf *m, int m_off, struct uio *uio, int len)
{
	vm_page_t pg;
	int error, i, off, pglen, pgoff, seglen, segoff;

	M_ASSERTEXTPG(m);
	error = 0;

	/* Skip over any data removed from the front. */
	off = mtod(m, vm_offset_t);

	off += m_off;
	if (m->m_epg_hdrlen != 0) {
		if (off >= m->m_epg_hdrlen) {
			off -= m->m_epg_hdrlen;
		} else {
			seglen = m->m_epg_hdrlen - off;
			segoff = off;
			seglen = min(seglen, len);
			off = 0;
			len -= seglen;
			error = uiomove(__DECONST(void *,
			    &m->m_epg_hdr[segoff]), seglen, uio);
		}
	}
	pgoff = m->m_epg_1st_off;
	for (i = 0; i < m->m_epg_npgs && error == 0 && len > 0; i++) {
		pglen = m_epg_pagelen(m, i, pgoff);
		if (off >= pglen) {
			off -= pglen;
			pgoff = 0;
			continue;
		}
		seglen = pglen - off;
		segoff = pgoff + off;
		off = 0;
		seglen = min(seglen, len);
		len -= seglen;
		pg = PHYS_TO_VM_PAGE(m->m_epg_pa[i]);
		error = uiomove_fromphys(&pg, segoff, seglen, uio);
		pgoff = 0;
	}
	if (len != 0 && error == 0) {
		KASSERT((off + len) <= m->m_epg_trllen,
		    ("off + len > trail (%d + %d > %d, m_off = %d)", off, len,
		    m->m_epg_trllen, m_off));
		error = uiomove(__DECONST(void *, &m->m_epg_trail[off]),
		    len, uio);
	}
	return (error);
}

/*
 * Copy an mbuf chain into a uio limited by len if set.
 */
int
m_mbuftouio(struct uio *uio, const struct mbuf *m, int len)
{
	int error, length, total;
	int progress = 0;

	if (len > 0)
		total = min(uio->uio_resid, len);
	else
		total = uio->uio_resid;

	/* Fill the uio with data from the mbufs. */
	for (; m != NULL; m = m->m_next) {
		length = min(m->m_len, total - progress);

		if ((m->m_flags & M_EXTPG) != 0)
			error = m_unmapped_uiomove(m, 0, uio, length);
		else
			error = uiomove(mtod(m, void *), length, uio);
		if (error)
			return (error);

		progress += length;
	}

	return (0);
}
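
/*
 * Usage sketch (illustrative, not part of the original file): the receive
 * side of a socket copies a chain out toward user space; len == 0 means
 * bounded only by uio_resid:
 *
 *	error = m_mbuftouio(uio, m, 0);
 *	if (error != 0)
 *		return (error);
 */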

/*
 * Create a writable copy of the mbuf chain.  While doing this
 * we compact the chain with a goal of producing a chain with
 * at most two mbufs.  The second mbuf in this chain is likely
 * to be a cluster.  The primary purpose of this work is to create
 * a writable packet for encryption, compression, etc.  The
 * secondary goal is to linearize the data so the data can be
 * passed to crypto hardware in the most efficient manner possible.
 */
struct mbuf *
m_unshare(struct mbuf *m0, int how)
{
	struct mbuf *m, *mprev;
	struct mbuf *n, *mfirst, *mlast;
	int len, off;

	mprev = NULL;
	for (m = m0; m != NULL; m = mprev->m_next) {
		/*
		 * Regular mbufs are ignored unless there's a cluster
		 * in front of it that we can use to coalesce.  We do
		 * the latter mainly so later clusters can be coalesced
		 * also w/o having to handle them specially (i.e. convert
		 * mbuf+cluster -> cluster).  This optimization is heavily
		 * influenced by the assumption that we're running over
		 * Ethernet where MCLBYTES is large enough that the max
		 * packet size will permit lots of coalescing into a
		 * single cluster.  This in turn permits efficient
		 * crypto operations, especially when using hardware.
		 */
		if ((m->m_flags & M_EXT) == 0) {
			if (mprev && (mprev->m_flags & M_EXT) &&
			    m->m_len <= M_TRAILINGSPACE(mprev)) {
				/* XXX: this ignores mbuf types */
				memcpy(mtod(mprev, caddr_t) + mprev->m_len,
				    mtod(m, caddr_t), m->m_len);
				mprev->m_len += m->m_len;
				mprev->m_next = m->m_next;	/* unlink from chain */
				m_free(m);			/* reclaim mbuf */
			} else {
				mprev = m;
			}
			continue;
		}
		/*
		 * Writable mbufs are left alone (for now).
		 */
		if (M_WRITABLE(m)) {
			mprev = m;
			continue;
		}

		/*
		 * Not writable, replace with a copy or coalesce with
		 * the previous mbuf if possible (since we have to copy
		 * it anyway, we try to reduce the number of mbufs and
		 * clusters so that future work is easier).
		 */
		KASSERT(m->m_flags & M_EXT, ("m_flags 0x%x", m->m_flags));
		/* NB: we only coalesce into a cluster or larger */
		if (mprev != NULL && (mprev->m_flags & M_EXT) &&
		    m->m_len <= M_TRAILINGSPACE(mprev)) {
			/* XXX: this ignores mbuf types */
			memcpy(mtod(mprev, caddr_t) + mprev->m_len,
			    mtod(m, caddr_t), m->m_len);
			mprev->m_len += m->m_len;
			mprev->m_next = m->m_next;	/* unlink from chain */
			m_free(m);			/* reclaim mbuf */
			continue;
		}

		/*
		 * Allocate new space to hold the copy and copy the data.
		 * We deal with jumbo mbufs (i.e. m_len > MCLBYTES) by
		 * splitting them into clusters.  We could just malloc a
		 * buffer and make it external but too many device drivers
		 * don't know how to break up the non-contiguous memory when
		 * doing DMA.
		 */
		n = m_getcl(how, m->m_type, m->m_flags & M_COPYFLAGS);
		if (n == NULL) {
			m_freem(m0);
			return (NULL);
		}
		if (m->m_flags & M_PKTHDR) {
			KASSERT(mprev == NULL, ("%s: m0 %p, m %p has M_PKTHDR",
			    __func__, m0, m));
			m_move_pkthdr(n, m);
		}
		len = m->m_len;
		off = 0;
		mfirst = n;
		mlast = NULL;
		for (;;) {
			int cc = min(len, MCLBYTES);
			memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off, cc);
			n->m_len = cc;
			if (mlast != NULL)
				mlast->m_next = n;
			mlast = n;
#if 0
			newipsecstat.ips_clcopied++;
#endif

			len -= cc;
			if (len <= 0)
				break;
			off += cc;

			n = m_getcl(how, m->m_type, m->m_flags & M_COPYFLAGS);
			if (n == NULL) {
				m_freem(mfirst);
				m_freem(m0);
				return (NULL);
			}
		}
		n->m_next = m->m_next;
		if (mprev == NULL)
			m0 = mfirst;		/* new head of chain */
		else
			mprev->m_next = mfirst;	/* replace old mbuf */
		m_free(m);			/* release old mbuf */
		mprev = mfirst;
	}
	return (m0);
}
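
/*
 * Usage sketch (illustrative, not part of the original file): transforms
 * that encrypt or compress in place, IPsec-style, need every byte of the
 * chain writable first:
 *
 *	m = m_unshare(m, M_NOWAIT);
 *	if (m == NULL)
 *		return (ENOBUFS);	(the original chain was freed)
 */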

#ifdef MBUF_PROFILING

#define MP_BUCKETS 32 /* don't just change this as things may overflow.*/
struct mbufprofile {
	uintmax_t wasted[MP_BUCKETS];
	uintmax_t used[MP_BUCKETS];
	uintmax_t segments[MP_BUCKETS];
} mbprof;

#define MP_MAXDIGITS 21	/* strlen("16,000,000,000,000,000,000") == 21 */
#define MP_NUMLINES 6
#define MP_NUMSPERLINE 16
#define MP_EXTRABYTES 64	/* > strlen("used:\nwasted:\nsegments:\n") */
/* work out max space needed and add a bit of spare space too */
#define MP_MAXLINE ((MP_MAXDIGITS+1) * MP_NUMSPERLINE)
#define MP_BUFSIZE ((MP_MAXLINE * MP_NUMLINES) + 1 + MP_EXTRABYTES)

char mbprofbuf[MP_BUFSIZE];

void
m_profile(struct mbuf *m)
{
	int segments = 0;
	int used = 0;
	int wasted = 0;

	while (m) {
		segments++;
		used += m->m_len;
		if (m->m_flags & M_EXT) {
			wasted += MHLEN - sizeof(m->m_ext) +
			    m->m_ext.ext_size - m->m_len;
		} else {
			if (m->m_flags & M_PKTHDR)
				wasted += MHLEN - m->m_len;
			else
				wasted += MLEN - m->m_len;
		}
		m = m->m_next;
	}
	/* be paranoid.. it helps */
	if (segments > MP_BUCKETS - 1)
		segments = MP_BUCKETS - 1;
	if (used > 100000)
		used = 100000;
	if (wasted > 100000)
		wasted = 100000;
	/* store in the appropriate bucket */
	/* don't bother locking.  if it's slightly off, so what? */
	mbprof.segments[segments]++;
	mbprof.used[fls(used)]++;
	mbprof.wasted[fls(wasted)]++;
}

static void
mbprof_textify(void)
{
	int offset;
	char *c;
	uint64_t *p;

	p = &mbprof.wasted[0];
	c = mbprofbuf;
	offset = snprintf(c, MP_MAXLINE + 10,
	    "wasted:\n"
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#ifdef BIG_ARRAY
	p = &mbprof.wasted[16];
	c += offset;
	offset = snprintf(c, MP_MAXLINE,
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#endif
	p = &mbprof.used[0];
	c += offset;
	offset = snprintf(c, MP_MAXLINE + 10,
	    "used:\n"
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#ifdef BIG_ARRAY
	p = &mbprof.used[16];
	c += offset;
	offset = snprintf(c, MP_MAXLINE,
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#endif
	p = &mbprof.segments[0];
	c += offset;
	offset = snprintf(c, MP_MAXLINE + 10,
	    "segments:\n"
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#ifdef BIG_ARRAY
	p = &mbprof.segments[16];
	c += offset;
	offset = snprintf(c, MP_MAXLINE,
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#endif
}

static int
mbprof_handler(SYSCTL_HANDLER_ARGS)
{
	int error;

	mbprof_textify();
	error = SYSCTL_OUT(req, mbprofbuf, strlen(mbprofbuf) + 1);
	return (error);
}

static int
mbprof_clr_handler(SYSCTL_HANDLER_ARGS)
{
	int clear, error;

	clear = 0;
	error = sysctl_handle_int(oidp, &clear, 0, req);
	if (error || !req->newptr)
		return (error);

	if (clear) {
		bzero(&mbprof, sizeof(mbprof));
	}

	return (error);
}
"I", 2271 "clear mbuf profiling statistics"); 2272 #endif 2273