/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_mac.h"
#include "opt_param.h"
#include "opt_mbuf_stress_test.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mac.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/uio.h>

int	max_linkhdr;
int	max_protohdr;
int	max_hdr;
int	max_datalen;
#ifdef MBUF_STRESS_TEST
int	m_defragpackets;
int	m_defragbytes;
int	m_defraguseless;
int	m_defragfailure;
int	m_defragrandomfailures;
#endif

/*
 * sysctl(8) exported objects
 */
SYSCTL_DECL(_kern_ipc);
SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
    &max_linkhdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
    &max_protohdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
    &max_datalen, 0, "");
#ifdef MBUF_STRESS_TEST
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
    &m_defragpackets, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
    &m_defragbytes, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
    &m_defraguseless, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
    &m_defragfailure, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
    &m_defragrandomfailures, 0, "");
#endif

/*
 * "Move" mbuf pkthdr from "from" to "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 */
void
m_move_pkthdr(struct mbuf *to, struct mbuf *from)
{

#if 0
	/* see below for why these are not enabled */
	M_ASSERTPKTHDR(to);
	/* Note: with MAC, this may not be a good assertion. */
	KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags),
	    ("m_move_pkthdr: to has tags"));
#endif
	KASSERT((to->m_flags & M_EXT) == 0, ("m_move_pkthdr: to has cluster"));
#ifdef MAC
	/*
	 * XXXMAC: It could be this should also occur for non-MAC?
	 */
	if (to->m_flags & M_PKTHDR)
		m_tag_delete_chain(to, NULL);
#endif
	to->m_flags = from->m_flags & M_COPYFLAGS;
	to->m_data = to->m_pktdat;
	to->m_pkthdr = from->m_pkthdr;		/* especially tags */
	SLIST_INIT(&from->m_pkthdr.tags);	/* purge tags from src */
	from->m_flags &= ~M_PKTHDR;
}

/*
 * Duplicate "from"'s mbuf pkthdr in "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 * In particular, this does a deep copy of the packet tags.
 */
int
m_dup_pkthdr(struct mbuf *to, struct mbuf *from, int how)
{

#if 0
	/*
	 * The mbuf allocator only initializes the pkthdr
	 * when the mbuf is allocated with MGETHDR. Many users
	 * (e.g. m_copy*, m_prepend) use MGET and then
	 * smash the pkthdr as needed causing these
	 * assertions to trip.  For now just disable them.
	 */
	M_ASSERTPKTHDR(to);
	/* Note: with MAC, this may not be a good assertion. */
	KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags), ("m_dup_pkthdr: to has tags"));
#endif
#ifdef MAC
	if (to->m_flags & M_PKTHDR)
		m_tag_delete_chain(to, NULL);
#endif
	to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
	if ((to->m_flags & M_EXT) == 0)
		to->m_data = to->m_pktdat;
	to->m_pkthdr = from->m_pkthdr;
	SLIST_INIT(&to->m_pkthdr.tags);
	return (m_tag_copy_chain(to, from, MBTOM(how)));
}

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(struct mbuf *m, int len, int how)
{
	struct mbuf *mn;

	if (m->m_flags & M_PKTHDR)
		MGETHDR(mn, how, m->m_type);
	else
		MGET(mn, how, m->m_type);
	if (mn == NULL) {
		m_freem(m);
		return (NULL);
	}
	if (m->m_flags & M_PKTHDR)
		M_MOVE_PKTHDR(mn, m);
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}
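
/*
 * Illustrative sketch (intentionally compiled out): a minimal use of
 * m_prepend() to gain room for a hypothetical 8-byte protocol header.
 * Most code reaches this function through the M_PREPEND() macro; the
 * direct call is shown here only for clarity.  "MYPROTO_HDRLEN" and
 * "myproto_encap" are made-up names and are not part of this file.
 */
#if 0
#define	MYPROTO_HDRLEN	8

static struct mbuf *
myproto_encap(struct mbuf *m)
{

	/* m_prepend() frees the chain and returns NULL on failure. */
	m = m_prepend(m, MYPROTO_HDRLEN, M_DONTWAIT);
	if (m == NULL)
		return (NULL);
	/* The leading mbuf now has MYPROTO_HDRLEN bytes of room. */
	bzero(mtod(m, caddr_t), MYPROTO_HDRLEN);
	return (m);
}
#endif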

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_TRYWAIT/M_DONTWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
struct mbuf *
m_copym(struct mbuf *m, int off0, int len, int wait)
{
	struct mbuf *n, **np;
	int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	KASSERT(off >= 0, ("m_copym, negative off %d", off));
	KASSERT(len >= 0, ("m_copym, negative len %d", len));
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == NULL) {
			KASSERT(len == M_COPYALL,
			    ("m_copym, length > size of mbuf chain"));
			break;
		}
		if (copyhdr)
			MGETHDR(n, wait, m->m_type);
		else
			MGET(n, wait, m->m_type);
		*np = n;
		if (n == NULL)
			goto nospace;
		if (copyhdr) {
			if (!m_dup_pkthdr(n, m, wait))
				goto nospace;
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data + off;
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
			MEXT_ADD_REF(m);
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (u_int)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == NULL)
		mbstat.m_mcfail++;	/* XXX: No consistency. */

	return (top);
nospace:
	m_freem(top);
	mbstat.m_mcfail++;	/* XXX: No consistency. */
	return (NULL);
}

/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 * Preserve alignment of the first mbuf so if the creator has left
 * some room at the beginning (e.g. for inserting protocol headers)
 * the copies still have the room available.
 */
struct mbuf *
m_copypacket(struct mbuf *m, int how)
{
	struct mbuf *top, *n, *o;

	MGET(n, how, m->m_type);
	top = n;
	if (n == NULL)
		goto nospace;

	if (!m_dup_pkthdr(n, m, how))
		goto nospace;
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data;
		n->m_ext = m->m_ext;
		n->m_flags |= M_EXT;
		MEXT_ADD_REF(m);
	} else {
		n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		MGET(o, how, m->m_type);
		if (o == NULL)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data;
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
			MEXT_ADD_REF(m);
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return (top);
nospace:
	m_freem(top);
	mbstat.m_mcfail++;	/* XXX: No consistency. */
	return (NULL);
}
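
/*
 * Illustrative sketch (intentionally compiled out): taking read-only
 * copies with m_copym() and m_copypacket().  The copies share clusters
 * with the original chain (only reference counts are bumped), so they
 * must not be written to; m_dup() provides a writable copy instead.
 * "myproto_segment" is a made-up helper, not part of this file.
 */
#if 0
static struct mbuf *
myproto_segment(struct mbuf *m, int off, int len, int how)
{

	if (off == 0 && len == M_COPYALL)
		return (m_copypacket(m, how));	/* whole packet, faster path */
	return (m_copym(m, off, len, how));	/* just the requested range */
}
#endif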

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
{
	u_int count;

	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}

/*
 * Copy a packet header mbuf chain into a completely new chain, including
 * copying any mbuf clusters.  Use this instead of m_copypacket() when
 * you need a writable copy of an mbuf chain.
 */
struct mbuf *
m_dup(struct mbuf *m, int how)
{
	struct mbuf **p, *top = NULL;
	int remain, moff, nsize;

	/* Sanity check */
	if (m == NULL)
		return (NULL);
	M_ASSERTPKTHDR(m);

	/* While there's more data, get a new mbuf, tack it on, and fill it */
	remain = m->m_pkthdr.len;
	moff = 0;
	p = &top;
	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
		struct mbuf *n;

		/* Get the next new mbuf */
		MGET(n, how, m->m_type);
		if (n == NULL)
			goto nospace;
		if (top == NULL) {		/* first one, must be PKTHDR */
			if (!m_dup_pkthdr(n, m, how))
				goto nospace;
			nsize = MHLEN;
		} else				/* not the first one */
			nsize = MLEN;
		if (remain >= MINCLSIZE) {
			MCLGET(n, how);
			if ((n->m_flags & M_EXT) == 0) {
				(void)m_free(n);
				goto nospace;
			}
			nsize = MCLBYTES;
		}
		n->m_len = 0;

		/* Link it into the new chain */
		*p = n;
		p = &n->m_next;

		/* Copy data from original mbuf(s) into new mbuf */
		while (n->m_len < nsize && m != NULL) {
			int chunk = min(nsize - n->m_len, m->m_len - moff);

			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
			moff += chunk;
			n->m_len += chunk;
			remain -= chunk;
			if (moff == m->m_len) {
				m = m->m_next;
				moff = 0;
			}
		}

		/* Check correct total mbuf length */
		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
		    ("%s: bogus m_pkthdr.len", __func__));
	}
	return (top);

nospace:
	m_freem(top);
	mbstat.m_mcfail++;	/* XXX: No consistency. */
	return (NULL);
}

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(struct mbuf *m, struct mbuf *n)
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}
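
/*
 * Illustrative sketch (intentionally compiled out): using m_copydata()
 * to peek at a header that may span several mbufs without modifying
 * the chain.  "struct myproto_hdr" and "myproto_peek" are made-up
 * names used only for the example.
 */
#if 0
struct myproto_hdr {
	u_int16_t	mh_type;
	u_int16_t	mh_len;
};

static int
myproto_peek(const struct mbuf *m, struct myproto_hdr *hdr)
{

	if (m->m_pkthdr.len < sizeof(*hdr))
		return (0);		/* chain too short */
	m_copydata(m, 0, sizeof(*hdr), (caddr_t)hdr);
	return (1);
}
#endif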

void
m_adj(struct mbuf *mp, int req_len)
{
	int len = req_len;
	struct mbuf *m;
	int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m->m_next)
			(m = m->m_next)->m_len = 0;
	}
}

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
struct mbuf *
m_pullup(struct mbuf *n, int len)
{
	struct mbuf *m;
	int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == NULL)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR)
			M_MOVE_PKTHDR(m, n);
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	mbstat.m_mpfail++;	/* XXX: No consistency. */
	return (NULL);
}
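
/*
 * Illustrative sketch (intentionally compiled out): the common
 * input-path idiom for m_pullup(), making sure a header is contiguous
 * before it is dereferenced through mtod().  It reuses the made-up
 * "struct myproto_hdr" from the m_copydata() sketch above.
 */
#if 0
static struct myproto_hdr *
myproto_header(struct mbuf **mp)
{
	struct mbuf *m = *mp;

	if (m->m_len < sizeof(struct myproto_hdr) &&
	    (m = m_pullup(m, sizeof(struct myproto_hdr))) == NULL) {
		/* m_pullup() has already freed the chain. */
		*mp = NULL;
		return (NULL);
	}
	*mp = m;
	return (mtod(m, struct myproto_hdr *));
}
#endif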

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 *
 * Note that the resulting mbufs might be read-only, because the new
 * mbuf can end up sharing an mbuf cluster with the original mbuf if
 * the "breaking point" happens to lie within a cluster mbuf. Use the
 * M_WRITABLE() macro to check for this case.
 */
struct mbuf *
m_split(struct mbuf *m0, int len0, int wait)
{
	struct mbuf *m, *n;
	u_int len = len0, remain;

	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == NULL)
		return (NULL);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		MGETHDR(n, wait, m0->m_type);
		if (n == NULL)
			return (NULL);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == NULL) {
				(void) m_free(n);
				return (NULL);
			} else {
				n->m_len = 0;
				return (n);
			}
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = NULL;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == NULL)
			return (NULL);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_flags |= M_EXT;
		n->m_ext = m->m_ext;
		MEXT_ADD_REF(m);
		n->m_data = m->m_data + len;
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = NULL;
	return (n);
}

/*
 * Routine to copy from device local memory into mbufs.
 * Note that `off' argument is offset into first mbuf of target chain from
 * which to begin copying the data to.
 */
struct mbuf *
m_devget(char *buf, int totlen, int off, struct ifnet *ifp,
    void (*copy)(char *from, caddr_t to, u_int len))
{
	struct mbuf *m;
	struct mbuf *top = 0, **mp = &top;
	int len;

	if (off < 0 || off > MHLEN)
		return (NULL);

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	len = MHLEN;

	while (totlen > 0) {
		if (top) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				m_freem(top);
				return (NULL);
			}
			len = MLEN;
		}
		if (totlen + off >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				len = MCLBYTES;
		} else {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (top == NULL && totlen + off + max_linkhdr <= len) {
				m->m_data += max_linkhdr;
				len -= max_linkhdr;
			}
		}
		if (off) {
			m->m_data += off;
			len -= off;
			off = 0;
		}
		m->m_len = len = min(totlen, len);
		if (copy)
			copy(buf, mtod(m, caddr_t), (u_int)len);
		else
			bcopy(buf, mtod(m, caddr_t), (u_int)len);
		buf += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
	}
	return (top);
}
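
/*
 * Illustrative sketch (intentionally compiled out): a driver receive
 * path handing a frame that sits in device memory to the stack via
 * m_devget().  "mydev_rxeof" is a made-up routine; real drivers differ
 * in how they obtain the buffer, length and ifnet pointer, and the
 * if_input handoff shown here is only one common pattern.
 */
#if 0
static void
mydev_rxeof(struct ifnet *ifp, char *frame, int framelen)
{
	struct mbuf *m;

	m = m_devget(frame, framelen, 0, ifp, NULL);
	if (m == NULL)
		return;			/* out of mbufs: drop the frame */
	(*ifp->if_input)(ifp, m);
}
#endif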

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(struct mbuf *m0, int off, int len, c_caddr_t cp)
{
	int mlen;
	struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == NULL)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == NULL) {
			n = m_get_clrd(M_DONTWAIT, m->m_type);
			if (n == NULL)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min(m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (u_int)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == NULL) {
			n = m_get(M_DONTWAIT, m->m_type);
			if (n == NULL)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}

/*
 * Apply function f to the data in an mbuf chain starting "off" bytes from
 * the beginning, continuing for "len" bytes.
 */
int
m_apply(struct mbuf *m, int off, int len,
    int (*f)(void *, void *, u_int), void *arg)
{
	u_int count;
	int rval;

	KASSERT(off >= 0, ("m_apply, negative off %d", off));
	KASSERT(len >= 0, ("m_apply, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_apply, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		rval = (*f)(arg, mtod(m, caddr_t) + off, count);
		if (rval)
			return (rval);
		len -= count;
		off = 0;
		m = m->m_next;
	}
	return (0);
}

/*
 * Return a pointer to mbuf/offset of location in mbuf chain.
 */
struct mbuf *
m_getptr(struct mbuf *m, int loc, int *off)
{

	while (loc >= 0) {
		/* Normal end of search. */
		if (m->m_len > loc) {
			*off = loc;
			return (m);
		} else {
			loc -= m->m_len;
			if (m->m_next == NULL) {
				if (loc == 0) {
					/* Point at the end of valid data. */
					*off = m->m_len;
					return (m);
				}
				return (NULL);
			}
			m = m->m_next;
		}
	}
	return (NULL);
}

void
m_print(const struct mbuf *m)
{
	int len;
	const struct mbuf *m2;

	len = m->m_pkthdr.len;
	m2 = m;
	while (len) {
		printf("%p %*D\n", m2, m2->m_len, (u_char *)m2->m_data, "-");
		len -= m2->m_len;
		m2 = m2->m_next;
	}
	return;
}

u_int
m_fixhdr(struct mbuf *m0)
{
	u_int len;

	len = m_length(m0, NULL);
	m0->m_pkthdr.len = len;
	return (len);
}

u_int
m_length(struct mbuf *m0, struct mbuf **last)
{
	struct mbuf *m;
	u_int len;

	len = 0;
	for (m = m0; m != NULL; m = m->m_next) {
		len += m->m_len;
		if (m->m_next == NULL)
			break;
	}
	if (last != NULL)
		*last = m;
	return (len);
}
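
/*
 * Illustrative sketch (intentionally compiled out): using m_apply() to
 * walk a byte range without linearizing the chain, here accumulating a
 * trivial additive checksum.  "myproto_cksum_cb" and "myproto_cksum"
 * are made-up helpers, not part of this file.
 */
#if 0
static int
myproto_cksum_cb(void *arg, void *data, u_int len)
{
	u_int32_t *sump = arg;
	u_char *p = data;
	u_int i;

	for (i = 0; i < len; i++)
		*sump += p[i];
	return (0);		/* returning non-zero would stop m_apply() */
}

static u_int32_t
myproto_cksum(struct mbuf *m, int off, int len)
{
	u_int32_t sum = 0;

	(void)m_apply(m, off, len, myproto_cksum_cb, &sum);
	return (sum);
}
#endif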

/*
 * Defragment a mbuf chain, returning the shortest possible
 * chain of mbufs and clusters.  If allocation fails and
 * this cannot be completed, NULL will be returned, but
 * the passed in chain will be unchanged.  Upon success,
 * the original chain will be freed, and the new chain
 * will be returned.
 *
 * If a non-packet header is passed in, the original
 * mbuf (chain?) will be returned unharmed.
 */
struct mbuf *
m_defrag(struct mbuf *m0, int how)
{
	struct mbuf *m_new = NULL, *m_final = NULL;
	int progress = 0, length;

	if (!(m0->m_flags & M_PKTHDR))
		return (m0);

	m_fixhdr(m0); /* Needed sanity check */

#ifdef MBUF_STRESS_TEST
	if (m_defragrandomfailures) {
		int temp = arc4random() & 0xff;
		if (temp == 0xba)
			goto nospace;
	}
#endif

	if (m0->m_pkthdr.len > MHLEN)
		m_final = m_getcl(how, MT_DATA, M_PKTHDR);
	else
		m_final = m_gethdr(how, MT_DATA);

	if (m_final == NULL)
		goto nospace;

	if (m_dup_pkthdr(m_final, m0, how) == 0)
		goto nospace;

	m_new = m_final;

	while (progress < m0->m_pkthdr.len) {
		length = m0->m_pkthdr.len - progress;
		if (length > MCLBYTES)
			length = MCLBYTES;

		if (m_new == NULL) {
			if (length > MLEN)
				m_new = m_getcl(how, MT_DATA, 0);
			else
				m_new = m_get(how, MT_DATA);
			if (m_new == NULL)
				goto nospace;
		}

		m_copydata(m0, progress, length, mtod(m_new, caddr_t));
		progress += length;
		m_new->m_len = length;
		if (m_new != m_final)
			m_cat(m_final, m_new);
		m_new = NULL;
	}
#ifdef MBUF_STRESS_TEST
	if (m0->m_next == NULL)
		m_defraguseless++;
#endif
	m_freem(m0);
	m0 = m_final;
#ifdef MBUF_STRESS_TEST
	m_defragpackets++;
	m_defragbytes += m0->m_pkthdr.len;
#endif
	return (m0);
nospace:
#ifdef MBUF_STRESS_TEST
	m_defragfailure++;
#endif
	if (m_new)
		m_free(m_new);
	if (m_final)
		m_freem(m_final);
	return (NULL);
}
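
/*
 * Illustrative sketch (intentionally compiled out): a transmit path
 * that compacts an overly long chain with m_defrag() before handing it
 * to hardware with a limited number of DMA segments.  "MYDEV_MAXSEGS"
 * and "mydev_compact" are made-up names used only for the example.
 */
#if 0
#define	MYDEV_MAXSEGS	8

static struct mbuf *
mydev_compact(struct mbuf *m)
{
	struct mbuf *n;
	int nsegs;

	nsegs = 0;
	for (n = m; n != NULL; n = n->m_next)
		nsegs++;
	if (nsegs <= MYDEV_MAXSEGS)
		return (m);
	n = m_defrag(m, M_DONTWAIT);
	if (n == NULL) {
		/* m_defrag() failed but left the original chain intact. */
		m_freem(m);
		return (NULL);
	}
	return (n);
}
#endif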

#ifdef MBUF_STRESS_TEST

/*
 * Fragment an mbuf chain.  There's no reason you'd ever want to do
 * this in normal usage, but it's great for stress testing various
 * mbuf consumers.
 *
 * If fragmentation is not possible, the original chain will be
 * returned.
 *
 * Possible length values:
 * 0	 no fragmentation will occur
 * > 0	 each fragment will be of the specified length
 * -1	 each fragment will be the same random value in length
 * -2	 each fragment's length will be entirely random
 * (Random values range from 1 to 256)
 */
struct mbuf *
m_fragment(struct mbuf *m0, int how, int length)
{
	struct mbuf *m_new = NULL, *m_final = NULL;
	int progress = 0;

	if (!(m0->m_flags & M_PKTHDR))
		return (m0);

	if ((length == 0) || (length < -2))
		return (m0);

	m_fixhdr(m0); /* Needed sanity check */

	m_final = m_getcl(how, MT_DATA, M_PKTHDR);

	if (m_final == NULL)
		goto nospace;

	if (m_dup_pkthdr(m_final, m0, how) == 0)
		goto nospace;

	m_new = m_final;

	if (length == -1)
		length = 1 + (arc4random() & 255);

	while (progress < m0->m_pkthdr.len) {
		int fraglen;

		if (length > 0)
			fraglen = length;
		else
			fraglen = 1 + (arc4random() & 255);
		if (fraglen > m0->m_pkthdr.len - progress)
			fraglen = m0->m_pkthdr.len - progress;

		if (fraglen > MCLBYTES)
			fraglen = MCLBYTES;

		if (m_new == NULL) {
			m_new = m_getcl(how, MT_DATA, 0);
			if (m_new == NULL)
				goto nospace;
		}

		m_copydata(m0, progress, fraglen, mtod(m_new, caddr_t));
		progress += fraglen;
		m_new->m_len = fraglen;
		if (m_new != m_final)
			m_cat(m_final, m_new);
		m_new = NULL;
	}
	m_freem(m0);
	m0 = m_final;
	return (m0);
nospace:
	if (m_new)
		m_free(m_new);
	if (m_final)
		m_freem(m_final);
	/* Return the original chain on failure */
	return (m0);
}

#endif

struct mbuf *
m_uiotombuf(struct uio *uio, int how, int len)
{
	struct mbuf *m_new = NULL, *m_final = NULL;
	int progress = 0, error = 0, length, total;

	if (len > 0)
		total = min(uio->uio_resid, len);
	else
		total = uio->uio_resid;
	if (total > MHLEN)
		m_final = m_getcl(how, MT_DATA, M_PKTHDR);
	else
		m_final = m_gethdr(how, MT_DATA);
	if (m_final == NULL)
		goto nospace;
	m_new = m_final;
	while (progress < total) {
		length = total - progress;
		if (length > MCLBYTES)
			length = MCLBYTES;
		if (m_new == NULL) {
			if (length > MLEN)
				m_new = m_getcl(how, MT_DATA, 0);
			else
				m_new = m_get(how, MT_DATA);
			if (m_new == NULL)
				goto nospace;
		}
		error = uiomove(mtod(m_new, void *), length, uio);
		if (error)
			goto nospace;
		progress += length;
		m_new->m_len = length;
		if (m_new != m_final)
			m_cat(m_final, m_new);
		m_new = NULL;
	}
	m_fixhdr(m_final);
	return (m_final);
nospace:
	if (m_new)
		m_free(m_new);
	if (m_final)
		m_freem(m_final);
	return (NULL);
}
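
/*
 * Illustrative sketch (intentionally compiled out): copying the data
 * described by a struct uio (for instance from a device write(2)
 * handler) into an mbuf chain with m_uiotombuf().  "mydev_write" is a
 * made-up routine, and the chain is simply freed where a real consumer
 * would hand it to a protocol or driver.
 */
#if 0
static int
mydev_write(struct uio *uio)
{
	struct mbuf *m;

	m = m_uiotombuf(uio, M_TRYWAIT, 0);	/* len 0: take all of the uio */
	if (m == NULL)
		return (ENOBUFS);
	/* ... hand "m" to its consumer here ... */
	m_freem(m);
	return (0);
}
#endif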