1 /* 2 * Copyright (c) 1982, 1986, 1988, 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by the University of 16 * California, Berkeley and its contributors. 17 * 4. Neither the name of the University nor the names of its contributors 18 * may be used to endorse or promote products derived from this software 19 * without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 31 * SUCH DAMAGE. 
 *
 * @(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 * $FreeBSD$
 */

#include "opt_param.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/protosw.h>

/*
 * Header-size bookkeeping.  NOTE(review): these are written elsewhere
 * (presumably during domain/protocol initialization -- not visible in
 * this file).  max_linkhdr is consulted by m_devget() below to reserve
 * leading space for a link-level header; the others are exported via
 * sysctl only.
 */
int	max_linkhdr;
int	max_protohdr;
int	max_hdr;
int	max_datalen;

/*
 * sysctl(8) exported objects
 */
SYSCTL_DECL(_kern_ipc);
SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
    &max_linkhdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
    &max_protohdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
    &max_datalen, 0, "");

/*
 * struct mbuf *
 * m_getm(m, len, how, type)
 *
 * This will allocate len-worth of mbufs and/or mbuf clusters (whatever fits
 * best) and return a pointer to the top of the allocated chain.  If m is
 * non-null, then we assume that it is a single mbuf or an mbuf chain to
 * which we want len bytes worth of mbufs and/or clusters attached, and so
 * if we succeed in allocating it, we will just return a pointer to m.
 *
 * If we happen to fail at any point during the allocation, we will free
 * up everything we have already allocated and return NULL.
 *
 */
struct mbuf *
m_getm(struct mbuf *m, int len, int how, int type)
{
	struct mbuf *top, *tail, *mp, *mtail = NULL;

	KASSERT(len >= 0, ("len is < 0 in m_getm"));

	/* Allocate the first mbuf up front so there is space to measure. */
	MGET(mp, how, type);
	if (mp == NULL)
		return (NULL);
	else if (len > MINCLSIZE) {
		/* Large enough to warrant a cluster; fail if none attached. */
		MCLGET(mp, how);
		if ((mp->m_flags & M_EXT) == 0) {
			m_free(mp);
			return (NULL);
		}
	}
	mp->m_len = 0;
	len -= M_TRAILINGSPACE(mp);

	/* Find the tail of the caller's chain, if one was passed in. */
	if (m != NULL)
		for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next);
	else
		m = mp;

	/* Keep allocating mbufs (and clusters) until len is covered. */
	top = tail = mp;
	while (len > 0) {
		MGET(mp, how, type);
		if (mp == NULL)
			goto failed;

		tail->m_next = mp;
		tail = mp;
		if (len > MINCLSIZE) {
			MCLGET(mp, how);
			if ((mp->m_flags & M_EXT) == 0)
				goto failed;
		}

		mp->m_len = 0;
		len -= M_TRAILINGSPACE(mp);
	}

	/* Graft the new allocation onto the caller's chain, if any. */
	if (mtail != NULL)
		mtail->m_next = top;
	return (m);

failed:
	/* Free only what was allocated here; the caller's chain is intact. */
	m_freem(top);
	return (NULL);
}

/*
 * Free an entire mbuf chain.  Relies on m_free() returning the next
 * mbuf in the chain after freeing its argument.
 */
void
m_freem(struct mbuf *m)
{
	while (m) {
		m = m_free(m);
	}
}

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 *
 * On allocation failure the entire chain m is freed and NULL is
 * returned.  If m carried a packet header, the header is moved to the
 * new front mbuf.
 */
struct mbuf *
m_prepend(struct mbuf *m, int len, int how)
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == NULL) {
		m_freem(m);
		return (NULL);
	}
	if (m->m_flags & M_PKTHDR) {
		/* Move the packet header to the new first mbuf. */
		M_COPY_PKTHDR(mn, m);
		m->m_flags &= ~M_PKTHDR;
	}
	mn->m_next = m;
	m = mn;
	/* Push the requested len to the end of the mbuf when it fits. */
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_TRYWAIT/M_DONTWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
struct mbuf *
m_copym(struct mbuf *m, int off0, int len, int wait)
{
	struct mbuf *n, **np;
	int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	KASSERT(off >= 0, ("m_copym, negative off %d", off));
	KASSERT(len >= 0, ("m_copym, negative len %d", len));
	/* A copy from offset 0 of a header mbuf also copies the pkthdr. */
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	/* Skip the mbufs that lie entirely before the starting offset. */
	while (off > 0) {
		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == NULL) {
			/* Running off the end is only legal for M_COPYALL. */
			KASSERT(len == M_COPYALL,
			    ("m_copym, length > size of mbuf chain"));
			break;
		}
		MGET(n, wait, m->m_type);
		*np = n;
		if (n == NULL)
			goto nospace;
		if (copyhdr) {
			M_COPY_PKTHDR(n, m);
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			/* Share the cluster (read-only) instead of copying. */
			n->m_data = m->m_data + off;
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
			MEXT_ADD_REF(m);
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		/* Only the first source mbuf is entered at an offset. */
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == NULL)
		mbstat.m_mcfail++;	/* XXX: No consistency. */

	return (top);
nospace:
	m_freem(top);
	mbstat.m_mcfail++;	/* XXX: No consistency. */
	return (NULL);
}

/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 * Preserve alignment of the first mbuf so if the creator has left
 * some room at the beginning (e.g. for inserting protocol headers)
 * the copies still have the room available.
 */
struct mbuf *
m_copypacket(struct mbuf *m, int how)
{
	struct mbuf *top, *n, *o;

	MGET(n, how, m->m_type);
	top = n;
	if (n == NULL)
		goto nospace;

	M_COPY_PKTHDR(n, m);
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		/* Share the cluster; bump its reference count. */
		n->m_data = m->m_data;
		n->m_ext = m->m_ext;
		n->m_flags |= M_EXT;
		MEXT_ADD_REF(m);
	} else {
		/* Preserve the original leading-space offset in the copy. */
		n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	/* Copy (or share) the remaining mbufs of the chain. */
	m = m->m_next;
	while (m) {
		MGET(o, how, m->m_type);
		if (o == NULL)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data;
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
			MEXT_ADD_REF(m);
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return top;
nospace:
	m_freem(top);
	mbstat.m_mcfail++;	/* XXX: No consistency. */
	return (NULL);
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 * The caller must guarantee the chain holds at least off+len bytes
 * (enforced by KASSERT only).
 */
void
m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
{
	unsigned count;

	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
	/* Advance to the mbuf containing the starting offset. */
	while (off > 0) {
		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	/* Copy out of each mbuf in turn. */
	while (len > 0) {
		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}

/*
 * Copy a packet header mbuf chain into a completely new chain, including
 * copying any mbuf clusters.
Use this instead of m_copypacket() when
 * you need a writable copy of an mbuf chain.
 */
struct mbuf *
m_dup(struct mbuf *m, int how)
{
	struct mbuf **p, *top = NULL;
	int remain, moff, nsize;

	/* Sanity check */
	if (m == NULL)
		return (NULL);
	KASSERT((m->m_flags & M_PKTHDR) != 0, ("%s: !PKTHDR", __func__));

	/* While there's more data, get a new mbuf, tack it on, and fill it */
	remain = m->m_pkthdr.len;
	moff = 0;
	p = &top;
	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
		struct mbuf *n;

		/* Get the next new mbuf */
		MGET(n, how, m->m_type);
		if (n == NULL)
			goto nospace;
		if (top == NULL) {		/* first one, must be PKTHDR */
			M_COPY_PKTHDR(n, m);
			nsize = MHLEN;
		} else				/* not the first one */
			nsize = MLEN;
		if (remain >= MINCLSIZE) {
			/* Enough left to justify a cluster. */
			MCLGET(n, how);
			if ((n->m_flags & M_EXT) == 0) {
				(void)m_free(n);
				goto nospace;
			}
			nsize = MCLBYTES;
		}
		n->m_len = 0;

		/* Link it into the new chain */
		*p = n;
		p = &n->m_next;

		/* Copy data from original mbuf(s) into new mbuf */
		while (n->m_len < nsize && m != NULL) {
			int chunk = min(nsize - n->m_len, m->m_len - moff);

			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
			moff += chunk;
			n->m_len += chunk;
			remain -= chunk;
			/* Source mbuf exhausted: step to the next one. */
			if (moff == m->m_len) {
				m = m->m_next;
				moff = 0;
			}
		}

		/* Check correct total mbuf length */
		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
		    ("%s: bogus m_pkthdr.len", __func__));
	}
	return (top);

nospace:
	m_freem(top);
	mbstat.m_mcfail++;	/* XXX: No consistency. */
	return (NULL);
}

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(struct mbuf *m, struct mbuf *n)
{
	/* Find the last mbuf of m. */
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

/*
 * Trim req_len bytes from the mbuf chain mp: from the head when req_len
 * is positive, from the tail when it is negative.  A packet header
 * length, if present, is kept in sync.
 */
void
m_adj(struct mbuf *mp, int req_len)
{
	int len = req_len;
	struct mbuf *m;
	int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				/* This mbuf is consumed entirely. */
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail. Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return. Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			/* The whole trim fits within the last mbuf. */
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		/* Zero the lengths of any trailing mbufs past the cut. */
		while (m->m_next)
			(m = m->m_next)->m_len = 0;
	}
}

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
struct mbuf *
m_pullup(struct mbuf *n, int len)
{
	struct mbuf *m;
	int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == NULL)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR) {
			/* Move the packet header to the new front mbuf. */
			M_COPY_PKTHDR(m, n);
			n->m_flags &= ~M_PKTHDR;
		}
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		/* Pull at least len, up to max_protohdr bytes if it fits. */
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	mbstat.m_mpfail++;	/* XXX: No consistency. */
	return (NULL);
}

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 */
struct mbuf *
m_split(struct mbuf *m0, int len0, int wait)
{
	struct mbuf *m, *n;
	unsigned len = len0, remain;

	/* Locate the mbuf in which the split point falls. */
	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == NULL)
		return (NULL);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		MGETHDR(n, wait, m0->m_type);
		if (n == NULL)
			return (NULL);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == NULL) {
				(void) m_free(n);
				return (NULL);
			} else
				return (n);
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		/* Split falls exactly on an mbuf boundary. */
		n = m->m_next;
		m->m_next = NULL;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == NULL)
			return (NULL);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		/* Share the cluster rather than copying the tail bytes. */
		n->m_flags |= M_EXT;
		n->m_ext = m->m_ext;
		MEXT_ADD_REF(m);
		m->m_ext.ext_size = 0; /* For Accounting XXXXXX danger */
		n->m_data = m->m_data + len;
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = NULL;
	return (n);
}
/*
 * Routine to copy from device local memory into mbufs.
 * Note that `off' argument is offset into first mbuf of target chain from
 * which to begin copying the data to.
 */
struct mbuf *
m_devget(char *buf, int totlen, int off, struct ifnet *ifp,
    void (*copy)(char *from, caddr_t to, u_int len))
{
	struct mbuf *m;
	struct mbuf *top = 0, **mp = &top;
	int len;

	/* off must fit entirely within the first (header) mbuf. */
	if (off < 0 || off > MHLEN)
		return (NULL);

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	len = MHLEN;

	while (totlen > 0) {
		/* Every mbuf after the first is a plain MGET. */
		if (top) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				m_freem(top);
				return (NULL);
			}
			len = MLEN;
		}
		if (totlen + off >= MINCLSIZE) {
			/* Cluster failure is non-fatal: fall back to len. */
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				len = MCLBYTES;
		} else {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (top == NULL && totlen + off + max_linkhdr <= len) {
				m->m_data += max_linkhdr;
				len -= max_linkhdr;
			}
		}
		if (off) {
			m->m_data += off;
			len -= off;
			off = 0;
		}
		m->m_len = len = min(totlen, len);
		/* Use the device-supplied copy routine if one was given. */
		if (copy)
			copy(buf, mtod(m, caddr_t), (unsigned)len);
		else
			bcopy(buf, mtod(m, caddr_t), (unsigned)len);
		buf += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
	}
	return (top);
}

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(struct mbuf *m0, int off, int len, caddr_t cp)
{
	int mlen;
	struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == NULL)
		return;
	/* Walk (and, if needed, extend) the chain out to offset off. */
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == NULL) {
			/*
			 * Extend the chain.  NOTE(review): m_get_clrd()
			 * presumably returns a zero-filled mbuf so the
			 * gap before off reads as zeroes -- confirm.
			 */
			n = m_get_clrd(M_DONTWAIT, m->m_type);
			if (n == NULL)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	/* Overwrite len bytes, growing the chain as we go. */
	while (len > 0) {
		mlen = min (m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == NULL) {
			/* Allocation failure stops the copy short. */
			n = m_get(M_DONTWAIT, m->m_type);
			if (n == NULL)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
	/* Grow the recorded packet length if the chain got longer. */
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}

/*
 * Debugging aid: print each mbuf of a chain as a hex dump via the
 * kernel printf %D format.  NOTE(review): reads m_pkthdr.len
 * unconditionally, so m is assumed to have a valid packet header.
 */
void
m_print(const struct mbuf *m)
{
	int len;
	const struct mbuf *m2;

	len = m->m_pkthdr.len;
	m2 = m;
	while (len) {
		printf("%p %*D\n", m2, m2->m_len, (u_char *)m2->m_data, "-");
		len -= m2->m_len;
		m2 = m2->m_next;
	}
	return;
}