/*-
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_param.h"
#include "opt_mbuf_stress_test.h"
#include "opt_mbuf_profiling.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/uio.h>

int	max_linkhdr;
int	max_protohdr;
int	max_hdr;
int	max_datalen;
#ifdef MBUF_STRESS_TEST
int	m_defragpackets;
int	m_defragbytes;
int	m_defraguseless;
int	m_defragfailure;
int	m_defragrandomfailures;
#endif

/*
 * sysctl(8) exported objects
 */
SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RD,
	   &max_linkhdr, 0, "Size of largest link layer header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RD,
	   &max_protohdr, 0, "Size of largest protocol layer header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RD,
	   &max_hdr, 0, "Size of largest link plus protocol header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RD,
	   &max_datalen, 0, "Minimum space left in mbuf after max_hdr");
#ifdef MBUF_STRESS_TEST
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
	   &m_defragpackets, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
	   &m_defragbytes, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
	   &m_defraguseless, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
	   &m_defragfailure, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
	   &m_defragrandomfailures, 0, "");
#endif

/*
 * Ensure the correct size of various mbuf parameters.  It could be off due
 * to compiler-induced padding and alignment artifacts.
 */
CTASSERT(MSIZE - offsetof(struct mbuf, m_dat) == MLEN);
CTASSERT(MSIZE - offsetof(struct mbuf, m_pktdat) == MHLEN);

/*
 * mbuf data storage should be 64-bit aligned regardless of architectural
 * pointer size; check this is the case with and without a packet header.
 */
CTASSERT(offsetof(struct mbuf, m_dat) % 8 == 0);
CTASSERT(offsetof(struct mbuf, m_pktdat) % 8 == 0);

/*
 * While the specific values here don't matter too much (i.e., +/- a few
 * words), we do want to ensure that changes to these values are carefully
 * reasoned about and properly documented.  This is especially the case as
 * network-protocol and device-driver modules encode these layouts, and must
 * be recompiled if the structures change.  Check these values at compile time
 * against the ones documented in comments in mbuf.h.
 *
 * NB: Possibly they should be documented there via #define's and not just
 * comments.
 */
#if defined(__LP64__)
CTASSERT(offsetof(struct mbuf, m_dat) == 32);
CTASSERT(sizeof(struct pkthdr) == 56);
CTASSERT(sizeof(struct m_ext) == 48);
#else
CTASSERT(offsetof(struct mbuf, m_dat) == 24);
CTASSERT(sizeof(struct pkthdr) == 48);
CTASSERT(sizeof(struct m_ext) == 28);
#endif

/*
 * Assert that the queue(3) macros produce code of the same size as an old
 * plain pointer does.
 */
#ifdef INVARIANTS
static struct mbuf m_assertbuf;
CTASSERT(sizeof(m_assertbuf.m_slist) == sizeof(m_assertbuf.m_next));
CTASSERT(sizeof(m_assertbuf.m_stailq) == sizeof(m_assertbuf.m_next));
CTASSERT(sizeof(m_assertbuf.m_slistpkt) == sizeof(m_assertbuf.m_nextpkt));
CTASSERT(sizeof(m_assertbuf.m_stailqpkt) == sizeof(m_assertbuf.m_nextpkt));
#endif

/*
 * m_get2() allocates a minimum-size mbuf that will fit the "size" argument.
 */
struct mbuf *
m_get2(int size, int how, short type, int flags)
{
	struct mb_args args;
	struct mbuf *m, *n;

	args.flags = flags;
	args.type = type;

	if (size <= MHLEN || (size <= MLEN && (flags & M_PKTHDR) == 0))
		return (uma_zalloc_arg(zone_mbuf, &args, how));
	if (size <= MCLBYTES)
		return (uma_zalloc_arg(zone_pack, &args, how));

	if (size > MJUMPAGESIZE)
		return (NULL);

	m = uma_zalloc_arg(zone_mbuf, &args, how);
	if (m == NULL)
		return (NULL);

	n = uma_zalloc_arg(zone_jumbop, m, how);
	if (n == NULL) {
		uma_zfree(zone_mbuf, m);
		return (NULL);
	}

	return (m);
}

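/*
 * Usage sketch (illustrative only; the names "payload" and "payload_len"
 * are placeholders, not identifiers used elsewhere in this file): a
 * caller that knows its payload size can let m_get2() pick between a
 * plain mbuf, a packed cluster, or a page-sized jumbo cluster.
 *
 *	struct mbuf *m;
 *
 *	m = m_get2(payload_len, M_NOWAIT, MT_DATA, M_PKTHDR);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	m->m_len = m->m_pkthdr.len = payload_len;
 *	bcopy(payload, mtod(m, caddr_t), payload_len);
 *
 * Requests larger than MJUMPAGESIZE fail with NULL; callers that may
 * need more fall back to m_getjcl() or m_getm2() below.
 */
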
/*
 * m_getjcl() returns an mbuf with a cluster of the specified size attached.
 * For size it takes MCLBYTES, MJUMPAGESIZE, MJUM9BYTES, MJUM16BYTES.
 */
struct mbuf *
m_getjcl(int how, short type, int flags, int size)
{
	struct mb_args args;
	struct mbuf *m, *n;
	uma_zone_t zone;

	if (size == MCLBYTES)
		return m_getcl(how, type, flags);

	args.flags = flags;
	args.type = type;

	m = uma_zalloc_arg(zone_mbuf, &args, how);
	if (m == NULL)
		return (NULL);

	zone = m_getzone(size);
	n = uma_zalloc_arg(zone, m, how);
	if (n == NULL) {
		uma_zfree(zone_mbuf, m);
		return (NULL);
	}
	return (m);
}

/*
 * Allocate a given length worth of mbufs and/or clusters (whatever fits
 * best) and return a pointer to the top of the allocated chain.  If an
 * existing mbuf chain is provided, then we will append the new chain
 * to the existing one but still return the top of the newly allocated
 * chain.
 */
struct mbuf *
m_getm2(struct mbuf *m, int len, int how, short type, int flags)
{
	struct mbuf *mb, *nm = NULL, *mtail = NULL;

	KASSERT(len >= 0, ("%s: len is < 0", __func__));

	/* Validate flags. */
	flags &= (M_PKTHDR | M_EOR);

	/* Packet header mbuf must be first in chain. */
	if ((flags & M_PKTHDR) && m != NULL)
		flags &= ~M_PKTHDR;

	/* Loop and append maximum sized mbufs to the chain tail. */
	while (len > 0) {
		if (len > MCLBYTES)
			mb = m_getjcl(how, type, (flags & M_PKTHDR),
			    MJUMPAGESIZE);
		else if (len >= MINCLSIZE)
			mb = m_getcl(how, type, (flags & M_PKTHDR));
		else if (flags & M_PKTHDR)
			mb = m_gethdr(how, type);
		else
			mb = m_get(how, type);

		/* Fail the whole operation if one mbuf can't be allocated. */
		if (mb == NULL) {
			if (nm != NULL)
				m_freem(nm);
			return (NULL);
		}

		/* Bookkeeping. */
		len -= M_SIZE(mb);
		if (mtail != NULL)
			mtail->m_next = mb;
		else
			nm = mb;
		mtail = mb;
		flags &= ~M_PKTHDR;	/* Only valid on the first mbuf. */
	}
	if (flags & M_EOR)
		mtail->m_flags |= M_EOR;  /* Only valid on the last mbuf. */

	/* If mbuf was supplied, append new chain to the end of it. */
	if (m != NULL) {
		for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next)
			;
		mtail->m_next = nm;
		mtail->m_flags &= ~M_EOR;
	} else
		m = nm;

	return (m);
}

/*
 * Free an entire chain of mbufs and associated external buffers, if
 * applicable.
 */
void
m_freem(struct mbuf *mb)
{

	while (mb != NULL)
		mb = m_free(mb);
}

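/*
 * Usage sketch (illustrative only; "len" and "top" are placeholders):
 * m_getm2() builds a whole chain sized for an arbitrary length, and
 * m_freem() releases it again.
 *
 *	struct mbuf *top;
 *
 *	top = m_getm2(NULL, len, M_WAITOK, MT_DATA, M_PKTHDR);
 *	...
 *	m_freem(top);
 *
 * With M_WAITOK the allocations may sleep; with M_NOWAIT the caller
 * must be prepared for a NULL return.
 */
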
/*-
 * Configure a provided mbuf to refer to the provided external storage
 * buffer and setup a reference count for said buffer.  If the setting
 * up of the reference count fails, the M_EXT bit will not be set.  If
 * successful, the M_EXT bit is set in the mbuf's flags.
 *
 * Arguments:
 *    mb     The existing mbuf to which to attach the provided buffer.
 *    buf    The address of the provided external storage buffer.
 *    size   The size of the provided buffer.
 *    freef  A pointer to a routine that is responsible for freeing the
 *           provided external storage buffer.
 *    args   A pointer to an argument structure (of any type) to be passed
 *           to the provided freef routine (may be NULL).
 *    flags  Any other flags to be passed to the provided mbuf.
 *    type   The type that the external storage buffer should be
 *           labeled with.
 *
 * Returns:
 *    0 on success, ENOMEM if the reference counter could not be allocated.
 */
int
m_extadd(struct mbuf *mb, caddr_t buf, u_int size,
    void (*freef)(struct mbuf *, void *, void *), void *arg1, void *arg2,
    int flags, int type, int wait)
{
	KASSERT(type != EXT_CLUSTER, ("%s: EXT_CLUSTER not allowed", __func__));

	if (type != EXT_EXTREF)
		mb->m_ext.ext_cnt = uma_zalloc(zone_ext_refcnt, wait);

	if (mb->m_ext.ext_cnt == NULL)
		return (ENOMEM);

	*(mb->m_ext.ext_cnt) = 1;
	mb->m_flags |= (M_EXT | flags);
	mb->m_ext.ext_buf = buf;
	mb->m_data = mb->m_ext.ext_buf;
	mb->m_ext.ext_size = size;
	mb->m_ext.ext_free = freef;
	mb->m_ext.ext_arg1 = arg1;
	mb->m_ext.ext_arg2 = arg2;
	mb->m_ext.ext_type = type;
	mb->m_ext.ext_flags = 0;

	return (0);
}

/*
 * Non-directly-exported function to clean up after mbufs with M_EXT
 * storage attached to them if the reference count hits 1.
 */
void
mb_free_ext(struct mbuf *m)
{
	int freembuf;

	KASSERT(m->m_flags & M_EXT, ("%s: M_EXT not set on %p", __func__, m));

	/*
	 * Check if the header is embedded in the cluster.
	 */
	freembuf = (m->m_flags & M_NOFREE) ? 0 : 1;

	switch (m->m_ext.ext_type) {
	case EXT_SFBUF:
		sf_ext_free(m->m_ext.ext_arg1, m->m_ext.ext_arg2);
		break;
	default:
		KASSERT(m->m_ext.ext_cnt != NULL,
		    ("%s: no refcounting pointer on %p", __func__, m));
		/*
		 * Free attached storage if this mbuf is the only
		 * reference to it.
		 */
		if (*(m->m_ext.ext_cnt) != 1) {
			if (atomic_fetchadd_int(m->m_ext.ext_cnt, -1) != 1)
				break;
		}

		switch (m->m_ext.ext_type) {
		case EXT_PACKET:	/* The packet zone is special. */
			if (*(m->m_ext.ext_cnt) == 0)
				*(m->m_ext.ext_cnt) = 1;
			uma_zfree(zone_pack, m);
			return;		/* Job done. */
		case EXT_CLUSTER:
			uma_zfree(zone_clust, m->m_ext.ext_buf);
			break;
		case EXT_JUMBOP:
			uma_zfree(zone_jumbop, m->m_ext.ext_buf);
			break;
		case EXT_JUMBO9:
			uma_zfree(zone_jumbo9, m->m_ext.ext_buf);
			break;
		case EXT_JUMBO16:
			uma_zfree(zone_jumbo16, m->m_ext.ext_buf);
			break;
		case EXT_NET_DRV:
		case EXT_MOD_TYPE:
		case EXT_DISPOSABLE:
			*(m->m_ext.ext_cnt) = 0;
			uma_zfree(zone_ext_refcnt, __DEVOLATILE(u_int *,
			    m->m_ext.ext_cnt));
			/* FALLTHROUGH */
		case EXT_EXTREF:
			KASSERT(m->m_ext.ext_free != NULL,
			    ("%s: ext_free not set", __func__));
			(*(m->m_ext.ext_free))(m, m->m_ext.ext_arg1,
			    m->m_ext.ext_arg2);
			break;
		default:
			KASSERT(m->m_ext.ext_type == 0,
			    ("%s: unknown ext_type", __func__));
		}
	}

	if (freembuf)
		uma_zfree(zone_mbuf, m);
}

/*
 * Attach the cluster from *m to *n, set up m_ext in *n
 * and bump the refcount of the cluster.
 */
static void
mb_dupcl(struct mbuf *n, const struct mbuf *m)
{

	KASSERT(m->m_flags & M_EXT, ("%s: M_EXT not set on %p", __func__, m));
	KASSERT(!(n->m_flags & M_EXT), ("%s: M_EXT set on %p", __func__, n));

	switch (m->m_ext.ext_type) {
	case EXT_SFBUF:
		sf_ext_ref(m->m_ext.ext_arg1, m->m_ext.ext_arg2);
		break;
	default:
		KASSERT(m->m_ext.ext_cnt != NULL,
		    ("%s: no refcounting pointer on %p", __func__, m));
		if (*(m->m_ext.ext_cnt) == 1)
			*(m->m_ext.ext_cnt) += 1;
		else
			atomic_add_int(m->m_ext.ext_cnt, 1);
	}

	n->m_ext = m->m_ext;
	n->m_flags |= M_EXT;
	n->m_flags |= m->m_flags & M_RDONLY;
}

void
m_demote_pkthdr(struct mbuf *m)
{

	M_ASSERTPKTHDR(m);

	m_tag_delete_chain(m, NULL);
	m->m_flags &= ~M_PKTHDR;
	bzero(&m->m_pkthdr, sizeof(struct pkthdr));
}

/*
 * Clean up mbuf (chain) from any tags and packet headers.
 * If "all" is set then the first mbuf in the chain will be
 * cleaned too.
 */
void
m_demote(struct mbuf *m0, int all, int flags)
{
	struct mbuf *m;

	for (m = all ? m0 : m0->m_next; m != NULL; m = m->m_next) {
		KASSERT(m->m_nextpkt == NULL, ("%s: m_nextpkt in m %p, m0 %p",
		    __func__, m, m0));
		if (m->m_flags & M_PKTHDR)
			m_demote_pkthdr(m);
		m->m_flags = m->m_flags & (M_EXT | M_RDONLY | M_NOFREE | flags);
	}
}

/*
 * Sanity checks on mbuf (chain) for use in KASSERT() and general
 * debugging.
 * Returns 0 or panics when bad and 1 on all tests passed.
 * Sanitize, 0 to run M_SANITY_ACTION, 1 to garble things so they
 * blow up later.
 */
int
m_sanity(struct mbuf *m0, int sanitize)
{
	struct mbuf *m;
	caddr_t a, b;
	int pktlen = 0;

#ifdef INVARIANTS
#define	M_SANITY_ACTION(s)	panic("mbuf %p: " s, m)
#else
#define	M_SANITY_ACTION(s)	printf("mbuf %p: " s, m)
#endif

	for (m = m0; m != NULL; m = m->m_next) {
		/*
		 * Basic pointer checks.  If any of these fails then some
		 * unrelated kernel memory before or after us is trashed.
		 * No way to recover from that.
		 */
		a = M_START(m);
		b = a + M_SIZE(m);
		if ((caddr_t)m->m_data < a)
			M_SANITY_ACTION("m_data outside mbuf data range left");
		if ((caddr_t)m->m_data > b)
			M_SANITY_ACTION("m_data outside mbuf data range right");
		if ((caddr_t)m->m_data + m->m_len > b)
			M_SANITY_ACTION("m_data + m_len exceeds mbuf space");

		/* m->m_nextpkt may only be set on first mbuf in chain. */
		if (m != m0 && m->m_nextpkt != NULL) {
			if (sanitize) {
				m_freem(m->m_nextpkt);
				m->m_nextpkt = (struct mbuf *)0xDEADC0DE;
			} else
				M_SANITY_ACTION("m->m_nextpkt on in-chain mbuf");
		}

		/* packet length (not mbuf length!) calculation */
		if (m0->m_flags & M_PKTHDR)
			pktlen += m->m_len;

		/* m_tags may only be attached to first mbuf in chain. */
		if (m != m0 && m->m_flags & M_PKTHDR &&
		    !SLIST_EMPTY(&m->m_pkthdr.tags)) {
			if (sanitize) {
				m_tag_delete_chain(m, NULL);
				/* put in 0xDEADC0DE perhaps? */
			} else
				M_SANITY_ACTION("m_tags on in-chain mbuf");
		}

		/* M_PKTHDR may only be set on first mbuf in chain */
		if (m != m0 && m->m_flags & M_PKTHDR) {
			if (sanitize) {
				bzero(&m->m_pkthdr, sizeof(m->m_pkthdr));
				m->m_flags &= ~M_PKTHDR;
				/* put in 0xDEADCODE and leave hdr flag in */
			} else
				M_SANITY_ACTION("M_PKTHDR on in-chain mbuf");
		}
	}
	m = m0;
	if (pktlen && pktlen != m->m_pkthdr.len) {
		if (sanitize)
			m->m_pkthdr.len = 0;
		else
			M_SANITY_ACTION("m_pkthdr.len != mbuf chain length");
	}
	return 1;

#undef	M_SANITY_ACTION
}


/*
 * "Move" mbuf pkthdr from "from" to "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 */
void
m_move_pkthdr(struct mbuf *to, struct mbuf *from)
{

#if 0
	/* see below for why these are not enabled */
	M_ASSERTPKTHDR(to);
	/* Note: with MAC, this may not be a good assertion. */
	KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags),
	    ("m_move_pkthdr: to has tags"));
#endif
#ifdef MAC
	/*
	 * XXXMAC: It could be this should also occur for non-MAC?
	 */
	if (to->m_flags & M_PKTHDR)
		m_tag_delete_chain(to, NULL);
#endif
	to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
	if ((to->m_flags & M_EXT) == 0)
		to->m_data = to->m_pktdat;
	to->m_pkthdr = from->m_pkthdr;		/* especially tags */
	SLIST_INIT(&from->m_pkthdr.tags);	/* purge tags from src */
	from->m_flags &= ~M_PKTHDR;
}

/*
 * Duplicate "from"'s mbuf pkthdr in "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 * In particular, this does a deep copy of the packet tags.
 */
int
m_dup_pkthdr(struct mbuf *to, const struct mbuf *from, int how)
{

#if 0
	/*
	 * The mbuf allocator only initializes the pkthdr
	 * when the mbuf is allocated with m_gethdr().  Many users
	 * (e.g. m_copy*, m_prepend) use m_get() and then
	 * smash the pkthdr as needed causing these
	 * assertions to trip.  For now just disable them.
	 */
	M_ASSERTPKTHDR(to);
	/* Note: with MAC, this may not be a good assertion. */
	KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags), ("m_dup_pkthdr: to has tags"));
#endif
	MBUF_CHECKSLEEP(how);
#ifdef MAC
	if (to->m_flags & M_PKTHDR)
		m_tag_delete_chain(to, NULL);
#endif
	to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
	if ((to->m_flags & M_EXT) == 0)
		to->m_data = to->m_pktdat;
	to->m_pkthdr = from->m_pkthdr;
	SLIST_INIT(&to->m_pkthdr.tags);
	return (m_tag_copy_chain(to, from, how));
}

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(struct mbuf *m, int len, int how)
{
	struct mbuf *mn;

	if (m->m_flags & M_PKTHDR)
		mn = m_gethdr(how, m->m_type);
	else
		mn = m_get(how, m->m_type);
	if (mn == NULL) {
		m_freem(m);
		return (NULL);
	}
	if (m->m_flags & M_PKTHDR)
		m_move_pkthdr(mn, m);
	mn->m_next = m;
	m = mn;
	if (len < M_SIZE(m))
		M_ALIGN(m, len);
	m->m_len = len;
	return (m);
}

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_WAITOK/M_NOWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
struct mbuf *
m_copym(const struct mbuf *m, int off0, int len, int wait)
{
	struct mbuf *n, **np;
	int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	KASSERT(off >= 0, ("m_copym, negative off %d", off));
	KASSERT(len >= 0, ("m_copym, negative len %d", len));
	MBUF_CHECKSLEEP(wait);
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == NULL) {
			KASSERT(len == M_COPYALL,
			    ("m_copym, length > size of mbuf chain"));
			break;
		}
		if (copyhdr)
			n = m_gethdr(wait, m->m_type);
		else
			n = m_get(wait, m->m_type);
		*np = n;
		if (n == NULL)
			goto nospace;
		if (copyhdr) {
			if (!m_dup_pkthdr(n, m, wait))
				goto nospace;
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data + off;
			mb_dupcl(n, m);
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (u_int)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}

	return (top);
nospace:
	m_freem(top);
	return (NULL);
}

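/*
 * Usage sketch (illustrative only; "m0" is a placeholder): m_copym() is
 * the cheap way to keep a reference to a packet, e.g. before passing the
 * original to a consumer that will free it.
 *
 *	struct mbuf *copy;
 *
 *	copy = m_copym(m0, 0, M_COPYALL, M_NOWAIT);
 *	if (copy == NULL)
 *		return (ENOBUFS);
 *
 * Because clusters are shared rather than copied, the result must be
 * treated as read-only; m_dup() further below makes a writable copy.
 */
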
/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 * Preserve alignment of the first mbuf so if the creator has left
 * some room at the beginning (e.g. for inserting protocol headers)
 * the copies still have the room available.
 */
struct mbuf *
m_copypacket(struct mbuf *m, int how)
{
	struct mbuf *top, *n, *o;

	MBUF_CHECKSLEEP(how);
	n = m_get(how, m->m_type);
	top = n;
	if (n == NULL)
		goto nospace;

	if (!m_dup_pkthdr(n, m, how))
		goto nospace;
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data;
		mb_dupcl(n, m);
	} else {
		n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		o = m_get(how, m->m_type);
		if (o == NULL)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data;
			mb_dupcl(n, m);
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return top;
nospace:
	m_freem(top);
	return (NULL);
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
{
	u_int count;

	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}
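
/*
 * Usage sketch (illustrative only; "struct ip" stands in for any
 * fixed-size header): m_copydata() extracts bytes into a linear buffer
 * regardless of how they are split across the chain.
 *
 *	struct ip ip;
 *
 *	if (m0->m_pkthdr.len < sizeof(ip))
 *		return (EINVAL);
 *	m_copydata(m0, 0, sizeof(ip), (caddr_t)&ip);
 *
 * Contrast with m_pullup() below, which rearranges the chain itself so
 * that mtod() can be applied directly.
 */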

/*
 * Copy a packet header mbuf chain into a completely new chain, including
 * copying any mbuf clusters.  Use this instead of m_copypacket() when
 * you need a writable copy of an mbuf chain.
 */
struct mbuf *
m_dup(const struct mbuf *m, int how)
{
	struct mbuf **p, *top = NULL;
	int remain, moff, nsize;

	MBUF_CHECKSLEEP(how);
	/* Sanity check */
	if (m == NULL)
		return (NULL);
	M_ASSERTPKTHDR(m);

	/* While there's more data, get a new mbuf, tack it on, and fill it */
	remain = m->m_pkthdr.len;
	moff = 0;
	p = &top;
	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
		struct mbuf *n;

		/* Get the next new mbuf */
		if (remain >= MINCLSIZE) {
			n = m_getcl(how, m->m_type, 0);
			nsize = MCLBYTES;
		} else {
			n = m_get(how, m->m_type);
			nsize = MLEN;
		}
		if (n == NULL)
			goto nospace;

		if (top == NULL) {		/* First one, must be PKTHDR */
			if (!m_dup_pkthdr(n, m, how)) {
				m_free(n);
				goto nospace;
			}
			if ((n->m_flags & M_EXT) == 0)
				nsize = MHLEN;
			n->m_flags &= ~M_RDONLY;
		}
		n->m_len = 0;

		/* Link it into the new chain */
		*p = n;
		p = &n->m_next;

		/* Copy data from original mbuf(s) into new mbuf */
		while (n->m_len < nsize && m != NULL) {
			int chunk = min(nsize - n->m_len, m->m_len - moff);

			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
			moff += chunk;
			n->m_len += chunk;
			remain -= chunk;
			if (moff == m->m_len) {
				m = m->m_next;
				moff = 0;
			}
		}

		/* Check correct total mbuf length */
		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
		    ("%s: bogus m_pkthdr.len", __func__));
	}
	return (top);

nospace:
	m_freem(top);
	return (NULL);
}

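/*
 * Usage sketch (illustrative only; "m0" and "w" are placeholders):
 * m_dup() trades an extra data copy for writability, so code that must
 * modify a possibly shared chain can do something like:
 *
 *	if (!M_WRITABLE(m0)) {
 *		struct mbuf *w;
 *
 *		w = m_dup(m0, M_NOWAIT);
 *		if (w == NULL)
 *			return (ENOBUFS);
 *		m_freem(m0);
 *		m0 = w;
 *	}
 *
 * M_WRITABLE() is the test for whether an mbuf's data area may be
 * modified without affecting other references.
 */
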
/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(struct mbuf *m, struct mbuf *n)
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (!M_WRITABLE(m) ||
		    M_TRAILINGSPACE(m) < n->m_len) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

/*
 * Concatenate two pkthdr mbuf chains.
 */
void
m_catpkt(struct mbuf *m, struct mbuf *n)
{

	M_ASSERTPKTHDR(m);
	M_ASSERTPKTHDR(n);

	m->m_pkthdr.len += n->m_pkthdr.len;
	m_demote(n, 1, 0);

	m_cat(m, n);
}

void
m_adj(struct mbuf *mp, int req_len)
{
	int len = req_len;
	struct mbuf *m;
	int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		if (mp->m_flags & M_PKTHDR)
			mp->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				if (m->m_next != NULL) {
					m_freem(m->m_next);
					m->m_next = NULL;
				}
				break;
			}
			count -= m->m_len;
		}
	}
}

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod will work
 * for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns NULL on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
struct mbuf *
m_pullup(struct mbuf *n, int len)
{
	struct mbuf *m;
	int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		m = m_get(M_NOWAIT, n->m_type);
		if (m == NULL)
			goto bad;
		if (n->m_flags & M_PKTHDR)
			m_move_pkthdr(m, n);
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	return (NULL);
}

/*
 * Like m_pullup(), except a new mbuf is always allocated, and we allow
 * the amount of empty space before the data in the new mbuf to be specified
 * (in the event that the caller expects to prepend later).
 */
int MSFail;

struct mbuf *
m_copyup(struct mbuf *n, int len, int dstoff)
{
	struct mbuf *m;
	int count, space;

	if (len > (MHLEN - dstoff))
		goto bad;
	m = m_get(M_NOWAIT, n->m_type);
	if (m == NULL)
		goto bad;
	if (n->m_flags & M_PKTHDR)
		m_move_pkthdr(m, n);
	m->m_data += dstoff;
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t),
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	MSFail++;
	return (NULL);
}

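/*
 * Usage sketch (illustrative only; "struct ip" stands in for any header
 * type): the canonical m_pullup() idiom in protocol input paths makes a
 * header contiguous before casting with mtod().
 *
 *	if (m->m_len < sizeof(struct ip) &&
 *	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
 *		return;
 *	ip = mtod(m, struct ip *);
 *
 * On failure m_pullup() has already freed the chain, so "m" must not be
 * touched again.  m_copyup() serves the same purpose when extra leading
 * space ("dstoff") must be reserved for headers prepended later.
 */
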
/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 *
 * Note that the resulting mbufs might be read-only, because the new
 * mbuf can end up sharing an mbuf cluster with the original mbuf if
 * the "breaking point" happens to lie within a cluster mbuf.  Use the
 * M_WRITABLE() macro to check for this case.
 */
struct mbuf *
m_split(struct mbuf *m0, int len0, int wait)
{
	struct mbuf *m, *n;
	u_int len = len0, remain;

	MBUF_CHECKSLEEP(wait);
	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == NULL)
		return (NULL);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR && remain == 0) {
		n = m_gethdr(wait, m0->m_type);
		if (n == NULL)
			return (NULL);
		n->m_next = m->m_next;
		m->m_next = NULL;
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		return (n);
	} else if (m0->m_flags & M_PKTHDR) {
		n = m_gethdr(wait, m0->m_type);
		if (n == NULL)
			return (NULL);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			M_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == NULL) {
				(void) m_free(n);
				return (NULL);
			} else {
				n->m_len = 0;
				return (n);
			}
		} else
			M_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = NULL;
		return (n);
	} else {
		n = m_get(wait, m->m_type);
		if (n == NULL)
			return (NULL);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data + len;
		mb_dupcl(n, m);
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = NULL;
	return (n);
}
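
/*
 * Usage sketch (illustrative only; "m0" and "reclen" are placeholders):
 * m_split() is typically used to peel one record or segment off the
 * front of a chain.
 *
 *	struct mbuf *tail;
 *
 *	tail = m_split(m0, reclen, M_NOWAIT);
 *	if (tail == NULL)
 *		return (ENOBUFS);
 *
 * Afterwards m0 holds the first reclen bytes and "tail" the remainder.
 * As noted above, either half may share a cluster with the other and so
 * may be read-only; check with M_WRITABLE() before modifying in place.
 */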
Grimes n->m_data = m->m_data + len; 114656a4e45aSAndre Oppermann mb_dupcl(n, m); 1147df8bae1dSRodney W. Grimes } else { 1148df8bae1dSRodney W. Grimes bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain); 1149df8bae1dSRodney W. Grimes } 1150df8bae1dSRodney W. Grimes n->m_len = remain; 1151df8bae1dSRodney W. Grimes m->m_len = len; 1152df8bae1dSRodney W. Grimes n->m_next = m->m_next; 1153122a814aSBosko Milekic m->m_next = NULL; 1154df8bae1dSRodney W. Grimes return (n); 1155df8bae1dSRodney W. Grimes } 1156df8bae1dSRodney W. Grimes /* 1157df8bae1dSRodney W. Grimes * Routine to copy from device local memory into mbufs. 1158f5eece3fSBosko Milekic * Note that the `off' argument is the offset into the first mbuf of the 1159f5eece3fSBosko Milekic * target chain at which to begin copying the data. 1160df8bae1dSRodney W. Grimes */ 1161df8bae1dSRodney W. Grimes struct mbuf * 1162f5eece3fSBosko Milekic m_devget(char *buf, int totlen, int off, struct ifnet *ifp, 1163122a814aSBosko Milekic void (*copy)(char *from, caddr_t to, u_int len)) 1164df8bae1dSRodney W. Grimes { 1165122a814aSBosko Milekic struct mbuf *m; 1166099a0e58SBosko Milekic struct mbuf *top = NULL, **mp = &top; 1167f5eece3fSBosko Milekic int len; 1168df8bae1dSRodney W. Grimes 1169f5eece3fSBosko Milekic if (off < 0 || off > MHLEN) 1170f5eece3fSBosko Milekic return (NULL); 1171f5eece3fSBosko Milekic 1172df8bae1dSRodney W. Grimes while (totlen > 0) { 1173099a0e58SBosko Milekic if (top == NULL) { /* First one, must be PKTHDR */ 1174f5eece3fSBosko Milekic if (totlen + off >= MINCLSIZE) { 1175eb1b1807SGleb Smirnoff m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 1176f5eece3fSBosko Milekic len = MCLBYTES; 1177df8bae1dSRodney W. Grimes } else { 1178eb1b1807SGleb Smirnoff m = m_gethdr(M_NOWAIT, MT_DATA); 1179099a0e58SBosko Milekic len = MHLEN; 1180099a0e58SBosko Milekic 1181099a0e58SBosko Milekic /* Place initial small packet/header at end of mbuf */ 1182099a0e58SBosko Milekic if (m && totlen + off + max_linkhdr <= MLEN) { 1183df8bae1dSRodney W. Grimes m->m_data += max_linkhdr; 1184f5eece3fSBosko Milekic len -= max_linkhdr; 1185df8bae1dSRodney W. Grimes } 1186f5eece3fSBosko Milekic } 1187099a0e58SBosko Milekic if (m == NULL) 1188099a0e58SBosko Milekic return NULL; 1189099a0e58SBosko Milekic m->m_pkthdr.rcvif = ifp; 1190099a0e58SBosko Milekic m->m_pkthdr.len = totlen; 1191099a0e58SBosko Milekic } else { 1192099a0e58SBosko Milekic if (totlen + off >= MINCLSIZE) { 1193eb1b1807SGleb Smirnoff m = m_getcl(M_NOWAIT, MT_DATA, 0); 1194099a0e58SBosko Milekic len = MCLBYTES; 1195099a0e58SBosko Milekic } else { 1196eb1b1807SGleb Smirnoff m = m_get(M_NOWAIT, MT_DATA); 1197099a0e58SBosko Milekic len = MLEN; 1198099a0e58SBosko Milekic } 1199099a0e58SBosko Milekic if (m == NULL) { 1200099a0e58SBosko Milekic m_freem(top); 1201099a0e58SBosko Milekic return NULL; 1202099a0e58SBosko Milekic } 1203099a0e58SBosko Milekic } 1204f5eece3fSBosko Milekic if (off) { 1205f5eece3fSBosko Milekic m->m_data += off; 1206f5eece3fSBosko Milekic len -= off; 1207f5eece3fSBosko Milekic off = 0; 1208f5eece3fSBosko Milekic } 1209f5eece3fSBosko Milekic m->m_len = len = min(totlen, len); 1210df8bae1dSRodney W. Grimes if (copy) 1211bd395ae8SBosko Milekic copy(buf, mtod(m, caddr_t), (u_int)len); 1212df8bae1dSRodney W. Grimes else 1213bd395ae8SBosko Milekic bcopy(buf, mtod(m, caddr_t), (u_int)len); 1214f5eece3fSBosko Milekic buf += len; 1215df8bae1dSRodney W. Grimes *mp = m; 1216df8bae1dSRodney W. Grimes mp = &m->m_next; 1217df8bae1dSRodney W. Grimes totlen -= len; 1218df8bae1dSRodney W.
Grimes } 1219df8bae1dSRodney W. Grimes return (top); 1220df8bae1dSRodney W. Grimes } 1221c5789ba3SPoul-Henning Kamp 1222c5789ba3SPoul-Henning Kamp /* 1223c5789ba3SPoul-Henning Kamp * Copy data from a buffer back into the indicated mbuf chain, 1224c5789ba3SPoul-Henning Kamp * starting "off" bytes from the beginning, extending the mbuf 1225c5789ba3SPoul-Henning Kamp * chain if necessary. 1226c5789ba3SPoul-Henning Kamp */ 1227c5789ba3SPoul-Henning Kamp void 122824665342SLuigi Rizzo m_copyback(struct mbuf *m0, int off, int len, c_caddr_t cp) 1229c5789ba3SPoul-Henning Kamp { 1230122a814aSBosko Milekic int mlen; 1231122a814aSBosko Milekic struct mbuf *m = m0, *n; 1232c5789ba3SPoul-Henning Kamp int totlen = 0; 1233c5789ba3SPoul-Henning Kamp 1234122a814aSBosko Milekic if (m0 == NULL) 1235c5789ba3SPoul-Henning Kamp return; 1236c5789ba3SPoul-Henning Kamp while (off > (mlen = m->m_len)) { 1237c5789ba3SPoul-Henning Kamp off -= mlen; 1238c5789ba3SPoul-Henning Kamp totlen += mlen; 1239122a814aSBosko Milekic if (m->m_next == NULL) { 1240eb1b1807SGleb Smirnoff n = m_get(M_NOWAIT, m->m_type); 1241122a814aSBosko Milekic if (n == NULL) 1242c5789ba3SPoul-Henning Kamp goto out; 1243099a0e58SBosko Milekic bzero(mtod(n, caddr_t), MLEN); 1244c5789ba3SPoul-Henning Kamp n->m_len = min(MLEN, len + off); 1245c5789ba3SPoul-Henning Kamp m->m_next = n; 1246c5789ba3SPoul-Henning Kamp } 1247c5789ba3SPoul-Henning Kamp m = m->m_next; 1248c5789ba3SPoul-Henning Kamp } 1249c5789ba3SPoul-Henning Kamp while (len > 0) { 1250129c5c81SAlexander Motin if (m->m_next == NULL && (len > m->m_len - off)) { 1251129c5c81SAlexander Motin m->m_len += min(len - (m->m_len - off), 1252129c5c81SAlexander Motin M_TRAILINGSPACE(m)); 1253129c5c81SAlexander Motin } 1254c5789ba3SPoul-Henning Kamp mlen = min(m->m_len - off, len); 1255bd395ae8SBosko Milekic bcopy(cp, off + mtod(m, caddr_t), (u_int)mlen); 1256c5789ba3SPoul-Henning Kamp cp += mlen; 1257c5789ba3SPoul-Henning Kamp len -= mlen; 1258c5789ba3SPoul-Henning Kamp mlen += off; 1259c5789ba3SPoul-Henning Kamp off = 0; 1260c5789ba3SPoul-Henning Kamp totlen += mlen; 1261c5789ba3SPoul-Henning Kamp if (len == 0) 1262c5789ba3SPoul-Henning Kamp break; 1263122a814aSBosko Milekic if (m->m_next == NULL) { 1264eb1b1807SGleb Smirnoff n = m_get(M_NOWAIT, m->m_type); 1265122a814aSBosko Milekic if (n == NULL) 1266c5789ba3SPoul-Henning Kamp break; 1267c5789ba3SPoul-Henning Kamp n->m_len = min(MLEN, len); 1268c5789ba3SPoul-Henning Kamp m->m_next = n; 1269c5789ba3SPoul-Henning Kamp } 1270c5789ba3SPoul-Henning Kamp m = m->m_next; 1271c5789ba3SPoul-Henning Kamp } 1272c5789ba3SPoul-Henning Kamp out: if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen)) 1273c5789ba3SPoul-Henning Kamp m->m_pkthdr.len = totlen; 1274c5789ba3SPoul-Henning Kamp } 1275ce4a64f7SPoul-Henning Kamp 127637621fd5SBruce M Simpson /* 12774873d175SSam Leffler * Append the specified data to the indicated mbuf chain. 12784873d175SSam Leffler * Extend the mbuf chain if the new data does not fit in 12794873d175SSam Leffler * existing space. 12804873d175SSam Leffler * 12814873d175SSam Leffler * Return 1 if able to complete the job; otherwise 0.
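 *
 * Illustrative sketch of a typical call; the "trailer" object and the
 * error handling are hypothetical, not part of this file.  Note that a
 * failed append may already have copied part of the data into the chain:
 *
 *	if (!m_append(m0, sizeof(trailer), (c_caddr_t)&trailer)) {
 *		m_freem(m0);
 *		return (ENOBUFS);
 *	}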
12824873d175SSam Leffler */ 12834873d175SSam Leffler int 12844873d175SSam Leffler m_append(struct mbuf *m0, int len, c_caddr_t cp) 12854873d175SSam Leffler { 12864873d175SSam Leffler struct mbuf *m, *n; 12874873d175SSam Leffler int remainder, space; 12884873d175SSam Leffler 12894873d175SSam Leffler for (m = m0; m->m_next != NULL; m = m->m_next) 12904873d175SSam Leffler ; 12914873d175SSam Leffler remainder = len; 12924873d175SSam Leffler space = M_TRAILINGSPACE(m); 12934873d175SSam Leffler if (space > 0) { 12944873d175SSam Leffler /* 12954873d175SSam Leffler * Copy into available space. 12964873d175SSam Leffler */ 12974873d175SSam Leffler if (space > remainder) 12984873d175SSam Leffler space = remainder; 12994873d175SSam Leffler bcopy(cp, mtod(m, caddr_t) + m->m_len, space); 13004873d175SSam Leffler m->m_len += space; 13014873d175SSam Leffler cp += space, remainder -= space; 13024873d175SSam Leffler } 13034873d175SSam Leffler while (remainder > 0) { 13044873d175SSam Leffler /* 13054873d175SSam Leffler * Allocate a new mbuf; could check space 13064873d175SSam Leffler * and allocate a cluster instead. 13074873d175SSam Leffler */ 1308eb1b1807SGleb Smirnoff n = m_get(M_NOWAIT, m->m_type); 13094873d175SSam Leffler if (n == NULL) 13104873d175SSam Leffler break; 13114873d175SSam Leffler n->m_len = min(MLEN, remainder); 1312a37c415eSSam Leffler bcopy(cp, mtod(n, caddr_t), n->m_len); 1313a37c415eSSam Leffler cp += n->m_len, remainder -= n->m_len; 13144873d175SSam Leffler m->m_next = n; 13154873d175SSam Leffler m = n; 13164873d175SSam Leffler } 13174873d175SSam Leffler if (m0->m_flags & M_PKTHDR) 13184873d175SSam Leffler m0->m_pkthdr.len += len - remainder; 13194873d175SSam Leffler return (remainder == 0); 13204873d175SSam Leffler } 13214873d175SSam Leffler 13224873d175SSam Leffler /* 132337621fd5SBruce M Simpson * Apply function f to the data in an mbuf chain starting "off" bytes from 132437621fd5SBruce M Simpson * the beginning, continuing for "len" bytes. 
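 *
 * Sketch of a caller-supplied callback (hypothetical, for illustration
 * only): f() is handed each contiguous region in turn, and a non-zero
 * return value aborts the walk and is passed back to the caller.
 *
 *	static int
 *	xor_region(void *arg, void *data, u_int len)
 *	{
 *		uint8_t *acc = arg, *p = data;
 *
 *		while (len-- > 0)
 *			*acc ^= *p++;
 *		return (0);
 *	}
 *
 *	error = m_apply(m, off, len, xor_region, &acc);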
132537621fd5SBruce M Simpson */ 132637621fd5SBruce M Simpson int 132737621fd5SBruce M Simpson m_apply(struct mbuf *m, int off, int len, 132854065297SBruce M Simpson int (*f)(void *, void *, u_int), void *arg) 132937621fd5SBruce M Simpson { 133054065297SBruce M Simpson u_int count; 133137621fd5SBruce M Simpson int rval; 133237621fd5SBruce M Simpson 133337621fd5SBruce M Simpson KASSERT(off >= 0, ("m_apply, negative off %d", off)); 133437621fd5SBruce M Simpson KASSERT(len >= 0, ("m_apply, negative len %d", len)); 133537621fd5SBruce M Simpson while (off > 0) { 133637621fd5SBruce M Simpson KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain")); 133737621fd5SBruce M Simpson if (off < m->m_len) 133837621fd5SBruce M Simpson break; 133937621fd5SBruce M Simpson off -= m->m_len; 134037621fd5SBruce M Simpson m = m->m_next; 134137621fd5SBruce M Simpson } 134237621fd5SBruce M Simpson while (len > 0) { 134337621fd5SBruce M Simpson KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain")); 134437621fd5SBruce M Simpson count = min(m->m_len - off, len); 134537621fd5SBruce M Simpson rval = (*f)(arg, mtod(m, caddr_t) + off, count); 134637621fd5SBruce M Simpson if (rval) 134737621fd5SBruce M Simpson return (rval); 134837621fd5SBruce M Simpson len -= count; 134937621fd5SBruce M Simpson off = 0; 135037621fd5SBruce M Simpson m = m->m_next; 135137621fd5SBruce M Simpson } 135237621fd5SBruce M Simpson return (0); 135337621fd5SBruce M Simpson } 135437621fd5SBruce M Simpson 135537621fd5SBruce M Simpson /* 135637621fd5SBruce M Simpson * Return a pointer to mbuf/offset of location in mbuf chain. 135737621fd5SBruce M Simpson */ 135837621fd5SBruce M Simpson struct mbuf * 135937621fd5SBruce M Simpson m_getptr(struct mbuf *m, int loc, int *off) 136037621fd5SBruce M Simpson { 136137621fd5SBruce M Simpson 136237621fd5SBruce M Simpson while (loc >= 0) { 136354065297SBruce M Simpson /* Normal end of search. */ 136437621fd5SBruce M Simpson if (m->m_len > loc) { 136537621fd5SBruce M Simpson *off = loc; 136637621fd5SBruce M Simpson return (m); 136737621fd5SBruce M Simpson } else { 136837621fd5SBruce M Simpson loc -= m->m_len; 136937621fd5SBruce M Simpson if (m->m_next == NULL) { 137037621fd5SBruce M Simpson if (loc == 0) { 137154065297SBruce M Simpson /* Point at the end of valid data. */ 137237621fd5SBruce M Simpson *off = m->m_len; 137337621fd5SBruce M Simpson return (m); 137454065297SBruce M Simpson } 137537621fd5SBruce M Simpson return (NULL); 137654065297SBruce M Simpson } 137737621fd5SBruce M Simpson m = m->m_next; 137837621fd5SBruce M Simpson } 137937621fd5SBruce M Simpson } 138037621fd5SBruce M Simpson return (NULL); 138137621fd5SBruce M Simpson } 138237621fd5SBruce M Simpson 1383ce4a64f7SPoul-Henning Kamp void 13847b125090SJohn-Mark Gurney m_print(const struct mbuf *m, int maxlen) 1385ce4a64f7SPoul-Henning Kamp { 1386ce4a64f7SPoul-Henning Kamp int len; 13877b125090SJohn-Mark Gurney int pdata; 13886357e7b5SEivind Eklund const struct mbuf *m2; 1389ce4a64f7SPoul-Henning Kamp 13907e949c46SKenneth D. Merry if (m == NULL) { 13917e949c46SKenneth D. Merry printf("mbuf: %p\n", m); 13927e949c46SKenneth D. Merry return; 13937e949c46SKenneth D. Merry } 13947e949c46SKenneth D. 
Merry 13957b125090SJohn-Mark Gurney if (m->m_flags & M_PKTHDR) 1396ce4a64f7SPoul-Henning Kamp len = m->m_pkthdr.len; 13977b125090SJohn-Mark Gurney else 13987b125090SJohn-Mark Gurney len = -1; 1399ce4a64f7SPoul-Henning Kamp m2 = m; 14007b125090SJohn-Mark Gurney while (m2 != NULL && (len == -1 || len)) { 14017b125090SJohn-Mark Gurney pdata = m2->m_len; 14027b125090SJohn-Mark Gurney if (maxlen != -1 && pdata > maxlen) 14037b125090SJohn-Mark Gurney pdata = maxlen; 14047b125090SJohn-Mark Gurney printf("mbuf: %p len: %d, next: %p, %b%s", m2, m2->m_len, 14057b125090SJohn-Mark Gurney m2->m_next, m2->m_flags, "\20\20freelist\17skipfw" 14067b125090SJohn-Mark Gurney "\11proto5\10proto4\7proto3\6proto2\5proto1\4rdonly" 14077b125090SJohn-Mark Gurney "\3eor\2pkthdr\1ext", pdata ? "" : "\n"); 14087b125090SJohn-Mark Gurney if (pdata) 140945e0d0aaSJohn-Mark Gurney printf(", %*D\n", pdata, (u_char *)m2->m_data, "-"); 14107b125090SJohn-Mark Gurney if (len != -1) 1411ce4a64f7SPoul-Henning Kamp len -= m2->m_len; 1412ce4a64f7SPoul-Henning Kamp m2 = m2->m_next; 1413ce4a64f7SPoul-Henning Kamp } 14147b125090SJohn-Mark Gurney if (len > 0) 14157b125090SJohn-Mark Gurney printf("%d bytes unaccounted for.\n", len); 1416ce4a64f7SPoul-Henning Kamp return; 1417ce4a64f7SPoul-Henning Kamp } 14183f2e06c5SPoul-Henning Kamp 1419bd395ae8SBosko Milekic u_int 14203f2e06c5SPoul-Henning Kamp m_fixhdr(struct mbuf *m0) 14213f2e06c5SPoul-Henning Kamp { 1422bd395ae8SBosko Milekic u_int len; 14233f2e06c5SPoul-Henning Kamp 1424ac6e585dSPoul-Henning Kamp len = m_length(m0, NULL); 14253f2e06c5SPoul-Henning Kamp m0->m_pkthdr.len = len; 1426ac6e585dSPoul-Henning Kamp return (len); 1427ac6e585dSPoul-Henning Kamp } 1428ac6e585dSPoul-Henning Kamp 1429bd395ae8SBosko Milekic u_int 1430ac6e585dSPoul-Henning Kamp m_length(struct mbuf *m0, struct mbuf **last) 1431ac6e585dSPoul-Henning Kamp { 1432ac6e585dSPoul-Henning Kamp struct mbuf *m; 1433bd395ae8SBosko Milekic u_int len; 1434ac6e585dSPoul-Henning Kamp 1435ac6e585dSPoul-Henning Kamp len = 0; 1436ac6e585dSPoul-Henning Kamp for (m = m0; m != NULL; m = m->m_next) { 1437ac6e585dSPoul-Henning Kamp len += m->m_len; 1438ac6e585dSPoul-Henning Kamp if (m->m_next == NULL) 1439ac6e585dSPoul-Henning Kamp break; 1440ac6e585dSPoul-Henning Kamp } 1441ac6e585dSPoul-Henning Kamp if (last != NULL) 1442ac6e585dSPoul-Henning Kamp *last = m; 1443ac6e585dSPoul-Henning Kamp return (len); 14443f2e06c5SPoul-Henning Kamp } 144555e9f80dSMike Silbersack 144655e9f80dSMike Silbersack /* 144755e9f80dSMike Silbersack * Defragment a mbuf chain, returning the shortest possible 144855e9f80dSMike Silbersack * chain of mbufs and clusters. If allocation fails and 144955e9f80dSMike Silbersack * this cannot be completed, NULL will be returned, but 145055e9f80dSMike Silbersack * the passed in chain will be unchanged. Upon success, 145155e9f80dSMike Silbersack * the original chain will be freed, and the new chain 145255e9f80dSMike Silbersack * will be returned. 145355e9f80dSMike Silbersack * 145455e9f80dSMike Silbersack * If a non-packet header is passed in, the original 145555e9f80dSMike Silbersack * mbuf (chain?) will be returned unharmed. 
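 *
 * Sketch of the usual transmit-path pattern (variable names are
 * illustrative only): because the original chain is untouched on
 * failure, the caller decides whether to drop or retry it.
 *
 *	if (too_many_segments) {
 *		struct mbuf *m_defragged = m_defrag(m, M_NOWAIT);
 *		if (m_defragged == NULL) {
 *			m_freem(m);
 *			return (ENOBUFS);
 *		}
 *		m = m_defragged;
 *	}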
145655e9f80dSMike Silbersack */ 145755e9f80dSMike Silbersack struct mbuf * 145855e9f80dSMike Silbersack m_defrag(struct mbuf *m0, int how) 145955e9f80dSMike Silbersack { 146055e9f80dSMike Silbersack struct mbuf *m_new = NULL, *m_final = NULL; 146155e9f80dSMike Silbersack int progress = 0, length; 146255e9f80dSMike Silbersack 1463063d8114SAlfred Perlstein MBUF_CHECKSLEEP(how); 146455e9f80dSMike Silbersack if (!(m0->m_flags & M_PKTHDR)) 146555e9f80dSMike Silbersack return (m0); 146655e9f80dSMike Silbersack 1467f8bf8e39SMike Silbersack m_fixhdr(m0); /* Needed sanity check */ 1468f8bf8e39SMike Silbersack 1469352d050eSMike Silbersack #ifdef MBUF_STRESS_TEST 1470352d050eSMike Silbersack if (m_defragrandomfailures) { 1471352d050eSMike Silbersack int temp = arc4random() & 0xff; 1472352d050eSMike Silbersack if (temp == 0xba) 1473352d050eSMike Silbersack goto nospace; 1474352d050eSMike Silbersack } 1475352d050eSMike Silbersack #endif 147655e9f80dSMike Silbersack 147755e9f80dSMike Silbersack if (m0->m_pkthdr.len > MHLEN) 147855e9f80dSMike Silbersack m_final = m_getcl(how, MT_DATA, M_PKTHDR); 147955e9f80dSMike Silbersack else 148055e9f80dSMike Silbersack m_final = m_gethdr(how, MT_DATA); 148155e9f80dSMike Silbersack 148255e9f80dSMike Silbersack if (m_final == NULL) 148355e9f80dSMike Silbersack goto nospace; 148455e9f80dSMike Silbersack 1485a89ec05eSPeter Wemm if (m_dup_pkthdr(m_final, m0, how) == 0) 148655e9f80dSMike Silbersack goto nospace; 148755e9f80dSMike Silbersack 148855e9f80dSMike Silbersack m_new = m_final; 148955e9f80dSMike Silbersack 149055e9f80dSMike Silbersack while (progress < m0->m_pkthdr.len) { 149155e9f80dSMike Silbersack length = m0->m_pkthdr.len - progress; 149255e9f80dSMike Silbersack if (length > MCLBYTES) 149355e9f80dSMike Silbersack length = MCLBYTES; 149455e9f80dSMike Silbersack 149555e9f80dSMike Silbersack if (m_new == NULL) { 149655e9f80dSMike Silbersack if (length > MLEN) 149755e9f80dSMike Silbersack m_new = m_getcl(how, MT_DATA, 0); 149855e9f80dSMike Silbersack else 149955e9f80dSMike Silbersack m_new = m_get(how, MT_DATA); 150055e9f80dSMike Silbersack if (m_new == NULL) 150155e9f80dSMike Silbersack goto nospace; 150255e9f80dSMike Silbersack } 150355e9f80dSMike Silbersack 150455e9f80dSMike Silbersack m_copydata(m0, progress, length, mtod(m_new, caddr_t)); 150555e9f80dSMike Silbersack progress += length; 150655e9f80dSMike Silbersack m_new->m_len = length; 150755e9f80dSMike Silbersack if (m_new != m_final) 150855e9f80dSMike Silbersack m_cat(m_final, m_new); 150955e9f80dSMike Silbersack m_new = NULL; 151055e9f80dSMike Silbersack } 151151710a45SMike Silbersack #ifdef MBUF_STRESS_TEST 151255e9f80dSMike Silbersack if (m0->m_next == NULL) 151355e9f80dSMike Silbersack m_defraguseless++; 151451710a45SMike Silbersack #endif 151555e9f80dSMike Silbersack m_freem(m0); 151655e9f80dSMike Silbersack m0 = m_final; 151751710a45SMike Silbersack #ifdef MBUF_STRESS_TEST 151855e9f80dSMike Silbersack m_defragpackets++; 151955e9f80dSMike Silbersack m_defragbytes += m0->m_pkthdr.len; 152051710a45SMike Silbersack #endif 152155e9f80dSMike Silbersack return (m0); 152255e9f80dSMike Silbersack nospace: 152351710a45SMike Silbersack #ifdef MBUF_STRESS_TEST 152455e9f80dSMike Silbersack m_defragfailure++; 152551710a45SMike Silbersack #endif 152655e9f80dSMike Silbersack if (m_final) 152755e9f80dSMike Silbersack m_freem(m_final); 152855e9f80dSMike Silbersack return (NULL); 152955e9f80dSMike Silbersack } 15303390d476SMike Silbersack 1531eeb76a18SSam Leffler /* 1532eeb76a18SSam Leffler * Defragment an mbuf chain, 
returning at most maxfrags separate 1533eeb76a18SSam Leffler * mbufs+clusters.  If this is not possible, NULL is returned and 1534eeb76a18SSam Leffler * the original mbuf chain is left in its present (potentially 1535eeb76a18SSam Leffler * modified) state. We use two techniques: collapsing consecutive 1536eeb76a18SSam Leffler * mbufs and replacing consecutive mbufs by a cluster. 1537eeb76a18SSam Leffler * 1538eeb76a18SSam Leffler * NB: this should really be named m_defrag but that name is taken 1539eeb76a18SSam Leffler */ 1540eeb76a18SSam Leffler struct mbuf * 1541eeb76a18SSam Leffler m_collapse(struct mbuf *m0, int how, int maxfrags) 1542eeb76a18SSam Leffler { 1543eeb76a18SSam Leffler struct mbuf *m, *n, *n2, **prev; 1544eeb76a18SSam Leffler u_int curfrags; 1545eeb76a18SSam Leffler 1546eeb76a18SSam Leffler /* 1547eeb76a18SSam Leffler * Calculate the current number of frags. 1548eeb76a18SSam Leffler */ 1549eeb76a18SSam Leffler curfrags = 0; 1550eeb76a18SSam Leffler for (m = m0; m != NULL; m = m->m_next) 1551eeb76a18SSam Leffler curfrags++; 1552eeb76a18SSam Leffler /* 1553eeb76a18SSam Leffler * First, try to collapse mbufs.  Note that we always collapse 1554eeb76a18SSam Leffler * towards the front so we don't need to deal with moving the 1555eeb76a18SSam Leffler * pkthdr.  This may be suboptimal if the first mbuf has much 1556eeb76a18SSam Leffler * less data than the following. 1557eeb76a18SSam Leffler */ 1558eeb76a18SSam Leffler m = m0; 1559eeb76a18SSam Leffler again: 1560eeb76a18SSam Leffler for (;;) { 1561eeb76a18SSam Leffler n = m->m_next; 1562eeb76a18SSam Leffler if (n == NULL) 1563eeb76a18SSam Leffler break; 156414d7c5b1SAndre Oppermann if (M_WRITABLE(m) && 1565eeb76a18SSam Leffler n->m_len < M_TRAILINGSPACE(m)) { 1566eeb76a18SSam Leffler bcopy(mtod(n, void *), mtod(m, char *) + m->m_len, 1567eeb76a18SSam Leffler n->m_len); 1568eeb76a18SSam Leffler m->m_len += n->m_len; 1569eeb76a18SSam Leffler m->m_next = n->m_next; 1570eeb76a18SSam Leffler m_free(n); 1571eeb76a18SSam Leffler if (--curfrags <= maxfrags) 1572eeb76a18SSam Leffler return m0; 1573eeb76a18SSam Leffler } else 1574eeb76a18SSam Leffler m = n; 1575eeb76a18SSam Leffler } 1576eeb76a18SSam Leffler KASSERT(maxfrags > 1, 1577eeb76a18SSam Leffler ("maxfrags %u, but normal collapse failed", maxfrags)); 1578eeb76a18SSam Leffler /* 1579eeb76a18SSam Leffler * Collapse consecutive mbufs to a cluster. 1580eeb76a18SSam Leffler */ 1581eeb76a18SSam Leffler prev = &m0->m_next; /* NB: not the first mbuf */ 1582eeb76a18SSam Leffler while ((n = *prev) != NULL) { 1583eeb76a18SSam Leffler if ((n2 = n->m_next) != NULL && 1584eeb76a18SSam Leffler n->m_len + n2->m_len < MCLBYTES) { 1585eeb76a18SSam Leffler m = m_getcl(how, MT_DATA, 0); 1586eeb76a18SSam Leffler if (m == NULL) 1587eeb76a18SSam Leffler goto bad; 1588eeb76a18SSam Leffler bcopy(mtod(n, void *), mtod(m, void *), n->m_len); 1589eeb76a18SSam Leffler bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len, 1590eeb76a18SSam Leffler n2->m_len); 1591eeb76a18SSam Leffler m->m_len = n->m_len + n2->m_len; 1592eeb76a18SSam Leffler m->m_next = n2->m_next; 1593eeb76a18SSam Leffler *prev = m; 1594eeb76a18SSam Leffler m_free(n); 1595eeb76a18SSam Leffler m_free(n2); 1596eeb76a18SSam Leffler if (--curfrags <= maxfrags) /* +1 cl -2 mbufs */ 1597eeb76a18SSam Leffler return m0; 1598eeb76a18SSam Leffler /* 1599eeb76a18SSam Leffler * Still not there, try the normal collapse 1600eeb76a18SSam Leffler * again before we allocate another cluster.
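 * The cluster just substituted typically has trailing space left
 * over, which the first pass may now be able to fill.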
1601eeb76a18SSam Leffler */ 1602eeb76a18SSam Leffler goto again; 1603eeb76a18SSam Leffler } 1604eeb76a18SSam Leffler prev = &n->m_next; 1605eeb76a18SSam Leffler } 1606eeb76a18SSam Leffler /* 1607eeb76a18SSam Leffler * No place where we can collapse to a cluster; punt. 1608eeb76a18SSam Leffler * This can occur if, for example, you request 2 frags 1609eeb76a18SSam Leffler * but the packet requires that both be clusters (we 1610eeb76a18SSam Leffler * never reallocate the first mbuf to avoid moving the 1611eeb76a18SSam Leffler * packet header). 1612eeb76a18SSam Leffler */ 1613eeb76a18SSam Leffler bad: 1614eeb76a18SSam Leffler return NULL; 1615eeb76a18SSam Leffler } 1616eeb76a18SSam Leffler 16173390d476SMike Silbersack #ifdef MBUF_STRESS_TEST 16183390d476SMike Silbersack 16193390d476SMike Silbersack /* 16203390d476SMike Silbersack * Fragment an mbuf chain. There's no reason you'd ever want to do 16213390d476SMike Silbersack * this in normal usage, but it's great for stress testing various 16223390d476SMike Silbersack * mbuf consumers. 16233390d476SMike Silbersack * 16243390d476SMike Silbersack * If fragmentation is not possible, the original chain will be 16253390d476SMike Silbersack * returned. 16263390d476SMike Silbersack * 16273390d476SMike Silbersack * Possible length values: 16283390d476SMike Silbersack * 0 no fragmentation will occur 16293390d476SMike Silbersack * > 0 each fragment will be of the specified length 16303390d476SMike Silbersack * -1 each fragment will be the same random value in length 16313390d476SMike Silbersack * -2 each fragment's length will be entirely random 16323390d476SMike Silbersack * (Random values range from 1 to 256) 16333390d476SMike Silbersack */ 16343390d476SMike Silbersack struct mbuf * 16353390d476SMike Silbersack m_fragment(struct mbuf *m0, int how, int length) 16363390d476SMike Silbersack { 16373390d476SMike Silbersack struct mbuf *m_new = NULL, *m_final = NULL; 16383390d476SMike Silbersack int progress = 0; 16393390d476SMike Silbersack 16403390d476SMike Silbersack if (!(m0->m_flags & M_PKTHDR)) 16413390d476SMike Silbersack return (m0); 16423390d476SMike Silbersack 16433390d476SMike Silbersack if ((length == 0) || (length < -2)) 16443390d476SMike Silbersack return (m0); 16453390d476SMike Silbersack 16463390d476SMike Silbersack m_fixhdr(m0); /* Needed sanity check */ 16473390d476SMike Silbersack 16483390d476SMike Silbersack m_final = m_getcl(how, MT_DATA, M_PKTHDR); 16493390d476SMike Silbersack 16503390d476SMike Silbersack if (m_final == NULL) 16513390d476SMike Silbersack goto nospace; 16523390d476SMike Silbersack 16538dee2f67SMike Silbersack if (m_dup_pkthdr(m_final, m0, how) == 0) 16543390d476SMike Silbersack goto nospace; 16553390d476SMike Silbersack 16563390d476SMike Silbersack m_new = m_final; 16573390d476SMike Silbersack 16583390d476SMike Silbersack if (length == -1) 16593390d476SMike Silbersack length = 1 + (arc4random() & 255); 16603390d476SMike Silbersack 16613390d476SMike Silbersack while (progress < m0->m_pkthdr.len) { 16623390d476SMike Silbersack int fraglen; 16633390d476SMike Silbersack 16643390d476SMike Silbersack if (length > 0) 16653390d476SMike Silbersack fraglen = length; 16663390d476SMike Silbersack else 16673390d476SMike Silbersack fraglen = 1 + (arc4random() & 255); 16683390d476SMike Silbersack if (fraglen > m0->m_pkthdr.len - progress) 16693390d476SMike Silbersack fraglen = m0->m_pkthdr.len - progress; 16703390d476SMike Silbersack 16713390d476SMike Silbersack if (fraglen > MCLBYTES) 16723390d476SMike Silbersack fraglen = MCLBYTES; 
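 /*
  * A fresh mbuf+cluster is allocated for each fragment; m_new is
  * reset to NULL once the previous fragment has been concatenated
  * onto m_final.
  */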
16733390d476SMike Silbersack 16743390d476SMike Silbersack if (m_new == NULL) { 16753390d476SMike Silbersack m_new = m_getcl(how, MT_DATA, 0); 16763390d476SMike Silbersack if (m_new == NULL) 16773390d476SMike Silbersack goto nospace; 16783390d476SMike Silbersack } 16793390d476SMike Silbersack 16803390d476SMike Silbersack m_copydata(m0, progress, fraglen, mtod(m_new, caddr_t)); 16813390d476SMike Silbersack progress += fraglen; 16823390d476SMike Silbersack m_new->m_len = fraglen; 16833390d476SMike Silbersack if (m_new != m_final) 16843390d476SMike Silbersack m_cat(m_final, m_new); 16853390d476SMike Silbersack m_new = NULL; 16863390d476SMike Silbersack } 16873390d476SMike Silbersack m_freem(m0); 16883390d476SMike Silbersack m0 = m_final; 16893390d476SMike Silbersack return (m0); 16903390d476SMike Silbersack nospace: 16913390d476SMike Silbersack if (m_final) 16923390d476SMike Silbersack m_freem(m_final); 16933390d476SMike Silbersack /* Return the original chain on failure */ 16943390d476SMike Silbersack return (m0); 16953390d476SMike Silbersack } 16963390d476SMike Silbersack 16973390d476SMike Silbersack #endif 1698beb699c7SMike Silbersack 16995e20f43dSAndre Oppermann /* 17005e20f43dSAndre Oppermann * Copy the contents of uio into a properly sized mbuf chain. 17015e20f43dSAndre Oppermann */ 1702beb699c7SMike Silbersack struct mbuf * 17035e20f43dSAndre Oppermann m_uiotombuf(struct uio *uio, int how, int len, int align, int flags) 1704beb699c7SMike Silbersack { 17055e20f43dSAndre Oppermann struct mbuf *m, *mb; 1706526d0bd5SKonstantin Belousov int error, length; 1707526d0bd5SKonstantin Belousov ssize_t total; 17085e20f43dSAndre Oppermann int progress = 0; 1709beb699c7SMike Silbersack 17105e20f43dSAndre Oppermann /* 17115e20f43dSAndre Oppermann * len can be zero or an arbitrarily large value bound by 17125e20f43dSAndre Oppermann * the total data supplied by the uio. 17135e20f43dSAndre Oppermann */ 1714beb699c7SMike Silbersack if (len > 0) 1715beb699c7SMike Silbersack total = min(uio->uio_resid, len); 1716beb699c7SMike Silbersack else 1717beb699c7SMike Silbersack total = uio->uio_resid; 17185e20f43dSAndre Oppermann 17195e20f43dSAndre Oppermann /* 17205e20f43dSAndre Oppermann * The smallest unit returned by m_getm2() is a single mbuf 17219128ec21SAndrew Thompson * with pkthdr.  We can't align past it. 17225e20f43dSAndre Oppermann */ 172375ae2570SMaksim Yevmenkin if (align >= MHLEN) 1724beb699c7SMike Silbersack return (NULL); 17255e20f43dSAndre Oppermann 17267c32173bSAndre Oppermann /* 17277c32173bSAndre Oppermann * Give us the full allocation or nothing. 17287c32173bSAndre Oppermann * If len is zero, return the smallest empty mbuf. 17297c32173bSAndre Oppermann */ 17307c32173bSAndre Oppermann m = m_getm2(NULL, max(total + align, 1), how, MT_DATA, flags); 17315e20f43dSAndre Oppermann if (m == NULL) 17325e20f43dSAndre Oppermann return (NULL); 17335e20f43dSAndre Oppermann m->m_data += align; 17345e20f43dSAndre Oppermann 17355e20f43dSAndre Oppermann /* Fill all mbufs with uio data and update header information.
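 * Each mbuf in the chain built by m_getm2() is filled up to its
 * M_TRAILINGSPACE(); when M_PKTHDR was requested, the byte count is
 * also accumulated in the leading mbuf's m_pkthdr.len.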
*/ 17365e20f43dSAndre Oppermann for (mb = m; mb != NULL; mb = mb->m_next) { 17375e20f43dSAndre Oppermann length = min(M_TRAILINGSPACE(mb), total - progress); 17385e20f43dSAndre Oppermann 17395e20f43dSAndre Oppermann error = uiomove(mtod(mb, void *), length, uio); 17405e20f43dSAndre Oppermann if (error) { 17415e20f43dSAndre Oppermann m_freem(m); 17425e20f43dSAndre Oppermann return (NULL); 17435e20f43dSAndre Oppermann } 17445e20f43dSAndre Oppermann 17455e20f43dSAndre Oppermann mb->m_len = length; 17465e20f43dSAndre Oppermann progress += length; 17475e20f43dSAndre Oppermann if (flags & M_PKTHDR) 17485e20f43dSAndre Oppermann m->m_pkthdr.len += length; 17495e20f43dSAndre Oppermann } 17505e20f43dSAndre Oppermann KASSERT(progress == total, ("%s: progress != total", __func__)); 17515e20f43dSAndre Oppermann 17525e20f43dSAndre Oppermann return (m); 1753beb699c7SMike Silbersack } 1754ab8ab90cSSam Leffler 1755ab8ab90cSSam Leffler /* 1756bc05b2f6SAndre Oppermann * Copy an mbuf chain into a uio limited by len if set. 1757bc05b2f6SAndre Oppermann */ 1758bc05b2f6SAndre Oppermann int 1759bc05b2f6SAndre Oppermann m_mbuftouio(struct uio *uio, struct mbuf *m, int len) 1760bc05b2f6SAndre Oppermann { 1761bc05b2f6SAndre Oppermann int error, length, total; 1762bc05b2f6SAndre Oppermann int progress = 0; 1763bc05b2f6SAndre Oppermann 1764bc05b2f6SAndre Oppermann if (len > 0) 1765bc05b2f6SAndre Oppermann total = min(uio->uio_resid, len); 1766bc05b2f6SAndre Oppermann else 1767bc05b2f6SAndre Oppermann total = uio->uio_resid; 1768bc05b2f6SAndre Oppermann 1769bc05b2f6SAndre Oppermann /* Fill the uio with data from the mbufs. */ 1770bc05b2f6SAndre Oppermann for (; m != NULL; m = m->m_next) { 1771bc05b2f6SAndre Oppermann length = min(m->m_len, total - progress); 1772bc05b2f6SAndre Oppermann 1773bc05b2f6SAndre Oppermann error = uiomove(mtod(m, void *), length, uio); 1774bc05b2f6SAndre Oppermann if (error) 1775bc05b2f6SAndre Oppermann return (error); 1776bc05b2f6SAndre Oppermann 1777bc05b2f6SAndre Oppermann progress += length; 1778bc05b2f6SAndre Oppermann } 1779bc05b2f6SAndre Oppermann 1780bc05b2f6SAndre Oppermann return (0); 1781bc05b2f6SAndre Oppermann } 1782bc05b2f6SAndre Oppermann 1783bc05b2f6SAndre Oppermann /* 178447e2996eSSam Leffler * Create a writable copy of the mbuf chain. While doing this 178547e2996eSSam Leffler * we compact the chain with a goal of producing a chain with 178647e2996eSSam Leffler * at most two mbufs. The second mbuf in this chain is likely 178747e2996eSSam Leffler * to be a cluster. The primary purpose of this work is to create 178847e2996eSSam Leffler * a writable packet for encryption, compression, etc. The 178947e2996eSSam Leffler * secondary goal is to linearize the data so the data can be 179047e2996eSSam Leffler * passed to crypto hardware in the most efficient manner possible. 179147e2996eSSam Leffler */ 179247e2996eSSam Leffler struct mbuf * 179347e2996eSSam Leffler m_unshare(struct mbuf *m0, int how) 179447e2996eSSam Leffler { 179547e2996eSSam Leffler struct mbuf *m, *mprev; 179647e2996eSSam Leffler struct mbuf *n, *mfirst, *mlast; 179747e2996eSSam Leffler int len, off; 179847e2996eSSam Leffler 179947e2996eSSam Leffler mprev = NULL; 180047e2996eSSam Leffler for (m = m0; m != NULL; m = mprev->m_next) { 180147e2996eSSam Leffler /* 180247e2996eSSam Leffler * Regular mbufs are ignored unless there's a cluster 180347e2996eSSam Leffler * in front of it that we can use to coalesce. 
We do 180447e2996eSSam Leffler * the latter mainly so later clusters can be coalesced 180547e2996eSSam Leffler * also w/o having to handle them specially (i.e. convert 180647e2996eSSam Leffler * mbuf+cluster -> cluster). This optimization is heavily 180747e2996eSSam Leffler * influenced by the assumption that we're running over 180847e2996eSSam Leffler * Ethernet where MCLBYTES is large enough that the max 180947e2996eSSam Leffler * packet size will permit lots of coalescing into a 181047e2996eSSam Leffler * single cluster. This in turn permits efficient 181147e2996eSSam Leffler * crypto operations, especially when using hardware. 181247e2996eSSam Leffler */ 181347e2996eSSam Leffler if ((m->m_flags & M_EXT) == 0) { 181447e2996eSSam Leffler if (mprev && (mprev->m_flags & M_EXT) && 181547e2996eSSam Leffler m->m_len <= M_TRAILINGSPACE(mprev)) { 181647e2996eSSam Leffler /* XXX: this ignores mbuf types */ 181747e2996eSSam Leffler memcpy(mtod(mprev, caddr_t) + mprev->m_len, 181847e2996eSSam Leffler mtod(m, caddr_t), m->m_len); 181947e2996eSSam Leffler mprev->m_len += m->m_len; 182047e2996eSSam Leffler mprev->m_next = m->m_next; /* unlink from chain */ 182147e2996eSSam Leffler m_free(m); /* reclaim mbuf */ 182247e2996eSSam Leffler #if 0 182347e2996eSSam Leffler newipsecstat.ips_mbcoalesced++; 182447e2996eSSam Leffler #endif 182547e2996eSSam Leffler } else { 182647e2996eSSam Leffler mprev = m; 182747e2996eSSam Leffler } 182847e2996eSSam Leffler continue; 182947e2996eSSam Leffler } 183047e2996eSSam Leffler /* 183147e2996eSSam Leffler * Writable mbufs are left alone (for now). 183247e2996eSSam Leffler */ 183347e2996eSSam Leffler if (M_WRITABLE(m)) { 183447e2996eSSam Leffler mprev = m; 183547e2996eSSam Leffler continue; 183647e2996eSSam Leffler } 183747e2996eSSam Leffler 183847e2996eSSam Leffler /* 183947e2996eSSam Leffler * Not writable, replace with a copy or coalesce with 184047e2996eSSam Leffler * the previous mbuf if possible (since we have to copy 184147e2996eSSam Leffler * it anyway, we try to reduce the number of mbufs and 184247e2996eSSam Leffler * clusters so that future work is easier). 184347e2996eSSam Leffler */ 184447e2996eSSam Leffler KASSERT(m->m_flags & M_EXT, ("m_flags 0x%x", m->m_flags)); 184547e2996eSSam Leffler /* NB: we only coalesce into a cluster or larger */ 184647e2996eSSam Leffler if (mprev != NULL && (mprev->m_flags & M_EXT) && 184747e2996eSSam Leffler m->m_len <= M_TRAILINGSPACE(mprev)) { 184847e2996eSSam Leffler /* XXX: this ignores mbuf types */ 184947e2996eSSam Leffler memcpy(mtod(mprev, caddr_t) + mprev->m_len, 185047e2996eSSam Leffler mtod(m, caddr_t), m->m_len); 185147e2996eSSam Leffler mprev->m_len += m->m_len; 185247e2996eSSam Leffler mprev->m_next = m->m_next; /* unlink from chain */ 185347e2996eSSam Leffler m_free(m); /* reclaim mbuf */ 185447e2996eSSam Leffler #if 0 185547e2996eSSam Leffler newipsecstat.ips_clcoalesced++; 185647e2996eSSam Leffler #endif 185747e2996eSSam Leffler continue; 185847e2996eSSam Leffler } 185947e2996eSSam Leffler 186047e2996eSSam Leffler /* 18615368b81eSGleb Smirnoff * Allocate new space to hold the copy and copy the data. 18625368b81eSGleb Smirnoff * We deal with jumbo mbufs (i.e. m_len > MCLBYTES) by 18635368b81eSGleb Smirnoff * splitting them into clusters. We could just malloc a 18645368b81eSGleb Smirnoff * buffer and make it external but too many device drivers 18655368b81eSGleb Smirnoff * don't know how to break up the non-contiguous memory when 18665368b81eSGleb Smirnoff * doing DMA. 
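 * The loop below builds a private sub-chain (mfirst .. mlast) of
 * freshly allocated clusters, copying at most MCLBYTES at a time,
 * and then splices that sub-chain into the packet in place of the
 * read-only mbuf.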
186747e2996eSSam Leffler */ 186847e2996eSSam Leffler n = m_getcl(how, m->m_type, m->m_flags); 186947e2996eSSam Leffler if (n == NULL) { 187047e2996eSSam Leffler m_freem(m0); 187147e2996eSSam Leffler return (NULL); 187247e2996eSSam Leffler } 187347e2996eSSam Leffler len = m->m_len; 187447e2996eSSam Leffler off = 0; 187547e2996eSSam Leffler mfirst = n; 187647e2996eSSam Leffler mlast = NULL; 187747e2996eSSam Leffler for (;;) { 187847e2996eSSam Leffler int cc = min(len, MCLBYTES); 187947e2996eSSam Leffler memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off, cc); 188047e2996eSSam Leffler n->m_len = cc; 188147e2996eSSam Leffler if (mlast != NULL) 188247e2996eSSam Leffler mlast->m_next = n; 188347e2996eSSam Leffler mlast = n; 188447e2996eSSam Leffler #if 0 188547e2996eSSam Leffler newipsecstat.ips_clcopied++; 188647e2996eSSam Leffler #endif 188747e2996eSSam Leffler 188847e2996eSSam Leffler len -= cc; 188947e2996eSSam Leffler if (len <= 0) 189047e2996eSSam Leffler break; 189147e2996eSSam Leffler off += cc; 189247e2996eSSam Leffler 189347e2996eSSam Leffler n = m_getcl(how, m->m_type, m->m_flags); 189447e2996eSSam Leffler if (n == NULL) { 189547e2996eSSam Leffler m_freem(mfirst); 189647e2996eSSam Leffler m_freem(m0); 189747e2996eSSam Leffler return (NULL); 189847e2996eSSam Leffler } 189947e2996eSSam Leffler } 190047e2996eSSam Leffler n->m_next = m->m_next; 190147e2996eSSam Leffler if (mprev == NULL) 190247e2996eSSam Leffler m0 = mfirst; /* new head of chain */ 190347e2996eSSam Leffler else 190447e2996eSSam Leffler mprev->m_next = mfirst; /* replace old mbuf */ 190547e2996eSSam Leffler m_free(m); /* release old mbuf */ 190647e2996eSSam Leffler mprev = mfirst; 190747e2996eSSam Leffler } 190847e2996eSSam Leffler return (m0); 190947e2996eSSam Leffler } 19106eeac1d9SJulian Elischer 19116eeac1d9SJulian Elischer #ifdef MBUF_PROFILING 19126eeac1d9SJulian Elischer 19136eeac1d9SJulian Elischer #define MP_BUCKETS 32 /* don't just change this as things may overflow.*/ 19146eeac1d9SJulian Elischer struct mbufprofile { 19152182c0cfSJulian Elischer uintmax_t wasted[MP_BUCKETS]; 19162182c0cfSJulian Elischer uintmax_t used[MP_BUCKETS]; 19172182c0cfSJulian Elischer uintmax_t segments[MP_BUCKETS]; 19186eeac1d9SJulian Elischer } mbprof; 19196eeac1d9SJulian Elischer 19206eeac1d9SJulian Elischer #define MP_MAXDIGITS 21 /* strlen("16,000,000,000,000,000,000") == 21 */ 19216eeac1d9SJulian Elischer #define MP_NUMLINES 6 19226eeac1d9SJulian Elischer #define MP_NUMSPERLINE 16 19236eeac1d9SJulian Elischer #define MP_EXTRABYTES 64 /* > strlen("used:\nwasted:\nsegments:\n") */ 19246eeac1d9SJulian Elischer /* work out max space needed and add a bit of spare space too */ 19256eeac1d9SJulian Elischer #define MP_MAXLINE ((MP_MAXDIGITS+1) * MP_NUMSPERLINE) 19266eeac1d9SJulian Elischer #define MP_BUFSIZE ((MP_MAXLINE * MP_NUMLINES) + 1 + MP_EXTRABYTES) 19276eeac1d9SJulian Elischer 19286eeac1d9SJulian Elischer char mbprofbuf[MP_BUFSIZE]; 19296eeac1d9SJulian Elischer 19306eeac1d9SJulian Elischer void 19316eeac1d9SJulian Elischer m_profile(struct mbuf *m) 19326eeac1d9SJulian Elischer { 19336eeac1d9SJulian Elischer int segments = 0; 19346eeac1d9SJulian Elischer int used = 0; 19356eeac1d9SJulian Elischer int wasted = 0; 19366eeac1d9SJulian Elischer 19376eeac1d9SJulian Elischer while (m) { 19386eeac1d9SJulian Elischer segments++; 19396eeac1d9SJulian Elischer used += m->m_len; 19406eeac1d9SJulian Elischer if (m->m_flags & M_EXT) { 19416eeac1d9SJulian Elischer wasted += MHLEN - sizeof(m->m_ext) + 19426eeac1d9SJulian Elischer m->m_ext.ext_size - 
m->m_len; 19436eeac1d9SJulian Elischer } else { 19446eeac1d9SJulian Elischer if (m->m_flags & M_PKTHDR) 19456eeac1d9SJulian Elischer wasted += MHLEN - m->m_len; 19466eeac1d9SJulian Elischer else 19476eeac1d9SJulian Elischer wasted += MLEN - m->m_len; 19486eeac1d9SJulian Elischer } 19496eeac1d9SJulian Elischer m = m->m_next; 19506eeac1d9SJulian Elischer } 19516eeac1d9SJulian Elischer /* be paranoid.. it helps */ 19526eeac1d9SJulian Elischer if (segments > MP_BUCKETS - 1) 19536eeac1d9SJulian Elischer segments = MP_BUCKETS - 1; 19546eeac1d9SJulian Elischer if (used > 100000) 19556eeac1d9SJulian Elischer used = 100000; 19566eeac1d9SJulian Elischer if (wasted > 100000) 19576eeac1d9SJulian Elischer wasted = 100000; 19586eeac1d9SJulian Elischer /* store in the appropriate bucket */ 19596eeac1d9SJulian Elischer /* don't bother locking. if it's slightly off, so what? */ 19606eeac1d9SJulian Elischer mbprof.segments[segments]++; 19616eeac1d9SJulian Elischer mbprof.used[fls(used)]++; 19626eeac1d9SJulian Elischer mbprof.wasted[fls(wasted)]++; 19636eeac1d9SJulian Elischer } 19646eeac1d9SJulian Elischer 19656eeac1d9SJulian Elischer static void 19666eeac1d9SJulian Elischer mbprof_textify(void) 19676eeac1d9SJulian Elischer { 19686eeac1d9SJulian Elischer int offset; 19696eeac1d9SJulian Elischer char *c; 197060ae52f7SEd Schouten uint64_t *p; 19716eeac1d9SJulian Elischer 19726eeac1d9SJulian Elischer p = &mbprof.wasted[0]; 19736eeac1d9SJulian Elischer c = mbprofbuf; 19746eeac1d9SJulian Elischer offset = snprintf(c, MP_MAXLINE + 10, 19756eeac1d9SJulian Elischer "wasted:\n" 19762182c0cfSJulian Elischer "%ju %ju %ju %ju %ju %ju %ju %ju " 19772182c0cfSJulian Elischer "%ju %ju %ju %ju %ju %ju %ju %ju\n", 19786eeac1d9SJulian Elischer p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], 19796eeac1d9SJulian Elischer p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]); 19806eeac1d9SJulian Elischer #ifdef BIG_ARRAY 19816eeac1d9SJulian Elischer p = &mbprof.wasted[16]; 19826eeac1d9SJulian Elischer c += offset; 19836eeac1d9SJulian Elischer offset = snprintf(c, MP_MAXLINE, 19842182c0cfSJulian Elischer "%ju %ju %ju %ju %ju %ju %ju %ju " 19852182c0cfSJulian Elischer "%ju %ju %ju %ju %ju %ju %ju %ju\n", 19866eeac1d9SJulian Elischer p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], 19876eeac1d9SJulian Elischer p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]); 19886eeac1d9SJulian Elischer #endif 19896eeac1d9SJulian Elischer p = &mbprof.used[0]; 19906eeac1d9SJulian Elischer c += offset; 19916eeac1d9SJulian Elischer offset = snprintf(c, MP_MAXLINE + 10, 19926eeac1d9SJulian Elischer "used:\n" 19932182c0cfSJulian Elischer "%ju %ju %ju %ju %ju %ju %ju %ju " 19942182c0cfSJulian Elischer "%ju %ju %ju %ju %ju %ju %ju %ju\n", 19956eeac1d9SJulian Elischer p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], 19966eeac1d9SJulian Elischer p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]); 19976eeac1d9SJulian Elischer #ifdef BIG_ARRAY 19986eeac1d9SJulian Elischer p = &mbprof.used[16]; 19996eeac1d9SJulian Elischer c += offset; 20006eeac1d9SJulian Elischer offset = snprintf(c, MP_MAXLINE, 20012182c0cfSJulian Elischer "%ju %ju %ju %ju %ju %ju %ju %ju " 20022182c0cfSJulian Elischer "%ju %ju %ju %ju %ju %ju %ju %ju\n", 20036eeac1d9SJulian Elischer p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], 20046eeac1d9SJulian Elischer p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]); 20056eeac1d9SJulian Elischer #endif 20066eeac1d9SJulian Elischer p = &mbprof.segments[0]; 20076eeac1d9SJulian Elischer c += offset; 20086eeac1d9SJulian Elischer offset = 
snprintf(c, MP_MAXLINE + 10, 20096eeac1d9SJulian Elischer "segments:\n" 20102182c0cfSJulian Elischer "%ju %ju %ju %ju %ju %ju %ju %ju " 20112182c0cfSJulian Elischer "%ju %ju %ju %ju %ju %ju %ju %ju\n", 20126eeac1d9SJulian Elischer p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], 20136eeac1d9SJulian Elischer p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]); 20146eeac1d9SJulian Elischer #ifdef BIG_ARRAY 20156eeac1d9SJulian Elischer p = &mbprof.segments[16]; 20166eeac1d9SJulian Elischer c += offset; 20176eeac1d9SJulian Elischer offset = snprintf(c, MP_MAXLINE, 20182182c0cfSJulian Elischer "%ju %ju %ju %ju %ju %ju %ju %ju " 20192182c0cfSJulian Elischer "%ju %ju %ju %ju %ju %ju %ju %jju", 20206eeac1d9SJulian Elischer p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], 20216eeac1d9SJulian Elischer p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]); 20226eeac1d9SJulian Elischer #endif 20236eeac1d9SJulian Elischer } 20246eeac1d9SJulian Elischer 20256eeac1d9SJulian Elischer static int 20266eeac1d9SJulian Elischer mbprof_handler(SYSCTL_HANDLER_ARGS) 20276eeac1d9SJulian Elischer { 20286eeac1d9SJulian Elischer int error; 20296eeac1d9SJulian Elischer 20306eeac1d9SJulian Elischer mbprof_textify(); 20316eeac1d9SJulian Elischer error = SYSCTL_OUT(req, mbprofbuf, strlen(mbprofbuf) + 1); 20326eeac1d9SJulian Elischer return (error); 20336eeac1d9SJulian Elischer } 20346eeac1d9SJulian Elischer 20356eeac1d9SJulian Elischer static int 20366eeac1d9SJulian Elischer mbprof_clr_handler(SYSCTL_HANDLER_ARGS) 20376eeac1d9SJulian Elischer { 20386eeac1d9SJulian Elischer int clear, error; 20396eeac1d9SJulian Elischer 20406eeac1d9SJulian Elischer clear = 0; 20416eeac1d9SJulian Elischer error = sysctl_handle_int(oidp, &clear, 0, req); 20426eeac1d9SJulian Elischer if (error || !req->newptr) 20436eeac1d9SJulian Elischer return (error); 20446eeac1d9SJulian Elischer 20456eeac1d9SJulian Elischer if (clear) { 20466eeac1d9SJulian Elischer bzero(&mbprof, sizeof(mbprof)); 20476eeac1d9SJulian Elischer } 20486eeac1d9SJulian Elischer 20496eeac1d9SJulian Elischer return (error); 20506eeac1d9SJulian Elischer } 20516eeac1d9SJulian Elischer 20526eeac1d9SJulian Elischer 20536eeac1d9SJulian Elischer SYSCTL_PROC(_kern_ipc, OID_AUTO, mbufprofile, CTLTYPE_STRING|CTLFLAG_RD, 20546eeac1d9SJulian Elischer NULL, 0, mbprof_handler, "A", "mbuf profiling statistics"); 20556eeac1d9SJulian Elischer 20566eeac1d9SJulian Elischer SYSCTL_PROC(_kern_ipc, OID_AUTO, mbufprofileclr, CTLTYPE_INT|CTLFLAG_RW, 20576eeac1d9SJulian Elischer NULL, 0, mbprof_clr_handler, "I", "clear mbuf profiling statistics"); 20586eeac1d9SJulian Elischer #endif 20596eeac1d9SJulian Elischer 2060
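/*
 * A sketch of how the MBUF_PROFILING histograms above would be read and
 * reset from userland with sysctl(8), assuming the kernel was built with
 * the MBUF_PROFILING option:
 *
 *	sysctl kern.ipc.mbufprofile
 *	sysctl kern.ipc.mbufprofileclr=1
 */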