/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 * $FreeBSD$
 */

#include "opt_param.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

static void mbinit(void *);
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbinit, NULL)

struct mbuf *mbutl;
struct mbstat mbstat;
u_long	mbtypes[MT_NTYPES];
int	max_linkhdr;
int	max_protohdr;
int	max_hdr;
int	max_datalen;
int	nmbclusters;
int	nmbufs;
int	nmbcnt;
u_long	m_mballoc_wid = 0;
u_long	m_clalloc_wid = 0;

/*
 * freelist header structures...
 * mbffree_lst, mclfree_lst, mcntfree_lst
 */
struct mbffree_lst mmbfree;
struct mclfree_lst mclfree;
struct mcntfree_lst mcntfree;

/*
 * sysctl(8) exported objects
 */
SYSCTL_DECL(_kern_ipc);
SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
	   &max_linkhdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
	   &max_protohdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
	   &max_datalen, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_wait, CTLFLAG_RW,
	   &mbuf_wait, 0, "");
SYSCTL_STRUCT(_kern_ipc, KIPC_MBSTAT, mbstat, CTLFLAG_RD, &mbstat, mbstat, "");
SYSCTL_OPAQUE(_kern_ipc, OID_AUTO, mbtypes, CTLFLAG_RD, mbtypes,
	   sizeof(mbtypes), "LU", "");
SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLFLAG_RD,
	   &nmbclusters, 0, "Maximum number of mbuf clusters available");
SYSCTL_INT(_kern_ipc, OID_AUTO, nmbufs, CTLFLAG_RD, &nmbufs, 0,
	   "Maximum number of mbufs available");
SYSCTL_INT(_kern_ipc, OID_AUTO, nmbcnt, CTLFLAG_RD, &nmbcnt, 0,
	   "Maximum number of ext_buf counters available");
#ifndef NMBCLUSTERS
#define NMBCLUSTERS	(512 + MAXUSERS * 16)
#endif
TUNABLE_INT_DECL("kern.ipc.nmbclusters", NMBCLUSTERS, nmbclusters);
TUNABLE_INT_DECL("kern.ipc.nmbufs", NMBCLUSTERS * 4, nmbufs);
TUNABLE_INT_DECL("kern.ipc.nmbcnt", EXT_COUNTERS, nmbcnt);

static void	m_reclaim(void);

/* Initial allocation numbers */
#define NCL_INIT	2
#define NMB_INIT	16
#define REF_INIT	NMBCLUSTERS
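/*
 * Illustrative note: the three limits above are boot-time tunables read
 * from the kernel environment via TUNABLE_INT_DECL() and exported read-only
 * through sysctl(8), so they can only be raised at boot, typically from
 * /boot/loader.conf.  The values below are purely illustrative:
 *
 *	kern.ipc.nmbclusters="4096"
 *	kern.ipc.nmbufs="16384"
 */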
/*
 * Full mbuf subsystem initialization done here.
 *
 * XXX: If ever we have system specific map setups to do, then move them to
 *      machdep.c - for now, there is no reason for this stuff to go there.
 */
static void
mbinit(void *dummy)
{
	vm_offset_t maxaddr, mb_map_size;

	/*
	 * Setup the mb_map, allocate requested VM space.
	 */
	mb_map_size = nmbufs * MSIZE + nmbclusters * MCLBYTES + nmbcnt
	    * sizeof(union mext_refcnt);
	mb_map_size = roundup2(mb_map_size, PAGE_SIZE);
	mb_map = kmem_suballoc(kmem_map, (vm_offset_t *)&mbutl, &maxaddr,
	    mb_map_size);
	/* XXX XXX XXX: mb_map->system_map = 1; */

	/*
	 * Initialize the free list headers, and setup locks for lists.
	 */
	mmbfree.m_head = NULL;
	mclfree.m_head = NULL;
	mcntfree.m_head = NULL;
	mtx_init(&mmbfree.m_mtx, "mbuf free list lock", MTX_DEF);
	mtx_init(&mclfree.m_mtx, "mcluster free list lock", MTX_DEF);
	mtx_init(&mcntfree.m_mtx, "m_ext counter free list lock", MTX_DEF);

	/*
	 * Initialize mbuf subsystem (sysctl exported) statistics structure.
	 */
	mbstat.m_msize = MSIZE;
	mbstat.m_mclbytes = MCLBYTES;
	mbstat.m_minclsize = MINCLSIZE;
	mbstat.m_mlen = MLEN;
	mbstat.m_mhlen = MHLEN;

	/*
	 * Perform some initial allocations.
	 */
	mtx_lock(&mcntfree.m_mtx);
	if (m_alloc_ref(REF_INIT, M_DONTWAIT) == 0)
		goto bad;
	mtx_unlock(&mcntfree.m_mtx);

	mtx_lock(&mmbfree.m_mtx);
	if (m_mballoc(NMB_INIT, M_DONTWAIT) == 0)
		goto bad;
	mtx_unlock(&mmbfree.m_mtx);

	mtx_lock(&mclfree.m_mtx);
	if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
		goto bad;
	mtx_unlock(&mclfree.m_mtx);

	return;
bad:
	panic("mbinit: failed to initialize mbuf subsystem!");
}
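/*
 * Sizing sketch (illustrative, not authoritative): assuming the usual i386
 * values of MSIZE = 256 and MCLBYTES = 2048, and maxusers = 32 so that
 * nmbclusters = 512 + 32 * 16 = 1024 and nmbufs = 4 * nmbclusters = 4096,
 * the mb_map submap set up above reserves roughly
 *
 *	4096 * 256  = 1 MB of KVA for mbufs
 *	1024 * 2048 = 2 MB of KVA for mbuf clusters
 *
 * plus nmbcnt * sizeof(union mext_refcnt) for external-buffer reference
 * counters, with the total rounded up to a page boundary.
 */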
/*
 * Allocate at least nmb reference count structs and place them
 * on the ref cnt free list.
 *
 * Must be called with the mcntfree lock held.
 */
int
m_alloc_ref(u_int nmb, int how)
{
	caddr_t p;
	u_int nbytes;
	int i;

	/*
	 * We don't cap the amount of memory that can be used
	 * by the reference counters, like we do for mbufs and
	 * mbuf clusters. In fact, we're absolutely sure that we
	 * won't ever be going over our allocated space. We keep enough
	 * space in mb_map to accommodate maximum values of allocatable
	 * external buffers including, but not limited to, clusters.
	 * (That's also why we won't have to have wait routines for
	 * counters).
	 *
	 * If we're in here, we're absolutely certain to be returning
	 * successfully, as long as there is physical memory to accommodate
	 * us. And if there isn't, but we're willing to wait, then
	 * kmem_malloc() will do the only waiting needed.
	 */

	nbytes = round_page(nmb * sizeof(union mext_refcnt));
	mtx_unlock(&mcntfree.m_mtx);
	if ((p = (caddr_t)kmem_malloc(mb_map, nbytes, how == M_TRYWAIT ?
	    M_WAITOK : M_NOWAIT)) == NULL) {
		mtx_lock(&mcntfree.m_mtx);
		return (0);
	}
	nmb = nbytes / sizeof(union mext_refcnt);

	/*
	 * We don't let go of the mutex in order to avoid a race.
	 * It is up to the caller to let go of the mutex.
	 */
	mtx_lock(&mcntfree.m_mtx);
	for (i = 0; i < nmb; i++) {
		((union mext_refcnt *)p)->next_ref = mcntfree.m_head;
		mcntfree.m_head = (union mext_refcnt *)p;
		p += sizeof(union mext_refcnt);
		mbstat.m_refree++;
	}
	mbstat.m_refcnt += nmb;

	return (1);
}

/*
 * Allocate at least nmb mbufs and place on mbuf free list.
 *
 * Must be called with the mmbfree lock held.
 */
int
m_mballoc(int nmb, int how)
{
	caddr_t p;
	int i;
	int nbytes;

	/*
	 * If we've hit the mbuf limit, stop allocating from mb_map.
	 * Also, once we run out of map space, it will be impossible to
	 * get any more (nothing is ever freed back to the map).
	 */
	if (mb_map_full || ((nmb + mbstat.m_mbufs) > nmbufs)) {
		/*
		 * Needs to be atomic as we may be incrementing it
		 * while holding another mutex, like mclfree. In other
		 * words, m_drops is not reserved solely for mbufs,
		 * but is also available for clusters.
		 */
		atomic_add_long(&mbstat.m_drops, 1);
		return (0);
	}

	nbytes = round_page(nmb * MSIZE);

	mtx_unlock(&mmbfree.m_mtx);
	p = (caddr_t)kmem_malloc(mb_map, nbytes, M_NOWAIT);
	if (p == NULL && how == M_TRYWAIT) {
		atomic_add_long(&mbstat.m_wait, 1);
		p = (caddr_t)kmem_malloc(mb_map, nbytes, M_WAITOK);
	}
	mtx_lock(&mmbfree.m_mtx);

	/*
	 * Either the map is now full, or `how' is M_DONTWAIT and there
	 * are no pages left.
	 */
	if (p == NULL)
		return (0);

	nmb = nbytes / MSIZE;

	/*
	 * We don't let go of the mutex in order to avoid a race.
	 * It is up to the caller to let go of the mutex when done
	 * with grabbing the mbuf from the free list.
	 */
	for (i = 0; i < nmb; i++) {
		((struct mbuf *)p)->m_next = mmbfree.m_head;
		mmbfree.m_head = (struct mbuf *)p;
		p += MSIZE;
	}
	mbstat.m_mbufs += nmb;
	mbtypes[MT_FREE] += nmb;
	return (1);
}

/*
 * Once the mb_map has been exhausted and if the call to the allocation macros
 * (or, in some cases, functions) is with M_TRYWAIT, then it is necessary to
 * rely solely on reclaimed mbufs.
 *
 * Here we request that the protocols free up some resources and, if we
 * still cannot get anything, then we wait for an mbuf to be freed for a
 * designated (mbuf_wait) time.
 *
 * Must be called with the mmbfree mutex held.
 */
struct mbuf *
m_mballoc_wait(void)
{
	struct mbuf *p = NULL;

	/*
	 * See if we can drain some resources out of the protocols.
	 * We drop the mmbfree mutex to avoid recursing into it in some of
	 * the drain routines.
	 * Clearly, we're faced with a race here because
	 * once something is freed during the drain, it may be grabbed right
	 * from under us by some other thread. But we accept this possibility
	 * in order to avoid a potentially large lock recursion and, more
	 * importantly, to avoid a potential lock order reversal which may
	 * result in deadlock (See comment above m_reclaim()).
	 */
	mtx_unlock(&mmbfree.m_mtx);
	m_reclaim();

	mtx_lock(&mmbfree.m_mtx);
	_MGET(p, M_DONTWAIT);

	if (p == NULL) {
		m_mballoc_wid++;
		msleep(&m_mballoc_wid, &mmbfree.m_mtx, PVM, "mballc",
		    mbuf_wait);
		m_mballoc_wid--;

		/*
		 * Try again (one last time).
		 *
		 * We retry to fetch _even_ if the sleep timed out. This
		 * is left this way, purposely, in the [unlikely] case
		 * that an mbuf was freed but the sleep was not awoken
		 * in time.
		 *
		 * If the sleep didn't time out (i.e. we got woken up) then
		 * we have the lock so we just grab an mbuf, hopefully.
		 */
		_MGET(p, M_DONTWAIT);
	}

	/* If we waited and got something... */
	if (p != NULL) {
		atomic_add_long(&mbstat.m_wait, 1);
		if (mmbfree.m_head != NULL)
			MBWAKEUP(m_mballoc_wid);
	} else
		atomic_add_long(&mbstat.m_drops, 1);

	return (p);
}

/*
 * Allocate some number of mbuf clusters
 * and place on cluster free list.
 *
 * Must be called with the mclfree lock held.
 */
int
m_clalloc(int ncl, int how)
{
	caddr_t p;
	int i;
	int npg;

	/*
	 * Once the map is full, nothing will ever be freed back to it.
	 * If we've hit the mcluster number limit, stop allocating from
	 * mb_map.
	 */
	if (mb_map_full || ((ncl + mbstat.m_clusters) > nmbclusters)) {
		atomic_add_long(&mbstat.m_drops, 1);
		return (0);
	}

	npg = ncl;
	mtx_unlock(&mclfree.m_mtx);
	p = (caddr_t)kmem_malloc(mb_map, ctob(npg),
	    how == M_TRYWAIT ?
	    M_WAITOK : M_NOWAIT);
	ncl = ncl * PAGE_SIZE / MCLBYTES;
	mtx_lock(&mclfree.m_mtx);

	/*
	 * Either the map is now full, or `how' is M_DONTWAIT and there
	 * are no pages left.
	 */
	if (p == NULL) {
		atomic_add_long(&mbstat.m_drops, 1);
		return (0);
	}

	/*
	 * We don't let go of the mutex in order to avoid a race.
	 */
	for (i = 0; i < ncl; i++) {
		((union mcluster *)p)->mcl_next = mclfree.m_head;
		mclfree.m_head = (union mcluster *)p;
		p += MCLBYTES;
		mbstat.m_clfree++;
	}
	mbstat.m_clusters += ncl;
	return (1);
}

/*
 * Once the mb_map submap has been exhausted and the allocation is called with
 * M_TRYWAIT, we rely on the mclfree list. If nothing is free, we will
 * sleep for a designated amount of time (mbuf_wait) or until we're woken up
 * due to sudden mcluster availability.
 *
 * Must be called with the mclfree lock held.
 */
caddr_t
m_clalloc_wait(void)
{
	caddr_t p = NULL;

	m_clalloc_wid++;
	msleep(&m_clalloc_wid, &mclfree.m_mtx, PVM, "mclalc", mbuf_wait);
	m_clalloc_wid--;

	/*
	 * Now that we (think) we've got something, try again.
	 */
	_MCLALLOC(p, M_DONTWAIT);

	/* If we waited and got something ... */
	if (p != NULL) {
		atomic_add_long(&mbstat.m_wait, 1);
		if (mclfree.m_head != NULL)
			MBWAKEUP(m_clalloc_wid);
	} else
		atomic_add_long(&mbstat.m_drops, 1);

	return (p);
}

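/*
 * Illustrative sketch (not compiled in): how a typical consumer ends up in
 * the wait routines above.  As the comments above describe, the allocation
 * macros called with M_TRYWAIT fall back to m_mballoc_wait()/m_clalloc_wait()
 * once mb_map and the free lists are exhausted, so the caller may sleep for
 * up to "mbuf_wait" ticks.  example_get_cluster() is a hypothetical helper.
 */
#if 0
static struct mbuf *
example_get_cluster(void)
{
	struct mbuf *m;

	MGETHDR(m, M_TRYWAIT, MT_DATA);		/* may sleep */
	if (m == NULL)
		return (NULL);
	MCLGET(m, M_TRYWAIT);			/* may sleep as well */
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);			/* no cluster was attached */
		return (NULL);
	}
	m->m_len = 0;
	return (m);
}
#endif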
/*
 * m_reclaim: drain protocols in hopes to free up some resources...
 *
 * XXX: No locks should be held going in here. The drain routines have
 * to presently acquire some locks, which raises the possibility of a lock
 * order violation if we're holding any mutex that is acquired in reverse
 * order relative to one of the locks in the drain routines.
 */
static void
m_reclaim(void)
{
	struct domain *dp;
	struct protosw *pr;

#ifdef WITNESS
	KASSERT(witness_list(CURPROC) == 0,
	    ("m_reclaim called with locks held"));
#endif

	for (dp = domains; dp; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain)
				(*pr->pr_drain)();
	mbstat.m_drain++;
}

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(int how, int type)
{
	struct mbuf *m;

	MGET(m, how, type);
	return (m);
}

struct mbuf *
m_gethdr(int how, int type)
{
	struct mbuf *m;

	MGETHDR(m, how, type);
	return (m);
}

struct mbuf *
m_getclr(int how, int type)
{
	struct mbuf *m;

	MGET(m, how, type);
	if (m == NULL)
		return (NULL);
	bzero(mtod(m, caddr_t), MLEN);
	return (m);
}

struct mbuf *
m_free(struct mbuf *m)
{
	struct mbuf *n;

	MFREE(m, n);
	return (n);
}

void
m_freem(struct mbuf *m)
{
	struct mbuf *n;

	if (m == NULL)
		return;
	do {
		/*
		 * We do need to check non-first mbufs, since some existing
		 * code does not call M_PREPEND properly.
		 * (example: call to bpf_mtap from drivers)
		 */
		if ((m->m_flags & M_PKTHDR) != 0 && m->m_pkthdr.aux) {
			m_freem(m->m_pkthdr.aux);
			m->m_pkthdr.aux = NULL;
		}
		MFREE(m, n);
		m = n;
	} while (m);
}

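/*
 * Illustrative sketch (not compiled in): the functions above are the
 * non-macro entry points for simple allocation and release.  A hypothetical
 * caller that builds a one-mbuf packet, fills it through mtod() and then
 * releases the chain might look like this; the payload is arbitrary.
 */
#if 0
static void
example_simple_mbuf_use(void)
{
	struct mbuf *m;
	char payload[32];

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return;
	bzero(payload, sizeof(payload));
	bcopy(payload, mtod(m, caddr_t), sizeof(payload));
	m->m_len = m->m_pkthdr.len = sizeof(payload);
	m_freem(m);
}
#endif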
/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(struct mbuf *m, int len, int how)
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == NULL) {
		m_freem(m);
		return (NULL);
	}
	if (m->m_flags & M_PKTHDR) {
		M_COPY_PKTHDR(mn, m);
		m->m_flags &= ~M_PKTHDR;
	}
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_TRYWAIT/M_DONTWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
#define MCFail (mbstat.m_mcfail)

struct mbuf *
m_copym(struct mbuf *m, int off0, int len, int wait)
{
	struct mbuf *n, **np;
	int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	KASSERT(off >= 0, ("m_copym, negative off %d", off));
	KASSERT(len >= 0, ("m_copym, negative len %d", len));
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == NULL) {
			KASSERT(len == M_COPYALL,
			    ("m_copym, length > size of mbuf chain"));
			break;
		}
		MGET(n, wait, m->m_type);
		*np = n;
		if (n == NULL)
			goto nospace;
		if (copyhdr) {
			M_COPY_PKTHDR(n, m);
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data + off;
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
			MEXT_ADD_REF(m);
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == NULL)
		atomic_add_long(&MCFail, 1);
	return (top);
nospace:
	m_freem(top);
	atomic_add_long(&MCFail, 1);
	return (NULL);
}

/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
struct mbuf *
m_copypacket(struct mbuf *m, int how)
{
	struct mbuf *top, *n, *o;

	MGET(n, how, m->m_type);
	top = n;
	if (n == NULL)
		goto nospace;

	M_COPY_PKTHDR(n, m);
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data;
		n->m_ext = m->m_ext;
		n->m_flags |= M_EXT;
		MEXT_ADD_REF(m);
	} else {
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		MGET(o, how, m->m_type);
		if (o == NULL)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data;
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
			MEXT_ADD_REF(m);
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return top;
nospace:
	m_freem(top);
	atomic_add_long(&MCFail, 1);
	return (NULL);
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(struct mbuf *m, int off, int len, caddr_t cp)
{
	unsigned count;

	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}

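/*
 * Illustrative sketch (not compiled in): m_copym() gives a read-only copy
 * that shares clusters by reference, while m_copydata() linearizes part of
 * a chain into a caller-supplied flat buffer.  The function name and the
 * 64-byte buffer size below are arbitrary.
 */
#if 0
static void
example_copy_and_flatten(struct mbuf *m)
{
	struct mbuf *copy;
	char hdrbuf[64];

	/* Reference-counted, read-only copy of the whole chain. */
	copy = m_copym(m, 0, M_COPYALL, M_DONTWAIT);
	if (copy != NULL)
		m_freem(copy);

	/* Flatten the first 64 bytes into a local buffer for inspection. */
	if (m->m_pkthdr.len >= sizeof(hdrbuf))
		m_copydata(m, 0, sizeof(hdrbuf), hdrbuf);
}
#endif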
/*
 * Copy a packet header mbuf chain into a completely new chain, including
 * copying any mbuf clusters.  Use this instead of m_copypacket() when
 * you need a writable copy of an mbuf chain.
 */
struct mbuf *
m_dup(struct mbuf *m, int how)
{
	struct mbuf **p, *top = NULL;
	int remain, moff, nsize;

	/* Sanity check */
	if (m == NULL)
		return (NULL);
	KASSERT((m->m_flags & M_PKTHDR) != 0, ("%s: !PKTHDR", __FUNCTION__));

	/* While there's more data, get a new mbuf, tack it on, and fill it */
	remain = m->m_pkthdr.len;
	moff = 0;
	p = &top;
	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
		struct mbuf *n;

		/* Get the next new mbuf */
		MGET(n, how, m->m_type);
		if (n == NULL)
			goto nospace;
		if (top == NULL) {		/* first one, must be PKTHDR */
			M_COPY_PKTHDR(n, m);
			nsize = MHLEN;
		} else				/* not the first one */
			nsize = MLEN;
		if (remain >= MINCLSIZE) {
			MCLGET(n, how);
			if ((n->m_flags & M_EXT) == 0) {
				(void)m_free(n);
				goto nospace;
			}
			nsize = MCLBYTES;
		}
		n->m_len = 0;

		/* Link it into the new chain */
		*p = n;
		p = &n->m_next;

		/* Copy data from original mbuf(s) into new mbuf */
		while (n->m_len < nsize && m != NULL) {
			int chunk = min(nsize - n->m_len, m->m_len - moff);

			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
			moff += chunk;
			n->m_len += chunk;
			remain -= chunk;
			if (moff == m->m_len) {
				m = m->m_next;
				moff = 0;
			}
		}

		/* Check correct total mbuf length */
		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
		    ("%s: bogus m_pkthdr.len", __FUNCTION__));
	}
	return (top);

nospace:
	m_freem(top);
	atomic_add_long(&MCFail, 1);
	return (NULL);
}

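/*
 * Illustrative sketch (not compiled in): when the copy will be written to,
 * m_dup() is the right call; copies made with m_copym()/m_copypacket()
 * share (and therefore alias) any underlying clusters.  The write below is
 * only a placeholder to show that the duplicate may be modified freely.
 */
#if 0
static struct mbuf *
example_writable_copy(struct mbuf *m)
{
	struct mbuf *wcopy;

	wcopy = m_dup(m, M_DONTWAIT);
	if (wcopy == NULL)
		return (NULL);
	/* Safe to scribble on wcopy without touching the original chain. */
	if (wcopy->m_len > 0)
		*mtod(wcopy, u_char *) = 0xff;
	return (wcopy);
}
#endif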
/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(struct mbuf *m, struct mbuf *n)
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

void
m_adj(struct mbuf *mp, int req_len)
{
	int len = req_len;
	struct mbuf *m;
	int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m->m_next)
			(m = m->m_next)->m_len = 0;
	}
}

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
#define MPFail (mbstat.m_mpfail)

struct mbuf *
m_pullup(struct mbuf *n, int len)
{
	struct mbuf *m;
	int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == NULL)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR) {
			M_COPY_PKTHDR(m, n);
			n->m_flags &= ~M_PKTHDR;
		}
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	atomic_add_long(&MPFail, 1);
	return (NULL);
}

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 */
struct mbuf *
m_split(struct mbuf *m0, int len0, int wait)
{
	struct mbuf *m, *n;
	unsigned len = len0, remain;

	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == NULL)
		return (NULL);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		MGETHDR(n, wait, m0->m_type);
		if (n == NULL)
			return (NULL);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == NULL) {
				(void) m_free(n);
				return (NULL);
			} else
				return (n);
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = NULL;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == NULL)
			return (NULL);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_flags |= M_EXT;
		n->m_ext = m->m_ext;
		MEXT_ADD_REF(m);
		m->m_ext.ext_size = 0; /* For Accounting XXXXXX danger */
		n->m_data = m->m_data + len;
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = NULL;
	return (n);
}
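/*
 * Illustrative sketch (not compiled in): the classic m_pullup() pattern for
 * protocol input, followed by splitting the payload off at the header
 * boundary with m_split().  "struct example_hdr" and the surrounding helper
 * are hypothetical stand-ins for a real protocol header and handler.
 */
#if 0
struct example_hdr {
	u_short	eh_type;
	u_short	eh_len;
};

static struct mbuf *
example_parse_and_split(struct mbuf *m)
{
	struct example_hdr *eh;
	struct mbuf *payload;

	/* Make the header contiguous so it can be dereferenced via mtod(). */
	if (m->m_len < sizeof(*eh) &&
	    (m = m_pullup(m, sizeof(*eh))) == NULL)
		return (NULL);		/* m_pullup() freed the chain */
	eh = mtod(m, struct example_hdr *);
	if (eh->eh_len == 0) {
		m_freem(m);
		return (NULL);
	}

	/* Detach everything past the header into its own chain. */
	payload = m_split(m, sizeof(*eh), M_DONTWAIT);
	if (payload != NULL)
		m_freem(payload);
	return (m);
}
#endif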
/*
 * Routine to copy from device local memory into mbufs.
 */
struct mbuf *
m_devget(char *buf, int totlen, int off0, struct ifnet *ifp,
	 void (*copy)(char *from, caddr_t to, u_int len))
{
	struct mbuf *m;
	struct mbuf *top = 0, **mp = &top;
	int off = off0, len;
	char *cp;
	char *epkt;

	cp = buf;
	epkt = cp + totlen;
	if (off) {
		cp += off + 2 * sizeof(u_short);
		totlen -= 2 * sizeof(u_short);
	}
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	m->m_len = MHLEN;

	while (totlen > 0) {
		if (top) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				m_freem(top);
				return (NULL);
			}
			m->m_len = MLEN;
		}
		len = min(totlen, epkt - cp);
		if (len >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				m->m_len = len = min(len, MCLBYTES);
			else
				len = m->m_len;
		} else {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (len < m->m_len) {
				if (top == NULL && len +
				    max_linkhdr <= m->m_len)
					m->m_data += max_linkhdr;
				m->m_len = len;
			} else
				len = m->m_len;
		}
		if (copy)
			copy(cp, mtod(m, caddr_t), (unsigned)len);
		else
			bcopy(cp, mtod(m, caddr_t), (unsigned)len);
		cp += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
		if (cp == epkt)
			cp = buf;
	}
	return (top);
}

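/*
 * Illustrative sketch (not compiled in): a driver receive path handing a
 * packet buffer to m_devget() and passing the result up the stack.
 * "struct example_softc", its members and example_input() are hypothetical;
 * a real driver would use its own softc and input routine.
 */
#if 0
static void
example_rxeof(struct example_softc *sc, char *rxbuf, int pktlen)
{
	struct mbuf *m;

	/* Copy the frame out of device memory into a fresh mbuf chain. */
	m = m_devget(rxbuf, pktlen, 0, &sc->example_ifp, NULL);
	if (m == NULL)
		return;		/* out of mbufs; drop the frame */
	example_input(&sc->example_ifp, m);
}
#endif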
/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(struct mbuf *m0, int off, int len, caddr_t cp)
{
	int mlen;
	struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == NULL)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == NULL) {
			n = m_getclr(M_DONTWAIT, m->m_type);
			if (n == NULL)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min(m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == NULL) {
			n = m_get(M_DONTWAIT, m->m_type);
			if (n == NULL)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}

void
m_print(const struct mbuf *m)
{
	int len;
	const struct mbuf *m2;

	len = m->m_pkthdr.len;
	m2 = m;
	while (len) {
		printf("%p %*D\n", m2, m2->m_len, (u_char *)m2->m_data, "-");
		len -= m2->m_len;
		m2 = m2->m_next;
	}
	return;
}
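/*
 * Illustrative sketch (not compiled in): m_copyback() overwrites "len" bytes
 * at offset "off", allocating additional mbufs if the chain is too short.
 * The idea of patching a 2-byte checksum at a caller-supplied offset is only
 * an example; the helper name and parameters are hypothetical.
 */
#if 0
static void
example_patch_cksum(struct mbuf *m, int cksum_off, u_short cksum)
{
	/* Write the new checksum in place, growing the chain if needed. */
	m_copyback(m, cksum_off, sizeof(cksum), (caddr_t)&cksum);
}
#endif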