19454b2d8SWarner Losh /*- 251369649SPedro F. Giffuni * SPDX-License-Identifier: BSD-3-Clause 351369649SPedro F. Giffuni * 4df8bae1dSRodney W. Grimes * Copyright (c) 1982, 1986, 1988, 1991, 1993 5df8bae1dSRodney W. Grimes * The Regents of the University of California. All rights reserved. 6df8bae1dSRodney W. Grimes * 7df8bae1dSRodney W. Grimes * Redistribution and use in source and binary forms, with or without 8df8bae1dSRodney W. Grimes * modification, are permitted provided that the following conditions 9df8bae1dSRodney W. Grimes * are met: 10df8bae1dSRodney W. Grimes * 1. Redistributions of source code must retain the above copyright 11df8bae1dSRodney W. Grimes * notice, this list of conditions and the following disclaimer. 12df8bae1dSRodney W. Grimes * 2. Redistributions in binary form must reproduce the above copyright 13df8bae1dSRodney W. Grimes * notice, this list of conditions and the following disclaimer in the 14df8bae1dSRodney W. Grimes * documentation and/or other materials provided with the distribution. 1569a28758SEd Maste * 3. Neither the name of the University nor the names of its contributors 16df8bae1dSRodney W. Grimes * may be used to endorse or promote products derived from this software 17df8bae1dSRodney W. Grimes * without specific prior written permission. 18df8bae1dSRodney W. Grimes * 19df8bae1dSRodney W. Grimes * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 20df8bae1dSRodney W. Grimes * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21df8bae1dSRodney W. Grimes * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22df8bae1dSRodney W. Grimes * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 23df8bae1dSRodney W. Grimes * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24df8bae1dSRodney W. Grimes * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25df8bae1dSRodney W. Grimes * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26df8bae1dSRodney W. Grimes * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27df8bae1dSRodney W. Grimes * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28df8bae1dSRodney W. Grimes * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29df8bae1dSRodney W. Grimes * SUCH DAMAGE. 30df8bae1dSRodney W. Grimes * 31df8bae1dSRodney W. Grimes * @(#)uipc_mbuf.c 8.2 (Berkeley) 1/4/94 32df8bae1dSRodney W. Grimes */ 33df8bae1dSRodney W. Grimes 34677b542eSDavid E. O'Brien #include <sys/cdefs.h> 35677b542eSDavid E. O'Brien __FBSDID("$FreeBSD$"); 36677b542eSDavid E. O'Brien 37240ef842SDavid E. O'Brien #include "opt_param.h" 38352d050eSMike Silbersack #include "opt_mbuf_stress_test.h" 396eeac1d9SJulian Elischer #include "opt_mbuf_profiling.h" 40e32a5b94SRobert Watson 41df8bae1dSRodney W. Grimes #include <sys/param.h> 42df8bae1dSRodney W. Grimes #include <sys/systm.h> 43fb919e4dSMark Murray #include <sys/kernel.h> 44beb699c7SMike Silbersack #include <sys/limits.h> 45fb919e4dSMark Murray #include <sys/lock.h> 46f9d0d524SRobert Watson #include <sys/malloc.h> 47df8bae1dSRodney W. Grimes #include <sys/mbuf.h> 48639acc13SGarrett Wollman #include <sys/sysctl.h> 49df8bae1dSRodney W. Grimes #include <sys/domain.h> 50df8bae1dSRodney W. Grimes #include <sys/protosw.h> 51beb699c7SMike Silbersack #include <sys/uio.h> 5282334850SJohn Baldwin #include <sys/vmmeter.h> 53480f4e94SGeorge V. 
Neville-Neil #include <sys/sdt.h> 5482334850SJohn Baldwin #include <vm/vm.h> 5582334850SJohn Baldwin #include <vm/vm_pageout.h> 5682334850SJohn Baldwin #include <vm/vm_page.h> 57480f4e94SGeorge V. Neville-Neil 58dcd070d8SGeorge V. Neville-Neil SDT_PROBE_DEFINE5_XLATE(sdt, , , m__init, 59480f4e94SGeorge V. Neville-Neil "struct mbuf *", "mbufinfo_t *", 60480f4e94SGeorge V. Neville-Neil "uint32_t", "uint32_t", 61480f4e94SGeorge V. Neville-Neil "uint16_t", "uint16_t", 62480f4e94SGeorge V. Neville-Neil "uint32_t", "uint32_t", 63480f4e94SGeorge V. Neville-Neil "uint32_t", "uint32_t"); 64480f4e94SGeorge V. Neville-Neil 65dcd070d8SGeorge V. Neville-Neil SDT_PROBE_DEFINE3_XLATE(sdt, , , m__gethdr, 66480f4e94SGeorge V. Neville-Neil "uint32_t", "uint32_t", 67480f4e94SGeorge V. Neville-Neil "uint16_t", "uint16_t", 68480f4e94SGeorge V. Neville-Neil "struct mbuf *", "mbufinfo_t *"); 69480f4e94SGeorge V. Neville-Neil 70dcd070d8SGeorge V. Neville-Neil SDT_PROBE_DEFINE3_XLATE(sdt, , , m__get, 71480f4e94SGeorge V. Neville-Neil "uint32_t", "uint32_t", 72480f4e94SGeorge V. Neville-Neil "uint16_t", "uint16_t", 73480f4e94SGeorge V. Neville-Neil "struct mbuf *", "mbufinfo_t *"); 74480f4e94SGeorge V. Neville-Neil 75dcd070d8SGeorge V. Neville-Neil SDT_PROBE_DEFINE4_XLATE(sdt, , , m__getcl, 76480f4e94SGeorge V. Neville-Neil "uint32_t", "uint32_t", 77480f4e94SGeorge V. Neville-Neil "uint16_t", "uint16_t", 78480f4e94SGeorge V. Neville-Neil "uint32_t", "uint32_t", 79480f4e94SGeorge V. Neville-Neil "struct mbuf *", "mbufinfo_t *"); 80480f4e94SGeorge V. Neville-Neil 81dcd070d8SGeorge V. Neville-Neil SDT_PROBE_DEFINE3_XLATE(sdt, , , m__clget, 82480f4e94SGeorge V. Neville-Neil "struct mbuf *", "mbufinfo_t *", 83480f4e94SGeorge V. Neville-Neil "uint32_t", "uint32_t", 84480f4e94SGeorge V. Neville-Neil "uint32_t", "uint32_t"); 85480f4e94SGeorge V. Neville-Neil 86dcd070d8SGeorge V. Neville-Neil SDT_PROBE_DEFINE4_XLATE(sdt, , , m__cljget, 87480f4e94SGeorge V. Neville-Neil "struct mbuf *", "mbufinfo_t *", 88480f4e94SGeorge V. Neville-Neil "uint32_t", "uint32_t", 89480f4e94SGeorge V. Neville-Neil "uint32_t", "uint32_t", 90480f4e94SGeorge V. Neville-Neil "void*", "void*"); 91480f4e94SGeorge V. Neville-Neil 92dcd070d8SGeorge V. Neville-Neil SDT_PROBE_DEFINE(sdt, , , m__cljset); 93480f4e94SGeorge V. Neville-Neil 94dcd070d8SGeorge V. Neville-Neil SDT_PROBE_DEFINE1_XLATE(sdt, , , m__free, 95480f4e94SGeorge V. Neville-Neil "struct mbuf *", "mbufinfo_t *"); 96480f4e94SGeorge V. Neville-Neil 97dcd070d8SGeorge V. Neville-Neil SDT_PROBE_DEFINE1_XLATE(sdt, , , m__freem, 98480f4e94SGeorge V. 
Neville-Neil "struct mbuf *", "mbufinfo_t *"); 99fb919e4dSMark Murray 1005e4bc63bSGleb Smirnoff #include <security/mac/mac_framework.h> 1015e4bc63bSGleb Smirnoff 10228f8db14SBruce Evans int max_linkhdr; 10328f8db14SBruce Evans int max_protohdr; 10428f8db14SBruce Evans int max_hdr; 10528f8db14SBruce Evans int max_datalen; 10651710a45SMike Silbersack #ifdef MBUF_STRESS_TEST 10755e9f80dSMike Silbersack int m_defragpackets; 10855e9f80dSMike Silbersack int m_defragbytes; 10955e9f80dSMike Silbersack int m_defraguseless; 11055e9f80dSMike Silbersack int m_defragfailure; 111352d050eSMike Silbersack int m_defragrandomfailures; 112352d050eSMike Silbersack #endif 1137d032714SBosko Milekic 1147d032714SBosko Milekic /* 1157d032714SBosko Milekic * sysctl(8) exported objects 1167d032714SBosko Milekic */ 11780444f88SAndre Oppermann SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RD, 11880444f88SAndre Oppermann &max_linkhdr, 0, "Size of largest link layer header"); 11980444f88SAndre Oppermann SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RD, 12080444f88SAndre Oppermann &max_protohdr, 0, "Size of largest protocol layer header"); 12180444f88SAndre Oppermann SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RD, 12280444f88SAndre Oppermann &max_hdr, 0, "Size of largest link plus protocol header"); 12380444f88SAndre Oppermann SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RD, 12480444f88SAndre Oppermann &max_datalen, 0, "Minimum space left in mbuf after max_hdr"); 12551710a45SMike Silbersack #ifdef MBUF_STRESS_TEST 12655e9f80dSMike Silbersack SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD, 12755e9f80dSMike Silbersack &m_defragpackets, 0, ""); 12855e9f80dSMike Silbersack SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD, 12955e9f80dSMike Silbersack &m_defragbytes, 0, ""); 13055e9f80dSMike Silbersack SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD, 13155e9f80dSMike Silbersack &m_defraguseless, 0, ""); 13255e9f80dSMike Silbersack SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD, 13355e9f80dSMike Silbersack &m_defragfailure, 0, ""); 134352d050eSMike Silbersack SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW, 135352d050eSMike Silbersack &m_defragrandomfailures, 0, ""); 136352d050eSMike Silbersack #endif 137df8bae1dSRodney W. Grimes 138df8bae1dSRodney W. Grimes /* 139f729ede6SAndre Oppermann * Ensure the correct size of various mbuf parameters. It could be off due 140f729ede6SAndre Oppermann * to compiler-induced padding and alignment artifacts. 141f729ede6SAndre Oppermann */ 142f729ede6SAndre Oppermann CTASSERT(MSIZE - offsetof(struct mbuf, m_dat) == MLEN); 143f729ede6SAndre Oppermann CTASSERT(MSIZE - offsetof(struct mbuf, m_pktdat) == MHLEN); 144f729ede6SAndre Oppermann 145f729ede6SAndre Oppermann /* 1463d1a9ed3SRobert Watson * mbuf data storage should be 64-bit aligned regardless of architectural 1473d1a9ed3SRobert Watson * pointer size; check this is the case with and without a packet header. 1483d1a9ed3SRobert Watson */ 1493d1a9ed3SRobert Watson CTASSERT(offsetof(struct mbuf, m_dat) % 8 == 0); 1503d1a9ed3SRobert Watson CTASSERT(offsetof(struct mbuf, m_pktdat) % 8 == 0); 1513d1a9ed3SRobert Watson 1523d1a9ed3SRobert Watson /* 1533d1a9ed3SRobert Watson * While the specific values here don't matter too much (i.e., +/- a few 1543d1a9ed3SRobert Watson * words), we do want to ensure that changes to these values are carefully 1553d1a9ed3SRobert Watson * reasoned about and properly documented. 
This is especially the case as 1563d1a9ed3SRobert Watson * network-protocol and device-driver modules encode these layouts, and must 1573d1a9ed3SRobert Watson * be recompiled if the structures change. Check these values at compile time 1583d1a9ed3SRobert Watson * against the ones documented in comments in mbuf.h. 1593d1a9ed3SRobert Watson * 1603d1a9ed3SRobert Watson * NB: Possibly they should be documented there via #define's and not just 1613d1a9ed3SRobert Watson * comments. 1623d1a9ed3SRobert Watson */ 1633d1a9ed3SRobert Watson #if defined(__LP64__) 1643d1a9ed3SRobert Watson CTASSERT(offsetof(struct mbuf, m_dat) == 32); 1653d1a9ed3SRobert Watson CTASSERT(sizeof(struct pkthdr) == 56); 1660c103266SGleb Smirnoff CTASSERT(sizeof(struct m_ext) == 160); 1673d1a9ed3SRobert Watson #else 1683d1a9ed3SRobert Watson CTASSERT(offsetof(struct mbuf, m_dat) == 24); 1693d1a9ed3SRobert Watson CTASSERT(sizeof(struct pkthdr) == 48); 1700c103266SGleb Smirnoff CTASSERT(sizeof(struct m_ext) == 180); 1713d1a9ed3SRobert Watson #endif 1723d1a9ed3SRobert Watson 1733d1a9ed3SRobert Watson /* 174ec9d83ddSGleb Smirnoff * Assert that the queue(3) macros produce code of the same size as an old 175ec9d83ddSGleb Smirnoff * plain pointer does. 176ec9d83ddSGleb Smirnoff */ 177ec9d83ddSGleb Smirnoff #ifdef INVARIANTS 178d53d6fa9SMark Johnston static struct mbuf __used m_assertbuf; 179ec9d83ddSGleb Smirnoff CTASSERT(sizeof(m_assertbuf.m_slist) == sizeof(m_assertbuf.m_next)); 180ec9d83ddSGleb Smirnoff CTASSERT(sizeof(m_assertbuf.m_stailq) == sizeof(m_assertbuf.m_next)); 181ec9d83ddSGleb Smirnoff CTASSERT(sizeof(m_assertbuf.m_slistpkt) == sizeof(m_assertbuf.m_nextpkt)); 182ec9d83ddSGleb Smirnoff CTASSERT(sizeof(m_assertbuf.m_stailqpkt) == sizeof(m_assertbuf.m_nextpkt)); 183ec9d83ddSGleb Smirnoff #endif 184ec9d83ddSGleb Smirnoff 185ec9d83ddSGleb Smirnoff /* 1866bccea7cSRebecca Cran * Attach the cluster from *m to *n, set up m_ext in *n 18756a4e45aSAndre Oppermann * and bump the refcount of the cluster. 18856a4e45aSAndre Oppermann */ 18986a996e6SHiren Panchasara void 19056a5f52eSGleb Smirnoff mb_dupcl(struct mbuf *n, struct mbuf *m) 19156a4e45aSAndre Oppermann { 19256a5f52eSGleb Smirnoff volatile u_int *refcnt; 19356a4e45aSAndre Oppermann 1941fbe6a82SGleb Smirnoff KASSERT(m->m_flags & M_EXT, ("%s: M_EXT not set on %p", __func__, m)); 1951fbe6a82SGleb Smirnoff KASSERT(!(n->m_flags & M_EXT), ("%s: M_EXT set on %p", __func__, n)); 1961fbe6a82SGleb Smirnoff 19707e87a1dSGleb Smirnoff /* 1980c103266SGleb Smirnoff * Cache access optimization. 1990c103266SGleb Smirnoff * 2000c103266SGleb Smirnoff * o Regular M_EXT storage doesn't need full copy of m_ext, since 2010c103266SGleb Smirnoff * the holder of the 'ext_count' is responsible to carry the free 2020c103266SGleb Smirnoff * routine and its arguments. 2030c103266SGleb Smirnoff * o EXT_PGS data is split between main part of mbuf and m_ext, the 2040c103266SGleb Smirnoff * main part is copied in full, the m_ext part is similar to M_EXT. 2050c103266SGleb Smirnoff * o EXT_EXTREF, where 'ext_cnt' doesn't point into mbuf at all, is 2060c103266SGleb Smirnoff * special - it needs full copy of m_ext into each mbuf, since any 2070c103266SGleb Smirnoff * copy could end up as the last to free. 
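 *
 * A note on the reference-count sharing done below: when the source mbuf
 * still holds the embedded count (EXT_FLAG_EMBREF), the copy's ext_cnt is
 * pointed at that embedded counter and the flag is cleared on the copy,
 * so only the original holder keeps the counter in-line.  The increment
 * at the end skips the atomic op in the common case where the count is
 * still 1 and no other reference can be racing on it yet.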
20807e87a1dSGleb Smirnoff */ 2090c103266SGleb Smirnoff switch (m->m_ext.ext_type) { 2100c103266SGleb Smirnoff case EXT_PGS: 2110c103266SGleb Smirnoff bcopy(&m->m_ext, &n->m_ext, m_epg_copylen); 21223feb563SAndrew Gallatin bcopy(&m->m_ext_pgs, &n->m_ext_pgs, 21323feb563SAndrew Gallatin sizeof(struct mbuf_ext_pgs)); 2140c103266SGleb Smirnoff break; 2150c103266SGleb Smirnoff case EXT_EXTREF: 2160c103266SGleb Smirnoff bcopy(&m->m_ext, &n->m_ext, sizeof(struct m_ext)); 2170c103266SGleb Smirnoff break; 2180c103266SGleb Smirnoff default: 21907e87a1dSGleb Smirnoff bcopy(&m->m_ext, &n->m_ext, m_ext_copylen); 2200c103266SGleb Smirnoff } 2210c103266SGleb Smirnoff 22256a4e45aSAndre Oppermann n->m_flags |= M_EXT; 22382334850SJohn Baldwin n->m_flags |= m->m_flags & (M_RDONLY | M_NOMAP); 22456a5f52eSGleb Smirnoff 22556a5f52eSGleb Smirnoff /* See if this is the mbuf that holds the embedded refcount. */ 22656a5f52eSGleb Smirnoff if (m->m_ext.ext_flags & EXT_FLAG_EMBREF) { 22756a5f52eSGleb Smirnoff refcnt = n->m_ext.ext_cnt = &m->m_ext.ext_count; 22856a5f52eSGleb Smirnoff n->m_ext.ext_flags &= ~EXT_FLAG_EMBREF; 22956a5f52eSGleb Smirnoff } else { 23056a5f52eSGleb Smirnoff KASSERT(m->m_ext.ext_cnt != NULL, 23156a5f52eSGleb Smirnoff ("%s: no refcounting pointer on %p", __func__, m)); 23256a5f52eSGleb Smirnoff refcnt = m->m_ext.ext_cnt; 23356a5f52eSGleb Smirnoff } 23456a5f52eSGleb Smirnoff 23556a5f52eSGleb Smirnoff if (*refcnt == 1) 23656a5f52eSGleb Smirnoff *refcnt += 1; 23756a5f52eSGleb Smirnoff else 23856a5f52eSGleb Smirnoff atomic_add_int(refcnt, 1); 23956a4e45aSAndre Oppermann } 24056a4e45aSAndre Oppermann 2419523d1bfSNavdeep Parhar void 2429523d1bfSNavdeep Parhar m_demote_pkthdr(struct mbuf *m) 2439523d1bfSNavdeep Parhar { 2449523d1bfSNavdeep Parhar 2459523d1bfSNavdeep Parhar M_ASSERTPKTHDR(m); 2469523d1bfSNavdeep Parhar 2479523d1bfSNavdeep Parhar m_tag_delete_chain(m, NULL); 2489523d1bfSNavdeep Parhar m->m_flags &= ~M_PKTHDR; 2499523d1bfSNavdeep Parhar bzero(&m->m_pkthdr, sizeof(struct pkthdr)); 2509523d1bfSNavdeep Parhar } 2519523d1bfSNavdeep Parhar 25256a4e45aSAndre Oppermann /* 253ed111688SAndre Oppermann * Clean up mbuf (chain) from any tags and packet headers. 254e0068c3aSAndre Oppermann * If "all" is set then the first mbuf in the chain will be 255e0068c3aSAndre Oppermann * cleaned too. 256ed111688SAndre Oppermann */ 257ed111688SAndre Oppermann void 258651e4e6aSGleb Smirnoff m_demote(struct mbuf *m0, int all, int flags) 259ed111688SAndre Oppermann { 260ed111688SAndre Oppermann struct mbuf *m; 261ed111688SAndre Oppermann 262ed111688SAndre Oppermann for (m = all ? m0 : m0->m_next; m != NULL; m = m->m_next) { 2637ee2d058SGleb Smirnoff KASSERT(m->m_nextpkt == NULL, ("%s: m_nextpkt in m %p, m0 %p", 2647ee2d058SGleb Smirnoff __func__, m, m0)); 2659523d1bfSNavdeep Parhar if (m->m_flags & M_PKTHDR) 2669523d1bfSNavdeep Parhar m_demote_pkthdr(m); 26782334850SJohn Baldwin m->m_flags = m->m_flags & (M_EXT | M_RDONLY | M_NOFREE | 26882334850SJohn Baldwin M_NOMAP | flags); 269ed111688SAndre Oppermann } 270ed111688SAndre Oppermann } 271ed111688SAndre Oppermann 272ed111688SAndre Oppermann /* 273fdcc028dSAndre Oppermann * Sanity checks on mbuf (chain) for use in KASSERT() and general 274fdcc028dSAndre Oppermann * debugging. 275fdcc028dSAndre Oppermann * Returns 0 or panics when bad and 1 on all tests passed. 276fdcc028dSAndre Oppermann * Sanitize, 0 to run M_SANITY_ACTION, 1 to garble things so they 277fdcc028dSAndre Oppermann * blow up later. 
278a048affbSAndre Oppermann */ 279a048affbSAndre Oppermann int 280a048affbSAndre Oppermann m_sanity(struct mbuf *m0, int sanitize) 281a048affbSAndre Oppermann { 282a048affbSAndre Oppermann struct mbuf *m; 283a048affbSAndre Oppermann caddr_t a, b; 284a048affbSAndre Oppermann int pktlen = 0; 285a048affbSAndre Oppermann 28621ee3e7aSKip Macy #ifdef INVARIANTS 28721ee3e7aSKip Macy #define M_SANITY_ACTION(s) panic("mbuf %p: " s, m) 28821ee3e7aSKip Macy #else 28921ee3e7aSKip Macy #define M_SANITY_ACTION(s) printf("mbuf %p: " s, m) 29021ee3e7aSKip Macy #endif 291a048affbSAndre Oppermann 292fdcc028dSAndre Oppermann for (m = m0; m != NULL; m = m->m_next) { 293a048affbSAndre Oppermann /* 294a048affbSAndre Oppermann * Basic pointer checks. If any of these fails then some 295a048affbSAndre Oppermann * unrelated kernel memory before or after us is trashed. 296a048affbSAndre Oppermann * No way to recover from that. 297a048affbSAndre Oppermann */ 298b66f2a48SRobert Watson a = M_START(m); 299b66f2a48SRobert Watson b = a + M_SIZE(m); 300a048affbSAndre Oppermann if ((caddr_t)m->m_data < a) 301a048affbSAndre Oppermann M_SANITY_ACTION("m_data outside mbuf data range left"); 302a048affbSAndre Oppermann if ((caddr_t)m->m_data > b) 303a048affbSAndre Oppermann M_SANITY_ACTION("m_data outside mbuf data range right"); 304a048affbSAndre Oppermann if ((caddr_t)m->m_data + m->m_len > b) 305a048affbSAndre Oppermann M_SANITY_ACTION("m_data + m_len exeeds mbuf space"); 306a048affbSAndre Oppermann 307a048affbSAndre Oppermann /* m->m_nextpkt may only be set on first mbuf in chain. */ 308fdcc028dSAndre Oppermann if (m != m0 && m->m_nextpkt != NULL) { 309a048affbSAndre Oppermann if (sanitize) { 310a048affbSAndre Oppermann m_freem(m->m_nextpkt); 311a048affbSAndre Oppermann m->m_nextpkt = (struct mbuf *)0xDEADC0DE; 312a048affbSAndre Oppermann } else 313a048affbSAndre Oppermann M_SANITY_ACTION("m->m_nextpkt on in-chain mbuf"); 314a048affbSAndre Oppermann } 315a048affbSAndre Oppermann 316a048affbSAndre Oppermann /* packet length (not mbuf length!) calculation */ 317a048affbSAndre Oppermann if (m0->m_flags & M_PKTHDR) 318a048affbSAndre Oppermann pktlen += m->m_len; 319a048affbSAndre Oppermann 320a048affbSAndre Oppermann /* m_tags may only be attached to first mbuf in chain. */ 321a048affbSAndre Oppermann if (m != m0 && m->m_flags & M_PKTHDR && 322a048affbSAndre Oppermann !SLIST_EMPTY(&m->m_pkthdr.tags)) { 323a048affbSAndre Oppermann if (sanitize) { 324a048affbSAndre Oppermann m_tag_delete_chain(m, NULL); 325a048affbSAndre Oppermann /* put in 0xDEADC0DE perhaps? 
*/ 326fdcc028dSAndre Oppermann } else 327a048affbSAndre Oppermann M_SANITY_ACTION("m_tags on in-chain mbuf"); 328a048affbSAndre Oppermann } 329a048affbSAndre Oppermann 330a048affbSAndre Oppermann /* M_PKTHDR may only be set on first mbuf in chain */ 331a048affbSAndre Oppermann if (m != m0 && m->m_flags & M_PKTHDR) { 332a048affbSAndre Oppermann if (sanitize) { 333a048affbSAndre Oppermann bzero(&m->m_pkthdr, sizeof(m->m_pkthdr)); 334a048affbSAndre Oppermann m->m_flags &= ~M_PKTHDR; 335a048affbSAndre Oppermann /* put in 0xDEADCODE and leave hdr flag in */ 336a048affbSAndre Oppermann } else 337a048affbSAndre Oppermann M_SANITY_ACTION("M_PKTHDR on in-chain mbuf"); 338a048affbSAndre Oppermann } 339a048affbSAndre Oppermann } 340fdcc028dSAndre Oppermann m = m0; 341fdcc028dSAndre Oppermann if (pktlen && pktlen != m->m_pkthdr.len) { 342a048affbSAndre Oppermann if (sanitize) 343fdcc028dSAndre Oppermann m->m_pkthdr.len = 0; 344a048affbSAndre Oppermann else 345a048affbSAndre Oppermann M_SANITY_ACTION("m_pkthdr.len != mbuf chain length"); 346a048affbSAndre Oppermann } 347a048affbSAndre Oppermann return 1; 348fdcc028dSAndre Oppermann 349fdcc028dSAndre Oppermann #undef M_SANITY_ACTION 350a048affbSAndre Oppermann } 351a048affbSAndre Oppermann 3525e4bc63bSGleb Smirnoff /* 3535e4bc63bSGleb Smirnoff * Non-inlined part of m_init(). 3545e4bc63bSGleb Smirnoff */ 3555e4bc63bSGleb Smirnoff int 3565e4bc63bSGleb Smirnoff m_pkthdr_init(struct mbuf *m, int how) 3575e4bc63bSGleb Smirnoff { 3585e4bc63bSGleb Smirnoff #ifdef MAC 3595e4bc63bSGleb Smirnoff int error; 3605e4bc63bSGleb Smirnoff #endif 3615e4bc63bSGleb Smirnoff m->m_data = m->m_pktdat; 3625e4bc63bSGleb Smirnoff bzero(&m->m_pkthdr, sizeof(m->m_pkthdr)); 36350575ce1SAndrew Gallatin #ifdef NUMA 36450575ce1SAndrew Gallatin m->m_pkthdr.numa_domain = M_NODOM; 36550575ce1SAndrew Gallatin #endif 3665e4bc63bSGleb Smirnoff #ifdef MAC 3675e4bc63bSGleb Smirnoff /* If the label init fails, fail the alloc */ 3685e4bc63bSGleb Smirnoff error = mac_mbuf_init(m, how); 3695e4bc63bSGleb Smirnoff if (error) 3705e4bc63bSGleb Smirnoff return (error); 3715e4bc63bSGleb Smirnoff #endif 3725e4bc63bSGleb Smirnoff 3735e4bc63bSGleb Smirnoff return (0); 3745e4bc63bSGleb Smirnoff } 375a048affbSAndre Oppermann 376a048affbSAndre Oppermann /* 3779967cafcSSam Leffler * "Move" mbuf pkthdr from "from" to "to". 378e37b1fcdSRobert Watson * "from" must have M_PKTHDR set, and "to" must be empty. 379e37b1fcdSRobert Watson */ 380e37b1fcdSRobert Watson void 3819967cafcSSam Leffler m_move_pkthdr(struct mbuf *to, struct mbuf *from) 382e37b1fcdSRobert Watson { 383e37b1fcdSRobert Watson 384e37b1fcdSRobert Watson #if 0 3859967cafcSSam Leffler /* see below for why these are not enabled */ 386fe584538SDag-Erling Smørgrav M_ASSERTPKTHDR(to); 387225bff6fSRobert Watson /* Note: with MAC, this may not be a good assertion. */ 3889967cafcSSam Leffler KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags), 3899967cafcSSam Leffler ("m_move_pkthdr: to has tags")); 390e37b1fcdSRobert Watson #endif 391e32a5b94SRobert Watson #ifdef MAC 392225bff6fSRobert Watson /* 393225bff6fSRobert Watson * XXXMAC: It could be this should also occur for non-MAC? 
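 *
 * Deleting any tags already attached to "to" here keeps them from being
 * leaked when to->m_pkthdr (and with it the head of its tag list) is
 * overwritten by the structure assignment below.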
394225bff6fSRobert Watson */ 395e32a5b94SRobert Watson if (to->m_flags & M_PKTHDR) 396225bff6fSRobert Watson m_tag_delete_chain(to, NULL); 397e32a5b94SRobert Watson #endif 39882334850SJohn Baldwin to->m_flags = (from->m_flags & M_COPYFLAGS) | 39982334850SJohn Baldwin (to->m_flags & (M_EXT | M_NOMAP)); 400a4e71429SSam Leffler if ((to->m_flags & M_EXT) == 0) 4019967cafcSSam Leffler to->m_data = to->m_pktdat; 4029967cafcSSam Leffler to->m_pkthdr = from->m_pkthdr; /* especially tags */ 4039967cafcSSam Leffler SLIST_INIT(&from->m_pkthdr.tags); /* purge tags from src */ 4049967cafcSSam Leffler from->m_flags &= ~M_PKTHDR; 405fb3bc596SJohn Baldwin if (from->m_pkthdr.csum_flags & CSUM_SND_TAG) { 406fb3bc596SJohn Baldwin from->m_pkthdr.csum_flags &= ~CSUM_SND_TAG; 407fb3bc596SJohn Baldwin from->m_pkthdr.snd_tag = NULL; 408fb3bc596SJohn Baldwin } 4099967cafcSSam Leffler } 4109967cafcSSam Leffler 4119967cafcSSam Leffler /* 4129967cafcSSam Leffler * Duplicate "from"'s mbuf pkthdr in "to". 4139967cafcSSam Leffler * "from" must have M_PKTHDR set, and "to" must be empty. 4149967cafcSSam Leffler * In particular, this does a deep copy of the packet tags. 4159967cafcSSam Leffler */ 4169967cafcSSam Leffler int 4170cbefd30SAlexander V. Chernikov m_dup_pkthdr(struct mbuf *to, const struct mbuf *from, int how) 4189967cafcSSam Leffler { 4199967cafcSSam Leffler 4209967cafcSSam Leffler #if 0 4219967cafcSSam Leffler /* 4229967cafcSSam Leffler * The mbuf allocator only initializes the pkthdr 423c95be8b5SGleb Smirnoff * when the mbuf is allocated with m_gethdr(). Many users 424c95be8b5SGleb Smirnoff * (e.g. m_copy*, m_prepend) use m_get() and then 4259967cafcSSam Leffler * smash the pkthdr as needed causing these 4269967cafcSSam Leffler * assertions to trip. For now just disable them. 4279967cafcSSam Leffler */ 428fe584538SDag-Erling Smørgrav M_ASSERTPKTHDR(to); 429225bff6fSRobert Watson /* Note: with MAC, this may not be a good assertion. */ 4309967cafcSSam Leffler KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags), ("m_dup_pkthdr: to has tags")); 4319967cafcSSam Leffler #endif 432063d8114SAlfred Perlstein MBUF_CHECKSLEEP(how); 4339967cafcSSam Leffler #ifdef MAC 4349967cafcSSam Leffler if (to->m_flags & M_PKTHDR) 435225bff6fSRobert Watson m_tag_delete_chain(to, NULL); 4369967cafcSSam Leffler #endif 43782334850SJohn Baldwin to->m_flags = (from->m_flags & M_COPYFLAGS) | 43882334850SJohn Baldwin (to->m_flags & (M_EXT | M_NOMAP)); 439df8c7fc9SMike Silbersack if ((to->m_flags & M_EXT) == 0) 4409967cafcSSam Leffler to->m_data = to->m_pktdat; 441e37b1fcdSRobert Watson to->m_pkthdr = from->m_pkthdr; 442fb3bc596SJohn Baldwin if (from->m_pkthdr.csum_flags & CSUM_SND_TAG) 443fb3bc596SJohn Baldwin m_snd_tag_ref(from->m_pkthdr.snd_tag); 4449967cafcSSam Leffler SLIST_INIT(&to->m_pkthdr.tags); 44594985f74SGleb Smirnoff return (m_tag_copy_chain(to, from, how)); 446e37b1fcdSRobert Watson } 447e37b1fcdSRobert Watson 448e37b1fcdSRobert Watson /* 449df8bae1dSRodney W. Grimes * Lesser-used path for M_PREPEND: 450df8bae1dSRodney W. Grimes * allocate new mbuf to prepend to chain, 451df8bae1dSRodney W. Grimes * copy junk along. 452df8bae1dSRodney W. Grimes */ 453df8bae1dSRodney W. Grimes struct mbuf * 454122a814aSBosko Milekic m_prepend(struct mbuf *m, int len, int how) 455df8bae1dSRodney W. Grimes { 456df8bae1dSRodney W. Grimes struct mbuf *mn; 457df8bae1dSRodney W. 
Grimes 458f8bf8e39SMike Silbersack if (m->m_flags & M_PKTHDR) 459c95be8b5SGleb Smirnoff mn = m_gethdr(how, m->m_type); 460f8bf8e39SMike Silbersack else 461c95be8b5SGleb Smirnoff mn = m_get(how, m->m_type); 462122a814aSBosko Milekic if (mn == NULL) { 463df8bae1dSRodney W. Grimes m_freem(m); 464122a814aSBosko Milekic return (NULL); 465df8bae1dSRodney W. Grimes } 466225bff6fSRobert Watson if (m->m_flags & M_PKTHDR) 467c95be8b5SGleb Smirnoff m_move_pkthdr(mn, m); 468df8bae1dSRodney W. Grimes mn->m_next = m; 469df8bae1dSRodney W. Grimes m = mn; 470ed6a66caSRobert Watson if (len < M_SIZE(m)) 4715288989fSRandall Stewart M_ALIGN(m, len); 472df8bae1dSRodney W. Grimes m->m_len = len; 473df8bae1dSRodney W. Grimes return (m); 474df8bae1dSRodney W. Grimes } 475df8bae1dSRodney W. Grimes 476df8bae1dSRodney W. Grimes /* 477df8bae1dSRodney W. Grimes * Make a copy of an mbuf chain starting "off0" bytes from the beginning, 478df8bae1dSRodney W. Grimes * continuing for "len" bytes. If len is M_COPYALL, copy to end of mbuf. 479eb1b1807SGleb Smirnoff * The wait parameter is a choice of M_WAITOK/M_NOWAIT from caller. 4801c38f2eaSArchie Cobbs * Note that the copy is read-only, because clusters are not copied, 4811c38f2eaSArchie Cobbs * only their reference counts are incremented. 482df8bae1dSRodney W. Grimes */ 483df8bae1dSRodney W. Grimes struct mbuf * 48456a5f52eSGleb Smirnoff m_copym(struct mbuf *m, int off0, int len, int wait) 485df8bae1dSRodney W. Grimes { 486122a814aSBosko Milekic struct mbuf *n, **np; 487122a814aSBosko Milekic int off = off0; 488df8bae1dSRodney W. Grimes struct mbuf *top; 489df8bae1dSRodney W. Grimes int copyhdr = 0; 490df8bae1dSRodney W. Grimes 491e0a653ddSAlfred Perlstein KASSERT(off >= 0, ("m_copym, negative off %d", off)); 492e0a653ddSAlfred Perlstein KASSERT(len >= 0, ("m_copym, negative len %d", len)); 493063d8114SAlfred Perlstein MBUF_CHECKSLEEP(wait); 494df8bae1dSRodney W. Grimes if (off == 0 && m->m_flags & M_PKTHDR) 495df8bae1dSRodney W. Grimes copyhdr = 1; 496df8bae1dSRodney W. Grimes while (off > 0) { 497e0a653ddSAlfred Perlstein KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain")); 498df8bae1dSRodney W. Grimes if (off < m->m_len) 499df8bae1dSRodney W. Grimes break; 500df8bae1dSRodney W. Grimes off -= m->m_len; 501df8bae1dSRodney W. Grimes m = m->m_next; 502df8bae1dSRodney W. Grimes } 503df8bae1dSRodney W. Grimes np = ⊤ 504b85f65afSPedro F. Giffuni top = NULL; 505df8bae1dSRodney W. Grimes while (len > 0) { 506122a814aSBosko Milekic if (m == NULL) { 507e0a653ddSAlfred Perlstein KASSERT(len == M_COPYALL, 508e0a653ddSAlfred Perlstein ("m_copym, length > size of mbuf chain")); 509df8bae1dSRodney W. Grimes break; 510df8bae1dSRodney W. Grimes } 511f8bf8e39SMike Silbersack if (copyhdr) 512c95be8b5SGleb Smirnoff n = m_gethdr(wait, m->m_type); 513f8bf8e39SMike Silbersack else 514c95be8b5SGleb Smirnoff n = m_get(wait, m->m_type); 515df8bae1dSRodney W. Grimes *np = n; 516122a814aSBosko Milekic if (n == NULL) 517df8bae1dSRodney W. Grimes goto nospace; 518df8bae1dSRodney W. Grimes if (copyhdr) { 5199967cafcSSam Leffler if (!m_dup_pkthdr(n, m, wait)) 5209967cafcSSam Leffler goto nospace; 521df8bae1dSRodney W. Grimes if (len == M_COPYALL) 522df8bae1dSRodney W. Grimes n->m_pkthdr.len -= off0; 523df8bae1dSRodney W. Grimes else 524df8bae1dSRodney W. Grimes n->m_pkthdr.len = len; 525df8bae1dSRodney W. Grimes copyhdr = 0; 526df8bae1dSRodney W. Grimes } 527df8bae1dSRodney W. Grimes n->m_len = min(len, m->m_len - off); 528df8bae1dSRodney W. 
Grimes if (m->m_flags & M_EXT) { 529df8bae1dSRodney W. Grimes n->m_data = m->m_data + off; 53056a4e45aSAndre Oppermann mb_dupcl(n, m); 531df8bae1dSRodney W. Grimes } else 532df8bae1dSRodney W. Grimes bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t), 533bd395ae8SBosko Milekic (u_int)n->m_len); 534df8bae1dSRodney W. Grimes if (len != M_COPYALL) 535df8bae1dSRodney W. Grimes len -= n->m_len; 536df8bae1dSRodney W. Grimes off = 0; 537df8bae1dSRodney W. Grimes m = m->m_next; 538df8bae1dSRodney W. Grimes np = &n->m_next; 539df8bae1dSRodney W. Grimes } 54008442f8aSBosko Milekic 541df8bae1dSRodney W. Grimes return (top); 542df8bae1dSRodney W. Grimes nospace: 543df8bae1dSRodney W. Grimes m_freem(top); 544122a814aSBosko Milekic return (NULL); 545df8bae1dSRodney W. Grimes } 546df8bae1dSRodney W. Grimes 547df8bae1dSRodney W. Grimes /* 5486a06dea0SGarrett Wollman * Copy an entire packet, including header (which must be present). 5496a06dea0SGarrett Wollman * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'. 5501c38f2eaSArchie Cobbs * Note that the copy is read-only, because clusters are not copied, 5511c38f2eaSArchie Cobbs * only their reference counts are incremented. 5525fe86675SLuigi Rizzo * Preserve alignment of the first mbuf so if the creator has left 5535fe86675SLuigi Rizzo * some room at the beginning (e.g. for inserting protocol headers) 5545fe86675SLuigi Rizzo * the copies still have the room available. 5556a06dea0SGarrett Wollman */ 5566a06dea0SGarrett Wollman struct mbuf * 557122a814aSBosko Milekic m_copypacket(struct mbuf *m, int how) 5586a06dea0SGarrett Wollman { 5596a06dea0SGarrett Wollman struct mbuf *top, *n, *o; 5606a06dea0SGarrett Wollman 561063d8114SAlfred Perlstein MBUF_CHECKSLEEP(how); 562c95be8b5SGleb Smirnoff n = m_get(how, m->m_type); 5636a06dea0SGarrett Wollman top = n; 564122a814aSBosko Milekic if (n == NULL) 5656a06dea0SGarrett Wollman goto nospace; 5666a06dea0SGarrett Wollman 5679967cafcSSam Leffler if (!m_dup_pkthdr(n, m, how)) 5689967cafcSSam Leffler goto nospace; 5696a06dea0SGarrett Wollman n->m_len = m->m_len; 5706a06dea0SGarrett Wollman if (m->m_flags & M_EXT) { 5716a06dea0SGarrett Wollman n->m_data = m->m_data; 57256a4e45aSAndre Oppermann mb_dupcl(n, m); 5736a06dea0SGarrett Wollman } else { 5745fe86675SLuigi Rizzo n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat ); 5756a06dea0SGarrett Wollman bcopy(mtod(m, char *), mtod(n, char *), n->m_len); 5766a06dea0SGarrett Wollman } 5776a06dea0SGarrett Wollman 5786a06dea0SGarrett Wollman m = m->m_next; 5796a06dea0SGarrett Wollman while (m) { 580c95be8b5SGleb Smirnoff o = m_get(how, m->m_type); 581122a814aSBosko Milekic if (o == NULL) 5826a06dea0SGarrett Wollman goto nospace; 5836a06dea0SGarrett Wollman 5846a06dea0SGarrett Wollman n->m_next = o; 5856a06dea0SGarrett Wollman n = n->m_next; 5866a06dea0SGarrett Wollman 5876a06dea0SGarrett Wollman n->m_len = m->m_len; 5886a06dea0SGarrett Wollman if (m->m_flags & M_EXT) { 5896a06dea0SGarrett Wollman n->m_data = m->m_data; 59056a4e45aSAndre Oppermann mb_dupcl(n, m); 5916a06dea0SGarrett Wollman } else { 5926a06dea0SGarrett Wollman bcopy(mtod(m, char *), mtod(n, char *), n->m_len); 5936a06dea0SGarrett Wollman } 5946a06dea0SGarrett Wollman 5956a06dea0SGarrett Wollman m = m->m_next; 5966a06dea0SGarrett Wollman } 5976a06dea0SGarrett Wollman return top; 5986a06dea0SGarrett Wollman nospace: 5996a06dea0SGarrett Wollman m_freem(top); 600122a814aSBosko Milekic return (NULL); 6016a06dea0SGarrett Wollman } 6026a06dea0SGarrett Wollman 60382334850SJohn Baldwin static void 
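/*
 * Helper for m_copydata(): copy "len" bytes starting at "off" within the
 * unmapped (M_NOMAP) mbuf "m" into the plain kernel buffer "cp".  The
 * destination is wrapped in a single-segment UIO_SYSSPACE uio so that
 * m_unmappedtouio() can perform the copy from the external pages.
 */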
60482334850SJohn Baldwin m_copyfromunmapped(const struct mbuf *m, int off, int len, caddr_t cp) 60582334850SJohn Baldwin { 60682334850SJohn Baldwin struct iovec iov; 60782334850SJohn Baldwin struct uio uio; 60882334850SJohn Baldwin int error; 60982334850SJohn Baldwin 61082334850SJohn Baldwin KASSERT(off >= 0, ("m_copyfromunmapped: negative off %d", off)); 61182334850SJohn Baldwin KASSERT(len >= 0, ("m_copyfromunmapped: negative len %d", len)); 61282334850SJohn Baldwin KASSERT(off < m->m_len, 61382334850SJohn Baldwin ("m_copyfromunmapped: len exceeds mbuf length")); 61482334850SJohn Baldwin iov.iov_base = cp; 61582334850SJohn Baldwin iov.iov_len = len; 61682334850SJohn Baldwin uio.uio_resid = len; 61782334850SJohn Baldwin uio.uio_iov = &iov; 61882334850SJohn Baldwin uio.uio_segflg = UIO_SYSSPACE; 61982334850SJohn Baldwin uio.uio_iovcnt = 1; 62082334850SJohn Baldwin uio.uio_offset = 0; 62182334850SJohn Baldwin uio.uio_rw = UIO_READ; 62282334850SJohn Baldwin error = m_unmappedtouio(m, off, &uio, len); 62382334850SJohn Baldwin KASSERT(error == 0, ("m_unmappedtouio failed: off %d, len %d", off, 62482334850SJohn Baldwin len)); 62582334850SJohn Baldwin } 62682334850SJohn Baldwin 6276a06dea0SGarrett Wollman /* 628df8bae1dSRodney W. Grimes * Copy data from an mbuf chain starting "off" bytes from the beginning, 629df8bae1dSRodney W. Grimes * continuing for "len" bytes, into the indicated buffer. 630df8bae1dSRodney W. Grimes */ 63126f9a767SRodney W. Grimes void 632a8cfc0eeSJulian Elischer m_copydata(const struct mbuf *m, int off, int len, caddr_t cp) 633df8bae1dSRodney W. Grimes { 634bd395ae8SBosko Milekic u_int count; 635df8bae1dSRodney W. Grimes 636e0a653ddSAlfred Perlstein KASSERT(off >= 0, ("m_copydata, negative off %d", off)); 637e0a653ddSAlfred Perlstein KASSERT(len >= 0, ("m_copydata, negative len %d", len)); 638df8bae1dSRodney W. Grimes while (off > 0) { 639e0a653ddSAlfred Perlstein KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain")); 640df8bae1dSRodney W. Grimes if (off < m->m_len) 641df8bae1dSRodney W. Grimes break; 642df8bae1dSRodney W. Grimes off -= m->m_len; 643df8bae1dSRodney W. Grimes m = m->m_next; 644df8bae1dSRodney W. Grimes } 645df8bae1dSRodney W. Grimes while (len > 0) { 646e0a653ddSAlfred Perlstein KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain")); 647df8bae1dSRodney W. Grimes count = min(m->m_len - off, len); 64882334850SJohn Baldwin if ((m->m_flags & M_NOMAP) != 0) 64982334850SJohn Baldwin m_copyfromunmapped(m, off, count, cp); 65082334850SJohn Baldwin else 651df8bae1dSRodney W. Grimes bcopy(mtod(m, caddr_t) + off, cp, count); 652df8bae1dSRodney W. Grimes len -= count; 653df8bae1dSRodney W. Grimes cp += count; 654df8bae1dSRodney W. Grimes off = 0; 655df8bae1dSRodney W. Grimes m = m->m_next; 656df8bae1dSRodney W. Grimes } 657df8bae1dSRodney W. Grimes } 658df8bae1dSRodney W. Grimes 659df8bae1dSRodney W. Grimes /* 6601c38f2eaSArchie Cobbs * Copy a packet header mbuf chain into a completely new chain, including 6611c38f2eaSArchie Cobbs * copying any mbuf clusters. Use this instead of m_copypacket() when 6621c38f2eaSArchie Cobbs * you need a writable copy of an mbuf chain. 6631c38f2eaSArchie Cobbs */ 6641c38f2eaSArchie Cobbs struct mbuf * 6650cbefd30SAlexander V. 
Chernikov m_dup(const struct mbuf *m, int how) 6661c38f2eaSArchie Cobbs { 6671c38f2eaSArchie Cobbs struct mbuf **p, *top = NULL; 6681c38f2eaSArchie Cobbs int remain, moff, nsize; 6691c38f2eaSArchie Cobbs 670063d8114SAlfred Perlstein MBUF_CHECKSLEEP(how); 6711c38f2eaSArchie Cobbs /* Sanity check */ 6721c38f2eaSArchie Cobbs if (m == NULL) 673122a814aSBosko Milekic return (NULL); 674fe584538SDag-Erling Smørgrav M_ASSERTPKTHDR(m); 6751c38f2eaSArchie Cobbs 6761c38f2eaSArchie Cobbs /* While there's more data, get a new mbuf, tack it on, and fill it */ 6771c38f2eaSArchie Cobbs remain = m->m_pkthdr.len; 6781c38f2eaSArchie Cobbs moff = 0; 6791c38f2eaSArchie Cobbs p = ⊤ 6801c38f2eaSArchie Cobbs while (remain > 0 || top == NULL) { /* allow m->m_pkthdr.len == 0 */ 6811c38f2eaSArchie Cobbs struct mbuf *n; 6821c38f2eaSArchie Cobbs 6831c38f2eaSArchie Cobbs /* Get the next new mbuf */ 684099a0e58SBosko Milekic if (remain >= MINCLSIZE) { 685099a0e58SBosko Milekic n = m_getcl(how, m->m_type, 0); 686099a0e58SBosko Milekic nsize = MCLBYTES; 687099a0e58SBosko Milekic } else { 688099a0e58SBosko Milekic n = m_get(how, m->m_type); 689099a0e58SBosko Milekic nsize = MLEN; 690099a0e58SBosko Milekic } 6911c38f2eaSArchie Cobbs if (n == NULL) 6921c38f2eaSArchie Cobbs goto nospace; 693099a0e58SBosko Milekic 694099a0e58SBosko Milekic if (top == NULL) { /* First one, must be PKTHDR */ 695099a0e58SBosko Milekic if (!m_dup_pkthdr(n, m, how)) { 696099a0e58SBosko Milekic m_free(n); 6971c38f2eaSArchie Cobbs goto nospace; 6981c38f2eaSArchie Cobbs } 69963e6f390SEd Maste if ((n->m_flags & M_EXT) == 0) 700099a0e58SBosko Milekic nsize = MHLEN; 701089bb672SAndrey V. Elsukov n->m_flags &= ~M_RDONLY; 7021c38f2eaSArchie Cobbs } 7031c38f2eaSArchie Cobbs n->m_len = 0; 7041c38f2eaSArchie Cobbs 7051c38f2eaSArchie Cobbs /* Link it into the new chain */ 7061c38f2eaSArchie Cobbs *p = n; 7071c38f2eaSArchie Cobbs p = &n->m_next; 7081c38f2eaSArchie Cobbs 7091c38f2eaSArchie Cobbs /* Copy data from original mbuf(s) into new mbuf */ 7101c38f2eaSArchie Cobbs while (n->m_len < nsize && m != NULL) { 7111c38f2eaSArchie Cobbs int chunk = min(nsize - n->m_len, m->m_len - moff); 7121c38f2eaSArchie Cobbs 7131c38f2eaSArchie Cobbs bcopy(m->m_data + moff, n->m_data + n->m_len, chunk); 7141c38f2eaSArchie Cobbs moff += chunk; 7151c38f2eaSArchie Cobbs n->m_len += chunk; 7161c38f2eaSArchie Cobbs remain -= chunk; 7171c38f2eaSArchie Cobbs if (moff == m->m_len) { 7181c38f2eaSArchie Cobbs m = m->m_next; 7191c38f2eaSArchie Cobbs moff = 0; 7201c38f2eaSArchie Cobbs } 7211c38f2eaSArchie Cobbs } 7221c38f2eaSArchie Cobbs 7231c38f2eaSArchie Cobbs /* Check correct total mbuf length */ 7241c38f2eaSArchie Cobbs KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL), 725a48740b6SDavid E. O'Brien ("%s: bogus m_pkthdr.len", __func__)); 7261c38f2eaSArchie Cobbs } 7271c38f2eaSArchie Cobbs return (top); 7281c38f2eaSArchie Cobbs 7291c38f2eaSArchie Cobbs nospace: 7301c38f2eaSArchie Cobbs m_freem(top); 731122a814aSBosko Milekic return (NULL); 7321c38f2eaSArchie Cobbs } 7331c38f2eaSArchie Cobbs 7341c38f2eaSArchie Cobbs /* 735df8bae1dSRodney W. Grimes * Concatenate mbuf chain n to m. 736df8bae1dSRodney W. Grimes * Both chains must be of the same type (e.g. MT_DATA). 737df8bae1dSRodney W. Grimes * Any m_pkthdr is not updated. 738df8bae1dSRodney W. Grimes */ 73926f9a767SRodney W. Grimes void 740122a814aSBosko Milekic m_cat(struct mbuf *m, struct mbuf *n) 741df8bae1dSRodney W. Grimes { 742df8bae1dSRodney W. Grimes while (m->m_next) 743df8bae1dSRodney W. 
Grimes m = m->m_next; 744df8bae1dSRodney W. Grimes while (n) { 74514d7c5b1SAndre Oppermann if (!M_WRITABLE(m) || 74682334850SJohn Baldwin (n->m_flags & M_NOMAP) != 0 || 74714d7c5b1SAndre Oppermann M_TRAILINGSPACE(m) < n->m_len) { 748df8bae1dSRodney W. Grimes /* just join the two chains */ 749df8bae1dSRodney W. Grimes m->m_next = n; 750df8bae1dSRodney W. Grimes return; 751df8bae1dSRodney W. Grimes } 752df8bae1dSRodney W. Grimes /* splat the data from one into the other */ 753df8bae1dSRodney W. Grimes bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len, 754df8bae1dSRodney W. Grimes (u_int)n->m_len); 755df8bae1dSRodney W. Grimes m->m_len += n->m_len; 756df8bae1dSRodney W. Grimes n = m_free(n); 757df8bae1dSRodney W. Grimes } 758df8bae1dSRodney W. Grimes } 759df8bae1dSRodney W. Grimes 7601967edbaSGleb Smirnoff /* 7611967edbaSGleb Smirnoff * Concatenate two pkthdr mbuf chains. 7621967edbaSGleb Smirnoff */ 7631967edbaSGleb Smirnoff void 7641967edbaSGleb Smirnoff m_catpkt(struct mbuf *m, struct mbuf *n) 7651967edbaSGleb Smirnoff { 7661967edbaSGleb Smirnoff 7671967edbaSGleb Smirnoff M_ASSERTPKTHDR(m); 7681967edbaSGleb Smirnoff M_ASSERTPKTHDR(n); 7691967edbaSGleb Smirnoff 7701967edbaSGleb Smirnoff m->m_pkthdr.len += n->m_pkthdr.len; 771651e4e6aSGleb Smirnoff m_demote(n, 1, 0); 7721967edbaSGleb Smirnoff 7731967edbaSGleb Smirnoff m_cat(m, n); 7741967edbaSGleb Smirnoff } 7751967edbaSGleb Smirnoff 77626f9a767SRodney W. Grimes void 777122a814aSBosko Milekic m_adj(struct mbuf *mp, int req_len) 778df8bae1dSRodney W. Grimes { 779122a814aSBosko Milekic int len = req_len; 780122a814aSBosko Milekic struct mbuf *m; 781122a814aSBosko Milekic int count; 782df8bae1dSRodney W. Grimes 783df8bae1dSRodney W. Grimes if ((m = mp) == NULL) 784df8bae1dSRodney W. Grimes return; 785df8bae1dSRodney W. Grimes if (len >= 0) { 786df8bae1dSRodney W. Grimes /* 787df8bae1dSRodney W. Grimes * Trim from head. 788df8bae1dSRodney W. Grimes */ 789df8bae1dSRodney W. Grimes while (m != NULL && len > 0) { 790df8bae1dSRodney W. Grimes if (m->m_len <= len) { 791df8bae1dSRodney W. Grimes len -= m->m_len; 792df8bae1dSRodney W. Grimes m->m_len = 0; 793df8bae1dSRodney W. Grimes m = m->m_next; 794df8bae1dSRodney W. Grimes } else { 795df8bae1dSRodney W. Grimes m->m_len -= len; 796df8bae1dSRodney W. Grimes m->m_data += len; 797df8bae1dSRodney W. Grimes len = 0; 798df8bae1dSRodney W. Grimes } 799df8bae1dSRodney W. Grimes } 800df8bae1dSRodney W. Grimes if (mp->m_flags & M_PKTHDR) 801a83baab6SMarko Zec mp->m_pkthdr.len -= (req_len - len); 802df8bae1dSRodney W. Grimes } else { 803df8bae1dSRodney W. Grimes /* 804df8bae1dSRodney W. Grimes * Trim from tail. Scan the mbuf chain, 805df8bae1dSRodney W. Grimes * calculating its length and finding the last mbuf. 806df8bae1dSRodney W. Grimes * If the adjustment only affects this mbuf, then just 807df8bae1dSRodney W. Grimes * adjust and return. Otherwise, rescan and truncate 808df8bae1dSRodney W. Grimes * after the remaining size. 809df8bae1dSRodney W. Grimes */ 810df8bae1dSRodney W. Grimes len = -len; 811df8bae1dSRodney W. Grimes count = 0; 812df8bae1dSRodney W. Grimes for (;;) { 813df8bae1dSRodney W. Grimes count += m->m_len; 814df8bae1dSRodney W. Grimes if (m->m_next == (struct mbuf *)0) 815df8bae1dSRodney W. Grimes break; 816df8bae1dSRodney W. Grimes m = m->m_next; 817df8bae1dSRodney W. Grimes } 818df8bae1dSRodney W. Grimes if (m->m_len >= len) { 819df8bae1dSRodney W. Grimes m->m_len -= len; 820df8bae1dSRodney W. Grimes if (mp->m_flags & M_PKTHDR) 821df8bae1dSRodney W. 
Grimes mp->m_pkthdr.len -= len; 822df8bae1dSRodney W. Grimes return; 823df8bae1dSRodney W. Grimes } 824df8bae1dSRodney W. Grimes count -= len; 825df8bae1dSRodney W. Grimes if (count < 0) 826df8bae1dSRodney W. Grimes count = 0; 827df8bae1dSRodney W. Grimes /* 828df8bae1dSRodney W. Grimes * Correct length for chain is "count". 829df8bae1dSRodney W. Grimes * Find the mbuf with last data, adjust its length, 830df8bae1dSRodney W. Grimes * and toss data from remaining mbufs on chain. 831df8bae1dSRodney W. Grimes */ 832df8bae1dSRodney W. Grimes m = mp; 833df8bae1dSRodney W. Grimes if (m->m_flags & M_PKTHDR) 834df8bae1dSRodney W. Grimes m->m_pkthdr.len = count; 835df8bae1dSRodney W. Grimes for (; m; m = m->m_next) { 836df8bae1dSRodney W. Grimes if (m->m_len >= count) { 837df8bae1dSRodney W. Grimes m->m_len = count; 83859d8b310SSam Leffler if (m->m_next != NULL) { 83959d8b310SSam Leffler m_freem(m->m_next); 84059d8b310SSam Leffler m->m_next = NULL; 84159d8b310SSam Leffler } 842df8bae1dSRodney W. Grimes break; 843df8bae1dSRodney W. Grimes } 844df8bae1dSRodney W. Grimes count -= m->m_len; 845df8bae1dSRodney W. Grimes } 846df8bae1dSRodney W. Grimes } 847df8bae1dSRodney W. Grimes } 848df8bae1dSRodney W. Grimes 849df8bae1dSRodney W. Grimes /* 850df8bae1dSRodney W. Grimes * Rearange an mbuf chain so that len bytes are contiguous 851a2c36a02SKevin Lo * and in the data area of an mbuf (so that mtod will work 852a2c36a02SKevin Lo * for a structure of size len). Returns the resulting 853df8bae1dSRodney W. Grimes * mbuf chain on success, frees it and returns null on failure. 854df8bae1dSRodney W. Grimes * If there is room, it will add up to max_protohdr-len extra bytes to the 855df8bae1dSRodney W. Grimes * contiguous region in an attempt to avoid being called next time. 856df8bae1dSRodney W. Grimes */ 857df8bae1dSRodney W. Grimes struct mbuf * 858122a814aSBosko Milekic m_pullup(struct mbuf *n, int len) 859df8bae1dSRodney W. Grimes { 860122a814aSBosko Milekic struct mbuf *m; 861122a814aSBosko Milekic int count; 862df8bae1dSRodney W. Grimes int space; 863df8bae1dSRodney W. Grimes 86482334850SJohn Baldwin KASSERT((n->m_flags & M_NOMAP) == 0, 86582334850SJohn Baldwin ("%s: unmapped mbuf %p", __func__, n)); 86682334850SJohn Baldwin 867df8bae1dSRodney W. Grimes /* 868df8bae1dSRodney W. Grimes * If first mbuf has no cluster, and has room for len bytes 869df8bae1dSRodney W. Grimes * without shifting current data, pullup into it, 870df8bae1dSRodney W. Grimes * otherwise allocate a new mbuf to prepend to the chain. 871df8bae1dSRodney W. Grimes */ 872df8bae1dSRodney W. Grimes if ((n->m_flags & M_EXT) == 0 && 873df8bae1dSRodney W. Grimes n->m_data + len < &n->m_dat[MLEN] && n->m_next) { 874df8bae1dSRodney W. Grimes if (n->m_len >= len) 875df8bae1dSRodney W. Grimes return (n); 876df8bae1dSRodney W. Grimes m = n; 877df8bae1dSRodney W. Grimes n = n->m_next; 878df8bae1dSRodney W. Grimes len -= m->m_len; 879df8bae1dSRodney W. Grimes } else { 880df8bae1dSRodney W. Grimes if (len > MHLEN) 881df8bae1dSRodney W. Grimes goto bad; 882c95be8b5SGleb Smirnoff m = m_get(M_NOWAIT, n->m_type); 883122a814aSBosko Milekic if (m == NULL) 884df8bae1dSRodney W. Grimes goto bad; 8859967cafcSSam Leffler if (n->m_flags & M_PKTHDR) 886c95be8b5SGleb Smirnoff m_move_pkthdr(m, n); 887df8bae1dSRodney W. Grimes } 888df8bae1dSRodney W. Grimes space = &m->m_dat[MLEN] - (m->m_data + m->m_len); 889df8bae1dSRodney W. Grimes do { 890df8bae1dSRodney W. Grimes count = min(min(max(len, max_protohdr), space), n->m_len); 891df8bae1dSRodney W. 
Grimes bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len, 892bd395ae8SBosko Milekic (u_int)count); 893df8bae1dSRodney W. Grimes len -= count; 894df8bae1dSRodney W. Grimes m->m_len += count; 895df8bae1dSRodney W. Grimes n->m_len -= count; 896df8bae1dSRodney W. Grimes space -= count; 897df8bae1dSRodney W. Grimes if (n->m_len) 898df8bae1dSRodney W. Grimes n->m_data += count; 899df8bae1dSRodney W. Grimes else 900df8bae1dSRodney W. Grimes n = m_free(n); 901df8bae1dSRodney W. Grimes } while (len > 0 && n); 902df8bae1dSRodney W. Grimes if (len > 0) { 903df8bae1dSRodney W. Grimes (void) m_free(m); 904df8bae1dSRodney W. Grimes goto bad; 905df8bae1dSRodney W. Grimes } 906df8bae1dSRodney W. Grimes m->m_next = n; 907df8bae1dSRodney W. Grimes return (m); 908df8bae1dSRodney W. Grimes bad: 909df8bae1dSRodney W. Grimes m_freem(n); 910122a814aSBosko Milekic return (NULL); 911df8bae1dSRodney W. Grimes } 912df8bae1dSRodney W. Grimes 913df8bae1dSRodney W. Grimes /* 9147ac139a9SJohn-Mark Gurney * Like m_pullup(), except a new mbuf is always allocated, and we allow 9157ac139a9SJohn-Mark Gurney * the amount of empty space before the data in the new mbuf to be specified 9167ac139a9SJohn-Mark Gurney * (in the event that the caller expects to prepend later). 9177ac139a9SJohn-Mark Gurney */ 9187ac139a9SJohn-Mark Gurney struct mbuf * 9197ac139a9SJohn-Mark Gurney m_copyup(struct mbuf *n, int len, int dstoff) 9207ac139a9SJohn-Mark Gurney { 9217ac139a9SJohn-Mark Gurney struct mbuf *m; 9227ac139a9SJohn-Mark Gurney int count, space; 9237ac139a9SJohn-Mark Gurney 9247ac139a9SJohn-Mark Gurney if (len > (MHLEN - dstoff)) 9257ac139a9SJohn-Mark Gurney goto bad; 926c95be8b5SGleb Smirnoff m = m_get(M_NOWAIT, n->m_type); 9277ac139a9SJohn-Mark Gurney if (m == NULL) 9287ac139a9SJohn-Mark Gurney goto bad; 9297ac139a9SJohn-Mark Gurney if (n->m_flags & M_PKTHDR) 930c95be8b5SGleb Smirnoff m_move_pkthdr(m, n); 9317ac139a9SJohn-Mark Gurney m->m_data += dstoff; 9327ac139a9SJohn-Mark Gurney space = &m->m_dat[MLEN] - (m->m_data + m->m_len); 9337ac139a9SJohn-Mark Gurney do { 9347ac139a9SJohn-Mark Gurney count = min(min(max(len, max_protohdr), space), n->m_len); 9357ac139a9SJohn-Mark Gurney memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t), 9367ac139a9SJohn-Mark Gurney (unsigned)count); 9377ac139a9SJohn-Mark Gurney len -= count; 9387ac139a9SJohn-Mark Gurney m->m_len += count; 9397ac139a9SJohn-Mark Gurney n->m_len -= count; 9407ac139a9SJohn-Mark Gurney space -= count; 9417ac139a9SJohn-Mark Gurney if (n->m_len) 9427ac139a9SJohn-Mark Gurney n->m_data += count; 9437ac139a9SJohn-Mark Gurney else 9447ac139a9SJohn-Mark Gurney n = m_free(n); 9457ac139a9SJohn-Mark Gurney } while (len > 0 && n); 9467ac139a9SJohn-Mark Gurney if (len > 0) { 9477ac139a9SJohn-Mark Gurney (void) m_free(m); 9487ac139a9SJohn-Mark Gurney goto bad; 9497ac139a9SJohn-Mark Gurney } 9507ac139a9SJohn-Mark Gurney m->m_next = n; 9517ac139a9SJohn-Mark Gurney return (m); 9527ac139a9SJohn-Mark Gurney bad: 9537ac139a9SJohn-Mark Gurney m_freem(n); 9547ac139a9SJohn-Mark Gurney return (NULL); 9557ac139a9SJohn-Mark Gurney } 9567ac139a9SJohn-Mark Gurney 9577ac139a9SJohn-Mark Gurney /* 958df8bae1dSRodney W. Grimes * Partition an mbuf chain in two pieces, returning the tail -- 959df8bae1dSRodney W. Grimes * all but the first len0 bytes. In case of failure, it returns NULL and 960df8bae1dSRodney W. Grimes * attempts to restore the chain to its original state. 
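 *
 * A minimal usage sketch (illustrative only, with hypothetical names): a
 * caller that wants the first "hdrlen" bytes left on one chain and the
 * remainder on another might do
 *
 *	tail = m_split(m0, hdrlen, M_NOWAIT);
 *	if (tail == NULL)
 *		goto drop;
 *
 * and on failure treat m0 as still holding the whole packet.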
96148d183faSArchie Cobbs * 96248d183faSArchie Cobbs * Note that the resulting mbufs might be read-only, because the new 96348d183faSArchie Cobbs * mbuf can end up sharing an mbuf cluster with the original mbuf if 96448d183faSArchie Cobbs * the "breaking point" happens to lie within a cluster mbuf. Use the 96548d183faSArchie Cobbs * M_WRITABLE() macro to check for this case. 966df8bae1dSRodney W. Grimes */ 967df8bae1dSRodney W. Grimes struct mbuf * 968122a814aSBosko Milekic m_split(struct mbuf *m0, int len0, int wait) 969df8bae1dSRodney W. Grimes { 970122a814aSBosko Milekic struct mbuf *m, *n; 971bd395ae8SBosko Milekic u_int len = len0, remain; 972df8bae1dSRodney W. Grimes 973063d8114SAlfred Perlstein MBUF_CHECKSLEEP(wait); 974df8bae1dSRodney W. Grimes for (m = m0; m && len > m->m_len; m = m->m_next) 975df8bae1dSRodney W. Grimes len -= m->m_len; 976122a814aSBosko Milekic if (m == NULL) 977122a814aSBosko Milekic return (NULL); 978df8bae1dSRodney W. Grimes remain = m->m_len - len; 97921f39848SGleb Smirnoff if (m0->m_flags & M_PKTHDR && remain == 0) { 98021f39848SGleb Smirnoff n = m_gethdr(wait, m0->m_type); 98177badb18SGleb Smirnoff if (n == NULL) 98221f39848SGleb Smirnoff return (NULL); 98321f39848SGleb Smirnoff n->m_next = m->m_next; 98421f39848SGleb Smirnoff m->m_next = NULL; 985fb3bc596SJohn Baldwin if (m0->m_pkthdr.csum_flags & CSUM_SND_TAG) { 986fb3bc596SJohn Baldwin n->m_pkthdr.snd_tag = 987fb3bc596SJohn Baldwin m_snd_tag_ref(m0->m_pkthdr.snd_tag); 988fb3bc596SJohn Baldwin n->m_pkthdr.csum_flags |= CSUM_SND_TAG; 989fb3bc596SJohn Baldwin } else 99021f39848SGleb Smirnoff n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif; 99121f39848SGleb Smirnoff n->m_pkthdr.len = m0->m_pkthdr.len - len0; 99221f39848SGleb Smirnoff m0->m_pkthdr.len = len0; 99321f39848SGleb Smirnoff return (n); 99421f39848SGleb Smirnoff } else if (m0->m_flags & M_PKTHDR) { 995c95be8b5SGleb Smirnoff n = m_gethdr(wait, m0->m_type); 996122a814aSBosko Milekic if (n == NULL) 997122a814aSBosko Milekic return (NULL); 998fb3bc596SJohn Baldwin if (m0->m_pkthdr.csum_flags & CSUM_SND_TAG) { 999fb3bc596SJohn Baldwin n->m_pkthdr.snd_tag = 1000fb3bc596SJohn Baldwin m_snd_tag_ref(m0->m_pkthdr.snd_tag); 1001fb3bc596SJohn Baldwin n->m_pkthdr.csum_flags |= CSUM_SND_TAG; 1002fb3bc596SJohn Baldwin } else 1003df8bae1dSRodney W. Grimes n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif; 1004df8bae1dSRodney W. Grimes n->m_pkthdr.len = m0->m_pkthdr.len - len0; 1005df8bae1dSRodney W. Grimes m0->m_pkthdr.len = len0; 1006df8bae1dSRodney W. Grimes if (m->m_flags & M_EXT) 1007df8bae1dSRodney W. Grimes goto extpacket; 1008df8bae1dSRodney W. Grimes if (remain > MHLEN) { 1009df8bae1dSRodney W. Grimes /* m can't be the lead packet */ 1010ed6a66caSRobert Watson M_ALIGN(n, 0); 1011df8bae1dSRodney W. Grimes n->m_next = m_split(m, len, wait); 1012122a814aSBosko Milekic if (n->m_next == NULL) { 1013df8bae1dSRodney W. Grimes (void) m_free(n); 1014122a814aSBosko Milekic return (NULL); 101540376987SJeffrey Hsu } else { 101640376987SJeffrey Hsu n->m_len = 0; 1017df8bae1dSRodney W. Grimes return (n); 101840376987SJeffrey Hsu } 1019df8bae1dSRodney W. Grimes } else 1020ed6a66caSRobert Watson M_ALIGN(n, remain); 1021df8bae1dSRodney W. Grimes } else if (remain == 0) { 1022df8bae1dSRodney W. Grimes n = m->m_next; 1023122a814aSBosko Milekic m->m_next = NULL; 1024df8bae1dSRodney W. Grimes return (n); 1025df8bae1dSRodney W. 
Grimes } else { 1026c95be8b5SGleb Smirnoff n = m_get(wait, m->m_type); 1027122a814aSBosko Milekic if (n == NULL) 1028122a814aSBosko Milekic return (NULL); 1029df8bae1dSRodney W. Grimes M_ALIGN(n, remain); 1030df8bae1dSRodney W. Grimes } 1031df8bae1dSRodney W. Grimes extpacket: 1032df8bae1dSRodney W. Grimes if (m->m_flags & M_EXT) { 1033df8bae1dSRodney W. Grimes n->m_data = m->m_data + len; 103456a4e45aSAndre Oppermann mb_dupcl(n, m); 1035df8bae1dSRodney W. Grimes } else { 1036df8bae1dSRodney W. Grimes bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain); 1037df8bae1dSRodney W. Grimes } 1038df8bae1dSRodney W. Grimes n->m_len = remain; 1039df8bae1dSRodney W. Grimes m->m_len = len; 1040df8bae1dSRodney W. Grimes n->m_next = m->m_next; 1041122a814aSBosko Milekic m->m_next = NULL; 1042df8bae1dSRodney W. Grimes return (n); 1043df8bae1dSRodney W. Grimes } 1044df8bae1dSRodney W. Grimes /* 1045df8bae1dSRodney W. Grimes * Routine to copy from device local memory into mbufs. 1046f5eece3fSBosko Milekic * Note that `off' argument is offset into first mbuf of target chain from 1047f5eece3fSBosko Milekic * which to begin copying the data to. 1048df8bae1dSRodney W. Grimes */ 1049df8bae1dSRodney W. Grimes struct mbuf * 1050f5eece3fSBosko Milekic m_devget(char *buf, int totlen, int off, struct ifnet *ifp, 1051122a814aSBosko Milekic void (*copy)(char *from, caddr_t to, u_int len)) 1052df8bae1dSRodney W. Grimes { 1053122a814aSBosko Milekic struct mbuf *m; 1054099a0e58SBosko Milekic struct mbuf *top = NULL, **mp = ⊤ 1055f5eece3fSBosko Milekic int len; 1056df8bae1dSRodney W. Grimes 1057f5eece3fSBosko Milekic if (off < 0 || off > MHLEN) 1058f5eece3fSBosko Milekic return (NULL); 1059f5eece3fSBosko Milekic 1060df8bae1dSRodney W. Grimes while (totlen > 0) { 1061099a0e58SBosko Milekic if (top == NULL) { /* First one, must be PKTHDR */ 1062f5eece3fSBosko Milekic if (totlen + off >= MINCLSIZE) { 1063eb1b1807SGleb Smirnoff m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 1064f5eece3fSBosko Milekic len = MCLBYTES; 1065df8bae1dSRodney W. Grimes } else { 1066eb1b1807SGleb Smirnoff m = m_gethdr(M_NOWAIT, MT_DATA); 1067099a0e58SBosko Milekic len = MHLEN; 1068099a0e58SBosko Milekic 1069099a0e58SBosko Milekic /* Place initial small packet/header at end of mbuf */ 1070cee4a056SKevin Lo if (m && totlen + off + max_linkhdr <= MHLEN) { 1071df8bae1dSRodney W. Grimes m->m_data += max_linkhdr; 1072f5eece3fSBosko Milekic len -= max_linkhdr; 1073df8bae1dSRodney W. Grimes } 1074f5eece3fSBosko Milekic } 1075099a0e58SBosko Milekic if (m == NULL) 1076099a0e58SBosko Milekic return NULL; 1077099a0e58SBosko Milekic m->m_pkthdr.rcvif = ifp; 1078099a0e58SBosko Milekic m->m_pkthdr.len = totlen; 1079099a0e58SBosko Milekic } else { 1080099a0e58SBosko Milekic if (totlen + off >= MINCLSIZE) { 1081eb1b1807SGleb Smirnoff m = m_getcl(M_NOWAIT, MT_DATA, 0); 1082099a0e58SBosko Milekic len = MCLBYTES; 1083099a0e58SBosko Milekic } else { 1084eb1b1807SGleb Smirnoff m = m_get(M_NOWAIT, MT_DATA); 1085099a0e58SBosko Milekic len = MLEN; 1086099a0e58SBosko Milekic } 1087099a0e58SBosko Milekic if (m == NULL) { 1088099a0e58SBosko Milekic m_freem(top); 1089099a0e58SBosko Milekic return NULL; 1090099a0e58SBosko Milekic } 1091099a0e58SBosko Milekic } 1092f5eece3fSBosko Milekic if (off) { 1093f5eece3fSBosko Milekic m->m_data += off; 1094f5eece3fSBosko Milekic len -= off; 1095f5eece3fSBosko Milekic off = 0; 1096f5eece3fSBosko Milekic } 1097f5eece3fSBosko Milekic m->m_len = len = min(totlen, len); 1098df8bae1dSRodney W. 
Grimes if (copy) 1099bd395ae8SBosko Milekic copy(buf, mtod(m, caddr_t), (u_int)len); 1100df8bae1dSRodney W. Grimes else 1101bd395ae8SBosko Milekic bcopy(buf, mtod(m, caddr_t), (u_int)len); 1102f5eece3fSBosko Milekic buf += len; 1103df8bae1dSRodney W. Grimes *mp = m; 1104df8bae1dSRodney W. Grimes mp = &m->m_next; 1105df8bae1dSRodney W. Grimes totlen -= len; 1106df8bae1dSRodney W. Grimes } 1107df8bae1dSRodney W. Grimes return (top); 1108df8bae1dSRodney W. Grimes } 1109c5789ba3SPoul-Henning Kamp 1110c5789ba3SPoul-Henning Kamp /* 1111c5789ba3SPoul-Henning Kamp * Copy data from a buffer back into the indicated mbuf chain, 1112c5789ba3SPoul-Henning Kamp * starting "off" bytes from the beginning, extending the mbuf 1113c5789ba3SPoul-Henning Kamp * chain if necessary. 1114c5789ba3SPoul-Henning Kamp */ 1115c5789ba3SPoul-Henning Kamp void 111624665342SLuigi Rizzo m_copyback(struct mbuf *m0, int off, int len, c_caddr_t cp) 1117c5789ba3SPoul-Henning Kamp { 1118122a814aSBosko Milekic int mlen; 1119122a814aSBosko Milekic struct mbuf *m = m0, *n; 1120c5789ba3SPoul-Henning Kamp int totlen = 0; 1121c5789ba3SPoul-Henning Kamp 1122122a814aSBosko Milekic if (m0 == NULL) 1123c5789ba3SPoul-Henning Kamp return; 1124c5789ba3SPoul-Henning Kamp while (off > (mlen = m->m_len)) { 1125c5789ba3SPoul-Henning Kamp off -= mlen; 1126c5789ba3SPoul-Henning Kamp totlen += mlen; 1127122a814aSBosko Milekic if (m->m_next == NULL) { 1128eb1b1807SGleb Smirnoff n = m_get(M_NOWAIT, m->m_type); 1129122a814aSBosko Milekic if (n == NULL) 1130c5789ba3SPoul-Henning Kamp goto out; 1131099a0e58SBosko Milekic bzero(mtod(n, caddr_t), MLEN); 1132c5789ba3SPoul-Henning Kamp n->m_len = min(MLEN, len + off); 1133c5789ba3SPoul-Henning Kamp m->m_next = n; 1134c5789ba3SPoul-Henning Kamp } 1135c5789ba3SPoul-Henning Kamp m = m->m_next; 1136c5789ba3SPoul-Henning Kamp } 1137c5789ba3SPoul-Henning Kamp while (len > 0) { 1138129c5c81SAlexander Motin if (m->m_next == NULL && (len > m->m_len - off)) { 1139129c5c81SAlexander Motin m->m_len += min(len - (m->m_len - off), 1140129c5c81SAlexander Motin M_TRAILINGSPACE(m)); 1141129c5c81SAlexander Motin } 1142c5789ba3SPoul-Henning Kamp mlen = min (m->m_len - off, len); 1143bd395ae8SBosko Milekic bcopy(cp, off + mtod(m, caddr_t), (u_int)mlen); 1144c5789ba3SPoul-Henning Kamp cp += mlen; 1145c5789ba3SPoul-Henning Kamp len -= mlen; 1146c5789ba3SPoul-Henning Kamp mlen += off; 1147c5789ba3SPoul-Henning Kamp off = 0; 1148c5789ba3SPoul-Henning Kamp totlen += mlen; 1149c5789ba3SPoul-Henning Kamp if (len == 0) 1150c5789ba3SPoul-Henning Kamp break; 1151122a814aSBosko Milekic if (m->m_next == NULL) { 1152eb1b1807SGleb Smirnoff n = m_get(M_NOWAIT, m->m_type); 1153122a814aSBosko Milekic if (n == NULL) 1154c5789ba3SPoul-Henning Kamp break; 1155c5789ba3SPoul-Henning Kamp n->m_len = min(MLEN, len); 1156c5789ba3SPoul-Henning Kamp m->m_next = n; 1157c5789ba3SPoul-Henning Kamp } 1158c5789ba3SPoul-Henning Kamp m = m->m_next; 1159c5789ba3SPoul-Henning Kamp } 1160c5789ba3SPoul-Henning Kamp out: if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen)) 1161c5789ba3SPoul-Henning Kamp m->m_pkthdr.len = totlen; 1162c5789ba3SPoul-Henning Kamp } 1163ce4a64f7SPoul-Henning Kamp 116437621fd5SBruce M Simpson /* 11654873d175SSam Leffler * Append the specified data to the indicated mbuf chain, 11664873d175SSam Leffler * Extend the mbuf chain if the new data does not fit in 11674873d175SSam Leffler * existing space. 11684873d175SSam Leffler * 11694873d175SSam Leffler * Return 1 if able to complete the job; otherwise 0. 
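 *
 * Illustrative sketch (hypothetical caller, not from this file): appending
 * a small trailer structure to a packet and handling allocation failure:
 *
 *	if (!m_append(m0, sizeof(trailer), (c_caddr_t)&trailer)) {
 *		m_freem(m0);
 *		return (ENOBUFS);
 *	}
 *
 * Note that m_pkthdr.len is grown by the number of bytes actually copied,
 * whether or not the append completed.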
11704873d175SSam Leffler */ 11714873d175SSam Leffler int 11724873d175SSam Leffler m_append(struct mbuf *m0, int len, c_caddr_t cp) 11734873d175SSam Leffler { 11744873d175SSam Leffler struct mbuf *m, *n; 11754873d175SSam Leffler int remainder, space; 11764873d175SSam Leffler 11774873d175SSam Leffler for (m = m0; m->m_next != NULL; m = m->m_next) 11784873d175SSam Leffler ; 11794873d175SSam Leffler remainder = len; 11804873d175SSam Leffler space = M_TRAILINGSPACE(m); 11814873d175SSam Leffler if (space > 0) { 11824873d175SSam Leffler /* 11834873d175SSam Leffler * Copy into available space. 11844873d175SSam Leffler */ 11854873d175SSam Leffler if (space > remainder) 11864873d175SSam Leffler space = remainder; 11874873d175SSam Leffler bcopy(cp, mtod(m, caddr_t) + m->m_len, space); 11884873d175SSam Leffler m->m_len += space; 11894873d175SSam Leffler cp += space, remainder -= space; 11904873d175SSam Leffler } 11914873d175SSam Leffler while (remainder > 0) { 11924873d175SSam Leffler /* 11934873d175SSam Leffler * Allocate a new mbuf; could check space 11944873d175SSam Leffler * and allocate a cluster instead. 11954873d175SSam Leffler */ 1196eb1b1807SGleb Smirnoff n = m_get(M_NOWAIT, m->m_type); 11974873d175SSam Leffler if (n == NULL) 11984873d175SSam Leffler break; 11994873d175SSam Leffler n->m_len = min(MLEN, remainder); 1200a37c415eSSam Leffler bcopy(cp, mtod(n, caddr_t), n->m_len); 1201a37c415eSSam Leffler cp += n->m_len, remainder -= n->m_len; 12024873d175SSam Leffler m->m_next = n; 12034873d175SSam Leffler m = n; 12044873d175SSam Leffler } 12054873d175SSam Leffler if (m0->m_flags & M_PKTHDR) 12064873d175SSam Leffler m0->m_pkthdr.len += len - remainder; 12074873d175SSam Leffler return (remainder == 0); 12084873d175SSam Leffler } 12094873d175SSam Leffler 12104873d175SSam Leffler /* 121137621fd5SBruce M Simpson * Apply function f to the data in an mbuf chain starting "off" bytes from 121237621fd5SBruce M Simpson * the beginning, continuing for "len" bytes. 
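 *
 * The callback is invoked as (*f)(arg, data, count) once for each mbuf
 * that overlaps the range; a non-zero return value aborts the walk and
 * is passed back to the caller, while 0 means success.
 *
 * Illustrative sketch (hypothetical checksum helper, not part of this
 * file):
 *
 *	static int
 *	sum_bytes(void *arg, void *data, u_int count)
 *	{
 *		uint32_t *sum = arg;
 *		u_char *p = data;
 *
 *		while (count-- > 0)
 *			*sum += *p++;
 *		return (0);
 *	}
 *
 *	uint32_t sum = 0;
 *	error = m_apply(m, off, len, sum_bytes, &sum);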
121337621fd5SBruce M Simpson */ 121437621fd5SBruce M Simpson int 121537621fd5SBruce M Simpson m_apply(struct mbuf *m, int off, int len, 121654065297SBruce M Simpson int (*f)(void *, void *, u_int), void *arg) 121737621fd5SBruce M Simpson { 121854065297SBruce M Simpson u_int count; 121937621fd5SBruce M Simpson int rval; 122037621fd5SBruce M Simpson 122137621fd5SBruce M Simpson KASSERT(off >= 0, ("m_apply, negative off %d", off)); 122237621fd5SBruce M Simpson KASSERT(len >= 0, ("m_apply, negative len %d", len)); 122337621fd5SBruce M Simpson while (off > 0) { 122437621fd5SBruce M Simpson KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain")); 122537621fd5SBruce M Simpson if (off < m->m_len) 122637621fd5SBruce M Simpson break; 122737621fd5SBruce M Simpson off -= m->m_len; 122837621fd5SBruce M Simpson m = m->m_next; 122937621fd5SBruce M Simpson } 123037621fd5SBruce M Simpson while (len > 0) { 123137621fd5SBruce M Simpson KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain")); 123237621fd5SBruce M Simpson count = min(m->m_len - off, len); 123337621fd5SBruce M Simpson rval = (*f)(arg, mtod(m, caddr_t) + off, count); 123437621fd5SBruce M Simpson if (rval) 123537621fd5SBruce M Simpson return (rval); 123637621fd5SBruce M Simpson len -= count; 123737621fd5SBruce M Simpson off = 0; 123837621fd5SBruce M Simpson m = m->m_next; 123937621fd5SBruce M Simpson } 124037621fd5SBruce M Simpson return (0); 124137621fd5SBruce M Simpson } 124237621fd5SBruce M Simpson 124337621fd5SBruce M Simpson /* 124437621fd5SBruce M Simpson * Return a pointer to mbuf/offset of location in mbuf chain. 124537621fd5SBruce M Simpson */ 124637621fd5SBruce M Simpson struct mbuf * 124737621fd5SBruce M Simpson m_getptr(struct mbuf *m, int loc, int *off) 124837621fd5SBruce M Simpson { 124937621fd5SBruce M Simpson 125037621fd5SBruce M Simpson while (loc >= 0) { 125154065297SBruce M Simpson /* Normal end of search. */ 125237621fd5SBruce M Simpson if (m->m_len > loc) { 125337621fd5SBruce M Simpson *off = loc; 125437621fd5SBruce M Simpson return (m); 125537621fd5SBruce M Simpson } else { 125637621fd5SBruce M Simpson loc -= m->m_len; 125737621fd5SBruce M Simpson if (m->m_next == NULL) { 125837621fd5SBruce M Simpson if (loc == 0) { 125954065297SBruce M Simpson /* Point at the end of valid data. */ 126037621fd5SBruce M Simpson *off = m->m_len; 126137621fd5SBruce M Simpson return (m); 126254065297SBruce M Simpson } 126337621fd5SBruce M Simpson return (NULL); 126454065297SBruce M Simpson } 126537621fd5SBruce M Simpson m = m->m_next; 126637621fd5SBruce M Simpson } 126737621fd5SBruce M Simpson } 126837621fd5SBruce M Simpson return (NULL); 126937621fd5SBruce M Simpson } 127037621fd5SBruce M Simpson 1271ce4a64f7SPoul-Henning Kamp void 12727b125090SJohn-Mark Gurney m_print(const struct mbuf *m, int maxlen) 1273ce4a64f7SPoul-Henning Kamp { 1274ce4a64f7SPoul-Henning Kamp int len; 12757b125090SJohn-Mark Gurney int pdata; 12766357e7b5SEivind Eklund const struct mbuf *m2; 1277ce4a64f7SPoul-Henning Kamp 12787e949c46SKenneth D. Merry if (m == NULL) { 12797e949c46SKenneth D. Merry printf("mbuf: %p\n", m); 12807e949c46SKenneth D. Merry return; 12817e949c46SKenneth D. Merry } 12827e949c46SKenneth D. 
Merry 12837b125090SJohn-Mark Gurney if (m->m_flags & M_PKTHDR) 1284ce4a64f7SPoul-Henning Kamp len = m->m_pkthdr.len; 12857b125090SJohn-Mark Gurney else 12867b125090SJohn-Mark Gurney len = -1; 1287ce4a64f7SPoul-Henning Kamp m2 = m; 12887b125090SJohn-Mark Gurney while (m2 != NULL && (len == -1 || len)) { 12897b125090SJohn-Mark Gurney pdata = m2->m_len; 12907b125090SJohn-Mark Gurney if (maxlen != -1 && pdata > maxlen) 12917b125090SJohn-Mark Gurney pdata = maxlen; 12927b125090SJohn-Mark Gurney printf("mbuf: %p len: %d, next: %p, %b%s", m2, m2->m_len, 12937b125090SJohn-Mark Gurney m2->m_next, m2->m_flags, "\20\20freelist\17skipfw" 12947b125090SJohn-Mark Gurney "\11proto5\10proto4\7proto3\6proto2\5proto1\4rdonly" 12957b125090SJohn-Mark Gurney "\3eor\2pkthdr\1ext", pdata ? "" : "\n"); 12967b125090SJohn-Mark Gurney if (pdata) 129745e0d0aaSJohn-Mark Gurney printf(", %*D\n", pdata, (u_char *)m2->m_data, "-"); 12987b125090SJohn-Mark Gurney if (len != -1) 1299ce4a64f7SPoul-Henning Kamp len -= m2->m_len; 1300ce4a64f7SPoul-Henning Kamp m2 = m2->m_next; 1301ce4a64f7SPoul-Henning Kamp } 13027b125090SJohn-Mark Gurney if (len > 0) 13037b125090SJohn-Mark Gurney printf("%d bytes unaccounted for.\n", len); 1304ce4a64f7SPoul-Henning Kamp return; 1305ce4a64f7SPoul-Henning Kamp } 13063f2e06c5SPoul-Henning Kamp 1307bd395ae8SBosko Milekic u_int 13083f2e06c5SPoul-Henning Kamp m_fixhdr(struct mbuf *m0) 13093f2e06c5SPoul-Henning Kamp { 1310bd395ae8SBosko Milekic u_int len; 13113f2e06c5SPoul-Henning Kamp 1312ac6e585dSPoul-Henning Kamp len = m_length(m0, NULL); 13133f2e06c5SPoul-Henning Kamp m0->m_pkthdr.len = len; 1314ac6e585dSPoul-Henning Kamp return (len); 1315ac6e585dSPoul-Henning Kamp } 1316ac6e585dSPoul-Henning Kamp 1317bd395ae8SBosko Milekic u_int 1318ac6e585dSPoul-Henning Kamp m_length(struct mbuf *m0, struct mbuf **last) 1319ac6e585dSPoul-Henning Kamp { 1320ac6e585dSPoul-Henning Kamp struct mbuf *m; 1321bd395ae8SBosko Milekic u_int len; 1322ac6e585dSPoul-Henning Kamp 1323ac6e585dSPoul-Henning Kamp len = 0; 1324ac6e585dSPoul-Henning Kamp for (m = m0; m != NULL; m = m->m_next) { 1325ac6e585dSPoul-Henning Kamp len += m->m_len; 1326ac6e585dSPoul-Henning Kamp if (m->m_next == NULL) 1327ac6e585dSPoul-Henning Kamp break; 1328ac6e585dSPoul-Henning Kamp } 1329ac6e585dSPoul-Henning Kamp if (last != NULL) 1330ac6e585dSPoul-Henning Kamp *last = m; 1331ac6e585dSPoul-Henning Kamp return (len); 13323f2e06c5SPoul-Henning Kamp } 133355e9f80dSMike Silbersack 133455e9f80dSMike Silbersack /* 133555e9f80dSMike Silbersack * Defragment a mbuf chain, returning the shortest possible 133655e9f80dSMike Silbersack * chain of mbufs and clusters. If allocation fails and 133755e9f80dSMike Silbersack * this cannot be completed, NULL will be returned, but 133855e9f80dSMike Silbersack * the passed in chain will be unchanged. Upon success, 133955e9f80dSMike Silbersack * the original chain will be freed, and the new chain 134055e9f80dSMike Silbersack * will be returned. 134155e9f80dSMike Silbersack * 134255e9f80dSMike Silbersack * If a non-packet header is passed in, the original 134355e9f80dSMike Silbersack * mbuf (chain?) will be returned unharmed. 
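 *
 * Illustrative driver-style usage (hypothetical caller, not taken from
 * this file); on failure the caller still owns the original chain:
 *
 *	struct mbuf *n;
 *
 *	n = m_defrag(m, M_NOWAIT);
 *	if (n == NULL) {
 *		m_freem(m);
 *		return (ENOBUFS);
 *	}
 *	m = n;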
134455e9f80dSMike Silbersack */ 134555e9f80dSMike Silbersack struct mbuf * 134655e9f80dSMike Silbersack m_defrag(struct mbuf *m0, int how) 134755e9f80dSMike Silbersack { 134855e9f80dSMike Silbersack struct mbuf *m_new = NULL, *m_final = NULL; 134955e9f80dSMike Silbersack int progress = 0, length; 135055e9f80dSMike Silbersack 1351063d8114SAlfred Perlstein MBUF_CHECKSLEEP(how); 135255e9f80dSMike Silbersack if (!(m0->m_flags & M_PKTHDR)) 135355e9f80dSMike Silbersack return (m0); 135455e9f80dSMike Silbersack 1355f8bf8e39SMike Silbersack m_fixhdr(m0); /* Needed sanity check */ 1356f8bf8e39SMike Silbersack 1357352d050eSMike Silbersack #ifdef MBUF_STRESS_TEST 1358352d050eSMike Silbersack if (m_defragrandomfailures) { 1359352d050eSMike Silbersack int temp = arc4random() & 0xff; 1360352d050eSMike Silbersack if (temp == 0xba) 1361352d050eSMike Silbersack goto nospace; 1362352d050eSMike Silbersack } 1363352d050eSMike Silbersack #endif 136455e9f80dSMike Silbersack 136555e9f80dSMike Silbersack if (m0->m_pkthdr.len > MHLEN) 136655e9f80dSMike Silbersack m_final = m_getcl(how, MT_DATA, M_PKTHDR); 136755e9f80dSMike Silbersack else 136855e9f80dSMike Silbersack m_final = m_gethdr(how, MT_DATA); 136955e9f80dSMike Silbersack 137055e9f80dSMike Silbersack if (m_final == NULL) 137155e9f80dSMike Silbersack goto nospace; 137255e9f80dSMike Silbersack 1373a89ec05eSPeter Wemm if (m_dup_pkthdr(m_final, m0, how) == 0) 137455e9f80dSMike Silbersack goto nospace; 137555e9f80dSMike Silbersack 137655e9f80dSMike Silbersack m_new = m_final; 137755e9f80dSMike Silbersack 137855e9f80dSMike Silbersack while (progress < m0->m_pkthdr.len) { 137955e9f80dSMike Silbersack length = m0->m_pkthdr.len - progress; 138055e9f80dSMike Silbersack if (length > MCLBYTES) 138155e9f80dSMike Silbersack length = MCLBYTES; 138255e9f80dSMike Silbersack 138355e9f80dSMike Silbersack if (m_new == NULL) { 138455e9f80dSMike Silbersack if (length > MLEN) 138555e9f80dSMike Silbersack m_new = m_getcl(how, MT_DATA, 0); 138655e9f80dSMike Silbersack else 138755e9f80dSMike Silbersack m_new = m_get(how, MT_DATA); 138855e9f80dSMike Silbersack if (m_new == NULL) 138955e9f80dSMike Silbersack goto nospace; 139055e9f80dSMike Silbersack } 139155e9f80dSMike Silbersack 139255e9f80dSMike Silbersack m_copydata(m0, progress, length, mtod(m_new, caddr_t)); 139355e9f80dSMike Silbersack progress += length; 139455e9f80dSMike Silbersack m_new->m_len = length; 139555e9f80dSMike Silbersack if (m_new != m_final) 139655e9f80dSMike Silbersack m_cat(m_final, m_new); 139755e9f80dSMike Silbersack m_new = NULL; 139855e9f80dSMike Silbersack } 139951710a45SMike Silbersack #ifdef MBUF_STRESS_TEST 140055e9f80dSMike Silbersack if (m0->m_next == NULL) 140155e9f80dSMike Silbersack m_defraguseless++; 140251710a45SMike Silbersack #endif 140355e9f80dSMike Silbersack m_freem(m0); 140455e9f80dSMike Silbersack m0 = m_final; 140551710a45SMike Silbersack #ifdef MBUF_STRESS_TEST 140655e9f80dSMike Silbersack m_defragpackets++; 140755e9f80dSMike Silbersack m_defragbytes += m0->m_pkthdr.len; 140851710a45SMike Silbersack #endif 140955e9f80dSMike Silbersack return (m0); 141055e9f80dSMike Silbersack nospace: 141151710a45SMike Silbersack #ifdef MBUF_STRESS_TEST 141255e9f80dSMike Silbersack m_defragfailure++; 141351710a45SMike Silbersack #endif 141455e9f80dSMike Silbersack if (m_final) 141555e9f80dSMike Silbersack m_freem(m_final); 141655e9f80dSMike Silbersack return (NULL); 141755e9f80dSMike Silbersack } 14183390d476SMike Silbersack 1419eeb76a18SSam Leffler /* 142082334850SJohn Baldwin * Return the number of 
fragments an mbuf will use. This is usually 142182334850SJohn Baldwin * used as a proxy for the number of scatter/gather elements needed by 142282334850SJohn Baldwin * a DMA engine to access an mbuf. In general mapped mbufs are 142382334850SJohn Baldwin * assumed to be backed by physically contiguous buffers that only 142482334850SJohn Baldwin * need a single fragment. Unmapped mbufs, on the other hand, can 142582334850SJohn Baldwin * span disjoint physical pages. 142682334850SJohn Baldwin */ 142782334850SJohn Baldwin static int 142882334850SJohn Baldwin frags_per_mbuf(struct mbuf *m) 142982334850SJohn Baldwin { 143082334850SJohn Baldwin struct mbuf_ext_pgs *ext_pgs; 143182334850SJohn Baldwin int frags; 143282334850SJohn Baldwin 143382334850SJohn Baldwin if ((m->m_flags & M_NOMAP) == 0) 143482334850SJohn Baldwin return (1); 143582334850SJohn Baldwin 143682334850SJohn Baldwin /* 143782334850SJohn Baldwin * The header and trailer are counted as a single fragment 143882334850SJohn Baldwin * each when present. 143982334850SJohn Baldwin * 144082334850SJohn Baldwin * XXX: This overestimates the number of fragments by assuming 144182334850SJohn Baldwin * all the backing physical pages are disjoint. 144282334850SJohn Baldwin */ 144323feb563SAndrew Gallatin ext_pgs = &m->m_ext_pgs; 144482334850SJohn Baldwin frags = 0; 144582334850SJohn Baldwin if (ext_pgs->hdr_len != 0) 144682334850SJohn Baldwin frags++; 144782334850SJohn Baldwin frags += ext_pgs->npgs; 144882334850SJohn Baldwin if (ext_pgs->trail_len != 0) 144982334850SJohn Baldwin frags++; 145082334850SJohn Baldwin 145182334850SJohn Baldwin return (frags); 145282334850SJohn Baldwin } 145382334850SJohn Baldwin 145482334850SJohn Baldwin /* 1455eeb76a18SSam Leffler * Defragment an mbuf chain, returning at most maxfrags separate 1456eeb76a18SSam Leffler * mbufs+clusters. If this is not possible NULL is returned and 145728323addSBryan Drewery * the original mbuf chain is left in its present (potentially 1458eeb76a18SSam Leffler * modified) state. We use two techniques: collapsing consecutive 1459eeb76a18SSam Leffler * mbufs and replacing consecutive mbufs by a cluster. 1460eeb76a18SSam Leffler * 1461eeb76a18SSam Leffler * NB: this should really be named m_defrag but that name is taken 1462eeb76a18SSam Leffler */ 1463eeb76a18SSam Leffler struct mbuf * 1464eeb76a18SSam Leffler m_collapse(struct mbuf *m0, int how, int maxfrags) 1465eeb76a18SSam Leffler { 1466eeb76a18SSam Leffler struct mbuf *m, *n, *n2, **prev; 1467eeb76a18SSam Leffler u_int curfrags; 1468eeb76a18SSam Leffler 1469eeb76a18SSam Leffler /* 1470eeb76a18SSam Leffler * Calculate the current number of frags. 1471eeb76a18SSam Leffler */ 1472eeb76a18SSam Leffler curfrags = 0; 1473eeb76a18SSam Leffler for (m = m0; m != NULL; m = m->m_next) 147482334850SJohn Baldwin curfrags += frags_per_mbuf(m); 1475eeb76a18SSam Leffler /* 1476eeb76a18SSam Leffler * First, try to collapse mbufs. Note that we always collapse 1477eeb76a18SSam Leffler * towards the front so we don't need to deal with moving the 1478eeb76a18SSam Leffler * pkthdr. This may be suboptimal if the first mbuf has much 1479eeb76a18SSam Leffler * less data than the following. 
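 * Fragment accounting is done with frags_per_mbuf() above, so unmapped
 * (M_NOMAP) mbufs are charged one fragment per backing page rather than
 * one per mbuf.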
1480eeb76a18SSam Leffler */ 1481eeb76a18SSam Leffler m = m0; 1482eeb76a18SSam Leffler again: 1483eeb76a18SSam Leffler for (;;) { 1484eeb76a18SSam Leffler n = m->m_next; 1485eeb76a18SSam Leffler if (n == NULL) 1486eeb76a18SSam Leffler break; 148714d7c5b1SAndre Oppermann if (M_WRITABLE(m) && 1488eeb76a18SSam Leffler n->m_len < M_TRAILINGSPACE(m)) { 148982334850SJohn Baldwin m_copydata(n, 0, n->m_len, 149082334850SJohn Baldwin mtod(m, char *) + m->m_len); 1491eeb76a18SSam Leffler m->m_len += n->m_len; 1492eeb76a18SSam Leffler m->m_next = n->m_next; 149382334850SJohn Baldwin curfrags -= frags_per_mbuf(n); 1494eeb76a18SSam Leffler m_free(n); 149582334850SJohn Baldwin if (curfrags <= maxfrags) 1496eeb76a18SSam Leffler return m0; 1497eeb76a18SSam Leffler } else 1498eeb76a18SSam Leffler m = n; 1499eeb76a18SSam Leffler } 1500eeb76a18SSam Leffler KASSERT(maxfrags > 1, 1501eeb76a18SSam Leffler ("maxfrags %u, but normal collapse failed", maxfrags)); 1502eeb76a18SSam Leffler /* 1503eeb76a18SSam Leffler * Collapse consecutive mbufs to a cluster. 1504eeb76a18SSam Leffler */ 1505eeb76a18SSam Leffler prev = &m0->m_next; /* NB: not the first mbuf */ 1506eeb76a18SSam Leffler while ((n = *prev) != NULL) { 1507eeb76a18SSam Leffler if ((n2 = n->m_next) != NULL && 1508eeb76a18SSam Leffler n->m_len + n2->m_len < MCLBYTES) { 1509eeb76a18SSam Leffler m = m_getcl(how, MT_DATA, 0); 1510eeb76a18SSam Leffler if (m == NULL) 1511eeb76a18SSam Leffler goto bad; 151282334850SJohn Baldwin m_copydata(n, 0, n->m_len, mtod(m, char *)); 151382334850SJohn Baldwin m_copydata(n2, 0, n2->m_len, 151482334850SJohn Baldwin mtod(m, char *) + n->m_len); 1515eeb76a18SSam Leffler m->m_len = n->m_len + n2->m_len; 1516eeb76a18SSam Leffler m->m_next = n2->m_next; 1517eeb76a18SSam Leffler *prev = m; 151882334850SJohn Baldwin curfrags += 1; /* For the new cluster */ 151982334850SJohn Baldwin curfrags -= frags_per_mbuf(n); 152082334850SJohn Baldwin curfrags -= frags_per_mbuf(n2); 1521eeb76a18SSam Leffler m_free(n); 1522eeb76a18SSam Leffler m_free(n2); 152382334850SJohn Baldwin if (curfrags <= maxfrags) 1524eeb76a18SSam Leffler return m0; 1525eeb76a18SSam Leffler /* 1526eeb76a18SSam Leffler * Still not there, try the normal collapse 1527eeb76a18SSam Leffler * again before we allocate another cluster. 1528eeb76a18SSam Leffler */ 1529eeb76a18SSam Leffler goto again; 1530eeb76a18SSam Leffler } 1531eeb76a18SSam Leffler prev = &n->m_next; 1532eeb76a18SSam Leffler } 1533eeb76a18SSam Leffler /* 1534eeb76a18SSam Leffler * No place where we can collapse to a cluster; punt. 1535eeb76a18SSam Leffler * This can occur if, for example, you request 2 frags 1536eeb76a18SSam Leffler * but the packet requires that both be clusters (we 1537eeb76a18SSam Leffler * never reallocate the first mbuf to avoid moving the 1538eeb76a18SSam Leffler * packet header). 1539eeb76a18SSam Leffler */ 1540eeb76a18SSam Leffler bad: 1541eeb76a18SSam Leffler return NULL; 1542eeb76a18SSam Leffler } 1543eeb76a18SSam Leffler 15443390d476SMike Silbersack #ifdef MBUF_STRESS_TEST 15453390d476SMike Silbersack 15463390d476SMike Silbersack /* 15473390d476SMike Silbersack * Fragment an mbuf chain. There's no reason you'd ever want to do 15483390d476SMike Silbersack * this in normal usage, but it's great for stress testing various 15493390d476SMike Silbersack * mbuf consumers. 15503390d476SMike Silbersack * 15513390d476SMike Silbersack * If fragmentation is not possible, the original chain will be 15523390d476SMike Silbersack * returned. 
15533390d476SMike Silbersack * 15543390d476SMike Silbersack * Possible length values: 15553390d476SMike Silbersack * 0 no fragmentation will occur 15563390d476SMike Silbersack * > 0 each fragment will be of the specified length 15573390d476SMike Silbersack * -1 each fragment will be the same random value in length 15583390d476SMike Silbersack * -2 each fragment's length will be entirely random 15593390d476SMike Silbersack * (Random values range from 1 to 256) 15603390d476SMike Silbersack */ 15613390d476SMike Silbersack struct mbuf * 15623390d476SMike Silbersack m_fragment(struct mbuf *m0, int how, int length) 15633390d476SMike Silbersack { 156466234298SAndriy Voskoboinyk struct mbuf *m_first, *m_last; 156566234298SAndriy Voskoboinyk int divisor = 255, progress = 0, fraglen; 15663390d476SMike Silbersack 15673390d476SMike Silbersack if (!(m0->m_flags & M_PKTHDR)) 15683390d476SMike Silbersack return (m0); 15693390d476SMike Silbersack 157066234298SAndriy Voskoboinyk if (length == 0 || length < -2) 15713390d476SMike Silbersack return (m0); 157266234298SAndriy Voskoboinyk if (length > MCLBYTES) 157366234298SAndriy Voskoboinyk length = MCLBYTES; 157466234298SAndriy Voskoboinyk if (length < 0 && divisor > MCLBYTES) 157566234298SAndriy Voskoboinyk divisor = MCLBYTES; 157666234298SAndriy Voskoboinyk if (length == -1) 157766234298SAndriy Voskoboinyk length = 1 + (arc4random() % divisor); 157866234298SAndriy Voskoboinyk if (length > 0) 157966234298SAndriy Voskoboinyk fraglen = length; 15803390d476SMike Silbersack 15813390d476SMike Silbersack m_fixhdr(m0); /* Needed sanity check */ 15823390d476SMike Silbersack 158366234298SAndriy Voskoboinyk m_first = m_getcl(how, MT_DATA, M_PKTHDR); 158466234298SAndriy Voskoboinyk if (m_first == NULL) 15853390d476SMike Silbersack goto nospace; 15863390d476SMike Silbersack 158766234298SAndriy Voskoboinyk if (m_dup_pkthdr(m_first, m0, how) == 0) 15883390d476SMike Silbersack goto nospace; 15893390d476SMike Silbersack 159066234298SAndriy Voskoboinyk m_last = m_first; 15913390d476SMike Silbersack 15923390d476SMike Silbersack while (progress < m0->m_pkthdr.len) { 159366234298SAndriy Voskoboinyk if (length == -2) 159466234298SAndriy Voskoboinyk fraglen = 1 + (arc4random() % divisor); 15953390d476SMike Silbersack if (fraglen > m0->m_pkthdr.len - progress) 15963390d476SMike Silbersack fraglen = m0->m_pkthdr.len - progress; 15973390d476SMike Silbersack 159866234298SAndriy Voskoboinyk if (progress != 0) { 159966234298SAndriy Voskoboinyk struct mbuf *m_new = m_getcl(how, MT_DATA, 0); 16003390d476SMike Silbersack if (m_new == NULL) 16013390d476SMike Silbersack goto nospace; 160266234298SAndriy Voskoboinyk 160366234298SAndriy Voskoboinyk m_last->m_next = m_new; 160466234298SAndriy Voskoboinyk m_last = m_new; 16053390d476SMike Silbersack } 16063390d476SMike Silbersack 160766234298SAndriy Voskoboinyk m_copydata(m0, progress, fraglen, mtod(m_last, caddr_t)); 16083390d476SMike Silbersack progress += fraglen; 160966234298SAndriy Voskoboinyk m_last->m_len = fraglen; 16103390d476SMike Silbersack } 16113390d476SMike Silbersack m_freem(m0); 161266234298SAndriy Voskoboinyk m0 = m_first; 16133390d476SMike Silbersack return (m0); 16143390d476SMike Silbersack nospace: 161566234298SAndriy Voskoboinyk if (m_first) 161666234298SAndriy Voskoboinyk m_freem(m_first); 16173390d476SMike Silbersack /* Return the original chain on failure */ 16183390d476SMike Silbersack return (m0); 16193390d476SMike Silbersack } 16203390d476SMike Silbersack 16213390d476SMike Silbersack #endif 1622beb699c7SMike 
Silbersack 16235e20f43dSAndre Oppermann /* 162482334850SJohn Baldwin * Free pages from mbuf_ext_pgs, assuming they were allocated via 162582334850SJohn Baldwin * vm_page_alloc() and aren't associated with any object. Complement 162682334850SJohn Baldwin * to allocator from m_uiotombuf_nomap(). 162782334850SJohn Baldwin */ 162882334850SJohn Baldwin void 162982334850SJohn Baldwin mb_free_mext_pgs(struct mbuf *m) 163082334850SJohn Baldwin { 163182334850SJohn Baldwin struct mbuf_ext_pgs *ext_pgs; 163282334850SJohn Baldwin vm_page_t pg; 163382334850SJohn Baldwin 163482334850SJohn Baldwin MBUF_EXT_PGS_ASSERT(m); 163523feb563SAndrew Gallatin ext_pgs = &m->m_ext_pgs; 163682334850SJohn Baldwin for (int i = 0; i < ext_pgs->npgs; i++) { 16370c103266SGleb Smirnoff pg = PHYS_TO_VM_PAGE(m->m_epg_pa[i]); 16389fb7c918SMark Johnston vm_page_unwire_noq(pg); 16399fb7c918SMark Johnston vm_page_free(pg); 164082334850SJohn Baldwin } 164182334850SJohn Baldwin } 164282334850SJohn Baldwin 164382334850SJohn Baldwin static struct mbuf * 164482334850SJohn Baldwin m_uiotombuf_nomap(struct uio *uio, int how, int len, int maxseg, int flags) 164582334850SJohn Baldwin { 164682334850SJohn Baldwin struct mbuf *m, *mb, *prev; 164782334850SJohn Baldwin struct mbuf_ext_pgs *pgs; 164882334850SJohn Baldwin vm_page_t pg_array[MBUF_PEXT_MAX_PGS]; 16499fb7c918SMark Johnston int error, length, i, needed; 165082334850SJohn Baldwin ssize_t total; 16519fb7c918SMark Johnston int pflags = malloc2vm_flags(how) | VM_ALLOC_NOOBJ | VM_ALLOC_NODUMP | 16529fb7c918SMark Johnston VM_ALLOC_WIRED; 165382334850SJohn Baldwin 165482334850SJohn Baldwin /* 165582334850SJohn Baldwin * len can be zero or an arbitrary large value bound by 165682334850SJohn Baldwin * the total data supplied by the uio. 165782334850SJohn Baldwin */ 165882334850SJohn Baldwin if (len > 0) 165982334850SJohn Baldwin total = MIN(uio->uio_resid, len); 166082334850SJohn Baldwin else 166182334850SJohn Baldwin total = uio->uio_resid; 166282334850SJohn Baldwin 166382334850SJohn Baldwin if (maxseg == 0) 166482334850SJohn Baldwin maxseg = MBUF_PEXT_MAX_PGS * PAGE_SIZE; 166582334850SJohn Baldwin 166682334850SJohn Baldwin /* 166782334850SJohn Baldwin * Allocate the pages 166882334850SJohn Baldwin */ 166982334850SJohn Baldwin m = NULL; 167023feb563SAndrew Gallatin MPASS((flags & M_PKTHDR) == 0); 167182334850SJohn Baldwin while (total > 0) { 167223feb563SAndrew Gallatin mb = mb_alloc_ext_pgs(how, mb_free_mext_pgs); 167382334850SJohn Baldwin if (mb == NULL) 167482334850SJohn Baldwin goto failed; 167582334850SJohn Baldwin if (m == NULL) 167682334850SJohn Baldwin m = mb; 167782334850SJohn Baldwin else 167882334850SJohn Baldwin prev->m_next = mb; 167982334850SJohn Baldwin prev = mb; 168023feb563SAndrew Gallatin pgs = &mb->m_ext_pgs; 16817433a5a9SGleb Smirnoff pgs->flags = EPG_FLAG_ANON; 168282334850SJohn Baldwin needed = length = MIN(maxseg, total); 168382334850SJohn Baldwin for (i = 0; needed > 0; i++, needed -= PAGE_SIZE) { 168482334850SJohn Baldwin retry_page: 168582334850SJohn Baldwin pg_array[i] = vm_page_alloc(NULL, 0, pflags); 168682334850SJohn Baldwin if (pg_array[i] == NULL) { 168782334850SJohn Baldwin if (how & M_NOWAIT) { 168882334850SJohn Baldwin goto failed; 168982334850SJohn Baldwin } else { 169082334850SJohn Baldwin vm_wait(NULL); 169182334850SJohn Baldwin goto retry_page; 169282334850SJohn Baldwin } 169382334850SJohn Baldwin } 169482334850SJohn Baldwin pg_array[i]->flags &= ~PG_ZERO; 16950c103266SGleb Smirnoff mb->m_epg_pa[i] = VM_PAGE_TO_PHYS(pg_array[i]); 169682334850SJohn 
Baldwin pgs->npgs++; 169782334850SJohn Baldwin } 169882334850SJohn Baldwin pgs->last_pg_len = length - PAGE_SIZE * (pgs->npgs - 1); 16990c103266SGleb Smirnoff MBUF_EXT_PGS_ASSERT_SANITY(mb); 170082334850SJohn Baldwin total -= length; 170182334850SJohn Baldwin error = uiomove_fromphys(pg_array, 0, length, uio); 170282334850SJohn Baldwin if (error != 0) 170382334850SJohn Baldwin goto failed; 170482334850SJohn Baldwin mb->m_len = length; 170582334850SJohn Baldwin mb->m_ext.ext_size += PAGE_SIZE * pgs->npgs; 170682334850SJohn Baldwin if (flags & M_PKTHDR) 170782334850SJohn Baldwin m->m_pkthdr.len += length; 170882334850SJohn Baldwin } 170982334850SJohn Baldwin return (m); 171082334850SJohn Baldwin 171182334850SJohn Baldwin failed: 171282334850SJohn Baldwin m_freem(m); 171382334850SJohn Baldwin return (NULL); 171482334850SJohn Baldwin } 171582334850SJohn Baldwin 171682334850SJohn Baldwin /* 17175e20f43dSAndre Oppermann * Copy the contents of uio into a properly sized mbuf chain. 17185e20f43dSAndre Oppermann */ 1719beb699c7SMike Silbersack struct mbuf * 17205e20f43dSAndre Oppermann m_uiotombuf(struct uio *uio, int how, int len, int align, int flags) 1721beb699c7SMike Silbersack { 17225e20f43dSAndre Oppermann struct mbuf *m, *mb; 1723526d0bd5SKonstantin Belousov int error, length; 1724526d0bd5SKonstantin Belousov ssize_t total; 17255e20f43dSAndre Oppermann int progress = 0; 1726beb699c7SMike Silbersack 172782334850SJohn Baldwin if (flags & M_NOMAP) 172882334850SJohn Baldwin return (m_uiotombuf_nomap(uio, how, len, align, flags)); 172982334850SJohn Baldwin 17305e20f43dSAndre Oppermann /* 17315e20f43dSAndre Oppermann * len can be zero or an arbitrary large value bound by 17325e20f43dSAndre Oppermann * the total data supplied by the uio. 17335e20f43dSAndre Oppermann */ 1734beb699c7SMike Silbersack if (len > 0) 1735f5b7359aSConrad Meyer total = (uio->uio_resid < len) ? uio->uio_resid : len; 1736beb699c7SMike Silbersack else 1737beb699c7SMike Silbersack total = uio->uio_resid; 17385e20f43dSAndre Oppermann 17395e20f43dSAndre Oppermann /* 17405e20f43dSAndre Oppermann * The smallest unit returned by m_getm2() is a single mbuf 17419128ec21SAndrew Thompson * with pkthdr. We can't align past it. 17425e20f43dSAndre Oppermann */ 174375ae2570SMaksim Yevmenkin if (align >= MHLEN) 1744beb699c7SMike Silbersack return (NULL); 17455e20f43dSAndre Oppermann 17467c32173bSAndre Oppermann /* 17477c32173bSAndre Oppermann * Give us the full allocation or nothing. 17487c32173bSAndre Oppermann * If len is zero return the smallest empty mbuf. 17497c32173bSAndre Oppermann */ 17507c32173bSAndre Oppermann m = m_getm2(NULL, max(total + align, 1), how, MT_DATA, flags); 17515e20f43dSAndre Oppermann if (m == NULL) 17525e20f43dSAndre Oppermann return (NULL); 17535e20f43dSAndre Oppermann m->m_data += align; 17545e20f43dSAndre Oppermann 17555e20f43dSAndre Oppermann /* Fill all mbufs with uio data and update header information. 
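 * The chain was sized up front by m_getm2(), so the loop below only has
 * to copy into the trailing space of each mbuf in turn.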
*/ 17565e20f43dSAndre Oppermann for (mb = m; mb != NULL; mb = mb->m_next) { 17575e20f43dSAndre Oppermann length = min(M_TRAILINGSPACE(mb), total - progress); 17585e20f43dSAndre Oppermann 17595e20f43dSAndre Oppermann error = uiomove(mtod(mb, void *), length, uio); 17605e20f43dSAndre Oppermann if (error) { 17615e20f43dSAndre Oppermann m_freem(m); 17625e20f43dSAndre Oppermann return (NULL); 17635e20f43dSAndre Oppermann } 17645e20f43dSAndre Oppermann 17655e20f43dSAndre Oppermann mb->m_len = length; 17665e20f43dSAndre Oppermann progress += length; 17675e20f43dSAndre Oppermann if (flags & M_PKTHDR) 17685e20f43dSAndre Oppermann m->m_pkthdr.len += length; 17695e20f43dSAndre Oppermann } 17705e20f43dSAndre Oppermann KASSERT(progress == total, ("%s: progress != total", __func__)); 17715e20f43dSAndre Oppermann 17725e20f43dSAndre Oppermann return (m); 1773beb699c7SMike Silbersack } 1774ab8ab90cSSam Leffler 1775ab8ab90cSSam Leffler /* 177682334850SJohn Baldwin * Copy data from an unmapped mbuf into a uio limited by len if set. 177782334850SJohn Baldwin */ 177882334850SJohn Baldwin int 177982334850SJohn Baldwin m_unmappedtouio(const struct mbuf *m, int m_off, struct uio *uio, int len) 178082334850SJohn Baldwin { 178182334850SJohn Baldwin struct mbuf_ext_pgs *ext_pgs; 178282334850SJohn Baldwin vm_page_t pg; 178382334850SJohn Baldwin int error, i, off, pglen, pgoff, seglen, segoff; 178482334850SJohn Baldwin 178582334850SJohn Baldwin MBUF_EXT_PGS_ASSERT(m); 178623feb563SAndrew Gallatin ext_pgs = __DECONST(void *, &m->m_ext_pgs); 178782334850SJohn Baldwin error = 0; 178882334850SJohn Baldwin 178982334850SJohn Baldwin /* Skip over any data removed from the front. */ 179082334850SJohn Baldwin off = mtod(m, vm_offset_t); 179182334850SJohn Baldwin 179282334850SJohn Baldwin off += m_off; 179382334850SJohn Baldwin if (ext_pgs->hdr_len != 0) { 179482334850SJohn Baldwin if (off >= ext_pgs->hdr_len) { 179582334850SJohn Baldwin off -= ext_pgs->hdr_len; 179682334850SJohn Baldwin } else { 179782334850SJohn Baldwin seglen = ext_pgs->hdr_len - off; 179882334850SJohn Baldwin segoff = off; 179982334850SJohn Baldwin seglen = min(seglen, len); 180082334850SJohn Baldwin off = 0; 180182334850SJohn Baldwin len -= seglen; 18020c103266SGleb Smirnoff error = uiomove(__DECONST(void *, 18030c103266SGleb Smirnoff &m->m_epg_hdr[segoff]), seglen, uio); 180482334850SJohn Baldwin } 180582334850SJohn Baldwin } 180682334850SJohn Baldwin pgoff = ext_pgs->first_pg_off; 180782334850SJohn Baldwin for (i = 0; i < ext_pgs->npgs && error == 0 && len > 0; i++) { 1808*c4ee38f8SGleb Smirnoff pglen = m_epg_pagelen(m, i, pgoff); 180982334850SJohn Baldwin if (off >= pglen) { 181082334850SJohn Baldwin off -= pglen; 181182334850SJohn Baldwin pgoff = 0; 181282334850SJohn Baldwin continue; 181382334850SJohn Baldwin } 181482334850SJohn Baldwin seglen = pglen - off; 181582334850SJohn Baldwin segoff = pgoff + off; 181682334850SJohn Baldwin off = 0; 181782334850SJohn Baldwin seglen = min(seglen, len); 181882334850SJohn Baldwin len -= seglen; 18190c103266SGleb Smirnoff pg = PHYS_TO_VM_PAGE(m->m_epg_pa[i]); 182082334850SJohn Baldwin error = uiomove_fromphys(&pg, segoff, seglen, uio); 182182334850SJohn Baldwin pgoff = 0; 182282334850SJohn Baldwin }; 182382334850SJohn Baldwin if (len != 0 && error == 0) { 182482334850SJohn Baldwin KASSERT((off + len) <= ext_pgs->trail_len, 182582334850SJohn Baldwin ("off + len > trail (%d + %d > %d, m_off = %d)", off, len, 182682334850SJohn Baldwin ext_pgs->trail_len, m_off)); 18270c103266SGleb Smirnoff error = 
uiomove(__DECONST(void *, &m->m_epg_trail[off]), 18280c103266SGleb Smirnoff len, uio); 182982334850SJohn Baldwin } 183082334850SJohn Baldwin return (error); 183182334850SJohn Baldwin } 183282334850SJohn Baldwin 183382334850SJohn Baldwin /* 1834bc05b2f6SAndre Oppermann * Copy an mbuf chain into a uio limited by len if set. 1835bc05b2f6SAndre Oppermann */ 1836bc05b2f6SAndre Oppermann int 183714984031SGleb Smirnoff m_mbuftouio(struct uio *uio, const struct mbuf *m, int len) 1838bc05b2f6SAndre Oppermann { 1839bc05b2f6SAndre Oppermann int error, length, total; 1840bc05b2f6SAndre Oppermann int progress = 0; 1841bc05b2f6SAndre Oppermann 1842bc05b2f6SAndre Oppermann if (len > 0) 1843bc05b2f6SAndre Oppermann total = min(uio->uio_resid, len); 1844bc05b2f6SAndre Oppermann else 1845bc05b2f6SAndre Oppermann total = uio->uio_resid; 1846bc05b2f6SAndre Oppermann 1847bc05b2f6SAndre Oppermann /* Fill the uio with data from the mbufs. */ 1848bc05b2f6SAndre Oppermann for (; m != NULL; m = m->m_next) { 1849bc05b2f6SAndre Oppermann length = min(m->m_len, total - progress); 1850bc05b2f6SAndre Oppermann 185182334850SJohn Baldwin if ((m->m_flags & M_NOMAP) != 0) 185282334850SJohn Baldwin error = m_unmappedtouio(m, 0, uio, length); 185382334850SJohn Baldwin else 1854bc05b2f6SAndre Oppermann error = uiomove(mtod(m, void *), length, uio); 1855bc05b2f6SAndre Oppermann if (error) 1856bc05b2f6SAndre Oppermann return (error); 1857bc05b2f6SAndre Oppermann 1858bc05b2f6SAndre Oppermann progress += length; 1859bc05b2f6SAndre Oppermann } 1860bc05b2f6SAndre Oppermann 1861bc05b2f6SAndre Oppermann return (0); 1862bc05b2f6SAndre Oppermann } 1863bc05b2f6SAndre Oppermann 1864bc05b2f6SAndre Oppermann /* 186547e2996eSSam Leffler * Create a writable copy of the mbuf chain. While doing this 186647e2996eSSam Leffler * we compact the chain with a goal of producing a chain with 186747e2996eSSam Leffler * at most two mbufs. The second mbuf in this chain is likely 186847e2996eSSam Leffler * to be a cluster. The primary purpose of this work is to create 186947e2996eSSam Leffler * a writable packet for encryption, compression, etc. The 187047e2996eSSam Leffler * secondary goal is to linearize the data so the data can be 187147e2996eSSam Leffler * passed to crypto hardware in the most efficient manner possible. 187247e2996eSSam Leffler */ 187347e2996eSSam Leffler struct mbuf * 187447e2996eSSam Leffler m_unshare(struct mbuf *m0, int how) 187547e2996eSSam Leffler { 187647e2996eSSam Leffler struct mbuf *m, *mprev; 187747e2996eSSam Leffler struct mbuf *n, *mfirst, *mlast; 187847e2996eSSam Leffler int len, off; 187947e2996eSSam Leffler 188047e2996eSSam Leffler mprev = NULL; 188147e2996eSSam Leffler for (m = m0; m != NULL; m = mprev->m_next) { 188247e2996eSSam Leffler /* 188347e2996eSSam Leffler * Regular mbufs are ignored unless there's a cluster 188447e2996eSSam Leffler * in front of it that we can use to coalesce. We do 188547e2996eSSam Leffler * the latter mainly so later clusters can be coalesced 188647e2996eSSam Leffler * also w/o having to handle them specially (i.e. convert 188747e2996eSSam Leffler * mbuf+cluster -> cluster). This optimization is heavily 188847e2996eSSam Leffler * influenced by the assumption that we're running over 188947e2996eSSam Leffler * Ethernet where MCLBYTES is large enough that the max 189047e2996eSSam Leffler * packet size will permit lots of coalescing into a 189147e2996eSSam Leffler * single cluster. This in turn permits efficient 189247e2996eSSam Leffler * crypto operations, especially when using hardware. 
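 * (The same trailing-space test is applied again further down for
 * read-only mbufs before falling back to copying into fresh clusters.)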
189347e2996eSSam Leffler */ 189447e2996eSSam Leffler if ((m->m_flags & M_EXT) == 0) { 189547e2996eSSam Leffler if (mprev && (mprev->m_flags & M_EXT) && 189647e2996eSSam Leffler m->m_len <= M_TRAILINGSPACE(mprev)) { 189747e2996eSSam Leffler /* XXX: this ignores mbuf types */ 189847e2996eSSam Leffler memcpy(mtod(mprev, caddr_t) + mprev->m_len, 189947e2996eSSam Leffler mtod(m, caddr_t), m->m_len); 190047e2996eSSam Leffler mprev->m_len += m->m_len; 190147e2996eSSam Leffler mprev->m_next = m->m_next; /* unlink from chain */ 190247e2996eSSam Leffler m_free(m); /* reclaim mbuf */ 190347e2996eSSam Leffler } else { 190447e2996eSSam Leffler mprev = m; 190547e2996eSSam Leffler } 190647e2996eSSam Leffler continue; 190747e2996eSSam Leffler } 190847e2996eSSam Leffler /* 190947e2996eSSam Leffler * Writable mbufs are left alone (for now). 191047e2996eSSam Leffler */ 191147e2996eSSam Leffler if (M_WRITABLE(m)) { 191247e2996eSSam Leffler mprev = m; 191347e2996eSSam Leffler continue; 191447e2996eSSam Leffler } 191547e2996eSSam Leffler 191647e2996eSSam Leffler /* 191747e2996eSSam Leffler * Not writable, replace with a copy or coalesce with 191847e2996eSSam Leffler * the previous mbuf if possible (since we have to copy 191947e2996eSSam Leffler * it anyway, we try to reduce the number of mbufs and 192047e2996eSSam Leffler * clusters so that future work is easier). 192147e2996eSSam Leffler */ 192247e2996eSSam Leffler KASSERT(m->m_flags & M_EXT, ("m_flags 0x%x", m->m_flags)); 192347e2996eSSam Leffler /* NB: we only coalesce into a cluster or larger */ 192447e2996eSSam Leffler if (mprev != NULL && (mprev->m_flags & M_EXT) && 192547e2996eSSam Leffler m->m_len <= M_TRAILINGSPACE(mprev)) { 192647e2996eSSam Leffler /* XXX: this ignores mbuf types */ 192747e2996eSSam Leffler memcpy(mtod(mprev, caddr_t) + mprev->m_len, 192847e2996eSSam Leffler mtod(m, caddr_t), m->m_len); 192947e2996eSSam Leffler mprev->m_len += m->m_len; 193047e2996eSSam Leffler mprev->m_next = m->m_next; /* unlink from chain */ 193147e2996eSSam Leffler m_free(m); /* reclaim mbuf */ 193247e2996eSSam Leffler continue; 193347e2996eSSam Leffler } 193447e2996eSSam Leffler 193547e2996eSSam Leffler /* 19365368b81eSGleb Smirnoff * Allocate new space to hold the copy and copy the data. 19375368b81eSGleb Smirnoff * We deal with jumbo mbufs (i.e. m_len > MCLBYTES) by 19385368b81eSGleb Smirnoff * splitting them into clusters. We could just malloc a 19395368b81eSGleb Smirnoff * buffer and make it external but too many device drivers 19405368b81eSGleb Smirnoff * don't know how to break up the non-contiguous memory when 19415368b81eSGleb Smirnoff * doing DMA. 
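 * The loop below therefore copies the source mbuf in MCLBYTES-sized
 * chunks, linking each freshly allocated cluster onto a temporary chain
 * headed by mfirst before splicing it back into the original chain.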
194247e2996eSSam Leffler */ 1943fddd4f62SNavdeep Parhar n = m_getcl(how, m->m_type, m->m_flags & M_COPYFLAGS); 194447e2996eSSam Leffler if (n == NULL) { 194547e2996eSSam Leffler m_freem(m0); 194647e2996eSSam Leffler return (NULL); 194747e2996eSSam Leffler } 1948e40e8705SGleb Smirnoff if (m->m_flags & M_PKTHDR) { 1949e40e8705SGleb Smirnoff KASSERT(mprev == NULL, ("%s: m0 %p, m %p has M_PKTHDR", 1950e40e8705SGleb Smirnoff __func__, m0, m)); 1951e40e8705SGleb Smirnoff m_move_pkthdr(n, m); 1952e40e8705SGleb Smirnoff } 195347e2996eSSam Leffler len = m->m_len; 195447e2996eSSam Leffler off = 0; 195547e2996eSSam Leffler mfirst = n; 195647e2996eSSam Leffler mlast = NULL; 195747e2996eSSam Leffler for (;;) { 195847e2996eSSam Leffler int cc = min(len, MCLBYTES); 195947e2996eSSam Leffler memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off, cc); 196047e2996eSSam Leffler n->m_len = cc; 196147e2996eSSam Leffler if (mlast != NULL) 196247e2996eSSam Leffler mlast->m_next = n; 196347e2996eSSam Leffler mlast = n; 196447e2996eSSam Leffler #if 0 196547e2996eSSam Leffler newipsecstat.ips_clcopied++; 196647e2996eSSam Leffler #endif 196747e2996eSSam Leffler 196847e2996eSSam Leffler len -= cc; 196947e2996eSSam Leffler if (len <= 0) 197047e2996eSSam Leffler break; 197147e2996eSSam Leffler off += cc; 197247e2996eSSam Leffler 1973fddd4f62SNavdeep Parhar n = m_getcl(how, m->m_type, m->m_flags & M_COPYFLAGS); 197447e2996eSSam Leffler if (n == NULL) { 197547e2996eSSam Leffler m_freem(mfirst); 197647e2996eSSam Leffler m_freem(m0); 197747e2996eSSam Leffler return (NULL); 197847e2996eSSam Leffler } 197947e2996eSSam Leffler } 198047e2996eSSam Leffler n->m_next = m->m_next; 198147e2996eSSam Leffler if (mprev == NULL) 198247e2996eSSam Leffler m0 = mfirst; /* new head of chain */ 198347e2996eSSam Leffler else 198447e2996eSSam Leffler mprev->m_next = mfirst; /* replace old mbuf */ 198547e2996eSSam Leffler m_free(m); /* release old mbuf */ 198647e2996eSSam Leffler mprev = mfirst; 198747e2996eSSam Leffler } 198847e2996eSSam Leffler return (m0); 198947e2996eSSam Leffler } 19906eeac1d9SJulian Elischer 19916eeac1d9SJulian Elischer #ifdef MBUF_PROFILING 19926eeac1d9SJulian Elischer 19936eeac1d9SJulian Elischer #define MP_BUCKETS 32 /* don't just change this as things may overflow.*/ 19946eeac1d9SJulian Elischer struct mbufprofile { 19952182c0cfSJulian Elischer uintmax_t wasted[MP_BUCKETS]; 19962182c0cfSJulian Elischer uintmax_t used[MP_BUCKETS]; 19972182c0cfSJulian Elischer uintmax_t segments[MP_BUCKETS]; 19986eeac1d9SJulian Elischer } mbprof; 19996eeac1d9SJulian Elischer 20006eeac1d9SJulian Elischer #define MP_MAXDIGITS 21 /* strlen("16,000,000,000,000,000,000") == 21 */ 20016eeac1d9SJulian Elischer #define MP_NUMLINES 6 20026eeac1d9SJulian Elischer #define MP_NUMSPERLINE 16 20036eeac1d9SJulian Elischer #define MP_EXTRABYTES 64 /* > strlen("used:\nwasted:\nsegments:\n") */ 20046eeac1d9SJulian Elischer /* work out max space needed and add a bit of spare space too */ 20056eeac1d9SJulian Elischer #define MP_MAXLINE ((MP_MAXDIGITS+1) * MP_NUMSPERLINE) 20066eeac1d9SJulian Elischer #define MP_BUFSIZE ((MP_MAXLINE * MP_NUMLINES) + 1 + MP_EXTRABYTES) 20076eeac1d9SJulian Elischer 20086eeac1d9SJulian Elischer char mbprofbuf[MP_BUFSIZE]; 20096eeac1d9SJulian Elischer 20106eeac1d9SJulian Elischer void 20116eeac1d9SJulian Elischer m_profile(struct mbuf *m) 20126eeac1d9SJulian Elischer { 20136eeac1d9SJulian Elischer int segments = 0; 20146eeac1d9SJulian Elischer int used = 0; 20156eeac1d9SJulian Elischer int wasted = 0; 20166eeac1d9SJulian Elischer 
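	/*
	 * Walk the chain once, accumulating the segment count, the bytes
	 * actually used and the buffer bytes left unused ("wasted"); the
	 * byte totals are then binned by fls() while the segment count
	 * indexes its histogram bucket directly.
	 */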
20176eeac1d9SJulian Elischer while (m) { 20186eeac1d9SJulian Elischer segments++; 20196eeac1d9SJulian Elischer used += m->m_len; 20206eeac1d9SJulian Elischer if (m->m_flags & M_EXT) { 20216eeac1d9SJulian Elischer wasted += MHLEN - sizeof(m->m_ext) + 20226eeac1d9SJulian Elischer m->m_ext.ext_size - m->m_len; 20236eeac1d9SJulian Elischer } else { 20246eeac1d9SJulian Elischer if (m->m_flags & M_PKTHDR) 20256eeac1d9SJulian Elischer wasted += MHLEN - m->m_len; 20266eeac1d9SJulian Elischer else 20276eeac1d9SJulian Elischer wasted += MLEN - m->m_len; 20286eeac1d9SJulian Elischer } 20296eeac1d9SJulian Elischer m = m->m_next; 20306eeac1d9SJulian Elischer } 20316eeac1d9SJulian Elischer /* be paranoid.. it helps */ 20326eeac1d9SJulian Elischer if (segments > MP_BUCKETS - 1) 20336eeac1d9SJulian Elischer segments = MP_BUCKETS - 1; 20346eeac1d9SJulian Elischer if (used > 100000) 20356eeac1d9SJulian Elischer used = 100000; 20366eeac1d9SJulian Elischer if (wasted > 100000) 20376eeac1d9SJulian Elischer wasted = 100000; 20386eeac1d9SJulian Elischer /* store in the appropriate bucket */ 20396eeac1d9SJulian Elischer /* don't bother locking. if it's slightly off, so what? */ 20406eeac1d9SJulian Elischer mbprof.segments[segments]++; 20416eeac1d9SJulian Elischer mbprof.used[fls(used)]++; 20426eeac1d9SJulian Elischer mbprof.wasted[fls(wasted)]++; 20436eeac1d9SJulian Elischer } 20446eeac1d9SJulian Elischer 20456eeac1d9SJulian Elischer static void 20466eeac1d9SJulian Elischer mbprof_textify(void) 20476eeac1d9SJulian Elischer { 20486eeac1d9SJulian Elischer int offset; 20496eeac1d9SJulian Elischer char *c; 205060ae52f7SEd Schouten uint64_t *p; 20516eeac1d9SJulian Elischer 20526eeac1d9SJulian Elischer p = &mbprof.wasted[0]; 20536eeac1d9SJulian Elischer c = mbprofbuf; 20546eeac1d9SJulian Elischer offset = snprintf(c, MP_MAXLINE + 10, 20556eeac1d9SJulian Elischer "wasted:\n" 20562182c0cfSJulian Elischer "%ju %ju %ju %ju %ju %ju %ju %ju " 20572182c0cfSJulian Elischer "%ju %ju %ju %ju %ju %ju %ju %ju\n", 20586eeac1d9SJulian Elischer p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], 20596eeac1d9SJulian Elischer p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]); 20606eeac1d9SJulian Elischer #ifdef BIG_ARRAY 20616eeac1d9SJulian Elischer p = &mbprof.wasted[16]; 20626eeac1d9SJulian Elischer c += offset; 20636eeac1d9SJulian Elischer offset = snprintf(c, MP_MAXLINE, 20642182c0cfSJulian Elischer "%ju %ju %ju %ju %ju %ju %ju %ju " 20652182c0cfSJulian Elischer "%ju %ju %ju %ju %ju %ju %ju %ju\n", 20666eeac1d9SJulian Elischer p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], 20676eeac1d9SJulian Elischer p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]); 20686eeac1d9SJulian Elischer #endif 20696eeac1d9SJulian Elischer p = &mbprof.used[0]; 20706eeac1d9SJulian Elischer c += offset; 20716eeac1d9SJulian Elischer offset = snprintf(c, MP_MAXLINE + 10, 20726eeac1d9SJulian Elischer "used:\n" 20732182c0cfSJulian Elischer "%ju %ju %ju %ju %ju %ju %ju %ju " 20742182c0cfSJulian Elischer "%ju %ju %ju %ju %ju %ju %ju %ju\n", 20756eeac1d9SJulian Elischer p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], 20766eeac1d9SJulian Elischer p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]); 20776eeac1d9SJulian Elischer #ifdef BIG_ARRAY 20786eeac1d9SJulian Elischer p = &mbprof.used[16]; 20796eeac1d9SJulian Elischer c += offset; 20806eeac1d9SJulian Elischer offset = snprintf(c, MP_MAXLINE, 20812182c0cfSJulian Elischer "%ju %ju %ju %ju %ju %ju %ju %ju " 20822182c0cfSJulian Elischer "%ju %ju %ju %ju %ju %ju %ju %ju\n", 20836eeac1d9SJulian Elischer p[0], 
p[1], p[2], p[3], p[4], p[5], p[6], p[7], 20846eeac1d9SJulian Elischer p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]); 20856eeac1d9SJulian Elischer #endif 20866eeac1d9SJulian Elischer p = &mbprof.segments[0]; 20876eeac1d9SJulian Elischer c += offset; 20886eeac1d9SJulian Elischer offset = snprintf(c, MP_MAXLINE + 10, 20896eeac1d9SJulian Elischer "segments:\n" 20902182c0cfSJulian Elischer "%ju %ju %ju %ju %ju %ju %ju %ju " 20912182c0cfSJulian Elischer "%ju %ju %ju %ju %ju %ju %ju %ju\n", 20926eeac1d9SJulian Elischer p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], 20936eeac1d9SJulian Elischer p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]); 20946eeac1d9SJulian Elischer #ifdef BIG_ARRAY 20956eeac1d9SJulian Elischer p = &mbprof.segments[16]; 20966eeac1d9SJulian Elischer c += offset; 20976eeac1d9SJulian Elischer offset = snprintf(c, MP_MAXLINE, 20982182c0cfSJulian Elischer "%ju %ju %ju %ju %ju %ju %ju %ju " 20992182c0cfSJulian Elischer "%ju %ju %ju %ju %ju %ju %ju %jju", 21006eeac1d9SJulian Elischer p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], 21016eeac1d9SJulian Elischer p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]); 21026eeac1d9SJulian Elischer #endif 21036eeac1d9SJulian Elischer } 21046eeac1d9SJulian Elischer 21056eeac1d9SJulian Elischer static int 21066eeac1d9SJulian Elischer mbprof_handler(SYSCTL_HANDLER_ARGS) 21076eeac1d9SJulian Elischer { 21086eeac1d9SJulian Elischer int error; 21096eeac1d9SJulian Elischer 21106eeac1d9SJulian Elischer mbprof_textify(); 21116eeac1d9SJulian Elischer error = SYSCTL_OUT(req, mbprofbuf, strlen(mbprofbuf) + 1); 21126eeac1d9SJulian Elischer return (error); 21136eeac1d9SJulian Elischer } 21146eeac1d9SJulian Elischer 21156eeac1d9SJulian Elischer static int 21166eeac1d9SJulian Elischer mbprof_clr_handler(SYSCTL_HANDLER_ARGS) 21176eeac1d9SJulian Elischer { 21186eeac1d9SJulian Elischer int clear, error; 21196eeac1d9SJulian Elischer 21206eeac1d9SJulian Elischer clear = 0; 21216eeac1d9SJulian Elischer error = sysctl_handle_int(oidp, &clear, 0, req); 21226eeac1d9SJulian Elischer if (error || !req->newptr) 21236eeac1d9SJulian Elischer return (error); 21246eeac1d9SJulian Elischer 21256eeac1d9SJulian Elischer if (clear) { 21266eeac1d9SJulian Elischer bzero(&mbprof, sizeof(mbprof)); 21276eeac1d9SJulian Elischer } 21286eeac1d9SJulian Elischer 21296eeac1d9SJulian Elischer return (error); 21306eeac1d9SJulian Elischer } 21316eeac1d9SJulian Elischer 21327029da5cSPawel Biernacki SYSCTL_PROC(_kern_ipc, OID_AUTO, mbufprofile, 21337029da5cSPawel Biernacki CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, NULL, 0, 21347029da5cSPawel Biernacki mbprof_handler, "A", 21357029da5cSPawel Biernacki "mbuf profiling statistics"); 21366eeac1d9SJulian Elischer 21377029da5cSPawel Biernacki SYSCTL_PROC(_kern_ipc, OID_AUTO, mbufprofileclr, 21387029da5cSPawel Biernacki CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, NULL, 0, 21397029da5cSPawel Biernacki mbprof_clr_handler, "I", 21407029da5cSPawel Biernacki "clear mbuf profiling statistics"); 21416eeac1d9SJulian Elischer #endif 21426eeac1d9SJulian Elischer 2143