/*-
 * Copyright (c) 2002, 2003 Sam Leffler, Errno Consulting
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * IPsec-specific mbuf routines.
 */

#include "opt_param.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/socket.h>

#include <net/route.h>
#include <netinet/in.h>

#include <netipsec/ipsec.h>

extern struct mbuf *m_getptr(struct mbuf *, int, int *);

/*
 * Create a writable copy of the mbuf chain.  While doing this
 * we compact the chain with a goal of producing a chain with
 * at most two mbufs.  The second mbuf in this chain is likely
 * to be a cluster.  The primary purpose of this work is to create
 * a writable packet for encryption, compression, etc.  The
 * secondary goal is to linearize the data so the data can be
 * passed to crypto hardware in the most efficient manner possible.
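 *
 * Illustrative use (a sketch, not taken from a particular caller):
 * a transform about to modify the packet in place might do
 *
 *	m = m_clone(m);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *
 * and continue with the returned, possibly reallocated, chain.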
 */
struct mbuf *
m_clone(struct mbuf *m0)
{
	struct mbuf *m, *mprev;
	struct mbuf *n, *mfirst, *mlast;
	int len, off;

	KASSERT(m0 != NULL, ("m_clone: null mbuf"));

	mprev = NULL;
	for (m = m0; m != NULL; m = mprev->m_next) {
		/*
		 * Regular mbufs are ignored unless there's a cluster
		 * in front of them that we can use to coalesce.  We do
		 * the latter mainly so later clusters can be coalesced
		 * also w/o having to handle them specially (i.e. convert
		 * mbuf+cluster -> cluster).  This optimization is heavily
		 * influenced by the assumption that we're running over
		 * Ethernet where MCLBYTES is large enough that the max
		 * packet size will permit lots of coalescing into a
		 * single cluster.  This in turn permits efficient
		 * crypto operations, especially when using hardware.
		 */
		if ((m->m_flags & M_EXT) == 0) {
			if (mprev && (mprev->m_flags & M_EXT) &&
			    m->m_len <= M_TRAILINGSPACE(mprev)) {
				/* XXX: this ignores mbuf types */
				memcpy(mtod(mprev, caddr_t) + mprev->m_len,
				    mtod(m, caddr_t), m->m_len);
				mprev->m_len += m->m_len;
				mprev->m_next = m->m_next;	/* unlink from chain */
				m_free(m);			/* reclaim mbuf */
				newipsecstat.ips_mbcoalesced++;
			} else {
				mprev = m;
			}
			continue;
		}
		/*
		 * Writable mbufs are left alone (for now).
		 */
		if (!MEXT_IS_REF(m)) {
			mprev = m;
			continue;
		}

		/*
		 * Not writable, replace with a copy or coalesce with
		 * the previous mbuf if possible (since we have to copy
		 * it anyway, we try to reduce the number of mbufs and
		 * clusters so that future work is easier).
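		 * ("Not writable" here means the external storage is
		 * shared, i.e. MEXT_IS_REF() sees more than one reference,
		 * so modifying it in place would corrupt the other users.)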
		 */
		KASSERT(m->m_flags & M_EXT,
		    ("m_clone: m_flags 0x%x", m->m_flags));
		/* NB: we only coalesce into a cluster or larger */
		if (mprev != NULL && (mprev->m_flags & M_EXT) &&
		    m->m_len <= M_TRAILINGSPACE(mprev)) {
			/* XXX: this ignores mbuf types */
			memcpy(mtod(mprev, caddr_t) + mprev->m_len,
			    mtod(m, caddr_t), m->m_len);
			mprev->m_len += m->m_len;
			mprev->m_next = m->m_next;	/* unlink from chain */
			m_free(m);			/* reclaim mbuf */
			newipsecstat.ips_clcoalesced++;
			continue;
		}

		/*
		 * Allocate new space to hold the copy...
		 */
		/* XXX why can M_PKTHDR be set past the first mbuf? */
		if (mprev == NULL && (m->m_flags & M_PKTHDR)) {
			/*
			 * NB: if a packet header is present we must
			 * allocate the mbuf separately from any cluster
			 * because M_MOVE_PKTHDR will smash the data
			 * pointer and drop the M_EXT marker.
			 */
			MGETHDR(n, M_DONTWAIT, m->m_type);
			if (n == NULL) {
				m_freem(m0);
				return (NULL);
			}
			M_MOVE_PKTHDR(n, m);
			MCLGET(n, M_DONTWAIT);
			if ((n->m_flags & M_EXT) == 0) {
				m_free(n);
				m_freem(m0);
				return (NULL);
			}
		} else {
			n = m_getcl(M_DONTWAIT, m->m_type, m->m_flags);
			if (n == NULL) {
				m_freem(m0);
				return (NULL);
			}
		}
		/*
		 * ... and copy the data.  We deal with jumbo mbufs
		 * (i.e. m_len > MCLBYTES) by splitting them into
		 * clusters.  We could just malloc a buffer and make
		 * it external but too many device drivers don't know
		 * how to break up the non-contiguous memory when
		 * doing DMA.
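		 * (For example, with the usual 2k MCLBYTES a 9000-byte
		 * jumbo payload ends up copied into five ordinary
		 * clusters by the loop below; an illustrative figure,
		 * not a requirement.)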
		 */
		len = m->m_len;
		off = 0;
		mfirst = n;
		mlast = NULL;
		for (;;) {
			int cc = min(len, MCLBYTES);
			memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off, cc);
			n->m_len = cc;
			if (mlast != NULL)
				mlast->m_next = n;
			mlast = n;
			newipsecstat.ips_clcopied++;

			len -= cc;
			if (len <= 0)
				break;
			off += cc;

			n = m_getcl(M_DONTWAIT, m->m_type, m->m_flags);
			if (n == NULL) {
				m_freem(mfirst);
				m_freem(m0);
				return (NULL);
			}
		}
		n->m_next = m->m_next;
		if (mprev == NULL)
			m0 = mfirst;		/* new head of chain */
		else
			mprev->m_next = mfirst;	/* replace old mbuf */
		m_free(m);			/* release old mbuf */
		mprev = mfirst;
	}
	return (m0);
}

/*
 * Make space for a new header of length hlen at skip bytes
 * into the packet.  When doing this we allocate new mbufs only
 * when absolutely necessary.  The mbuf where the new header
 * is to go is returned together with an offset into the mbuf.
 * If NULL is returned then the mbuf chain may have been modified;
 * the caller is assumed to always free the chain.
 */
struct mbuf *
m_makespace(struct mbuf *m0, int skip, int hlen, int *off)
{
	struct mbuf *m;
	unsigned remain;

	KASSERT(m0 != NULL, ("m_makespace: null mbuf"));
	KASSERT(hlen < MHLEN, ("m_makespace: hlen too big: %u", hlen));

	for (m = m0; m && skip > m->m_len; m = m->m_next)
		skip -= m->m_len;
	if (m == NULL)
		return (NULL);
	/*
	 * At this point skip is the offset into the mbuf m
	 * where the new header should be placed.  Figure out
	 * if there's space to insert the new header.  If so,
	 * and copying the remainder makes sense then do so.
	 * Otherwise insert a new mbuf in the chain, splitting
	 * the contents of m as needed.
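	 *
	 * (For example, a caller inserting an ESP header right after
	 * the IP header would have passed skip == the IP header length;
	 * by this point skip has been reduced to an offset within m.
	 * An illustrative case, not something this routine depends on.)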
	 */
	remain = m->m_len - skip;		/* data to move */
	if (hlen > M_TRAILINGSPACE(m)) {
		struct mbuf *n;

		/* XXX code doesn't handle clusters XXX */
		KASSERT(remain < MLEN,
		    ("m_makespace: remainder too big: %u", remain));
		/*
		 * Not enough space in m, split the contents
		 * of m, inserting new mbufs as required.
		 *
		 * NB: this ignores mbuf types.
		 */
		MGET(n, M_DONTWAIT, MT_DATA);
		if (n == NULL)
			return (NULL);
		n->m_next = m->m_next;		/* splice new mbuf */
		m->m_next = n;
		newipsecstat.ips_mbinserted++;
		if (hlen <= M_TRAILINGSPACE(m) + remain) {
			/*
			 * New header fits in the old mbuf if we copy
			 * the remainder; just do the copy to the new
			 * mbuf and we're good to go.
			 */
			memcpy(mtod(n, caddr_t),
			    mtod(m, caddr_t) + skip, remain);
			n->m_len = remain;
			m->m_len = skip + hlen;
			*off = skip;
		} else {
			/*
			 * No space in the old mbuf for the new header.
			 * Make space in the new mbuf and check that the
			 * remaining data fits too.  If not then we
			 * must allocate an additional mbuf (yech).
			 */
			n->m_len = 0;
			if (remain + hlen > M_TRAILINGSPACE(n)) {
				struct mbuf *n2;

				MGET(n2, M_DONTWAIT, MT_DATA);
				/* NB: new mbuf is on chain, let caller free */
				if (n2 == NULL)
					return (NULL);
				n2->m_len = 0;
				memcpy(mtod(n2, caddr_t),
				    mtod(m, caddr_t) + skip, remain);
				n2->m_len = remain;
				/* splice in second mbuf */
				n2->m_next = n->m_next;
				n->m_next = n2;
				newipsecstat.ips_mbinserted++;
			} else {
				memcpy(mtod(n, caddr_t) + hlen,
				    mtod(m, caddr_t) + skip, remain);
				n->m_len += remain;
			}
			m->m_len -= remain;
			n->m_len += hlen;
			m = n;			/* header is at front ... */
			*off = 0;		/* ... of new mbuf */
		}
	} else {
		/*
		 * Copy the remainder to the back of the mbuf
		 * so there's space to write the new header.
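		 * That is, [ skip bytes | remain bytes ] becomes
		 * [ skip bytes | hlen-byte gap | remain bytes ] within
		 * this mbuf; the caller writes the new header into the
		 * gap at the returned offset.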
		 */
		bcopy(mtod(m, caddr_t) + skip,
		    mtod(m, caddr_t) + skip + hlen, remain);
		m->m_len += hlen;
		*off = skip;
	}
	m0->m_pkthdr.len += hlen;		/* adjust packet length */
	return m;
}

/*
 * m_pad(m, n) pads <m> with <n> bytes at the end.  The packet header
 * length is updated, and a pointer to the first byte of the padding
 * (which is guaranteed to be all in one mbuf) is returned.
 */
caddr_t
m_pad(struct mbuf *m, int n)
{
	register struct mbuf *m0, *m1;
	register int len, pad;
	caddr_t retval;

	if (n <= 0) {			/* No stupid arguments. */
		DPRINTF(("m_pad: pad length invalid (%d)\n", n));
		m_freem(m);
		return NULL;
	}

	len = m->m_pkthdr.len;
	pad = n;
	m0 = m;

	while (m0->m_len < len) {
		KASSERT(m0->m_next != NULL,
		    ("m_pad: m0 null, len %u m_len %u", len, m0->m_len));	/* XXX */
		len -= m0->m_len;
		m0 = m0->m_next;
	}

	if (m0->m_len != len) {
		DPRINTF(("m_pad: length mismatch (should be %d instead of %d)\n",
		    m->m_pkthdr.len, m->m_pkthdr.len + m0->m_len - len));

		m_freem(m);
		return NULL;
	}

	/* Check for zero-length trailing mbufs, and find the last one. */
	for (m1 = m0; m1->m_next; m1 = m1->m_next) {
		if (m1->m_next->m_len != 0) {
			DPRINTF(("m_pad: length mismatch (should be %d "
			    "instead of %d)\n",
			    m->m_pkthdr.len,
			    m->m_pkthdr.len + m1->m_next->m_len));

			m_freem(m);
			return NULL;
		}

		m0 = m1->m_next;
	}

	if (pad > M_TRAILINGSPACE(m0)) {
		/*
		 * Add an mbuf to the chain.
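		 * (Needed e.g. when the trailing mbuf is a cluster that
		 * is already full and so has no free space at the end.)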
		 */
		MGET(m1, M_DONTWAIT, MT_DATA);
		if (m1 == NULL) {
			m_freem(m0);
			DPRINTF(("m_pad: unable to get extra mbuf\n"));
			return NULL;
		}

		m0->m_next = m1;
		m0 = m1;
		m0->m_len = 0;
	}

	retval = m0->m_data + m0->m_len;
	m0->m_len += pad;
	m->m_pkthdr.len += pad;

	return retval;
}

/*
 * Remove hlen data at offset skip in the packet.  This is used by
 * the protocols to strip protocol headers and associated data (e.g. IV,
 * authenticator) on input.
 */
int
m_striphdr(struct mbuf *m, int skip, int hlen)
{
	struct mbuf *m1;
	int roff;

	/* Find beginning of header */
	m1 = m_getptr(m, skip, &roff);
	if (m1 == NULL)
		return (EINVAL);

	/* Remove the header and associated data from the mbuf. */
	if (roff == 0) {
		/* The header was at the beginning of the mbuf */
		newipsecstat.ips_input_front++;
		m_adj(m1, hlen);
		if ((m1->m_flags & M_PKTHDR) == 0)
			m->m_pkthdr.len -= hlen;
	} else if (roff + hlen >= m1->m_len) {
		struct mbuf *mo;

		/*
		 * Part or all of the header is at the end of this mbuf,
		 * so first let's remove the remainder of the header from
		 * the beginning of the remainder of the mbuf chain, if any.
		 */
		newipsecstat.ips_input_end++;
		if (roff + hlen > m1->m_len) {
			/* Adjust the next mbuf by the remainder */
			m_adj(m1->m_next, roff + hlen - m1->m_len);

			/* The second mbuf is guaranteed not to have a pkthdr... */
			m->m_pkthdr.len -= (roff + hlen - m1->m_len);
		}

		/* Now, let's unlink the mbuf chain for a second... */
		mo = m1->m_next;
		m1->m_next = NULL;

		/*
		 * ...and trim the end of the first part of the chain...sick.
		 * NB: account for the trimmed bytes before m_adj() shrinks
		 * m1->m_len, otherwise the pkthdr adjustment comes out zero.
		 */
		if ((m1->m_flags & M_PKTHDR) == 0)
			m->m_pkthdr.len -= (m1->m_len - roff);
		m_adj(m1, -(m1->m_len - roff));

		/* Finally, let's relink */
		m1->m_next = mo;
	} else {
		/*
		 * The header lies in the "middle" of the mbuf; copy
		 * the remainder of the mbuf down over the header.
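		 * That is, [ roff bytes | hlen-byte header | tail ]
		 * becomes [ roff bytes | tail ], shrinking this mbuf
		 * (and the packet) by hlen bytes.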
		 */
		newipsecstat.ips_input_middle++;
		bcopy(mtod(m1, u_char *) + roff + hlen,
		    mtod(m1, u_char *) + roff,
		    m1->m_len - (roff + hlen));
		m1->m_len -= hlen;
		m->m_pkthdr.len -= hlen;
	}
	return (0);
}

/*
 * Diagnostic routine to check mbuf alignment as required by the
 * crypto device drivers (that use DMA).
 */
void
m_checkalignment(const char *where, struct mbuf *m0, int off, int len)
{
	int roff;
	struct mbuf *m = m_getptr(m0, off, &roff);
	caddr_t addr;

	if (m == NULL)
		return;
	printf("%s (off %u len %u): ", where, off, len);
	addr = mtod(m, caddr_t) + roff;
	do {
		int mlen;

		if (((uintptr_t) addr) & 3) {
			printf("addr misaligned %p,", addr);
			break;
		}
		mlen = m->m_len;
		if (mlen > len)
			mlen = len;
		len -= mlen;
		if (len && (mlen & 3)) {
			printf("len mismatch %u,", mlen);
			break;
		}
		m = m->m_next;
		addr = m ? mtod(m, caddr_t) : NULL;
	} while (m && len > 0);
	for (m = m0; m; m = m->m_next)
		printf(" [%p:%u]", mtod(m, caddr_t), m->m_len);
	printf("\n");
}
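
/*
 * Illustrative use of the routines above (a sketch; the variables and
 * call sites are hypothetical, not taken from this file's callers).
 * An output path opens room for an hlen-byte header at offset skip:
 *
 *	mi = m_makespace(m, skip, hlen, &roff);
 *	if (mi == NULL) {
 *		m_freem(m);
 *		return (ENOBUFS);
 *	}
 *	bcopy(hdr, mtod(mi, caddr_t) + roff, hlen);
 *
 * and the matching input path removes the header again with
 *
 *	error = m_striphdr(m, skip, hlen);
 */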