xref: /freebsd/sys/netipsec/ipsec_mbuf.c (revision 972136fa24b620ce4da9fb29bfbe746150b30b73)
188768458SSam Leffler /*	$FreeBSD$	*/
288768458SSam Leffler 
388768458SSam Leffler /*
488768458SSam Leffler  * IPsec-specific mbuf routines.
588768458SSam Leffler  */
688768458SSam Leffler 
788768458SSam Leffler #include "opt_param.h"
888768458SSam Leffler 
988768458SSam Leffler #include <sys/param.h>
1088768458SSam Leffler #include <sys/systm.h>
1188768458SSam Leffler #include <sys/mbuf.h>
1288768458SSam Leffler #include <sys/socket.h>
1388768458SSam Leffler 
1488768458SSam Leffler #include <net/route.h>
1588768458SSam Leffler #include <netinet/in.h>
1688768458SSam Leffler 
1788768458SSam Leffler #include <netipsec/ipsec.h>
1888768458SSam Leffler 
1988768458SSam Leffler extern	struct mbuf *m_getptr(struct mbuf *, int, int *);
2088768458SSam Leffler 
2188768458SSam Leffler /*
2288768458SSam Leffler  * Create a writable copy of the mbuf chain.  While doing this
2388768458SSam Leffler  * we compact the chain with a goal of producing a chain with
2488768458SSam Leffler  * at most two mbufs.  The second mbuf in this chain is likely
2588768458SSam Leffler  * to be a cluster.  The primary purpose of this work is to create
2688768458SSam Leffler  * a writable packet for encryption, compression, etc.  The
2788768458SSam Leffler  * secondary goal is to linearize the data so the data can be
2888768458SSam Leffler  * passed to crypto hardware in the most efficient manner possible.
2988768458SSam Leffler  */
struct mbuf *
m_clone(struct mbuf *m0)
{
	struct mbuf *m, *mprev;
	struct mbuf *n, *mfirst, *mlast;
	int len, off;

	KASSERT(m0 != NULL, ("m_clone: null mbuf"));

	/*
	 * Walk the chain with mprev trailing m so coalesced or
	 * replaced mbufs can be unlinked in place.  On any allocation
	 * failure the entire (possibly already-modified) chain is
	 * freed and NULL is returned.
	 */
	mprev = NULL;
	for (m = m0; m != NULL; m = mprev->m_next) {
		/*
		 * Regular mbufs are ignored unless there's a cluster
		 * in front of it that we can use to coalesce.  We do
		 * the latter mainly so later clusters can be coalesced
		 * also w/o having to handle them specially (i.e. convert
		 * mbuf+cluster -> cluster).  This optimization is heavily
		 * influenced by the assumption that we're running over
		 * Ethernet where MCLBYTES is large enough that the max
		 * packet size will permit lots of coalescing into a
		 * single cluster.  This in turn permits efficient
		 * crypto operations, especially when using hardware.
		 */
		if ((m->m_flags & M_EXT) == 0) {
			if (mprev && (mprev->m_flags & M_EXT) &&
			    m->m_len <= M_TRAILINGSPACE(mprev)) {
				/* XXX: this ignores mbuf types */
				memcpy(mtod(mprev, caddr_t) + mprev->m_len,
				       mtod(m, caddr_t), m->m_len);
				mprev->m_len += m->m_len;
				mprev->m_next = m->m_next;	/* unlink from chain */
				m_free(m);			/* reclaim mbuf */
				newipsecstat.ips_mbcoalesced++;
			} else {
				mprev = m;
			}
			continue;
		}
		/*
		 * Writable mbufs are left alone (for now).
		 */
		if (!MEXT_IS_REF(m)) {
			mprev = m;
			continue;
		}

		/*
		 * Not writable, replace with a copy or coalesce with
		 * the previous mbuf if possible (since we have to copy
		 * it anyway, we try to reduce the number of mbufs and
		 * clusters so that future work is easier).
		 */
		KASSERT(m->m_flags & M_EXT,
			("m_clone: m_flags 0x%x", m->m_flags));
		/* NB: we only coalesce into a cluster or larger */
		if (mprev != NULL && (mprev->m_flags & M_EXT) &&
		    m->m_len <= M_TRAILINGSPACE(mprev)) {
			/* XXX: this ignores mbuf types */
			memcpy(mtod(mprev, caddr_t) + mprev->m_len,
			       mtod(m, caddr_t), m->m_len);
			mprev->m_len += m->m_len;
			mprev->m_next = m->m_next;	/* unlink from chain */
			m_free(m);			/* reclaim mbuf */
			newipsecstat.ips_clcoalesced++;
			continue;
		}

		/*
		 * Allocate new space to hold the copy...
		 */
		/* XXX why can M_PKTHDR be set past the first mbuf? */
		if (mprev == NULL && (m->m_flags & M_PKTHDR)) {
			/*
			 * NB: if a packet header is present we must
			 * allocate the mbuf separately from any cluster
			 * because M_MOVE_PKTHDR will smash the data
			 * pointer and drop the M_EXT marker.
			 */
			MGETHDR(n, M_DONTWAIT, m->m_type);
			if (n == NULL) {
				m_freem(m0);
				return (NULL);
			}
			M_MOVE_PKTHDR(n, m);
			MCLGET(n, M_DONTWAIT);
			if ((n->m_flags & M_EXT) == 0) {
				/* NB: pkthdr already moved to n; free both */
				m_free(n);
				m_freem(m0);
				return (NULL);
			}
		} else {
			n = m_getcl(M_DONTWAIT, m->m_type, m->m_flags);
			if (n == NULL) {
				m_freem(m0);
				return (NULL);
			}
		}
		/*
		 * ... and copy the data.  We deal with jumbo mbufs
		 * (i.e. m_len > MCLBYTES) by splitting them into
		 * clusters.  We could just malloc a buffer and make
		 * it external but too many device drivers don't know
		 * how to break up the non-contiguous memory when
		 * doing DMA.
		 */
		len = m->m_len;
		off = 0;
		mfirst = n;
		mlast = NULL;
		for (;;) {
			/* copy at most one cluster's worth per pass */
			int cc = min(len, MCLBYTES);
			memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off, cc);
			n->m_len = cc;
			if (mlast != NULL)
				mlast->m_next = n;
			mlast = n;
			newipsecstat.ips_clcopied++;

			len -= cc;
			if (len <= 0)
				break;
			off += cc;

			/* jumbo source; need another cluster for the rest */
			n = m_getcl(M_DONTWAIT, m->m_type, m->m_flags);
			if (n == NULL) {
				/* free the partial copy, then the original */
				m_freem(mfirst);
				m_freem(m0);
				return (NULL);
			}
		}
		/* splice the copy (mfirst..n) in place of m */
		n->m_next = m->m_next;
		if (mprev == NULL)
			m0 = mfirst;		/* new head of chain */
		else
			mprev->m_next = mfirst;	/* replace old mbuf */
		m_free(m);			/* release old mbuf */
		mprev = mfirst;
	}
	return (m0);
}
17088768458SSam Leffler 
17188768458SSam Leffler /*
172972136faSSam Leffler  * Make space for a new header of length hlen at skip bytes
173972136faSSam Leffler  * into the packet.  When doing this we allocate new mbufs only
17488768458SSam Leffler  * when absolutely necessary.  The mbuf where the new header
17588768458SSam Leffler  * is to go is returned together with an offset into the mbuf.
17688768458SSam Leffler  * If NULL is returned then the mbuf chain may have been modified;
17788768458SSam Leffler  * the caller is assumed to always free the chain.
17888768458SSam Leffler  */
17988768458SSam Leffler struct mbuf *
18088768458SSam Leffler m_makespace(struct mbuf *m0, int skip, int hlen, int *off)
18188768458SSam Leffler {
18288768458SSam Leffler 	struct mbuf *m;
18388768458SSam Leffler 	unsigned remain;
18488768458SSam Leffler 
18588768458SSam Leffler 	KASSERT(m0 != NULL, ("m_dmakespace: null mbuf"));
18688768458SSam Leffler 	KASSERT(hlen < MHLEN, ("m_makespace: hlen too big: %u", hlen));
18788768458SSam Leffler 
18888768458SSam Leffler 	for (m = m0; m && skip > m->m_len; m = m->m_next)
18988768458SSam Leffler 		skip -= m->m_len;
19088768458SSam Leffler 	if (m == NULL)
19188768458SSam Leffler 		return (NULL);
19288768458SSam Leffler 	/*
19388768458SSam Leffler 	 * At this point skip is the offset into the mbuf m
19488768458SSam Leffler 	 * where the new header should be placed.  Figure out
19588768458SSam Leffler 	 * if there's space to insert the new header.  If so,
19688768458SSam Leffler 	 * and copying the remainder makese sense then do so.
19788768458SSam Leffler 	 * Otherwise insert a new mbuf in the chain, splitting
19888768458SSam Leffler 	 * the contents of m as needed.
19988768458SSam Leffler 	 */
20088768458SSam Leffler 	remain = m->m_len - skip;		/* data to move */
20188768458SSam Leffler 	if (hlen > M_TRAILINGSPACE(m)) {
20288768458SSam Leffler 		struct mbuf *n;
20388768458SSam Leffler 
204d47693ebSSam Leffler 		/* XXX code doesn't handle clusters XXX */
205d47693ebSSam Leffler 		KASSERT(remain < MLEN,
206d47693ebSSam Leffler 			("m_makespace: remainder too big: %u", remain));
20788768458SSam Leffler 		/*
20888768458SSam Leffler 		 * Not enough space in m, split the contents
20988768458SSam Leffler 		 * of m, inserting new mbufs as required.
21088768458SSam Leffler 		 *
21188768458SSam Leffler 		 * NB: this ignores mbuf types.
21288768458SSam Leffler 		 */
21388768458SSam Leffler 		MGET(n, M_DONTWAIT, MT_DATA);
21488768458SSam Leffler 		if (n == NULL)
21588768458SSam Leffler 			return (NULL);
21688768458SSam Leffler 		n->m_next = m->m_next;		/* splice new mbuf */
21788768458SSam Leffler 		m->m_next = n;
21888768458SSam Leffler 		newipsecstat.ips_mbinserted++;
21988768458SSam Leffler 		if (hlen <= M_TRAILINGSPACE(m) + remain) {
22088768458SSam Leffler 			/*
22188768458SSam Leffler 			 * New header fits in the old mbuf if we copy
22288768458SSam Leffler 			 * the remainder; just do the copy to the new
22388768458SSam Leffler 			 * mbuf and we're good to go.
22488768458SSam Leffler 			 */
22588768458SSam Leffler 			memcpy(mtod(n, caddr_t),
22688768458SSam Leffler 			       mtod(m, caddr_t) + skip, remain);
22788768458SSam Leffler 			n->m_len = remain;
22888768458SSam Leffler 			m->m_len = skip + hlen;
22988768458SSam Leffler 			*off = skip;
23088768458SSam Leffler 		} else {
23188768458SSam Leffler 			/*
23288768458SSam Leffler 			 * No space in the old mbuf for the new header.
23388768458SSam Leffler 			 * Make space in the new mbuf and check the
23488768458SSam Leffler 			 * remainder'd data fits too.  If not then we
23588768458SSam Leffler 			 * must allocate an additional mbuf (yech).
23688768458SSam Leffler 			 */
23788768458SSam Leffler 			n->m_len = 0;
23888768458SSam Leffler 			if (remain + hlen > M_TRAILINGSPACE(n)) {
23988768458SSam Leffler 				struct mbuf *n2;
24088768458SSam Leffler 
24188768458SSam Leffler 				MGET(n2, M_DONTWAIT, MT_DATA);
24288768458SSam Leffler 				/* NB: new mbuf is on chain, let caller free */
24388768458SSam Leffler 				if (n2 == NULL)
24488768458SSam Leffler 					return (NULL);
24588768458SSam Leffler 				n2->m_len = 0;
24688768458SSam Leffler 				memcpy(mtod(n2, caddr_t),
24788768458SSam Leffler 				       mtod(m, caddr_t) + skip, remain);
24888768458SSam Leffler 				n2->m_len = remain;
24988768458SSam Leffler 				/* splice in second mbuf */
25088768458SSam Leffler 				n2->m_next = n->m_next;
25188768458SSam Leffler 				n->m_next = n2;
25288768458SSam Leffler 				newipsecstat.ips_mbinserted++;
25388768458SSam Leffler 			} else {
25488768458SSam Leffler 				memcpy(mtod(n, caddr_t) + hlen,
25588768458SSam Leffler 				       mtod(m, caddr_t) + skip, remain);
25688768458SSam Leffler 				n->m_len += remain;
25788768458SSam Leffler 			}
25888768458SSam Leffler 			m->m_len -= remain;
25988768458SSam Leffler 			n->m_len += hlen;
26088768458SSam Leffler 			m = n;			/* header is at front ... */
26188768458SSam Leffler 			*off = 0;		/* ... of new mbuf */
26288768458SSam Leffler 		}
26388768458SSam Leffler 	} else {
26488768458SSam Leffler 		/*
26588768458SSam Leffler 		 * Copy the remainder to the back of the mbuf
26688768458SSam Leffler 		 * so there's space to write the new header.
26788768458SSam Leffler 		 */
26888768458SSam Leffler 		/* XXX can this be memcpy? does it handle overlap? */
26988768458SSam Leffler 		ovbcopy(mtod(m, caddr_t) + skip,
27088768458SSam Leffler 			mtod(m, caddr_t) + skip + hlen, remain);
27188768458SSam Leffler 		m->m_len += hlen;
27288768458SSam Leffler 		*off = skip;
27388768458SSam Leffler 	}
27488768458SSam Leffler 	m0->m_pkthdr.len += hlen;		/* adjust packet length */
27588768458SSam Leffler 	return m;
27688768458SSam Leffler }
27788768458SSam Leffler 
27888768458SSam Leffler /*
27988768458SSam Leffler  * m_pad(m, n) pads <m> with <n> bytes at the end. The packet header
28088768458SSam Leffler  * length is updated, and a pointer to the first byte of the padding
28188768458SSam Leffler  * (which is guaranteed to be all in one mbuf) is returned.
28288768458SSam Leffler  */
28388768458SSam Leffler caddr_t
28488768458SSam Leffler m_pad(struct mbuf *m, int n)
28588768458SSam Leffler {
28688768458SSam Leffler 	register struct mbuf *m0, *m1;
28788768458SSam Leffler 	register int len, pad;
28888768458SSam Leffler 	caddr_t retval;
28988768458SSam Leffler 
29088768458SSam Leffler 	if (n <= 0) {  /* No stupid arguments. */
29188768458SSam Leffler 		DPRINTF(("m_pad: pad length invalid (%d)\n", n));
29288768458SSam Leffler 		m_freem(m);
29388768458SSam Leffler 		return NULL;
29488768458SSam Leffler 	}
29588768458SSam Leffler 
29688768458SSam Leffler 	len = m->m_pkthdr.len;
29788768458SSam Leffler 	pad = n;
29888768458SSam Leffler 	m0 = m;
29988768458SSam Leffler 
30088768458SSam Leffler 	while (m0->m_len < len) {
30188768458SSam Leffler KASSERT(m0->m_next != NULL, ("m_pad: m0 null, len %u m_len %u", len, m0->m_len));/*XXX*/
30288768458SSam Leffler 		len -= m0->m_len;
30388768458SSam Leffler 		m0 = m0->m_next;
30488768458SSam Leffler 	}
30588768458SSam Leffler 
30688768458SSam Leffler 	if (m0->m_len != len) {
30788768458SSam Leffler 		DPRINTF(("m_pad: length mismatch (should be %d instead of %d)\n",
30888768458SSam Leffler 		    m->m_pkthdr.len, m->m_pkthdr.len + m0->m_len - len));
30988768458SSam Leffler 
31088768458SSam Leffler 		m_freem(m);
31188768458SSam Leffler 		return NULL;
31288768458SSam Leffler 	}
31388768458SSam Leffler 
31488768458SSam Leffler 	/* Check for zero-length trailing mbufs, and find the last one. */
31588768458SSam Leffler 	for (m1 = m0; m1->m_next; m1 = m1->m_next) {
31688768458SSam Leffler 		if (m1->m_next->m_len != 0) {
31788768458SSam Leffler 			DPRINTF(("m_pad: length mismatch (should be %d "
31888768458SSam Leffler 			    "instead of %d)\n",
31988768458SSam Leffler 			    m->m_pkthdr.len,
32088768458SSam Leffler 			    m->m_pkthdr.len + m1->m_next->m_len));
32188768458SSam Leffler 
32288768458SSam Leffler 			m_freem(m);
32388768458SSam Leffler 			return NULL;
32488768458SSam Leffler 		}
32588768458SSam Leffler 
32688768458SSam Leffler 		m0 = m1->m_next;
32788768458SSam Leffler 	}
32888768458SSam Leffler 
32988768458SSam Leffler 	if (pad > M_TRAILINGSPACE(m0)) {
33088768458SSam Leffler 		/* Add an mbuf to the chain. */
33188768458SSam Leffler 		MGET(m1, M_DONTWAIT, MT_DATA);
33288768458SSam Leffler 		if (m1 == 0) {
33388768458SSam Leffler 			m_freem(m0);
33488768458SSam Leffler 			DPRINTF(("m_pad: unable to get extra mbuf\n"));
33588768458SSam Leffler 			return NULL;
33688768458SSam Leffler 		}
33788768458SSam Leffler 
33888768458SSam Leffler 		m0->m_next = m1;
33988768458SSam Leffler 		m0 = m1;
34088768458SSam Leffler 		m0->m_len = 0;
34188768458SSam Leffler 	}
34288768458SSam Leffler 
34388768458SSam Leffler 	retval = m0->m_data + m0->m_len;
34488768458SSam Leffler 	m0->m_len += pad;
34588768458SSam Leffler 	m->m_pkthdr.len += pad;
34688768458SSam Leffler 
34788768458SSam Leffler 	return retval;
34888768458SSam Leffler }
34988768458SSam Leffler 
35088768458SSam Leffler /*
35188768458SSam Leffler  * Remove hlen data at offset skip in the packet.  This is used by
35288768458SSam Leffler  * the protocols strip protocol headers and associated data (e.g. IV,
35388768458SSam Leffler  * authenticator) on input.
35488768458SSam Leffler  */
int
m_striphdr(struct mbuf *m, int skip, int hlen)
{
	struct mbuf *m1;
	int roff;

	/* Find beginning of header */
	m1 = m_getptr(m, skip, &roff);
	if (m1 == NULL)
		return (EINVAL);

	/* Remove the header and associated data from the mbuf. */
	if (roff == 0) {
		/* The header was at the beginning of the mbuf */
		newipsecstat.ips_input_front++;
		m_adj(m1, hlen);
		/*
		 * m_adj only updates pkthdr.len when its argument has
		 * a packet header; if m1 is an interior mbuf we must
		 * adjust the head's count ourselves.
		 */
		if ((m1->m_flags & M_PKTHDR) == 0)
			m->m_pkthdr.len -= hlen;
	} else if (roff + hlen >= m1->m_len) {
		struct mbuf *mo;

		/*
		 * Part or all of the header is at the end of this mbuf,
		 * so first let's remove the remainder of the header from
		 * the beginning of the remainder of the mbuf chain, if any.
		 */
		newipsecstat.ips_input_end++;
		if (roff + hlen > m1->m_len) {
			/* Adjust the next mbuf by the remainder */
			m_adj(m1->m_next, roff + hlen - m1->m_len);

			/* The second mbuf is guaranteed not to have a pkthdr... */
			m->m_pkthdr.len -= (roff + hlen - m1->m_len);
		}

		/* Now, let's unlink the mbuf chain for a second...*/
		mo = m1->m_next;
		m1->m_next = NULL;

		/* ...and trim the end of the first part of the chain...sick */
		/* NB: negative len trims from the tail of (the now lone) m1 */
		m_adj(m1, -(m1->m_len - roff));
		if ((m1->m_flags & M_PKTHDR) == 0)
			m->m_pkthdr.len -= (m1->m_len - roff);

		/* Finally, let's relink */
		m1->m_next = mo;
	} else {
		/*
		 * The header lies in the "middle" of the mbuf; copy
		 * the remainder of the mbuf down over the header.
		 */
		newipsecstat.ips_input_middle++;
		/* NB: bcopy (not memcpy) — source and dest may overlap */
		bcopy(mtod(m1, u_char *) + roff + hlen,
		      mtod(m1, u_char *) + roff,
		      m1->m_len - (roff + hlen));
		m1->m_len -= hlen;
		m->m_pkthdr.len -= hlen;
	}
	return (0);
}
41588768458SSam Leffler 
41688768458SSam Leffler /*
41788768458SSam Leffler  * Diagnostic routine to check mbuf alignment as required by the
41888768458SSam Leffler  * crypto device drivers (that use DMA).
41988768458SSam Leffler  */
42088768458SSam Leffler void
42188768458SSam Leffler m_checkalignment(const char* where, struct mbuf *m0, int off, int len)
42288768458SSam Leffler {
42388768458SSam Leffler 	int roff;
42488768458SSam Leffler 	struct mbuf *m = m_getptr(m0, off, &roff);
42588768458SSam Leffler 	caddr_t addr;
42688768458SSam Leffler 
42788768458SSam Leffler 	if (m == NULL)
42888768458SSam Leffler 		return;
42988768458SSam Leffler 	printf("%s (off %u len %u): ", where, off, len);
43088768458SSam Leffler 	addr = mtod(m, caddr_t) + roff;
43188768458SSam Leffler 	do {
43288768458SSam Leffler 		int mlen;
43388768458SSam Leffler 
43488768458SSam Leffler 		if (((uintptr_t) addr) & 3) {
43588768458SSam Leffler 			printf("addr misaligned %p,", addr);
43688768458SSam Leffler 			break;
43788768458SSam Leffler 		}
43888768458SSam Leffler 		mlen = m->m_len;
43988768458SSam Leffler 		if (mlen > len)
44088768458SSam Leffler 			mlen = len;
44188768458SSam Leffler 		len -= mlen;
44288768458SSam Leffler 		if (len && (mlen & 3)) {
44388768458SSam Leffler 			printf("len mismatch %u,", mlen);
44488768458SSam Leffler 			break;
44588768458SSam Leffler 		}
44688768458SSam Leffler 		m = m->m_next;
44788768458SSam Leffler 		addr = m ? mtod(m, caddr_t) : NULL;
44888768458SSam Leffler 	} while (m && len > 0);
44988768458SSam Leffler 	for (m = m0; m; m = m->m_next)
45088768458SSam Leffler 		printf(" [%p:%u]", mtod(m, caddr_t), m->m_len);
45188768458SSam Leffler 	printf("\n");
45288768458SSam Leffler }
453