xref: /freebsd/sys/netipsec/ipsec_mbuf.c (revision 88768458d21c0a0eef6cf91053630b94a58fef3c)
188768458SSam Leffler /*	$FreeBSD$	*/
288768458SSam Leffler 
388768458SSam Leffler /*
488768458SSam Leffler  * IPsec-specific mbuf routines.
588768458SSam Leffler  */
688768458SSam Leffler 
788768458SSam Leffler #include "opt_param.h"
888768458SSam Leffler 
988768458SSam Leffler #include <sys/param.h>
1088768458SSam Leffler #include <sys/systm.h>
1188768458SSam Leffler #include <sys/mbuf.h>
1288768458SSam Leffler #include <sys/socket.h>
1388768458SSam Leffler 
1488768458SSam Leffler #include <net/route.h>
1588768458SSam Leffler #include <netinet/in.h>
1688768458SSam Leffler 
1788768458SSam Leffler #include <netipsec/ipsec.h>
1888768458SSam Leffler 
1988768458SSam Leffler extern	struct mbuf *m_getptr(struct mbuf *, int, int *);
2088768458SSam Leffler 
2188768458SSam Leffler /*
2288768458SSam Leffler  * Create a writable copy of the mbuf chain.  While doing this
2388768458SSam Leffler  * we compact the chain with a goal of producing a chain with
2488768458SSam Leffler  * at most two mbufs.  The second mbuf in this chain is likely
2588768458SSam Leffler  * to be a cluster.  The primary purpose of this work is to create
2688768458SSam Leffler  * a writable packet for encryption, compression, etc.  The
2788768458SSam Leffler  * secondary goal is to linearize the data so the data can be
2888768458SSam Leffler  * passed to crypto hardware in the most efficient manner possible.
2988768458SSam Leffler  */
struct mbuf *
m_clone(struct mbuf *m0)
{
	struct mbuf *m, *mprev;

	KASSERT(m0 != NULL, ("m_clone: null mbuf"));

	/*
	 * Walk the chain keeping mprev as the last mbuf known to be
	 * writable.  NB: the loop's update expression dereferences
	 * mprev; this is safe because every path through the body
	 * either sets mprev = m (or a replacement) or coalesces m
	 * into mprev, so mprev is non-NULL after the first pass and
	 * mprev->m_next is always the next unprocessed mbuf.
	 */
	mprev = NULL;
	for (m = m0; m != NULL; m = mprev->m_next) {
		/*
		 * Regular mbufs are ignored unless there's a cluster
		 * in front of it that we can use to coalesce.  We do
		 * the latter mainly so later clusters can be coalesced
		 * also w/o having to handle them specially (i.e. convert
		 * mbuf+cluster -> cluster).  This optimization is heavily
		 * influenced by the assumption that we're running over
		 * Ethernet where MCLBYTES is large enough that the max
		 * packet size will permit lots of coalescing into a
		 * single cluster.  This in turn permits efficient
		 * crypto operations, especially when using hardware.
		 */
		if ((m->m_flags & M_EXT) == 0) {
			if (mprev && (mprev->m_flags & M_EXT) &&
			    m->m_len <= M_TRAILINGSPACE(mprev)) {
				/* XXX: this ignores mbuf types */
				memcpy(mtod(mprev, caddr_t) + mprev->m_len,
				       mtod(m, caddr_t), m->m_len);
				mprev->m_len += m->m_len;
				mprev->m_next = m->m_next;	/* unlink from chain */
				m_free(m);			/* reclaim mbuf */
				newipsecstat.ips_mbcoalesced++;
			} else {
				mprev = m;
			}
			continue;
		}
		/*
		 * Cluster'd mbufs are left alone (for now).
		 * NB: MEXT_IS_REF true means the external storage is
		 * shared, so writing through it would corrupt other
		 * references; only unshared clusters are kept in place.
		 */
		if (!MEXT_IS_REF(m)) {
			mprev = m;
			continue;
		}
		/*
		 * Not writable, replace with a copy or coalesce with
		 * the previous mbuf if possible (since we have to copy
		 * it anyway, we try to reduce the number of mbufs and
		 * clusters so that future work is easier).
		 */
		/* XXX why can M_PKTHDR be set past the first mbuf? */
		KASSERT(m->m_flags & M_EXT,
			("m_clone: m_flags 0x%x", m->m_flags));
		/* NB: we only coalesce into a cluster */
		if (mprev == NULL || (mprev->m_flags & M_EXT) == 0 ||
		    m->m_len > M_TRAILINGSPACE(mprev)) {
			struct mbuf *n;

			/*
			 * Allocate a new page, copy the data to the front
			 * and release the reference to the old page.
			 */
			n = m_getcl(M_DONTWAIT, m->m_type, m->m_flags);
			if (n == NULL) {
				m_freem(m0);
				return (NULL);
			}
			/*
			 * Only the head of the chain carries the packet
			 * header; mprev == NULL identifies the head here.
			 * NOTE(review): n was allocated with m->m_flags,
			 * which may already include M_PKTHDR — presumably
			 * M_COPY_PKTHDR then fills in the header contents;
			 * verify against the mbuf macros of this era.
			 */
			if (mprev == NULL && (m->m_flags & M_PKTHDR))
				M_COPY_PKTHDR(n, m);
			memcpy(mtod(n, caddr_t), mtod(m, caddr_t), m->m_len);
			n->m_len = m->m_len;
			n->m_next = m->m_next;
			if (mprev == NULL)
				m0 = n;			/* new head of chain */
			else
				mprev->m_next = n;	/* replace old mbuf */
			m_free(m);			/* release old mbuf */
			mprev = n;
			newipsecstat.ips_clcopied++;
		} else {
			/* XXX: this ignores mbuf types */
			memcpy(mtod(mprev, caddr_t) + mprev->m_len,
			       mtod(m, caddr_t), m->m_len);
			mprev->m_len += m->m_len;
			mprev->m_next = m->m_next;	/* unlink from chain */
			m_free(m);			/* reclaim mbuf */
			newipsecstat.ips_clcoalesced++;
		}
	}
	return (m0);
}
12088768458SSam Leffler 
12188768458SSam Leffler /*
12288768458SSam Leffler  * Make space for a new header of length hlen at offset off
12388768458SSam Leffler  * in the packet.  When doing this we allocate new mbufs only
12488768458SSam Leffler  * when absolutely necessary.  The mbuf where the new header
12588768458SSam Leffler  * is to go is returned together with an offset into the mbuf.
12688768458SSam Leffler  * If NULL is returned then the mbuf chain may have been modified;
12788768458SSam Leffler  * the caller is assumed to always free the chain.
12888768458SSam Leffler  */
12988768458SSam Leffler struct mbuf *
13088768458SSam Leffler m_makespace(struct mbuf *m0, int skip, int hlen, int *off)
13188768458SSam Leffler {
13288768458SSam Leffler 	struct mbuf *m;
13388768458SSam Leffler 	unsigned remain;
13488768458SSam Leffler 
13588768458SSam Leffler 	KASSERT(m0 != NULL, ("m_dmakespace: null mbuf"));
13688768458SSam Leffler 	KASSERT(hlen < MHLEN, ("m_makespace: hlen too big: %u", hlen));
13788768458SSam Leffler 
13888768458SSam Leffler 	for (m = m0; m && skip > m->m_len; m = m->m_next)
13988768458SSam Leffler 		skip -= m->m_len;
14088768458SSam Leffler 	if (m == NULL)
14188768458SSam Leffler 		return (NULL);
14288768458SSam Leffler 	/*
14388768458SSam Leffler 	 * At this point skip is the offset into the mbuf m
14488768458SSam Leffler 	 * where the new header should be placed.  Figure out
14588768458SSam Leffler 	 * if there's space to insert the new header.  If so,
14688768458SSam Leffler 	 * and copying the remainder makese sense then do so.
14788768458SSam Leffler 	 * Otherwise insert a new mbuf in the chain, splitting
14888768458SSam Leffler 	 * the contents of m as needed.
14988768458SSam Leffler 	 */
15088768458SSam Leffler 	remain = m->m_len - skip;		/* data to move */
15188768458SSam Leffler 	/* XXX code doesn't handle clusters XXX */
15288768458SSam Leffler 	KASSERT(remain < MLEN, ("m_makespace: remainder too big: %u", remain));
15388768458SSam Leffler 	if (hlen > M_TRAILINGSPACE(m)) {
15488768458SSam Leffler 		struct mbuf *n;
15588768458SSam Leffler 
15688768458SSam Leffler 		/*
15788768458SSam Leffler 		 * Not enough space in m, split the contents
15888768458SSam Leffler 		 * of m, inserting new mbufs as required.
15988768458SSam Leffler 		 *
16088768458SSam Leffler 		 * NB: this ignores mbuf types.
16188768458SSam Leffler 		 */
16288768458SSam Leffler 		MGET(n, M_DONTWAIT, MT_DATA);
16388768458SSam Leffler 		if (n == NULL)
16488768458SSam Leffler 			return (NULL);
16588768458SSam Leffler 		n->m_next = m->m_next;		/* splice new mbuf */
16688768458SSam Leffler 		m->m_next = n;
16788768458SSam Leffler 		newipsecstat.ips_mbinserted++;
16888768458SSam Leffler 		if (hlen <= M_TRAILINGSPACE(m) + remain) {
16988768458SSam Leffler 			/*
17088768458SSam Leffler 			 * New header fits in the old mbuf if we copy
17188768458SSam Leffler 			 * the remainder; just do the copy to the new
17288768458SSam Leffler 			 * mbuf and we're good to go.
17388768458SSam Leffler 			 */
17488768458SSam Leffler 			memcpy(mtod(n, caddr_t),
17588768458SSam Leffler 			       mtod(m, caddr_t) + skip, remain);
17688768458SSam Leffler 			n->m_len = remain;
17788768458SSam Leffler 			m->m_len = skip + hlen;
17888768458SSam Leffler 			*off = skip;
17988768458SSam Leffler 		} else {
18088768458SSam Leffler 			/*
18188768458SSam Leffler 			 * No space in the old mbuf for the new header.
18288768458SSam Leffler 			 * Make space in the new mbuf and check the
18388768458SSam Leffler 			 * remainder'd data fits too.  If not then we
18488768458SSam Leffler 			 * must allocate an additional mbuf (yech).
18588768458SSam Leffler 			 */
18688768458SSam Leffler 			n->m_len = 0;
18788768458SSam Leffler 			if (remain + hlen > M_TRAILINGSPACE(n)) {
18888768458SSam Leffler 				struct mbuf *n2;
18988768458SSam Leffler 
19088768458SSam Leffler 				MGET(n2, M_DONTWAIT, MT_DATA);
19188768458SSam Leffler 				/* NB: new mbuf is on chain, let caller free */
19288768458SSam Leffler 				if (n2 == NULL)
19388768458SSam Leffler 					return (NULL);
19488768458SSam Leffler 				n2->m_len = 0;
19588768458SSam Leffler 				memcpy(mtod(n2, caddr_t),
19688768458SSam Leffler 				       mtod(m, caddr_t) + skip, remain);
19788768458SSam Leffler 				n2->m_len = remain;
19888768458SSam Leffler 				/* splice in second mbuf */
19988768458SSam Leffler 				n2->m_next = n->m_next;
20088768458SSam Leffler 				n->m_next = n2;
20188768458SSam Leffler 				newipsecstat.ips_mbinserted++;
20288768458SSam Leffler 			} else {
20388768458SSam Leffler 				memcpy(mtod(n, caddr_t) + hlen,
20488768458SSam Leffler 				       mtod(m, caddr_t) + skip, remain);
20588768458SSam Leffler 				n->m_len += remain;
20688768458SSam Leffler 			}
20788768458SSam Leffler 			m->m_len -= remain;
20888768458SSam Leffler 			n->m_len += hlen;
20988768458SSam Leffler 			m = n;			/* header is at front ... */
21088768458SSam Leffler 			*off = 0;		/* ... of new mbuf */
21188768458SSam Leffler 		}
21288768458SSam Leffler 	} else {
21388768458SSam Leffler 		/*
21488768458SSam Leffler 		 * Copy the remainder to the back of the mbuf
21588768458SSam Leffler 		 * so there's space to write the new header.
21688768458SSam Leffler 		 */
21788768458SSam Leffler 		/* XXX can this be memcpy? does it handle overlap? */
21888768458SSam Leffler 		ovbcopy(mtod(m, caddr_t) + skip,
21988768458SSam Leffler 			mtod(m, caddr_t) + skip + hlen, remain);
22088768458SSam Leffler 		m->m_len += hlen;
22188768458SSam Leffler 		*off = skip;
22288768458SSam Leffler 	}
22388768458SSam Leffler 	m0->m_pkthdr.len += hlen;		/* adjust packet length */
22488768458SSam Leffler 	return m;
22588768458SSam Leffler }
22688768458SSam Leffler 
22788768458SSam Leffler /*
22888768458SSam Leffler  * m_pad(m, n) pads <m> with <n> bytes at the end. The packet header
22988768458SSam Leffler  * length is updated, and a pointer to the first byte of the padding
23088768458SSam Leffler  * (which is guaranteed to be all in one mbuf) is returned.
23188768458SSam Leffler  */
23288768458SSam Leffler caddr_t
23388768458SSam Leffler m_pad(struct mbuf *m, int n)
23488768458SSam Leffler {
23588768458SSam Leffler 	register struct mbuf *m0, *m1;
23688768458SSam Leffler 	register int len, pad;
23788768458SSam Leffler 	caddr_t retval;
23888768458SSam Leffler 
23988768458SSam Leffler 	if (n <= 0) {  /* No stupid arguments. */
24088768458SSam Leffler 		DPRINTF(("m_pad: pad length invalid (%d)\n", n));
24188768458SSam Leffler 		m_freem(m);
24288768458SSam Leffler 		return NULL;
24388768458SSam Leffler 	}
24488768458SSam Leffler 
24588768458SSam Leffler 	len = m->m_pkthdr.len;
24688768458SSam Leffler 	pad = n;
24788768458SSam Leffler 	m0 = m;
24888768458SSam Leffler 
24988768458SSam Leffler 	while (m0->m_len < len) {
25088768458SSam Leffler KASSERT(m0->m_next != NULL, ("m_pad: m0 null, len %u m_len %u", len, m0->m_len));/*XXX*/
25188768458SSam Leffler 		len -= m0->m_len;
25288768458SSam Leffler 		m0 = m0->m_next;
25388768458SSam Leffler 	}
25488768458SSam Leffler 
25588768458SSam Leffler 	if (m0->m_len != len) {
25688768458SSam Leffler 		DPRINTF(("m_pad: length mismatch (should be %d instead of %d)\n",
25788768458SSam Leffler 		    m->m_pkthdr.len, m->m_pkthdr.len + m0->m_len - len));
25888768458SSam Leffler 
25988768458SSam Leffler 		m_freem(m);
26088768458SSam Leffler 		return NULL;
26188768458SSam Leffler 	}
26288768458SSam Leffler 
26388768458SSam Leffler 	/* Check for zero-length trailing mbufs, and find the last one. */
26488768458SSam Leffler 	for (m1 = m0; m1->m_next; m1 = m1->m_next) {
26588768458SSam Leffler 		if (m1->m_next->m_len != 0) {
26688768458SSam Leffler 			DPRINTF(("m_pad: length mismatch (should be %d "
26788768458SSam Leffler 			    "instead of %d)\n",
26888768458SSam Leffler 			    m->m_pkthdr.len,
26988768458SSam Leffler 			    m->m_pkthdr.len + m1->m_next->m_len));
27088768458SSam Leffler 
27188768458SSam Leffler 			m_freem(m);
27288768458SSam Leffler 			return NULL;
27388768458SSam Leffler 		}
27488768458SSam Leffler 
27588768458SSam Leffler 		m0 = m1->m_next;
27688768458SSam Leffler 	}
27788768458SSam Leffler 
27888768458SSam Leffler 	if (pad > M_TRAILINGSPACE(m0)) {
27988768458SSam Leffler 		/* Add an mbuf to the chain. */
28088768458SSam Leffler 		MGET(m1, M_DONTWAIT, MT_DATA);
28188768458SSam Leffler 		if (m1 == 0) {
28288768458SSam Leffler 			m_freem(m0);
28388768458SSam Leffler 			DPRINTF(("m_pad: unable to get extra mbuf\n"));
28488768458SSam Leffler 			return NULL;
28588768458SSam Leffler 		}
28688768458SSam Leffler 
28788768458SSam Leffler 		m0->m_next = m1;
28888768458SSam Leffler 		m0 = m1;
28988768458SSam Leffler 		m0->m_len = 0;
29088768458SSam Leffler 	}
29188768458SSam Leffler 
29288768458SSam Leffler 	retval = m0->m_data + m0->m_len;
29388768458SSam Leffler 	m0->m_len += pad;
29488768458SSam Leffler 	m->m_pkthdr.len += pad;
29588768458SSam Leffler 
29688768458SSam Leffler 	return retval;
29788768458SSam Leffler }
29888768458SSam Leffler 
29988768458SSam Leffler /*
30088768458SSam Leffler  * Remove hlen data at offset skip in the packet.  This is used by
30188768458SSam Leffler  * the protocols strip protocol headers and associated data (e.g. IV,
30288768458SSam Leffler  * authenticator) on input.
30388768458SSam Leffler  */
int
m_striphdr(struct mbuf *m, int skip, int hlen)
{
	struct mbuf *m1;
	int roff;

	/* Find beginning of header */
	m1 = m_getptr(m, skip, &roff);
	if (m1 == NULL)
		return (EINVAL);

	/* Remove the header and associated data from the mbuf. */
	if (roff == 0) {
		/* The header was at the beginning of the mbuf */
		newipsecstat.ips_input_front++;
		m_adj(m1, hlen);
		/*
		 * m_adj updates pkthdr.len itself when its argument
		 * carries M_PKTHDR; only adjust the head's count here
		 * when m1 is an interior mbuf without a pkthdr.
		 */
		if ((m1->m_flags & M_PKTHDR) == 0)
			m->m_pkthdr.len -= hlen;
	} else if (roff + hlen >= m1->m_len) {
		struct mbuf *mo;

		/*
		 * Part or all of the header is at the end of this mbuf,
		 * so first let's remove the remainder of the header from
		 * the beginning of the remainder of the mbuf chain, if any.
		 */
		newipsecstat.ips_input_end++;
		if (roff + hlen > m1->m_len) {
			/* Adjust the next mbuf by the remainder */
			m_adj(m1->m_next, roff + hlen - m1->m_len);

			/* The second mbuf is guaranteed not to have a pkthdr... */
			m->m_pkthdr.len -= (roff + hlen - m1->m_len);
		}

		/*
		 * Now, let's unlink the mbuf chain for a second...
		 * Detaching the tail keeps the negative m_adj below
		 * from walking past m1 when trimming.
		 */
		mo = m1->m_next;
		m1->m_next = NULL;

		/* ...and trim the end of the first part of the chain...sick */
		m_adj(m1, -(m1->m_len - roff));
		/* Same M_PKTHDR bookkeeping rule as the roff == 0 case. */
		if ((m1->m_flags & M_PKTHDR) == 0)
			m->m_pkthdr.len -= (m1->m_len - roff);

		/* Finally, let's relink */
		m1->m_next = mo;
	} else {
		/*
		 * The header lies in the "middle" of the mbuf; copy
		 * the remainder of the mbuf down over the header.
		 * bcopy (not memcpy) since source and destination
		 * regions within m1 overlap.
		 */
		newipsecstat.ips_input_middle++;
		bcopy(mtod(m1, u_char *) + roff + hlen,
		      mtod(m1, u_char *) + roff,
		      m1->m_len - (roff + hlen));
		m1->m_len -= hlen;
		m->m_pkthdr.len -= hlen;
	}
	return (0);
}
36488768458SSam Leffler 
36588768458SSam Leffler /*
36688768458SSam Leffler  * Diagnostic routine to check mbuf alignment as required by the
36788768458SSam Leffler  * crypto device drivers (that use DMA).
36888768458SSam Leffler  */
36988768458SSam Leffler void
37088768458SSam Leffler m_checkalignment(const char* where, struct mbuf *m0, int off, int len)
37188768458SSam Leffler {
37288768458SSam Leffler 	int roff;
37388768458SSam Leffler 	struct mbuf *m = m_getptr(m0, off, &roff);
37488768458SSam Leffler 	caddr_t addr;
37588768458SSam Leffler 
37688768458SSam Leffler 	if (m == NULL)
37788768458SSam Leffler 		return;
37888768458SSam Leffler 	printf("%s (off %u len %u): ", where, off, len);
37988768458SSam Leffler 	addr = mtod(m, caddr_t) + roff;
38088768458SSam Leffler 	do {
38188768458SSam Leffler 		int mlen;
38288768458SSam Leffler 
38388768458SSam Leffler 		if (((uintptr_t) addr) & 3) {
38488768458SSam Leffler 			printf("addr misaligned %p,", addr);
38588768458SSam Leffler 			break;
38688768458SSam Leffler 		}
38788768458SSam Leffler 		mlen = m->m_len;
38888768458SSam Leffler 		if (mlen > len)
38988768458SSam Leffler 			mlen = len;
39088768458SSam Leffler 		len -= mlen;
39188768458SSam Leffler 		if (len && (mlen & 3)) {
39288768458SSam Leffler 			printf("len mismatch %u,", mlen);
39388768458SSam Leffler 			break;
39488768458SSam Leffler 		}
39588768458SSam Leffler 		m = m->m_next;
39688768458SSam Leffler 		addr = m ? mtod(m, caddr_t) : NULL;
39788768458SSam Leffler 	} while (m && len > 0);
39888768458SSam Leffler 	for (m = m0; m; m = m->m_next)
39988768458SSam Leffler 		printf(" [%p:%u]", mtod(m, caddr_t), m->m_len);
40088768458SSam Leffler 	printf("\n");
40188768458SSam Leffler }
402