xref: /freebsd/sys/kern/uipc_mbuf.c (revision 1e413cf93298b5b97441a21d9a50fdcd0ee9945e)
1 /*-
2  * Copyright (c) 1982, 1986, 1988, 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 4. Neither the name of the University nor the names of its contributors
14  *    may be used to endorse or promote products derived from this software
15  *    without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 #include "opt_mac.h"
36 #include "opt_param.h"
37 #include "opt_mbuf_stress_test.h"
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/kernel.h>
42 #include <sys/limits.h>
43 #include <sys/lock.h>
44 #include <sys/malloc.h>
45 #include <sys/mbuf.h>
46 #include <sys/sysctl.h>
47 #include <sys/domain.h>
48 #include <sys/protosw.h>
49 #include <sys/uio.h>
50 
51 #include <security/mac/mac_framework.h>
52 
53 int	max_linkhdr;
54 int	max_protohdr;
55 int	max_hdr;
56 int	max_datalen;
57 #ifdef MBUF_STRESS_TEST
58 int	m_defragpackets;
59 int	m_defragbytes;
60 int	m_defraguseless;
61 int	m_defragfailure;
62 int	m_defragrandomfailures;
63 #endif
64 
65 /*
66  * sysctl(8) exported objects
67  */
68 SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RD,
69 	   &max_linkhdr, 0, "Size of largest link layer header");
70 SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RD,
71 	   &max_protohdr, 0, "Size of largest protocol layer header");
72 SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RD,
73 	   &max_hdr, 0, "Size of largest link plus protocol header");
74 SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RD,
75 	   &max_datalen, 0, "Minimum space left in mbuf after max_hdr");
76 #ifdef MBUF_STRESS_TEST
77 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
78 	   &m_defragpackets, 0, "");
79 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
80 	   &m_defragbytes, 0, "");
81 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
82 	   &m_defraguseless, 0, "");
83 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
84 	   &m_defragfailure, 0, "");
85 SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
86 	   &m_defragrandomfailures, 0, "");
87 #endif
88 
89 /*
90  * Allocate a given length worth of mbufs and/or clusters (whatever fits
91  * best) and return a pointer to the top of the allocated chain.  If an
92  * existing mbuf chain is provided, then we will append the new chain
93  * to the existing one but still return the top of the newly allocated
94  * chain.
95  */
96 struct mbuf *
97 m_getm2(struct mbuf *m, int len, int how, short type, int flags)
98 {
99 	struct mbuf *mb, *nm = NULL, *mtail = NULL;
100 
101 	KASSERT(len >= 0, ("%s: len is < 0", __func__));
102 
103 	/* Validate flags. */
104 	flags &= (M_PKTHDR | M_EOR);
105 
106 	/* Packet header mbuf must be first in chain. */
107 	if ((flags & M_PKTHDR) && m != NULL)
108 		flags &= ~M_PKTHDR;
109 
110 	/* Loop and append maximum sized mbufs to the chain tail. */
111 	while (len > 0) {
112 		if (len > MCLBYTES)
113 			mb = m_getjcl(how, type, (flags & M_PKTHDR),
114 			    MJUMPAGESIZE);
115 		else if (len >= MINCLSIZE)
116 			mb = m_getcl(how, type, (flags & M_PKTHDR));
117 		else if (flags & M_PKTHDR)
118 			mb = m_gethdr(how, type);
119 		else
120 			mb = m_get(how, type);
121 
122 		/* Fail the whole operation if one mbuf can't be allocated. */
123 		if (mb == NULL) {
124 			if (nm != NULL)
125 				m_freem(nm);
126 			return (NULL);
127 		}
128 
129 		/* Book keeping. */
130 		len -= (mb->m_flags & M_EXT) ? mb->m_ext.ext_size :
131 			((mb->m_flags & M_PKTHDR) ? MHLEN : MLEN);
132 		if (mtail != NULL)
133 			mtail->m_next = mb;
134 		else
135 			nm = mb;
136 		mtail = mb;
137 		flags &= ~M_PKTHDR;	/* Only valid on the first mbuf. */
138 	}
139 	if (flags & M_EOR)
140 		mtail->m_flags |= M_EOR;  /* Only valid on the last mbuf. */
141 
142 	/* If mbuf was supplied, append new chain to the end of it. */
143 	if (m != NULL) {
144 		for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next)
145 			;
146 		mtail->m_next = nm;
147 		mtail->m_flags &= ~M_EOR;
148 	} else
149 		m = nm;
150 
151 	return (m);
152 }
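
/*
 * Usage sketch (illustrative): a consumer that needs space for a 9000 byte
 * payload can request a fresh packet header chain and later release it
 * again with m_freem():
 *
 *	struct mbuf *chain;
 *
 *	chain = m_getm2(NULL, 9000, M_DONTWAIT, MT_DATA, M_PKTHDR);
 *	if (chain == NULL)
 *		return (ENOBUFS);
 */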
153 
154 /*
155  * Free an entire chain of mbufs and associated external buffers, if
156  * applicable.
157  */
158 void
159 m_freem(struct mbuf *mb)
160 {
161 
162 	while (mb != NULL)
163 		mb = m_free(mb);
164 }
165 
166 /*-
167  * Configure a provided mbuf to refer to the provided external storage
168  * buffer and set up a reference count for said buffer.  If setting
169  * up the reference count fails, the M_EXT bit will not be set.  If
170  * successful, the M_EXT bit is set in the mbuf's flags.
171  *
172  * Arguments:
173  *    mb     The existing mbuf to which to attach the provided buffer.
174  *    buf    The address of the provided external storage buffer.
175  *    size   The size of the provided buffer.
176  *    freef  A pointer to a routine that is responsible for freeing the
177  *           provided external storage buffer.
178  *    args   A pointer to an argument structure (of any type) to be passed
179  *           to the provided freef routine (may be NULL).
180  *    flags  Any other flags to be passed to the provided mbuf.
181  *    type   The type that the external storage buffer should be
182  *           labeled with.
183  *
184  * Returns:
185  *    Nothing.
186  */
187 void
188 m_extadd(struct mbuf *mb, caddr_t buf, u_int size,
189     void (*freef)(void *, void *), void *args, int flags, int type)
190 {
191 	KASSERT(type != EXT_CLUSTER, ("%s: EXT_CLUSTER not allowed", __func__));
192 
193 	if (type != EXT_EXTREF)
194 		mb->m_ext.ref_cnt = (u_int *)uma_zalloc(zone_ext_refcnt, M_NOWAIT);
195 	if (mb->m_ext.ref_cnt != NULL) {
196 		*(mb->m_ext.ref_cnt) = 1;
197 		mb->m_flags |= (M_EXT | flags);
198 		mb->m_ext.ext_buf = buf;
199 		mb->m_data = mb->m_ext.ext_buf;
200 		mb->m_ext.ext_size = size;
201 		mb->m_ext.ext_free = freef;
202 		mb->m_ext.ext_args = args;
203 		mb->m_ext.ext_type = type;
204	}
205 }
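
/*
 * Usage sketch (illustrative): a network driver that owns a DMA buffer can
 * attach it to an mbuf as below; mydrv_rxbuf_free(), sc and MYDRV_RXBUF_SIZE
 * are hypothetical driver-provided names:
 *
 *	m_extadd(m, (caddr_t)sc->rxbuf, MYDRV_RXBUF_SIZE,
 *	    mydrv_rxbuf_free, sc, 0, EXT_NET_DRV);
 */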
206 
207 /*
208  * Non-directly-exported function to clean up after mbufs with M_EXT
209  * storage attached to them if the reference count hits 1.
210  */
211 void
212 mb_free_ext(struct mbuf *m)
213 {
214 	int skipmbuf;
215 
216 	KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
217 	KASSERT(m->m_ext.ref_cnt != NULL, ("%s: ref_cnt not set", __func__));
218 
219 
220 	/*
221 	 * Check whether the mbuf header is embedded in the cluster (M_NOFREE).
222 	 */
223 	skipmbuf = (m->m_flags & M_NOFREE);
224 
225 	/* Free attached storage if this mbuf is the only reference to it. */
226 	if (*(m->m_ext.ref_cnt) == 1 ||
227 	    atomic_fetchadd_int(m->m_ext.ref_cnt, -1) == 1) {
228 		switch (m->m_ext.ext_type) {
229 		case EXT_PACKET:	/* The packet zone is special. */
230 			if (*(m->m_ext.ref_cnt) == 0)
231 				*(m->m_ext.ref_cnt) = 1;
232 			uma_zfree(zone_pack, m);
233 			return;		/* Job done. */
234 		case EXT_CLUSTER:
235 			uma_zfree(zone_clust, m->m_ext.ext_buf);
236 			break;
237 		case EXT_JUMBOP:
238 			uma_zfree(zone_jumbop, m->m_ext.ext_buf);
239 			break;
240 		case EXT_JUMBO9:
241 			uma_zfree(zone_jumbo9, m->m_ext.ext_buf);
242 			break;
243 		case EXT_JUMBO16:
244 			uma_zfree(zone_jumbo16, m->m_ext.ext_buf);
245 			break;
246 		case EXT_SFBUF:
247 		case EXT_NET_DRV:
248 		case EXT_MOD_TYPE:
249 		case EXT_DISPOSABLE:
250 			*(m->m_ext.ref_cnt) = 0;
251 			uma_zfree(zone_ext_refcnt, __DEVOLATILE(u_int *,
252 				m->m_ext.ref_cnt));
253 			/* FALLTHROUGH */
254 		case EXT_EXTREF:
255 			KASSERT(m->m_ext.ext_free != NULL,
256 				("%s: ext_free not set", __func__));
257 			(*(m->m_ext.ext_free))(m->m_ext.ext_buf,
258 			    m->m_ext.ext_args);
259 			break;
260 		default:
261 			KASSERT(m->m_ext.ext_type == 0,
262 				("%s: unknown ext_type", __func__));
263 		}
264 	}
265 	if (skipmbuf)
266 		return;
267 
268 	/*
269 	 * Free this mbuf back to the mbuf zone with all m_ext
270 	 * information purged.
271 	 */
272 	m->m_ext.ext_buf = NULL;
273 	m->m_ext.ext_free = NULL;
274 	m->m_ext.ext_args = NULL;
275 	m->m_ext.ref_cnt = NULL;
276 	m->m_ext.ext_size = 0;
277 	m->m_ext.ext_type = 0;
278 	m->m_flags &= ~M_EXT;
279 	uma_zfree(zone_mbuf, m);
280 }
281 
282 /*
283  * Attach the cluster from *m to *n, set up m_ext in *n
284  * and bump the refcount of the cluster.
285  */
286 static void
287 mb_dupcl(struct mbuf *n, struct mbuf *m)
288 {
289 	KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
290 	KASSERT(m->m_ext.ref_cnt != NULL, ("%s: ref_cnt not set", __func__));
291 	KASSERT((n->m_flags & M_EXT) == 0, ("%s: M_EXT set", __func__));
292 
293 	if (*(m->m_ext.ref_cnt) == 1)
294 		*(m->m_ext.ref_cnt) += 1;
295 	else
296 		atomic_add_int(m->m_ext.ref_cnt, 1);
297 	n->m_ext.ext_buf = m->m_ext.ext_buf;
298 	n->m_ext.ext_free = m->m_ext.ext_free;
299 	n->m_ext.ext_args = m->m_ext.ext_args;
300 	n->m_ext.ext_size = m->m_ext.ext_size;
301 	n->m_ext.ref_cnt = m->m_ext.ref_cnt;
302 	n->m_ext.ext_type = m->m_ext.ext_type;
303 	n->m_flags |= M_EXT;
304 }
305 
306 /*
307  * Clean up mbuf (chain) from any tags and packet headers.
308  * If "all" is set then the first mbuf in the chain will be
309  * cleaned too.
310  */
311 void
312 m_demote(struct mbuf *m0, int all)
313 {
314 	struct mbuf *m;
315 
316 	for (m = all ? m0 : m0->m_next; m != NULL; m = m->m_next) {
317 		if (m->m_flags & M_PKTHDR) {
318 			m_tag_delete_chain(m, NULL);
319 			m->m_flags &= ~M_PKTHDR;
320 			bzero(&m->m_pkthdr, sizeof(struct pkthdr));
321 		}
322 		if (m->m_type == MT_HEADER)
323 			m->m_type = MT_DATA;
324 		if (m != m0 && m->m_nextpkt != NULL)
325 			m->m_nextpkt = NULL;
326 		m->m_flags = m->m_flags & (M_EXT|M_EOR|M_RDONLY|M_FREELIST);
327 	}
328 }
329 
330 /*
331  * Sanity checks on mbuf (chain) for use in KASSERT() and general
332  * debugging.
333  * Returns 0 (or panics) when something is bad, and 1 when all tests pass.
334  * The sanitize argument selects the failure behaviour: 0 runs
335  * M_SANITY_ACTION, 1 garbles the offending fields so they blow up later.
336  */
337 int
338 m_sanity(struct mbuf *m0, int sanitize)
339 {
340 	struct mbuf *m;
341 	caddr_t a, b;
342 	int pktlen = 0;
343 
344 #ifdef INVARIANTS
345 #define	M_SANITY_ACTION(s)	panic("mbuf %p: " s, m)
346 #else
347 #define	M_SANITY_ACTION(s)	printf("mbuf %p: " s, m)
348 #endif
349 
350 	for (m = m0; m != NULL; m = m->m_next) {
351 		/*
352 		 * Basic pointer checks.  If any of these fails then some
353 		 * unrelated kernel memory before or after us is trashed.
354 		 * No way to recover from that.
355 		 */
356 		a = ((m->m_flags & M_EXT) ? m->m_ext.ext_buf :
357 			((m->m_flags & M_PKTHDR) ? (caddr_t)(&m->m_pktdat) :
358 			 (caddr_t)(&m->m_dat)) );
359 		b = (caddr_t)(a + (m->m_flags & M_EXT ? m->m_ext.ext_size :
360 			((m->m_flags & M_PKTHDR) ? MHLEN : MLEN)));
361 		if ((caddr_t)m->m_data < a)
362 			M_SANITY_ACTION("m_data outside mbuf data range left");
363 		if ((caddr_t)m->m_data > b)
364 			M_SANITY_ACTION("m_data outside mbuf data range right");
365 		if ((caddr_t)m->m_data + m->m_len > b)
366 			M_SANITY_ACTION("m_data + m_len exceeds mbuf space");
367 		if ((m->m_flags & M_PKTHDR) && m->m_pkthdr.header) {
368 			if ((caddr_t)m->m_pkthdr.header < a ||
369 			    (caddr_t)m->m_pkthdr.header > b)
370 				M_SANITY_ACTION("m_pkthdr.header outside mbuf data range");
371 		}
372 
373 		/* m->m_nextpkt may only be set on first mbuf in chain. */
374 		if (m != m0 && m->m_nextpkt != NULL) {
375 			if (sanitize) {
376 				m_freem(m->m_nextpkt);
377 				m->m_nextpkt = (struct mbuf *)0xDEADC0DE;
378 			} else
379 				M_SANITY_ACTION("m->m_nextpkt on in-chain mbuf");
380 		}
381 
382 		/* packet length (not mbuf length!) calculation */
383 		if (m0->m_flags & M_PKTHDR)
384 			pktlen += m->m_len;
385 
386 		/* m_tags may only be attached to first mbuf in chain. */
387 		if (m != m0 && m->m_flags & M_PKTHDR &&
388 		    !SLIST_EMPTY(&m->m_pkthdr.tags)) {
389 			if (sanitize) {
390 				m_tag_delete_chain(m, NULL);
391 				/* put in 0xDEADC0DE perhaps? */
392 			} else
393 				M_SANITY_ACTION("m_tags on in-chain mbuf");
394 		}
395 
396 		/* M_PKTHDR may only be set on first mbuf in chain */
397 		if (m != m0 && m->m_flags & M_PKTHDR) {
398 			if (sanitize) {
399 				bzero(&m->m_pkthdr, sizeof(m->m_pkthdr));
400 				m->m_flags &= ~M_PKTHDR;
401 				/* put in 0xDEADC0DE and leave hdr flag in */
402 			} else
403 				M_SANITY_ACTION("M_PKTHDR on in-chain mbuf");
404 		}
405 	}
406 	m = m0;
407 	if (pktlen && pktlen != m->m_pkthdr.len) {
408 		if (sanitize)
409 			m->m_pkthdr.len = 0;
410 		else
411 			M_SANITY_ACTION("m_pkthdr.len != mbuf chain length");
412 	}
413 	return 1;
414 
415 #undef	M_SANITY_ACTION
416 }
417 
418 
419 /*
420  * "Move" mbuf pkthdr from "from" to "to".
421  * "from" must have M_PKTHDR set, and "to" must be empty.
422  */
423 void
424 m_move_pkthdr(struct mbuf *to, struct mbuf *from)
425 {
426 
427 #if 0
428 	/* see below for why these are not enabled */
429 	M_ASSERTPKTHDR(to);
430 	/* Note: with MAC, this may not be a good assertion. */
431 	KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags),
432 	    ("m_move_pkthdr: to has tags"));
433 #endif
434 #ifdef MAC
435 	/*
436 	 * XXXMAC: It could be this should also occur for non-MAC?
437 	 */
438 	if (to->m_flags & M_PKTHDR)
439 		m_tag_delete_chain(to, NULL);
440 #endif
441 	to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
442 	if ((to->m_flags & M_EXT) == 0)
443 		to->m_data = to->m_pktdat;
444 	to->m_pkthdr = from->m_pkthdr;		/* especially tags */
445 	SLIST_INIT(&from->m_pkthdr.tags);	/* purge tags from src */
446 	from->m_flags &= ~M_PKTHDR;
447 }
448 
449 /*
450  * Duplicate "from"'s mbuf pkthdr in "to".
451  * "from" must have M_PKTHDR set, and "to" must be empty.
452  * In particular, this does a deep copy of the packet tags.
453  */
454 int
455 m_dup_pkthdr(struct mbuf *to, struct mbuf *from, int how)
456 {
457 
458 #if 0
459 	/*
460 	 * The mbuf allocator only initializes the pkthdr
461 	 * when the mbuf is allocated with MGETHDR. Many users
462 	 * (e.g. m_copy*, m_prepend) use MGET and then
463 	 * smash the pkthdr as needed causing these
464 	 * assertions to trip.  For now just disable them.
465 	 */
466 	M_ASSERTPKTHDR(to);
467 	/* Note: with MAC, this may not be a good assertion. */
468 	KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags), ("m_dup_pkthdr: to has tags"));
469 #endif
470 	MBUF_CHECKSLEEP(how);
471 #ifdef MAC
472 	if (to->m_flags & M_PKTHDR)
473 		m_tag_delete_chain(to, NULL);
474 #endif
475 	to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
476 	if ((to->m_flags & M_EXT) == 0)
477 		to->m_data = to->m_pktdat;
478 	to->m_pkthdr = from->m_pkthdr;
479 	SLIST_INIT(&to->m_pkthdr.tags);
480 	return (m_tag_copy_chain(to, from, MBTOM(how)));
481 }
482 
483 /*
484  * Lesser-used path for M_PREPEND:
485  * allocate new mbuf to prepend to chain,
486  * copy junk along.
487  */
488 struct mbuf *
489 m_prepend(struct mbuf *m, int len, int how)
490 {
491 	struct mbuf *mn;
492 
493 	if (m->m_flags & M_PKTHDR)
494 		MGETHDR(mn, how, m->m_type);
495 	else
496 		MGET(mn, how, m->m_type);
497 	if (mn == NULL) {
498 		m_freem(m);
499 		return (NULL);
500 	}
501 	if (m->m_flags & M_PKTHDR)
502 		M_MOVE_PKTHDR(mn, m);
503 	mn->m_next = m;
504 	m = mn;
505 	if(m->m_flags & M_PKTHDR) {
506 		if (len < MHLEN)
507 			MH_ALIGN(m, len);
508 	} else {
509 		if (len < MLEN)
510 			M_ALIGN(m, len);
511 	}
512 	m->m_len = len;
513 	return (m);
514 }
515 
516 /*
517  * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
518  * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
519  * The wait parameter is a choice of M_TRYWAIT/M_DONTWAIT from caller.
520  * Note that the copy is read-only, because clusters are not copied,
521  * only their reference counts are incremented.
522  */
523 struct mbuf *
524 m_copym(struct mbuf *m, int off0, int len, int wait)
525 {
526 	struct mbuf *n, **np;
527 	int off = off0;
528 	struct mbuf *top;
529 	int copyhdr = 0;
530 
531 	KASSERT(off >= 0, ("m_copym, negative off %d", off));
532 	KASSERT(len >= 0, ("m_copym, negative len %d", len));
533 	MBUF_CHECKSLEEP(wait);
534 	if (off == 0 && m->m_flags & M_PKTHDR)
535 		copyhdr = 1;
536 	while (off > 0) {
537 		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
538 		if (off < m->m_len)
539 			break;
540 		off -= m->m_len;
541 		m = m->m_next;
542 	}
543 	np = &top;
544 	top = 0;
545 	while (len > 0) {
546 		if (m == NULL) {
547 			KASSERT(len == M_COPYALL,
548 			    ("m_copym, length > size of mbuf chain"));
549 			break;
550 		}
551 		if (copyhdr)
552 			MGETHDR(n, wait, m->m_type);
553 		else
554 			MGET(n, wait, m->m_type);
555 		*np = n;
556 		if (n == NULL)
557 			goto nospace;
558 		if (copyhdr) {
559 			if (!m_dup_pkthdr(n, m, wait))
560 				goto nospace;
561 			if (len == M_COPYALL)
562 				n->m_pkthdr.len -= off0;
563 			else
564 				n->m_pkthdr.len = len;
565 			copyhdr = 0;
566 		}
567 		n->m_len = min(len, m->m_len - off);
568 		if (m->m_flags & M_EXT) {
569 			n->m_data = m->m_data + off;
570 			mb_dupcl(n, m);
571 		} else
572 			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
573 			    (u_int)n->m_len);
574 		if (len != M_COPYALL)
575 			len -= n->m_len;
576 		off = 0;
577 		m = m->m_next;
578 		np = &n->m_next;
579 	}
580 	if (top == NULL)
581 		mbstat.m_mcfail++;	/* XXX: No consistency. */
582 
583 	return (top);
584 nospace:
585 	m_freem(top);
586 	mbstat.m_mcfail++;	/* XXX: No consistency. */
587 	return (NULL);
588 }
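
/*
 * Usage sketch (illustrative): taking a read-only, reference-counted copy
 * of an entire packet before handing the original elsewhere:
 *
 *	struct mbuf *copy;
 *
 *	copy = m_copym(m, 0, M_COPYALL, M_DONTWAIT);
 *	if (copy == NULL)
 *		return (ENOBUFS);
 */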
589 
590 /*
591  * Returns the mbuf chain with a new head for the prepending case.
592  * Copies len bytes starting at off from mbuf (chain) n to mbuf (chain) m,
593  * either prepending or appending the data.
594  * The resulting mbuf (chain) m is fully writable.
595  * m is the destination (made writable),
596  * n is the source, off is the offset into the source, len is the length
597  * from that offset, prep selects the direction: 0 append, 1 prepend,
598  * how: wait or nowait.
599  */
600 
601 static int
602 m_bcopyxxx(void *s, void *t, u_int len)
603 {
604 	bcopy(s, t, (size_t)len);
605 	return 0;
606 }
607 
608 struct mbuf *
609 m_copymdata(struct mbuf *m, struct mbuf *n, int off, int len,
610     int prep, int how)
611 {
612 	struct mbuf *mm, *x, *z, *prev = NULL;
613 	caddr_t p;
614 	int i, nlen = 0;
615 	caddr_t buf[MLEN];
616 
617 	KASSERT(m != NULL && n != NULL, ("m_copymdata, no target or source"));
618 	KASSERT(off >= 0, ("m_copymdata, negative off %d", off));
619 	KASSERT(len >= 0, ("m_copymdata, negative len %d", len));
620 	KASSERT(prep == 0 || prep == 1, ("m_copymdata, unknown direction %d", prep));
621 
622 	mm = m;
623 	if (!prep) {
624 		while(mm->m_next) {
625 			prev = mm;
626 			mm = mm->m_next;
627 		}
628 	}
629 	for (z = n; z != NULL; z = z->m_next)
630 		nlen += z->m_len;
631 	if (len == M_COPYALL)
632 		len = nlen - off;
633 	if (off + len > nlen || len < 1)
634 		return NULL;
635 
636 	if (!M_WRITABLE(mm)) {
637 		/* XXX: Use proper m_xxx function instead. */
638 		x = m_getcl(how, MT_DATA, mm->m_flags);
639 		if (x == NULL)
640 			return NULL;
641 		bcopy(mm->m_ext.ext_buf, x->m_ext.ext_buf, x->m_ext.ext_size);
642 		p = x->m_ext.ext_buf + (mm->m_data - mm->m_ext.ext_buf);
643 		x->m_data = p;
644 		mm->m_next = NULL;
645 		if (mm != m)
646 			prev->m_next = x;
647 		m_free(mm);
648 		mm = x;
649 	}
650 
651 	/*
652 	 * Append/prepend the data, allocating mbufs as necessary.
653 	 */
654 	/* Shortcut if enough free space in first/last mbuf. */
655 	if (!prep && M_TRAILINGSPACE(mm) >= len) {
656 		m_apply(n, off, len, m_bcopyxxx, mtod(mm, caddr_t) +
657 			 mm->m_len);
658 		mm->m_len += len;
659 		mm->m_pkthdr.len += len;
660 		return m;
661 	}
662 	if (prep && M_LEADINGSPACE(mm) >= len) {
663 		mm->m_data = mtod(mm, caddr_t) - len;
664 		m_apply(n, off, len, m_bcopyxxx, mtod(mm, caddr_t));
665 		mm->m_len += len;
666 		mm->m_pkthdr.len += len;
667 		return mm;
668 	}
669 
670 	/* Expand first/last mbuf to cluster if possible. */
671 	if (!prep && !(mm->m_flags & M_EXT) && len > M_TRAILINGSPACE(mm)) {
672 		bcopy(mm->m_data, &buf, mm->m_len);
673 		m_clget(mm, how);
674 		if (!(mm->m_flags & M_EXT))
675 			return NULL;
676 		bcopy(&buf, mm->m_ext.ext_buf, mm->m_len);
677 		mm->m_data = mm->m_ext.ext_buf;
678 		mm->m_pkthdr.header = NULL;
679 	}
680 	if (prep && !(mm->m_flags & M_EXT) && len > M_LEADINGSPACE(mm)) {
681 		bcopy(mm->m_data, &buf, mm->m_len);
682 		m_clget(mm, how);
683 		if (!(mm->m_flags & M_EXT))
684 			return NULL;
685 		bcopy(&buf, (caddr_t *)mm->m_ext.ext_buf +
686 		       mm->m_ext.ext_size - mm->m_len, mm->m_len);
687 		mm->m_data = (caddr_t)mm->m_ext.ext_buf +
688 			      mm->m_ext.ext_size - mm->m_len;
689 		mm->m_pkthdr.header = NULL;
690 	}
691 
692 	/* Append/prepend as many mbuf (clusters) as necessary to fit len. */
693 	if (!prep && len > M_TRAILINGSPACE(mm)) {
694 		if (!m_getm(mm, len - M_TRAILINGSPACE(mm), how, MT_DATA))
695 			return NULL;
696 	}
697 	if (prep && len > M_LEADINGSPACE(mm)) {
698 		if (!(z = m_getm(NULL, len - M_LEADINGSPACE(mm), how, MT_DATA)))
699 			return NULL;
700 		i = 0;
701 		for (x = z; x != NULL; x = x->m_next) {
702 			i += x->m_flags & M_EXT ? x->m_ext.ext_size :
703 			      (x->m_flags & M_PKTHDR ? MHLEN : MLEN);
704 			if (!x->m_next)
705 				break;
706 		}
707 		z->m_data += i - len;
708 		m_move_pkthdr(mm, z);
709 		x->m_next = mm;
710 		mm = z;
711 	}
712 
713 	/* Seek to start position in source mbuf. Optimization for long chains. */
714 	while (off > 0) {
715 		if (off < n->m_len)
716 			break;
717 		off -= n->m_len;
718 		n = n->m_next;
719 	}
720 
721 	/* Copy data into target mbuf. */
722 	z = mm;
723 	while (len > 0) {
724 		KASSERT(z != NULL, ("m_copymdata, falling off target edge"));
725 		i = M_TRAILINGSPACE(z);
726 		m_apply(n, off, i, m_bcopyxxx, mtod(z, caddr_t) + z->m_len);
727 		z->m_len += i;
728 		/* fixup pkthdr.len if necessary */
729 		if ((prep ? mm : m)->m_flags & M_PKTHDR)
730 			(prep ? mm : m)->m_pkthdr.len += i;
731 		off += i;
732 		len -= i;
733 		z = z->m_next;
734 	}
735 	return (prep ? mm : m);
736 }
737 
738 /*
739  * Copy an entire packet, including header (which must be present).
740  * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
741  * Note that the copy is read-only, because clusters are not copied,
742  * only their reference counts are incremented.
743  * Preserve alignment of the first mbuf so if the creator has left
744  * some room at the beginning (e.g. for inserting protocol headers)
745  * the copies still have the room available.
746  */
747 struct mbuf *
748 m_copypacket(struct mbuf *m, int how)
749 {
750 	struct mbuf *top, *n, *o;
751 
752 	MBUF_CHECKSLEEP(how);
753 	MGET(n, how, m->m_type);
754 	top = n;
755 	if (n == NULL)
756 		goto nospace;
757 
758 	if (!m_dup_pkthdr(n, m, how))
759 		goto nospace;
760 	n->m_len = m->m_len;
761 	if (m->m_flags & M_EXT) {
762 		n->m_data = m->m_data;
763 		mb_dupcl(n, m);
764 	} else {
765 		n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat );
766 		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
767 	}
768 
769 	m = m->m_next;
770 	while (m) {
771 		MGET(o, how, m->m_type);
772 		if (o == NULL)
773 			goto nospace;
774 
775 		n->m_next = o;
776 		n = n->m_next;
777 
778 		n->m_len = m->m_len;
779 		if (m->m_flags & M_EXT) {
780 			n->m_data = m->m_data;
781 			mb_dupcl(n, m);
782 		} else {
783 			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
784 		}
785 
786 		m = m->m_next;
787 	}
788 	return top;
789 nospace:
790 	m_freem(top);
791 	mbstat.m_mcfail++;	/* XXX: No consistency. */
792 	return (NULL);
793 }
794 
795 /*
796  * Copy data from an mbuf chain starting "off" bytes from the beginning,
797  * continuing for "len" bytes, into the indicated buffer.
798  */
799 void
800 m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
801 {
802 	u_int count;
803 
804 	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
805 	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
806 	while (off > 0) {
807 		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
808 		if (off < m->m_len)
809 			break;
810 		off -= m->m_len;
811 		m = m->m_next;
812 	}
813 	while (len > 0) {
814 		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
815 		count = min(m->m_len - off, len);
816 		bcopy(mtod(m, caddr_t) + off, cp, count);
817 		len -= count;
818 		cp += count;
819 		off = 0;
820 		m = m->m_next;
821 	}
822 }
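
/*
 * Usage sketch (illustrative): copying a protocol header out of a chain
 * into an aligned local structure instead of dereferencing mtod() across
 * a possible mbuf boundary:
 *
 *	struct ip iphdr;
 *
 *	m_copydata(m, 0, sizeof(iphdr), (caddr_t)&iphdr);
 */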
823 
824 /*
825  * Copy a packet header mbuf chain into a completely new chain, including
826  * copying any mbuf clusters.  Use this instead of m_copypacket() when
827  * you need a writable copy of an mbuf chain.
828  */
829 struct mbuf *
830 m_dup(struct mbuf *m, int how)
831 {
832 	struct mbuf **p, *top = NULL;
833 	int remain, moff, nsize;
834 
835 	MBUF_CHECKSLEEP(how);
836 	/* Sanity check */
837 	if (m == NULL)
838 		return (NULL);
839 	M_ASSERTPKTHDR(m);
840 
841 	/* While there's more data, get a new mbuf, tack it on, and fill it */
842 	remain = m->m_pkthdr.len;
843 	moff = 0;
844 	p = &top;
845 	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
846 		struct mbuf *n;
847 
848 		/* Get the next new mbuf */
849 		if (remain >= MINCLSIZE) {
850 			n = m_getcl(how, m->m_type, 0);
851 			nsize = MCLBYTES;
852 		} else {
853 			n = m_get(how, m->m_type);
854 			nsize = MLEN;
855 		}
856 		if (n == NULL)
857 			goto nospace;
858 
859 		if (top == NULL) {		/* First one, must be PKTHDR */
860 			if (!m_dup_pkthdr(n, m, how)) {
861 				m_free(n);
862 				goto nospace;
863 			}
864 			if ((n->m_flags & M_EXT) == 0)
865 				nsize = MHLEN;
866 		}
867 		n->m_len = 0;
868 
869 		/* Link it into the new chain */
870 		*p = n;
871 		p = &n->m_next;
872 
873 		/* Copy data from original mbuf(s) into new mbuf */
874 		while (n->m_len < nsize && m != NULL) {
875 			int chunk = min(nsize - n->m_len, m->m_len - moff);
876 
877 			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
878 			moff += chunk;
879 			n->m_len += chunk;
880 			remain -= chunk;
881 			if (moff == m->m_len) {
882 				m = m->m_next;
883 				moff = 0;
884 			}
885 		}
886 
887 		/* Check correct total mbuf length */
888 		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
889 		    	("%s: bogus m_pkthdr.len", __func__));
890 	}
891 	return (top);
892 
893 nospace:
894 	m_freem(top);
895 	mbstat.m_mcfail++;	/* XXX: No consistency. */
896 	return (NULL);
897 }
898 
899 /*
900  * Concatenate mbuf chain n to m.
901  * Both chains must be of the same type (e.g. MT_DATA).
902  * Any m_pkthdr is not updated.
903  */
904 void
905 m_cat(struct mbuf *m, struct mbuf *n)
906 {
907 	while (m->m_next)
908 		m = m->m_next;
909 	while (n) {
910 		if (m->m_flags & M_EXT ||
911 		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
912 			/* just join the two chains */
913 			m->m_next = n;
914 			return;
915 		}
916 		/* splat the data from one into the other */
917 		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
918 		    (u_int)n->m_len);
919 		m->m_len += n->m_len;
920 		n = m_free(n);
921 	}
922 }
923 
924 void
925 m_adj(struct mbuf *mp, int req_len)
926 {
927 	int len = req_len;
928 	struct mbuf *m;
929 	int count;
930 
931 	if ((m = mp) == NULL)
932 		return;
933 	if (len >= 0) {
934 		/*
935 		 * Trim from head.
936 		 */
937 		while (m != NULL && len > 0) {
938 			if (m->m_len <= len) {
939 				len -= m->m_len;
940 				m->m_len = 0;
941 				m = m->m_next;
942 			} else {
943 				m->m_len -= len;
944 				m->m_data += len;
945 				len = 0;
946 			}
947 		}
948 		m = mp;
949 		if (mp->m_flags & M_PKTHDR)
950 			m->m_pkthdr.len -= (req_len - len);
951 	} else {
952 		/*
953 		 * Trim from tail.  Scan the mbuf chain,
954 		 * calculating its length and finding the last mbuf.
955 		 * If the adjustment only affects this mbuf, then just
956 		 * adjust and return.  Otherwise, rescan and truncate
957 		 * after the remaining size.
958 		 */
959 		len = -len;
960 		count = 0;
961 		for (;;) {
962 			count += m->m_len;
963 			if (m->m_next == (struct mbuf *)0)
964 				break;
965 			m = m->m_next;
966 		}
967 		if (m->m_len >= len) {
968 			m->m_len -= len;
969 			if (mp->m_flags & M_PKTHDR)
970 				mp->m_pkthdr.len -= len;
971 			return;
972 		}
973 		count -= len;
974 		if (count < 0)
975 			count = 0;
976 		/*
977 		 * Correct length for chain is "count".
978 		 * Find the mbuf with last data, adjust its length,
979 		 * and toss data from remaining mbufs on chain.
980 		 */
981 		m = mp;
982 		if (m->m_flags & M_PKTHDR)
983 			m->m_pkthdr.len = count;
984 		for (; m; m = m->m_next) {
985 			if (m->m_len >= count) {
986 				m->m_len = count;
987 				if (m->m_next != NULL) {
988 					m_freem(m->m_next);
989 					m->m_next = NULL;
990 				}
991 				break;
992 			}
993 			count -= m->m_len;
994 		}
995 	}
996 }
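
/*
 * Usage sketch (illustrative): stripping a link-layer header from the head
 * of a received frame and a CRC from its tail (ETHER_HDR_LEN and
 * ETHER_CRC_LEN come from <net/ethernet.h>):
 *
 *	m_adj(m, ETHER_HDR_LEN);
 *	m_adj(m, -ETHER_CRC_LEN);
 */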
997 
998 /*
999  * Rearrange an mbuf chain so that len bytes are contiguous
1000  * and in the data area of an mbuf (so that mtod and dtom
1001  * will work for a structure of size len).  Returns the resulting
1002  * mbuf chain on success, frees it and returns null on failure.
1003  * If there is room, it will add up to max_protohdr-len extra bytes to the
1004  * contiguous region in an attempt to avoid being called next time.
1005  */
1006 struct mbuf *
1007 m_pullup(struct mbuf *n, int len)
1008 {
1009 	struct mbuf *m;
1010 	int count;
1011 	int space;
1012 
1013 	/*
1014 	 * If first mbuf has no cluster, and has room for len bytes
1015 	 * without shifting current data, pullup into it,
1016 	 * otherwise allocate a new mbuf to prepend to the chain.
1017 	 */
1018 	if ((n->m_flags & M_EXT) == 0 &&
1019 	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
1020 		if (n->m_len >= len)
1021 			return (n);
1022 		m = n;
1023 		n = n->m_next;
1024 		len -= m->m_len;
1025 	} else {
1026 		if (len > MHLEN)
1027 			goto bad;
1028 		MGET(m, M_DONTWAIT, n->m_type);
1029 		if (m == NULL)
1030 			goto bad;
1031 		m->m_len = 0;
1032 		if (n->m_flags & M_PKTHDR)
1033 			M_MOVE_PKTHDR(m, n);
1034 	}
1035 	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
1036 	do {
1037 		count = min(min(max(len, max_protohdr), space), n->m_len);
1038 		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
1039 		  (u_int)count);
1040 		len -= count;
1041 		m->m_len += count;
1042 		n->m_len -= count;
1043 		space -= count;
1044 		if (n->m_len)
1045 			n->m_data += count;
1046 		else
1047 			n = m_free(n);
1048 	} while (len > 0 && n);
1049 	if (len > 0) {
1050 		(void) m_free(m);
1051 		goto bad;
1052 	}
1053 	m->m_next = n;
1054 	return (m);
1055 bad:
1056 	m_freem(n);
1057 	mbstat.m_mpfail++;	/* XXX: No consistency. */
1058 	return (NULL);
1059 }
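
/*
 * Usage sketch (illustrative): the usual idiom before casting m_data to a
 * header structure; note that m_pullup() frees the chain itself on failure:
 *
 *	if (m->m_len < sizeof(struct ip) &&
 *	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
 *		return;
 */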
1060 
1061 /*
1062  * Like m_pullup(), except a new mbuf is always allocated, and we allow
1063  * the amount of empty space before the data in the new mbuf to be specified
1064  * (in the event that the caller expects to prepend later).
1065  */
1066 int MSFail;
1067 
1068 struct mbuf *
1069 m_copyup(struct mbuf *n, int len, int dstoff)
1070 {
1071 	struct mbuf *m;
1072 	int count, space;
1073 
1074 	if (len > (MHLEN - dstoff))
1075 		goto bad;
1076 	MGET(m, M_DONTWAIT, n->m_type);
1077 	if (m == NULL)
1078 		goto bad;
1079 	m->m_len = 0;
1080 	if (n->m_flags & M_PKTHDR)
1081 		M_MOVE_PKTHDR(m, n);
1082 	m->m_data += dstoff;
1083 	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
1084 	do {
1085 		count = min(min(max(len, max_protohdr), space), n->m_len);
1086 		memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t),
1087 		    (unsigned)count);
1088 		len -= count;
1089 		m->m_len += count;
1090 		n->m_len -= count;
1091 		space -= count;
1092 		if (n->m_len)
1093 			n->m_data += count;
1094 		else
1095 			n = m_free(n);
1096 	} while (len > 0 && n);
1097 	if (len > 0) {
1098 		(void) m_free(m);
1099 		goto bad;
1100 	}
1101 	m->m_next = n;
1102 	return (m);
1103  bad:
1104 	m_freem(n);
1105 	MSFail++;
1106 	return (NULL);
1107 }
1108 
1109 /*
1110  * Partition an mbuf chain in two pieces, returning the tail --
1111  * all but the first len0 bytes.  In case of failure, it returns NULL and
1112  * attempts to restore the chain to its original state.
1113  *
1114  * Note that the resulting mbufs might be read-only, because the new
1115  * mbuf can end up sharing an mbuf cluster with the original mbuf if
1116  * the "breaking point" happens to lie within a cluster mbuf. Use the
1117  * M_WRITABLE() macro to check for this case.
1118  */
1119 struct mbuf *
1120 m_split(struct mbuf *m0, int len0, int wait)
1121 {
1122 	struct mbuf *m, *n;
1123 	u_int len = len0, remain;
1124 
1125 	MBUF_CHECKSLEEP(wait);
1126 	for (m = m0; m && len > m->m_len; m = m->m_next)
1127 		len -= m->m_len;
1128 	if (m == NULL)
1129 		return (NULL);
1130 	remain = m->m_len - len;
1131 	if (m0->m_flags & M_PKTHDR) {
1132 		MGETHDR(n, wait, m0->m_type);
1133 		if (n == NULL)
1134 			return (NULL);
1135 		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
1136 		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
1137 		m0->m_pkthdr.len = len0;
1138 		if (m->m_flags & M_EXT)
1139 			goto extpacket;
1140 		if (remain > MHLEN) {
1141 			/* m can't be the lead packet */
1142 			MH_ALIGN(n, 0);
1143 			n->m_next = m_split(m, len, wait);
1144 			if (n->m_next == NULL) {
1145 				(void) m_free(n);
1146 				return (NULL);
1147 			} else {
1148 				n->m_len = 0;
1149 				return (n);
1150 			}
1151 		} else
1152 			MH_ALIGN(n, remain);
1153 	} else if (remain == 0) {
1154 		n = m->m_next;
1155 		m->m_next = NULL;
1156 		return (n);
1157 	} else {
1158 		MGET(n, wait, m->m_type);
1159 		if (n == NULL)
1160 			return (NULL);
1161 		M_ALIGN(n, remain);
1162 	}
1163 extpacket:
1164 	if (m->m_flags & M_EXT) {
1165 		n->m_data = m->m_data + len;
1166 		mb_dupcl(n, m);
1167 	} else {
1168 		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
1169 	}
1170 	n->m_len = remain;
1171 	m->m_len = len;
1172 	n->m_next = m->m_next;
1173 	m->m_next = NULL;
1174 	return (n);
1175 }
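
/*
 * Usage sketch (illustrative): splitting a chain so that "m" keeps its
 * first 128 bytes and "tail" receives the remainder:
 *
 *	struct mbuf *tail;
 *
 *	tail = m_split(m, 128, M_DONTWAIT);
 *	if (tail == NULL)
 *		return (ENOBUFS);
 */
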
1176 /*
1177  * Routine to copy from device local memory into mbufs.
1178  * Note that `off' argument is offset into first mbuf of target chain from
1179  * which to begin copying the data to.
1180  */
1181 struct mbuf *
1182 m_devget(char *buf, int totlen, int off, struct ifnet *ifp,
1183     void (*copy)(char *from, caddr_t to, u_int len))
1184 {
1185 	struct mbuf *m;
1186 	struct mbuf *top = NULL, **mp = &top;
1187 	int len;
1188 
1189 	if (off < 0 || off > MHLEN)
1190 		return (NULL);
1191 
1192 	while (totlen > 0) {
1193 		if (top == NULL) {	/* First one, must be PKTHDR */
1194 			if (totlen + off >= MINCLSIZE) {
1195 				m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1196 				len = MCLBYTES;
1197 			} else {
1198 				m = m_gethdr(M_DONTWAIT, MT_DATA);
1199 				len = MHLEN;
1200 
1201 				/* Place initial small packet/header at end of mbuf */
1202 				if (m && totlen + off + max_linkhdr <= MLEN) {
1203 					m->m_data += max_linkhdr;
1204 					len -= max_linkhdr;
1205 				}
1206 			}
1207 			if (m == NULL)
1208 				return NULL;
1209 			m->m_pkthdr.rcvif = ifp;
1210 			m->m_pkthdr.len = totlen;
1211 		} else {
1212 			if (totlen + off >= MINCLSIZE) {
1213 				m = m_getcl(M_DONTWAIT, MT_DATA, 0);
1214 				len = MCLBYTES;
1215 			} else {
1216 				m = m_get(M_DONTWAIT, MT_DATA);
1217 				len = MLEN;
1218 			}
1219 			if (m == NULL) {
1220 				m_freem(top);
1221 				return NULL;
1222 			}
1223 		}
1224 		if (off) {
1225 			m->m_data += off;
1226 			len -= off;
1227 			off = 0;
1228 		}
1229 		m->m_len = len = min(totlen, len);
1230 		if (copy)
1231 			copy(buf, mtod(m, caddr_t), (u_int)len);
1232 		else
1233 			bcopy(buf, mtod(m, caddr_t), (u_int)len);
1234 		buf += len;
1235 		*mp = m;
1236 		mp = &m->m_next;
1237 		totlen -= len;
1238 	}
1239 	return (top);
1240 }
1241 
1242 /*
1243  * Copy data from a buffer back into the indicated mbuf chain,
1244  * starting "off" bytes from the beginning, extending the mbuf
1245  * chain if necessary.
1246  */
1247 void
1248 m_copyback(struct mbuf *m0, int off, int len, c_caddr_t cp)
1249 {
1250 	int mlen;
1251 	struct mbuf *m = m0, *n;
1252 	int totlen = 0;
1253 
1254 	if (m0 == NULL)
1255 		return;
1256 	while (off > (mlen = m->m_len)) {
1257 		off -= mlen;
1258 		totlen += mlen;
1259 		if (m->m_next == NULL) {
1260 			n = m_get(M_DONTWAIT, m->m_type);
1261 			if (n == NULL)
1262 				goto out;
1263 			bzero(mtod(n, caddr_t), MLEN);
1264 			n->m_len = min(MLEN, len + off);
1265 			m->m_next = n;
1266 		}
1267 		m = m->m_next;
1268 	}
1269 	while (len > 0) {
1270 		mlen = min (m->m_len - off, len);
1271 		bcopy(cp, off + mtod(m, caddr_t), (u_int)mlen);
1272 		cp += mlen;
1273 		len -= mlen;
1274 		mlen += off;
1275 		off = 0;
1276 		totlen += mlen;
1277 		if (len == 0)
1278 			break;
1279 		if (m->m_next == NULL) {
1280 			n = m_get(M_DONTWAIT, m->m_type);
1281 			if (n == NULL)
1282 				break;
1283 			n->m_len = min(MLEN, len);
1284 			m->m_next = n;
1285 		}
1286 		m = m->m_next;
1287 	}
1288 out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
1289 		m->m_pkthdr.len = totlen;
1290 }
1291 
1292 /*
1293  * Append the specified data to the indicated mbuf chain,
1294  * extending the mbuf chain if the new data does not fit in
1295  * existing space.
1296  *
1297  * Return 1 if able to complete the job; otherwise 0.
1298  */
1299 int
1300 m_append(struct mbuf *m0, int len, c_caddr_t cp)
1301 {
1302 	struct mbuf *m, *n;
1303 	int remainder, space;
1304 
1305 	for (m = m0; m->m_next != NULL; m = m->m_next)
1306 		;
1307 	remainder = len;
1308 	space = M_TRAILINGSPACE(m);
1309 	if (space > 0) {
1310 		/*
1311 		 * Copy into available space.
1312 		 */
1313 		if (space > remainder)
1314 			space = remainder;
1315 		bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
1316 		m->m_len += space;
1317 		cp += space, remainder -= space;
1318 	}
1319 	while (remainder > 0) {
1320 		/*
1321 		 * Allocate a new mbuf; could check space
1322 		 * and allocate a cluster instead.
1323 		 */
1324 		n = m_get(M_DONTWAIT, m->m_type);
1325 		if (n == NULL)
1326 			break;
1327 		n->m_len = min(MLEN, remainder);
1328 		bcopy(cp, mtod(n, caddr_t), n->m_len);
1329 		cp += n->m_len, remainder -= n->m_len;
1330 		m->m_next = n;
1331 		m = n;
1332 	}
1333 	if (m0->m_flags & M_PKTHDR)
1334 		m0->m_pkthdr.len += len - remainder;
1335 	return (remainder == 0);
1336 }
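
/*
 * Usage sketch (illustrative): appending a small, caller-supplied trailer
 * structure "trl" to the end of a packet:
 *
 *	if (!m_append(m, sizeof(trl), (c_caddr_t)&trl))
 *		return (ENOBUFS);
 */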
1337 
1338 /*
1339  * Apply function f to the data in an mbuf chain starting "off" bytes from
1340  * the beginning, continuing for "len" bytes.
1341  */
1342 int
1343 m_apply(struct mbuf *m, int off, int len,
1344     int (*f)(void *, void *, u_int), void *arg)
1345 {
1346 	u_int count;
1347 	int rval;
1348 
1349 	KASSERT(off >= 0, ("m_apply, negative off %d", off));
1350 	KASSERT(len >= 0, ("m_apply, negative len %d", len));
1351 	while (off > 0) {
1352 		KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
1353 		if (off < m->m_len)
1354 			break;
1355 		off -= m->m_len;
1356 		m = m->m_next;
1357 	}
1358 	while (len > 0) {
1359 		KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
1360 		count = min(m->m_len - off, len);
1361 		rval = (*f)(arg, mtod(m, caddr_t) + off, count);
1362 		if (rval)
1363 			return (rval);
1364 		len -= count;
1365 		off = 0;
1366 		m = m->m_next;
1367 	}
1368 	return (0);
1369 }
1370 
1371 /*
1372  * Return a pointer to mbuf/offset of location in mbuf chain.
1373  */
1374 struct mbuf *
1375 m_getptr(struct mbuf *m, int loc, int *off)
1376 {
1377 
1378 	while (loc >= 0) {
1379 		/* Normal end of search. */
1380 		if (m->m_len > loc) {
1381 			*off = loc;
1382 			return (m);
1383 		} else {
1384 			loc -= m->m_len;
1385 			if (m->m_next == NULL) {
1386 				if (loc == 0) {
1387 					/* Point at the end of valid data. */
1388 					*off = m->m_len;
1389 					return (m);
1390 				}
1391 				return (NULL);
1392 			}
1393 			m = m->m_next;
1394 		}
1395 	}
1396 	return (NULL);
1397 }
1398 
1399 void
1400 m_print(const struct mbuf *m, int maxlen)
1401 {
1402 	int len;
1403 	int pdata;
1404 	const struct mbuf *m2;
1405 
1406 	if (m->m_flags & M_PKTHDR)
1407 		len = m->m_pkthdr.len;
1408 	else
1409 		len = -1;
1410 	m2 = m;
1411 	while (m2 != NULL && (len == -1 || len)) {
1412 		pdata = m2->m_len;
1413 		if (maxlen != -1 && pdata > maxlen)
1414 			pdata = maxlen;
1415 		printf("mbuf: %p len: %d, next: %p, %b%s", m2, m2->m_len,
1416 		    m2->m_next, m2->m_flags, "\20\20freelist\17skipfw"
1417 		    "\11proto5\10proto4\7proto3\6proto2\5proto1\4rdonly"
1418 		    "\3eor\2pkthdr\1ext", pdata ? "" : "\n");
1419 		if (pdata)
1420 			printf(", %*D\n", pdata, (u_char *)m2->m_data, "-");
1421 		if (len != -1)
1422 			len -= m2->m_len;
1423 		m2 = m2->m_next;
1424 	}
1425 	if (len > 0)
1426 		printf("%d bytes unaccounted for.\n", len);
1427 	return;
1428 }
1429 
1430 u_int
1431 m_fixhdr(struct mbuf *m0)
1432 {
1433 	u_int len;
1434 
1435 	len = m_length(m0, NULL);
1436 	m0->m_pkthdr.len = len;
1437 	return (len);
1438 }
1439 
1440 u_int
1441 m_length(struct mbuf *m0, struct mbuf **last)
1442 {
1443 	struct mbuf *m;
1444 	u_int len;
1445 
1446 	len = 0;
1447 	for (m = m0; m != NULL; m = m->m_next) {
1448 		len += m->m_len;
1449 		if (m->m_next == NULL)
1450 			break;
1451 	}
1452 	if (last != NULL)
1453 		*last = m;
1454 	return (len);
1455 }
1456 
1457 /*
1458  * Defragment an mbuf chain, returning the shortest possible
1459  * chain of mbufs and clusters.  If allocation fails and
1460  * this cannot be completed, NULL will be returned, but
1461  * the passed in chain will be unchanged.  Upon success,
1462  * the original chain will be freed, and the new chain
1463  * will be returned.
1464  *
1465  * If a non-packet header is passed in, the original
1466  * mbuf (chain?) will be returned unharmed.
1467  */
1468 struct mbuf *
1469 m_defrag(struct mbuf *m0, int how)
1470 {
1471 	struct mbuf *m_new = NULL, *m_final = NULL;
1472 	int progress = 0, length;
1473 
1474 	MBUF_CHECKSLEEP(how);
1475 	if (!(m0->m_flags & M_PKTHDR))
1476 		return (m0);
1477 
1478 	m_fixhdr(m0); /* Needed sanity check */
1479 
1480 #ifdef MBUF_STRESS_TEST
1481 	if (m_defragrandomfailures) {
1482 		int temp = arc4random() & 0xff;
1483 		if (temp == 0xba)
1484 			goto nospace;
1485 	}
1486 #endif
1487 
1488 	if (m0->m_pkthdr.len > MHLEN)
1489 		m_final = m_getcl(how, MT_DATA, M_PKTHDR);
1490 	else
1491 		m_final = m_gethdr(how, MT_DATA);
1492 
1493 	if (m_final == NULL)
1494 		goto nospace;
1495 
1496 	if (m_dup_pkthdr(m_final, m0, how) == 0)
1497 		goto nospace;
1498 
1499 	m_new = m_final;
1500 
1501 	while (progress < m0->m_pkthdr.len) {
1502 		length = m0->m_pkthdr.len - progress;
1503 		if (length > MCLBYTES)
1504 			length = MCLBYTES;
1505 
1506 		if (m_new == NULL) {
1507 			if (length > MLEN)
1508 				m_new = m_getcl(how, MT_DATA, 0);
1509 			else
1510 				m_new = m_get(how, MT_DATA);
1511 			if (m_new == NULL)
1512 				goto nospace;
1513 		}
1514 
1515 		m_copydata(m0, progress, length, mtod(m_new, caddr_t));
1516 		progress += length;
1517 		m_new->m_len = length;
1518 		if (m_new != m_final)
1519 			m_cat(m_final, m_new);
1520 		m_new = NULL;
1521 	}
1522 #ifdef MBUF_STRESS_TEST
1523 	if (m0->m_next == NULL)
1524 		m_defraguseless++;
1525 #endif
1526 	m_freem(m0);
1527 	m0 = m_final;
1528 #ifdef MBUF_STRESS_TEST
1529 	m_defragpackets++;
1530 	m_defragbytes += m0->m_pkthdr.len;
1531 #endif
1532 	return (m0);
1533 nospace:
1534 #ifdef MBUF_STRESS_TEST
1535 	m_defragfailure++;
1536 #endif
1537 	if (m_final)
1538 		m_freem(m_final);
1539 	return (NULL);
1540 }
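
/*
 * Usage sketch (illustrative): a transmit path whose DMA map load failed
 * because the chain had too many segments might compact it and retry:
 *
 *	struct mbuf *m2;
 *
 *	m2 = m_defrag(m, M_DONTWAIT);
 *	if (m2 == NULL) {
 *		m_freem(m);
 *		return (ENOBUFS);
 *	}
 *	m = m2;
 */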
1541 
1542 #ifdef MBUF_STRESS_TEST
1543 
1544 /*
1545  * Fragment an mbuf chain.  There's no reason you'd ever want to do
1546  * this in normal usage, but it's great for stress testing various
1547  * mbuf consumers.
1548  *
1549  * If fragmentation is not possible, the original chain will be
1550  * returned.
1551  *
1552  * Possible length values:
1553  * 0	 no fragmentation will occur
1554  * > 0	each fragment will be of the specified length
1555  * -1	each fragment will be the same random value in length
1556  * -2	each fragment's length will be entirely random
1557  * (Random values range from 1 to 256)
1558  */
1559 struct mbuf *
1560 m_fragment(struct mbuf *m0, int how, int length)
1561 {
1562 	struct mbuf *m_new = NULL, *m_final = NULL;
1563 	int progress = 0;
1564 
1565 	if (!(m0->m_flags & M_PKTHDR))
1566 		return (m0);
1567 
1568 	if ((length == 0) || (length < -2))
1569 		return (m0);
1570 
1571 	m_fixhdr(m0); /* Needed sanity check */
1572 
1573 	m_final = m_getcl(how, MT_DATA, M_PKTHDR);
1574 
1575 	if (m_final == NULL)
1576 		goto nospace;
1577 
1578 	if (m_dup_pkthdr(m_final, m0, how) == 0)
1579 		goto nospace;
1580 
1581 	m_new = m_final;
1582 
1583 	if (length == -1)
1584 		length = 1 + (arc4random() & 255);
1585 
1586 	while (progress < m0->m_pkthdr.len) {
1587 		int fraglen;
1588 
1589 		if (length > 0)
1590 			fraglen = length;
1591 		else
1592 			fraglen = 1 + (arc4random() & 255);
1593 		if (fraglen > m0->m_pkthdr.len - progress)
1594 			fraglen = m0->m_pkthdr.len - progress;
1595 
1596 		if (fraglen > MCLBYTES)
1597 			fraglen = MCLBYTES;
1598 
1599 		if (m_new == NULL) {
1600 			m_new = m_getcl(how, MT_DATA, 0);
1601 			if (m_new == NULL)
1602 				goto nospace;
1603 		}
1604 
1605 		m_copydata(m0, progress, fraglen, mtod(m_new, caddr_t));
1606 		progress += fraglen;
1607 		m_new->m_len = fraglen;
1608 		if (m_new != m_final)
1609 			m_cat(m_final, m_new);
1610 		m_new = NULL;
1611 	}
1612 	m_freem(m0);
1613 	m0 = m_final;
1614 	return (m0);
1615 nospace:
1616 	if (m_final)
1617 		m_freem(m_final);
1618 	/* Return the original chain on failure */
1619 	return (m0);
1620 }
1621 
1622 #endif
1623 
1624 /*
1625  * Copy the contents of uio into a properly sized mbuf chain.
1626  */
1627 struct mbuf *
1628 m_uiotombuf(struct uio *uio, int how, int len, int align, int flags)
1629 {
1630 	struct mbuf *m, *mb;
1631 	int error, length, total;
1632 	int progress = 0;
1633 
1634 	/*
1635 	 * len can be zero or an arbitrary large value bound by
1636 	 * the total data supplied by the uio.
1637 	 */
1638 	if (len > 0)
1639 		total = min(uio->uio_resid, len);
1640 	else
1641 		total = uio->uio_resid;
1642 
1643 	/*
1644 	 * The smallest unit returned by m_getm2() is a single mbuf
1645 	 * with pkthdr.  We can't align past it.  Align the align value itself.
1646 	 */
1647 	if (align)
1648 		align &= ~(sizeof(long) - 1);
1649 	if (align >= MHLEN)
1650 		return (NULL);
1651 
1652 	/*
1653 	 * Give us the full allocation or nothing.
1654 	 * If len is zero return the smallest empty mbuf.
1655 	 */
1656 	m = m_getm2(NULL, max(total + align, 1), how, MT_DATA, flags);
1657 	if (m == NULL)
1658 		return (NULL);
1659 	m->m_data += align;
1660 
1661 	/* Fill all mbufs with uio data and update header information. */
1662 	for (mb = m; mb != NULL; mb = mb->m_next) {
1663 		length = min(M_TRAILINGSPACE(mb), total - progress);
1664 
1665 		error = uiomove(mtod(mb, void *), length, uio);
1666 		if (error) {
1667 			m_freem(m);
1668 			return (NULL);
1669 		}
1670 
1671 		mb->m_len = length;
1672 		progress += length;
1673 		if (flags & M_PKTHDR)
1674 			m->m_pkthdr.len += length;
1675 	}
1676 	KASSERT(progress == total, ("%s: progress != total", __func__));
1677 
1678 	return (m);
1679 }
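
/*
 * Usage sketch (illustrative): a send path turning user data into a packet
 * while leaving room for a link-layer header in front:
 *
 *	m = m_uiotombuf(uio, M_TRYWAIT, 0, max_linkhdr, M_PKTHDR);
 *	if (m == NULL)
 *		return (ENOBUFS);
 */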
1680 
1681 /*
1682  * Set the m_data pointer of a newly-allocated mbuf
1683  * to place an object of the specified size at the
1684  * end of the mbuf, longword aligned.
1685  */
1686 void
1687 m_align(struct mbuf *m, int len)
1688 {
1689 	int adjust;
1690 
1691 	if (m->m_flags & M_EXT)
1692 		adjust = m->m_ext.ext_size - len;
1693 	else if (m->m_flags & M_PKTHDR)
1694 		adjust = MHLEN - len;
1695 	else
1696 		adjust = MLEN - len;
1697 	m->m_data += adjust &~ (sizeof(long)-1);
1698 }
1699 
1700 /*
1701  * Create a writable copy of the mbuf chain.  While doing this
1702  * we compact the chain with a goal of producing a chain with
1703  * at most two mbufs.  The second mbuf in this chain is likely
1704  * to be a cluster.  The primary purpose of this work is to create
1705  * a writable packet for encryption, compression, etc.  The
1706  * secondary goal is to linearize the data so the data can be
1707  * passed to crypto hardware in the most efficient manner possible.
1708  */
1709 struct mbuf *
1710 m_unshare(struct mbuf *m0, int how)
1711 {
1712 	struct mbuf *m, *mprev;
1713 	struct mbuf *n, *mfirst, *mlast;
1714 	int len, off;
1715 
1716 	mprev = NULL;
1717 	for (m = m0; m != NULL; m = mprev->m_next) {
1718 		/*
1719 		 * Regular mbufs are ignored unless there's a cluster
1720 		 * in front of it that we can use to coalesce.  We do
1721 		 * the latter mainly so later clusters can be coalesced
1722 		 * also w/o having to handle them specially (i.e. convert
1723 		 * mbuf+cluster -> cluster).  This optimization is heavily
1724 		 * influenced by the assumption that we're running over
1725 		 * Ethernet where MCLBYTES is large enough that the max
1726 		 * packet size will permit lots of coalescing into a
1727 		 * single cluster.  This in turn permits efficient
1728 		 * crypto operations, especially when using hardware.
1729 		 */
1730 		if ((m->m_flags & M_EXT) == 0) {
1731 			if (mprev && (mprev->m_flags & M_EXT) &&
1732 			    m->m_len <= M_TRAILINGSPACE(mprev)) {
1733 				/* XXX: this ignores mbuf types */
1734 				memcpy(mtod(mprev, caddr_t) + mprev->m_len,
1735 				       mtod(m, caddr_t), m->m_len);
1736 				mprev->m_len += m->m_len;
1737 				mprev->m_next = m->m_next;	/* unlink from chain */
1738 				m_free(m);			/* reclaim mbuf */
1739 #if 0
1740 				newipsecstat.ips_mbcoalesced++;
1741 #endif
1742 			} else {
1743 				mprev = m;
1744 			}
1745 			continue;
1746 		}
1747 		/*
1748 		 * Writable mbufs are left alone (for now).
1749 		 */
1750 		if (M_WRITABLE(m)) {
1751 			mprev = m;
1752 			continue;
1753 		}
1754 
1755 		/*
1756 		 * Not writable, replace with a copy or coalesce with
1757 		 * the previous mbuf if possible (since we have to copy
1758 		 * it anyway, we try to reduce the number of mbufs and
1759 		 * clusters so that future work is easier).
1760 		 */
1761 		KASSERT(m->m_flags & M_EXT, ("m_flags 0x%x", m->m_flags));
1762 		/* NB: we only coalesce into a cluster or larger */
1763 		if (mprev != NULL && (mprev->m_flags & M_EXT) &&
1764 		    m->m_len <= M_TRAILINGSPACE(mprev)) {
1765 			/* XXX: this ignores mbuf types */
1766 			memcpy(mtod(mprev, caddr_t) + mprev->m_len,
1767 			       mtod(m, caddr_t), m->m_len);
1768 			mprev->m_len += m->m_len;
1769 			mprev->m_next = m->m_next;	/* unlink from chain */
1770 			m_free(m);			/* reclaim mbuf */
1771 #if 0
1772 			newipsecstat.ips_clcoalesced++;
1773 #endif
1774 			continue;
1775 		}
1776 
1777 		/*
1778 		 * Allocate new space to hold the copy...
1779 		 */
1780 		/* XXX why can M_PKTHDR be set past the first mbuf? */
1781 		if (mprev == NULL && (m->m_flags & M_PKTHDR)) {
1782 			/*
1783 			 * NB: if a packet header is present we must
1784 			 * allocate the mbuf separately from any cluster
1785 			 * because M_MOVE_PKTHDR will smash the data
1786 			 * pointer and drop the M_EXT marker.
1787 			 */
1788 			MGETHDR(n, how, m->m_type);
1789 			if (n == NULL) {
1790 				m_freem(m0);
1791 				return (NULL);
1792 			}
1793 			M_MOVE_PKTHDR(n, m);
1794 			MCLGET(n, how);
1795 			if ((n->m_flags & M_EXT) == 0) {
1796 				m_free(n);
1797 				m_freem(m0);
1798 				return (NULL);
1799 			}
1800 		} else {
1801 			n = m_getcl(how, m->m_type, m->m_flags);
1802 			if (n == NULL) {
1803 				m_freem(m0);
1804 				return (NULL);
1805 			}
1806 		}
1807 		/*
1808 		 * ... and copy the data.  We deal with jumbo mbufs
1809 		 * (i.e. m_len > MCLBYTES) by splitting them into
1810 		 * clusters.  We could just malloc a buffer and make
1811 		 * it external but too many device drivers don't know
1812 		 * how to break up the non-contiguous memory when
1813 		 * doing DMA.
1814 		 */
1815 		len = m->m_len;
1816 		off = 0;
1817 		mfirst = n;
1818 		mlast = NULL;
1819 		for (;;) {
1820 			int cc = min(len, MCLBYTES);
1821 			memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off, cc);
1822 			n->m_len = cc;
1823 			if (mlast != NULL)
1824 				mlast->m_next = n;
1825 			mlast = n;
1826 #if 0
1827 			newipsecstat.ips_clcopied++;
1828 #endif
1829 
1830 			len -= cc;
1831 			if (len <= 0)
1832 				break;
1833 			off += cc;
1834 
1835 			n = m_getcl(how, m->m_type, m->m_flags);
1836 			if (n == NULL) {
1837 				m_freem(mfirst);
1838 				m_freem(m0);
1839 				return (NULL);
1840 			}
1841 		}
1842 		n->m_next = m->m_next;
1843 		if (mprev == NULL)
1844 			m0 = mfirst;		/* new head of chain */
1845 		else
1846 			mprev->m_next = mfirst;	/* replace old mbuf */
1847 		m_free(m);			/* release old mbuf */
1848 		mprev = mfirst;
1849 	}
1850 	return (m0);
1851 }
1852