xref: /freebsd/sys/kern/uipc_mbuf.c (revision ea906c4152774dff300bb26fbfc1e4188351c89a)
/*-
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_mac.h"
#include "opt_param.h"
#include "opt_mbuf_stress_test.h"
#include "opt_mbuf_profiling.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/uio.h>

#include <security/mac/mac_framework.h>

int	max_linkhdr;
int	max_protohdr;
int	max_hdr;
int	max_datalen;
#ifdef MBUF_STRESS_TEST
int	m_defragpackets;
int	m_defragbytes;
int	m_defraguseless;
int	m_defragfailure;
int	m_defragrandomfailures;
#endif

/*
 * sysctl(8) exported objects
 */
SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RD,
	   &max_linkhdr, 0, "Size of largest link layer header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RD,
	   &max_protohdr, 0, "Size of largest protocol layer header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RD,
	   &max_hdr, 0, "Size of largest link plus protocol header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RD,
	   &max_datalen, 0, "Minimum space left in mbuf after max_hdr");
#ifdef MBUF_STRESS_TEST
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
	   &m_defragpackets, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
	   &m_defragbytes, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
	   &m_defraguseless, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
	   &m_defragfailure, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
	   &m_defragrandomfailures, 0, "");
#endif

/*
 * Allocate a given length worth of mbufs and/or clusters (whatever fits
 * best) and return a pointer to the top of the allocated chain.  If an
 * existing mbuf chain is provided, then we will append the new chain
 * to the existing one but still return the top of the newly allocated
 * chain.
 */
struct mbuf *
m_getm2(struct mbuf *m, int len, int how, short type, int flags)
{
	struct mbuf *mb, *nm = NULL, *mtail = NULL;

	KASSERT(len >= 0, ("%s: len is < 0", __func__));

	/* Validate flags. */
	flags &= (M_PKTHDR | M_EOR);

	/* Packet header mbuf must be first in chain. */
	if ((flags & M_PKTHDR) && m != NULL)
		flags &= ~M_PKTHDR;

	/* Loop and append maximum sized mbufs to the chain tail. */
	while (len > 0) {
		if (len > MCLBYTES)
			mb = m_getjcl(how, type, (flags & M_PKTHDR),
			    MJUMPAGESIZE);
		else if (len >= MINCLSIZE)
			mb = m_getcl(how, type, (flags & M_PKTHDR));
		else if (flags & M_PKTHDR)
			mb = m_gethdr(how, type);
		else
			mb = m_get(how, type);

		/* Fail the whole operation if one mbuf can't be allocated. */
		if (mb == NULL) {
			if (nm != NULL)
				m_freem(nm);
			return (NULL);
		}

		/* Bookkeeping. */
		len -= (mb->m_flags & M_EXT) ? mb->m_ext.ext_size :
			((mb->m_flags & M_PKTHDR) ? MHLEN : MLEN);
		if (mtail != NULL)
			mtail->m_next = mb;
		else
			nm = mb;
		mtail = mb;
		flags &= ~M_PKTHDR;	/* Only valid on the first mbuf. */
	}
	if (flags & M_EOR)
		mtail->m_flags |= M_EOR;  /* Only valid on the last mbuf. */

	/* If mbuf was supplied, append new chain to the end of it. */
	if (m != NULL) {
		for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next)
			;
		mtail->m_next = nm;
		mtail->m_flags &= ~M_EOR;
	} else
		m = nm;

	return (m);
}
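
/*
 * Illustrative usage sketch (editor's addition, kept under #if 0 so it
 * is never compiled): grab a chain big enough for a hypothetical
 * 4000-byte payload, fronted by a packet header mbuf.
 */
#if 0
	struct mbuf *m;

	m = m_getm2(NULL, 4000, M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);	/* nothing was allocated */
#endif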

/*
 * Free an entire chain of mbufs and associated external buffers, if
 * applicable.
 */
void
m_freem(struct mbuf *mb)
{

	while (mb != NULL)
		mb = m_free(mb);
}

/*-
 * Configure a provided mbuf to refer to the provided external storage
 * buffer and set up a reference count for said buffer.  If the setting
 * up of the reference count fails, the M_EXT bit will not be set.  If
 * successful, the M_EXT bit is set in the mbuf's flags.
 *
 * Arguments:
 *    mb     The existing mbuf to which to attach the provided buffer.
 *    buf    The address of the provided external storage buffer.
 *    size   The size of the provided buffer.
 *    freef  A pointer to a routine that is responsible for freeing the
 *           provided external storage buffer.
 *    arg1, arg2
 *           Two pointer arguments (of any type) passed unmodified to
 *           the provided freef routine (either may be NULL).
 *    flags  Any other flags to be passed to the provided mbuf.
 *    type   The type that the external storage buffer should be
 *           labeled with.
 *
 * Returns:
 *    Nothing.
 */
void
m_extadd(struct mbuf *mb, caddr_t buf, u_int size,
    void (*freef)(void *, void *), void *arg1, void *arg2, int flags, int type)
{
	KASSERT(type != EXT_CLUSTER, ("%s: EXT_CLUSTER not allowed", __func__));

	if (type != EXT_EXTREF)
		mb->m_ext.ref_cnt = (u_int *)uma_zalloc(zone_ext_refcnt, M_NOWAIT);
	if (mb->m_ext.ref_cnt != NULL) {
		*(mb->m_ext.ref_cnt) = 1;
		mb->m_flags |= (M_EXT | flags);
		mb->m_ext.ext_buf = buf;
		mb->m_data = mb->m_ext.ext_buf;
		mb->m_ext.ext_size = size;
		mb->m_ext.ext_free = freef;
		mb->m_ext.ext_arg1 = arg1;
		mb->m_ext.ext_arg2 = arg2;
		mb->m_ext.ext_type = type;
	}
}
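
/*
 * Illustrative usage sketch (editor's addition, never compiled): attach
 * a driver-owned buffer to an mbuf.  my_ext_free(), my_buf and
 * MYBUFSIZE are hypothetical names, not part of this file.
 */
#if 0
	static void
	my_ext_free(void *arg1, void *arg2)
	{

		free(arg1, M_DEVBUF);
	}

	struct mbuf *m = m_gethdr(M_DONTWAIT, MT_DATA);

	if (m != NULL) {
		m_extadd(m, my_buf, MYBUFSIZE, my_ext_free, my_buf, NULL,
		    0, EXT_NET_DRV);
		if ((m->m_flags & M_EXT) == 0)
			m_free(m);	/* ref_cnt allocation failed */
	}
#endif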

/*
 * Non-directly-exported function to clean up after mbufs with M_EXT
 * storage attached to them, if the reference count hits 1 (i.e. ours
 * was the last reference).
 */
void
mb_free_ext(struct mbuf *m)
{
	int skipmbuf;

	KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
	KASSERT(m->m_ext.ref_cnt != NULL, ("%s: ref_cnt not set", __func__));

	/*
	 * Check if the header is embedded in the cluster.
	 */
	skipmbuf = (m->m_flags & M_NOFREE);

	/* Free attached storage if this mbuf is the only reference to it. */
	if (*(m->m_ext.ref_cnt) == 1 ||
	    atomic_fetchadd_int(m->m_ext.ref_cnt, -1) == 1) {
		switch (m->m_ext.ext_type) {
		case EXT_PACKET:	/* The packet zone is special. */
			if (*(m->m_ext.ref_cnt) == 0)
				*(m->m_ext.ref_cnt) = 1;
			uma_zfree(zone_pack, m);
			return;		/* Job done. */
		case EXT_CLUSTER:
			uma_zfree(zone_clust, m->m_ext.ext_buf);
			break;
		case EXT_JUMBOP:
			uma_zfree(zone_jumbop, m->m_ext.ext_buf);
			break;
		case EXT_JUMBO9:
			uma_zfree(zone_jumbo9, m->m_ext.ext_buf);
			break;
		case EXT_JUMBO16:
			uma_zfree(zone_jumbo16, m->m_ext.ext_buf);
			break;
		case EXT_SFBUF:
		case EXT_NET_DRV:
		case EXT_MOD_TYPE:
		case EXT_DISPOSABLE:
			*(m->m_ext.ref_cnt) = 0;
			uma_zfree(zone_ext_refcnt, __DEVOLATILE(u_int *,
				m->m_ext.ref_cnt));
			/* FALLTHROUGH */
		case EXT_EXTREF:
			KASSERT(m->m_ext.ext_free != NULL,
				("%s: ext_free not set", __func__));
			(*(m->m_ext.ext_free))(m->m_ext.ext_arg1,
			    m->m_ext.ext_arg2);
			break;
		default:
			KASSERT(m->m_ext.ext_type == 0,
				("%s: unknown ext_type", __func__));
		}
	}
	if (skipmbuf)
		return;

	/*
	 * Free this mbuf back to the mbuf zone with all m_ext
	 * information purged.
	 */
	m->m_ext.ext_buf = NULL;
	m->m_ext.ext_free = NULL;
	m->m_ext.ext_arg1 = NULL;
	m->m_ext.ext_arg2 = NULL;
	m->m_ext.ref_cnt = NULL;
	m->m_ext.ext_size = 0;
	m->m_ext.ext_type = 0;
	m->m_flags &= ~M_EXT;
	uma_zfree(zone_mbuf, m);
}

/*
 * Attach the cluster from *m to *n, set up m_ext in *n
 * and bump the refcount of the cluster.
 */
static void
mb_dupcl(struct mbuf *n, struct mbuf *m)
{
	KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
	KASSERT(m->m_ext.ref_cnt != NULL, ("%s: ref_cnt not set", __func__));
	KASSERT((n->m_flags & M_EXT) == 0, ("%s: M_EXT set", __func__));

	if (*(m->m_ext.ref_cnt) == 1)
		*(m->m_ext.ref_cnt) += 1;
	else
		atomic_add_int(m->m_ext.ref_cnt, 1);
	n->m_ext.ext_buf = m->m_ext.ext_buf;
	n->m_ext.ext_free = m->m_ext.ext_free;
	n->m_ext.ext_arg1 = m->m_ext.ext_arg1;
	n->m_ext.ext_arg2 = m->m_ext.ext_arg2;
	n->m_ext.ext_size = m->m_ext.ext_size;
	n->m_ext.ref_cnt = m->m_ext.ref_cnt;
	n->m_ext.ext_type = m->m_ext.ext_type;
	n->m_flags |= M_EXT;
}

/*
 * Clean up mbuf (chain) from any tags and packet headers.
 * If "all" is set then the first mbuf in the chain will be
 * cleaned too.
 */
void
m_demote(struct mbuf *m0, int all)
{
	struct mbuf *m;

	for (m = all ? m0 : m0->m_next; m != NULL; m = m->m_next) {
		if (m->m_flags & M_PKTHDR) {
			m_tag_delete_chain(m, NULL);
			m->m_flags &= ~M_PKTHDR;
			bzero(&m->m_pkthdr, sizeof(struct pkthdr));
		}
		if (m->m_type == MT_HEADER)
			m->m_type = MT_DATA;
		if (m != m0 && m->m_nextpkt != NULL)
			m->m_nextpkt = NULL;
		m->m_flags = m->m_flags & (M_EXT|M_EOR|M_RDONLY|M_FREELIST);
	}
}

/*
 * Sanity checks on mbuf (chain) for use in KASSERT() and general
 * debugging.
 * Returns 1 if all tests pass; on a failure it either panics (with
 * INVARIANTS) or prints a diagnostic (without).
 * If "sanitize" is 0, a failure triggers M_SANITY_ACTION; if 1, the
 * offending fields are garbled so they blow up later.
 */
int
m_sanity(struct mbuf *m0, int sanitize)
{
	struct mbuf *m;
	caddr_t a, b;
	int pktlen = 0;

#ifdef INVARIANTS
#define	M_SANITY_ACTION(s)	panic("mbuf %p: " s, m)
#else
#define	M_SANITY_ACTION(s)	printf("mbuf %p: " s, m)
#endif

	for (m = m0; m != NULL; m = m->m_next) {
		/*
		 * Basic pointer checks.  If any of these fails then some
		 * unrelated kernel memory before or after us is trashed.
		 * No way to recover from that.
		 */
		a = ((m->m_flags & M_EXT) ? m->m_ext.ext_buf :
			((m->m_flags & M_PKTHDR) ? (caddr_t)(&m->m_pktdat) :
			 (caddr_t)(&m->m_dat)) );
		b = (caddr_t)(a + (m->m_flags & M_EXT ? m->m_ext.ext_size :
			((m->m_flags & M_PKTHDR) ? MHLEN : MLEN)));
		if ((caddr_t)m->m_data < a)
			M_SANITY_ACTION("m_data outside mbuf data range left");
		if ((caddr_t)m->m_data > b)
			M_SANITY_ACTION("m_data outside mbuf data range right");
		if ((caddr_t)m->m_data + m->m_len > b)
			M_SANITY_ACTION("m_data + m_len exceeds mbuf space");
		if ((m->m_flags & M_PKTHDR) && m->m_pkthdr.header) {
			if ((caddr_t)m->m_pkthdr.header < a ||
			    (caddr_t)m->m_pkthdr.header > b)
				M_SANITY_ACTION("m_pkthdr.header outside mbuf data range");
		}

		/* m->m_nextpkt may only be set on first mbuf in chain. */
		if (m != m0 && m->m_nextpkt != NULL) {
			if (sanitize) {
				m_freem(m->m_nextpkt);
				m->m_nextpkt = (struct mbuf *)0xDEADC0DE;
			} else
				M_SANITY_ACTION("m->m_nextpkt on in-chain mbuf");
		}

		/* packet length (not mbuf length!) calculation */
		if (m0->m_flags & M_PKTHDR)
			pktlen += m->m_len;

		/* m_tags may only be attached to first mbuf in chain. */
		if (m != m0 && m->m_flags & M_PKTHDR &&
		    !SLIST_EMPTY(&m->m_pkthdr.tags)) {
			if (sanitize) {
				m_tag_delete_chain(m, NULL);
				/* put in 0xDEADC0DE perhaps? */
			} else
				M_SANITY_ACTION("m_tags on in-chain mbuf");
		}

		/* M_PKTHDR may only be set on first mbuf in chain */
		if (m != m0 && m->m_flags & M_PKTHDR) {
			if (sanitize) {
				bzero(&m->m_pkthdr, sizeof(m->m_pkthdr));
				m->m_flags &= ~M_PKTHDR;
				/* put in 0xDEADC0DE and leave hdr flag in */
			} else
				M_SANITY_ACTION("M_PKTHDR on in-chain mbuf");
		}
	}
	m = m0;
	if (pktlen && pktlen != m->m_pkthdr.len) {
		if (sanitize)
			m->m_pkthdr.len = 0;
		else
			M_SANITY_ACTION("m_pkthdr.len != mbuf chain length");
	}
	return 1;

#undef	M_SANITY_ACTION
}

/*
 * "Move" mbuf pkthdr from "from" to "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 */
void
m_move_pkthdr(struct mbuf *to, struct mbuf *from)
{

#if 0
	/* see below for why these are not enabled */
	M_ASSERTPKTHDR(to);
	/* Note: with MAC, this may not be a good assertion. */
	KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags),
	    ("m_move_pkthdr: to has tags"));
#endif
#ifdef MAC
	/*
	 * XXXMAC: It could be this should also occur for non-MAC?
	 */
	if (to->m_flags & M_PKTHDR)
		m_tag_delete_chain(to, NULL);
#endif
	to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
	if ((to->m_flags & M_EXT) == 0)
		to->m_data = to->m_pktdat;
	to->m_pkthdr = from->m_pkthdr;		/* especially tags */
	SLIST_INIT(&from->m_pkthdr.tags);	/* purge tags from src */
	from->m_flags &= ~M_PKTHDR;
}

/*
 * Duplicate "from"'s mbuf pkthdr in "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 * In particular, this does a deep copy of the packet tags.
 */
int
m_dup_pkthdr(struct mbuf *to, struct mbuf *from, int how)
{

#if 0
	/*
	 * The mbuf allocator only initializes the pkthdr
	 * when the mbuf is allocated with MGETHDR. Many users
	 * (e.g. m_copy*, m_prepend) use MGET and then
	 * smash the pkthdr as needed causing these
	 * assertions to trip.  For now just disable them.
	 */
	M_ASSERTPKTHDR(to);
	/* Note: with MAC, this may not be a good assertion. */
	KASSERT(SLIST_EMPTY(&to->m_pkthdr.tags), ("m_dup_pkthdr: to has tags"));
#endif
	MBUF_CHECKSLEEP(how);
#ifdef MAC
	if (to->m_flags & M_PKTHDR)
		m_tag_delete_chain(to, NULL);
#endif
	to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
	if ((to->m_flags & M_EXT) == 0)
		to->m_data = to->m_pktdat;
	to->m_pkthdr = from->m_pkthdr;
	SLIST_INIT(&to->m_pkthdr.tags);
	return (m_tag_copy_chain(to, from, MBTOM(how)));
}

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(struct mbuf *m, int len, int how)
{
	struct mbuf *mn;

	if (m->m_flags & M_PKTHDR)
		MGETHDR(mn, how, m->m_type);
	else
		MGET(mn, how, m->m_type);
	if (mn == NULL) {
		m_freem(m);
		return (NULL);
	}
	if (m->m_flags & M_PKTHDR)
		M_MOVE_PKTHDR(mn, m);
	mn->m_next = m;
	m = mn;
	if (m->m_flags & M_PKTHDR) {
		if (len < MHLEN)
			MH_ALIGN(m, len);
	} else {
		if (len < MLEN)
			M_ALIGN(m, len);
	}
	m->m_len = len;
	return (m);
}
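
/*
 * Illustrative usage sketch (editor's addition, never compiled): most
 * callers reach m_prepend() through the M_PREPEND() macro, e.g. when
 * reserving room for a hypothetical 8-byte protocol header.
 */
#if 0
	M_PREPEND(m, 8, M_DONTWAIT);
	if (m == NULL)
		return (ENOBUFS);	/* the chain was already freed */
	hdr = mtod(m, struct myproto_hdr *);	/* hypothetical type */
#endif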

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
struct mbuf *
m_copym(struct mbuf *m, int off0, int len, int wait)
{
	struct mbuf *n, **np;
	int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	KASSERT(off >= 0, ("m_copym, negative off %d", off));
	KASSERT(len >= 0, ("m_copym, negative len %d", len));
	MBUF_CHECKSLEEP(wait);
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = NULL;
	while (len > 0) {
		if (m == NULL) {
			KASSERT(len == M_COPYALL,
			    ("m_copym, length > size of mbuf chain"));
			break;
		}
		if (copyhdr)
			MGETHDR(n, wait, m->m_type);
		else
			MGET(n, wait, m->m_type);
		*np = n;
		if (n == NULL)
			goto nospace;
		if (copyhdr) {
			if (!m_dup_pkthdr(n, m, wait))
				goto nospace;
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data + off;
			mb_dupcl(n, m);
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (u_int)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == NULL)
		mbstat.m_mcfail++;	/* XXX: No consistency. */

	return (top);
nospace:
	m_freem(top);
	mbstat.m_mcfail++;	/* XXX: No consistency. */
	return (NULL);
}
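
/*
 * Illustrative usage sketch (editor's addition, never compiled): take a
 * read-only, reference-counted copy of a whole packet, e.g. for a
 * tap-style consumer, leaving the original untouched.
 */
#if 0
	struct mbuf *copy;

	copy = m_copym(m, 0, M_COPYALL, M_DONTWAIT);
	if (copy != NULL) {
		/*
		 * Hand off `copy'; never write into it, since its
		 * clusters are shared with `m'.  Use m_dup() when a
		 * writable copy is required.
		 */
	}
#endif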

/*
 * Copy data from mbuf (chain) n, starting at off for len bytes, into
 * mbuf (chain) m, either prepending or appending it.  In the
 * prepending case the resulting chain has a new head, which is
 * returned.  The resulting mbuf (chain) m is fully writeable.
 * m is the destination (made writeable), n is the source;
 * off is the offset in the source, len the length to copy from there;
 * prep: 0 to append, 1 to prepend;
 * how: wait or nowait.
 */

static int
m_bcopyxxx(void *s, void *t, u_int len)
{
	bcopy(s, t, (size_t)len);
	return 0;
}

struct mbuf *
m_copymdata(struct mbuf *m, struct mbuf *n, int off, int len,
    int prep, int how)
{
	struct mbuf *mm, *x, *z, *prev = NULL;
	caddr_t p;
	int i, nlen = 0;
	char buf[MLEN];

	KASSERT(m != NULL && n != NULL, ("m_copymdata, no target or source"));
	KASSERT(off >= 0, ("m_copymdata, negative off %d", off));
	KASSERT(len >= 0, ("m_copymdata, negative len %d", len));
	KASSERT(prep == 0 || prep == 1, ("m_copymdata, unknown direction %d", prep));

	mm = m;
	if (!prep) {
		while (mm->m_next) {
			prev = mm;
			mm = mm->m_next;
		}
	}
	for (z = n; z != NULL; z = z->m_next)
		nlen += z->m_len;
	if (len == M_COPYALL)
		len = nlen - off;
	if (off + len > nlen || len < 1)
		return NULL;

	if (!M_WRITABLE(mm)) {
		/* XXX: Use proper m_xxx function instead. */
		x = m_getcl(how, MT_DATA, mm->m_flags);
		if (x == NULL)
			return NULL;
		bcopy(mm->m_ext.ext_buf, x->m_ext.ext_buf, x->m_ext.ext_size);
		p = x->m_ext.ext_buf + (mm->m_data - mm->m_ext.ext_buf);
		x->m_data = p;
		mm->m_next = NULL;
		if (mm != m)
			prev->m_next = x;
		m_free(mm);
		mm = x;
	}

	/*
	 * Append/prepend the data, allocating mbufs as necessary.
	 */
	/* Shortcut if enough free space in first/last mbuf. */
	if (!prep && M_TRAILINGSPACE(mm) >= len) {
		m_apply(n, off, len, m_bcopyxxx, mtod(mm, caddr_t) +
			 mm->m_len);
		mm->m_len += len;
		mm->m_pkthdr.len += len;
		return m;
	}
	if (prep && M_LEADINGSPACE(mm) >= len) {
		mm->m_data = mtod(mm, caddr_t) - len;
		m_apply(n, off, len, m_bcopyxxx, mtod(mm, caddr_t));
		mm->m_len += len;
		mm->m_pkthdr.len += len;
		return mm;
	}

	/* Expand first/last mbuf to cluster if possible. */
	if (!prep && !(mm->m_flags & M_EXT) && len > M_TRAILINGSPACE(mm)) {
		bcopy(mm->m_data, &buf, mm->m_len);
		m_clget(mm, how);
		if (!(mm->m_flags & M_EXT))
			return NULL;
		bcopy(&buf, mm->m_ext.ext_buf, mm->m_len);
		mm->m_data = mm->m_ext.ext_buf;
		mm->m_pkthdr.header = NULL;
	}
	if (prep && !(mm->m_flags & M_EXT) && len > M_LEADINGSPACE(mm)) {
		bcopy(mm->m_data, &buf, mm->m_len);
		m_clget(mm, how);
		if (!(mm->m_flags & M_EXT))
			return NULL;
		bcopy(&buf, mm->m_ext.ext_buf +
		       mm->m_ext.ext_size - mm->m_len, mm->m_len);
		mm->m_data = mm->m_ext.ext_buf +
			      mm->m_ext.ext_size - mm->m_len;
		mm->m_pkthdr.header = NULL;
	}

	/* Append/prepend as many mbuf (clusters) as necessary to fit len. */
	if (!prep && len > M_TRAILINGSPACE(mm)) {
		if (!m_getm(mm, len - M_TRAILINGSPACE(mm), how, MT_DATA))
			return NULL;
	}
	if (prep && len > M_LEADINGSPACE(mm)) {
		if (!(z = m_getm(NULL, len - M_LEADINGSPACE(mm), how, MT_DATA)))
			return NULL;
		i = 0;
		for (x = z; x != NULL; x = x->m_next) {
			i += x->m_flags & M_EXT ? x->m_ext.ext_size :
			      (x->m_flags & M_PKTHDR ? MHLEN : MLEN);
			if (!x->m_next)
				break;
		}
		z->m_data += i - len;
		m_move_pkthdr(mm, z);
		x->m_next = mm;
		mm = z;
	}

	/* Seek to start position in source mbuf. Optimization for long chains. */
	while (off > 0) {
		if (off < n->m_len)
			break;
		off -= n->m_len;
		n = n->m_next;
	}

	/* Copy data into target mbuf. */
	z = mm;
	while (len > 0) {
		KASSERT(z != NULL, ("m_copymdata, falling off target edge"));
		i = M_TRAILINGSPACE(z);
		m_apply(n, off, i, m_bcopyxxx, mtod(z, caddr_t) + z->m_len);
		z->m_len += i;
		/* fixup pkthdr.len if necessary */
		if ((prep ? mm : m)->m_flags & M_PKTHDR)
			(prep ? mm : m)->m_pkthdr.len += i;
		off += i;
		len -= i;
		z = z->m_next;
	}
	return (prep ? mm : m);
}

/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 * Preserve alignment of the first mbuf so if the creator has left
 * some room at the beginning (e.g. for inserting protocol headers)
 * the copies still have the room available.
 */
struct mbuf *
m_copypacket(struct mbuf *m, int how)
{
	struct mbuf *top, *n, *o;

	MBUF_CHECKSLEEP(how);
	MGET(n, how, m->m_type);
	top = n;
	if (n == NULL)
		goto nospace;

	if (!m_dup_pkthdr(n, m, how))
		goto nospace;
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data;
		mb_dupcl(n, m);
	} else {
		n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		MGET(o, how, m->m_type);
		if (o == NULL)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data;
			mb_dupcl(n, m);
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return top;
nospace:
	m_freem(top);
	mbstat.m_mcfail++;	/* XXX: No consistency. */
	return (NULL);
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
{
	u_int count;

	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}
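
/*
 * Illustrative usage sketch (editor's addition, never compiled): gather
 * a header that may be split across mbufs into a flat local buffer.
 * `struct ip' is only an example payload type here.
 */
#if 0
	struct ip iph;

	if (m->m_pkthdr.len >= sizeof(iph))
		m_copydata(m, 0, sizeof(iph), (caddr_t)&iph);
#endif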

/*
 * Copy a packet header mbuf chain into a completely new chain, including
 * copying any mbuf clusters.  Use this instead of m_copypacket() when
 * you need a writable copy of an mbuf chain.
 */
struct mbuf *
m_dup(struct mbuf *m, int how)
{
	struct mbuf **p, *top = NULL;
	int remain, moff, nsize;

	MBUF_CHECKSLEEP(how);
	/* Sanity check */
	if (m == NULL)
		return (NULL);
	M_ASSERTPKTHDR(m);

	/* While there's more data, get a new mbuf, tack it on, and fill it */
	remain = m->m_pkthdr.len;
	moff = 0;
	p = &top;
	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
		struct mbuf *n;

		/* Get the next new mbuf */
		if (remain >= MINCLSIZE) {
			n = m_getcl(how, m->m_type, 0);
			nsize = MCLBYTES;
		} else {
			n = m_get(how, m->m_type);
			nsize = MLEN;
		}
		if (n == NULL)
			goto nospace;

		if (top == NULL) {		/* First one, must be PKTHDR */
			if (!m_dup_pkthdr(n, m, how)) {
				m_free(n);
				goto nospace;
			}
			if ((n->m_flags & M_EXT) == 0)
				nsize = MHLEN;
		}
		n->m_len = 0;

		/* Link it into the new chain */
		*p = n;
		p = &n->m_next;

		/* Copy data from original mbuf(s) into new mbuf */
		while (n->m_len < nsize && m != NULL) {
			int chunk = min(nsize - n->m_len, m->m_len - moff);

			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
			moff += chunk;
			n->m_len += chunk;
			remain -= chunk;
			if (moff == m->m_len) {
				m = m->m_next;
				moff = 0;
			}
		}

		/* Check correct total mbuf length */
		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
		    ("%s: bogus m_pkthdr.len", __func__));
	}
	return (top);

nospace:
	m_freem(top);
	mbstat.m_mcfail++;	/* XXX: No consistency. */
	return (NULL);
}
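
/*
 * Illustrative usage sketch (editor's addition, never compiled): unlike
 * m_copypacket(), m_dup() also copies cluster contents, so the result
 * may be modified freely.
 */
#if 0
	struct mbuf *w;

	w = m_dup(m, M_DONTWAIT);
	if (w == NULL)
		return (ENOBUFS);
	/* `w' shares no storage with `m'; safe to write into. */
#endif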

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(struct mbuf *m, struct mbuf *n)
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

void
m_adj(struct mbuf *mp, int req_len)
{
	int len = req_len;
	struct mbuf *m;
	int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == NULL)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				if (m->m_next != NULL) {
					m_freem(m->m_next);
					m->m_next = NULL;
				}
				break;
			}
			count -= m->m_len;
		}
	}
}
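
/*
 * Illustrative usage sketch (editor's addition, never compiled): a
 * positive count trims from the head, a negative one from the tail.
 * The 14-byte header and 4-byte trailer are just example sizes.
 */
#if 0
	m_adj(m, 14);	/* strip a link-layer header from the front */
	m_adj(m, -4);	/* drop a 4-byte trailer from the end */
#endif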

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns NULL on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
struct mbuf *
m_pullup(struct mbuf *n, int len)
{
	struct mbuf *m;
	int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == NULL)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR)
			M_MOVE_PKTHDR(m, n);
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		  (u_int)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	mbstat.m_mpfail++;	/* XXX: No consistency. */
	return (NULL);
}
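
/*
 * Illustrative usage sketch (editor's addition, never compiled): the
 * classic idiom before casting m_data to a header structure; `struct
 * ip' is only an example.  On failure the chain has already been freed.
 */
#if 0
	struct ip *ip;

	if (m->m_len < sizeof(struct ip) &&
	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
		return;			/* chain already freed */
	ip = mtod(m, struct ip *);
#endif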

/*
 * Like m_pullup(), except a new mbuf is always allocated, and we allow
 * the amount of empty space before the data in the new mbuf to be specified
 * (in the event that the caller expects to prepend later).
 */
int MSFail;

struct mbuf *
m_copyup(struct mbuf *n, int len, int dstoff)
{
	struct mbuf *m;
	int count, space;

	if (len > (MHLEN - dstoff))
		goto bad;
	MGET(m, M_DONTWAIT, n->m_type);
	if (m == NULL)
		goto bad;
	m->m_len = 0;
	if (n->m_flags & M_PKTHDR)
		M_MOVE_PKTHDR(m, n);
	m->m_data += dstoff;
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t),
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	MSFail++;
	return (NULL);
}

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 *
 * Note that the resulting mbufs might be read-only, because the new
 * mbuf can end up sharing an mbuf cluster with the original mbuf if
 * the "breaking point" happens to lie within a cluster mbuf. Use the
 * M_WRITABLE() macro to check for this case.
 */
struct mbuf *
m_split(struct mbuf *m0, int len0, int wait)
{
	struct mbuf *m, *n;
	u_int len = len0, remain;

	MBUF_CHECKSLEEP(wait);
	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == NULL)
		return (NULL);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		MGETHDR(n, wait, m0->m_type);
		if (n == NULL)
			return (NULL);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == NULL) {
				(void) m_free(n);
				return (NULL);
			} else {
				n->m_len = 0;
				return (n);
			}
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = NULL;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == NULL)
			return (NULL);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data + len;
		mb_dupcl(n, m);
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = NULL;
	return (n);
}
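
/*
 * Illustrative usage sketch (editor's addition, never compiled): cut a
 * packet after `hdrlen' bytes (a hypothetical variable); `tail' gets
 * the rest and, for packet headers, its share of m_pkthdr.len.
 */
#if 0
	struct mbuf *tail;

	tail = m_split(m, hdrlen, M_DONTWAIT);
	if (tail == NULL)
		return (ENOBUFS);	/* m is left intact */
#endif
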
/*
 * Routine to copy from device local memory into mbufs.
 * Note that the `off' argument is the offset into the first mbuf of
 * the target chain at which to begin copying the data.
 */
struct mbuf *
m_devget(char *buf, int totlen, int off, struct ifnet *ifp,
    void (*copy)(char *from, caddr_t to, u_int len))
{
	struct mbuf *m;
	struct mbuf *top = NULL, **mp = &top;
	int len;

	if (off < 0 || off > MHLEN)
		return (NULL);

	while (totlen > 0) {
		if (top == NULL) {	/* First one, must be PKTHDR */
			if (totlen + off >= MINCLSIZE) {
				m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
				len = MCLBYTES;
			} else {
				m = m_gethdr(M_DONTWAIT, MT_DATA);
				len = MHLEN;

				/* Place initial small packet/header at end of mbuf */
				if (m && totlen + off + max_linkhdr <= MLEN) {
					m->m_data += max_linkhdr;
					len -= max_linkhdr;
				}
			}
			if (m == NULL)
				return NULL;
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = totlen;
		} else {
			if (totlen + off >= MINCLSIZE) {
				m = m_getcl(M_DONTWAIT, MT_DATA, 0);
				len = MCLBYTES;
			} else {
				m = m_get(M_DONTWAIT, MT_DATA);
				len = MLEN;
			}
			if (m == NULL) {
				m_freem(top);
				return NULL;
			}
		}
		if (off) {
			m->m_data += off;
			len -= off;
			off = 0;
		}
		m->m_len = len = min(totlen, len);
		if (copy)
			copy(buf, mtod(m, caddr_t), (u_int)len);
		else
			bcopy(buf, mtod(m, caddr_t), (u_int)len);
		buf += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
	}
	return (top);
}

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(struct mbuf *m0, int off, int len, c_caddr_t cp)
{
	int mlen;
	struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == NULL)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == NULL) {
			n = m_get(M_DONTWAIT, m->m_type);
			if (n == NULL)
				goto out;
			bzero(mtod(n, caddr_t), MLEN);
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min(m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (u_int)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == NULL) {
			n = m_get(M_DONTWAIT, m->m_type);
			if (n == NULL)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}

/*
 * Append the specified data to the indicated mbuf chain,
 * extending the mbuf chain if the new data does not fit in
 * existing space.
 *
 * Return 1 if able to complete the job; otherwise 0.
 */
int
m_append(struct mbuf *m0, int len, c_caddr_t cp)
{
	struct mbuf *m, *n;
	int remainder, space;

	for (m = m0; m->m_next != NULL; m = m->m_next)
		;
	remainder = len;
	space = M_TRAILINGSPACE(m);
	if (space > 0) {
		/*
		 * Copy into available space.
		 */
		if (space > remainder)
			space = remainder;
		bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
		m->m_len += space;
		cp += space, remainder -= space;
	}
	while (remainder > 0) {
		/*
		 * Allocate a new mbuf; could check space
		 * and allocate a cluster instead.
		 */
		n = m_get(M_DONTWAIT, m->m_type);
		if (n == NULL)
			break;
		n->m_len = min(MLEN, remainder);
		bcopy(cp, mtod(n, caddr_t), n->m_len);
		cp += n->m_len, remainder -= n->m_len;
		m->m_next = n;
		m = n;
	}
	if (m0->m_flags & M_PKTHDR)
		m0->m_pkthdr.len += len - remainder;
	return (remainder == 0);
}
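
/*
 * Illustrative usage sketch (editor's addition, never compiled): tack a
 * small option blob onto the end of a packet; `opt' and `optlen' are
 * hypothetical.
 */
#if 0
	if (!m_append(m, optlen, (c_caddr_t)opt))
		return (ENOBUFS);	/* chain only partially extended */
#endif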

/*
 * Apply function f to the data in an mbuf chain starting "off" bytes from
 * the beginning, continuing for "len" bytes.
 */
int
m_apply(struct mbuf *m, int off, int len,
    int (*f)(void *, void *, u_int), void *arg)
{
	u_int count;
	int rval;

	KASSERT(off >= 0, ("m_apply, negative off %d", off));
	KASSERT(len >= 0, ("m_apply, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_apply, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		rval = (*f)(arg, mtod(m, caddr_t) + off, count);
		if (rval)
			return (rval);
		len -= count;
		off = 0;
		m = m->m_next;
	}
	return (0);
}
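
/*
 * Illustrative usage sketch (editor's addition, never compiled): walk a
 * chain without linearizing it.  sum_bytes() is a hypothetical
 * callback; a non-zero return value aborts the walk.
 */
#if 0
	static int
	sum_bytes(void *arg, void *data, u_int len)
	{
		u_int *total = arg;
		u_char *p = data;

		while (len-- > 0)
			*total += *p++;
		return (0);
	}

	u_int total = 0;

	(void)m_apply(m, 0, m->m_pkthdr.len, sum_bytes, &total);
#endif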

/*
 * Return a pointer to mbuf/offset of location in mbuf chain.
 */
struct mbuf *
m_getptr(struct mbuf *m, int loc, int *off)
{

	while (loc >= 0) {
		/* Normal end of search. */
		if (m->m_len > loc) {
			*off = loc;
			return (m);
		} else {
			loc -= m->m_len;
			if (m->m_next == NULL) {
				if (loc == 0) {
					/* Point at the end of valid data. */
					*off = m->m_len;
					return (m);
				}
				return (NULL);
			}
			m = m->m_next;
		}
	}
	return (NULL);
}

void
m_print(const struct mbuf *m, int maxlen)
{
	int len;
	int pdata;
	const struct mbuf *m2;

	if (m->m_flags & M_PKTHDR)
		len = m->m_pkthdr.len;
	else
		len = -1;
	m2 = m;
	while (m2 != NULL && (len == -1 || len)) {
		pdata = m2->m_len;
		if (maxlen != -1 && pdata > maxlen)
			pdata = maxlen;
		printf("mbuf: %p len: %d, next: %p, %b%s", m2, m2->m_len,
		    m2->m_next, m2->m_flags, "\20\20freelist\17skipfw"
		    "\11proto5\10proto4\7proto3\6proto2\5proto1\4rdonly"
		    "\3eor\2pkthdr\1ext", pdata ? "" : "\n");
		if (pdata)
			printf(", %*D\n", pdata, (u_char *)m2->m_data, "-");
		if (len != -1)
			len -= m2->m_len;
		m2 = m2->m_next;
	}
	if (len > 0)
		printf("%d bytes unaccounted for.\n", len);
	return;
}

u_int
m_fixhdr(struct mbuf *m0)
{
	u_int len;

	len = m_length(m0, NULL);
	m0->m_pkthdr.len = len;
	return (len);
}

u_int
m_length(struct mbuf *m0, struct mbuf **last)
{
	struct mbuf *m;
	u_int len;

	len = 0;
	for (m = m0; m != NULL; m = m->m_next) {
		len += m->m_len;
		if (m->m_next == NULL)
			break;
	}
	if (last != NULL)
		*last = m;
	return (len);
}

/*
 * Defragment an mbuf chain, returning the shortest possible
 * chain of mbufs and clusters.  If allocation fails and
 * this cannot be completed, NULL will be returned, but
 * the passed in chain will be unchanged.  Upon success,
 * the original chain will be freed, and the new chain
 * will be returned.
 *
 * If an mbuf without a packet header is passed in, the original
 * chain will be returned unharmed.
 */
struct mbuf *
m_defrag(struct mbuf *m0, int how)
{
	struct mbuf *m_new = NULL, *m_final = NULL;
	int progress = 0, length;

	MBUF_CHECKSLEEP(how);
	if (!(m0->m_flags & M_PKTHDR))
		return (m0);

	m_fixhdr(m0); /* Needed sanity check */

#ifdef MBUF_STRESS_TEST
	if (m_defragrandomfailures) {
		int temp = arc4random() & 0xff;
		if (temp == 0xba)
			goto nospace;
	}
#endif

	if (m0->m_pkthdr.len > MHLEN)
		m_final = m_getcl(how, MT_DATA, M_PKTHDR);
	else
		m_final = m_gethdr(how, MT_DATA);

	if (m_final == NULL)
		goto nospace;

	if (m_dup_pkthdr(m_final, m0, how) == 0)
		goto nospace;

	m_new = m_final;

	while (progress < m0->m_pkthdr.len) {
		length = m0->m_pkthdr.len - progress;
		if (length > MCLBYTES)
			length = MCLBYTES;

		if (m_new == NULL) {
			if (length > MLEN)
				m_new = m_getcl(how, MT_DATA, 0);
			else
				m_new = m_get(how, MT_DATA);
			if (m_new == NULL)
				goto nospace;
		}

		m_copydata(m0, progress, length, mtod(m_new, caddr_t));
		progress += length;
		m_new->m_len = length;
		if (m_new != m_final)
			m_cat(m_final, m_new);
		m_new = NULL;
	}
#ifdef MBUF_STRESS_TEST
	if (m0->m_next == NULL)
		m_defraguseless++;
#endif
	m_freem(m0);
	m0 = m_final;
#ifdef MBUF_STRESS_TEST
	m_defragpackets++;
	m_defragbytes += m0->m_pkthdr.len;
#endif
	return (m0);
nospace:
#ifdef MBUF_STRESS_TEST
	m_defragfailure++;
#endif
	if (m_final)
		m_freem(m_final);
	return (NULL);
}
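
/*
 * Illustrative usage sketch (editor's addition, never compiled):
 * compact a long chain before handing it to hardware with a short
 * scatter/gather list.
 */
#if 0
	struct mbuf *d;

	d = m_defrag(m, M_DONTWAIT);
	if (d == NULL) {
		/* m is unchanged; fail or try m_collapse() instead. */
	} else
		m = d;		/* the original chain was freed */
#endif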

/*
 * Defragment an mbuf chain, returning at most maxfrags separate
 * mbufs+clusters.  If this is not possible NULL is returned and
 * the original mbuf chain is left in its present (potentially
 * modified) state.  We use two techniques: collapsing consecutive
 * mbufs and replacing consecutive mbufs by a cluster.
 *
 * NB: this should really be named m_defrag but that name is taken
 */
struct mbuf *
m_collapse(struct mbuf *m0, int how, int maxfrags)
{
	struct mbuf *m, *n, *n2, **prev;
	u_int curfrags;

	/*
	 * Calculate the current number of frags.
	 */
	curfrags = 0;
	for (m = m0; m != NULL; m = m->m_next)
		curfrags++;
	/*
	 * First, try to collapse mbufs.  Note that we always collapse
	 * towards the front so we don't need to deal with moving the
	 * pkthdr.  This may be suboptimal if the first mbuf has much
	 * less data than the following.
	 */
	m = m0;
again:
	for (;;) {
		n = m->m_next;
		if (n == NULL)
			break;
		if ((m->m_flags & M_RDONLY) == 0 &&
		    n->m_len < M_TRAILINGSPACE(m)) {
			bcopy(mtod(n, void *), mtod(m, char *) + m->m_len,
				n->m_len);
			m->m_len += n->m_len;
			m->m_next = n->m_next;
			m_free(n);
			if (--curfrags <= maxfrags)
				return m0;
		} else
			m = n;
	}
	KASSERT(maxfrags > 1,
		("maxfrags %u, but normal collapse failed", maxfrags));
	/*
	 * Collapse consecutive mbufs to a cluster.
	 */
	prev = &m0->m_next;		/* NB: not the first mbuf */
	while ((n = *prev) != NULL) {
		if ((n2 = n->m_next) != NULL &&
		    n->m_len + n2->m_len < MCLBYTES) {
			m = m_getcl(how, MT_DATA, 0);
			if (m == NULL)
				goto bad;
			bcopy(mtod(n, void *), mtod(m, void *), n->m_len);
			bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len,
				n2->m_len);
			m->m_len = n->m_len + n2->m_len;
			m->m_next = n2->m_next;
			*prev = m;
			m_free(n);
			m_free(n2);
			if (--curfrags <= maxfrags)	/* +1 cl -2 mbufs */
				return m0;
			/*
			 * Still not there, try the normal collapse
			 * again before we allocate another cluster.
			 */
			goto again;
		}
		prev = &n->m_next;
	}
	/*
	 * No place where we can collapse to a cluster; punt.
	 * This can occur if, for example, you request 2 frags
	 * but the packet requires that both be clusters (we
	 * never reallocate the first mbuf to avoid moving the
	 * packet header).
	 */
bad:
	return NULL;
}

#ifdef MBUF_STRESS_TEST

/*
 * Fragment an mbuf chain.  There's no reason you'd ever want to do
 * this in normal usage, but it's great for stress testing various
 * mbuf consumers.
 *
 * If fragmentation is not possible, the original chain will be
 * returned.
 *
 * Possible length values:
 * 0	 no fragmentation will occur
 * > 0	each fragment will be of the specified length
 * -1	each fragment will be the same random value in length
 * -2	each fragment's length will be entirely random
 * (Random values range from 1 to 256)
 */
struct mbuf *
m_fragment(struct mbuf *m0, int how, int length)
{
	struct mbuf *m_new = NULL, *m_final = NULL;
	int progress = 0;

	if (!(m0->m_flags & M_PKTHDR))
		return (m0);

	if ((length == 0) || (length < -2))
		return (m0);

	m_fixhdr(m0); /* Needed sanity check */

	m_final = m_getcl(how, MT_DATA, M_PKTHDR);

	if (m_final == NULL)
		goto nospace;

	if (m_dup_pkthdr(m_final, m0, how) == 0)
		goto nospace;

	m_new = m_final;

	if (length == -1)
		length = 1 + (arc4random() & 255);

	while (progress < m0->m_pkthdr.len) {
		int fraglen;

		if (length > 0)
			fraglen = length;
		else
			fraglen = 1 + (arc4random() & 255);
		if (fraglen > m0->m_pkthdr.len - progress)
			fraglen = m0->m_pkthdr.len - progress;

		if (fraglen > MCLBYTES)
			fraglen = MCLBYTES;

		if (m_new == NULL) {
			m_new = m_getcl(how, MT_DATA, 0);
			if (m_new == NULL)
				goto nospace;
		}

		m_copydata(m0, progress, fraglen, mtod(m_new, caddr_t));
		progress += fraglen;
		m_new->m_len = fraglen;
		if (m_new != m_final)
			m_cat(m_final, m_new);
		m_new = NULL;
	}
	m_freem(m0);
	m0 = m_final;
	return (m0);
nospace:
	if (m_final)
		m_freem(m_final);
	/* Return the original chain on failure */
	return (m0);
}

#endif

/*
 * Copy the contents of uio into a properly sized mbuf chain.
 */
struct mbuf *
m_uiotombuf(struct uio *uio, int how, int len, int align, int flags)
{
	struct mbuf *m, *mb;
	int error, length, total;
	int progress = 0;

	/*
	 * len can be zero or an arbitrarily large value bound by
	 * the total data supplied by the uio.
	 */
	if (len > 0)
		total = min(uio->uio_resid, len);
	else
		total = uio->uio_resid;

	/*
	 * The smallest unit returned by m_getm2() is a single mbuf
	 * with pkthdr.  We can't align past it.  Round align itself
	 * down to a multiple of sizeof(long).
	 */
	if (align)
		align &= ~(sizeof(long) - 1);
	if (align >= MHLEN)
		return (NULL);

	/*
	 * Give us the full allocation or nothing.
	 * If len is zero return the smallest empty mbuf.
	 */
	m = m_getm2(NULL, max(total + align, 1), how, MT_DATA, flags);
	if (m == NULL)
		return (NULL);
	m->m_data += align;

	/* Fill all mbufs with uio data and update header information. */
	for (mb = m; mb != NULL; mb = mb->m_next) {
		length = min(M_TRAILINGSPACE(mb), total - progress);

		error = uiomove(mtod(mb, void *), length, uio);
		if (error) {
			m_freem(m);
			return (NULL);
		}

		mb->m_len = length;
		progress += length;
		if (flags & M_PKTHDR)
			m->m_pkthdr.len += length;
	}
	KASSERT(progress == total, ("%s: progress != total", __func__));

	return (m);
}
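
/*
 * Illustrative usage sketch (editor's addition, never compiled): copy a
 * user I/O request into a fresh packet, leaving max_linkhdr bytes of
 * leading space for later prepends.
 */
#if 0
	struct mbuf *m;

	m = m_uiotombuf(uio, M_WAITOK, 0, max_linkhdr, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
#endif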

/*
 * Set the m_data pointer of a newly-allocated mbuf
 * to place an object of the specified size at the
 * end of the mbuf, longword aligned.
 */
void
m_align(struct mbuf *m, int len)
{
	int adjust;

	if (m->m_flags & M_EXT)
		adjust = m->m_ext.ext_size - len;
	else if (m->m_flags & M_PKTHDR)
		adjust = MHLEN - len;
	else
		adjust = MLEN - len;
	m->m_data += adjust & ~(sizeof(long) - 1);
}
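
/*
 * Illustrative usage sketch (editor's addition, never compiled): place
 * a 14-byte record at the end of a fresh mbuf so anything prepended
 * later lands in the same mbuf.
 */
#if 0
	struct mbuf *m;

	m = m_get(M_DONTWAIT, MT_DATA);
	if (m != NULL) {
		m_align(m, 14);
		m->m_len = 14;
	}
#endif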

/*
 * Create a writable copy of the mbuf chain.  While doing this
 * we compact the chain with a goal of producing a chain with
 * at most two mbufs.  The second mbuf in this chain is likely
 * to be a cluster.  The primary purpose of this work is to create
 * a writable packet for encryption, compression, etc.  The
 * secondary goal is to linearize the data so the data can be
 * passed to crypto hardware in the most efficient manner possible.
 */
struct mbuf *
m_unshare(struct mbuf *m0, int how)
{
	struct mbuf *m, *mprev;
	struct mbuf *n, *mfirst, *mlast;
	int len, off;

	mprev = NULL;
	for (m = m0; m != NULL; m = mprev->m_next) {
		/*
		 * Regular mbufs are ignored unless there's a cluster
		 * in front of them that we can use to coalesce.  We do
		 * the latter mainly so later clusters can be coalesced
		 * also w/o having to handle them specially (i.e. convert
		 * mbuf+cluster -> cluster).  This optimization is heavily
		 * influenced by the assumption that we're running over
		 * Ethernet where MCLBYTES is large enough that the max
		 * packet size will permit lots of coalescing into a
		 * single cluster.  This in turn permits efficient
		 * crypto operations, especially when using hardware.
		 */
		if ((m->m_flags & M_EXT) == 0) {
			if (mprev && (mprev->m_flags & M_EXT) &&
			    m->m_len <= M_TRAILINGSPACE(mprev)) {
				/* XXX: this ignores mbuf types */
				memcpy(mtod(mprev, caddr_t) + mprev->m_len,
				       mtod(m, caddr_t), m->m_len);
				mprev->m_len += m->m_len;
				mprev->m_next = m->m_next;	/* unlink from chain */
				m_free(m);			/* reclaim mbuf */
#if 0
				newipsecstat.ips_mbcoalesced++;
#endif
			} else {
				mprev = m;
			}
			continue;
		}
		/*
		 * Writable mbufs are left alone (for now).
		 */
		if (M_WRITABLE(m)) {
			mprev = m;
			continue;
		}

		/*
		 * Not writable, replace with a copy or coalesce with
		 * the previous mbuf if possible (since we have to copy
		 * it anyway, we try to reduce the number of mbufs and
		 * clusters so that future work is easier).
		 */
		KASSERT(m->m_flags & M_EXT, ("m_flags 0x%x", m->m_flags));
		/* NB: we only coalesce into a cluster or larger */
		if (mprev != NULL && (mprev->m_flags & M_EXT) &&
		    m->m_len <= M_TRAILINGSPACE(mprev)) {
			/* XXX: this ignores mbuf types */
			memcpy(mtod(mprev, caddr_t) + mprev->m_len,
			       mtod(m, caddr_t), m->m_len);
			mprev->m_len += m->m_len;
			mprev->m_next = m->m_next;	/* unlink from chain */
			m_free(m);			/* reclaim mbuf */
#if 0
			newipsecstat.ips_clcoalesced++;
#endif
			continue;
		}

		/*
		 * Allocate new space to hold the copy...
		 */
		/* XXX why can M_PKTHDR be set past the first mbuf? */
		if (mprev == NULL && (m->m_flags & M_PKTHDR)) {
			/*
			 * NB: if a packet header is present we must
			 * allocate the mbuf separately from any cluster
			 * because M_MOVE_PKTHDR will smash the data
			 * pointer and drop the M_EXT marker.
			 */
			MGETHDR(n, how, m->m_type);
			if (n == NULL) {
				m_freem(m0);
				return (NULL);
			}
			M_MOVE_PKTHDR(n, m);
			MCLGET(n, how);
			if ((n->m_flags & M_EXT) == 0) {
				m_free(n);
				m_freem(m0);
				return (NULL);
			}
		} else {
			n = m_getcl(how, m->m_type, m->m_flags);
			if (n == NULL) {
				m_freem(m0);
				return (NULL);
			}
		}
		/*
		 * ... and copy the data.  We deal with jumbo mbufs
		 * (i.e. m_len > MCLBYTES) by splitting them into
		 * clusters.  We could just malloc a buffer and make
		 * it external but too many device drivers don't know
		 * how to break up the non-contiguous memory when
		 * doing DMA.
		 */
		len = m->m_len;
		off = 0;
		mfirst = n;
		mlast = NULL;
		for (;;) {
			int cc = min(len, MCLBYTES);
			memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off, cc);
			n->m_len = cc;
			if (mlast != NULL)
				mlast->m_next = n;
			mlast = n;
#if 0
			newipsecstat.ips_clcopied++;
#endif

			len -= cc;
			if (len <= 0)
				break;
			off += cc;

			n = m_getcl(how, m->m_type, m->m_flags);
			if (n == NULL) {
				m_freem(mfirst);
				m_freem(m0);
				return (NULL);
			}
		}
		n->m_next = m->m_next;
		if (mprev == NULL)
			m0 = mfirst;		/* new head of chain */
		else
			mprev->m_next = mfirst;	/* replace old mbuf */
		m_free(m);			/* release old mbuf */
		mprev = mfirst;
	}
	return (m0);
}

#ifdef MBUF_PROFILING

#define MP_BUCKETS 32 /* don't just change this as things may overflow. */
struct mbufprofile {
	uintmax_t wasted[MP_BUCKETS];
	uintmax_t used[MP_BUCKETS];
	uintmax_t segments[MP_BUCKETS];
} mbprof;

#define MP_MAXDIGITS 21	/* strlen("18446744073709551615") == 20, plus a spare */
#define MP_NUMLINES 6
#define MP_NUMSPERLINE 16
#define MP_EXTRABYTES 64	/* > strlen("used:\nwasted:\nsegments:\n") */
/* work out max space needed and add a bit of spare space too */
#define MP_MAXLINE ((MP_MAXDIGITS+1) * MP_NUMSPERLINE)
#define MP_BUFSIZE ((MP_MAXLINE * MP_NUMLINES) + 1 + MP_EXTRABYTES)

char mbprofbuf[MP_BUFSIZE];

void
m_profile(struct mbuf *m)
{
	int segments = 0;
	int used = 0;
	int wasted = 0;

	while (m) {
		segments++;
		used += m->m_len;
		if (m->m_flags & M_EXT) {
			wasted += MHLEN - sizeof(m->m_ext) +
			    m->m_ext.ext_size - m->m_len;
		} else {
			if (m->m_flags & M_PKTHDR)
				wasted += MHLEN - m->m_len;
			else
				wasted += MLEN - m->m_len;
		}
		m = m->m_next;
	}
	/* be paranoid.. it helps */
	if (segments > MP_BUCKETS - 1)
		segments = MP_BUCKETS - 1;
	if (used > 100000)
		used = 100000;
	if (wasted > 100000)
		wasted = 100000;
	/* store in the appropriate bucket */
	/* don't bother locking. if it's slightly off, so what? */
	mbprof.segments[segments]++;
	mbprof.used[fls(used)]++;
	mbprof.wasted[fls(wasted)]++;
}

static void
mbprof_textify(void)
{
	int offset;
	char *c;
	uintmax_t *p;

	p = &mbprof.wasted[0];
	c = mbprofbuf;
	offset = snprintf(c, MP_MAXLINE + 10,
	    "wasted:\n"
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#ifdef BIG_ARRAY
	p = &mbprof.wasted[16];
	c += offset;
	offset = snprintf(c, MP_MAXLINE,
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#endif
	p = &mbprof.used[0];
	c += offset;
	offset = snprintf(c, MP_MAXLINE + 10,
	    "used:\n"
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#ifdef BIG_ARRAY
	p = &mbprof.used[16];
	c += offset;
	offset = snprintf(c, MP_MAXLINE,
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#endif
	p = &mbprof.segments[0];
	c += offset;
	offset = snprintf(c, MP_MAXLINE + 10,
	    "segments:\n"
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju\n",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#ifdef BIG_ARRAY
	p = &mbprof.segments[16];
	c += offset;
	offset = snprintf(c, MP_MAXLINE,
	    "%ju %ju %ju %ju %ju %ju %ju %ju "
	    "%ju %ju %ju %ju %ju %ju %ju %ju",
	    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
	    p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
#endif
}

static int
mbprof_handler(SYSCTL_HANDLER_ARGS)
{
	int error;

	mbprof_textify();
	error = SYSCTL_OUT(req, mbprofbuf, strlen(mbprofbuf) + 1);
	return (error);
}

static int
mbprof_clr_handler(SYSCTL_HANDLER_ARGS)
{
	int clear, error;

	clear = 0;
	error = sysctl_handle_int(oidp, &clear, 0, req);
	if (error || !req->newptr)
		return (error);

	if (clear) {
		bzero(&mbprof, sizeof(mbprof));
	}

	return (error);
}

SYSCTL_PROC(_kern_ipc, OID_AUTO, mbufprofile, CTLTYPE_STRING|CTLFLAG_RD,
	    NULL, 0, mbprof_handler, "A", "mbuf profiling statistics");

SYSCTL_PROC(_kern_ipc, OID_AUTO, mbufprofileclr, CTLTYPE_INT|CTLFLAG_RW,
	    NULL, 0, mbprof_clr_handler, "I", "clear mbuf profiling statistics");
#endif