xref: /freebsd/sys/kern/uipc_mbuf.c (revision 8e537d168674d6b65869f73c20813001af875738)
/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 * $Id: uipc_mbuf.c,v 1.23 1996/05/12 07:48:47 phk Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#define MBTYPES
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/syslog.h>
#include <sys/domain.h>
#include <sys/protosw.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

static void mbinit __P((void *));
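/*
 * Register mbinit() to run at boot, as the first initializer in the
 * SI_SUB_MBUF subsystem.
 */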
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbinit, NULL)

struct mbuf *mbutl;		/* virtual address of mbuf pool */
char	*mclrefcnt;		/* cluster reference counts */
struct mbstat mbstat;		/* mbuf statistics */
struct mbuf *mmbfree;		/* free mbuf list */
union mcluster *mclfree;	/* free cluster list */
int	max_linkhdr;		/* largest link-level header */
int	max_protohdr;		/* largest protocol header */
int	max_hdr;		/* largest link + protocol header */
int	max_datalen;		/* MHLEN - max_hdr */

static void	m_reclaim __P((void));

/* Number of cluster pages allocated at boot time. */
#define NCL_INIT	1

/* Number of mbufs allocated at boot time. */
#define NMB_INIT	16

/* ARGSUSED*/
static void
mbinit(dummy)
	void *dummy;
{
	int s;

	mmbfree = NULL;
	mclfree = NULL;
	s = splimp();
	if (m_mballoc(NMB_INIT, M_DONTWAIT) == 0)
		goto bad;
	if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
		goto bad;
	splx(s);
	return;
bad:
	panic("mbinit");
}

/*
 * Allocate at least nmb mbufs and place on mbuf free list.
 * Must be called at splimp.
 */
/* ARGSUSED */
int
m_mballoc(nmb, nowait)
	register int nmb;
	int nowait;
{
	register caddr_t p;
	register int i;
	int nbytes;

	/*
	 * Once we run out of map space, it will be impossible to get
	 * any more (nothing is ever freed back to the map, which XXX
	 * is dumb).  All is not lost, however: m_reclaim may still be
	 * able to free a substantial amount of space.
	 */
	if (mb_map_full)
		return (0);

	nbytes = round_page(nmb * MSIZE);
	p = (caddr_t)kmem_malloc(mb_map, nbytes, nowait ? M_NOWAIT : M_WAITOK);
	/*
	 * Either the map is now full, or this is nowait and there
	 * are no pages left.
	 */
	if (p == NULL)
		return (0);

	nmb = nbytes / MSIZE;	/* may exceed the request after rounding */
	for (i = 0; i < nmb; i++) {
		((struct mbuf *)p)->m_next = mmbfree;
		mmbfree = (struct mbuf *)p;
		p += MSIZE;
	}
	mbstat.m_mbufs += nmb;
	return (1);
}

/*
 * Allocate some number of mbuf clusters
 * and place on cluster free list.
 * Must be called at splimp.
 */
/* ARGSUSED */
int
m_clalloc(ncl, nowait)
	register int ncl;
	int nowait;
{
	register caddr_t p;
	register int i;
	int npg;

	/*
	 * Once we run out of map space, it will be impossible
	 * to get any more (nothing is ever freed back to the
	 * map).
	 */
	if (mcl_map_full)
		return (0);

	npg = ncl;
	p = (caddr_t)kmem_malloc(mcl_map, ctob(npg),
				 nowait ? M_NOWAIT : M_WAITOK);
	/*
	 * Either the map is now full, or this is nowait and there
	 * are no pages left.
	 */
	if (p == NULL)
		return (0);

	ncl = ncl * PAGE_SIZE / MCLBYTES;	/* clusters actually obtained */
	for (i = 0; i < ncl; i++) {
		((union mcluster *)p)->mcl_next = mclfree;
		mclfree = (union mcluster *)p;
		p += MCLBYTES;
		mbstat.m_clfree++;
	}
	mbstat.m_clusters += ncl;
	return (1);
}

/*
 * When MGET fails, ask protocols to free space when short of memory,
 * then re-attempt to allocate an mbuf.
 */
struct mbuf *
m_retry(i, t)
	int i, t;
{
	register struct mbuf *m;

	m_reclaim();
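	/*
	 * MGET() falls back to m_retry() when no mbuf is available;
	 * temporarily redefine m_retry to a null constant so that the
	 * MGET below cannot recurse back into this function.
	 */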
#define m_retry(i, t)	(struct mbuf *)0
	MGET(m, i, t);
#undef m_retry
	if (m != NULL)
		mbstat.m_wait++;
	else
		mbstat.m_drops++;
	return (m);
}

/*
 * As above; retry an MGETHDR.
 */
struct mbuf *
m_retryhdr(i, t)
	int i, t;
{
	register struct mbuf *m;

	m_reclaim();
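	/*
	 * Same temporary redefinition trick as in m_retry() above.
	 */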
#define m_retryhdr(i, t) (struct mbuf *)0
	MGETHDR(m, i, t);
#undef m_retryhdr
	if (m != NULL)
		mbstat.m_wait++;
	else
		mbstat.m_drops++;
	return (m);
}

/*
 * Ask every protocol with a pr_drain routine to release whatever
 * space it can.
 */
static void
m_reclaim()
{
	register struct domain *dp;
	register struct protosw *pr;
	int s = splimp();

	for (dp = domains; dp; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain)
				(*pr->pr_drain)();
	splx(s);
	mbstat.m_drain++;
}

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
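/*
 * The function forms below simply wrap the corresponding macros;
 * performance-critical callers use MGET(), MGETHDR() and MFREE()
 * directly.
 */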
struct mbuf *
m_get(nowait, type)
	int nowait, type;
{
	register struct mbuf *m;

	MGET(m, nowait, type);
	return (m);
}

struct mbuf *
m_gethdr(nowait, type)
	int nowait, type;
{
	register struct mbuf *m;

	MGETHDR(m, nowait, type);
	return (m);
}

struct mbuf *
m_getclr(nowait, type)
	int nowait, type;
{
	register struct mbuf *m;

	MGET(m, nowait, type);
	if (m == 0)
		return (0);
	bzero(mtod(m, caddr_t), MLEN);
	return (m);
}

struct mbuf *
m_free(m)
	struct mbuf *m;
{
	register struct mbuf *n;

	MFREE(m, n);
	return (n);
}

void
m_freem(m)
	register struct mbuf *m;
{
	register struct mbuf *n;

	if (m == NULL)
		return;
	do {
		MFREE(m, n);
		m = n;
	} while (m);
}

/*
 * Mbuffer utility routines.
 */

/*
 * Lesser-used path for M_PREPEND:
 * allocate a new mbuf to prepend to the chain,
 * copying the packet header along if present.
 * (M_PREPEND() uses leading space in the first
 * mbuf itself whenever it can.)
 */
struct mbuf *
m_prepend(m, len, how)
	register struct mbuf *m;
	int len, how;
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == (struct mbuf *)NULL) {
		m_freem(m);
		return ((struct mbuf *)NULL);
	}
	if (m->m_flags & M_PKTHDR) {
		M_COPY_PKTHDR(mn, m);
		m->m_flags &= ~M_PKTHDR;
	}
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
 */
static int MCFail;		/* count of failed copies (debugging) */

struct mbuf *
m_copym(m, off0, len, wait)
	register struct mbuf *m;
	int off0, wait;
	register int len;
{
	register struct mbuf *n, **np;
	register int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	if (off < 0 || len < 0)
		panic("m_copym");
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		if (m == 0)
			panic("m_copym");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			if (len != M_COPYALL)
				panic("m_copym");
			break;
		}
		MGET(n, wait, m->m_type);
		*np = n;
		if (n == 0)
			goto nospace;
		if (copyhdr) {
			M_COPY_PKTHDR(n, m);
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			/*
			 * Share the external storage: bump the cluster
			 * reference count, or use the storage's own
			 * reference function if it has one.
			 */
			n->m_data = m->m_data + off;
			if (!m->m_ext.ext_ref)
				mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
			else
				(*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
							m->m_ext.ext_size);
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == 0)
		MCFail++;
	return (top);
nospace:
	m_freem(top);
	MCFail++;
	return (0);
}

/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 */
struct mbuf *
m_copypacket(m, how)
	struct mbuf *m;
	int how;
{
	struct mbuf *top, *n, *o;

	MGET(n, how, m->m_type);
	top = n;
	if (!n)
		goto nospace;

	M_COPY_PKTHDR(n, m);
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		/* XXX assumes cluster storage; ext_ref is not honored here. */
		n->m_data = m->m_data;
		mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
		n->m_ext = m->m_ext;
		n->m_flags |= M_EXT;
	} else {
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		MGET(o, how, m->m_type);
		if (!o)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			/* XXX see above: ext_ref is not honored here. */
			n->m_data = m->m_data;
			mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return top;
nospace:
	m_freem(top);
	MCFail++;
	return 0;
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(m, off, len, cp)
	register struct mbuf *m;
	register int off;
	register int len;
	caddr_t cp;
{
	register unsigned count;

	if (off < 0 || len < 0)
		panic("m_copydata");
	while (off > 0) {
		if (m == 0)
			panic("m_copydata");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		if (m == 0)
			panic("m_copydata");
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(m, n)
	register struct mbuf *m, *n;
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

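/*
 * Trim data from the head (if req_len is positive) or the tail (if
 * negative) of an mbuf chain; the packet header length, if present,
 * is adjusted to match.
 */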
void
m_adj(mp, req_len)
	struct mbuf *mp;
	int req_len;
{
	register int len = req_len;
	register struct mbuf *m;
	register int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m->m_next)
			(m = m->m_next)->m_len = 0;
	}
}

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
static int MPFail;		/* count of failed pullups (debugging) */

struct mbuf *
m_pullup(n, len)
	register struct mbuf *n;
	int len;
{
	register struct mbuf *m;
	register int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR) {
			M_COPY_PKTHDR(m, n);
			n->m_flags &= ~M_PKTHDR;
		}
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	MPFail++;
	return (0);
}

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 */
struct mbuf *
m_split(m0, len0, wait)
	register struct mbuf *m0;
	int len0, wait;
{
	register struct mbuf *m, *n;
	unsigned len = len0, remain;

	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == 0)
		return (0);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		MGETHDR(n, wait, m0->m_type);
		if (n == 0)
			return (0);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == 0) {
				(void) m_free(n);
				return (0);
			} else
				return (n);
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = 0;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == 0)
			return (0);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_flags |= M_EXT;
		n->m_ext = m->m_ext;
		if (!m->m_ext.ext_ref)
			mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
		else
			(*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
						m->m_ext.ext_size);
		m->m_ext.ext_size = 0; /* XXX for accounting only; danger */
		n->m_data = m->m_data + len;
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = 0;
	return (n);
}

/*
 * Routine to copy from device local memory into mbufs.
 */
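/*
 * buf points at totlen bytes of device memory.  A non-zero off
 * apparently supports the old trailer-protocol layout (the header
 * words are skipped).  If copy is non-null it is used in place of
 * bcopy(), e.g. for devices whose memory needs special access.
 */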
struct mbuf *
m_devget(buf, totlen, off0, ifp, copy)
	char *buf;
	int totlen, off0;
	struct ifnet *ifp;
	void (*copy) __P((char *from, caddr_t to, u_int len));
{
	register struct mbuf *m;
	struct mbuf *top = 0, **mp = &top;
	register int off = off0, len;
	register char *cp;
	char *epkt;

	cp = buf;
	epkt = cp + totlen;
	if (off) {
		cp += off + 2 * sizeof(u_short);
		totlen -= 2 * sizeof(u_short);
	}
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == 0)
		return (0);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	m->m_len = MHLEN;

	while (totlen > 0) {
		if (top) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == 0) {
				m_freem(top);
				return (0);
			}
			m->m_len = MLEN;
		}
		len = min(totlen, epkt - cp);
		if (len >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				m->m_len = len = min(len, MCLBYTES);
			else
				len = m->m_len;
		} else {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (len < m->m_len) {
				if (top == 0 && len + max_linkhdr <= m->m_len)
					m->m_data += max_linkhdr;
				m->m_len = len;
			} else
				len = m->m_len;
		}
		if (copy)
			copy(cp, mtod(m, caddr_t), (unsigned)len);
		else
			bcopy(cp, mtod(m, caddr_t), (unsigned)len);
		cp += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
		if (cp == epkt)
			cp = buf;
	}
	return (top);
}

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(m0, off, len, cp)
	struct	mbuf *m0;
	register int off;
	register int len;
	caddr_t cp;
{
	register int mlen;
	register struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == 0)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == 0) {
			n = m_getclr(M_DONTWAIT, m->m_type);
			if (n == 0)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min(m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == 0) {
			n = m_get(M_DONTWAIT, m->m_type);
			if (n == 0)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}
855