xref: /freebsd/sys/kern/uipc_mbuf.c (revision 3e0f6b97b257a96f7275e4442204263e44b16686)
/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#define MBTYPES
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/syslog.h>
#include <sys/domain.h>
#include <sys/protosw.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

static void mbinit __P((void *));
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbinit, NULL)

struct mbuf *mbutl;
char	*mclrefcnt;
struct mbstat mbstat;
struct mbuf *mmbfree;
union mcluster *mclfree;
int	max_linkhdr;
int	max_protohdr;
int	max_hdr;
int	max_datalen;

static void	m_reclaim __P((void));

/* "number of clusters of pages" */
#define NCL_INIT	1

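/* initial number of mbufs to allocate */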
#define NMB_INIT	16

/* ARGSUSED */
static void
mbinit(dummy)
	void *dummy;
{
	int s;

	mmbfree = NULL; mclfree = NULL;
	s = splimp();
	if (m_mballoc(NMB_INIT, M_DONTWAIT) == 0)
		goto bad;
#if MCLBYTES <= PAGE_SIZE
	if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
		goto bad;
#else
	/* It's OK to call contigmalloc in this context. */
	if (m_clalloc(16, 0) == 0)
		goto bad;
#endif
	splx(s);
	return;
bad:
	panic("mbinit");
}

/*
 * Allocate at least nmb mbufs and place on mbuf free list.
 * Must be called at splimp.
 */
/* ARGSUSED */
int
m_mballoc(nmb, nowait)
	register int nmb;
	int nowait;
{
	register caddr_t p;
	register int i;
	int nbytes;

	/*
	 * Once we run out of map space, it will be impossible to get
	 * any more (nothing is ever freed back to the map; XXX that is
	 * dumb).  However, all is not lost: m_reclaim() may still be
	 * able to free a substantial amount of space.
	 */
	if (mb_map_full)
		return (0);

	nbytes = round_page(nmb * MSIZE);
	p = (caddr_t)kmem_malloc(mb_map, nbytes, nowait ? M_NOWAIT : M_WAITOK);
	/*
	 * Either the map is now full, or this is nowait and there
	 * are no pages left.
	 */
	if (p == NULL)
		return (0);

	nmb = nbytes / MSIZE;
	for (i = 0; i < nmb; i++) {
		((struct mbuf *)p)->m_next = mmbfree;
		mmbfree = (struct mbuf *)p;
		p += MSIZE;
	}
	mbstat.m_mbufs += nmb;
	return (1);
}

#if MCLBYTES > PAGE_SIZE
int i_want_my_mcl;

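/*
 * When MCLBYTES > PAGE_SIZE, clusters must come from contigmalloc1(),
 * which cannot be called at interrupt time.  Nowait allocations in
 * m_clalloc() therefore just record how many clusters they wanted in
 * i_want_my_mcl and wake this kernel thread, which performs the actual
 * allocation in process context.
 */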
void
kproc_mclalloc(void)
{
	int status;

	while (1) {
		tsleep(&i_want_my_mcl, PVM, "mclalloc", 0);

		for (; i_want_my_mcl; i_want_my_mcl--) {
			if (m_clalloc(1, 0) == 0)
				printf("m_clalloc failed even in process context!\n");
		}
	}
}

static struct proc *mclallocproc;
static struct kproc_desc mclalloc_kp = {
	"mclalloc",
	kproc_mclalloc,
	&mclallocproc
};
SYSINIT_KT(mclallocproc, SI_SUB_KTHREAD_UPDATE, SI_ORDER_ANY, kproc_start,
	   &mclalloc_kp);
#endif

/*
 * Allocate some number of mbuf clusters
 * and place on cluster free list.
 * Must be called at splimp.
 */
/* ARGSUSED */
int
m_clalloc(ncl, nowait)
	register int ncl;
	int nowait;
{
	register caddr_t p;
	register int i;
	int npg;

	/*
	 * Once we run out of map space, it will be impossible
	 * to get any more (nothing is ever freed back to the
	 * map).
	 */
	if (mb_map_full)
		return (0);

#if MCLBYTES > PAGE_SIZE
	if (nowait) {
		i_want_my_mcl += ncl;
		wakeup(&i_want_my_mcl);
		p = 0;
	} else {
		p = contigmalloc1(MCLBYTES * ncl, M_DEVBUF, M_WAITOK, 0ul,
				  ~0ul, PAGE_SIZE, 0, mb_map);
	}
#else
	npg = ncl;
	p = (caddr_t)kmem_malloc(mb_map, ctob(npg),
				 nowait ? M_NOWAIT : M_WAITOK);
	ncl = ncl * PAGE_SIZE / MCLBYTES;
#endif
	/*
	 * Either the map is now full, or this is nowait and there
	 * are no pages left.
	 */
	if (p == NULL)
		return (0);

	for (i = 0; i < ncl; i++) {
		((union mcluster *)p)->mcl_next = mclfree;
		mclfree = (union mcluster *)p;
		p += MCLBYTES;
		mbstat.m_clfree++;
	}
	mbstat.m_clusters += ncl;
	return (1);
}

/*
 * When MGET fails, ask protocols to free space when short of memory,
 * then re-attempt to allocate an mbuf.
 */
struct mbuf *
m_retry(i, t)
	int i, t;
{
	register struct mbuf *m;

	m_reclaim();
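	/*
	 * The MGET() below would normally fall back to m_retry() itself
	 * when the free list is still empty; the temporary define turns
	 * that fallback into a plain NULL so we cannot recurse.
	 */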
#define m_retry(i, t)	(struct mbuf *)0
	MGET(m, i, t);
#undef m_retry
	if (m != NULL)
		mbstat.m_wait++;
	else
		mbstat.m_drops++;
	return (m);
}

/*
 * As above; retry an MGETHDR.
 */
struct mbuf *
m_retryhdr(i, t)
	int i, t;
{
	register struct mbuf *m;

	m_reclaim();
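	/* Same trick as in m_retry(): keep MGETHDR() from recursing. */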
#define m_retryhdr(i, t) (struct mbuf *)0
	MGETHDR(m, i, t);
#undef m_retryhdr
	if (m != NULL)
		mbstat.m_wait++;
	else
		mbstat.m_drops++;
	return (m);
}

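/*
 * Ask every protocol in every domain to release whatever space it can,
 * e.g. by dropping fragment reassembly queues.
 */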
static void
m_reclaim()
{
	register struct domain *dp;
	register struct protosw *pr;
	int s = splimp();

	for (dp = domains; dp; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain)
				(*pr->pr_drain)();
	splx(s);
	mbstat.m_drain++;
}

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
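/*
 * An illustrative (non-critical-path) caller might look like this:
 *
 *	struct mbuf *m;
 *
 *	m = m_get(M_DONTWAIT, MT_DATA);
 *	if (m == NULL)
 *		return (ENOBUFS);
 */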
struct mbuf *
m_get(nowait, type)
	int nowait, type;
{
	register struct mbuf *m;

	MGET(m, nowait, type);
	return (m);
}

struct mbuf *
m_gethdr(nowait, type)
	int nowait, type;
{
	register struct mbuf *m;

	MGETHDR(m, nowait, type);
	return (m);
}

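/* As m_get(), but zero the new mbuf's data area. */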
struct mbuf *
m_getclr(nowait, type)
	int nowait, type;
{
	register struct mbuf *m;

	MGET(m, nowait, type);
	if (m == 0)
		return (0);
	bzero(mtod(m, caddr_t), MLEN);
	return (m);
}

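/*
 * Free a single mbuf and any associated external storage; return its
 * successor, if any.
 */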
struct mbuf *
m_free(m)
	struct mbuf *m;
{
	register struct mbuf *n;

	MFREE(m, n);
	return (n);
}

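/* Free an entire mbuf chain. */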
void
m_freem(m)
	register struct mbuf *m;
{
	register struct mbuf *n;

	if (m == NULL)
		return;
	do {
		MFREE(m, n);
		m = n;
	} while (m);
}

/*
 * Mbuffer utility routines.
 */

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
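/*
 * Callers normally use the M_PREPEND() macro, which only falls back to
 * this routine when the first mbuf has no leading space; e.g. (sketch):
 *
 *	M_PREPEND(m, sizeof(struct ip), M_DONTWAIT);
 *	if (m == NULL)
 *		return (ENOBUFS);
 */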
struct mbuf *
m_prepend(m, len, how)
	register struct mbuf *m;
	int len, how;
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == (struct mbuf *)NULL) {
		m_freem(m);
		return ((struct mbuf *)NULL);
	}
	if (m->m_flags & M_PKTHDR) {
		M_COPY_PKTHDR(mn, m);
		m->m_flags &= ~M_PKTHDR;
	}
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to the end of
 * the mbuf chain.
 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
 */
static int MCFail;

struct mbuf *
m_copym(m, off0, len, wait)
	register struct mbuf *m;
	int off0, wait;
	register int len;
{
	register struct mbuf *n, **np;
	register int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	if (off < 0 || len < 0)
		panic("m_copym");
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		if (m == 0)
			panic("m_copym");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			if (len != M_COPYALL)
				panic("m_copym");
			break;
		}
		MGET(n, wait, m->m_type);
		*np = n;
		if (n == 0)
			goto nospace;
		if (copyhdr) {
			M_COPY_PKTHDR(n, m);
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data + off;
			if (!m->m_ext.ext_ref)
				mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
			else
				(*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
							m->m_ext.ext_size);
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == 0)
		MCFail++;
	return (top);
nospace:
	m_freem(top);
	MCFail++;
	return (0);
}

/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 */
struct mbuf *
m_copypacket(m, how)
	struct mbuf *m;
	int how;
{
	struct mbuf *top, *n, *o;

	MGET(n, how, m->m_type);
	top = n;
	if (!n)
		goto nospace;

	M_COPY_PKTHDR(n, m);
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data;
		if (!m->m_ext.ext_ref)
			mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
		else
			(*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
						m->m_ext.ext_size);
		n->m_ext = m->m_ext;
		n->m_flags |= M_EXT;
	} else {
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		MGET(o, how, m->m_type);
		if (!o)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data;
			if (!m->m_ext.ext_ref)
				mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
			else
				(*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
							m->m_ext.ext_size);
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return top;
nospace:
	m_freem(top);
	MCFail++;
	return 0;
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(m, off, len, cp)
	register struct mbuf *m;
	register int off;
	register int len;
	caddr_t cp;
{
	register unsigned count;

	if (off < 0 || len < 0)
		panic("m_copydata");
	while (off > 0) {
		if (m == 0)
			panic("m_copydata");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		if (m == 0)
			panic("m_copydata");
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * The m_pkthdr, if present, is not updated.
 */
void
m_cat(m, n)
	register struct mbuf *m, *n;
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

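/*
 * Trim "req_len" bytes of data from the mbuf chain: from the head if
 * req_len is positive, from the tail if it is negative.  The packet
 * header length is adjusted if one is present.
 */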
void
m_adj(mp, req_len)
	struct mbuf *mp;
	int req_len;
{
	register int len = req_len;
	register struct mbuf *m;
	register int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m->m_next)
			(m = m->m_next)->m_len = 0;
	}
}

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
static int MPFail;

struct mbuf *
m_pullup(n, len)
	register struct mbuf *n;
	int len;
{
	register struct mbuf *m;
	register int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR) {
			M_COPY_PKTHDR(m, n);
			n->m_flags &= ~M_PKTHDR;
		}
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		  (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	MPFail++;
	return (0);
}

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 */
struct mbuf *
m_split(m0, len0, wait)
	register struct mbuf *m0;
	int len0, wait;
{
	register struct mbuf *m, *n;
	unsigned len = len0, remain;

	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == 0)
		return (0);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		MGETHDR(n, wait, m0->m_type);
		if (n == 0)
			return (0);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == 0) {
				(void) m_free(n);
				return (0);
			} else
				return (n);
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = 0;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == 0)
			return (0);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_flags |= M_EXT;
		n->m_ext = m->m_ext;
		if (!m->m_ext.ext_ref)
			mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
		else
			(*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
						m->m_ext.ext_size);
		m->m_ext.ext_size = 0; /* For Accounting XXXXXX danger */
		n->m_data = m->m_data + len;
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = 0;
	return (n);
}
/*
 * Routine to copy from device local memory into mbufs.
 */
struct mbuf *
m_devget(buf, totlen, off0, ifp, copy)
	char *buf;
	int totlen, off0;
	struct ifnet *ifp;
	void (*copy) __P((char *from, caddr_t to, u_int len));
{
	register struct mbuf *m;
	struct mbuf *top = 0, **mp = &top;
	register int off = off0, len;
	register char *cp;
	char *epkt;

	cp = buf;
	epkt = cp + totlen;
	if (off) {
		cp += off + 2 * sizeof(u_short);
		totlen -= 2 * sizeof(u_short);
	}
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == 0)
		return (0);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	m->m_len = MHLEN;

	while (totlen > 0) {
		if (top) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == 0) {
				m_freem(top);
				return (0);
			}
			m->m_len = MLEN;
		}
		len = min(totlen, epkt - cp);
		if (len >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				m->m_len = len = min(len, MCLBYTES);
			else
				len = m->m_len;
		} else {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (len < m->m_len) {
				if (top == 0 && len + max_linkhdr <= m->m_len)
					m->m_data += max_linkhdr;
				m->m_len = len;
			} else
				len = m->m_len;
		}
		if (copy)
			copy(cp, mtod(m, caddr_t), (unsigned)len);
		else
			bcopy(cp, mtod(m, caddr_t), (unsigned)len);
		cp += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
		if (cp == epkt)
			cp = buf;
	}
	return (top);
}

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(m0, off, len, cp)
	struct	mbuf *m0;
	register int off;
	register int len;
	caddr_t cp;
{
	register int mlen;
	register struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == 0)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == 0) {
			n = m_getclr(M_DONTWAIT, m->m_type);
			if (n == 0)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min(m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == 0) {
			n = m_get(M_DONTWAIT, m->m_type);
			if (n == 0)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}