/*
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)mbuf.h	8.5 (Berkeley) 2/19/95
 * $FreeBSD$
 */

#ifndef _SYS_MBUF_H_
#define	_SYS_MBUF_H_

#include <sys/mutex.h>	/* XXX */

/*
 * Mbufs are of a single size, MSIZE (machine/param.h), which
 * includes overhead.  An mbuf may add a single "mbuf cluster" of size
 * MCLBYTES (also in machine/param.h), which has no additional overhead
 * and is used instead of the internal data area; this is done when
 * at least MINCLSIZE of data must be stored.
 */

#define	MLEN		(MSIZE - sizeof(struct m_hdr))	/* normal data len */
#define	MHLEN		(MLEN - sizeof(struct pkthdr))	/* data len w/pkthdr */

#define	MINCLSIZE	(MHLEN + 1)	/* smallest amount to put in cluster */
#define	M_MAXCOMPRESS	(MHLEN / 2)	/* max amount to copy for compression */
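
/*
 * Illustrative sketch (not part of this header): code that copies "len"
 * bytes of data into a fresh packet header mbuf typically chooses between
 * the internal data area and a cluster based on MINCLSIZE, e.g.:
 *
 *	MGETHDR(m, M_DONTWAIT, MT_DATA);
 *	if (m != NULL && len >= MINCLSIZE) {
 *		MCLGET(m, M_DONTWAIT);
 *		if ((m->m_flags & M_EXT) == 0) {
 *			m_freem(m);
 *			m = NULL;
 *		}
 *	}
 */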

/*
 * Maximum number of allocatable counters for external buffers. This
 * ensures enough VM address space for the allocation of counters
 * in the extreme case where all possible external buffers are allocated.
 *
 * Note: When new types of external storage are allocated, EXT_COUNTERS
 *	 must be tuned accordingly. Practically, this isn't a big deal
 *	 as each counter is only a word long, so we can fit
 *	 (PAGE_SIZE / length of word) counters in a single page.
 *
 * XXX: Must increase this if using any of if_ti, if_wb, if_sk drivers,
 *	or any other drivers which may manage their own buffers and
 *	eventually attach them to mbufs.
 */
#define EXT_COUNTERS (nmbclusters + nsfbufs)

/*
 * Macros for type conversion
 * mtod(m, t) -	convert mbuf pointer to data pointer of correct type
 * dtom(x) -	convert data pointer within mbuf to mbuf pointer (XXX)
 */
#define	mtod(m, t)	((t)((m)->m_data))
#define	dtom(x)		((struct mbuf *)((intptr_t)(x) & ~(MSIZE-1)))
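
/*
 * Example (illustrative sketch): a protocol that wants a typed pointer to
 * an IP header at the front of an mbuf, first making sure the header is
 * contiguous (see m_pullup() below):
 *
 *	struct ip *ip;
 *
 *	if (m->m_len < sizeof(struct ip) &&
 *	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
 *		return;
 *	ip = mtod(m, struct ip *);
 *
 * Note that dtom() only works for data stored within the mbuf itself,
 * not for data in an external cluster, hence the XXX above.
 */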

/* header at beginning of each mbuf: */
struct m_hdr {
	struct	mbuf *mh_next;		/* next buffer in chain */
	struct	mbuf *mh_nextpkt;	/* next chain in queue/record */
	caddr_t	mh_data;		/* location of data */
	int	mh_len;			/* amount of data in this mbuf */
	short	mh_type;		/* type of data in this mbuf */
	short	mh_flags;		/* flags; see below */
};

/* record/packet header in first mbuf of chain; valid if M_PKTHDR set */
struct pkthdr {
	struct	ifnet *rcvif;		/* rcv interface */
	int	len;			/* total packet length */
	/* variables for ip and tcp reassembly */
	void	*header;		/* pointer to packet header */
	/* variables for hardware checksum */
	int	csum_flags;		/* flags regarding checksum */
	int	csum_data;		/* data field used by csum routines */
	struct	mbuf *aux;		/* extra data buffer; ipsec/others */
};

/* description of external storage mapped into mbuf, valid if M_EXT set */
struct m_ext {
	caddr_t	ext_buf;		/* start of buffer */
	void	(*ext_free)		/* free routine if not the usual */
		__P((caddr_t, void *));
	void	*ext_args;		/* optional argument pointer */
	u_int	ext_size;		/* size of buffer, for ext_free */
	union	mext_refcnt *ref_cnt;	/* pointer to ref count info */
	int	ext_type;		/* type of external storage */
};

struct mbuf {
	struct	m_hdr m_hdr;
	union {
		struct {
			struct	pkthdr MH_pkthdr;	/* M_PKTHDR set */
			union {
				struct	m_ext MH_ext;	/* M_EXT set */
				char	MH_databuf[MHLEN];
			} MH_dat;
		} MH;
		char	M_databuf[MLEN];		/* !M_PKTHDR, !M_EXT */
	} M_dat;
};
#define	m_next		m_hdr.mh_next
#define	m_len		m_hdr.mh_len
#define	m_data		m_hdr.mh_data
#define	m_type		m_hdr.mh_type
#define	m_flags		m_hdr.mh_flags
#define	m_nextpkt	m_hdr.mh_nextpkt
#define	m_act		m_nextpkt
#define	m_pkthdr	M_dat.MH.MH_pkthdr
#define	m_ext		M_dat.MH.MH_dat.MH_ext
#define	m_pktdat	M_dat.MH.MH_dat.MH_databuf
#define	m_dat		M_dat.M_databuf
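
/*
 * Example (illustrative sketch): the mbufs of one packet are linked
 * through m_next, and packets are linked through m_nextpkt.  For a chain
 * whose first mbuf has M_PKTHDR set, the per-mbuf m_len values sum to
 * m_pkthdr.len:
 *
 *	struct mbuf *n;
 *	int total = 0;
 *
 *	for (n = m; n != NULL; n = n->m_next)
 *		total += n->m_len;
 *	KASSERT(total == m->m_pkthdr.len, ("mbuf chain length mismatch"));
 */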

/* mbuf flags */
#define	M_EXT		0x0001	/* has associated external storage */
#define	M_PKTHDR	0x0002	/* start of record */
#define	M_EOR		0x0004	/* end of record */
#define	M_RDONLY	0x0008	/* associated data is marked read-only */
#define	M_PROTO1	0x0010	/* protocol-specific */
#define	M_PROTO2	0x0020	/* protocol-specific */
#define	M_PROTO3	0x0040	/* protocol-specific */
#define	M_PROTO4	0x0080	/* protocol-specific */
#define	M_PROTO5	0x0100	/* protocol-specific */

/* mbuf pkthdr flags, also in m_flags */
#define	M_BCAST		0x0200	/* send/received as link-level broadcast */
#define	M_MCAST		0x0400	/* send/received as link-level multicast */
#define	M_FRAG		0x0800	/* packet is a fragment of a larger packet */
#define	M_FIRSTFRAG	0x1000	/* packet is first fragment */
#define	M_LASTFRAG	0x2000	/* packet is last fragment */

/* external buffer types: identify ext_buf type */
#define	EXT_CLUSTER	1	/* mbuf cluster */
#define	EXT_SFBUF	2	/* sendfile(2)'s sf_bufs */
#define	EXT_NET_DRV	100	/* custom ext_buf provided by net driver(s) */
#define	EXT_MOD_TYPE	200	/* custom module's ext_buf type */

/* flags copied when copying m_pkthdr */
#define	M_COPYFLAGS	(M_PKTHDR|M_EOR|M_PROTO1|M_PROTO2|M_PROTO3| \
			    M_PROTO4|M_PROTO5|M_BCAST|M_MCAST|M_FRAG|M_RDONLY)

/* flags indicating hw checksum support and sw checksum requirements */
#define CSUM_IP			0x0001		/* will csum IP */
#define CSUM_TCP		0x0002		/* will csum TCP */
#define CSUM_UDP		0x0004		/* will csum UDP */
#define CSUM_IP_FRAGS		0x0008		/* will csum IP fragments */
#define CSUM_FRAGMENT		0x0010		/* will do IP fragmentation */

#define CSUM_IP_CHECKED		0x0100		/* did csum IP */
#define CSUM_IP_VALID		0x0200		/*   ... the csum is valid */
#define CSUM_DATA_VALID		0x0400		/* csum_data field is valid */
#define CSUM_PSEUDO_HDR		0x0800		/* csum_data has pseudo hdr */

#define CSUM_DELAY_DATA		(CSUM_TCP | CSUM_UDP)
#define CSUM_DELAY_IP		(CSUM_IP)	/* XXX add ipv6 here too? */
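
/*
 * Illustrative sketch of the receive-side convention (not a normative
 * interface): a driver whose hardware has verified both the IP header
 * checksum and the full TCP/UDP checksum might mark the packet as follows
 * before handing it up, so that the stack can skip its software checksum:
 *
 *	m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID |
 *	    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
 *	m->m_pkthdr.csum_data = 0xffff;
 */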

/* mbuf types */
#define	MT_FREE		0	/* should be on free list */
#define	MT_DATA		1	/* dynamic (data) allocation */
#define	MT_HEADER	2	/* packet header */
#if 0
#define	MT_SOCKET	3	/* socket structure */
#define	MT_PCB		4	/* protocol control block */
#define	MT_RTABLE	5	/* routing tables */
#define	MT_HTABLE	6	/* IMP host tables */
#define	MT_ATABLE	7	/* address resolution tables */
#endif
#define	MT_SONAME	8	/* socket name */
#if 0
#define	MT_SOOPTS	10	/* socket options */
#endif
#define	MT_FTABLE	11	/* fragment reassembly header */
#if 0
#define	MT_RIGHTS	12	/* access rights */
#define	MT_IFADDR	13	/* interface address */
#endif
#define	MT_CONTROL	14	/* extra-data protocol message */
#define	MT_OOBDATA	15	/* expedited data */

#define	MT_NTYPES	16	/* number of mbuf types for mbtypes[] */

/*
 * mbuf statistics
 */
struct mbstat {
	u_long	m_mbufs;	/* # mbufs obtained from page pool */
	u_long	m_clusters;	/* # clusters obtained from page pool */
	u_long	m_clfree;	/* # clusters on freelist (cache) */
	u_long	m_refcnt;	/* # ref counters obtained from page pool */
	u_long	m_refree;	/* # ref counters on freelist (cache) */
	u_long	m_spare;	/* spare field */
	u_long	m_drops;	/* times failed to find space */
	u_long	m_wait;		/* times waited for space */
	u_long	m_drain;	/* times drained protocols for space */
	u_long	m_mcfail;	/* times m_copym failed */
	u_long	m_mpfail;	/* times m_pullup failed */
	u_long	m_msize;	/* length of an mbuf */
	u_long	m_mclbytes;	/* length of an mbuf cluster */
	u_long	m_minclsize;	/* min length of data to allocate a cluster */
	u_long	m_mlen;		/* length of data in an mbuf */
	u_long	m_mhlen;	/* length of data in a header mbuf */
};

/* flags to m_get/MGET */
#define	M_DONTWAIT	1
#define	M_WAIT		0

/*
 * An mbuf cluster is treated as a plain character array after allocation,
 * but while on the free list the first word of the buffer is used as a
 * free-list pointer.
 */
union mcluster {
	union	mcluster *mcl_next;
	char	mcl_buf[MCLBYTES];
};

/*
 * The m_ext object reference counter structure.
 */
union mext_refcnt {
	union	mext_refcnt *next_ref;
	u_long	refcnt;
};

/*
 * free list header definitions: mbffree_lst, mclfree_lst, mcntfree_lst
 */
struct mbffree_lst {
	struct mbuf *m_head;
	struct mtx m_mtx;
};

struct mclfree_lst {
	union mcluster *m_head;
	struct mtx m_mtx;
};

struct mcntfree_lst {
	union mext_refcnt *m_head;
	struct mtx m_mtx;
};

/*
 * Wake up the next sleeping allocation (if any) that is waiting for a
 * {cluster, mbuf} to be freed.
 *
 * Must be called with the appropriate mutex held.
 */
#define	MBWAKEUP(m_wid) do {						\
	if ((m_wid)) {							\
		m_wid--;						\
		wakeup_one(&(m_wid));					\
	}								\
} while (0)

/*
 * mbuf external reference count management macros:
 *
 * MEXT_IS_REF(m): true if (m) is not the only mbuf referencing
 *     the external buffer ext_buf
 * MEXT_REM_REF(m): remove reference to m_ext object
 * MEXT_ADD_REF(m): add reference to m_ext object already
 *     referred to by (m)
 * MEXT_INIT_REF(m): allocate and initialize an external
 *     object reference counter for (m)
 */
#define MEXT_IS_REF(m) ((m)->m_ext.ref_cnt->refcnt > 1)

#define MEXT_REM_REF(m) do {						\
	KASSERT((m)->m_ext.ref_cnt->refcnt > 0, ("m_ext refcnt <= 0"));	\
	atomic_subtract_long(&((m)->m_ext.ref_cnt->refcnt), 1);	\
} while (0)

#define MEXT_ADD_REF(m) atomic_add_long(&((m)->m_ext.ref_cnt->refcnt), 1)

#define _MEXT_ALLOC_CNT(m_cnt, how) do {				\
	union mext_refcnt *__mcnt;					\
									\
	mtx_enter(&mcntfree.m_mtx, MTX_DEF);				\
	if (mcntfree.m_head == NULL)					\
		m_alloc_ref(1, (how));					\
	__mcnt = mcntfree.m_head;					\
	if (__mcnt != NULL) {						\
		mcntfree.m_head = __mcnt->next_ref;			\
		mbstat.m_refree--;					\
		__mcnt->refcnt = 0;					\
	}								\
	mtx_exit(&mcntfree.m_mtx, MTX_DEF);				\
	(m_cnt) = __mcnt;						\
} while (0)

#define _MEXT_DEALLOC_CNT(m_cnt) do {					\
	union mext_refcnt *__mcnt = (m_cnt);				\
									\
	mtx_enter(&mcntfree.m_mtx, MTX_DEF);				\
	__mcnt->next_ref = mcntfree.m_head;				\
	mcntfree.m_head = __mcnt;					\
	mbstat.m_refree++;						\
	mtx_exit(&mcntfree.m_mtx, MTX_DEF);				\
} while (0)

#define MEXT_INIT_REF(m, how) do {					\
	struct mbuf *__mmm = (m);					\
									\
	_MEXT_ALLOC_CNT(__mmm->m_ext.ref_cnt, (how));			\
	if (__mmm->m_ext.ref_cnt != NULL)				\
		MEXT_ADD_REF(__mmm);					\
} while (0)

/*
 * mbuf allocation/deallocation macros:
 *
 *	MGET(struct mbuf *m, int how, int type)
 * allocates an mbuf and initializes it to contain internal data.
 *
 *	MGETHDR(struct mbuf *m, int how, int type)
 * allocates an mbuf and initializes it to contain a packet header
 * and internal data.
 */
/*
 * Lower-level macros for MGET(HDR)... Not to be used outside the
 * subsystem ("non-exportable" macro names are prepended with "_").
 */
#define _MGET_SETUP(m_set, m_set_type) do {				\
	(m_set)->m_type = (m_set_type);					\
	(m_set)->m_next = NULL;						\
	(m_set)->m_nextpkt = NULL;					\
	(m_set)->m_data = (m_set)->m_dat;				\
	(m_set)->m_flags = 0;						\
} while (0)

#define	_MGET(m_mget, m_get_how) do {					\
	if (mmbfree.m_head == NULL)					\
		m_mballoc(1, (m_get_how));				\
	(m_mget) = mmbfree.m_head;					\
	if ((m_mget) != NULL) {						\
		mmbfree.m_head = (m_mget)->m_next;			\
		mbtypes[MT_FREE]--;					\
	} else {							\
		if ((m_get_how) == M_WAIT)				\
			(m_mget) = m_mballoc_wait();			\
	}								\
} while (0)

#define MGET(m, how, type) do {						\
	struct mbuf *_mm;						\
	int _mhow = (how);						\
	int _mtype = (type);						\
									\
	mtx_enter(&mmbfree.m_mtx, MTX_DEF);				\
	_MGET(_mm, _mhow);						\
	if (_mm != NULL) {						\
		mbtypes[_mtype]++;					\
		mtx_exit(&mmbfree.m_mtx, MTX_DEF);			\
		_MGET_SETUP(_mm, _mtype);				\
	} else								\
		mtx_exit(&mmbfree.m_mtx, MTX_DEF);			\
	(m) = _mm;							\
} while (0)

#define _MGETHDR_SETUP(m_set, m_set_type) do {				\
	(m_set)->m_type = (m_set_type);					\
	(m_set)->m_next = NULL;						\
	(m_set)->m_nextpkt = NULL;					\
	(m_set)->m_data = (m_set)->m_pktdat;				\
	(m_set)->m_flags = M_PKTHDR;					\
	(m_set)->m_pkthdr.rcvif = NULL;					\
	(m_set)->m_pkthdr.csum_flags = 0;				\
	(m_set)->m_pkthdr.aux = NULL;					\
} while (0)

#define MGETHDR(m, how, type) do {					\
	struct mbuf *_mm;						\
	int _mhow = (how);						\
	int _mtype = (type);						\
									\
	mtx_enter(&mmbfree.m_mtx, MTX_DEF);				\
	_MGET(_mm, _mhow);						\
	if (_mm != NULL) {						\
		mbtypes[_mtype]++;					\
		mtx_exit(&mmbfree.m_mtx, MTX_DEF);			\
		_MGETHDR_SETUP(_mm, _mtype);				\
	} else								\
		mtx_exit(&mmbfree.m_mtx, MTX_DEF);			\
	(m) = _mm;							\
} while (0)
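
/*
 * Example (illustrative sketch): typical allocation of a packet header
 * mbuf, with the mandatory NULL check when M_DONTWAIT is used; note that
 * neither m_len nor m_pkthdr.len is initialized by MGETHDR:
 *
 *	struct mbuf *m;
 *
 *	MGETHDR(m, M_DONTWAIT, MT_DATA);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	m->m_pkthdr.len = m->m_len = 0;
 */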

/*
 * mbuf external storage macros:
 *
 *   MCLGET allocates a cluster and attaches it to the mbuf
 *   MEXTADD attaches pre-allocated external storage to the mbuf
 *   MEXTFREE removes the reference to an external object and frees it if
 *       necessary
 */
#define	_MCLALLOC(p, how) do {						\
	caddr_t _mp;							\
	int _mhow = (how);						\
									\
	if (mclfree.m_head == NULL)					\
		m_clalloc(1, _mhow);					\
	_mp = (caddr_t)mclfree.m_head;					\
	if (_mp != NULL) {						\
		mbstat.m_clfree--;					\
		mclfree.m_head = ((union mcluster *)_mp)->mcl_next;	\
	} else {							\
		if (_mhow == M_WAIT)					\
			_mp = m_clalloc_wait();				\
	}								\
	(p) = _mp;							\
} while (0)

#define	MCLGET(m, how) do {						\
	struct mbuf *_mm = (m);						\
									\
	mtx_enter(&mclfree.m_mtx, MTX_DEF);				\
	_MCLALLOC(_mm->m_ext.ext_buf, (how));				\
	mtx_exit(&mclfree.m_mtx, MTX_DEF);				\
	if (_mm->m_ext.ext_buf != NULL) {				\
		MEXT_INIT_REF(_mm, (how));				\
		if (_mm->m_ext.ref_cnt == NULL) {			\
			_MCLFREE(_mm->m_ext.ext_buf);			\
			_mm->m_ext.ext_buf = NULL;			\
		} else {						\
			_mm->m_data = _mm->m_ext.ext_buf;		\
			_mm->m_flags |= M_EXT;				\
			_mm->m_ext.ext_free = NULL;			\
			_mm->m_ext.ext_args = NULL;			\
			_mm->m_ext.ext_size = MCLBYTES;			\
			_mm->m_ext.ext_type = EXT_CLUSTER;		\
		}							\
	}								\
} while (0)
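
/*
 * Example (illustrative sketch): attaching a cluster to a freshly
 * allocated mbuf; on failure M_EXT is left clear and the caller must
 * dispose of the mbuf itself:
 *
 *	MGET(m, M_DONTWAIT, MT_DATA);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	MCLGET(m, M_DONTWAIT);
 *	if ((m->m_flags & M_EXT) == 0) {
 *		m_free(m);
 *		return (ENOBUFS);
 *	}
 */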

#define MEXTADD(m, buf, size, free, args, flags, type) do {		\
	struct mbuf *_mm = (m);						\
									\
	MEXT_INIT_REF(_mm, M_WAIT);					\
	if (_mm->m_ext.ref_cnt != NULL) {				\
		_mm->m_flags |= (M_EXT | (flags));			\
		_mm->m_ext.ext_buf = (caddr_t)(buf);			\
		_mm->m_data = _mm->m_ext.ext_buf;			\
		_mm->m_ext.ext_size = (size);				\
		_mm->m_ext.ext_free = (free);				\
		_mm->m_ext.ext_args = (args);				\
		_mm->m_ext.ext_type = (type);				\
	}								\
} while (0)
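
/*
 * Illustrative sketch (all xx_/XX_/sc names are hypothetical): a network
 * driver attaching one of its own receive buffers to an mbuf supplies its
 * own free routine and argument, and a type such as EXT_NET_DRV:
 *
 *	static void xx_ext_free(caddr_t buf, void *arg);
 *
 *	MGETHDR(m, M_DONTWAIT, MT_DATA);
 *	if (m != NULL) {
 *		MEXTADD(m, sc_buf, XX_BUFSIZE, xx_ext_free, sc, 0,
 *		    EXT_NET_DRV);
 *		if ((m->m_flags & M_EXT) == 0) {
 *			m_freem(m);
 *			m = NULL;
 *		}
 *	}
 */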

#define	_MCLFREE(p) do {						\
	union mcluster *_mp = (union mcluster *)(p);			\
									\
	mtx_enter(&mclfree.m_mtx, MTX_DEF);				\
	_mp->mcl_next = mclfree.m_head;					\
	mclfree.m_head = _mp;						\
	mbstat.m_clfree++;						\
	MBWAKEUP(m_clalloc_wid);					\
	mtx_exit(&mclfree.m_mtx, MTX_DEF);				\
} while (0)

#define	MEXTFREE(m) do {						\
	struct mbuf *_mmm = (m);					\
									\
	if (MEXT_IS_REF(_mmm))						\
		MEXT_REM_REF(_mmm);					\
	else if (_mmm->m_ext.ext_type != EXT_CLUSTER) {			\
		(*(_mmm->m_ext.ext_free))(_mmm->m_ext.ext_buf,		\
		    _mmm->m_ext.ext_args);				\
		_MEXT_DEALLOC_CNT(_mmm->m_ext.ref_cnt);			\
	} else {							\
		_MCLFREE(_mmm->m_ext.ext_buf);				\
		_MEXT_DEALLOC_CNT(_mmm->m_ext.ref_cnt);			\
	}								\
	_mmm->m_flags &= ~M_EXT;					\
} while (0)

/*
 * MFREE(struct mbuf *m, struct mbuf *n)
 * Free a single mbuf and associated external storage.
 * Place the successor, if any, in n.
 */
#define	MFREE(m, n) do {						\
	struct mbuf *_mm = (m);						\
									\
	KASSERT(_mm->m_type != MT_FREE, ("freeing free mbuf"));	\
	if (_mm->m_flags & M_EXT)					\
		MEXTFREE(_mm);						\
	mtx_enter(&mmbfree.m_mtx, MTX_DEF);				\
	mbtypes[_mm->m_type]--;						\
	_mm->m_type = MT_FREE;						\
	mbtypes[MT_FREE]++;						\
	(n) = _mm->m_next;						\
	_mm->m_next = mmbfree.m_head;					\
	mmbfree.m_head = _mm;						\
	MBWAKEUP(m_mballoc_wid);					\
	mtx_exit(&mmbfree.m_mtx, MTX_DEF);				\
} while (0)
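
/*
 * Illustrative sketch: freeing an entire chain is a loop over MFREE,
 * which is essentially what m_freem() does:
 *
 *	struct mbuf *n;
 *
 *	while (m != NULL) {
 *		MFREE(m, n);
 *		m = n;
 *	}
 */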

/*
 * M_WRITABLE(m)
 * Evaluates true if it is safe to write to the mbuf m's data region (this
 * can be either the local data payload or an external buffer area,
 * depending on whether M_EXT is set).
 */
#define M_WRITABLE(m)	(!((m)->m_flags & M_RDONLY) && (!((m)->m_flags  \
			    & M_EXT) || !MEXT_IS_REF(m)))
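
/*
 * Example (illustrative sketch): code about to modify packet data in
 * place should check writability first and otherwise work on a private
 * copy, e.g. via m_dup():
 *
 *	if (!M_WRITABLE(m)) {
 *		struct mbuf *n;
 *
 *		if ((n = m_dup(m, M_DONTWAIT)) == NULL) {
 *			m_freem(m);
 *			return;
 *		}
 *		m_freem(m);
 *		m = n;
 *	}
 */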

/*
 * Copy mbuf pkthdr from "from" to "to".
 * from must have M_PKTHDR set, and to must be empty.
 * aux pointer will be moved to `to'.
 */
#define	M_COPY_PKTHDR(to, from) do {					\
	struct mbuf *_mfrom = (from);					\
	struct mbuf *_mto = (to);					\
									\
	_mto->m_data = _mto->m_pktdat;					\
	_mto->m_flags = _mfrom->m_flags & M_COPYFLAGS;			\
	_mto->m_pkthdr = _mfrom->m_pkthdr;				\
	_mfrom->m_pkthdr.aux = (struct mbuf *)NULL;			\
} while (0)

/*
 * Set the m_data pointer of a newly-allocated mbuf (m_get/MGET) to place
 * an object of the specified size at the end of the mbuf, longword aligned.
 */
#define	M_ALIGN(m, len) do {						\
	(m)->m_data += (MLEN - (len)) & ~(sizeof(long) - 1);		\
} while (0)

/*
 * As above, for mbufs allocated with m_gethdr/MGETHDR
 * or initialized by M_COPY_PKTHDR.
 */
#define	MH_ALIGN(m, len) do {						\
	(m)->m_data += (MHLEN - (len)) & ~(sizeof(long) - 1);		\
} while (0)
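
/*
 * Example (illustrative sketch): a protocol building a small reply packet
 * often places its headers at the end of the data area, so that a
 * lower-layer header can later be prepended without allocating another
 * mbuf:
 *
 *	MGETHDR(m, M_DONTWAIT, MT_HEADER);
 *	if (m == NULL)
 *		return;
 *	m->m_pkthdr.len = m->m_len =
 *	    sizeof(struct ip) + sizeof(struct tcphdr);
 *	MH_ALIGN(m, m->m_len);
 */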

/*
 * Compute the amount of space available
 * before the current start of data in an mbuf.
 */
#define	M_LEADINGSPACE(m)						\
	((m)->m_flags & M_EXT ?						\
	    /* (m)->m_data - (m)->m_ext.ext_buf */ 0 :			\
	    (m)->m_flags & M_PKTHDR ? (m)->m_data - (m)->m_pktdat :	\
	    (m)->m_data - (m)->m_dat)

/*
 * Compute the amount of space available
 * after the end of data in an mbuf.
 */
#define	M_TRAILINGSPACE(m)						\
	((m)->m_flags & M_EXT ? (m)->m_ext.ext_buf +			\
	    (m)->m_ext.ext_size - ((m)->m_data + (m)->m_len) :		\
	    &(m)->m_dat[MLEN] - ((m)->m_data + (m)->m_len))

/*
 * Arrange to prepend space of size plen to mbuf m.
 * If a new mbuf must be allocated, how specifies whether to wait.
 * If how is M_DONTWAIT and allocation fails, the original mbuf chain
 * is freed and m is set to NULL.
 */
#define	M_PREPEND(m, plen, how) do {					\
	struct mbuf **_mmp = &(m);					\
	struct mbuf *_mm = *_mmp;					\
	int _mplen = (plen);						\
	int __mhow = (how);						\
									\
	if (M_LEADINGSPACE(_mm) >= _mplen) {				\
		_mm->m_data -= _mplen;					\
		_mm->m_len += _mplen;					\
	} else								\
		_mm = m_prepend(_mm, _mplen, __mhow);			\
	if (_mm != NULL && _mm->m_flags & M_PKTHDR)			\
		_mm->m_pkthdr.len += _mplen;				\
	*_mmp = _mm;							\
} while (0)
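
/*
 * Example (illustrative sketch): prepending a protocol header to an
 * existing packet; on failure the original chain has already been freed
 * and m is NULL:
 *
 *	struct ip *ip;
 *
 *	M_PREPEND(m, sizeof(struct ip), M_DONTWAIT);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	ip = mtod(m, struct ip *);
 */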

/*
 * change mbuf to new type
 */
#define	MCHTYPE(m, t) do {						\
	struct mbuf *_mm = (m);						\
	int _mt = (t);							\
									\
	atomic_subtract_long(&mbtypes[_mm->m_type], 1);			\
	atomic_add_long(&mbtypes[_mt], 1);				\
	_mm->m_type = (_mt);						\
} while (0)

/* length to m_copy to copy all */
#define	M_COPYALL	1000000000

/* compatibility with 4.3 */
#define	m_copy(m, o, l)	m_copym((m), (o), (l), M_DONTWAIT)

/*
 * pkthdr.aux type tags.
 */
struct mauxtag {
	int	af;
	int	type;
};

#ifdef _KERNEL
extern	u_long		 m_clalloc_wid;	/* mbuf cluster wait count */
extern	u_long		 m_mballoc_wid;	/* mbuf wait count */
extern	int		 max_linkhdr;	/* largest link-level header */
extern	int		 max_protohdr;	/* largest protocol header */
extern	int		 max_hdr;	/* largest link+protocol header */
extern	int		 max_datalen;	/* MHLEN - max_hdr */
extern	struct mbstat	 mbstat;
extern	u_long		 mbtypes[MT_NTYPES]; /* per-type mbuf allocations */
extern	int		 mbuf_wait;	/* mbuf sleep time */
extern	struct mbuf	*mbutl;		/* virtual address of mclusters */
extern	struct mclfree_lst	mclfree;
extern	struct mbffree_lst	mmbfree;
extern	struct mcntfree_lst	mcntfree;
extern	int		 nmbclusters;
extern	int		 nmbufs;
extern	int		 nsfbufs;

void	m_adj __P((struct mbuf *, int));
int	m_alloc_ref __P((u_int, int));
void	m_cat __P((struct mbuf *, struct mbuf *));
int	m_clalloc __P((int, int));
caddr_t	m_clalloc_wait __P((void));
void	m_copyback __P((struct mbuf *, int, int, caddr_t));
void	m_copydata __P((struct mbuf *, int, int, caddr_t));
struct	mbuf *m_copym __P((struct mbuf *, int, int, int));
struct	mbuf *m_copypacket __P((struct mbuf *, int));
struct	mbuf *m_devget __P((char *, int, int, struct ifnet *,
    void (*copy)(char *, caddr_t, u_int)));
struct	mbuf *m_dup __P((struct mbuf *, int));
struct	mbuf *m_free __P((struct mbuf *));
void	m_freem __P((struct mbuf *));
struct	mbuf *m_get __P((int, int));
struct	mbuf *m_getclr __P((int, int));
struct	mbuf *m_gethdr __P((int, int));
int	m_mballoc __P((int, int));
struct	mbuf *m_mballoc_wait __P((void));
struct	mbuf *m_prepend __P((struct mbuf *, int, int));
struct	mbuf *m_pulldown __P((struct mbuf *, int, int, int *));
void	m_print __P((const struct mbuf *m));
struct	mbuf *m_pullup __P((struct mbuf *, int));
struct	mbuf *m_split __P((struct mbuf *, int, int));
struct	mbuf *m_aux_add __P((struct mbuf *, int, int));
struct	mbuf *m_aux_find __P((struct mbuf *, int, int));
void	m_aux_delete __P((struct mbuf *, struct mbuf *));
#endif /* _KERNEL */

#endif /* !_SYS_MBUF_H_ */