xref: /freebsd/sys/sys/mbuf.h (revision c68159a6d8eede11766cf13896d0f7670dbd51aa)
1 /*
2  * Copyright (c) 1982, 1986, 1988, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *	This product includes software developed by the University of
16  *	California, Berkeley and its contributors.
17  * 4. Neither the name of the University nor the names of its contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31  * SUCH DAMAGE.
32  *
33  *	@(#)mbuf.h	8.5 (Berkeley) 2/19/95
34  * $FreeBSD$
35  */
36 
37 #ifndef _SYS_MBUF_H_
38 #define	_SYS_MBUF_H_
39 
40 #include <sys/mutex.h>	/* XXX */
41 
42 /*
43  * Mbufs are of a single size, MSIZE (machine/param.h), which
44  * includes overhead.  An mbuf may add a single "mbuf cluster" of size
45  * MCLBYTES (also in machine/param.h), which has no additional overhead
46  * and is used instead of the internal data area; this is done when
47  * at least MINCLSIZE of data must be stored.
48  */
49 
50 #define	MLEN		(MSIZE - sizeof(struct m_hdr))	/* normal data len */
51 #define	MHLEN		(MLEN - sizeof(struct pkthdr))	/* data len w/pkthdr */
52 
53 #define	MINCLSIZE	(MHLEN + 1)	/* smallest amount to put in cluster */
54 #define	M_MAXCOMPRESS	(MHLEN / 2)	/* max amount to copy for compression */
55 
56 /*
57  * Maximum number of allocatable counters for external buffers. This
58  * ensures enough VM address space for the allocation of counters
59  * in the extreme case where all possible external buffers are allocated.
60  *
61  * Note: When new types of external storage are allocated, EXT_COUNTERS
62  * 	 must be tuned accordingly. Practically, this isn't a big deal
63  *	 as each counter is only a word long, so we can fit
64  *	 (PAGE_SIZE / length of word) counters in a single page.
65  *
66  * XXX: Must increase this if using any of if_ti, if_wb, if_sk drivers,
67  *	or any other drivers which may manage their own buffers and
68  *	eventually attach them to mbufs.
69  */
70 #define EXT_COUNTERS (nmbclusters + nsfbufs)
71 
72 /*
73  * Macros for type conversion
74  * mtod(m, t) -	convert mbuf pointer to data pointer of correct type
75  * dtom(x) -	convert data pointer within mbuf to mbuf pointer (XXX)
76  */
77 #define	mtod(m, t)	((t)((m)->m_data))
78 #define	dtom(x)		((struct mbuf *)((intptr_t)(x) & ~(MSIZE-1)))
79 
80 /* header at beginning of each mbuf: */
/*
 * Present at the start of every mbuf; the m_* shorthand macros defined
 * after struct mbuf below alias these mh_* fields.
 */
struct m_hdr {
	struct	mbuf *mh_next;		/* next buffer in chain */
	struct	mbuf *mh_nextpkt;	/* next chain in queue/record */
	caddr_t	mh_data;		/* location of data */
	int	mh_len;			/* amount of data in this mbuf */
	short	mh_type;		/* type of data in this mbuf (MT_*) */
	short	mh_flags;		/* flags (M_*); see below */
};
89 
90 /* record/packet header in first mbuf of chain; valid if M_PKTHDR set */
/* Only the first mbuf of a chain carries a valid pkthdr (M_PKTHDR set). */
struct pkthdr {
	struct	ifnet *rcvif;		/* rcv interface */
	int	len;			/* total packet length */
	/* variables for ip and tcp reassembly */
	void	*header;		/* pointer to packet header */
	/* variables for hardware checksum */
	int	csum_flags;		/* flags regarding checksum (CSUM_*) */
	int	csum_data;		/* data field used by csum routines */
	struct	mbuf *aux;		/* extra data buffer; ipsec/others;
					 * ownership moves with M_COPY_PKTHDR */
};
101 
102 /* description of external storage mapped into mbuf, valid if M_EXT set */
/* description of external storage mapped into mbuf, valid if M_EXT set */
struct m_ext {
	caddr_t	ext_buf;		/* start of buffer */
	void	(*ext_free)		/* free routine if not the usual;
					 * used by MEXTFREE() when
					 * ext_type != EXT_CLUSTER */
		__P((caddr_t, void *));
	void	*ext_args;		/* optional argument pointer */
	u_int	ext_size;		/* size of buffer, for ext_free */
	union	mext_refcnt *ref_cnt;	/* pointer to ref count info, shared
					 * by all mbufs mapping this buffer */
	int	ext_type;		/* type of external storage (EXT_*) */
};
112 
/*
 * The mbuf proper: fixed header followed by a union overlaying the
 * optional packet header, external-storage descriptor and the internal
 * data area.  MLEN/MHLEN above are derived from these sizes.
 */
struct mbuf {
	struct	m_hdr m_hdr;
	union {
		struct {
			struct	pkthdr MH_pkthdr;	/* M_PKTHDR set */
			union {
				struct	m_ext MH_ext;	/* M_EXT set */
				char	MH_databuf[MHLEN];
			} MH_dat;
		} MH;
		char	M_databuf[MLEN];		/* !M_PKTHDR, !M_EXT */
	} M_dat;
};
/* Shorthand accessors for the m_hdr/M_dat members above. */
#define	m_next		m_hdr.mh_next
#define	m_len		m_hdr.mh_len
#define	m_data		m_hdr.mh_data
#define	m_type		m_hdr.mh_type
#define	m_flags		m_hdr.mh_flags
#define	m_nextpkt	m_hdr.mh_nextpkt
#define	m_act		m_nextpkt
#define	m_pkthdr	M_dat.MH.MH_pkthdr
#define	m_ext		M_dat.MH.MH_dat.MH_ext
#define	m_pktdat	M_dat.MH.MH_dat.MH_databuf
#define	m_dat		M_dat.M_databuf
137 
138 /* mbuf flags */
139 #define	M_EXT		0x0001	/* has associated external storage */
140 #define	M_PKTHDR	0x0002	/* start of record */
141 #define	M_EOR		0x0004	/* end of record */
142 #define M_RDONLY	0x0008	/* associated data is marked read-only */
143 #define	M_PROTO1	0x0010	/* protocol-specific */
144 #define	M_PROTO2	0x0020	/* protocol-specific */
145 #define	M_PROTO3	0x0040	/* protocol-specific */
146 #define	M_PROTO4	0x0080	/* protocol-specific */
147 #define	M_PROTO5	0x0100	/* protocol-specific */
148 
149 /* mbuf pkthdr flags, also in m_flags */
150 #define	M_BCAST		0x0200	/* send/received as link-level broadcast */
151 #define	M_MCAST		0x0400	/* send/received as link-level multicast */
152 #define	M_FRAG		0x0800	/* packet is a fragment of a larger packet */
153 #define	M_FIRSTFRAG	0x1000	/* packet is first fragment */
154 #define	M_LASTFRAG	0x2000	/* packet is last fragment */
155 
156 /* external buffer types: identify ext_buf type */
157 #define	EXT_CLUSTER	1	/* mbuf cluster */
158 #define	EXT_SFBUF	2	/* sendfile(2)'s sf_bufs */
159 #define	EXT_NET_DRV	100	/* custom ext_buf provided by net driver(s) */
160 #define	EXT_MOD_TYPE	200	/* custom module's ext_buf type */
161 
/*
 * Flags preserved when copying m_pkthdr (see M_COPY_PKTHDR).
 * (The list previously named M_PROTO1 twice; the duplicate is removed —
 * the resulting mask is unchanged.)
 */
#define	M_COPYFLAGS	(M_PKTHDR|M_EOR|M_RDONLY|M_PROTO1|M_PROTO2 | \
			    M_PROTO3|M_PROTO4|M_PROTO5|M_BCAST|M_MCAST|M_FRAG)
165 
166 /* flags indicating hw checksum support and sw checksum requirements */
167 #define CSUM_IP			0x0001		/* will csum IP */
168 #define CSUM_TCP		0x0002		/* will csum TCP */
169 #define CSUM_UDP		0x0004		/* will csum UDP */
170 #define CSUM_IP_FRAGS		0x0008		/* will csum IP fragments */
171 #define CSUM_FRAGMENT		0x0010		/* will do IP fragmentation */
172 
173 #define CSUM_IP_CHECKED		0x0100		/* did csum IP */
174 #define CSUM_IP_VALID		0x0200		/*   ... the csum is valid */
175 #define CSUM_DATA_VALID		0x0400		/* csum_data field is valid */
176 #define CSUM_PSEUDO_HDR		0x0800		/* csum_data has pseudo hdr */
177 
178 #define CSUM_DELAY_DATA		(CSUM_TCP | CSUM_UDP)
179 #define CSUM_DELAY_IP		(CSUM_IP)	/* XXX add ipv6 here too? */
180 
181 /* mbuf types */
182 #define	MT_FREE		0	/* should be on free list */
183 #define	MT_DATA		1	/* dynamic (data) allocation */
184 #define	MT_HEADER	2	/* packet header */
185 #if 0
186 #define	MT_SOCKET	3	/* socket structure */
187 #define	MT_PCB		4	/* protocol control block */
188 #define	MT_RTABLE	5	/* routing tables */
189 #define	MT_HTABLE	6	/* IMP host tables */
190 #define	MT_ATABLE	7	/* address resolution tables */
191 #endif
192 #define	MT_SONAME	8	/* socket name */
193 #if 0
194 #define	MT_SOOPTS	10	/* socket options */
195 #endif
196 #define	MT_FTABLE	11	/* fragment reassembly header */
197 #if 0
198 #define	MT_RIGHTS	12	/* access rights */
199 #define	MT_IFADDR	13	/* interface address */
200 #endif
201 #define	MT_CONTROL	14	/* extra-data protocol message */
202 #define	MT_OOBDATA	15	/* expedited data  */
203 
204 #define	MT_NTYPES	16	/* number of mbuf types for mbtypes[] */
205 
206 /*
207  * mbuf statistics
208  */
/*
 * Allocator statistics; list-related fields are maintained by the
 * allocation macros below under the corresponding free-list mutex.
 */
struct mbstat {
	u_long	m_mbufs;	/* # mbufs obtained from page pool */
	u_long	m_clusters;	/* # clusters obtained from page pool */
	u_long	m_clfree;	/* # clusters on freelist (cache) */
	u_long	m_refcnt;	/* # ref counters obtained from page pool */
	u_long	m_refree;	/* # ref counters on freelist (cache) */
	u_long	m_spare;	/* spare field */
	u_long	m_drops;	/* times failed to find space */
	u_long	m_wait;		/* times waited for space */
	u_long	m_drain;	/* times drained protocols for space */
	u_long	m_mcfail;	/* times m_copym failed */
	u_long	m_mpfail;	/* times m_pullup failed */
	u_long	m_msize;	/* length of an mbuf */
	u_long	m_mclbytes;	/* length of an mbuf cluster */
	u_long	m_minclsize;	/* min length of data to allocate a cluster */
	u_long	m_mlen;		/* length of data in an mbuf */
	u_long	m_mhlen;	/* length of data in a header mbuf */
};
227 
/* flags to m_get/MGET */
#define	M_DONTWAIT	1	/* fail (NULL) if no mbuf/cluster is free */
#define	M_TRYWAIT	0	/* may sleep (m_mballoc_wait/m_clalloc_wait) */
#define	M_WAIT		M_TRYWAIT	/* XXX: Deprecated. */
232 
233 /*
234  * Normal mbuf clusters are normally treated as character arrays
235  * after allocation, but use the first word of the buffer as a free list
236  * pointer while on the free list.
237  */
union mcluster {
	union	mcluster *mcl_next;	/* free-list link (only while free) */
	char	mcl_buf[MCLBYTES];	/* cluster data once allocated */
};
242 
243 /*
244  * The m_ext object reference counter structure.
245  */
union mext_refcnt {
	union	mext_refcnt *next_ref;	/* free-list link (only while free) */
	u_int	refcnt;			/* # mbufs referencing the ext buf */
};
250 
251 /*
252  * free list header definitions: mbffree_lst, mclfree_lst, mcntfree_lst
253  */
254 struct mbffree_lst {
255 	struct mbuf *m_head;
256 	struct mtx m_mtx;
257 };
258 
259 struct mclfree_lst {
260         union mcluster *m_head;
261         struct mtx m_mtx;
262 };
263 
264 struct mcntfree_lst {
265         union mext_refcnt *m_head;
266         struct mtx m_mtx;
267 };
268 
269 /*
270  * Wake up the next instance (if any) of a sleeping allocation - which is
271  * waiting for a {cluster, mbuf} to be freed.
272  *
273  * Must be called with the appropriate mutex held.
274  */
/* Note: the 'm_wid' argument is evaluated several times (read,
 * decremented and its address taken) -- pass a plain variable. */
#define	MBWAKEUP(m_wid) do {						\
	if ((m_wid)) {							\
		m_wid--;						\
		wakeup_one(&(m_wid)); 					\
	}								\
} while (0)
281 
282 /*
283  * mbuf external reference count management macros:
284  *
285  * MEXT_IS_REF(m): true if (m) is not the only mbuf referencing
286  *     the external buffer ext_buf
287  * MEXT_REM_REF(m): remove reference to m_ext object
288  * MEXT_ADD_REF(m): add reference to m_ext object already
289  *     referred to by (m)
290  * MEXT_INIT_REF(m): allocate and initialize an external
291  *     object reference counter for (m)
292  */
/* Plain (non-atomic) read of the shared counter. */
#define MEXT_IS_REF(m) ((m)->m_ext.ref_cnt->refcnt > 1)
294 
/*
 * Drop one reference to (m)'s external storage.  Only decrements the
 * shared counter; the final clean-up is done by MEXTFREE().  The
 * KASSERT message previously said "refcnt < 0", which is impossible
 * (refcnt is a u_int); the assertion actually fires when the count is
 * already 0, i.e. on an over-release.
 */
#define MEXT_REM_REF(m) do {						\
	KASSERT((m)->m_ext.ref_cnt->refcnt > 0, ("m_ext refcnt == 0"));	\
	atomic_subtract_int(&((m)->m_ext.ref_cnt->refcnt), 1);		\
} while(0)
299 
/* Atomically take one more reference to (m)'s external buffer. */
#define MEXT_ADD_REF(m) atomic_add_int(&((m)->m_ext.ref_cnt->refcnt), 1)
301 
/*
 * Pop a reference counter off mcntfree, replenishing the list via
 * m_alloc_ref() if it is empty.  Takes and releases the mcntfree mutex
 * itself.  On failure (m_cnt) is set to NULL; on success the counter is
 * zeroed -- MEXT_INIT_REF() adds the first reference afterwards.
 */
#define _MEXT_ALLOC_CNT(m_cnt, how) do {				\
	union mext_refcnt *__mcnt;					\
									\
	mtx_enter(&mcntfree.m_mtx, MTX_DEF);				\
	if (mcntfree.m_head == NULL)					\
		m_alloc_ref(1, (how));					\
	__mcnt = mcntfree.m_head;					\
	if (__mcnt != NULL) {						\
		mcntfree.m_head = __mcnt->next_ref;			\
		mbstat.m_refree--;					\
		__mcnt->refcnt = 0;					\
	}								\
	mtx_exit(&mcntfree.m_mtx, MTX_DEF);				\
	(m_cnt) = __mcnt;						\
} while (0)
317 
/*
 * Push a no-longer-used reference counter back on mcntfree (takes the
 * mcntfree mutex itself) and account for it in mbstat.
 */
#define _MEXT_DEALLOC_CNT(m_cnt) do {					\
	union mext_refcnt *__mcnt = (m_cnt);				\
									\
	mtx_enter(&mcntfree.m_mtx, MTX_DEF);				\
	__mcnt->next_ref = mcntfree.m_head;				\
	mcntfree.m_head = __mcnt;					\
	mbstat.m_refree++;						\
	mtx_exit(&mcntfree.m_mtx, MTX_DEF);				\
} while (0)
327 
/*
 * Attach a fresh (zeroed) counter to (m) and take the first reference.
 * On allocation failure m->m_ext.ref_cnt is left NULL; callers such as
 * MCLGET()/MEXTADD() must check for that.
 */
#define MEXT_INIT_REF(m, how) do {					\
	struct mbuf *__mmm = (m);					\
									\
	_MEXT_ALLOC_CNT(__mmm->m_ext.ref_cnt, (how));			\
	if (__mmm->m_ext.ref_cnt != NULL)				\
		MEXT_ADD_REF(__mmm);					\
} while (0)
335 
336 /*
337  * mbuf allocation/deallocation macros:
338  *
339  *	MGET(struct mbuf *m, int how, int type)
340  * allocates an mbuf and initializes it to contain internal data.
341  *
342  *	MGETHDR(struct mbuf *m, int how, int type)
343  * allocates an mbuf and initializes it to contain a packet header
344  * and internal data.
345  */
346 /*
347  * Lower-level macros for MGET(HDR)... Not to be used outside the
348  * subsystem ("non-exportable" macro names are prepended with "_").
349  */
/*
 * Initialize a freshly allocated mbuf for internal (non-pkthdr) data:
 * clear the flags and chain linkage, record the type, and point m_data
 * at the internal data area.
 */
#define _MGET_SETUP(mb, mtype) do {					\
	(mb)->m_flags = 0;						\
	(mb)->m_type = (mtype);						\
	(mb)->m_next = NULL;						\
	(mb)->m_nextpkt = NULL;						\
	(mb)->m_data = (mb)->m_dat;					\
} while (0)
357 
/*
 * Pop an mbuf off mmbfree, replenishing via m_mballoc() if the list is
 * empty.  Caller must hold mmbfree.m_mtx (see MGET/MGETHDR).  When the
 * list stays empty and M_TRYWAIT was given, falls back to
 * m_mballoc_wait(); otherwise (m_mget) ends up NULL.
 */
#define	_MGET(m_mget, m_get_how) do {					\
	if (mmbfree.m_head == NULL)					\
		m_mballoc(1, (m_get_how));				\
	(m_mget) = mmbfree.m_head;					\
	if ((m_mget) != NULL) {						\
		mmbfree.m_head = (m_mget)->m_next;			\
		mbtypes[MT_FREE]--;					\
	} else {							\
		if ((m_get_how) == M_TRYWAIT)				\
			(m_mget) = m_mballoc_wait();			\
	}								\
} while (0)
370 
/*
 * Allocate an mbuf of the given type, set up for internal data; (m) is
 * NULL on failure.  The mmbfree mutex is held only across the list
 * manipulation, not across the field setup.
 */
#define MGET(m, how, type) do {						\
	struct mbuf *_mm;						\
	int _mhow = (how);						\
	int _mtype = (type);						\
									\
	mtx_enter(&mmbfree.m_mtx, MTX_DEF);				\
	_MGET(_mm, _mhow);						\
	if (_mm != NULL) {						\
		mbtypes[_mtype]++;					\
		mtx_exit(&mmbfree.m_mtx, MTX_DEF);			\
		_MGET_SETUP(_mm, _mtype);				\
	} else								\
		mtx_exit(&mmbfree.m_mtx, MTX_DEF);			\
	(m) = _mm;							\
} while (0)
386 
/*
 * Initialize a freshly allocated mbuf as the head of a packet: mark it
 * M_PKTHDR, clear the chain linkage and the pkthdr fields, and point
 * m_data at the data area following the packet header.
 */
#define _MGETHDR_SETUP(mb, mtype) do {					\
	(mb)->m_next = NULL;						\
	(mb)->m_nextpkt = NULL;						\
	(mb)->m_type = (mtype);						\
	(mb)->m_flags = M_PKTHDR;					\
	(mb)->m_data = (mb)->m_pktdat;					\
	(mb)->m_pkthdr.rcvif = NULL;					\
	(mb)->m_pkthdr.csum_flags = 0;					\
	(mb)->m_pkthdr.aux = NULL;					\
} while (0)
397 
/*
 * Like MGET(), but initializes the mbuf as a packet header (M_PKTHDR
 * set, pkthdr fields cleared); (m) is NULL on failure.
 */
#define MGETHDR(m, how, type) do {					\
	struct mbuf *_mm;						\
	int _mhow = (how);						\
	int _mtype = (type);						\
									\
	mtx_enter(&mmbfree.m_mtx, MTX_DEF);				\
	_MGET(_mm, _mhow);						\
	if (_mm != NULL) {						\
		mbtypes[_mtype]++;					\
		mtx_exit(&mmbfree.m_mtx, MTX_DEF);			\
		_MGETHDR_SETUP(_mm, _mtype);				\
	} else								\
		mtx_exit(&mmbfree.m_mtx, MTX_DEF);			\
	(m) = _mm;							\
} while (0)
413 
414 /*
415  * mbuf external storage macros:
416  *
417  *   MCLGET allocates and refers an mcluster to an mbuf
418  *   MEXTADD sets up pre-allocated external storage and refers to mbuf
419  *   MEXTFREE removes reference to external object and frees it if
420  *       necessary
421  */
/*
 * Pop a cluster off mclfree, replenishing via m_clalloc() if the list
 * is empty.  Caller must hold mclfree.m_mtx (see MCLGET).  With
 * M_TRYWAIT falls back to m_clalloc_wait(); otherwise (p) ends up NULL
 * on failure.
 */
#define	_MCLALLOC(p, how) do {						\
	caddr_t _mp;							\
	int _mhow = (how);						\
									\
	if (mclfree.m_head == NULL)					\
		m_clalloc(1, _mhow);					\
	_mp = (caddr_t)mclfree.m_head;					\
	if (_mp != NULL) {						\
		mbstat.m_clfree--;					\
		mclfree.m_head = ((union mcluster *)_mp)->mcl_next;	\
	} else {							\
		if (_mhow == M_TRYWAIT)					\
			_mp = m_clalloc_wait();				\
	}								\
	(p) = _mp;							\
} while (0)
438 
/*
 * Attach a newly allocated cluster to (m).  On success M_EXT is set
 * and m_data points at the cluster.  If the reference counter cannot
 * be allocated, the cluster is returned to the free list and the mbuf
 * is left without external storage -- callers check M_EXT/ext_buf.
 */
#define	MCLGET(m, how) do {						\
	struct mbuf *_mm = (m);						\
									\
	mtx_enter(&mclfree.m_mtx, MTX_DEF);				\
	_MCLALLOC(_mm->m_ext.ext_buf, (how));				\
	mtx_exit(&mclfree.m_mtx, MTX_DEF);				\
	if (_mm->m_ext.ext_buf != NULL) {				\
		MEXT_INIT_REF(_mm, (how));				\
		if (_mm->m_ext.ref_cnt == NULL) {			\
			_MCLFREE(_mm->m_ext.ext_buf);			\
			_mm->m_ext.ext_buf = NULL;			\
		} else {						\
			_mm->m_data = _mm->m_ext.ext_buf;		\
			_mm->m_flags |= M_EXT;				\
			_mm->m_ext.ext_free = NULL;			\
			_mm->m_ext.ext_args = NULL;			\
			_mm->m_ext.ext_size = MCLBYTES;			\
			_mm->m_ext.ext_type = EXT_CLUSTER;		\
		}							\
	}								\
} while (0)
460 
/*
 * Attach caller-supplied external storage to (m).  Always allocates
 * the reference counter with M_TRYWAIT; if that fails the mbuf is left
 * untouched (M_EXT not set) -- callers should check m_flags.
 */
#define MEXTADD(m, buf, size, free, args, flags, type) do {		\
	struct mbuf *_mm = (m);						\
									\
	MEXT_INIT_REF(_mm, M_TRYWAIT);					\
	if (_mm->m_ext.ref_cnt != NULL) {				\
		_mm->m_flags |= (M_EXT | (flags));			\
		_mm->m_ext.ext_buf = (caddr_t)(buf);			\
		_mm->m_data = _mm->m_ext.ext_buf;			\
		_mm->m_ext.ext_size = (size);				\
		_mm->m_ext.ext_free = (free);				\
		_mm->m_ext.ext_args = (args);				\
		_mm->m_ext.ext_type = (type);				\
	}								\
} while (0)
475 
/*
 * Return a cluster to the mclfree list and wake one sleeping
 * allocator; takes and releases the mclfree mutex itself.
 */
#define	_MCLFREE(p) do {						\
	union mcluster *_mp = (union mcluster *)(p);			\
									\
	mtx_enter(&mclfree.m_mtx, MTX_DEF);				\
	_mp->mcl_next = mclfree.m_head;					\
	mclfree.m_head = _mp;						\
	mbstat.m_clfree++;						\
	MBWAKEUP(m_clalloc_wid);					\
	mtx_exit(&mclfree.m_mtx, MTX_DEF); 				\
} while (0)
486 
487 /* MEXTFREE:
488  * If the atomic_cmpset_int() returns 0, then we effectively do nothing
489  * in terms of "cleaning up" (freeing the ext buf and ref. counter) as
490  * this means that either there are still references, or another thread
491  * is taking care of the clean-up.
492  */
/*
 * The cmpset of refcnt from 0 to 1 "claims" the final clean-up exactly
 * once; any other caller only drops its reference and clears M_EXT on
 * its own mbuf.
 */
#define	MEXTFREE(m) do {						\
	struct mbuf *_mmm = (m);					\
									\
	MEXT_REM_REF(_mmm);						\
	if (atomic_cmpset_int(&_mmm->m_ext.ref_cnt->refcnt, 0, 1)) {	\
		if (_mmm->m_ext.ext_type != EXT_CLUSTER) {		\
			/* driver/module storage: use its own free hook */ \
			(*(_mmm->m_ext.ext_free))(_mmm->m_ext.ext_buf,	\
			    _mmm->m_ext.ext_args);			\
		} else							\
			_MCLFREE(_mmm->m_ext.ext_buf);			\
		_MEXT_DEALLOC_CNT(_mmm->m_ext.ref_cnt);			\
	}								\
	_mmm->m_flags &= ~M_EXT;					\
} while (0)
507 
508 /*
509  * MFREE(struct mbuf *m, struct mbuf *n)
510  * Free a single mbuf and associated external storage.
511  * Place the successor, if any, in n.
512  */
/*
 * Releases external storage first (if M_EXT), then returns the mbuf to
 * mmbfree under its mutex, fixing the per-type counters and waking one
 * sleeping allocator.  (n) receives the successor, which may be NULL.
 */
#define	MFREE(m, n) do {						\
	struct mbuf *_mm = (m);						\
									\
	KASSERT(_mm->m_type != MT_FREE, ("freeing free mbuf"));		\
	if (_mm->m_flags & M_EXT)					\
		MEXTFREE(_mm);						\
	mtx_enter(&mmbfree.m_mtx, MTX_DEF);				\
	mbtypes[_mm->m_type]--;						\
	_mm->m_type = MT_FREE;						\
	mbtypes[MT_FREE]++;						\
	(n) = _mm->m_next;						\
	_mm->m_next = mmbfree.m_head;					\
	mmbfree.m_head = _mm;						\
	MBWAKEUP(m_mballoc_wid);					\
	mtx_exit(&mmbfree.m_mtx, MTX_DEF); 				\
} while (0)
529 
530 /*
531  * M_WRITABLE(m)
532  * Evaluate TRUE if it's safe to write to the mbuf m's data region (this
533  * can be both the local data payload, or an external buffer area,
534  * depending on whether M_EXT is set).
535  */
/* Not writable when M_RDONLY, or when an M_EXT buffer is shared
 * (MEXT_IS_REF does a plain, unlocked read of the counter). */
#define M_WRITABLE(m)	(!((m)->m_flags & M_RDONLY) && (!((m)->m_flags  \
			    & M_EXT) || !MEXT_IS_REF(m)))
538 
539 /*
540  * Copy mbuf pkthdr from "from" to "to".
541  * from must have M_PKTHDR set, and to must be empty.
542  * aux pointer will be moved to `to'.
543  */
/*
 * Note: the struct assignment copies 'aux' as well, so ownership of
 * the aux chain moves to 'to'; 'from' then has its aux pointer cleared.
 */
#define	M_COPY_PKTHDR(to, from) do {					\
	struct mbuf *_mfrom = (from);					\
	struct mbuf *_mto = (to);					\
									\
	_mto->m_data = _mto->m_pktdat;					\
	_mto->m_flags = _mfrom->m_flags & M_COPYFLAGS;			\
	_mto->m_pkthdr = _mfrom->m_pkthdr;				\
	_mfrom->m_pkthdr.aux = (struct mbuf *)NULL;			\
} while (0)
553 
554 /*
555  * Set the m_data pointer of a newly-allocated mbuf (m_get/MGET) to place
556  * an object of the specified size at the end of the mbuf, longword aligned.
557  */
/* Offset is rounded down to a long boundary; no bounds check is done,
 * so (len) must not exceed MLEN. */
#define	M_ALIGN(m, len) do {						\
	(m)->m_data += (MLEN - (len)) & ~(sizeof(long) - 1);		\
} while (0)
561 
562 /*
563  * As above, for mbufs allocated with m_gethdr/MGETHDR
564  * or initialized by M_COPY_PKTHDR.
565  */
/* As M_ALIGN, against MHLEN; (len) must not exceed MHLEN. */
#define	MH_ALIGN(m, len) do {						\
	(m)->m_data += (MHLEN - (len)) & ~(sizeof(long) - 1);		\
} while (0)
569 
570 /*
571  * Compute the amount of space available
572  * before the current start of data in an mbuf.
573  */
/* XXX: for M_EXT mbufs this deliberately reports 0 (the real
 * computation is the commented-out expression) -- presumably because
 * the shared external buffer may not be writable. */
#define	M_LEADINGSPACE(m)						\
	((m)->m_flags & M_EXT ?						\
	    /* (m)->m_data - (m)->m_ext.ext_buf */ 0 :			\
	    (m)->m_flags & M_PKTHDR ? (m)->m_data - (m)->m_pktdat :	\
	    (m)->m_data - (m)->m_dat)
579 
580 /*
581  * Compute the amount of space available
582  * after the end of data in an mbuf.
583  */
/* Note: unlike M_LEADINGSPACE, no allowance is made here for shared
 * or read-only external buffers -- the ext case is computed directly. */
#define	M_TRAILINGSPACE(m)						\
	((m)->m_flags & M_EXT ? (m)->m_ext.ext_buf +			\
	    (m)->m_ext.ext_size - ((m)->m_data + (m)->m_len) :		\
	    &(m)->m_dat[MLEN] - ((m)->m_data + (m)->m_len))
588 
589 /*
590  * Arrange to prepend space of size plen to mbuf m.
591  * If a new mbuf must be allocated, how specifies whether to wait.
592  * If the allocation fails, the original mbuf chain is freed and m is
593  * set to NULL.
594  */
/*
 * Fast path: enough leading space, only m_data/m_len move.  Slow path:
 * m_prepend() may allocate a new mbuf (and on failure frees the chain,
 * leaving (m) NULL -- see the comment above).
 */
#define	M_PREPEND(m, plen, how) do {					\
	struct mbuf **_mmp = &(m);					\
	struct mbuf *_mm = *_mmp;					\
	int _mplen = (plen);						\
	int __mhow = (how);						\
									\
	if (M_LEADINGSPACE(_mm) >= _mplen) {				\
		_mm->m_data -= _mplen;					\
		_mm->m_len += _mplen;					\
	} else								\
		_mm = m_prepend(_mm, _mplen, __mhow);			\
	if (_mm != NULL && _mm->m_flags & M_PKTHDR)			\
		_mm->m_pkthdr.len += _mplen;				\
	*_mmp = _mm;							\
} while (0)
610 
611 /*
612  * change mbuf to new type
613  */
/* Per-type counters are adjusted atomically, so no free-list mutex is
 * taken here. */
#define	MCHTYPE(m, t) do {						\
	struct mbuf *_mm = (m);						\
	int _mt = (t);							\
									\
	atomic_subtract_long(&mbtypes[_mm->m_type], 1);			\
	atomic_add_long(&mbtypes[_mt], 1);				\
	_mm->m_type = (_mt);						\
} while (0)
622 
623 /* length to m_copy to copy all */
624 #define	M_COPYALL	1000000000
625 
626 /* compatibility with 4.3 */
627 #define	m_copy(m, o, l)	m_copym((m), (o), (l), M_DONTWAIT)
628 
629 /*
630  * pkthdr.aux type tags.
631  */
/* Matched by the two int arguments of m_aux_add()/m_aux_find() below. */
struct mauxtag {
	int	af;	/* NOTE(review): presumably address family -- confirm */
	int	type;
};
636 
637 #ifdef _KERNEL
638 extern	u_long		 m_clalloc_wid;	/* mbuf cluster wait count */
639 extern	u_long		 m_mballoc_wid;	/* mbuf wait count */
640 extern	int		 max_linkhdr;	/* largest link-level header */
641 extern	int		 max_protohdr;	/* largest protocol header */
642 extern	int		 max_hdr;	/* largest link+protocol header */
643 extern	int		 max_datalen;	/* MHLEN - max_hdr */
644 extern	struct mbstat	 mbstat;
645 extern	u_long		 mbtypes[MT_NTYPES]; /* per-type mbuf allocations */
646 extern	int		 mbuf_wait;	/* mbuf sleep time */
647 extern	struct mbuf	*mbutl;		/* virtual address of mclusters */
648 extern	struct mclfree_lst	mclfree;
649 extern	struct mbffree_lst	mmbfree;
650 extern	struct mcntfree_lst	mcntfree;
651 extern	int		 nmbclusters;
652 extern	int		 nmbufs;
653 extern	int		 nsfbufs;
654 
655 void	m_adj __P((struct mbuf *, int));
656 int	m_alloc_ref __P((u_int, int));
657 void	m_cat __P((struct mbuf *,struct mbuf *));
658 int	m_clalloc __P((int, int));
659 caddr_t	m_clalloc_wait __P((void));
660 void	m_copyback __P((struct mbuf *, int, int, caddr_t));
661 void	m_copydata __P((struct mbuf *,int,int,caddr_t));
662 struct	mbuf *m_copym __P((struct mbuf *, int, int, int));
663 struct	mbuf *m_copypacket __P((struct mbuf *, int));
664 struct	mbuf *m_devget __P((char *, int, int, struct ifnet *,
665     void (*copy)(char *, caddr_t, u_int)));
666 struct	mbuf *m_dup __P((struct mbuf *, int));
667 struct	mbuf *m_free __P((struct mbuf *));
668 void	m_freem __P((struct mbuf *));
669 struct	mbuf *m_get __P((int, int));
670 struct	mbuf *m_getclr __P((int, int));
671 struct	mbuf *m_gethdr __P((int, int));
672 int	m_mballoc __P((int, int));
673 struct	mbuf *m_mballoc_wait __P((void));
674 struct	mbuf *m_prepend __P((struct mbuf *,int,int));
675 struct	mbuf *m_pulldown __P((struct mbuf *, int, int, int *));
676 void	m_print __P((const struct mbuf *m));
677 struct	mbuf *m_pullup __P((struct mbuf *, int));
678 struct	mbuf *m_split __P((struct mbuf *,int,int));
679 struct	mbuf *m_aux_add __P((struct mbuf *, int, int));
680 struct	mbuf *m_aux_find __P((struct mbuf *, int, int));
681 void	m_aux_delete __P((struct mbuf *, struct mbuf *));
682 #endif /* _KERNEL */
683 
684 #endif /* !_SYS_MBUF_H_ */
685