xref: /freebsd/sys/sys/mbuf.h (revision ee41f1b1cf5e3d4f586cb85b46123b416275862c)
1 /*
2  * Copyright (c) 1982, 1986, 1988, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *	This product includes software developed by the University of
16  *	California, Berkeley and its contributors.
17  * 4. Neither the name of the University nor the names of its contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31  * SUCH DAMAGE.
32  *
33  *	@(#)mbuf.h	8.5 (Berkeley) 2/19/95
34  * $FreeBSD$
35  */
36 
37 #ifndef _SYS_MBUF_H_
38 #define	_SYS_MBUF_H_
39 
40 #include <sys/mutex.h>	/* XXX */
41 
/*
 * Mbufs are of a single size, MSIZE (machine/param.h), which
 * includes overhead.  An mbuf may add a single "mbuf cluster" of size
 * MCLBYTES (also in machine/param.h), which has no additional overhead
 * and is used instead of the internal data area; this is done when
 * at least MINCLSIZE of data must be stored.
 *
 * MINCLSIZE is MHLEN + 1, i.e. a cluster is used exactly when the data
 * cannot fit even in the internal area of a packet-header mbuf.
 */

#define	MLEN		(MSIZE - sizeof(struct m_hdr))	/* normal data len */
#define	MHLEN		(MLEN - sizeof(struct pkthdr))	/* data len w/pkthdr */

#define	MINCLSIZE	(MHLEN + 1)	/* smallest amount to put in cluster */
#define	M_MAXCOMPRESS	(MHLEN / 2)	/* max amount to copy for compression */
55 
/*
 * Maximum number of allocatable counters for external buffers. This
 * ensures enough VM address space for the allocation of counters
 * in the extreme case where all possible external buffers are allocated.
 *
 * Note: When new types of external storage are allocated, EXT_COUNTERS
 * 	 must be tuned accordingly. Practically, this isn't a big deal
 *	 as each counter is only a word long, so we can fit
 *	 (PAGE_SIZE / length of word) counters in a single page.
 *
 * XXX: Must increase this if using any of if_ti, if_wb, if_sk drivers,
 *	or any other drivers which may manage their own buffers and
 *	eventually attach them to mbufs.
 */
#define EXT_COUNTERS (nmbclusters + nsfbufs)

/*
 * Macros for type conversion
 * mtod(m, t) -	convert mbuf pointer to data pointer of correct type
 * dtom(x) -	convert data pointer within mbuf to mbuf pointer (XXX)
 *
 * dtom() simply masks the pointer down to an MSIZE boundary, so it is
 * only valid for pointers into the mbuf's own storage; it cannot
 * recover the mbuf from a pointer into external (M_EXT) storage.
 */
#define	mtod(m, t)	((t)((m)->m_data))
#define	dtom(x)		((struct mbuf *)((intptr_t)(x) & ~(MSIZE-1)))
79 
/*
 * Header at beginning of each mbuf.  Fields are normally accessed via
 * the m_next/m_len/m_data/... convenience macros defined along with
 * struct mbuf below.
 */
struct m_hdr {
	struct	mbuf *mh_next;		/* next buffer in chain */
	struct	mbuf *mh_nextpkt;	/* next chain in queue/record */
	caddr_t	mh_data;		/* location of data */
	int	mh_len;			/* amount of data in this mbuf */
	short	mh_type;		/* type of data in this mbuf (MT_*) */
	short	mh_flags;		/* flags; see below */
};
89 
/*
 * Record/packet header in first mbuf of chain; valid only if M_PKTHDR
 * is set.  csum_flags/csum_data are interpreted per the CSUM_* flags
 * defined below.
 */
struct pkthdr {
	struct	ifnet *rcvif;		/* rcv interface */
	int	len;			/* total packet length */
	/* variables for ip and tcp reassembly */
	void	*header;		/* pointer to packet header */
	/* variables for hardware checksum */
	int	csum_flags;		/* flags regarding checksum */
	int	csum_data;		/* data field used by csum routines */
	struct	mbuf *aux;		/* extra data buffer; ipsec/others */
};
101 
/*
 * Description of external storage mapped into mbuf, valid if M_EXT set.
 * ref_cnt points to a counter shared by every mbuf referencing the
 * same buffer (see the MEXT_* macros below).  EXT_CLUSTER buffers are
 * freed internally; other types are released via (*ext_free)(ext_buf,
 * ext_args).
 */
struct m_ext {
	caddr_t	ext_buf;		/* start of buffer */
	void	(*ext_free)		/* free routine if not the usual */
		    (caddr_t, void *);
	void	*ext_args;		/* optional argument pointer */
	u_int	ext_size;		/* size of buffer, for ext_free */
	union	mext_refcnt *ref_cnt;	/* pointer to ref count info */
	int	ext_type;		/* type of external storage (EXT_*) */
};
112 
/*
 * The mbuf proper: the fixed m_hdr followed by a union overlaying the
 * optional packet header, the optional external-storage descriptor,
 * and the internal data area (MLEN bytes plain, MHLEN with pkthdr).
 */
struct mbuf {
	struct	m_hdr m_hdr;
	union {
		struct {
			struct	pkthdr MH_pkthdr;	/* M_PKTHDR set */
			union {
				struct	m_ext MH_ext;	/* M_EXT set */
				char	MH_databuf[MHLEN];
			} MH_dat;
		} MH;
		char	M_databuf[MLEN];		/* !M_PKTHDR, !M_EXT */
	} M_dat;
};
/* Convenience aliases for the nested fields above. */
#define	m_next		m_hdr.mh_next
#define	m_len		m_hdr.mh_len
#define	m_data		m_hdr.mh_data
#define	m_type		m_hdr.mh_type
#define	m_flags		m_hdr.mh_flags
#define	m_nextpkt	m_hdr.mh_nextpkt
#define	m_act		m_nextpkt
#define	m_pkthdr	M_dat.MH.MH_pkthdr
#define	m_ext		M_dat.MH.MH_dat.MH_ext
#define	m_pktdat	M_dat.MH.MH_dat.MH_databuf
#define	m_dat		M_dat.M_databuf
137 
/* mbuf flags */
#define	M_EXT		0x0001	/* has associated external storage */
#define	M_PKTHDR	0x0002	/* start of record */
#define	M_EOR		0x0004	/* end of record */
#define M_RDONLY	0x0008	/* associated data is marked read-only;
				   checked by M_WRITABLE() */
#define	M_PROTO1	0x0010	/* protocol-specific */
#define	M_PROTO2	0x0020	/* protocol-specific */
#define	M_PROTO3	0x0040	/* protocol-specific */
#define	M_PROTO4	0x0080	/* protocol-specific */
#define	M_PROTO5	0x0100	/* protocol-specific */

/* mbuf pkthdr flags, also in m_flags */
#define	M_BCAST		0x0200	/* send/received as link-level broadcast */
#define	M_MCAST		0x0400	/* send/received as link-level multicast */
#define	M_FRAG		0x0800	/* packet is a fragment of a larger packet */
#define	M_FIRSTFRAG	0x1000	/* packet is first fragment */
#define	M_LASTFRAG	0x2000	/* packet is last fragment */

/* external buffer types: identify ext_buf type (m_ext.ext_type) */
#define	EXT_CLUSTER	1	/* mbuf cluster */
#define	EXT_SFBUF	2	/* sendfile(2)'s sf_bufs */
#define	EXT_NET_DRV	100	/* custom ext_buf provided by net driver(s) */
#define	EXT_MOD_TYPE	200	/* custom module's ext_buf type */
161 
/*
 * Flags preserved when copying m_pkthdr (see M_COPY_PKTHDR).
 * Note that M_EXT is not among them: external-storage attachment is
 * per-mbuf state.
 */
#define	M_COPYFLAGS	(M_PKTHDR|M_EOR|M_PROTO1|M_PROTO2|M_PROTO3 | \
			    M_PROTO4|M_PROTO5|M_BCAST|M_MCAST|M_FRAG|M_RDONLY)
165 
/*
 * Flags indicating hw checksum support and sw checksum requirements.
 * The "will csum" group is a request for (hardware) checksumming on
 * output; the "did csum" group reports receive-side checksum status.
 */
#define CSUM_IP			0x0001		/* will csum IP */
#define CSUM_TCP		0x0002		/* will csum TCP */
#define CSUM_UDP		0x0004		/* will csum UDP */
#define CSUM_IP_FRAGS		0x0008		/* will csum IP fragments */
#define CSUM_FRAGMENT		0x0010		/* will do IP fragmentation */

#define CSUM_IP_CHECKED		0x0100		/* did csum IP */
#define CSUM_IP_VALID		0x0200		/*   ... the csum is valid */
#define CSUM_DATA_VALID		0x0400		/* csum_data field is valid */
#define CSUM_PSEUDO_HDR		0x0800		/* csum_data has pseudo hdr */

#define CSUM_DELAY_DATA		(CSUM_TCP | CSUM_UDP)
#define CSUM_DELAY_IP		(CSUM_IP)	/* XXX add ipv6 here too? */

/*
 * mbuf types (m_type).  The #if 0 entries are historical values that
 * are no longer allocated but whose numbers are kept reserved.
 */
#define	MT_FREE		0	/* should be on free list */
#define	MT_DATA		1	/* dynamic (data) allocation */
#define	MT_HEADER	2	/* packet header */
#if 0
#define	MT_SOCKET	3	/* socket structure */
#define	MT_PCB		4	/* protocol control block */
#define	MT_RTABLE	5	/* routing tables */
#define	MT_HTABLE	6	/* IMP host tables */
#define	MT_ATABLE	7	/* address resolution tables */
#endif
#define	MT_SONAME	8	/* socket name */
#if 0
#define	MT_SOOPTS	10	/* socket options */
#endif
#define	MT_FTABLE	11	/* fragment reassembly header */
#if 0
#define	MT_RIGHTS	12	/* access rights */
#define	MT_IFADDR	13	/* interface address */
#endif
#define	MT_CONTROL	14	/* extra-data protocol message */
#define	MT_OOBDATA	15	/* expedited data  */

#define	MT_NTYPES	16	/* number of mbuf types for mbtypes[] */
205 
/*
 * mbuf statistics, exported to userland (netstat -m).
 */
struct mbstat {
	u_long	m_mbufs;	/* # mbufs obtained from page pool */
	u_long	m_clusters;	/* # clusters obtained from page pool */
	u_long	m_clfree;	/* # clusters on freelist (cache) */
	u_long	m_refcnt;	/* # ref counters obtained from page pool */
	u_long	m_refree;	/* # ref counters on freelist (cache) */
	u_long	m_spare;	/* spare field */
	u_long	m_drops;	/* times failed to find space */
	u_long	m_wait;		/* times waited for space */
	u_long	m_drain;	/* times drained protocols for space */
	u_long	m_mcfail;	/* times m_copym failed */
	u_long	m_mpfail;	/* times m_pullup failed */
	u_long	m_msize;	/* length of an mbuf */
	u_long	m_mclbytes;	/* length of an mbuf cluster */
	u_long	m_minclsize;	/* min length of data to allocate a cluster */
	u_long	m_mlen;		/* length of data in an mbuf */
	u_long	m_mhlen;	/* length of data in a header mbuf */
};

/*
 * Flags to m_get/MGET: M_TRYWAIT allocations may sleep waiting for a
 * freed mbuf/cluster (see _MGET/_MCLALLOC); M_DONTWAIT ones fail
 * immediately instead.
 */
#define	M_DONTWAIT	1
#define	M_TRYWAIT	0
#define	M_WAIT		M_TRYWAIT	/* XXX: Deprecated. */
232 
/*
 * Normal mbuf clusters are normally treated as character arrays
 * after allocation, but use the first word of the buffer as a free list
 * pointer while on the free list.
 */
union mcluster {
	union	mcluster *mcl_next;
	char	mcl_buf[MCLBYTES];
};

/*
 * The m_ext object reference counter structure.  Like mcluster, the
 * object doubles as its own free-list link while unallocated.
 */
union mext_refcnt {
	union	mext_refcnt *next_ref;
	u_int	refcnt;
};

/*
 * Free list header definitions: mbffree_lst, mclfree_lst, mcntfree_lst.
 * Each list is protected by its embedded mutex (m_mtx), taken by the
 * allocation/free macros below.
 */
struct mbffree_lst {
	struct mbuf *m_head;
	struct mtx m_mtx;
};

struct mclfree_lst {
        union mcluster *m_head;
        struct mtx m_mtx;
};

struct mcntfree_lst {
        union mext_refcnt *m_head;
        struct mtx m_mtx;
};
268 
/*
 * Wake up the next instance (if any) of a sleeping allocation - which is
 * waiting for a {cluster, mbuf} to be freed.  m_wid is the count of
 * sleepers (m_mballoc_wid / m_clalloc_wid); nothing is done when it is
 * zero.
 *
 * Must be called with the appropriate mutex held.
 */
#define	MBWAKEUP(m_wid) do {						\
	if ((m_wid))							\
		wakeup_one(&(m_wid)); 					\
} while (0)
279 
/*
 * mbuf external reference count management macros:
 *
 * MEXT_IS_REF(m): true if (m) is not the only mbuf referencing
 *     the external buffer ext_buf
 * MEXT_REM_REF(m): remove reference to m_ext object
 * MEXT_ADD_REF(m): add reference to m_ext object already
 *     referred to by (m)
 * MEXT_INIT_REF(m): allocate and initialize an external
 *     object reference counter for (m)
 *
 * The counter is manipulated with atomic operations, so no mutex is
 * taken here.  MEXT_REM_REF asserts that there is in fact a reference
 * to drop: refcnt is unsigned, so decrementing from zero would
 * silently wrap around rather than go negative.
 */
#define MEXT_IS_REF(m) ((m)->m_ext.ref_cnt->refcnt > 1)

#define MEXT_REM_REF(m) do {						\
	KASSERT((m)->m_ext.ref_cnt->refcnt > 0, ("m_ext refcnt == 0"));	\
	atomic_subtract_int(&((m)->m_ext.ref_cnt->refcnt), 1);		\
} while (0)

#define MEXT_ADD_REF(m) atomic_add_int(&((m)->m_ext.ref_cnt->refcnt), 1)
299 
/*
 * _MEXT_ALLOC_CNT(m_cnt, how): grab a reference counter off the
 * mcntfree list, calling m_alloc_ref() to replenish the list if it is
 * empty.  Takes and releases mcntfree.m_mtx itself.  On success the
 * counter is handed out with refcnt == 0; on failure (m_cnt) is NULL.
 */
#define _MEXT_ALLOC_CNT(m_cnt, how) do {				\
	union mext_refcnt *__mcnt;					\
									\
	mtx_lock(&mcntfree.m_mtx);					\
	if (mcntfree.m_head == NULL)					\
		m_alloc_ref(1, (how));					\
	__mcnt = mcntfree.m_head;					\
	if (__mcnt != NULL) {						\
		mcntfree.m_head = __mcnt->next_ref;			\
		mbstat.m_refree--;					\
		__mcnt->refcnt = 0;					\
	}								\
	mtx_unlock(&mcntfree.m_mtx);					\
	(m_cnt) = __mcnt;						\
} while (0)

/*
 * _MEXT_DEALLOC_CNT(m_cnt): return a reference counter to the head of
 * the mcntfree list.  Takes and releases mcntfree.m_mtx itself.
 */
#define _MEXT_DEALLOC_CNT(m_cnt) do {					\
	union mext_refcnt *__mcnt = (m_cnt);				\
									\
	mtx_lock(&mcntfree.m_mtx);					\
	__mcnt->next_ref = mcntfree.m_head;				\
	mcntfree.m_head = __mcnt;					\
	mbstat.m_refree++;						\
	mtx_unlock(&mcntfree.m_mtx);					\
} while (0)

/*
 * MEXT_INIT_REF(m, how): allocate a counter for (m)'s m_ext and take
 * the first reference (refcnt goes 0 -> 1).  On allocation failure
 * m_ext.ref_cnt is left NULL; callers (MCLGET, MEXTADD) check for
 * that.
 */
#define MEXT_INIT_REF(m, how) do {					\
	struct mbuf *__mmm = (m);					\
									\
	_MEXT_ALLOC_CNT(__mmm->m_ext.ref_cnt, (how));			\
	if (__mmm->m_ext.ref_cnt != NULL)				\
		MEXT_ADD_REF(__mmm);					\
} while (0)
333 
/*
 * mbuf allocation/deallocation macros:
 *
 *	MGET(struct mbuf *m, int how, int type)
 * allocates an mbuf and initializes it to contain internal data.
 *
 *	MGETHDR(struct mbuf *m, int how, int type)
 * allocates an mbuf and initializes it to contain a packet header
 * and internal data.
 */
/*
 * Lower-level macros for MGET(HDR)... Not to be used outside the
 * subsystem ("non-exportable" macro names are prepended with "_").
 */
/*
 * _MGET_SETUP: initialize a plain (non-pkthdr) mbuf: no chain links,
 * data pointer at the internal m_dat area, no flags.
 */
#define _MGET_SETUP(m_set, m_set_type) do {				\
	(m_set)->m_type = (m_set_type);					\
	(m_set)->m_next = NULL;						\
	(m_set)->m_nextpkt = NULL;					\
	(m_set)->m_data = (m_set)->m_dat;				\
	(m_set)->m_flags = 0;						\
} while (0)

/*
 * _MGET: pull an mbuf off the mmbfree list, calling m_mballoc() to
 * replenish the list if it is empty and, for M_TRYWAIT, sleeping in
 * m_mballoc_wait() as a last resort.  The caller must hold
 * mmbfree.m_mtx (MGET/MGETHDR take it).  (m_mget) is NULL on failure.
 */
#define	_MGET(m_mget, m_get_how) do {					\
	if (mmbfree.m_head == NULL)					\
		m_mballoc(1, (m_get_how));				\
	(m_mget) = mmbfree.m_head;					\
	if ((m_mget) != NULL) {						\
		mmbfree.m_head = (m_mget)->m_next;			\
		mbtypes[MT_FREE]--;					\
	} else {							\
		if ((m_get_how) == M_TRYWAIT)				\
			(m_mget) = m_mballoc_wait();			\
	}								\
} while (0)
368 
/*
 * MGET: allocate an mbuf of the given type.  Takes mmbfree.m_mtx,
 * allocates via _MGET(), charges the per-type counter while still
 * under the lock, and initializes the mbuf after dropping it.
 * (m) is set to NULL if the allocation failed.
 */
#define MGET(m, how, type) do {						\
	struct mbuf *_mm;						\
	int _mhow = (how);						\
	int _mtype = (type);						\
									\
	mtx_lock(&mmbfree.m_mtx);					\
	_MGET(_mm, _mhow);						\
	if (_mm != NULL) {						\
		mbtypes[_mtype]++;					\
		mtx_unlock(&mmbfree.m_mtx);				\
		_MGET_SETUP(_mm, _mtype);				\
	} else								\
		mtx_unlock(&mmbfree.m_mtx);				\
	(m) = _mm;							\
} while (0)
384 
/*
 * _MGETHDR_SETUP: initialize a packet-header mbuf: data pointer at the
 * (smaller) m_pktdat area, M_PKTHDR set, and the pkthdr fields that
 * must start clean zeroed.
 * NOTE(review): m_pkthdr.len, header and csum_data are not initialized
 * here — callers appear to be responsible for them; confirm.
 */
#define _MGETHDR_SETUP(m_set, m_set_type) do {				\
	(m_set)->m_type = (m_set_type);					\
	(m_set)->m_next = NULL;						\
	(m_set)->m_nextpkt = NULL;					\
	(m_set)->m_data = (m_set)->m_pktdat;				\
	(m_set)->m_flags = M_PKTHDR;					\
	(m_set)->m_pkthdr.rcvif = NULL;					\
	(m_set)->m_pkthdr.csum_flags = 0;				\
	(m_set)->m_pkthdr.aux = NULL;					\
} while (0)

/*
 * MGETHDR: as MGET, but the resulting mbuf is initialized as a
 * packet-header mbuf.  (m) is set to NULL on failure.
 */
#define MGETHDR(m, how, type) do {					\
	struct mbuf *_mm;						\
	int _mhow = (how);						\
	int _mtype = (type);						\
									\
	mtx_lock(&mmbfree.m_mtx);					\
	_MGET(_mm, _mhow);						\
	if (_mm != NULL) {						\
		mbtypes[_mtype]++;					\
		mtx_unlock(&mmbfree.m_mtx);				\
		_MGETHDR_SETUP(_mm, _mtype);				\
	} else								\
		mtx_unlock(&mmbfree.m_mtx);				\
	(m) = _mm;							\
} while (0)
411 
/*
 * mbuf external storage macros:
 *
 *   MCLGET allocates and refers an mcluster to an mbuf
 *   MEXTADD sets up pre-allocated external storage and refers to mbuf
 *   MEXTFREE removes reference to external object and frees it if
 *       necessary
 */
/*
 * _MCLALLOC: pull a cluster off the mclfree list, calling m_clalloc()
 * to replenish the list if it is empty and, for M_TRYWAIT, sleeping in
 * m_clalloc_wait() as a last resort.  The caller must hold
 * mclfree.m_mtx (MCLGET takes it).  (p) is NULL on failure.
 */
#define	_MCLALLOC(p, how) do {						\
	caddr_t _mp;							\
	int _mhow = (how);						\
									\
	if (mclfree.m_head == NULL)					\
		m_clalloc(1, _mhow);					\
	_mp = (caddr_t)mclfree.m_head;					\
	if (_mp != NULL) {						\
		mbstat.m_clfree--;					\
		mclfree.m_head = ((union mcluster *)_mp)->mcl_next;	\
	} else {							\
		if (_mhow == M_TRYWAIT)					\
			_mp = m_clalloc_wait();				\
	}								\
	(p) = _mp;							\
} while (0)

/*
 * MCLGET: attach a freshly allocated cluster to (m).  If either the
 * cluster or its reference counter cannot be allocated, the mbuf is
 * left without M_EXT set (and ext_buf NULL); callers must check
 * (m)->m_flags & M_EXT to detect success.
 */
#define	MCLGET(m, how) do {						\
	struct mbuf *_mm = (m);						\
									\
	mtx_lock(&mclfree.m_mtx);					\
	_MCLALLOC(_mm->m_ext.ext_buf, (how));				\
	mtx_unlock(&mclfree.m_mtx);					\
	if (_mm->m_ext.ext_buf != NULL) {				\
		MEXT_INIT_REF(_mm, (how));				\
		if (_mm->m_ext.ref_cnt == NULL) {			\
			_MCLFREE(_mm->m_ext.ext_buf);			\
			_mm->m_ext.ext_buf = NULL;			\
		} else {						\
			_mm->m_data = _mm->m_ext.ext_buf;		\
			_mm->m_flags |= M_EXT;				\
			_mm->m_ext.ext_free = NULL;			\
			_mm->m_ext.ext_args = NULL;			\
			_mm->m_ext.ext_size = MCLBYTES;			\
			_mm->m_ext.ext_type = EXT_CLUSTER;		\
		}							\
	}								\
} while (0)
458 
/*
 * MEXTADD: attach caller-supplied external storage to (m).  The
 * reference counter is allocated with M_TRYWAIT; if that still fails,
 * the mbuf is silently left without M_EXT set, so callers should
 * check (m)->m_flags & M_EXT afterwards.
 */
#define MEXTADD(m, buf, size, free, args, flags, type) do {		\
	struct mbuf *_mm = (m);						\
									\
	MEXT_INIT_REF(_mm, M_TRYWAIT);					\
	if (_mm->m_ext.ref_cnt != NULL) {				\
		_mm->m_flags |= (M_EXT | (flags));			\
		_mm->m_ext.ext_buf = (caddr_t)(buf);			\
		_mm->m_data = _mm->m_ext.ext_buf;			\
		_mm->m_ext.ext_size = (size);				\
		_mm->m_ext.ext_free = (free);				\
		_mm->m_ext.ext_args = (args);				\
		_mm->m_ext.ext_type = (type);				\
	}								\
} while (0)

/*
 * _MCLFREE: return a cluster to the head of the mclfree list and wake
 * one sleeper (if any) waiting for a cluster.  Takes and releases
 * mclfree.m_mtx itself.
 */
#define	_MCLFREE(p) do {						\
	union mcluster *_mp = (union mcluster *)(p);			\
									\
	mtx_lock(&mclfree.m_mtx);					\
	_mp->mcl_next = mclfree.m_head;					\
	mclfree.m_head = _mp;						\
	mbstat.m_clfree++;						\
	MBWAKEUP(m_clalloc_wid);					\
	mtx_unlock(&mclfree.m_mtx); 					\
} while (0)
484 
/* MEXTFREE:
 * Drop (m)'s reference to its external storage and, if that was the
 * last reference, free the storage (via ext_free for custom types,
 * _MCLFREE for clusters) and its counter.
 * If the atomic_cmpset_int() returns 0, then we effectively do nothing
 * in terms of "cleaning up" (freeing the ext buf and ref. counter) as
 * this means that either there are still references, or another thread
 * is taking care of the clean-up.
 */
#define	MEXTFREE(m) do {						\
	struct mbuf *_mmm = (m);					\
									\
	MEXT_REM_REF(_mmm);						\
	if (atomic_cmpset_int(&_mmm->m_ext.ref_cnt->refcnt, 0, 1)) {	\
		if (_mmm->m_ext.ext_type != EXT_CLUSTER) {		\
			(*(_mmm->m_ext.ext_free))(_mmm->m_ext.ext_buf,	\
			    _mmm->m_ext.ext_args);			\
		} else							\
			_MCLFREE(_mmm->m_ext.ext_buf);			\
		_MEXT_DEALLOC_CNT(_mmm->m_ext.ref_cnt);			\
	}								\
	_mmm->m_flags &= ~M_EXT;					\
} while (0)

/*
 * MFREE(struct mbuf *m, struct mbuf *n)
 * Free a single mbuf and associated external storage.
 * Place the successor, if any, in n.
 * Takes mmbfree.m_mtx itself and wakes one sleeper (if any) waiting
 * for an mbuf.
 */
#define	MFREE(m, n) do {						\
	struct mbuf *_mm = (m);						\
									\
	KASSERT(_mm->m_type != MT_FREE, ("freeing free mbuf"));		\
	if (_mm->m_flags & M_EXT)					\
		MEXTFREE(_mm);						\
	mtx_lock(&mmbfree.m_mtx);					\
	mbtypes[_mm->m_type]--;						\
	_mm->m_type = MT_FREE;						\
	mbtypes[MT_FREE]++;						\
	(n) = _mm->m_next;						\
	_mm->m_next = mmbfree.m_head;					\
	mmbfree.m_head = _mm;						\
	MBWAKEUP(m_mballoc_wid);					\
	mtx_unlock(&mmbfree.m_mtx); 					\
} while (0)
527 
/*
 * M_WRITABLE(m)
 * Evaluate TRUE if it's safe to write to the mbuf m's data region (this
 * can be both the local data payload, or an external buffer area,
 * depending on whether M_EXT is set).
 * I.e. the data is not marked read-only and, if external, (m) holds
 * the only reference to it.
 */
#define M_WRITABLE(m)	(!((m)->m_flags & M_RDONLY) && (!((m)->m_flags  \
			    & M_EXT) || !MEXT_IS_REF(m)))

/*
 * Copy mbuf pkthdr from "from" to "to".
 * from must have M_PKTHDR set, and to must be empty.
 * aux pointer will be moved to `to' (ownership transfers: the source's
 * aux pointer is cleared).  Only the flags in M_COPYFLAGS are copied.
 */
#define	M_COPY_PKTHDR(to, from) do {					\
	struct mbuf *_mfrom = (from);					\
	struct mbuf *_mto = (to);					\
									\
	_mto->m_data = _mto->m_pktdat;					\
	_mto->m_flags = _mfrom->m_flags & M_COPYFLAGS;			\
	_mto->m_pkthdr = _mfrom->m_pkthdr;				\
	_mfrom->m_pkthdr.aux = NULL;					\
} while (0)
551 
/*
 * Set the m_data pointer of a newly-allocated mbuf (m_get/MGET) to place
 * an object of the specified size at the end of the mbuf, longword aligned.
 * Only valid while m_data still points at m_dat.
 */
#define	M_ALIGN(m, len) do {						\
	(m)->m_data += (MLEN - (len)) & ~(sizeof(long) - 1);		\
} while (0)

/*
 * As above, for mbufs allocated with m_gethdr/MGETHDR
 * or initialized by M_COPY_PKTHDR (data in m_pktdat, MHLEN bytes).
 */
#define	MH_ALIGN(m, len) do {						\
	(m)->m_data += (MHLEN - (len)) & ~(sizeof(long) - 1);		\
} while (0)

/*
 * Compute the amount of space available
 * before the current start of data in an mbuf.
 * For M_EXT mbufs this is hard-coded to 0 (conservative; see the
 * commented-out expression below).
 */
#define	M_LEADINGSPACE(m)						\
	((m)->m_flags & M_EXT ?						\
	    /* (m)->m_data - (m)->m_ext.ext_buf */ 0 :			\
	    (m)->m_flags & M_PKTHDR ? (m)->m_data - (m)->m_pktdat :	\
	    (m)->m_data - (m)->m_dat)

/*
 * Compute the amount of space available
 * after the end of data in an mbuf (to the end of the internal data
 * area, or of the external buffer for M_EXT mbufs).
 */
#define	M_TRAILINGSPACE(m)						\
	((m)->m_flags & M_EXT ? (m)->m_ext.ext_buf +			\
	    (m)->m_ext.ext_size - ((m)->m_data + (m)->m_len) :		\
	    &(m)->m_dat[MLEN] - ((m)->m_data + (m)->m_len))
586 
/*
 * Arrange to prepend space of size plen to mbuf m.
 * Uses the mbuf's leading space when there is enough, otherwise falls
 * back to m_prepend() to allocate a new mbuf in front.
 * If a new mbuf must be allocated, how specifies whether to wait.
 * If the allocation fails, the original mbuf chain is freed and m is
 * set to NULL.  On success, m_pkthdr.len is adjusted for pkthdr mbufs.
 */
#define	M_PREPEND(m, plen, how) do {					\
	struct mbuf **_mmp = &(m);					\
	struct mbuf *_mm = *_mmp;					\
	int _mplen = (plen);						\
	int __mhow = (how);						\
									\
	if (M_LEADINGSPACE(_mm) >= _mplen) {				\
		_mm->m_data -= _mplen;					\
		_mm->m_len += _mplen;					\
	} else								\
		_mm = m_prepend(_mm, _mplen, __mhow);			\
	if (_mm != NULL && _mm->m_flags & M_PKTHDR)			\
		_mm->m_pkthdr.len += _mplen;				\
	*_mmp = _mm;							\
} while (0)

/*
 * Change mbuf to new type.  The per-type counters are updated with
 * atomic operations, so no free-list mutex is needed here.
 */
#define	MCHTYPE(m, t) do {						\
	struct mbuf *_mm = (m);						\
	int _mt = (t);							\
									\
	atomic_subtract_long(&mbtypes[_mm->m_type], 1);			\
	atomic_add_long(&mbtypes[_mt], 1);				\
	_mm->m_type = (_mt);						\
} while (0)
620 
/* length to m_copy to copy all */
#define	M_COPYALL	1000000000

/* compatibility with 4.3 */
#define	m_copy(m, o, l)	m_copym((m), (o), (l), M_DONTWAIT)

/*
 * pkthdr.aux type tags: identify an aux mbuf by address family and
 * type (see m_aux_add/m_aux_find/m_aux_delete below).
 */
struct mauxtag {
	int	af;
	int	type;
};
634 
#ifdef _KERNEL
/* Sleeper counts for allocator waiters (see MBWAKEUP). */
extern	u_long		 m_clalloc_wid;	/* mbuf cluster wait count */
extern	u_long		 m_mballoc_wid;	/* mbuf wait count */
/* Header-size limits used when laying out packet headers. */
extern	int		 max_linkhdr;	/* largest link-level header */
extern	int		 max_protohdr;	/* largest protocol header */
extern	int		 max_hdr;	/* largest link+protocol header */
extern	int		 max_datalen;	/* MHLEN - max_hdr */
/* Global statistics and per-type allocation counters. */
extern	struct mbstat	 mbstat;
extern	u_long		 mbtypes[MT_NTYPES]; /* per-type mbuf allocations */
extern	int		 mbuf_wait;	/* mbuf sleep time */
extern	struct mbuf	*mbutl;		/* virtual address of mclusters */
/* The three free lists used by the allocation macros above. */
extern	struct mclfree_lst	mclfree;
extern	struct mbffree_lst	mmbfree;
extern	struct mcntfree_lst	mcntfree;
extern	int		 nmbclusters;
extern	int		 nmbufs;
extern	int		 nsfbufs;

/* mbuf subsystem functions (implemented in kern/uipc_mbuf.c et al.). */
void	m_adj(struct mbuf *, int);
int	m_alloc_ref(u_int, int);
void	m_cat(struct mbuf *,struct mbuf *);
int	m_clalloc(int, int);
caddr_t	m_clalloc_wait(void);
void	m_copyback(struct mbuf *, int, int, caddr_t);
void	m_copydata(struct mbuf *,int,int,caddr_t);
struct	mbuf *m_copym(struct mbuf *, int, int, int);
struct	mbuf *m_copypacket(struct mbuf *, int);
struct	mbuf *m_devget(char *, int, int, struct ifnet *,
    void (*copy)(char *, caddr_t, u_int));
struct	mbuf *m_dup(struct mbuf *, int);
struct	mbuf *m_free(struct mbuf *);
void	m_freem(struct mbuf *);
struct	mbuf *m_get(int, int);
struct	mbuf *m_getm(struct mbuf *, int, int, int);
struct	mbuf *m_getclr(int, int);
struct	mbuf *m_gethdr(int, int);
int	m_mballoc(int, int);
struct	mbuf *m_mballoc_wait(void);
struct	mbuf *m_prepend(struct mbuf *,int,int);
struct	mbuf *m_pulldown(struct mbuf *, int, int, int *);
void	m_print(const struct mbuf *m);
struct	mbuf *m_pullup(struct mbuf *, int);
struct	mbuf *m_split(struct mbuf *,int,int);
struct	mbuf *m_aux_add(struct mbuf *, int, int);
struct	mbuf *m_aux_find(struct mbuf *, int, int);
void	m_aux_delete(struct mbuf *, struct mbuf *);
#endif /* _KERNEL */
682 
683 #endif /* !_SYS_MBUF_H_ */
684