xref: /freebsd/sys/kern/kern_mbuf.c (revision bc4a1b8ccd37465150c5d75ed580c6609422f433)
1099a0e58SBosko Milekic /*-
28076cb52SBosko Milekic  * Copyright (c) 2004, 2005,
38076cb52SBosko Milekic  *	Bosko Milekic <bmilekic@FreeBSD.org>.  All rights reserved.
4099a0e58SBosko Milekic  *
5099a0e58SBosko Milekic  * Redistribution and use in source and binary forms, with or without
6099a0e58SBosko Milekic  * modification, are permitted provided that the following conditions
7099a0e58SBosko Milekic  * are met:
8099a0e58SBosko Milekic  * 1. Redistributions of source code must retain the above copyright
9099a0e58SBosko Milekic  *    notice unmodified, this list of conditions and the following
10099a0e58SBosko Milekic  *    disclaimer.
11099a0e58SBosko Milekic  * 2. Redistributions in binary form must reproduce the above copyright
12099a0e58SBosko Milekic  *    notice, this list of conditions and the following disclaimer in the
13099a0e58SBosko Milekic  *    documentation and/or other materials provided with the distribution.
14099a0e58SBosko Milekic  *
15099a0e58SBosko Milekic  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16099a0e58SBosko Milekic  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17099a0e58SBosko Milekic  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18099a0e58SBosko Milekic  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19099a0e58SBosko Milekic  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20099a0e58SBosko Milekic  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21099a0e58SBosko Milekic  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22099a0e58SBosko Milekic  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23099a0e58SBosko Milekic  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24099a0e58SBosko Milekic  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25099a0e58SBosko Milekic  * SUCH DAMAGE.
26099a0e58SBosko Milekic  */
27099a0e58SBosko Milekic 
28099a0e58SBosko Milekic #include <sys/cdefs.h>
29099a0e58SBosko Milekic __FBSDID("$FreeBSD$");
30099a0e58SBosko Milekic 
31099a0e58SBosko Milekic #include "opt_param.h"
32099a0e58SBosko Milekic 
33099a0e58SBosko Milekic #include <sys/param.h>
34099a0e58SBosko Milekic #include <sys/malloc.h>
35099a0e58SBosko Milekic #include <sys/systm.h>
36099a0e58SBosko Milekic #include <sys/mbuf.h>
37099a0e58SBosko Milekic #include <sys/domain.h>
38099a0e58SBosko Milekic #include <sys/eventhandler.h>
39099a0e58SBosko Milekic #include <sys/kernel.h>
40099a0e58SBosko Milekic #include <sys/protosw.h>
41099a0e58SBosko Milekic #include <sys/smp.h>
42099a0e58SBosko Milekic #include <sys/sysctl.h>
43099a0e58SBosko Milekic 
44aed55708SRobert Watson #include <security/mac/mac_framework.h>
45aed55708SRobert Watson 
46099a0e58SBosko Milekic #include <vm/vm.h>
47c45c0034SAlan Cox #include <vm/vm_extern.h>
48c45c0034SAlan Cox #include <vm/vm_kern.h>
49099a0e58SBosko Milekic #include <vm/vm_page.h>
5037140716SAndre Oppermann #include <vm/vm_map.h>
51099a0e58SBosko Milekic #include <vm/uma.h>
52121f0509SMike Silbersack #include <vm/uma_int.h>
53121f0509SMike Silbersack #include <vm/uma_dbg.h>
54099a0e58SBosko Milekic 
55099a0e58SBosko Milekic /*
56099a0e58SBosko Milekic  * In FreeBSD, Mbufs and Mbuf Clusters are allocated from UMA
57099a0e58SBosko Milekic  * Zones.
58099a0e58SBosko Milekic  *
59099a0e58SBosko Milekic  * Mbuf Clusters (2K, contiguous) are allocated from the Cluster
60099a0e58SBosko Milekic  * Zone.  The Zone can be capped at kern.ipc.nmbclusters, if the
61099a0e58SBosko Milekic  * administrator so desires.
62099a0e58SBosko Milekic  *
63099a0e58SBosko Milekic  * Mbufs are allocated from a UMA Master Zone called the Mbuf
64099a0e58SBosko Milekic  * Zone.
65099a0e58SBosko Milekic  *
66099a0e58SBosko Milekic  * Additionally, FreeBSD provides a Packet Zone, which it
67099a0e58SBosko Milekic  * configures as a Secondary Zone to the Mbuf Master Zone,
68099a0e58SBosko Milekic  * thus sharing backend Slab kegs with the Mbuf Master Zone.
69099a0e58SBosko Milekic  *
70099a0e58SBosko Milekic  * Thus common-case allocations and locking are simplified:
71099a0e58SBosko Milekic  *
72099a0e58SBosko Milekic  *  m_clget()                m_getcl()
73099a0e58SBosko Milekic  *    |                         |
74099a0e58SBosko Milekic  *    |   .------------>[(Packet Cache)]    m_get(), m_gethdr()
75099a0e58SBosko Milekic  *    |   |             [     Packet   ]            |
76099a0e58SBosko Milekic  *  [(Cluster Cache)]   [    Secondary ]   [ (Mbuf Cache)     ]
77099a0e58SBosko Milekic  *  [ Cluster Zone  ]   [     Zone     ]   [ Mbuf Master Zone ]
78099a0e58SBosko Milekic  *        |                       \________         |
79099a0e58SBosko Milekic  *  [ Cluster Keg   ]                      \       /
80099a0e58SBosko Milekic  *        |	                         [ Mbuf Keg   ]
81099a0e58SBosko Milekic  *  [ Cluster Slabs ]                         |
82099a0e58SBosko Milekic  *        |                              [ Mbuf Slabs ]
83099a0e58SBosko Milekic  *         \____________(VM)_________________/
8456a4e45aSAndre Oppermann  *
8556a4e45aSAndre Oppermann  *
86fcf90618SGleb Smirnoff  * Whenever an object is allocated with uma_zalloc() out of
8756a4e45aSAndre Oppermann  * one of the Zones its _ctor_ function is executed.  Likewise,
88fcf90618SGleb Smirnoff  * on any deallocation through uma_zfree() the _dtor_ function
8956a4e45aSAndre Oppermann  * is executed.
9056a4e45aSAndre Oppermann  *
9156a4e45aSAndre Oppermann  * Caches are per-CPU and are filled from the Master Zone.
9256a4e45aSAndre Oppermann  *
93fcf90618SGleb Smirnoff  * Whenever an object is allocated from the underlying global
9456a4e45aSAndre Oppermann  * memory pool it gets pre-initialized with the _zinit_ functions.
9556a4e45aSAndre Oppermann  * When the Kegs are overfull, objects get decommissioned with
9656a4e45aSAndre Oppermann  * _zfini_ functions and freed back to the global memory pool.
9756a4e45aSAndre Oppermann  *
98099a0e58SBosko Milekic  */
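
/*
 * Illustrative consumer-side sketch of the paths in the diagram above.
 * This is not part of this file; see mbuf(9) and mbuf.h for the actual
 * API and the m_get()/m_gethdr()/m_getcl()/m_clget() inlines:
 *
 *	struct mbuf *m;
 *
 *	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);  (one call, Packet zone)
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	...
 *	m_freem(m);                (back through the per-CPU caches)
 *
 * m_gethdr() followed by m_clget() reaches the same state through the
 * Mbuf and Cluster zones separately.
 */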
99099a0e58SBosko Milekic 
100ead46972SAndre Oppermann int nmbufs;			/* limits number of mbufs */
10156a4e45aSAndre Oppermann int nmbclusters;		/* limits number of mbuf clusters */
102ec63cb90SAndre Oppermann int nmbjumbop;			/* limits number of page size jumbo clusters */
10356a4e45aSAndre Oppermann int nmbjumbo9;			/* limits number of 9k jumbo clusters */
10456a4e45aSAndre Oppermann int nmbjumbo16;			/* limits number of 16k jumbo clusters */
105099a0e58SBosko Milekic struct mbstat mbstat;
106099a0e58SBosko Milekic 
107e0c00addSAndre Oppermann static quad_t maxmbufmem;	/* overall real memory limit for all mbufs */
108e0c00addSAndre Oppermann 
109e0c00addSAndre Oppermann SYSCTL_QUAD(_kern_ipc, OID_AUTO, maxmbufmem, CTLFLAG_RDTUN, &maxmbufmem, 0,
110e0c00addSAndre Oppermann     "Maximum real memory allocatable to various mbuf types");
111e0c00addSAndre Oppermann 
11262938659SBjoern A. Zeeb /*
11337140716SAndre Oppermann  * tunable_mbinit() has to be run before any mbuf allocations are done.
11462938659SBjoern A. Zeeb  */
115099a0e58SBosko Milekic static void
116099a0e58SBosko Milekic tunable_mbinit(void *dummy)
117099a0e58SBosko Milekic {
118e0c00addSAndre Oppermann 	quad_t realmem;
11937140716SAndre Oppermann 
12037140716SAndre Oppermann 	/*
12137140716SAndre Oppermann 	 * The default limit for all mbuf-related memory is 1/2 of the
12237140716SAndre Oppermann 	 * available kernel memory, i.e. the smaller of physical memory and kmem.
12337140716SAndre Oppermann 	 * At most it can be 3/4 of available kernel memory.
12437140716SAndre Oppermann 	 */
12537140716SAndre Oppermann 	realmem = qmin((quad_t)physmem * PAGE_SIZE,
1262ebcc8acSAndre Oppermann 	    vm_map_max(kmem_map) - vm_map_min(kmem_map));
12737140716SAndre Oppermann 	maxmbufmem = realmem / 2;
128e0c00addSAndre Oppermann 	TUNABLE_QUAD_FETCH("kern.ipc.maxmbufmem", &maxmbufmem);
12937140716SAndre Oppermann 	if (maxmbufmem > realmem / 4 * 3)
13037140716SAndre Oppermann 		maxmbufmem = realmem / 4 * 3;
131099a0e58SBosko Milekic 
132812302c3SNavdeep Parhar 	TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
133416a434cSAndre Oppermann 	if (nmbclusters == 0)
134416a434cSAndre Oppermann 		nmbclusters = maxmbufmem / MCLBYTES / 4;
135812302c3SNavdeep Parhar 
136812302c3SNavdeep Parhar 	TUNABLE_INT_FETCH("kern.ipc.nmbjumbop", &nmbjumbop);
137812302c3SNavdeep Parhar 	if (nmbjumbop == 0)
138416a434cSAndre Oppermann 		nmbjumbop = maxmbufmem / MJUMPAGESIZE / 4;
139812302c3SNavdeep Parhar 
140812302c3SNavdeep Parhar 	TUNABLE_INT_FETCH("kern.ipc.nmbjumbo9", &nmbjumbo9);
141812302c3SNavdeep Parhar 	if (nmbjumbo9 == 0)
142416a434cSAndre Oppermann 		nmbjumbo9 = maxmbufmem / MJUM9BYTES / 6;
143812302c3SNavdeep Parhar 
144812302c3SNavdeep Parhar 	TUNABLE_INT_FETCH("kern.ipc.nmbjumbo16", &nmbjumbo16);
145812302c3SNavdeep Parhar 	if (nmbjumbo16 == 0)
146416a434cSAndre Oppermann 		nmbjumbo16 = maxmbufmem / MJUM16BYTES / 6;
147416a434cSAndre Oppermann 
148416a434cSAndre Oppermann 	/*
149416a434cSAndre Oppermann 	 * We need at least as many mbufs as we have clusters of
150416a434cSAndre Oppermann 	 * the various types added together.
151416a434cSAndre Oppermann 	 */
152416a434cSAndre Oppermann 	TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);
153416a434cSAndre Oppermann 	if (nmbufs < nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16)
154416a434cSAndre Oppermann 		nmbufs = lmax(maxmbufmem / MSIZE / 5,
155416a434cSAndre Oppermann 		    nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16);
156099a0e58SBosko Milekic }
15737140716SAndre Oppermann SYSINIT(tunable_mbinit, SI_SUB_KMEM, SI_ORDER_MIDDLE, tunable_mbinit, NULL);
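
/*
 * Worked example with the defaults above (hypothetical machine): if the
 * smaller of physical memory and the kernel map comes to 8GB, then
 * maxmbufmem = 4GB and nmbclusters = 4GB / MCLBYTES(2048) / 4 = 524288.
 * All of these limits are boot-time tunables and may be overridden from
 * loader.conf(5), e.g.:
 *
 *	kern.ipc.maxmbufmem="2147483648"
 *	kern.ipc.nmbclusters="262144"
 */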
158099a0e58SBosko Milekic 
1594f590175SPaul Saab static int
1604f590175SPaul Saab sysctl_nmbclusters(SYSCTL_HANDLER_ARGS)
1614f590175SPaul Saab {
1624f590175SPaul Saab 	int error, newnmbclusters;
1634f590175SPaul Saab 
1644f590175SPaul Saab 	newnmbclusters = nmbclusters;
165041b706bSDavid Malone 	error = sysctl_handle_int(oidp, &newnmbclusters, 0, req);
1664f590175SPaul Saab 	if (error == 0 && req->newptr) {
167ead46972SAndre Oppermann 		if (newnmbclusters > nmbclusters &&
168ead46972SAndre Oppermann 		    nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
1694f590175SPaul Saab 			nmbclusters = newnmbclusters;
170*bc4a1b8cSAndre Oppermann 			nmbclusters = uma_zone_set_max(zone_clust, nmbclusters);
1714f590175SPaul Saab 			EVENTHANDLER_INVOKE(nmbclusters_change);
1724f590175SPaul Saab 		} else
1734f590175SPaul Saab 			error = EINVAL;
1744f590175SPaul Saab 	}
1754f590175SPaul Saab 	return (error);
1764f590175SPaul Saab }
1774f590175SPaul Saab SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbclusters, CTLTYPE_INT|CTLFLAG_RW,
1784f590175SPaul Saab &nmbclusters, 0, sysctl_nmbclusters, "IU",
179099a0e58SBosko Milekic     "Maximum number of mbuf clusters allowed");
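
/*
 * At runtime the limit can only be raised, and only while nmbufs still
 * covers the sum of all cluster limits; anything else returns EINVAL.
 * Example (hypothetical value):
 *
 *	# sysctl kern.ipc.nmbclusters=131072
 */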
180cf70a46bSRandall Stewart 
181cf70a46bSRandall Stewart static int
182cf70a46bSRandall Stewart sysctl_nmbjumbop(SYSCTL_HANDLER_ARGS)
183cf70a46bSRandall Stewart {
184cf70a46bSRandall Stewart 	int error, newnmbjumbop;
185cf70a46bSRandall Stewart 
186cf70a46bSRandall Stewart 	newnmbjumbop = nmbjumbop;
187cf70a46bSRandall Stewart 	error = sysctl_handle_int(oidp, &newnmbjumbop, 0, req);
188cf70a46bSRandall Stewart 	if (error == 0 && req->newptr) {
189ead46972SAndre Oppermann 		if (newnmbjumbop > nmbjumbop &&
190ead46972SAndre Oppermann 		    nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
191cf70a46bSRandall Stewart 			nmbjumbop = newnmbjumbop;
192*bc4a1b8cSAndre Oppermann 			nmbjumbop = uma_zone_set_max(zone_jumbop, nmbjumbop);
193cf70a46bSRandall Stewart 		} else
194cf70a46bSRandall Stewart 			error = EINVAL;
195cf70a46bSRandall Stewart 	}
196cf70a46bSRandall Stewart 	return (error);
197cf70a46bSRandall Stewart }
198cf70a46bSRandall Stewart SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbop, CTLTYPE_INT|CTLFLAG_RW,
199cf70a46bSRandall Stewart &nmbjumbop, 0, sysctl_nmbjumbop, "IU",
200ec63cb90SAndre Oppermann     "Maximum number of mbuf page size jumbo clusters allowed");
201cf70a46bSRandall Stewart 
202cf70a46bSRandall Stewart static int
203cf70a46bSRandall Stewart sysctl_nmbjumbo9(SYSCTL_HANDLER_ARGS)
204cf70a46bSRandall Stewart {
205cf70a46bSRandall Stewart 	int error, newnmbjumbo9;
206cf70a46bSRandall Stewart 
207cf70a46bSRandall Stewart 	newnmbjumbo9 = nmbjumbo9;
208cf70a46bSRandall Stewart 	error = sysctl_handle_int(oidp, &newnmbjumbo9, 0, req);
209cf70a46bSRandall Stewart 	if (error == 0 && req->newptr) {
210ead46972SAndre Oppermann 		if (newnmbjumbo9 > nmbjumbo9 &&
211ead46972SAndre Oppermann 		    nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
212cf70a46bSRandall Stewart 			nmbjumbo9 = newnmbjumbo9;
213*bc4a1b8cSAndre Oppermann 			nmbjumbo9 = uma_zone_set_max(zone_jumbo9, nmbjumbo9);
214cf70a46bSRandall Stewart 		} else
215cf70a46bSRandall Stewart 			error = EINVAL;
216cf70a46bSRandall Stewart 	}
217cf70a46bSRandall Stewart 	return (error);
218cf70a46bSRandall Stewart }
219cf70a46bSRandall Stewart SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbo9, CTLTYPE_INT|CTLFLAG_RW,
220cf70a46bSRandall Stewart &nmbjumbo9, 0, sysctl_nmbjumbo9, "IU",
22156a4e45aSAndre Oppermann     "Maximum number of mbuf 9k jumbo clusters allowed");
222cf70a46bSRandall Stewart 
223cf70a46bSRandall Stewart static int
224cf70a46bSRandall Stewart sysctl_nmbjumbo16(SYSCTL_HANDLER_ARGS)
225cf70a46bSRandall Stewart {
226cf70a46bSRandall Stewart 	int error, newnmbjumbo16;
227cf70a46bSRandall Stewart 
228cf70a46bSRandall Stewart 	newnmbjumbo16 = nmbjumbo16;
229cf70a46bSRandall Stewart 	error = sysctl_handle_int(oidp, &newnmbjumbo16, 0, req);
230cf70a46bSRandall Stewart 	if (error == 0 && req->newptr) {
231ead46972SAndre Oppermann 		if (newnmbjumbo16 > nmbjumbo16 &&
232ead46972SAndre Oppermann 		    nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
233cf70a46bSRandall Stewart 			nmbjumbo16 = newnmbjumbo16;
234*bc4a1b8cSAndre Oppermann 			nmbjumbo16 = uma_zone_set_max(zone_jumbo16, nmbjumbo16);
235cf70a46bSRandall Stewart 		} else
236cf70a46bSRandall Stewart 			error = EINVAL;
237cf70a46bSRandall Stewart 	}
238cf70a46bSRandall Stewart 	return (error);
239cf70a46bSRandall Stewart }
240cf70a46bSRandall Stewart SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbo16, CTLTYPE_INT|CTLFLAG_RW,
241cf70a46bSRandall Stewart &nmbjumbo16, 0, sysctl_nmbjumbo16, "IU",
24256a4e45aSAndre Oppermann     "Maximum number of mbuf 16k jumbo clusters allowed");
243cf70a46bSRandall Stewart 
244ead46972SAndre Oppermann static int
245ead46972SAndre Oppermann sysctl_nmbufs(SYSCTL_HANDLER_ARGS)
246ead46972SAndre Oppermann {
247ead46972SAndre Oppermann 	int error, newnmbufs;
248ead46972SAndre Oppermann 
249ead46972SAndre Oppermann 	newnmbufs = nmbufs;
250ead46972SAndre Oppermann 	error = sysctl_handle_int(oidp, &newnmbufs, 0, req);
251ead46972SAndre Oppermann 	if (error == 0 && req->newptr) {
252ead46972SAndre Oppermann 		if (newnmbufs > nmbufs) {
253ead46972SAndre Oppermann 			nmbufs = newnmbufs;
254*bc4a1b8cSAndre Oppermann 			nmbufs = uma_zone_set_max(zone_mbuf, nmbufs);
255ead46972SAndre Oppermann 			EVENTHANDLER_INVOKE(nmbufs_change);
256ead46972SAndre Oppermann 		} else
257ead46972SAndre Oppermann 			error = EINVAL;
258ead46972SAndre Oppermann 	}
259ead46972SAndre Oppermann 	return (error);
260ead46972SAndre Oppermann }
261e0c00addSAndre Oppermann SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbufs, CTLTYPE_INT|CTLFLAG_RW,
262ead46972SAndre Oppermann &nmbufs, 0, sysctl_nmbufs, "IU",
263ead46972SAndre Oppermann     "Maximum number of mbufs allowed");
264cf70a46bSRandall Stewart 
265099a0e58SBosko Milekic SYSCTL_STRUCT(_kern_ipc, OID_AUTO, mbstat, CTLFLAG_RD, &mbstat, mbstat,
266099a0e58SBosko Milekic     "Mbuf general information and statistics");
267099a0e58SBosko Milekic 
268099a0e58SBosko Milekic /*
269099a0e58SBosko Milekic  * Zones from which we allocate.
270099a0e58SBosko Milekic  */
271099a0e58SBosko Milekic uma_zone_t	zone_mbuf;
272099a0e58SBosko Milekic uma_zone_t	zone_clust;
273099a0e58SBosko Milekic uma_zone_t	zone_pack;
274ec63cb90SAndre Oppermann uma_zone_t	zone_jumbop;
27556a4e45aSAndre Oppermann uma_zone_t	zone_jumbo9;
27656a4e45aSAndre Oppermann uma_zone_t	zone_jumbo16;
27756a4e45aSAndre Oppermann uma_zone_t	zone_ext_refcnt;
278099a0e58SBosko Milekic 
279099a0e58SBosko Milekic /*
280099a0e58SBosko Milekic  * Local prototypes.
281099a0e58SBosko Milekic  */
282b23f72e9SBrian Feldman static int	mb_ctor_mbuf(void *, int, void *, int);
283b23f72e9SBrian Feldman static int	mb_ctor_clust(void *, int, void *, int);
284b23f72e9SBrian Feldman static int	mb_ctor_pack(void *, int, void *, int);
285099a0e58SBosko Milekic static void	mb_dtor_mbuf(void *, int, void *);
28656a4e45aSAndre Oppermann static void	mb_dtor_clust(void *, int, void *);
28756a4e45aSAndre Oppermann static void	mb_dtor_pack(void *, int, void *);
28856a4e45aSAndre Oppermann static int	mb_zinit_pack(void *, int, int);
28956a4e45aSAndre Oppermann static void	mb_zfini_pack(void *, int);
290099a0e58SBosko Milekic 
291099a0e58SBosko Milekic static void	mb_reclaim(void *);
29260ae52f7SEd Schouten static void    *mbuf_jumbo_alloc(uma_zone_t, int, uint8_t *, int);
293099a0e58SBosko Milekic 
29437140716SAndre Oppermann /* Ensure that MSIZE is a power of 2. */
295a04946cfSBrian Somers CTASSERT((((MSIZE - 1) ^ MSIZE) + 1) >> 1 == MSIZE);
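/*
 * Worked example: with the usual MSIZE of 256, (255 ^ 256) == 511,
 * 511 + 1 == 512, and 512 >> 1 == 256 == MSIZE; a non-power-of-2 MSIZE
 * would fail the comparison and break the assertion at compile time.
 */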
296a04946cfSBrian Somers 
297099a0e58SBosko Milekic /*
298099a0e58SBosko Milekic  * Initialize FreeBSD Network buffer allocation.
299099a0e58SBosko Milekic  */
300099a0e58SBosko Milekic static void
301099a0e58SBosko Milekic mbuf_init(void *dummy)
302099a0e58SBosko Milekic {
303099a0e58SBosko Milekic 
304099a0e58SBosko Milekic 	/*
305099a0e58SBosko Milekic 	 * Configure UMA zones for Mbufs, Clusters, and Packets.
306099a0e58SBosko Milekic 	 */
30756a4e45aSAndre Oppermann 	zone_mbuf = uma_zcreate(MBUF_MEM_NAME, MSIZE,
30856a4e45aSAndre Oppermann 	    mb_ctor_mbuf, mb_dtor_mbuf,
309121f0509SMike Silbersack #ifdef INVARIANTS
31056a4e45aSAndre Oppermann 	    trash_init, trash_fini,
311121f0509SMike Silbersack #else
31256a4e45aSAndre Oppermann 	    NULL, NULL,
313121f0509SMike Silbersack #endif
31456a4e45aSAndre Oppermann 	    MSIZE - 1, UMA_ZONE_MAXBUCKET);
31545fe0bf7SPawel Jakub Dawidek 	if (nmbufs > 0)
31645fe0bf7SPawel Jakub Dawidek 		nmbufs = uma_zone_set_max(zone_mbuf, nmbufs);
3176e0b6746SPawel Jakub Dawidek 	uma_zone_set_warning(zone_mbuf, "kern.ipc.nmbufs limit reached");
31856a4e45aSAndre Oppermann 
31968352adfSRobert Watson 	zone_clust = uma_zcreate(MBUF_CLUSTER_MEM_NAME, MCLBYTES,
32056a4e45aSAndre Oppermann 	    mb_ctor_clust, mb_dtor_clust,
321121f0509SMike Silbersack #ifdef INVARIANTS
32256a4e45aSAndre Oppermann 	    trash_init, trash_fini,
323121f0509SMike Silbersack #else
32456a4e45aSAndre Oppermann 	    NULL, NULL,
325121f0509SMike Silbersack #endif
32656a4e45aSAndre Oppermann 	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
32745fe0bf7SPawel Jakub Dawidek 	if (nmbclusters > 0)
32845fe0bf7SPawel Jakub Dawidek 		nmbclusters = uma_zone_set_max(zone_clust, nmbclusters);
3296e0b6746SPawel Jakub Dawidek 	uma_zone_set_warning(zone_clust, "kern.ipc.nmbclusters limit reached");
330099a0e58SBosko Milekic 
33156a4e45aSAndre Oppermann 	zone_pack = uma_zsecond_create(MBUF_PACKET_MEM_NAME, mb_ctor_pack,
33256a4e45aSAndre Oppermann 	    mb_dtor_pack, mb_zinit_pack, mb_zfini_pack, zone_mbuf);
33356a4e45aSAndre Oppermann 
334fcf90618SGleb Smirnoff 	/* Make jumbo frame zones too. Page size, 9k and 16k. */
335ec63cb90SAndre Oppermann 	zone_jumbop = uma_zcreate(MBUF_JUMBOP_MEM_NAME, MJUMPAGESIZE,
336d5269a63SAndre Oppermann 	    mb_ctor_clust, mb_dtor_clust,
337d5269a63SAndre Oppermann #ifdef INVARIANTS
338d5269a63SAndre Oppermann 	    trash_init, trash_fini,
339d5269a63SAndre Oppermann #else
340d5269a63SAndre Oppermann 	    NULL, NULL,
341d5269a63SAndre Oppermann #endif
342d5269a63SAndre Oppermann 	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
34345fe0bf7SPawel Jakub Dawidek 	if (nmbjumbop > 0)
34445fe0bf7SPawel Jakub Dawidek 		nmbjumbop = uma_zone_set_max(zone_jumbop, nmbjumbop);
3456e0b6746SPawel Jakub Dawidek 	uma_zone_set_warning(zone_jumbop, "kern.ipc.nmbjumbop limit reached");
346d5269a63SAndre Oppermann 
34756a4e45aSAndre Oppermann 	zone_jumbo9 = uma_zcreate(MBUF_JUMBO9_MEM_NAME, MJUM9BYTES,
34856a4e45aSAndre Oppermann 	    mb_ctor_clust, mb_dtor_clust,
34956a4e45aSAndre Oppermann #ifdef INVARIANTS
35056a4e45aSAndre Oppermann 	    trash_init, trash_fini,
35156a4e45aSAndre Oppermann #else
35256a4e45aSAndre Oppermann 	    NULL, NULL,
35356a4e45aSAndre Oppermann #endif
35456a4e45aSAndre Oppermann 	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
355ba63339aSAlan Cox 	uma_zone_set_allocf(zone_jumbo9, mbuf_jumbo_alloc);
35645fe0bf7SPawel Jakub Dawidek 	if (nmbjumbo9 > 0)
35745fe0bf7SPawel Jakub Dawidek 		nmbjumbo9 = uma_zone_set_max(zone_jumbo9, nmbjumbo9);
3586e0b6746SPawel Jakub Dawidek 	uma_zone_set_warning(zone_jumbo9, "kern.ipc.nmbjumbo9 limit reached");
35956a4e45aSAndre Oppermann 
36056a4e45aSAndre Oppermann 	zone_jumbo16 = uma_zcreate(MBUF_JUMBO16_MEM_NAME, MJUM16BYTES,
36156a4e45aSAndre Oppermann 	    mb_ctor_clust, mb_dtor_clust,
36256a4e45aSAndre Oppermann #ifdef INVARIANTS
36356a4e45aSAndre Oppermann 	    trash_init, trash_fini,
36456a4e45aSAndre Oppermann #else
36556a4e45aSAndre Oppermann 	    NULL, NULL,
36656a4e45aSAndre Oppermann #endif
36756a4e45aSAndre Oppermann 	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
368ba63339aSAlan Cox 	uma_zone_set_allocf(zone_jumbo16, mbuf_jumbo_alloc);
36945fe0bf7SPawel Jakub Dawidek 	if (nmbjumbo16 > 0)
37045fe0bf7SPawel Jakub Dawidek 		nmbjumbo16 = uma_zone_set_max(zone_jumbo16, nmbjumbo16);
3716e0b6746SPawel Jakub Dawidek 	uma_zone_set_warning(zone_jumbo16, "kern.ipc.nmbjumbo16 limit reached");
37256a4e45aSAndre Oppermann 
37356a4e45aSAndre Oppermann 	zone_ext_refcnt = uma_zcreate(MBUF_EXTREFCNT_MEM_NAME, sizeof(u_int),
37456a4e45aSAndre Oppermann 	    NULL, NULL,
37556a4e45aSAndre Oppermann 	    NULL, NULL,
37656a4e45aSAndre Oppermann 	    UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
37756a4e45aSAndre Oppermann 
37856a4e45aSAndre Oppermann 	/* uma_prealloc() goes here... */
379099a0e58SBosko Milekic 
380099a0e58SBosko Milekic 	/*
381099a0e58SBosko Milekic 	 * Hook event handler for low-memory situation, used to
382099a0e58SBosko Milekic 	 * drain protocols and push data back to the caches (UMA
383099a0e58SBosko Milekic 	 * later pushes it back to VM).
384099a0e58SBosko Milekic 	 */
385099a0e58SBosko Milekic 	EVENTHANDLER_REGISTER(vm_lowmem, mb_reclaim, NULL,
386099a0e58SBosko Milekic 	    EVENTHANDLER_PRI_FIRST);
387099a0e58SBosko Milekic 
388099a0e58SBosko Milekic 	/*
389099a0e58SBosko Milekic 	 * [Re]set counters and local statistics knobs.
390099a0e58SBosko Milekic 	 * XXX Some of these should go and be replaced, but UMA stat
391099a0e58SBosko Milekic 	 * gathering needs to be revised.
392099a0e58SBosko Milekic 	 */
393099a0e58SBosko Milekic 	mbstat.m_mbufs = 0;
394099a0e58SBosko Milekic 	mbstat.m_mclusts = 0;
395099a0e58SBosko Milekic 	mbstat.m_drain = 0;
396099a0e58SBosko Milekic 	mbstat.m_msize = MSIZE;
397099a0e58SBosko Milekic 	mbstat.m_mclbytes = MCLBYTES;
398099a0e58SBosko Milekic 	mbstat.m_minclsize = MINCLSIZE;
399099a0e58SBosko Milekic 	mbstat.m_mlen = MLEN;
400099a0e58SBosko Milekic 	mbstat.m_mhlen = MHLEN;
401099a0e58SBosko Milekic 	mbstat.m_numtypes = MT_NTYPES;
402099a0e58SBosko Milekic 
403099a0e58SBosko Milekic 	mbstat.m_mcfail = mbstat.m_mpfail = 0;
404099a0e58SBosko Milekic 	mbstat.sf_iocnt = 0;
405099a0e58SBosko Milekic 	mbstat.sf_allocwait = mbstat.sf_allocfail = 0;
406099a0e58SBosko Milekic }
40737140716SAndre Oppermann SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbuf_init, NULL);
408099a0e58SBosko Milekic 
409099a0e58SBosko Milekic /*
410ba63339aSAlan Cox  * UMA backend page allocator for the jumbo frame zones.
411ba63339aSAlan Cox  *
412ba63339aSAlan Cox  * Allocates kernel virtual memory that is backed by contiguous physical
413ba63339aSAlan Cox  * pages.
414ba63339aSAlan Cox  */
415ba63339aSAlan Cox static void *
41660ae52f7SEd Schouten mbuf_jumbo_alloc(uma_zone_t zone, int bytes, uint8_t *flags, int wait)
417ba63339aSAlan Cox {
418ba63339aSAlan Cox 
4197630c265SAlan Cox 	/* Inform UMA that this allocator uses kernel_map/object. */
4207630c265SAlan Cox 	*flags = UMA_SLAB_KERNEL;
421c45c0034SAlan Cox 	return ((void *)kmem_alloc_contig(kernel_map, bytes, wait,
4223153e878SAlan Cox 	    (vm_paddr_t)0, ~(vm_paddr_t)0, 1, 0, VM_MEMATTR_DEFAULT));
423ba63339aSAlan Cox }
424ba63339aSAlan Cox 
425ba63339aSAlan Cox /*
426099a0e58SBosko Milekic  * Constructor for Mbuf master zone.
427099a0e58SBosko Milekic  *
428099a0e58SBosko Milekic  * The 'arg' pointer points to a mb_args structure which
429099a0e58SBosko Milekic  * contains call-specific information required to support the
43056a4e45aSAndre Oppermann  * mbuf allocation API.  See mbuf.h.
431099a0e58SBosko Milekic  */
432b23f72e9SBrian Feldman static int
433b23f72e9SBrian Feldman mb_ctor_mbuf(void *mem, int size, void *arg, int how)
434099a0e58SBosko Milekic {
435099a0e58SBosko Milekic 	struct mbuf *m;
436099a0e58SBosko Milekic 	struct mb_args *args;
437b23f72e9SBrian Feldman #ifdef MAC
438b23f72e9SBrian Feldman 	int error;
439b23f72e9SBrian Feldman #endif
440099a0e58SBosko Milekic 	int flags;
441099a0e58SBosko Milekic 	short type;
442099a0e58SBosko Milekic 
443121f0509SMike Silbersack #ifdef INVARIANTS
444121f0509SMike Silbersack 	trash_ctor(mem, size, arg, how);
445121f0509SMike Silbersack #endif
446099a0e58SBosko Milekic 	m = (struct mbuf *)mem;
447099a0e58SBosko Milekic 	args = (struct mb_args *)arg;
448099a0e58SBosko Milekic 	flags = args->flags;
449099a0e58SBosko Milekic 	type = args->type;
450099a0e58SBosko Milekic 
45156a4e45aSAndre Oppermann 	/*
45256a4e45aSAndre Oppermann 	 * The mbuf is initialized later.  The caller has the
453fcf90618SGleb Smirnoff 	 * responsibility to set up any MAC labels too.
45456a4e45aSAndre Oppermann 	 */
45556a4e45aSAndre Oppermann 	if (type == MT_NOINIT)
45656a4e45aSAndre Oppermann 		return (0);
45756a4e45aSAndre Oppermann 
458099a0e58SBosko Milekic 	m->m_next = NULL;
459099a0e58SBosko Milekic 	m->m_nextpkt = NULL;
46056a4e45aSAndre Oppermann 	m->m_len = 0;
4616bc72ab9SBosko Milekic 	m->m_flags = flags;
46256a4e45aSAndre Oppermann 	m->m_type = type;
463099a0e58SBosko Milekic 	if (flags & M_PKTHDR) {
464099a0e58SBosko Milekic 		m->m_data = m->m_pktdat;
465099a0e58SBosko Milekic 		m->m_pkthdr.rcvif = NULL;
46656a4e45aSAndre Oppermann 		m->m_pkthdr.header = NULL;
4678aa7a581SKip Macy 		m->m_pkthdr.len = 0;
468099a0e58SBosko Milekic 		m->m_pkthdr.csum_flags = 0;
46956a4e45aSAndre Oppermann 		m->m_pkthdr.csum_data = 0;
470a855e2b4SAndre Oppermann 		m->m_pkthdr.tso_segsz = 0;
471a855e2b4SAndre Oppermann 		m->m_pkthdr.ether_vtag = 0;
472877e8812SRobert Watson 		m->m_pkthdr.flowid = 0;
4734591f0d3SJulian Elischer 		m->m_pkthdr.fibnum = 0;
474099a0e58SBosko Milekic 		SLIST_INIT(&m->m_pkthdr.tags);
475099a0e58SBosko Milekic #ifdef MAC
476099a0e58SBosko Milekic 		/* If the label init fails, fail the alloc */
47730d239bcSRobert Watson 		error = mac_mbuf_init(m, how);
478b23f72e9SBrian Feldman 		if (error)
479b23f72e9SBrian Feldman 			return (error);
480099a0e58SBosko Milekic #endif
4816bc72ab9SBosko Milekic 	} else
482099a0e58SBosko Milekic 		m->m_data = m->m_dat;
483b23f72e9SBrian Feldman 	return (0);
484099a0e58SBosko Milekic }
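
/*
 * For reference, the allocating side that produces 'arg' is a small
 * inline in mbuf.h roughly along these lines (sketch only):
 *
 *	static __inline struct mbuf *
 *	m_get(int how, short type)
 *	{
 *		struct mb_args args;
 *
 *		args.flags = 0;
 *		args.type = type;
 *		return ((struct mbuf *)(uma_zalloc_arg(zone_mbuf, &args, how)));
 *	}
 */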
485099a0e58SBosko Milekic 
486099a0e58SBosko Milekic /*
48756a4e45aSAndre Oppermann  * The Mbuf master zone destructor.
488099a0e58SBosko Milekic  */
489099a0e58SBosko Milekic static void
490099a0e58SBosko Milekic mb_dtor_mbuf(void *mem, int size, void *arg)
491099a0e58SBosko Milekic {
492099a0e58SBosko Milekic 	struct mbuf *m;
493629b9e08SKip Macy 	unsigned long flags;
494099a0e58SBosko Milekic 
495099a0e58SBosko Milekic 	m = (struct mbuf *)mem;
496629b9e08SKip Macy 	flags = (unsigned long)arg;
497629b9e08SKip Macy 
498629b9e08SKip Macy 	if ((flags & MB_NOTAGS) == 0 && (m->m_flags & M_PKTHDR) != 0)
499099a0e58SBosko Milekic 		m_tag_delete_chain(m, NULL);
50056a4e45aSAndre Oppermann 	KASSERT((m->m_flags & M_EXT) == 0, ("%s: M_EXT set", __func__));
501457869b9SKip Macy 	KASSERT((m->m_flags & M_NOFREE) == 0, ("%s: M_NOFREE set", __func__));
502121f0509SMike Silbersack #ifdef INVARIANTS
503121f0509SMike Silbersack 	trash_dtor(mem, size, arg);
504121f0509SMike Silbersack #endif
505099a0e58SBosko Milekic }
506099a0e58SBosko Milekic 
50756a4e45aSAndre Oppermann /*
50856a4e45aSAndre Oppermann  * The Mbuf Packet zone destructor.
50956a4e45aSAndre Oppermann  */
510099a0e58SBosko Milekic static void
511099a0e58SBosko Milekic mb_dtor_pack(void *mem, int size, void *arg)
512099a0e58SBosko Milekic {
513099a0e58SBosko Milekic 	struct mbuf *m;
514099a0e58SBosko Milekic 
515099a0e58SBosko Milekic 	m = (struct mbuf *)mem;
516099a0e58SBosko Milekic 	if ((m->m_flags & M_PKTHDR) != 0)
517099a0e58SBosko Milekic 		m_tag_delete_chain(m, NULL);
51856a4e45aSAndre Oppermann 
51956a4e45aSAndre Oppermann 	/* Make sure we've got a clean cluster back. */
52056a4e45aSAndre Oppermann 	KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
52156a4e45aSAndre Oppermann 	KASSERT(m->m_ext.ext_buf != NULL, ("%s: ext_buf == NULL", __func__));
52256a4e45aSAndre Oppermann 	KASSERT(m->m_ext.ext_free == NULL, ("%s: ext_free != NULL", __func__));
523cf827063SPoul-Henning Kamp 	KASSERT(m->m_ext.ext_arg1 == NULL, ("%s: ext_arg1 != NULL", __func__));
524cf827063SPoul-Henning Kamp 	KASSERT(m->m_ext.ext_arg2 == NULL, ("%s: ext_arg2 != NULL", __func__));
52556a4e45aSAndre Oppermann 	KASSERT(m->m_ext.ext_size == MCLBYTES, ("%s: ext_size != MCLBYTES", __func__));
52649d46b61SGleb Smirnoff 	KASSERT(m->m_ext.ext_type == EXT_PACKET, ("%s: ext_type != EXT_PACKET", __func__));
52756a4e45aSAndre Oppermann 	KASSERT(*m->m_ext.ref_cnt == 1, ("%s: ref_cnt != 1", __func__));
528121f0509SMike Silbersack #ifdef INVARIANTS
529121f0509SMike Silbersack 	trash_dtor(m->m_ext.ext_buf, MCLBYTES, arg);
530121f0509SMike Silbersack #endif
5316c125b8dSMohan Srinivasan 	/*
532ef44c8d2SDavid E. O'Brien 	 * If there are processes blocked on zone_clust, waiting for pages
533ef44c8d2SDavid E. O'Brien 	 * to be freed up, cause them to be woken up by draining the
534ef44c8d2SDavid E. O'Brien 	 * packet zone.  We are exposed to a race here (in the check for
535ef44c8d2SDavid E. O'Brien 	 * the UMA_ZFLAG_FULL) where we might miss the flag set, but that
536ef44c8d2SDavid E. O'Brien 	 * is deliberate. We don't want to acquire the zone lock for every
537ef44c8d2SDavid E. O'Brien 	 * mbuf free.
5386c125b8dSMohan Srinivasan 	 */
5396c125b8dSMohan Srinivasan 	if (uma_zone_exhausted_nolock(zone_clust))
5406c125b8dSMohan Srinivasan 		zone_drain(zone_pack);
541099a0e58SBosko Milekic }
542099a0e58SBosko Milekic 
543099a0e58SBosko Milekic /*
544ec63cb90SAndre Oppermann  * The Cluster and Jumbo[PAGESIZE|9|16] zone constructor.
545099a0e58SBosko Milekic  *
546099a0e58SBosko Milekic  * Here the 'arg' pointer points to the Mbuf which we
54756a4e45aSAndre Oppermann  * are configuring cluster storage for.  If 'arg' is
54856a4e45aSAndre Oppermann  * empty we allocate just the cluster without setting
54856a4e45aSAndre Oppermann  * NULL we allocate just the cluster without attaching
54956a4e45aSAndre Oppermann  * it to any mbuf.  See mbuf.h.
551b23f72e9SBrian Feldman static int
552b23f72e9SBrian Feldman mb_ctor_clust(void *mem, int size, void *arg, int how)
553099a0e58SBosko Milekic {
554099a0e58SBosko Milekic 	struct mbuf *m;
55556a4e45aSAndre Oppermann 	u_int *refcnt;
5560f4d9d04SKip Macy 	int type;
5570f4d9d04SKip Macy 	uma_zone_t zone;
558099a0e58SBosko Milekic 
559121f0509SMike Silbersack #ifdef INVARIANTS
560121f0509SMike Silbersack 	trash_ctor(mem, size, arg, how);
561121f0509SMike Silbersack #endif
56256a4e45aSAndre Oppermann 	switch (size) {
56356a4e45aSAndre Oppermann 	case MCLBYTES:
56456a4e45aSAndre Oppermann 		type = EXT_CLUSTER;
5650f4d9d04SKip Macy 		zone = zone_clust;
56656a4e45aSAndre Oppermann 		break;
567ec63cb90SAndre Oppermann #if MJUMPAGESIZE != MCLBYTES
568ec63cb90SAndre Oppermann 	case MJUMPAGESIZE:
569ec63cb90SAndre Oppermann 		type = EXT_JUMBOP;
5700f4d9d04SKip Macy 		zone = zone_jumbop;
571d5269a63SAndre Oppermann 		break;
57236ae3fd3SAndre Oppermann #endif
57356a4e45aSAndre Oppermann 	case MJUM9BYTES:
57456a4e45aSAndre Oppermann 		type = EXT_JUMBO9;
5750f4d9d04SKip Macy 		zone = zone_jumbo9;
57656a4e45aSAndre Oppermann 		break;
57756a4e45aSAndre Oppermann 	case MJUM16BYTES:
57856a4e45aSAndre Oppermann 		type = EXT_JUMBO16;
5790f4d9d04SKip Macy 		zone = zone_jumbo16;
58056a4e45aSAndre Oppermann 		break;
58156a4e45aSAndre Oppermann 	default:
58256a4e45aSAndre Oppermann 		panic("unknown cluster size");
58356a4e45aSAndre Oppermann 		break;
58456a4e45aSAndre Oppermann 	}
5850f4d9d04SKip Macy 
5860f4d9d04SKip Macy 	m = (struct mbuf *)arg;
5870f4d9d04SKip Macy 	refcnt = uma_find_refcnt(zone, mem);
5880f4d9d04SKip Macy 	*refcnt = 1;
5890f4d9d04SKip Macy 	if (m != NULL) {
590099a0e58SBosko Milekic 		m->m_ext.ext_buf = (caddr_t)mem;
591099a0e58SBosko Milekic 		m->m_data = m->m_ext.ext_buf;
592099a0e58SBosko Milekic 		m->m_flags |= M_EXT;
593099a0e58SBosko Milekic 		m->m_ext.ext_free = NULL;
594cf827063SPoul-Henning Kamp 		m->m_ext.ext_arg1 = NULL;
595cf827063SPoul-Henning Kamp 		m->m_ext.ext_arg2 = NULL;
59656a4e45aSAndre Oppermann 		m->m_ext.ext_size = size;
59756a4e45aSAndre Oppermann 		m->m_ext.ext_type = type;
5980f4d9d04SKip Macy 		m->m_ext.ref_cnt = refcnt;
59956a4e45aSAndre Oppermann 	}
6000f4d9d04SKip Macy 
601b23f72e9SBrian Feldman 	return (0);
602099a0e58SBosko Milekic }
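
/*
 * A caller attaching a cluster to an existing mbuf passes that mbuf as
 * the allocation argument, roughly as follows (sketch only; the real
 * code is m_clget()/m_cljget() in mbuf.h):
 *
 *	m->m_ext.ext_buf = NULL;
 *	uma_zalloc_arg(zone_clust, m, M_NOWAIT);
 *	if (m->m_ext.ext_buf == NULL)
 *		(allocation failed, M_EXT is not set)
 *
 * Passing a NULL argument yields a bare cluster whose refcount has been
 * set to 1 but which is not attached to any mbuf.
 */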
603099a0e58SBosko Milekic 
60456a4e45aSAndre Oppermann /*
60556a4e45aSAndre Oppermann  * The Mbuf Cluster zone destructor.
60656a4e45aSAndre Oppermann  */
607099a0e58SBosko Milekic static void
608099a0e58SBosko Milekic mb_dtor_clust(void *mem, int size, void *arg)
609099a0e58SBosko Milekic {
610121f0509SMike Silbersack #ifdef INVARIANTS
6110f4d9d04SKip Macy 	uma_zone_t zone;
6120f4d9d04SKip Macy 
6130f4d9d04SKip Macy 	zone = m_getzone(size);
6140f4d9d04SKip Macy 	KASSERT(*(uma_find_refcnt(zone, mem)) <= 1,
6150f4d9d04SKip Macy 		("%s: refcnt incorrect %u", __func__,
6160f4d9d04SKip Macy 		 *(uma_find_refcnt(zone, mem))) );
6170f4d9d04SKip Macy 
618121f0509SMike Silbersack 	trash_dtor(mem, size, arg);
619121f0509SMike Silbersack #endif
620099a0e58SBosko Milekic }
621099a0e58SBosko Milekic 
622099a0e58SBosko Milekic /*
623099a0e58SBosko Milekic  * The Packet secondary zone's init routine, executed on the
62456a4e45aSAndre Oppermann  * object's transition from mbuf keg slab to zone cache.
625099a0e58SBosko Milekic  */
626b23f72e9SBrian Feldman static int
62756a4e45aSAndre Oppermann mb_zinit_pack(void *mem, int size, int how)
628099a0e58SBosko Milekic {
629099a0e58SBosko Milekic 	struct mbuf *m;
630099a0e58SBosko Milekic 
63156a4e45aSAndre Oppermann 	m = (struct mbuf *)mem;		/* m is virgin. */
632a7bd90efSAndre Oppermann 	if (uma_zalloc_arg(zone_clust, m, how) == NULL ||
633a7bd90efSAndre Oppermann 	    m->m_ext.ext_buf == NULL)
634b23f72e9SBrian Feldman 		return (ENOMEM);
635cd5bb63bSAndre Oppermann 	m->m_ext.ext_type = EXT_PACKET;	/* Override. */
636121f0509SMike Silbersack #ifdef INVARIANTS
637121f0509SMike Silbersack 	trash_init(m->m_ext.ext_buf, MCLBYTES, how);
638121f0509SMike Silbersack #endif
639b23f72e9SBrian Feldman 	return (0);
640099a0e58SBosko Milekic }
641099a0e58SBosko Milekic 
642099a0e58SBosko Milekic /*
643099a0e58SBosko Milekic  * The Packet secondary zone's fini routine, executed on the
644099a0e58SBosko Milekic  * object's transition from zone cache to keg slab.
645099a0e58SBosko Milekic  */
646099a0e58SBosko Milekic static void
64756a4e45aSAndre Oppermann mb_zfini_pack(void *mem, int size)
648099a0e58SBosko Milekic {
649099a0e58SBosko Milekic 	struct mbuf *m;
650099a0e58SBosko Milekic 
651099a0e58SBosko Milekic 	m = (struct mbuf *)mem;
652121f0509SMike Silbersack #ifdef INVARIANTS
653121f0509SMike Silbersack 	trash_fini(m->m_ext.ext_buf, MCLBYTES);
654121f0509SMike Silbersack #endif
655099a0e58SBosko Milekic 	uma_zfree_arg(zone_clust, m->m_ext.ext_buf, NULL);
656a7b844d2SMike Silbersack #ifdef INVARIANTS
657a7b844d2SMike Silbersack 	trash_dtor(mem, size, NULL);
658a7b844d2SMike Silbersack #endif
659099a0e58SBosko Milekic }
660099a0e58SBosko Milekic 
661099a0e58SBosko Milekic /*
662099a0e58SBosko Milekic  * The "packet" keg constructor.
663099a0e58SBosko Milekic  */
664b23f72e9SBrian Feldman static int
665b23f72e9SBrian Feldman mb_ctor_pack(void *mem, int size, void *arg, int how)
666099a0e58SBosko Milekic {
667099a0e58SBosko Milekic 	struct mbuf *m;
668099a0e58SBosko Milekic 	struct mb_args *args;
669b23f72e9SBrian Feldman #ifdef MAC
670b23f72e9SBrian Feldman 	int error;
671b23f72e9SBrian Feldman #endif
672b23f72e9SBrian Feldman 	int flags;
673099a0e58SBosko Milekic 	short type;
674099a0e58SBosko Milekic 
675099a0e58SBosko Milekic 	m = (struct mbuf *)mem;
676099a0e58SBosko Milekic 	args = (struct mb_args *)arg;
677099a0e58SBosko Milekic 	flags = args->flags;
678099a0e58SBosko Milekic 	type = args->type;
679099a0e58SBosko Milekic 
680121f0509SMike Silbersack #ifdef INVARIANTS
681121f0509SMike Silbersack 	trash_ctor(m->m_ext.ext_buf, MCLBYTES, arg, how);
682121f0509SMike Silbersack #endif
683099a0e58SBosko Milekic 	m->m_next = NULL;
6846bc72ab9SBosko Milekic 	m->m_nextpkt = NULL;
685099a0e58SBosko Milekic 	m->m_data = m->m_ext.ext_buf;
68656a4e45aSAndre Oppermann 	m->m_len = 0;
68756a4e45aSAndre Oppermann 	m->m_flags = (flags | M_EXT);
68856a4e45aSAndre Oppermann 	m->m_type = type;
689099a0e58SBosko Milekic 
690099a0e58SBosko Milekic 	if (flags & M_PKTHDR) {
691099a0e58SBosko Milekic 		m->m_pkthdr.rcvif = NULL;
69256a4e45aSAndre Oppermann 		m->m_pkthdr.len = 0;
69356a4e45aSAndre Oppermann 		m->m_pkthdr.header = NULL;
694099a0e58SBosko Milekic 		m->m_pkthdr.csum_flags = 0;
69556a4e45aSAndre Oppermann 		m->m_pkthdr.csum_data = 0;
696a855e2b4SAndre Oppermann 		m->m_pkthdr.tso_segsz = 0;
697a855e2b4SAndre Oppermann 		m->m_pkthdr.ether_vtag = 0;
698877e8812SRobert Watson 		m->m_pkthdr.flowid = 0;
6994591f0d3SJulian Elischer 		m->m_pkthdr.fibnum = 0;
700099a0e58SBosko Milekic 		SLIST_INIT(&m->m_pkthdr.tags);
701099a0e58SBosko Milekic #ifdef MAC
702099a0e58SBosko Milekic 		/* If the label init fails, fail the alloc */
70330d239bcSRobert Watson 		error = mac_mbuf_init(m, how);
704b23f72e9SBrian Feldman 		if (error)
705b23f72e9SBrian Feldman 			return (error);
706099a0e58SBosko Milekic #endif
707099a0e58SBosko Milekic 	}
70856a4e45aSAndre Oppermann 	/* m_ext is already initialized. */
70956a4e45aSAndre Oppermann 
710b23f72e9SBrian Feldman 	return (0);
711099a0e58SBosko Milekic }
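
/*
 * m_getcl() draws from this packet zone, so the cluster arrives already
 * attached by mb_zinit_pack() above; conceptually (sketch only, see the
 * real inline in mbuf.h):
 *
 *	struct mb_args args;
 *
 *	args.flags = flags;
 *	args.type = type;
 *	m = uma_zalloc_arg(zone_pack, &args, how);
 */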
712099a0e58SBosko Milekic 
7135b204a11SKip Macy int
7145b204a11SKip Macy m_pkthdr_init(struct mbuf *m, int how)
7155b204a11SKip Macy {
7165b204a11SKip Macy #ifdef MAC
7175b204a11SKip Macy 	int error;
7185b204a11SKip Macy #endif
7195b204a11SKip Macy 	m->m_data = m->m_pktdat;
7205b204a11SKip Macy 	SLIST_INIT(&m->m_pkthdr.tags);
7215b204a11SKip Macy 	m->m_pkthdr.rcvif = NULL;
7225b204a11SKip Macy 	m->m_pkthdr.header = NULL;
7235b204a11SKip Macy 	m->m_pkthdr.len = 0;
7245b204a11SKip Macy 	m->m_pkthdr.flowid = 0;
7254591f0d3SJulian Elischer 	m->m_pkthdr.fibnum = 0;
7265b204a11SKip Macy 	m->m_pkthdr.csum_flags = 0;
7275b204a11SKip Macy 	m->m_pkthdr.csum_data = 0;
7285b204a11SKip Macy 	m->m_pkthdr.tso_segsz = 0;
7295b204a11SKip Macy 	m->m_pkthdr.ether_vtag = 0;
7305b204a11SKip Macy #ifdef MAC
7315b204a11SKip Macy 	/* If the label init fails, fail the alloc */
7325b204a11SKip Macy 	error = mac_mbuf_init(m, how);
7335b204a11SKip Macy 	if (error)
7345b204a11SKip Macy 		return (error);
7355b204a11SKip Macy #endif
7365b204a11SKip Macy 
7375b204a11SKip Macy 	return (0);
7385b204a11SKip Macy }
7395b204a11SKip Macy 
740099a0e58SBosko Milekic /*
741099a0e58SBosko Milekic  * This is the protocol drain routine.
742099a0e58SBosko Milekic  *
743099a0e58SBosko Milekic  * No locks should be held when this is called.  The drain routines presently
744099a0e58SBosko Milekic  * have to acquire some locks, which raises the possibility of lock order
745099a0e58SBosko Milekic  * reversal.
746099a0e58SBosko Milekic  */
747099a0e58SBosko Milekic static void
748099a0e58SBosko Milekic mb_reclaim(void *junk)
749099a0e58SBosko Milekic {
750099a0e58SBosko Milekic 	struct domain *dp;
751099a0e58SBosko Milekic 	struct protosw *pr;
752099a0e58SBosko Milekic 
753099a0e58SBosko Milekic 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK | WARN_PANIC, NULL,
754099a0e58SBosko Milekic 	    "mb_reclaim()");
755099a0e58SBosko Milekic 
756099a0e58SBosko Milekic 	for (dp = domains; dp != NULL; dp = dp->dom_next)
757099a0e58SBosko Milekic 		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
758099a0e58SBosko Milekic 			if (pr->pr_drain != NULL)
759099a0e58SBosko Milekic 				(*pr->pr_drain)();
760099a0e58SBosko Milekic }
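
/*
 * A protocol opts in to mb_reclaim() by setting pr_drain in its protosw
 * entry; a hypothetical hook would look like:
 *
 *	static void
 *	foo_drain(void)
 *	{
 *		(release cached, reclaimable protocol state)
 *	}
 */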
761