/*-
 * Copyright (c) 2004, 2005,
 *	Bosko Milekic <bmilekic@FreeBSD.org>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_param.h"

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/protosw.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

/*
 * In FreeBSD, Mbufs and Mbuf Clusters are allocated from UMA
 * Zones.
 *
 * Mbuf Clusters (2K, contiguous) are allocated from the Cluster
 * Zone.  The Zone can be capped at kern.ipc.nmbclusters, if the
 * administrator so desires.
 *
 * Mbufs are allocated from a UMA Master Zone called the Mbuf
 * Zone.
 *
 * Additionally, FreeBSD provides a Packet Zone, which it
 * configures as a Secondary Zone to the Mbuf Master Zone,
 * thus sharing backend Slab kegs with the Mbuf Master Zone.
 *
 * Thus common-case allocations and locking are simplified:
 *
 *  m_clget()                m_getcl()
 *    |                         |
 *    |   .------------>[(Packet Cache)]    m_get(), m_gethdr()
 *    |   |             [     Packet   ]            |
 *  [(Cluster Cache)]   [    Secondary ]   [ (Mbuf Cache)     ]
 *  [ Cluster Zone  ]   [     Zone     ]   [ Mbuf Master Zone ]
 *        |                       \________         |
 *  [ Cluster Keg   ]                      \       /
 *        |                              [ Mbuf Keg   ]
 *  [ Cluster Slabs ]                         |
 *        |                              [ Mbuf Slabs ]
 *         \____________(VM)_________________/
 *
 *
 * Whenever an object is allocated with uma_zalloc() out of
 * one of the Zones, its _ctor_ function is executed.  Likewise,
 * for any deallocation through uma_zfree(), the _dtor_ function
 * is executed.
 *
 * Caches are per-CPU and are filled from the Master Zone.
 *
 * Whenever an object is allocated from the underlying global
 * memory pool it gets pre-initialized with the _zinit_ functions.
 * When the Kegs are overfull, objects get decommissioned with
 * _zfini_ functions and freed back to the global memory pool.
 */
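
/*
 * For illustration, the common-case paths in the diagram above come
 * down to a handful of mbuf(9) calls (a sketch, not code used in this
 * file):
 *
 *	struct mbuf *m;
 *
 *	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);  mbuf + cluster (Packet)
 *	m = m_get(M_NOWAIT, MT_DATA);              mbuf only (Mbuf Zone)
 *
 * In the common case either call is satisfied from a per-CPU cache
 * without touching the keg or VM.
 */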

int nmbufs;			/* limits number of mbufs */
int nmbclusters;		/* limits number of mbuf clusters */
int nmbjumbop;			/* limits number of page size jumbo clusters */
int nmbjumbo9;			/* limits number of 9k jumbo clusters */
int nmbjumbo16;			/* limits number of 16k jumbo clusters */
struct mbstat mbstat;

/*
 * tunable_mbinit() has to be run before init_maxsockets(), thus
 * the SYSINIT order below is SI_ORDER_MIDDLE while init_maxsockets()
 * runs at SI_ORDER_ANY.
 *
 * NB: This has to be done before VM init.
 */
static void
tunable_mbinit(void *dummy)
{

	TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
	if (nmbclusters == 0)
		nmbclusters = maxmbufmem / MCLBYTES / 4;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbop", &nmbjumbop);
	if (nmbjumbop == 0)
		nmbjumbop = maxmbufmem / MJUMPAGESIZE / 4;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbo9", &nmbjumbo9);
	if (nmbjumbo9 == 0)
		nmbjumbo9 = maxmbufmem / MJUM9BYTES / 6;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbo16", &nmbjumbo16);
	if (nmbjumbo16 == 0)
		nmbjumbo16 = maxmbufmem / MJUM16BYTES / 6;

	/*
	 * We need at least as many mbufs as we have clusters of
	 * the various types added together.
	 */
	TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);
	if (nmbufs < nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16)
		nmbufs = lmax(maxmbufmem / MSIZE / 5,
		    nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16);
}
SYSINIT(tunable_mbinit, SI_SUB_TUNABLES, SI_ORDER_MIDDLE, tunable_mbinit, NULL);
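
/*
 * The limits computed above can be overridden from loader.conf(5);
 * the values here are illustrative only:
 *
 *	kern.ipc.nmbclusters="131072"
 *	kern.ipc.nmbjumbop="65536"
 *	kern.ipc.nmbufs="262144"
 *
 * A tunable left unset (zero) receives the default derived from
 * maxmbufmem above.
 */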

static int
sysctl_nmbclusters(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbclusters;

	newnmbclusters = nmbclusters;
	error = sysctl_handle_int(oidp, &newnmbclusters, 0, req);
	if (error == 0 && req->newptr) {
		if (newnmbclusters > nmbclusters &&
		    nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
			nmbclusters = newnmbclusters;
			uma_zone_set_max(zone_clust, nmbclusters);
			nmbclusters = uma_zone_get_max(zone_clust);
			EVENTHANDLER_INVOKE(nmbclusters_change);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbclusters, CTLTYPE_INT|CTLFLAG_RW,
    &nmbclusters, 0, sysctl_nmbclusters, "IU",
    "Maximum number of mbuf clusters allowed");
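
/*
 * The cluster limit can only be raised at runtime, and only while
 * nmbufs is large enough to back all cluster types; anything else
 * returns EINVAL.  An illustrative sysctl(8) invocation:
 *
 *	# sysctl kern.ipc.nmbclusters=262144
 */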

static int
sysctl_nmbjumbop(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbjumbop;

	newnmbjumbop = nmbjumbop;
	error = sysctl_handle_int(oidp, &newnmbjumbop, 0, req);
	if (error == 0 && req->newptr) {
		if (newnmbjumbop > nmbjumbop &&
		    nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
			nmbjumbop = newnmbjumbop;
			uma_zone_set_max(zone_jumbop, nmbjumbop);
			nmbjumbop = uma_zone_get_max(zone_jumbop);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbop, CTLTYPE_INT|CTLFLAG_RW,
    &nmbjumbop, 0, sysctl_nmbjumbop, "IU",
    "Maximum number of mbuf page size jumbo clusters allowed");

static int
sysctl_nmbjumbo9(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbjumbo9;

	newnmbjumbo9 = nmbjumbo9;
	error = sysctl_handle_int(oidp, &newnmbjumbo9, 0, req);
	if (error == 0 && req->newptr) {
		if (newnmbjumbo9 > nmbjumbo9 &&
		    nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
			nmbjumbo9 = newnmbjumbo9;
			uma_zone_set_max(zone_jumbo9, nmbjumbo9);
			nmbjumbo9 = uma_zone_get_max(zone_jumbo9);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbo9, CTLTYPE_INT|CTLFLAG_RW,
    &nmbjumbo9, 0, sysctl_nmbjumbo9, "IU",
    "Maximum number of mbuf 9k jumbo clusters allowed");

static int
sysctl_nmbjumbo16(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbjumbo16;

	newnmbjumbo16 = nmbjumbo16;
	error = sysctl_handle_int(oidp, &newnmbjumbo16, 0, req);
	if (error == 0 && req->newptr) {
		if (newnmbjumbo16 > nmbjumbo16 &&
		    nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
			nmbjumbo16 = newnmbjumbo16;
			uma_zone_set_max(zone_jumbo16, nmbjumbo16);
			nmbjumbo16 = uma_zone_get_max(zone_jumbo16);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbo16, CTLTYPE_INT|CTLFLAG_RW,
    &nmbjumbo16, 0, sysctl_nmbjumbo16, "IU",
    "Maximum number of mbuf 16k jumbo clusters allowed");

static int
sysctl_nmbufs(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbufs;

	newnmbufs = nmbufs;
	error = sysctl_handle_int(oidp, &newnmbufs, 0, req);
	if (error == 0 && req->newptr) {
		if (newnmbufs > nmbufs) {
			nmbufs = newnmbufs;
			uma_zone_set_max(zone_mbuf, nmbufs);
			nmbufs = uma_zone_get_max(zone_mbuf);
			EVENTHANDLER_INVOKE(nmbufs_change);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbufs, CTLTYPE_INT|CTLFLAG_RW,
    &nmbufs, 0, sysctl_nmbufs, "IU",
    "Maximum number of mbufs allowed");

SYSCTL_STRUCT(_kern_ipc, OID_AUTO, mbstat, CTLFLAG_RD, &mbstat, mbstat,
    "Mbuf general information and statistics");

/*
 * Zones from which we allocate.
 */
uma_zone_t	zone_mbuf;
uma_zone_t	zone_clust;
uma_zone_t	zone_pack;
uma_zone_t	zone_jumbop;
uma_zone_t	zone_jumbo9;
uma_zone_t	zone_jumbo16;
uma_zone_t	zone_ext_refcnt;

/*
 * Local prototypes.
 */
static int	mb_ctor_mbuf(void *, int, void *, int);
static int	mb_ctor_clust(void *, int, void *, int);
static int	mb_ctor_pack(void *, int, void *, int);
static void	mb_dtor_mbuf(void *, int, void *);
static void	mb_dtor_clust(void *, int, void *);
static void	mb_dtor_pack(void *, int, void *);
static int	mb_zinit_pack(void *, int, int);
static void	mb_zfini_pack(void *, int);

static void	mb_reclaim(void *);
static void	mbuf_init(void *);
static void    *mbuf_jumbo_alloc(uma_zone_t, int, uint8_t *, int);

/* Ensure that MSIZE is a power of 2. */
CTASSERT((((MSIZE - 1) ^ MSIZE) + 1) >> 1 == MSIZE);
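
/*
 * A worked example of the assertion above, assuming MSIZE == 256:
 * for a power of two x, (x - 1) ^ x sets every bit up to and
 * including the top bit of x, so 255 ^ 256 == 511; adding 1 yields
 * 2x (512) and the right shift restores x (256).  For a value such
 * as 320, 319 ^ 320 == 127, giving (127 + 1) >> 1 == 64 != 320, and
 * the assertion fails at compile time.
 */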

/*
 * Initialize FreeBSD Network buffer allocation.
 */
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbuf_init, NULL);
static void
mbuf_init(void *dummy)
{

	/*
	 * Configure UMA zones for Mbufs, Clusters, and Packets.
	 */
	zone_mbuf = uma_zcreate(MBUF_MEM_NAME, MSIZE,
	    mb_ctor_mbuf, mb_dtor_mbuf,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    MSIZE - 1, UMA_ZONE_MAXBUCKET);
	if (nmbufs > 0)
		nmbufs = uma_zone_set_max(zone_mbuf, nmbufs);
	uma_zone_set_warning(zone_mbuf, "kern.ipc.nmbufs limit reached");

	zone_clust = uma_zcreate(MBUF_CLUSTER_MEM_NAME, MCLBYTES,
	    mb_ctor_clust, mb_dtor_clust,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
	if (nmbclusters > 0)
		nmbclusters = uma_zone_set_max(zone_clust, nmbclusters);
	uma_zone_set_warning(zone_clust, "kern.ipc.nmbclusters limit reached");

	zone_pack = uma_zsecond_create(MBUF_PACKET_MEM_NAME, mb_ctor_pack,
	    mb_dtor_pack, mb_zinit_pack, mb_zfini_pack, zone_mbuf);

	/* Make the jumbo frame zones too: page size, 9k and 16k. */
	zone_jumbop = uma_zcreate(MBUF_JUMBOP_MEM_NAME, MJUMPAGESIZE,
	    mb_ctor_clust, mb_dtor_clust,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
	if (nmbjumbop > 0)
		nmbjumbop = uma_zone_set_max(zone_jumbop, nmbjumbop);
	uma_zone_set_warning(zone_jumbop, "kern.ipc.nmbjumbop limit reached");

	zone_jumbo9 = uma_zcreate(MBUF_JUMBO9_MEM_NAME, MJUM9BYTES,
	    mb_ctor_clust, mb_dtor_clust,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
	uma_zone_set_allocf(zone_jumbo9, mbuf_jumbo_alloc);
	if (nmbjumbo9 > 0)
		nmbjumbo9 = uma_zone_set_max(zone_jumbo9, nmbjumbo9);
	uma_zone_set_warning(zone_jumbo9, "kern.ipc.nmbjumbo9 limit reached");

	zone_jumbo16 = uma_zcreate(MBUF_JUMBO16_MEM_NAME, MJUM16BYTES,
	    mb_ctor_clust, mb_dtor_clust,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
	uma_zone_set_allocf(zone_jumbo16, mbuf_jumbo_alloc);
	if (nmbjumbo16 > 0)
		nmbjumbo16 = uma_zone_set_max(zone_jumbo16, nmbjumbo16);
	uma_zone_set_warning(zone_jumbo16, "kern.ipc.nmbjumbo16 limit reached");

	zone_ext_refcnt = uma_zcreate(MBUF_EXTREFCNT_MEM_NAME, sizeof(u_int),
	    NULL, NULL,
	    NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_ZINIT);

	/* uma_prealloc() goes here... */

	/*
	 * Hook event handler for low-memory situation, used to
	 * drain protocols and push data back to the caches (UMA
	 * later pushes it back to VM).
	 */
	EVENTHANDLER_REGISTER(vm_lowmem, mb_reclaim, NULL,
	    EVENTHANDLER_PRI_FIRST);

	/*
	 * [Re]set counters and local statistics knobs.
	 * XXX Some of these should go and be replaced, but UMA stat
	 * gathering needs to be revised.
	 */
	mbstat.m_mbufs = 0;
	mbstat.m_mclusts = 0;
	mbstat.m_drain = 0;
	mbstat.m_msize = MSIZE;
	mbstat.m_mclbytes = MCLBYTES;
	mbstat.m_minclsize = MINCLSIZE;
	mbstat.m_mlen = MLEN;
	mbstat.m_mhlen = MHLEN;
	mbstat.m_numtypes = MT_NTYPES;

	mbstat.m_mcfail = mbstat.m_mpfail = 0;
	mbstat.sf_iocnt = 0;
	mbstat.sf_allocwait = mbstat.sf_allocfail = 0;
}

/*
 * UMA backend page allocator for the jumbo frame zones.
 *
 * Allocates kernel virtual memory that is backed by contiguous physical
 * pages.
 */
static void *
mbuf_jumbo_alloc(uma_zone_t zone, int bytes, uint8_t *flags, int wait)
{

	/* Inform UMA that this allocator uses kernel_map/object. */
	*flags = UMA_SLAB_KERNEL;
	return ((void *)kmem_alloc_contig(kernel_map, bytes, wait,
	    (vm_paddr_t)0, ~(vm_paddr_t)0, 1, 0, VM_MEMATTR_DEFAULT));
}

/*
 * Constructor for Mbuf master zone.
 *
 * The 'arg' pointer points to a mb_args structure which
 * contains call-specific information required to support the
 * mbuf allocation API.  See mbuf.h.
 */
static int
mb_ctor_mbuf(void *mem, int size, void *arg, int how)
{
	struct mbuf *m;
	struct mb_args *args;
#ifdef MAC
	int error;
#endif
	int flags;
	short type;

#ifdef INVARIANTS
	trash_ctor(mem, size, arg, how);
#endif
	m = (struct mbuf *)mem;
	args = (struct mb_args *)arg;
	flags = args->flags;
	type = args->type;

	/*
	 * The mbuf is initialized later.  The caller has the
	 * responsibility to set up any MAC labels too.
	 */
	if (type == MT_NOINIT)
		return (0);

	m->m_next = NULL;
	m->m_nextpkt = NULL;
	m->m_len = 0;
	m->m_flags = flags;
	m->m_type = type;
	if (flags & M_PKTHDR) {
		m->m_data = m->m_pktdat;
		m->m_pkthdr.rcvif = NULL;
		m->m_pkthdr.header = NULL;
		m->m_pkthdr.len = 0;
		m->m_pkthdr.csum_flags = 0;
		m->m_pkthdr.csum_data = 0;
		m->m_pkthdr.tso_segsz = 0;
		m->m_pkthdr.ether_vtag = 0;
		m->m_pkthdr.flowid = 0;
		SLIST_INIT(&m->m_pkthdr.tags);
#ifdef MAC
		/* If the label init fails, fail the alloc */
		error = mac_mbuf_init(m, how);
		if (error)
			return (error);
#endif
	} else
		m->m_data = m->m_dat;
	return (0);
}
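
/*
 * For reference, the allocation API reaches mb_ctor_mbuf() above
 * through uma_zalloc_arg().  Roughly what the inline wrappers in
 * mbuf.h do (a sketch, not code from this file):
 *
 *	struct mb_args args;
 *
 *	args.flags = 0;
 *	args.type = type;
 *	return ((struct mbuf *)uma_zalloc_arg(zone_mbuf, &args, how));
 *
 * so 'arg' above points at the caller's mb_args.
 */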

/*
 * The Mbuf master zone destructor.
 */
static void
mb_dtor_mbuf(void *mem, int size, void *arg)
{
	struct mbuf *m;
	unsigned long flags;

	m = (struct mbuf *)mem;
	flags = (unsigned long)arg;

	if ((flags & MB_NOTAGS) == 0 && (m->m_flags & M_PKTHDR) != 0)
		m_tag_delete_chain(m, NULL);
	KASSERT((m->m_flags & M_EXT) == 0, ("%s: M_EXT set", __func__));
	KASSERT((m->m_flags & M_NOFREE) == 0, ("%s: M_NOFREE set", __func__));
#ifdef INVARIANTS
	trash_dtor(mem, size, arg);
#endif
}

/*
 * The Mbuf Packet zone destructor.
 */
static void
mb_dtor_pack(void *mem, int size, void *arg)
{
	struct mbuf *m;

	m = (struct mbuf *)mem;
	if ((m->m_flags & M_PKTHDR) != 0)
		m_tag_delete_chain(m, NULL);

	/* Make sure we've got a clean cluster back. */
	KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
	KASSERT(m->m_ext.ext_buf != NULL, ("%s: ext_buf == NULL", __func__));
	KASSERT(m->m_ext.ext_free == NULL, ("%s: ext_free != NULL", __func__));
	KASSERT(m->m_ext.ext_arg1 == NULL, ("%s: ext_arg1 != NULL", __func__));
	KASSERT(m->m_ext.ext_arg2 == NULL, ("%s: ext_arg2 != NULL", __func__));
	KASSERT(m->m_ext.ext_size == MCLBYTES, ("%s: ext_size != MCLBYTES", __func__));
	KASSERT(m->m_ext.ext_type == EXT_PACKET, ("%s: ext_type != EXT_PACKET", __func__));
	KASSERT(*m->m_ext.ref_cnt == 1, ("%s: ref_cnt != 1", __func__));
#ifdef INVARIANTS
	trash_dtor(m->m_ext.ext_buf, MCLBYTES, arg);
#endif
	/*
	 * If there are processes blocked on zone_clust, waiting for pages
	 * to be freed up, cause them to be woken up by draining the
	 * packet zone.  We are exposed to a race here (in the check for
	 * the UMA_ZFLAG_FULL) where we might miss the flag set, but that
	 * is deliberate.  We don't want to acquire the zone lock for every
	 * mbuf free.
	 */
	if (uma_zone_exhausted_nolock(zone_clust))
		zone_drain(zone_pack);
}

/*
 * The Cluster and Jumbo[PAGESIZE|9|16] zone constructor.
 *
 * Here the 'arg' pointer points to the Mbuf which we
 * are configuring cluster storage for.  If 'arg' is
 * NULL we allocate just the cluster without attaching
 * it to any mbuf.  See mbuf.h.
 */
static int
mb_ctor_clust(void *mem, int size, void *arg, int how)
{
	struct mbuf *m;
	u_int *refcnt;
	int type;
	uma_zone_t zone;

#ifdef INVARIANTS
	trash_ctor(mem, size, arg, how);
#endif
	switch (size) {
	case MCLBYTES:
		type = EXT_CLUSTER;
		zone = zone_clust;
		break;
#if MJUMPAGESIZE != MCLBYTES
	case MJUMPAGESIZE:
		type = EXT_JUMBOP;
		zone = zone_jumbop;
		break;
#endif
	case MJUM9BYTES:
		type = EXT_JUMBO9;
		zone = zone_jumbo9;
		break;
	case MJUM16BYTES:
		type = EXT_JUMBO16;
		zone = zone_jumbo16;
		break;
	default:
		panic("unknown cluster size");
		break;
	}

	m = (struct mbuf *)arg;
	refcnt = uma_find_refcnt(zone, mem);
	*refcnt = 1;
	if (m != NULL) {
		m->m_ext.ext_buf = (caddr_t)mem;
		m->m_data = m->m_ext.ext_buf;
		m->m_flags |= M_EXT;
		m->m_ext.ext_free = NULL;
		m->m_ext.ext_arg1 = NULL;
		m->m_ext.ext_arg2 = NULL;
		m->m_ext.ext_size = size;
		m->m_ext.ext_type = type;
		m->m_ext.ref_cnt = refcnt;
	}

	return (0);
}
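
/*
 * For reference, mb_ctor_clust() above is reached through
 * uma_zalloc_arg() with the mbuf, or NULL, as the argument; roughly
 * (a sketch of the mbuf.h wrappers, not code from this file):
 *
 *	uma_zalloc_arg(zone_clust, m, how);	attach cluster to 'm'
 *	uma_zalloc_arg(zone_clust, NULL, how);	bare cluster, no mbuf
 *
 * which is why the 'm != NULL' check above is needed.
 */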

/*
 * The Mbuf Cluster zone destructor.
 */
static void
mb_dtor_clust(void *mem, int size, void *arg)
{
#ifdef INVARIANTS
	uma_zone_t zone;

	zone = m_getzone(size);
	KASSERT(*(uma_find_refcnt(zone, mem)) <= 1,
	    ("%s: refcnt incorrect %u", __func__,
	    *(uma_find_refcnt(zone, mem))));

	trash_dtor(mem, size, arg);
#endif
}

/*
 * The Packet secondary zone's init routine, executed on the
 * object's transition from mbuf keg slab to zone cache.
 */
static int
mb_zinit_pack(void *mem, int size, int how)
{
	struct mbuf *m;

	m = (struct mbuf *)mem;		/* m is virgin. */
	if (uma_zalloc_arg(zone_clust, m, how) == NULL ||
	    m->m_ext.ext_buf == NULL)
		return (ENOMEM);
	m->m_ext.ext_type = EXT_PACKET;	/* Override. */
#ifdef INVARIANTS
	trash_init(m->m_ext.ext_buf, MCLBYTES, how);
#endif
	return (0);
}

/*
 * The Packet secondary zone's fini routine, executed on the
 * object's transition from zone cache to keg slab.
 */
static void
mb_zfini_pack(void *mem, int size)
{
	struct mbuf *m;

	m = (struct mbuf *)mem;
#ifdef INVARIANTS
	trash_fini(m->m_ext.ext_buf, MCLBYTES);
#endif
	uma_zfree_arg(zone_clust, m->m_ext.ext_buf, NULL);
#ifdef INVARIANTS
	trash_dtor(mem, size, NULL);
#endif
}

/*
 * The "packet" keg constructor.
 */
static int
mb_ctor_pack(void *mem, int size, void *arg, int how)
{
	struct mbuf *m;
	struct mb_args *args;
#ifdef MAC
	int error;
#endif
	int flags;
	short type;

	m = (struct mbuf *)mem;
	args = (struct mb_args *)arg;
	flags = args->flags;
	type = args->type;

#ifdef INVARIANTS
	trash_ctor(m->m_ext.ext_buf, MCLBYTES, arg, how);
#endif
	m->m_next = NULL;
	m->m_nextpkt = NULL;
	m->m_data = m->m_ext.ext_buf;
	m->m_len = 0;
	m->m_flags = (flags | M_EXT);
	m->m_type = type;

	if (flags & M_PKTHDR) {
		m->m_pkthdr.rcvif = NULL;
		m->m_pkthdr.len = 0;
		m->m_pkthdr.header = NULL;
		m->m_pkthdr.csum_flags = 0;
		m->m_pkthdr.csum_data = 0;
		m->m_pkthdr.tso_segsz = 0;
		m->m_pkthdr.ether_vtag = 0;
		m->m_pkthdr.flowid = 0;
		SLIST_INIT(&m->m_pkthdr.tags);
#ifdef MAC
		/* If the label init fails, fail the alloc */
		error = mac_mbuf_init(m, how);
		if (error)
			return (error);
#endif
	}
	/* m_ext is already initialized. */

	return (0);
}

int
m_pkthdr_init(struct mbuf *m, int how)
{
#ifdef MAC
	int error;
#endif

	m->m_data = m->m_pktdat;
	SLIST_INIT(&m->m_pkthdr.tags);
	m->m_pkthdr.rcvif = NULL;
	m->m_pkthdr.header = NULL;
	m->m_pkthdr.len = 0;
	m->m_pkthdr.flowid = 0;
	m->m_pkthdr.csum_flags = 0;
	m->m_pkthdr.csum_data = 0;
	m->m_pkthdr.tso_segsz = 0;
	m->m_pkthdr.ether_vtag = 0;
#ifdef MAC
	/* If the label init fails, fail the alloc */
	error = mac_mbuf_init(m, how);
	if (error)
		return (error);
#endif

	return (0);
}
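
/*
 * m_pkthdr_init() serves callers that obtain an mbuf without going
 * through the ctors above, e.g. code that embeds or recycles mbufs.
 * An illustrative (hypothetical) caller:
 *
 *	if (m_pkthdr_init(m, M_NOWAIT) != 0)
 *		goto fail;	(only MAC label setup can fail)
 *
 * Note that m_data is reset to m_pktdat, so the mbuf must own its
 * internal data region.
 */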

/*
 * This is the protocol drain routine.
 *
 * No locks should be held when this is called.  The drain routines
 * presently have to acquire some locks, which raises the possibility
 * of lock order reversal.
 */
static void
mb_reclaim(void *junk)
{
	struct domain *dp;
	struct protosw *pr;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK | WARN_PANIC, NULL,
	    "mb_reclaim()");

	for (dp = domains; dp != NULL; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain != NULL)
				(*pr->pr_drain)();
}
749