xref: /freebsd/sys/kern/kern_mbuf.c (revision 48c5129f93c5eb5419c87b08e4677d51513f1dc0)
/*-
 * Copyright (c) 2004, 2005,
 * 	Bosko Milekic <bmilekic@FreeBSD.org>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_param.h"

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/protosw.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

/*
 * In FreeBSD, Mbufs and Mbuf Clusters are allocated from UMA
 * Zones.
 *
 * Mbuf Clusters (2K, contiguous) are allocated from the Cluster
 * Zone.  The Zone can be capped at kern.ipc.nmbclusters, if the
 * administrator so desires.
 *
 * Mbufs are allocated from a UMA Master Zone called the Mbuf
 * Zone.
 *
 * Additionally, FreeBSD provides a Packet Zone, which it
 * configures as a Secondary Zone to the Mbuf Master Zone,
 * thus sharing backend Slab kegs with the Mbuf Master Zone.
 *
 * Thus common-case allocations and locking are simplified:
 *
 *  m_clget()                m_getcl()
 *    |                         |
 *    |   .------------>[(Packet Cache)]    m_get(), m_gethdr()
 *    |   |             [     Packet   ]            |
 *  [(Cluster Cache)]   [    Secondary ]   [ (Mbuf Cache)     ]
 *  [ Cluster Zone  ]   [     Zone     ]   [ Mbuf Master Zone ]
 *        |                       \________         |
 *  [ Cluster Keg   ]                      \       /
 *        |    	                         [ Mbuf Keg   ]
 *  [ Cluster Slabs ]                         |
 *        |                              [ Mbuf Slabs ]
 *         \____________(VM)_________________/
 *
 *
 * Whenever an object is allocated with uma_zalloc() out of
 * one of the Zones, its _ctor_ function is executed.  Likewise,
 * on any deallocation through uma_zfree() the _dtor_ function
 * is executed.
 *
 * Caches are per-CPU and are filled from the Master Zone.
 *
 * Whenever an object is allocated from the underlying global
 * memory pool it gets pre-initialized with the _zinit_ functions.
 * When the Kegs are overfull, objects get decommissioned with the
 * _zfini_ functions and freed back to the global memory pool.
 *
 */

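/*
 * A minimal usage sketch, for orientation only (it assumes the standard
 * allocation API declared in sys/mbuf.h and is not part of this file's
 * machinery):
 *
 *	struct mbuf *m;
 *
 *	m = m_getcl(M_WAITOK, MT_DATA, M_PKTHDR);	(Packet Zone: mbuf + cluster)
 * or
 *	m = m_gethdr(M_WAITOK, MT_DATA);		(Mbuf Zone: mbuf only)
 *	m_clget(m, M_NOWAIT);				(then attach a cluster and
 *							 check M_EXT in m->m_flags)
 * and finally
 *	m_freem(m);					(dtors run, memory returns to
 *							 the per-CPU caches)
 */
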
int nmbclusters;		/* limits number of mbuf clusters */
int nmbjumbop;			/* limits number of page size jumbo clusters */
int nmbjumbo9;			/* limits number of 9k jumbo clusters */
int nmbjumbo16;			/* limits number of 16k jumbo clusters */
struct mbstat mbstat;

/*
 * tunable_mbinit() has to run before init_maxsockets(); thus the
 * SYSINIT order below is SI_ORDER_MIDDLE, while init_maxsockets()
 * runs at SI_ORDER_ANY.
 */
static void
tunable_mbinit(void *dummy)
{

	/* This has to be done before VM init. */
	TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
	if (nmbclusters == 0) {
#ifdef VM_AUTOTUNE_NMBCLUSTERS
		nmbclusters = VM_AUTOTUNE_NMBCLUSTERS;
#else
		nmbclusters = 1024 + maxusers * 64;
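		/*
		 * For example, maxusers == 384 would give
		 * 1024 + 384 * 64 = 25600 clusters by this formula.
		 */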
#endif
#ifdef VM_MAX_AUTOTUNE_NMBCLUSTERS
		if (nmbclusters > VM_MAX_AUTOTUNE_NMBCLUSTERS)
			nmbclusters = VM_MAX_AUTOTUNE_NMBCLUSTERS;
#endif
	}

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbop", &nmbjumbop);
	if (nmbjumbop == 0)
		nmbjumbop = nmbclusters / 2;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbo9", &nmbjumbo9);
	if (nmbjumbo9 == 0)
		nmbjumbo9 = nmbclusters / 4;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbo16", &nmbjumbo16);
	if (nmbjumbo16 == 0)
		nmbjumbo16 = nmbclusters / 8;
}
SYSINIT(tunable_mbinit, SI_SUB_TUNABLES, SI_ORDER_MIDDLE, tunable_mbinit, NULL);

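/*
 * The sysctl handlers below all follow the same pattern: snapshot the
 * current limit, let sysctl_handle_int() process the request, and on a
 * write accept only an increase, pushing the new cap into the backing
 * UMA zone with uma_zone_set_max().  (The limits can also be seeded at
 * boot through the tunables fetched above, e.g. by setting
 * kern.ipc.nmbclusters in loader.conf(5).)
 */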
static int
sysctl_nmbclusters(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbclusters;

	newnmbclusters = nmbclusters;
	error = sysctl_handle_int(oidp, &newnmbclusters, 0, req);
	if (error == 0 && req->newptr) {
		if (newnmbclusters > nmbclusters) {
			nmbclusters = newnmbclusters;
			uma_zone_set_max(zone_clust, nmbclusters);
			EVENTHANDLER_INVOKE(nmbclusters_change);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbclusters, CTLTYPE_INT|CTLFLAG_RW,
    &nmbclusters, 0, sysctl_nmbclusters, "IU",
    "Maximum number of mbuf clusters allowed");

static int
sysctl_nmbjumbop(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbjumbop;

	newnmbjumbop = nmbjumbop;
	error = sysctl_handle_int(oidp, &newnmbjumbop, 0, req);
	if (error == 0 && req->newptr) {
		if (newnmbjumbop > nmbjumbop) {
			nmbjumbop = newnmbjumbop;
			uma_zone_set_max(zone_jumbop, nmbjumbop);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbop, CTLTYPE_INT|CTLFLAG_RW,
    &nmbjumbop, 0, sysctl_nmbjumbop, "IU",
    "Maximum number of mbuf page size jumbo clusters allowed");

static int
sysctl_nmbjumbo9(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbjumbo9;

	newnmbjumbo9 = nmbjumbo9;
	error = sysctl_handle_int(oidp, &newnmbjumbo9, 0, req);
	if (error == 0 && req->newptr) {
		if (newnmbjumbo9 > nmbjumbo9) {
			nmbjumbo9 = newnmbjumbo9;
			uma_zone_set_max(zone_jumbo9, nmbjumbo9);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbo9, CTLTYPE_INT|CTLFLAG_RW,
    &nmbjumbo9, 0, sysctl_nmbjumbo9, "IU",
    "Maximum number of mbuf 9k jumbo clusters allowed");

static int
sysctl_nmbjumbo16(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbjumbo16;

	newnmbjumbo16 = nmbjumbo16;
	error = sysctl_handle_int(oidp, &newnmbjumbo16, 0, req);
	if (error == 0 && req->newptr) {
		if (newnmbjumbo16 > nmbjumbo16) {
			nmbjumbo16 = newnmbjumbo16;
			uma_zone_set_max(zone_jumbo16, nmbjumbo16);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbo16, CTLTYPE_INT|CTLFLAG_RW,
    &nmbjumbo16, 0, sysctl_nmbjumbo16, "IU",
    "Maximum number of mbuf 16k jumbo clusters allowed");

SYSCTL_STRUCT(_kern_ipc, OID_AUTO, mbstat, CTLFLAG_RD, &mbstat, mbstat,
    "Mbuf general information and statistics");

/*
 * Zones from which we allocate.
 */
uma_zone_t	zone_mbuf;
uma_zone_t	zone_clust;
uma_zone_t	zone_pack;
uma_zone_t	zone_jumbop;
uma_zone_t	zone_jumbo9;
uma_zone_t	zone_jumbo16;
uma_zone_t	zone_ext_refcnt;

/*
 * Local prototypes.
 */
static int	mb_ctor_mbuf(void *, int, void *, int);
static int	mb_ctor_clust(void *, int, void *, int);
static int	mb_ctor_pack(void *, int, void *, int);
static void	mb_dtor_mbuf(void *, int, void *);
static void	mb_dtor_clust(void *, int, void *);
static void	mb_dtor_pack(void *, int, void *);
static int	mb_zinit_pack(void *, int, int);
static void	mb_zfini_pack(void *, int);

static void	mb_reclaim(void *);
static void	mbuf_init(void *);
static void    *mbuf_jumbo_alloc(uma_zone_t, int, uint8_t *, int);

/* Ensure that MSIZE is a power of 2. */
CTASSERT((((MSIZE - 1) ^ MSIZE) + 1) >> 1 == MSIZE);
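/*
 * For example, with MSIZE == 256: MSIZE - 1 == 255 flips exactly the bits
 * below the single set bit, so (255 ^ 256) == 511 and (511 + 1) >> 1 ==
 * 256 == MSIZE.  For any non-power-of-two the identity fails and the
 * CTASSERT breaks the build.
 */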

/*
 * Initialize FreeBSD Network buffer allocation.
 */
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbuf_init, NULL);
static void
mbuf_init(void *dummy)
{

	/*
	 * Configure UMA zones for Mbufs, Clusters, and Packets.
	 */
	zone_mbuf = uma_zcreate(MBUF_MEM_NAME, MSIZE,
	    mb_ctor_mbuf, mb_dtor_mbuf,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    MSIZE - 1, UMA_ZONE_MAXBUCKET);

	zone_clust = uma_zcreate(MBUF_CLUSTER_MEM_NAME, MCLBYTES,
	    mb_ctor_clust, mb_dtor_clust,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
	if (nmbclusters > 0)
		uma_zone_set_max(zone_clust, nmbclusters);

	zone_pack = uma_zsecond_create(MBUF_PACKET_MEM_NAME, mb_ctor_pack,
	    mb_dtor_pack, mb_zinit_pack, mb_zfini_pack, zone_mbuf);

	/* Make jumbo frame zones too: page size, 9k and 16k. */
	zone_jumbop = uma_zcreate(MBUF_JUMBOP_MEM_NAME, MJUMPAGESIZE,
	    mb_ctor_clust, mb_dtor_clust,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
	if (nmbjumbop > 0)
		uma_zone_set_max(zone_jumbop, nmbjumbop);

	zone_jumbo9 = uma_zcreate(MBUF_JUMBO9_MEM_NAME, MJUM9BYTES,
	    mb_ctor_clust, mb_dtor_clust,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
	if (nmbjumbo9 > 0)
		uma_zone_set_max(zone_jumbo9, nmbjumbo9);
	uma_zone_set_allocf(zone_jumbo9, mbuf_jumbo_alloc);

	zone_jumbo16 = uma_zcreate(MBUF_JUMBO16_MEM_NAME, MJUM16BYTES,
	    mb_ctor_clust, mb_dtor_clust,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
	if (nmbjumbo16 > 0)
		uma_zone_set_max(zone_jumbo16, nmbjumbo16);
	uma_zone_set_allocf(zone_jumbo16, mbuf_jumbo_alloc);

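	/*
	 * Reference counters for external (M_EXT) buffers whose storage is
	 * not managed by one of the UMA_ZONE_REFCNT zones above; see
	 * m_extadd() in kern/uipc_mbuf.c.
	 */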
	zone_ext_refcnt = uma_zcreate(MBUF_EXTREFCNT_MEM_NAME, sizeof(u_int),
	    NULL, NULL,
	    NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_ZINIT);

	/* uma_prealloc() goes here... */

	/*
	 * Hook event handler for low-memory situation, used to
	 * drain protocols and push data back to the caches (UMA
	 * later pushes it back to VM).
	 */
	EVENTHANDLER_REGISTER(vm_lowmem, mb_reclaim, NULL,
	    EVENTHANDLER_PRI_FIRST);

	/*
	 * [Re]set counters and local statistics knobs.
	 * XXX Some of these should go and be replaced, but UMA stat
	 * gathering needs to be revised.
	 */
	mbstat.m_mbufs = 0;
	mbstat.m_mclusts = 0;
	mbstat.m_drain = 0;
	mbstat.m_msize = MSIZE;
	mbstat.m_mclbytes = MCLBYTES;
	mbstat.m_minclsize = MINCLSIZE;
	mbstat.m_mlen = MLEN;
	mbstat.m_mhlen = MHLEN;
	mbstat.m_numtypes = MT_NTYPES;

	mbstat.m_mcfail = mbstat.m_mpfail = 0;
	mbstat.sf_iocnt = 0;
	mbstat.sf_allocwait = mbstat.sf_allocfail = 0;
}

/*
 * UMA backend page allocator for the jumbo frame zones.
 *
 * Allocates kernel virtual memory that is backed by contiguous physical
 * pages.
 */
static void *
mbuf_jumbo_alloc(uma_zone_t zone, int bytes, uint8_t *flags, int wait)
{

	/* Inform UMA that this allocator uses kernel_map/object. */
	*flags = UMA_SLAB_KERNEL;
	return ((void *)kmem_alloc_contig(kernel_map, bytes, wait,
	    (vm_paddr_t)0, ~(vm_paddr_t)0, 1, 0, VM_MEMATTR_DEFAULT));
}

/*
 * Constructor for Mbuf master zone.
 *
 * The 'arg' pointer points to a mb_args structure which
 * carries the call-specific flags and mbuf type needed to
 * support the mbuf allocation API.  See mbuf.h.
 */
static int
mb_ctor_mbuf(void *mem, int size, void *arg, int how)
{
	struct mbuf *m;
	struct mb_args *args;
#ifdef MAC
	int error;
#endif
	int flags;
	short type;

#ifdef INVARIANTS
	trash_ctor(mem, size, arg, how);
#endif
	m = (struct mbuf *)mem;
	args = (struct mb_args *)arg;
	flags = args->flags;
	type = args->type;

	/*
	 * The mbuf is initialized later.  The caller has the
	 * responsibility to set up any MAC labels too.
	 */
	if (type == MT_NOINIT)
		return (0);

	m->m_next = NULL;
	m->m_nextpkt = NULL;
	m->m_len = 0;
	m->m_flags = flags;
	m->m_type = type;
	if (flags & M_PKTHDR) {
		m->m_data = m->m_pktdat;
		m->m_pkthdr.rcvif = NULL;
		m->m_pkthdr.header = NULL;
		m->m_pkthdr.len = 0;
		m->m_pkthdr.csum_flags = 0;
		m->m_pkthdr.csum_data = 0;
		m->m_pkthdr.tso_segsz = 0;
		m->m_pkthdr.ether_vtag = 0;
		m->m_pkthdr.flowid = 0;
		SLIST_INIT(&m->m_pkthdr.tags);
#ifdef MAC
		/* If the label init fails, fail the alloc */
		error = mac_mbuf_init(m, how);
		if (error)
			return (error);
#endif
	} else
		m->m_data = m->m_dat;
	return (0);
}

/*
 * The Mbuf master zone destructor.
 */
static void
mb_dtor_mbuf(void *mem, int size, void *arg)
{
	struct mbuf *m;
	unsigned long flags;

	m = (struct mbuf *)mem;
	flags = (unsigned long)arg;

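	/* A caller may pass MB_NOTAGS as the free argument to skip tag cleanup. */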
	if ((flags & MB_NOTAGS) == 0 && (m->m_flags & M_PKTHDR) != 0)
		m_tag_delete_chain(m, NULL);
	KASSERT((m->m_flags & M_EXT) == 0, ("%s: M_EXT set", __func__));
	KASSERT((m->m_flags & M_NOFREE) == 0, ("%s: M_NOFREE set", __func__));
#ifdef INVARIANTS
	trash_dtor(mem, size, arg);
#endif
}

/*
 * The Mbuf Packet zone destructor.
 */
static void
mb_dtor_pack(void *mem, int size, void *arg)
{
	struct mbuf *m;

	m = (struct mbuf *)mem;
	if ((m->m_flags & M_PKTHDR) != 0)
		m_tag_delete_chain(m, NULL);

	/* Make sure we've got a clean cluster back. */
	KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
	KASSERT(m->m_ext.ext_buf != NULL, ("%s: ext_buf == NULL", __func__));
	KASSERT(m->m_ext.ext_free == NULL, ("%s: ext_free != NULL", __func__));
	KASSERT(m->m_ext.ext_arg1 == NULL, ("%s: ext_arg1 != NULL", __func__));
	KASSERT(m->m_ext.ext_arg2 == NULL, ("%s: ext_arg2 != NULL", __func__));
	KASSERT(m->m_ext.ext_size == MCLBYTES, ("%s: ext_size != MCLBYTES", __func__));
	KASSERT(m->m_ext.ext_type == EXT_PACKET, ("%s: ext_type != EXT_PACKET", __func__));
	KASSERT(*m->m_ext.ref_cnt == 1, ("%s: ref_cnt != 1", __func__));
#ifdef INVARIANTS
	trash_dtor(m->m_ext.ext_buf, MCLBYTES, arg);
#endif
	/*
	 * If there are processes blocked on zone_clust, waiting for pages
	 * to be freed up, cause them to be woken up by draining the
	 * packet zone.  We are exposed to a race here (in the check for
	 * the UMA_ZFLAG_FULL) where we might miss the flag set, but that
	 * is deliberate.  We don't want to acquire the zone lock for every
	 * mbuf free.
	 */
	if (uma_zone_exhausted_nolock(zone_clust))
		zone_drain(zone_pack);
}

/*
 * The Cluster and Jumbo[PAGESIZE|9|16] zone constructor.
 *
 * Here the 'arg' pointer points to the Mbuf which we
 * are configuring cluster storage for.  If 'arg' is
 * NULL we allocate just the cluster without attaching
 * it to any mbuf.  See mbuf.h.
 */
static int
mb_ctor_clust(void *mem, int size, void *arg, int how)
{
	struct mbuf *m;
	u_int *refcnt;
	int type;
	uma_zone_t zone;

#ifdef INVARIANTS
	trash_ctor(mem, size, arg, how);
#endif
	switch (size) {
	case MCLBYTES:
		type = EXT_CLUSTER;
		zone = zone_clust;
		break;
#if MJUMPAGESIZE != MCLBYTES
	case MJUMPAGESIZE:
		type = EXT_JUMBOP;
		zone = zone_jumbop;
		break;
#endif
	case MJUM9BYTES:
		type = EXT_JUMBO9;
		zone = zone_jumbo9;
		break;
	case MJUM16BYTES:
		type = EXT_JUMBO16;
		zone = zone_jumbo16;
		break;
	default:
		panic("unknown cluster size");
		break;
	}

	m = (struct mbuf *)arg;
	refcnt = uma_find_refcnt(zone, mem);
	*refcnt = 1;
	if (m != NULL) {
		m->m_ext.ext_buf = (caddr_t)mem;
		m->m_data = m->m_ext.ext_buf;
		m->m_flags |= M_EXT;
		m->m_ext.ext_free = NULL;
		m->m_ext.ext_arg1 = NULL;
		m->m_ext.ext_arg2 = NULL;
		m->m_ext.ext_size = size;
		m->m_ext.ext_type = type;
		m->m_ext.ref_cnt = refcnt;
	}

	return (0);
}

/*
 * The Mbuf Cluster zone destructor.
 */
static void
mb_dtor_clust(void *mem, int size, void *arg)
{
#ifdef INVARIANTS
	uma_zone_t zone;

	zone = m_getzone(size);
	KASSERT(*(uma_find_refcnt(zone, mem)) <= 1,
	    ("%s: refcnt incorrect %u", __func__,
	    *(uma_find_refcnt(zone, mem))));

	trash_dtor(mem, size, arg);
#endif
}

/*
 * The Packet secondary zone's init routine, executed on the
 * object's transition from mbuf keg slab to zone cache.
 */
static int
mb_zinit_pack(void *mem, int size, int how)
{
	struct mbuf *m;

	m = (struct mbuf *)mem;		/* m is virgin. */
	if (uma_zalloc_arg(zone_clust, m, how) == NULL ||
	    m->m_ext.ext_buf == NULL)
		return (ENOMEM);
	m->m_ext.ext_type = EXT_PACKET;	/* Override. */
#ifdef INVARIANTS
	trash_init(m->m_ext.ext_buf, MCLBYTES, how);
#endif
	return (0);
}

/*
 * The Packet secondary zone's fini routine, executed on the
 * object's transition from zone cache to keg slab.
 */
static void
mb_zfini_pack(void *mem, int size)
{
	struct mbuf *m;

	m = (struct mbuf *)mem;
#ifdef INVARIANTS
	trash_fini(m->m_ext.ext_buf, MCLBYTES);
#endif
	uma_zfree_arg(zone_clust, m->m_ext.ext_buf, NULL);
#ifdef INVARIANTS
	trash_dtor(mem, size, NULL);
#endif
}

/*
 * The "packet" keg constructor.
 */
static int
mb_ctor_pack(void *mem, int size, void *arg, int how)
{
	struct mbuf *m;
	struct mb_args *args;
#ifdef MAC
	int error;
#endif
	int flags;
	short type;

	m = (struct mbuf *)mem;
	args = (struct mb_args *)arg;
	flags = args->flags;
	type = args->type;

#ifdef INVARIANTS
	trash_ctor(m->m_ext.ext_buf, MCLBYTES, arg, how);
#endif
	m->m_next = NULL;
	m->m_nextpkt = NULL;
	m->m_data = m->m_ext.ext_buf;
	m->m_len = 0;
	m->m_flags = (flags | M_EXT);
	m->m_type = type;

	if (flags & M_PKTHDR) {
		m->m_pkthdr.rcvif = NULL;
		m->m_pkthdr.len = 0;
		m->m_pkthdr.header = NULL;
		m->m_pkthdr.csum_flags = 0;
		m->m_pkthdr.csum_data = 0;
		m->m_pkthdr.tso_segsz = 0;
		m->m_pkthdr.ether_vtag = 0;
		m->m_pkthdr.flowid = 0;
		SLIST_INIT(&m->m_pkthdr.tags);
#ifdef MAC
		/* If the label init fails, fail the alloc */
		error = mac_mbuf_init(m, how);
		if (error)
			return (error);
#endif
	}
	/* m_ext is already initialized. */

	return (0);
}

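/*
 * Initialize the packet-header fields of an mbuf, including its MAC label
 * when the MAC framework is compiled in; returns an error if the label
 * initialization fails.
 */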
int
m_pkthdr_init(struct mbuf *m, int how)
{
#ifdef MAC
	int error;
#endif
	m->m_data = m->m_pktdat;
	SLIST_INIT(&m->m_pkthdr.tags);
	m->m_pkthdr.rcvif = NULL;
	m->m_pkthdr.header = NULL;
	m->m_pkthdr.len = 0;
	m->m_pkthdr.flowid = 0;
	m->m_pkthdr.csum_flags = 0;
	m->m_pkthdr.csum_data = 0;
	m->m_pkthdr.tso_segsz = 0;
	m->m_pkthdr.ether_vtag = 0;
#ifdef MAC
	/* If the label init fails, fail the alloc */
	error = mac_mbuf_init(m, how);
	if (error)
		return (error);
#endif

	return (0);
}

/*
 * This is the protocol drain routine.
 *
 * No locks should be held when this is called.  The drain routines have to
 * presently acquire some locks, which raises the possibility of lock order
 * reversal.
 */
static void
mb_reclaim(void *junk)
{
	struct domain *dp;
	struct protosw *pr;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK | WARN_PANIC, NULL,
	    "mb_reclaim()");

	for (dp = domains; dp != NULL; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain != NULL)
				(*pr->pr_drain)();
}