xref: /freebsd/sys/kern/kern_mbuf.c (revision 0070b575f47d9fa116f7c4367faed3e1b5f88555)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2004, 2005,
 *	Bosko Milekic <bmilekic@FreeBSD.org>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_param.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/domainset.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/protosw.h>
#include <sys/sf_buf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_var.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/uma.h>
#include <vm/uma_dbg.h>

/*
 * In FreeBSD, Mbufs and Mbuf Clusters are allocated from UMA
 * Zones.
 *
 * Mbuf Clusters (2K, contiguous) are allocated from the Cluster
 * Zone.  The Zone can be capped at kern.ipc.nmbclusters, if the
 * administrator so desires.
 *
 * Mbufs are allocated from a UMA Master Zone called the Mbuf
 * Zone.
 *
 * Additionally, FreeBSD provides a Packet Zone, which it
 * configures as a Secondary Zone to the Mbuf Master Zone,
 * thus sharing backend Slab kegs with the Mbuf Master Zone.
 *
 * Thus common-case allocations and locking are simplified:
 *
 *  m_clget()                m_getcl()
 *    |                         |
 *    |   .------------>[(Packet Cache)]    m_get(), m_gethdr()
 *    |   |             [     Packet   ]            |
 *  [(Cluster Cache)]   [    Secondary ]   [ (Mbuf Cache)     ]
 *  [ Cluster Zone  ]   [     Zone     ]   [ Mbuf Master Zone ]
 *        |                       \________         |
 *  [ Cluster Keg   ]                      \       /
 *        |                            [ Mbuf Keg   ]
 *  [ Cluster Slabs ]                         |
 *        |                              [ Mbuf Slabs ]
 *         \____________(VM)_________________/
 *
 *
 * Whenever an object is allocated with uma_zalloc() out of
 * one of the Zones, its _ctor_ function is executed.  Likewise,
 * whenever an object is deallocated through uma_zfree(), its
 * _dtor_ function is executed.
 *
 * Caches are per-CPU and are filled from the Master Zone.
 *
 * Whenever an object is allocated from the underlying global
 * memory pool it gets pre-initialized with the _zinit_ functions.
 * When the Kegs are overfull, objects get decommissioned with
 * _zfini_ functions and freed back to the global memory pool.
 */
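
/*
 * As an illustrative sketch (not part of this file's logic), the
 * common fast path described above looks like this from a consumer's
 * point of view: m_getcl() takes a ready-made mbuf + cluster pair
 * from the Packet Zone's per-CPU cache, and m_freem() returns it:
 *
 *	struct mbuf *m;
 *
 *	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	...
 *	m_freem(m);
 */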

int nmbufs;			/* limits number of mbufs */
int nmbclusters;		/* limits number of mbuf clusters */
int nmbjumbop;			/* limits number of page size jumbo clusters */
int nmbjumbo9;			/* limits number of 9k jumbo clusters */
int nmbjumbo16;			/* limits number of 16k jumbo clusters */

bool mb_use_ext_pgs;		/* use EXT_PGS mbufs for sendfile */
SYSCTL_BOOL(_kern_ipc, OID_AUTO, mb_use_ext_pgs, CTLFLAG_RWTUN,
    &mb_use_ext_pgs, 0,
    "Use unmapped mbufs for sendfile(2)");

static quad_t maxmbufmem;	/* overall real memory limit for all mbufs */

SYSCTL_QUAD(_kern_ipc, OID_AUTO, maxmbufmem,
    CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &maxmbufmem, 0,
    "Maximum real memory allocatable to various mbuf types");

static counter_u64_t snd_tag_count;
SYSCTL_COUNTER_U64(_kern_ipc, OID_AUTO, num_snd_tags, CTLFLAG_RW,
    &snd_tag_count, "# of active mbuf send tags");

/*
 * tunable_mbinit() has to be run before any mbuf allocations are done.
 */
static void
tunable_mbinit(void *dummy)
{
	quad_t realmem;

	/*
	 * The default limit for all mbuf related memory is 1/2 of all
	 * available kernel memory (physical or kmem).
	 * At most it can be 3/4 of available kernel memory.
	 */
	realmem = qmin((quad_t)physmem * PAGE_SIZE, vm_kmem_size);
	maxmbufmem = realmem / 2;
	TUNABLE_QUAD_FETCH("kern.ipc.maxmbufmem", &maxmbufmem);
	if (maxmbufmem > realmem / 4 * 3)
		maxmbufmem = realmem / 4 * 3;

	TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
	if (nmbclusters == 0)
		nmbclusters = maxmbufmem / MCLBYTES / 4;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbop", &nmbjumbop);
	if (nmbjumbop == 0)
		nmbjumbop = maxmbufmem / MJUMPAGESIZE / 4;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbo9", &nmbjumbo9);
	if (nmbjumbo9 == 0)
		nmbjumbo9 = maxmbufmem / MJUM9BYTES / 6;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbo16", &nmbjumbo16);
	if (nmbjumbo16 == 0)
		nmbjumbo16 = maxmbufmem / MJUM16BYTES / 6;

	/*
	 * We need at least as many mbufs as we have clusters of
	 * the various types added together.
	 */
	TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);
	if (nmbufs < nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16)
		nmbufs = lmax(maxmbufmem / MSIZE / 5,
		    nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16);
}
SYSINIT(tunable_mbinit, SI_SUB_KMEM, SI_ORDER_MIDDLE, tunable_mbinit, NULL);
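
/*
 * Example (illustrative values only): these tunables are normally set
 * from loader.conf(5) so that they are in place before this SYSINIT
 * runs, e.g.:
 *
 *	kern.ipc.maxmbufmem="1073741824"
 *	kern.ipc.nmbclusters="262144"
 */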

static int
sysctl_nmbclusters(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbclusters;

	newnmbclusters = nmbclusters;
	error = sysctl_handle_int(oidp, &newnmbclusters, 0, req);
	if (error == 0 && req->newptr && newnmbclusters != nmbclusters) {
		if (newnmbclusters > nmbclusters &&
		    nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
			nmbclusters = newnmbclusters;
			nmbclusters = uma_zone_set_max(zone_clust, nmbclusters);
			EVENTHANDLER_INVOKE(nmbclusters_change);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbclusters, CTLTYPE_INT | CTLFLAG_RW,
    &nmbclusters, 0, sysctl_nmbclusters, "IU",
    "Maximum number of mbuf clusters allowed");
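
/*
 * Example (sketch): these handlers only permit raising a limit at run
 * time; an administrator could grow the cluster limit with
 *
 *	# sysctl kern.ipc.nmbclusters=524288
 *
 * while an attempt to lower it is rejected with EINVAL.
 */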

static int
sysctl_nmbjumbop(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbjumbop;

	newnmbjumbop = nmbjumbop;
	error = sysctl_handle_int(oidp, &newnmbjumbop, 0, req);
	if (error == 0 && req->newptr && newnmbjumbop != nmbjumbop) {
		if (newnmbjumbop > nmbjumbop &&
		    nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
			nmbjumbop = newnmbjumbop;
			nmbjumbop = uma_zone_set_max(zone_jumbop, nmbjumbop);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbop, CTLTYPE_INT | CTLFLAG_RW,
    &nmbjumbop, 0, sysctl_nmbjumbop, "IU",
    "Maximum number of mbuf page size jumbo clusters allowed");

static int
sysctl_nmbjumbo9(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbjumbo9;

	newnmbjumbo9 = nmbjumbo9;
	error = sysctl_handle_int(oidp, &newnmbjumbo9, 0, req);
	if (error == 0 && req->newptr && newnmbjumbo9 != nmbjumbo9) {
		if (newnmbjumbo9 > nmbjumbo9 &&
		    nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
			nmbjumbo9 = newnmbjumbo9;
			nmbjumbo9 = uma_zone_set_max(zone_jumbo9, nmbjumbo9);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbo9, CTLTYPE_INT | CTLFLAG_RW,
    &nmbjumbo9, 0, sysctl_nmbjumbo9, "IU",
    "Maximum number of mbuf 9k jumbo clusters allowed");

static int
sysctl_nmbjumbo16(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbjumbo16;

	newnmbjumbo16 = nmbjumbo16;
	error = sysctl_handle_int(oidp, &newnmbjumbo16, 0, req);
	if (error == 0 && req->newptr && newnmbjumbo16 != nmbjumbo16) {
		if (newnmbjumbo16 > nmbjumbo16 &&
		    nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
			nmbjumbo16 = newnmbjumbo16;
			nmbjumbo16 = uma_zone_set_max(zone_jumbo16, nmbjumbo16);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbo16, CTLTYPE_INT | CTLFLAG_RW,
    &nmbjumbo16, 0, sysctl_nmbjumbo16, "IU",
    "Maximum number of mbuf 16k jumbo clusters allowed");

static int
sysctl_nmbufs(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbufs;

	newnmbufs = nmbufs;
	error = sysctl_handle_int(oidp, &newnmbufs, 0, req);
	if (error == 0 && req->newptr && newnmbufs != nmbufs) {
		if (newnmbufs > nmbufs) {
			nmbufs = newnmbufs;
			nmbufs = uma_zone_set_max(zone_mbuf, nmbufs);
			EVENTHANDLER_INVOKE(nmbufs_change);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbufs, CTLTYPE_INT | CTLFLAG_RW,
    &nmbufs, 0, sysctl_nmbufs, "IU",
    "Maximum number of mbufs allowed");

/*
 * Zones from which we allocate.
 */
uma_zone_t	zone_mbuf;
uma_zone_t	zone_clust;
uma_zone_t	zone_pack;
uma_zone_t	zone_jumbop;
uma_zone_t	zone_jumbo9;
uma_zone_t	zone_jumbo16;
uma_zone_t	zone_extpgs;

/*
 * Local prototypes.
 */
static int	mb_ctor_mbuf(void *, int, void *, int);
static int	mb_ctor_clust(void *, int, void *, int);
static int	mb_ctor_pack(void *, int, void *, int);
static void	mb_dtor_mbuf(void *, int, void *);
static void	mb_dtor_pack(void *, int, void *);
static int	mb_zinit_pack(void *, int, int);
static void	mb_zfini_pack(void *, int);
static void	mb_reclaim(uma_zone_t, int);
static void	*mbuf_jumbo_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);

/* Ensure that MSIZE is a power of 2. */
CTASSERT((((MSIZE - 1) ^ MSIZE) + 1) >> 1 == MSIZE);

_Static_assert(sizeof(struct mbuf_ext_pgs) == 256,
    "mbuf_ext_pgs size mismatch");

/*
 * Initialize FreeBSD Network buffer allocation.
 */
static void
mbuf_init(void *dummy)
{

	/*
	 * Configure UMA zones for Mbufs, Clusters, and Packets.
	 */
	zone_mbuf = uma_zcreate(MBUF_MEM_NAME, MSIZE,
	    mb_ctor_mbuf, mb_dtor_mbuf,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    MSIZE - 1, UMA_ZONE_MAXBUCKET);
	if (nmbufs > 0)
		nmbufs = uma_zone_set_max(zone_mbuf, nmbufs);
	uma_zone_set_warning(zone_mbuf, "kern.ipc.nmbufs limit reached");
	uma_zone_set_maxaction(zone_mbuf, mb_reclaim);

	zone_clust = uma_zcreate(MBUF_CLUSTER_MEM_NAME, MCLBYTES,
	    mb_ctor_clust,
#ifdef INVARIANTS
	    trash_dtor, trash_init, trash_fini,
#else
	    NULL, NULL, NULL,
#endif
	    UMA_ALIGN_PTR, 0);
	if (nmbclusters > 0)
		nmbclusters = uma_zone_set_max(zone_clust, nmbclusters);
	uma_zone_set_warning(zone_clust, "kern.ipc.nmbclusters limit reached");
	uma_zone_set_maxaction(zone_clust, mb_reclaim);

	zone_pack = uma_zsecond_create(MBUF_PACKET_MEM_NAME, mb_ctor_pack,
	    mb_dtor_pack, mb_zinit_pack, mb_zfini_pack, zone_mbuf);

	/* Make the jumbo frame zones too: page size, 9k and 16k. */
	zone_jumbop = uma_zcreate(MBUF_JUMBOP_MEM_NAME, MJUMPAGESIZE,
	    mb_ctor_clust,
#ifdef INVARIANTS
	    trash_dtor, trash_init, trash_fini,
#else
	    NULL, NULL, NULL,
#endif
	    UMA_ALIGN_PTR, 0);
	if (nmbjumbop > 0)
		nmbjumbop = uma_zone_set_max(zone_jumbop, nmbjumbop);
	uma_zone_set_warning(zone_jumbop, "kern.ipc.nmbjumbop limit reached");
	uma_zone_set_maxaction(zone_jumbop, mb_reclaim);

	zone_jumbo9 = uma_zcreate(MBUF_JUMBO9_MEM_NAME, MJUM9BYTES,
	    mb_ctor_clust,
#ifdef INVARIANTS
	    trash_dtor, trash_init, trash_fini,
#else
	    NULL, NULL, NULL,
#endif
	    UMA_ALIGN_PTR, 0);
	uma_zone_set_allocf(zone_jumbo9, mbuf_jumbo_alloc);
	if (nmbjumbo9 > 0)
		nmbjumbo9 = uma_zone_set_max(zone_jumbo9, nmbjumbo9);
	uma_zone_set_warning(zone_jumbo9, "kern.ipc.nmbjumbo9 limit reached");
	uma_zone_set_maxaction(zone_jumbo9, mb_reclaim);

	zone_jumbo16 = uma_zcreate(MBUF_JUMBO16_MEM_NAME, MJUM16BYTES,
	    mb_ctor_clust,
#ifdef INVARIANTS
	    trash_dtor, trash_init, trash_fini,
#else
	    NULL, NULL, NULL,
#endif
	    UMA_ALIGN_PTR, 0);
	uma_zone_set_allocf(zone_jumbo16, mbuf_jumbo_alloc);
	if (nmbjumbo16 > 0)
		nmbjumbo16 = uma_zone_set_max(zone_jumbo16, nmbjumbo16);
	uma_zone_set_warning(zone_jumbo16, "kern.ipc.nmbjumbo16 limit reached");
	uma_zone_set_maxaction(zone_jumbo16, mb_reclaim);

	zone_extpgs = uma_zcreate(MBUF_EXTPGS_MEM_NAME,
	    sizeof(struct mbuf_ext_pgs),
#ifdef INVARIANTS
	    trash_ctor, trash_dtor, trash_init, trash_fini,
#else
	    NULL, NULL, NULL, NULL,
#endif
	    UMA_ALIGN_CACHE, 0);

	/*
	 * Hook event handler for low-memory situation, used to
	 * drain protocols and push data back to the caches (UMA
	 * later pushes it back to VM).
	 */
	EVENTHANDLER_REGISTER(vm_lowmem, mb_reclaim, NULL,
	    EVENTHANDLER_PRI_FIRST);

	snd_tag_count = counter_u64_alloc(M_WAITOK);
}
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbuf_init, NULL);

#ifdef NETDUMP
/*
 * netdump makes use of a pre-allocated pool of mbufs and clusters.  When
 * netdump is configured, we initialize a set of UMA cache zones which return
 * items from this pool.  At panic-time, the regular UMA zone pointers are
 * overwritten with those of the cache zones so that drivers may allocate and
 * free mbufs and clusters without attempting to allocate physical memory.
 *
 * We keep mbufs and clusters in a pair of mbuf queues.  In particular, for
 * the purpose of caching clusters, we treat them as mbufs.
 */
static struct mbufq nd_mbufq =
    { STAILQ_HEAD_INITIALIZER(nd_mbufq.mq_head), 0, INT_MAX };
static struct mbufq nd_clustq =
    { STAILQ_HEAD_INITIALIZER(nd_clustq.mq_head), 0, INT_MAX };

static int nd_clsize;
static uma_zone_t nd_zone_mbuf;
static uma_zone_t nd_zone_clust;
static uma_zone_t nd_zone_pack;

static int
nd_buf_import(void *arg, void **store, int count, int domain __unused,
    int flags)
{
	struct mbufq *q;
	struct mbuf *m;
	int i;

	q = arg;

	for (i = 0; i < count; i++) {
		m = mbufq_dequeue(q);
		if (m == NULL)
			break;
		trash_init(m, q == &nd_mbufq ? MSIZE : nd_clsize, flags);
		store[i] = m;
	}
	KASSERT((flags & M_WAITOK) == 0 || i == count,
	    ("%s: ran out of pre-allocated mbufs", __func__));
	return (i);
}

static void
nd_buf_release(void *arg, void **store, int count)
{
	struct mbufq *q;
	struct mbuf *m;
	int i;

	q = arg;

	for (i = 0; i < count; i++) {
		m = store[i];
		(void)mbufq_enqueue(q, m);
	}
}

static int
nd_pack_import(void *arg __unused, void **store, int count, int domain __unused,
    int flags __unused)
{
	struct mbuf *m;
	void *clust;
	int i;

	for (i = 0; i < count; i++) {
		m = m_get(MT_DATA, M_NOWAIT);
		if (m == NULL)
			break;
		clust = uma_zalloc(nd_zone_clust, M_NOWAIT);
		if (clust == NULL) {
			m_free(m);
			break;
		}
		mb_ctor_clust(clust, nd_clsize, m, 0);
		store[i] = m;
	}
	KASSERT((flags & M_WAITOK) == 0 || i == count,
	    ("%s: ran out of pre-allocated mbufs", __func__));
	return (i);
}

static void
nd_pack_release(void *arg __unused, void **store, int count)
{
	struct mbuf *m;
	void *clust;
	int i;

	for (i = 0; i < count; i++) {
		m = store[i];
		clust = m->m_ext.ext_buf;
		uma_zfree(nd_zone_clust, clust);
		uma_zfree(nd_zone_mbuf, m);
	}
}

/*
 * Free the pre-allocated mbufs and clusters reserved for netdump, and destroy
 * the corresponding UMA cache zones.
 */
void
netdump_mbuf_drain(void)
{
	struct mbuf *m;
	void *item;

	if (nd_zone_mbuf != NULL) {
		uma_zdestroy(nd_zone_mbuf);
		nd_zone_mbuf = NULL;
	}
	if (nd_zone_clust != NULL) {
		uma_zdestroy(nd_zone_clust);
		nd_zone_clust = NULL;
	}
	if (nd_zone_pack != NULL) {
		uma_zdestroy(nd_zone_pack);
		nd_zone_pack = NULL;
	}

	while ((m = mbufq_dequeue(&nd_mbufq)) != NULL)
		m_free(m);
	while ((item = mbufq_dequeue(&nd_clustq)) != NULL)
		uma_zfree(m_getzone(nd_clsize), item);
}

/*
 * Callback invoked immediately prior to starting a netdump.
 */
void
netdump_mbuf_dump(void)
{

	/*
	 * All cluster zones return buffers of the size requested by the
	 * drivers.  It's up to the driver to reinitialize the zones if the
	 * MTU of a netdump-enabled interface changes.
	 */
	printf("netdump: overwriting mbuf zone pointers\n");
	zone_mbuf = nd_zone_mbuf;
	zone_clust = nd_zone_clust;
	zone_pack = nd_zone_pack;
	zone_jumbop = nd_zone_clust;
	zone_jumbo9 = nd_zone_clust;
	zone_jumbo16 = nd_zone_clust;
}

/*
 * Reinitialize the netdump mbuf+cluster pool and cache zones.
 */
void
netdump_mbuf_reinit(int nmbuf, int nclust, int clsize)
{
	struct mbuf *m;
	void *item;

	netdump_mbuf_drain();

	nd_clsize = clsize;

	nd_zone_mbuf = uma_zcache_create("netdump_" MBUF_MEM_NAME,
	    MSIZE, mb_ctor_mbuf, mb_dtor_mbuf,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    nd_buf_import, nd_buf_release,
	    &nd_mbufq, UMA_ZONE_NOBUCKET);

	nd_zone_clust = uma_zcache_create("netdump_" MBUF_CLUSTER_MEM_NAME,
	    clsize, mb_ctor_clust,
#ifdef INVARIANTS
	    trash_dtor, trash_init, trash_fini,
#else
	    NULL, NULL, NULL,
#endif
	    nd_buf_import, nd_buf_release,
	    &nd_clustq, UMA_ZONE_NOBUCKET);

	nd_zone_pack = uma_zcache_create("netdump_" MBUF_PACKET_MEM_NAME,
	    MCLBYTES, mb_ctor_pack, mb_dtor_pack, NULL, NULL,
	    nd_pack_import, nd_pack_release,
	    NULL, UMA_ZONE_NOBUCKET);

	while (nmbuf-- > 0) {
		m = m_get(MT_DATA, M_WAITOK);
		uma_zfree(nd_zone_mbuf, m);
	}
	while (nclust-- > 0) {
		item = uma_zalloc(m_getzone(nd_clsize), M_WAITOK);
		uma_zfree(nd_zone_clust, item);
	}
}
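
/*
 * Example (sketch, counts illustrative only): a netdump client sizes
 * the pool for its interface before arming netdump, e.g. for a
 * standard 1500-byte MTU:
 *
 *	netdump_mbuf_reinit(1024, 128, MCLBYTES);
 */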
#endif /* NETDUMP */

/*
 * UMA backend page allocator for the jumbo frame zones.
 *
 * Allocates kernel virtual memory that is backed by contiguous physical
 * pages.
 */
static void *
mbuf_jumbo_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags,
    int wait)
{

	/* Inform UMA that this allocator uses kernel_map/object. */
	*flags = UMA_SLAB_KERNEL;
	return ((void *)kmem_alloc_contig_domainset(DOMAINSET_FIXED(domain),
	    bytes, wait, (vm_paddr_t)0, ~(vm_paddr_t)0, 1, 0,
	    VM_MEMATTR_DEFAULT));
}

/*
 * Constructor for Mbuf master zone.
 *
 * The 'arg' pointer points to a mb_args structure which
 * contains call-specific information required to support the
 * mbuf allocation API.  See mbuf.h.
 */
static int
mb_ctor_mbuf(void *mem, int size, void *arg, int how)
{
	struct mbuf *m;
	struct mb_args *args;
	int error;
	int flags;
	short type;

#ifdef INVARIANTS
	trash_ctor(mem, size, arg, how);
#endif
	args = (struct mb_args *)arg;
	type = args->type;

	/*
	 * The mbuf is initialized later.  The caller has the
	 * responsibility to set up any MAC labels too.
	 */
	if (type == MT_NOINIT)
		return (0);

	m = (struct mbuf *)mem;
	flags = args->flags;
	MPASS((flags & M_NOFREE) == 0);

	error = m_init(m, how, type, flags);

	return (error);
}

/*
 * The Mbuf master zone destructor.
 */
static void
mb_dtor_mbuf(void *mem, int size, void *arg)
{
	struct mbuf *m;
	unsigned long flags;

	m = (struct mbuf *)mem;
	flags = (unsigned long)arg;

	KASSERT((m->m_flags & M_NOFREE) == 0, ("%s: M_NOFREE set", __func__));
	if (!(flags & MB_DTOR_SKIP) && (m->m_flags & M_PKTHDR) &&
	    !SLIST_EMPTY(&m->m_pkthdr.tags))
		m_tag_delete_chain(m, NULL);
#ifdef INVARIANTS
	trash_dtor(mem, size, arg);
#endif
}

/*
 * The Mbuf Packet zone destructor.
 */
static void
mb_dtor_pack(void *mem, int size, void *arg)
{
	struct mbuf *m;

	m = (struct mbuf *)mem;
	if ((m->m_flags & M_PKTHDR) != 0)
		m_tag_delete_chain(m, NULL);

	/* Make sure we've got a clean cluster back. */
	KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
	KASSERT(m->m_ext.ext_buf != NULL, ("%s: ext_buf == NULL", __func__));
	KASSERT(m->m_ext.ext_free == NULL, ("%s: ext_free != NULL", __func__));
	KASSERT(m->m_ext.ext_arg1 == NULL, ("%s: ext_arg1 != NULL", __func__));
	KASSERT(m->m_ext.ext_arg2 == NULL, ("%s: ext_arg2 != NULL", __func__));
	KASSERT(m->m_ext.ext_size == MCLBYTES,
	    ("%s: ext_size != MCLBYTES", __func__));
	KASSERT(m->m_ext.ext_type == EXT_PACKET,
	    ("%s: ext_type != EXT_PACKET", __func__));
#ifdef INVARIANTS
	trash_dtor(m->m_ext.ext_buf, MCLBYTES, arg);
#endif
	/*
	 * If there are processes blocked on zone_clust, waiting for pages
	 * to be freed up, cause them to be woken up by draining the
	 * packet zone.  We are exposed to a race here (in the check for
	 * the UMA_ZFLAG_FULL) where we might miss the flag set, but that
	 * is deliberate.  We don't want to acquire the zone lock for every
	 * mbuf free.
	 */
	if (uma_zone_exhausted_nolock(zone_clust))
		zone_drain(zone_pack);
}

/*
 * The Cluster and Jumbo[PAGESIZE|9|16] zone constructor.
 *
 * Here the 'arg' pointer points to the Mbuf which we
 * are configuring cluster storage for.  If 'arg' is
 * NULL we allocate just the cluster without attaching
 * it to any mbuf.  See mbuf.h.
 */
static int
mb_ctor_clust(void *mem, int size, void *arg, int how)
{
	struct mbuf *m;

#ifdef INVARIANTS
	trash_ctor(mem, size, arg, how);
#endif
	m = (struct mbuf *)arg;
	if (m != NULL) {
		m->m_ext.ext_buf = (char *)mem;
		m->m_data = m->m_ext.ext_buf;
		m->m_flags |= M_EXT;
		m->m_ext.ext_free = NULL;
		m->m_ext.ext_arg1 = NULL;
		m->m_ext.ext_arg2 = NULL;
		m->m_ext.ext_size = size;
		m->m_ext.ext_type = m_gettype(size);
		m->m_ext.ext_flags = EXT_FLAG_EMBREF;
		m->m_ext.ext_count = 1;
	}

	return (0);
}

/*
 * The Packet secondary zone's init routine, executed on the
 * object's transition from mbuf keg slab to zone cache.
 */
static int
mb_zinit_pack(void *mem, int size, int how)
{
	struct mbuf *m;

	m = (struct mbuf *)mem;		/* m is virgin. */
	if (uma_zalloc_arg(zone_clust, m, how) == NULL ||
	    m->m_ext.ext_buf == NULL)
		return (ENOMEM);
	m->m_ext.ext_type = EXT_PACKET;	/* Override. */
#ifdef INVARIANTS
	trash_init(m->m_ext.ext_buf, MCLBYTES, how);
#endif
	return (0);
}

/*
 * The Packet secondary zone's fini routine, executed on the
 * object's transition from zone cache to keg slab.
 */
static void
mb_zfini_pack(void *mem, int size)
{
	struct mbuf *m;

	m = (struct mbuf *)mem;
#ifdef INVARIANTS
	trash_fini(m->m_ext.ext_buf, MCLBYTES);
#endif
	uma_zfree_arg(zone_clust, m->m_ext.ext_buf, NULL);
#ifdef INVARIANTS
	trash_dtor(mem, size, NULL);
#endif
}

/*
 * The "packet" keg constructor.
 */
static int
mb_ctor_pack(void *mem, int size, void *arg, int how)
{
	struct mbuf *m;
	struct mb_args *args;
	int error, flags;
	short type;

	m = (struct mbuf *)mem;
	args = (struct mb_args *)arg;
	flags = args->flags;
	type = args->type;
	MPASS((flags & M_NOFREE) == 0);

#ifdef INVARIANTS
	trash_ctor(m->m_ext.ext_buf, MCLBYTES, arg, how);
#endif

	error = m_init(m, how, type, flags);

	/* m_ext is already initialized. */
	m->m_data = m->m_ext.ext_buf;
	m->m_flags = (flags | M_EXT);

	return (error);
}

/*
 * This is the protocol drain routine.  Called by UMA whenever any of the
 * mbuf zones is close to its limit.
 *
 * No locks should be held when this is called.  The drain routines have to
 * presently acquire some locks which raises the possibility of lock order
 * reversal.
 */
static void
mb_reclaim(uma_zone_t zone __unused, int pending __unused)
{
	struct domain *dp;
	struct protosw *pr;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK | WARN_PANIC, NULL, __func__);

	for (dp = domains; dp != NULL; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain != NULL)
				(*pr->pr_drain)();
}

/*
 * Free "count" units of I/O from an mbuf chain.  They could be held
 * in EXT_PGS or just as a normal mbuf.  This code is intended to be
 * called in an error path (I/O error, closed connection, etc).
 */
void
mb_free_notready(struct mbuf *m, int count)
{
	int i;

	for (i = 0; i < count && m != NULL; i++) {
		if ((m->m_flags & M_EXT) != 0 &&
		    m->m_ext.ext_type == EXT_PGS) {
			m->m_ext.ext_pgs->nrdy--;
			if (m->m_ext.ext_pgs->nrdy != 0)
				continue;
		}
		m = m_free(m);
	}
	KASSERT(i == count, ("Removed only %d items from %p", i, m));
}

/*
 * Compress an unmapped mbuf into a simple mbuf when it holds a small
 * amount of data.  This is used as a DOS defense to avoid having
 * small packets tie up wired pages, an ext_pgs structure, and an
 * mbuf.  Since this converts the existing mbuf in place, it can only
 * be used if there are no other references to 'm'.
 */
int
mb_unmapped_compress(struct mbuf *m)
{
	volatile u_int *refcnt;
	struct mbuf m_temp;

	/*
	 * Assert that 'm' does not have a packet header.  If 'm' had
	 * a packet header, it would only be able to hold MHLEN bytes
	 * and m_data would have to be initialized differently.
	 */
	KASSERT((m->m_flags & M_PKTHDR) == 0 && (m->m_flags & M_EXT) &&
	    m->m_ext.ext_type == EXT_PGS,
	    ("%s: m %p !M_EXT or !EXT_PGS or M_PKTHDR", __func__, m));
	KASSERT(m->m_len <= MLEN, ("m_len too large %p", m));

	if (m->m_ext.ext_flags & EXT_FLAG_EMBREF) {
		refcnt = &m->m_ext.ext_count;
	} else {
		KASSERT(m->m_ext.ext_cnt != NULL,
		    ("%s: no refcounting pointer on %p", __func__, m));
		refcnt = m->m_ext.ext_cnt;
	}

	if (*refcnt != 1)
		return (EBUSY);

	/*
	 * Copy mbuf header and m_ext portion of 'm' to 'm_temp' to
	 * create a "fake" EXT_PGS mbuf that can be used with
	 * m_copydata() as well as the ext_free callback.
	 */
	memcpy(&m_temp, m, offsetof(struct mbuf, m_ext) + sizeof(m->m_ext));
	m_temp.m_next = NULL;
	m_temp.m_nextpkt = NULL;

	/* Turn 'm' into a "normal" mbuf. */
	m->m_flags &= ~(M_EXT | M_RDONLY | M_NOMAP);
	m->m_data = m->m_dat;

	/* Copy data from template's ext_pgs. */
	m_copydata(&m_temp, 0, m_temp.m_len, mtod(m, caddr_t));

	/* Free the backing pages. */
	m_temp.m_ext.ext_free(&m_temp);

	/* Finally, free the ext_pgs struct. */
	uma_zfree(zone_extpgs, m_temp.m_ext.ext_pgs);
	return (0);
}
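
/*
 * Example (sketch): a caller holding the only reference to a small
 * EXT_PGS mbuf can demote it in place; on EBUSY the mbuf is left
 * untouched and remains unmapped:
 *
 *	if (m->m_len <= MLEN)
 *		(void)mb_unmapped_compress(m);
 */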

/*
 * These next few routines are used to permit downgrading an unmapped
 * mbuf to a chain of mapped mbufs.  This is used when an interface
 * doesn't support unmapped mbufs or if checksums need to be
 * computed in software.
 *
 * Each unmapped mbuf is converted to a chain of mbufs.  First, any
 * TLS header data is stored in a regular mbuf.  Second, each page of
 * unmapped data is stored in an mbuf with an EXT_SFBUF external
 * cluster.  These mbufs use an sf_buf to provide a valid KVA for the
 * associated physical page.  They also hold a reference on the
 * original EXT_PGS mbuf to ensure the physical page doesn't go away.
 * Finally, any TLS trailer data is stored in a regular mbuf.
 *
 * mb_unmapped_free_mext() is the ext_free handler for the EXT_SFBUF
 * mbufs.  It frees the associated sf_buf and releases its reference
 * on the original EXT_PGS mbuf.
 *
 * _mb_unmapped_to_ext() is a helper function that converts a single
 * unmapped mbuf into a chain of mbufs.
 *
 * mb_unmapped_to_ext() is the public function that walks an mbuf
 * chain converting any unmapped mbufs to mapped mbufs.  It returns
 * the new chain of mapped mbufs on success.  On failure it frees
 * the original mbuf chain and returns NULL.  (A usage sketch follows
 * mb_unmapped_to_ext() below.)
 */
static void
mb_unmapped_free_mext(struct mbuf *m)
{
	struct sf_buf *sf;
	struct mbuf *old_m;

	sf = m->m_ext.ext_arg1;
	sf_buf_free(sf);

	/* Drop the reference on the backing EXT_PGS mbuf. */
	old_m = m->m_ext.ext_arg2;
	mb_free_ext(old_m);
}

static struct mbuf *
_mb_unmapped_to_ext(struct mbuf *m)
{
	struct mbuf_ext_pgs *ext_pgs;
	struct mbuf *m_new, *top, *prev, *mref;
	struct sf_buf *sf;
	vm_page_t pg;
	int i, len, off, pglen, pgoff, seglen, segoff;
	volatile u_int *refcnt;
	u_int ref_inc = 0;

	MBUF_EXT_PGS_ASSERT(m);
	ext_pgs = m->m_ext.ext_pgs;
	len = m->m_len;
	KASSERT(ext_pgs->tls == NULL, ("%s: can't convert TLS mbuf %p",
	    __func__, m));

	/* See if this is the mbuf that holds the embedded refcount. */
	if (m->m_ext.ext_flags & EXT_FLAG_EMBREF) {
		refcnt = &m->m_ext.ext_count;
		mref = m;
	} else {
		KASSERT(m->m_ext.ext_cnt != NULL,
		    ("%s: no refcounting pointer on %p", __func__, m));
		refcnt = m->m_ext.ext_cnt;
		mref = __containerof(refcnt, struct mbuf, m_ext.ext_count);
	}

	/* Skip over any data removed from the front. */
	off = mtod(m, vm_offset_t);

	top = NULL;
	if (ext_pgs->hdr_len != 0) {
		if (off >= ext_pgs->hdr_len) {
			off -= ext_pgs->hdr_len;
		} else {
			seglen = ext_pgs->hdr_len - off;
			segoff = off;
			seglen = min(seglen, len);
			off = 0;
			len -= seglen;
			m_new = m_get(M_NOWAIT, MT_DATA);
			if (m_new == NULL)
				goto fail;
			m_new->m_len = seglen;
			prev = top = m_new;
			memcpy(mtod(m_new, void *), &ext_pgs->hdr[segoff],
			    seglen);
		}
	}
	pgoff = ext_pgs->first_pg_off;
	for (i = 0; i < ext_pgs->npgs && len > 0; i++) {
		pglen = mbuf_ext_pg_len(ext_pgs, i, pgoff);
		if (off >= pglen) {
			off -= pglen;
			pgoff = 0;
			continue;
		}
		seglen = pglen - off;
		segoff = pgoff + off;
		off = 0;
		seglen = min(seglen, len);
		len -= seglen;

		pg = PHYS_TO_VM_PAGE(ext_pgs->pa[i]);
		m_new = m_get(M_NOWAIT, MT_DATA);
		if (m_new == NULL)
			goto fail;
		if (top == NULL) {
			top = prev = m_new;
		} else {
			prev->m_next = m_new;
			prev = m_new;
		}
		sf = sf_buf_alloc(pg, SFB_NOWAIT);
		if (sf == NULL)
			goto fail;

		ref_inc++;
		m_extadd(m_new, (char *)sf_buf_kva(sf), PAGE_SIZE,
		    mb_unmapped_free_mext, sf, mref, M_RDONLY, EXT_SFBUF);
		m_new->m_data += segoff;
		m_new->m_len = seglen;

		pgoff = 0;
	}
	if (len != 0) {
		KASSERT((off + len) <= ext_pgs->trail_len,
		    ("off + len > trail (%d + %d > %d)", off, len,
		    ext_pgs->trail_len));
		m_new = m_get(M_NOWAIT, MT_DATA);
		if (m_new == NULL)
			goto fail;
		if (top == NULL)
			top = m_new;
		else
			prev->m_next = m_new;
		m_new->m_len = len;
		memcpy(mtod(m_new, void *), &ext_pgs->trail[off], len);
	}

	if (ref_inc != 0) {
		/*
		 * Obtain an additional reference on the old mbuf for
		 * each created EXT_SFBUF mbuf.  They will be dropped
		 * in mb_unmapped_free_mext().
		 */
		if (*refcnt == 1)
			*refcnt += ref_inc;
		else
			atomic_add_int(refcnt, ref_inc);
	}
	m_free(m);
	return (top);

fail:
	if (ref_inc != 0) {
		/*
		 * Obtain an additional reference on the old mbuf for
		 * each created EXT_SFBUF mbuf.  They will be
		 * immediately dropped when these mbufs are freed
		 * below.
		 */
		if (*refcnt == 1)
			*refcnt += ref_inc;
		else
			atomic_add_int(refcnt, ref_inc);
	}
	m_free(m);
	m_freem(top);
	return (NULL);
}

struct mbuf *
mb_unmapped_to_ext(struct mbuf *top)
{
	struct mbuf *m, *next, *prev;

	prev = NULL;
	for (m = top; m != NULL; m = next) {
		/* m might be freed, so cache the next pointer. */
		next = m->m_next;
		if (m->m_flags & M_NOMAP) {
			if (prev != NULL) {
				/*
				 * Remove 'm' from the new chain so
				 * that the 'top' chain terminates
				 * before 'm' in case 'top' is freed
				 * due to an error.
				 */
				prev->m_next = NULL;
			}
			m = _mb_unmapped_to_ext(m);
			if (m == NULL) {
				m_freem(top);
				m_freem(next);
				return (NULL);
			}
			if (prev == NULL) {
				top = m;
			} else {
				prev->m_next = m;
			}

			/*
			 * Replaced one mbuf with a chain, so we must
			 * find the end of chain.
			 */
			prev = m_last(m);
		} else {
			if (prev != NULL) {
				prev->m_next = m;
			}
			prev = m;
		}
	}
	return (top);
}
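
/*
 * Usage sketch (illustrative): a driver that cannot handle unmapped
 * mbufs converts the whole chain before DMA-mapping it; the original
 * chain is consumed either way:
 *
 *	m = mb_unmapped_to_ext(m);
 *	if (m == NULL)
 *		return (ENOBUFS);
 */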

/*
 * Allocate an empty EXT_PGS mbuf.  The ext_free routine is
 * responsible for freeing any pages backing this mbuf when it is
 * freed.
 */
struct mbuf *
mb_alloc_ext_pgs(int how, bool pkthdr, m_ext_free_t ext_free)
{
	struct mbuf *m;
	struct mbuf_ext_pgs *ext_pgs;

	if (pkthdr)
		m = m_gethdr(how, MT_DATA);
	else
		m = m_get(how, MT_DATA);
	if (m == NULL)
		return (NULL);

	ext_pgs = uma_zalloc(zone_extpgs, how);
	if (ext_pgs == NULL) {
		m_free(m);
		return (NULL);
	}
	ext_pgs->npgs = 0;
	ext_pgs->nrdy = 0;
	ext_pgs->first_pg_off = 0;
	ext_pgs->last_pg_len = 0;
	ext_pgs->hdr_len = 0;
	ext_pgs->trail_len = 0;
	ext_pgs->tls = NULL;
	ext_pgs->so = NULL;
	m->m_data = NULL;
	m->m_flags |= (M_EXT | M_RDONLY | M_NOMAP);
	m->m_ext.ext_type = EXT_PGS;
	m->m_ext.ext_flags = EXT_FLAG_EMBREF;
	m->m_ext.ext_count = 1;
	m->m_ext.ext_pgs = ext_pgs;
	m->m_ext.ext_size = 0;
	m->m_ext.ext_free = ext_free;
	return (m);
}
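
/*
 * Example (sketch): a producer such as sendfile allocates an unmapped
 * mbuf and then fills ext_pgs->pa[] with wired pages; "my_pgs_free"
 * stands in for the caller's ext_free routine and is hypothetical:
 *
 *	m = mb_alloc_ext_pgs(M_WAITOK, false, my_pgs_free);
 */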

#ifdef INVARIANT_SUPPORT
void
mb_ext_pgs_check(struct mbuf_ext_pgs *ext_pgs)
{

	/*
	 * NB: This expects a non-empty buffer (npgs > 0 and
	 * last_pg_len > 0).
	 */
	KASSERT(ext_pgs->npgs > 0,
	    ("ext_pgs with no valid pages: %p", ext_pgs));
	KASSERT(ext_pgs->npgs <= nitems(ext_pgs->pa),
	    ("ext_pgs with too many pages: %p", ext_pgs));
	KASSERT(ext_pgs->nrdy <= ext_pgs->npgs,
	    ("ext_pgs with too many ready pages: %p", ext_pgs));
	KASSERT(ext_pgs->first_pg_off < PAGE_SIZE,
	    ("ext_pgs with too large page offset: %p", ext_pgs));
	KASSERT(ext_pgs->last_pg_len > 0,
	    ("ext_pgs with zero last page length: %p", ext_pgs));
	KASSERT(ext_pgs->last_pg_len <= PAGE_SIZE,
	    ("ext_pgs with too large last page length: %p", ext_pgs));
	if (ext_pgs->npgs == 1) {
		KASSERT(ext_pgs->first_pg_off + ext_pgs->last_pg_len <=
		    PAGE_SIZE, ("ext_pgs with single page too large: %p",
		    ext_pgs));
	}
	KASSERT(ext_pgs->hdr_len <= sizeof(ext_pgs->hdr),
	    ("ext_pgs with too large header length: %p", ext_pgs));
	KASSERT(ext_pgs->trail_len <= sizeof(ext_pgs->trail),
	    ("ext_pgs with too large trailer length: %p", ext_pgs));
}
#endif

/*
 * Clean up after mbufs with M_EXT storage attached to them if the
 * reference count hits 1.
 */
void
mb_free_ext(struct mbuf *m)
{
	volatile u_int *refcnt;
	struct mbuf *mref;
	int freembuf;

	KASSERT(m->m_flags & M_EXT, ("%s: M_EXT not set on %p", __func__, m));

	/* See if this is the mbuf that holds the embedded refcount. */
	if (m->m_ext.ext_flags & EXT_FLAG_EMBREF) {
		refcnt = &m->m_ext.ext_count;
		mref = m;
	} else {
		KASSERT(m->m_ext.ext_cnt != NULL,
		    ("%s: no refcounting pointer on %p", __func__, m));
		refcnt = m->m_ext.ext_cnt;
		mref = __containerof(refcnt, struct mbuf, m_ext.ext_count);
	}

	/*
	 * Check if the header is embedded in the cluster.  It is
	 * important that we can't touch any of the mbuf fields
	 * after we have freed the external storage, since the mbuf
	 * could have been embedded in it.  For now, the mbufs
	 * embedded into the cluster are always of type EXT_EXTREF,
	 * and for this type we won't free the mref.
	 */
	if (m->m_flags & M_NOFREE) {
		freembuf = 0;
		KASSERT(m->m_ext.ext_type == EXT_EXTREF ||
		    m->m_ext.ext_type == EXT_RXRING,
		    ("%s: no-free mbuf %p has wrong type", __func__, m));
	} else
		freembuf = 1;

	/* Free attached storage if this mbuf is the only reference to it. */
	if (*refcnt == 1 || atomic_fetchadd_int(refcnt, -1) == 1) {
		switch (m->m_ext.ext_type) {
		case EXT_PACKET:
			/* The packet zone is special. */
			if (*refcnt == 0)
				*refcnt = 1;
			uma_zfree(zone_pack, mref);
			break;
		case EXT_CLUSTER:
			uma_zfree(zone_clust, m->m_ext.ext_buf);
			uma_zfree(zone_mbuf, mref);
			break;
		case EXT_JUMBOP:
			uma_zfree(zone_jumbop, m->m_ext.ext_buf);
			uma_zfree(zone_mbuf, mref);
			break;
		case EXT_JUMBO9:
			uma_zfree(zone_jumbo9, m->m_ext.ext_buf);
			uma_zfree(zone_mbuf, mref);
			break;
		case EXT_JUMBO16:
			uma_zfree(zone_jumbo16, m->m_ext.ext_buf);
			uma_zfree(zone_mbuf, mref);
			break;
		case EXT_PGS:
			KASSERT(mref->m_ext.ext_free != NULL,
			    ("%s: ext_free not set", __func__));
			mref->m_ext.ext_free(mref);
			uma_zfree(zone_extpgs, mref->m_ext.ext_pgs);
			uma_zfree(zone_mbuf, mref);
			break;
		case EXT_SFBUF:
		case EXT_NET_DRV:
		case EXT_MOD_TYPE:
		case EXT_DISPOSABLE:
			KASSERT(mref->m_ext.ext_free != NULL,
			    ("%s: ext_free not set", __func__));
			mref->m_ext.ext_free(mref);
			uma_zfree(zone_mbuf, mref);
			break;
		case EXT_EXTREF:
			KASSERT(m->m_ext.ext_free != NULL,
			    ("%s: ext_free not set", __func__));
			m->m_ext.ext_free(m);
			break;
		case EXT_RXRING:
			KASSERT(m->m_ext.ext_free == NULL,
			    ("%s: ext_free is set", __func__));
			break;
		default:
			KASSERT(m->m_ext.ext_type == 0,
			    ("%s: unknown ext_type", __func__));
		}
	}

	if (freembuf && m != mref)
		uma_zfree(zone_mbuf, m);
}

/*
 * Official mbuf(9) allocation KPI for stack and drivers:
 *
 * m_get()	- a single mbuf without any attachments, sys/mbuf.h.
 * m_gethdr()	- a single mbuf initialized as M_PKTHDR, sys/mbuf.h.
 * m_getcl()	- an mbuf + 2k cluster, sys/mbuf.h.
 * m_clget()	- attach a cluster to an already allocated mbuf.
 * m_cljget()	- attach a jumbo cluster to an already allocated mbuf.
 * m_get2()	- allocate the minimum mbuf that would fit the size argument.
 * m_getm2()	- allocate a chain of mbufs/clusters.
 * m_extadd()	- attach external cluster to mbuf.
 *
 * m_free()	- free single mbuf with its tags and ext, sys/mbuf.h.
 * m_freem()	- free chain of mbufs.
 */
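
/*
 * Example (sketch): building a chain big enough for "len" bytes with
 * the KPI above, then releasing it; "len" is whatever the caller
 * needs:
 *
 *	struct mbuf *m;
 *
 *	m = m_getm2(NULL, len, M_WAITOK, MT_DATA, M_PKTHDR);
 *	...
 *	m_freem(m);
 */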

int
m_clget(struct mbuf *m, int how)
{

	KASSERT((m->m_flags & M_EXT) == 0, ("%s: mbuf %p has M_EXT",
	    __func__, m));
	m->m_ext.ext_buf = NULL;
	uma_zalloc_arg(zone_clust, m, how);
	/*
	 * On a cluster allocation failure, drain the packet zone and retry,
	 * we might be able to loosen a few clusters up on the drain.
	 */
	if ((how & M_NOWAIT) && (m->m_ext.ext_buf == NULL)) {
		zone_drain(zone_pack);
		uma_zalloc_arg(zone_clust, m, how);
	}
	MBUF_PROBE2(m__clget, m, how);
	return (m->m_flags & M_EXT);
}

/*
 * m_cljget() is different from m_clget() as it can allocate clusters without
 * attaching them to an mbuf.  In that case the return value is the pointer
 * to the cluster of the requested size.  If an mbuf was specified, it gets
 * the cluster attached to it and the return value can be safely ignored.
 * For size it takes MCLBYTES, MJUMPAGESIZE, MJUM9BYTES, MJUM16BYTES.
 */
void *
m_cljget(struct mbuf *m, int how, int size)
{
	uma_zone_t zone;
	void *retval;

	if (m != NULL) {
		KASSERT((m->m_flags & M_EXT) == 0, ("%s: mbuf %p has M_EXT",
		    __func__, m));
		m->m_ext.ext_buf = NULL;
	}

	zone = m_getzone(size);
	retval = uma_zalloc_arg(zone, m, how);

	MBUF_PROBE4(m__cljget, m, how, size, retval);

	return (retval);
}
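
/*
 * Example (sketch): allocating a bare 9k cluster and freeing it back
 * to its zone; m_getzone() maps the size back to the owning zone:
 *
 *	void *cl;
 *
 *	cl = m_cljget(NULL, M_NOWAIT, MJUM9BYTES);
 *	if (cl != NULL)
 *		uma_zfree(m_getzone(MJUM9BYTES), cl);
 */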

/*
 * m_get2() allocates the smallest mbuf, possibly with a cluster
 * attached, that can hold "size" bytes.  Requests larger than
 * MJUMPAGESIZE fail and return NULL.
 */
struct mbuf *
m_get2(int size, int how, short type, int flags)
{
	struct mb_args args;
	struct mbuf *m, *n;

	args.flags = flags;
	args.type = type;

	if (size <= MHLEN || (size <= MLEN && (flags & M_PKTHDR) == 0))
		return (uma_zalloc_arg(zone_mbuf, &args, how));
	if (size <= MCLBYTES)
		return (uma_zalloc_arg(zone_pack, &args, how));

	if (size > MJUMPAGESIZE)
		return (NULL);

	m = uma_zalloc_arg(zone_mbuf, &args, how);
	if (m == NULL)
		return (NULL);

	n = uma_zalloc_arg(zone_jumbop, m, how);
	if (n == NULL) {
		uma_zfree(zone_mbuf, m);
		return (NULL);
	}

	return (m);
}

/*
 * m_getjcl() returns an mbuf with a cluster of the specified size attached.
 * For size it takes MCLBYTES, MJUMPAGESIZE, MJUM9BYTES, MJUM16BYTES.
 */
struct mbuf *
m_getjcl(int how, short type, int flags, int size)
{
	struct mb_args args;
	struct mbuf *m, *n;
	uma_zone_t zone;

	if (size == MCLBYTES)
		return (m_getcl(how, type, flags));

	args.flags = flags;
	args.type = type;

	m = uma_zalloc_arg(zone_mbuf, &args, how);
	if (m == NULL)
		return (NULL);

	zone = m_getzone(size);
	n = uma_zalloc_arg(zone, m, how);
	if (n == NULL) {
		uma_zfree(zone_mbuf, m);
		return (NULL);
	}
	return (m);
}

/*
 * Allocate a given length worth of mbufs and/or clusters (whatever fits
 * best) and return a pointer to the top of the allocated chain.  If an
 * existing mbuf chain is provided, then we will append the new chain
 * to the existing one and return a pointer to the provided mbuf.
 */
struct mbuf *
m_getm2(struct mbuf *m, int len, int how, short type, int flags)
{
	struct mbuf *mb, *nm = NULL, *mtail = NULL;

	KASSERT(len >= 0, ("%s: len is < 0", __func__));

	/* Validate flags. */
	flags &= (M_PKTHDR | M_EOR);

	/* Packet header mbuf must be first in chain. */
	if ((flags & M_PKTHDR) && m != NULL)
		flags &= ~M_PKTHDR;

	/* Loop and append maximum sized mbufs to the chain tail. */
	while (len > 0) {
		if (len > MCLBYTES)
			mb = m_getjcl(how, type, (flags & M_PKTHDR),
			    MJUMPAGESIZE);
		else if (len >= MINCLSIZE)
			mb = m_getcl(how, type, (flags & M_PKTHDR));
		else if (flags & M_PKTHDR)
			mb = m_gethdr(how, type);
		else
			mb = m_get(how, type);

		/* Fail the whole operation if one mbuf can't be allocated. */
		if (mb == NULL) {
			if (nm != NULL)
				m_freem(nm);
			return (NULL);
		}

		/* Bookkeeping. */
		len -= M_SIZE(mb);
		if (mtail != NULL)
			mtail->m_next = mb;
		else
			nm = mb;
		mtail = mb;
		flags &= ~M_PKTHDR;	/* Only valid on the first mbuf. */
	}
	if (flags & M_EOR)
		mtail->m_flags |= M_EOR;  /* Only valid on the last mbuf. */

	/* If mbuf was supplied, append new chain to the end of it. */
	if (m != NULL) {
		for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next)
			;
		mtail->m_next = nm;
		mtail->m_flags &= ~M_EOR;
	} else
		m = nm;

	return (m);
}

/*-
 * Configure a provided mbuf to refer to the provided external storage
 * buffer and set up a reference count for said buffer.
 *
 * Arguments:
 *    mb     The existing mbuf to which to attach the provided buffer.
 *    buf    The address of the provided external storage buffer.
 *    size   The size of the provided buffer.
 *    freef  A pointer to a routine that is responsible for freeing the
 *           provided external storage buffer.
 *    arg1,  Opaque pointers (of any type) to be passed to the provided
 *    arg2   freef routine (either may be NULL).
 *    flags  Any other flags to be passed to the provided mbuf.
 *    type   The type that the external storage buffer should be
 *           labeled with.
 *
 * Returns:
 *    Nothing.
 */
void
m_extadd(struct mbuf *mb, char *buf, u_int size, m_ext_free_t freef,
    void *arg1, void *arg2, int flags, int type)
{

	KASSERT(type != EXT_CLUSTER, ("%s: EXT_CLUSTER not allowed", __func__));

	mb->m_flags |= (M_EXT | flags);
	mb->m_ext.ext_buf = buf;
	mb->m_data = mb->m_ext.ext_buf;
	mb->m_ext.ext_size = size;
	mb->m_ext.ext_free = freef;
	mb->m_ext.ext_arg1 = arg1;
	mb->m_ext.ext_arg2 = arg2;
	mb->m_ext.ext_type = type;

	if (type != EXT_EXTREF) {
		mb->m_ext.ext_count = 1;
		mb->m_ext.ext_flags = EXT_FLAG_EMBREF;
	} else
		mb->m_ext.ext_flags = 0;
}
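
/*
 * Example (sketch): attaching a driver-owned receive buffer with a
 * custom free routine; "my_rxbuf_free", "sc" and MY_RXBUF_SIZE are
 * hypothetical:
 *
 *	m_extadd(m, sc->rxbuf, MY_RXBUF_SIZE, my_rxbuf_free, sc, NULL,
 *	    0, EXT_NET_DRV);
 */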

/*
 * Free an entire chain of mbufs and associated external buffers, if
 * applicable.
 */
void
m_freem(struct mbuf *mb)
{

	MBUF_PROBE1(m__freem, mb);
	while (mb != NULL)
		mb = m_free(mb);
}

void
m_snd_tag_init(struct m_snd_tag *mst, struct ifnet *ifp)
{

	if_ref(ifp);
	mst->ifp = ifp;
	refcount_init(&mst->refcount, 1);
	counter_u64_add(snd_tag_count, 1);
}

void
m_snd_tag_destroy(struct m_snd_tag *mst)
{
	struct ifnet *ifp;

	ifp = mst->ifp;
	ifp->if_snd_tag_free(mst);
	if_rele(ifp);
	counter_u64_add(snd_tag_count, -1);
}
1583