xref: /freebsd/sys/kern/kern_mbuf.c (revision 48c779cdecb5f803e5fe5d761987e976ca9609db)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2004, 2005,
5  *	Bosko Milekic <bmilekic@FreeBSD.org>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice unmodified, this list of conditions and the following
12  *    disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 
33 #include "opt_param.h"
34 #include "opt_kern_tls.h"
35 
36 #include <sys/param.h>
37 #include <sys/conf.h>
38 #include <sys/domainset.h>
39 #include <sys/malloc.h>
40 #include <sys/systm.h>
41 #include <sys/mbuf.h>
42 #include <sys/domain.h>
43 #include <sys/eventhandler.h>
44 #include <sys/kernel.h>
45 #include <sys/ktls.h>
46 #include <sys/limits.h>
47 #include <sys/lock.h>
48 #include <sys/mutex.h>
49 #include <sys/protosw.h>
50 #include <sys/refcount.h>
51 #include <sys/sf_buf.h>
52 #include <sys/smp.h>
53 #include <sys/socket.h>
54 #include <sys/sysctl.h>
55 
56 #include <net/if.h>
57 #include <net/if_var.h>
58 
59 #include <vm/vm.h>
60 #include <vm/vm_extern.h>
61 #include <vm/vm_kern.h>
62 #include <vm/vm_page.h>
63 #include <vm/vm_map.h>
64 #include <vm/uma.h>
65 #include <vm/uma_dbg.h>
66 
67 /*
68  * In FreeBSD, Mbufs and Mbuf Clusters are allocated from UMA
69  * Zones.
70  *
71  * Mbuf Clusters (2K, contiguous) are allocated from the Cluster
72  * Zone.  The Zone can be capped at kern.ipc.nmbclusters, if the
73  * administrator so desires.
74  *
75  * Mbufs are allocated from a UMA Master Zone called the Mbuf
76  * Zone.
77  *
78  * Additionally, FreeBSD provides a Packet Zone, which it
79  * configures as a Secondary Zone to the Mbuf Master Zone,
80  * thus sharing backend Slab kegs with the Mbuf Master Zone.
81  *
82  * Thus common-case allocations and locking are simplified:
83  *
84  *  m_clget()                m_getcl()
85  *    |                         |
86  *    |   .------------>[(Packet Cache)]    m_get(), m_gethdr()
87  *    |   |             [     Packet   ]            |
88  *  [(Cluster Cache)]   [    Secondary ]   [ (Mbuf Cache)     ]
89  *  [ Cluster Zone  ]   [     Zone     ]   [ Mbuf Master Zone ]
90  *        |                       \________         |
91  *  [ Cluster Keg   ]                      \       /
92  *        |	                         [ Mbuf Keg   ]
93  *  [ Cluster Slabs ]                         |
94  *        |                              [ Mbuf Slabs ]
95  *         \____________(VM)_________________/
96  *
97  *
98  * Whenever an object is allocated with uma_zalloc() out of
99  * one of the Zones its _ctor_ function is executed.  Likewise,
100  * on any deallocation through uma_zfree() the _dtor_ function
101  * is executed.
102  *
103  * Caches are per-CPU and are filled from the Master Zone.
104  *
105  * Whenever an object is allocated from the underlying global
106  * memory pool it gets pre-initialized with the _zinit_ functions.
107  * When the Kegs are overfull, objects get decommissioned with
108  * _zfini_ functions and free'd back to the global memory pool.
109  *
110  */
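
/*
 * Illustrative sketch (simplified; the real allocation wrappers live in
 * sys/mbuf.h): the mbuf(9) KPI below boils down to uma_zalloc_arg() calls
 * against these zones, with a struct mb_args carrying the type and flags
 * into the constructor:
 *
 *	struct mb_args args = { .flags = M_PKTHDR, .type = MT_DATA };
 *	struct mbuf *m = uma_zalloc_arg(zone_mbuf, &args, M_NOWAIT);
 *
 * which runs mb_ctor_mbuf() and thus m_init(m, M_NOWAIT, MT_DATA,
 * M_PKTHDR).
 */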
111 
112 int nmbufs;			/* limits number of mbufs */
113 int nmbclusters;		/* limits number of mbuf clusters */
114 int nmbjumbop;			/* limits number of page size jumbo clusters */
115 int nmbjumbo9;			/* limits number of 9k jumbo clusters */
116 int nmbjumbo16;			/* limits number of 16k jumbo clusters */
117 
118 bool mb_use_ext_pgs;		/* use EXT_PGS mbufs for sendfile & TLS */
119 SYSCTL_BOOL(_kern_ipc, OID_AUTO, mb_use_ext_pgs, CTLFLAG_RWTUN,
120     &mb_use_ext_pgs, 0,
121     "Use unmapped mbufs for sendfile(2) and TLS offload");
122 
123 static quad_t maxmbufmem;	/* overall real memory limit for all mbufs */
124 
125 SYSCTL_QUAD(_kern_ipc, OID_AUTO, maxmbufmem, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &maxmbufmem, 0,
126     "Maximum real memory allocatable to various mbuf types");
127 
128 static counter_u64_t snd_tag_count;
129 SYSCTL_COUNTER_U64(_kern_ipc, OID_AUTO, num_snd_tags, CTLFLAG_RW,
130     &snd_tag_count, "# of active mbuf send tags");
131 
132 /*
133  * tunable_mbinit() has to be run before any mbuf allocations are done.
134  */
135 static void
136 tunable_mbinit(void *dummy)
137 {
138 	quad_t realmem;
139 
140 	/*
141 	 * The default limit for all mbuf related memory is 1/2 of all
142 	 * available kernel memory (physical or kmem).
143 	 * At most it can be 3/4 of available kernel memory.
144 	 */
145 	realmem = qmin((quad_t)physmem * PAGE_SIZE, vm_kmem_size);
146 	maxmbufmem = realmem / 2;
147 	TUNABLE_QUAD_FETCH("kern.ipc.maxmbufmem", &maxmbufmem);
148 	if (maxmbufmem > realmem / 4 * 3)
149 		maxmbufmem = realmem / 4 * 3;
150 
151 	TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
152 	if (nmbclusters == 0)
153 		nmbclusters = maxmbufmem / MCLBYTES / 4;
154 
155 	TUNABLE_INT_FETCH("kern.ipc.nmbjumbop", &nmbjumbop);
156 	if (nmbjumbop == 0)
157 		nmbjumbop = maxmbufmem / MJUMPAGESIZE / 4;
158 
159 	TUNABLE_INT_FETCH("kern.ipc.nmbjumbo9", &nmbjumbo9);
160 	if (nmbjumbo9 == 0)
161 		nmbjumbo9 = maxmbufmem / MJUM9BYTES / 6;
162 
163 	TUNABLE_INT_FETCH("kern.ipc.nmbjumbo16", &nmbjumbo16);
164 	if (nmbjumbo16 == 0)
165 		nmbjumbo16 = maxmbufmem / MJUM16BYTES / 6;
166 
167 	/*
168 	 * We need at least as many mbufs as we have clusters of
169 	 * the various types added together.
170 	 */
171 	TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);
172 	if (nmbufs < nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16)
173 		nmbufs = lmax(maxmbufmem / MSIZE / 5,
174 		    nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16);
175 }
176 SYSINIT(tunable_mbinit, SI_SUB_KMEM, SI_ORDER_MIDDLE, tunable_mbinit, NULL);
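
/*
 * Example (values are illustrative only): the limits computed above can
 * be overridden at boot time via loader(8) tunables, e.g. in
 * /boot/loader.conf:
 *
 *	kern.ipc.maxmbufmem="1073741824"
 *	kern.ipc.nmbclusters="262144"
 */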
177 
178 static int
179 sysctl_nmbclusters(SYSCTL_HANDLER_ARGS)
180 {
181 	int error, newnmbclusters;
182 
183 	newnmbclusters = nmbclusters;
184 	error = sysctl_handle_int(oidp, &newnmbclusters, 0, req);
185 	if (error == 0 && req->newptr && newnmbclusters != nmbclusters) {
186 		if (newnmbclusters > nmbclusters &&
187 		    nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
188 			nmbclusters = newnmbclusters;
189 			nmbclusters = uma_zone_set_max(zone_clust, nmbclusters);
190 			EVENTHANDLER_INVOKE(nmbclusters_change);
191 		} else
192 			error = EINVAL;
193 	}
194 	return (error);
195 }
196 SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbclusters, CTLTYPE_INT|CTLFLAG_RW,
197     &nmbclusters, 0, sysctl_nmbclusters, "IU",
198     "Maximum number of mbuf clusters allowed");
199 
200 static int
201 sysctl_nmbjumbop(SYSCTL_HANDLER_ARGS)
202 {
203 	int error, newnmbjumbop;
204 
205 	newnmbjumbop = nmbjumbop;
206 	error = sysctl_handle_int(oidp, &newnmbjumbop, 0, req);
207 	if (error == 0 && req->newptr && newnmbjumbop != nmbjumbop) {
208 		if (newnmbjumbop > nmbjumbop &&
209 		    nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
210 			nmbjumbop = newnmbjumbop;
211 			nmbjumbop = uma_zone_set_max(zone_jumbop, nmbjumbop);
212 		} else
213 			error = EINVAL;
214 	}
215 	return (error);
216 }
217 SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbop, CTLTYPE_INT|CTLFLAG_RW,
218     &nmbjumbop, 0, sysctl_nmbjumbop, "IU",
219     "Maximum number of mbuf page size jumbo clusters allowed");
220 
221 static int
222 sysctl_nmbjumbo9(SYSCTL_HANDLER_ARGS)
223 {
224 	int error, newnmbjumbo9;
225 
226 	newnmbjumbo9 = nmbjumbo9;
227 	error = sysctl_handle_int(oidp, &newnmbjumbo9, 0, req);
228 	if (error == 0 && req->newptr && newnmbjumbo9 != nmbjumbo9) {
229 		if (newnmbjumbo9 > nmbjumbo9 &&
230 		    nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
231 			nmbjumbo9 = newnmbjumbo9;
232 			nmbjumbo9 = uma_zone_set_max(zone_jumbo9, nmbjumbo9);
233 		} else
234 			error = EINVAL;
235 	}
236 	return (error);
237 }
238 SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbo9, CTLTYPE_INT|CTLFLAG_RW,
239     &nmbjumbo9, 0, sysctl_nmbjumbo9, "IU",
240     "Maximum number of mbuf 9k jumbo clusters allowed");
241 
242 static int
243 sysctl_nmbjumbo16(SYSCTL_HANDLER_ARGS)
244 {
245 	int error, newnmbjumbo16;
246 
247 	newnmbjumbo16 = nmbjumbo16;
248 	error = sysctl_handle_int(oidp, &newnmbjumbo16, 0, req);
249 	if (error == 0 && req->newptr && newnmbjumbo16 != nmbjumbo16) {
250 		if (newnmbjumbo16 > nmbjumbo16 &&
251 		    nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
252 			nmbjumbo16 = newnmbjumbo16;
253 			nmbjumbo16 = uma_zone_set_max(zone_jumbo16, nmbjumbo16);
254 		} else
255 			error = EINVAL;
256 	}
257 	return (error);
258 }
259 SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbo16, CTLTYPE_INT|CTLFLAG_RW,
260     &nmbjumbo16, 0, sysctl_nmbjumbo16, "IU",
261     "Maximum number of mbuf 16k jumbo clusters allowed");
262 
263 static int
264 sysctl_nmbufs(SYSCTL_HANDLER_ARGS)
265 {
266 	int error, newnmbufs;
267 
268 	newnmbufs = nmbufs;
269 	error = sysctl_handle_int(oidp, &newnmbufs, 0, req);
270 	if (error == 0 && req->newptr && newnmbufs != nmbufs) {
271 		if (newnmbufs > nmbufs) {
272 			nmbufs = newnmbufs;
273 			nmbufs = uma_zone_set_max(zone_mbuf, nmbufs);
274 			EVENTHANDLER_INVOKE(nmbufs_change);
275 		} else
276 			error = EINVAL;
277 	}
278 	return (error);
279 }
280 SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbufs, CTLTYPE_INT|CTLFLAG_RW,
281     &nmbufs, 0, sysctl_nmbufs, "IU",
282     "Maximum number of mbufs allowed");
283 
284 /*
285  * Zones from which we allocate.
286  */
287 uma_zone_t	zone_mbuf;
288 uma_zone_t	zone_clust;
289 uma_zone_t	zone_pack;
290 uma_zone_t	zone_jumbop;
291 uma_zone_t	zone_jumbo9;
292 uma_zone_t	zone_jumbo16;
293 uma_zone_t	zone_extpgs;
294 
295 /*
296  * Local prototypes.
297  */
298 static int	mb_ctor_mbuf(void *, int, void *, int);
299 static int	mb_ctor_clust(void *, int, void *, int);
300 static int	mb_ctor_pack(void *, int, void *, int);
301 static void	mb_dtor_mbuf(void *, int, void *);
302 static void	mb_dtor_pack(void *, int, void *);
303 static int	mb_zinit_pack(void *, int, int);
304 static void	mb_zfini_pack(void *, int);
305 static void	mb_reclaim(uma_zone_t, int);
306 static void    *mbuf_jumbo_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
307 
308 /* Ensure that MSIZE is a power of 2. */
309 CTASSERT((((MSIZE - 1) ^ MSIZE) + 1) >> 1 == MSIZE);
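/*
 * Worked example of the check above, assuming MSIZE == 256:
 * (255 ^ 256) == 511 and (511 + 1) >> 1 == 256 == MSIZE.  For a
 * non-power-of-two value such as 320: (319 ^ 320) == 127 and
 * (127 + 1) >> 1 == 64 != 320, so the assertion would fail.
 */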
310 
311 _Static_assert(sizeof(struct mbuf_ext_pgs) == 256,
312     "mbuf_ext_pgs size mismatch");
313 
314 /*
315  * Initialize FreeBSD Network buffer allocation.
316  */
317 static void
318 mbuf_init(void *dummy)
319 {
320 
321 	/*
322 	 * Configure UMA zones for Mbufs, Clusters, and Packets.
323 	 */
324 	zone_mbuf = uma_zcreate(MBUF_MEM_NAME, MSIZE,
325 	    mb_ctor_mbuf, mb_dtor_mbuf,
326 #ifdef INVARIANTS
327 	    trash_init, trash_fini,
328 #else
329 	    NULL, NULL,
330 #endif
331 	    MSIZE - 1, UMA_ZONE_MAXBUCKET);
332 	if (nmbufs > 0)
333 		nmbufs = uma_zone_set_max(zone_mbuf, nmbufs);
334 	uma_zone_set_warning(zone_mbuf, "kern.ipc.nmbufs limit reached");
335 	uma_zone_set_maxaction(zone_mbuf, mb_reclaim);
336 
337 	zone_clust = uma_zcreate(MBUF_CLUSTER_MEM_NAME, MCLBYTES,
338 	    mb_ctor_clust,
339 #ifdef INVARIANTS
340 	    trash_dtor, trash_init, trash_fini,
341 #else
342 	    NULL, NULL, NULL,
343 #endif
344 	    UMA_ALIGN_PTR, 0);
345 	if (nmbclusters > 0)
346 		nmbclusters = uma_zone_set_max(zone_clust, nmbclusters);
347 	uma_zone_set_warning(zone_clust, "kern.ipc.nmbclusters limit reached");
348 	uma_zone_set_maxaction(zone_clust, mb_reclaim);
349 
350 	zone_pack = uma_zsecond_create(MBUF_PACKET_MEM_NAME, mb_ctor_pack,
351 	    mb_dtor_pack, mb_zinit_pack, mb_zfini_pack, zone_mbuf);
352 
353 	/* Make jumbo frame zone too. Page size, 9k and 16k. */
354 	zone_jumbop = uma_zcreate(MBUF_JUMBOP_MEM_NAME, MJUMPAGESIZE,
355 	    mb_ctor_clust,
356 #ifdef INVARIANTS
357 	    trash_dtor, trash_init, trash_fini,
358 #else
359 	    NULL, NULL, NULL,
360 #endif
361 	    UMA_ALIGN_PTR, 0);
362 	if (nmbjumbop > 0)
363 		nmbjumbop = uma_zone_set_max(zone_jumbop, nmbjumbop);
364 	uma_zone_set_warning(zone_jumbop, "kern.ipc.nmbjumbop limit reached");
365 	uma_zone_set_maxaction(zone_jumbop, mb_reclaim);
366 
367 	zone_jumbo9 = uma_zcreate(MBUF_JUMBO9_MEM_NAME, MJUM9BYTES,
368 	    mb_ctor_clust,
369 #ifdef INVARIANTS
370 	    trash_dtor, trash_init, trash_fini,
371 #else
372 	    NULL, NULL, NULL,
373 #endif
374 	    UMA_ALIGN_PTR, 0);
375 	uma_zone_set_allocf(zone_jumbo9, mbuf_jumbo_alloc);
376 	if (nmbjumbo9 > 0)
377 		nmbjumbo9 = uma_zone_set_max(zone_jumbo9, nmbjumbo9);
378 	uma_zone_set_warning(zone_jumbo9, "kern.ipc.nmbjumbo9 limit reached");
379 	uma_zone_set_maxaction(zone_jumbo9, mb_reclaim);
380 
381 	zone_jumbo16 = uma_zcreate(MBUF_JUMBO16_MEM_NAME, MJUM16BYTES,
382 	    mb_ctor_clust,
383 #ifdef INVARIANTS
384 	    trash_dtor, trash_init, trash_fini,
385 #else
386 	    NULL, NULL, NULL,
387 #endif
388 	    UMA_ALIGN_PTR, 0);
389 	uma_zone_set_allocf(zone_jumbo16, mbuf_jumbo_alloc);
390 	if (nmbjumbo16 > 0)
391 		nmbjumbo16 = uma_zone_set_max(zone_jumbo16, nmbjumbo16);
392 	uma_zone_set_warning(zone_jumbo16, "kern.ipc.nmbjumbo16 limit reached");
393 	uma_zone_set_maxaction(zone_jumbo16, mb_reclaim);
394 
395 	zone_extpgs = uma_zcreate(MBUF_EXTPGS_MEM_NAME,
396 	    sizeof(struct mbuf_ext_pgs),
397 #ifdef INVARIANTS
398 	    trash_ctor, trash_dtor, trash_init, trash_fini,
399 #else
400 	    NULL, NULL, NULL, NULL,
401 #endif
402 	    UMA_ALIGN_CACHE, 0);
403 
404 	/*
405 	 * Hook event handler for low-memory situation, used to
406 	 * drain protocols and push data back to the caches (UMA
407 	 * later pushes it back to VM).
408 	 */
409 	EVENTHANDLER_REGISTER(vm_lowmem, mb_reclaim, NULL,
410 	    EVENTHANDLER_PRI_FIRST);
411 
412 	snd_tag_count = counter_u64_alloc(M_WAITOK);
413 }
414 SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbuf_init, NULL);
415 
416 #ifdef DEBUGNET
417 /*
418  * debugnet makes use of a pre-allocated pool of mbufs and clusters.  When
419  * debugnet is configured, we initialize a set of UMA cache zones which return
420  * items from this pool.  At panic-time, the regular UMA zone pointers are
421  * overwritten with those of the cache zones so that drivers may allocate and
422  * free mbufs and clusters without attempting to allocate physical memory.
423  *
424  * We keep mbufs and clusters in a pair of mbuf queues.  In particular, for
425  * the purpose of caching clusters, we treat them as mbufs.
426  */
427 static struct mbufq dn_mbufq =
428     { STAILQ_HEAD_INITIALIZER(dn_mbufq.mq_head), 0, INT_MAX };
429 static struct mbufq dn_clustq =
430     { STAILQ_HEAD_INITIALIZER(dn_clustq.mq_head), 0, INT_MAX };
431 
432 static int dn_clsize;
433 static uma_zone_t dn_zone_mbuf;
434 static uma_zone_t dn_zone_clust;
435 static uma_zone_t dn_zone_pack;
436 
437 static struct debugnet_saved_zones {
438 	uma_zone_t dsz_mbuf;
439 	uma_zone_t dsz_clust;
440 	uma_zone_t dsz_pack;
441 	uma_zone_t dsz_jumbop;
442 	uma_zone_t dsz_jumbo9;
443 	uma_zone_t dsz_jumbo16;
444 	bool dsz_debugnet_zones_enabled;
445 } dn_saved_zones;
446 
447 static int
448 dn_buf_import(void *arg, void **store, int count, int domain __unused,
449     int flags)
450 {
451 	struct mbufq *q;
452 	struct mbuf *m;
453 	int i;
454 
455 	q = arg;
456 
457 	for (i = 0; i < count; i++) {
458 		m = mbufq_dequeue(q);
459 		if (m == NULL)
460 			break;
461 		trash_init(m, q == &dn_mbufq ? MSIZE : dn_clsize, flags);
462 		store[i] = m;
463 	}
464 	KASSERT((flags & M_WAITOK) == 0 || i == count,
465 	    ("%s: ran out of pre-allocated mbufs", __func__));
466 	return (i);
467 }
468 
469 static void
470 dn_buf_release(void *arg, void **store, int count)
471 {
472 	struct mbufq *q;
473 	struct mbuf *m;
474 	int i;
475 
476 	q = arg;
477 
478 	for (i = 0; i < count; i++) {
479 		m = store[i];
480 		(void)mbufq_enqueue(q, m);
481 	}
482 }
483 
484 static int
485 dn_pack_import(void *arg __unused, void **store, int count, int domain __unused,
486     int flags __unused)
487 {
488 	struct mbuf *m;
489 	void *clust;
490 	int i;
491 
492 	for (i = 0; i < count; i++) {
493 		m = m_get(MT_DATA, M_NOWAIT);
494 		if (m == NULL)
495 			break;
496 		clust = uma_zalloc(dn_zone_clust, M_NOWAIT);
497 		if (clust == NULL) {
498 			m_free(m);
499 			break;
500 		}
501 		mb_ctor_clust(clust, dn_clsize, m, 0);
502 		store[i] = m;
503 	}
504 	KASSERT((flags & M_WAITOK) == 0 || i == count,
505 	    ("%s: ran out of pre-allocated mbufs", __func__));
506 	return (i);
507 }
508 
509 static void
510 dn_pack_release(void *arg __unused, void **store, int count)
511 {
512 	struct mbuf *m;
513 	void *clust;
514 	int i;
515 
516 	for (i = 0; i < count; i++) {
517 		m = store[i];
518 		clust = m->m_ext.ext_buf;
519 		uma_zfree(dn_zone_clust, clust);
520 		uma_zfree(dn_zone_mbuf, m);
521 	}
522 }
523 
524 /*
525  * Free the pre-allocated mbufs and clusters reserved for debugnet, and destroy
526  * the corresponding UMA cache zones.
527  */
528 void
529 debugnet_mbuf_drain(void)
530 {
531 	struct mbuf *m;
532 	void *item;
533 
534 	if (dn_zone_mbuf != NULL) {
535 		uma_zdestroy(dn_zone_mbuf);
536 		dn_zone_mbuf = NULL;
537 	}
538 	if (dn_zone_clust != NULL) {
539 		uma_zdestroy(dn_zone_clust);
540 		dn_zone_clust = NULL;
541 	}
542 	if (dn_zone_pack != NULL) {
543 		uma_zdestroy(dn_zone_pack);
544 		dn_zone_pack = NULL;
545 	}
546 
547 	while ((m = mbufq_dequeue(&dn_mbufq)) != NULL)
548 		m_free(m);
549 	while ((item = mbufq_dequeue(&dn_clustq)) != NULL)
550 		uma_zfree(m_getzone(dn_clsize), item);
551 }
552 
553 /*
554  * Callback invoked immediately prior to starting a debugnet connection.
555  */
556 void
557 debugnet_mbuf_start(void)
558 {
559 
560 	MPASS(!dn_saved_zones.dsz_debugnet_zones_enabled);
561 
562 	/* Save the old zone pointers to restore when debugnet is closed. */
563 	dn_saved_zones = (struct debugnet_saved_zones) {
564 		.dsz_debugnet_zones_enabled = true,
565 		.dsz_mbuf = zone_mbuf,
566 		.dsz_clust = zone_clust,
567 		.dsz_pack = zone_pack,
568 		.dsz_jumbop = zone_jumbop,
569 		.dsz_jumbo9 = zone_jumbo9,
570 		.dsz_jumbo16 = zone_jumbo16,
571 	};
572 
573 	/*
574 	 * All cluster zones return buffers of the size requested by the
575 	 * drivers.  It's up to the driver to reinitialize the zones if the
576 	 * MTU of a debugnet-enabled interface changes.
577 	 */
578 	printf("debugnet: overwriting mbuf zone pointers\n");
579 	zone_mbuf = dn_zone_mbuf;
580 	zone_clust = dn_zone_clust;
581 	zone_pack = dn_zone_pack;
582 	zone_jumbop = dn_zone_clust;
583 	zone_jumbo9 = dn_zone_clust;
584 	zone_jumbo16 = dn_zone_clust;
585 }
586 
587 /*
588  * Callback invoked when a debugnet connection is closed/finished.
589  */
590 void
591 debugnet_mbuf_finish(void)
592 {
593 
594 	MPASS(dn_saved_zones.dsz_debugnet_zones_enabled);
595 
596 	printf("debugnet: restoring mbuf zone pointers\n");
597 	zone_mbuf = dn_saved_zones.dsz_mbuf;
598 	zone_clust = dn_saved_zones.dsz_clust;
599 	zone_pack = dn_saved_zones.dsz_pack;
600 	zone_jumbop = dn_saved_zones.dsz_jumbop;
601 	zone_jumbo9 = dn_saved_zones.dsz_jumbo9;
602 	zone_jumbo16 = dn_saved_zones.dsz_jumbo16;
603 
604 	memset(&dn_saved_zones, 0, sizeof(dn_saved_zones));
605 }
606 
607 /*
608  * Reinitialize the debugnet mbuf+cluster pool and cache zones.
609  */
610 void
611 debugnet_mbuf_reinit(int nmbuf, int nclust, int clsize)
612 {
613 	struct mbuf *m;
614 	void *item;
615 
616 	debugnet_mbuf_drain();
617 
618 	dn_clsize = clsize;
619 
620 	dn_zone_mbuf = uma_zcache_create("debugnet_" MBUF_MEM_NAME,
621 	    MSIZE, mb_ctor_mbuf, mb_dtor_mbuf,
622 #ifdef INVARIANTS
623 	    trash_init, trash_fini,
624 #else
625 	    NULL, NULL,
626 #endif
627 	    dn_buf_import, dn_buf_release,
628 	    &dn_mbufq, UMA_ZONE_NOBUCKET);
629 
630 	dn_zone_clust = uma_zcache_create("debugnet_" MBUF_CLUSTER_MEM_NAME,
631 	    clsize, mb_ctor_clust,
632 #ifdef INVARIANTS
633 	    trash_dtor, trash_init, trash_fini,
634 #else
635 	    NULL, NULL, NULL,
636 #endif
637 	    dn_buf_import, dn_buf_release,
638 	    &dn_clustq, UMA_ZONE_NOBUCKET);
639 
640 	dn_zone_pack = uma_zcache_create("debugnet_" MBUF_PACKET_MEM_NAME,
641 	    MCLBYTES, mb_ctor_pack, mb_dtor_pack, NULL, NULL,
642 	    dn_pack_import, dn_pack_release,
643 	    NULL, UMA_ZONE_NOBUCKET);
644 
645 	while (nmbuf-- > 0) {
646 		m = m_get(MT_DATA, M_WAITOK);
647 		uma_zfree(dn_zone_mbuf, m);
648 	}
649 	while (nclust-- > 0) {
650 		item = uma_zalloc(m_getzone(dn_clsize), M_WAITOK);
651 		uma_zfree(dn_zone_clust, item);
652 	}
653 }
654 #endif /* DEBUGNET */
655 
656 /*
657  * UMA backend page allocator for the jumbo frame zones.
658  *
659  * Allocates kernel virtual memory that is backed by contiguous physical
660  * pages.
661  */
662 static void *
663 mbuf_jumbo_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags,
664     int wait)
665 {
666 
667 	/* Inform UMA that this allocator uses kernel_map/object. */
668 	*flags = UMA_SLAB_KERNEL;
669 	return ((void *)kmem_alloc_contig_domainset(DOMAINSET_FIXED(domain),
670 	    bytes, wait, (vm_paddr_t)0, ~(vm_paddr_t)0, 1, 0,
671 	    VM_MEMATTR_DEFAULT));
672 }
673 
674 /*
675  * Constructor for Mbuf master zone.
676  *
677  * The 'arg' pointer points to a mb_args structure which
678  * contains call-specific information required to support the
679  * mbuf allocation API.  See mbuf.h.
680  */
681 static int
682 mb_ctor_mbuf(void *mem, int size, void *arg, int how)
683 {
684 	struct mbuf *m;
685 	struct mb_args *args;
686 	int error;
687 	int flags;
688 	short type;
689 
690 #ifdef INVARIANTS
691 	trash_ctor(mem, size, arg, how);
692 #endif
693 	args = (struct mb_args *)arg;
694 	type = args->type;
695 
696 	/*
697 	 * The mbuf is initialized later.  The caller has the
698 	 * responsibility to set up any MAC labels too.
699 	 */
700 	if (type == MT_NOINIT)
701 		return (0);
702 
703 	m = (struct mbuf *)mem;
704 	flags = args->flags;
705 	MPASS((flags & M_NOFREE) == 0);
706 
707 	error = m_init(m, how, type, flags);
708 
709 	return (error);
710 }
711 
712 /*
713  * The Mbuf master zone destructor.
714  */
715 static void
716 mb_dtor_mbuf(void *mem, int size, void *arg)
717 {
718 	struct mbuf *m;
719 	unsigned long flags;
720 
721 	m = (struct mbuf *)mem;
722 	flags = (unsigned long)arg;
723 
724 	KASSERT((m->m_flags & M_NOFREE) == 0, ("%s: M_NOFREE set", __func__));
725 	if (!(flags & MB_DTOR_SKIP) && (m->m_flags & M_PKTHDR) && !SLIST_EMPTY(&m->m_pkthdr.tags))
726 		m_tag_delete_chain(m, NULL);
727 #ifdef INVARIANTS
728 	trash_dtor(mem, size, arg);
729 #endif
730 }
731 
732 /*
733  * The Mbuf Packet zone destructor.
734  */
735 static void
736 mb_dtor_pack(void *mem, int size, void *arg)
737 {
738 	struct mbuf *m;
739 
740 	m = (struct mbuf *)mem;
741 	if ((m->m_flags & M_PKTHDR) != 0)
742 		m_tag_delete_chain(m, NULL);
743 
744 	/* Make sure we've got a clean cluster back. */
745 	KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
746 	KASSERT(m->m_ext.ext_buf != NULL, ("%s: ext_buf == NULL", __func__));
747 	KASSERT(m->m_ext.ext_free == NULL, ("%s: ext_free != NULL", __func__));
748 	KASSERT(m->m_ext.ext_arg1 == NULL, ("%s: ext_arg1 != NULL", __func__));
749 	KASSERT(m->m_ext.ext_arg2 == NULL, ("%s: ext_arg2 != NULL", __func__));
750 	KASSERT(m->m_ext.ext_size == MCLBYTES, ("%s: ext_size != MCLBYTES", __func__));
751 	KASSERT(m->m_ext.ext_type == EXT_PACKET, ("%s: ext_type != EXT_PACKET", __func__));
752 #ifdef INVARIANTS
753 	trash_dtor(m->m_ext.ext_buf, MCLBYTES, arg);
754 #endif
755 	/*
756 	 * If there are processes blocked on zone_clust, waiting for pages
757 	 * to be freed up, cause them to be woken up by draining the
758 	 * packet zone.  We are exposed to a race here (in the check for
759 	 * the UMA_ZFLAG_FULL) where we might miss the flag set, but that
760 	 * is deliberate. We don't want to acquire the zone lock for every
761 	 * mbuf free.
762 	 */
763 	if (uma_zone_exhausted_nolock(zone_clust))
764 		uma_zone_reclaim(zone_pack, UMA_RECLAIM_DRAIN);
765 }
766 
767 /*
768  * The Cluster and Jumbo[PAGESIZE|9|16] zone constructor.
769  *
770  * Here the 'arg' pointer points to the Mbuf which we
771  * are configuring cluster storage for.  If 'arg' is
772  * NULL we allocate just the cluster without attaching
773  * it to an mbuf.  See mbuf.h.
774  */
775 static int
776 mb_ctor_clust(void *mem, int size, void *arg, int how)
777 {
778 	struct mbuf *m;
779 
780 #ifdef INVARIANTS
781 	trash_ctor(mem, size, arg, how);
782 #endif
783 	m = (struct mbuf *)arg;
784 	if (m != NULL) {
785 		m->m_ext.ext_buf = (char *)mem;
786 		m->m_data = m->m_ext.ext_buf;
787 		m->m_flags |= M_EXT;
788 		m->m_ext.ext_free = NULL;
789 		m->m_ext.ext_arg1 = NULL;
790 		m->m_ext.ext_arg2 = NULL;
791 		m->m_ext.ext_size = size;
792 		m->m_ext.ext_type = m_gettype(size);
793 		m->m_ext.ext_flags = EXT_FLAG_EMBREF;
794 		m->m_ext.ext_count = 1;
795 	}
796 
797 	return (0);
798 }
799 
800 /*
801  * The Packet secondary zone's init routine, executed on the
802  * object's transition from mbuf keg slab to zone cache.
803  */
804 static int
805 mb_zinit_pack(void *mem, int size, int how)
806 {
807 	struct mbuf *m;
808 
809 	m = (struct mbuf *)mem;		/* m is virgin. */
810 	if (uma_zalloc_arg(zone_clust, m, how) == NULL ||
811 	    m->m_ext.ext_buf == NULL)
812 		return (ENOMEM);
813 	m->m_ext.ext_type = EXT_PACKET;	/* Override. */
814 #ifdef INVARIANTS
815 	trash_init(m->m_ext.ext_buf, MCLBYTES, how);
816 #endif
817 	return (0);
818 }
819 
820 /*
821  * The Packet secondary zone's fini routine, executed on the
822  * object's transition from zone cache to keg slab.
823  */
824 static void
825 mb_zfini_pack(void *mem, int size)
826 {
827 	struct mbuf *m;
828 
829 	m = (struct mbuf *)mem;
830 #ifdef INVARIANTS
831 	trash_fini(m->m_ext.ext_buf, MCLBYTES);
832 #endif
833 	uma_zfree_arg(zone_clust, m->m_ext.ext_buf, NULL);
834 #ifdef INVARIANTS
835 	trash_dtor(mem, size, NULL);
836 #endif
837 }
838 
839 /*
840  * The "packet" keg constructor.
841  */
842 static int
843 mb_ctor_pack(void *mem, int size, void *arg, int how)
844 {
845 	struct mbuf *m;
846 	struct mb_args *args;
847 	int error, flags;
848 	short type;
849 
850 	m = (struct mbuf *)mem;
851 	args = (struct mb_args *)arg;
852 	flags = args->flags;
853 	type = args->type;
854 	MPASS((flags & M_NOFREE) == 0);
855 
856 #ifdef INVARIANTS
857 	trash_ctor(m->m_ext.ext_buf, MCLBYTES, arg, how);
858 #endif
859 
860 	error = m_init(m, how, type, flags);
861 
862 	/* m_ext is already initialized. */
863 	m->m_data = m->m_ext.ext_buf;
864 	m->m_flags = (flags | M_EXT);
865 
866 	return (error);
867 }
868 
869 /*
870  * This is the protocol drain routine.  Called by UMA whenever any of the
871  * mbuf zones is close to its limit.
872  *
873  * No locks should be held when this is called.  The drain routines have to
874  * presently acquire some locks which raises the possibility of lock order
875  * reversal.
876  */
877 static void
878 mb_reclaim(uma_zone_t zone __unused, int pending __unused)
879 {
880 	struct domain *dp;
881 	struct protosw *pr;
882 
883 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK | WARN_PANIC, NULL, __func__);
884 
885 	for (dp = domains; dp != NULL; dp = dp->dom_next)
886 		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
887 			if (pr->pr_drain != NULL)
888 				(*pr->pr_drain)();
889 }
890 
891 /*
892  * Free "count" units of I/O from an mbuf chain.  They could be held
893  * in EXT_PGS or just as a normal mbuf.  This code is intended to be
894  * called in an error path (I/O error, closed connection, etc).
895  */
896 void
897 mb_free_notready(struct mbuf *m, int count)
898 {
899 	int i;
900 
901 	for (i = 0; i < count && m != NULL; i++) {
902 		if ((m->m_flags & M_EXT) != 0 &&
903 		    m->m_ext.ext_type == EXT_PGS) {
904 			m->m_ext.ext_pgs->nrdy--;
905 			if (m->m_ext.ext_pgs->nrdy != 0)
906 				continue;
907 		}
908 		m = m_free(m);
909 	}
910 	KASSERT(i == count, ("Removed only %d items from %p", i, m));
911 }
912 
913 /*
914  * Compress an unmapped mbuf into a simple mbuf when it holds a small
915  * amount of data.  This is used as a DoS defense to avoid having
916  * small packets tie up wired pages, an ext_pgs structure, and an
917  * mbuf.  Since this converts the existing mbuf in place, it can only
918  * be used if there are no other references to 'm'.
919  */
920 int
921 mb_unmapped_compress(struct mbuf *m)
922 {
923 	volatile u_int *refcnt;
924 	struct mbuf m_temp;
925 
926 	/*
927 	 * Assert that 'm' does not have a packet header.  If 'm' had
928 	 * a packet header, it would only be able to hold MHLEN bytes
929 	 * and m_data would have to be initialized differently.
930 	 */
931 	KASSERT((m->m_flags & M_PKTHDR) == 0 && (m->m_flags & M_EXT) &&
932 	    m->m_ext.ext_type == EXT_PGS,
933 	    ("%s: m %p !M_EXT or !EXT_PGS or M_PKTHDR", __func__, m));
934 	KASSERT(m->m_len <= MLEN, ("m_len too large %p", m));
935 
936 	if (m->m_ext.ext_flags & EXT_FLAG_EMBREF) {
937 		refcnt = &m->m_ext.ext_count;
938 	} else {
939 		KASSERT(m->m_ext.ext_cnt != NULL,
940 		    ("%s: no refcounting pointer on %p", __func__, m));
941 		refcnt = m->m_ext.ext_cnt;
942 	}
943 
944 	if (*refcnt != 1)
945 		return (EBUSY);
946 
947 	/*
948 	 * Copy mbuf header and m_ext portion of 'm' to 'm_temp' to
949 	 * create a "fake" EXT_PGS mbuf that can be used with
950 	 * m_copydata() as well as the ext_free callback.
951 	 */
952 	memcpy(&m_temp, m, offsetof(struct mbuf, m_ext) + sizeof(m->m_ext));
953 	m_temp.m_next = NULL;
954 	m_temp.m_nextpkt = NULL;
955 
956 	/* Turn 'm' into a "normal" mbuf. */
957 	m->m_flags &= ~(M_EXT | M_RDONLY | M_NOMAP);
958 	m->m_data = m->m_dat;
959 
960 	/* Copy data from template's ext_pgs. */
961 	m_copydata(&m_temp, 0, m_temp.m_len, mtod(m, caddr_t));
962 
963 	/* Free the backing pages. */
964 	m_temp.m_ext.ext_free(&m_temp);
965 
966 	/* Finally, free the ext_pgs struct. */
967 	uma_zfree(zone_extpgs, m_temp.m_ext.ext_pgs);
968 	return (0);
969 }
970 
971 /*
972  * These next few routines are used to permit downgrading an unmapped
973  * mbuf to a chain of mapped mbufs.  This is used when an interface
974  * doesn't support unmapped mbufs or if checksums need to be
975  * computed in software.
976  *
977  * Each unmapped mbuf is converted to a chain of mbufs.  First, any
978  * TLS header data is stored in a regular mbuf.  Second, each page of
979  * unmapped data is stored in an mbuf with an EXT_SFBUF external
980  * cluster.  These mbufs use an sf_buf to provide a valid KVA for the
981  * associated physical page.  They also hold a reference on the
982  * original EXT_PGS mbuf to ensure the physical page doesn't go away.
983  * Finally, any TLS trailer data is stored in a regular mbuf.
984  *
985  * mb_unmapped_free_mext() is the ext_free handler for the EXT_SFBUF
986  * mbufs.  It frees the associated sf_buf and releases its reference
987  * on the original EXT_PGS mbuf.
988  *
989  * _mb_unmapped_to_ext() is a helper function that converts a single
990  * unmapped mbuf into a chain of mbufs.
991  *
992  * mb_unmapped_to_ext() is the public function that walks an mbuf
993  * chain converting any unmapped mbufs to mapped mbufs.  It returns
994  * the new chain of mapped mbufs on success.  On failure it frees
995  * the original mbuf chain and returns NULL.
996  */
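/*
 * Illustrative sketch (hypothetical layout): an unmapped mbuf carrying a
 * TLS header, two pages of payload, and a trailer would convert into a
 * four-mbuf chain:
 *
 *	[hdr copy] -> [EXT_SFBUF pg 0] -> [EXT_SFBUF pg 1] -> [trail copy]
 *
 * with each EXT_SFBUF mbuf holding a reference on the original EXT_PGS
 * mbuf.
 */
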
997 static void
998 mb_unmapped_free_mext(struct mbuf *m)
999 {
1000 	struct sf_buf *sf;
1001 	struct mbuf *old_m;
1002 
1003 	sf = m->m_ext.ext_arg1;
1004 	sf_buf_free(sf);
1005 
1006 	/* Drop the reference on the backing EXT_PGS mbuf. */
1007 	old_m = m->m_ext.ext_arg2;
1008 	mb_free_ext(old_m);
1009 }
1010 
1011 static struct mbuf *
1012 _mb_unmapped_to_ext(struct mbuf *m)
1013 {
1014 	struct mbuf_ext_pgs *ext_pgs;
1015 	struct mbuf *m_new, *top, *prev, *mref;
1016 	struct sf_buf *sf;
1017 	vm_page_t pg;
1018 	int i, len, off, pglen, pgoff, seglen, segoff;
1019 	volatile u_int *refcnt;
1020 	u_int ref_inc = 0;
1021 
1022 	MBUF_EXT_PGS_ASSERT(m);
1023 	ext_pgs = m->m_ext.ext_pgs;
1024 	len = m->m_len;
1025 	KASSERT(ext_pgs->tls == NULL, ("%s: can't convert TLS mbuf %p",
1026 	    __func__, m));
1027 
1028 	/* See if this is the mbuf that holds the embedded refcount. */
1029 	if (m->m_ext.ext_flags & EXT_FLAG_EMBREF) {
1030 		refcnt = &m->m_ext.ext_count;
1031 		mref = m;
1032 	} else {
1033 		KASSERT(m->m_ext.ext_cnt != NULL,
1034 		    ("%s: no refcounting pointer on %p", __func__, m));
1035 		refcnt = m->m_ext.ext_cnt;
1036 		mref = __containerof(refcnt, struct mbuf, m_ext.ext_count);
1037 	}
1038 
1039 	/* Skip over any data removed from the front. */
1040 	off = mtod(m, vm_offset_t);
1041 
1042 	top = NULL;
1043 	if (ext_pgs->hdr_len != 0) {
1044 		if (off >= ext_pgs->hdr_len) {
1045 			off -= ext_pgs->hdr_len;
1046 		} else {
1047 			seglen = ext_pgs->hdr_len - off;
1048 			segoff = off;
1049 			seglen = min(seglen, len);
1050 			off = 0;
1051 			len -= seglen;
1052 			m_new = m_get(M_NOWAIT, MT_DATA);
1053 			if (m_new == NULL)
1054 				goto fail;
1055 			m_new->m_len = seglen;
1056 			prev = top = m_new;
1057 			memcpy(mtod(m_new, void *), &ext_pgs->hdr[segoff],
1058 			    seglen);
1059 		}
1060 	}
1061 	pgoff = ext_pgs->first_pg_off;
1062 	for (i = 0; i < ext_pgs->npgs && len > 0; i++) {
1063 		pglen = mbuf_ext_pg_len(ext_pgs, i, pgoff);
1064 		if (off >= pglen) {
1065 			off -= pglen;
1066 			pgoff = 0;
1067 			continue;
1068 		}
1069 		seglen = pglen - off;
1070 		segoff = pgoff + off;
1071 		off = 0;
1072 		seglen = min(seglen, len);
1073 		len -= seglen;
1074 
1075 		pg = PHYS_TO_VM_PAGE(ext_pgs->pa[i]);
1076 		m_new = m_get(M_NOWAIT, MT_DATA);
1077 		if (m_new == NULL)
1078 			goto fail;
1079 		if (top == NULL) {
1080 			top = prev = m_new;
1081 		} else {
1082 			prev->m_next = m_new;
1083 			prev = m_new;
1084 		}
1085 		sf = sf_buf_alloc(pg, SFB_NOWAIT);
1086 		if (sf == NULL)
1087 			goto fail;
1088 
1089 		ref_inc++;
1090 		m_extadd(m_new, (char *)sf_buf_kva(sf), PAGE_SIZE,
1091 		    mb_unmapped_free_mext, sf, mref, M_RDONLY, EXT_SFBUF);
1092 		m_new->m_data += segoff;
1093 		m_new->m_len = seglen;
1094 
1095 		pgoff = 0;
1096 	}
1097 	if (len != 0) {
1098 		KASSERT((off + len) <= ext_pgs->trail_len,
1099 		    ("off + len > trail (%d + %d > %d)", off, len,
1100 		    ext_pgs->trail_len));
1101 		m_new = m_get(M_NOWAIT, MT_DATA);
1102 		if (m_new == NULL)
1103 			goto fail;
1104 		if (top == NULL)
1105 			top = m_new;
1106 		else
1107 			prev->m_next = m_new;
1108 		m_new->m_len = len;
1109 		memcpy(mtod(m_new, void *), &ext_pgs->trail[off], len);
1110 	}
1111 
1112 	if (ref_inc != 0) {
1113 		/*
1114 		 * Obtain an additional reference on the old mbuf for
1115 		 * each created EXT_SFBUF mbuf.  They will be dropped
1116 		 * in mb_unmapped_free_mext().
1117 		 */
1118 		if (*refcnt == 1)
1119 			*refcnt += ref_inc;
1120 		else
1121 			atomic_add_int(refcnt, ref_inc);
1122 	}
1123 	m_free(m);
1124 	return (top);
1125 
1126 fail:
1127 	if (ref_inc != 0) {
1128 		/*
1129 		 * Obtain an additional reference on the old mbuf for
1130 		 * each created EXT_SFBUF mbuf.  They will be
1131 		 * immediately dropped when these mbufs are freed
1132 		 * below.
1133 		 */
1134 		if (*refcnt == 1)
1135 			*refcnt += ref_inc;
1136 		else
1137 			atomic_add_int(refcnt, ref_inc);
1138 	}
1139 	m_free(m);
1140 	m_freem(top);
1141 	return (NULL);
1142 }
1143 
1144 struct mbuf *
1145 mb_unmapped_to_ext(struct mbuf *top)
1146 {
1147 	struct mbuf *m, *next, *prev;
1148 
1149 	prev = NULL;
1150 	for (m = top; m != NULL; m = next) {
1151 		/* m might be freed, so cache the next pointer. */
1152 		next = m->m_next;
1153 		if (m->m_flags & M_NOMAP) {
1154 			if (prev != NULL) {
1155 				/*
1156 				 * Remove 'm' from the new chain so
1157 				 * that the 'top' chain terminates
1158 				 * before 'm' in case 'top' is freed
1159 				 * due to an error.
1160 				 */
1161 				prev->m_next = NULL;
1162 			}
1163 			m = _mb_unmapped_to_ext(m);
1164 			if (m == NULL) {
1165 				m_freem(top);
1166 				m_freem(next);
1167 				return (NULL);
1168 			}
1169 			if (prev == NULL) {
1170 				top = m;
1171 			} else {
1172 				prev->m_next = m;
1173 			}
1174 
1175 			/*
1176 			 * Replaced one mbuf with a chain, so we must
1177 			 * find the end of chain.
1178 			 */
1179 			prev = m_last(m);
1180 		} else {
1181 			if (prev != NULL) {
1182 				prev->m_next = m;
1183 			}
1184 			prev = m;
1185 		}
1186 	}
1187 	return (top);
1188 }
1189 
1190 /*
1191  * Allocate an empty EXT_PGS mbuf.  The ext_free routine is
1192  * responsible for freeing any pages backing this mbuf when it is
1193  * freed.
1194  */
1195 struct mbuf *
1196 mb_alloc_ext_pgs(int how, bool pkthdr, m_ext_free_t ext_free)
1197 {
1198 	struct mbuf *m;
1199 	struct mbuf_ext_pgs *ext_pgs;
1200 
1201 	if (pkthdr)
1202 		m = m_gethdr(how, MT_DATA);
1203 	else
1204 		m = m_get(how, MT_DATA);
1205 	if (m == NULL)
1206 		return (NULL);
1207 
1208 	ext_pgs = uma_zalloc(zone_extpgs, how);
1209 	if (ext_pgs == NULL) {
1210 		m_free(m);
1211 		return (NULL);
1212 	}
1213 	ext_pgs->npgs = 0;
1214 	ext_pgs->nrdy = 0;
1215 	ext_pgs->first_pg_off = 0;
1216 	ext_pgs->last_pg_len = 0;
1217 	ext_pgs->flags = 0;
1218 	ext_pgs->hdr_len = 0;
1219 	ext_pgs->trail_len = 0;
1220 	ext_pgs->tls = NULL;
1221 	ext_pgs->so = NULL;
1222 	m->m_data = NULL;
1223 	m->m_flags |= (M_EXT | M_RDONLY | M_NOMAP);
1224 	m->m_ext.ext_type = EXT_PGS;
1225 	m->m_ext.ext_flags = EXT_FLAG_EMBREF;
1226 	m->m_ext.ext_count = 1;
1227 	m->m_ext.ext_pgs = ext_pgs;
1228 	m->m_ext.ext_size = 0;
1229 	m->m_ext.ext_free = ext_free;
1230 	return (m);
1231 }
1232 
1233 #ifdef INVARIANT_SUPPORT
1234 void
1235 mb_ext_pgs_check(struct mbuf_ext_pgs *ext_pgs)
1236 {
1237 
1238 	/*
1239 	 * NB: This expects a non-empty buffer (npgs > 0 and
1240 	 * last_pg_len > 0).
1241 	 */
1242 	KASSERT(ext_pgs->npgs > 0,
1243 	    ("ext_pgs with no valid pages: %p", ext_pgs));
1244 	KASSERT(ext_pgs->npgs <= nitems(ext_pgs->pa),
1245 	    ("ext_pgs with too many pages: %p", ext_pgs));
1246 	KASSERT(ext_pgs->nrdy <= ext_pgs->npgs,
1247 	    ("ext_pgs with too many ready pages: %p", ext_pgs));
1248 	KASSERT(ext_pgs->first_pg_off < PAGE_SIZE,
1249 	    ("ext_pgs with too large page offset: %p", ext_pgs));
1250 	KASSERT(ext_pgs->last_pg_len > 0,
1251 	    ("ext_pgs with zero last page length: %p", ext_pgs));
1252 	KASSERT(ext_pgs->last_pg_len <= PAGE_SIZE,
1253 	    ("ext_pgs with too large last page length: %p", ext_pgs));
1254 	if (ext_pgs->npgs == 1) {
1255 		KASSERT(ext_pgs->first_pg_off + ext_pgs->last_pg_len <=
1256 		    PAGE_SIZE, ("ext_pgs with single page too large: %p",
1257 		    ext_pgs));
1258 	}
1259 	KASSERT(ext_pgs->hdr_len <= sizeof(ext_pgs->hdr),
1260 	    ("ext_pgs with too large header length: %p", ext_pgs));
1261 	KASSERT(ext_pgs->trail_len <= sizeof(ext_pgs->trail),
1262 	    ("ext_pgs with too large header length: %p", ext_pgs));
1263 }
1264 #endif
1265 
1266 /*
1267  * Clean up after mbufs with M_EXT storage attached to them if the
1268  * reference count hits 1.
1269  */
1270 void
1271 mb_free_ext(struct mbuf *m)
1272 {
1273 	volatile u_int *refcnt;
1274 	struct mbuf *mref;
1275 	int freembuf;
1276 
1277 	KASSERT(m->m_flags & M_EXT, ("%s: M_EXT not set on %p", __func__, m));
1278 
1279 	/* See if this is the mbuf that holds the embedded refcount. */
1280 	if (m->m_ext.ext_flags & EXT_FLAG_EMBREF) {
1281 		refcnt = &m->m_ext.ext_count;
1282 		mref = m;
1283 	} else {
1284 		KASSERT(m->m_ext.ext_cnt != NULL,
1285 		    ("%s: no refcounting pointer on %p", __func__, m));
1286 		refcnt = m->m_ext.ext_cnt;
1287 		mref = __containerof(refcnt, struct mbuf, m_ext.ext_count);
1288 	}
1289 
1290 	/*
1291 	 * Check if the header is embedded in the cluster.  It is
1292 	 * important that we can't touch any of the mbuf fields
1293  * after we have freed the external storage, since the mbuf
1294 	 * could have been embedded in it.  For now, the mbufs
1295 	 * embedded into the cluster are always of type EXT_EXTREF,
1296 	 * and for this type we won't free the mref.
1297 	 */
1298 	if (m->m_flags & M_NOFREE) {
1299 		freembuf = 0;
1300 		KASSERT(m->m_ext.ext_type == EXT_EXTREF ||
1301 		    m->m_ext.ext_type == EXT_RXRING,
1302 		    ("%s: no-free mbuf %p has wrong type", __func__, m));
1303 	} else
1304 		freembuf = 1;
1305 
1306 	/* Free attached storage if this mbuf is the only reference to it. */
1307 	if (*refcnt == 1 || atomic_fetchadd_int(refcnt, -1) == 1) {
1308 		switch (m->m_ext.ext_type) {
1309 		case EXT_PACKET:
1310 			/* The packet zone is special. */
1311 			if (*refcnt == 0)
1312 				*refcnt = 1;
1313 			uma_zfree(zone_pack, mref);
1314 			break;
1315 		case EXT_CLUSTER:
1316 			uma_zfree(zone_clust, m->m_ext.ext_buf);
1317 			uma_zfree(zone_mbuf, mref);
1318 			break;
1319 		case EXT_JUMBOP:
1320 			uma_zfree(zone_jumbop, m->m_ext.ext_buf);
1321 			uma_zfree(zone_mbuf, mref);
1322 			break;
1323 		case EXT_JUMBO9:
1324 			uma_zfree(zone_jumbo9, m->m_ext.ext_buf);
1325 			uma_zfree(zone_mbuf, mref);
1326 			break;
1327 		case EXT_JUMBO16:
1328 			uma_zfree(zone_jumbo16, m->m_ext.ext_buf);
1329 			uma_zfree(zone_mbuf, mref);
1330 			break;
1331 		case EXT_PGS: {
1332 #ifdef KERN_TLS
1333 			struct mbuf_ext_pgs *pgs;
1334 			struct ktls_session *tls;
1335 #endif
1336 
1337 			KASSERT(mref->m_ext.ext_free != NULL,
1338 			    ("%s: ext_free not set", __func__));
1339 			mref->m_ext.ext_free(mref);
1340 #ifdef KERN_TLS
1341 			pgs = mref->m_ext.ext_pgs;
1342 			tls = pgs->tls;
1343 			if (tls != NULL &&
1344 			    !refcount_release_if_not_last(&tls->refcount))
1345 				ktls_enqueue_to_free(pgs);
1346 			else
1347 #endif
1348 				uma_zfree(zone_extpgs, mref->m_ext.ext_pgs);
1349 			uma_zfree(zone_mbuf, mref);
1350 			break;
1351 		}
1352 		case EXT_SFBUF:
1353 		case EXT_NET_DRV:
1354 		case EXT_MOD_TYPE:
1355 		case EXT_DISPOSABLE:
1356 			KASSERT(mref->m_ext.ext_free != NULL,
1357 			    ("%s: ext_free not set", __func__));
1358 			mref->m_ext.ext_free(mref);
1359 			uma_zfree(zone_mbuf, mref);
1360 			break;
1361 		case EXT_EXTREF:
1362 			KASSERT(m->m_ext.ext_free != NULL,
1363 			    ("%s: ext_free not set", __func__));
1364 			m->m_ext.ext_free(m);
1365 			break;
1366 		case EXT_RXRING:
1367 			KASSERT(m->m_ext.ext_free == NULL,
1368 			    ("%s: ext_free is set", __func__));
1369 			break;
1370 		default:
1371 			KASSERT(m->m_ext.ext_type == 0,
1372 			    ("%s: unknown ext_type", __func__));
1373 		}
1374 	}
1375 
1376 	if (freembuf && m != mref)
1377 		uma_zfree(zone_mbuf, m);
1378 }
1379 
1380 /*
1381  * Official mbuf(9) allocation KPI for stack and drivers:
1382  *
1383  * m_get()	- a single mbuf without any attachments, sys/mbuf.h.
1384  * m_gethdr()	- a single mbuf initialized as M_PKTHDR, sys/mbuf.h.
1385  * m_getcl()	- an mbuf + 2k cluster, sys/mbuf.h.
1386  * m_clget()	- attach cluster to already allocated mbuf.
1387  * m_cljget()	- attach jumbo cluster to already allocated mbuf.
1388  * m_get2()	- allocate minimum mbuf that would fit size argument.
1389  * m_getm2()	- allocate a chain of mbufs/clusters.
1390  * m_extadd()	- attach external cluster to mbuf.
1391  *
1392  * m_free()	- free single mbuf with its tags and ext, sys/mbuf.h.
1393  * m_freem()	- free chain of mbufs.
1394  */
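
/*
 * Example (an illustrative sketch, not a prescription): a driver receive
 * path commonly pairs these as:
 *
 *	struct mbuf *m;
 *
 *	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	m->m_pkthdr.len = m->m_len = MCLBYTES;
 *	...
 *	m_freem(m);		(on the error path)
 */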
1395 
1396 int
1397 m_clget(struct mbuf *m, int how)
1398 {
1399 
1400 	KASSERT((m->m_flags & M_EXT) == 0, ("%s: mbuf %p has M_EXT",
1401 	    __func__, m));
1402 	m->m_ext.ext_buf = (char *)NULL;
1403 	uma_zalloc_arg(zone_clust, m, how);
1404 	/*
1405 	 * On a cluster allocation failure, drain the packet zone and retry;
1406 	 * we might be able to loosen a few clusters up on the drain.
1407 	 */
1408 	if ((how & M_NOWAIT) && (m->m_ext.ext_buf == NULL)) {
1409 		uma_zone_reclaim(zone_pack, UMA_RECLAIM_DRAIN);
1410 		uma_zalloc_arg(zone_clust, m, how);
1411 	}
1412 	MBUF_PROBE2(m__clget, m, how);
1413 	return (m->m_flags & M_EXT);
1414 }
1415 
1416 /*
1417  * m_cljget() is different from m_clget() as it can allocate clusters without
1418  * attaching them to an mbuf.  In that case the return value is the pointer
1419  * to the cluster of the requested size.  If an mbuf was specified, it gets
1420  * the cluster attached to it and the return value can be safely ignored.
1421  * For size it takes MCLBYTES, MJUMPAGESIZE, MJUM9BYTES, MJUM16BYTES.
1422  */
1423 void *
1424 m_cljget(struct mbuf *m, int how, int size)
1425 {
1426 	uma_zone_t zone;
1427 	void *retval;
1428 
1429 	if (m != NULL) {
1430 		KASSERT((m->m_flags & M_EXT) == 0, ("%s: mbuf %p has M_EXT",
1431 		    __func__, m));
1432 		m->m_ext.ext_buf = NULL;
1433 	}
1434 
1435 	zone = m_getzone(size);
1436 	retval = uma_zalloc_arg(zone, m, how);
1437 
1438 	MBUF_PROBE4(m__cljget, m, how, size, retval);
1439 
1440 	return (retval);
1441 }
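
/*
 * Example (illustrative): attaching a page-sized jumbo cluster to an
 * existing mbuf; failure shows up both as a NULL return and as a
 * missing M_EXT flag:
 *
 *	if (m_cljget(m, M_NOWAIT, MJUMPAGESIZE) == NULL)
 *		(handle the allocation failure)
 */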
1442 
1443 /*
1444  * m_get2() allocates the minimum mbuf that would fit the "size" argument.
1445  */
1446 struct mbuf *
1447 m_get2(int size, int how, short type, int flags)
1448 {
1449 	struct mb_args args;
1450 	struct mbuf *m, *n;
1451 
1452 	args.flags = flags;
1453 	args.type = type;
1454 
1455 	if (size <= MHLEN || (size <= MLEN && (flags & M_PKTHDR) == 0))
1456 		return (uma_zalloc_arg(zone_mbuf, &args, how));
1457 	if (size <= MCLBYTES)
1458 		return (uma_zalloc_arg(zone_pack, &args, how));
1459 
1460 	if (size > MJUMPAGESIZE)
1461 		return (NULL);
1462 
1463 	m = uma_zalloc_arg(zone_mbuf, &args, how);
1464 	if (m == NULL)
1465 		return (NULL);
1466 
1467 	n = uma_zalloc_arg(zone_jumbop, m, how);
1468 	if (n == NULL) {
1469 		uma_zfree(zone_mbuf, m);
1470 		return (NULL);
1471 	}
1472 
1473 	return (m);
1474 }
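
/*
 * Example (illustrative): m_get2(1500, M_NOWAIT, MT_DATA, M_PKTHDR)
 * returns a packet-zone mbuf with a 2K cluster attached, since
 * MHLEN < 1500 <= MCLBYTES; any size above MJUMPAGESIZE yields NULL
 * rather than a chain.
 */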
1475 
1476 /*
1477  * m_getjcl() returns an mbuf with a cluster of the specified size attached.
1478  * For size it takes MCLBYTES, MJUMPAGESIZE, MJUM9BYTES, MJUM16BYTES.
1479  */
1480 struct mbuf *
1481 m_getjcl(int how, short type, int flags, int size)
1482 {
1483 	struct mb_args args;
1484 	struct mbuf *m, *n;
1485 	uma_zone_t zone;
1486 
1487 	if (size == MCLBYTES)
1488 		return (m_getcl(how, type, flags));
1489 
1490 	args.flags = flags;
1491 	args.type = type;
1492 
1493 	m = uma_zalloc_arg(zone_mbuf, &args, how);
1494 	if (m == NULL)
1495 		return (NULL);
1496 
1497 	zone = m_getzone(size);
1498 	n = uma_zalloc_arg(zone, m, how);
1499 	if (n == NULL) {
1500 		uma_zfree(zone_mbuf, m);
1501 		return (NULL);
1502 	}
1503 	return (m);
1504 }
1505 
1506 /*
1507  * Allocate a given length worth of mbufs and/or clusters (whatever fits
1508  * best) and return a pointer to the top of the allocated chain.  If an
1509  * existing mbuf chain is provided, then we will append the new chain
1510  * to the existing one and return a pointer to the provided mbuf.
1511  */
1512 struct mbuf *
1513 m_getm2(struct mbuf *m, int len, int how, short type, int flags)
1514 {
1515 	struct mbuf *mb, *nm = NULL, *mtail = NULL;
1516 
1517 	KASSERT(len >= 0, ("%s: len is < 0", __func__));
1518 
1519 	/* Validate flags. */
1520 	flags &= (M_PKTHDR | M_EOR);
1521 
1522 	/* Packet header mbuf must be first in chain. */
1523 	if ((flags & M_PKTHDR) && m != NULL)
1524 		flags &= ~M_PKTHDR;
1525 
1526 	/* Loop and append maximum sized mbufs to the chain tail. */
1527 	while (len > 0) {
1528 		if (len > MCLBYTES)
1529 			mb = m_getjcl(how, type, (flags & M_PKTHDR),
1530 			    MJUMPAGESIZE);
1531 		else if (len >= MINCLSIZE)
1532 			mb = m_getcl(how, type, (flags & M_PKTHDR));
1533 		else if (flags & M_PKTHDR)
1534 			mb = m_gethdr(how, type);
1535 		else
1536 			mb = m_get(how, type);
1537 
1538 		/* Fail the whole operation if one mbuf can't be allocated. */
1539 		if (mb == NULL) {
1540 			if (nm != NULL)
1541 				m_freem(nm);
1542 			return (NULL);
1543 		}
1544 
1545 		/* Bookkeeping. */
1546 		len -= M_SIZE(mb);
1547 		if (mtail != NULL)
1548 			mtail->m_next = mb;
1549 		else
1550 			nm = mb;
1551 		mtail = mb;
1552 		flags &= ~M_PKTHDR;	/* Only valid on the first mbuf. */
1553 	}
1554 	if (flags & M_EOR)
1555 		mtail->m_flags |= M_EOR;  /* Only valid on the last mbuf. */
1556 
1557 	/* If mbuf was supplied, append new chain to the end of it. */
1558 	if (m != NULL) {
1559 		for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next)
1560 			;
1561 		mtail->m_next = nm;
1562 		mtail->m_flags &= ~M_EOR;
1563 	} else
1564 		m = nm;
1565 
1566 	return (m);
1567 }
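
/*
 * Example (illustrative): m_getm2(NULL, 65536, M_WAITOK, MT_DATA,
 * M_PKTHDR) builds a fresh chain of page-size jumbo clusters (each
 * iteration sees len > MCLBYTES) with M_PKTHDR set only on the first
 * mbuf of the chain.
 */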
1568 
1569 /*-
1570  * Configure a provided mbuf to refer to the provided external storage
1571  * buffer and setup a reference count for said buffer.
1572  *
1573  * Arguments:
1574  *    mb     The existing mbuf to which to attach the provided buffer.
1575  *    buf    The address of the provided external storage buffer.
1576  *    size   The size of the provided buffer.
1577  *    freef  A pointer to a routine that is responsible for freeing the
1578  *           provided external storage buffer.
1579  *    args   A pointer to an argument structure (of any type) to be passed
1580  *           to the provided freef routine (may be NULL).
1581  *    flags  Any other flags to be passed to the provided mbuf.
1582  *    type   The type that the external storage buffer should be
1583  *           labeled with.
1584  *
1585  * Returns:
1586  *    Nothing.
1587  */
1588 void
1589 m_extadd(struct mbuf *mb, char *buf, u_int size, m_ext_free_t freef,
1590     void *arg1, void *arg2, int flags, int type)
1591 {
1592 
1593 	KASSERT(type != EXT_CLUSTER, ("%s: EXT_CLUSTER not allowed", __func__));
1594 
1595 	mb->m_flags |= (M_EXT | flags);
1596 	mb->m_ext.ext_buf = buf;
1597 	mb->m_data = mb->m_ext.ext_buf;
1598 	mb->m_ext.ext_size = size;
1599 	mb->m_ext.ext_free = freef;
1600 	mb->m_ext.ext_arg1 = arg1;
1601 	mb->m_ext.ext_arg2 = arg2;
1602 	mb->m_ext.ext_type = type;
1603 
1604 	if (type != EXT_EXTREF) {
1605 		mb->m_ext.ext_count = 1;
1606 		mb->m_ext.ext_flags = EXT_FLAG_EMBREF;
1607 	} else
1608 		mb->m_ext.ext_flags = 0;
1609 }
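
/*
 * Example (an illustrative sketch; "my_free" and its storage are
 * hypothetical): attaching driver-owned storage to an mbuf:
 *
 *	static void
 *	my_free(struct mbuf *m)
 *	{
 *
 *		free(m->m_ext.ext_arg1, M_DEVBUF);
 *	}
 *	...
 *	m_extadd(m, buf, bufsize, my_free, buf, NULL, 0, EXT_NET_DRV);
 */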
1610 
1611 /*
1612  * Free an entire chain of mbufs and associated external buffers, if
1613  * applicable.
1614  */
1615 void
1616 m_freem(struct mbuf *mb)
1617 {
1618 
1619 	MBUF_PROBE1(m__freem, mb);
1620 	while (mb != NULL)
1621 		mb = m_free(mb);
1622 }
1623 
1624 void
1625 m_snd_tag_init(struct m_snd_tag *mst, struct ifnet *ifp)
1626 {
1627 
1628 	if_ref(ifp);
1629 	mst->ifp = ifp;
1630 	refcount_init(&mst->refcount, 1);
1631 	counter_u64_add(snd_tag_count, 1);
1632 }
1633 
1634 void
1635 m_snd_tag_destroy(struct m_snd_tag *mst)
1636 {
1637 	struct ifnet *ifp;
1638 
1639 	ifp = mst->ifp;
1640 	ifp->if_snd_tag_free(mst);
1641 	if_rele(ifp);
1642 	counter_u64_add(snd_tag_count, -1);
1643 }
1644