1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2004, 2005,
5 * Bosko Milekic <bmilekic@FreeBSD.org>. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions and the following
12 * disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30 #include <sys/cdefs.h>
31 #include "opt_param.h"
32 #include "opt_kern_tls.h"
33
34 #include <sys/param.h>
35 #include <sys/conf.h>
36 #include <sys/domainset.h>
37 #include <sys/malloc.h>
38 #include <sys/systm.h>
39 #include <sys/mbuf.h>
40 #include <sys/eventhandler.h>
41 #include <sys/kernel.h>
42 #include <sys/ktls.h>
43 #include <sys/limits.h>
44 #include <sys/lock.h>
45 #include <sys/mutex.h>
46 #include <sys/refcount.h>
47 #include <sys/sf_buf.h>
48 #include <sys/smp.h>
49 #include <sys/socket.h>
50 #include <sys/sysctl.h>
51
52 #include <net/if.h>
53 #include <net/if_var.h>
54
55 #include <vm/vm.h>
56 #include <vm/vm_extern.h>
57 #include <vm/vm_kern.h>
58 #include <vm/vm_page.h>
59 #include <vm/vm_pageout.h>
60 #include <vm/vm_map.h>
61 #include <vm/uma.h>
62 #include <vm/uma_dbg.h>
63
64 _Static_assert(MJUMPAGESIZE > MCLBYTES,
65 "Cluster must be smaller than a jumbo page");
66
67 /*
68 * In FreeBSD, Mbufs and Mbuf Clusters are allocated from UMA
69 * Zones.
70 *
71 * Mbuf Clusters (2K, contiguous) are allocated from the Cluster
72 * Zone. The Zone can be capped at kern.ipc.nmbclusters, if the
73 * administrator so desires.
74 *
75 * Mbufs are allocated from a UMA Primary Zone called the Mbuf
76 * Zone.
77 *
78 * Additionally, FreeBSD provides a Packet Zone, which it
79 * configures as a Secondary Zone to the Mbuf Primary Zone,
80 * thus sharing backend Slab kegs with the Mbuf Primary Zone.
81 *
82 * Thus common-case allocations and locking are simplified:
83 *
84 * m_clget() m_getcl()
85 * | |
86 * | .------------>[(Packet Cache)] m_get(), m_gethdr()
87 * | | [ Packet ] |
88 * [(Cluster Cache)] [ Secondary ] [ (Mbuf Cache) ]
89 * [ Cluster Zone ] [ Zone ] [ Mbuf Primary Zone ]
90 * | \________ |
91 * [ Cluster Keg ] \ /
92 * | [ Mbuf Keg ]
93 * [ Cluster Slabs ] |
94 * | [ Mbuf Slabs ]
95 * \____________(VM)_________________/
96 *
97 *
98 * Whenever an object is allocated with uma_zalloc() out of
99  * one of the Zones its _ctor_ function is executed.  Likewise, on
100  * any deallocation through uma_zfree() the _dtor_ function
101 * is executed.
102 *
103 * Caches are per-CPU and are filled from the Primary Zone.
104 *
105 * Whenever an object is allocated from the underlying global
106 * memory pool it gets pre-initialized with the _zinit_ functions.
107  * When the Kegs are overfull, objects get decommissioned with
108  * _zfini_ functions and freed back to the global memory pool.
109 *
110 */
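/*
 * Illustrative sketch (not part of the allocator): the arrows in the
 * diagram above correspond to ordinary mbuf(9) calls.  A hypothetical
 * caller that already holds an mbuf and wants a cluster attached goes
 * down the left-hand path:
 *
 *	m = m_get(M_NOWAIT, MT_DATA);			(Mbuf Zone)
 *	if (m != NULL && !m_clget(m, M_NOWAIT))		(Cluster Zone)
 *		goto nomem;
 *
 * while m_getcl() takes the combined path through the Packet Zone and
 * returns an mbuf with a cluster already attached.
 */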
111
112 int nmbufs; /* limits number of mbufs */
113 int nmbclusters; /* limits number of mbuf clusters */
114 int nmbjumbop; /* limits number of page size jumbo clusters */
115 int nmbjumbo9; /* limits number of 9k jumbo clusters */
116 int nmbjumbo16; /* limits number of 16k jumbo clusters */
117
118 bool mb_use_ext_pgs = false; /* use M_EXTPG mbufs for sendfile & TLS */
119
120 static int
121 sysctl_mb_use_ext_pgs(SYSCTL_HANDLER_ARGS)
122 {
123 int error, extpg;
124
125 extpg = mb_use_ext_pgs;
126 error = sysctl_handle_int(oidp, &extpg, 0, req);
127 if (error == 0 && req->newptr != NULL) {
128 if (extpg != 0 && !PMAP_HAS_DMAP)
129 error = EOPNOTSUPP;
130 else
131 mb_use_ext_pgs = extpg != 0;
132 }
133 return (error);
134 }
135 SYSCTL_PROC(_kern_ipc, OID_AUTO, mb_use_ext_pgs,
136 CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NOFETCH,
137 &mb_use_ext_pgs, 0, sysctl_mb_use_ext_pgs, "IU",
138 "Use unmapped mbufs for sendfile(2) and TLS offload");
139
140 static quad_t maxmbufmem; /* overall real memory limit for all mbufs */
141
142 SYSCTL_QUAD(_kern_ipc, OID_AUTO, maxmbufmem, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &maxmbufmem, 0,
143 "Maximum real memory allocatable to various mbuf types");
144
145 static counter_u64_t snd_tag_count;
146 SYSCTL_COUNTER_U64(_kern_ipc, OID_AUTO, num_snd_tags, CTLFLAG_RW,
147 &snd_tag_count, "# of active mbuf send tags");
148
149 /*
150 * tunable_mbinit() has to be run before any mbuf allocations are done.
151 */
152 static void
153 tunable_mbinit(void *dummy)
154 {
155 quad_t realmem;
156 int extpg;
157
158 /*
159 * The default limit for all mbuf related memory is 1/2 of all
160 * available kernel memory (physical or kmem).
161 * At most it can be 3/4 of available kernel memory.
162 */
163 realmem = qmin((quad_t)physmem * PAGE_SIZE, vm_kmem_size);
164 maxmbufmem = realmem / 2;
165 TUNABLE_QUAD_FETCH("kern.ipc.maxmbufmem", &maxmbufmem);
166 if (maxmbufmem > realmem / 4 * 3)
167 maxmbufmem = realmem / 4 * 3;
168
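	/*
	 * Worked example (hypothetical machine): if the smaller of
	 * physical and kmem memory is 8GB, maxmbufmem defaults to 4GB,
	 * so with 2k clusters nmbclusters defaults to
	 * 4GB / 2048 / 4 = 524288 below and, assuming an MSIZE of 256,
	 * nmbufs to at least 4GB / 256 / 5 = 3355443.
	 */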
169 TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
170 if (nmbclusters == 0)
171 nmbclusters = maxmbufmem / MCLBYTES / 4;
172
173 TUNABLE_INT_FETCH("kern.ipc.nmbjumbop", &nmbjumbop);
174 if (nmbjumbop == 0)
175 nmbjumbop = maxmbufmem / MJUMPAGESIZE / 4;
176
177 TUNABLE_INT_FETCH("kern.ipc.nmbjumbo9", &nmbjumbo9);
178 if (nmbjumbo9 == 0)
179 nmbjumbo9 = maxmbufmem / MJUM9BYTES / 6;
180
181 TUNABLE_INT_FETCH("kern.ipc.nmbjumbo16", &nmbjumbo16);
182 if (nmbjumbo16 == 0)
183 nmbjumbo16 = maxmbufmem / MJUM16BYTES / 6;
184
185 /*
186 * We need at least as many mbufs as we have clusters of
187 * the various types added together.
188 */
189 TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);
190 if (nmbufs < nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16)
191 nmbufs = lmax(maxmbufmem / MSIZE / 5,
192 nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16);
193
194 /*
195 * Unmapped mbufs can only safely be used on platforms with a direct
196 * map.
197 */
198 if (PMAP_HAS_DMAP) {
199 extpg = 1;
200 TUNABLE_INT_FETCH("kern.ipc.mb_use_ext_pgs", &extpg);
201 mb_use_ext_pgs = extpg != 0;
202 }
203 }
204 SYSINIT(tunable_mbinit, SI_SUB_KMEM, SI_ORDER_MIDDLE, tunable_mbinit, NULL);
205
206 static int
207 sysctl_nmbclusters(SYSCTL_HANDLER_ARGS)
208 {
209 int error, newnmbclusters;
210
211 newnmbclusters = nmbclusters;
212 error = sysctl_handle_int(oidp, &newnmbclusters, 0, req);
213 if (error == 0 && req->newptr && newnmbclusters != nmbclusters) {
214 if (newnmbclusters > nmbclusters &&
215 nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
216 nmbclusters = newnmbclusters;
217 nmbclusters = uma_zone_set_max(zone_clust, nmbclusters);
218 EVENTHANDLER_INVOKE(nmbclusters_change);
219 } else
220 error = EINVAL;
221 }
222 return (error);
223 }
224 SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbclusters,
225 CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE,
226 &nmbclusters, 0, sysctl_nmbclusters, "IU",
227 "Maximum number of mbuf clusters allowed");
228
229 static int
230 sysctl_nmbjumbop(SYSCTL_HANDLER_ARGS)
231 {
232 int error, newnmbjumbop;
233
234 newnmbjumbop = nmbjumbop;
235 error = sysctl_handle_int(oidp, &newnmbjumbop, 0, req);
236 if (error == 0 && req->newptr && newnmbjumbop != nmbjumbop) {
237 if (newnmbjumbop > nmbjumbop &&
238 nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
239 nmbjumbop = newnmbjumbop;
240 nmbjumbop = uma_zone_set_max(zone_jumbop, nmbjumbop);
241 } else
242 error = EINVAL;
243 }
244 return (error);
245 }
246 SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbop,
247 CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE,
248 &nmbjumbop, 0, sysctl_nmbjumbop, "IU",
249 "Maximum number of mbuf page size jumbo clusters allowed");
250
251 static int
252 sysctl_nmbjumbo9(SYSCTL_HANDLER_ARGS)
253 {
254 int error, newnmbjumbo9;
255
256 newnmbjumbo9 = nmbjumbo9;
257 error = sysctl_handle_int(oidp, &newnmbjumbo9, 0, req);
258 if (error == 0 && req->newptr && newnmbjumbo9 != nmbjumbo9) {
259 if (newnmbjumbo9 > nmbjumbo9 &&
260 nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
261 nmbjumbo9 = newnmbjumbo9;
262 nmbjumbo9 = uma_zone_set_max(zone_jumbo9, nmbjumbo9);
263 } else
264 error = EINVAL;
265 }
266 return (error);
267 }
268 SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbo9,
269 CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE,
270 &nmbjumbo9, 0, sysctl_nmbjumbo9, "IU",
271 "Maximum number of mbuf 9k jumbo clusters allowed");
272
273 static int
274 sysctl_nmbjumbo16(SYSCTL_HANDLER_ARGS)
275 {
276 int error, newnmbjumbo16;
277
278 newnmbjumbo16 = nmbjumbo16;
279 error = sysctl_handle_int(oidp, &newnmbjumbo16, 0, req);
280 if (error == 0 && req->newptr && newnmbjumbo16 != nmbjumbo16) {
281 if (newnmbjumbo16 > nmbjumbo16 &&
282 nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
283 nmbjumbo16 = newnmbjumbo16;
284 nmbjumbo16 = uma_zone_set_max(zone_jumbo16, nmbjumbo16);
285 } else
286 error = EINVAL;
287 }
288 return (error);
289 }
290 SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbo16,
291 CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE,
292 &nmbjumbo16, 0, sysctl_nmbjumbo16, "IU",
293 "Maximum number of mbuf 16k jumbo clusters allowed");
294
295 static int
296 sysctl_nmbufs(SYSCTL_HANDLER_ARGS)
297 {
298 int error, newnmbufs;
299
300 newnmbufs = nmbufs;
301 error = sysctl_handle_int(oidp, &newnmbufs, 0, req);
302 if (error == 0 && req->newptr && newnmbufs != nmbufs) {
303 if (newnmbufs > nmbufs) {
304 nmbufs = newnmbufs;
305 nmbufs = uma_zone_set_max(zone_mbuf, nmbufs);
306 EVENTHANDLER_INVOKE(nmbufs_change);
307 } else
308 error = EINVAL;
309 }
310 return (error);
311 }
312 SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbufs,
313 CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE,
314 &nmbufs, 0, sysctl_nmbufs, "IU",
315 "Maximum number of mbufs allowed");
316
317 /*
318 * Zones from which we allocate.
319 */
320 uma_zone_t zone_mbuf;
321 uma_zone_t zone_clust;
322 uma_zone_t zone_pack;
323 uma_zone_t zone_jumbop;
324 uma_zone_t zone_jumbo9;
325 uma_zone_t zone_jumbo16;
326
327 /*
328 * Local prototypes.
329 */
330 static int mb_ctor_mbuf(void *, int, void *, int);
331 static int mb_ctor_clust(void *, int, void *, int);
332 static int mb_ctor_pack(void *, int, void *, int);
333 static void mb_dtor_mbuf(void *, int, void *);
334 static void mb_dtor_pack(void *, int, void *);
335 static int mb_zinit_pack(void *, int, int);
336 static void mb_zfini_pack(void *, int);
337 static void mb_reclaim(uma_zone_t, int);
338
339 /* Ensure that MSIZE is a power of 2. */
340 CTASSERT((((MSIZE - 1) ^ MSIZE) + 1) >> 1 == MSIZE);
341
342 _Static_assert(sizeof(struct mbuf) <= MSIZE,
343 "size of mbuf exceeds MSIZE");
344 /*
345 * Initialize FreeBSD Network buffer allocation.
346 */
347 static void
348 mbuf_init(void *dummy)
349 {
350
351 /*
352 * Configure UMA zones for Mbufs, Clusters, and Packets.
353 */
354 zone_mbuf = uma_zcreate(MBUF_MEM_NAME, MSIZE,
355 mb_ctor_mbuf, mb_dtor_mbuf, NULL, NULL,
356 MSIZE - 1, UMA_ZONE_CONTIG | UMA_ZONE_MAXBUCKET);
357 if (nmbufs > 0)
358 nmbufs = uma_zone_set_max(zone_mbuf, nmbufs);
359 uma_zone_set_warning(zone_mbuf, "kern.ipc.nmbufs limit reached");
360 uma_zone_set_maxaction(zone_mbuf, mb_reclaim);
361
362 zone_clust = uma_zcreate(MBUF_CLUSTER_MEM_NAME, MCLBYTES,
363 mb_ctor_clust, NULL, NULL, NULL,
364 UMA_ALIGN_PTR, UMA_ZONE_CONTIG);
365 if (nmbclusters > 0)
366 nmbclusters = uma_zone_set_max(zone_clust, nmbclusters);
367 uma_zone_set_warning(zone_clust, "kern.ipc.nmbclusters limit reached");
368 uma_zone_set_maxaction(zone_clust, mb_reclaim);
369
370 zone_pack = uma_zsecond_create(MBUF_PACKET_MEM_NAME, mb_ctor_pack,
371 mb_dtor_pack, mb_zinit_pack, mb_zfini_pack, zone_mbuf);
372
373 /* Make jumbo frame zone too. Page size, 9k and 16k. */
374 zone_jumbop = uma_zcreate(MBUF_JUMBOP_MEM_NAME, MJUMPAGESIZE,
375 mb_ctor_clust, NULL, NULL, NULL,
376 UMA_ALIGN_PTR, UMA_ZONE_CONTIG);
377 if (nmbjumbop > 0)
378 nmbjumbop = uma_zone_set_max(zone_jumbop, nmbjumbop);
379 uma_zone_set_warning(zone_jumbop, "kern.ipc.nmbjumbop limit reached");
380 uma_zone_set_maxaction(zone_jumbop, mb_reclaim);
381
382 zone_jumbo9 = uma_zcreate(MBUF_JUMBO9_MEM_NAME, MJUM9BYTES,
383 mb_ctor_clust, NULL, NULL, NULL,
384 UMA_ALIGN_PTR, UMA_ZONE_CONTIG);
385 if (nmbjumbo9 > 0)
386 nmbjumbo9 = uma_zone_set_max(zone_jumbo9, nmbjumbo9);
387 uma_zone_set_warning(zone_jumbo9, "kern.ipc.nmbjumbo9 limit reached");
388 uma_zone_set_maxaction(zone_jumbo9, mb_reclaim);
389
390 zone_jumbo16 = uma_zcreate(MBUF_JUMBO16_MEM_NAME, MJUM16BYTES,
391 mb_ctor_clust, NULL, NULL, NULL,
392 UMA_ALIGN_PTR, UMA_ZONE_CONTIG);
393 if (nmbjumbo16 > 0)
394 nmbjumbo16 = uma_zone_set_max(zone_jumbo16, nmbjumbo16);
395 uma_zone_set_warning(zone_jumbo16, "kern.ipc.nmbjumbo16 limit reached");
396 uma_zone_set_maxaction(zone_jumbo16, mb_reclaim);
397
398 snd_tag_count = counter_u64_alloc(M_WAITOK);
399 }
400 SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbuf_init, NULL);
401
402 #ifdef DEBUGNET
403 /*
404 * debugnet makes use of a pre-allocated pool of mbufs and clusters. When
405 * debugnet is configured, we initialize a set of UMA cache zones which return
406 * items from this pool. At panic-time, the regular UMA zone pointers are
407 * overwritten with those of the cache zones so that drivers may allocate and
408 * free mbufs and clusters without attempting to allocate physical memory.
409 *
410 * We keep mbufs and clusters in a pair of mbuf queues. In particular, for
411 * the purpose of caching clusters, we treat them as mbufs.
412 */
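/*
 * Sketch of the effect (illustrative only): once debugnet_mbuf_start()
 * has swapped the zone pointers below, an ordinary allocation such as
 *
 *	m = m_get(M_NOWAIT, MT_DATA);
 *
 * is satisfied from dn_mbufq through dn_zone_mbuf's import routine
 * rather than from the regular Mbuf Zone, so no new physical memory
 * is needed at panic time.
 */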
413 static struct mbufq dn_mbufq =
414 { STAILQ_HEAD_INITIALIZER(dn_mbufq.mq_head), 0, INT_MAX };
415 static struct mbufq dn_clustq =
416 { STAILQ_HEAD_INITIALIZER(dn_clustq.mq_head), 0, INT_MAX };
417
418 static int dn_clsize;
419 static uma_zone_t dn_zone_mbuf;
420 static uma_zone_t dn_zone_clust;
421 static uma_zone_t dn_zone_pack;
422
423 static struct debugnet_saved_zones {
424 uma_zone_t dsz_mbuf;
425 uma_zone_t dsz_clust;
426 uma_zone_t dsz_pack;
427 uma_zone_t dsz_jumbop;
428 uma_zone_t dsz_jumbo9;
429 uma_zone_t dsz_jumbo16;
430 bool dsz_debugnet_zones_enabled;
431 } dn_saved_zones;
432
433 static int
434 dn_buf_import(void *arg, void **store, int count, int domain __unused,
435 int flags)
436 {
437 struct mbufq *q;
438 struct mbuf *m;
439 int i;
440
441 q = arg;
442
443 for (i = 0; i < count; i++) {
444 m = mbufq_dequeue(q);
445 if (m == NULL)
446 break;
447 trash_init(m, q == &dn_mbufq ? MSIZE : dn_clsize, flags);
448 store[i] = m;
449 }
450 KASSERT((flags & M_WAITOK) == 0 || i == count,
451 ("%s: ran out of pre-allocated mbufs", __func__));
452 return (i);
453 }
454
455 static void
456 dn_buf_release(void *arg, void **store, int count)
457 {
458 struct mbufq *q;
459 struct mbuf *m;
460 int i;
461
462 q = arg;
463
464 for (i = 0; i < count; i++) {
465 m = store[i];
466 (void)mbufq_enqueue(q, m);
467 }
468 }
469
470 static int
471 dn_pack_import(void *arg __unused, void **store, int count, int domain __unused,
472 int flags __unused)
473 {
474 struct mbuf *m;
475 void *clust;
476 int i;
477
478 for (i = 0; i < count; i++) {
479 m = m_get(M_NOWAIT, MT_DATA);
480 if (m == NULL)
481 break;
482 clust = uma_zalloc(dn_zone_clust, M_NOWAIT);
483 if (clust == NULL) {
484 m_free(m);
485 break;
486 }
487 mb_ctor_clust(clust, dn_clsize, m, 0);
488 store[i] = m;
489 }
490 KASSERT((flags & M_WAITOK) == 0 || i == count,
491 ("%s: ran out of pre-allocated mbufs", __func__));
492 return (i);
493 }
494
495 static void
496 dn_pack_release(void *arg __unused, void **store, int count)
497 {
498 struct mbuf *m;
499 void *clust;
500 int i;
501
502 for (i = 0; i < count; i++) {
503 m = store[i];
504 clust = m->m_ext.ext_buf;
505 uma_zfree(dn_zone_clust, clust);
506 uma_zfree(dn_zone_mbuf, m);
507 }
508 }
509
510 /*
511 * Free the pre-allocated mbufs and clusters reserved for debugnet, and destroy
512 * the corresponding UMA cache zones.
513 */
514 void
515 debugnet_mbuf_drain(void)
516 {
517 struct mbuf *m;
518 void *item;
519
520 if (dn_zone_mbuf != NULL) {
521 uma_zdestroy(dn_zone_mbuf);
522 dn_zone_mbuf = NULL;
523 }
524 if (dn_zone_clust != NULL) {
525 uma_zdestroy(dn_zone_clust);
526 dn_zone_clust = NULL;
527 }
528 if (dn_zone_pack != NULL) {
529 uma_zdestroy(dn_zone_pack);
530 dn_zone_pack = NULL;
531 }
532
533 while ((m = mbufq_dequeue(&dn_mbufq)) != NULL)
534 m_free(m);
535 while ((item = mbufq_dequeue(&dn_clustq)) != NULL)
536 uma_zfree(m_getzone(dn_clsize), item);
537 }
538
539 /*
540 * Callback invoked immediately prior to starting a debugnet connection.
541 */
542 void
543 debugnet_mbuf_start(void)
544 {
545
546 MPASS(!dn_saved_zones.dsz_debugnet_zones_enabled);
547
548 /* Save the old zone pointers to restore when debugnet is closed. */
549 dn_saved_zones = (struct debugnet_saved_zones) {
550 .dsz_debugnet_zones_enabled = true,
551 .dsz_mbuf = zone_mbuf,
552 .dsz_clust = zone_clust,
553 .dsz_pack = zone_pack,
554 .dsz_jumbop = zone_jumbop,
555 .dsz_jumbo9 = zone_jumbo9,
556 .dsz_jumbo16 = zone_jumbo16,
557 };
558
559 /*
560 * All cluster zones return buffers of the size requested by the
561 * drivers. It's up to the driver to reinitialize the zones if the
562 * MTU of a debugnet-enabled interface changes.
563 */
564 printf("debugnet: overwriting mbuf zone pointers\n");
565 zone_mbuf = dn_zone_mbuf;
566 zone_clust = dn_zone_clust;
567 zone_pack = dn_zone_pack;
568 zone_jumbop = dn_zone_clust;
569 zone_jumbo9 = dn_zone_clust;
570 zone_jumbo16 = dn_zone_clust;
571 }
572
573 /*
574 * Callback invoked when a debugnet connection is closed/finished.
575 */
576 void
577 debugnet_mbuf_finish(void)
578 {
579
580 MPASS(dn_saved_zones.dsz_debugnet_zones_enabled);
581
582 printf("debugnet: restoring mbuf zone pointers\n");
583 zone_mbuf = dn_saved_zones.dsz_mbuf;
584 zone_clust = dn_saved_zones.dsz_clust;
585 zone_pack = dn_saved_zones.dsz_pack;
586 zone_jumbop = dn_saved_zones.dsz_jumbop;
587 zone_jumbo9 = dn_saved_zones.dsz_jumbo9;
588 zone_jumbo16 = dn_saved_zones.dsz_jumbo16;
589
590 memset(&dn_saved_zones, 0, sizeof(dn_saved_zones));
591 }
592
593 /*
594 * Reinitialize the debugnet mbuf+cluster pool and cache zones.
595 */
596 void
597 debugnet_mbuf_reinit(int nmbuf, int nclust, int clsize)
598 {
599 struct mbuf *m;
600 void *item;
601
602 debugnet_mbuf_drain();
603
604 dn_clsize = clsize;
605
606 dn_zone_mbuf = uma_zcache_create("debugnet_" MBUF_MEM_NAME,
607 MSIZE, mb_ctor_mbuf, mb_dtor_mbuf, NULL, NULL,
608 dn_buf_import, dn_buf_release,
609 &dn_mbufq, UMA_ZONE_NOBUCKET);
610
611 dn_zone_clust = uma_zcache_create("debugnet_" MBUF_CLUSTER_MEM_NAME,
612 clsize, mb_ctor_clust, NULL, NULL, NULL,
613 dn_buf_import, dn_buf_release,
614 &dn_clustq, UMA_ZONE_NOBUCKET);
615
616 dn_zone_pack = uma_zcache_create("debugnet_" MBUF_PACKET_MEM_NAME,
617 MCLBYTES, mb_ctor_pack, mb_dtor_pack, NULL, NULL,
618 dn_pack_import, dn_pack_release,
619 NULL, UMA_ZONE_NOBUCKET);
620
621 while (nmbuf-- > 0) {
622 m = m_get(M_WAITOK, MT_DATA);
623 uma_zfree(dn_zone_mbuf, m);
624 }
625 while (nclust-- > 0) {
626 item = uma_zalloc(m_getzone(dn_clsize), M_WAITOK);
627 uma_zfree(dn_zone_clust, item);
628 }
629 }
630 #endif /* DEBUGNET */
631
632 /*
633 * Constructor for Mbuf primary zone.
634 *
635 * The 'arg' pointer points to a mb_args structure which
636 * contains call-specific information required to support the
637 * mbuf allocation API. See mbuf.h.
638 */
639 static int
640 mb_ctor_mbuf(void *mem, int size, void *arg, int how)
641 {
642 struct mbuf *m;
643 struct mb_args *args;
644 int error;
645 int flags;
646 short type;
647
648 args = (struct mb_args *)arg;
649 type = args->type;
650
651 /*
652 * The mbuf is initialized later. The caller has the
653 * responsibility to set up any MAC labels too.
654 */
655 if (type == MT_NOINIT)
656 return (0);
657
658 m = (struct mbuf *)mem;
659 flags = args->flags;
660 MPASS((flags & M_NOFREE) == 0);
661
662 error = m_init(m, how, type, flags);
663
664 return (error);
665 }
666
667 /*
668 * The Mbuf primary zone destructor.
669 */
670 static void
671 mb_dtor_mbuf(void *mem, int size, void *arg)
672 {
673 struct mbuf *m;
674 unsigned long flags __diagused;
675
676 m = (struct mbuf *)mem;
677 flags = (unsigned long)arg;
678
679 KASSERT((m->m_flags & M_NOFREE) == 0, ("%s: M_NOFREE set", __func__));
680 KASSERT((flags & 0x1) == 0, ("%s: obsolete MB_DTOR_SKIP passed", __func__));
681 if ((m->m_flags & M_PKTHDR) && !SLIST_EMPTY(&m->m_pkthdr.tags))
682 m_tag_delete_chain(m, NULL);
683 }
684
685 /*
686 * The Mbuf Packet zone destructor.
687 */
688 static void
689 mb_dtor_pack(void *mem, int size, void *arg)
690 {
691 struct mbuf *m;
692
693 m = (struct mbuf *)mem;
694 if ((m->m_flags & M_PKTHDR) != 0)
695 m_tag_delete_chain(m, NULL);
696
697 /* Make sure we've got a clean cluster back. */
698 KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
699 KASSERT(m->m_ext.ext_buf != NULL, ("%s: ext_buf == NULL", __func__));
700 KASSERT(m->m_ext.ext_free == NULL, ("%s: ext_free != NULL", __func__));
701 KASSERT(m->m_ext.ext_arg1 == NULL, ("%s: ext_arg1 != NULL", __func__));
702 KASSERT(m->m_ext.ext_arg2 == NULL, ("%s: ext_arg2 != NULL", __func__));
703 KASSERT(m->m_ext.ext_size == MCLBYTES, ("%s: ext_size != MCLBYTES", __func__));
704 KASSERT(m->m_ext.ext_type == EXT_PACKET, ("%s: ext_type != EXT_PACKET", __func__));
705 #if defined(INVARIANTS) && !defined(KMSAN)
706 trash_dtor(m->m_ext.ext_buf, MCLBYTES, zone_clust);
707 #endif
708 /*
709 * If there are processes blocked on zone_clust, waiting for pages
710 * to be freed up, cause them to be woken up by draining the
711 * packet zone. We are exposed to a race here (in the check for
712 * the UMA_ZFLAG_FULL) where we might miss the flag set, but that
713 * is deliberate. We don't want to acquire the zone lock for every
714 * mbuf free.
715 */
716 if (uma_zone_exhausted(zone_clust))
717 uma_zone_reclaim(zone_pack, UMA_RECLAIM_DRAIN);
718 }
719
720 /*
721 * The Cluster and Jumbo[PAGESIZE|9|16] zone constructor.
722 *
723 * Here the 'arg' pointer points to the Mbuf which we
724  * are configuring cluster storage for.  If 'arg' is
725  * NULL we allocate just the cluster without attaching
726  * it to an mbuf.  See mbuf.h.
727 */
728 static int
729 mb_ctor_clust(void *mem, int size, void *arg, int how)
730 {
731 struct mbuf *m;
732
733 m = (struct mbuf *)arg;
734 if (m != NULL) {
735 m->m_ext.ext_buf = (char *)mem;
736 m->m_data = m->m_ext.ext_buf;
737 m->m_flags |= M_EXT;
738 m->m_ext.ext_free = NULL;
739 m->m_ext.ext_arg1 = NULL;
740 m->m_ext.ext_arg2 = NULL;
741 m->m_ext.ext_size = size;
742 m->m_ext.ext_type = m_gettype(size);
743 m->m_ext.ext_flags = EXT_FLAG_EMBREF;
744 m->m_ext.ext_count = 1;
745 }
746
747 return (0);
748 }
749
750 /*
751 * The Packet secondary zone's init routine, executed on the
752 * object's transition from mbuf keg slab to zone cache.
753 */
754 static int
755 mb_zinit_pack(void *mem, int size, int how)
756 {
757 struct mbuf *m;
758
759 m = (struct mbuf *)mem; /* m is virgin. */
760 if (uma_zalloc_arg(zone_clust, m, how) == NULL ||
761 m->m_ext.ext_buf == NULL)
762 return (ENOMEM);
763 m->m_ext.ext_type = EXT_PACKET; /* Override. */
764 #if defined(INVARIANTS) && !defined(KMSAN)
765 trash_init(m->m_ext.ext_buf, MCLBYTES, how);
766 #endif
767 return (0);
768 }
769
770 /*
771 * The Packet secondary zone's fini routine, executed on the
772 * object's transition from zone cache to keg slab.
773 */
774 static void
775 mb_zfini_pack(void *mem, int size)
776 {
777 struct mbuf *m;
778
779 m = (struct mbuf *)mem;
780 #if defined(INVARIANTS) && !defined(KMSAN)
781 trash_fini(m->m_ext.ext_buf, MCLBYTES);
782 #endif
783 uma_zfree_arg(zone_clust, m->m_ext.ext_buf, NULL);
784 #if defined(INVARIANTS) && !defined(KMSAN)
785 trash_dtor(mem, size, zone_clust);
786 #endif
787 }
788
789 /*
790 * The "packet" keg constructor.
791 */
792 static int
793 mb_ctor_pack(void *mem, int size, void *arg, int how)
794 {
795 struct mbuf *m;
796 struct mb_args *args;
797 int error, flags;
798 short type;
799
800 m = (struct mbuf *)mem;
801 args = (struct mb_args *)arg;
802 flags = args->flags;
803 type = args->type;
804 MPASS((flags & M_NOFREE) == 0);
805
806 #if defined(INVARIANTS) && !defined(KMSAN)
807 trash_ctor(m->m_ext.ext_buf, MCLBYTES, zone_clust, how);
808 #endif
809
810 error = m_init(m, how, type, flags);
811
812 /* m_ext is already initialized. */
813 m->m_data = m->m_ext.ext_buf;
814 m->m_flags = (flags | M_EXT);
815
816 return (error);
817 }
818
819 /*
820 * This is the protocol drain routine. Called by UMA whenever any of the
821  * mbuf zones reaches its limit.
822 */
823 static void
824 mb_reclaim(uma_zone_t zone __unused, int pending __unused)
825 {
826
827 EVENTHANDLER_INVOKE(mbuf_lowmem, VM_LOW_MBUFS);
828 }
829
830 /*
831 * Free "count" units of I/O from an mbuf chain. They could be held
832 * in M_EXTPG or just as a normal mbuf. This code is intended to be
833 * called in an error path (I/O error, closed connection, etc).
834 */
835 void
836 mb_free_notready(struct mbuf *m, int count)
837 {
838 int i;
839
840 for (i = 0; i < count && m != NULL; i++) {
841 if ((m->m_flags & M_EXTPG) != 0) {
842 m->m_epg_nrdy--;
843 if (m->m_epg_nrdy != 0)
844 continue;
845 }
846 m = m_free(m);
847 }
848 KASSERT(i == count, ("Removed only %d items from %p", i, m));
849 }
850
851 /*
852 * Compress an unmapped mbuf into a simple mbuf when it holds a small
853 * amount of data. This is used as a DOS defense to avoid having
854 * small packets tie up wired pages, an ext_pgs structure, and an
855 * mbuf. Since this converts the existing mbuf in place, it can only
856 * be used if there are no other references to 'm'.
857 */
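/*
 * Hypothetical usage sketch: a caller holding the only reference to a
 * small unmapped mbuf may try to fold it into a regular mbuf and keep
 * it unmapped if that fails:
 *
 *	if ((m->m_flags & (M_PKTHDR | M_EXTPG)) == M_EXTPG &&
 *	    m->m_len <= MLEN && mb_unmapped_compress(m) == 0) {
 *		... 'm' is now a plain mapped mbuf ...
 *	}
 */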
858 int
859 mb_unmapped_compress(struct mbuf *m)
860 {
861 volatile u_int *refcnt;
862 char buf[MLEN];
863
864 /*
865 * Assert that 'm' does not have a packet header. If 'm' had
866 * a packet header, it would only be able to hold MHLEN bytes
867 * and m_data would have to be initialized differently.
868 */
869 KASSERT((m->m_flags & M_PKTHDR) == 0 && (m->m_flags & M_EXTPG),
870 ("%s: m %p !M_EXTPG or M_PKTHDR", __func__, m));
871 KASSERT(m->m_len <= MLEN, ("m_len too large %p", m));
872
873 if (m->m_ext.ext_flags & EXT_FLAG_EMBREF) {
874 refcnt = &m->m_ext.ext_count;
875 } else {
876 KASSERT(m->m_ext.ext_cnt != NULL,
877 ("%s: no refcounting pointer on %p", __func__, m));
878 refcnt = m->m_ext.ext_cnt;
879 }
880
881 if (*refcnt != 1)
882 return (EBUSY);
883
884 m_copydata(m, 0, m->m_len, buf);
885
886 /* Free the backing pages. */
887 m->m_ext.ext_free(m);
888
889 /* Turn 'm' into a "normal" mbuf. */
890 m->m_flags &= ~(M_EXT | M_RDONLY | M_EXTPG);
891 m->m_data = m->m_dat;
892
893 /* Copy data back into m. */
894 bcopy(buf, mtod(m, char *), m->m_len);
895
896 return (0);
897 }
898
899 /*
900 * These next few routines are used to permit downgrading an unmapped
901 * mbuf to a chain of mapped mbufs. This is used when an interface
902  * doesn't support unmapped mbufs or if checksums need to be
903 * computed in software.
904 *
905 * Each unmapped mbuf is converted to a chain of mbufs. First, any
906 * TLS header data is stored in a regular mbuf. Second, each page of
907 * unmapped data is stored in an mbuf with an EXT_SFBUF external
908 * cluster. These mbufs use an sf_buf to provide a valid KVA for the
909 * associated physical page. They also hold a reference on the
910 * original M_EXTPG mbuf to ensure the physical page doesn't go away.
911 * Finally, any TLS trailer data is stored in a regular mbuf.
912 *
913 * mb_unmapped_free_mext() is the ext_free handler for the EXT_SFBUF
914 * mbufs. It frees the associated sf_buf and releases its reference
915 * on the original M_EXTPG mbuf.
916 *
917 * _mb_unmapped_to_ext() is a helper function that converts a single
918 * unmapped mbuf into a chain of mbufs.
919 *
920 * mb_unmapped_to_ext() is the public function that walks an mbuf
921  * chain converting any unmapped mbufs to mapped mbufs.  On success
922  * it returns 0 and stores the new chain in *mres; on failure it
923  * frees the original mbuf chain, sets *mres to NULL, and returns an error.
924 */
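/*
 * Hypothetical usage sketch for a transmit path that cannot handle
 * unmapped mbufs:
 *
 *	struct mbuf *m;
 *	int error;
 *
 *	error = mb_unmapped_to_ext(m0, &m);
 *	if (error != 0)
 *		return (error);		(m0 has already been freed)
 *	... hand the mapped chain 'm' to the hardware ...
 */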
925 static void
926 mb_unmapped_free_mext(struct mbuf *m)
927 {
928 struct sf_buf *sf;
929 struct mbuf *old_m;
930
931 sf = m->m_ext.ext_arg1;
932 sf_buf_free(sf);
933
934 /* Drop the reference on the backing M_EXTPG mbuf. */
935 old_m = m->m_ext.ext_arg2;
936 mb_free_extpg(old_m);
937 }
938
939 static int
940 _mb_unmapped_to_ext(struct mbuf *m, struct mbuf **mres)
941 {
942 struct mbuf *m_new, *top, *prev, *mref;
943 struct sf_buf *sf;
944 vm_page_t pg;
945 int i, len, off, pglen, pgoff, seglen, segoff;
946 volatile u_int *refcnt;
947 u_int ref_inc = 0;
948
949 M_ASSERTEXTPG(m);
950
951 if (m->m_epg_tls != NULL) {
952 /* can't convert TLS mbuf */
953 m_freem(m);
954 *mres = NULL;
955 return (EINVAL);
956 }
957
958 len = m->m_len;
959
960 /* See if this is the mbuf that holds the embedded refcount. */
961 if (m->m_ext.ext_flags & EXT_FLAG_EMBREF) {
962 refcnt = &m->m_ext.ext_count;
963 mref = m;
964 } else {
965 KASSERT(m->m_ext.ext_cnt != NULL,
966 ("%s: no refcounting pointer on %p", __func__, m));
967 refcnt = m->m_ext.ext_cnt;
968 mref = __containerof(refcnt, struct mbuf, m_ext.ext_count);
969 }
970
971 /* Skip over any data removed from the front. */
972 off = mtod(m, vm_offset_t);
973
974 top = NULL;
975 if (m->m_epg_hdrlen != 0) {
976 if (off >= m->m_epg_hdrlen) {
977 off -= m->m_epg_hdrlen;
978 } else {
979 seglen = m->m_epg_hdrlen - off;
980 segoff = off;
981 seglen = min(seglen, len);
982 off = 0;
983 len -= seglen;
984 m_new = m_get(M_NOWAIT, MT_DATA);
985 if (m_new == NULL)
986 goto fail;
987 m_new->m_len = seglen;
988 prev = top = m_new;
989 memcpy(mtod(m_new, void *), &m->m_epg_hdr[segoff],
990 seglen);
991 }
992 }
993 pgoff = m->m_epg_1st_off;
994 for (i = 0; i < m->m_epg_npgs && len > 0; i++) {
995 pglen = m_epg_pagelen(m, i, pgoff);
996 if (off >= pglen) {
997 off -= pglen;
998 pgoff = 0;
999 continue;
1000 }
1001 seglen = pglen - off;
1002 segoff = pgoff + off;
1003 off = 0;
1004 seglen = min(seglen, len);
1005 len -= seglen;
1006
1007 pg = PHYS_TO_VM_PAGE(m->m_epg_pa[i]);
1008 m_new = m_get(M_NOWAIT, MT_DATA);
1009 if (m_new == NULL)
1010 goto fail;
1011 if (top == NULL) {
1012 top = prev = m_new;
1013 } else {
1014 prev->m_next = m_new;
1015 prev = m_new;
1016 }
1017 sf = sf_buf_alloc(pg, SFB_NOWAIT);
1018 if (sf == NULL)
1019 goto fail;
1020
1021 ref_inc++;
1022 m_extadd(m_new, (char *)sf_buf_kva(sf), PAGE_SIZE,
1023 mb_unmapped_free_mext, sf, mref, m->m_flags & M_RDONLY,
1024 EXT_SFBUF);
1025 m_new->m_data += segoff;
1026 m_new->m_len = seglen;
1027
1028 pgoff = 0;
1029 };
1030 if (len != 0) {
1031 KASSERT((off + len) <= m->m_epg_trllen,
1032 ("off + len > trail (%d + %d > %d)", off, len,
1033 m->m_epg_trllen));
1034 m_new = m_get(M_NOWAIT, MT_DATA);
1035 if (m_new == NULL)
1036 goto fail;
1037 if (top == NULL)
1038 top = m_new;
1039 else
1040 prev->m_next = m_new;
1041 m_new->m_len = len;
1042 memcpy(mtod(m_new, void *), &m->m_epg_trail[off], len);
1043 }
1044
1045 if (ref_inc != 0) {
1046 /*
1047 * Obtain an additional reference on the old mbuf for
1048 * each created EXT_SFBUF mbuf. They will be dropped
1049 * in mb_unmapped_free_mext().
1050 */
1051 if (*refcnt == 1)
1052 *refcnt += ref_inc;
1053 else
1054 atomic_add_int(refcnt, ref_inc);
1055 }
1056 m_free(m);
1057 *mres = top;
1058 return (0);
1059
1060 fail:
1061 if (ref_inc != 0) {
1062 /*
1063 * Obtain an additional reference on the old mbuf for
1064 * each created EXT_SFBUF mbuf. They will be
1065 * immediately dropped when these mbufs are freed
1066 * below.
1067 */
1068 if (*refcnt == 1)
1069 *refcnt += ref_inc;
1070 else
1071 atomic_add_int(refcnt, ref_inc);
1072 }
1073 m_free(m);
1074 m_freem(top);
1075 *mres = NULL;
1076 return (ENOMEM);
1077 }
1078
1079 int
1080 mb_unmapped_to_ext(struct mbuf *top, struct mbuf **mres)
1081 {
1082 struct mbuf *m, *m1, *next, *prev = NULL;
1083 int error;
1084
1085 prev = NULL;
1086 for (m = top; m != NULL; m = next) {
1087 /* m might be freed, so cache the next pointer. */
1088 next = m->m_next;
1089 if (m->m_flags & M_EXTPG) {
1090 if (prev != NULL) {
1091 /*
1092 * Remove 'm' from the new chain so
1093 * that the 'top' chain terminates
1094 * before 'm' in case 'top' is freed
1095 * due to an error.
1096 */
1097 prev->m_next = NULL;
1098 }
1099 error = _mb_unmapped_to_ext(m, &m1);
1100 if (error != 0) {
1101 if (top != m)
1102 m_freem(top);
1103 m_freem(next);
1104 *mres = NULL;
1105 return (error);
1106 }
1107 m = m1;
1108 if (prev == NULL) {
1109 top = m;
1110 } else {
1111 prev->m_next = m;
1112 }
1113
1114 /*
1115 * Replaced one mbuf with a chain, so we must
1116 * find the end of chain.
1117 */
1118 prev = m_last(m);
1119 } else {
1120 if (prev != NULL) {
1121 prev->m_next = m;
1122 }
1123 prev = m;
1124 }
1125 }
1126 *mres = top;
1127 return (0);
1128 }
1129
1130 /*
1131 * Allocate an empty M_EXTPG mbuf. The ext_free routine is
1132 * responsible for freeing any pages backing this mbuf when it is
1133 * freed.
1134 */
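/*
 * Hypothetical usage sketch (mb_alloc_ext_plus_pages() below is the
 * in-tree caller to compare against): the caller supplies the backing
 * page(s) and a routine that frees them:
 *
 *	m = mb_alloc_ext_pgs(M_WAITOK, my_pages_free, 0);
 *	m->m_epg_pa[0] = VM_PAGE_TO_PHYS(pg);
 *	m->m_epg_npgs = 1;
 *	m->m_epg_last_len = len;
 *	m->m_len = len;
 *
 * where my_pages_free, pg and len are caller-supplied (hypothetical)
 * names.
 */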
1135 struct mbuf *
1136 mb_alloc_ext_pgs(int how, m_ext_free_t ext_free, int flags)
1137 {
1138 struct mbuf *m;
1139
1140 m = m_get(how, MT_DATA);
1141 if (m == NULL)
1142 return (NULL);
1143
1144 m->m_epg_npgs = 0;
1145 m->m_epg_nrdy = 0;
1146 m->m_epg_1st_off = 0;
1147 m->m_epg_last_len = 0;
1148 m->m_epg_flags = 0;
1149 m->m_epg_hdrlen = 0;
1150 m->m_epg_trllen = 0;
1151 m->m_epg_tls = NULL;
1152 m->m_epg_so = NULL;
1153 m->m_data = NULL;
1154 m->m_flags |= M_EXT | M_EXTPG | flags;
1155 m->m_ext.ext_flags = EXT_FLAG_EMBREF;
1156 m->m_ext.ext_count = 1;
1157 m->m_ext.ext_size = 0;
1158 m->m_ext.ext_free = ext_free;
1159 return (m);
1160 }
1161
1162 /*
1163 * Clean up after mbufs with M_EXT storage attached to them if the
1164 * reference count hits 1.
1165 */
1166 void
1167 mb_free_ext(struct mbuf *m)
1168 {
1169 volatile u_int *refcnt;
1170 struct mbuf *mref;
1171 int freembuf;
1172
1173 KASSERT(m->m_flags & M_EXT, ("%s: M_EXT not set on %p", __func__, m));
1174
1175 /* See if this is the mbuf that holds the embedded refcount. */
1176 if (m->m_ext.ext_flags & EXT_FLAG_EMBREF) {
1177 refcnt = &m->m_ext.ext_count;
1178 mref = m;
1179 } else {
1180 KASSERT(m->m_ext.ext_cnt != NULL,
1181 ("%s: no refcounting pointer on %p", __func__, m));
1182 refcnt = m->m_ext.ext_cnt;
1183 mref = __containerof(refcnt, struct mbuf, m_ext.ext_count);
1184 }
1185
1186 /*
1187 * Check if the header is embedded in the cluster. It is
1188  * important that we do not touch any of the mbuf fields
1189  * after we have freed the external storage, since the mbuf
1190 * could have been embedded in it. For now, the mbufs
1191 * embedded into the cluster are always of type EXT_EXTREF,
1192 * and for this type we won't free the mref.
1193 */
1194 if (m->m_flags & M_NOFREE) {
1195 freembuf = 0;
1196 KASSERT(m->m_ext.ext_type == EXT_EXTREF ||
1197 m->m_ext.ext_type == EXT_RXRING,
1198 ("%s: no-free mbuf %p has wrong type", __func__, m));
1199 } else
1200 freembuf = 1;
1201
1202 /* Free attached storage if this mbuf is the only reference to it. */
1203 if (*refcnt == 1 || atomic_fetchadd_int(refcnt, -1) == 1) {
1204 switch (m->m_ext.ext_type) {
1205 case EXT_PACKET:
1206 /* The packet zone is special. */
1207 if (*refcnt == 0)
1208 *refcnt = 1;
1209 uma_zfree(zone_pack, mref);
1210 break;
1211 case EXT_CLUSTER:
1212 uma_zfree(zone_clust, m->m_ext.ext_buf);
1213 m_free_raw(mref);
1214 break;
1215 case EXT_JUMBOP:
1216 uma_zfree(zone_jumbop, m->m_ext.ext_buf);
1217 m_free_raw(mref);
1218 break;
1219 case EXT_JUMBO9:
1220 uma_zfree(zone_jumbo9, m->m_ext.ext_buf);
1221 m_free_raw(mref);
1222 break;
1223 case EXT_JUMBO16:
1224 uma_zfree(zone_jumbo16, m->m_ext.ext_buf);
1225 m_free_raw(mref);
1226 break;
1227 case EXT_SFBUF:
1228 case EXT_NET_DRV:
1229 case EXT_CTL:
1230 case EXT_MOD_TYPE:
1231 case EXT_DISPOSABLE:
1232 KASSERT(mref->m_ext.ext_free != NULL,
1233 ("%s: ext_free not set", __func__));
1234 mref->m_ext.ext_free(mref);
1235 m_free_raw(mref);
1236 break;
1237 case EXT_EXTREF:
1238 KASSERT(m->m_ext.ext_free != NULL,
1239 ("%s: ext_free not set", __func__));
1240 m->m_ext.ext_free(m);
1241 break;
1242 case EXT_RXRING:
1243 KASSERT(m->m_ext.ext_free == NULL,
1244 ("%s: ext_free is set", __func__));
1245 break;
1246 default:
1247 KASSERT(m->m_ext.ext_type == 0,
1248 ("%s: unknown ext_type", __func__));
1249 }
1250 }
1251
1252 if (freembuf && m != mref)
1253 m_free_raw(m);
1254 }
1255
1256 /*
1257 * Clean up after mbufs with M_EXTPG storage attached to them if the
1258 * reference count hits 1.
1259 */
1260 void
1261 mb_free_extpg(struct mbuf *m)
1262 {
1263 volatile u_int *refcnt;
1264 struct mbuf *mref;
1265
1266 M_ASSERTEXTPG(m);
1267
1268 /* See if this is the mbuf that holds the embedded refcount. */
1269 if (m->m_ext.ext_flags & EXT_FLAG_EMBREF) {
1270 refcnt = &m->m_ext.ext_count;
1271 mref = m;
1272 } else {
1273 KASSERT(m->m_ext.ext_cnt != NULL,
1274 ("%s: no refcounting pointer on %p", __func__, m));
1275 refcnt = m->m_ext.ext_cnt;
1276 mref = __containerof(refcnt, struct mbuf, m_ext.ext_count);
1277 }
1278
1279 /* Free attached storage if this mbuf is the only reference to it. */
1280 if (*refcnt == 1 || atomic_fetchadd_int(refcnt, -1) == 1) {
1281 KASSERT(mref->m_ext.ext_free != NULL,
1282 ("%s: ext_free not set", __func__));
1283
1284 mref->m_ext.ext_free(mref);
1285 #ifdef KERN_TLS
1286 if (mref->m_epg_tls != NULL &&
1287 !refcount_release_if_not_last(&mref->m_epg_tls->refcount))
1288 ktls_enqueue_to_free(mref);
1289 else
1290 #endif
1291 m_free_raw(mref);
1292 }
1293
1294 if (m != mref)
1295 m_free_raw(m);
1296 }
1297
1298 /*
1299 * Official mbuf(9) allocation KPI for stack and drivers:
1300 *
1301 * m_get() - a single mbuf without any attachments, sys/mbuf.h.
1302 * m_gethdr() - a single mbuf initialized as M_PKTHDR, sys/mbuf.h.
1303 * m_getcl() - an mbuf + 2k cluster, sys/mbuf.h.
1304 * m_clget() - attach cluster to already allocated mbuf.
1305 * m_cljget() - attach jumbo cluster to already allocated mbuf.
1306 * m_get2() - allocate minimum mbuf that would fit size argument.
1307 * m_getm2() - allocate a chain of mbufs/clusters.
1308 * m_extadd() - attach external cluster to mbuf.
1309 *
1310 * m_free() - free single mbuf with its tags and ext, sys/mbuf.h.
1311 * m_freem() - free chain of mbufs.
1312 */
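/*
 * Illustrative example of the KPI above (a sketch, not used by the
 * kernel itself): allocate a packet header mbuf with a 2k cluster,
 * fill it in, and free the chain when done:
 *
 *	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	... fill in m->m_data, m->m_len, m->m_pkthdr.len ...
 *	m_freem(m);
 */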
1313
1314 int
1315 m_clget(struct mbuf *m, int how)
1316 {
1317
1318 KASSERT((m->m_flags & M_EXT) == 0, ("%s: mbuf %p has M_EXT",
1319 __func__, m));
1320 m->m_ext.ext_buf = (char *)NULL;
1321 uma_zalloc_arg(zone_clust, m, how);
1322 /*
1323 * On a cluster allocation failure, drain the packet zone and retry;
1324 * we might be able to loosen a few clusters up on the drain.
1325 */
1326 if ((how & M_NOWAIT) && (m->m_ext.ext_buf == NULL)) {
1327 uma_zone_reclaim(zone_pack, UMA_RECLAIM_DRAIN);
1328 uma_zalloc_arg(zone_clust, m, how);
1329 }
1330 MBUF_PROBE2(m__clget, m, how);
1331 return (m->m_flags & M_EXT);
1332 }
1333
1334 /*
1335 * m_cljget() is different from m_clget() as it can allocate clusters without
1336 * attaching them to an mbuf. In that case the return value is the pointer
1337 * to the cluster of the requested size. If an mbuf was specified, it gets
1338 * the cluster attached to it and the return value can be safely ignored.
1339 * For size it takes MCLBYTES, MJUMPAGESIZE, MJUM9BYTES, MJUM16BYTES.
1340 */
1341 void *
1342 m_cljget(struct mbuf *m, int how, int size)
1343 {
1344 uma_zone_t zone;
1345 void *retval;
1346
1347 if (m != NULL) {
1348 KASSERT((m->m_flags & M_EXT) == 0, ("%s: mbuf %p has M_EXT",
1349 __func__, m));
1350 m->m_ext.ext_buf = NULL;
1351 }
1352
1353 zone = m_getzone(size);
1354 retval = uma_zalloc_arg(zone, m, how);
1355
1356 MBUF_PROBE4(m__cljget, m, how, size, retval);
1357
1358 return (retval);
1359 }
1360
1361 /*
1362 * m_get2() allocates minimum mbuf that would fit "size" argument.
1363 */
1364 struct mbuf *
1365 m_get2(int size, int how, short type, int flags)
1366 {
1367 struct mb_args args;
1368 struct mbuf *m, *n;
1369
1370 args.flags = flags;
1371 args.type = type;
1372
1373 if (size <= MHLEN || (size <= MLEN && (flags & M_PKTHDR) == 0))
1374 return (uma_zalloc_arg(zone_mbuf, &args, how));
1375 if (size <= MCLBYTES)
1376 return (uma_zalloc_arg(zone_pack, &args, how));
1377
1378 if (size > MJUMPAGESIZE)
1379 return (NULL);
1380
1381 m = uma_zalloc_arg(zone_mbuf, &args, how);
1382 if (m == NULL)
1383 return (NULL);
1384
1385 n = uma_zalloc_arg(zone_jumbop, m, how);
1386 if (n == NULL) {
1387 m_free_raw(m);
1388 return (NULL);
1389 }
1390
1391 return (m);
1392 }
1393
1394 /*
1395 * m_get3() allocates minimum mbuf that would fit "size" argument.
1396 * Unlike m_get2() it can allocate clusters up to MJUM16BYTES.
1397 */
1398 struct mbuf *
1399 m_get3(int size, int how, short type, int flags)
1400 {
1401 struct mb_args args;
1402 struct mbuf *m, *n;
1403 uma_zone_t zone;
1404
1405 if (size <= MJUMPAGESIZE)
1406 return (m_get2(size, how, type, flags));
1407
1408 if (size > MJUM16BYTES)
1409 return (NULL);
1410
1411 args.flags = flags;
1412 args.type = type;
1413
1414 m = uma_zalloc_arg(zone_mbuf, &args, how);
1415 if (m == NULL)
1416 return (NULL);
1417
1418 if (size <= MJUM9BYTES)
1419 zone = zone_jumbo9;
1420 else
1421 zone = zone_jumbo16;
1422
1423 n = uma_zalloc_arg(zone, m, how);
1424 if (n == NULL) {
1425 m_free_raw(m);
1426 return (NULL);
1427 }
1428
1429 return (m);
1430 }
1431
1432 /*
1433 * m_getjcl() returns an mbuf with a cluster of the specified size attached.
1434 * For size it takes MCLBYTES, MJUMPAGESIZE, MJUM9BYTES, MJUM16BYTES.
1435 */
1436 struct mbuf *
1437 m_getjcl(int how, short type, int flags, int size)
1438 {
1439 struct mb_args args;
1440 struct mbuf *m, *n;
1441 uma_zone_t zone;
1442
1443 if (size == MCLBYTES)
1444 return m_getcl(how, type, flags);
1445
1446 args.flags = flags;
1447 args.type = type;
1448
1449 m = uma_zalloc_arg(zone_mbuf, &args, how);
1450 if (m == NULL)
1451 return (NULL);
1452
1453 zone = m_getzone(size);
1454 n = uma_zalloc_arg(zone, m, how);
1455 if (n == NULL) {
1456 m_free_raw(m);
1457 return (NULL);
1458 }
1459 MBUF_PROBE5(m__getjcl, how, type, flags, size, m);
1460 return (m);
1461 }
1462
1463 /*
1464  * Allocate an mchain of a given length of mbufs and/or clusters (whatever
1465  * fits best).  May fail with ENOMEM.  In case of failure the state of the
1466  * mchain is inconsistent.
1467 */
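/*
 * Hypothetical usage sketch: build roughly 8k worth of buffer space
 * headed by a packet header mbuf:
 *
 *	struct mchain mc;
 *
 *	if (mc_get(&mc, 8192, M_WAITOK, MT_DATA, M_PKTHDR) != 0)
 *		return (ENOMEM);
 *	m = mc_first(&mc);
 *
 * On success the chain starting at mc_first(&mc) provides at least
 * 8192 bytes of storage.
 */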
1468 int
1469 mc_get(struct mchain *mc, u_int length, int how, short type, int flags)
1470 {
1471 struct mbuf *mb;
1472 u_int progress;
1473
1474 MPASS(length >= 0);
1475
1476 *mc = MCHAIN_INITIALIZER(mc);
1477 flags &= (M_PKTHDR | M_EOR);
1478 progress = 0;
1479
1480 /* Loop and append maximum sized mbufs to the chain tail. */
1481 do {
1482 if (length - progress > MCLBYTES) {
1483 /*
1484 * M_NOWAIT here is intentional, it avoids blocking if
1485 * the jumbop zone is exhausted. See 796d4eb89e2c and
1486 * D26150 for more detail.
1487 */
1488 mb = m_getjcl(M_NOWAIT, type, (flags & M_PKTHDR),
1489 MJUMPAGESIZE);
1490 } else
1491 mb = NULL;
1492 if (mb == NULL) {
1493 if (length - progress >= MINCLSIZE)
1494 mb = m_getcl(how, type, (flags & M_PKTHDR));
1495 else if (flags & M_PKTHDR)
1496 mb = m_gethdr(how, type);
1497 else
1498 mb = m_get(how, type);
1499
1500 /*
1501 * Fail the whole operation if one mbuf can't be
1502 * allocated.
1503 */
1504 if (mb == NULL) {
1505 m_freem(mc_first(mc));
1506 return (ENOMEM);
1507 }
1508 }
1509
1510 progress += M_SIZE(mb);
1511 mc_append(mc, mb);
1512 /* Only valid on the first mbuf. */
1513 flags &= ~M_PKTHDR;
1514 } while (progress < length);
1515 if (flags & M_EOR)
1516 /* Only valid on the last mbuf. */
1517 mc_last(mc)->m_flags |= M_EOR;
1518
1519 return (0);
1520 }
1521
1522 /*
1523 * Allocate a given length worth of mbufs and/or clusters (whatever fits
1524 * best) and return a pointer to the top of the allocated chain. If an
1525 * existing mbuf chain is provided, then we will append the new chain
1526 * to the existing one and return a pointer to the provided mbuf.
1527 */
1528 struct mbuf *
1529 m_getm2(struct mbuf *m, int len, int how, short type, int flags)
1530 {
1531 struct mchain mc;
1532
1533 /* Packet header mbuf must be first in chain. */
1534 if (m != NULL && (flags & M_PKTHDR))
1535 flags &= ~M_PKTHDR;
1536
1537 if (__predict_false(mc_get(&mc, len, how, type, flags) != 0))
1538 return (NULL);
1539
1540 /* If mbuf was supplied, append new chain to the end of it. */
1541 if (m != NULL) {
1542 struct mbuf *mtail;
1543
1544 mtail = m_last(m);
1545 mtail->m_next = mc_first(&mc);
1546 mtail->m_flags &= ~M_EOR;
1547 } else
1548 m = mc_first(&mc);
1549
1550 return (m);
1551 }
1552
1553 /*-
1554 * Configure a provided mbuf to refer to the provided external storage
1555 * buffer and setup a reference count for said buffer.
1556 *
1557 * Arguments:
1558 * mb The existing mbuf to which to attach the provided buffer.
1559 * buf The address of the provided external storage buffer.
1560 * size The size of the provided buffer.
1561 * freef A pointer to a routine that is responsible for freeing the
1562 * provided external storage buffer.
1563 * args A pointer to an argument structure (of any type) to be passed
1564 * to the provided freef routine (may be NULL).
1565 * flags Any other flags to be passed to the provided mbuf.
1566 * type The type that the external storage buffer should be
1567 * labeled with.
1568 *
1569 * Returns:
1570 * Nothing.
1571 */
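/*
 * Hypothetical usage sketch: a driver attaching its own buffer to an
 * mbuf, with a private free routine and one opaque argument:
 *
 *	m = m_get(M_NOWAIT, MT_DATA);
 *	if (m != NULL)
 *		m_extadd(m, drv_buf, DRV_BUFSIZE, drv_buf_free,
 *		    drv_softc, NULL, 0, EXT_NET_DRV);
 *
 * where drv_buf, DRV_BUFSIZE, drv_buf_free and drv_softc are
 * driver-supplied (hypothetical) names; drv_buf_free() is later called
 * with the mbuf once the last reference to the buffer is dropped.
 */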
1572 void
1573 m_extadd(struct mbuf *mb, char *buf, u_int size, m_ext_free_t freef,
1574 void *arg1, void *arg2, int flags, int type)
1575 {
1576
1577 KASSERT(type != EXT_CLUSTER, ("%s: EXT_CLUSTER not allowed", __func__));
1578
1579 mb->m_flags |= (M_EXT | flags);
1580 mb->m_ext.ext_buf = buf;
1581 mb->m_data = mb->m_ext.ext_buf;
1582 mb->m_ext.ext_size = size;
1583 mb->m_ext.ext_free = freef;
1584 mb->m_ext.ext_arg1 = arg1;
1585 mb->m_ext.ext_arg2 = arg2;
1586 mb->m_ext.ext_type = type;
1587
1588 if (type != EXT_EXTREF) {
1589 mb->m_ext.ext_count = 1;
1590 mb->m_ext.ext_flags = EXT_FLAG_EMBREF;
1591 } else
1592 mb->m_ext.ext_flags = 0;
1593 }
1594
1595 /*
1596 * Free an entire chain of mbufs and associated external buffers, if
1597 * applicable.
1598 */
1599 void
1600 m_freem(struct mbuf *mb)
1601 {
1602
1603 MBUF_PROBE1(m__freem, mb);
1604 while (mb != NULL)
1605 mb = m_free(mb);
1606 }
1607
1608 /*
1609 * Free an entire chain of mbufs and associated external buffers, following
1610 * both m_next and m_nextpkt linkage.
1611 * Note: doesn't support NULL argument.
1612 */
1613 void
1614 m_freemp(struct mbuf *m)
1615 {
1616 struct mbuf *n;
1617
1618 MBUF_PROBE1(m__freemp, m);
1619 do {
1620 n = m->m_nextpkt;
1621 while (m != NULL)
1622 m = m_free(m);
1623 m = n;
1624 } while (m != NULL);
1625 }
1626
1627 /*
1628 * Temporary primitive to allow freeing without going through m_free.
1629 */
1630 void
1631 m_free_raw(struct mbuf *mb)
1632 {
1633
1634 uma_zfree(zone_mbuf, mb);
1635 }
1636
1637 int
1638 m_snd_tag_alloc(struct ifnet *ifp, union if_snd_tag_alloc_params *params,
1639 struct m_snd_tag **mstp)
1640 {
1641
1642 return (if_snd_tag_alloc(ifp, params, mstp));
1643 }
1644
1645 void
1646 m_snd_tag_init(struct m_snd_tag *mst, struct ifnet *ifp,
1647 const struct if_snd_tag_sw *sw)
1648 {
1649
1650 if_ref(ifp);
1651 mst->ifp = ifp;
1652 refcount_init(&mst->refcount, 1);
1653 mst->sw = sw;
1654 counter_u64_add(snd_tag_count, 1);
1655 }
1656
1657 void
1658 m_snd_tag_destroy(struct m_snd_tag *mst)
1659 {
1660 struct ifnet *ifp;
1661
1662 ifp = mst->ifp;
1663 mst->sw->snd_tag_free(mst);
1664 if_rele(ifp);
1665 counter_u64_add(snd_tag_count, -1);
1666 }
1667
1668 void
1669 m_rcvif_serialize(struct mbuf *m)
1670 {
1671 u_short idx, gen;
1672
1673 M_ASSERTPKTHDR(m);
1674 idx = if_getindex(m->m_pkthdr.rcvif);
1675 gen = if_getidxgen(m->m_pkthdr.rcvif);
1676 m->m_pkthdr.rcvidx = idx;
1677 m->m_pkthdr.rcvgen = gen;
1678 if (__predict_false(m->m_pkthdr.leaf_rcvif != NULL)) {
1679 idx = if_getindex(m->m_pkthdr.leaf_rcvif);
1680 gen = if_getidxgen(m->m_pkthdr.leaf_rcvif);
1681 } else {
1682 idx = -1;
1683 gen = 0;
1684 }
1685 m->m_pkthdr.leaf_rcvidx = idx;
1686 m->m_pkthdr.leaf_rcvgen = gen;
1687 }
1688
1689 struct ifnet *
1690 m_rcvif_restore(struct mbuf *m)
1691 {
1692 struct ifnet *ifp, *leaf_ifp;
1693
1694 M_ASSERTPKTHDR(m);
1695 NET_EPOCH_ASSERT();
1696
1697 ifp = ifnet_byindexgen(m->m_pkthdr.rcvidx, m->m_pkthdr.rcvgen);
1698 if (ifp == NULL || (if_getflags(ifp) & IFF_DYING))
1699 return (NULL);
1700
1701 if (__predict_true(m->m_pkthdr.leaf_rcvidx == (u_short)-1)) {
1702 leaf_ifp = NULL;
1703 } else {
1704 leaf_ifp = ifnet_byindexgen(m->m_pkthdr.leaf_rcvidx,
1705 m->m_pkthdr.leaf_rcvgen);
1706 if (__predict_false(leaf_ifp != NULL && (if_getflags(leaf_ifp) & IFF_DYING)))
1707 leaf_ifp = NULL;
1708 }
1709
1710 m->m_pkthdr.leaf_rcvif = leaf_ifp;
1711 m->m_pkthdr.rcvif = ifp;
1712
1713 return (ifp);
1714 }
1715
1716 /*
1717 * Allocate an mbuf with anonymous external pages.
1718 */
1719 struct mbuf *
1720 mb_alloc_ext_plus_pages(int len, int how)
1721 {
1722 struct mbuf *m;
1723 vm_page_t pg;
1724 int i, npgs;
1725
1726 m = mb_alloc_ext_pgs(how, mb_free_mext_pgs, 0);
1727 if (m == NULL)
1728 return (NULL);
1729 m->m_epg_flags |= EPG_FLAG_ANON;
1730 npgs = howmany(len, PAGE_SIZE);
1731 for (i = 0; i < npgs; i++) {
1732 do {
1733 pg = vm_page_alloc_noobj(VM_ALLOC_NODUMP |
1734 VM_ALLOC_WIRED);
1735 if (pg == NULL) {
1736 if (how == M_NOWAIT) {
1737 m->m_epg_npgs = i;
1738 m_free(m);
1739 return (NULL);
1740 }
1741 vm_wait(NULL);
1742 }
1743 } while (pg == NULL);
1744 m->m_epg_pa[i] = VM_PAGE_TO_PHYS(pg);
1745 }
1746 m->m_epg_npgs = npgs;
1747 return (m);
1748 }
1749
1750 /*
1751 * Copy the data in the mbuf chain to a chain of mbufs with anonymous external
1752 * unmapped pages.
1753 * len is the length of data in the input mbuf chain.
1754 * mlen is the maximum number of bytes put into each ext_page mbuf.
1755 */
1756 struct mbuf *
1757 mb_mapped_to_unmapped(struct mbuf *mp, int len, int mlen, int how,
1758 struct mbuf **mlast)
1759 {
1760 struct mbuf *m, *mout;
1761 char *pgpos, *mbpos;
1762 int i, mblen, mbufsiz, pglen, xfer;
1763
1764 if (len == 0)
1765 return (NULL);
1766 mbufsiz = min(mlen, len);
1767 m = mout = mb_alloc_ext_plus_pages(mbufsiz, how);
1768 if (m == NULL)
1769 return (m);
1770 pgpos = (char *)(void *)PHYS_TO_DMAP(m->m_epg_pa[0]);
1771 pglen = PAGE_SIZE;
1772 mblen = 0;
1773 i = 0;
1774 do {
1775 if (pglen == 0) {
1776 if (++i == m->m_epg_npgs) {
1777 m->m_epg_last_len = PAGE_SIZE;
1778 mbufsiz = min(mlen, len);
1779 m->m_next = mb_alloc_ext_plus_pages(mbufsiz,
1780 how);
1781 m = m->m_next;
1782 if (m == NULL) {
1783 m_freem(mout);
1784 return (m);
1785 }
1786 i = 0;
1787 }
1788 pgpos = (char *)(void *)PHYS_TO_DMAP(m->m_epg_pa[i]);
1789 pglen = PAGE_SIZE;
1790 }
1791 while (mblen == 0) {
1792 if (mp == NULL) {
1793 m_freem(mout);
1794 return (NULL);
1795 }
1796 KASSERT((mp->m_flags & M_EXTPG) == 0,
1797 ("mb_mapped_to_unmapped: ext_pgs input mbuf"));
1798 mbpos = mtod(mp, char *);
1799 mblen = mp->m_len;
1800 mp = mp->m_next;
1801 }
1802 xfer = min(mblen, pglen);
1803 memcpy(pgpos, mbpos, xfer);
1804 pgpos += xfer;
1805 mbpos += xfer;
1806 pglen -= xfer;
1807 mblen -= xfer;
1808 len -= xfer;
1809 m->m_len += xfer;
1810 } while (len > 0);
1811 m->m_epg_last_len = PAGE_SIZE - pglen;
1812 if (mlast != NULL)
1813 *mlast = m;
1814 return (mout);
1815 }
1816