/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2004, 2005,
 *	Bosko Milekic <bmilekic@FreeBSD.org>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_param.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/domainset.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/protosw.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/uma.h>
#include <vm/uma_dbg.h>

/*
 * In FreeBSD, Mbufs and Mbuf Clusters are allocated from UMA
 * Zones.
 *
 * Mbuf Clusters (2K, contiguous) are allocated from the Cluster
 * Zone.  The Zone can be capped at kern.ipc.nmbclusters, if the
 * administrator so desires.
 *
 * Mbufs are allocated from a UMA Master Zone called the Mbuf
 * Zone.
 *
 * Additionally, FreeBSD provides a Packet Zone, which it
 * configures as a Secondary Zone to the Mbuf Master Zone,
 * thus sharing backend Slab kegs with the Mbuf Master Zone.
 *
 * Thus common-case allocations and locking are simplified:
 *
 *  m_clget()                m_getcl()
 *    |                         |
 *    |   .------------>[(Packet Cache)]    m_get(), m_gethdr()
 *    |   |             [     Packet   ]            |
 *  [(Cluster Cache)]   [    Secondary ]   [ (Mbuf Cache)     ]
 *  [  Cluster Zone  ]  [     Zone     ]   [ Mbuf Master Zone ]
 *        |                       \________         |
 *  [ Cluster Keg   ]                      \       /
 *        |                           [ Mbuf Keg   ]
 *  [ Cluster Slabs ]                        |
 *        |                           [ Mbuf Slabs ]
 *         \____________(VM)_________________/
 *
 *
 * Whenever an object is allocated with uma_zalloc() out of
 * one of the Zones its _ctor_ function is executed.  Similarly,
 * whenever an object is freed with uma_zfree(), its _dtor_
 * function is executed.
 *
 * Caches are per-CPU and are filled from the Master Zone.
 *
 * Whenever an object is allocated from the underlying global
 * memory pool it gets pre-initialized with the _zinit_ functions.
 * When the Kegs are overfull, objects get decommissioned with the
 * _zfini_ functions and freed back to the global memory pool.
 *
 */
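/*
 * Illustrative sketch (not compiled): the common allocation paths in the
 * diagram above map onto the mbuf(9) KPI as follows.  A caller that needs
 * an mbuf with a 2K cluster already attached is served from the Packet
 * Zone's per-CPU caches in the common case:
 *
 *	struct mbuf *m;
 *
 *	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *
 * whereas a bare mbuf comes from the Mbuf Master Zone, and a cluster may
 * be attached to it afterwards via the Cluster Zone:
 *
 *	m = m_gethdr(M_NOWAIT, MT_DATA);
 *	if (m != NULL && m_clget(m, M_NOWAIT) == 0)
 *		m = m_free(m);
 *
 * See the "Official mbuf(9) allocation KPI" summary further below.
 */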
int nmbufs;			/* limits number of mbufs */
int nmbclusters;		/* limits number of mbuf clusters */
int nmbjumbop;			/* limits number of page size jumbo clusters */
int nmbjumbo9;			/* limits number of 9k jumbo clusters */
int nmbjumbo16;			/* limits number of 16k jumbo clusters */

static quad_t maxmbufmem;	/* overall real memory limit for all mbufs */

SYSCTL_QUAD(_kern_ipc, OID_AUTO, maxmbufmem, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &maxmbufmem, 0,
    "Maximum real memory allocatable to various mbuf types");

/*
 * tunable_mbinit() has to be run before any mbuf allocations are done.
 */
static void
tunable_mbinit(void *dummy)
{
	quad_t realmem;

	/*
	 * The default limit for all mbuf related memory is 1/2 of all
	 * available kernel memory (physical or kmem).
	 * At most it can be 3/4 of available kernel memory.
	 */
	realmem = qmin((quad_t)physmem * PAGE_SIZE, vm_kmem_size);
	maxmbufmem = realmem / 2;
	TUNABLE_QUAD_FETCH("kern.ipc.maxmbufmem", &maxmbufmem);
	if (maxmbufmem > realmem / 4 * 3)
		maxmbufmem = realmem / 4 * 3;

	TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
	if (nmbclusters == 0)
		nmbclusters = maxmbufmem / MCLBYTES / 4;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbop", &nmbjumbop);
	if (nmbjumbop == 0)
		nmbjumbop = maxmbufmem / MJUMPAGESIZE / 4;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbo9", &nmbjumbo9);
	if (nmbjumbo9 == 0)
		nmbjumbo9 = maxmbufmem / MJUM9BYTES / 6;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbo16", &nmbjumbo16);
	if (nmbjumbo16 == 0)
		nmbjumbo16 = maxmbufmem / MJUM16BYTES / 6;

	/*
	 * We need at least as many mbufs as we have clusters of
	 * the various types added together.
	 */
	TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);
	if (nmbufs < nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16)
		nmbufs = lmax(maxmbufmem / MSIZE / 5,
		    nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16);
}
SYSINIT(tunable_mbinit, SI_SUB_KMEM, SI_ORDER_MIDDLE, tunable_mbinit, NULL);
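/*
 * Worked example of the defaults computed above (illustrative numbers,
 * assuming vm_kmem_size is at least as large as physical memory): on a
 * machine with 16 GB of RAM, realmem is 16 GB, so maxmbufmem defaults to
 * 8 GB.  With MCLBYTES = 2048, nmbclusters then defaults to
 * 8 GB / 2048 / 4 = 1048576 clusters; nmbjumbop, nmbjumbo9 and nmbjumbo16
 * follow the same pattern with their respective cluster sizes and
 * divisors.  Any of these may be overridden at boot time, e.g. in
 * /boot/loader.conf:
 *
 *	kern.ipc.maxmbufmem="4294967296"
 *	kern.ipc.nmbclusters="262144"
 */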
static int
sysctl_nmbclusters(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbclusters;

	newnmbclusters = nmbclusters;
	error = sysctl_handle_int(oidp, &newnmbclusters, 0, req);
	if (error == 0 && req->newptr && newnmbclusters != nmbclusters) {
		if (newnmbclusters > nmbclusters &&
		    nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
			nmbclusters = newnmbclusters;
			nmbclusters = uma_zone_set_max(zone_clust, nmbclusters);
			EVENTHANDLER_INVOKE(nmbclusters_change);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbclusters, CTLTYPE_INT|CTLFLAG_RW,
    &nmbclusters, 0, sysctl_nmbclusters, "IU",
    "Maximum number of mbuf clusters allowed");

static int
sysctl_nmbjumbop(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbjumbop;

	newnmbjumbop = nmbjumbop;
	error = sysctl_handle_int(oidp, &newnmbjumbop, 0, req);
	if (error == 0 && req->newptr && newnmbjumbop != nmbjumbop) {
		if (newnmbjumbop > nmbjumbop &&
		    nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
			nmbjumbop = newnmbjumbop;
			nmbjumbop = uma_zone_set_max(zone_jumbop, nmbjumbop);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbop, CTLTYPE_INT|CTLFLAG_RW,
    &nmbjumbop, 0, sysctl_nmbjumbop, "IU",
    "Maximum number of mbuf page size jumbo clusters allowed");

static int
sysctl_nmbjumbo9(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbjumbo9;

	newnmbjumbo9 = nmbjumbo9;
	error = sysctl_handle_int(oidp, &newnmbjumbo9, 0, req);
	if (error == 0 && req->newptr && newnmbjumbo9 != nmbjumbo9) {
		if (newnmbjumbo9 > nmbjumbo9 &&
		    nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
			nmbjumbo9 = newnmbjumbo9;
			nmbjumbo9 = uma_zone_set_max(zone_jumbo9, nmbjumbo9);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbo9, CTLTYPE_INT|CTLFLAG_RW,
    &nmbjumbo9, 0, sysctl_nmbjumbo9, "IU",
    "Maximum number of mbuf 9k jumbo clusters allowed");

static int
sysctl_nmbjumbo16(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbjumbo16;

	newnmbjumbo16 = nmbjumbo16;
	error = sysctl_handle_int(oidp, &newnmbjumbo16, 0, req);
	if (error == 0 && req->newptr && newnmbjumbo16 != nmbjumbo16) {
		if (newnmbjumbo16 > nmbjumbo16 &&
		    nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
			nmbjumbo16 = newnmbjumbo16;
			nmbjumbo16 = uma_zone_set_max(zone_jumbo16, nmbjumbo16);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbo16, CTLTYPE_INT|CTLFLAG_RW,
    &nmbjumbo16, 0, sysctl_nmbjumbo16, "IU",
    "Maximum number of mbuf 16k jumbo clusters allowed");
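/*
 * Example of runtime tuning through the handlers above (illustrative):
 * the cluster limits are CTLFLAG_RW, but a new value is accepted only if
 * it raises the limit and nmbufs already covers the sum of the current
 * cluster limits, e.g.:
 *
 *	# sysctl kern.ipc.nmbclusters=524288
 *
 * Anything else is rejected with EINVAL.  Raising nmbclusters also fires
 * the nmbclusters_change event handler.
 */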
static int
sysctl_nmbufs(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbufs;

	newnmbufs = nmbufs;
	error = sysctl_handle_int(oidp, &newnmbufs, 0, req);
	if (error == 0 && req->newptr && newnmbufs != nmbufs) {
		if (newnmbufs > nmbufs) {
			nmbufs = newnmbufs;
			nmbufs = uma_zone_set_max(zone_mbuf, nmbufs);
			EVENTHANDLER_INVOKE(nmbufs_change);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbufs, CTLTYPE_INT|CTLFLAG_RW,
    &nmbufs, 0, sysctl_nmbufs, "IU",
    "Maximum number of mbufs allowed");

/*
 * Zones from which we allocate.
 */
uma_zone_t	zone_mbuf;
uma_zone_t	zone_clust;
uma_zone_t	zone_pack;
uma_zone_t	zone_jumbop;
uma_zone_t	zone_jumbo9;
uma_zone_t	zone_jumbo16;

/*
 * Local prototypes.
 */
static int	mb_ctor_mbuf(void *, int, void *, int);
static int	mb_ctor_clust(void *, int, void *, int);
static int	mb_ctor_pack(void *, int, void *, int);
static void	mb_dtor_mbuf(void *, int, void *);
static void	mb_dtor_pack(void *, int, void *);
static int	mb_zinit_pack(void *, int, int);
static void	mb_zfini_pack(void *, int);
static void	mb_reclaim(uma_zone_t, int);
static void    *mbuf_jumbo_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);

/* Ensure that MSIZE is a power of 2. */
CTASSERT((((MSIZE - 1) ^ MSIZE) + 1) >> 1 == MSIZE);

/*
 * Initialize FreeBSD Network buffer allocation.
 */
static void
mbuf_init(void *dummy)
{

	/*
	 * Configure UMA zones for Mbufs, Clusters, and Packets.
	 */
	zone_mbuf = uma_zcreate(MBUF_MEM_NAME, MSIZE,
	    mb_ctor_mbuf, mb_dtor_mbuf,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    MSIZE - 1, UMA_ZONE_MAXBUCKET);
	if (nmbufs > 0)
		nmbufs = uma_zone_set_max(zone_mbuf, nmbufs);
	uma_zone_set_warning(zone_mbuf, "kern.ipc.nmbufs limit reached");
	uma_zone_set_maxaction(zone_mbuf, mb_reclaim);

	zone_clust = uma_zcreate(MBUF_CLUSTER_MEM_NAME, MCLBYTES,
	    mb_ctor_clust,
#ifdef INVARIANTS
	    trash_dtor, trash_init, trash_fini,
#else
	    NULL, NULL, NULL,
#endif
	    UMA_ALIGN_PTR, 0);
	if (nmbclusters > 0)
		nmbclusters = uma_zone_set_max(zone_clust, nmbclusters);
	uma_zone_set_warning(zone_clust, "kern.ipc.nmbclusters limit reached");
	uma_zone_set_maxaction(zone_clust, mb_reclaim);

	zone_pack = uma_zsecond_create(MBUF_PACKET_MEM_NAME, mb_ctor_pack,
	    mb_dtor_pack, mb_zinit_pack, mb_zfini_pack, zone_mbuf);

	/* Make jumbo frame zone too.  Page size, 9k and 16k. */
	zone_jumbop = uma_zcreate(MBUF_JUMBOP_MEM_NAME, MJUMPAGESIZE,
	    mb_ctor_clust,
#ifdef INVARIANTS
	    trash_dtor, trash_init, trash_fini,
#else
	    NULL, NULL, NULL,
#endif
	    UMA_ALIGN_PTR, 0);
	if (nmbjumbop > 0)
		nmbjumbop = uma_zone_set_max(zone_jumbop, nmbjumbop);
	uma_zone_set_warning(zone_jumbop, "kern.ipc.nmbjumbop limit reached");
	uma_zone_set_maxaction(zone_jumbop, mb_reclaim);

	zone_jumbo9 = uma_zcreate(MBUF_JUMBO9_MEM_NAME, MJUM9BYTES,
	    mb_ctor_clust,
#ifdef INVARIANTS
	    trash_dtor, trash_init, trash_fini,
#else
	    NULL, NULL, NULL,
#endif
	    UMA_ALIGN_PTR, 0);
	uma_zone_set_allocf(zone_jumbo9, mbuf_jumbo_alloc);
	if (nmbjumbo9 > 0)
		nmbjumbo9 = uma_zone_set_max(zone_jumbo9, nmbjumbo9);
	uma_zone_set_warning(zone_jumbo9, "kern.ipc.nmbjumbo9 limit reached");
	uma_zone_set_maxaction(zone_jumbo9, mb_reclaim);

	zone_jumbo16 = uma_zcreate(MBUF_JUMBO16_MEM_NAME, MJUM16BYTES,
	    mb_ctor_clust,
#ifdef INVARIANTS
	    trash_dtor, trash_init, trash_fini,
#else
	    NULL, NULL, NULL,
#endif
	    UMA_ALIGN_PTR, 0);
	uma_zone_set_allocf(zone_jumbo16, mbuf_jumbo_alloc);
	if (nmbjumbo16 > 0)
		nmbjumbo16 = uma_zone_set_max(zone_jumbo16, nmbjumbo16);
	uma_zone_set_warning(zone_jumbo16, "kern.ipc.nmbjumbo16 limit reached");
	uma_zone_set_maxaction(zone_jumbo16, mb_reclaim);

	/*
	 * Hook event handler for low-memory situation, used to
	 * drain protocols and push data back to the caches (UMA
	 * later pushes it back to VM).
	 */
	EVENTHANDLER_REGISTER(vm_lowmem, mb_reclaim, NULL,
	    EVENTHANDLER_PRI_FIRST);
}
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbuf_init, NULL);

#ifdef NETDUMP
/*
 * netdump makes use of a pre-allocated pool of mbufs and clusters.  When
 * netdump is configured, we initialize a set of UMA cache zones which return
 * items from this pool.  At panic-time, the regular UMA zone pointers are
 * overwritten with those of the cache zones so that drivers may allocate and
 * free mbufs and clusters without attempting to allocate physical memory.
 *
 * We keep mbufs and clusters in a pair of mbuf queues.  In particular, for
 * the purpose of caching clusters, we treat them as mbufs.
 */
static struct mbufq nd_mbufq =
    { STAILQ_HEAD_INITIALIZER(nd_mbufq.mq_head), 0, INT_MAX };
static struct mbufq nd_clustq =
    { STAILQ_HEAD_INITIALIZER(nd_clustq.mq_head), 0, INT_MAX };

static int nd_clsize;
static uma_zone_t nd_zone_mbuf;
static uma_zone_t nd_zone_clust;
static uma_zone_t nd_zone_pack;

static int
nd_buf_import(void *arg, void **store, int count, int domain __unused,
    int flags)
{
	struct mbufq *q;
	struct mbuf *m;
	int i;

	q = arg;

	for (i = 0; i < count; i++) {
		m = mbufq_dequeue(q);
		if (m == NULL)
			break;
		trash_init(m, q == &nd_mbufq ? MSIZE : nd_clsize, flags);
		store[i] = m;
	}
	KASSERT((flags & M_WAITOK) == 0 || i == count,
	    ("%s: ran out of pre-allocated mbufs", __func__));
	return (i);
}

static void
nd_buf_release(void *arg, void **store, int count)
{
	struct mbufq *q;
	struct mbuf *m;
	int i;

	q = arg;

	for (i = 0; i < count; i++) {
		m = store[i];
		(void)mbufq_enqueue(q, m);
	}
}

static int
nd_pack_import(void *arg __unused, void **store, int count, int domain __unused,
    int flags __unused)
{
	struct mbuf *m;
	void *clust;
	int i;

	for (i = 0; i < count; i++) {
		m = m_get(MT_DATA, M_NOWAIT);
		if (m == NULL)
			break;
		clust = uma_zalloc(nd_zone_clust, M_NOWAIT);
		if (clust == NULL) {
			m_free(m);
			break;
		}
		mb_ctor_clust(clust, nd_clsize, m, 0);
		store[i] = m;
	}
	KASSERT((flags & M_WAITOK) == 0 || i == count,
	    ("%s: ran out of pre-allocated mbufs", __func__));
	return (i);
}

static void
nd_pack_release(void *arg __unused, void **store, int count)
{
	struct mbuf *m;
	void *clust;
	int i;

	for (i = 0; i < count; i++) {
		m = store[i];
		clust = m->m_ext.ext_buf;
		uma_zfree(nd_zone_clust, clust);
		uma_zfree(nd_zone_mbuf, m);
	}
}

/*
 * Free the pre-allocated mbufs and clusters reserved for netdump, and destroy
 * the corresponding UMA cache zones.
 */
void
netdump_mbuf_drain(void)
{
	struct mbuf *m;
	void *item;

	if (nd_zone_mbuf != NULL) {
		uma_zdestroy(nd_zone_mbuf);
		nd_zone_mbuf = NULL;
	}
	if (nd_zone_clust != NULL) {
		uma_zdestroy(nd_zone_clust);
		nd_zone_clust = NULL;
	}
	if (nd_zone_pack != NULL) {
		uma_zdestroy(nd_zone_pack);
		nd_zone_pack = NULL;
	}

	while ((m = mbufq_dequeue(&nd_mbufq)) != NULL)
		m_free(m);
	while ((item = mbufq_dequeue(&nd_clustq)) != NULL)
		uma_zfree(m_getzone(nd_clsize), item);
}

/*
 * Callback invoked immediately prior to starting a netdump.
 */
void
netdump_mbuf_dump(void)
{

	/*
	 * All cluster zones return buffers of the size requested by the
	 * drivers.  It's up to the driver to reinitialize the zones if the
	 * MTU of a netdump-enabled interface changes.
	 */
	printf("netdump: overwriting mbuf zone pointers\n");
	zone_mbuf = nd_zone_mbuf;
	zone_clust = nd_zone_clust;
	zone_pack = nd_zone_pack;
	zone_jumbop = nd_zone_clust;
	zone_jumbo9 = nd_zone_clust;
	zone_jumbo16 = nd_zone_clust;
}

/*
 * Reinitialize the netdump mbuf+cluster pool and cache zones.
 */
void
netdump_mbuf_reinit(int nmbuf, int nclust, int clsize)
{
	struct mbuf *m;
	void *item;

	netdump_mbuf_drain();

	nd_clsize = clsize;

	nd_zone_mbuf = uma_zcache_create("netdump_" MBUF_MEM_NAME,
	    MSIZE, mb_ctor_mbuf, mb_dtor_mbuf,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    nd_buf_import, nd_buf_release,
	    &nd_mbufq, UMA_ZONE_NOBUCKET);

	nd_zone_clust = uma_zcache_create("netdump_" MBUF_CLUSTER_MEM_NAME,
	    clsize, mb_ctor_clust,
#ifdef INVARIANTS
	    trash_dtor, trash_init, trash_fini,
#else
	    NULL, NULL, NULL,
#endif
	    nd_buf_import, nd_buf_release,
	    &nd_clustq, UMA_ZONE_NOBUCKET);

	nd_zone_pack = uma_zcache_create("netdump_" MBUF_PACKET_MEM_NAME,
	    MCLBYTES, mb_ctor_pack, mb_dtor_pack, NULL, NULL,
	    nd_pack_import, nd_pack_release,
	    NULL, UMA_ZONE_NOBUCKET);

	while (nmbuf-- > 0) {
		m = m_get(MT_DATA, M_WAITOK);
		uma_zfree(nd_zone_mbuf, m);
	}
	while (nclust-- > 0) {
		item = uma_zalloc(m_getzone(nd_clsize), M_WAITOK);
		uma_zfree(nd_zone_clust, item);
	}
}
#endif /* NETDUMP */

/*
 * UMA backend page allocator for the jumbo frame zones.
 *
 * Allocates kernel virtual memory that is backed by contiguous physical
 * pages.
 */
static void *
mbuf_jumbo_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags,
    int wait)
{

	/* Inform UMA that this allocator uses kernel_map/object. */
	*flags = UMA_SLAB_KERNEL;
	return ((void *)kmem_alloc_contig_domainset(DOMAINSET_FIXED(domain),
	    bytes, wait, (vm_paddr_t)0, ~(vm_paddr_t)0, 1, 0,
	    VM_MEMATTR_DEFAULT));
}

/*
 * Constructor for Mbuf master zone.
 *
 * The 'arg' pointer points to a mb_args structure which
 * contains call-specific information required to support the
 * mbuf allocation API.  See mbuf.h.
 */
static int
mb_ctor_mbuf(void *mem, int size, void *arg, int how)
{
	struct mbuf *m;
	struct mb_args *args;
	int error;
	int flags;
	short type;

#ifdef INVARIANTS
	trash_ctor(mem, size, arg, how);
#endif
	args = (struct mb_args *)arg;
	type = args->type;

	/*
	 * The mbuf is initialized later.  The caller has the
	 * responsibility to set up any MAC labels too.
	 */
	if (type == MT_NOINIT)
		return (0);

	m = (struct mbuf *)mem;
	flags = args->flags;
	MPASS((flags & M_NOFREE) == 0);

	error = m_init(m, how, type, flags);

	return (error);
}

/*
 * The Mbuf master zone destructor.
 */
static void
mb_dtor_mbuf(void *mem, int size, void *arg)
{
	struct mbuf *m;
	unsigned long flags;

	m = (struct mbuf *)mem;
	flags = (unsigned long)arg;

	KASSERT((m->m_flags & M_NOFREE) == 0, ("%s: M_NOFREE set", __func__));
	if (!(flags & MB_DTOR_SKIP) && (m->m_flags & M_PKTHDR) && !SLIST_EMPTY(&m->m_pkthdr.tags))
		m_tag_delete_chain(m, NULL);
#ifdef INVARIANTS
	trash_dtor(mem, size, arg);
#endif
}

/*
 * The Mbuf Packet zone destructor.
 */
static void
mb_dtor_pack(void *mem, int size, void *arg)
{
	struct mbuf *m;

	m = (struct mbuf *)mem;
	if ((m->m_flags & M_PKTHDR) != 0)
		m_tag_delete_chain(m, NULL);

	/* Make sure we've got a clean cluster back. */
	KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
	KASSERT(m->m_ext.ext_buf != NULL, ("%s: ext_buf == NULL", __func__));
	KASSERT(m->m_ext.ext_free == NULL, ("%s: ext_free != NULL", __func__));
	KASSERT(m->m_ext.ext_arg1 == NULL, ("%s: ext_arg1 != NULL", __func__));
	KASSERT(m->m_ext.ext_arg2 == NULL, ("%s: ext_arg2 != NULL", __func__));
	KASSERT(m->m_ext.ext_size == MCLBYTES, ("%s: ext_size != MCLBYTES", __func__));
	KASSERT(m->m_ext.ext_type == EXT_PACKET, ("%s: ext_type != EXT_PACKET", __func__));
#ifdef INVARIANTS
	trash_dtor(m->m_ext.ext_buf, MCLBYTES, arg);
#endif
	/*
	 * If there are processes blocked on zone_clust, waiting for pages
	 * to be freed up, cause them to be woken up by draining the
	 * packet zone.  We are exposed to a race here (in the check for
	 * the UMA_ZFLAG_FULL) where we might miss the flag set, but that
	 * is deliberate.  We don't want to acquire the zone lock for every
	 * mbuf free.
	 */
	if (uma_zone_exhausted_nolock(zone_clust))
		zone_drain(zone_pack);
}

/*
 * The Cluster and Jumbo[PAGESIZE|9|16] zone constructor.
 *
 * Here the 'arg' pointer points to the Mbuf which we
 * are configuring cluster storage for.  If 'arg' is
 * empty we allocate just the cluster without setting
 * the mbuf to it.  See mbuf.h.
 */
static int
mb_ctor_clust(void *mem, int size, void *arg, int how)
{
	struct mbuf *m;

#ifdef INVARIANTS
	trash_ctor(mem, size, arg, how);
#endif
	m = (struct mbuf *)arg;
	if (m != NULL) {
		m->m_ext.ext_buf = (char *)mem;
		m->m_data = m->m_ext.ext_buf;
		m->m_flags |= M_EXT;
		m->m_ext.ext_free = NULL;
		m->m_ext.ext_arg1 = NULL;
		m->m_ext.ext_arg2 = NULL;
		m->m_ext.ext_size = size;
		m->m_ext.ext_type = m_gettype(size);
		m->m_ext.ext_flags = EXT_FLAG_EMBREF;
		m->m_ext.ext_count = 1;
	}

	return (0);
}

/*
 * The Packet secondary zone's init routine, executed on the
 * object's transition from mbuf keg slab to zone cache.
 */
static int
mb_zinit_pack(void *mem, int size, int how)
{
	struct mbuf *m;

	m = (struct mbuf *)mem;		/* m is virgin. */
	if (uma_zalloc_arg(zone_clust, m, how) == NULL ||
	    m->m_ext.ext_buf == NULL)
		return (ENOMEM);
	m->m_ext.ext_type = EXT_PACKET;	/* Override. */
#ifdef INVARIANTS
	trash_init(m->m_ext.ext_buf, MCLBYTES, how);
#endif
	return (0);
}

/*
 * The Packet secondary zone's fini routine, executed on the
 * object's transition from zone cache to keg slab.
 */
static void
mb_zfini_pack(void *mem, int size)
{
	struct mbuf *m;

	m = (struct mbuf *)mem;
#ifdef INVARIANTS
	trash_fini(m->m_ext.ext_buf, MCLBYTES);
#endif
	uma_zfree_arg(zone_clust, m->m_ext.ext_buf, NULL);
#ifdef INVARIANTS
	trash_dtor(mem, size, NULL);
#endif
}

/*
 * The "packet" keg constructor.
 */
static int
mb_ctor_pack(void *mem, int size, void *arg, int how)
{
	struct mbuf *m;
	struct mb_args *args;
	int error, flags;
	short type;

	m = (struct mbuf *)mem;
	args = (struct mb_args *)arg;
	flags = args->flags;
	type = args->type;
	MPASS((flags & M_NOFREE) == 0);

#ifdef INVARIANTS
	trash_ctor(m->m_ext.ext_buf, MCLBYTES, arg, how);
#endif

	error = m_init(m, how, type, flags);

	/* m_ext is already initialized. */
	m->m_data = m->m_ext.ext_buf;
	m->m_flags = (flags | M_EXT);

	return (error);
}

/*
 * This is the protocol drain routine.  Called by UMA whenever any of the
 * mbuf zones is close to its limit.
 *
 * No locks should be held when this is called.  The drain routines have to
 * presently acquire some locks which raises the possibility of lock order
 * reversal.
 */
static void
mb_reclaim(uma_zone_t zone __unused, int pending __unused)
{
	struct domain *dp;
	struct protosw *pr;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK | WARN_PANIC, NULL, __func__);

	for (dp = domains; dp != NULL; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain != NULL)
				(*pr->pr_drain)();
}

/*
 * Clean up after mbufs with M_EXT storage attached to them if the
 * reference count hits 1.
 */
void
mb_free_ext(struct mbuf *m)
{
	volatile u_int *refcnt;
	struct mbuf *mref;
	int freembuf;

	KASSERT(m->m_flags & M_EXT, ("%s: M_EXT not set on %p", __func__, m));

	/* See if this is the mbuf that holds the embedded refcount. */
	if (m->m_ext.ext_flags & EXT_FLAG_EMBREF) {
		refcnt = &m->m_ext.ext_count;
		mref = m;
	} else {
		KASSERT(m->m_ext.ext_cnt != NULL,
		    ("%s: no refcounting pointer on %p", __func__, m));
		refcnt = m->m_ext.ext_cnt;
		mref = __containerof(refcnt, struct mbuf, m_ext.ext_count);
	}

	/*
	 * Check if the header is embedded in the cluster.  This matters
	 * because we must not touch any of the mbuf fields after the
	 * external storage has been freed, since the mbuf could have been
	 * embedded in it.  For now, the mbufs embedded into the cluster
	 * are always of type EXT_EXTREF, and for this type we won't free
	 * the mref.
	 */
	if (m->m_flags & M_NOFREE) {
		freembuf = 0;
		KASSERT(m->m_ext.ext_type == EXT_EXTREF ||
		    m->m_ext.ext_type == EXT_RXRING,
		    ("%s: no-free mbuf %p has wrong type", __func__, m));
	} else
		freembuf = 1;

	/* Free attached storage if this mbuf is the only reference to it. */
	if (*refcnt == 1 || atomic_fetchadd_int(refcnt, -1) == 1) {
		switch (m->m_ext.ext_type) {
		case EXT_PACKET:
			/* The packet zone is special. */
			if (*refcnt == 0)
				*refcnt = 1;
			uma_zfree(zone_pack, mref);
			break;
		case EXT_CLUSTER:
			uma_zfree(zone_clust, m->m_ext.ext_buf);
			uma_zfree(zone_mbuf, mref);
			break;
		case EXT_JUMBOP:
			uma_zfree(zone_jumbop, m->m_ext.ext_buf);
			uma_zfree(zone_mbuf, mref);
			break;
		case EXT_JUMBO9:
			uma_zfree(zone_jumbo9, m->m_ext.ext_buf);
			uma_zfree(zone_mbuf, mref);
			break;
		case EXT_JUMBO16:
			uma_zfree(zone_jumbo16, m->m_ext.ext_buf);
			uma_zfree(zone_mbuf, mref);
			break;
		case EXT_SFBUF:
		case EXT_NET_DRV:
		case EXT_MOD_TYPE:
		case EXT_DISPOSABLE:
			KASSERT(mref->m_ext.ext_free != NULL,
			    ("%s: ext_free not set", __func__));
			mref->m_ext.ext_free(mref);
			uma_zfree(zone_mbuf, mref);
			break;
		case EXT_EXTREF:
			KASSERT(m->m_ext.ext_free != NULL,
			    ("%s: ext_free not set", __func__));
			m->m_ext.ext_free(m);
			break;
		case EXT_RXRING:
			KASSERT(m->m_ext.ext_free == NULL,
			    ("%s: ext_free is set", __func__));
			break;
		default:
			KASSERT(m->m_ext.ext_type == 0,
			    ("%s: unknown ext_type", __func__));
		}
	}

	if (freembuf && m != mref)
		uma_zfree(zone_mbuf, m);
}

/*
 * Official mbuf(9) allocation KPI for stack and drivers:
 *
 * m_get()	- a single mbuf without any attachments, sys/mbuf.h.
 * m_gethdr()	- a single mbuf initialized as M_PKTHDR, sys/mbuf.h.
 * m_getcl()	- an mbuf + 2k cluster, sys/mbuf.h.
 * m_clget()	- attach cluster to already allocated mbuf.
 * m_cljget()	- attach jumbo cluster to already allocated mbuf.
 * m_get2()	- allocate minimum mbuf that would fit size argument.
 * m_getm2()	- allocate a chain of mbufs/clusters.
 * m_extadd()	- attach external cluster to mbuf.
 *
 * m_free()	- free single mbuf with its tags and ext, sys/mbuf.h.
 * m_freem()	- free chain of mbufs.
 */

int
m_clget(struct mbuf *m, int how)
{

	KASSERT((m->m_flags & M_EXT) == 0, ("%s: mbuf %p has M_EXT",
	    __func__, m));
	m->m_ext.ext_buf = (char *)NULL;
	uma_zalloc_arg(zone_clust, m, how);
	/*
	 * On a cluster allocation failure, drain the packet zone and retry;
	 * we might be able to loosen a few clusters up on the drain.
	 */
	if ((how & M_NOWAIT) && (m->m_ext.ext_buf == NULL)) {
		zone_drain(zone_pack);
		uma_zalloc_arg(zone_clust, m, how);
	}
	MBUF_PROBE2(m__clget, m, how);
	return (m->m_flags & M_EXT);
}

/*
 * m_cljget() is different from m_clget() as it can allocate clusters without
 * attaching them to an mbuf.  In that case the return value is the pointer
 * to the cluster of the requested size.  If an mbuf was specified, it gets
 * the cluster attached to it and the return value can be safely ignored.
 * For size it takes MCLBYTES, MJUMPAGESIZE, MJUM9BYTES, MJUM16BYTES.
 */
void *
m_cljget(struct mbuf *m, int how, int size)
{
	uma_zone_t zone;
	void *retval;

	if (m != NULL) {
		KASSERT((m->m_flags & M_EXT) == 0, ("%s: mbuf %p has M_EXT",
		    __func__, m));
		m->m_ext.ext_buf = NULL;
	}

	zone = m_getzone(size);
	retval = uma_zalloc_arg(zone, m, how);

	MBUF_PROBE4(m__cljget, m, how, size, retval);

	return (retval);
}

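/*
 * Illustrative sketch (not compiled): a driver filling a receive ring with
 * 9k jumbo clusters might use m_cljget() as follows, checking M_EXT rather
 * than the return value since an mbuf is supplied:
 *
 *	m = m_gethdr(M_NOWAIT, MT_DATA);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	m_cljget(m, M_NOWAIT, MJUM9BYTES);
 *	if ((m->m_flags & M_EXT) == 0) {
 *		m_freem(m);
 *		return (ENOBUFS);
 *	}
 *
 * With m == NULL, m_cljget() instead returns a bare cluster pointer from
 * the zone selected by m_getzone(size).
 */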
/*
 * m_get2() allocates minimum mbuf that would fit "size" argument.
 */
struct mbuf *
m_get2(int size, int how, short type, int flags)
{
	struct mb_args args;
	struct mbuf *m, *n;

	args.flags = flags;
	args.type = type;

	if (size <= MHLEN || (size <= MLEN && (flags & M_PKTHDR) == 0))
		return (uma_zalloc_arg(zone_mbuf, &args, how));
	if (size <= MCLBYTES)
		return (uma_zalloc_arg(zone_pack, &args, how));

	if (size > MJUMPAGESIZE)
		return (NULL);

	m = uma_zalloc_arg(zone_mbuf, &args, how);
	if (m == NULL)
		return (NULL);

	n = uma_zalloc_arg(zone_jumbop, m, how);
	if (n == NULL) {
		uma_zfree(zone_mbuf, m);
		return (NULL);
	}

	return (m);
}

/*
 * m_getjcl() returns an mbuf with a cluster of the specified size attached.
 * For size it takes MCLBYTES, MJUMPAGESIZE, MJUM9BYTES, MJUM16BYTES.
 */
struct mbuf *
m_getjcl(int how, short type, int flags, int size)
{
	struct mb_args args;
	struct mbuf *m, *n;
	uma_zone_t zone;

	if (size == MCLBYTES)
		return m_getcl(how, type, flags);

	args.flags = flags;
	args.type = type;

	m = uma_zalloc_arg(zone_mbuf, &args, how);
	if (m == NULL)
		return (NULL);

	zone = m_getzone(size);
	n = uma_zalloc_arg(zone, m, how);
	if (n == NULL) {
		uma_zfree(zone_mbuf, m);
		return (NULL);
	}
	return (m);
}

/*
 * Allocate a given length worth of mbufs and/or clusters (whatever fits
 * best) and return a pointer to the top of the allocated chain.  If an
 * existing mbuf chain is provided, then we will append the new chain
 * to the existing one and return a pointer to the provided mbuf.
 */
struct mbuf *
m_getm2(struct mbuf *m, int len, int how, short type, int flags)
{
	struct mbuf *mb, *nm = NULL, *mtail = NULL;

	KASSERT(len >= 0, ("%s: len is < 0", __func__));

	/* Validate flags. */
	flags &= (M_PKTHDR | M_EOR);

	/* Packet header mbuf must be first in chain. */
	if ((flags & M_PKTHDR) && m != NULL)
		flags &= ~M_PKTHDR;

	/* Loop and append maximum sized mbufs to the chain tail. */
	while (len > 0) {
		if (len > MCLBYTES)
			mb = m_getjcl(how, type, (flags & M_PKTHDR),
			    MJUMPAGESIZE);
		else if (len >= MINCLSIZE)
			mb = m_getcl(how, type, (flags & M_PKTHDR));
		else if (flags & M_PKTHDR)
			mb = m_gethdr(how, type);
		else
			mb = m_get(how, type);

		/* Fail the whole operation if one mbuf can't be allocated. */
		if (mb == NULL) {
			if (nm != NULL)
				m_freem(nm);
			return (NULL);
		}

		/* Book keeping. */
		len -= M_SIZE(mb);
		if (mtail != NULL)
			mtail->m_next = mb;
		else
			nm = mb;
		mtail = mb;
		flags &= ~M_PKTHDR;	/* Only valid on the first mbuf. */
	}
	if (flags & M_EOR)
		mtail->m_flags |= M_EOR;	/* Only valid on the last mbuf. */

	/* If mbuf was supplied, append new chain to the end of it. */
	if (m != NULL) {
		for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next)
			;
		mtail->m_next = nm;
		mtail->m_flags &= ~M_EOR;
	} else
		m = nm;

	return (m);
}

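/*
 * Illustrative sketch (not compiled; payload and payload_len are
 * hypothetical caller-supplied data): building a chain large enough for a
 * given payload with m_getm2(), then copying the data into it:
 *
 *	m = m_getm2(NULL, payload_len, M_WAITOK, MT_DATA, M_PKTHDR);
 *	m_copyback(m, 0, payload_len, payload);
 *
 * With M_WAITOK the allocation sleeps rather than fails; with M_NOWAIT
 * the return value must be checked for NULL.
 */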
/*-
 * Configure a provided mbuf to refer to the provided external storage
 * buffer and setup a reference count for said buffer.
 *
 * Arguments:
 *    mb     The existing mbuf to which to attach the provided buffer.
 *    buf    The address of the provided external storage buffer.
 *    size   The size of the provided buffer.
 *    freef  A pointer to a routine that is responsible for freeing the
 *           provided external storage buffer.
 *    args   A pointer to an argument structure (of any type) to be passed
 *           to the provided freef routine (may be NULL).
 *    flags  Any other flags to be passed to the provided mbuf.
 *    type   The type that the external storage buffer should be
 *           labeled with.
 *
 * Returns:
 *    Nothing.
 */
void
m_extadd(struct mbuf *mb, char *buf, u_int size, m_ext_free_t freef,
    void *arg1, void *arg2, int flags, int type)
{

	KASSERT(type != EXT_CLUSTER, ("%s: EXT_CLUSTER not allowed", __func__));

	mb->m_flags |= (M_EXT | flags);
	mb->m_ext.ext_buf = buf;
	mb->m_data = mb->m_ext.ext_buf;
	mb->m_ext.ext_size = size;
	mb->m_ext.ext_free = freef;
	mb->m_ext.ext_arg1 = arg1;
	mb->m_ext.ext_arg2 = arg2;
	mb->m_ext.ext_type = type;

	if (type != EXT_EXTREF) {
		mb->m_ext.ext_count = 1;
		mb->m_ext.ext_flags = EXT_FLAG_EMBREF;
	} else
		mb->m_ext.ext_flags = 0;
}

/*
 * Free an entire chain of mbufs and associated external buffers, if
 * applicable.
 */
void
m_freem(struct mbuf *mb)
{

	MBUF_PROBE1(m__freem, mb);
	while (mb != NULL)
		mb = m_free(mb);
}
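
/*
 * Illustrative sketch (not compiled; the mydrv_* names, buf, bufsize and
 * drvbuf are hypothetical): a driver attaching its own buffer to an mbuf
 * with m_extadd() supplies a free routine of type m_ext_free_t that
 * releases the buffer once the last reference goes away:
 *
 *	static void
 *	mydrv_ext_free(struct mbuf *m)
 *	{
 *
 *		mydrv_buf_put(m->m_ext.ext_arg1);
 *	}
 *
 *	m = m_gethdr(M_NOWAIT, MT_DATA);
 *	if (m != NULL)
 *		m_extadd(m, buf, bufsize, mydrv_ext_free, drvbuf, NULL,
 *		    0, EXT_NET_DRV);
 *
 * For EXT_NET_DRV (and the other driver-owned types handled in
 * mb_free_ext() above) the free routine is invoked and the mbuf itself is
 * returned to zone_mbuf once the last reference is released.
 */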