/*-
 * Copyright (c) 2004, 2005,
 *      Bosko Milekic <bmilekic@FreeBSD.org>. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_param.h"

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/protosw.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

/*
 * In FreeBSD, Mbufs and Mbuf Clusters are allocated from UMA
 * Zones.
 *
 * Mbuf Clusters (2K, contiguous) are allocated from the Cluster
 * Zone.  The Zone can be capped at kern.ipc.nmbclusters, if the
 * administrator so desires.
 *
 * Mbufs are allocated from a UMA Master Zone called the Mbuf
 * Zone.
 *
 * Additionally, FreeBSD provides a Packet Zone, which it
 * configures as a Secondary Zone to the Mbuf Master Zone,
 * thus sharing backend Slab kegs with the Mbuf Master Zone.
 *
 * Thus common-case allocations and locking are simplified:
 *
 *  m_clget()                m_getcl()
 *  |                          |
 *  |   .------------>[(Packet Cache)]    m_get(), m_gethdr()
 *  |   |             [     Packet   ]            |
 *  [(Cluster Cache)] [    Secondary ]    [ (Mbuf Cache)     ]
 *  [ Cluster Zone  ] [     Zone     ]    [ Mbuf Master Zone ]
 *  |                        \________        |
 *  [ Cluster Keg   ]          \     /
 *  |                      [ Mbuf Keg   ]
 *  [ Cluster Slabs ]            |
 *  |                      [ Mbuf Slabs ]
 *   \____________(VM)_________________/
 *
 *
 * Whenever an object is allocated with uma_zalloc() out of
 * one of the Zones, its _ctor_ function is executed.  Likewise,
 * on every deallocation through uma_zfree(), its _dtor_ function
 * is executed.
 *
 * Caches are per-CPU and are filled from the Master Zone.
 *
 * Whenever an object is allocated from the underlying global
 * memory pool, it gets pre-initialized with the _zinit_ functions.
 * When the Kegs are overfull, objects are decommissioned with the
 * _zfini_ functions and freed back to the global memory pool.
 *
 */
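
/*
 * Illustrative only (a hedged sketch, not used by this file): the two
 * common allocation paths shown in the diagram above.  m_getcl() draws
 * an mbuf with a pre-attached 2K cluster from the Packet Zone, while
 * m_get()/m_gethdr() return a bare mbuf from the Mbuf Master Zone to
 * which a cluster may later be attached with m_clget().
 *
 *	struct mbuf *m;
 *
 *	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);	(mbuf + cluster)
 *
 *	m = m_gethdr(M_NOWAIT, MT_DATA);		(bare pkthdr mbuf)
 *	if (m != NULL) {
 *		m_clget(m, M_NOWAIT);			(attach a cluster)
 *		if ((m->m_flags & M_EXT) == 0)
 *			m_freem(m);			(no cluster available)
 *	}
 */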

int nmbufs;			/* limits number of mbufs */
int nmbclusters;		/* limits number of mbuf clusters */
int nmbjumbop;			/* limits number of page size jumbo clusters */
int nmbjumbo9;			/* limits number of 9k jumbo clusters */
int nmbjumbo16;			/* limits number of 16k jumbo clusters */

static quad_t maxmbufmem;	/* overall real memory limit for all mbufs */

SYSCTL_QUAD(_kern_ipc, OID_AUTO, maxmbufmem, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &maxmbufmem, 0,
    "Maximum real memory allocatable to various mbuf types");

/*
 * tunable_mbinit() has to be run before any mbuf allocations are done.
 */
static void
tunable_mbinit(void *dummy)
{
	quad_t realmem;

	/*
	 * The default limit for all mbuf-related memory is 1/2 of all
	 * available kernel memory (physical or kmem).
	 * At most it can be 3/4 of available kernel memory.
	 */
	realmem = qmin((quad_t)physmem * PAGE_SIZE, vm_kmem_size);
	maxmbufmem = realmem / 2;
	TUNABLE_QUAD_FETCH("kern.ipc.maxmbufmem", &maxmbufmem);
	if (maxmbufmem > realmem / 4 * 3)
		maxmbufmem = realmem / 4 * 3;

	TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
	if (nmbclusters == 0)
		nmbclusters = maxmbufmem / MCLBYTES / 4;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbop", &nmbjumbop);
	if (nmbjumbop == 0)
		nmbjumbop = maxmbufmem / MJUMPAGESIZE / 4;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbo9", &nmbjumbo9);
	if (nmbjumbo9 == 0)
		nmbjumbo9 = maxmbufmem / MJUM9BYTES / 6;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbo16", &nmbjumbo16);
	if (nmbjumbo16 == 0)
		nmbjumbo16 = maxmbufmem / MJUM16BYTES / 6;

	/*
	 * We need at least as many mbufs as we have clusters of
	 * the various types added together.
	 */
	TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);
	if (nmbufs < nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16)
		nmbufs = lmax(maxmbufmem / MSIZE / 5,
		    nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16);
}
SYSINIT(tunable_mbinit, SI_SUB_KMEM, SI_ORDER_MIDDLE, tunable_mbinit, NULL);
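
/*
 * A worked example of the defaults above (hypothetical numbers, assuming
 * MSIZE = 256, MCLBYTES = 2K, MJUMPAGESIZE = PAGE_SIZE = 4K, and a machine
 * where the smaller of physical memory and vm_kmem_size is 8 GB):
 *
 *	realmem     = 8 GB
 *	maxmbufmem  = realmem / 2           = 4 GB
 *	nmbclusters = 4 GB / 2048  / 4      = 524288
 *	nmbjumbop   = 4 GB / 4096  / 4      = 262144
 *	nmbjumbo9   = 4 GB / 9216  / 6      = 77672
 *	nmbjumbo16  = 4 GB / 16384 / 6      = 43690
 *	nmbufs      = max(4 GB / 256 / 5, sum of the cluster limits)
 *	            = max(3355443, 907794)  = 3355443
 */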

static int
sysctl_nmbclusters(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbclusters;

	newnmbclusters = nmbclusters;
	error = sysctl_handle_int(oidp, &newnmbclusters, 0, req);
	if (error == 0 && req->newptr && newnmbclusters != nmbclusters) {
		if (newnmbclusters > nmbclusters &&
		    nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
			nmbclusters = newnmbclusters;
			nmbclusters = uma_zone_set_max(zone_clust, nmbclusters);
			EVENTHANDLER_INVOKE(nmbclusters_change);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbclusters, CTLTYPE_INT|CTLFLAG_RW,
    &nmbclusters, 0, sysctl_nmbclusters, "IU",
    "Maximum number of mbuf clusters allowed");

static int
sysctl_nmbjumbop(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbjumbop;

	newnmbjumbop = nmbjumbop;
	error = sysctl_handle_int(oidp, &newnmbjumbop, 0, req);
	if (error == 0 && req->newptr && newnmbjumbop != nmbjumbop) {
		if (newnmbjumbop > nmbjumbop &&
		    nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
			nmbjumbop = newnmbjumbop;
			nmbjumbop = uma_zone_set_max(zone_jumbop, nmbjumbop);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbop, CTLTYPE_INT|CTLFLAG_RW,
    &nmbjumbop, 0, sysctl_nmbjumbop, "IU",
    "Maximum number of mbuf page size jumbo clusters allowed");

static int
sysctl_nmbjumbo9(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbjumbo9;

	newnmbjumbo9 = nmbjumbo9;
	error = sysctl_handle_int(oidp, &newnmbjumbo9, 0, req);
	if (error == 0 && req->newptr && newnmbjumbo9 != nmbjumbo9) {
		if (newnmbjumbo9 > nmbjumbo9 &&
		    nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
			nmbjumbo9 = newnmbjumbo9;
			nmbjumbo9 = uma_zone_set_max(zone_jumbo9, nmbjumbo9);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbo9, CTLTYPE_INT|CTLFLAG_RW,
    &nmbjumbo9, 0, sysctl_nmbjumbo9, "IU",
    "Maximum number of mbuf 9k jumbo clusters allowed");

static int
sysctl_nmbjumbo16(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbjumbo16;

	newnmbjumbo16 = nmbjumbo16;
	error = sysctl_handle_int(oidp, &newnmbjumbo16, 0, req);
	if (error == 0 && req->newptr && newnmbjumbo16 != nmbjumbo16) {
		if (newnmbjumbo16 > nmbjumbo16 &&
		    nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
			nmbjumbo16 = newnmbjumbo16;
			nmbjumbo16 = uma_zone_set_max(zone_jumbo16, nmbjumbo16);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbo16, CTLTYPE_INT|CTLFLAG_RW,
    &nmbjumbo16, 0, sysctl_nmbjumbo16, "IU",
    "Maximum number of mbuf 16k jumbo clusters allowed");

static int
sysctl_nmbufs(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbufs;

	newnmbufs = nmbufs;
	error = sysctl_handle_int(oidp, &newnmbufs, 0, req);
	if (error == 0 && req->newptr && newnmbufs != nmbufs) {
		if (newnmbufs > nmbufs) {
			nmbufs = newnmbufs;
			nmbufs = uma_zone_set_max(zone_mbuf, nmbufs);
			EVENTHANDLER_INVOKE(nmbufs_change);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbufs, CTLTYPE_INT|CTLFLAG_RW,
    &nmbufs, 0, sysctl_nmbufs, "IU",
    "Maximum number of mbufs allowed");
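
/*
 * Usage note (illustrative only, hypothetical value): these handlers let an
 * administrator raise a limit at run time, e.g.
 *
 *	# sysctl kern.ipc.nmbclusters=1048576
 *
 * An increase is accepted only while nmbufs already covers the sum of the
 * existing cluster limits; a decrease, or an increase that nmbufs cannot
 * back, is rejected with EINVAL.
 */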

/*
 * Zones from which we allocate.
 */
uma_zone_t	zone_mbuf;
uma_zone_t	zone_clust;
uma_zone_t	zone_pack;
uma_zone_t	zone_jumbop;
uma_zone_t	zone_jumbo9;
uma_zone_t	zone_jumbo16;
uma_zone_t	zone_ext_refcnt;

/*
 * Callout to assist us in freeing mbufs.
 */
static struct callout	mb_reclaim_callout;
static struct mtx	mb_reclaim_callout_mtx;

/*
 * Local prototypes.
 */
static int	mb_ctor_mbuf(void *, int, void *, int);
static int	mb_ctor_clust(void *, int, void *, int);
static int	mb_ctor_pack(void *, int, void *, int);
static void	mb_dtor_mbuf(void *, int, void *);
static void	mb_dtor_clust(void *, int, void *);
static void	mb_dtor_pack(void *, int, void *);
static int	mb_zinit_pack(void *, int, int);
static void	mb_zfini_pack(void *, int);

static void	mb_reclaim(void *);
static void	*mbuf_jumbo_alloc(uma_zone_t, vm_size_t, uint8_t *, int);
static void	mb_maxaction(uma_zone_t);

/* Ensure that MSIZE is a power of 2. */
CTASSERT((((MSIZE - 1) ^ MSIZE) + 1) >> 1 == MSIZE);
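
/*
 * A short worked example of the power-of-2 check above, assuming the common
 * MSIZE of 256: (MSIZE - 1) ^ MSIZE = 255 ^ 256 = 511, + 1 = 512, >> 1 = 256,
 * which equals MSIZE.  For a non-power-of-2 value the XOR only covers bits up
 * through the lowest set bit (e.g. 320 ^ 319 = 127, giving 64), so the
 * identity fails and the assertion fires at compile time.
 */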

/*
 * Initialize FreeBSD network buffer allocation.
 */
static void
mbuf_init(void *dummy)
{

	/*
	 * Configure UMA zones for Mbufs, Clusters, and Packets.
	 */
	zone_mbuf = uma_zcreate(MBUF_MEM_NAME, MSIZE,
	    mb_ctor_mbuf, mb_dtor_mbuf,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    MSIZE - 1, UMA_ZONE_MAXBUCKET);
	if (nmbufs > 0)
		nmbufs = uma_zone_set_max(zone_mbuf, nmbufs);
	uma_zone_set_warning(zone_mbuf, "kern.ipc.nmbufs limit reached");
	uma_zone_set_maxaction(zone_mbuf, mb_maxaction);

	zone_clust = uma_zcreate(MBUF_CLUSTER_MEM_NAME, MCLBYTES,
	    mb_ctor_clust, mb_dtor_clust,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
	if (nmbclusters > 0)
		nmbclusters = uma_zone_set_max(zone_clust, nmbclusters);
	uma_zone_set_warning(zone_clust, "kern.ipc.nmbclusters limit reached");
	uma_zone_set_maxaction(zone_clust, mb_maxaction);

	zone_pack = uma_zsecond_create(MBUF_PACKET_MEM_NAME, mb_ctor_pack,
	    mb_dtor_pack, mb_zinit_pack, mb_zfini_pack, zone_mbuf);

	/* Make the jumbo frame zones too: page size, 9k and 16k. */
	zone_jumbop = uma_zcreate(MBUF_JUMBOP_MEM_NAME, MJUMPAGESIZE,
	    mb_ctor_clust, mb_dtor_clust,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
	if (nmbjumbop > 0)
		nmbjumbop = uma_zone_set_max(zone_jumbop, nmbjumbop);
	uma_zone_set_warning(zone_jumbop, "kern.ipc.nmbjumbop limit reached");
	uma_zone_set_maxaction(zone_jumbop, mb_maxaction);

	zone_jumbo9 = uma_zcreate(MBUF_JUMBO9_MEM_NAME, MJUM9BYTES,
	    mb_ctor_clust, mb_dtor_clust,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
	uma_zone_set_allocf(zone_jumbo9, mbuf_jumbo_alloc);
	if (nmbjumbo9 > 0)
		nmbjumbo9 = uma_zone_set_max(zone_jumbo9, nmbjumbo9);
	uma_zone_set_warning(zone_jumbo9, "kern.ipc.nmbjumbo9 limit reached");
	uma_zone_set_maxaction(zone_jumbo9, mb_maxaction);

	zone_jumbo16 = uma_zcreate(MBUF_JUMBO16_MEM_NAME, MJUM16BYTES,
	    mb_ctor_clust, mb_dtor_clust,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
	uma_zone_set_allocf(zone_jumbo16, mbuf_jumbo_alloc);
	if (nmbjumbo16 > 0)
		nmbjumbo16 = uma_zone_set_max(zone_jumbo16, nmbjumbo16);
	uma_zone_set_warning(zone_jumbo16, "kern.ipc.nmbjumbo16 limit reached");
	uma_zone_set_maxaction(zone_jumbo16, mb_maxaction);

	zone_ext_refcnt = uma_zcreate(MBUF_EXTREFCNT_MEM_NAME, sizeof(u_int),
	    NULL, NULL,
	    NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_ZINIT);

	/* uma_prealloc() goes here... */

	/* Initialize the mb_reclaim() callout. */
	mtx_init(&mb_reclaim_callout_mtx, "mb_reclaim_callout_mtx", NULL,
	    MTX_DEF);
	callout_init(&mb_reclaim_callout, 1);

	/*
	 * Hook event handler for low-memory situation, used to
	 * drain protocols and push data back to the caches (UMA
	 * later pushes it back to VM).
	 */
	EVENTHANDLER_REGISTER(vm_lowmem, mb_reclaim, NULL,
	    EVENTHANDLER_PRI_FIRST);
}
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbuf_init, NULL);

/*
 * UMA backend page allocator for the jumbo frame zones.
 *
 * Allocates kernel virtual memory that is backed by contiguous physical
 * pages.
 */
static void *
mbuf_jumbo_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *flags, int wait)
{

	/* Inform UMA that this allocator uses kernel_map/object. */
	*flags = UMA_SLAB_KERNEL;
	return ((void *)kmem_alloc_contig(kernel_arena, bytes, wait,
	    (vm_paddr_t)0, ~(vm_paddr_t)0, 1, 0, VM_MEMATTR_DEFAULT));
}

/*
 * Constructor for Mbuf master zone.
 *
 * The 'arg' pointer points to a mb_args structure which
 * contains call-specific information required to support the
 * mbuf allocation API.  See mbuf.h.
 */
static int
mb_ctor_mbuf(void *mem, int size, void *arg, int how)
{
	struct mbuf *m;
	struct mb_args *args;
	int error;
	int flags;
	short type;

#ifdef INVARIANTS
	trash_ctor(mem, size, arg, how);
#endif
	args = (struct mb_args *)arg;
	type = args->type;

	/*
	 * The mbuf is initialized later.  The caller has the
	 * responsibility to set up any MAC labels too.
	 */
	if (type == MT_NOINIT)
		return (0);

	m = (struct mbuf *)mem;
	flags = args->flags;

	error = m_init(m, NULL, size, how, type, flags);

	return (error);
}
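
/*
 * For reference (a sketch of the caller side; the authoritative definitions
 * live in sys/mbuf.h): the allocation wrappers bundle their 'type' and
 * 'flags' arguments into a struct mb_args, roughly
 *
 *	struct mb_args {
 *		int	flags;		(M_PKTHDR etc.)
 *		short	type;		(MT_DATA etc.)
 *	};
 *
 * and hand it to UMA; m_get(), for example, is essentially
 *
 *	struct mb_args args = { .flags = 0, .type = type };
 *	return (uma_zalloc_arg(zone_mbuf, &args, how));
 *
 * which is how the 'arg' pointer above ends up carrying call-specific
 * information into the constructor.
 */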

/*
 * The Mbuf master zone destructor.
 */
static void
mb_dtor_mbuf(void *mem, int size, void *arg)
{
	struct mbuf *m;
	unsigned long flags;

	m = (struct mbuf *)mem;
	flags = (unsigned long)arg;

	KASSERT((m->m_flags & M_NOFREE) == 0, ("%s: M_NOFREE set", __func__));
	if ((m->m_flags & M_PKTHDR) && !SLIST_EMPTY(&m->m_pkthdr.tags))
		m_tag_delete_chain(m, NULL);
#ifdef INVARIANTS
	trash_dtor(mem, size, arg);
#endif
}

/*
 * The Mbuf Packet zone destructor.
 */
static void
mb_dtor_pack(void *mem, int size, void *arg)
{
	struct mbuf *m;

	m = (struct mbuf *)mem;
	if ((m->m_flags & M_PKTHDR) != 0)
		m_tag_delete_chain(m, NULL);

	/* Make sure we've got a clean cluster back. */
	KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
	KASSERT(m->m_ext.ext_buf != NULL, ("%s: ext_buf == NULL", __func__));
	KASSERT(m->m_ext.ext_free == NULL, ("%s: ext_free != NULL", __func__));
	KASSERT(m->m_ext.ext_arg1 == NULL, ("%s: ext_arg1 != NULL", __func__));
	KASSERT(m->m_ext.ext_arg2 == NULL, ("%s: ext_arg2 != NULL", __func__));
	KASSERT(m->m_ext.ext_size == MCLBYTES, ("%s: ext_size != MCLBYTES", __func__));
	KASSERT(m->m_ext.ext_type == EXT_PACKET, ("%s: ext_type != EXT_PACKET", __func__));
	KASSERT(*m->m_ext.ext_cnt == 1, ("%s: ext_cnt != 1", __func__));
#ifdef INVARIANTS
	trash_dtor(m->m_ext.ext_buf, MCLBYTES, arg);
#endif
	/*
	 * If there are processes blocked on zone_clust, waiting for pages
	 * to be freed up, cause them to be woken up by draining the
	 * packet zone.  We are exposed to a race here (in the check for
	 * the UMA_ZFLAG_FULL) where we might miss the flag set, but that
	 * is deliberate.  We don't want to acquire the zone lock for every
	 * mbuf free.
	 */
	if (uma_zone_exhausted_nolock(zone_clust))
		zone_drain(zone_pack);
}

/*
 * The Cluster and Jumbo[PAGESIZE|9|16] zone constructor.
 *
 * Here the 'arg' pointer points to the Mbuf which we
 * are configuring cluster storage for.  If 'arg' is
 * NULL we allocate just the cluster without attaching
 * it to any mbuf.  See mbuf.h.
 */
static int
mb_ctor_clust(void *mem, int size, void *arg, int how)
{
	struct mbuf *m;
	u_int *refcnt;
	int type;
	uma_zone_t zone;

#ifdef INVARIANTS
	trash_ctor(mem, size, arg, how);
#endif
	switch (size) {
	case MCLBYTES:
		type = EXT_CLUSTER;
		zone = zone_clust;
		break;
#if MJUMPAGESIZE != MCLBYTES
	case MJUMPAGESIZE:
		type = EXT_JUMBOP;
		zone = zone_jumbop;
		break;
#endif
	case MJUM9BYTES:
		type = EXT_JUMBO9;
		zone = zone_jumbo9;
		break;
	case MJUM16BYTES:
		type = EXT_JUMBO16;
		zone = zone_jumbo16;
		break;
	default:
		panic("unknown cluster size");
		break;
	}

	m = (struct mbuf *)arg;
	refcnt = uma_find_refcnt(zone, mem);
	*refcnt = 1;
	if (m != NULL) {
		m->m_ext.ext_buf = (caddr_t)mem;
		m->m_data = m->m_ext.ext_buf;
		m->m_flags |= M_EXT;
		m->m_ext.ext_free = NULL;
		m->m_ext.ext_arg1 = NULL;
		m->m_ext.ext_arg2 = NULL;
		m->m_ext.ext_size = size;
		m->m_ext.ext_type = type;
		m->m_ext.ext_flags = 0;
		m->m_ext.ext_cnt = refcnt;
	}

	return (0);
}
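
/*
 * Caller-side sketch (illustrative only, simplified from the inline wrappers
 * in sys/mbuf.h): the constructor above attaches the cluster to an mbuf only
 * when that mbuf is passed in as the allocation argument; passing NULL yields
 * a bare cluster.
 *
 *	(attach a 2K cluster to an existing mbuf m)
 *	uma_zalloc_arg(zone_clust, m, M_NOWAIT);
 *
 *	(get a bare page-size jumbo cluster, no mbuf involved)
 *	void *buf = uma_zalloc_arg(zone_jumbop, NULL, M_NOWAIT);
 */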

/*
 * The Mbuf Cluster zone destructor.
 */
static void
mb_dtor_clust(void *mem, int size, void *arg)
{
#ifdef INVARIANTS
	uma_zone_t zone;

	zone = m_getzone(size);
	KASSERT(*(uma_find_refcnt(zone, mem)) <= 1,
	    ("%s: refcnt incorrect %u", __func__,
	    *(uma_find_refcnt(zone, mem))));

	trash_dtor(mem, size, arg);
#endif
}

/*
 * The Packet secondary zone's init routine, executed on the
 * object's transition from mbuf keg slab to zone cache.
 */
static int
mb_zinit_pack(void *mem, int size, int how)
{
	struct mbuf *m;

	m = (struct mbuf *)mem;		/* m is virgin. */
	if (uma_zalloc_arg(zone_clust, m, how) == NULL ||
	    m->m_ext.ext_buf == NULL)
		return (ENOMEM);
	m->m_ext.ext_type = EXT_PACKET;	/* Override. */
#ifdef INVARIANTS
	trash_init(m->m_ext.ext_buf, MCLBYTES, how);
#endif
	return (0);
}

/*
 * The Packet secondary zone's fini routine, executed on the
 * object's transition from zone cache to keg slab.
 */
static void
mb_zfini_pack(void *mem, int size)
{
	struct mbuf *m;

	m = (struct mbuf *)mem;
#ifdef INVARIANTS
	trash_fini(m->m_ext.ext_buf, MCLBYTES);
#endif
	uma_zfree_arg(zone_clust, m->m_ext.ext_buf, NULL);
#ifdef INVARIANTS
	trash_dtor(mem, size, NULL);
#endif
}

/*
 * The "packet" keg constructor.
 */
static int
mb_ctor_pack(void *mem, int size, void *arg, int how)
{
	struct mbuf *m;
	struct mb_args *args;
	int error, flags;
	short type;

	m = (struct mbuf *)mem;
	args = (struct mb_args *)arg;
	flags = args->flags;
	type = args->type;

#ifdef INVARIANTS
	trash_ctor(m->m_ext.ext_buf, MCLBYTES, arg, how);
#endif

	error = m_init(m, NULL, size, how, type, flags);

	/* m_ext is already initialized. */
	m->m_data = m->m_ext.ext_buf;
	m->m_flags = (flags | M_EXT);

	return (error);
}

int
m_pkthdr_init(struct mbuf *m, int how)
{
#ifdef MAC
	int error;
#endif
	m->m_data = m->m_pktdat;
	bzero(&m->m_pkthdr, sizeof(m->m_pkthdr));
#ifdef MAC
	/* If the label init fails, fail the alloc. */
	error = mac_mbuf_init(m, how);
	if (error)
		return (error);
#endif

	return (0);
}

/*
 * This is the protocol drain routine.
 *
 * No locks should be held when this is called.  The drain routines presently
 * have to acquire some locks, which raises the possibility of a lock-order
 * reversal.
 */
static void
mb_reclaim(void *junk)
{
	struct domain *dp;
	struct protosw *pr;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK | WARN_PANIC, NULL,
	    "mb_reclaim()");

	for (dp = domains; dp != NULL; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain != NULL)
				(*pr->pr_drain)();
}
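
/*
 * For illustration (the exact set depends on the compiled-in domains): the
 * pr_drain handlers reached by the loop above include routines such as
 * tcp_drain(), which discards mbufs sitting on TCP reassembly queues, so the
 * walk effectively asks every protocol to give memory back.
 */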

/*
 * This is the function called by the mb_reclaim_callout, which is
 * used when we hit the maximum for a zone.
 *
 * (See mb_maxaction() below.)
 */
static void
mb_reclaim_timer(void *junk __unused)
{

	mtx_lock(&mb_reclaim_callout_mtx);

	/*
	 * Avoid running this function extra times by skipping this invocation
	 * if the callout has already been rescheduled.
	 */
	if (callout_pending(&mb_reclaim_callout) ||
	    !callout_active(&mb_reclaim_callout)) {
		mtx_unlock(&mb_reclaim_callout_mtx);
		return;
	}
	mtx_unlock(&mb_reclaim_callout_mtx);

	mb_reclaim(NULL);

	mtx_lock(&mb_reclaim_callout_mtx);
	callout_deactivate(&mb_reclaim_callout);
	mtx_unlock(&mb_reclaim_callout_mtx);
}

/*
 * This function is called when we hit the maximum for a zone.
 *
 * At that point, we want to call the protocol drain routine to free up some
 * mbufs.  However, we will use the callout routines to schedule this to
 * occur in another thread.  (The thread calling this function holds the
 * zone lock.)
 */
static void
mb_maxaction(uma_zone_t zone __unused)
{

	/*
	 * If we can't immediately obtain the lock, either the callout
	 * is currently running, or another thread is scheduling the
	 * callout.
	 */
	if (!mtx_trylock(&mb_reclaim_callout_mtx))
		return;

	/* If not already scheduled/running, schedule the callout. */
	if (!callout_active(&mb_reclaim_callout)) {
		callout_reset(&mb_reclaim_callout, 1, mb_reclaim_timer, NULL);
	}

	mtx_unlock(&mb_reclaim_callout_mtx);
}