/*-
 * Copyright (c) 2004, 2005,
 *	Bosko Milekic <bmilekic@FreeBSD.org>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_param.h"

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/protosw.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

/*
 * In FreeBSD, Mbufs and Mbuf Clusters are allocated from UMA
 * Zones.
 *
 * Mbuf Clusters (2K, contiguous) are allocated from the Cluster
 * Zone.  The Zone can be capped at kern.ipc.nmbclusters, if the
 * administrator so desires.
 *
 * Mbufs are allocated from a UMA Master Zone called the Mbuf
 * Zone.
 *
 * Additionally, FreeBSD provides a Packet Zone, which it
 * configures as a Secondary Zone to the Mbuf Master Zone,
 * thus sharing backend Slab kegs with the Mbuf Master Zone.
 *
 * Thus common-case allocations and locking are simplified:
 *
 *  m_clget()                m_getcl()
 *   |                         |
 *   |   .------------>[(Packet Cache)]    m_get(), m_gethdr()
 *   |   |             [     Packet   ]            |
 * [(Cluster Cache)]   [    Secondary ]   [ (Mbuf Cache)     ]
 * [ Cluster Zone  ]   [     Zone     ]   [ Mbuf Master Zone ]
 *        |                       \________         |
 *  [ Cluster Keg   ]                      \        /
 *        |                            [ Mbuf Keg   ]
 *  [ Cluster Slabs ]                        |
 *        |                            [ Mbuf Slabs ]
 *         \____________(VM)_________________/
 *
 *
 * Whenever an object is allocated with uma_zalloc() out of
 * one of the Zones its _ctor_ function is executed.  Likewise,
 * whenever an object is deallocated through uma_zfree() its
 * _dtor_ function is executed.
 *
 * Caches are per-CPU and are filled from the Master Zone.
 *
 * Whenever an object is allocated from the underlying global
 * memory pool it gets pre-initialized with the _zinit_ functions.
 * When the Kegs are overfull, objects get decommissioned with the
 * _zfini_ functions and freed back to the global memory pool.
 *
 */

int nmbufs;			/* limits number of mbufs */
int nmbclusters;		/* limits number of mbuf clusters */
int nmbjumbop;			/* limits number of page size jumbo clusters */
int nmbjumbo9;			/* limits number of 9k jumbo clusters */
int nmbjumbo16;			/* limits number of 16k jumbo clusters */

static quad_t maxmbufmem;	/* overall real memory limit for all mbufs */

SYSCTL_QUAD(_kern_ipc, OID_AUTO, maxmbufmem, CTLFLAG_RDTUN, &maxmbufmem, 0,
    "Maximum real memory allocatable to various mbuf types");

/*
 * tunable_mbinit() has to be run before any mbuf allocations are done.
 */
static void
tunable_mbinit(void *dummy)
{
	quad_t realmem;

	/*
	 * The default limit for all mbuf related memory is 1/2 of all
	 * available kernel memory (physical or kmem).
	 * At most it can be 3/4 of available kernel memory.
	 */
	realmem = qmin((quad_t)physmem * PAGE_SIZE, vm_kmem_size);
	maxmbufmem = realmem / 2;
	TUNABLE_QUAD_FETCH("kern.ipc.maxmbufmem", &maxmbufmem);
	if (maxmbufmem > realmem / 4 * 3)
		maxmbufmem = realmem / 4 * 3;

	TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
	if (nmbclusters == 0)
		nmbclusters = maxmbufmem / MCLBYTES / 4;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbop", &nmbjumbop);
	if (nmbjumbop == 0)
		nmbjumbop = maxmbufmem / MJUMPAGESIZE / 4;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbo9", &nmbjumbo9);
	if (nmbjumbo9 == 0)
		nmbjumbo9 = maxmbufmem / MJUM9BYTES / 6;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbo16", &nmbjumbo16);
	if (nmbjumbo16 == 0)
		nmbjumbo16 = maxmbufmem / MJUM16BYTES / 6;

	/*
	 * We need at least as many mbufs as we have clusters of
	 * the various types added together.
	 */
	TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);
	if (nmbufs < nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16)
		nmbufs = lmax(maxmbufmem / MSIZE / 5,
		    nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16);
}
SYSINIT(tunable_mbinit, SI_SUB_KMEM, SI_ORDER_MIDDLE, tunable_mbinit, NULL);

static int
sysctl_nmbclusters(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbclusters;

	newnmbclusters = nmbclusters;
	error = sysctl_handle_int(oidp, &newnmbclusters, 0, req);
	if (error == 0 && req->newptr && newnmbclusters != nmbclusters) {
		if (newnmbclusters > nmbclusters &&
		    nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
			nmbclusters = newnmbclusters;
			nmbclusters = uma_zone_set_max(zone_clust, nmbclusters);
			EVENTHANDLER_INVOKE(nmbclusters_change);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbclusters, CTLTYPE_INT|CTLFLAG_RW,
    &nmbclusters, 0, sysctl_nmbclusters, "IU",
    "Maximum number of mbuf clusters allowed");

static int
sysctl_nmbjumbop(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbjumbop;

	newnmbjumbop = nmbjumbop;
	error = sysctl_handle_int(oidp, &newnmbjumbop, 0, req);
	if (error == 0 && req->newptr && newnmbjumbop != nmbjumbop) {
		if (newnmbjumbop > nmbjumbop &&
		    nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
			nmbjumbop = newnmbjumbop;
			nmbjumbop = uma_zone_set_max(zone_jumbop, nmbjumbop);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbop, CTLTYPE_INT|CTLFLAG_RW,
    &nmbjumbop, 0, sysctl_nmbjumbop, "IU",
    "Maximum number of mbuf page size jumbo clusters allowed");

static int
sysctl_nmbjumbo9(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbjumbo9;

	newnmbjumbo9 = nmbjumbo9;
	error = sysctl_handle_int(oidp, &newnmbjumbo9, 0, req);
	if (error == 0 && req->newptr && newnmbjumbo9 != nmbjumbo9) {
		if (newnmbjumbo9 > nmbjumbo9 &&
		    nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
			nmbjumbo9 = newnmbjumbo9;
			nmbjumbo9 = uma_zone_set_max(zone_jumbo9, nmbjumbo9);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbo9, CTLTYPE_INT|CTLFLAG_RW,
    &nmbjumbo9, 0, sysctl_nmbjumbo9, "IU",
    "Maximum number of mbuf 9k jumbo clusters allowed");

static int
sysctl_nmbjumbo16(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbjumbo16;

	newnmbjumbo16 = nmbjumbo16;
	error = sysctl_handle_int(oidp, &newnmbjumbo16, 0, req);
	if (error == 0 && req->newptr && newnmbjumbo16 != nmbjumbo16) {
		if (newnmbjumbo16 > nmbjumbo16 &&
		    nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
			nmbjumbo16 = newnmbjumbo16;
			nmbjumbo16 = uma_zone_set_max(zone_jumbo16, nmbjumbo16);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbo16, CTLTYPE_INT|CTLFLAG_RW,
    &nmbjumbo16, 0, sysctl_nmbjumbo16, "IU",
    "Maximum number of mbuf 16k jumbo clusters allowed");

static int
sysctl_nmbufs(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbufs;

	newnmbufs = nmbufs;
	error = sysctl_handle_int(oidp, &newnmbufs, 0, req);
	if (error == 0 && req->newptr && newnmbufs != nmbufs) {
		if (newnmbufs > nmbufs) {
			nmbufs = newnmbufs;
			nmbufs = uma_zone_set_max(zone_mbuf, nmbufs);
			EVENTHANDLER_INVOKE(nmbufs_change);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbufs, CTLTYPE_INT|CTLFLAG_RW,
    &nmbufs, 0, sysctl_nmbufs, "IU",
    "Maximum number of mbufs allowed");

/*
 * Zones from which we allocate.
 */
uma_zone_t	zone_mbuf;
uma_zone_t	zone_clust;
uma_zone_t	zone_pack;
uma_zone_t	zone_jumbop;
uma_zone_t	zone_jumbo9;
uma_zone_t	zone_jumbo16;
uma_zone_t	zone_ext_refcnt;

/*
 * Local prototypes.
 */
static int	mb_ctor_mbuf(void *, int, void *, int);
static int	mb_ctor_clust(void *, int, void *, int);
static int	mb_ctor_pack(void *, int, void *, int);
static void	mb_dtor_mbuf(void *, int, void *);
static void	mb_dtor_clust(void *, int, void *);
static void	mb_dtor_pack(void *, int, void *);
static int	mb_zinit_pack(void *, int, int);
static void	mb_zfini_pack(void *, int);

static void	mb_reclaim(void *);
static void    *mbuf_jumbo_alloc(uma_zone_t, int, uint8_t *, int);

/* Ensure that MSIZE is a power of 2. */
CTASSERT((((MSIZE - 1) ^ MSIZE) + 1) >> 1 == MSIZE);

/*
 * Initialize FreeBSD Network buffer allocation.
 */
static void
mbuf_init(void *dummy)
{

	/*
	 * Configure UMA zones for Mbufs, Clusters, and Packets.
	 */
	zone_mbuf = uma_zcreate(MBUF_MEM_NAME, MSIZE,
	    mb_ctor_mbuf, mb_dtor_mbuf,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    MSIZE - 1, UMA_ZONE_MAXBUCKET);
	if (nmbufs > 0)
		nmbufs = uma_zone_set_max(zone_mbuf, nmbufs);
	uma_zone_set_warning(zone_mbuf, "kern.ipc.nmbufs limit reached");

	zone_clust = uma_zcreate(MBUF_CLUSTER_MEM_NAME, MCLBYTES,
	    mb_ctor_clust, mb_dtor_clust,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
	if (nmbclusters > 0)
		nmbclusters = uma_zone_set_max(zone_clust, nmbclusters);
	uma_zone_set_warning(zone_clust, "kern.ipc.nmbclusters limit reached");

	zone_pack = uma_zsecond_create(MBUF_PACKET_MEM_NAME, mb_ctor_pack,
	    mb_dtor_pack, mb_zinit_pack, mb_zfini_pack, zone_mbuf);

	/* Make jumbo frame zones too: page size, 9k and 16k. */
	zone_jumbop = uma_zcreate(MBUF_JUMBOP_MEM_NAME, MJUMPAGESIZE,
	    mb_ctor_clust, mb_dtor_clust,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
	if (nmbjumbop > 0)
		nmbjumbop = uma_zone_set_max(zone_jumbop, nmbjumbop);
	uma_zone_set_warning(zone_jumbop, "kern.ipc.nmbjumbop limit reached");

	zone_jumbo9 = uma_zcreate(MBUF_JUMBO9_MEM_NAME, MJUM9BYTES,
	    mb_ctor_clust, mb_dtor_clust,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
	uma_zone_set_allocf(zone_jumbo9, mbuf_jumbo_alloc);
	if (nmbjumbo9 > 0)
		nmbjumbo9 = uma_zone_set_max(zone_jumbo9, nmbjumbo9);
	uma_zone_set_warning(zone_jumbo9, "kern.ipc.nmbjumbo9 limit reached");

	zone_jumbo16 = uma_zcreate(MBUF_JUMBO16_MEM_NAME, MJUM16BYTES,
	    mb_ctor_clust, mb_dtor_clust,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
	uma_zone_set_allocf(zone_jumbo16, mbuf_jumbo_alloc);
	if (nmbjumbo16 > 0)
		nmbjumbo16 = uma_zone_set_max(zone_jumbo16, nmbjumbo16);
	uma_zone_set_warning(zone_jumbo16, "kern.ipc.nmbjumbo16 limit reached");

	zone_ext_refcnt = uma_zcreate(MBUF_EXTREFCNT_MEM_NAME, sizeof(u_int),
	    NULL, NULL,
	    NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_ZINIT);

	/* uma_prealloc() goes here... */

	/*
	 * Hook event handler for low-memory situation, used to
	 * drain protocols and push data back to the caches (UMA
	 * later pushes it back to VM).
	 */
	EVENTHANDLER_REGISTER(vm_lowmem, mb_reclaim, NULL,
	    EVENTHANDLER_PRI_FIRST);
}
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbuf_init, NULL);

/*
 * UMA backend page allocator for the jumbo frame zones.
 *
 * Allocates kernel virtual memory that is backed by contiguous physical
 * pages.
 */
static void *
mbuf_jumbo_alloc(uma_zone_t zone, int bytes, uint8_t *flags, int wait)
{

	/* Inform UMA that this allocator uses kernel_map/object. */
	*flags = UMA_SLAB_KERNEL;
	return ((void *)kmem_alloc_contig(kernel_arena, bytes, wait,
	    (vm_paddr_t)0, ~(vm_paddr_t)0, 1, 0, VM_MEMATTR_DEFAULT));
}

/*
 * Constructor for Mbuf master zone.
 *
 * The 'arg' pointer points to a mb_args structure which
 * contains call-specific information required to support the
 * mbuf allocation API.  See mbuf.h.
 */
static int
mb_ctor_mbuf(void *mem, int size, void *arg, int how)
{
	struct mbuf *m;
	struct mb_args *args;
	int error;
	int flags;
	short type;

#ifdef INVARIANTS
	trash_ctor(mem, size, arg, how);
#endif
	args = (struct mb_args *)arg;
	type = args->type;

	/*
	 * The mbuf is initialized later.  The caller has the
	 * responsibility to set up any MAC labels too.
	 */
	if (type == MT_NOINIT)
		return (0);

	m = (struct mbuf *)mem;
	flags = args->flags;

	error = m_init(m, NULL, size, how, type, flags);

	return (error);
}

/*
 * The Mbuf master zone destructor.
 */
static void
mb_dtor_mbuf(void *mem, int size, void *arg)
{
	struct mbuf *m;
	unsigned long flags;

	m = (struct mbuf *)mem;
	flags = (unsigned long)arg;

	if ((m->m_flags & M_PKTHDR) && !SLIST_EMPTY(&m->m_pkthdr.tags))
		m_tag_delete_chain(m, NULL);
	KASSERT((m->m_flags & M_EXT) == 0, ("%s: M_EXT set", __func__));
	KASSERT((m->m_flags & M_NOFREE) == 0, ("%s: M_NOFREE set", __func__));
#ifdef INVARIANTS
	trash_dtor(mem, size, arg);
#endif
}

/*
 * The Mbuf Packet zone destructor.
 */
static void
mb_dtor_pack(void *mem, int size, void *arg)
{
	struct mbuf *m;

	m = (struct mbuf *)mem;
	if ((m->m_flags & M_PKTHDR) != 0)
		m_tag_delete_chain(m, NULL);

	/* Make sure we've got a clean cluster back. */
	KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
	KASSERT(m->m_ext.ext_buf != NULL, ("%s: ext_buf == NULL", __func__));
	KASSERT(m->m_ext.ext_free == NULL, ("%s: ext_free != NULL", __func__));
	KASSERT(m->m_ext.ext_arg1 == NULL, ("%s: ext_arg1 != NULL", __func__));
	KASSERT(m->m_ext.ext_arg2 == NULL, ("%s: ext_arg2 != NULL", __func__));
	KASSERT(m->m_ext.ext_size == MCLBYTES, ("%s: ext_size != MCLBYTES", __func__));
	KASSERT(m->m_ext.ext_type == EXT_PACKET, ("%s: ext_type != EXT_PACKET", __func__));
	KASSERT(*m->m_ext.ref_cnt == 1, ("%s: ref_cnt != 1", __func__));
#ifdef INVARIANTS
	trash_dtor(m->m_ext.ext_buf, MCLBYTES, arg);
#endif
	/*
	 * If there are processes blocked on zone_clust, waiting for pages
	 * to be freed up, cause them to be woken up by draining the
	 * packet zone.  We are exposed to a race here (in the check for
	 * the UMA_ZFLAG_FULL) where we might miss the flag set, but that
	 * is deliberate.  We don't want to acquire the zone lock for every
	 * mbuf free.
	 */
	if (uma_zone_exhausted_nolock(zone_clust))
		zone_drain(zone_pack);
}

/*
 * The Cluster and Jumbo[PAGESIZE|9|16] zone constructor.
 *
 * Here the 'arg' pointer points to the Mbuf which we
 * are configuring cluster storage for.  If 'arg' is
 * NULL we allocate just the cluster without attaching
 * it to an mbuf.  See mbuf.h.
 */
static int
mb_ctor_clust(void *mem, int size, void *arg, int how)
{
	struct mbuf *m;
	u_int *refcnt;
	int type;
	uma_zone_t zone;

#ifdef INVARIANTS
	trash_ctor(mem, size, arg, how);
#endif
	switch (size) {
	case MCLBYTES:
		type = EXT_CLUSTER;
		zone = zone_clust;
		break;
#if MJUMPAGESIZE != MCLBYTES
	case MJUMPAGESIZE:
		type = EXT_JUMBOP;
		zone = zone_jumbop;
		break;
#endif
	case MJUM9BYTES:
		type = EXT_JUMBO9;
		zone = zone_jumbo9;
		break;
	case MJUM16BYTES:
		type = EXT_JUMBO16;
		zone = zone_jumbo16;
		break;
	default:
		panic("unknown cluster size");
		break;
	}

	m = (struct mbuf *)arg;
	refcnt = uma_find_refcnt(zone, mem);
	*refcnt = 1;
	if (m != NULL) {
		m->m_ext.ext_buf = (caddr_t)mem;
		m->m_data = m->m_ext.ext_buf;
		m->m_flags |= M_EXT;
		m->m_ext.ext_free = NULL;
		m->m_ext.ext_arg1 = NULL;
		m->m_ext.ext_arg2 = NULL;
		m->m_ext.ext_size = size;
		m->m_ext.ext_type = type;
		m->m_ext.ext_flags = 0;
		m->m_ext.ref_cnt = refcnt;
	}

	return (0);
}

/*
 * The Mbuf Cluster zone destructor.
 */
static void
mb_dtor_clust(void *mem, int size, void *arg)
{
#ifdef INVARIANTS
	uma_zone_t zone;

	zone = m_getzone(size);
	KASSERT(*(uma_find_refcnt(zone, mem)) <= 1,
	    ("%s: refcnt incorrect %u", __func__,
	    *(uma_find_refcnt(zone, mem))));

	trash_dtor(mem, size, arg);
#endif
}

/*
 * The Packet secondary zone's init routine, executed on the
 * object's transition from mbuf keg slab to zone cache.
 */
static int
mb_zinit_pack(void *mem, int size, int how)
{
	struct mbuf *m;

	m = (struct mbuf *)mem;		/* m is virgin. */
	if (uma_zalloc_arg(zone_clust, m, how) == NULL ||
	    m->m_ext.ext_buf == NULL)
		return (ENOMEM);
	m->m_ext.ext_type = EXT_PACKET;	/* Override. */
#ifdef INVARIANTS
	trash_init(m->m_ext.ext_buf, MCLBYTES, how);
#endif
	return (0);
}

/*
 * The Packet secondary zone's fini routine, executed on the
 * object's transition from zone cache to keg slab.
 */
static void
mb_zfini_pack(void *mem, int size)
{
	struct mbuf *m;

	m = (struct mbuf *)mem;
#ifdef INVARIANTS
	trash_fini(m->m_ext.ext_buf, MCLBYTES);
#endif
	uma_zfree_arg(zone_clust, m->m_ext.ext_buf, NULL);
#ifdef INVARIANTS
	trash_dtor(mem, size, NULL);
#endif
}

/*
 * The "packet" keg constructor.
 */
static int
mb_ctor_pack(void *mem, int size, void *arg, int how)
{
	struct mbuf *m;
	struct mb_args *args;
	int error, flags;
	short type;

	m = (struct mbuf *)mem;
	args = (struct mb_args *)arg;
	flags = args->flags;
	type = args->type;

#ifdef INVARIANTS
	trash_ctor(m->m_ext.ext_buf, MCLBYTES, arg, how);
#endif

	error = m_init(m, NULL, size, how, type, flags);

	/* m_ext is already initialized. */
	m->m_data = m->m_ext.ext_buf;
	m->m_flags = (flags | M_EXT);

	return (error);
}

/*
 * Initialize the packet header of a newly allocated mbuf and, when MAC is
 * compiled in, its MAC label.  Returns 0 on success or an error from
 * mac_mbuf_init().
 */
int
m_pkthdr_init(struct mbuf *m, int how)
{
#ifdef MAC
	int error;
#endif
	m->m_data = m->m_pktdat;
	m->m_pkthdr.rcvif = NULL;
	SLIST_INIT(&m->m_pkthdr.tags);
	m->m_pkthdr.len = 0;
	m->m_pkthdr.flowid = 0;
	m->m_pkthdr.csum_flags = 0;
	m->m_pkthdr.fibnum = 0;
	m->m_pkthdr.cosqos = 0;
	m->m_pkthdr.rsstype = 0;
	m->m_pkthdr.l2hlen = 0;
	m->m_pkthdr.l3hlen = 0;
	m->m_pkthdr.l4hlen = 0;
	m->m_pkthdr.l5hlen = 0;
	m->m_pkthdr.PH_per.sixtyfour[0] = 0;
	m->m_pkthdr.PH_loc.sixtyfour[0] = 0;
#ifdef MAC
	/* If the label init fails, fail the alloc */
	error = mac_mbuf_init(m, how);
	if (error)
		return (error);
#endif

	return (0);
}

/*
 * This is the protocol drain routine.
 *
 * No locks should be held when this is called.  The drain routines presently
 * have to acquire some locks, which raises the possibility of lock order
 * reversal.
 */
static void
mb_reclaim(void *junk)
{
	struct domain *dp;
	struct protosw *pr;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK | WARN_PANIC, NULL,
	    "mb_reclaim()");

	for (dp = domains; dp != NULL; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain != NULL)
				(*pr->pr_drain)();
}