1 /*- 2 * Copyright (c) 2004, 2005, 3 * Bosko Milekic <bmilekic@FreeBSD.org>. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice unmodified, this list of conditions and the following 10 * disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 28 #include <sys/cdefs.h> 29 __FBSDID("$FreeBSD$"); 30 31 #include "opt_param.h" 32 33 #include <sys/param.h> 34 #include <sys/malloc.h> 35 #include <sys/systm.h> 36 #include <sys/mbuf.h> 37 #include <sys/domain.h> 38 #include <sys/eventhandler.h> 39 #include <sys/kernel.h> 40 #include <sys/protosw.h> 41 #include <sys/smp.h> 42 #include <sys/sysctl.h> 43 44 #include <security/mac/mac_framework.h> 45 46 #include <vm/vm.h> 47 #include <vm/vm_extern.h> 48 #include <vm/vm_kern.h> 49 #include <vm/vm_page.h> 50 #include <vm/vm_map.h> 51 #include <vm/uma.h> 52 #include <vm/uma_int.h> 53 #include <vm/uma_dbg.h> 54 55 /* 56 * In FreeBSD, Mbufs and Mbuf Clusters are allocated from UMA 57 * Zones. 58 * 59 * Mbuf Clusters (2K, contiguous) are allocated from the Cluster 60 * Zone. The Zone can be capped at kern.ipc.nmbclusters, if the 61 * administrator so desires. 62 * 63 * Mbufs are allocated from a UMA Master Zone called the Mbuf 64 * Zone. 65 * 66 * Additionally, FreeBSD provides a Packet Zone, which it 67 * configures as a Secondary Zone to the Mbuf Master Zone, 68 * thus sharing backend Slab kegs with the Mbuf Master Zone. 69 * 70 * Thus common-case allocations and locking are simplified: 71 * 72 * m_clget() m_getcl() 73 * | | 74 * | .------------>[(Packet Cache)] m_get(), m_gethdr() 75 * | | [ Packet ] | 76 * [(Cluster Cache)] [ Secondary ] [ (Mbuf Cache) ] 77 * [ Cluster Zone ] [ Zone ] [ Mbuf Master Zone ] 78 * | \________ | 79 * [ Cluster Keg ] \ / 80 * | [ Mbuf Keg ] 81 * [ Cluster Slabs ] | 82 * | [ Mbuf Slabs ] 83 * \____________(VM)_________________/ 84 * 85 * 86 * Whenever an object is allocated with uma_zalloc() out of 87 * one of the Zones its _ctor_ function is executed. The same 88 * for any deallocation through uma_zfree() the _dtor_ function 89 * is executed. 90 * 91 * Caches are per-CPU and are filled from the Master Zone. 92 * 93 * Whenever an object is allocated from the underlying global 94 * memory pool it gets pre-initialized with the _zinit_ functions. 95 * When the Keg's are overfull objects get decomissioned with 96 * _zfini_ functions and free'd back to the global memory pool. 97 * 98 */ 99 100 int nmbufs; /* limits number of mbufs */ 101 int nmbclusters; /* limits number of mbuf clusters */ 102 int nmbjumbop; /* limits number of page size jumbo clusters */ 103 int nmbjumbo9; /* limits number of 9k jumbo clusters */ 104 int nmbjumbo16; /* limits number of 16k jumbo clusters */ 105 106 static quad_t maxmbufmem; /* overall real memory limit for all mbufs */ 107 108 SYSCTL_QUAD(_kern_ipc, OID_AUTO, maxmbufmem, CTLFLAG_RDTUN, &maxmbufmem, 0, 109 "Maximum real memory allocateable to various mbuf types"); 110 111 /* 112 * tunable_mbinit() has to be run before any mbuf allocations are done. 113 */ 114 static void 115 tunable_mbinit(void *dummy) 116 { 117 quad_t realmem; 118 119 /* 120 * The default limit for all mbuf related memory is 1/2 of all 121 * available kernel memory (physical or kmem). 122 * At most it can be 3/4 of available kernel memory. 123 */ 124 realmem = qmin((quad_t)physmem * PAGE_SIZE, 125 vm_map_max(kmem_map) - vm_map_min(kmem_map)); 126 maxmbufmem = realmem / 2; 127 TUNABLE_QUAD_FETCH("kern.ipc.maxmbufmem", &maxmbufmem); 128 if (maxmbufmem > realmem / 4 * 3) 129 maxmbufmem = realmem / 4 * 3; 130 131 TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters); 132 if (nmbclusters == 0) 133 nmbclusters = maxmbufmem / MCLBYTES / 4; 134 135 TUNABLE_INT_FETCH("kern.ipc.nmbjumbop", &nmbjumbop); 136 if (nmbjumbop == 0) 137 nmbjumbop = maxmbufmem / MJUMPAGESIZE / 4; 138 139 TUNABLE_INT_FETCH("kern.ipc.nmbjumbo9", &nmbjumbo9); 140 if (nmbjumbo9 == 0) 141 nmbjumbo9 = maxmbufmem / MJUM9BYTES / 6; 142 143 TUNABLE_INT_FETCH("kern.ipc.nmbjumbo16", &nmbjumbo16); 144 if (nmbjumbo16 == 0) 145 nmbjumbo16 = maxmbufmem / MJUM16BYTES / 6; 146 147 /* 148 * We need at least as many mbufs as we have clusters of 149 * the various types added together. 150 */ 151 TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs); 152 if (nmbufs < nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) 153 nmbufs = lmax(maxmbufmem / MSIZE / 5, 154 nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16); 155 } 156 SYSINIT(tunable_mbinit, SI_SUB_KMEM, SI_ORDER_MIDDLE, tunable_mbinit, NULL); 157 158 static int 159 sysctl_nmbclusters(SYSCTL_HANDLER_ARGS) 160 { 161 int error, newnmbclusters; 162 163 newnmbclusters = nmbclusters; 164 error = sysctl_handle_int(oidp, &newnmbclusters, 0, req); 165 if (error == 0 && req->newptr) { 166 if (newnmbclusters > nmbclusters && 167 nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) { 168 nmbclusters = newnmbclusters; 169 nmbclusters = uma_zone_set_max(zone_clust, nmbclusters); 170 EVENTHANDLER_INVOKE(nmbclusters_change); 171 } else 172 error = EINVAL; 173 } 174 return (error); 175 } 176 SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbclusters, CTLTYPE_INT|CTLFLAG_RW, 177 &nmbclusters, 0, sysctl_nmbclusters, "IU", 178 "Maximum number of mbuf clusters allowed"); 179 180 static int 181 sysctl_nmbjumbop(SYSCTL_HANDLER_ARGS) 182 { 183 int error, newnmbjumbop; 184 185 newnmbjumbop = nmbjumbop; 186 error = sysctl_handle_int(oidp, &newnmbjumbop, 0, req); 187 if (error == 0 && req->newptr) { 188 if (newnmbjumbop > nmbjumbop && 189 nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) { 190 nmbjumbop = newnmbjumbop; 191 nmbjumbop = uma_zone_set_max(zone_jumbop, nmbjumbop); 192 } else 193 error = EINVAL; 194 } 195 return (error); 196 } 197 SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbop, CTLTYPE_INT|CTLFLAG_RW, 198 &nmbjumbop, 0, sysctl_nmbjumbop, "IU", 199 "Maximum number of mbuf page size jumbo clusters allowed"); 200 201 static int 202 sysctl_nmbjumbo9(SYSCTL_HANDLER_ARGS) 203 { 204 int error, newnmbjumbo9; 205 206 newnmbjumbo9 = nmbjumbo9; 207 error = sysctl_handle_int(oidp, &newnmbjumbo9, 0, req); 208 if (error == 0 && req->newptr) { 209 if (newnmbjumbo9 > nmbjumbo9 && 210 nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) { 211 nmbjumbo9 = newnmbjumbo9; 212 nmbjumbo9 = uma_zone_set_max(zone_jumbo9, nmbjumbo9); 213 } else 214 error = EINVAL; 215 } 216 return (error); 217 } 218 SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbo9, CTLTYPE_INT|CTLFLAG_RW, 219 &nmbjumbo9, 0, sysctl_nmbjumbo9, "IU", 220 "Maximum number of mbuf 9k jumbo clusters allowed"); 221 222 static int 223 sysctl_nmbjumbo16(SYSCTL_HANDLER_ARGS) 224 { 225 int error, newnmbjumbo16; 226 227 newnmbjumbo16 = nmbjumbo16; 228 error = sysctl_handle_int(oidp, &newnmbjumbo16, 0, req); 229 if (error == 0 && req->newptr) { 230 if (newnmbjumbo16 > nmbjumbo16 && 231 nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) { 232 nmbjumbo16 = newnmbjumbo16; 233 nmbjumbo16 = uma_zone_set_max(zone_jumbo16, nmbjumbo16); 234 } else 235 error = EINVAL; 236 } 237 return (error); 238 } 239 SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbo16, CTLTYPE_INT|CTLFLAG_RW, 240 &nmbjumbo16, 0, sysctl_nmbjumbo16, "IU", 241 "Maximum number of mbuf 16k jumbo clusters allowed"); 242 243 static int 244 sysctl_nmbufs(SYSCTL_HANDLER_ARGS) 245 { 246 int error, newnmbufs; 247 248 newnmbufs = nmbufs; 249 error = sysctl_handle_int(oidp, &newnmbufs, 0, req); 250 if (error == 0 && req->newptr) { 251 if (newnmbufs > nmbufs) { 252 nmbufs = newnmbufs; 253 nmbufs = uma_zone_set_max(zone_mbuf, nmbufs); 254 EVENTHANDLER_INVOKE(nmbufs_change); 255 } else 256 error = EINVAL; 257 } 258 return (error); 259 } 260 SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbufs, CTLTYPE_INT|CTLFLAG_RW, 261 &nmbufs, 0, sysctl_nmbufs, "IU", 262 "Maximum number of mbufs allowed"); 263 264 /* 265 * Zones from which we allocate. 266 */ 267 uma_zone_t zone_mbuf; 268 uma_zone_t zone_clust; 269 uma_zone_t zone_pack; 270 uma_zone_t zone_jumbop; 271 uma_zone_t zone_jumbo9; 272 uma_zone_t zone_jumbo16; 273 uma_zone_t zone_ext_refcnt; 274 275 /* 276 * Local prototypes. 277 */ 278 static int mb_ctor_mbuf(void *, int, void *, int); 279 static int mb_ctor_clust(void *, int, void *, int); 280 static int mb_ctor_pack(void *, int, void *, int); 281 static void mb_dtor_mbuf(void *, int, void *); 282 static void mb_dtor_clust(void *, int, void *); 283 static void mb_dtor_pack(void *, int, void *); 284 static int mb_zinit_pack(void *, int, int); 285 static void mb_zfini_pack(void *, int); 286 287 static void mb_reclaim(void *); 288 static void *mbuf_jumbo_alloc(uma_zone_t, int, uint8_t *, int); 289 290 /* Ensure that MSIZE is a power of 2. */ 291 CTASSERT((((MSIZE - 1) ^ MSIZE) + 1) >> 1 == MSIZE); 292 293 /* 294 * Initialize FreeBSD Network buffer allocation. 295 */ 296 static void 297 mbuf_init(void *dummy) 298 { 299 300 /* 301 * Configure UMA zones for Mbufs, Clusters, and Packets. 302 */ 303 zone_mbuf = uma_zcreate(MBUF_MEM_NAME, MSIZE, 304 mb_ctor_mbuf, mb_dtor_mbuf, 305 #ifdef INVARIANTS 306 trash_init, trash_fini, 307 #else 308 NULL, NULL, 309 #endif 310 MSIZE - 1, UMA_ZONE_MAXBUCKET); 311 if (nmbufs > 0) 312 nmbufs = uma_zone_set_max(zone_mbuf, nmbufs); 313 uma_zone_set_warning(zone_mbuf, "kern.ipc.nmbufs limit reached"); 314 315 zone_clust = uma_zcreate(MBUF_CLUSTER_MEM_NAME, MCLBYTES, 316 mb_ctor_clust, mb_dtor_clust, 317 #ifdef INVARIANTS 318 trash_init, trash_fini, 319 #else 320 NULL, NULL, 321 #endif 322 UMA_ALIGN_PTR, UMA_ZONE_REFCNT); 323 if (nmbclusters > 0) 324 nmbclusters = uma_zone_set_max(zone_clust, nmbclusters); 325 uma_zone_set_warning(zone_clust, "kern.ipc.nmbclusters limit reached"); 326 327 zone_pack = uma_zsecond_create(MBUF_PACKET_MEM_NAME, mb_ctor_pack, 328 mb_dtor_pack, mb_zinit_pack, mb_zfini_pack, zone_mbuf); 329 330 /* Make jumbo frame zone too. Page size, 9k and 16k. */ 331 zone_jumbop = uma_zcreate(MBUF_JUMBOP_MEM_NAME, MJUMPAGESIZE, 332 mb_ctor_clust, mb_dtor_clust, 333 #ifdef INVARIANTS 334 trash_init, trash_fini, 335 #else 336 NULL, NULL, 337 #endif 338 UMA_ALIGN_PTR, UMA_ZONE_REFCNT); 339 if (nmbjumbop > 0) 340 nmbjumbop = uma_zone_set_max(zone_jumbop, nmbjumbop); 341 uma_zone_set_warning(zone_jumbop, "kern.ipc.nmbjumbop limit reached"); 342 343 zone_jumbo9 = uma_zcreate(MBUF_JUMBO9_MEM_NAME, MJUM9BYTES, 344 mb_ctor_clust, mb_dtor_clust, 345 #ifdef INVARIANTS 346 trash_init, trash_fini, 347 #else 348 NULL, NULL, 349 #endif 350 UMA_ALIGN_PTR, UMA_ZONE_REFCNT); 351 uma_zone_set_allocf(zone_jumbo9, mbuf_jumbo_alloc); 352 if (nmbjumbo9 > 0) 353 nmbjumbo9 = uma_zone_set_max(zone_jumbo9, nmbjumbo9); 354 uma_zone_set_warning(zone_jumbo9, "kern.ipc.nmbjumbo9 limit reached"); 355 356 zone_jumbo16 = uma_zcreate(MBUF_JUMBO16_MEM_NAME, MJUM16BYTES, 357 mb_ctor_clust, mb_dtor_clust, 358 #ifdef INVARIANTS 359 trash_init, trash_fini, 360 #else 361 NULL, NULL, 362 #endif 363 UMA_ALIGN_PTR, UMA_ZONE_REFCNT); 364 uma_zone_set_allocf(zone_jumbo16, mbuf_jumbo_alloc); 365 if (nmbjumbo16 > 0) 366 nmbjumbo16 = uma_zone_set_max(zone_jumbo16, nmbjumbo16); 367 uma_zone_set_warning(zone_jumbo16, "kern.ipc.nmbjumbo16 limit reached"); 368 369 zone_ext_refcnt = uma_zcreate(MBUF_EXTREFCNT_MEM_NAME, sizeof(u_int), 370 NULL, NULL, 371 NULL, NULL, 372 UMA_ALIGN_PTR, UMA_ZONE_ZINIT); 373 374 /* uma_prealloc() goes here... */ 375 376 /* 377 * Hook event handler for low-memory situation, used to 378 * drain protocols and push data back to the caches (UMA 379 * later pushes it back to VM). 380 */ 381 EVENTHANDLER_REGISTER(vm_lowmem, mb_reclaim, NULL, 382 EVENTHANDLER_PRI_FIRST); 383 } 384 SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbuf_init, NULL); 385 386 /* 387 * UMA backend page allocator for the jumbo frame zones. 388 * 389 * Allocates kernel virtual memory that is backed by contiguous physical 390 * pages. 391 */ 392 static void * 393 mbuf_jumbo_alloc(uma_zone_t zone, int bytes, uint8_t *flags, int wait) 394 { 395 396 /* Inform UMA that this allocator uses kernel_map/object. */ 397 *flags = UMA_SLAB_KERNEL; 398 return ((void *)kmem_alloc_contig(kernel_map, bytes, wait, 399 (vm_paddr_t)0, ~(vm_paddr_t)0, 1, 0, VM_MEMATTR_DEFAULT)); 400 } 401 402 /* 403 * Constructor for Mbuf master zone. 404 * 405 * The 'arg' pointer points to a mb_args structure which 406 * contains call-specific information required to support the 407 * mbuf allocation API. See mbuf.h. 408 */ 409 static int 410 mb_ctor_mbuf(void *mem, int size, void *arg, int how) 411 { 412 struct mbuf *m; 413 struct mb_args *args; 414 #ifdef MAC 415 int error; 416 #endif 417 int flags; 418 short type; 419 420 #ifdef INVARIANTS 421 trash_ctor(mem, size, arg, how); 422 #endif 423 m = (struct mbuf *)mem; 424 args = (struct mb_args *)arg; 425 flags = args->flags; 426 type = args->type; 427 428 /* 429 * The mbuf is initialized later. The caller has the 430 * responsibility to set up any MAC labels too. 431 */ 432 if (type == MT_NOINIT) 433 return (0); 434 435 m->m_next = NULL; 436 m->m_nextpkt = NULL; 437 m->m_len = 0; 438 m->m_flags = flags; 439 m->m_type = type; 440 if (flags & M_PKTHDR) { 441 m->m_data = m->m_pktdat; 442 m->m_pkthdr.rcvif = NULL; 443 m->m_pkthdr.header = NULL; 444 m->m_pkthdr.len = 0; 445 m->m_pkthdr.csum_flags = 0; 446 m->m_pkthdr.csum_data = 0; 447 m->m_pkthdr.tso_segsz = 0; 448 m->m_pkthdr.ether_vtag = 0; 449 m->m_pkthdr.flowid = 0; 450 m->m_pkthdr.fibnum = 0; 451 SLIST_INIT(&m->m_pkthdr.tags); 452 #ifdef MAC 453 /* If the label init fails, fail the alloc */ 454 error = mac_mbuf_init(m, how); 455 if (error) 456 return (error); 457 #endif 458 } else 459 m->m_data = m->m_dat; 460 return (0); 461 } 462 463 /* 464 * The Mbuf master zone destructor. 465 */ 466 static void 467 mb_dtor_mbuf(void *mem, int size, void *arg) 468 { 469 struct mbuf *m; 470 unsigned long flags; 471 472 m = (struct mbuf *)mem; 473 flags = (unsigned long)arg; 474 475 if ((flags & MB_NOTAGS) == 0 && (m->m_flags & M_PKTHDR) != 0) 476 m_tag_delete_chain(m, NULL); 477 KASSERT((m->m_flags & M_EXT) == 0, ("%s: M_EXT set", __func__)); 478 KASSERT((m->m_flags & M_NOFREE) == 0, ("%s: M_NOFREE set", __func__)); 479 #ifdef INVARIANTS 480 trash_dtor(mem, size, arg); 481 #endif 482 } 483 484 /* 485 * The Mbuf Packet zone destructor. 486 */ 487 static void 488 mb_dtor_pack(void *mem, int size, void *arg) 489 { 490 struct mbuf *m; 491 492 m = (struct mbuf *)mem; 493 if ((m->m_flags & M_PKTHDR) != 0) 494 m_tag_delete_chain(m, NULL); 495 496 /* Make sure we've got a clean cluster back. */ 497 KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__)); 498 KASSERT(m->m_ext.ext_buf != NULL, ("%s: ext_buf == NULL", __func__)); 499 KASSERT(m->m_ext.ext_free == NULL, ("%s: ext_free != NULL", __func__)); 500 KASSERT(m->m_ext.ext_arg1 == NULL, ("%s: ext_arg1 != NULL", __func__)); 501 KASSERT(m->m_ext.ext_arg2 == NULL, ("%s: ext_arg2 != NULL", __func__)); 502 KASSERT(m->m_ext.ext_size == MCLBYTES, ("%s: ext_size != MCLBYTES", __func__)); 503 KASSERT(m->m_ext.ext_type == EXT_PACKET, ("%s: ext_type != EXT_PACKET", __func__)); 504 KASSERT(*m->m_ext.ref_cnt == 1, ("%s: ref_cnt != 1", __func__)); 505 #ifdef INVARIANTS 506 trash_dtor(m->m_ext.ext_buf, MCLBYTES, arg); 507 #endif 508 /* 509 * If there are processes blocked on zone_clust, waiting for pages 510 * to be freed up, * cause them to be woken up by draining the 511 * packet zone. We are exposed to a race here * (in the check for 512 * the UMA_ZFLAG_FULL) where we might miss the flag set, but that 513 * is deliberate. We don't want to acquire the zone lock for every 514 * mbuf free. 515 */ 516 if (uma_zone_exhausted_nolock(zone_clust)) 517 zone_drain(zone_pack); 518 } 519 520 /* 521 * The Cluster and Jumbo[PAGESIZE|9|16] zone constructor. 522 * 523 * Here the 'arg' pointer points to the Mbuf which we 524 * are configuring cluster storage for. If 'arg' is 525 * empty we allocate just the cluster without setting 526 * the mbuf to it. See mbuf.h. 527 */ 528 static int 529 mb_ctor_clust(void *mem, int size, void *arg, int how) 530 { 531 struct mbuf *m; 532 u_int *refcnt; 533 int type; 534 uma_zone_t zone; 535 536 #ifdef INVARIANTS 537 trash_ctor(mem, size, arg, how); 538 #endif 539 switch (size) { 540 case MCLBYTES: 541 type = EXT_CLUSTER; 542 zone = zone_clust; 543 break; 544 #if MJUMPAGESIZE != MCLBYTES 545 case MJUMPAGESIZE: 546 type = EXT_JUMBOP; 547 zone = zone_jumbop; 548 break; 549 #endif 550 case MJUM9BYTES: 551 type = EXT_JUMBO9; 552 zone = zone_jumbo9; 553 break; 554 case MJUM16BYTES: 555 type = EXT_JUMBO16; 556 zone = zone_jumbo16; 557 break; 558 default: 559 panic("unknown cluster size"); 560 break; 561 } 562 563 m = (struct mbuf *)arg; 564 refcnt = uma_find_refcnt(zone, mem); 565 *refcnt = 1; 566 if (m != NULL) { 567 m->m_ext.ext_buf = (caddr_t)mem; 568 m->m_data = m->m_ext.ext_buf; 569 m->m_flags |= M_EXT; 570 m->m_ext.ext_free = NULL; 571 m->m_ext.ext_arg1 = NULL; 572 m->m_ext.ext_arg2 = NULL; 573 m->m_ext.ext_size = size; 574 m->m_ext.ext_type = type; 575 m->m_ext.ref_cnt = refcnt; 576 } 577 578 return (0); 579 } 580 581 /* 582 * The Mbuf Cluster zone destructor. 583 */ 584 static void 585 mb_dtor_clust(void *mem, int size, void *arg) 586 { 587 #ifdef INVARIANTS 588 uma_zone_t zone; 589 590 zone = m_getzone(size); 591 KASSERT(*(uma_find_refcnt(zone, mem)) <= 1, 592 ("%s: refcnt incorrect %u", __func__, 593 *(uma_find_refcnt(zone, mem))) ); 594 595 trash_dtor(mem, size, arg); 596 #endif 597 } 598 599 /* 600 * The Packet secondary zone's init routine, executed on the 601 * object's transition from mbuf keg slab to zone cache. 602 */ 603 static int 604 mb_zinit_pack(void *mem, int size, int how) 605 { 606 struct mbuf *m; 607 608 m = (struct mbuf *)mem; /* m is virgin. */ 609 if (uma_zalloc_arg(zone_clust, m, how) == NULL || 610 m->m_ext.ext_buf == NULL) 611 return (ENOMEM); 612 m->m_ext.ext_type = EXT_PACKET; /* Override. */ 613 #ifdef INVARIANTS 614 trash_init(m->m_ext.ext_buf, MCLBYTES, how); 615 #endif 616 return (0); 617 } 618 619 /* 620 * The Packet secondary zone's fini routine, executed on the 621 * object's transition from zone cache to keg slab. 622 */ 623 static void 624 mb_zfini_pack(void *mem, int size) 625 { 626 struct mbuf *m; 627 628 m = (struct mbuf *)mem; 629 #ifdef INVARIANTS 630 trash_fini(m->m_ext.ext_buf, MCLBYTES); 631 #endif 632 uma_zfree_arg(zone_clust, m->m_ext.ext_buf, NULL); 633 #ifdef INVARIANTS 634 trash_dtor(mem, size, NULL); 635 #endif 636 } 637 638 /* 639 * The "packet" keg constructor. 640 */ 641 static int 642 mb_ctor_pack(void *mem, int size, void *arg, int how) 643 { 644 struct mbuf *m; 645 struct mb_args *args; 646 #ifdef MAC 647 int error; 648 #endif 649 int flags; 650 short type; 651 652 m = (struct mbuf *)mem; 653 args = (struct mb_args *)arg; 654 flags = args->flags; 655 type = args->type; 656 657 #ifdef INVARIANTS 658 trash_ctor(m->m_ext.ext_buf, MCLBYTES, arg, how); 659 #endif 660 m->m_next = NULL; 661 m->m_nextpkt = NULL; 662 m->m_data = m->m_ext.ext_buf; 663 m->m_len = 0; 664 m->m_flags = (flags | M_EXT); 665 m->m_type = type; 666 667 if (flags & M_PKTHDR) { 668 m->m_pkthdr.rcvif = NULL; 669 m->m_pkthdr.len = 0; 670 m->m_pkthdr.header = NULL; 671 m->m_pkthdr.csum_flags = 0; 672 m->m_pkthdr.csum_data = 0; 673 m->m_pkthdr.tso_segsz = 0; 674 m->m_pkthdr.ether_vtag = 0; 675 m->m_pkthdr.flowid = 0; 676 m->m_pkthdr.fibnum = 0; 677 SLIST_INIT(&m->m_pkthdr.tags); 678 #ifdef MAC 679 /* If the label init fails, fail the alloc */ 680 error = mac_mbuf_init(m, how); 681 if (error) 682 return (error); 683 #endif 684 } 685 /* m_ext is already initialized. */ 686 687 return (0); 688 } 689 690 int 691 m_pkthdr_init(struct mbuf *m, int how) 692 { 693 #ifdef MAC 694 int error; 695 #endif 696 m->m_data = m->m_pktdat; 697 SLIST_INIT(&m->m_pkthdr.tags); 698 m->m_pkthdr.rcvif = NULL; 699 m->m_pkthdr.header = NULL; 700 m->m_pkthdr.len = 0; 701 m->m_pkthdr.flowid = 0; 702 m->m_pkthdr.fibnum = 0; 703 m->m_pkthdr.csum_flags = 0; 704 m->m_pkthdr.csum_data = 0; 705 m->m_pkthdr.tso_segsz = 0; 706 m->m_pkthdr.ether_vtag = 0; 707 #ifdef MAC 708 /* If the label init fails, fail the alloc */ 709 error = mac_mbuf_init(m, how); 710 if (error) 711 return (error); 712 #endif 713 714 return (0); 715 } 716 717 /* 718 * This is the protocol drain routine. 719 * 720 * No locks should be held when this is called. The drain routines have to 721 * presently acquire some locks which raises the possibility of lock order 722 * reversal. 723 */ 724 static void 725 mb_reclaim(void *junk) 726 { 727 struct domain *dp; 728 struct protosw *pr; 729 730 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK | WARN_PANIC, NULL, 731 "mb_reclaim()"); 732 733 for (dp = domains; dp != NULL; dp = dp->dom_next) 734 for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++) 735 if (pr->pr_drain != NULL) 736 (*pr->pr_drain)(); 737 } 738