/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2004, 2005,
 *	Bosko Milekic <bmilekic@FreeBSD.org>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_param.h"
#include "opt_kern_tls.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/domainset.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/ktls.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/protosw.h>
#include <sys/refcount.h>
#include <sys/sf_buf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_var.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/uma.h>
#include <vm/uma_dbg.h>

/*
 * In FreeBSD, Mbufs and Mbuf Clusters are allocated from UMA
 * Zones.
 *
 * Mbuf Clusters (2K, contiguous) are allocated from the Cluster
 * Zone.  The Zone can be capped at kern.ipc.nmbclusters, if the
 * administrator so desires.
 *
 * Mbufs are allocated from a UMA Master Zone called the Mbuf
 * Zone.
 *
 * Additionally, FreeBSD provides a Packet Zone, which it
 * configures as a Secondary Zone to the Mbuf Master Zone,
 * thus sharing backend Slab kegs with the Mbuf Master Zone.
 *
 * Thus common-case allocations and locking are simplified:
 *
 *  m_clget()                m_getcl()
 *   |                         |
 *   |   .------------>[(Packet Cache)]    m_get(), m_gethdr()
 *   |   |             [     Packet   ]            |
 * [(Cluster Cache)]   [    Secondary ]   [ (Mbuf Cache)      ]
 * [  Cluster Zone  ]  [     Zone     ]   [ Mbuf Master Zone  ]
 *   |                       \________         |
 * [ Cluster Keg   ]                   \      /
 *   |                              [ Mbuf Keg   ]
 * [ Cluster Slabs ]                    |
 *   |                              [ Mbuf Slabs ]
 *    \____________(VM)_________________/
 *
 *
 * Whenever an object is allocated with uma_zalloc() out of
 * one of the Zones, its _ctor_ function is executed.  Likewise,
 * on any deallocation through uma_zfree() the _dtor_ function
 * is executed.
 *
 * Caches are per-CPU and are filled from the Master Zone.
 *
 * Whenever an object is allocated from the underlying global
 * memory pool it gets pre-initialized with the _zinit_ functions.
 * When the kegs are overfull, objects get decommissioned with
 * the _zfini_ functions and freed back to the global memory pool.
 */
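/*
 * Illustrative sketch (not part of the allocator itself): the common
 * allocation paths from the diagram above, as seen by a consumer of the
 * mbuf(9) KPI.  m_getcl() draws a ready-made mbuf + cluster pair from the
 * Packet Zone, while m_get()/m_gethdr() followed by m_clget() touches the
 * Mbuf and Cluster Zones separately.  Error handling is minimal and purely
 * for illustration.
 *
 *	struct mbuf *m1, *m2;
 *
 *	// Single allocation through the Packet (secondary) zone.
 *	m1 = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
 *
 *	// Equivalent two-step path through the Mbuf and Cluster zones.
 *	m2 = m_gethdr(M_NOWAIT, MT_DATA);
 *	if (m2 != NULL && m_clget(m2, M_NOWAIT) == 0) {
 *		m_freem(m2);		// no cluster could be attached
 *		m2 = NULL;
 *	}
 *
 *	m_freem(m1);
 *	m_freem(m2);
 */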
int nmbufs;			/* limits number of mbufs */
int nmbclusters;		/* limits number of mbuf clusters */
int nmbjumbop;			/* limits number of page size jumbo clusters */
int nmbjumbo9;			/* limits number of 9k jumbo clusters */
int nmbjumbo16;			/* limits number of 16k jumbo clusters */

bool mb_use_ext_pgs;		/* use M_EXTPG mbufs for sendfile & TLS */
SYSCTL_BOOL(_kern_ipc, OID_AUTO, mb_use_ext_pgs, CTLFLAG_RWTUN,
    &mb_use_ext_pgs, 0,
    "Use unmapped mbufs for sendfile(2) and TLS offload");

static quad_t maxmbufmem;	/* overall real memory limit for all mbufs */

SYSCTL_QUAD(_kern_ipc, OID_AUTO, maxmbufmem, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &maxmbufmem, 0, "Maximum real memory allocatable to various mbuf types");

static counter_u64_t snd_tag_count;
SYSCTL_COUNTER_U64(_kern_ipc, OID_AUTO, num_snd_tags, CTLFLAG_RW,
    &snd_tag_count, "# of active mbuf send tags");

/*
 * tunable_mbinit() has to be run before any mbuf allocations are done.
 */
static void
tunable_mbinit(void *dummy)
{
	quad_t realmem;

	/*
	 * The default limit for all mbuf related memory is 1/2 of all
	 * available kernel memory (physical or kmem).
	 * At most it can be 3/4 of available kernel memory.
	 */
	realmem = qmin((quad_t)physmem * PAGE_SIZE, vm_kmem_size);
	maxmbufmem = realmem / 2;
	TUNABLE_QUAD_FETCH("kern.ipc.maxmbufmem", &maxmbufmem);
	if (maxmbufmem > realmem / 4 * 3)
		maxmbufmem = realmem / 4 * 3;

	TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
	if (nmbclusters == 0)
		nmbclusters = maxmbufmem / MCLBYTES / 4;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbop", &nmbjumbop);
	if (nmbjumbop == 0)
		nmbjumbop = maxmbufmem / MJUMPAGESIZE / 4;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbo9", &nmbjumbo9);
	if (nmbjumbo9 == 0)
		nmbjumbo9 = maxmbufmem / MJUM9BYTES / 6;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbo16", &nmbjumbo16);
	if (nmbjumbo16 == 0)
		nmbjumbo16 = maxmbufmem / MJUM16BYTES / 6;

	/*
	 * We need at least as many mbufs as we have clusters of
	 * the various types added together.
	 */
	TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);
	if (nmbufs < nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16)
		nmbufs = lmax(maxmbufmem / MSIZE / 5,
		    nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16);
}
SYSINIT(tunable_mbinit, SI_SUB_KMEM, SI_ORDER_MIDDLE, tunable_mbinit, NULL);
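/*
 * Worked example of the defaults above (illustrative only): on a
 * hypothetical amd64 machine where both physical memory and vm_kmem_size
 * come to roughly 8 GB, realmem is 8 GB and maxmbufmem defaults to 4 GB.
 * With the usual values MCLBYTES = 2048, MJUMPAGESIZE = PAGE_SIZE = 4096,
 * MJUM9BYTES = 9216, MJUM16BYTES = 16384 and MSIZE = 256, the derived
 * defaults work out to approximately:
 *
 *	nmbclusters = 4G / 2048 / 4  = 524288
 *	nmbjumbop   = 4G / 4096 / 4  = 262144
 *	nmbjumbo9   = 4G / 9216 / 6  ~  77672
 *	nmbjumbo16  = 4G / 16384 / 6 ~  43690
 *	nmbufs      = max(4G / 256 / 5, sum of the above) = 3355443
 *
 * Each of these can be overridden with the corresponding loader tunable.
 */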
static int
sysctl_nmbclusters(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbclusters;

	newnmbclusters = nmbclusters;
	error = sysctl_handle_int(oidp, &newnmbclusters, 0, req);
	if (error == 0 && req->newptr && newnmbclusters != nmbclusters) {
		if (newnmbclusters > nmbclusters &&
		    nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
			nmbclusters = newnmbclusters;
			nmbclusters = uma_zone_set_max(zone_clust, nmbclusters);
			EVENTHANDLER_INVOKE(nmbclusters_change);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbclusters,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &nmbclusters, 0,
    sysctl_nmbclusters, "IU",
    "Maximum number of mbuf clusters allowed");

static int
sysctl_nmbjumbop(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbjumbop;

	newnmbjumbop = nmbjumbop;
	error = sysctl_handle_int(oidp, &newnmbjumbop, 0, req);
	if (error == 0 && req->newptr && newnmbjumbop != nmbjumbop) {
		if (newnmbjumbop > nmbjumbop &&
		    nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
			nmbjumbop = newnmbjumbop;
			nmbjumbop = uma_zone_set_max(zone_jumbop, nmbjumbop);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbop,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &nmbjumbop, 0,
    sysctl_nmbjumbop, "IU",
    "Maximum number of mbuf page size jumbo clusters allowed");

static int
sysctl_nmbjumbo9(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbjumbo9;

	newnmbjumbo9 = nmbjumbo9;
	error = sysctl_handle_int(oidp, &newnmbjumbo9, 0, req);
	if (error == 0 && req->newptr && newnmbjumbo9 != nmbjumbo9) {
		if (newnmbjumbo9 > nmbjumbo9 &&
		    nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
			nmbjumbo9 = newnmbjumbo9;
			nmbjumbo9 = uma_zone_set_max(zone_jumbo9, nmbjumbo9);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbo9,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &nmbjumbo9, 0,
    sysctl_nmbjumbo9, "IU",
    "Maximum number of mbuf 9k jumbo clusters allowed");

static int
sysctl_nmbjumbo16(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbjumbo16;

	newnmbjumbo16 = nmbjumbo16;
	error = sysctl_handle_int(oidp, &newnmbjumbo16, 0, req);
	if (error == 0 && req->newptr && newnmbjumbo16 != nmbjumbo16) {
		if (newnmbjumbo16 > nmbjumbo16 &&
		    nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
			nmbjumbo16 = newnmbjumbo16;
			nmbjumbo16 = uma_zone_set_max(zone_jumbo16, nmbjumbo16);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbo16,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &nmbjumbo16, 0,
    sysctl_nmbjumbo16, "IU",
    "Maximum number of mbuf 16k jumbo clusters allowed");

static int
sysctl_nmbufs(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbufs;

	newnmbufs = nmbufs;
	error = sysctl_handle_int(oidp, &newnmbufs, 0, req);
	if (error == 0 && req->newptr && newnmbufs != nmbufs) {
		if (newnmbufs > nmbufs) {
			nmbufs = newnmbufs;
			nmbufs = uma_zone_set_max(zone_mbuf, nmbufs);
			EVENTHANDLER_INVOKE(nmbufs_change);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbufs,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    &nmbufs, 0, sysctl_nmbufs, "IU",
    "Maximum number of mbufs allowed");
/*
 * Zones from which we allocate.
 */
uma_zone_t	zone_mbuf;
uma_zone_t	zone_clust;
uma_zone_t	zone_pack;
uma_zone_t	zone_jumbop;
uma_zone_t	zone_jumbo9;
uma_zone_t	zone_jumbo16;

/*
 * Local prototypes.
 */
static int	mb_ctor_mbuf(void *, int, void *, int);
static int	mb_ctor_clust(void *, int, void *, int);
static int	mb_ctor_pack(void *, int, void *, int);
static void	mb_dtor_mbuf(void *, int, void *);
static void	mb_dtor_pack(void *, int, void *);
static int	mb_zinit_pack(void *, int, int);
static void	mb_zfini_pack(void *, int);
static void	mb_reclaim(uma_zone_t, int);

/* Ensure that MSIZE is a power of 2. */
CTASSERT((((MSIZE - 1) ^ MSIZE) + 1) >> 1 == MSIZE);

_Static_assert(sizeof(struct mbuf) <= MSIZE,
    "size of mbuf exceeds MSIZE");

/*
 * Initialize FreeBSD Network buffer allocation.
 */
static void
mbuf_init(void *dummy)
{

	/*
	 * Configure UMA zones for Mbufs, Clusters, and Packets.
	 */
	zone_mbuf = uma_zcreate(MBUF_MEM_NAME, MSIZE,
	    mb_ctor_mbuf, mb_dtor_mbuf, NULL, NULL,
	    MSIZE - 1, UMA_ZONE_CONTIG | UMA_ZONE_MAXBUCKET);
	if (nmbufs > 0)
		nmbufs = uma_zone_set_max(zone_mbuf, nmbufs);
	uma_zone_set_warning(zone_mbuf, "kern.ipc.nmbufs limit reached");
	uma_zone_set_maxaction(zone_mbuf, mb_reclaim);

	zone_clust = uma_zcreate(MBUF_CLUSTER_MEM_NAME, MCLBYTES,
	    mb_ctor_clust, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_CONTIG);
	if (nmbclusters > 0)
		nmbclusters = uma_zone_set_max(zone_clust, nmbclusters);
	uma_zone_set_warning(zone_clust, "kern.ipc.nmbclusters limit reached");
	uma_zone_set_maxaction(zone_clust, mb_reclaim);

	zone_pack = uma_zsecond_create(MBUF_PACKET_MEM_NAME, mb_ctor_pack,
	    mb_dtor_pack, mb_zinit_pack, mb_zfini_pack, zone_mbuf);

	/* Make the jumbo frame zones too: page size, 9k and 16k. */
	zone_jumbop = uma_zcreate(MBUF_JUMBOP_MEM_NAME, MJUMPAGESIZE,
	    mb_ctor_clust, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_CONTIG);
	if (nmbjumbop > 0)
		nmbjumbop = uma_zone_set_max(zone_jumbop, nmbjumbop);
	uma_zone_set_warning(zone_jumbop, "kern.ipc.nmbjumbop limit reached");
	uma_zone_set_maxaction(zone_jumbop, mb_reclaim);

	zone_jumbo9 = uma_zcreate(MBUF_JUMBO9_MEM_NAME, MJUM9BYTES,
	    mb_ctor_clust, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_CONTIG);
	if (nmbjumbo9 > 0)
		nmbjumbo9 = uma_zone_set_max(zone_jumbo9, nmbjumbo9);
	uma_zone_set_warning(zone_jumbo9, "kern.ipc.nmbjumbo9 limit reached");
	uma_zone_set_maxaction(zone_jumbo9, mb_reclaim);

	zone_jumbo16 = uma_zcreate(MBUF_JUMBO16_MEM_NAME, MJUM16BYTES,
	    mb_ctor_clust, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_CONTIG);
	if (nmbjumbo16 > 0)
		nmbjumbo16 = uma_zone_set_max(zone_jumbo16, nmbjumbo16);
	uma_zone_set_warning(zone_jumbo16, "kern.ipc.nmbjumbo16 limit reached");
	uma_zone_set_maxaction(zone_jumbo16, mb_reclaim);

	/*
	 * Hook event handler for low-memory situation, used to
	 * drain protocols and push data back to the caches (UMA
	 * later pushes it back to VM).
	 */
	EVENTHANDLER_REGISTER(vm_lowmem, mb_reclaim, NULL,
	    EVENTHANDLER_PRI_FIRST);

	snd_tag_count = counter_u64_alloc(M_WAITOK);
}
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbuf_init, NULL);
374 */ 375 EVENTHANDLER_REGISTER(vm_lowmem, mb_reclaim, NULL, 376 EVENTHANDLER_PRI_FIRST); 377 378 snd_tag_count = counter_u64_alloc(M_WAITOK); 379 } 380 SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbuf_init, NULL); 381 382 #ifdef DEBUGNET 383 /* 384 * debugnet makes use of a pre-allocated pool of mbufs and clusters. When 385 * debugnet is configured, we initialize a set of UMA cache zones which return 386 * items from this pool. At panic-time, the regular UMA zone pointers are 387 * overwritten with those of the cache zones so that drivers may allocate and 388 * free mbufs and clusters without attempting to allocate physical memory. 389 * 390 * We keep mbufs and clusters in a pair of mbuf queues. In particular, for 391 * the purpose of caching clusters, we treat them as mbufs. 392 */ 393 static struct mbufq dn_mbufq = 394 { STAILQ_HEAD_INITIALIZER(dn_mbufq.mq_head), 0, INT_MAX }; 395 static struct mbufq dn_clustq = 396 { STAILQ_HEAD_INITIALIZER(dn_clustq.mq_head), 0, INT_MAX }; 397 398 static int dn_clsize; 399 static uma_zone_t dn_zone_mbuf; 400 static uma_zone_t dn_zone_clust; 401 static uma_zone_t dn_zone_pack; 402 403 static struct debugnet_saved_zones { 404 uma_zone_t dsz_mbuf; 405 uma_zone_t dsz_clust; 406 uma_zone_t dsz_pack; 407 uma_zone_t dsz_jumbop; 408 uma_zone_t dsz_jumbo9; 409 uma_zone_t dsz_jumbo16; 410 bool dsz_debugnet_zones_enabled; 411 } dn_saved_zones; 412 413 static int 414 dn_buf_import(void *arg, void **store, int count, int domain __unused, 415 int flags) 416 { 417 struct mbufq *q; 418 struct mbuf *m; 419 int i; 420 421 q = arg; 422 423 for (i = 0; i < count; i++) { 424 m = mbufq_dequeue(q); 425 if (m == NULL) 426 break; 427 trash_init(m, q == &dn_mbufq ? MSIZE : dn_clsize, flags); 428 store[i] = m; 429 } 430 KASSERT((flags & M_WAITOK) == 0 || i == count, 431 ("%s: ran out of pre-allocated mbufs", __func__)); 432 return (i); 433 } 434 435 static void 436 dn_buf_release(void *arg, void **store, int count) 437 { 438 struct mbufq *q; 439 struct mbuf *m; 440 int i; 441 442 q = arg; 443 444 for (i = 0; i < count; i++) { 445 m = store[i]; 446 (void)mbufq_enqueue(q, m); 447 } 448 } 449 450 static int 451 dn_pack_import(void *arg __unused, void **store, int count, int domain __unused, 452 int flags __unused) 453 { 454 struct mbuf *m; 455 void *clust; 456 int i; 457 458 for (i = 0; i < count; i++) { 459 m = m_get(MT_DATA, M_NOWAIT); 460 if (m == NULL) 461 break; 462 clust = uma_zalloc(dn_zone_clust, M_NOWAIT); 463 if (clust == NULL) { 464 m_free(m); 465 break; 466 } 467 mb_ctor_clust(clust, dn_clsize, m, 0); 468 store[i] = m; 469 } 470 KASSERT((flags & M_WAITOK) == 0 || i == count, 471 ("%s: ran out of pre-allocated mbufs", __func__)); 472 return (i); 473 } 474 475 static void 476 dn_pack_release(void *arg __unused, void **store, int count) 477 { 478 struct mbuf *m; 479 void *clust; 480 int i; 481 482 for (i = 0; i < count; i++) { 483 m = store[i]; 484 clust = m->m_ext.ext_buf; 485 uma_zfree(dn_zone_clust, clust); 486 uma_zfree(dn_zone_mbuf, m); 487 } 488 } 489 490 /* 491 * Free the pre-allocated mbufs and clusters reserved for debugnet, and destroy 492 * the corresponding UMA cache zones. 
/*
 * Free the pre-allocated mbufs and clusters reserved for debugnet, and destroy
 * the corresponding UMA cache zones.
 */
void
debugnet_mbuf_drain(void)
{
	struct mbuf *m;
	void *item;

	if (dn_zone_mbuf != NULL) {
		uma_zdestroy(dn_zone_mbuf);
		dn_zone_mbuf = NULL;
	}
	if (dn_zone_clust != NULL) {
		uma_zdestroy(dn_zone_clust);
		dn_zone_clust = NULL;
	}
	if (dn_zone_pack != NULL) {
		uma_zdestroy(dn_zone_pack);
		dn_zone_pack = NULL;
	}

	while ((m = mbufq_dequeue(&dn_mbufq)) != NULL)
		m_free(m);
	while ((item = mbufq_dequeue(&dn_clustq)) != NULL)
		uma_zfree(m_getzone(dn_clsize), item);
}

/*
 * Callback invoked immediately prior to starting a debugnet connection.
 */
void
debugnet_mbuf_start(void)
{

	MPASS(!dn_saved_zones.dsz_debugnet_zones_enabled);

	/* Save the old zone pointers to restore when debugnet is closed. */
	dn_saved_zones = (struct debugnet_saved_zones) {
		.dsz_debugnet_zones_enabled = true,
		.dsz_mbuf = zone_mbuf,
		.dsz_clust = zone_clust,
		.dsz_pack = zone_pack,
		.dsz_jumbop = zone_jumbop,
		.dsz_jumbo9 = zone_jumbo9,
		.dsz_jumbo16 = zone_jumbo16,
	};

	/*
	 * All cluster zones return buffers of the size requested by the
	 * drivers.  It's up to the driver to reinitialize the zones if the
	 * MTU of a debugnet-enabled interface changes.
	 */
	printf("debugnet: overwriting mbuf zone pointers\n");
	zone_mbuf = dn_zone_mbuf;
	zone_clust = dn_zone_clust;
	zone_pack = dn_zone_pack;
	zone_jumbop = dn_zone_clust;
	zone_jumbo9 = dn_zone_clust;
	zone_jumbo16 = dn_zone_clust;
}

/*
 * Callback invoked when a debugnet connection is closed/finished.
 */
void
debugnet_mbuf_finish(void)
{

	MPASS(dn_saved_zones.dsz_debugnet_zones_enabled);

	printf("debugnet: restoring mbuf zone pointers\n");
	zone_mbuf = dn_saved_zones.dsz_mbuf;
	zone_clust = dn_saved_zones.dsz_clust;
	zone_pack = dn_saved_zones.dsz_pack;
	zone_jumbop = dn_saved_zones.dsz_jumbop;
	zone_jumbo9 = dn_saved_zones.dsz_jumbo9;
	zone_jumbo16 = dn_saved_zones.dsz_jumbo16;

	memset(&dn_saved_zones, 0, sizeof(dn_saved_zones));
}

/*
 * Reinitialize the debugnet mbuf+cluster pool and cache zones.
 */
void
debugnet_mbuf_reinit(int nmbuf, int nclust, int clsize)
{
	struct mbuf *m;
	void *item;

	debugnet_mbuf_drain();

	dn_clsize = clsize;

	dn_zone_mbuf = uma_zcache_create("debugnet_" MBUF_MEM_NAME,
	    MSIZE, mb_ctor_mbuf, mb_dtor_mbuf, NULL, NULL,
	    dn_buf_import, dn_buf_release,
	    &dn_mbufq, UMA_ZONE_NOBUCKET);

	dn_zone_clust = uma_zcache_create("debugnet_" MBUF_CLUSTER_MEM_NAME,
	    clsize, mb_ctor_clust, NULL, NULL, NULL,
	    dn_buf_import, dn_buf_release,
	    &dn_clustq, UMA_ZONE_NOBUCKET);

	dn_zone_pack = uma_zcache_create("debugnet_" MBUF_PACKET_MEM_NAME,
	    MCLBYTES, mb_ctor_pack, mb_dtor_pack, NULL, NULL,
	    dn_pack_import, dn_pack_release,
	    NULL, UMA_ZONE_NOBUCKET);

	while (nmbuf-- > 0) {
		m = m_get(M_WAITOK, MT_DATA);
		uma_zfree(dn_zone_mbuf, m);
	}
	while (nclust-- > 0) {
		item = uma_zalloc(m_getzone(dn_clsize), M_WAITOK);
		uma_zfree(dn_zone_clust, item);
	}
}
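/*
 * Illustrative sketch of how a debugnet-capable driver might resize this
 * pool when the MTU of a debugnet-enabled interface changes.  The function
 * name, the counts and the clsize choice below are hypothetical and not
 * part of this file.
 *
 *	static void
 *	hypothetical_debugnet_mtu_change(int new_mtu)
 *	{
 *		int clsize;
 *
 *		// Pick a cluster size large enough for one full frame.
 *		clsize = (new_mtu <= MCLBYTES) ? MCLBYTES : MJUMPAGESIZE;
 *		debugnet_mbuf_reinit(1024, 128, clsize);
 *	}
 */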
#endif /* DEBUGNET */

/*
 * Constructor for Mbuf master zone.
 *
 * The 'arg' pointer points to a mb_args structure which
 * contains call-specific information required to support the
 * mbuf allocation API.  See mbuf.h.
 */
static int
mb_ctor_mbuf(void *mem, int size, void *arg, int how)
{
	struct mbuf *m;
	struct mb_args *args;
	int error;
	int flags;
	short type;

	args = (struct mb_args *)arg;
	type = args->type;

	/*
	 * The mbuf is initialized later.  The caller has the
	 * responsibility to set up any MAC labels too.
	 */
	if (type == MT_NOINIT)
		return (0);

	m = (struct mbuf *)mem;
	flags = args->flags;
	MPASS((flags & M_NOFREE) == 0);

	error = m_init(m, how, type, flags);

	return (error);
}

/*
 * The Mbuf master zone destructor.
 */
static void
mb_dtor_mbuf(void *mem, int size, void *arg)
{
	struct mbuf *m;
	unsigned long flags;

	m = (struct mbuf *)mem;
	flags = (unsigned long)arg;

	KASSERT((m->m_flags & M_NOFREE) == 0, ("%s: M_NOFREE set", __func__));
	if (!(flags & MB_DTOR_SKIP) && (m->m_flags & M_PKTHDR) &&
	    !SLIST_EMPTY(&m->m_pkthdr.tags))
		m_tag_delete_chain(m, NULL);
}

/*
 * The Mbuf Packet zone destructor.
 */
static void
mb_dtor_pack(void *mem, int size, void *arg)
{
	struct mbuf *m;

	m = (struct mbuf *)mem;
	if ((m->m_flags & M_PKTHDR) != 0)
		m_tag_delete_chain(m, NULL);

	/* Make sure we've got a clean cluster back. */
	KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
	KASSERT(m->m_ext.ext_buf != NULL, ("%s: ext_buf == NULL", __func__));
	KASSERT(m->m_ext.ext_free == NULL, ("%s: ext_free != NULL", __func__));
	KASSERT(m->m_ext.ext_arg1 == NULL, ("%s: ext_arg1 != NULL", __func__));
	KASSERT(m->m_ext.ext_arg2 == NULL, ("%s: ext_arg2 != NULL", __func__));
	KASSERT(m->m_ext.ext_size == MCLBYTES, ("%s: ext_size != MCLBYTES", __func__));
	KASSERT(m->m_ext.ext_type == EXT_PACKET, ("%s: ext_type != EXT_PACKET", __func__));
#ifdef INVARIANTS
	trash_dtor(m->m_ext.ext_buf, MCLBYTES, arg);
#endif
	/*
	 * If there are processes blocked on zone_clust, waiting for pages
	 * to be freed up, cause them to be woken up by draining the
	 * packet zone.  We are exposed to a race here (in the check for
	 * the UMA_ZFLAG_FULL) where we might miss the flag set, but that
	 * is deliberate.  We don't want to acquire the zone lock for every
	 * mbuf free.
	 */
	if (uma_zone_exhausted(zone_clust))
		uma_zone_reclaim(zone_pack, UMA_RECLAIM_DRAIN);
}

/*
 * The Cluster and Jumbo[PAGESIZE|9|16] zone constructor.
 *
 * Here the 'arg' pointer points to the Mbuf which we
 * are configuring cluster storage for.  If 'arg' is
 * NULL we allocate just the cluster without attaching
 * it to an mbuf.  See mbuf.h.
 */
static int
mb_ctor_clust(void *mem, int size, void *arg, int how)
{
	struct mbuf *m;

	m = (struct mbuf *)arg;
	if (m != NULL) {
		m->m_ext.ext_buf = (char *)mem;
		m->m_data = m->m_ext.ext_buf;
		m->m_flags |= M_EXT;
		m->m_ext.ext_free = NULL;
		m->m_ext.ext_arg1 = NULL;
		m->m_ext.ext_arg2 = NULL;
		m->m_ext.ext_size = size;
		m->m_ext.ext_type = m_gettype(size);
		m->m_ext.ext_flags = EXT_FLAG_EMBREF;
		m->m_ext.ext_count = 1;
	}

	return (0);
}
/*
 * The Packet secondary zone's init routine, executed on the
 * object's transition from mbuf keg slab to zone cache.
 */
static int
mb_zinit_pack(void *mem, int size, int how)
{
	struct mbuf *m;

	m = (struct mbuf *)mem;		/* m is virgin. */
	if (uma_zalloc_arg(zone_clust, m, how) == NULL ||
	    m->m_ext.ext_buf == NULL)
		return (ENOMEM);
	m->m_ext.ext_type = EXT_PACKET;	/* Override. */
#ifdef INVARIANTS
	trash_init(m->m_ext.ext_buf, MCLBYTES, how);
#endif
	return (0);
}

/*
 * The Packet secondary zone's fini routine, executed on the
 * object's transition from zone cache to keg slab.
 */
static void
mb_zfini_pack(void *mem, int size)
{
	struct mbuf *m;

	m = (struct mbuf *)mem;
#ifdef INVARIANTS
	trash_fini(m->m_ext.ext_buf, MCLBYTES);
#endif
	uma_zfree_arg(zone_clust, m->m_ext.ext_buf, NULL);
#ifdef INVARIANTS
	trash_dtor(mem, size, NULL);
#endif
}

/*
 * The "packet" keg constructor.
 */
static int
mb_ctor_pack(void *mem, int size, void *arg, int how)
{
	struct mbuf *m;
	struct mb_args *args;
	int error, flags;
	short type;

	m = (struct mbuf *)mem;
	args = (struct mb_args *)arg;
	flags = args->flags;
	type = args->type;
	MPASS((flags & M_NOFREE) == 0);

#ifdef INVARIANTS
	trash_ctor(m->m_ext.ext_buf, MCLBYTES, arg, how);
#endif

	error = m_init(m, how, type, flags);

	/* m_ext is already initialized. */
	m->m_data = m->m_ext.ext_buf;
	m->m_flags = (flags | M_EXT);

	return (error);
}

/*
 * This is the protocol drain routine.  Called by UMA whenever any of the
 * mbuf zones gets close to its limit.
 *
 * No locks should be held when this is called.  The drain routines have to
 * presently acquire some locks which raises the possibility of lock order
 * reversal.
 */
static void
mb_reclaim(uma_zone_t zone __unused, int pending __unused)
{
	struct epoch_tracker et;
	struct domain *dp;
	struct protosw *pr;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK | WARN_PANIC, NULL, __func__);

	NET_EPOCH_ENTER(et);
	for (dp = domains; dp != NULL; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain != NULL)
				(*pr->pr_drain)();
	NET_EPOCH_EXIT(et);
}
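/*
 * Illustrative sketch of the hook mb_reclaim() invokes: a protocol that
 * caches mbufs can expose a pr_drain routine through its protosw entry.
 * The "example" names below are hypothetical and not part of this file.
 *
 *	static void
 *	example_drain(void)
 *	{
 *		// Release any mbufs the protocol is holding onto, e.g.
 *		// reassembly queues or per-connection caches, so the
 *		// zones above can be refilled.
 *	}
 *
 *	static struct protosw example_protosw = {
 *		.pr_type = SOCK_DGRAM,
 *		.pr_protocol = 254,		// hypothetical number
 *		.pr_drain = example_drain,
 *	};
 */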
/*
 * Free "count" units of I/O from an mbuf chain.  They could be held
 * in M_EXTPG or just as a normal mbuf.  This code is intended to be
 * called in an error path (I/O error, closed connection, etc.).
 */
void
mb_free_notready(struct mbuf *m, int count)
{
	int i;

	for (i = 0; i < count && m != NULL; i++) {
		if ((m->m_flags & M_EXTPG) != 0) {
			m->m_epg_nrdy--;
			if (m->m_epg_nrdy != 0)
				continue;
		}
		m = m_free(m);
	}
	KASSERT(i == count, ("Removed only %d items from %p", i, m));
}

/*
 * Compress an unmapped mbuf into a simple mbuf when it holds a small
 * amount of data.  This is used as a DOS defense to avoid having
 * small packets tie up wired pages, an ext_pgs structure, and an
 * mbuf.  Since this converts the existing mbuf in place, it can only
 * be used if there are no other references to 'm'.
 */
int
mb_unmapped_compress(struct mbuf *m)
{
	volatile u_int *refcnt;
	char buf[MLEN];

	/*
	 * Assert that 'm' does not have a packet header.  If 'm' had
	 * a packet header, it would only be able to hold MHLEN bytes
	 * and m_data would have to be initialized differently.
	 */
	KASSERT((m->m_flags & M_PKTHDR) == 0 && (m->m_flags & M_EXTPG),
	    ("%s: m %p !M_EXTPG or M_PKTHDR", __func__, m));
	KASSERT(m->m_len <= MLEN, ("m_len too large %p", m));

	if (m->m_ext.ext_flags & EXT_FLAG_EMBREF) {
		refcnt = &m->m_ext.ext_count;
	} else {
		KASSERT(m->m_ext.ext_cnt != NULL,
		    ("%s: no refcounting pointer on %p", __func__, m));
		refcnt = m->m_ext.ext_cnt;
	}

	if (*refcnt != 1)
		return (EBUSY);

	m_copydata(m, 0, m->m_len, buf);

	/* Free the backing pages. */
	m->m_ext.ext_free(m);

	/* Turn 'm' into a "normal" mbuf. */
	m->m_flags &= ~(M_EXT | M_RDONLY | M_EXTPG);
	m->m_data = m->m_dat;

	/* Copy data back into m. */
	bcopy(buf, mtod(m, char *), m->m_len);

	return (0);
}

/*
 * These next few routines are used to permit downgrading an unmapped
 * mbuf to a chain of mapped mbufs.  This is used when an interface
 * doesn't support unmapped mbufs or if checksums need to be
 * computed in software.
 *
 * Each unmapped mbuf is converted to a chain of mbufs.  First, any
 * TLS header data is stored in a regular mbuf.  Second, each page of
 * unmapped data is stored in an mbuf with an EXT_SFBUF external
 * cluster.  These mbufs use an sf_buf to provide a valid KVA for the
 * associated physical page.  They also hold a reference on the
 * original M_EXTPG mbuf to ensure the physical page doesn't go away.
 * Finally, any TLS trailer data is stored in a regular mbuf.
 *
 * mb_unmapped_free_mext() is the ext_free handler for the EXT_SFBUF
 * mbufs.  It frees the associated sf_buf and releases its reference
 * on the original M_EXTPG mbuf.
 *
 * _mb_unmapped_to_ext() is a helper function that converts a single
 * unmapped mbuf into a chain of mbufs.
 *
 * mb_unmapped_to_ext() is the public function that walks an mbuf
 * chain converting any unmapped mbufs to mapped mbufs.  It returns
 * the new chain of mapped mbufs on success.  On failure it frees
 * the original mbuf chain and returns NULL.
 */
static void
mb_unmapped_free_mext(struct mbuf *m)
{
	struct sf_buf *sf;
	struct mbuf *old_m;

	sf = m->m_ext.ext_arg1;
	sf_buf_free(sf);

	/* Drop the reference on the backing M_EXTPG mbuf. */
	old_m = m->m_ext.ext_arg2;
	mb_free_extpg(old_m);
}
static struct mbuf *
_mb_unmapped_to_ext(struct mbuf *m)
{
	struct mbuf *m_new, *top, *prev, *mref;
	struct sf_buf *sf;
	vm_page_t pg;
	int i, len, off, pglen, pgoff, seglen, segoff;
	volatile u_int *refcnt;
	u_int ref_inc = 0;

	M_ASSERTEXTPG(m);
	len = m->m_len;
	KASSERT(m->m_epg_tls == NULL, ("%s: can't convert TLS mbuf %p",
	    __func__, m));

	/* See if this is the mbuf that holds the embedded refcount. */
	if (m->m_ext.ext_flags & EXT_FLAG_EMBREF) {
		refcnt = &m->m_ext.ext_count;
		mref = m;
	} else {
		KASSERT(m->m_ext.ext_cnt != NULL,
		    ("%s: no refcounting pointer on %p", __func__, m));
		refcnt = m->m_ext.ext_cnt;
		mref = __containerof(refcnt, struct mbuf, m_ext.ext_count);
	}

	/* Skip over any data removed from the front. */
	off = mtod(m, vm_offset_t);

	top = NULL;
	if (m->m_epg_hdrlen != 0) {
		if (off >= m->m_epg_hdrlen) {
			off -= m->m_epg_hdrlen;
		} else {
			seglen = m->m_epg_hdrlen - off;
			segoff = off;
			seglen = min(seglen, len);
			off = 0;
			len -= seglen;
			m_new = m_get(M_NOWAIT, MT_DATA);
			if (m_new == NULL)
				goto fail;
			m_new->m_len = seglen;
			prev = top = m_new;
			memcpy(mtod(m_new, void *), &m->m_epg_hdr[segoff],
			    seglen);
		}
	}
	pgoff = m->m_epg_1st_off;
	for (i = 0; i < m->m_epg_npgs && len > 0; i++) {
		pglen = m_epg_pagelen(m, i, pgoff);
		if (off >= pglen) {
			off -= pglen;
			pgoff = 0;
			continue;
		}
		seglen = pglen - off;
		segoff = pgoff + off;
		off = 0;
		seglen = min(seglen, len);
		len -= seglen;

		pg = PHYS_TO_VM_PAGE(m->m_epg_pa[i]);
		m_new = m_get(M_NOWAIT, MT_DATA);
		if (m_new == NULL)
			goto fail;
		if (top == NULL) {
			top = prev = m_new;
		} else {
			prev->m_next = m_new;
			prev = m_new;
		}
		sf = sf_buf_alloc(pg, SFB_NOWAIT);
		if (sf == NULL)
			goto fail;

		ref_inc++;
		m_extadd(m_new, (char *)sf_buf_kva(sf), PAGE_SIZE,
		    mb_unmapped_free_mext, sf, mref, M_RDONLY, EXT_SFBUF);
		m_new->m_data += segoff;
		m_new->m_len = seglen;

		pgoff = 0;
	}
	if (len != 0) {
		KASSERT((off + len) <= m->m_epg_trllen,
		    ("off + len > trail (%d + %d > %d)", off, len,
		    m->m_epg_trllen));
		m_new = m_get(M_NOWAIT, MT_DATA);
		if (m_new == NULL)
			goto fail;
		if (top == NULL)
			top = m_new;
		else
			prev->m_next = m_new;
		m_new->m_len = len;
		memcpy(mtod(m_new, void *), &m->m_epg_trail[off], len);
	}

	if (ref_inc != 0) {
		/*
		 * Obtain an additional reference on the old mbuf for
		 * each created EXT_SFBUF mbuf.  They will be dropped
		 * in mb_unmapped_free_mext().
		 */
		if (*refcnt == 1)
			*refcnt += ref_inc;
		else
			atomic_add_int(refcnt, ref_inc);
	}
	m_free(m);
	return (top);

fail:
	if (ref_inc != 0) {
		/*
		 * Obtain an additional reference on the old mbuf for
		 * each created EXT_SFBUF mbuf.  They will be
		 * immediately dropped when these mbufs are freed
		 * below.
		 */
		if (*refcnt == 1)
			*refcnt += ref_inc;
		else
			atomic_add_int(refcnt, ref_inc);
	}
	m_free(m);
	m_freem(top);
	return (NULL);
}

struct mbuf *
mb_unmapped_to_ext(struct mbuf *top)
{
	struct mbuf *m, *next, *prev = NULL;

	prev = NULL;
	for (m = top; m != NULL; m = next) {
		/* m might be freed, so cache the next pointer. */
		next = m->m_next;
		if (m->m_flags & M_EXTPG) {
			if (prev != NULL) {
				/*
				 * Remove 'm' from the new chain so
				 * that the 'top' chain terminates
				 * before 'm' in case 'top' is freed
				 * due to an error.
				 */
				prev->m_next = NULL;
			}
			m = _mb_unmapped_to_ext(m);
			if (m == NULL) {
				m_freem(top);
				m_freem(next);
				return (NULL);
			}
			if (prev == NULL) {
				top = m;
			} else {
				prev->m_next = m;
			}

			/*
			 * Replaced one mbuf with a chain, so we must
			 * find the end of chain.
			 */
			prev = m_last(m);
		} else {
			if (prev != NULL) {
				prev->m_next = m;
			}
			prev = m;
		}
	}
	return (top);
}
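/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * a transmit path that cannot DMA from unmapped mbufs can downgrade a
 * chain with mb_unmapped_to_ext() before mapping it.  Remember that on
 * failure the original chain has already been freed.
 *
 *	static int
 *	hypothetical_xmit(struct ifnet *ifp, struct mbuf **mp)
 *	{
 *		struct mbuf *m;
 *
 *		// This hardware cannot handle M_EXTPG mbufs, so flatten
 *		// any unmapped mbufs into EXT_SFBUF-backed mapped ones.
 *		m = mb_unmapped_to_ext(*mp);
 *		if (m == NULL) {
 *			*mp = NULL;	// original chain already freed
 *			return (ENOBUFS);
 *		}
 *		*mp = m;
 *		// ... continue with DMA mapping of the mapped chain ...
 *		return (0);
 *	}
 */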
/*
 * Allocate an empty M_EXTPG mbuf.  The ext_free routine is
 * responsible for freeing any pages backing this mbuf when it is
 * freed.
 */
struct mbuf *
mb_alloc_ext_pgs(int how, m_ext_free_t ext_free)
{
	struct mbuf *m;

	m = m_get(how, MT_DATA);
	if (m == NULL)
		return (NULL);

	m->m_epg_npgs = 0;
	m->m_epg_nrdy = 0;
	m->m_epg_1st_off = 0;
	m->m_epg_last_len = 0;
	m->m_epg_flags = 0;
	m->m_epg_hdrlen = 0;
	m->m_epg_trllen = 0;
	m->m_epg_tls = NULL;
	m->m_epg_so = NULL;
	m->m_data = NULL;
	m->m_flags |= (M_EXT | M_RDONLY | M_EXTPG);
	m->m_ext.ext_flags = EXT_FLAG_EMBREF;
	m->m_ext.ext_count = 1;
	m->m_ext.ext_size = 0;
	m->m_ext.ext_free = ext_free;
	return (m);
}
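/*
 * Illustrative sketch of a consumer (hypothetical names, not part of this
 * file): allocate an M_EXTPG mbuf, attach one wired page, and provide an
 * ext_free routine that releases the page when the last reference goes
 * away.
 *
 *	static void
 *	example_extpg_free(struct mbuf *m)
 *	{
 *		vm_page_t pg;
 *
 *		pg = PHYS_TO_VM_PAGE(m->m_epg_pa[0]);
 *		// Release the wiring taken when the page was attached.
 *		vm_page_unwire_noq(pg);
 *	}
 *
 *	struct mbuf *
 *	example_wrap_page(vm_page_t pg, int len, int how)
 *	{
 *		struct mbuf *m;
 *
 *		m = mb_alloc_ext_pgs(how, example_extpg_free);
 *		if (m == NULL)
 *			return (NULL);
 *		m->m_epg_pa[0] = VM_PAGE_TO_PHYS(pg);
 *		m->m_epg_npgs = 1;
 *		m->m_epg_last_len = len;
 *		m->m_len = len;
 *		return (m);
 *	}
 */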
/*
 * Clean up after mbufs with M_EXT storage attached to them if the
 * reference count hits 1.
 */
void
mb_free_ext(struct mbuf *m)
{
	volatile u_int *refcnt;
	struct mbuf *mref;
	int freembuf;

	KASSERT(m->m_flags & M_EXT, ("%s: M_EXT not set on %p", __func__, m));

	/* See if this is the mbuf that holds the embedded refcount. */
	if (m->m_ext.ext_flags & EXT_FLAG_EMBREF) {
		refcnt = &m->m_ext.ext_count;
		mref = m;
	} else {
		KASSERT(m->m_ext.ext_cnt != NULL,
		    ("%s: no refcounting pointer on %p", __func__, m));
		refcnt = m->m_ext.ext_cnt;
		mref = __containerof(refcnt, struct mbuf, m_ext.ext_count);
	}

	/*
	 * Check if the header is embedded in the cluster.  It is
	 * important that we can't touch any of the mbuf fields
	 * after we have freed the external storage, since the mbuf
	 * could have been embedded in it.  For now, the mbufs
	 * embedded into the cluster are always of type EXT_EXTREF,
	 * and for this type we won't free the mref.
	 */
	if (m->m_flags & M_NOFREE) {
		freembuf = 0;
		KASSERT(m->m_ext.ext_type == EXT_EXTREF ||
		    m->m_ext.ext_type == EXT_RXRING,
		    ("%s: no-free mbuf %p has wrong type", __func__, m));
	} else
		freembuf = 1;

	/* Free attached storage if this mbuf is the only reference to it. */
	if (*refcnt == 1 || atomic_fetchadd_int(refcnt, -1) == 1) {
		switch (m->m_ext.ext_type) {
		case EXT_PACKET:
			/* The packet zone is special. */
			if (*refcnt == 0)
				*refcnt = 1;
			uma_zfree(zone_pack, mref);
			break;
		case EXT_CLUSTER:
			uma_zfree(zone_clust, m->m_ext.ext_buf);
			uma_zfree(zone_mbuf, mref);
			break;
		case EXT_JUMBOP:
			uma_zfree(zone_jumbop, m->m_ext.ext_buf);
			uma_zfree(zone_mbuf, mref);
			break;
		case EXT_JUMBO9:
			uma_zfree(zone_jumbo9, m->m_ext.ext_buf);
			uma_zfree(zone_mbuf, mref);
			break;
		case EXT_JUMBO16:
			uma_zfree(zone_jumbo16, m->m_ext.ext_buf);
			uma_zfree(zone_mbuf, mref);
			break;
		case EXT_SFBUF:
		case EXT_NET_DRV:
		case EXT_MOD_TYPE:
		case EXT_DISPOSABLE:
			KASSERT(mref->m_ext.ext_free != NULL,
			    ("%s: ext_free not set", __func__));
			mref->m_ext.ext_free(mref);
			uma_zfree(zone_mbuf, mref);
			break;
		case EXT_EXTREF:
			KASSERT(m->m_ext.ext_free != NULL,
			    ("%s: ext_free not set", __func__));
			m->m_ext.ext_free(m);
			break;
		case EXT_RXRING:
			KASSERT(m->m_ext.ext_free == NULL,
			    ("%s: ext_free is set", __func__));
			break;
		default:
			KASSERT(m->m_ext.ext_type == 0,
			    ("%s: unknown ext_type", __func__));
		}
	}

	if (freembuf && m != mref)
		uma_zfree(zone_mbuf, m);
}

/*
 * Clean up after mbufs with M_EXTPG storage attached to them if the
 * reference count hits 1.
 */
void
mb_free_extpg(struct mbuf *m)
{
	volatile u_int *refcnt;
	struct mbuf *mref;

	M_ASSERTEXTPG(m);

	/* See if this is the mbuf that holds the embedded refcount. */
	if (m->m_ext.ext_flags & EXT_FLAG_EMBREF) {
		refcnt = &m->m_ext.ext_count;
		mref = m;
	} else {
		KASSERT(m->m_ext.ext_cnt != NULL,
		    ("%s: no refcounting pointer on %p", __func__, m));
		refcnt = m->m_ext.ext_cnt;
		mref = __containerof(refcnt, struct mbuf, m_ext.ext_count);
	}

	/* Free attached storage if this mbuf is the only reference to it. */
	if (*refcnt == 1 || atomic_fetchadd_int(refcnt, -1) == 1) {
		KASSERT(mref->m_ext.ext_free != NULL,
		    ("%s: ext_free not set", __func__));

		mref->m_ext.ext_free(mref);
#ifdef KERN_TLS
		if (mref->m_epg_tls != NULL &&
		    !refcount_release_if_not_last(&mref->m_epg_tls->refcount))
			ktls_enqueue_to_free(mref);
		else
#endif
			uma_zfree(zone_mbuf, mref);
	}

	if (m != mref)
		uma_zfree(zone_mbuf, m);
}

/*
 * Official mbuf(9) allocation KPI for stack and drivers:
 *
 * m_get()	- a single mbuf without any attachments, sys/mbuf.h.
 * m_gethdr()	- a single mbuf initialized as M_PKTHDR, sys/mbuf.h.
 * m_getcl()	- an mbuf + 2k cluster, sys/mbuf.h.
 * m_clget()	- attach cluster to already allocated mbuf.
 * m_cljget()	- attach jumbo cluster to already allocated mbuf.
 * m_get2()	- allocate minimum mbuf that would fit size argument.
 * m_getm2()	- allocate a chain of mbufs/clusters.
 * m_extadd()	- attach external cluster to mbuf.
 *
 * m_free()	- free single mbuf with its tags and ext, sys/mbuf.h.
 * m_freem()	- free chain of mbufs.
 */
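/*
 * Illustrative sketch of the KPI listed above (hypothetical payload length,
 * not part of this file): m_get2() picks the smallest backing storage that
 * fits the requested size, so the caller does not have to choose between
 * the plain mbuf, packet and jumbo page zones by hand.
 *
 *	struct mbuf *m;
 *	int resid = 1400;		// example payload size
 *
 *	m = m_get2(resid, M_NOWAIT, MT_DATA, M_PKTHDR);
 *	if (m != NULL) {
 *		// 1400 > MHLEN, so this came from the packet zone and has
 *		// a 2k cluster attached; fill it and hand it off.
 *		m->m_len = m->m_pkthdr.len = resid;
 *		m_freem(m);
 *	}
 */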
int
m_clget(struct mbuf *m, int how)
{

	KASSERT((m->m_flags & M_EXT) == 0, ("%s: mbuf %p has M_EXT",
	    __func__, m));
	m->m_ext.ext_buf = (char *)NULL;
	uma_zalloc_arg(zone_clust, m, how);
	/*
	 * On a cluster allocation failure, drain the packet zone and retry;
	 * we might be able to loosen a few clusters up on the drain.
	 */
	if ((how & M_NOWAIT) && (m->m_ext.ext_buf == NULL)) {
		uma_zone_reclaim(zone_pack, UMA_RECLAIM_DRAIN);
		uma_zalloc_arg(zone_clust, m, how);
	}
	MBUF_PROBE2(m__clget, m, how);
	return (m->m_flags & M_EXT);
}

/*
 * m_cljget() is different from m_clget() as it can allocate clusters without
 * attaching them to an mbuf.  In that case the return value is the pointer
 * to the cluster of the requested size.  If an mbuf was specified, it gets
 * the cluster attached to it and the return value can be safely ignored.
 * For size it takes MCLBYTES, MJUMPAGESIZE, MJUM9BYTES, MJUM16BYTES.
 */
void *
m_cljget(struct mbuf *m, int how, int size)
{
	uma_zone_t zone;
	void *retval;

	if (m != NULL) {
		KASSERT((m->m_flags & M_EXT) == 0, ("%s: mbuf %p has M_EXT",
		    __func__, m));
		m->m_ext.ext_buf = NULL;
	}

	zone = m_getzone(size);
	retval = uma_zalloc_arg(zone, m, how);

	MBUF_PROBE4(m__cljget, m, how, size, retval);

	return (retval);
}

/*
 * m_get2() allocates the minimum mbuf that would fit the "size" argument.
 */
struct mbuf *
m_get2(int size, int how, short type, int flags)
{
	struct mb_args args;
	struct mbuf *m, *n;

	args.flags = flags;
	args.type = type;

	if (size <= MHLEN || (size <= MLEN && (flags & M_PKTHDR) == 0))
		return (uma_zalloc_arg(zone_mbuf, &args, how));
	if (size <= MCLBYTES)
		return (uma_zalloc_arg(zone_pack, &args, how));

	if (size > MJUMPAGESIZE)
		return (NULL);

	m = uma_zalloc_arg(zone_mbuf, &args, how);
	if (m == NULL)
		return (NULL);

	n = uma_zalloc_arg(zone_jumbop, m, how);
	if (n == NULL) {
		uma_zfree(zone_mbuf, m);
		return (NULL);
	}

	return (m);
}

/*
 * m_getjcl() returns an mbuf with a cluster of the specified size attached.
 * For size it takes MCLBYTES, MJUMPAGESIZE, MJUM9BYTES, MJUM16BYTES.
 */
struct mbuf *
m_getjcl(int how, short type, int flags, int size)
{
	struct mb_args args;
	struct mbuf *m, *n;
	uma_zone_t zone;

	if (size == MCLBYTES)
		return m_getcl(how, type, flags);

	args.flags = flags;
	args.type = type;

	m = uma_zalloc_arg(zone_mbuf, &args, how);
	if (m == NULL)
		return (NULL);

	zone = m_getzone(size);
	n = uma_zalloc_arg(zone, m, how);
	if (n == NULL) {
		uma_zfree(zone_mbuf, m);
		return (NULL);
	}
	return (m);
}

/*
 * Allocate a given length worth of mbufs and/or clusters (whatever fits
 * best) and return a pointer to the top of the allocated chain.  If an
 * existing mbuf chain is provided, then we will append the new chain
 * to the existing one and return a pointer to the provided mbuf.
 */
struct mbuf *
m_getm2(struct mbuf *m, int len, int how, short type, int flags)
{
	struct mbuf *mb, *nm = NULL, *mtail = NULL;

	KASSERT(len >= 0, ("%s: len is < 0", __func__));

	/* Validate flags. */
	flags &= (M_PKTHDR | M_EOR);

	/* Packet header mbuf must be first in chain. */
	if ((flags & M_PKTHDR) && m != NULL)
		flags &= ~M_PKTHDR;

	/* Loop and append maximum sized mbufs to the chain tail. */
	while (len > 0) {
		if (len > MCLBYTES)
			mb = m_getjcl(how, type, (flags & M_PKTHDR),
			    MJUMPAGESIZE);
		else if (len >= MINCLSIZE)
			mb = m_getcl(how, type, (flags & M_PKTHDR));
		else if (flags & M_PKTHDR)
			mb = m_gethdr(how, type);
		else
			mb = m_get(how, type);

		/* Fail the whole operation if one mbuf can't be allocated. */
		if (mb == NULL) {
			if (nm != NULL)
				m_freem(nm);
			return (NULL);
		}

		/* Book keeping. */
		len -= M_SIZE(mb);
		if (mtail != NULL)
			mtail->m_next = mb;
		else
			nm = mb;
		mtail = mb;
		flags &= ~M_PKTHDR;	/* Only valid on the first mbuf. */
	}
	if (flags & M_EOR)
		mtail->m_flags |= M_EOR;  /* Only valid on the last mbuf. */

	/* If mbuf was supplied, append new chain to the end of it. */
	if (m != NULL) {
		for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next)
			;
		mtail->m_next = nm;
		mtail->m_flags &= ~M_EOR;
	} else
		m = nm;

	return (m);
}
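/*
 * Illustrative use of m_getm2() (hypothetical length, not part of this
 * file): build a packet-header chain large enough for a 9000-byte payload.
 * The allocator picks page-size jumbo clusters while more than MCLBYTES
 * remain, then smaller buffers for the tail.
 *
 *	struct mbuf *m;
 *
 *	m = m_getm2(NULL, 9000, M_WAITOK, MT_DATA, M_PKTHDR);
 *	if (m != NULL) {
 *		// m is a chain whose buffers add up to at least 9000 bytes;
 *		// copy data in with m_append()/m_copyback() and set
 *		// m->m_pkthdr.len before handing it to the stack.
 *		m_freem(m);
 *	}
 */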
/*-
 * Configure a provided mbuf to refer to the provided external storage
 * buffer and setup a reference count for said buffer.
 *
 * Arguments:
 *    mb     The existing mbuf to which to attach the provided buffer.
 *    buf    The address of the provided external storage buffer.
 *    size   The size of the provided buffer.
 *    freef  A pointer to a routine that is responsible for freeing the
 *           provided external storage buffer.
 *    arg1, arg2
 *           Optional pointers (of any type, may be NULL) passed to the
 *           provided freef routine.
 *    flags  Any other flags to be passed to the provided mbuf.
 *    type   The type that the external storage buffer should be
 *           labeled with.
 *
 * Returns:
 *    Nothing.
 */
void
m_extadd(struct mbuf *mb, char *buf, u_int size, m_ext_free_t freef,
    void *arg1, void *arg2, int flags, int type)
{

	KASSERT(type != EXT_CLUSTER, ("%s: EXT_CLUSTER not allowed", __func__));

	mb->m_flags |= (M_EXT | flags);
	mb->m_ext.ext_buf = buf;
	mb->m_data = mb->m_ext.ext_buf;
	mb->m_ext.ext_size = size;
	mb->m_ext.ext_free = freef;
	mb->m_ext.ext_arg1 = arg1;
	mb->m_ext.ext_arg2 = arg2;
	mb->m_ext.ext_type = type;

	if (type != EXT_EXTREF) {
		mb->m_ext.ext_count = 1;
		mb->m_ext.ext_flags = EXT_FLAG_EMBREF;
	} else
		mb->m_ext.ext_flags = 0;
}

/*
 * Free an entire chain of mbufs and associated external buffers, if
 * applicable.
 */
void
m_freem(struct mbuf *mb)
{

	MBUF_PROBE1(m__freem, mb);
	while (mb != NULL)
		mb = m_free(mb);
}

void
m_snd_tag_init(struct m_snd_tag *mst, struct ifnet *ifp)
{

	if_ref(ifp);
	mst->ifp = ifp;
	refcount_init(&mst->refcount, 1);
	counter_u64_add(snd_tag_count, 1);
}

void
m_snd_tag_destroy(struct m_snd_tag *mst)
{
	struct ifnet *ifp;

	ifp = mst->ifp;
	ifp->if_snd_tag_free(mst);
	if_rele(ifp);
	counter_u64_add(snd_tag_count, -1);
}