/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2004, 2005,
 *	Bosko Milekic <bmilekic@FreeBSD.org>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_param.h"
#include "opt_kern_tls.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/domainset.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/ktls.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/protosw.h>
#include <sys/refcount.h>
#include <sys/sf_buf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_var.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/uma.h>
#include <vm/uma_dbg.h>

/*
 * In FreeBSD, Mbufs and Mbuf Clusters are allocated from UMA
 * Zones.
 *
 * Mbuf Clusters (2K, contiguous) are allocated from the Cluster
 * Zone.  The Zone can be capped at kern.ipc.nmbclusters, if the
 * administrator so desires.
 *
 * Mbufs are allocated from a UMA Master Zone called the Mbuf
 * Zone.
 *
 * Additionally, FreeBSD provides a Packet Zone, which it
 * configures as a Secondary Zone to the Mbuf Master Zone,
 * thus sharing backend Slab kegs with the Mbuf Master Zone.
 *
 * Thus common-case allocations and locking are simplified:
 *
 *  m_clget()                m_getcl()
 *   |                         |
 *   |   .------------->[(Packet Cache)]     m_get(), m_gethdr()
 *   |   |               [     Packet    ]           |
 *  [(Cluster Cache)]    [   Secondary   ]   [ (Mbuf Cache)      ]
 *  [  Cluster Zone  ]   [     Zone      ]   [ Mbuf Master Zone  ]
 *         |                     \________         |
 *  [ Cluster Keg   ]                     \       /
 *         |                            [ Mbuf Keg   ]
 *  [ Cluster Slabs ]                         |
 *         |                            [ Mbuf Slabs ]
 *          \____________(VM)_________________/
 *
 *
 * Whenever an object is allocated with uma_zalloc() out of
 * one of the Zones its _ctor_ function is executed.  Likewise,
 * on any deallocation through uma_zfree() the _dtor_ function
 * is executed.
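 *
 * For example, the two common allocation paths in the diagram differ
 * only in how many zones they touch: m_getcl() typically satisfies an
 * mbuf + 2K cluster pair straight from the Packet Zone cache, whereas
 * the equivalent two-step sequence
 *
 *	m = m_gethdr(M_NOWAIT, MT_DATA);
 *	if (m != NULL && !m_clget(m, M_NOWAIT))
 *		m_free(m);
 *
 * goes through the Mbuf Master Zone and the Cluster Zone separately.
 * (Illustrative sketch only; see the KPI summary near m_clget() below.)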
 *
 * Caches are per-CPU and are filled from the Master Zone.
 *
 * Whenever an object is allocated from the underlying global
 * memory pool it gets pre-initialized with the _zinit_ functions.
 * When the Kegs are overfull, objects get decommissioned with the
 * _zfini_ functions and freed back to the global memory pool.
 *
 */

int nmbufs;			/* limits number of mbufs */
int nmbclusters;		/* limits number of mbuf clusters */
int nmbjumbop;			/* limits number of page size jumbo clusters */
int nmbjumbo9;			/* limits number of 9k jumbo clusters */
int nmbjumbo16;			/* limits number of 16k jumbo clusters */

bool mb_use_ext_pgs;		/* use EXT_PGS mbufs for sendfile & TLS */
SYSCTL_BOOL(_kern_ipc, OID_AUTO, mb_use_ext_pgs, CTLFLAG_RWTUN,
    &mb_use_ext_pgs, 0,
    "Use unmapped mbufs for sendfile(2) and TLS offload");

static quad_t maxmbufmem;	/* overall real memory limit for all mbufs */

SYSCTL_QUAD(_kern_ipc, OID_AUTO, maxmbufmem, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &maxmbufmem, 0,
    "Maximum real memory allocatable to various mbuf types");

static counter_u64_t snd_tag_count;
SYSCTL_COUNTER_U64(_kern_ipc, OID_AUTO, num_snd_tags, CTLFLAG_RW,
    &snd_tag_count, "# of active mbuf send tags");

/*
 * tunable_mbinit() has to be run before any mbuf allocations are done.
 */
static void
tunable_mbinit(void *dummy)
{
	quad_t realmem;

	/*
	 * The default limit for all mbuf related memory is 1/2 of all
	 * available kernel memory (physical or kmem).
	 * At most it can be 3/4 of available kernel memory.
	 */
	realmem = qmin((quad_t)physmem * PAGE_SIZE, vm_kmem_size);
	maxmbufmem = realmem / 2;
	TUNABLE_QUAD_FETCH("kern.ipc.maxmbufmem", &maxmbufmem);
	if (maxmbufmem > realmem / 4 * 3)
		maxmbufmem = realmem / 4 * 3;

	TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
	if (nmbclusters == 0)
		nmbclusters = maxmbufmem / MCLBYTES / 4;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbop", &nmbjumbop);
	if (nmbjumbop == 0)
		nmbjumbop = maxmbufmem / MJUMPAGESIZE / 4;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbo9", &nmbjumbo9);
	if (nmbjumbo9 == 0)
		nmbjumbo9 = maxmbufmem / MJUM9BYTES / 6;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbo16", &nmbjumbo16);
	if (nmbjumbo16 == 0)
		nmbjumbo16 = maxmbufmem / MJUM16BYTES / 6;

	/*
	 * We need at least as many mbufs as we have clusters of
	 * the various types added together.
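	 *
	 * As a worked example of the defaults above (assuming 4K pages,
	 * MSIZE of 256 and a computed maxmbufmem of 4GB): nmbclusters =
	 * 4G / 2048 / 4 = 524288, nmbjumbop = 4G / 4096 / 4 = 262144,
	 * nmbjumbo9 = 4G / 9216 / 6 ~= 77672, nmbjumbo16 = 4G / 16384 / 6
	 * ~= 43690, and nmbufs = max(4G / 256 / 5 ~= 3.4M, ~0.9M sum of
	 * the above) ~= 3.4M, so the mbuf limit comfortably covers all
	 * cluster types.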
	 */
	TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);
	if (nmbufs < nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16)
		nmbufs = lmax(maxmbufmem / MSIZE / 5,
		    nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16);
}
SYSINIT(tunable_mbinit, SI_SUB_KMEM, SI_ORDER_MIDDLE, tunable_mbinit, NULL);

static int
sysctl_nmbclusters(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbclusters;

	newnmbclusters = nmbclusters;
	error = sysctl_handle_int(oidp, &newnmbclusters, 0, req);
	if (error == 0 && req->newptr && newnmbclusters != nmbclusters) {
		if (newnmbclusters > nmbclusters &&
		    nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
			nmbclusters = newnmbclusters;
			nmbclusters = uma_zone_set_max(zone_clust, nmbclusters);
			EVENTHANDLER_INVOKE(nmbclusters_change);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbclusters,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &nmbclusters, 0,
    sysctl_nmbclusters, "IU",
    "Maximum number of mbuf clusters allowed");

static int
sysctl_nmbjumbop(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbjumbop;

	newnmbjumbop = nmbjumbop;
	error = sysctl_handle_int(oidp, &newnmbjumbop, 0, req);
	if (error == 0 && req->newptr && newnmbjumbop != nmbjumbop) {
		if (newnmbjumbop > nmbjumbop &&
		    nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
			nmbjumbop = newnmbjumbop;
			nmbjumbop = uma_zone_set_max(zone_jumbop, nmbjumbop);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbop,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &nmbjumbop, 0,
    sysctl_nmbjumbop, "IU",
    "Maximum number of mbuf page size jumbo clusters allowed");

static int
sysctl_nmbjumbo9(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbjumbo9;

	newnmbjumbo9 = nmbjumbo9;
	error = sysctl_handle_int(oidp, &newnmbjumbo9, 0, req);
	if (error == 0 && req->newptr && newnmbjumbo9 != nmbjumbo9) {
		if (newnmbjumbo9 > nmbjumbo9 &&
		    nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
			nmbjumbo9 = newnmbjumbo9;
			nmbjumbo9 = uma_zone_set_max(zone_jumbo9, nmbjumbo9);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbo9,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &nmbjumbo9, 0,
    sysctl_nmbjumbo9, "IU",
    "Maximum number of mbuf 9k jumbo clusters allowed");

static int
sysctl_nmbjumbo16(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbjumbo16;

	newnmbjumbo16 = nmbjumbo16;
	error = sysctl_handle_int(oidp, &newnmbjumbo16, 0, req);
	if (error == 0 && req->newptr && newnmbjumbo16 != nmbjumbo16) {
		if (newnmbjumbo16 > nmbjumbo16 &&
		    nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
			nmbjumbo16 = newnmbjumbo16;
			nmbjumbo16 = uma_zone_set_max(zone_jumbo16, nmbjumbo16);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbo16,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &nmbjumbo16, 0,
    sysctl_nmbjumbo16, "IU",
    "Maximum number of mbuf 16k jumbo clusters allowed");
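
/*
 * The cluster-limit handlers above only accept increases, and only while
 * kern.ipc.nmbufs is at least the sum of the current cluster limits; the
 * nmbufs handler below likewise only accepts increases.  For example, an
 * administrator might raise the 2k cluster limit at run time with
 *
 *	sysctl kern.ipc.nmbclusters=1048576
 *
 * (illustrative value; any value above the current limit is accepted,
 * subject to the checks described above).
 */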

static int
sysctl_nmbufs(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbufs;

	newnmbufs = nmbufs;
	error = sysctl_handle_int(oidp, &newnmbufs, 0, req);
	if (error == 0 && req->newptr && newnmbufs != nmbufs) {
		if (newnmbufs > nmbufs) {
			nmbufs = newnmbufs;
			nmbufs = uma_zone_set_max(zone_mbuf, nmbufs);
			EVENTHANDLER_INVOKE(nmbufs_change);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbufs,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    &nmbufs, 0, sysctl_nmbufs, "IU",
    "Maximum number of mbufs allowed");

/*
 * Zones from which we allocate.
 */
uma_zone_t	zone_mbuf;
uma_zone_t	zone_clust;
uma_zone_t	zone_pack;
uma_zone_t	zone_jumbop;
uma_zone_t	zone_jumbo9;
uma_zone_t	zone_jumbo16;

/*
 * Local prototypes.
 */
static int	mb_ctor_mbuf(void *, int, void *, int);
static int	mb_ctor_clust(void *, int, void *, int);
static int	mb_ctor_pack(void *, int, void *, int);
static void	mb_dtor_mbuf(void *, int, void *);
static void	mb_dtor_pack(void *, int, void *);
static int	mb_zinit_pack(void *, int, int);
static void	mb_zfini_pack(void *, int);
static void	mb_reclaim(uma_zone_t, int);

/* Ensure that MSIZE is a power of 2. */
CTASSERT((((MSIZE - 1) ^ MSIZE) + 1) >> 1 == MSIZE);

_Static_assert(offsetof(struct mbuf, m_ext) ==
    offsetof(struct mbuf, m_ext_pgs.m_ext),
    "m_ext offset mismatch between mbuf and ext_pgs");
_Static_assert(sizeof(struct mbuf) <= MSIZE,
    "size of mbuf exceeds MSIZE");

/*
 * Initialize FreeBSD Network buffer allocation.
 */
static void
mbuf_init(void *dummy)
{

	/*
	 * Configure UMA zones for Mbufs, Clusters, and Packets.
	 */
	zone_mbuf = uma_zcreate(MBUF_MEM_NAME, MSIZE,
	    mb_ctor_mbuf, mb_dtor_mbuf, NULL, NULL,
	    MSIZE - 1, UMA_ZONE_CONTIG | UMA_ZONE_MAXBUCKET);
	if (nmbufs > 0)
		nmbufs = uma_zone_set_max(zone_mbuf, nmbufs);
	uma_zone_set_warning(zone_mbuf, "kern.ipc.nmbufs limit reached");
	uma_zone_set_maxaction(zone_mbuf, mb_reclaim);

	zone_clust = uma_zcreate(MBUF_CLUSTER_MEM_NAME, MCLBYTES,
	    mb_ctor_clust, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_CONTIG);
	if (nmbclusters > 0)
		nmbclusters = uma_zone_set_max(zone_clust, nmbclusters);
	uma_zone_set_warning(zone_clust, "kern.ipc.nmbclusters limit reached");
	uma_zone_set_maxaction(zone_clust, mb_reclaim);

	zone_pack = uma_zsecond_create(MBUF_PACKET_MEM_NAME, mb_ctor_pack,
	    mb_dtor_pack, mb_zinit_pack, mb_zfini_pack, zone_mbuf);

	/* Make jumbo frame zone too.  Page size, 9k and 16k. */
	zone_jumbop = uma_zcreate(MBUF_JUMBOP_MEM_NAME, MJUMPAGESIZE,
	    mb_ctor_clust, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_CONTIG);
	if (nmbjumbop > 0)
		nmbjumbop = uma_zone_set_max(zone_jumbop, nmbjumbop);
	uma_zone_set_warning(zone_jumbop, "kern.ipc.nmbjumbop limit reached");
	uma_zone_set_maxaction(zone_jumbop, mb_reclaim);

	zone_jumbo9 = uma_zcreate(MBUF_JUMBO9_MEM_NAME, MJUM9BYTES,
	    mb_ctor_clust, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_CONTIG);
	if (nmbjumbo9 > 0)
		nmbjumbo9 = uma_zone_set_max(zone_jumbo9, nmbjumbo9);
	uma_zone_set_warning(zone_jumbo9, "kern.ipc.nmbjumbo9 limit reached");
	uma_zone_set_maxaction(zone_jumbo9, mb_reclaim);

	zone_jumbo16 = uma_zcreate(MBUF_JUMBO16_MEM_NAME, MJUM16BYTES,
	    mb_ctor_clust, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_CONTIG);
	if (nmbjumbo16 > 0)
		nmbjumbo16 = uma_zone_set_max(zone_jumbo16, nmbjumbo16);
	uma_zone_set_warning(zone_jumbo16, "kern.ipc.nmbjumbo16 limit reached");
	uma_zone_set_maxaction(zone_jumbo16, mb_reclaim);

	/*
	 * Hook event handler for low-memory situation, used to
	 * drain protocols and push data back to the caches (UMA
	 * later pushes it back to VM).
	 */
	EVENTHANDLER_REGISTER(vm_lowmem, mb_reclaim, NULL,
	    EVENTHANDLER_PRI_FIRST);

	snd_tag_count = counter_u64_alloc(M_WAITOK);
}
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbuf_init, NULL);

#ifdef DEBUGNET
/*
 * debugnet makes use of a pre-allocated pool of mbufs and clusters.  When
 * debugnet is configured, we initialize a set of UMA cache zones which return
 * items from this pool.  At panic-time, the regular UMA zone pointers are
 * overwritten with those of the cache zones so that drivers may allocate and
 * free mbufs and clusters without attempting to allocate physical memory.
 *
 * We keep mbufs and clusters in a pair of mbuf queues.  In particular, for
 * the purpose of caching clusters, we treat them as mbufs.
 */
static struct mbufq dn_mbufq =
    { STAILQ_HEAD_INITIALIZER(dn_mbufq.mq_head), 0, INT_MAX };
static struct mbufq dn_clustq =
    { STAILQ_HEAD_INITIALIZER(dn_clustq.mq_head), 0, INT_MAX };

static int dn_clsize;
static uma_zone_t dn_zone_mbuf;
static uma_zone_t dn_zone_clust;
static uma_zone_t dn_zone_pack;

static struct debugnet_saved_zones {
	uma_zone_t dsz_mbuf;
	uma_zone_t dsz_clust;
	uma_zone_t dsz_pack;
	uma_zone_t dsz_jumbop;
	uma_zone_t dsz_jumbo9;
	uma_zone_t dsz_jumbo16;
	bool dsz_debugnet_zones_enabled;
} dn_saved_zones;
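
/*
 * dn_buf_import() and dn_buf_release() below implement the import/release
 * side of the debugnet cache zones created in debugnet_mbuf_reinit():
 * items are taken from and returned to the pre-allocated queues above
 * rather than being obtained from VM.
 */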
static int
dn_buf_import(void *arg, void **store, int count, int domain __unused,
    int flags)
{
	struct mbufq *q;
	struct mbuf *m;
	int i;

	q = arg;

	for (i = 0; i < count; i++) {
		m = mbufq_dequeue(q);
		if (m == NULL)
			break;
		trash_init(m, q == &dn_mbufq ? MSIZE : dn_clsize, flags);
		store[i] = m;
	}
	KASSERT((flags & M_WAITOK) == 0 || i == count,
	    ("%s: ran out of pre-allocated mbufs", __func__));
	return (i);
}

static void
dn_buf_release(void *arg, void **store, int count)
{
	struct mbufq *q;
	struct mbuf *m;
	int i;

	q = arg;

	for (i = 0; i < count; i++) {
		m = store[i];
		(void)mbufq_enqueue(q, m);
	}
}

static int
dn_pack_import(void *arg __unused, void **store, int count, int domain __unused,
    int flags __unused)
{
	struct mbuf *m;
	void *clust;
	int i;

	for (i = 0; i < count; i++) {
		m = m_get(MT_DATA, M_NOWAIT);
		if (m == NULL)
			break;
		clust = uma_zalloc(dn_zone_clust, M_NOWAIT);
		if (clust == NULL) {
			m_free(m);
			break;
		}
		mb_ctor_clust(clust, dn_clsize, m, 0);
		store[i] = m;
	}
	KASSERT((flags & M_WAITOK) == 0 || i == count,
	    ("%s: ran out of pre-allocated mbufs", __func__));
	return (i);
}

static void
dn_pack_release(void *arg __unused, void **store, int count)
{
	struct mbuf *m;
	void *clust;
	int i;

	for (i = 0; i < count; i++) {
		m = store[i];
		clust = m->m_ext.ext_buf;
		uma_zfree(dn_zone_clust, clust);
		uma_zfree(dn_zone_mbuf, m);
	}
}

/*
 * Free the pre-allocated mbufs and clusters reserved for debugnet, and destroy
 * the corresponding UMA cache zones.
 */
void
debugnet_mbuf_drain(void)
{
	struct mbuf *m;
	void *item;

	if (dn_zone_mbuf != NULL) {
		uma_zdestroy(dn_zone_mbuf);
		dn_zone_mbuf = NULL;
	}
	if (dn_zone_clust != NULL) {
		uma_zdestroy(dn_zone_clust);
		dn_zone_clust = NULL;
	}
	if (dn_zone_pack != NULL) {
		uma_zdestroy(dn_zone_pack);
		dn_zone_pack = NULL;
	}

	while ((m = mbufq_dequeue(&dn_mbufq)) != NULL)
		m_free(m);
	while ((item = mbufq_dequeue(&dn_clustq)) != NULL)
		uma_zfree(m_getzone(dn_clsize), item);
}

/*
 * Callback invoked immediately prior to starting a debugnet connection.
 */
void
debugnet_mbuf_start(void)
{

	MPASS(!dn_saved_zones.dsz_debugnet_zones_enabled);

	/* Save the old zone pointers to restore when debugnet is closed. */
	dn_saved_zones = (struct debugnet_saved_zones) {
		.dsz_debugnet_zones_enabled = true,
		.dsz_mbuf = zone_mbuf,
		.dsz_clust = zone_clust,
		.dsz_pack = zone_pack,
		.dsz_jumbop = zone_jumbop,
		.dsz_jumbo9 = zone_jumbo9,
		.dsz_jumbo16 = zone_jumbo16,
	};

	/*
	 * All cluster zones return buffers of the size requested by the
	 * drivers.  It's up to the driver to reinitialize the zones if the
	 * MTU of a debugnet-enabled interface changes.
	 */
	printf("debugnet: overwriting mbuf zone pointers\n");
	zone_mbuf = dn_zone_mbuf;
	zone_clust = dn_zone_clust;
	zone_pack = dn_zone_pack;
	zone_jumbop = dn_zone_clust;
	zone_jumbo9 = dn_zone_clust;
	zone_jumbo16 = dn_zone_clust;
}

/*
 * Callback invoked when a debugnet connection is closed/finished.
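 *
 * The expected pairing is debugnet_mbuf_start() immediately before the
 * connection and debugnet_mbuf_finish() once it is done, with
 * debugnet_mbuf_reinit() having populated the pre-allocated pool earlier,
 * when debugnet was configured.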
 */
void
debugnet_mbuf_finish(void)
{

	MPASS(dn_saved_zones.dsz_debugnet_zones_enabled);

	printf("debugnet: restoring mbuf zone pointers\n");
	zone_mbuf = dn_saved_zones.dsz_mbuf;
	zone_clust = dn_saved_zones.dsz_clust;
	zone_pack = dn_saved_zones.dsz_pack;
	zone_jumbop = dn_saved_zones.dsz_jumbop;
	zone_jumbo9 = dn_saved_zones.dsz_jumbo9;
	zone_jumbo16 = dn_saved_zones.dsz_jumbo16;

	memset(&dn_saved_zones, 0, sizeof(dn_saved_zones));
}

/*
 * Reinitialize the debugnet mbuf+cluster pool and cache zones.
 */
void
debugnet_mbuf_reinit(int nmbuf, int nclust, int clsize)
{
	struct mbuf *m;
	void *item;

	debugnet_mbuf_drain();

	dn_clsize = clsize;

	dn_zone_mbuf = uma_zcache_create("debugnet_" MBUF_MEM_NAME,
	    MSIZE, mb_ctor_mbuf, mb_dtor_mbuf, NULL, NULL,
	    dn_buf_import, dn_buf_release,
	    &dn_mbufq, UMA_ZONE_NOBUCKET);

	dn_zone_clust = uma_zcache_create("debugnet_" MBUF_CLUSTER_MEM_NAME,
	    clsize, mb_ctor_clust, NULL, NULL, NULL,
	    dn_buf_import, dn_buf_release,
	    &dn_clustq, UMA_ZONE_NOBUCKET);

	dn_zone_pack = uma_zcache_create("debugnet_" MBUF_PACKET_MEM_NAME,
	    MCLBYTES, mb_ctor_pack, mb_dtor_pack, NULL, NULL,
	    dn_pack_import, dn_pack_release,
	    NULL, UMA_ZONE_NOBUCKET);

	while (nmbuf-- > 0) {
		m = m_get(MT_DATA, M_WAITOK);
		uma_zfree(dn_zone_mbuf, m);
	}
	while (nclust-- > 0) {
		item = uma_zalloc(m_getzone(dn_clsize), M_WAITOK);
		uma_zfree(dn_zone_clust, item);
	}
}
#endif /* DEBUGNET */

/*
 * Constructor for Mbuf master zone.
 *
 * The 'arg' pointer points to an mb_args structure which
 * contains call-specific information required to support the
 * mbuf allocation API.  See mbuf.h.
 */
static int
mb_ctor_mbuf(void *mem, int size, void *arg, int how)
{
	struct mbuf *m;
	struct mb_args *args;
	int error;
	int flags;
	short type;

	args = (struct mb_args *)arg;
	type = args->type;

	/*
	 * The mbuf is initialized later.  The caller has the
	 * responsibility to set up any MAC labels too.
	 */
	if (type == MT_NOINIT)
		return (0);

	m = (struct mbuf *)mem;
	flags = args->flags;
	MPASS((flags & M_NOFREE) == 0);

	error = m_init(m, how, type, flags);

	return (error);
}

/*
 * The Mbuf master zone destructor.
 */
static void
mb_dtor_mbuf(void *mem, int size, void *arg)
{
	struct mbuf *m;
	unsigned long flags;

	m = (struct mbuf *)mem;
	flags = (unsigned long)arg;

	KASSERT((m->m_flags & M_NOFREE) == 0, ("%s: M_NOFREE set", __func__));
	if (!(flags & MB_DTOR_SKIP) && (m->m_flags & M_PKTHDR) &&
	    !SLIST_EMPTY(&m->m_pkthdr.tags))
		m_tag_delete_chain(m, NULL);
}

/*
 * The Mbuf Packet zone destructor.
 */
static void
mb_dtor_pack(void *mem, int size, void *arg)
{
	struct mbuf *m;

	m = (struct mbuf *)mem;
	if ((m->m_flags & M_PKTHDR) != 0)
		m_tag_delete_chain(m, NULL);

	/* Make sure we've got a clean cluster back. */
	KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
	KASSERT(m->m_ext.ext_buf != NULL, ("%s: ext_buf == NULL", __func__));
	KASSERT(m->m_ext.ext_free == NULL, ("%s: ext_free != NULL", __func__));
	KASSERT(m->m_ext.ext_arg1 == NULL, ("%s: ext_arg1 != NULL", __func__));
	KASSERT(m->m_ext.ext_arg2 == NULL, ("%s: ext_arg2 != NULL", __func__));
	KASSERT(m->m_ext.ext_size == MCLBYTES, ("%s: ext_size != MCLBYTES", __func__));
	KASSERT(m->m_ext.ext_type == EXT_PACKET, ("%s: ext_type != EXT_PACKET", __func__));
#ifdef INVARIANTS
	trash_dtor(m->m_ext.ext_buf, MCLBYTES, arg);
#endif
	/*
	 * If there are processes blocked on zone_clust, waiting for pages
	 * to be freed up, cause them to be woken up by draining the
	 * packet zone.  We are exposed to a race here (in the check for
	 * the UMA_ZFLAG_FULL) where we might miss the flag set, but that
	 * is deliberate.  We don't want to acquire the zone lock for every
	 * mbuf free.
	 */
	if (uma_zone_exhausted(zone_clust))
		uma_zone_reclaim(zone_pack, UMA_RECLAIM_DRAIN);
}

/*
 * The Cluster and Jumbo[PAGESIZE|9|16] zone constructor.
 *
 * Here the 'arg' pointer points to the Mbuf which we
 * are configuring cluster storage for.  If 'arg' is
 * NULL we allocate just the cluster without setting
 * the mbuf to it.  See mbuf.h.
 */
static int
mb_ctor_clust(void *mem, int size, void *arg, int how)
{
	struct mbuf *m;

	m = (struct mbuf *)arg;
	if (m != NULL) {
		m->m_ext.ext_buf = (char *)mem;
		m->m_data = m->m_ext.ext_buf;
		m->m_flags |= M_EXT;
		m->m_ext.ext_free = NULL;
		m->m_ext.ext_arg1 = NULL;
		m->m_ext.ext_arg2 = NULL;
		m->m_ext.ext_size = size;
		m->m_ext.ext_type = m_gettype(size);
		m->m_ext.ext_flags = EXT_FLAG_EMBREF;
		m->m_ext.ext_count = 1;
	}

	return (0);
}

/*
 * The Packet secondary zone's init routine, executed on the
 * object's transition from mbuf keg slab to zone cache.
 */
static int
mb_zinit_pack(void *mem, int size, int how)
{
	struct mbuf *m;

	m = (struct mbuf *)mem;		/* m is virgin. */
	if (uma_zalloc_arg(zone_clust, m, how) == NULL ||
	    m->m_ext.ext_buf == NULL)
		return (ENOMEM);
	m->m_ext.ext_type = EXT_PACKET;	/* Override. */
#ifdef INVARIANTS
	trash_init(m->m_ext.ext_buf, MCLBYTES, how);
#endif
	return (0);
}

/*
 * The Packet secondary zone's fini routine, executed on the
 * object's transition from zone cache to keg slab.
 */
static void
mb_zfini_pack(void *mem, int size)
{
	struct mbuf *m;

	m = (struct mbuf *)mem;
#ifdef INVARIANTS
	trash_fini(m->m_ext.ext_buf, MCLBYTES);
#endif
	uma_zfree_arg(zone_clust, m->m_ext.ext_buf, NULL);
#ifdef INVARIANTS
	trash_dtor(mem, size, NULL);
#endif
}
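
/*
 * Taken together, mb_zinit_pack() and mb_zfini_pack() manage the cluster
 * attached to a cached Packet Zone mbuf as it moves between the keg and
 * the zone cache, while mb_ctor_pack() below and mb_dtor_pack() above only
 * have to (re)initialize the mbuf header on each allocation and free.
 */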

/*
 * The "packet" keg constructor.
 */
static int
mb_ctor_pack(void *mem, int size, void *arg, int how)
{
	struct mbuf *m;
	struct mb_args *args;
	int error, flags;
	short type;

	m = (struct mbuf *)mem;
	args = (struct mb_args *)arg;
	flags = args->flags;
	type = args->type;
	MPASS((flags & M_NOFREE) == 0);

#ifdef INVARIANTS
	trash_ctor(m->m_ext.ext_buf, MCLBYTES, arg, how);
#endif

	error = m_init(m, how, type, flags);

	/* m_ext is already initialized. */
	m->m_data = m->m_ext.ext_buf;
	m->m_flags = (flags | M_EXT);

	return (error);
}

/*
 * This is the protocol drain routine.  Called by UMA whenever any of the
 * mbuf zones is close to its limit.
 *
 * No locks should be held when this is called.  The drain routines have to
 * presently acquire some locks which raises the possibility of lock order
 * reversal.
 */
static void
mb_reclaim(uma_zone_t zone __unused, int pending __unused)
{
	struct epoch_tracker et;
	struct domain *dp;
	struct protosw *pr;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK | WARN_PANIC, NULL, __func__);

	NET_EPOCH_ENTER(et);
	for (dp = domains; dp != NULL; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain != NULL)
				(*pr->pr_drain)();
	NET_EPOCH_EXIT(et);
}

/*
 * Free "count" units of I/O from an mbuf chain.  They could be held
 * in EXT_PGS or just as a normal mbuf.  This code is intended to be
 * called in an error path (I/O error, closed connection, etc).
 */
void
mb_free_notready(struct mbuf *m, int count)
{
	int i;

	for (i = 0; i < count && m != NULL; i++) {
		if ((m->m_flags & M_EXT) != 0 &&
		    m->m_ext.ext_type == EXT_PGS) {
			m->m_ext_pgs.nrdy--;
			if (m->m_ext_pgs.nrdy != 0)
				continue;
		}
		m = m_free(m);
	}
	KASSERT(i == count, ("Removed only %d items from %p", i, m));
}

/*
 * Compress an unmapped mbuf into a simple mbuf when it holds a small
 * amount of data.  This is used as a DOS defense to avoid having
 * small packets tie up wired pages, an ext_pgs structure, and an
 * mbuf.  Since this converts the existing mbuf in place, it can only
 * be used if there are no other references to 'm'.
 */
int
mb_unmapped_compress(struct mbuf *m)
{
	volatile u_int *refcnt;
	struct mbuf m_temp;

	/*
	 * Assert that 'm' does not have a packet header.  If 'm' had
	 * a packet header, it would only be able to hold MHLEN bytes
	 * and m_data would have to be initialized differently.
	 */
	KASSERT((m->m_flags & M_PKTHDR) == 0 && (m->m_flags & M_EXT) &&
	    m->m_ext.ext_type == EXT_PGS,
	    ("%s: m %p !M_EXT or !EXT_PGS or M_PKTHDR", __func__, m));
	KASSERT(m->m_len <= MLEN, ("m_len too large %p", m));

	if (m->m_ext.ext_flags & EXT_FLAG_EMBREF) {
		refcnt = &m->m_ext.ext_count;
	} else {
		KASSERT(m->m_ext.ext_cnt != NULL,
		    ("%s: no refcounting pointer on %p", __func__, m));
		refcnt = m->m_ext.ext_cnt;
	}

	if (*refcnt != 1)
		return (EBUSY);

	m_init(&m_temp, M_NOWAIT, MT_DATA, 0);

	/* copy data out of old mbuf */
	m_copydata(m, 0, m->m_len, mtod(&m_temp, char *));
	m_temp.m_len = m->m_len;

	/* Free the backing pages. */
	m->m_ext.ext_free(m);

	/* Turn 'm' into a "normal" mbuf. */
	m->m_flags &= ~(M_EXT | M_RDONLY | M_NOMAP);
	m->m_data = m->m_dat;

	/* copy data back into m */
	m_copydata(&m_temp, 0, m_temp.m_len, mtod(m, char *));

	return (0);
}

/*
 * These next few routines are used to permit downgrading an unmapped
 * mbuf to a chain of mapped mbufs.  This is used when an interface
 * doesn't support unmapped mbufs or if checksums need to be
 * computed in software.
 *
 * Each unmapped mbuf is converted to a chain of mbufs.  First, any
 * TLS header data is stored in a regular mbuf.  Second, each page of
 * unmapped data is stored in an mbuf with an EXT_SFBUF external
 * cluster.  These mbufs use an sf_buf to provide a valid KVA for the
 * associated physical page.  They also hold a reference on the
 * original EXT_PGS mbuf to ensure the physical page doesn't go away.
 * Finally, any TLS trailer data is stored in a regular mbuf.
 *
 * mb_unmapped_free_mext() is the ext_free handler for the EXT_SFBUF
 * mbufs.  It frees the associated sf_buf and releases its reference
 * on the original EXT_PGS mbuf.
 *
 * _mb_unmapped_to_ext() is a helper function that converts a single
 * unmapped mbuf into a chain of mbufs.
 *
 * mb_unmapped_to_ext() is the public function that walks an mbuf
 * chain converting any unmapped mbufs to mapped mbufs.  It returns
 * the new chain of mapped mbufs on success.  On failure it frees
 * the original mbuf chain and returns NULL.
 */
static void
mb_unmapped_free_mext(struct mbuf *m)
{
	struct sf_buf *sf;
	struct mbuf *old_m;

	sf = m->m_ext.ext_arg1;
	sf_buf_free(sf);

	/* Drop the reference on the backing EXT_PGS mbuf. */
	old_m = m->m_ext.ext_arg2;
	mb_free_ext(old_m);
}

static struct mbuf *
_mb_unmapped_to_ext(struct mbuf *m)
{
	struct mbuf_ext_pgs *ext_pgs;
	struct mbuf *m_new, *top, *prev, *mref;
	struct sf_buf *sf;
	vm_page_t pg;
	int i, len, off, pglen, pgoff, seglen, segoff;
	volatile u_int *refcnt;
	u_int ref_inc = 0;

	MBUF_EXT_PGS_ASSERT(m);
	ext_pgs = &m->m_ext_pgs;
	len = m->m_len;
	KASSERT(ext_pgs->tls == NULL, ("%s: can't convert TLS mbuf %p",
	    __func__, m));

	/* See if this is the mbuf that holds the embedded refcount. */
	if (m->m_ext.ext_flags & EXT_FLAG_EMBREF) {
		refcnt = &m->m_ext.ext_count;
		mref = m;
	} else {
		KASSERT(m->m_ext.ext_cnt != NULL,
		    ("%s: no refcounting pointer on %p", __func__, m));
		refcnt = m->m_ext.ext_cnt;
		mref = __containerof(refcnt, struct mbuf, m_ext.ext_count);
	}

	/* Skip over any data removed from the front. */
	off = mtod(m, vm_offset_t);

	top = NULL;
	if (ext_pgs->hdr_len != 0) {
		if (off >= ext_pgs->hdr_len) {
			off -= ext_pgs->hdr_len;
		} else {
			seglen = ext_pgs->hdr_len - off;
			segoff = off;
			seglen = min(seglen, len);
			off = 0;
			len -= seglen;
			m_new = m_get(M_NOWAIT, MT_DATA);
			if (m_new == NULL)
				goto fail;
			m_new->m_len = seglen;
			prev = top = m_new;
			memcpy(mtod(m_new, void *), &ext_pgs->m_epg_hdr[segoff],
			    seglen);
		}
	}
	pgoff = ext_pgs->first_pg_off;
	for (i = 0; i < ext_pgs->npgs && len > 0; i++) {
		pglen = mbuf_ext_pg_len(ext_pgs, i, pgoff);
		if (off >= pglen) {
			off -= pglen;
			pgoff = 0;
			continue;
		}
		seglen = pglen - off;
		segoff = pgoff + off;
		off = 0;
		seglen = min(seglen, len);
		len -= seglen;

		pg = PHYS_TO_VM_PAGE(ext_pgs->m_epg_pa[i]);
		m_new = m_get(M_NOWAIT, MT_DATA);
		if (m_new == NULL)
			goto fail;
		if (top == NULL) {
			top = prev = m_new;
		} else {
			prev->m_next = m_new;
			prev = m_new;
		}
		sf = sf_buf_alloc(pg, SFB_NOWAIT);
		if (sf == NULL)
			goto fail;

		ref_inc++;
		m_extadd(m_new, (char *)sf_buf_kva(sf), PAGE_SIZE,
		    mb_unmapped_free_mext, sf, mref, M_RDONLY, EXT_SFBUF);
		m_new->m_data += segoff;
		m_new->m_len = seglen;

		pgoff = 0;
	}
	if (len != 0) {
		KASSERT((off + len) <= ext_pgs->trail_len,
		    ("off + len > trail (%d + %d > %d)", off, len,
		    ext_pgs->trail_len));
		m_new = m_get(M_NOWAIT, MT_DATA);
		if (m_new == NULL)
			goto fail;
		if (top == NULL)
			top = m_new;
		else
			prev->m_next = m_new;
		m_new->m_len = len;
		memcpy(mtod(m_new, void *), &ext_pgs->m_epg_trail[off], len);
	}

	if (ref_inc != 0) {
		/*
		 * Obtain an additional reference on the old mbuf for
		 * each created EXT_SFBUF mbuf.  They will be dropped
		 * in mb_unmapped_free_mext().
		 */
		if (*refcnt == 1)
			*refcnt += ref_inc;
		else
			atomic_add_int(refcnt, ref_inc);
	}
	m_free(m);
	return (top);

fail:
	if (ref_inc != 0) {
		/*
		 * Obtain an additional reference on the old mbuf for
		 * each created EXT_SFBUF mbuf.  They will be
		 * immediately dropped when these mbufs are freed
		 * below.
		 */
		if (*refcnt == 1)
			*refcnt += ref_inc;
		else
			atomic_add_int(refcnt, ref_inc);
	}
	m_free(m);
	m_freem(top);
	return (NULL);
}

struct mbuf *
mb_unmapped_to_ext(struct mbuf *top)
{
	struct mbuf *m, *next, *prev = NULL;

	for (m = top; m != NULL; m = next) {
		/* m might be freed, so cache the next pointer. */
		next = m->m_next;
		if (m->m_flags & M_NOMAP) {
			if (prev != NULL) {
				/*
				 * Remove 'm' from the new chain so
				 * that the 'top' chain terminates
				 * before 'm' in case 'top' is freed
				 * due to an error.
				 */
				prev->m_next = NULL;
			}
			m = _mb_unmapped_to_ext(m);
			if (m == NULL) {
				m_freem(top);
				m_freem(next);
				return (NULL);
			}
			if (prev == NULL) {
				top = m;
			} else {
				prev->m_next = m;
			}

			/*
			 * Replaced one mbuf with a chain, so we must
			 * find the end of chain.
			 */
			prev = m_last(m);
		} else {
			if (prev != NULL) {
				prev->m_next = m;
			}
			prev = m;
		}
	}
	return (top);
}
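
/*
 * A typical caller of mb_unmapped_to_ext() is a transmit path that has
 * found that the outgoing interface cannot handle unmapped mbufs.  A
 * rough sketch, where the capability test is purely illustrative:
 *
 *	if (!iface_handles_unmapped_mbufs)
 *		if ((m = mb_unmapped_to_ext(m)) == NULL)
 *			return (ENOBUFS);
 *
 * On failure the original chain has already been freed, so the caller
 * must not touch it again.
 */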

/*
 * Allocate an empty EXT_PGS mbuf.  The ext_free routine is
 * responsible for freeing any pages backing this mbuf when it is
 * freed.
 */
struct mbuf *
mb_alloc_ext_pgs(int how, m_ext_free_t ext_free)
{
	struct mbuf *m;
	struct mbuf_ext_pgs *ext_pgs;

	m = m_get(how, MT_DATA);
	if (m == NULL)
		return (NULL);

	ext_pgs = &m->m_ext_pgs;
	ext_pgs->npgs = 0;
	ext_pgs->nrdy = 0;
	ext_pgs->first_pg_off = 0;
	ext_pgs->last_pg_len = 0;
	ext_pgs->flags = 0;
	ext_pgs->hdr_len = 0;
	ext_pgs->trail_len = 0;
	ext_pgs->tls = NULL;
	ext_pgs->so = NULL;
	m->m_data = NULL;
	m->m_flags |= (M_EXT | M_RDONLY | M_NOMAP);
	m->m_ext.ext_type = EXT_PGS;
	m->m_ext.ext_flags = EXT_FLAG_EMBREF;
	m->m_ext.ext_count = 1;
	m->m_ext.ext_size = 0;
	m->m_ext.ext_free = ext_free;
	return (m);
}

#ifdef INVARIANT_SUPPORT
void
mb_ext_pgs_check(struct mbuf_ext_pgs *ext_pgs)
{

	/*
	 * NB: This expects a non-empty buffer (npgs > 0 and
	 * last_pg_len > 0).
	 */
	KASSERT(ext_pgs->npgs > 0,
	    ("ext_pgs with no valid pages: %p", ext_pgs));
	KASSERT(ext_pgs->npgs <= nitems(ext_pgs->m_epg_pa),
	    ("ext_pgs with too many pages: %p", ext_pgs));
	KASSERT(ext_pgs->nrdy <= ext_pgs->npgs,
	    ("ext_pgs with too many ready pages: %p", ext_pgs));
	KASSERT(ext_pgs->first_pg_off < PAGE_SIZE,
	    ("ext_pgs with too large page offset: %p", ext_pgs));
	KASSERT(ext_pgs->last_pg_len > 0,
	    ("ext_pgs with zero last page length: %p", ext_pgs));
	KASSERT(ext_pgs->last_pg_len <= PAGE_SIZE,
	    ("ext_pgs with too large last page length: %p", ext_pgs));
	if (ext_pgs->npgs == 1) {
		KASSERT(ext_pgs->first_pg_off + ext_pgs->last_pg_len <=
		    PAGE_SIZE, ("ext_pgs with single page too large: %p",
		    ext_pgs));
	}
	KASSERT(ext_pgs->hdr_len <= sizeof(ext_pgs->m_epg_hdr),
	    ("ext_pgs with too large header length: %p", ext_pgs));
	KASSERT(ext_pgs->trail_len <= sizeof(ext_pgs->m_epg_trail),
	    ("ext_pgs with too large trailer length: %p", ext_pgs));
}
#endif

/*
 * Clean up after mbufs with M_EXT storage attached to them if the
 * reference count hits 1.
 */
void
mb_free_ext(struct mbuf *m)
{
	volatile u_int *refcnt;
	struct mbuf *mref;
	int freembuf;

	KASSERT(m->m_flags & M_EXT, ("%s: M_EXT not set on %p", __func__, m));

	/* See if this is the mbuf that holds the embedded refcount. */
	if (m->m_ext.ext_flags & EXT_FLAG_EMBREF) {
		refcnt = &m->m_ext.ext_count;
		mref = m;
	} else {
		KASSERT(m->m_ext.ext_cnt != NULL,
		    ("%s: no refcounting pointer on %p", __func__, m));
		refcnt = m->m_ext.ext_cnt;
		mref = __containerof(refcnt, struct mbuf, m_ext.ext_count);
	}

	/*
	 * Check if the header is embedded in the cluster.  It is
	 * important that we can't touch any of the mbuf fields
	 * after we have freed the external storage, since mbuf
	 * could have been embedded in it.  For now, the mbufs
	 * embedded into the cluster are always of type EXT_EXTREF,
	 * and for this type we won't free the mref.
	 */
	if (m->m_flags & M_NOFREE) {
		freembuf = 0;
		KASSERT(m->m_ext.ext_type == EXT_EXTREF ||
		    m->m_ext.ext_type == EXT_RXRING,
		    ("%s: no-free mbuf %p has wrong type", __func__, m));
	} else
		freembuf = 1;

	/* Free attached storage if this mbuf is the only reference to it. */
	if (*refcnt == 1 || atomic_fetchadd_int(refcnt, -1) == 1) {
		switch (m->m_ext.ext_type) {
		case EXT_PACKET:
			/* The packet zone is special. */
			if (*refcnt == 0)
				*refcnt = 1;
			uma_zfree(zone_pack, mref);
			break;
		case EXT_CLUSTER:
			uma_zfree(zone_clust, m->m_ext.ext_buf);
			uma_zfree(zone_mbuf, mref);
			break;
		case EXT_JUMBOP:
			uma_zfree(zone_jumbop, m->m_ext.ext_buf);
			uma_zfree(zone_mbuf, mref);
			break;
		case EXT_JUMBO9:
			uma_zfree(zone_jumbo9, m->m_ext.ext_buf);
			uma_zfree(zone_mbuf, mref);
			break;
		case EXT_JUMBO16:
			uma_zfree(zone_jumbo16, m->m_ext.ext_buf);
			uma_zfree(zone_mbuf, mref);
			break;
		case EXT_PGS: {
#ifdef KERN_TLS
			struct mbuf_ext_pgs *pgs;
			struct ktls_session *tls;
#endif

			KASSERT(mref->m_ext.ext_free != NULL,
			    ("%s: ext_free not set", __func__));
			mref->m_ext.ext_free(mref);
#ifdef KERN_TLS
			pgs = &mref->m_ext_pgs;
			tls = pgs->tls;
			if (tls != NULL &&
			    !refcount_release_if_not_last(&tls->refcount))
				ktls_enqueue_to_free(pgs);
			else
#endif
				uma_zfree(zone_mbuf, mref);
			break;
		}
		case EXT_SFBUF:
		case EXT_NET_DRV:
		case EXT_MOD_TYPE:
		case EXT_DISPOSABLE:
			KASSERT(mref->m_ext.ext_free != NULL,
			    ("%s: ext_free not set", __func__));
			mref->m_ext.ext_free(mref);
			uma_zfree(zone_mbuf, mref);
			break;
		case EXT_EXTREF:
			KASSERT(m->m_ext.ext_free != NULL,
			    ("%s: ext_free not set", __func__));
			m->m_ext.ext_free(m);
			break;
		case EXT_RXRING:
			KASSERT(m->m_ext.ext_free == NULL,
			    ("%s: ext_free is set", __func__));
			break;
		default:
			KASSERT(m->m_ext.ext_type == 0,
			    ("%s: unknown ext_type", __func__));
		}
	}

	if (freembuf && m != mref)
		uma_zfree(zone_mbuf, m);
}

/*
 * Official mbuf(9) allocation KPI for stack and drivers:
 *
 * m_get()	- a single mbuf without any attachments, sys/mbuf.h.
 * m_gethdr()	- a single mbuf initialized as M_PKTHDR, sys/mbuf.h.
 * m_getcl()	- an mbuf + 2k cluster, sys/mbuf.h.
 * m_clget()	- attach cluster to already allocated mbuf.
 * m_cljget()	- attach jumbo cluster to already allocated mbuf.
 * m_get2()	- allocate minimum mbuf that would fit size argument.
 * m_getm2()	- allocate a chain of mbufs/clusters.
 * m_extadd()	- attach external cluster to mbuf.
 *
 * m_free()	- free single mbuf with its tags and ext, sys/mbuf.h.
 * m_freem()	- free chain of mbufs.
 */

int
m_clget(struct mbuf *m, int how)
{

	KASSERT((m->m_flags & M_EXT) == 0, ("%s: mbuf %p has M_EXT",
	    __func__, m));
	m->m_ext.ext_buf = (char *)NULL;
	uma_zalloc_arg(zone_clust, m, how);
	/*
	 * On a cluster allocation failure, drain the packet zone and retry;
	 * we might be able to loosen a few clusters up on the drain.
	 */
	if ((how & M_NOWAIT) && (m->m_ext.ext_buf == NULL)) {
		uma_zone_reclaim(zone_pack, UMA_RECLAIM_DRAIN);
		uma_zalloc_arg(zone_clust, m, how);
	}
	MBUF_PROBE2(m__clget, m, how);
	return (m->m_flags & M_EXT);
}
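
/*
 * For example, a typical driver receive path fills its ring using the
 * combined allocator from the KPI list above:
 *
 *	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	m->m_len = m->m_pkthdr.len = MCLBYTES;
 *
 * (Illustrative sketch only; error handling and DMA mapping are up to
 * the driver.)
 */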

/*
 * m_cljget() is different from m_clget() as it can allocate clusters without
 * attaching them to an mbuf.  In that case the return value is the pointer
 * to the cluster of the requested size.  If an mbuf was specified, it gets
 * the cluster attached to it and the return value can be safely ignored.
 * For size it takes MCLBYTES, MJUMPAGESIZE, MJUM9BYTES, MJUM16BYTES.
 */
void *
m_cljget(struct mbuf *m, int how, int size)
{
	uma_zone_t zone;
	void *retval;

	if (m != NULL) {
		KASSERT((m->m_flags & M_EXT) == 0, ("%s: mbuf %p has M_EXT",
		    __func__, m));
		m->m_ext.ext_buf = NULL;
	}

	zone = m_getzone(size);
	retval = uma_zalloc_arg(zone, m, how);

	MBUF_PROBE4(m__cljget, m, how, size, retval);

	return (retval);
}

/*
 * m_get2() allocates minimum mbuf that would fit "size" argument.
 */
struct mbuf *
m_get2(int size, int how, short type, int flags)
{
	struct mb_args args;
	struct mbuf *m, *n;

	args.flags = flags;
	args.type = type;

	if (size <= MHLEN || (size <= MLEN && (flags & M_PKTHDR) == 0))
		return (uma_zalloc_arg(zone_mbuf, &args, how));
	if (size <= MCLBYTES)
		return (uma_zalloc_arg(zone_pack, &args, how));

	if (size > MJUMPAGESIZE)
		return (NULL);

	m = uma_zalloc_arg(zone_mbuf, &args, how);
	if (m == NULL)
		return (NULL);

	n = uma_zalloc_arg(zone_jumbop, m, how);
	if (n == NULL) {
		uma_zfree(zone_mbuf, m);
		return (NULL);
	}

	return (m);
}

/*
 * m_getjcl() returns an mbuf with a cluster of the specified size attached.
 * For size it takes MCLBYTES, MJUMPAGESIZE, MJUM9BYTES, MJUM16BYTES.
 */
struct mbuf *
m_getjcl(int how, short type, int flags, int size)
{
	struct mb_args args;
	struct mbuf *m, *n;
	uma_zone_t zone;

	if (size == MCLBYTES)
		return (m_getcl(how, type, flags));

	args.flags = flags;
	args.type = type;

	m = uma_zalloc_arg(zone_mbuf, &args, how);
	if (m == NULL)
		return (NULL);

	zone = m_getzone(size);
	n = uma_zalloc_arg(zone, m, how);
	if (n == NULL) {
		uma_zfree(zone_mbuf, m);
		return (NULL);
	}
	return (m);
}

/*
 * Allocate a given length worth of mbufs and/or clusters (whatever fits
 * best) and return a pointer to the top of the allocated chain.  If an
 * existing mbuf chain is provided, then we will append the new chain
 * to the existing one and return a pointer to the provided mbuf.
 */
struct mbuf *
m_getm2(struct mbuf *m, int len, int how, short type, int flags)
{
	struct mbuf *mb, *nm = NULL, *mtail = NULL;

	KASSERT(len >= 0, ("%s: len is < 0", __func__));

	/* Validate flags. */
	flags &= (M_PKTHDR | M_EOR);

	/* Packet header mbuf must be first in chain. */
	if ((flags & M_PKTHDR) && m != NULL)
		flags &= ~M_PKTHDR;

	/* Loop and append maximum sized mbufs to the chain tail. */
	while (len > 0) {
		if (len > MCLBYTES)
			mb = m_getjcl(how, type, (flags & M_PKTHDR),
			    MJUMPAGESIZE);
		else if (len >= MINCLSIZE)
			mb = m_getcl(how, type, (flags & M_PKTHDR));
		else if (flags & M_PKTHDR)
			mb = m_gethdr(how, type);
		else
			mb = m_get(how, type);

		/* Fail the whole operation if one mbuf can't be allocated. */
		if (mb == NULL) {
			if (nm != NULL)
				m_freem(nm);
			return (NULL);
		}

		/* Bookkeeping. */
		len -= M_SIZE(mb);
		if (mtail != NULL)
			mtail->m_next = mb;
		else
			nm = mb;
		mtail = mb;
		flags &= ~M_PKTHDR;	/* Only valid on the first mbuf. */
	}
	if (flags & M_EOR)
		mtail->m_flags |= M_EOR;	/* Only valid on the last mbuf. */

	/* If mbuf was supplied, append new chain to the end of it. */
	if (m != NULL) {
		for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next)
			;
		mtail->m_next = nm;
		mtail->m_flags &= ~M_EOR;
	} else
		m = nm;

	return (m);
}

/*-
 * Configure a provided mbuf to refer to the provided external storage
 * buffer and setup a reference count for said buffer.
 *
 * Arguments:
 *    mb     The existing mbuf to which to attach the provided buffer.
 *    buf    The address of the provided external storage buffer.
 *    size   The size of the provided buffer.
 *    freef  A pointer to a routine that is responsible for freeing the
 *           provided external storage buffer.
 *    arg1,
 *    arg2   Optional pointers to argument data (of any type) to be passed
 *           to the provided freef routine (may be NULL).
 *    flags  Any other flags to be passed to the provided mbuf.
 *    type   The type that the external storage buffer should be
 *           labeled with.
 *
 * Returns:
 *    Nothing.
 */
void
m_extadd(struct mbuf *mb, char *buf, u_int size, m_ext_free_t freef,
    void *arg1, void *arg2, int flags, int type)
{

	KASSERT(type != EXT_CLUSTER, ("%s: EXT_CLUSTER not allowed", __func__));

	mb->m_flags |= (M_EXT | flags);
	mb->m_ext.ext_buf = buf;
	mb->m_data = mb->m_ext.ext_buf;
	mb->m_ext.ext_size = size;
	mb->m_ext.ext_free = freef;
	mb->m_ext.ext_arg1 = arg1;
	mb->m_ext.ext_arg2 = arg2;
	mb->m_ext.ext_type = type;

	if (type != EXT_EXTREF) {
		mb->m_ext.ext_count = 1;
		mb->m_ext.ext_flags = EXT_FLAG_EMBREF;
	} else
		mb->m_ext.ext_flags = 0;
}

/*
 * Free an entire chain of mbufs and associated external buffers, if
 * applicable.
 */
void
m_freem(struct mbuf *mb)
{

	MBUF_PROBE1(m__freem, mb);
	while (mb != NULL)
		mb = m_free(mb);
}

void
m_snd_tag_init(struct m_snd_tag *mst, struct ifnet *ifp)
{

	if_ref(ifp);
	mst->ifp = ifp;
	refcount_init(&mst->refcount, 1);
	counter_u64_add(snd_tag_count, 1);
}

void
m_snd_tag_destroy(struct m_snd_tag *mst)
{
	struct ifnet *ifp;

	ifp = mst->ifp;
	ifp->if_snd_tag_free(mst);
	if_rele(ifp);
	counter_u64_add(snd_tag_count, -1);
}