/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2002-2019 Jeffrey Roberson <jeff@FreeBSD.org>
 * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
 * Copyright (c) 2004-2006 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uma_core.c  Implementation of the Universal Memory allocator
 *
 * This allocator is intended to replace the multitude of similar object caches
 * in the standard FreeBSD kernel.  The intent is to be flexible as well as
 * efficient.  A primary design goal is to return unused memory to the rest of
 * the system.  This will make the system as a whole more flexible due to the
 * ability to move memory to subsystems which most need it instead of leaving
 * pools of reserved memory unused.
 *
 * The basic ideas stem from similar slab/zone based allocators whose
 * algorithms are well known.
 */

/*
 * TODO:
 *	- Improve memory usage for large allocations
 *	- Investigate cache size adjustments
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_param.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitset.h>
#include <sys/domainset.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/limits.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/smp.h>
#include <sys/smr.h>
#include <sys/taskqueue.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_domainset.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_dumpset.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

#include <ddb/ddb.h>

#ifdef DEBUG_MEMGUARD
#include <vm/memguard.h>
#endif

#include <machine/md_var.h>

#ifdef INVARIANTS
#define	UMA_ALWAYS_CTORDTOR	1
#else
#define	UMA_ALWAYS_CTORDTOR	0
#endif

/*
 * This is the zone and keg from which all zones are spawned.
 */
static uma_zone_t kegs;
static uma_zone_t zones;

/*
 * On INVARIANTS builds, the slab contains a second bitset of the same size,
 * "dbg_bits", which is laid out immediately after us_free.
 */
#ifdef INVARIANTS
#define	SLAB_BITSETS	2
#else
#define	SLAB_BITSETS	1
#endif

/*
 * These are the two zones from which all offpage uma_slab_ts are allocated.
 *
 * One zone is for slab headers that can represent a larger number of items,
 * making the slabs themselves more efficient, and the other zone is for
 * headers that are smaller and represent fewer items, making the headers more
 * efficient.
 */
#define	SLABZONE_SIZE(setsize)						\
    (sizeof(struct uma_hash_slab) + BITSET_SIZE(setsize) * SLAB_BITSETS)
#define	SLABZONE0_SETSIZE	(PAGE_SIZE / 16)
#define	SLABZONE1_SETSIZE	SLAB_MAX_SETSIZE
#define	SLABZONE0_SIZE	SLABZONE_SIZE(SLABZONE0_SETSIZE)
#define	SLABZONE1_SIZE	SLABZONE_SIZE(SLABZONE1_SETSIZE)
static uma_zone_t slabzones[2];

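/*
 * A worked sizing example, assuming 4 KB pages and 64-bit longs:
 * SLABZONE0_SETSIZE is 4096 / 16 == 256, so a slabzones[0] header can
 * represent at most 256 items.  BITSET_SIZE(256) is 256 / 8 == 32
 * bytes, making SLABZONE0_SIZE the uma_hash_slab header plus one
 * 32-byte bitset (two bitsets on INVARIANTS kernels, where
 * SLAB_BITSETS is 2).  slabzones[1] is sized the same way using the
 * larger SLAB_MAX_SETSIZE limit.
 */
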
/*
 * The initial hash tables come out of this zone so they can be allocated
 * prior to malloc coming up.
 */
static uma_zone_t hashzone;

/* The boot-time adjusted value for cache line alignment. */
int uma_align_cache = 64 - 1;

static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");
static MALLOC_DEFINE(M_UMA, "UMA", "UMA Misc");

/*
 * Are we allowed to allocate buckets?
 */
static int bucketdisable = 1;

/* Linked list of all kegs in the system */
static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);

/* Linked list of all cache-only zones in the system */
static LIST_HEAD(,uma_zone) uma_cachezones =
    LIST_HEAD_INITIALIZER(uma_cachezones);

/* This RW lock protects the keg list */
static struct rwlock_padalign __exclusive_cache_line uma_rwlock;

/*
 * First available virtual address for boot time allocations.
 */
static vm_offset_t bootstart;
static vm_offset_t bootmem;

static struct sx uma_reclaim_lock;

/*
 * kmem soft limit, initialized by uma_set_limit().  Ensure that early
 * allocations don't trigger a wakeup of the reclaim thread.
 */
unsigned long uma_kmem_limit = LONG_MAX;
SYSCTL_ULONG(_vm, OID_AUTO, uma_kmem_limit, CTLFLAG_RD, &uma_kmem_limit, 0,
    "UMA kernel memory soft limit");
unsigned long uma_kmem_total;
SYSCTL_ULONG(_vm, OID_AUTO, uma_kmem_total, CTLFLAG_RD, &uma_kmem_total, 0,
    "UMA kernel memory usage");

/* Is the VM done starting up? */
static enum {
	BOOT_COLD,
	BOOT_KVA,
	BOOT_PCPU,
	BOOT_RUNNING,
	BOOT_SHUTDOWN,
} booted = BOOT_COLD;

/*
 * This is the handle used to schedule events that need to happen
 * outside of the allocation fast path.
 */
static struct callout uma_callout;
#define	UMA_TIMEOUT	20		/* Seconds for callout interval. */

/*
 * This structure is passed as the zone ctor arg so that I don't have to create
 * a special allocation function just for zones.
 */
struct uma_zctor_args {
	const char *name;
	size_t size;
	uma_ctor ctor;
	uma_dtor dtor;
	uma_init uminit;
	uma_fini fini;
	uma_import import;
	uma_release release;
	void *arg;
	uma_keg_t keg;
	int align;
	uint32_t flags;
};

struct uma_kctor_args {
	uma_zone_t zone;
	size_t size;
	uma_init uminit;
	uma_fini fini;
	int align;
	uint32_t flags;
};

struct uma_bucket_zone {
	uma_zone_t	ubz_zone;
	const char	*ubz_name;
	int		ubz_entries;	/* Number of items it can hold. */
	int		ubz_maxsize;	/* Maximum allocation size per-item. */
};

/*
 * Compute the actual number of bucket entries to pack them in power
 * of two sizes for more efficient space utilization.
 */
#define	BUCKET_SIZE(n)						\
    (((sizeof(void *) * (n)) - sizeof(struct uma_bucket)) / sizeof(void *))

#define	BUCKET_MAX	BUCKET_SIZE(256)

struct uma_bucket_zone bucket_zones[] = {
	/* Literal bucket sizes. */
	{ NULL, "2 Bucket", 2, 4096 },
	{ NULL, "4 Bucket", 4, 3072 },
	{ NULL, "8 Bucket", 8, 2048 },
	{ NULL, "16 Bucket", 16, 1024 },
	/* Rounded down power of 2 sizes for efficiency. */
	{ NULL, "32 Bucket", BUCKET_SIZE(32), 512 },
	{ NULL, "64 Bucket", BUCKET_SIZE(64), 256 },
	{ NULL, "128 Bucket", BUCKET_SIZE(128), 128 },
	{ NULL, "256 Bucket", BUCKET_SIZE(256), 64 },
	{ NULL, NULL, 0}
};

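/*
 * To see how BUCKET_SIZE packs buckets into power-of-two allocations,
 * take an LP64 system and assume, purely for the arithmetic, a 24-byte
 * struct uma_bucket header: BUCKET_SIZE(32) is ((8 * 32) - 24) / 8 ==
 * 29, so the "32 Bucket" zone stores a header plus 29 item pointers in
 * exactly 256 bytes.  The header size is an assumption here; only the
 * shape of the calculation matters.
 */
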
/*
 * Flags and enumerations to be passed to internal functions.
 */
enum zfreeskip {
	SKIP_NONE =	0,
	SKIP_CNT =	0x00000001,
	SKIP_DTOR =	0x00010000,
	SKIP_FINI =	0x00020000,
};

/* Prototypes.. */

void	uma_startup1(vm_offset_t);
void	uma_startup2(void);

static void *noobj_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
static void *page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
static void *pcpu_page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
static void *startup_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
static void *contig_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
static void page_free(void *, vm_size_t, uint8_t);
static void pcpu_page_free(void *, vm_size_t, uint8_t);
static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int, int, int);
static void cache_drain(uma_zone_t);
static void bucket_drain(uma_zone_t, uma_bucket_t);
static void bucket_cache_reclaim(uma_zone_t zone, bool);
static int keg_ctor(void *, int, void *, int);
static void keg_dtor(void *, int, void *);
static int zone_ctor(void *, int, void *, int);
static void zone_dtor(void *, int, void *);
static inline void item_dtor(uma_zone_t zone, void *item, int size,
    void *udata, enum zfreeskip skip);
static int zero_init(void *, int, int);
static void zone_free_bucket(uma_zone_t zone, uma_bucket_t bucket, void *udata,
    int itemdomain, bool ws);
static void zone_foreach(void (*zfunc)(uma_zone_t, void *), void *);
static void zone_foreach_unlocked(void (*zfunc)(uma_zone_t, void *), void *);
static void zone_timeout(uma_zone_t zone, void *);
static int hash_alloc(struct uma_hash *, u_int);
static int hash_expand(struct uma_hash *, struct uma_hash *);
static void hash_free(struct uma_hash *hash);
static void uma_timeout(void *);
static void uma_shutdown(void);
static void *zone_alloc_item(uma_zone_t, void *, int, int);
static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip);
static int zone_alloc_limit(uma_zone_t zone, int count, int flags);
static void zone_free_limit(uma_zone_t zone, int count);
static void bucket_enable(void);
static void bucket_init(void);
static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int);
static void bucket_free(uma_zone_t zone, uma_bucket_t, void *);
static void bucket_zone_drain(void);
static uma_bucket_t zone_alloc_bucket(uma_zone_t, void *, int, int);
static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
static void slab_free_item(uma_zone_t zone, uma_slab_t slab, void *item);
static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
    uma_fini fini, int align, uint32_t flags);
static int zone_import(void *, void **, int, int, int);
static void zone_release(void *, void **, int);
static bool cache_alloc(uma_zone_t, uma_cache_t, void *, int);
static bool cache_free(uma_zone_t, uma_cache_t, void *, void *, int);

static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_uma_zone_allocs(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_uma_zone_frees(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_uma_zone_flags(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_uma_slab_efficiency(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_uma_zone_items(SYSCTL_HANDLER_ARGS);

static uint64_t uma_zone_get_allocs(uma_zone_t zone);

static SYSCTL_NODE(_vm, OID_AUTO, debug, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Memory allocation debugging");

#ifdef INVARIANTS
static uint64_t uma_keg_get_allocs(uma_keg_t zone);
static inline struct noslabbits *slab_dbg_bits(uma_slab_t slab, uma_keg_t keg);

static bool uma_dbg_kskip(uma_keg_t keg, void *mem);
static bool uma_dbg_zskip(uma_zone_t zone, void *mem);
static void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item);
static void uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item);

static u_int dbg_divisor = 1;
SYSCTL_UINT(_vm_debug, OID_AUTO, divisor,
    CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &dbg_divisor, 0,
    "Debug & thrash every Nth item in the memory allocator");

static counter_u64_t uma_dbg_cnt = EARLY_COUNTER;
static counter_u64_t uma_skip_cnt = EARLY_COUNTER;
SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, trashed, CTLFLAG_RD,
    &uma_dbg_cnt, "memory items debugged");
SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, skipped, CTLFLAG_RD,
    &uma_skip_cnt, "memory items skipped, not debugged");
#endif

SYSCTL_NODE(_vm, OID_AUTO, uma, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Universal Memory Allocator");

SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLFLAG_MPSAFE|CTLTYPE_INT,
    0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones");

SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLFLAG_MPSAFE|CTLTYPE_STRUCT,
    0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");

static int zone_warnings = 1;
SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RWTUN, &zone_warnings, 0,
    "Warn when UMA zones become full");

static int multipage_slabs = 1;
TUNABLE_INT("vm.debug.uma_multipage_slabs", &multipage_slabs);
SYSCTL_INT(_vm_debug, OID_AUTO, uma_multipage_slabs,
    CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &multipage_slabs, 0,
    "UMA may choose larger slab sizes for better efficiency");

/*
 * Select the slab zone for an offpage slab with the given maximum item count.
 */
static inline uma_zone_t
slabzone(int ipers)
{

	return (slabzones[ipers > SLABZONE0_SETSIZE]);
}

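/*
 * For example, with 4 KB pages SLABZONE0_SETSIZE is 256, so a keg whose
 * offpage slabs hold 100 items takes its headers from slabzones[0],
 * while a keg packing 300 items into a multi-page slab is routed to the
 * larger slabzones[1] headers.
 */
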
/*
 * This routine checks to see whether or not it's safe to enable buckets.
 */
static void
bucket_enable(void)
{

	KASSERT(booted >= BOOT_KVA, ("Bucket enable before init"));
	bucketdisable = vm_page_count_min();
}

/*
 * Initialize bucket_zones, the array of zones of buckets of various sizes.
 *
 * For each zone, calculate the memory required for each bucket, consisting
 * of the header and an array of pointers.
 */
static void
bucket_init(void)
{
	struct uma_bucket_zone *ubz;
	int size;

	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) {
		size = roundup(sizeof(struct uma_bucket), sizeof(void *));
		size += sizeof(void *) * ubz->ubz_entries;
		ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
		    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
		    UMA_ZONE_MTXCLASS | UMA_ZFLAG_BUCKET |
		    UMA_ZONE_FIRSTTOUCH);
	}
}

/*
 * Given a desired number of entries for a bucket, return the zone from which
 * to allocate the bucket.
 */
static struct uma_bucket_zone *
bucket_zone_lookup(int entries)
{
	struct uma_bucket_zone *ubz;

	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
		if (ubz->ubz_entries >= entries)
			return (ubz);
	ubz--;
	return (ubz);
}

static int
bucket_select(int size)
{
	struct uma_bucket_zone *ubz;

	ubz = &bucket_zones[0];
	if (size > ubz->ubz_maxsize)
		return MAX((ubz->ubz_maxsize * ubz->ubz_entries) / size, 1);

	for (; ubz->ubz_entries != 0; ubz++)
		if (ubz->ubz_maxsize < size)
			break;
	ubz--;
	return (ubz->ubz_entries);
}

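/*
 * Walking bucket_select() through the table above: for a zone with
 * 2048-byte items the scan stops at "16 Bucket" (maxsize 1024 < 2048)
 * and steps back to "8 Bucket", so such zones get 8-entry buckets.  For
 * items larger than the first row's maxsize, e.g. 8192 bytes, the early
 * return yields MAX((4096 * 2) / 8192, 1) == 1 entry.
 */
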
static uma_bucket_t
bucket_alloc(uma_zone_t zone, void *udata, int flags)
{
	struct uma_bucket_zone *ubz;
	uma_bucket_t bucket;

	/*
	 * Don't allocate buckets early in boot.
	 */
	if (__predict_false(booted < BOOT_KVA))
		return (NULL);

	/*
	 * To limit bucket recursion we store the original zone flags
	 * in a cookie passed via zalloc_arg/zfree_arg.  This allows the
	 * NOVM flag to persist even through deep recursions.  We also
	 * store ZFLAG_BUCKET once we have recursed attempting to allocate
	 * a bucket for a bucket zone so we do not allow infinite bucket
	 * recursion.  This cookie will even persist to frees of unused
	 * buckets via the allocation path or bucket allocations in the
	 * free path.
	 */
	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
		udata = (void *)(uintptr_t)zone->uz_flags;
	else {
		if ((uintptr_t)udata & UMA_ZFLAG_BUCKET)
			return (NULL);
		udata = (void *)((uintptr_t)udata | UMA_ZFLAG_BUCKET);
	}
	if (((uintptr_t)udata & UMA_ZONE_VM) != 0)
		flags |= M_NOVM;
	ubz = bucket_zone_lookup(atomic_load_16(&zone->uz_bucket_size));
	if (ubz->ubz_zone == zone && (ubz + 1)->ubz_entries != 0)
		ubz++;
	bucket = uma_zalloc_arg(ubz->ubz_zone, udata, flags);
	if (bucket) {
#ifdef INVARIANTS
		bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
#endif
		bucket->ub_cnt = 0;
		bucket->ub_entries = min(ubz->ubz_entries,
		    zone->uz_bucket_size_max);
		bucket->ub_seq = SMR_SEQ_INVALID;
		CTR3(KTR_UMA, "bucket_alloc: zone %s(%p) allocated bucket %p",
		    zone->uz_name, zone, bucket);
	}

	return (bucket);
}

static void
bucket_free(uma_zone_t zone, uma_bucket_t bucket, void *udata)
{
	struct uma_bucket_zone *ubz;

	if (bucket->ub_cnt != 0)
		bucket_drain(zone, bucket);

	KASSERT(bucket->ub_cnt == 0,
	    ("bucket_free: Freeing a non free bucket."));
	KASSERT(bucket->ub_seq == SMR_SEQ_INVALID,
	    ("bucket_free: Freeing an SMR bucket."));
	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
		udata = (void *)(uintptr_t)zone->uz_flags;
	ubz = bucket_zone_lookup(bucket->ub_entries);
	uma_zfree_arg(ubz->ubz_zone, bucket, udata);
}

static void
bucket_zone_drain(void)
{
	struct uma_bucket_zone *ubz;

	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
		uma_zone_reclaim(ubz->ubz_zone, UMA_RECLAIM_DRAIN);
}

/*
 * Acquire the domain lock and record contention.
 */
static uma_zone_domain_t
zone_domain_lock(uma_zone_t zone, int domain)
{
	uma_zone_domain_t zdom;
	bool lockfail;

	zdom = ZDOM_GET(zone, domain);
	lockfail = false;
	if (ZDOM_OWNED(zdom))
		lockfail = true;
	ZDOM_LOCK(zdom);
	/* This is unsynchronized.  The counter does not need to be precise. */
	if (lockfail && zone->uz_bucket_size < zone->uz_bucket_size_max)
		zone->uz_bucket_size++;
	return (zdom);
}

/*
 * Search for the domain with the least cached items and return it if it
 * is out of balance with the preferred domain.
 */
static __noinline int
zone_domain_lowest(uma_zone_t zone, int pref)
{
	long least, nitems, prefitems;
	int domain;
	int i;

	prefitems = least = LONG_MAX;
	domain = 0;
	for (i = 0; i < vm_ndomains; i++) {
		nitems = ZDOM_GET(zone, i)->uzd_nitems;
		if (nitems < least) {
			domain = i;
			least = nitems;
		}
		if (domain == pref)
			prefitems = nitems;
	}
	if (prefitems < least * 2)
		return (pref);

	return (domain);
}

/*
 * Search for the domain with the most cached items and return it or the
 * preferred domain if it has enough to proceed.
 */
static __noinline int
zone_domain_highest(uma_zone_t zone, int pref)
{
	long most, nitems;
	int domain;
	int i;

	if (ZDOM_GET(zone, pref)->uzd_nitems > BUCKET_MAX)
		return (pref);

	most = 0;
	domain = 0;
	for (i = 0; i < vm_ndomains; i++) {
		nitems = ZDOM_GET(zone, i)->uzd_nitems;
		if (nitems > most) {
			domain = i;
			most = nitems;
		}
	}

	return (domain);
}

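/*
 * A small worked example for the two selection helpers above, assuming
 * two domains: zone_domain_highest(zone, pref) returns pref directly if
 * it already caches more than BUCKET_MAX items; otherwise, with
 * uzd_nitems of 50 and 200, it picks domain 1.  zone_domain_lowest()
 * sticks with the preferred domain unless it caches at least twice as
 * many items as the emptiest domain, in which case the emptiest domain
 * is chosen to restore balance.
 */
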
/*
 * Safely subtract cnt from imax.
 */
static void
zone_domain_imax_sub(uma_zone_domain_t zdom, int cnt)
{
	long new;
	long old;

	old = zdom->uzd_imax;
	do {
		if (old <= cnt)
			new = 0;
		else
			new = old - cnt;
	} while (atomic_fcmpset_long(&zdom->uzd_imax, &old, new) == 0);
}

/*
 * Set the maximum imax value.
 */
static void
zone_domain_imax_set(uma_zone_domain_t zdom, int nitems)
{
	long old;

	old = zdom->uzd_imax;
	do {
		if (old >= nitems)
			break;
	} while (atomic_fcmpset_long(&zdom->uzd_imax, &old, nitems) == 0);
}

/*
 * Attempt to satisfy an allocation by retrieving a full bucket from one of the
 * zone's caches.  If a bucket is found the zone is not locked on return.
 */
static uma_bucket_t
zone_fetch_bucket(uma_zone_t zone, uma_zone_domain_t zdom, bool reclaim)
{
	uma_bucket_t bucket;
	int i;
	bool dtor = false;

	ZDOM_LOCK_ASSERT(zdom);

	if ((bucket = STAILQ_FIRST(&zdom->uzd_buckets)) == NULL)
		return (NULL);

	/* SMR buckets cannot be reused until readers expire. */
	if ((zone->uz_flags & UMA_ZONE_SMR) != 0 &&
	    bucket->ub_seq != SMR_SEQ_INVALID) {
		if (!smr_poll(zone->uz_smr, bucket->ub_seq, false))
			return (NULL);
		bucket->ub_seq = SMR_SEQ_INVALID;
		dtor = (zone->uz_dtor != NULL) || UMA_ALWAYS_CTORDTOR;
		if (STAILQ_NEXT(bucket, ub_link) != NULL)
			zdom->uzd_seq = STAILQ_NEXT(bucket, ub_link)->ub_seq;
	}
	STAILQ_REMOVE_HEAD(&zdom->uzd_buckets, ub_link);

	KASSERT(zdom->uzd_nitems >= bucket->ub_cnt,
	    ("%s: item count underflow (%ld, %d)",
	    __func__, zdom->uzd_nitems, bucket->ub_cnt));
	KASSERT(bucket->ub_cnt > 0,
	    ("%s: empty bucket in bucket cache", __func__));
	zdom->uzd_nitems -= bucket->ub_cnt;

	/*
	 * Shift the bounds of the current WSS interval to avoid
	 * perturbing the estimate.
	 */
	if (reclaim) {
		zdom->uzd_imin -= lmin(zdom->uzd_imin, bucket->ub_cnt);
		zone_domain_imax_sub(zdom, bucket->ub_cnt);
	} else if (zdom->uzd_imin > zdom->uzd_nitems)
		zdom->uzd_imin = zdom->uzd_nitems;

	ZDOM_UNLOCK(zdom);
	if (dtor)
		for (i = 0; i < bucket->ub_cnt; i++)
			item_dtor(zone, bucket->ub_bucket[i], zone->uz_size,
			    NULL, SKIP_NONE);

	return (bucket);
}

/*
 * Insert a full bucket into the specified cache.  The "ws" parameter indicates
 * whether the bucket's contents should be counted as part of the zone's
 * working set.  The bucket may be freed if it exceeds the bucket limit.
 */
static void
zone_put_bucket(uma_zone_t zone, int domain, uma_bucket_t bucket, void *udata,
    const bool ws)
{
	uma_zone_domain_t zdom;

	/* We don't cache empty buckets.  This can happen after a reclaim. */
	if (bucket->ub_cnt == 0)
		goto out;
	zdom = zone_domain_lock(zone, domain);

	/*
	 * Conditionally set the maximum number of items.
	 */
	zdom->uzd_nitems += bucket->ub_cnt;
	if (__predict_true(zdom->uzd_nitems < zone->uz_bucket_max)) {
		if (ws)
			zone_domain_imax_set(zdom, zdom->uzd_nitems);
		if (STAILQ_EMPTY(&zdom->uzd_buckets))
			zdom->uzd_seq = bucket->ub_seq;

		/*
		 * Try to promote reuse of recently used items.  For items
		 * protected by SMR, try to defer reuse to minimize polling.
		 */
		if (bucket->ub_seq == SMR_SEQ_INVALID)
			STAILQ_INSERT_HEAD(&zdom->uzd_buckets, bucket, ub_link);
		else
			STAILQ_INSERT_TAIL(&zdom->uzd_buckets, bucket, ub_link);
		ZDOM_UNLOCK(zdom);
		return;
	}
	zdom->uzd_nitems -= bucket->ub_cnt;
	ZDOM_UNLOCK(zdom);
out:
	bucket_free(zone, bucket, udata);
}

/* Pops an item out of a per-cpu cache bucket. */
static inline void *
cache_bucket_pop(uma_cache_t cache, uma_cache_bucket_t bucket)
{
	void *item;

	CRITICAL_ASSERT(curthread);

	bucket->ucb_cnt--;
	item = bucket->ucb_bucket->ub_bucket[bucket->ucb_cnt];
#ifdef INVARIANTS
	bucket->ucb_bucket->ub_bucket[bucket->ucb_cnt] = NULL;
	KASSERT(item != NULL, ("uma_zalloc: Bucket pointer mangled."));
#endif
	cache->uc_allocs++;

	return (item);
}

/* Pushes an item into a per-cpu cache bucket. */
static inline void
cache_bucket_push(uma_cache_t cache, uma_cache_bucket_t bucket, void *item)
{

	CRITICAL_ASSERT(curthread);
	KASSERT(bucket->ucb_bucket->ub_bucket[bucket->ucb_cnt] == NULL,
	    ("uma_zfree: Freeing to non free bucket index."));

	bucket->ucb_bucket->ub_bucket[bucket->ucb_cnt] = item;
	bucket->ucb_cnt++;
	cache->uc_frees++;
}

774 */ 775 static inline uma_bucket_t 776 cache_bucket_unload(uma_cache_bucket_t bucket) 777 { 778 uma_bucket_t b; 779 780 b = bucket->ucb_bucket; 781 if (b != NULL) { 782 MPASS(b->ub_entries == bucket->ucb_entries); 783 b->ub_cnt = bucket->ucb_cnt; 784 bucket->ucb_bucket = NULL; 785 bucket->ucb_entries = bucket->ucb_cnt = 0; 786 } 787 788 return (b); 789 } 790 791 static inline uma_bucket_t 792 cache_bucket_unload_alloc(uma_cache_t cache) 793 { 794 795 return (cache_bucket_unload(&cache->uc_allocbucket)); 796 } 797 798 static inline uma_bucket_t 799 cache_bucket_unload_free(uma_cache_t cache) 800 { 801 802 return (cache_bucket_unload(&cache->uc_freebucket)); 803 } 804 805 static inline uma_bucket_t 806 cache_bucket_unload_cross(uma_cache_t cache) 807 { 808 809 return (cache_bucket_unload(&cache->uc_crossbucket)); 810 } 811 812 /* 813 * Load a bucket into a per-cpu cache bucket. 814 */ 815 static inline void 816 cache_bucket_load(uma_cache_bucket_t bucket, uma_bucket_t b) 817 { 818 819 CRITICAL_ASSERT(curthread); 820 MPASS(bucket->ucb_bucket == NULL); 821 MPASS(b->ub_seq == SMR_SEQ_INVALID); 822 823 bucket->ucb_bucket = b; 824 bucket->ucb_cnt = b->ub_cnt; 825 bucket->ucb_entries = b->ub_entries; 826 } 827 828 static inline void 829 cache_bucket_load_alloc(uma_cache_t cache, uma_bucket_t b) 830 { 831 832 cache_bucket_load(&cache->uc_allocbucket, b); 833 } 834 835 static inline void 836 cache_bucket_load_free(uma_cache_t cache, uma_bucket_t b) 837 { 838 839 cache_bucket_load(&cache->uc_freebucket, b); 840 } 841 842 #ifdef NUMA 843 static inline void 844 cache_bucket_load_cross(uma_cache_t cache, uma_bucket_t b) 845 { 846 847 cache_bucket_load(&cache->uc_crossbucket, b); 848 } 849 #endif 850 851 /* 852 * Copy and preserve ucb_spare. 853 */ 854 static inline void 855 cache_bucket_copy(uma_cache_bucket_t b1, uma_cache_bucket_t b2) 856 { 857 858 b1->ucb_bucket = b2->ucb_bucket; 859 b1->ucb_entries = b2->ucb_entries; 860 b1->ucb_cnt = b2->ucb_cnt; 861 } 862 863 /* 864 * Swap two cache buckets. 865 */ 866 static inline void 867 cache_bucket_swap(uma_cache_bucket_t b1, uma_cache_bucket_t b2) 868 { 869 struct uma_cache_bucket b3; 870 871 CRITICAL_ASSERT(curthread); 872 873 cache_bucket_copy(&b3, b1); 874 cache_bucket_copy(b1, b2); 875 cache_bucket_copy(b2, &b3); 876 } 877 878 /* 879 * Attempt to fetch a bucket from a zone on behalf of the current cpu cache. 880 */ 881 static uma_bucket_t 882 cache_fetch_bucket(uma_zone_t zone, uma_cache_t cache, int domain) 883 { 884 uma_zone_domain_t zdom; 885 uma_bucket_t bucket; 886 887 /* 888 * Avoid the lock if possible. 889 */ 890 zdom = ZDOM_GET(zone, domain); 891 if (zdom->uzd_nitems == 0) 892 return (NULL); 893 894 if ((cache_uz_flags(cache) & UMA_ZONE_SMR) != 0 && 895 !smr_poll(zone->uz_smr, zdom->uzd_seq, false)) 896 return (NULL); 897 898 /* 899 * Check the zone's cache of buckets. 
/*
 * Attempt to fetch a bucket from a zone on behalf of the current cpu cache.
 */
static uma_bucket_t
cache_fetch_bucket(uma_zone_t zone, uma_cache_t cache, int domain)
{
	uma_zone_domain_t zdom;
	uma_bucket_t bucket;

	/*
	 * Avoid the lock if possible.
	 */
	zdom = ZDOM_GET(zone, domain);
	if (zdom->uzd_nitems == 0)
		return (NULL);

	if ((cache_uz_flags(cache) & UMA_ZONE_SMR) != 0 &&
	    !smr_poll(zone->uz_smr, zdom->uzd_seq, false))
		return (NULL);

	/*
	 * Check the zone's cache of buckets.
	 */
	zdom = zone_domain_lock(zone, domain);
	if ((bucket = zone_fetch_bucket(zone, zdom, false)) != NULL)
		return (bucket);
	ZDOM_UNLOCK(zdom);

	return (NULL);
}

static void
zone_log_warning(uma_zone_t zone)
{
	static const struct timeval warninterval = { 300, 0 };

	if (!zone_warnings || zone->uz_warning == NULL)
		return;

	if (ratecheck(&zone->uz_ratecheck, &warninterval))
		printf("[zone: %s] %s\n", zone->uz_name, zone->uz_warning);
}

static inline void
zone_maxaction(uma_zone_t zone)
{

	if (zone->uz_maxaction.ta_func != NULL)
		taskqueue_enqueue(taskqueue_thread, &zone->uz_maxaction);
}

/*
 * Routine called by timeout which is used to fire off some time interval
 * based calculations.  (stats, hash size, etc.)
 *
 * Arguments:
 *	arg  Unused
 *
 * Returns:
 *	Nothing
 */
static void
uma_timeout(void *unused)
{
	bucket_enable();
	zone_foreach(zone_timeout, NULL);

	/* Reschedule this event */
	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
}

/*
 * Update the working set size estimate for the zone's bucket cache.
 * The constants chosen here are somewhat arbitrary.  With an update period of
 * 20s (UMA_TIMEOUT), this estimate is dominated by zone activity over the
 * last 100s.
 */
static void
zone_domain_update_wss(uma_zone_domain_t zdom)
{
	long wss;

	ZDOM_LOCK(zdom);
	MPASS(zdom->uzd_imax >= zdom->uzd_imin);
	wss = zdom->uzd_imax - zdom->uzd_imin;
	zdom->uzd_imax = zdom->uzd_imin = zdom->uzd_nitems;
	zdom->uzd_wss = (4 * wss + zdom->uzd_wss) / 5;
	ZDOM_UNLOCK(zdom);
}

/*
 * Routine to perform timeout driven calculations.  This expands the
 * hashes and does per cpu statistics aggregation.
 *
 * Returns nothing.
 */
static void
zone_timeout(uma_zone_t zone, void *unused)
{
	uma_keg_t keg;
	u_int slabs, pages;

	if ((zone->uz_flags & UMA_ZFLAG_HASH) == 0)
		goto update_wss;

	keg = zone->uz_keg;

	/*
	 * Hash zones are non-numa by definition so the first domain
	 * is the only one present.
	 */
	KEG_LOCK(keg, 0);
	pages = keg->uk_domain[0].ud_pages;

	/*
	 * Expand the keg hash table.
	 *
	 * This is done if the number of slabs is larger than the hash size.
	 * What I'm trying to do here is completely eliminate collisions.
	 * This may be a little aggressive.  Should I allow for two
	 * collisions max?
	 */
	if ((slabs = pages / keg->uk_ppera) > keg->uk_hash.uh_hashsize) {
		struct uma_hash newhash;
		struct uma_hash oldhash;
		int ret;

		/*
		 * This is so involved because allocating and freeing
		 * while the keg lock is held will lead to deadlock.
		 * I have to do everything in stages and check for
		 * races.
		 */
		KEG_UNLOCK(keg, 0);
		ret = hash_alloc(&newhash, 1 << fls(slabs));
		KEG_LOCK(keg, 0);
		if (ret) {
			if (hash_expand(&keg->uk_hash, &newhash)) {
				oldhash = keg->uk_hash;
				keg->uk_hash = newhash;
			} else
				oldhash = newhash;

			KEG_UNLOCK(keg, 0);
			hash_free(&oldhash);
			goto update_wss;
		}
	}
	KEG_UNLOCK(keg, 0);

update_wss:
	for (int i = 0; i < vm_ndomains; i++)
		zone_domain_update_wss(ZDOM_GET(zone, i));
}

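/*
 * To make the decay concrete: uzd_wss is an exponential moving average
 * weighted 4:1 toward the newest 20-second interval.  If the previous
 * estimate was 100 items and the interval just ended observed
 * imax - imin == 50, the new estimate is (4 * 50 + 100) / 5 == 60.  A
 * burst thus fades from the estimate over roughly five periods (100s),
 * matching the comment on zone_domain_update_wss().
 */
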
/*
 * Allocate and zero fill the next sized hash table from the appropriate
 * backing store.
 *
 * Arguments:
 *	hash  A new hash structure with the old hash size in uh_hashsize
 *
 * Returns:
 *	1 on success and 0 on failure.
 */
static int
hash_alloc(struct uma_hash *hash, u_int size)
{
	size_t alloc;

	KASSERT(powerof2(size), ("hash size must be power of 2"));
	if (size > UMA_HASH_SIZE_INIT) {
		hash->uh_hashsize = size;
		alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
		hash->uh_slab_hash = malloc(alloc, M_UMAHASH, M_NOWAIT);
	} else {
		alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
		hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
		    UMA_ANYDOMAIN, M_WAITOK);
		hash->uh_hashsize = UMA_HASH_SIZE_INIT;
	}
	if (hash->uh_slab_hash) {
		bzero(hash->uh_slab_hash, alloc);
		hash->uh_hashmask = hash->uh_hashsize - 1;
		return (1);
	}

	return (0);
}

/*
 * Expands the hash table for HASH zones.  This is done from zone_timeout
 * to reduce collisions.  This must not be done in the regular allocation
 * path, otherwise, we can recurse on the vm while allocating pages.
 *
 * Arguments:
 *	oldhash  The hash you want to expand
 *	newhash  The hash structure for the new table
 *
 * Returns:
 *	1 if the entries were rehashed into the new table, 0 otherwise.
 */
static int
hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
{
	uma_hash_slab_t slab;
	u_int hval;
	u_int idx;

	if (!newhash->uh_slab_hash)
		return (0);

	if (oldhash->uh_hashsize >= newhash->uh_hashsize)
		return (0);

	/*
	 * I need to investigate hash algorithms for resizing without a
	 * full rehash.
	 */

	for (idx = 0; idx < oldhash->uh_hashsize; idx++)
		while (!LIST_EMPTY(&oldhash->uh_slab_hash[idx])) {
			slab = LIST_FIRST(&oldhash->uh_slab_hash[idx]);
			LIST_REMOVE(slab, uhs_hlink);
			hval = UMA_HASH(newhash, slab->uhs_data);
			LIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
			    slab, uhs_hlink);
		}

	return (1);
}

/*
 * Free the hash bucket to the appropriate backing store.
 *
 * Arguments:
 *	hash  The hash table whose buckets are being freed
 *
 * Returns:
 *	Nothing
 */
static void
hash_free(struct uma_hash *hash)
{
	if (hash->uh_slab_hash == NULL)
		return;
	if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
		zone_free_item(hashzone, hash->uh_slab_hash, NULL, SKIP_NONE);
	else
		free(hash->uh_slab_hash, M_UMAHASH);
}

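/*
 * Expansion sizing, worked through: zone_timeout() grows the table once
 * the slab count exceeds the hash size, asking hash_alloc() for
 * 1 << fls(slabs) entries.  For example, 300 slabs against a 256-entry
 * table triggers a resize, and since fls(300) == 9 the replacement
 * table has 512 entries, restoring roughly one slab per chain.
 */
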
/*
 * Frees all outstanding items in a bucket
 *
 * Arguments:
 *	zone    The zone to free to, must be unlocked.
 *	bucket  The free/alloc bucket with items.
 *
 * Returns:
 *	Nothing
 */
static void
bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
{
	int i;

	if (bucket->ub_cnt == 0)
		return;

	if ((zone->uz_flags & UMA_ZONE_SMR) != 0 &&
	    bucket->ub_seq != SMR_SEQ_INVALID) {
		smr_wait(zone->uz_smr, bucket->ub_seq);
		bucket->ub_seq = SMR_SEQ_INVALID;
		for (i = 0; i < bucket->ub_cnt; i++)
			item_dtor(zone, bucket->ub_bucket[i],
			    zone->uz_size, NULL, SKIP_NONE);
	}
	if (zone->uz_fini)
		for (i = 0; i < bucket->ub_cnt; i++)
			zone->uz_fini(bucket->ub_bucket[i], zone->uz_size);
	zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt);
	if (zone->uz_max_items > 0)
		zone_free_limit(zone, bucket->ub_cnt);
#ifdef INVARIANTS
	bzero(bucket->ub_bucket, sizeof(void *) * bucket->ub_cnt);
#endif
	bucket->ub_cnt = 0;
}

/*
 * Drains the per cpu caches for a zone.
 *
 * NOTE: This may only be called while the zone is being torn down, and not
 * during normal operation.  This is necessary in order that we do not have
 * to migrate CPUs to drain the per-CPU caches.
 *
 * Arguments:
 *	zone  The zone to drain, must be unlocked.
 *
 * Returns:
 *	Nothing
 */
static void
cache_drain(uma_zone_t zone)
{
	uma_cache_t cache;
	uma_bucket_t bucket;
	smr_seq_t seq;
	int cpu;

	/*
	 * XXX: It is safe to not lock the per-CPU caches, because we're
	 * tearing down the zone anyway.  I.e., there will be no further use
	 * of the caches at this point.
	 *
	 * XXX: It would be good to be able to assert that the zone is being
	 * torn down to prevent improper use of cache_drain().
	 */
	seq = SMR_SEQ_INVALID;
	if ((zone->uz_flags & UMA_ZONE_SMR) != 0)
		seq = smr_advance(zone->uz_smr);
	CPU_FOREACH(cpu) {
		cache = &zone->uz_cpu[cpu];
		bucket = cache_bucket_unload_alloc(cache);
		if (bucket != NULL)
			bucket_free(zone, bucket, NULL);
		bucket = cache_bucket_unload_free(cache);
		if (bucket != NULL) {
			bucket->ub_seq = seq;
			bucket_free(zone, bucket, NULL);
		}
		bucket = cache_bucket_unload_cross(cache);
		if (bucket != NULL) {
			bucket->ub_seq = seq;
			bucket_free(zone, bucket, NULL);
		}
	}
	bucket_cache_reclaim(zone, true);
}

static void
cache_shrink(uma_zone_t zone, void *unused)
{

	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
		return;

	zone->uz_bucket_size =
	    (zone->uz_bucket_size_min + zone->uz_bucket_size) / 2;
}

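/*
 * cache_shrink() halves the distance to the floor rather than jumping
 * straight to it: a zone running with uz_bucket_size of 128 and a
 * minimum of 2 drops to (2 + 128) / 2 == 65, so repeated passes shrink
 * bucket sizes geometrically instead of all at once.
 */
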
static void
cache_drain_safe_cpu(uma_zone_t zone, void *unused)
{
	uma_cache_t cache;
	uma_bucket_t b1, b2, b3;
	int domain;

	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
		return;

	b1 = b2 = b3 = NULL;
	critical_enter();
	cache = &zone->uz_cpu[curcpu];
	domain = PCPU_GET(domain);
	b1 = cache_bucket_unload_alloc(cache);

	/*
	 * Don't flush SMR zone buckets.  This leaves the zone without a
	 * bucket and forces every free to synchronize().
	 */
	if ((zone->uz_flags & UMA_ZONE_SMR) == 0) {
		b2 = cache_bucket_unload_free(cache);
		b3 = cache_bucket_unload_cross(cache);
	}
	critical_exit();

	if (b1 != NULL)
		zone_free_bucket(zone, b1, NULL, domain, false);
	if (b2 != NULL)
		zone_free_bucket(zone, b2, NULL, domain, false);
	if (b3 != NULL) {
		/* Adjust the domain so it goes to zone_free_cross. */
		domain = (domain + 1) % vm_ndomains;
		zone_free_bucket(zone, b3, NULL, domain, false);
	}
}

/*
 * Safely drain the per-CPU caches of a zone (or of all zones) into the
 * zone bucket caches.  This is an expensive call because it needs to bind
 * to all CPUs one by one and enter a critical section on each of them in
 * order to safely access their cache buckets.  The zone lock must not be
 * held when calling this function.
 */
static void
pcpu_cache_drain_safe(uma_zone_t zone)
{
	int cpu;

	/*
	 * Polite bucket size shrinking was not enough, shrink aggressively.
	 */
	if (zone)
		cache_shrink(zone, NULL);
	else
		zone_foreach(cache_shrink, NULL);

	CPU_FOREACH(cpu) {
		thread_lock(curthread);
		sched_bind(curthread, cpu);
		thread_unlock(curthread);

		if (zone)
			cache_drain_safe_cpu(zone, NULL);
		else
			zone_foreach(cache_drain_safe_cpu, NULL);
	}
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);
}

/*
 * Reclaim cached buckets from a zone.  All buckets are reclaimed if the
 * caller requested a drain, otherwise the per-domain caches are trimmed to
 * their estimated working set size.
 */
static void
bucket_cache_reclaim(uma_zone_t zone, bool drain)
{
	uma_zone_domain_t zdom;
	uma_bucket_t bucket;
	long target;
	int i;

	/*
	 * Shrink the zone bucket size to ensure that the per-CPU caches
	 * don't grow too large.
	 */
	if (zone->uz_bucket_size > zone->uz_bucket_size_min)
		zone->uz_bucket_size--;

	for (i = 0; i < vm_ndomains; i++) {
		/*
		 * The cross bucket is partially filled and not part of
		 * the item count.  Reclaim it individually here.
		 */
		zdom = ZDOM_GET(zone, i);
		if ((zone->uz_flags & UMA_ZONE_SMR) == 0 || drain) {
			ZONE_CROSS_LOCK(zone);
			bucket = zdom->uzd_cross;
			zdom->uzd_cross = NULL;
			ZONE_CROSS_UNLOCK(zone);
			if (bucket != NULL)
				bucket_free(zone, bucket, NULL);
		}

		/*
		 * If we were asked to drain the zone, we are done only once
		 * this bucket cache is empty.  Otherwise, we reclaim items in
		 * excess of the zone's estimated working set size.  If the
		 * difference nitems - imin is larger than the WSS estimate,
		 * then the estimate will grow at the end of this interval and
		 * we ignore the historical average.
		 */
		ZDOM_LOCK(zdom);
		target = drain ? 0 : lmax(zdom->uzd_wss, zdom->uzd_nitems -
		    zdom->uzd_imin);
		while (zdom->uzd_nitems > target) {
			bucket = zone_fetch_bucket(zone, zdom, true);
			if (bucket == NULL)
				break;
			bucket_free(zone, bucket, NULL);
			ZDOM_LOCK(zdom);
		}
		ZDOM_UNLOCK(zdom);
	}
}

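/*
 * A worked trim target for the logic above: with uzd_wss == 40,
 * uzd_nitems == 100 and uzd_imin == 70, the non-drain target is
 * lmax(40, 100 - 70) == 40, so buckets are freed until at most 40
 * cached items remain.  Had imin been 20, nitems - imin == 80 would
 * exceed the historical WSS and the trim would stop at 80, deferring
 * to the growing estimate.
 */
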
static void
keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
{
	uint8_t *mem;
	int i;
	uint8_t flags;

	CTR4(KTR_UMA, "keg_free_slab keg %s(%p) slab %p, returning %d bytes",
	    keg->uk_name, keg, slab, PAGE_SIZE * keg->uk_ppera);

	mem = slab_data(slab, keg);
	flags = slab->us_flags;
	i = start;
	if (keg->uk_fini != NULL) {
		for (i--; i > -1; i--)
#ifdef INVARIANTS
		/*
		 * trash_fini implies that dtor was trash_dtor.  trash_fini
		 * would check that memory hasn't been modified since free,
		 * which executed trash_dtor.
		 * That's why we need to run uma_dbg_kskip() check here,
		 * albeit we don't perform the skip check for other init/fini
		 * invocations.
		 */
		if (!uma_dbg_kskip(keg, slab_item(slab, keg, i)) ||
		    keg->uk_fini != trash_fini)
#endif
			keg->uk_fini(slab_item(slab, keg, i), keg->uk_size);
	}
	if (keg->uk_flags & UMA_ZFLAG_OFFPAGE)
		zone_free_item(slabzone(keg->uk_ipers), slab_tohashslab(slab),
		    NULL, SKIP_NONE);
	keg->uk_freef(mem, PAGE_SIZE * keg->uk_ppera, flags);
	uma_total_dec(PAGE_SIZE * keg->uk_ppera);
}

static void
keg_drain_domain(uma_keg_t keg, int domain)
{
	struct slabhead freeslabs;
	uma_domain_t dom;
	uma_slab_t slab, tmp;
	uint32_t i, stofree, stokeep, partial;

	dom = &keg->uk_domain[domain];
	LIST_INIT(&freeslabs);

	CTR4(KTR_UMA, "keg_drain %s(%p) domain %d free items: %u",
	    keg->uk_name, keg, domain, dom->ud_free_items);

	KEG_LOCK(keg, domain);

	/*
	 * Are the free items in partially allocated slabs sufficient to meet
	 * the reserve?  If not, compute the number of fully free slabs that
	 * must be kept.
	 */
	partial = dom->ud_free_items - dom->ud_free_slabs * keg->uk_ipers;
	if (partial < keg->uk_reserve) {
		stokeep = min(dom->ud_free_slabs,
		    howmany(keg->uk_reserve - partial, keg->uk_ipers));
	} else {
		stokeep = 0;
	}
	stofree = dom->ud_free_slabs - stokeep;

	/*
	 * Partition the free slabs into two sets: those that must be kept in
	 * order to maintain the reserve, and those that may be released back
	 * to the system.  Since one set may be much larger than the other,
	 * populate the smaller of the two sets and swap them if necessary.
	 */
	for (i = min(stofree, stokeep); i > 0; i--) {
		slab = LIST_FIRST(&dom->ud_free_slab);
		LIST_REMOVE(slab, us_link);
		LIST_INSERT_HEAD(&freeslabs, slab, us_link);
	}
	if (stofree > stokeep)
		LIST_SWAP(&freeslabs, &dom->ud_free_slab, uma_slab, us_link);

	if ((keg->uk_flags & UMA_ZFLAG_HASH) != 0) {
		LIST_FOREACH(slab, &freeslabs, us_link)
			UMA_HASH_REMOVE(&keg->uk_hash, slab);
	}
	dom->ud_free_items -= stofree * keg->uk_ipers;
	dom->ud_free_slabs -= stofree;
	dom->ud_pages -= stofree * keg->uk_ppera;
	KEG_UNLOCK(keg, domain);

	LIST_FOREACH_SAFE(slab, &freeslabs, us_link, tmp)
		keg_free_slab(keg, slab, keg->uk_ipers);
}

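/*
 * The reserve arithmetic above, worked through: take a keg with
 * uk_ipers == 10, uk_reserve == 20, ud_free_slabs == 3 and
 * ud_free_items == 45.  Partial slabs hold 45 - 3 * 10 == 15 free
 * items, 5 short of the reserve, so stokeep is min(3, howmany(5, 10))
 * == 1 fully free slab and the remaining stofree == 2 slabs are handed
 * back to the VM.
 */
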
/*
 * Frees pages from a keg back to the system.  This is done on demand from
 * the pageout daemon.
 *
 * Returns nothing.
 */
static void
keg_drain(uma_keg_t keg)
{
	int i;

	if ((keg->uk_flags & UMA_ZONE_NOFREE) != 0)
		return;
	for (i = 0; i < vm_ndomains; i++)
		keg_drain_domain(keg, i);
}

static void
zone_reclaim(uma_zone_t zone, int waitok, bool drain)
{

	/*
	 * Set draining to interlock with zone_dtor() so we can release our
	 * locks as we go.  Only dtor() should do a WAITOK call since it
	 * is the only call that knows the structure will still be available
	 * when it wakes up.
	 */
	ZONE_LOCK(zone);
	while (zone->uz_flags & UMA_ZFLAG_RECLAIMING) {
		if (waitok == M_NOWAIT)
			goto out;
		msleep(zone, &ZDOM_GET(zone, 0)->uzd_lock, PVM, "zonedrain",
		    1);
	}
	zone->uz_flags |= UMA_ZFLAG_RECLAIMING;
	ZONE_UNLOCK(zone);
	bucket_cache_reclaim(zone, drain);

	/*
	 * The DRAINING flag protects us from being freed while
	 * we're running.  Normally the uma_rwlock would protect us but we
	 * must be able to release and acquire the right lock for each keg.
	 */
	if ((zone->uz_flags & UMA_ZFLAG_CACHE) == 0)
		keg_drain(zone->uz_keg);
	ZONE_LOCK(zone);
	zone->uz_flags &= ~UMA_ZFLAG_RECLAIMING;
	wakeup(zone);
out:
	ZONE_UNLOCK(zone);
}

static void
zone_drain(uma_zone_t zone, void *unused)
{

	zone_reclaim(zone, M_NOWAIT, true);
}

static void
zone_trim(uma_zone_t zone, void *unused)
{

	zone_reclaim(zone, M_NOWAIT, false);
}

/*
 * Allocate a new slab for a keg and insert it into the partial slab list.
 * The keg should be unlocked on entry.  If the allocation succeeds it will
 * be locked on return.
 *
 * Arguments:
 *	flags   Wait flags for the item initialization routine
 *	aflags  Wait flags for the slab allocation
 *
 * Returns:
 *	The slab that was allocated or NULL if there is no memory and the
 *	caller specified M_NOWAIT.
 */
static uma_slab_t
keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int domain, int flags,
    int aflags)
{
	uma_domain_t dom;
	uma_alloc allocf;
	uma_slab_t slab;
	unsigned long size;
	uint8_t *mem;
	uint8_t sflags;
	int i;

	KASSERT(domain >= 0 && domain < vm_ndomains,
	    ("keg_alloc_slab: domain %d out of range", domain));

	allocf = keg->uk_allocf;
	slab = NULL;
	mem = NULL;
	if (keg->uk_flags & UMA_ZFLAG_OFFPAGE) {
		uma_hash_slab_t hslab;
		hslab = zone_alloc_item(slabzone(keg->uk_ipers), NULL,
		    domain, aflags);
		if (hslab == NULL)
			goto fail;
		slab = &hslab->uhs_slab;
	}

	/*
	 * This reproduces the old vm_zone behavior of zero filling pages the
	 * first time they are added to a zone.
	 *
	 * Malloced items are zeroed in uma_zalloc.
	 */

	if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
		aflags |= M_ZERO;
	else
		aflags &= ~M_ZERO;

	if (keg->uk_flags & UMA_ZONE_NODUMP)
		aflags |= M_NODUMP;

	/* zone is passed for legacy reasons. */
	size = keg->uk_ppera * PAGE_SIZE;
	mem = allocf(zone, size, domain, &sflags, aflags);
	if (mem == NULL) {
		if (keg->uk_flags & UMA_ZFLAG_OFFPAGE)
			zone_free_item(slabzone(keg->uk_ipers),
			    slab_tohashslab(slab), NULL, SKIP_NONE);
		goto fail;
	}
	uma_total_inc(size);

	/* For HASH zones all pages go to the same uma_domain. */
	if ((keg->uk_flags & UMA_ZFLAG_HASH) != 0)
		domain = 0;

	/* Point the slab into the allocated memory */
	if (!(keg->uk_flags & UMA_ZFLAG_OFFPAGE))
		slab = (uma_slab_t)(mem + keg->uk_pgoff);
	else
		slab_tohashslab(slab)->uhs_data = mem;

	if (keg->uk_flags & UMA_ZFLAG_VTOSLAB)
		for (i = 0; i < keg->uk_ppera; i++)
			vsetzoneslab((vm_offset_t)mem + (i * PAGE_SIZE),
			    zone, slab);

	slab->us_freecount = keg->uk_ipers;
	slab->us_flags = sflags;
	slab->us_domain = domain;

	BIT_FILL(keg->uk_ipers, &slab->us_free);
#ifdef INVARIANTS
	BIT_ZERO(keg->uk_ipers, slab_dbg_bits(slab, keg));
#endif

	if (keg->uk_init != NULL) {
		for (i = 0; i < keg->uk_ipers; i++)
			if (keg->uk_init(slab_item(slab, keg, i),
			    keg->uk_size, flags) != 0)
				break;
		if (i != keg->uk_ipers) {
			keg_free_slab(keg, slab, i);
			goto fail;
		}
	}
	KEG_LOCK(keg, domain);

	CTR3(KTR_UMA, "keg_alloc_slab: allocated slab %p for %s(%p)",
	    slab, keg->uk_name, keg);

	if (keg->uk_flags & UMA_ZFLAG_HASH)
		UMA_HASH_INSERT(&keg->uk_hash, slab, mem);

	/*
	 * If we got a slab here it's safe to mark it partially used
	 * and return.  We assume that the caller is going to remove
	 * at least one item.
	 */
	dom = &keg->uk_domain[domain];
	LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
	dom->ud_pages += keg->uk_ppera;
	dom->ud_free_items += keg->uk_ipers;

	return (slab);

fail:
	return (NULL);
}

/*
 * This function is intended to be used early on in place of page_alloc() so
 * that we may use the boot time page cache to satisfy allocations before
 * the VM is ready.
 */
static void *
startup_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
    int wait)
{
	vm_paddr_t pa;
	vm_page_t m;
	void *mem;
	int pages;
	int i;

	pages = howmany(bytes, PAGE_SIZE);
	KASSERT(pages > 0, ("%s can't reserve 0 pages", __func__));

	*pflag = UMA_SLAB_BOOT;
	m = vm_page_alloc_contig_domain(NULL, 0, domain,
	    malloc2vm_flags(wait) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED, pages,
	    (vm_paddr_t)0, ~(vm_paddr_t)0, 1, 0, VM_MEMATTR_DEFAULT);
	if (m == NULL)
		return (NULL);

	pa = VM_PAGE_TO_PHYS(m);
	for (i = 0; i < pages; i++, pa += PAGE_SIZE) {
#if defined(__aarch64__) || defined(__amd64__) || defined(__mips__) || \
    defined(__riscv) || defined(__powerpc64__)
		if ((wait & M_NODUMP) == 0)
			dump_add_page(pa);
#endif
	}

	/* Allocate KVA and indirectly advance bootmem. */
	mem = (void *)pmap_map(&bootmem, m->phys_addr,
	    m->phys_addr + (pages * PAGE_SIZE), VM_PROT_READ | VM_PROT_WRITE);
	if ((wait & M_ZERO) != 0)
		bzero(mem, pages * PAGE_SIZE);

	return (mem);
}

static void
startup_free(void *mem, vm_size_t bytes)
{
	vm_offset_t va;
	vm_page_t m;

	va = (vm_offset_t)mem;
	m = PHYS_TO_VM_PAGE(pmap_kextract(va));
	pmap_remove(kernel_pmap, va, va + bytes);
	for (; bytes != 0; bytes -= PAGE_SIZE, m++) {
#if defined(__aarch64__) || defined(__amd64__) || defined(__mips__) || \
    defined(__riscv) || defined(__powerpc64__)
		dump_drop_page(VM_PAGE_TO_PHYS(m));
#endif
		vm_page_unwire_noq(m);
		vm_page_free(m);
	}
}

/*
 * Allocates a number of pages from the system
 *
 * Arguments:
 *	bytes  The number of bytes requested
 *	wait   Shall we wait?
 *
 * Returns:
 *	A pointer to the allocated memory or possibly
 *	NULL if M_NOWAIT is set.
 */
static void *
page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
    int wait)
{
	void *p;	/* Returned page */

	*pflag = UMA_SLAB_KERNEL;
	p = (void *)kmem_malloc_domainset(DOMAINSET_FIXED(domain), bytes, wait);

	return (p);
}

static void *
pcpu_page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
    int wait)
{
	struct pglist alloctail;
	vm_offset_t addr, zkva;
	int cpu, flags;
	vm_page_t p, p_next;
#ifdef NUMA
	struct pcpu *pc;
#endif

	MPASS(bytes == (mp_maxid + 1) * PAGE_SIZE);

	TAILQ_INIT(&alloctail);
	flags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
	    malloc2vm_flags(wait);
	*pflag = UMA_SLAB_KERNEL;
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (CPU_ABSENT(cpu)) {
			p = vm_page_alloc(NULL, 0, flags);
		} else {
#ifndef NUMA
			p = vm_page_alloc(NULL, 0, flags);
#else
			pc = pcpu_find(cpu);
			if (__predict_false(VM_DOMAIN_EMPTY(pc->pc_domain)))
				p = NULL;
			else
				p = vm_page_alloc_domain(NULL, 0,
				    pc->pc_domain, flags);
			if (__predict_false(p == NULL))
				p = vm_page_alloc(NULL, 0, flags);
#endif
		}
		if (__predict_false(p == NULL))
			goto fail;
		TAILQ_INSERT_TAIL(&alloctail, p, listq);
	}
	if ((addr = kva_alloc(bytes)) == 0)
		goto fail;
	zkva = addr;
	TAILQ_FOREACH(p, &alloctail, listq) {
		pmap_qenter(zkva, &p, 1);
		zkva += PAGE_SIZE;
	}
	return ((void *)addr);
fail:
	TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
		vm_page_unwire_noq(p);
		vm_page_free(p);
	}
	return (NULL);
}

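/*
 * The layout pcpu_page_alloc() produces, illustrated for a 4-CPU system
 * (mp_maxid == 3): the caller requests bytes == 4 * PAGE_SIZE, one page
 * is allocated per CPU, each from that CPU's NUMA domain where
 * possible, and the four pages are then mapped back to back into one
 * contiguous KVA range so CPU n's copy sits at a fixed PAGE_SIZE
 * stride from the base address.
 */
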
/*
 * Allocates a number of pages not belonging to a VM object
 *
 * Arguments:
 *	bytes  The number of bytes requested
 *	wait   Shall we wait?
 *
 * Returns:
 *	A pointer to the allocated memory or possibly
 *	NULL if M_NOWAIT is set.
 */
static void *
noobj_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags,
    int wait)
{
	TAILQ_HEAD(, vm_page) alloctail;
	u_long npages;
	vm_offset_t retkva, zkva;
	vm_page_t p, p_next;
	uma_keg_t keg;

	TAILQ_INIT(&alloctail);
	keg = zone->uz_keg;

	npages = howmany(bytes, PAGE_SIZE);
	while (npages > 0) {
		p = vm_page_alloc_domain(NULL, 0, domain, VM_ALLOC_INTERRUPT |
		    VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
		    ((wait & M_WAITOK) != 0 ? VM_ALLOC_WAITOK :
		    VM_ALLOC_NOWAIT));
		if (p != NULL) {
			/*
			 * Since the page does not belong to an object, its
			 * listq is unused.
			 */
			TAILQ_INSERT_TAIL(&alloctail, p, listq);
			npages--;
			continue;
		}
		/*
		 * Page allocation failed, free intermediate pages and
		 * exit.
		 */
		TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
			vm_page_unwire_noq(p);
			vm_page_free(p);
		}
		return (NULL);
	}
	*flags = UMA_SLAB_PRIV;
	zkva = keg->uk_kva +
	    atomic_fetchadd_long(&keg->uk_offset, round_page(bytes));
	retkva = zkva;
	TAILQ_FOREACH(p, &alloctail, listq) {
		pmap_qenter(zkva, &p, 1);
		zkva += PAGE_SIZE;
	}

	return ((void *)retkva);
}

/*
 * Allocate physically contiguous pages.
 */
static void *
contig_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
    int wait)
{

	*pflag = UMA_SLAB_KERNEL;
	return ((void *)kmem_alloc_contig_domainset(DOMAINSET_FIXED(domain),
	    bytes, wait, 0, ~(vm_paddr_t)0, 1, 0, VM_MEMATTR_DEFAULT));
}

/*
 * Frees a number of pages to the system
 *
 * Arguments:
 *	mem    A pointer to the memory to be freed
 *	size   The size of the memory being freed
 *	flags  The original p->us_flags field
 *
 * Returns:
 *	Nothing
 */
static void
page_free(void *mem, vm_size_t size, uint8_t flags)
{

	if ((flags & UMA_SLAB_BOOT) != 0) {
		startup_free(mem, size);
		return;
	}

	KASSERT((flags & UMA_SLAB_KERNEL) != 0,
	    ("UMA: page_free used with invalid flags %x", flags));

	kmem_free((vm_offset_t)mem, size);
}

/*
 * Frees pcpu zone allocations
 *
 * Arguments:
 *	mem    A pointer to the memory to be freed
 *	size   The size of the memory being freed
 *	flags  The original p->us_flags field
 *
 * Returns:
 *	Nothing
 */
static void
pcpu_page_free(void *mem, vm_size_t size, uint8_t flags)
{
	vm_offset_t sva, curva;
	vm_paddr_t paddr;
	vm_page_t m;

	MPASS(size == (mp_maxid + 1) * PAGE_SIZE);

	if ((flags & UMA_SLAB_BOOT) != 0) {
		startup_free(mem, size);
		return;
	}

	sva = (vm_offset_t)mem;
	for (curva = sva; curva < sva + size; curva += PAGE_SIZE) {
		paddr = pmap_kextract(curva);
		m = PHYS_TO_VM_PAGE(paddr);
		vm_page_unwire_noq(m);
		vm_page_free(m);
	}
	pmap_qremove(sva, size >> PAGE_SHIFT);
	kva_free(sva, size);
}

/*
 * Zero fill initializer
 *
 * Arguments/Returns follow uma_init specifications
 */
static int
zero_init(void *mem, int size, int flags)
{
	bzero(mem, size);
	return (0);
}

#ifdef INVARIANTS
static struct noslabbits *
slab_dbg_bits(uma_slab_t slab, uma_keg_t keg)
{

	return ((void *)((char *)&slab->us_free + BITSET_SIZE(keg->uk_ipers)));
}
#endif

/*
 * Actual size of embedded struct slab (!OFFPAGE).
 */
static size_t
slab_sizeof(int nitems)
{
	size_t s;

	s = sizeof(struct uma_slab) + BITSET_SIZE(nitems) * SLAB_BITSETS;
	return (roundup(s, UMA_ALIGN_PTR + 1));
}

#define	UMA_FIXPT_SHIFT	31
#define	UMA_FRAC_FIXPT(n, d)						\
	((uint32_t)(((uint64_t)(n) << UMA_FIXPT_SHIFT) / (d)))
#define	UMA_FIXPT_PCT(f)						\
	((u_int)(((uint64_t)100 * (f)) >> UMA_FIXPT_SHIFT))
#define	UMA_PCT_FIXPT(pct)	UMA_FRAC_FIXPT((pct), 100)
#define	UMA_MIN_EFF	UMA_PCT_FIXPT(100 - UMA_MAX_WASTE)

/*
 * Compute the number of items that will fit in a slab.  If hdr is true, the
 * item count may be limited to provide space in the slab for an inline slab
 * header.  Otherwise, all slab space will be provided for item storage.
 */
static u_int
slab_ipers_hdr(u_int size, u_int rsize, u_int slabsize, bool hdr)
{
	u_int ipers;
	u_int padpi;

	/* The padding between items is not needed after the last item. */
	padpi = rsize - size;

	if (hdr) {
		/*
		 * Start with the maximum item count and remove items until
		 * the slab header fits alongside the allocatable memory.
		 */
		for (ipers = MIN(SLAB_MAX_SETSIZE,
		    (slabsize + padpi - slab_sizeof(1)) / rsize);
		    ipers > 0 &&
		    ipers * rsize - padpi + slab_sizeof(ipers) > slabsize;
		    ipers--)
			continue;
	} else {
		ipers = MIN((slabsize + padpi) / rsize, SLAB_MAX_SETSIZE);
	}

	return (ipers);
}

struct keg_layout_result {
	u_int format;
	u_int slabsize;
	u_int ipers;
	u_int eff;
};

static void
keg_layout_one(uma_keg_t keg, u_int rsize, u_int slabsize, u_int fmt,
    struct keg_layout_result *kl)
{
	u_int total;

	kl->format = fmt;
	kl->slabsize = slabsize;

	/* Handle INTERNAL as inline with an extra page. */
	if ((fmt & UMA_ZFLAG_INTERNAL) != 0) {
		kl->format &= ~UMA_ZFLAG_INTERNAL;
		kl->slabsize += PAGE_SIZE;
	}

	kl->ipers = slab_ipers_hdr(keg->uk_size, rsize, kl->slabsize,
	    (fmt & UMA_ZFLAG_OFFPAGE) == 0);

	/* Account for memory used by an offpage slab header. */
	total = kl->slabsize;
	if ((fmt & UMA_ZFLAG_OFFPAGE) != 0)
		total += slabzone(kl->ipers)->uz_keg->uk_rsize;

	kl->eff = UMA_FRAC_FIXPT(kl->ipers * rsize, total);
}

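/*
 * The fixed-point helpers express layout efficiency in Q31 form.  For a
 * hypothetical keg with 704-byte items (after alignment) packing 5
 * items into a 4096-byte slab, keg_layout_one() computes
 * UMA_FRAC_FIXPT(5 * 704, 4096), and UMA_FIXPT_PCT() of that value is
 * 85.  Assuming the usual UMA_MAX_WASTE of 10, UMA_MIN_EFF is 90%, so
 * this layout would be rejected in favor of a larger slab or an
 * offpage header.
 */
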
2028 * 2029 * Arguments 2030 * keg The zone we should initialize 2031 * 2032 * Returns 2033 * Nothing 2034 */ 2035 static void 2036 keg_layout(uma_keg_t keg) 2037 { 2038 struct keg_layout_result kl = {}, kl_tmp; 2039 u_int fmts[2]; 2040 u_int alignsize; 2041 u_int nfmt; 2042 u_int pages; 2043 u_int rsize; 2044 u_int slabsize; 2045 u_int i, j; 2046 2047 KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0 || 2048 (keg->uk_size <= UMA_PCPU_ALLOC_SIZE && 2049 (keg->uk_flags & UMA_ZONE_CACHESPREAD) == 0), 2050 ("%s: cannot configure for PCPU: keg=%s, size=%u, flags=0x%b", 2051 __func__, keg->uk_name, keg->uk_size, keg->uk_flags, 2052 PRINT_UMA_ZFLAGS)); 2053 KASSERT((keg->uk_flags & (UMA_ZFLAG_INTERNAL | UMA_ZONE_VM)) == 0 || 2054 (keg->uk_flags & (UMA_ZONE_NOTOUCH | UMA_ZONE_PCPU)) == 0, 2055 ("%s: incompatible flags 0x%b", __func__, keg->uk_flags, 2056 PRINT_UMA_ZFLAGS)); 2057 2058 alignsize = keg->uk_align + 1; 2059 2060 /* 2061 * Calculate the size of each allocation (rsize) according to 2062 * alignment. If the requested size is smaller than we have 2063 * allocation bits for we round it up. 2064 */ 2065 rsize = MAX(keg->uk_size, UMA_SMALLEST_UNIT); 2066 rsize = roundup2(rsize, alignsize); 2067 2068 if ((keg->uk_flags & UMA_ZONE_CACHESPREAD) != 0) { 2069 /* 2070 * We want one item to start on every align boundary in a page. 2071 * To do this we will span pages. We will also extend the item 2072 * by the size of align if it is an even multiple of align. 2073 * Otherwise, it would fall on the same boundary every time. 2074 */ 2075 if ((rsize & alignsize) == 0) 2076 rsize += alignsize; 2077 slabsize = rsize * (PAGE_SIZE / alignsize); 2078 slabsize = MIN(slabsize, rsize * SLAB_MAX_SETSIZE); 2079 slabsize = MIN(slabsize, UMA_CACHESPREAD_MAX_SIZE); 2080 slabsize = round_page(slabsize); 2081 } else { 2082 /* 2083 * Start with a slab size of as many pages as it takes to 2084 * represent a single item. We will try to fit as many 2085 * additional items into the slab as possible. 2086 */ 2087 slabsize = round_page(keg->uk_size); 2088 } 2089 2090 /* Build a list of all of the available formats for this keg. */ 2091 nfmt = 0; 2092 2093 /* Evaluate an inline slab layout. */ 2094 if ((keg->uk_flags & (UMA_ZONE_NOTOUCH | UMA_ZONE_PCPU)) == 0) 2095 fmts[nfmt++] = 0; 2096 2097 /* TODO: vm_page-embedded slab. */ 2098 2099 /* 2100 * We can't do OFFPAGE if we're internal or if we've been 2101 * asked to not go to the VM for buckets. If we do this we 2102 * may end up going to the VM for slabs which we do not want 2103 * to do if we're UMA_ZONE_VM, which clearly forbids it. 2104 * In those cases, evaluate a pseudo-format called INTERNAL 2105 * which has an inline slab header and one extra page to 2106 * guarantee that it fits. 2107 * 2108 * Otherwise, see if using an OFFPAGE slab will improve our 2109 * efficiency. 2110 */ 2111 if ((keg->uk_flags & (UMA_ZFLAG_INTERNAL | UMA_ZONE_VM)) != 0) 2112 fmts[nfmt++] = UMA_ZFLAG_INTERNAL; 2113 else 2114 fmts[nfmt++] = UMA_ZFLAG_OFFPAGE; 2115 2116 /* 2117 * Choose a slab size and format which satisfy the minimum efficiency. 2118 * Prefer the smallest slab size that meets the constraints. 2119 * 2120 * Start with a minimum slab size, to accommodate CACHESPREAD. Then, 2121 * for small items (up to PAGE_SIZE), the iteration increment is one 2122 * page; and for large items, the increment is one item. 
2123 */ 2124 i = (slabsize + rsize - keg->uk_size) / MAX(PAGE_SIZE, rsize); 2125 KASSERT(i >= 1, ("keg %s(%p) flags=0x%b slabsize=%u, rsize=%u, i=%u", 2126 keg->uk_name, keg, keg->uk_flags, PRINT_UMA_ZFLAGS, slabsize, 2127 rsize, i)); 2128 for ( ; ; i++) { 2129 slabsize = (rsize <= PAGE_SIZE) ? ptoa(i) : 2130 round_page(rsize * (i - 1) + keg->uk_size); 2131 2132 for (j = 0; j < nfmt; j++) { 2133 /* Only if we have no viable format yet. */ 2134 if ((fmts[j] & UMA_ZFLAG_INTERNAL) != 0 && 2135 kl.ipers > 0) 2136 continue; 2137 2138 keg_layout_one(keg, rsize, slabsize, fmts[j], &kl_tmp); 2139 if (kl_tmp.eff <= kl.eff) 2140 continue; 2141 2142 kl = kl_tmp; 2143 2144 CTR6(KTR_UMA, "keg %s layout: format %#x " 2145 "(ipers %u * rsize %u) / slabsize %#x = %u%% eff", 2146 keg->uk_name, kl.format, kl.ipers, rsize, 2147 kl.slabsize, UMA_FIXPT_PCT(kl.eff)); 2148 2149 /* Stop when we reach the minimum efficiency. */ 2150 if (kl.eff >= UMA_MIN_EFF) 2151 break; 2152 } 2153 2154 if (kl.eff >= UMA_MIN_EFF || !multipage_slabs || 2155 slabsize >= SLAB_MAX_SETSIZE * rsize || 2156 (keg->uk_flags & (UMA_ZONE_PCPU | UMA_ZONE_CONTIG)) != 0) 2157 break; 2158 } 2159 2160 pages = atop(kl.slabsize); 2161 if ((keg->uk_flags & UMA_ZONE_PCPU) != 0) 2162 pages *= mp_maxid + 1; 2163 2164 keg->uk_rsize = rsize; 2165 keg->uk_ipers = kl.ipers; 2166 keg->uk_ppera = pages; 2167 keg->uk_flags |= kl.format; 2168 2169 /* 2170 * How do we find the slab header if it is offpage or if not all item 2171 * start addresses are in the same page? We could solve the latter 2172 * case with vaddr alignment, but we don't. 2173 */ 2174 if ((keg->uk_flags & UMA_ZFLAG_OFFPAGE) != 0 || 2175 (keg->uk_ipers - 1) * rsize >= PAGE_SIZE) { 2176 if ((keg->uk_flags & UMA_ZONE_NOTPAGE) != 0) 2177 keg->uk_flags |= UMA_ZFLAG_HASH; 2178 else 2179 keg->uk_flags |= UMA_ZFLAG_VTOSLAB; 2180 } 2181 2182 CTR6(KTR_UMA, "%s: keg=%s, flags=%#x, rsize=%u, ipers=%u, ppera=%u", 2183 __func__, keg->uk_name, keg->uk_flags, rsize, keg->uk_ipers, 2184 pages); 2185 KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_MAX_SETSIZE, 2186 ("%s: keg=%s, flags=0x%b, rsize=%u, ipers=%u, ppera=%u", __func__, 2187 keg->uk_name, keg->uk_flags, PRINT_UMA_ZFLAGS, rsize, 2188 keg->uk_ipers, pages)); 2189 } 2190 2191 /* 2192 * Keg header ctor. This initializes all fields, locks, etc. And inserts 2193 * the keg onto the global keg list. 2194 * 2195 * Arguments/Returns follow uma_ctor specifications 2196 * udata Actually uma_kctor_args 2197 */ 2198 static int 2199 keg_ctor(void *mem, int size, void *udata, int flags) 2200 { 2201 struct uma_kctor_args *arg = udata; 2202 uma_keg_t keg = mem; 2203 uma_zone_t zone; 2204 int i; 2205 2206 bzero(keg, size); 2207 keg->uk_size = arg->size; 2208 keg->uk_init = arg->uminit; 2209 keg->uk_fini = arg->fini; 2210 keg->uk_align = arg->align; 2211 keg->uk_reserve = 0; 2212 keg->uk_flags = arg->flags; 2213 2214 /* 2215 * We use a global round-robin policy by default. Zones with 2216 * UMA_ZONE_FIRSTTOUCH set will use first-touch instead, in which 2217 * case the iterator is never run. 2218 */ 2219 keg->uk_dr.dr_policy = DOMAINSET_RR(); 2220 keg->uk_dr.dr_iter = 0; 2221 2222 /* 2223 * The primary zone is passed to us at keg-creation time. 
2224 */ 2225 zone = arg->zone; 2226 keg->uk_name = zone->uz_name; 2227 2228 if (arg->flags & UMA_ZONE_ZINIT) 2229 keg->uk_init = zero_init; 2230 2231 if (arg->flags & UMA_ZONE_MALLOC) 2232 keg->uk_flags |= UMA_ZFLAG_VTOSLAB; 2233 2234 #ifndef SMP 2235 keg->uk_flags &= ~UMA_ZONE_PCPU; 2236 #endif 2237 2238 keg_layout(keg); 2239 2240 /* 2241 * Use a first-touch NUMA policy for kegs that pmap_extract() will 2242 * work on. Use round-robin for everything else. 2243 * 2244 * Zones may override the default by specifying either. 2245 */ 2246 #ifdef NUMA 2247 if ((keg->uk_flags & 2248 (UMA_ZONE_ROUNDROBIN | UMA_ZFLAG_CACHE | UMA_ZONE_NOTPAGE)) == 0) 2249 keg->uk_flags |= UMA_ZONE_FIRSTTOUCH; 2250 else if ((keg->uk_flags & UMA_ZONE_FIRSTTOUCH) == 0) 2251 keg->uk_flags |= UMA_ZONE_ROUNDROBIN; 2252 #endif 2253 2254 /* 2255 * If we haven't booted yet we need allocations to go through the 2256 * startup cache until the vm is ready. 2257 */ 2258 #ifdef UMA_MD_SMALL_ALLOC 2259 if (keg->uk_ppera == 1) 2260 keg->uk_allocf = uma_small_alloc; 2261 else 2262 #endif 2263 if (booted < BOOT_KVA) 2264 keg->uk_allocf = startup_alloc; 2265 else if (keg->uk_flags & UMA_ZONE_PCPU) 2266 keg->uk_allocf = pcpu_page_alloc; 2267 else if ((keg->uk_flags & UMA_ZONE_CONTIG) != 0 && keg->uk_ppera > 1) 2268 keg->uk_allocf = contig_alloc; 2269 else 2270 keg->uk_allocf = page_alloc; 2271 #ifdef UMA_MD_SMALL_ALLOC 2272 if (keg->uk_ppera == 1) 2273 keg->uk_freef = uma_small_free; 2274 else 2275 #endif 2276 if (keg->uk_flags & UMA_ZONE_PCPU) 2277 keg->uk_freef = pcpu_page_free; 2278 else 2279 keg->uk_freef = page_free; 2280 2281 /* 2282 * Initialize keg's locks. 2283 */ 2284 for (i = 0; i < vm_ndomains; i++) 2285 KEG_LOCK_INIT(keg, i, (arg->flags & UMA_ZONE_MTXCLASS)); 2286 2287 /* 2288 * If we're putting the slab header in the actual page we need to 2289 * figure out where in each page it goes. See slab_sizeof 2290 * definition. 2291 */ 2292 if (!(keg->uk_flags & UMA_ZFLAG_OFFPAGE)) { 2293 size_t shsize; 2294 2295 shsize = slab_sizeof(keg->uk_ipers); 2296 keg->uk_pgoff = (PAGE_SIZE * keg->uk_ppera) - shsize; 2297 /* 2298 * The only way the following is possible is if with our 2299 * UMA_ALIGN_PTR adjustments we are now bigger than 2300 * UMA_SLAB_SIZE. I haven't checked whether this is 2301 * mathematically possible for all cases, so we make 2302 * sure here anyway. 2303 */ 2304 KASSERT(keg->uk_pgoff + shsize <= PAGE_SIZE * keg->uk_ppera, 2305 ("zone %s ipers %d rsize %d size %d slab won't fit", 2306 zone->uz_name, keg->uk_ipers, keg->uk_rsize, keg->uk_size)); 2307 } 2308 2309 if (keg->uk_flags & UMA_ZFLAG_HASH) 2310 hash_alloc(&keg->uk_hash, 0); 2311 2312 CTR3(KTR_UMA, "keg_ctor %p zone %s(%p)", keg, zone->uz_name, zone); 2313 2314 LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link); 2315 2316 rw_wlock(&uma_rwlock); 2317 LIST_INSERT_HEAD(&uma_kegs, keg, uk_link); 2318 rw_wunlock(&uma_rwlock); 2319 return (0); 2320 } 2321 2322 static void 2323 zone_kva_available(uma_zone_t zone, void *unused) 2324 { 2325 uma_keg_t keg; 2326 2327 if ((zone->uz_flags & UMA_ZFLAG_CACHE) != 0) 2328 return; 2329 KEG_GET(zone, keg); 2330 2331 if (keg->uk_allocf == startup_alloc) { 2332 /* Switch to the real allocator. 
*/ 2333 if (keg->uk_flags & UMA_ZONE_PCPU) 2334 keg->uk_allocf = pcpu_page_alloc; 2335 else if ((keg->uk_flags & UMA_ZONE_CONTIG) != 0 && 2336 keg->uk_ppera > 1) 2337 keg->uk_allocf = contig_alloc; 2338 else 2339 keg->uk_allocf = page_alloc; 2340 } 2341 } 2342 2343 static void 2344 zone_alloc_counters(uma_zone_t zone, void *unused) 2345 { 2346 2347 zone->uz_allocs = counter_u64_alloc(M_WAITOK); 2348 zone->uz_frees = counter_u64_alloc(M_WAITOK); 2349 zone->uz_fails = counter_u64_alloc(M_WAITOK); 2350 zone->uz_xdomain = counter_u64_alloc(M_WAITOK); 2351 } 2352 2353 static void 2354 zone_alloc_sysctl(uma_zone_t zone, void *unused) 2355 { 2356 uma_zone_domain_t zdom; 2357 uma_domain_t dom; 2358 uma_keg_t keg; 2359 struct sysctl_oid *oid, *domainoid; 2360 int domains, i, cnt; 2361 static const char *nokeg = "cache zone"; 2362 char *c; 2363 2364 /* 2365 * Make a sysctl safe copy of the zone name by removing 2366 * any special characters and handling dups by appending 2367 * an index. 2368 */ 2369 if (zone->uz_namecnt != 0) { 2370 /* Count the number of decimal digits and '_' separator. */ 2371 for (i = 1, cnt = zone->uz_namecnt; cnt != 0; i++) 2372 cnt /= 10; 2373 zone->uz_ctlname = malloc(strlen(zone->uz_name) + i + 1, 2374 M_UMA, M_WAITOK); 2375 sprintf(zone->uz_ctlname, "%s_%d", zone->uz_name, 2376 zone->uz_namecnt); 2377 } else 2378 zone->uz_ctlname = strdup(zone->uz_name, M_UMA); 2379 for (c = zone->uz_ctlname; *c != '\0'; c++) 2380 if (strchr("./\\ -", *c) != NULL) 2381 *c = '_'; 2382 2383 /* 2384 * Basic parameters at the root. 2385 */ 2386 zone->uz_oid = SYSCTL_ADD_NODE(NULL, SYSCTL_STATIC_CHILDREN(_vm_uma), 2387 OID_AUTO, zone->uz_ctlname, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, ""); 2388 oid = zone->uz_oid; 2389 SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2390 "size", CTLFLAG_RD, &zone->uz_size, 0, "Allocation size"); 2391 SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2392 "flags", CTLFLAG_RD | CTLTYPE_STRING | CTLFLAG_MPSAFE, 2393 zone, 0, sysctl_handle_uma_zone_flags, "A", 2394 "Allocator configuration flags"); 2395 SYSCTL_ADD_U16(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2396 "bucket_size", CTLFLAG_RD, &zone->uz_bucket_size, 0, 2397 "Desired per-cpu cache size"); 2398 SYSCTL_ADD_U16(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2399 "bucket_size_max", CTLFLAG_RD, &zone->uz_bucket_size_max, 0, 2400 "Maximum allowed per-cpu cache size"); 2401 2402 /* 2403 * keg if present. 
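	 *
	 * For example, a zone named "mbuf" shows up as vm.uma.mbuf, with
	 * keg details such as vm.uma.mbuf.keg.ipers and, further below,
	 * statistics under vm.uma.mbuf.stats (an illustrative sketch of the
	 * tree built by this function).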
2404 */ 2405 if ((zone->uz_flags & UMA_ZFLAG_HASH) == 0) 2406 domains = vm_ndomains; 2407 else 2408 domains = 1; 2409 oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(zone->uz_oid), OID_AUTO, 2410 "keg", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, ""); 2411 keg = zone->uz_keg; 2412 if ((zone->uz_flags & UMA_ZFLAG_CACHE) == 0) { 2413 SYSCTL_ADD_CONST_STRING(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2414 "name", CTLFLAG_RD, keg->uk_name, "Keg name"); 2415 SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2416 "rsize", CTLFLAG_RD, &keg->uk_rsize, 0, 2417 "Real object size with alignment"); 2418 SYSCTL_ADD_U16(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2419 "ppera", CTLFLAG_RD, &keg->uk_ppera, 0, 2420 "pages per-slab allocation"); 2421 SYSCTL_ADD_U16(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2422 "ipers", CTLFLAG_RD, &keg->uk_ipers, 0, 2423 "items available per-slab"); 2424 SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2425 "align", CTLFLAG_RD, &keg->uk_align, 0, 2426 "item alignment mask"); 2427 SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2428 "reserve", CTLFLAG_RD, &keg->uk_reserve, 0, 2429 "number of reserved items"); 2430 SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2431 "efficiency", CTLFLAG_RD | CTLTYPE_INT | CTLFLAG_MPSAFE, 2432 keg, 0, sysctl_handle_uma_slab_efficiency, "I", 2433 "Slab utilization (100 - internal fragmentation %)"); 2434 domainoid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(oid), 2435 OID_AUTO, "domain", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, ""); 2436 for (i = 0; i < domains; i++) { 2437 dom = &keg->uk_domain[i]; 2438 oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(domainoid), 2439 OID_AUTO, VM_DOMAIN(i)->vmd_name, 2440 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, ""); 2441 SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2442 "pages", CTLFLAG_RD, &dom->ud_pages, 0, 2443 "Total pages currently allocated from VM"); 2444 SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2445 "free_items", CTLFLAG_RD, &dom->ud_free_items, 0, 2446 "items free in the slab layer"); 2447 } 2448 } else 2449 SYSCTL_ADD_CONST_STRING(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2450 "name", CTLFLAG_RD, nokeg, "Keg name"); 2451 2452 /* 2453 * Information about zone limits. 2454 */ 2455 oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(zone->uz_oid), OID_AUTO, 2456 "limit", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, ""); 2457 SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2458 "items", CTLFLAG_RD | CTLTYPE_U64 | CTLFLAG_MPSAFE, 2459 zone, 0, sysctl_handle_uma_zone_items, "QU", 2460 "Current number of allocated items if limit is set"); 2461 SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2462 "max_items", CTLFLAG_RD, &zone->uz_max_items, 0, 2463 "Maximum number of allocated and cached items"); 2464 SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2465 "sleepers", CTLFLAG_RD, &zone->uz_sleepers, 0, 2466 "Number of threads sleeping at limit"); 2467 SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2468 "sleeps", CTLFLAG_RD, &zone->uz_sleeps, 0, 2469 "Total zone limit sleeps"); 2470 SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2471 "bucket_max", CTLFLAG_RD, &zone->uz_bucket_max, 0, 2472 "Maximum number of items in each domain's bucket cache"); 2473 2474 /* 2475 * Per-domain zone information. 
2476 */ 2477 domainoid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(zone->uz_oid), 2478 OID_AUTO, "domain", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, ""); 2479 for (i = 0; i < domains; i++) { 2480 zdom = ZDOM_GET(zone, i); 2481 oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(domainoid), 2482 OID_AUTO, VM_DOMAIN(i)->vmd_name, 2483 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, ""); 2484 SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2485 "nitems", CTLFLAG_RD, &zdom->uzd_nitems, 2486 "number of items in this domain"); 2487 SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2488 "imax", CTLFLAG_RD, &zdom->uzd_imax, 2489 "maximum item count in this period"); 2490 SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2491 "imin", CTLFLAG_RD, &zdom->uzd_imin, 2492 "minimum item count in this period"); 2493 SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2494 "wss", CTLFLAG_RD, &zdom->uzd_wss, 2495 "Working set size"); 2496 } 2497 2498 /* 2499 * General statistics. 2500 */ 2501 oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(zone->uz_oid), OID_AUTO, 2502 "stats", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, ""); 2503 SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2504 "current", CTLFLAG_RD | CTLTYPE_INT | CTLFLAG_MPSAFE, 2505 zone, 1, sysctl_handle_uma_zone_cur, "I", 2506 "Current number of allocated items"); 2507 SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2508 "allocs", CTLFLAG_RD | CTLTYPE_U64 | CTLFLAG_MPSAFE, 2509 zone, 0, sysctl_handle_uma_zone_allocs, "QU", 2510 "Total allocation calls"); 2511 SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2512 "frees", CTLFLAG_RD | CTLTYPE_U64 | CTLFLAG_MPSAFE, 2513 zone, 0, sysctl_handle_uma_zone_frees, "QU", 2514 "Total free calls"); 2515 SYSCTL_ADD_COUNTER_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2516 "fails", CTLFLAG_RD, &zone->uz_fails, 2517 "Number of allocation failures"); 2518 SYSCTL_ADD_COUNTER_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO, 2519 "xdomain", CTLFLAG_RD, &zone->uz_xdomain, 2520 "Free calls from the wrong domain"); 2521 } 2522 2523 struct uma_zone_count { 2524 const char *name; 2525 int count; 2526 }; 2527 2528 static void 2529 zone_count(uma_zone_t zone, void *arg) 2530 { 2531 struct uma_zone_count *cnt; 2532 2533 cnt = arg; 2534 /* 2535 * Some zones are rapidly created with identical names and 2536 * destroyed out of order. This can lead to gaps in the count. 2537 * Use one greater than the maximum observed for this name. 2538 */ 2539 if (strcmp(zone->uz_name, cnt->name) == 0) 2540 cnt->count = MAX(cnt->count, 2541 zone->uz_namecnt + 1); 2542 } 2543 2544 static void 2545 zone_update_caches(uma_zone_t zone) 2546 { 2547 int i; 2548 2549 for (i = 0; i <= mp_maxid; i++) { 2550 cache_set_uz_size(&zone->uz_cpu[i], zone->uz_size); 2551 cache_set_uz_flags(&zone->uz_cpu[i], zone->uz_flags); 2552 } 2553 } 2554 2555 /* 2556 * Zone header ctor. This initializes all fields, locks, etc. 
2557 * 2558 * Arguments/Returns follow uma_ctor specifications 2559 * udata Actually uma_zctor_args 2560 */ 2561 static int 2562 zone_ctor(void *mem, int size, void *udata, int flags) 2563 { 2564 struct uma_zone_count cnt; 2565 struct uma_zctor_args *arg = udata; 2566 uma_zone_domain_t zdom; 2567 uma_zone_t zone = mem; 2568 uma_zone_t z; 2569 uma_keg_t keg; 2570 int i; 2571 2572 bzero(zone, size); 2573 zone->uz_name = arg->name; 2574 zone->uz_ctor = arg->ctor; 2575 zone->uz_dtor = arg->dtor; 2576 zone->uz_init = NULL; 2577 zone->uz_fini = NULL; 2578 zone->uz_sleeps = 0; 2579 zone->uz_bucket_size = 0; 2580 zone->uz_bucket_size_min = 0; 2581 zone->uz_bucket_size_max = BUCKET_MAX; 2582 zone->uz_flags = (arg->flags & UMA_ZONE_SMR); 2583 zone->uz_warning = NULL; 2584 /* The domain structures follow the cpu structures. */ 2585 zone->uz_bucket_max = ULONG_MAX; 2586 timevalclear(&zone->uz_ratecheck); 2587 2588 /* Count the number of duplicate names. */ 2589 cnt.name = arg->name; 2590 cnt.count = 0; 2591 zone_foreach(zone_count, &cnt); 2592 zone->uz_namecnt = cnt.count; 2593 ZONE_CROSS_LOCK_INIT(zone); 2594 2595 for (i = 0; i < vm_ndomains; i++) { 2596 zdom = ZDOM_GET(zone, i); 2597 ZDOM_LOCK_INIT(zone, zdom, (arg->flags & UMA_ZONE_MTXCLASS)); 2598 STAILQ_INIT(&zdom->uzd_buckets); 2599 } 2600 2601 #ifdef INVARIANTS 2602 if (arg->uminit == trash_init && arg->fini == trash_fini) 2603 zone->uz_flags |= UMA_ZFLAG_TRASH | UMA_ZFLAG_CTORDTOR; 2604 #endif 2605 2606 /* 2607 * This is a pure cache zone, no kegs. 2608 */ 2609 if (arg->import) { 2610 KASSERT((arg->flags & UMA_ZFLAG_CACHE) != 0, 2611 ("zone_ctor: Import specified for non-cache zone.")); 2612 zone->uz_flags = arg->flags; 2613 zone->uz_size = arg->size; 2614 zone->uz_import = arg->import; 2615 zone->uz_release = arg->release; 2616 zone->uz_arg = arg->arg; 2617 #ifdef NUMA 2618 /* 2619 * Cache zones are round-robin unless a policy is 2620 * specified because they may have incompatible 2621 * constraints. 2622 */ 2623 if ((zone->uz_flags & UMA_ZONE_FIRSTTOUCH) == 0) 2624 zone->uz_flags |= UMA_ZONE_ROUNDROBIN; 2625 #endif 2626 rw_wlock(&uma_rwlock); 2627 LIST_INSERT_HEAD(&uma_cachezones, zone, uz_link); 2628 rw_wunlock(&uma_rwlock); 2629 goto out; 2630 } 2631 2632 /* 2633 * Use the regular zone/keg/slab allocator. 
2634 */ 2635 zone->uz_import = zone_import; 2636 zone->uz_release = zone_release; 2637 zone->uz_arg = zone; 2638 keg = arg->keg; 2639 2640 if (arg->flags & UMA_ZONE_SECONDARY) { 2641 KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0, 2642 ("Secondary zone requested UMA_ZFLAG_INTERNAL")); 2643 KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg")); 2644 zone->uz_init = arg->uminit; 2645 zone->uz_fini = arg->fini; 2646 zone->uz_flags |= UMA_ZONE_SECONDARY; 2647 rw_wlock(&uma_rwlock); 2648 ZONE_LOCK(zone); 2649 LIST_FOREACH(z, &keg->uk_zones, uz_link) { 2650 if (LIST_NEXT(z, uz_link) == NULL) { 2651 LIST_INSERT_AFTER(z, zone, uz_link); 2652 break; 2653 } 2654 } 2655 ZONE_UNLOCK(zone); 2656 rw_wunlock(&uma_rwlock); 2657 } else if (keg == NULL) { 2658 if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini, 2659 arg->align, arg->flags)) == NULL) 2660 return (ENOMEM); 2661 } else { 2662 struct uma_kctor_args karg; 2663 int error; 2664 2665 /* We should only be here from uma_startup() */ 2666 karg.size = arg->size; 2667 karg.uminit = arg->uminit; 2668 karg.fini = arg->fini; 2669 karg.align = arg->align; 2670 karg.flags = (arg->flags & ~UMA_ZONE_SMR); 2671 karg.zone = zone; 2672 error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg, 2673 flags); 2674 if (error) 2675 return (error); 2676 } 2677 2678 /* Inherit properties from the keg. */ 2679 zone->uz_keg = keg; 2680 zone->uz_size = keg->uk_size; 2681 zone->uz_flags |= (keg->uk_flags & 2682 (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT)); 2683 2684 out: 2685 if (booted >= BOOT_PCPU) { 2686 zone_alloc_counters(zone, NULL); 2687 if (booted >= BOOT_RUNNING) 2688 zone_alloc_sysctl(zone, NULL); 2689 } else { 2690 zone->uz_allocs = EARLY_COUNTER; 2691 zone->uz_frees = EARLY_COUNTER; 2692 zone->uz_fails = EARLY_COUNTER; 2693 } 2694 2695 /* Caller requests a private SMR context. */ 2696 if ((zone->uz_flags & UMA_ZONE_SMR) != 0) 2697 zone->uz_smr = smr_create(zone->uz_name, 0, 0); 2698 2699 KASSERT((arg->flags & (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET)) != 2700 (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET), 2701 ("Invalid zone flag combination")); 2702 if (arg->flags & UMA_ZFLAG_INTERNAL) 2703 zone->uz_bucket_size_max = zone->uz_bucket_size = 0; 2704 if ((arg->flags & UMA_ZONE_MAXBUCKET) != 0) 2705 zone->uz_bucket_size = BUCKET_MAX; 2706 else if ((arg->flags & UMA_ZONE_NOBUCKET) != 0) 2707 zone->uz_bucket_size = 0; 2708 else 2709 zone->uz_bucket_size = bucket_select(zone->uz_size); 2710 zone->uz_bucket_size_min = zone->uz_bucket_size; 2711 if (zone->uz_dtor != NULL || zone->uz_ctor != NULL) 2712 zone->uz_flags |= UMA_ZFLAG_CTORDTOR; 2713 zone_update_caches(zone); 2714 2715 return (0); 2716 } 2717 2718 /* 2719 * Keg header dtor. This frees all data, destroys locks, frees the hash 2720 * table and removes the keg from the global list. 2721 * 2722 * Arguments/Returns follow uma_dtor specifications 2723 * udata unused 2724 */ 2725 static void 2726 keg_dtor(void *arg, int size, void *udata) 2727 { 2728 uma_keg_t keg; 2729 uint32_t free, pages; 2730 int i; 2731 2732 keg = (uma_keg_t)arg; 2733 free = pages = 0; 2734 for (i = 0; i < vm_ndomains; i++) { 2735 free += keg->uk_domain[i].ud_free_items; 2736 pages += keg->uk_domain[i].ud_pages; 2737 KEG_LOCK_FINI(keg, i); 2738 } 2739 if (pages != 0) 2740 printf("Freed UMA keg (%s) was not empty (%u items). " 2741 " Lost %u pages of memory.\n", 2742 keg->uk_name ? 
keg->uk_name : "", 2743 pages / keg->uk_ppera * keg->uk_ipers - free, pages); 2744 2745 hash_free(&keg->uk_hash); 2746 } 2747 2748 /* 2749 * Zone header dtor. 2750 * 2751 * Arguments/Returns follow uma_dtor specifications 2752 * udata unused 2753 */ 2754 static void 2755 zone_dtor(void *arg, int size, void *udata) 2756 { 2757 uma_zone_t zone; 2758 uma_keg_t keg; 2759 int i; 2760 2761 zone = (uma_zone_t)arg; 2762 2763 sysctl_remove_oid(zone->uz_oid, 1, 1); 2764 2765 if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL)) 2766 cache_drain(zone); 2767 2768 rw_wlock(&uma_rwlock); 2769 LIST_REMOVE(zone, uz_link); 2770 rw_wunlock(&uma_rwlock); 2771 if ((zone->uz_flags & (UMA_ZONE_SECONDARY | UMA_ZFLAG_CACHE)) == 0) { 2772 keg = zone->uz_keg; 2773 keg->uk_reserve = 0; 2774 } 2775 zone_reclaim(zone, M_WAITOK, true); 2776 2777 /* 2778 * We only destroy kegs from non secondary/non cache zones. 2779 */ 2780 if ((zone->uz_flags & (UMA_ZONE_SECONDARY | UMA_ZFLAG_CACHE)) == 0) { 2781 keg = zone->uz_keg; 2782 rw_wlock(&uma_rwlock); 2783 LIST_REMOVE(keg, uk_link); 2784 rw_wunlock(&uma_rwlock); 2785 zone_free_item(kegs, keg, NULL, SKIP_NONE); 2786 } 2787 counter_u64_free(zone->uz_allocs); 2788 counter_u64_free(zone->uz_frees); 2789 counter_u64_free(zone->uz_fails); 2790 counter_u64_free(zone->uz_xdomain); 2791 free(zone->uz_ctlname, M_UMA); 2792 for (i = 0; i < vm_ndomains; i++) 2793 ZDOM_LOCK_FINI(ZDOM_GET(zone, i)); 2794 ZONE_CROSS_LOCK_FINI(zone); 2795 } 2796 2797 static void 2798 zone_foreach_unlocked(void (*zfunc)(uma_zone_t, void *arg), void *arg) 2799 { 2800 uma_keg_t keg; 2801 uma_zone_t zone; 2802 2803 LIST_FOREACH(keg, &uma_kegs, uk_link) { 2804 LIST_FOREACH(zone, &keg->uk_zones, uz_link) 2805 zfunc(zone, arg); 2806 } 2807 LIST_FOREACH(zone, &uma_cachezones, uz_link) 2808 zfunc(zone, arg); 2809 } 2810 2811 /* 2812 * Traverses every zone in the system and calls a callback 2813 * 2814 * Arguments: 2815 * zfunc A pointer to a function which accepts a zone 2816 * as an argument. 2817 * 2818 * Returns: 2819 * Nothing 2820 */ 2821 static void 2822 zone_foreach(void (*zfunc)(uma_zone_t, void *arg), void *arg) 2823 { 2824 2825 rw_rlock(&uma_rwlock); 2826 zone_foreach_unlocked(zfunc, arg); 2827 rw_runlock(&uma_rwlock); 2828 } 2829 2830 /* 2831 * Initialize the kernel memory allocator. This is done after pages can be 2832 * allocated but before general KVA is available. 2833 */ 2834 void 2835 uma_startup1(vm_offset_t virtual_avail) 2836 { 2837 struct uma_zctor_args args; 2838 size_t ksize, zsize, size; 2839 uma_keg_t primarykeg; 2840 uintptr_t m; 2841 int domain; 2842 uint8_t pflag; 2843 2844 bootstart = bootmem = virtual_avail; 2845 2846 rw_init(&uma_rwlock, "UMA lock"); 2847 sx_init(&uma_reclaim_lock, "umareclaim"); 2848 2849 ksize = sizeof(struct uma_keg) + 2850 (sizeof(struct uma_domain) * vm_ndomains); 2851 ksize = roundup(ksize, UMA_SUPER_ALIGN); 2852 zsize = sizeof(struct uma_zone) + 2853 (sizeof(struct uma_cache) * (mp_maxid + 1)) + 2854 (sizeof(struct uma_zone_domain) * vm_ndomains); 2855 zsize = roundup(zsize, UMA_SUPER_ALIGN); 2856 2857 /* Allocate the zone of zones, zone of kegs, and zone of zones keg. 
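	 * The single allocation below is carved up in order: 'zones' at
	 * offset zero, 'kegs' at zsize, and the primary keg at 2 * zsize.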
*/ 2858 size = (zsize * 2) + ksize; 2859 for (domain = 0; domain < vm_ndomains; domain++) { 2860 m = (uintptr_t)startup_alloc(NULL, size, domain, &pflag, 2861 M_NOWAIT | M_ZERO); 2862 if (m != 0) 2863 break; 2864 } 2865 zones = (uma_zone_t)m; 2866 m += zsize; 2867 kegs = (uma_zone_t)m; 2868 m += zsize; 2869 primarykeg = (uma_keg_t)m; 2870 2871 /* "manually" create the initial zone */ 2872 memset(&args, 0, sizeof(args)); 2873 args.name = "UMA Kegs"; 2874 args.size = ksize; 2875 args.ctor = keg_ctor; 2876 args.dtor = keg_dtor; 2877 args.uminit = zero_init; 2878 args.fini = NULL; 2879 args.keg = primarykeg; 2880 args.align = UMA_SUPER_ALIGN - 1; 2881 args.flags = UMA_ZFLAG_INTERNAL; 2882 zone_ctor(kegs, zsize, &args, M_WAITOK); 2883 2884 args.name = "UMA Zones"; 2885 args.size = zsize; 2886 args.ctor = zone_ctor; 2887 args.dtor = zone_dtor; 2888 args.uminit = zero_init; 2889 args.fini = NULL; 2890 args.keg = NULL; 2891 args.align = UMA_SUPER_ALIGN - 1; 2892 args.flags = UMA_ZFLAG_INTERNAL; 2893 zone_ctor(zones, zsize, &args, M_WAITOK); 2894 2895 /* Now make zones for slab headers */ 2896 slabzones[0] = uma_zcreate("UMA Slabs 0", SLABZONE0_SIZE, 2897 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL); 2898 slabzones[1] = uma_zcreate("UMA Slabs 1", SLABZONE1_SIZE, 2899 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL); 2900 2901 hashzone = uma_zcreate("UMA Hash", 2902 sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT, 2903 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL); 2904 2905 bucket_init(); 2906 smr_init(); 2907 } 2908 2909 #ifndef UMA_MD_SMALL_ALLOC 2910 extern void vm_radix_reserve_kva(void); 2911 #endif 2912 2913 /* 2914 * Advertise the availability of normal kva allocations and switch to 2915 * the default back-end allocator. Marks the KVA we consumed on startup 2916 * as used in the map. 2917 */ 2918 void 2919 uma_startup2(void) 2920 { 2921 2922 if (bootstart != bootmem) { 2923 vm_map_lock(kernel_map); 2924 (void)vm_map_insert(kernel_map, NULL, 0, bootstart, bootmem, 2925 VM_PROT_RW, VM_PROT_RW, MAP_NOFAULT); 2926 vm_map_unlock(kernel_map); 2927 } 2928 2929 #ifndef UMA_MD_SMALL_ALLOC 2930 /* Set up radix zone to use noobj_alloc. */ 2931 vm_radix_reserve_kva(); 2932 #endif 2933 2934 booted = BOOT_KVA; 2935 zone_foreach_unlocked(zone_kva_available, NULL); 2936 bucket_enable(); 2937 } 2938 2939 /* 2940 * Allocate counters as early as possible so that boot-time allocations are 2941 * accounted more precisely. 2942 */ 2943 static void 2944 uma_startup_pcpu(void *arg __unused) 2945 { 2946 2947 zone_foreach_unlocked(zone_alloc_counters, NULL); 2948 booted = BOOT_PCPU; 2949 } 2950 SYSINIT(uma_startup_pcpu, SI_SUB_COUNTER, SI_ORDER_ANY, uma_startup_pcpu, NULL); 2951 2952 /* 2953 * Finish our initialization steps. 
2954 */ 2955 static void 2956 uma_startup3(void *arg __unused) 2957 { 2958 2959 #ifdef INVARIANTS 2960 TUNABLE_INT_FETCH("vm.debug.divisor", &dbg_divisor); 2961 uma_dbg_cnt = counter_u64_alloc(M_WAITOK); 2962 uma_skip_cnt = counter_u64_alloc(M_WAITOK); 2963 #endif 2964 zone_foreach_unlocked(zone_alloc_sysctl, NULL); 2965 callout_init(&uma_callout, 1); 2966 callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL); 2967 booted = BOOT_RUNNING; 2968 2969 EVENTHANDLER_REGISTER(shutdown_post_sync, uma_shutdown, NULL, 2970 EVENTHANDLER_PRI_FIRST); 2971 } 2972 SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL); 2973 2974 static void 2975 uma_shutdown(void) 2976 { 2977 2978 booted = BOOT_SHUTDOWN; 2979 } 2980 2981 static uma_keg_t 2982 uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini, 2983 int align, uint32_t flags) 2984 { 2985 struct uma_kctor_args args; 2986 2987 args.size = size; 2988 args.uminit = uminit; 2989 args.fini = fini; 2990 args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align; 2991 args.flags = flags; 2992 args.zone = zone; 2993 return (zone_alloc_item(kegs, &args, UMA_ANYDOMAIN, M_WAITOK)); 2994 } 2995 2996 /* Public functions */ 2997 /* See uma.h */ 2998 void 2999 uma_set_align(int align) 3000 { 3001 3002 if (align != UMA_ALIGN_CACHE) 3003 uma_align_cache = align; 3004 } 3005 3006 /* See uma.h */ 3007 uma_zone_t 3008 uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor, 3009 uma_init uminit, uma_fini fini, int align, uint32_t flags) 3010 3011 { 3012 struct uma_zctor_args args; 3013 uma_zone_t res; 3014 3015 KASSERT(powerof2(align + 1), ("invalid zone alignment %d for \"%s\"", 3016 align, name)); 3017 3018 /* This stuff is essential for the zone ctor */ 3019 memset(&args, 0, sizeof(args)); 3020 args.name = name; 3021 args.size = size; 3022 args.ctor = ctor; 3023 args.dtor = dtor; 3024 args.uminit = uminit; 3025 args.fini = fini; 3026 #ifdef INVARIANTS 3027 /* 3028 * Inject procedures which check for memory use after free if we are 3029 * allowed to scramble the memory while it is not allocated. This 3030 * requires that: UMA is actually able to access the memory, no init 3031 * or fini procedures, no dependency on the initial value of the 3032 * memory, and no (legitimate) use of the memory after free. Note, 3033 * the ctor and dtor do not need to be empty. 
3034 */ 3035 if ((!(flags & (UMA_ZONE_ZINIT | UMA_ZONE_NOTOUCH | 3036 UMA_ZONE_NOFREE))) && uminit == NULL && fini == NULL) { 3037 args.uminit = trash_init; 3038 args.fini = trash_fini; 3039 } 3040 #endif 3041 args.align = align; 3042 args.flags = flags; 3043 args.keg = NULL; 3044 3045 sx_slock(&uma_reclaim_lock); 3046 res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK); 3047 sx_sunlock(&uma_reclaim_lock); 3048 3049 return (res); 3050 } 3051 3052 /* See uma.h */ 3053 uma_zone_t 3054 uma_zsecond_create(const char *name, uma_ctor ctor, uma_dtor dtor, 3055 uma_init zinit, uma_fini zfini, uma_zone_t primary) 3056 { 3057 struct uma_zctor_args args; 3058 uma_keg_t keg; 3059 uma_zone_t res; 3060 3061 keg = primary->uz_keg; 3062 memset(&args, 0, sizeof(args)); 3063 args.name = name; 3064 args.size = keg->uk_size; 3065 args.ctor = ctor; 3066 args.dtor = dtor; 3067 args.uminit = zinit; 3068 args.fini = zfini; 3069 args.align = keg->uk_align; 3070 args.flags = keg->uk_flags | UMA_ZONE_SECONDARY; 3071 args.keg = keg; 3072 3073 sx_slock(&uma_reclaim_lock); 3074 res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK); 3075 sx_sunlock(&uma_reclaim_lock); 3076 3077 return (res); 3078 } 3079 3080 /* See uma.h */ 3081 uma_zone_t 3082 uma_zcache_create(const char *name, int size, uma_ctor ctor, uma_dtor dtor, 3083 uma_init zinit, uma_fini zfini, uma_import zimport, uma_release zrelease, 3084 void *arg, int flags) 3085 { 3086 struct uma_zctor_args args; 3087 3088 memset(&args, 0, sizeof(args)); 3089 args.name = name; 3090 args.size = size; 3091 args.ctor = ctor; 3092 args.dtor = dtor; 3093 args.uminit = zinit; 3094 args.fini = zfini; 3095 args.import = zimport; 3096 args.release = zrelease; 3097 args.arg = arg; 3098 args.align = 0; 3099 args.flags = flags | UMA_ZFLAG_CACHE; 3100 3101 return (zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK)); 3102 } 3103 3104 /* See uma.h */ 3105 void 3106 uma_zdestroy(uma_zone_t zone) 3107 { 3108 3109 /* 3110 * Large slabs are expensive to reclaim, so don't bother doing 3111 * unnecessary work if we're shutting down. 3112 */ 3113 if (booted == BOOT_SHUTDOWN && 3114 zone->uz_fini == NULL && zone->uz_release == zone_release) 3115 return; 3116 sx_slock(&uma_reclaim_lock); 3117 zone_free_item(zones, zone, NULL, SKIP_NONE); 3118 sx_sunlock(&uma_reclaim_lock); 3119 } 3120 3121 void 3122 uma_zwait(uma_zone_t zone) 3123 { 3124 3125 if ((zone->uz_flags & UMA_ZONE_SMR) != 0) 3126 uma_zfree_smr(zone, uma_zalloc_smr(zone, M_WAITOK)); 3127 else if ((zone->uz_flags & UMA_ZONE_PCPU) != 0) 3128 uma_zfree_pcpu(zone, uma_zalloc_pcpu(zone, M_WAITOK)); 3129 else 3130 uma_zfree(zone, uma_zalloc(zone, M_WAITOK)); 3131 } 3132 3133 void * 3134 uma_zalloc_pcpu_arg(uma_zone_t zone, void *udata, int flags) 3135 { 3136 void *item, *pcpu_item; 3137 #ifdef SMP 3138 int i; 3139 3140 MPASS(zone->uz_flags & UMA_ZONE_PCPU); 3141 #endif 3142 item = uma_zalloc_arg(zone, udata, flags & ~M_ZERO); 3143 if (item == NULL) 3144 return (NULL); 3145 pcpu_item = zpcpu_base_to_offset(item); 3146 if (flags & M_ZERO) { 3147 #ifdef SMP 3148 for (i = 0; i <= mp_maxid; i++) 3149 bzero(zpcpu_get_cpu(pcpu_item, i), zone->uz_size); 3150 #else 3151 bzero(item, zone->uz_size); 3152 #endif 3153 } 3154 return (pcpu_item); 3155 } 3156 3157 /* 3158 * A stub while both regular and pcpu cases are identical. 
3159 */ 3160 void 3161 uma_zfree_pcpu_arg(uma_zone_t zone, void *pcpu_item, void *udata) 3162 { 3163 void *item; 3164 3165 #ifdef SMP 3166 MPASS(zone->uz_flags & UMA_ZONE_PCPU); 3167 #endif 3168 item = zpcpu_offset_to_base(pcpu_item); 3169 uma_zfree_arg(zone, item, udata); 3170 } 3171 3172 static inline void * 3173 item_ctor(uma_zone_t zone, int uz_flags, int size, void *udata, int flags, 3174 void *item) 3175 { 3176 #ifdef INVARIANTS 3177 bool skipdbg; 3178 3179 skipdbg = uma_dbg_zskip(zone, item); 3180 if (!skipdbg && (zone->uz_flags & UMA_ZFLAG_TRASH) != 0 && 3181 zone->uz_ctor != trash_ctor) 3182 trash_ctor(item, size, udata, flags); 3183 #endif 3184 /* Check flags before loading ctor pointer. */ 3185 if (__predict_false((uz_flags & UMA_ZFLAG_CTORDTOR) != 0) && 3186 __predict_false(zone->uz_ctor != NULL) && 3187 zone->uz_ctor(item, size, udata, flags) != 0) { 3188 counter_u64_add(zone->uz_fails, 1); 3189 zone_free_item(zone, item, udata, SKIP_DTOR | SKIP_CNT); 3190 return (NULL); 3191 } 3192 #ifdef INVARIANTS 3193 if (!skipdbg) 3194 uma_dbg_alloc(zone, NULL, item); 3195 #endif 3196 if (__predict_false(flags & M_ZERO)) 3197 return (memset(item, 0, size)); 3198 3199 return (item); 3200 } 3201 3202 static inline void 3203 item_dtor(uma_zone_t zone, void *item, int size, void *udata, 3204 enum zfreeskip skip) 3205 { 3206 #ifdef INVARIANTS 3207 bool skipdbg; 3208 3209 skipdbg = uma_dbg_zskip(zone, item); 3210 if (skip == SKIP_NONE && !skipdbg) { 3211 if ((zone->uz_flags & UMA_ZONE_MALLOC) != 0) 3212 uma_dbg_free(zone, udata, item); 3213 else 3214 uma_dbg_free(zone, NULL, item); 3215 } 3216 #endif 3217 if (__predict_true(skip < SKIP_DTOR)) { 3218 if (zone->uz_dtor != NULL) 3219 zone->uz_dtor(item, size, udata); 3220 #ifdef INVARIANTS 3221 if (!skipdbg && (zone->uz_flags & UMA_ZFLAG_TRASH) != 0 && 3222 zone->uz_dtor != trash_dtor) 3223 trash_dtor(item, size, udata); 3224 #endif 3225 } 3226 } 3227 3228 #ifdef NUMA 3229 static int 3230 item_domain(void *item) 3231 { 3232 int domain; 3233 3234 domain = vm_phys_domain(vtophys(item)); 3235 KASSERT(domain >= 0 && domain < vm_ndomains, 3236 ("%s: unknown domain for item %p", __func__, item)); 3237 return (domain); 3238 } 3239 #endif 3240 3241 #if defined(INVARIANTS) || defined(DEBUG_MEMGUARD) || defined(WITNESS) 3242 #define UMA_ZALLOC_DEBUG 3243 static int 3244 uma_zalloc_debug(uma_zone_t zone, void **itemp, void *udata, int flags) 3245 { 3246 int error; 3247 3248 error = 0; 3249 #ifdef WITNESS 3250 if (flags & M_WAITOK) { 3251 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 3252 "uma_zalloc_debug: zone \"%s\"", zone->uz_name); 3253 } 3254 #endif 3255 3256 #ifdef INVARIANTS 3257 KASSERT((flags & M_EXEC) == 0, 3258 ("uma_zalloc_debug: called with M_EXEC")); 3259 KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(), 3260 ("uma_zalloc_debug: called within spinlock or critical section")); 3261 KASSERT((zone->uz_flags & UMA_ZONE_PCPU) == 0 || (flags & M_ZERO) == 0, 3262 ("uma_zalloc_debug: allocating from a pcpu zone with M_ZERO")); 3263 #endif 3264 3265 #ifdef DEBUG_MEMGUARD 3266 if ((zone->uz_flags & UMA_ZONE_SMR) == 0 && memguard_cmp_zone(zone)) { 3267 void *item; 3268 item = memguard_alloc(zone->uz_size, flags); 3269 if (item != NULL) { 3270 error = EJUSTRETURN; 3271 if (zone->uz_init != NULL && 3272 zone->uz_init(item, zone->uz_size, flags) != 0) { 3273 *itemp = NULL; 3274 return (error); 3275 } 3276 if (zone->uz_ctor != NULL && 3277 zone->uz_ctor(item, zone->uz_size, udata, 3278 flags) != 0) { 3279 counter_u64_add(zone->uz_fails, 1); 3280 
				if (zone->uz_fini != NULL)
					zone->uz_fini(item, zone->uz_size);
				*itemp = NULL;
				return (error);
			}
			*itemp = item;
			return (error);
		}
		/* This is unfortunate but should not be fatal. */
	}
#endif
	return (error);
}

static int
uma_zfree_debug(uma_zone_t zone, void *item, void *udata)
{
	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
	    ("uma_zfree_debug: called with spinlock or critical section held"));

#ifdef DEBUG_MEMGUARD
	if ((zone->uz_flags & UMA_ZONE_SMR) == 0 && is_memguard_addr(item)) {
		if (zone->uz_dtor != NULL)
			zone->uz_dtor(item, zone->uz_size, udata);
		if (zone->uz_fini != NULL)
			zone->uz_fini(item, zone->uz_size);
		memguard_free(item);
		return (EJUSTRETURN);
	}
#endif
	return (0);
}
#endif

static inline void *
cache_alloc_item(uma_zone_t zone, uma_cache_t cache, uma_cache_bucket_t bucket,
    void *udata, int flags)
{
	void *item;
	int size, uz_flags;

	item = cache_bucket_pop(cache, bucket);
	size = cache_uz_size(cache);
	uz_flags = cache_uz_flags(cache);
	critical_exit();
	return (item_ctor(zone, uz_flags, size, udata, flags, item));
}

static __noinline void *
cache_alloc_retry(uma_zone_t zone, uma_cache_t cache, void *udata, int flags)
{
	uma_cache_bucket_t bucket;
	int domain;

	while (cache_alloc(zone, cache, udata, flags)) {
		cache = &zone->uz_cpu[curcpu];
		bucket = &cache->uc_allocbucket;
		if (__predict_false(bucket->ucb_cnt == 0))
			continue;
		return (cache_alloc_item(zone, cache, bucket, udata, flags));
	}
	critical_exit();

	/*
	 * We cannot get a bucket so try to return a single item.
	 */
	if (zone->uz_flags & UMA_ZONE_FIRSTTOUCH)
		domain = PCPU_GET(domain);
	else
		domain = UMA_ANYDOMAIN;
	return (zone_alloc_item(zone, udata, domain, flags));
}

/* See uma.h */
void *
uma_zalloc_smr(uma_zone_t zone, int flags)
{
	uma_cache_bucket_t bucket;
	uma_cache_t cache;

#ifdef UMA_ZALLOC_DEBUG
	void *item;

	KASSERT((zone->uz_flags & UMA_ZONE_SMR) != 0,
	    ("uma_zalloc_smr: called with non-SMR zone."));
	if (uma_zalloc_debug(zone, &item, NULL, flags) == EJUSTRETURN)
		return (item);
#endif

	critical_enter();
	cache = &zone->uz_cpu[curcpu];
	bucket = &cache->uc_allocbucket;
	if (__predict_false(bucket->ucb_cnt == 0))
		return (cache_alloc_retry(zone, cache, NULL, flags));
	return (cache_alloc_item(zone, cache, bucket, NULL, flags));
}

/* See uma.h */
void *
uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
{
	uma_cache_bucket_t bucket;
	uma_cache_t cache;

	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);

	/* This is the fast path allocation */
	CTR3(KTR_UMA, "uma_zalloc_arg zone %s(%p) flags %d", zone->uz_name,
	    zone, flags);

#ifdef UMA_ZALLOC_DEBUG
	void *item;

	KASSERT((zone->uz_flags & UMA_ZONE_SMR) == 0,
	    ("uma_zalloc_arg: called with SMR zone."));
	if (uma_zalloc_debug(zone, &item, udata, flags) == EJUSTRETURN)
		return (item);
#endif

	/*
	 * If possible, allocate from the per-CPU cache.
There are two 3401 * requirements for safe access to the per-CPU cache: (1) the thread 3402 * accessing the cache must not be preempted or yield during access, 3403 * and (2) the thread must not migrate CPUs without switching which 3404 * cache it accesses. We rely on a critical section to prevent 3405 * preemption and migration. We release the critical section in 3406 * order to acquire the zone mutex if we are unable to allocate from 3407 * the current cache; when we re-acquire the critical section, we 3408 * must detect and handle migration if it has occurred. 3409 */ 3410 critical_enter(); 3411 cache = &zone->uz_cpu[curcpu]; 3412 bucket = &cache->uc_allocbucket; 3413 if (__predict_false(bucket->ucb_cnt == 0)) 3414 return (cache_alloc_retry(zone, cache, udata, flags)); 3415 return (cache_alloc_item(zone, cache, bucket, udata, flags)); 3416 } 3417 3418 /* 3419 * Replenish an alloc bucket and possibly restore an old one. Called in 3420 * a critical section. Returns in a critical section. 3421 * 3422 * A false return value indicates an allocation failure. 3423 * A true return value indicates success and the caller should retry. 3424 */ 3425 static __noinline bool 3426 cache_alloc(uma_zone_t zone, uma_cache_t cache, void *udata, int flags) 3427 { 3428 uma_bucket_t bucket; 3429 int curdomain, domain; 3430 bool new; 3431 3432 CRITICAL_ASSERT(curthread); 3433 3434 /* 3435 * If we have run out of items in our alloc bucket see 3436 * if we can switch with the free bucket. 3437 * 3438 * SMR Zones can't re-use the free bucket until the sequence has 3439 * expired. 3440 */ 3441 if ((cache_uz_flags(cache) & UMA_ZONE_SMR) == 0 && 3442 cache->uc_freebucket.ucb_cnt != 0) { 3443 cache_bucket_swap(&cache->uc_freebucket, 3444 &cache->uc_allocbucket); 3445 return (true); 3446 } 3447 3448 /* 3449 * Discard any empty allocation bucket while we hold no locks. 3450 */ 3451 bucket = cache_bucket_unload_alloc(cache); 3452 critical_exit(); 3453 3454 if (bucket != NULL) { 3455 KASSERT(bucket->ub_cnt == 0, 3456 ("cache_alloc: Entered with non-empty alloc bucket.")); 3457 bucket_free(zone, bucket, udata); 3458 } 3459 3460 /* 3461 * Attempt to retrieve the item from the per-CPU cache has failed, so 3462 * we must go back to the zone. This requires the zdom lock, so we 3463 * must drop the critical section, then re-acquire it when we go back 3464 * to the cache. Since the critical section is released, we may be 3465 * preempted or migrate. As such, make sure not to maintain any 3466 * thread-local state specific to the cache from prior to releasing 3467 * the critical section. 3468 */ 3469 domain = PCPU_GET(domain); 3470 if ((cache_uz_flags(cache) & UMA_ZONE_ROUNDROBIN) != 0 || 3471 VM_DOMAIN_EMPTY(domain)) 3472 domain = zone_domain_highest(zone, domain); 3473 bucket = cache_fetch_bucket(zone, cache, domain); 3474 if (bucket == NULL && zone->uz_bucket_size != 0 && !bucketdisable) { 3475 bucket = zone_alloc_bucket(zone, udata, domain, flags); 3476 new = true; 3477 } else { 3478 new = false; 3479 } 3480 3481 CTR3(KTR_UMA, "uma_zalloc: zone %s(%p) bucket zone returned %p", 3482 zone->uz_name, zone, bucket); 3483 if (bucket == NULL) { 3484 critical_enter(); 3485 return (false); 3486 } 3487 3488 /* 3489 * See if we lost the race or were migrated. Cache the 3490 * initialized bucket to make this less likely or claim 3491 * the memory directly. 
3492 */ 3493 critical_enter(); 3494 cache = &zone->uz_cpu[curcpu]; 3495 if (cache->uc_allocbucket.ucb_bucket == NULL && 3496 ((cache_uz_flags(cache) & UMA_ZONE_FIRSTTOUCH) == 0 || 3497 (curdomain = PCPU_GET(domain)) == domain || 3498 VM_DOMAIN_EMPTY(curdomain))) { 3499 if (new) 3500 atomic_add_long(&ZDOM_GET(zone, domain)->uzd_imax, 3501 bucket->ub_cnt); 3502 cache_bucket_load_alloc(cache, bucket); 3503 return (true); 3504 } 3505 3506 /* 3507 * We lost the race, release this bucket and start over. 3508 */ 3509 critical_exit(); 3510 zone_put_bucket(zone, domain, bucket, udata, false); 3511 critical_enter(); 3512 3513 return (true); 3514 } 3515 3516 void * 3517 uma_zalloc_domain(uma_zone_t zone, void *udata, int domain, int flags) 3518 { 3519 #ifdef NUMA 3520 uma_bucket_t bucket; 3521 uma_zone_domain_t zdom; 3522 void *item; 3523 #endif 3524 3525 /* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */ 3526 random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA); 3527 3528 /* This is the fast path allocation */ 3529 CTR4(KTR_UMA, "uma_zalloc_domain zone %s(%p) domain %d flags %d", 3530 zone->uz_name, zone, domain, flags); 3531 3532 if (flags & M_WAITOK) { 3533 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 3534 "uma_zalloc_domain: zone \"%s\"", zone->uz_name); 3535 } 3536 KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(), 3537 ("uma_zalloc_domain: called with spinlock or critical section held")); 3538 KASSERT((zone->uz_flags & UMA_ZONE_SMR) == 0, 3539 ("uma_zalloc_domain: called with SMR zone.")); 3540 #ifdef NUMA 3541 KASSERT((zone->uz_flags & UMA_ZONE_FIRSTTOUCH) != 0, 3542 ("uma_zalloc_domain: called with non-FIRSTTOUCH zone.")); 3543 3544 if (vm_ndomains == 1) 3545 return (uma_zalloc_arg(zone, udata, flags)); 3546 3547 /* 3548 * Try to allocate from the bucket cache before falling back to the keg. 3549 * We could try harder and attempt to allocate from per-CPU caches or 3550 * the per-domain cross-domain buckets, but the complexity is probably 3551 * not worth it. It is more important that frees of previous 3552 * cross-domain allocations do not blow up the cache. 3553 */ 3554 zdom = zone_domain_lock(zone, domain); 3555 if ((bucket = zone_fetch_bucket(zone, zdom, false)) != NULL) { 3556 item = bucket->ub_bucket[bucket->ub_cnt - 1]; 3557 #ifdef INVARIANTS 3558 bucket->ub_bucket[bucket->ub_cnt - 1] = NULL; 3559 #endif 3560 bucket->ub_cnt--; 3561 zone_put_bucket(zone, domain, bucket, udata, true); 3562 item = item_ctor(zone, zone->uz_flags, zone->uz_size, udata, 3563 flags, item); 3564 if (item != NULL) { 3565 KASSERT(item_domain(item) == domain, 3566 ("%s: bucket cache item %p from wrong domain", 3567 __func__, item)); 3568 counter_u64_add(zone->uz_allocs, 1); 3569 } 3570 return (item); 3571 } 3572 ZDOM_UNLOCK(zdom); 3573 return (zone_alloc_item(zone, udata, domain, flags)); 3574 #else 3575 return (uma_zalloc_arg(zone, udata, flags)); 3576 #endif 3577 } 3578 3579 /* 3580 * Find a slab with some space. Prefer slabs that are partially used over those 3581 * that are totally full. This helps to reduce fragmentation. 3582 * 3583 * If 'rr' is 1, search all domains starting from 'domain'. Otherwise check 3584 * only 'domain'. 
3585 */ 3586 static uma_slab_t 3587 keg_first_slab(uma_keg_t keg, int domain, bool rr) 3588 { 3589 uma_domain_t dom; 3590 uma_slab_t slab; 3591 int start; 3592 3593 KASSERT(domain >= 0 && domain < vm_ndomains, 3594 ("keg_first_slab: domain %d out of range", domain)); 3595 KEG_LOCK_ASSERT(keg, domain); 3596 3597 slab = NULL; 3598 start = domain; 3599 do { 3600 dom = &keg->uk_domain[domain]; 3601 if ((slab = LIST_FIRST(&dom->ud_part_slab)) != NULL) 3602 return (slab); 3603 if ((slab = LIST_FIRST(&dom->ud_free_slab)) != NULL) { 3604 LIST_REMOVE(slab, us_link); 3605 dom->ud_free_slabs--; 3606 LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link); 3607 return (slab); 3608 } 3609 if (rr) 3610 domain = (domain + 1) % vm_ndomains; 3611 } while (domain != start); 3612 3613 return (NULL); 3614 } 3615 3616 /* 3617 * Fetch an existing slab from a free or partial list. Returns with the 3618 * keg domain lock held if a slab was found or unlocked if not. 3619 */ 3620 static uma_slab_t 3621 keg_fetch_free_slab(uma_keg_t keg, int domain, bool rr, int flags) 3622 { 3623 uma_slab_t slab; 3624 uint32_t reserve; 3625 3626 /* HASH has a single free list. */ 3627 if ((keg->uk_flags & UMA_ZFLAG_HASH) != 0) 3628 domain = 0; 3629 3630 KEG_LOCK(keg, domain); 3631 reserve = (flags & M_USE_RESERVE) != 0 ? 0 : keg->uk_reserve; 3632 if (keg->uk_domain[domain].ud_free_items <= reserve || 3633 (slab = keg_first_slab(keg, domain, rr)) == NULL) { 3634 KEG_UNLOCK(keg, domain); 3635 return (NULL); 3636 } 3637 return (slab); 3638 } 3639 3640 static uma_slab_t 3641 keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int rdomain, const int flags) 3642 { 3643 struct vm_domainset_iter di; 3644 uma_slab_t slab; 3645 int aflags, domain; 3646 bool rr; 3647 3648 restart: 3649 /* 3650 * Use the keg's policy if upper layers haven't already specified a 3651 * domain (as happens with first-touch zones). 3652 * 3653 * To avoid races we run the iterator with the keg lock held, but that 3654 * means that we cannot allow the vm_domainset layer to sleep. Thus, 3655 * clear M_WAITOK and handle low memory conditions locally. 3656 */ 3657 rr = rdomain == UMA_ANYDOMAIN; 3658 if (rr) { 3659 aflags = (flags & ~M_WAITOK) | M_NOWAIT; 3660 vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain, 3661 &aflags); 3662 } else { 3663 aflags = flags; 3664 domain = rdomain; 3665 } 3666 3667 for (;;) { 3668 slab = keg_fetch_free_slab(keg, domain, rr, flags); 3669 if (slab != NULL) 3670 return (slab); 3671 3672 /* 3673 * M_NOVM means don't ask at all! 3674 */ 3675 if (flags & M_NOVM) 3676 break; 3677 3678 slab = keg_alloc_slab(keg, zone, domain, flags, aflags); 3679 if (slab != NULL) 3680 return (slab); 3681 if (!rr && (flags & M_WAITOK) == 0) 3682 break; 3683 if (rr && vm_domainset_iter_policy(&di, &domain) != 0) { 3684 if ((flags & M_WAITOK) != 0) { 3685 vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask, 0); 3686 goto restart; 3687 } 3688 break; 3689 } 3690 } 3691 3692 /* 3693 * We might not have been able to get a slab but another cpu 3694 * could have while we were unlocked. Check again before we 3695 * fail. 
3696 */ 3697 if ((slab = keg_fetch_free_slab(keg, domain, rr, flags)) != NULL) 3698 return (slab); 3699 3700 return (NULL); 3701 } 3702 3703 static void * 3704 slab_alloc_item(uma_keg_t keg, uma_slab_t slab) 3705 { 3706 uma_domain_t dom; 3707 void *item; 3708 int freei; 3709 3710 KEG_LOCK_ASSERT(keg, slab->us_domain); 3711 3712 dom = &keg->uk_domain[slab->us_domain]; 3713 freei = BIT_FFS(keg->uk_ipers, &slab->us_free) - 1; 3714 BIT_CLR(keg->uk_ipers, freei, &slab->us_free); 3715 item = slab_item(slab, keg, freei); 3716 slab->us_freecount--; 3717 dom->ud_free_items--; 3718 3719 /* 3720 * Move this slab to the full list. It must be on the partial list, so 3721 * we do not need to update the free slab count. In particular, 3722 * keg_fetch_slab() always returns slabs on the partial list. 3723 */ 3724 if (slab->us_freecount == 0) { 3725 LIST_REMOVE(slab, us_link); 3726 LIST_INSERT_HEAD(&dom->ud_full_slab, slab, us_link); 3727 } 3728 3729 return (item); 3730 } 3731 3732 static int 3733 zone_import(void *arg, void **bucket, int max, int domain, int flags) 3734 { 3735 uma_domain_t dom; 3736 uma_zone_t zone; 3737 uma_slab_t slab; 3738 uma_keg_t keg; 3739 #ifdef NUMA 3740 int stripe; 3741 #endif 3742 int i; 3743 3744 zone = arg; 3745 slab = NULL; 3746 keg = zone->uz_keg; 3747 /* Try to keep the buckets totally full */ 3748 for (i = 0; i < max; ) { 3749 if ((slab = keg_fetch_slab(keg, zone, domain, flags)) == NULL) 3750 break; 3751 #ifdef NUMA 3752 stripe = howmany(max, vm_ndomains); 3753 #endif 3754 dom = &keg->uk_domain[slab->us_domain]; 3755 do { 3756 bucket[i++] = slab_alloc_item(keg, slab); 3757 if (dom->ud_free_items <= keg->uk_reserve) { 3758 /* 3759 * Avoid depleting the reserve after a 3760 * successful item allocation, even if 3761 * M_USE_RESERVE is specified. 3762 */ 3763 KEG_UNLOCK(keg, slab->us_domain); 3764 goto out; 3765 } 3766 #ifdef NUMA 3767 /* 3768 * If the zone is striped we pick a new slab for every 3769 * N allocations. Eliminating this conditional will 3770 * instead pick a new domain for each bucket rather 3771 * than stripe within each bucket. The current option 3772 * produces more fragmentation and requires more cpu 3773 * time but yields better distribution. 3774 */ 3775 if ((zone->uz_flags & UMA_ZONE_ROUNDROBIN) != 0 && 3776 vm_ndomains > 1 && --stripe == 0) 3777 break; 3778 #endif 3779 } while (slab->us_freecount != 0 && i < max); 3780 KEG_UNLOCK(keg, slab->us_domain); 3781 3782 /* Don't block if we allocated any successfully. */ 3783 flags &= ~M_WAITOK; 3784 flags |= M_NOWAIT; 3785 } 3786 out: 3787 return i; 3788 } 3789 3790 static int 3791 zone_alloc_limit_hard(uma_zone_t zone, int count, int flags) 3792 { 3793 uint64_t old, new, total, max; 3794 3795 /* 3796 * The hard case. We're going to sleep because there were existing 3797 * sleepers or because we ran out of items. This routine enforces 3798 * fairness by keeping fifo order. 3799 * 3800 * First release our ill gotten gains and make some noise. 3801 */ 3802 for (;;) { 3803 zone_free_limit(zone, count); 3804 zone_log_warning(zone); 3805 zone_maxaction(zone); 3806 if (flags & M_NOWAIT) 3807 return (0); 3808 3809 /* 3810 * We need to allocate an item or set ourself as a sleeper 3811 * while the sleepq lock is held to avoid wakeup races. This 3812 * is essentially a home rolled semaphore. 3813 */ 3814 sleepq_lock(&zone->uz_max_items); 3815 old = zone->uz_items; 3816 do { 3817 MPASS(UZ_ITEMS_SLEEPERS(old) < UZ_ITEMS_SLEEPERS_MAX); 3818 /* Cache the max since we will evaluate twice. 
			 */
			max = zone->uz_max_items;
			if (UZ_ITEMS_SLEEPERS(old) != 0 ||
			    UZ_ITEMS_COUNT(old) >= max)
				new = old + UZ_ITEMS_SLEEPER;
			else
				new = old + MIN(count, max - old);
		} while (atomic_fcmpset_64(&zone->uz_items, &old, new) == 0);

		/* We may have successfully allocated under the sleepq lock. */
		if (UZ_ITEMS_SLEEPERS(new) == 0) {
			sleepq_release(&zone->uz_max_items);
			return (new - old);
		}

		/*
		 * This is in a different cacheline from uz_items so that we
		 * don't constantly invalidate the fastpath cacheline when we
		 * adjust item counts.  This could be limited to toggling on
		 * transitions.
		 */
		atomic_add_32(&zone->uz_sleepers, 1);
		atomic_add_64(&zone->uz_sleeps, 1);

		/*
		 * We have added ourselves as a sleeper.  The sleepq lock
		 * protects us from wakeup races.  Sleep now and then retry.
		 */
		sleepq_add(&zone->uz_max_items, NULL, "zonelimit", 0, 0);
		sleepq_wait(&zone->uz_max_items, PVM);

		/*
		 * After wakeup, remove ourselves as a sleeper and try
		 * again.  We no longer have the sleepq lock for protection.
		 *
		 * Subtract ourselves as a sleeper while attempting to add
		 * our count.
		 */
		atomic_subtract_32(&zone->uz_sleepers, 1);
		old = atomic_fetchadd_64(&zone->uz_items,
		    -(UZ_ITEMS_SLEEPER - count));
		/* We're no longer a sleeper. */
		old -= UZ_ITEMS_SLEEPER;

		/*
		 * If we're still at the limit, restart.  Notably do not
		 * block on other sleepers.  Cache the max value to protect
		 * against changes via sysctl.
		 */
		total = UZ_ITEMS_COUNT(old);
		max = zone->uz_max_items;
		if (total >= max)
			continue;
		/* Truncate if necessary, otherwise wake other sleepers. */
		if (total + count > max) {
			zone_free_limit(zone, total + count - max);
			count = max - total;
		} else if (total + count < max && UZ_ITEMS_SLEEPERS(old) != 0)
			wakeup_one(&zone->uz_max_items);

		return (count);
	}
}

/*
 * Allocate 'count' items from our max_items limit.  Returns the number
 * available.  If M_NOWAIT is not specified it will sleep until at least
 * one item can be allocated.
 */
static int
zone_alloc_limit(uma_zone_t zone, int count, int flags)
{
	uint64_t old;
	uint64_t max;

	max = zone->uz_max_items;
	MPASS(max > 0);

	/*
	 * We expect normal allocations to succeed with a simple
	 * fetchadd.
	 */
	old = atomic_fetchadd_64(&zone->uz_items, count);
	if (__predict_true(old + count <= max))
		return (count);

	/*
	 * If we had some items and no sleepers just return the
	 * truncated value.  We have to release the excess space
	 * though because that may wake sleepers who weren't woken
	 * because we were temporarily over the limit.
	 */
	if (old < max) {
		zone_free_limit(zone, (old + count) - max);
		return (max - old);
	}
	return (zone_alloc_limit_hard(zone, count, flags));
}

/*
 * Free a number of items back to the limit.
 */
static void
zone_free_limit(uma_zone_t zone, int count)
{
	uint64_t old;

	MPASS(count > 0);

	/*
	 * In the common case we either have no sleepers or
	 * are still over the limit and can just return.
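	 *
	 * (uz_items packs the allocated-item count into its low bits and the
	 * sleeper count into its high bits; see the UZ_ITEMS_COUNT() and
	 * UZ_ITEMS_SLEEPERS() accessors used throughout this file.)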

/*
 * Free a number of items back to the limit.
 */
static void
zone_free_limit(uma_zone_t zone, int count)
{
	uint64_t old;

	MPASS(count > 0);

	/*
	 * In the common case we either have no sleepers or
	 * are still over the limit and can just return.
	 */
	old = atomic_fetchadd_64(&zone->uz_items, -count);
	if (__predict_true(UZ_ITEMS_SLEEPERS(old) == 0 ||
	    UZ_ITEMS_COUNT(old) - count >= zone->uz_max_items))
		return;

	/*
	 * Moderate the rate of wakeups.  Sleepers will continue
	 * to generate wakeups if necessary.
	 */
	wakeup_one(&zone->uz_max_items);
}

static uma_bucket_t
zone_alloc_bucket(uma_zone_t zone, void *udata, int domain, int flags)
{
	uma_bucket_t bucket;
	int maxbucket, cnt;

	CTR3(KTR_UMA, "zone_alloc_bucket zone %s(%p) domain %d", zone->uz_name,
	    zone, domain);

	/* Avoid allocs targeting empty domains. */
	if (domain != UMA_ANYDOMAIN && VM_DOMAIN_EMPTY(domain))
		domain = UMA_ANYDOMAIN;
	else if ((zone->uz_flags & UMA_ZONE_ROUNDROBIN) != 0)
		domain = UMA_ANYDOMAIN;

	if (zone->uz_max_items > 0)
		maxbucket = zone_alloc_limit(zone, zone->uz_bucket_size,
		    M_NOWAIT);
	else
		maxbucket = zone->uz_bucket_size;
	if (maxbucket == 0)
		return (NULL);

	/* Don't wait for buckets, preserve caller's NOVM setting. */
	bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM));
	if (bucket == NULL) {
		cnt = 0;
		goto out;
	}

	bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket,
	    MIN(maxbucket, bucket->ub_entries), domain, flags);

	/*
	 * Initialize the memory if necessary.
	 */
	if (bucket->ub_cnt != 0 && zone->uz_init != NULL) {
		int i;

		for (i = 0; i < bucket->ub_cnt; i++)
			if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size,
			    flags) != 0)
				break;
		/*
		 * If we couldn't initialize the whole bucket, put the
		 * rest back onto the freelist.
		 */
		if (i != bucket->ub_cnt) {
			zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i],
			    bucket->ub_cnt - i);
#ifdef INVARIANTS
			bzero(&bucket->ub_bucket[i],
			    sizeof(void *) * (bucket->ub_cnt - i));
#endif
			bucket->ub_cnt = i;
		}
	}

	cnt = bucket->ub_cnt;
	if (bucket->ub_cnt == 0) {
		bucket_free(zone, bucket, udata);
		counter_u64_add(zone->uz_fails, 1);
		bucket = NULL;
	}
out:
	if (zone->uz_max_items > 0 && cnt < maxbucket)
		zone_free_limit(zone, maxbucket - cnt);

	return (bucket);
}

/*
 * Allocates a single item from a zone.
 *
 * Arguments
 *	zone   The zone to alloc for.
 *	udata  The data to be passed to the constructor.
 *	domain The domain to allocate from or UMA_ANYDOMAIN.
 *	flags  M_WAITOK, M_NOWAIT, M_ZERO.
 *
 * Returns
 *	NULL if there is no memory and M_NOWAIT is set
 *	An item if successful
 */
static void *
zone_alloc_item(uma_zone_t zone, void *udata, int domain, int flags)
{
	void *item;

	if (zone->uz_max_items > 0 && zone_alloc_limit(zone, 1, flags) == 0) {
		counter_u64_add(zone->uz_fails, 1);
		return (NULL);
	}

	/* Avoid allocs targeting empty domains. */
	if (domain != UMA_ANYDOMAIN && VM_DOMAIN_EMPTY(domain))
		domain = UMA_ANYDOMAIN;

	if (zone->uz_import(zone->uz_arg, &item, 1, domain, flags) != 1)
		goto fail_cnt;

	/*
	 * We have to call both the zone's init (not the keg's init)
	 * and the zone's ctor.  This is because the item is going from
	 * a keg slab directly to the user, and the user is expecting it
	 * to be both zone-init'd as well as zone-ctor'd.
	 */
	if (zone->uz_init != NULL) {
		if (zone->uz_init(item, zone->uz_size, flags) != 0) {
			zone_free_item(zone, item, udata, SKIP_FINI | SKIP_CNT);
			goto fail_cnt;
		}
	}
	item = item_ctor(zone, zone->uz_flags, zone->uz_size, udata, flags,
	    item);
	if (item == NULL)
		goto fail;

	counter_u64_add(zone->uz_allocs, 1);
	CTR3(KTR_UMA, "zone_alloc_item item %p from %s(%p)", item,
	    zone->uz_name, zone);

	return (item);

fail_cnt:
	counter_u64_add(zone->uz_fails, 1);
fail:
	if (zone->uz_max_items > 0)
		zone_free_limit(zone, 1);
	CTR2(KTR_UMA, "zone_alloc_item failed from %s(%p)",
	    zone->uz_name, zone);

	return (NULL);
}

/* See uma.h */
void
uma_zfree_smr(uma_zone_t zone, void *item)
{
	uma_cache_t cache;
	uma_cache_bucket_t bucket;
	int itemdomain, uz_flags;

#ifdef UMA_ZALLOC_DEBUG
	KASSERT((zone->uz_flags & UMA_ZONE_SMR) != 0,
	    ("uma_zfree_smr: called with non-SMR zone."));
	KASSERT(item != NULL, ("uma_zfree_smr: Called with NULL pointer."));
	SMR_ASSERT_NOT_ENTERED(zone->uz_smr);
	if (uma_zfree_debug(zone, item, NULL) == EJUSTRETURN)
		return;
#endif
	cache = &zone->uz_cpu[curcpu];
	uz_flags = cache_uz_flags(cache);
	itemdomain = 0;
#ifdef NUMA
	if ((uz_flags & UMA_ZONE_FIRSTTOUCH) != 0)
		itemdomain = item_domain(item);
#endif
	critical_enter();
	do {
		cache = &zone->uz_cpu[curcpu];
		/* SMR zones must free to the free bucket. */
		bucket = &cache->uc_freebucket;
#ifdef NUMA
		if ((uz_flags & UMA_ZONE_FIRSTTOUCH) != 0 &&
		    PCPU_GET(domain) != itemdomain) {
			bucket = &cache->uc_crossbucket;
		}
#endif
		if (__predict_true(bucket->ucb_cnt < bucket->ucb_entries)) {
			cache_bucket_push(cache, bucket, item);
			critical_exit();
			return;
		}
	} while (cache_free(zone, cache, NULL, item, itemdomain));
	critical_exit();

	/*
	 * If nothing else caught this, we'll just do an internal free.
	 */
	zone_free_item(zone, item, NULL, SKIP_NONE);
}
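
/*
 * Illustrative sketch (not part of UMA): SMR zones pair uma_zalloc_smr()/
 * uma_zfree_smr() on the writer side with smr_enter()/smr_exit() around
 * lock-free readers, so an item freed above is not reused until concurrent
 * readers have left the SMR section.  The foo_* names are hypothetical.
 */
#if 0
struct foo {
	int	f_count;
};

static uma_zone_t foo_zone;	/* Created with UMA_ZONE_SMR. */
static struct foo *foo_ptr;	/* Pointer published for lockless readers. */

static int
foo_read(void)
{
	struct foo *p;
	smr_t smr;
	int val;

	smr = uma_zone_get_smr(foo_zone);
	val = 0;
	smr_enter(smr);
	p = (void *)atomic_load_acq_ptr((uintptr_t *)&foo_ptr);
	if (p != NULL)
		val = p->f_count;	/* Safe: p cannot be reused yet. */
	smr_exit(smr);
	return (val);
}

static void
foo_retire(struct foo *p)
{

	/* Readers inside smr_enter()/smr_exit() drain before reuse. */
	uma_zfree_smr(foo_zone, p);
}
#endif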

/* See uma.h */
void
uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
{
	uma_cache_t cache;
	uma_cache_bucket_t bucket;
	int itemdomain, uz_flags;

	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);

	CTR2(KTR_UMA, "uma_zfree_arg zone %s(%p)", zone->uz_name, zone);

#ifdef UMA_ZALLOC_DEBUG
	KASSERT((zone->uz_flags & UMA_ZONE_SMR) == 0,
	    ("uma_zfree_arg: called with SMR zone."));
	if (uma_zfree_debug(zone, item, udata) == EJUSTRETURN)
		return;
#endif
	/* uma_zfree(..., NULL) does nothing, to match free(9). */
	if (item == NULL)
		return;

	/*
	 * We are accessing the per-cpu cache without a critical section to
	 * fetch size and flags.  This is acceptable; if we are preempted we
	 * will simply read another cpu's line.
	 */
	cache = &zone->uz_cpu[curcpu];
	uz_flags = cache_uz_flags(cache);
	if (UMA_ALWAYS_CTORDTOR ||
	    __predict_false((uz_flags & UMA_ZFLAG_CTORDTOR) != 0))
		item_dtor(zone, item, cache_uz_size(cache), udata, SKIP_NONE);

	/*
	 * The race here is acceptable.  If we miss it we'll just have to wait
	 * a little longer for the limits to be reset.
	 */
	if (__predict_false(uz_flags & UMA_ZFLAG_LIMIT)) {
		if (atomic_load_32(&zone->uz_sleepers) > 0)
			goto zfree_item;
	}

	/*
	 * If possible, free to the per-CPU cache.  There are two
	 * requirements for safe access to the per-CPU cache: (1) the thread
	 * accessing the cache must not be preempted or yield during access,
	 * and (2) the thread must not migrate CPUs without switching which
	 * cache it accesses.  We rely on a critical section to prevent
	 * preemption and migration.  We release the critical section in
	 * order to acquire the zone mutex if we are unable to free to the
	 * current cache; when we re-acquire the critical section, we must
	 * detect and handle migration if it has occurred.
	 */
	itemdomain = 0;
#ifdef NUMA
	if ((uz_flags & UMA_ZONE_FIRSTTOUCH) != 0)
		itemdomain = item_domain(item);
#endif
	critical_enter();
	do {
		cache = &zone->uz_cpu[curcpu];
		/*
		 * Try to free into the allocbucket first to give LIFO
		 * ordering for cache-hot data structures.  Spill over
		 * into the freebucket if necessary.  Alloc will swap
		 * them if one runs dry.
		 */
		bucket = &cache->uc_allocbucket;
#ifdef NUMA
		if ((uz_flags & UMA_ZONE_FIRSTTOUCH) != 0 &&
		    PCPU_GET(domain) != itemdomain) {
			bucket = &cache->uc_crossbucket;
		} else
#endif
		if (bucket->ucb_cnt == bucket->ucb_entries &&
		    cache->uc_freebucket.ucb_cnt <
		    cache->uc_freebucket.ucb_entries)
			cache_bucket_swap(&cache->uc_freebucket,
			    &cache->uc_allocbucket);
		if (__predict_true(bucket->ucb_cnt < bucket->ucb_entries)) {
			cache_bucket_push(cache, bucket, item);
			critical_exit();
			return;
		}
	} while (cache_free(zone, cache, udata, item, itemdomain));
	critical_exit();

	/*
	 * If nothing else caught this, we'll just do an internal free.
	 */
zfree_item:
	zone_free_item(zone, item, udata, SKIP_DTOR);
}
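
/*
 * Illustrative sketch (not part of UMA): udata is handed through unchanged
 * to the zone's ctor and dtor, so a caller can thread a per-allocation
 * argument through uma_zalloc_arg()/uma_zfree_arg().  The foo_* names and
 * the f_owner field are hypothetical.
 */
#if 0
struct foo_softc;
struct foo {
	struct foo_softc *f_owner;
};

static uma_zone_t foo_zone;	/* Created with foo_ctor/foo_dtor. */

static int
foo_ctor(void *mem, int size __unused, void *udata, int flags __unused)
{
	struct foo *p = mem;

	p->f_owner = udata;
	return (0);
}

static void
foo_dtor(void *mem, int size __unused, void *udata)
{
	struct foo *p = mem;

	KASSERT(p->f_owner == udata, ("foo: owner changed while allocated"));
}

static struct foo *
foo_alloc(struct foo_softc *sc)
{

	return (uma_zalloc_arg(foo_zone, sc, M_WAITOK));
}

static void
foo_free(struct foo *p, struct foo_softc *sc)
{

	uma_zfree_arg(foo_zone, p, sc);
}
#endif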

#ifdef NUMA
/*
 * Sort cross-domain free buckets into domain-correct buckets and cache
 * them.
 */
static void
zone_free_cross(uma_zone_t zone, uma_bucket_t bucket, void *udata)
{
	struct uma_bucketlist emptybuckets, fullbuckets;
	uma_zone_domain_t zdom;
	uma_bucket_t b;
	smr_seq_t seq;
	void *item;
	int domain;

	CTR3(KTR_UMA,
	    "uma_zfree: zone %s(%p) draining cross bucket %p",
	    zone->uz_name, zone, bucket);

	/*
	 * It is possible for buckets to arrive here out of order so we fetch
	 * the current smr seq rather than accepting the bucket's.
	 */
	seq = SMR_SEQ_INVALID;
	if ((zone->uz_flags & UMA_ZONE_SMR) != 0)
		seq = smr_advance(zone->uz_smr);

	/*
	 * To avoid having ndomain * ndomain buckets for sorting we have a
	 * lock on the current crossfree bucket.  A full matrix with
	 * per-domain locking could be used if necessary.
	 */
	STAILQ_INIT(&emptybuckets);
	STAILQ_INIT(&fullbuckets);
	ZONE_CROSS_LOCK(zone);
	for (; bucket->ub_cnt > 0; bucket->ub_cnt--) {
		item = bucket->ub_bucket[bucket->ub_cnt - 1];
		domain = item_domain(item);
		zdom = ZDOM_GET(zone, domain);
		if (zdom->uzd_cross == NULL) {
			if ((b = STAILQ_FIRST(&emptybuckets)) != NULL) {
				STAILQ_REMOVE_HEAD(&emptybuckets, ub_link);
				zdom->uzd_cross = b;
			} else {
				/*
				 * Avoid allocating a bucket with the cross lock
				 * held, since allocation can trigger a
				 * cross-domain free and bucket zones may
				 * allocate from each other.
				 */
				ZONE_CROSS_UNLOCK(zone);
				b = bucket_alloc(zone, udata, M_NOWAIT);
				if (b == NULL)
					goto out;
				ZONE_CROSS_LOCK(zone);
				if (zdom->uzd_cross != NULL) {
					STAILQ_INSERT_HEAD(&emptybuckets, b,
					    ub_link);
				} else {
					zdom->uzd_cross = b;
				}
			}
		}
		b = zdom->uzd_cross;
		b->ub_bucket[b->ub_cnt++] = item;
		b->ub_seq = seq;
		if (b->ub_cnt == b->ub_entries) {
			STAILQ_INSERT_HEAD(&fullbuckets, b, ub_link);
			if ((b = STAILQ_FIRST(&emptybuckets)) != NULL)
				STAILQ_REMOVE_HEAD(&emptybuckets, ub_link);
			zdom->uzd_cross = b;
		}
	}
	ZONE_CROSS_UNLOCK(zone);
out:
	if (bucket->ub_cnt == 0)
		bucket->ub_seq = SMR_SEQ_INVALID;
	bucket_free(zone, bucket, udata);

	while ((b = STAILQ_FIRST(&emptybuckets)) != NULL) {
		STAILQ_REMOVE_HEAD(&emptybuckets, ub_link);
		bucket_free(zone, b, udata);
	}
	while ((b = STAILQ_FIRST(&fullbuckets)) != NULL) {
		STAILQ_REMOVE_HEAD(&fullbuckets, ub_link);
		domain = item_domain(b->ub_bucket[0]);
		zone_put_bucket(zone, domain, b, udata, true);
	}
}
#endif

static void
zone_free_bucket(uma_zone_t zone, uma_bucket_t bucket, void *udata,
    int itemdomain, bool ws)
{

#ifdef NUMA
	/*
	 * Buckets coming from the wrong domain will be entirely for the
	 * only other domain on two-domain systems.  In this case we can
	 * simply cache them.  Otherwise we need to sort them back to
	 * correct domains.
	 */
	if ((zone->uz_flags & UMA_ZONE_FIRSTTOUCH) != 0 &&
	    vm_ndomains > 2 && PCPU_GET(domain) != itemdomain) {
		zone_free_cross(zone, bucket, udata);
		return;
	}
#endif

	/*
	 * Attempt to save the bucket in the zone's domain bucket cache.
	 */
	CTR3(KTR_UMA,
	    "uma_zfree: zone %s(%p) putting bucket %p on free list",
	    zone->uz_name, zone, bucket);
	/* ub_cnt is pointing to the last free item */
	if ((zone->uz_flags & UMA_ZONE_ROUNDROBIN) != 0)
		itemdomain = zone_domain_lowest(zone, itemdomain);
	zone_put_bucket(zone, itemdomain, bucket, udata, ws);
}

/*
 * Populate a free or cross bucket for the current cpu cache.  Free any
 * existing full bucket either to the zone cache or back to the slab layer.
 *
 * Enters and returns in a critical section.  false return indicates that
 * we cannot satisfy this free in the cache layer.  true indicates that
 * the caller should retry.
 */
static __noinline bool
cache_free(uma_zone_t zone, uma_cache_t cache, void *udata, void *item,
    int itemdomain)
{
	uma_cache_bucket_t cbucket;
	uma_bucket_t newbucket, bucket;

	CRITICAL_ASSERT(curthread);

	if (zone->uz_bucket_size == 0)
		return (false);

	cache = &zone->uz_cpu[curcpu];
	newbucket = NULL;

	/*
	 * FIRSTTOUCH domains need to free to the correct zdom.  When
	 * enabled this is the zdom of the item.  The bucket is the
	 * cross bucket if the current domain and itemdomain do not match.
	 */
	cbucket = &cache->uc_freebucket;
#ifdef NUMA
	if ((cache_uz_flags(cache) & UMA_ZONE_FIRSTTOUCH) != 0) {
		if (PCPU_GET(domain) != itemdomain) {
			cbucket = &cache->uc_crossbucket;
			if (cbucket->ucb_cnt != 0)
				counter_u64_add(zone->uz_xdomain,
				    cbucket->ucb_cnt);
		}
	}
#endif
	bucket = cache_bucket_unload(cbucket);
	KASSERT(bucket == NULL || bucket->ub_cnt == bucket->ub_entries,
	    ("cache_free: Entered with non-full free bucket."));

	/* We are no longer associated with this CPU. */
	critical_exit();

	/*
	 * Don't let SMR zones operate without a free bucket.  Force
	 * a synchronize and re-use this one.  We will only degrade
	 * to a synchronize every bucket_size items rather than every
	 * item if we fail to allocate a bucket.
	 */
	if ((zone->uz_flags & UMA_ZONE_SMR) != 0) {
		if (bucket != NULL)
			bucket->ub_seq = smr_advance(zone->uz_smr);
		newbucket = bucket_alloc(zone, udata, M_NOWAIT);
		if (newbucket == NULL && bucket != NULL) {
			bucket_drain(zone, bucket);
			newbucket = bucket;
			bucket = NULL;
		}
	} else if (!bucketdisable)
		newbucket = bucket_alloc(zone, udata, M_NOWAIT);

	if (bucket != NULL)
		zone_free_bucket(zone, bucket, udata, itemdomain, true);

	critical_enter();
	if ((bucket = newbucket) == NULL)
		return (false);
	cache = &zone->uz_cpu[curcpu];
#ifdef NUMA
	/*
	 * Check to see if we should be populating the cross bucket.  If it
	 * is already populated we will fall through and attempt to populate
	 * the free bucket.
	 */
	if ((cache_uz_flags(cache) & UMA_ZONE_FIRSTTOUCH) != 0) {
		if (PCPU_GET(domain) != itemdomain &&
		    cache->uc_crossbucket.ucb_bucket == NULL) {
			cache_bucket_load_cross(cache, bucket);
			return (true);
		}
	}
#endif
	/*
	 * We may have lost the race to fill the bucket or switched CPUs.
	 */
	if (cache->uc_freebucket.ucb_bucket != NULL) {
		critical_exit();
		bucket_free(zone, bucket, udata);
		critical_enter();
	} else
		cache_bucket_load_free(cache, bucket);

	return (true);
}

static void
slab_free_item(uma_zone_t zone, uma_slab_t slab, void *item)
{
	uma_keg_t keg;
	uma_domain_t dom;
	int freei;

	keg = zone->uz_keg;
	KEG_LOCK_ASSERT(keg, slab->us_domain);

	/* Do we need to remove from any lists? */
	dom = &keg->uk_domain[slab->us_domain];
	if (slab->us_freecount + 1 == keg->uk_ipers) {
		LIST_REMOVE(slab, us_link);
		LIST_INSERT_HEAD(&dom->ud_free_slab, slab, us_link);
		dom->ud_free_slabs++;
	} else if (slab->us_freecount == 0) {
		LIST_REMOVE(slab, us_link);
		LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
	}

	/* Slab management. */
	freei = slab_item_index(slab, keg, item);
	BIT_SET(keg->uk_ipers, freei, &slab->us_free);
	slab->us_freecount++;

	/* Keg statistics. */
	dom->ud_free_items++;
}

static void
zone_release(void *arg, void **bucket, int cnt)
{
	struct mtx *lock;
	uma_zone_t zone;
	uma_slab_t slab;
	uma_keg_t keg;
	uint8_t *mem;
	void *item;
	int i;

	zone = arg;
	keg = zone->uz_keg;
	lock = NULL;
	if (__predict_false((zone->uz_flags & UMA_ZFLAG_HASH) != 0))
		lock = KEG_LOCK(keg, 0);
	for (i = 0; i < cnt; i++) {
		item = bucket[i];
		if (__predict_true((zone->uz_flags & UMA_ZFLAG_VTOSLAB) != 0)) {
			slab = vtoslab((vm_offset_t)item);
		} else {
			mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
			if ((zone->uz_flags & UMA_ZFLAG_HASH) != 0)
				slab = hash_sfind(&keg->uk_hash, mem);
			else
				slab = (uma_slab_t)(mem + keg->uk_pgoff);
		}
		if (lock != KEG_LOCKPTR(keg, slab->us_domain)) {
			if (lock != NULL)
				mtx_unlock(lock);
			lock = KEG_LOCK(keg, slab->us_domain);
		}
		slab_free_item(zone, slab, item);
	}
	if (lock != NULL)
		mtx_unlock(lock);
}

/*
 * Frees a single item to any zone.
 *
 * Arguments:
 *	zone   The zone to free to
 *	item   The item we're freeing
 *	udata  User supplied data for the dtor
 *	skip   Skip dtors and finis
 */
static __noinline void
zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip)
{

	/*
	 * If a free is sent directly to an SMR zone we have to
	 * synchronize immediately because the item can instantly
	 * be reallocated.  This should only happen in degenerate
	 * cases when no memory is available for per-cpu caches.
	 */
	if ((zone->uz_flags & UMA_ZONE_SMR) != 0 && skip == SKIP_NONE)
		smr_synchronize(zone->uz_smr);

	item_dtor(zone, item, zone->uz_size, udata, skip);

	if (skip < SKIP_FINI && zone->uz_fini)
		zone->uz_fini(item, zone->uz_size);

	zone->uz_release(zone->uz_arg, &item, 1);

	if (skip & SKIP_CNT)
		return;

	counter_u64_add(zone->uz_frees, 1);

	if (zone->uz_max_items > 0)
		zone_free_limit(zone, 1);
}

/* See uma.h */
int
uma_zone_set_max(uma_zone_t zone, int nitems)
{

	/*
	 * If the limit is small, we may need to constrain the maximum per-CPU
	 * cache size, or disable caching entirely.
	 */
	uma_zone_set_maxcache(zone, nitems);

	/*
	 * XXX This can misbehave if the zone has any allocations with
	 * no limit and a limit is imposed.  There is currently no
	 * way to clear a limit.
	 */
	ZONE_LOCK(zone);
	zone->uz_max_items = nitems;
	zone->uz_flags |= UMA_ZFLAG_LIMIT;
	zone_update_caches(zone);
	/* We may need to wake waiters. */
	wakeup(&zone->uz_max_items);
	ZONE_UNLOCK(zone);

	return (nitems);
}

/* See uma.h */
void
uma_zone_set_maxcache(uma_zone_t zone, int nitems)
{
	int bpcpu, bpdom, bsize, nb;

	ZONE_LOCK(zone);

	/*
	 * Compute a lower bound on the number of items that may be cached in
	 * the zone.  Each CPU gets at least two buckets, and for cross-domain
	 * frees we use an additional bucket per CPU and per domain.  Select
	 * the largest bucket size that does not exceed half of the requested
	 * limit, with the leftover space given to the full bucket cache.
	 */
	bpdom = 0;
	bpcpu = 2;
#ifdef NUMA
	if ((zone->uz_flags & UMA_ZONE_FIRSTTOUCH) != 0 && vm_ndomains > 1) {
		bpcpu++;
		bpdom++;
	}
#endif
	nb = bpcpu * mp_ncpus + bpdom * vm_ndomains;
	bsize = nitems / nb / 2;
	if (bsize > BUCKET_MAX)
		bsize = BUCKET_MAX;
	else if (bsize == 0 && nitems / nb > 0)
		bsize = 1;
	zone->uz_bucket_size_max = zone->uz_bucket_size = bsize;
	if (zone->uz_bucket_size_min > zone->uz_bucket_size_max)
		zone->uz_bucket_size_min = zone->uz_bucket_size_max;
	zone->uz_bucket_max = nitems - nb * bsize;
	ZONE_UNLOCK(zone);
}
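
/*
 * Worked example for the sizing above (the input values are assumptions for
 * illustration): with nitems = 4096 on an 8-CPU, single-domain system,
 * bpcpu = 2 and bpdom = 0, so nb = 2 * 8 = 16 buckets.  Then
 * bsize = 4096 / 16 / 2 = 128 items per bucket (assuming that is below
 * BUCKET_MAX), leaving uz_bucket_max = 4096 - 16 * 128 = 2048 items of
 * headroom for the zone's full bucket cache.
 */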

/* See uma.h */
int
uma_zone_get_max(uma_zone_t zone)
{
	int nitems;

	nitems = atomic_load_64(&zone->uz_max_items);

	return (nitems);
}

/* See uma.h */
void
uma_zone_set_warning(uma_zone_t zone, const char *warning)
{

	ZONE_ASSERT_COLD(zone);
	zone->uz_warning = warning;
}

/* See uma.h */
void
uma_zone_set_maxaction(uma_zone_t zone, uma_maxaction_t maxaction)
{

	ZONE_ASSERT_COLD(zone);
	TASK_INIT(&zone->uz_maxaction, 0, (task_fn_t *)maxaction, zone);
}

/* See uma.h */
int
uma_zone_get_cur(uma_zone_t zone)
{
	int64_t nitems;
	u_int i;

	nitems = 0;
	if (zone->uz_allocs != EARLY_COUNTER && zone->uz_frees != EARLY_COUNTER)
		nitems = counter_u64_fetch(zone->uz_allocs) -
		    counter_u64_fetch(zone->uz_frees);
	CPU_FOREACH(i)
		nitems += atomic_load_64(&zone->uz_cpu[i].uc_allocs) -
		    atomic_load_64(&zone->uz_cpu[i].uc_frees);

	return (nitems < 0 ? 0 : nitems);
}

static uint64_t
uma_zone_get_allocs(uma_zone_t zone)
{
	uint64_t nitems;
	u_int i;

	nitems = 0;
	if (zone->uz_allocs != EARLY_COUNTER)
		nitems = counter_u64_fetch(zone->uz_allocs);
	CPU_FOREACH(i)
		nitems += atomic_load_64(&zone->uz_cpu[i].uc_allocs);

	return (nitems);
}

static uint64_t
uma_zone_get_frees(uma_zone_t zone)
{
	uint64_t nitems;
	u_int i;

	nitems = 0;
	if (zone->uz_frees != EARLY_COUNTER)
		nitems = counter_u64_fetch(zone->uz_frees);
	CPU_FOREACH(i)
		nitems += atomic_load_64(&zone->uz_cpu[i].uc_frees);

	return (nitems);
}

#ifdef INVARIANTS
/* Used only for KEG_ASSERT_COLD(). */
static uint64_t
uma_keg_get_allocs(uma_keg_t keg)
{
	uma_zone_t z;
	uint64_t nitems;

	nitems = 0;
	LIST_FOREACH(z, &keg->uk_zones, uz_link)
		nitems += uma_zone_get_allocs(z);

	return (nitems);
}
#endif

/* See uma.h */
void
uma_zone_set_init(uma_zone_t zone, uma_init uminit)
{
	uma_keg_t keg;

	KEG_GET(zone, keg);
	KEG_ASSERT_COLD(keg);
	keg->uk_init = uminit;
}

/* See uma.h */
void
uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
{
	uma_keg_t keg;

	KEG_GET(zone, keg);
	KEG_ASSERT_COLD(keg);
	keg->uk_fini = fini;
}

/* See uma.h */
void
uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
{

	ZONE_ASSERT_COLD(zone);
	zone->uz_init = zinit;
}

/* See uma.h */
void
uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
{

	ZONE_ASSERT_COLD(zone);
	zone->uz_fini = zfini;
}

/* See uma.h */
void
uma_zone_set_freef(uma_zone_t zone, uma_free freef)
{
	uma_keg_t keg;

	KEG_GET(zone, keg);
	KEG_ASSERT_COLD(keg);
	keg->uk_freef = freef;
}

/* See uma.h */
void
uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
{
	uma_keg_t keg;

	KEG_GET(zone, keg);
	KEG_ASSERT_COLD(keg);
	keg->uk_allocf = allocf;
}

/* See uma.h */
void
uma_zone_set_smr(uma_zone_t zone, smr_t smr)
{

	ZONE_ASSERT_COLD(zone);

	KASSERT(smr != NULL, ("Got NULL smr"));
	KASSERT((zone->uz_flags & UMA_ZONE_SMR) == 0,
	    ("zone %p (%s) already uses SMR", zone, zone->uz_name));
	zone->uz_flags |= UMA_ZONE_SMR;
	zone->uz_smr = smr;
	zone_update_caches(zone);
}

smr_t
uma_zone_get_smr(uma_zone_t zone)
{

	return (zone->uz_smr);
}

/* See uma.h */
void
uma_zone_reserve(uma_zone_t zone, int items)
{
	uma_keg_t keg;

	KEG_GET(zone, keg);
	KEG_ASSERT_COLD(keg);
	keg->uk_reserve = items;
}
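
/*
 * Illustrative sketch (not part of UMA): a keg reserve established with
 * uma_zone_reserve() is later drawn down by allocating with M_USE_RESERVE,
 * e.g. on a path that must make progress while memory is being reclaimed.
 * The foo_* names are hypothetical.
 */
#if 0
struct foo {
	int	f_val;
};

static uma_zone_t foo_zone;

static void
foo_zone_init(void)
{

	foo_zone = uma_zcreate("foo", sizeof(struct foo), NULL, NULL, NULL,
	    NULL, UMA_ALIGN_PTR, 0);
	/* Keep 32 items aside; see the reserve check in zone_import(). */
	uma_zone_reserve(foo_zone, 32);
	uma_prealloc(foo_zone, 32);
}

static struct foo *
foo_alloc_critical(void)
{

	/* May dip into, but not deplete, the reserved items. */
	return (uma_zalloc(foo_zone, M_NOWAIT | M_USE_RESERVE));
}
#endif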

/* See uma.h */
int
uma_zone_reserve_kva(uma_zone_t zone, int count)
{
	uma_keg_t keg;
	vm_offset_t kva;
	u_int pages;

	KEG_GET(zone, keg);
	KEG_ASSERT_COLD(keg);
	ZONE_ASSERT_COLD(zone);

	pages = howmany(count, keg->uk_ipers) * keg->uk_ppera;

#ifdef UMA_MD_SMALL_ALLOC
	if (keg->uk_ppera > 1) {
#else
	if (1) {
#endif
		kva = kva_alloc((vm_size_t)pages * PAGE_SIZE);
		if (kva == 0)
			return (0);
	} else
		kva = 0;

	MPASS(keg->uk_kva == 0);
	keg->uk_kva = kva;
	keg->uk_offset = 0;
	zone->uz_max_items = pages * keg->uk_ipers;
#ifdef UMA_MD_SMALL_ALLOC
	keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc;
#else
	keg->uk_allocf = noobj_alloc;
#endif
	keg->uk_flags |= UMA_ZFLAG_LIMIT | UMA_ZONE_NOFREE;
	zone->uz_flags |= UMA_ZFLAG_LIMIT | UMA_ZONE_NOFREE;
	zone_update_caches(zone);

	return (1);
}

/* See uma.h */
void
uma_prealloc(uma_zone_t zone, int items)
{
	struct vm_domainset_iter di;
	uma_domain_t dom;
	uma_slab_t slab;
	uma_keg_t keg;
	int aflags, domain, slabs;

	KEG_GET(zone, keg);
	slabs = howmany(items, keg->uk_ipers);
	while (slabs-- > 0) {
		aflags = M_NOWAIT;
		vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain,
		    &aflags);
		for (;;) {
			slab = keg_alloc_slab(keg, zone, domain, M_WAITOK,
			    aflags);
			if (slab != NULL) {
				dom = &keg->uk_domain[slab->us_domain];
				/*
				 * keg_alloc_slab() always returns a slab on
				 * the partial list.
				 */
				LIST_REMOVE(slab, us_link);
				LIST_INSERT_HEAD(&dom->ud_free_slab, slab,
				    us_link);
				dom->ud_free_slabs++;
				KEG_UNLOCK(keg, slab->us_domain);
				break;
			}
			if (vm_domainset_iter_policy(&di, &domain) != 0)
				vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask, 0);
		}
	}
}

/*
 * Returns a snapshot of memory consumption in bytes.
 */
size_t
uma_zone_memory(uma_zone_t zone)
{
	size_t sz;
	int i;

	sz = 0;
	if (zone->uz_flags & UMA_ZFLAG_CACHE) {
		for (i = 0; i < vm_ndomains; i++)
			sz += ZDOM_GET(zone, i)->uzd_nitems;
		return (sz * zone->uz_size);
	}
	for (i = 0; i < vm_ndomains; i++)
		sz += zone->uz_keg->uk_domain[i].ud_pages;

	return (sz * PAGE_SIZE);
}

/* See uma.h */
void
uma_reclaim(int req)
{

	CTR0(KTR_UMA, "UMA: vm asked us to release pages!");
	sx_xlock(&uma_reclaim_lock);
	bucket_enable();

	switch (req) {
	case UMA_RECLAIM_TRIM:
		zone_foreach(zone_trim, NULL);
		break;
	case UMA_RECLAIM_DRAIN:
	case UMA_RECLAIM_DRAIN_CPU:
		zone_foreach(zone_drain, NULL);
		if (req == UMA_RECLAIM_DRAIN_CPU) {
			pcpu_cache_drain_safe(NULL);
			zone_foreach(zone_drain, NULL);
		}
		break;
	default:
		panic("unhandled reclamation request %d", req);
	}

	/*
	 * Some slabs may have been freed while the slab zones were visited
	 * early in the pass above, so visit them again here in order to free
	 * pages that become empty only once other zones are drained.  We have
	 * to do the same for buckets.
	 */
	zone_drain(slabzones[0], NULL);
	zone_drain(slabzones[1], NULL);
	bucket_zone_drain();
	sx_xunlock(&uma_reclaim_lock);
}
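
/*
 * Illustrative sketch (not part of UMA): subsystems typically reach the
 * reclaim machinery above either indirectly, via the vm_lowmem event that
 * uma_reclaim_worker() below fires, or directly by draining their own
 * zones.  foo_zone and foo_lowmem are hypothetical; the vm_lowmem handler
 * shape is assumed to be (void *, int).
 */
#if 0
static uma_zone_t foo_zone;

static void
foo_lowmem(void *arg __unused, int flags __unused)
{

	/* Give back cached buckets and free slabs from our zone. */
	uma_zone_reclaim(foo_zone, UMA_RECLAIM_DRAIN);
}
EVENTHANDLER_DEFINE(vm_lowmem, foo_lowmem, NULL, EVENTHANDLER_PRI_ANY);
#endif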

static volatile int uma_reclaim_needed;

void
uma_reclaim_wakeup(void)
{

	if (atomic_fetchadd_int(&uma_reclaim_needed, 1) == 0)
		wakeup(uma_reclaim);
}

void
uma_reclaim_worker(void *arg __unused)
{

	for (;;) {
		sx_xlock(&uma_reclaim_lock);
		while (atomic_load_int(&uma_reclaim_needed) == 0)
			sx_sleep(uma_reclaim, &uma_reclaim_lock, PVM, "umarcl",
			    hz);
		sx_xunlock(&uma_reclaim_lock);
		EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_KMEM);
		uma_reclaim(UMA_RECLAIM_DRAIN_CPU);
		atomic_store_int(&uma_reclaim_needed, 0);
		/* Don't fire more than once per second. */
		pause("umarclslp", hz);
	}
}

/* See uma.h */
void
uma_zone_reclaim(uma_zone_t zone, int req)
{

	switch (req) {
	case UMA_RECLAIM_TRIM:
		zone_trim(zone, NULL);
		break;
	case UMA_RECLAIM_DRAIN:
		zone_drain(zone, NULL);
		break;
	case UMA_RECLAIM_DRAIN_CPU:
		pcpu_cache_drain_safe(zone);
		zone_drain(zone, NULL);
		break;
	default:
		panic("unhandled reclamation request %d", req);
	}
}

/* See uma.h */
int
uma_zone_exhausted(uma_zone_t zone)
{

	return (atomic_load_32(&zone->uz_sleepers) > 0);
}

unsigned long
uma_limit(void)
{

	return (uma_kmem_limit);
}

void
uma_set_limit(unsigned long limit)
{

	uma_kmem_limit = limit;
}

unsigned long
uma_size(void)
{

	return (atomic_load_long(&uma_kmem_total));
}

long
uma_avail(void)
{

	return (uma_kmem_limit - uma_size());
}

#ifdef DDB
/*
 * Generate statistics across both the zone and its per-cpu caches.  Return
 * desired statistics if the pointer is non-NULL for that statistic.
 *
 * Note: does not update the zone statistics, as it can't safely clear the
 * per-CPU cache statistic.
 */
static void
uma_zone_sumstat(uma_zone_t z, long *cachefreep, uint64_t *allocsp,
    uint64_t *freesp, uint64_t *sleepsp, uint64_t *xdomainp)
{
	uma_cache_t cache;
	uint64_t allocs, frees, sleeps, xdomain;
	int cachefree, cpu;

	allocs = frees = sleeps = xdomain = 0;
	cachefree = 0;
	CPU_FOREACH(cpu) {
		cache = &z->uz_cpu[cpu];
		cachefree += cache->uc_allocbucket.ucb_cnt;
		cachefree += cache->uc_freebucket.ucb_cnt;
		xdomain += cache->uc_crossbucket.ucb_cnt;
		cachefree += cache->uc_crossbucket.ucb_cnt;
		allocs += cache->uc_allocs;
		frees += cache->uc_frees;
	}
	allocs += counter_u64_fetch(z->uz_allocs);
	frees += counter_u64_fetch(z->uz_frees);
	xdomain += counter_u64_fetch(z->uz_xdomain);
	sleeps += z->uz_sleeps;
	if (cachefreep != NULL)
		*cachefreep = cachefree;
	if (allocsp != NULL)
		*allocsp = allocs;
	if (freesp != NULL)
		*freesp = frees;
	if (sleepsp != NULL)
		*sleepsp = sleeps;
	if (xdomainp != NULL)
		*xdomainp = xdomain;
}
#endif /* DDB */

static int
sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS)
{
	uma_keg_t kz;
	uma_zone_t z;
	int count;

	count = 0;
	rw_rlock(&uma_rwlock);
	LIST_FOREACH(kz, &uma_kegs, uk_link) {
		LIST_FOREACH(z, &kz->uk_zones, uz_link)
			count++;
	}
	LIST_FOREACH(z, &uma_cachezones, uz_link)
		count++;

	rw_runlock(&uma_rwlock);
	return (sysctl_handle_int(oidp, &count, 0, req));
}

static void
uma_vm_zone_stats(struct uma_type_header *uth, uma_zone_t z, struct sbuf *sbuf,
    struct uma_percpu_stat *ups, bool internal)
{
	uma_zone_domain_t zdom;
	uma_cache_t cache;
	int i;

	for (i = 0; i < vm_ndomains; i++) {
		zdom = ZDOM_GET(z, i);
		uth->uth_zone_free += zdom->uzd_nitems;
	}
	uth->uth_allocs = counter_u64_fetch(z->uz_allocs);
	uth->uth_frees = counter_u64_fetch(z->uz_frees);
	uth->uth_fails = counter_u64_fetch(z->uz_fails);
	uth->uth_xdomain = counter_u64_fetch(z->uz_xdomain);
	uth->uth_sleeps = z->uz_sleeps;

	for (i = 0; i < mp_maxid + 1; i++) {
		bzero(&ups[i], sizeof(*ups));
		if (internal || CPU_ABSENT(i))
			continue;
		cache = &z->uz_cpu[i];
		ups[i].ups_cache_free += cache->uc_allocbucket.ucb_cnt;
		ups[i].ups_cache_free += cache->uc_freebucket.ucb_cnt;
		ups[i].ups_cache_free += cache->uc_crossbucket.ucb_cnt;
		ups[i].ups_allocs = cache->uc_allocs;
		ups[i].ups_frees = cache->uc_frees;
	}
}

static int
sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
{
	struct uma_stream_header ush;
	struct uma_type_header uth;
	struct uma_percpu_stat *ups;
	struct sbuf sbuf;
	uma_keg_t kz;
	uma_zone_t z;
	uint64_t items;
	uint32_t kfree, pages;
	int count, error, i;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL);
	ups = malloc((mp_maxid + 1) * sizeof(*ups), M_TEMP, M_WAITOK);

	count = 0;
	rw_rlock(&uma_rwlock);
	LIST_FOREACH(kz, &uma_kegs, uk_link) {
		LIST_FOREACH(z, &kz->uk_zones, uz_link)
			count++;
	}

	LIST_FOREACH(z, &uma_cachezones, uz_link)
		count++;

	/*
	 * Insert stream header.
	 */
	bzero(&ush, sizeof(ush));
	ush.ush_version = UMA_STREAM_VERSION;
	ush.ush_maxcpus = (mp_maxid + 1);
	ush.ush_count = count;
	(void)sbuf_bcat(&sbuf, &ush, sizeof(ush));

	LIST_FOREACH(kz, &uma_kegs, uk_link) {
		kfree = pages = 0;
		for (i = 0; i < vm_ndomains; i++) {
			kfree += kz->uk_domain[i].ud_free_items;
			pages += kz->uk_domain[i].ud_pages;
		}
		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
			bzero(&uth, sizeof(uth));
			strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
			uth.uth_align = kz->uk_align;
			uth.uth_size = kz->uk_size;
			uth.uth_rsize = kz->uk_rsize;
			if (z->uz_max_items > 0) {
				items = UZ_ITEMS_COUNT(z->uz_items);
				uth.uth_pages = (items / kz->uk_ipers) *
				    kz->uk_ppera;
			} else
				uth.uth_pages = pages;
			uth.uth_maxpages = (z->uz_max_items / kz->uk_ipers) *
			    kz->uk_ppera;
			uth.uth_limit = z->uz_max_items;
			uth.uth_keg_free = kfree;

			/*
			 * A zone is secondary if it is not the first entry
			 * on the keg's zone list.
			 */
			if ((z->uz_flags & UMA_ZONE_SECONDARY) &&
			    (LIST_FIRST(&kz->uk_zones) != z))
				uth.uth_zone_flags = UTH_ZONE_SECONDARY;
			uma_vm_zone_stats(&uth, z, &sbuf, ups,
			    kz->uk_flags & UMA_ZFLAG_INTERNAL);
			(void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
			for (i = 0; i < mp_maxid + 1; i++)
				(void)sbuf_bcat(&sbuf, &ups[i], sizeof(ups[i]));
		}
	}
	LIST_FOREACH(z, &uma_cachezones, uz_link) {
		bzero(&uth, sizeof(uth));
		strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
		uth.uth_size = z->uz_size;
		uma_vm_zone_stats(&uth, z, &sbuf, ups, false);
		(void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
		for (i = 0; i < mp_maxid + 1; i++)
			(void)sbuf_bcat(&sbuf, &ups[i], sizeof(ups[i]));
	}

	rw_runlock(&uma_rwlock);
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	free(ups, M_TEMP);
	return (error);
}

int
sysctl_handle_uma_zone_max(SYSCTL_HANDLER_ARGS)
{
	uma_zone_t zone = *(uma_zone_t *)arg1;
	int error, max;

	max = uma_zone_get_max(zone);
	error = sysctl_handle_int(oidp, &max, 0, req);
	if (error || !req->newptr)
		return (error);

	uma_zone_set_max(zone, max);

	return (0);
}

int
sysctl_handle_uma_zone_cur(SYSCTL_HANDLER_ARGS)
{
	uma_zone_t zone;
	int cur;

	/*
	 * Some callers want to add sysctls for global zones that
	 * may not yet exist, so they pass a pointer to a pointer.
	 */
	if (arg2 == 0)
		zone = *(uma_zone_t *)arg1;
	else
		zone = arg1;
	cur = uma_zone_get_cur(zone);
	return (sysctl_handle_int(oidp, &cur, 0, req));
}

static int
sysctl_handle_uma_zone_allocs(SYSCTL_HANDLER_ARGS)
{
	uma_zone_t zone = arg1;
	uint64_t cur;

	cur = uma_zone_get_allocs(zone);
	return (sysctl_handle_64(oidp, &cur, 0, req));
}

static int
sysctl_handle_uma_zone_frees(SYSCTL_HANDLER_ARGS)
{
	uma_zone_t zone = arg1;
	uint64_t cur;

	cur = uma_zone_get_frees(zone);
	return (sysctl_handle_64(oidp, &cur, 0, req));
}
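
/*
 * Illustrative sketch (not part of UMA): the exported handlers above let a
 * subsystem wire its zone's limit into the sysctl tree.  Note that
 * sysctl_handle_uma_zone_max() expects arg1 to point at the uma_zone_t
 * variable, not at the zone itself.  The foo_zone oid is hypothetical.
 */
#if 0
static uma_zone_t foo_zone;
SYSCTL_PROC(_vm, OID_AUTO, foo_max,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, &foo_zone, 0,
    sysctl_handle_uma_zone_max, "I", "Maximum number of foo items");
#endif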

static int
sysctl_handle_uma_zone_flags(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	uma_zone_t zone = arg1;
	int error;

	sbuf_new_for_sysctl(&sbuf, NULL, 0, req);
	if (zone->uz_flags != 0)
		sbuf_printf(&sbuf, "0x%b", zone->uz_flags, PRINT_UMA_ZFLAGS);
	else
		sbuf_printf(&sbuf, "0");
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);

	return (error);
}

static int
sysctl_handle_uma_slab_efficiency(SYSCTL_HANDLER_ARGS)
{
	uma_keg_t keg = arg1;
	int avail, effpct, total;

	total = keg->uk_ppera * PAGE_SIZE;
	if ((keg->uk_flags & UMA_ZFLAG_OFFPAGE) != 0)
		total += slabzone(keg->uk_ipers)->uz_keg->uk_rsize;
	/*
	 * We consider the client's requested size and alignment here, not the
	 * real size determination, uk_rsize, because we also adjust the real
	 * size for internal implementation reasons (max bitset size).
	 */
	avail = keg->uk_ipers * roundup2(keg->uk_size, keg->uk_align + 1);
	if ((keg->uk_flags & UMA_ZONE_PCPU) != 0)
		avail *= mp_maxid + 1;
	effpct = 100 * avail / total;
	return (sysctl_handle_int(oidp, &effpct, 0, req));
}

static int
sysctl_handle_uma_zone_items(SYSCTL_HANDLER_ARGS)
{
	uma_zone_t zone = arg1;
	uint64_t cur;

	cur = UZ_ITEMS_COUNT(atomic_load_64(&zone->uz_items));
	return (sysctl_handle_64(oidp, &cur, 0, req));
}

#ifdef INVARIANTS
static uma_slab_t
uma_dbg_getslab(uma_zone_t zone, void *item)
{
	uma_slab_t slab;
	uma_keg_t keg;
	uint8_t *mem;

	/*
	 * It is safe to return the slab here even though the
	 * zone is unlocked because the item's allocation state
	 * essentially holds a reference.
	 */
	mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
	if ((zone->uz_flags & UMA_ZFLAG_CACHE) != 0)
		return (NULL);
	if (zone->uz_flags & UMA_ZFLAG_VTOSLAB)
		return (vtoslab((vm_offset_t)mem));
	keg = zone->uz_keg;
	if ((keg->uk_flags & UMA_ZFLAG_HASH) == 0)
		return ((uma_slab_t)(mem + keg->uk_pgoff));
	KEG_LOCK(keg, 0);
	slab = hash_sfind(&keg->uk_hash, mem);
	KEG_UNLOCK(keg, 0);

	return (slab);
}

static bool
uma_dbg_zskip(uma_zone_t zone, void *mem)
{

	if ((zone->uz_flags & UMA_ZFLAG_CACHE) != 0)
		return (true);

	return (uma_dbg_kskip(zone->uz_keg, mem));
}

static bool
uma_dbg_kskip(uma_keg_t keg, void *mem)
{
	uintptr_t idx;

	if (dbg_divisor == 0)
		return (true);

	if (dbg_divisor == 1)
		return (false);

	idx = (uintptr_t)mem >> PAGE_SHIFT;
	if (keg->uk_ipers > 1) {
		idx *= keg->uk_ipers;
		idx += ((uintptr_t)mem & PAGE_MASK) / keg->uk_rsize;
	}

	if ((idx / dbg_divisor) * dbg_divisor != idx) {
		counter_u64_add(uma_skip_cnt, 1);
		return (true);
	}
	counter_u64_add(uma_dbg_cnt, 1);

	return (false);
}

/*
 * Set up the slab's freei data such that uma_dbg_free can function.
 */
static void
uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item)
{
	uma_keg_t keg;
	int freei;

	if (slab == NULL) {
		slab = uma_dbg_getslab(zone, item);
		if (slab == NULL)
			panic("uma: item %p did not belong to zone %s",
			    item, zone->uz_name);
	}
	keg = zone->uz_keg;
	freei = slab_item_index(slab, keg, item);

	if (BIT_ISSET(keg->uk_ipers, freei, slab_dbg_bits(slab, keg)))
		panic("Duplicate alloc of %p from zone %p(%s) slab %p(%d)",
		    item, zone, zone->uz_name, slab, freei);
	BIT_SET_ATOMIC(keg->uk_ipers, freei, slab_dbg_bits(slab, keg));
}

/*
 * Verifies freed addresses.  Checks for alignment, valid slab membership
 * and duplicate frees.
 */
static void
uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
{
	uma_keg_t keg;
	int freei;

	if (slab == NULL) {
		slab = uma_dbg_getslab(zone, item);
		if (slab == NULL)
			panic("uma: Freed item %p did not belong to zone %s",
			    item, zone->uz_name);
	}
	keg = zone->uz_keg;
	freei = slab_item_index(slab, keg, item);

	if (freei >= keg->uk_ipers)
		panic("Invalid free of %p from zone %p(%s) slab %p(%d)",
		    item, zone, zone->uz_name, slab, freei);

	if (slab_item(slab, keg, freei) != item)
		panic("Unaligned free of %p from zone %p(%s) slab %p(%d)",
		    item, zone, zone->uz_name, slab, freei);

	if (!BIT_ISSET(keg->uk_ipers, freei, slab_dbg_bits(slab, keg)))
		panic("Duplicate free of %p from zone %p(%s) slab %p(%d)",
		    item, zone, zone->uz_name, slab, freei);

	BIT_CLR_ATOMIC(keg->uk_ipers, freei, slab_dbg_bits(slab, keg));
}
#endif /* INVARIANTS */

#ifdef DDB
static int64_t
get_uma_stats(uma_keg_t kz, uma_zone_t z, uint64_t *allocs, uint64_t *used,
    uint64_t *sleeps, long *cachefree, uint64_t *xdomain)
{
	uint64_t frees;
	int i;

	if (kz->uk_flags & UMA_ZFLAG_INTERNAL) {
		*allocs = counter_u64_fetch(z->uz_allocs);
		frees = counter_u64_fetch(z->uz_frees);
		*sleeps = z->uz_sleeps;
		*cachefree = 0;
		*xdomain = 0;
	} else
		uma_zone_sumstat(z, cachefree, allocs, &frees, sleeps,
		    xdomain);
	for (i = 0; i < vm_ndomains; i++) {
		*cachefree += ZDOM_GET(z, i)->uzd_nitems;
		if (!((z->uz_flags & UMA_ZONE_SECONDARY) &&
		    (LIST_FIRST(&kz->uk_zones) != z)))
			*cachefree += kz->uk_domain[i].ud_free_items;
	}
	*used = *allocs - frees;
	return (((int64_t)*used + *cachefree) * kz->uk_size);
}

DB_SHOW_COMMAND(uma, db_show_uma)
{
	const char *fmt_hdr, *fmt_entry;
	uma_keg_t kz;
	uma_zone_t z;
	uint64_t allocs, used, sleeps, xdomain;
	long cachefree;
	/* variables for sorting */
	uma_keg_t cur_keg;
	uma_zone_t cur_zone, last_zone;
	int64_t cur_size, last_size, size;
	int ties;

	/* /i option produces machine-parseable CSV output */
	if (modif[0] == 'i') {
		fmt_hdr = "%s,%s,%s,%s,%s,%s,%s,%s,%s\n";
		fmt_entry = "\"%s\",%ju,%jd,%ld,%ju,%ju,%u,%jd,%ju\n";
	} else {
		fmt_hdr = "%18s %6s %7s %7s %11s %7s %7s %10s %8s\n";
		fmt_entry = "%18s %6ju %7jd %7ld %11ju %7ju %7u %10jd %8ju\n";
	}

	db_printf(fmt_hdr, "Zone", "Size", "Used", "Free", "Requests",
	    "Sleeps", "Bucket", "Total Mem", "XFree");

	/* Sort the zones with largest size first. */
	last_zone = NULL;
	last_size = INT64_MAX;
	for (;;) {
		cur_zone = NULL;
		cur_size = -1;
		ties = 0;
		LIST_FOREACH(kz, &uma_kegs, uk_link) {
			LIST_FOREACH(z, &kz->uk_zones, uz_link) {
				/*
				 * In the case of size ties, print out zones
				 * in the order they are encountered.  That is,
				 * when we encounter the most recently output
				 * zone, we have already printed all preceding
				 * ties, and we must print all following ties.
				 */
				if (z == last_zone) {
					ties = 1;
					continue;
				}
				size = get_uma_stats(kz, z, &allocs, &used,
				    &sleeps, &cachefree, &xdomain);
				if (size > cur_size && size < last_size + ties)
				{
					cur_size = size;
					cur_zone = z;
					cur_keg = kz;
				}
			}
		}
		if (cur_zone == NULL)
			break;

		size = get_uma_stats(cur_keg, cur_zone, &allocs, &used,
		    &sleeps, &cachefree, &xdomain);
		db_printf(fmt_entry, cur_zone->uz_name,
		    (uintmax_t)cur_keg->uk_size, (intmax_t)used, cachefree,
		    (uintmax_t)allocs, (uintmax_t)sleeps,
		    (unsigned)cur_zone->uz_bucket_size, (intmax_t)size,
		    xdomain);

		if (db_pager_quit)
			return;
		last_zone = cur_zone;
		last_size = cur_size;
	}
}

DB_SHOW_COMMAND(umacache, db_show_umacache)
{
	uma_zone_t z;
	uint64_t allocs, frees;
	long cachefree;
	int i;

	db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free",
	    "Requests", "Bucket");
	LIST_FOREACH(z, &uma_cachezones, uz_link) {
		uma_zone_sumstat(z, &cachefree, &allocs, &frees, NULL, NULL);
		for (i = 0; i < vm_ndomains; i++)
			cachefree += ZDOM_GET(z, i)->uzd_nitems;
		db_printf("%18s %8ju %8jd %8ld %12ju %8u\n",
		    z->uz_name, (uintmax_t)z->uz_size,
		    (intmax_t)(allocs - frees), cachefree,
		    (uintmax_t)allocs, z->uz_bucket_size);
		if (db_pager_quit)
			return;
	}
}
#endif /* DDB */
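
/*
 * Usage note (illustrative): from the kernel debugger the commands above are
 * invoked as "show uma" and "show umacache"; the /i modifier to "show uma"
 * selects the CSV form, e.g.:
 *
 *	db> show uma /i
 *	Zone,Size,Used,Free,Requests,Sleeps,Bucket,Total Mem,XFree
 *	...
 */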