1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson <jeff@FreeBSD.org> 5 * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org> 6 * Copyright (c) 2004-2006 Robert N. M. Watson 7 * All rights reserved. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice unmodified, this list of conditions, and the following 14 * disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 20 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 21 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 24 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 28 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 */ 30 31 /* 32 * uma_core.c Implementation of the Universal Memory allocator 33 * 34 * This allocator is intended to replace the multitude of similar object caches 35 * in the standard FreeBSD kernel. The intent is to be flexible as well as 36 * efficient. A primary design goal is to return unused memory to the rest of 37 * the system. This will make the system as a whole more flexible due to the 38 * ability to move memory to subsystems which most need it instead of leaving 39 * pools of reserved memory unused. 40 * 41 * The basic ideas stem from similar slab/zone based allocators whose algorithms 42 * are well known. 43 * 44 */ 45 46 /* 47 * TODO: 48 * - Improve memory usage for large allocations 49 * - Investigate cache size adjustments 50 */ 51 52 #include <sys/cdefs.h> 53 __FBSDID("$FreeBSD$"); 54 55 #include "opt_ddb.h" 56 #include "opt_param.h" 57 #include "opt_vm.h" 58 59 #include <sys/param.h> 60 #include <sys/systm.h> 61 #include <sys/bitset.h> 62 #include <sys/eventhandler.h> 63 #include <sys/kernel.h> 64 #include <sys/types.h> 65 #include <sys/limits.h> 66 #include <sys/queue.h> 67 #include <sys/malloc.h> 68 #include <sys/ktr.h> 69 #include <sys/lock.h> 70 #include <sys/sysctl.h> 71 #include <sys/mutex.h> 72 #include <sys/proc.h> 73 #include <sys/random.h> 74 #include <sys/rwlock.h> 75 #include <sys/sbuf.h> 76 #include <sys/sched.h> 77 #include <sys/smp.h> 78 #include <sys/taskqueue.h> 79 #include <sys/vmmeter.h> 80 81 #include <vm/vm.h> 82 #include <vm/vm_object.h> 83 #include <vm/vm_page.h> 84 #include <vm/vm_pageout.h> 85 #include <vm/vm_param.h> 86 #include <vm/vm_phys.h> 87 #include <vm/vm_map.h> 88 #include <vm/vm_kern.h> 89 #include <vm/vm_extern.h> 90 #include <vm/uma.h> 91 #include <vm/uma_int.h> 92 #include <vm/uma_dbg.h> 93 94 #include <ddb/ddb.h> 95 96 #ifdef DEBUG_MEMGUARD 97 #include <vm/memguard.h> 98 #endif 99 100 /* 101 * This is the zone and keg from which all zones are spawned. 
102 */ 103 static uma_zone_t kegs; 104 static uma_zone_t zones; 105 106 /* This is the zone from which all offpage uma_slab_ts are allocated. */ 107 static uma_zone_t slabzone; 108 109 /* 110 * The initial hash tables come out of this zone so they can be allocated 111 * prior to malloc coming up. 112 */ 113 static uma_zone_t hashzone; 114 115 /* The boot-time adjusted value for cache line alignment. */ 116 int uma_align_cache = 64 - 1; 117 118 static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets"); 119 120 /* 121 * Are we allowed to allocate buckets? 122 */ 123 static int bucketdisable = 1; 124 125 /* Linked list of all kegs in the system */ 126 static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs); 127 128 /* Linked list of all cache-only zones in the system */ 129 static LIST_HEAD(,uma_zone) uma_cachezones = 130 LIST_HEAD_INITIALIZER(uma_cachezones); 131 132 /* This RW lock protects the keg list */ 133 static struct rwlock_padalign __exclusive_cache_line uma_rwlock; 134 135 /* 136 * Pointer and counter to pool of pages, that is preallocated at 137 * startup to bootstrap UMA. 138 */ 139 static char *bootmem; 140 static int boot_pages; 141 142 static struct sx uma_drain_lock; 143 144 /* kmem soft limit. */ 145 static unsigned long uma_kmem_limit = LONG_MAX; 146 static volatile unsigned long uma_kmem_total; 147 148 /* Is the VM done starting up? */ 149 static enum { BOOT_COLD = 0, BOOT_STRAPPED, BOOT_PAGEALLOC, BOOT_BUCKETS, 150 BOOT_RUNNING } booted = BOOT_COLD; 151 152 /* 153 * This is the handle used to schedule events that need to happen 154 * outside of the allocation fast path. 155 */ 156 static struct callout uma_callout; 157 #define UMA_TIMEOUT 20 /* Seconds for callout interval. */ 158 159 /* 160 * This structure is passed as the zone ctor arg so that I don't have to create 161 * a special allocation function just for zones. 162 */ 163 struct uma_zctor_args { 164 const char *name; 165 size_t size; 166 uma_ctor ctor; 167 uma_dtor dtor; 168 uma_init uminit; 169 uma_fini fini; 170 uma_import import; 171 uma_release release; 172 void *arg; 173 uma_keg_t keg; 174 int align; 175 uint32_t flags; 176 }; 177 178 struct uma_kctor_args { 179 uma_zone_t zone; 180 size_t size; 181 uma_init uminit; 182 uma_fini fini; 183 int align; 184 uint32_t flags; 185 }; 186 187 struct uma_bucket_zone { 188 uma_zone_t ubz_zone; 189 char *ubz_name; 190 int ubz_entries; /* Number of items it can hold. */ 191 int ubz_maxsize; /* Maximum allocation size per-item. */ 192 }; 193 194 /* 195 * Compute the actual number of bucket entries to pack them in power 196 * of two sizes for more efficient space utilization. 197 */ 198 #define BUCKET_SIZE(n) \ 199 (((sizeof(void *) * (n)) - sizeof(struct uma_bucket)) / sizeof(void *)) 200 201 #define BUCKET_MAX BUCKET_SIZE(256) 202 203 struct uma_bucket_zone bucket_zones[] = { 204 { NULL, "4 Bucket", BUCKET_SIZE(4), 4096 }, 205 { NULL, "6 Bucket", BUCKET_SIZE(6), 3072 }, 206 { NULL, "8 Bucket", BUCKET_SIZE(8), 2048 }, 207 { NULL, "12 Bucket", BUCKET_SIZE(12), 1536 }, 208 { NULL, "16 Bucket", BUCKET_SIZE(16), 1024 }, 209 { NULL, "32 Bucket", BUCKET_SIZE(32), 512 }, 210 { NULL, "64 Bucket", BUCKET_SIZE(64), 256 }, 211 { NULL, "128 Bucket", BUCKET_SIZE(128), 128 }, 212 { NULL, "256 Bucket", BUCKET_SIZE(256), 64 }, 213 { NULL, NULL, 0} 214 }; 215 216 /* 217 * Flags and enumerations to be passed to internal functions. 218 */ 219 enum zfreeskip { SKIP_NONE = 0, SKIP_DTOR, SKIP_FINI }; 220 221 #define UMA_ANYDOMAIN -1 /* Special value for domain search. 
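				 * i.e., any NUMA domain may satisfy the request.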
*/ 222 223 /* Prototypes.. */ 224 225 int uma_startup_count(int); 226 void uma_startup(void *, int); 227 void uma_startup1(void); 228 void uma_startup2(void); 229 230 static void *noobj_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int); 231 static void *page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int); 232 static void *pcpu_page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int); 233 static void *startup_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int); 234 static void page_free(void *, vm_size_t, uint8_t); 235 static void pcpu_page_free(void *, vm_size_t, uint8_t); 236 static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int, int); 237 static void cache_drain(uma_zone_t); 238 static void bucket_drain(uma_zone_t, uma_bucket_t); 239 static void bucket_cache_drain(uma_zone_t zone); 240 static int keg_ctor(void *, int, void *, int); 241 static void keg_dtor(void *, int, void *); 242 static int zone_ctor(void *, int, void *, int); 243 static void zone_dtor(void *, int, void *); 244 static int zero_init(void *, int, int); 245 static void keg_small_init(uma_keg_t keg); 246 static void keg_large_init(uma_keg_t keg); 247 static void zone_foreach(void (*zfunc)(uma_zone_t)); 248 static void zone_timeout(uma_zone_t zone); 249 static int hash_alloc(struct uma_hash *); 250 static int hash_expand(struct uma_hash *, struct uma_hash *); 251 static void hash_free(struct uma_hash *hash); 252 static void uma_timeout(void *); 253 static void uma_startup3(void); 254 static void *zone_alloc_item(uma_zone_t, void *, int, int); 255 static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip); 256 static void bucket_enable(void); 257 static void bucket_init(void); 258 static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int); 259 static void bucket_free(uma_zone_t zone, uma_bucket_t, void *); 260 static void bucket_zone_drain(void); 261 static uma_bucket_t zone_alloc_bucket(uma_zone_t, void *, int, int); 262 static uma_slab_t zone_fetch_slab(uma_zone_t, uma_keg_t, int, int); 263 static uma_slab_t zone_fetch_slab_multi(uma_zone_t, uma_keg_t, int, int); 264 static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab); 265 static void slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item); 266 static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, 267 uma_fini fini, int align, uint32_t flags); 268 static int zone_import(uma_zone_t, void **, int, int, int); 269 static void zone_release(uma_zone_t, void **, int); 270 static void uma_zero_item(void *, uma_zone_t); 271 272 void uma_print_zone(uma_zone_t); 273 void uma_print_stats(void); 274 static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS); 275 static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS); 276 277 #ifdef INVARIANTS 278 static bool uma_dbg_kskip(uma_keg_t keg, void *mem); 279 static bool uma_dbg_zskip(uma_zone_t zone, void *mem); 280 static void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item); 281 static void uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item); 282 283 static SYSCTL_NODE(_vm, OID_AUTO, debug, CTLFLAG_RD, 0, 284 "Memory allocation debugging"); 285 286 static u_int dbg_divisor = 1; 287 SYSCTL_UINT(_vm_debug, OID_AUTO, divisor, 288 CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &dbg_divisor, 0, 289 "Debug & thrash every this item in memory allocator"); 290 291 static counter_u64_t uma_dbg_cnt = EARLY_COUNTER; 292 static counter_u64_t uma_skip_cnt = EARLY_COUNTER; 293 SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, trashed, CTLFLAG_RD, 294 &uma_dbg_cnt, "memory items debugged"); 295 
SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, skipped, CTLFLAG_RD, 296 &uma_skip_cnt, "memory items skipped, not debugged"); 297 #endif 298 299 SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL); 300 301 SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLTYPE_INT, 302 0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones"); 303 304 SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT, 305 0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats"); 306 307 static int zone_warnings = 1; 308 SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RWTUN, &zone_warnings, 0, 309 "Warn when UMA zones becomes full"); 310 311 /* Adjust bytes under management by UMA. */ 312 static inline void 313 uma_total_dec(unsigned long size) 314 { 315 316 atomic_subtract_long(&uma_kmem_total, size); 317 } 318 319 static inline void 320 uma_total_inc(unsigned long size) 321 { 322 323 if (atomic_fetchadd_long(&uma_kmem_total, size) > uma_kmem_limit) 324 uma_reclaim_wakeup(); 325 } 326 327 /* 328 * This routine checks to see whether or not it's safe to enable buckets. 329 */ 330 static void 331 bucket_enable(void) 332 { 333 bucketdisable = vm_page_count_min(); 334 } 335 336 /* 337 * Initialize bucket_zones, the array of zones of buckets of various sizes. 338 * 339 * For each zone, calculate the memory required for each bucket, consisting 340 * of the header and an array of pointers. 341 */ 342 static void 343 bucket_init(void) 344 { 345 struct uma_bucket_zone *ubz; 346 int size; 347 348 for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) { 349 size = roundup(sizeof(struct uma_bucket), sizeof(void *)); 350 size += sizeof(void *) * ubz->ubz_entries; 351 ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size, 352 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 353 UMA_ZONE_MTXCLASS | UMA_ZFLAG_BUCKET | UMA_ZONE_NUMA); 354 } 355 } 356 357 /* 358 * Given a desired number of entries for a bucket, return the zone from which 359 * to allocate the bucket. 360 */ 361 static struct uma_bucket_zone * 362 bucket_zone_lookup(int entries) 363 { 364 struct uma_bucket_zone *ubz; 365 366 for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) 367 if (ubz->ubz_entries >= entries) 368 return (ubz); 369 ubz--; 370 return (ubz); 371 } 372 373 static int 374 bucket_select(int size) 375 { 376 struct uma_bucket_zone *ubz; 377 378 ubz = &bucket_zones[0]; 379 if (size > ubz->ubz_maxsize) 380 return MAX((ubz->ubz_maxsize * ubz->ubz_entries) / size, 1); 381 382 for (; ubz->ubz_entries != 0; ubz++) 383 if (ubz->ubz_maxsize < size) 384 break; 385 ubz--; 386 return (ubz->ubz_entries); 387 } 388 389 static uma_bucket_t 390 bucket_alloc(uma_zone_t zone, void *udata, int flags) 391 { 392 struct uma_bucket_zone *ubz; 393 uma_bucket_t bucket; 394 395 /* 396 * This is to stop us from allocating per cpu buckets while we're 397 * running out of vm.boot_pages. Otherwise, we would exhaust the 398 * boot pages. This also prevents us from allocating buckets in 399 * low memory situations. 400 */ 401 if (bucketdisable) 402 return (NULL); 403 /* 404 * To limit bucket recursion we store the original zone flags 405 * in a cookie passed via zalloc_arg/zfree_arg. This allows the 406 * NOVM flag to persist even through deep recursions. We also 407 * store ZFLAG_BUCKET once we have recursed attempting to allocate 408 * a bucket for a bucket zone so we do not allow infinite bucket 409 * recursion. This cookie will even persist to frees of unused 410 * buckets via the allocation path or bucket allocations in the 411 * free path. 
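	 * For example, a recursive attempt to allocate a bucket on behalf of
	 * a bucket zone arrives here with UMA_ZFLAG_BUCKET already set in
	 * the cookie and is refused below.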
412 */ 413 if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0) 414 udata = (void *)(uintptr_t)zone->uz_flags; 415 else { 416 if ((uintptr_t)udata & UMA_ZFLAG_BUCKET) 417 return (NULL); 418 udata = (void *)((uintptr_t)udata | UMA_ZFLAG_BUCKET); 419 } 420 if ((uintptr_t)udata & UMA_ZFLAG_CACHEONLY) 421 flags |= M_NOVM; 422 ubz = bucket_zone_lookup(zone->uz_count); 423 if (ubz->ubz_zone == zone && (ubz + 1)->ubz_entries != 0) 424 ubz++; 425 bucket = uma_zalloc_arg(ubz->ubz_zone, udata, flags); 426 if (bucket) { 427 #ifdef INVARIANTS 428 bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries); 429 #endif 430 bucket->ub_cnt = 0; 431 bucket->ub_entries = ubz->ubz_entries; 432 } 433 434 return (bucket); 435 } 436 437 static void 438 bucket_free(uma_zone_t zone, uma_bucket_t bucket, void *udata) 439 { 440 struct uma_bucket_zone *ubz; 441 442 KASSERT(bucket->ub_cnt == 0, 443 ("bucket_free: Freeing a non free bucket.")); 444 if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0) 445 udata = (void *)(uintptr_t)zone->uz_flags; 446 ubz = bucket_zone_lookup(bucket->ub_entries); 447 uma_zfree_arg(ubz->ubz_zone, bucket, udata); 448 } 449 450 static void 451 bucket_zone_drain(void) 452 { 453 struct uma_bucket_zone *ubz; 454 455 for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) 456 zone_drain(ubz->ubz_zone); 457 } 458 459 static void 460 zone_log_warning(uma_zone_t zone) 461 { 462 static const struct timeval warninterval = { 300, 0 }; 463 464 if (!zone_warnings || zone->uz_warning == NULL) 465 return; 466 467 if (ratecheck(&zone->uz_ratecheck, &warninterval)) 468 printf("[zone: %s] %s\n", zone->uz_name, zone->uz_warning); 469 } 470 471 static inline void 472 zone_maxaction(uma_zone_t zone) 473 { 474 475 if (zone->uz_maxaction.ta_func != NULL) 476 taskqueue_enqueue(taskqueue_thread, &zone->uz_maxaction); 477 } 478 479 static void 480 zone_foreach_keg(uma_zone_t zone, void (*kegfn)(uma_keg_t)) 481 { 482 uma_klink_t klink; 483 484 LIST_FOREACH(klink, &zone->uz_kegs, kl_link) 485 kegfn(klink->kl_keg); 486 } 487 488 /* 489 * Routine called by timeout which is used to fire off some time interval 490 * based calculations. (stats, hash size, etc.) 491 * 492 * Arguments: 493 * arg Unused 494 * 495 * Returns: 496 * Nothing 497 */ 498 static void 499 uma_timeout(void *unused) 500 { 501 bucket_enable(); 502 zone_foreach(zone_timeout); 503 504 /* Reschedule this event */ 505 callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL); 506 } 507 508 /* 509 * Routine to perform timeout driven calculations. This expands the 510 * hashes and does per cpu statistics aggregation. 511 * 512 * Returns nothing. 513 */ 514 static void 515 keg_timeout(uma_keg_t keg) 516 { 517 518 KEG_LOCK(keg); 519 /* 520 * Expand the keg hash table. 521 * 522 * This is done if the number of slabs is larger than the hash size. 523 * What I'm trying to do here is completely reduce collisions. This 524 * may be a little aggressive. Should I allow for two collisions max? 525 */ 526 if (keg->uk_flags & UMA_ZONE_HASH && 527 keg->uk_pages / keg->uk_ppera >= keg->uk_hash.uh_hashsize) { 528 struct uma_hash newhash; 529 struct uma_hash oldhash; 530 int ret; 531 532 /* 533 * This is so involved because allocating and freeing 534 * while the keg lock is held will lead to deadlock. 535 * I have to do everything in stages and check for 536 * races. 
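		 * The sequence is: snapshot the hash under the lock, drop the
		 * lock to allocate the larger table, retake the lock, and then
		 * either install the new table or keep the old one, freeing
		 * whichever copy lost only after the lock is dropped again.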
537 */ 538 newhash = keg->uk_hash; 539 KEG_UNLOCK(keg); 540 ret = hash_alloc(&newhash); 541 KEG_LOCK(keg); 542 if (ret) { 543 if (hash_expand(&keg->uk_hash, &newhash)) { 544 oldhash = keg->uk_hash; 545 keg->uk_hash = newhash; 546 } else 547 oldhash = newhash; 548 549 KEG_UNLOCK(keg); 550 hash_free(&oldhash); 551 return; 552 } 553 } 554 KEG_UNLOCK(keg); 555 } 556 557 static void 558 zone_timeout(uma_zone_t zone) 559 { 560 561 zone_foreach_keg(zone, &keg_timeout); 562 } 563 564 /* 565 * Allocate and zero fill the next sized hash table from the appropriate 566 * backing store. 567 * 568 * Arguments: 569 * hash A new hash structure with the old hash size in uh_hashsize 570 * 571 * Returns: 572 * 1 on success and 0 on failure. 573 */ 574 static int 575 hash_alloc(struct uma_hash *hash) 576 { 577 int oldsize; 578 int alloc; 579 580 oldsize = hash->uh_hashsize; 581 582 /* We're just going to go to a power of two greater */ 583 if (oldsize) { 584 hash->uh_hashsize = oldsize * 2; 585 alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize; 586 hash->uh_slab_hash = (struct slabhead *)malloc(alloc, 587 M_UMAHASH, M_NOWAIT); 588 } else { 589 alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT; 590 hash->uh_slab_hash = zone_alloc_item(hashzone, NULL, 591 UMA_ANYDOMAIN, M_WAITOK); 592 hash->uh_hashsize = UMA_HASH_SIZE_INIT; 593 } 594 if (hash->uh_slab_hash) { 595 bzero(hash->uh_slab_hash, alloc); 596 hash->uh_hashmask = hash->uh_hashsize - 1; 597 return (1); 598 } 599 600 return (0); 601 } 602 603 /* 604 * Expands the hash table for HASH zones. This is done from zone_timeout 605 * to reduce collisions. This must not be done in the regular allocation 606 * path, otherwise, we can recurse on the vm while allocating pages. 607 * 608 * Arguments: 609 * oldhash The hash you want to expand 610 * newhash The hash structure for the new table 611 * 612 * Returns: 613 * Nothing 614 * 615 * Discussion: 616 */ 617 static int 618 hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash) 619 { 620 uma_slab_t slab; 621 int hval; 622 int i; 623 624 if (!newhash->uh_slab_hash) 625 return (0); 626 627 if (oldhash->uh_hashsize >= newhash->uh_hashsize) 628 return (0); 629 630 /* 631 * I need to investigate hash algorithms for resizing without a 632 * full rehash. 633 */ 634 635 for (i = 0; i < oldhash->uh_hashsize; i++) 636 while (!SLIST_EMPTY(&oldhash->uh_slab_hash[i])) { 637 slab = SLIST_FIRST(&oldhash->uh_slab_hash[i]); 638 SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[i], us_hlink); 639 hval = UMA_HASH(newhash, slab->us_data); 640 SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval], 641 slab, us_hlink); 642 } 643 644 return (1); 645 } 646 647 /* 648 * Free the hash bucket to the appropriate backing store. 649 * 650 * Arguments: 651 * slab_hash The hash bucket we're freeing 652 * hashsize The number of entries in that hash bucket 653 * 654 * Returns: 655 * Nothing 656 */ 657 static void 658 hash_free(struct uma_hash *hash) 659 { 660 if (hash->uh_slab_hash == NULL) 661 return; 662 if (hash->uh_hashsize == UMA_HASH_SIZE_INIT) 663 zone_free_item(hashzone, hash->uh_slab_hash, NULL, SKIP_NONE); 664 else 665 free(hash->uh_slab_hash, M_UMAHASH); 666 } 667 668 /* 669 * Frees all outstanding items in a bucket 670 * 671 * Arguments: 672 * zone The zone to free to, must be unlocked. 673 * bucket The free/alloc bucket with items, cpu queue must be locked. 
 *
 * Returns:
 *	Nothing
 */

static void
bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
{
	int i;

	if (bucket == NULL)
		return;

	if (zone->uz_fini)
		for (i = 0; i < bucket->ub_cnt; i++)
			zone->uz_fini(bucket->ub_bucket[i], zone->uz_size);
	zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt);
	bucket->ub_cnt = 0;
}

/*
 * Drains the per cpu caches for a zone.
 *
 * NOTE: This may only be called while the zone is being torn down, and not
 * during normal operation.  This is necessary in order that we do not have
 * to migrate CPUs to drain the per-CPU caches.
 *
 * Arguments:
 *	zone	The zone to drain, must be unlocked.
 *
 * Returns:
 *	Nothing
 */
static void
cache_drain(uma_zone_t zone)
{
	uma_cache_t cache;
	int cpu;

	/*
	 * XXX: It is safe to not lock the per-CPU caches, because we're
	 * tearing down the zone anyway.  I.e., there will be no further use
	 * of the caches at this point.
	 *
	 * XXX: It would be good to be able to assert that the zone is being
	 * torn down to prevent improper use of cache_drain().
	 *
	 * XXX: We lock the zone before passing into bucket_cache_drain() as
	 * it is used elsewhere.  Should the tear-down path be made special
	 * there in some form?
	 */
	CPU_FOREACH(cpu) {
		cache = &zone->uz_cpu[cpu];
		bucket_drain(zone, cache->uc_allocbucket);
		bucket_drain(zone, cache->uc_freebucket);
		if (cache->uc_allocbucket != NULL)
			bucket_free(zone, cache->uc_allocbucket, NULL);
		if (cache->uc_freebucket != NULL)
			bucket_free(zone, cache->uc_freebucket, NULL);
		cache->uc_allocbucket = cache->uc_freebucket = NULL;
	}
	ZONE_LOCK(zone);
	bucket_cache_drain(zone);
	ZONE_UNLOCK(zone);
}

static void
cache_shrink(uma_zone_t zone)
{

	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
		return;

	ZONE_LOCK(zone);
	zone->uz_count = (zone->uz_count_min + zone->uz_count) / 2;
	ZONE_UNLOCK(zone);
}

static void
cache_drain_safe_cpu(uma_zone_t zone)
{
	uma_cache_t cache;
	uma_bucket_t b1, b2;
	int domain;

	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
		return;

	b1 = b2 = NULL;
	ZONE_LOCK(zone);
	critical_enter();
	if (zone->uz_flags & UMA_ZONE_NUMA)
		domain = PCPU_GET(domain);
	else
		domain = 0;
	cache = &zone->uz_cpu[curcpu];
	if (cache->uc_allocbucket) {
		if (cache->uc_allocbucket->ub_cnt != 0)
			LIST_INSERT_HEAD(&zone->uz_domain[domain].uzd_buckets,
			    cache->uc_allocbucket, ub_link);
		else
			b1 = cache->uc_allocbucket;
		cache->uc_allocbucket = NULL;
	}
	if (cache->uc_freebucket) {
		if (cache->uc_freebucket->ub_cnt != 0)
			LIST_INSERT_HEAD(&zone->uz_domain[domain].uzd_buckets,
			    cache->uc_freebucket, ub_link);
		else
			b2 = cache->uc_freebucket;
		cache->uc_freebucket = NULL;
	}
	critical_exit();
	ZONE_UNLOCK(zone);
	if (b1)
		bucket_free(zone, b1, NULL);
	if (b2)
		bucket_free(zone, b2, NULL);
}

/*
 * Safely drain the per-CPU caches of a zone (or of all zones) into the
 * per-domain bucket lists.  This is an expensive call because it needs to
 * bind to all CPUs one by one and enter a critical section on each of them
 * in order to safely access their cache buckets.
 * The zone lock must not be held when calling this function.
 */
static void
cache_drain_safe(uma_zone_t zone)
{
	int cpu;

	/*
	 * Polite bucket-size shrinking was not enough; shrink aggressively.
	 */
	if (zone)
		cache_shrink(zone);
	else
		zone_foreach(cache_shrink);

	CPU_FOREACH(cpu) {
		thread_lock(curthread);
		sched_bind(curthread, cpu);
		thread_unlock(curthread);

		if (zone)
			cache_drain_safe_cpu(zone);
		else
			zone_foreach(cache_drain_safe_cpu);
	}
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);
}

/*
 * Drain the cached buckets from a zone.  Expects a locked zone on entry.
 */
static void
bucket_cache_drain(uma_zone_t zone)
{
	uma_zone_domain_t zdom;
	uma_bucket_t bucket;
	int i;

	/*
	 * Drain the bucket queues and free the buckets.
	 */
	for (i = 0; i < vm_ndomains; i++) {
		zdom = &zone->uz_domain[i];
		while ((bucket = LIST_FIRST(&zdom->uzd_buckets)) != NULL) {
			LIST_REMOVE(bucket, ub_link);
			ZONE_UNLOCK(zone);
			bucket_drain(zone, bucket);
			bucket_free(zone, bucket, NULL);
			ZONE_LOCK(zone);
		}
	}

	/*
	 * Shrink the bucket size further.  The price of a single zone lock
	 * collision is probably lower than the price of a global cache drain.
	 */
	if (zone->uz_count > zone->uz_count_min)
		zone->uz_count--;
}

static void
keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
{
	uint8_t *mem;
	int i;
	uint8_t flags;

	CTR4(KTR_UMA, "keg_free_slab keg %s(%p) slab %p, returning %d bytes",
	    keg->uk_name, keg, slab, PAGE_SIZE * keg->uk_ppera);

	mem = slab->us_data;
	flags = slab->us_flags;
	i = start;
	if (keg->uk_fini != NULL) {
		for (i--; i > -1; i--)
#ifdef INVARIANTS
		/*
		 * trash_fini implies that the dtor was trash_dtor.
		 * trash_fini checks that the memory has not been modified
		 * since the free, which executed trash_dtor.
		 * That is why we need to run the uma_dbg_kskip() check here,
		 * although we do not make the skip check for other init/fini
		 * invocations.
		 */
		if (!uma_dbg_kskip(keg, slab->us_data + (keg->uk_rsize * i)) ||
		    keg->uk_fini != trash_fini)
#endif
			keg->uk_fini(slab->us_data + (keg->uk_rsize * i),
			    keg->uk_size);
	}
	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
		zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
	keg->uk_freef(mem, PAGE_SIZE * keg->uk_ppera, flags);
	uma_total_dec(PAGE_SIZE * keg->uk_ppera);
}

/*
 * Frees pages from a keg back to the system.  This is done on demand from
 * the pageout daemon.
 *
 * Returns nothing.
 */
static void
keg_drain(uma_keg_t keg)
{
	struct slabhead freeslabs = { 0 };
	uma_domain_t dom;
	uma_slab_t slab, tmp;
	int i;

	/*
	 * We don't want to take pages from statically allocated kegs at this
	 * time.
	 */
	if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
		return;

	CTR3(KTR_UMA, "keg_drain %s(%p) free items: %u",
	    keg->uk_name, keg, keg->uk_free);
	KEG_LOCK(keg);
	if (keg->uk_free == 0)
		goto finished;

	for (i = 0; i < vm_ndomains; i++) {
		dom = &keg->uk_domain[i];
		LIST_FOREACH_SAFE(slab, &dom->ud_free_slab, us_link, tmp) {
			/* We have nowhere to free these to.
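			 * Boot-time slabs came from the bootmem carve-out
			 * handed to uma_startup(), not from the VM, so they
			 * are simply skipped.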
*/ 928 if (slab->us_flags & UMA_SLAB_BOOT) 929 continue; 930 931 LIST_REMOVE(slab, us_link); 932 keg->uk_pages -= keg->uk_ppera; 933 keg->uk_free -= keg->uk_ipers; 934 935 if (keg->uk_flags & UMA_ZONE_HASH) 936 UMA_HASH_REMOVE(&keg->uk_hash, slab, 937 slab->us_data); 938 939 SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink); 940 } 941 } 942 943 finished: 944 KEG_UNLOCK(keg); 945 946 while ((slab = SLIST_FIRST(&freeslabs)) != NULL) { 947 SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink); 948 keg_free_slab(keg, slab, keg->uk_ipers); 949 } 950 } 951 952 static void 953 zone_drain_wait(uma_zone_t zone, int waitok) 954 { 955 956 /* 957 * Set draining to interlock with zone_dtor() so we can release our 958 * locks as we go. Only dtor() should do a WAITOK call since it 959 * is the only call that knows the structure will still be available 960 * when it wakes up. 961 */ 962 ZONE_LOCK(zone); 963 while (zone->uz_flags & UMA_ZFLAG_DRAINING) { 964 if (waitok == M_NOWAIT) 965 goto out; 966 msleep(zone, zone->uz_lockptr, PVM, "zonedrain", 1); 967 } 968 zone->uz_flags |= UMA_ZFLAG_DRAINING; 969 bucket_cache_drain(zone); 970 ZONE_UNLOCK(zone); 971 /* 972 * The DRAINING flag protects us from being freed while 973 * we're running. Normally the uma_rwlock would protect us but we 974 * must be able to release and acquire the right lock for each keg. 975 */ 976 zone_foreach_keg(zone, &keg_drain); 977 ZONE_LOCK(zone); 978 zone->uz_flags &= ~UMA_ZFLAG_DRAINING; 979 wakeup(zone); 980 out: 981 ZONE_UNLOCK(zone); 982 } 983 984 void 985 zone_drain(uma_zone_t zone) 986 { 987 988 zone_drain_wait(zone, M_NOWAIT); 989 } 990 991 /* 992 * Allocate a new slab for a keg. This does not insert the slab onto a list. 993 * 994 * Arguments: 995 * wait Shall we wait? 996 * 997 * Returns: 998 * The slab that was allocated or NULL if there is no memory and the 999 * caller specified M_NOWAIT. 1000 */ 1001 static uma_slab_t 1002 keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int domain, int wait) 1003 { 1004 uma_alloc allocf; 1005 uma_slab_t slab; 1006 unsigned long size; 1007 uint8_t *mem; 1008 uint8_t flags; 1009 int i; 1010 1011 KASSERT(domain >= 0 && domain < vm_ndomains, 1012 ("keg_alloc_slab: domain %d out of range", domain)); 1013 mtx_assert(&keg->uk_lock, MA_OWNED); 1014 slab = NULL; 1015 mem = NULL; 1016 1017 allocf = keg->uk_allocf; 1018 KEG_UNLOCK(keg); 1019 size = keg->uk_ppera * PAGE_SIZE; 1020 1021 if (keg->uk_flags & UMA_ZONE_OFFPAGE) { 1022 slab = zone_alloc_item(keg->uk_slabzone, NULL, domain, wait); 1023 if (slab == NULL) 1024 goto out; 1025 } 1026 1027 /* 1028 * This reproduces the old vm_zone behavior of zero filling pages the 1029 * first time they are added to a zone. 1030 * 1031 * Malloced items are zeroed in uma_zalloc. 1032 */ 1033 1034 if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0) 1035 wait |= M_ZERO; 1036 else 1037 wait &= ~M_ZERO; 1038 1039 if (keg->uk_flags & UMA_ZONE_NODUMP) 1040 wait |= M_NODUMP; 1041 1042 /* zone is passed for legacy reasons. 
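	 * Most backends ignore it; startup_alloc() and noobj_alloc() only
	 * use it to look up the keg.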
*/ 1043 mem = allocf(zone, size, domain, &flags, wait); 1044 if (mem == NULL) { 1045 if (keg->uk_flags & UMA_ZONE_OFFPAGE) 1046 zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE); 1047 slab = NULL; 1048 goto out; 1049 } 1050 uma_total_inc(size); 1051 1052 /* Point the slab into the allocated memory */ 1053 if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) 1054 slab = (uma_slab_t )(mem + keg->uk_pgoff); 1055 1056 if (keg->uk_flags & UMA_ZONE_VTOSLAB) 1057 for (i = 0; i < keg->uk_ppera; i++) 1058 vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab); 1059 1060 slab->us_keg = keg; 1061 slab->us_data = mem; 1062 slab->us_freecount = keg->uk_ipers; 1063 slab->us_flags = flags; 1064 slab->us_domain = domain; 1065 BIT_FILL(SLAB_SETSIZE, &slab->us_free); 1066 #ifdef INVARIANTS 1067 BIT_ZERO(SLAB_SETSIZE, &slab->us_debugfree); 1068 #endif 1069 1070 if (keg->uk_init != NULL) { 1071 for (i = 0; i < keg->uk_ipers; i++) 1072 if (keg->uk_init(slab->us_data + (keg->uk_rsize * i), 1073 keg->uk_size, wait) != 0) 1074 break; 1075 if (i != keg->uk_ipers) { 1076 keg_free_slab(keg, slab, i); 1077 slab = NULL; 1078 goto out; 1079 } 1080 } 1081 out: 1082 KEG_LOCK(keg); 1083 1084 CTR3(KTR_UMA, "keg_alloc_slab: allocated slab %p for %s(%p)", 1085 slab, keg->uk_name, keg); 1086 1087 if (slab != NULL) { 1088 if (keg->uk_flags & UMA_ZONE_HASH) 1089 UMA_HASH_INSERT(&keg->uk_hash, slab, mem); 1090 1091 keg->uk_pages += keg->uk_ppera; 1092 keg->uk_free += keg->uk_ipers; 1093 } 1094 1095 return (slab); 1096 } 1097 1098 /* 1099 * This function is intended to be used early on in place of page_alloc() so 1100 * that we may use the boot time page cache to satisfy allocations before 1101 * the VM is ready. 1102 */ 1103 static void * 1104 startup_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag, 1105 int wait) 1106 { 1107 uma_keg_t keg; 1108 void *mem; 1109 int pages; 1110 1111 keg = zone_first_keg(zone); 1112 1113 /* 1114 * If we are in BOOT_BUCKETS or higher, than switch to real 1115 * allocator. Zones with page sized slabs switch at BOOT_PAGEALLOC. 1116 */ 1117 switch (booted) { 1118 case BOOT_COLD: 1119 case BOOT_STRAPPED: 1120 break; 1121 case BOOT_PAGEALLOC: 1122 if (keg->uk_ppera > 1) 1123 break; 1124 case BOOT_BUCKETS: 1125 case BOOT_RUNNING: 1126 #ifdef UMA_MD_SMALL_ALLOC 1127 keg->uk_allocf = (keg->uk_ppera > 1) ? 1128 page_alloc : uma_small_alloc; 1129 #else 1130 keg->uk_allocf = page_alloc; 1131 #endif 1132 return keg->uk_allocf(zone, bytes, domain, pflag, wait); 1133 } 1134 1135 /* 1136 * Check our small startup cache to see if it has pages remaining. 1137 */ 1138 pages = howmany(bytes, PAGE_SIZE); 1139 KASSERT(pages > 0, ("%s can't reserve 0 pages", __func__)); 1140 if (pages > boot_pages) 1141 panic("UMA zone \"%s\": Increase vm.boot_pages", zone->uz_name); 1142 #ifdef DIAGNOSTIC 1143 printf("%s from \"%s\", %d boot pages left\n", __func__, zone->uz_name, 1144 boot_pages); 1145 #endif 1146 mem = bootmem; 1147 boot_pages -= pages; 1148 bootmem += pages * PAGE_SIZE; 1149 *pflag = UMA_SLAB_BOOT; 1150 1151 return (mem); 1152 } 1153 1154 /* 1155 * Allocates a number of pages from the system 1156 * 1157 * Arguments: 1158 * bytes The number of bytes requested 1159 * wait Shall we wait? 1160 * 1161 * Returns: 1162 * A pointer to the alloced memory or possibly 1163 * NULL if M_NOWAIT is set. 
1164 */ 1165 static void * 1166 page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag, 1167 int wait) 1168 { 1169 void *p; /* Returned page */ 1170 1171 *pflag = UMA_SLAB_KERNEL; 1172 p = (void *) kmem_malloc_domain(kernel_arena, domain, bytes, wait); 1173 1174 return (p); 1175 } 1176 1177 static void * 1178 pcpu_page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag, 1179 int wait) 1180 { 1181 struct pglist alloctail; 1182 vm_offset_t addr, zkva; 1183 int cpu, flags; 1184 vm_page_t p, p_next; 1185 #ifdef NUMA 1186 struct pcpu *pc; 1187 #endif 1188 1189 TAILQ_INIT(&alloctail); 1190 MPASS(bytes == (mp_maxid+1)*PAGE_SIZE); 1191 *pflag = UMA_SLAB_KERNEL; 1192 1193 flags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ | 1194 ((wait & M_WAITOK) != 0 ? VM_ALLOC_WAITOK : 1195 VM_ALLOC_NOWAIT); 1196 for (cpu = 0; cpu <= mp_maxid; cpu++) { 1197 if (CPU_ABSENT(cpu)) { 1198 p = vm_page_alloc(NULL, 0, flags); 1199 } else { 1200 #ifndef NUMA 1201 p = vm_page_alloc(NULL, 0, flags); 1202 #else 1203 pc = pcpu_find(cpu); 1204 p = vm_page_alloc_domain(NULL, 0, pc->pc_domain, flags); 1205 if (__predict_false(p == NULL)) 1206 p = vm_page_alloc(NULL, 0, flags); 1207 #endif 1208 } 1209 if (__predict_false(p == NULL)) 1210 goto fail; 1211 TAILQ_INSERT_TAIL(&alloctail, p, listq); 1212 } 1213 if ((addr = kva_alloc(bytes)) == 0) 1214 goto fail; 1215 zkva = addr; 1216 TAILQ_FOREACH(p, &alloctail, listq) { 1217 pmap_qenter(zkva, &p, 1); 1218 zkva += PAGE_SIZE; 1219 } 1220 return ((void*)addr); 1221 fail: 1222 TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) { 1223 vm_page_unwire(p, PQ_NONE); 1224 vm_page_free(p); 1225 } 1226 return (NULL); 1227 } 1228 1229 /* 1230 * Allocates a number of pages from within an object 1231 * 1232 * Arguments: 1233 * bytes The number of bytes requested 1234 * wait Shall we wait? 1235 * 1236 * Returns: 1237 * A pointer to the alloced memory or possibly 1238 * NULL if M_NOWAIT is set. 1239 */ 1240 static void * 1241 noobj_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags, 1242 int wait) 1243 { 1244 TAILQ_HEAD(, vm_page) alloctail; 1245 u_long npages; 1246 vm_offset_t retkva, zkva; 1247 vm_page_t p, p_next; 1248 uma_keg_t keg; 1249 1250 TAILQ_INIT(&alloctail); 1251 keg = zone_first_keg(zone); 1252 1253 npages = howmany(bytes, PAGE_SIZE); 1254 while (npages > 0) { 1255 p = vm_page_alloc_domain(NULL, 0, domain, VM_ALLOC_INTERRUPT | 1256 VM_ALLOC_WIRED | VM_ALLOC_NOOBJ | 1257 ((wait & M_WAITOK) != 0 ? VM_ALLOC_WAITOK : 1258 VM_ALLOC_NOWAIT)); 1259 if (p != NULL) { 1260 /* 1261 * Since the page does not belong to an object, its 1262 * listq is unused. 1263 */ 1264 TAILQ_INSERT_TAIL(&alloctail, p, listq); 1265 npages--; 1266 continue; 1267 } 1268 /* 1269 * Page allocation failed, free intermediate pages and 1270 * exit. 
1271 */ 1272 TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) { 1273 vm_page_unwire(p, PQ_NONE); 1274 vm_page_free(p); 1275 } 1276 return (NULL); 1277 } 1278 *flags = UMA_SLAB_PRIV; 1279 zkva = keg->uk_kva + 1280 atomic_fetchadd_long(&keg->uk_offset, round_page(bytes)); 1281 retkva = zkva; 1282 TAILQ_FOREACH(p, &alloctail, listq) { 1283 pmap_qenter(zkva, &p, 1); 1284 zkva += PAGE_SIZE; 1285 } 1286 1287 return ((void *)retkva); 1288 } 1289 1290 /* 1291 * Frees a number of pages to the system 1292 * 1293 * Arguments: 1294 * mem A pointer to the memory to be freed 1295 * size The size of the memory being freed 1296 * flags The original p->us_flags field 1297 * 1298 * Returns: 1299 * Nothing 1300 */ 1301 static void 1302 page_free(void *mem, vm_size_t size, uint8_t flags) 1303 { 1304 struct vmem *vmem; 1305 1306 if (flags & UMA_SLAB_KERNEL) 1307 vmem = kernel_arena; 1308 else 1309 panic("UMA: page_free used with invalid flags %x", flags); 1310 1311 kmem_free(vmem, (vm_offset_t)mem, size); 1312 } 1313 1314 /* 1315 * Frees pcpu zone allocations 1316 * 1317 * Arguments: 1318 * mem A pointer to the memory to be freed 1319 * size The size of the memory being freed 1320 * flags The original p->us_flags field 1321 * 1322 * Returns: 1323 * Nothing 1324 */ 1325 static void 1326 pcpu_page_free(void *mem, vm_size_t size, uint8_t flags) 1327 { 1328 vm_offset_t sva, curva; 1329 vm_paddr_t paddr; 1330 vm_page_t m; 1331 1332 MPASS(size == (mp_maxid+1)*PAGE_SIZE); 1333 sva = (vm_offset_t)mem; 1334 for (curva = sva; curva < sva + size; curva += PAGE_SIZE) { 1335 paddr = pmap_kextract(curva); 1336 m = PHYS_TO_VM_PAGE(paddr); 1337 vm_page_unwire(m, PQ_NONE); 1338 vm_page_free(m); 1339 } 1340 pmap_qremove(sva, size >> PAGE_SHIFT); 1341 kva_free(sva, size); 1342 } 1343 1344 1345 /* 1346 * Zero fill initializer 1347 * 1348 * Arguments/Returns follow uma_init specifications 1349 */ 1350 static int 1351 zero_init(void *mem, int size, int flags) 1352 { 1353 bzero(mem, size); 1354 return (0); 1355 } 1356 1357 /* 1358 * Finish creating a small uma keg. This calculates ipers, and the keg size. 1359 * 1360 * Arguments 1361 * keg The zone we should initialize 1362 * 1363 * Returns 1364 * Nothing 1365 */ 1366 static void 1367 keg_small_init(uma_keg_t keg) 1368 { 1369 u_int rsize; 1370 u_int memused; 1371 u_int wastedspace; 1372 u_int shsize; 1373 u_int slabsize; 1374 1375 if (keg->uk_flags & UMA_ZONE_PCPU) { 1376 u_int ncpus = (mp_maxid + 1) ? (mp_maxid + 1) : MAXCPU; 1377 1378 slabsize = UMA_PCPU_ALLOC_SIZE; 1379 keg->uk_ppera = ncpus; 1380 } else { 1381 slabsize = UMA_SLAB_SIZE; 1382 keg->uk_ppera = 1; 1383 } 1384 1385 /* 1386 * Calculate the size of each allocation (rsize) according to 1387 * alignment. If the requested size is smaller than we have 1388 * allocation bits for we round it up. 1389 */ 1390 rsize = keg->uk_size; 1391 if (rsize < slabsize / SLAB_SETSIZE) 1392 rsize = slabsize / SLAB_SETSIZE; 1393 if (rsize & keg->uk_align) 1394 rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1); 1395 keg->uk_rsize = rsize; 1396 1397 KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0 || 1398 keg->uk_rsize < UMA_PCPU_ALLOC_SIZE, 1399 ("%s: size %u too large", __func__, keg->uk_rsize)); 1400 1401 if (keg->uk_flags & UMA_ZONE_OFFPAGE) 1402 shsize = 0; 1403 else 1404 shsize = sizeof(struct uma_slab); 1405 1406 if (rsize <= slabsize - shsize) 1407 keg->uk_ipers = (slabsize - shsize) / rsize; 1408 else { 1409 /* Handle special case when we have 1 item per slab, so 1410 * alignment requirement can be relaxed. 
 */
		KASSERT(keg->uk_size <= slabsize - shsize,
		    ("%s: size %u greater than slab", __func__, keg->uk_size));
		keg->uk_ipers = 1;
	}
	KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
	    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));

	memused = keg->uk_ipers * rsize + shsize;
	wastedspace = slabsize - memused;

	/*
	 * We can't do OFFPAGE if we're internal or if we've been
	 * asked to not go to the VM for buckets.  If we do this we
	 * may end up going to the VM for slabs, which we do not
	 * want to do if we're UMA_ZFLAG_CACHEONLY as a result
	 * of UMA_ZONE_VM, which clearly forbids it.
	 */
	if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
	    (keg->uk_flags & UMA_ZFLAG_CACHEONLY))
		return;

	/*
	 * See if using an OFFPAGE slab will limit our waste.  Only do
	 * this if it permits more items per-slab.
	 *
	 * XXX We could try growing slabsize to limit max waste as well.
	 *     Historically this was not done because the VM could not
	 *     efficiently handle contiguous allocations.
	 */
	if ((wastedspace >= slabsize / UMA_MAX_WASTE) &&
	    (keg->uk_ipers < (slabsize / keg->uk_rsize))) {
		keg->uk_ipers = slabsize / keg->uk_rsize;
		KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
		    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
		CTR6(KTR_UMA, "UMA decided we need offpage slab headers for "
		    "keg: %s(%p), calculated wastedspace = %d, "
		    "maximum wasted space allowed = %d, "
		    "calculated ipers = %d, "
		    "new wasted space = %d\n", keg->uk_name, keg, wastedspace,
		    slabsize / UMA_MAX_WASTE, keg->uk_ipers,
		    slabsize - keg->uk_ipers * keg->uk_rsize);
		keg->uk_flags |= UMA_ZONE_OFFPAGE;
	}

	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
	    (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
		keg->uk_flags |= UMA_ZONE_HASH;
}

/*
 * Finish creating a large (> UMA_SLAB_SIZE) uma keg.  Just give in and do
 * OFFPAGE for now.  When I can allow for more dynamic slab sizes this will be
 * more complicated.
 *
 * Arguments
 *	keg	The keg we should initialize
 *
 * Returns
 *	Nothing
 */
static void
keg_large_init(uma_keg_t keg)
{
	u_int shsize;

	KASSERT(keg != NULL, ("Keg is null in keg_large_init"));
	KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0,
	    ("keg_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY keg"));
	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
	    ("%s: Cannot large-init a UMA_ZONE_PCPU keg", __func__));

	keg->uk_ppera = howmany(keg->uk_size, PAGE_SIZE);
	keg->uk_ipers = 1;
	keg->uk_rsize = keg->uk_size;

	/* Check whether we have enough space to not do OFFPAGE. */
	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) == 0) {
		shsize = sizeof(struct uma_slab);
		if (shsize & UMA_ALIGN_PTR)
			shsize = (shsize & ~UMA_ALIGN_PTR) +
			    (UMA_ALIGN_PTR + 1);

		if (PAGE_SIZE * keg->uk_ppera - keg->uk_rsize < shsize) {
			/*
			 * We can't do OFFPAGE if we're internal, in which case
			 * we need an extra page per allocation to contain the
			 * slab header.
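			 * (Cf. keg_small_init: an OFFPAGE header would have
			 * to come from the slab zone, which internal kegs
			 * must not depend on.)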
1498 */ 1499 if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) == 0) 1500 keg->uk_flags |= UMA_ZONE_OFFPAGE; 1501 else 1502 keg->uk_ppera++; 1503 } 1504 } 1505 1506 if ((keg->uk_flags & UMA_ZONE_OFFPAGE) && 1507 (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0) 1508 keg->uk_flags |= UMA_ZONE_HASH; 1509 } 1510 1511 static void 1512 keg_cachespread_init(uma_keg_t keg) 1513 { 1514 int alignsize; 1515 int trailer; 1516 int pages; 1517 int rsize; 1518 1519 KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0, 1520 ("%s: Cannot cachespread-init a UMA_ZONE_PCPU keg", __func__)); 1521 1522 alignsize = keg->uk_align + 1; 1523 rsize = keg->uk_size; 1524 /* 1525 * We want one item to start on every align boundary in a page. To 1526 * do this we will span pages. We will also extend the item by the 1527 * size of align if it is an even multiple of align. Otherwise, it 1528 * would fall on the same boundary every time. 1529 */ 1530 if (rsize & keg->uk_align) 1531 rsize = (rsize & ~keg->uk_align) + alignsize; 1532 if ((rsize & alignsize) == 0) 1533 rsize += alignsize; 1534 trailer = rsize - keg->uk_size; 1535 pages = (rsize * (PAGE_SIZE / alignsize)) / PAGE_SIZE; 1536 pages = MIN(pages, (128 * 1024) / PAGE_SIZE); 1537 keg->uk_rsize = rsize; 1538 keg->uk_ppera = pages; 1539 keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize; 1540 keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB; 1541 KASSERT(keg->uk_ipers <= SLAB_SETSIZE, 1542 ("%s: keg->uk_ipers too high(%d) increase max_ipers", __func__, 1543 keg->uk_ipers)); 1544 } 1545 1546 /* 1547 * Keg header ctor. This initializes all fields, locks, etc. And inserts 1548 * the keg onto the global keg list. 1549 * 1550 * Arguments/Returns follow uma_ctor specifications 1551 * udata Actually uma_kctor_args 1552 */ 1553 static int 1554 keg_ctor(void *mem, int size, void *udata, int flags) 1555 { 1556 struct uma_kctor_args *arg = udata; 1557 uma_keg_t keg = mem; 1558 uma_zone_t zone; 1559 1560 bzero(keg, size); 1561 keg->uk_size = arg->size; 1562 keg->uk_init = arg->uminit; 1563 keg->uk_fini = arg->fini; 1564 keg->uk_align = arg->align; 1565 keg->uk_cursor = 0; 1566 keg->uk_free = 0; 1567 keg->uk_reserve = 0; 1568 keg->uk_pages = 0; 1569 keg->uk_flags = arg->flags; 1570 keg->uk_slabzone = NULL; 1571 1572 /* 1573 * The master zone is passed to us at keg-creation time. 1574 */ 1575 zone = arg->zone; 1576 keg->uk_name = zone->uz_name; 1577 1578 if (arg->flags & UMA_ZONE_VM) 1579 keg->uk_flags |= UMA_ZFLAG_CACHEONLY; 1580 1581 if (arg->flags & UMA_ZONE_ZINIT) 1582 keg->uk_init = zero_init; 1583 1584 if (arg->flags & UMA_ZONE_MALLOC) 1585 keg->uk_flags |= UMA_ZONE_VTOSLAB; 1586 1587 if (arg->flags & UMA_ZONE_PCPU) 1588 #ifdef SMP 1589 keg->uk_flags |= UMA_ZONE_OFFPAGE; 1590 #else 1591 keg->uk_flags &= ~UMA_ZONE_PCPU; 1592 #endif 1593 1594 if (keg->uk_flags & UMA_ZONE_CACHESPREAD) { 1595 keg_cachespread_init(keg); 1596 } else { 1597 if (keg->uk_size > UMA_SLAB_SPACE) 1598 keg_large_init(keg); 1599 else 1600 keg_small_init(keg); 1601 } 1602 1603 if (keg->uk_flags & UMA_ZONE_OFFPAGE) 1604 keg->uk_slabzone = slabzone; 1605 1606 /* 1607 * If we haven't booted yet we need allocations to go through the 1608 * startup cache until the vm is ready. 
1609 */ 1610 if (booted < BOOT_PAGEALLOC) 1611 keg->uk_allocf = startup_alloc; 1612 #ifdef UMA_MD_SMALL_ALLOC 1613 else if (keg->uk_ppera == 1) 1614 keg->uk_allocf = uma_small_alloc; 1615 #endif 1616 else if (keg->uk_flags & UMA_ZONE_PCPU) 1617 keg->uk_allocf = pcpu_page_alloc; 1618 else 1619 keg->uk_allocf = page_alloc; 1620 #ifdef UMA_MD_SMALL_ALLOC 1621 if (keg->uk_ppera == 1) 1622 keg->uk_freef = uma_small_free; 1623 else 1624 #endif 1625 if (keg->uk_flags & UMA_ZONE_PCPU) 1626 keg->uk_freef = pcpu_page_free; 1627 else 1628 keg->uk_freef = page_free; 1629 1630 /* 1631 * Initialize keg's lock 1632 */ 1633 KEG_LOCK_INIT(keg, (arg->flags & UMA_ZONE_MTXCLASS)); 1634 1635 /* 1636 * If we're putting the slab header in the actual page we need to 1637 * figure out where in each page it goes. This calculates a right 1638 * justified offset into the memory on an ALIGN_PTR boundary. 1639 */ 1640 if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) { 1641 u_int totsize; 1642 1643 /* Size of the slab struct and free list */ 1644 totsize = sizeof(struct uma_slab); 1645 1646 if (totsize & UMA_ALIGN_PTR) 1647 totsize = (totsize & ~UMA_ALIGN_PTR) + 1648 (UMA_ALIGN_PTR + 1); 1649 keg->uk_pgoff = (PAGE_SIZE * keg->uk_ppera) - totsize; 1650 1651 /* 1652 * The only way the following is possible is if with our 1653 * UMA_ALIGN_PTR adjustments we are now bigger than 1654 * UMA_SLAB_SIZE. I haven't checked whether this is 1655 * mathematically possible for all cases, so we make 1656 * sure here anyway. 1657 */ 1658 totsize = keg->uk_pgoff + sizeof(struct uma_slab); 1659 if (totsize > PAGE_SIZE * keg->uk_ppera) { 1660 printf("zone %s ipers %d rsize %d size %d\n", 1661 zone->uz_name, keg->uk_ipers, keg->uk_rsize, 1662 keg->uk_size); 1663 panic("UMA slab won't fit."); 1664 } 1665 } 1666 1667 if (keg->uk_flags & UMA_ZONE_HASH) 1668 hash_alloc(&keg->uk_hash); 1669 1670 CTR5(KTR_UMA, "keg_ctor %p zone %s(%p) out %d free %d\n", 1671 keg, zone->uz_name, zone, 1672 (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free, 1673 keg->uk_free); 1674 1675 LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link); 1676 1677 rw_wlock(&uma_rwlock); 1678 LIST_INSERT_HEAD(&uma_kegs, keg, uk_link); 1679 rw_wunlock(&uma_rwlock); 1680 return (0); 1681 } 1682 1683 /* 1684 * Zone header ctor. This initializes all fields, locks, etc. 1685 * 1686 * Arguments/Returns follow uma_ctor specifications 1687 * udata Actually uma_zctor_args 1688 */ 1689 static int 1690 zone_ctor(void *mem, int size, void *udata, int flags) 1691 { 1692 struct uma_zctor_args *arg = udata; 1693 uma_zone_t zone = mem; 1694 uma_zone_t z; 1695 uma_keg_t keg; 1696 1697 bzero(zone, size); 1698 zone->uz_name = arg->name; 1699 zone->uz_ctor = arg->ctor; 1700 zone->uz_dtor = arg->dtor; 1701 zone->uz_slab = zone_fetch_slab; 1702 zone->uz_init = NULL; 1703 zone->uz_fini = NULL; 1704 zone->uz_allocs = 0; 1705 zone->uz_frees = 0; 1706 zone->uz_fails = 0; 1707 zone->uz_sleeps = 0; 1708 zone->uz_count = 0; 1709 zone->uz_count_min = 0; 1710 zone->uz_flags = 0; 1711 zone->uz_warning = NULL; 1712 /* The domain structures follow the cpu structures. */ 1713 zone->uz_domain = (struct uma_zone_domain *)&zone->uz_cpu[mp_ncpus]; 1714 timevalclear(&zone->uz_ratecheck); 1715 keg = arg->keg; 1716 1717 ZONE_LOCK_INIT(zone, (arg->flags & UMA_ZONE_MTXCLASS)); 1718 1719 /* 1720 * This is a pure cache zone, no kegs. 
1721 */ 1722 if (arg->import) { 1723 if (arg->flags & UMA_ZONE_VM) 1724 arg->flags |= UMA_ZFLAG_CACHEONLY; 1725 zone->uz_flags = arg->flags; 1726 zone->uz_size = arg->size; 1727 zone->uz_import = arg->import; 1728 zone->uz_release = arg->release; 1729 zone->uz_arg = arg->arg; 1730 zone->uz_lockptr = &zone->uz_lock; 1731 rw_wlock(&uma_rwlock); 1732 LIST_INSERT_HEAD(&uma_cachezones, zone, uz_link); 1733 rw_wunlock(&uma_rwlock); 1734 goto out; 1735 } 1736 1737 /* 1738 * Use the regular zone/keg/slab allocator. 1739 */ 1740 zone->uz_import = (uma_import)zone_import; 1741 zone->uz_release = (uma_release)zone_release; 1742 zone->uz_arg = zone; 1743 1744 if (arg->flags & UMA_ZONE_SECONDARY) { 1745 KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg")); 1746 zone->uz_init = arg->uminit; 1747 zone->uz_fini = arg->fini; 1748 zone->uz_lockptr = &keg->uk_lock; 1749 zone->uz_flags |= UMA_ZONE_SECONDARY; 1750 rw_wlock(&uma_rwlock); 1751 ZONE_LOCK(zone); 1752 LIST_FOREACH(z, &keg->uk_zones, uz_link) { 1753 if (LIST_NEXT(z, uz_link) == NULL) { 1754 LIST_INSERT_AFTER(z, zone, uz_link); 1755 break; 1756 } 1757 } 1758 ZONE_UNLOCK(zone); 1759 rw_wunlock(&uma_rwlock); 1760 } else if (keg == NULL) { 1761 if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini, 1762 arg->align, arg->flags)) == NULL) 1763 return (ENOMEM); 1764 } else { 1765 struct uma_kctor_args karg; 1766 int error; 1767 1768 /* We should only be here from uma_startup() */ 1769 karg.size = arg->size; 1770 karg.uminit = arg->uminit; 1771 karg.fini = arg->fini; 1772 karg.align = arg->align; 1773 karg.flags = arg->flags; 1774 karg.zone = zone; 1775 error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg, 1776 flags); 1777 if (error) 1778 return (error); 1779 } 1780 1781 /* 1782 * Link in the first keg. 1783 */ 1784 zone->uz_klink.kl_keg = keg; 1785 LIST_INSERT_HEAD(&zone->uz_kegs, &zone->uz_klink, kl_link); 1786 zone->uz_lockptr = &keg->uk_lock; 1787 zone->uz_size = keg->uk_size; 1788 zone->uz_flags |= (keg->uk_flags & 1789 (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT)); 1790 1791 /* 1792 * Some internal zones don't have room allocated for the per cpu 1793 * caches. If we're internal, bail out here. 1794 */ 1795 if (keg->uk_flags & UMA_ZFLAG_INTERNAL) { 1796 KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0, 1797 ("Secondary zone requested UMA_ZFLAG_INTERNAL")); 1798 return (0); 1799 } 1800 1801 out: 1802 KASSERT((arg->flags & (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET)) != 1803 (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET), 1804 ("Invalid zone flag combination")); 1805 if ((arg->flags & UMA_ZONE_MAXBUCKET) != 0) 1806 zone->uz_count = BUCKET_MAX; 1807 else if ((arg->flags & UMA_ZONE_NOBUCKET) != 0) 1808 zone->uz_count = 0; 1809 else 1810 zone->uz_count = bucket_select(zone->uz_size); 1811 zone->uz_count_min = zone->uz_count; 1812 1813 return (0); 1814 } 1815 1816 /* 1817 * Keg header dtor. This frees all data, destroys locks, frees the hash 1818 * table and removes the keg from the global list. 1819 * 1820 * Arguments/Returns follow uma_dtor specifications 1821 * udata unused 1822 */ 1823 static void 1824 keg_dtor(void *arg, int size, void *udata) 1825 { 1826 uma_keg_t keg; 1827 1828 keg = (uma_keg_t)arg; 1829 KEG_LOCK(keg); 1830 if (keg->uk_free != 0) { 1831 printf("Freed UMA keg (%s) was not empty (%d items). " 1832 " Lost %d pages of memory.\n", 1833 keg->uk_name ? 
keg->uk_name : "", 1834 keg->uk_free, keg->uk_pages); 1835 } 1836 KEG_UNLOCK(keg); 1837 1838 hash_free(&keg->uk_hash); 1839 1840 KEG_LOCK_FINI(keg); 1841 } 1842 1843 /* 1844 * Zone header dtor. 1845 * 1846 * Arguments/Returns follow uma_dtor specifications 1847 * udata unused 1848 */ 1849 static void 1850 zone_dtor(void *arg, int size, void *udata) 1851 { 1852 uma_klink_t klink; 1853 uma_zone_t zone; 1854 uma_keg_t keg; 1855 1856 zone = (uma_zone_t)arg; 1857 keg = zone_first_keg(zone); 1858 1859 if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL)) 1860 cache_drain(zone); 1861 1862 rw_wlock(&uma_rwlock); 1863 LIST_REMOVE(zone, uz_link); 1864 rw_wunlock(&uma_rwlock); 1865 /* 1866 * XXX there are some races here where 1867 * the zone can be drained but zone lock 1868 * released and then refilled before we 1869 * remove it... we dont care for now 1870 */ 1871 zone_drain_wait(zone, M_WAITOK); 1872 /* 1873 * Unlink all of our kegs. 1874 */ 1875 while ((klink = LIST_FIRST(&zone->uz_kegs)) != NULL) { 1876 klink->kl_keg = NULL; 1877 LIST_REMOVE(klink, kl_link); 1878 if (klink == &zone->uz_klink) 1879 continue; 1880 free(klink, M_TEMP); 1881 } 1882 /* 1883 * We only destroy kegs from non secondary zones. 1884 */ 1885 if (keg != NULL && (zone->uz_flags & UMA_ZONE_SECONDARY) == 0) { 1886 rw_wlock(&uma_rwlock); 1887 LIST_REMOVE(keg, uk_link); 1888 rw_wunlock(&uma_rwlock); 1889 zone_free_item(kegs, keg, NULL, SKIP_NONE); 1890 } 1891 ZONE_LOCK_FINI(zone); 1892 } 1893 1894 /* 1895 * Traverses every zone in the system and calls a callback 1896 * 1897 * Arguments: 1898 * zfunc A pointer to a function which accepts a zone 1899 * as an argument. 1900 * 1901 * Returns: 1902 * Nothing 1903 */ 1904 static void 1905 zone_foreach(void (*zfunc)(uma_zone_t)) 1906 { 1907 uma_keg_t keg; 1908 uma_zone_t zone; 1909 1910 rw_rlock(&uma_rwlock); 1911 LIST_FOREACH(keg, &uma_kegs, uk_link) { 1912 LIST_FOREACH(zone, &keg->uk_zones, uz_link) 1913 zfunc(zone); 1914 } 1915 rw_runlock(&uma_rwlock); 1916 } 1917 1918 /* 1919 * Count how many pages do we need to bootstrap. VM supplies 1920 * its need in early zones in the argument, we add up our zones, 1921 * which consist of: UMA Slabs, UMA Hash and 9 Bucket zones. The 1922 * zone of zones and zone of kegs are accounted separately. 1923 */ 1924 #define UMA_BOOT_ZONES 11 1925 /* Zone of zones and zone of kegs have arbitrary alignment. */ 1926 #define UMA_BOOT_ALIGN 32 1927 static int zsize, ksize; 1928 int 1929 uma_startup_count(int vm_zones) 1930 { 1931 int zones, pages; 1932 1933 ksize = sizeof(struct uma_keg) + 1934 (sizeof(struct uma_domain) * vm_ndomains); 1935 zsize = sizeof(struct uma_zone) + 1936 (sizeof(struct uma_cache) * (mp_maxid + 1)) + 1937 (sizeof(struct uma_zone_domain) * vm_ndomains); 1938 1939 /* 1940 * Memory for the zone of kegs and its keg, 1941 * and for zone of zones. 1942 */ 1943 pages = howmany(roundup(zsize, CACHE_LINE_SIZE) * 2 + 1944 roundup(ksize, CACHE_LINE_SIZE), PAGE_SIZE); 1945 1946 #ifdef UMA_MD_SMALL_ALLOC 1947 zones = UMA_BOOT_ZONES; 1948 #else 1949 zones = UMA_BOOT_ZONES + vm_zones; 1950 vm_zones = 0; 1951 #endif 1952 1953 /* Memory for the rest of startup zones, UMA and VM, ... */ 1954 if (zsize > UMA_SLAB_SPACE) 1955 pages += (zones + vm_zones) * 1956 howmany(roundup2(zsize, UMA_BOOT_ALIGN), UMA_SLAB_SIZE); 1957 else if (roundup2(zsize, UMA_BOOT_ALIGN) > UMA_SLAB_SPACE) 1958 pages += zones; 1959 else 1960 pages += howmany(zones, 1961 UMA_SLAB_SPACE / roundup2(zsize, UMA_BOOT_ALIGN)); 1962 1963 /* ... and their kegs. 
Note that zone of zones allocates a keg! */ 1964 pages += howmany(zones + 1, 1965 UMA_SLAB_SPACE / roundup2(ksize, UMA_BOOT_ALIGN)); 1966 1967 /* 1968 * Most of startup zones are not going to be offpages, that's 1969 * why we use UMA_SLAB_SPACE instead of UMA_SLAB_SIZE in all 1970 * calculations. Some large bucket zones will be offpage, and 1971 * thus will allocate hashes. We take conservative approach 1972 * and assume that all zones may allocate hash. This may give 1973 * us some positive inaccuracy, usually an extra single page. 1974 */ 1975 pages += howmany(zones, UMA_SLAB_SPACE / 1976 (sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT)); 1977 1978 return (pages); 1979 } 1980 1981 void 1982 uma_startup(void *mem, int npages) 1983 { 1984 struct uma_zctor_args args; 1985 uma_keg_t masterkeg; 1986 uintptr_t m; 1987 1988 #ifdef DIAGNOSTIC 1989 printf("Entering %s with %d boot pages configured\n", __func__, npages); 1990 #endif 1991 1992 rw_init(&uma_rwlock, "UMA lock"); 1993 1994 /* Use bootpages memory for the zone of zones and zone of kegs. */ 1995 m = (uintptr_t)mem; 1996 zones = (uma_zone_t)m; 1997 m += roundup(zsize, CACHE_LINE_SIZE); 1998 kegs = (uma_zone_t)m; 1999 m += roundup(zsize, CACHE_LINE_SIZE); 2000 masterkeg = (uma_keg_t)m; 2001 m += roundup(ksize, CACHE_LINE_SIZE); 2002 m = roundup(m, PAGE_SIZE); 2003 npages -= (m - (uintptr_t)mem) / PAGE_SIZE; 2004 mem = (void *)m; 2005 2006 /* "manually" create the initial zone */ 2007 memset(&args, 0, sizeof(args)); 2008 args.name = "UMA Kegs"; 2009 args.size = ksize; 2010 args.ctor = keg_ctor; 2011 args.dtor = keg_dtor; 2012 args.uminit = zero_init; 2013 args.fini = NULL; 2014 args.keg = masterkeg; 2015 args.align = UMA_BOOT_ALIGN - 1; 2016 args.flags = UMA_ZFLAG_INTERNAL; 2017 zone_ctor(kegs, zsize, &args, M_WAITOK); 2018 2019 bootmem = mem; 2020 boot_pages = npages; 2021 2022 args.name = "UMA Zones"; 2023 args.size = zsize; 2024 args.ctor = zone_ctor; 2025 args.dtor = zone_dtor; 2026 args.uminit = zero_init; 2027 args.fini = NULL; 2028 args.keg = NULL; 2029 args.align = UMA_BOOT_ALIGN - 1; 2030 args.flags = UMA_ZFLAG_INTERNAL; 2031 zone_ctor(zones, zsize, &args, M_WAITOK); 2032 2033 /* Now make a zone for slab headers */ 2034 slabzone = uma_zcreate("UMA Slabs", 2035 sizeof(struct uma_slab), 2036 NULL, NULL, NULL, NULL, 2037 UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL); 2038 2039 hashzone = uma_zcreate("UMA Hash", 2040 sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT, 2041 NULL, NULL, NULL, NULL, 2042 UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL); 2043 2044 bucket_init(); 2045 2046 booted = BOOT_STRAPPED; 2047 } 2048 2049 void 2050 uma_startup1(void) 2051 { 2052 2053 #ifdef DIAGNOSTIC 2054 printf("Entering %s with %d boot pages left\n", __func__, boot_pages); 2055 #endif 2056 booted = BOOT_PAGEALLOC; 2057 } 2058 2059 void 2060 uma_startup2(void) 2061 { 2062 2063 #ifdef DIAGNOSTIC 2064 printf("Entering %s with %d boot pages left\n", __func__, boot_pages); 2065 #endif 2066 booted = BOOT_BUCKETS; 2067 sx_init(&uma_drain_lock, "umadrain"); 2068 bucket_enable(); 2069 } 2070 2071 /* 2072 * Initialize our callout handle 2073 * 2074 */ 2075 static void 2076 uma_startup3(void) 2077 { 2078 2079 #ifdef INVARIANTS 2080 TUNABLE_INT_FETCH("vm.debug.divisor", &dbg_divisor); 2081 uma_dbg_cnt = counter_u64_alloc(M_WAITOK); 2082 uma_skip_cnt = counter_u64_alloc(M_WAITOK); 2083 #endif 2084 callout_init(&uma_callout, 1); 2085 callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL); 2086 booted = BOOT_RUNNING; 2087 } 2088 2089 static uma_keg_t 2090 uma_kcreate(uma_zone_t 
zone, size_t size, uma_init uminit, uma_fini fini, 2091 int align, uint32_t flags) 2092 { 2093 struct uma_kctor_args args; 2094 2095 args.size = size; 2096 args.uminit = uminit; 2097 args.fini = fini; 2098 args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align; 2099 args.flags = flags; 2100 args.zone = zone; 2101 return (zone_alloc_item(kegs, &args, UMA_ANYDOMAIN, M_WAITOK)); 2102 } 2103 2104 /* Public functions */ 2105 /* See uma.h */ 2106 void 2107 uma_set_align(int align) 2108 { 2109 2110 if (align != UMA_ALIGN_CACHE) 2111 uma_align_cache = align; 2112 } 2113 2114 /* See uma.h */ 2115 uma_zone_t 2116 uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor, 2117 uma_init uminit, uma_fini fini, int align, uint32_t flags) 2118 2119 { 2120 struct uma_zctor_args args; 2121 uma_zone_t res; 2122 bool locked; 2123 2124 KASSERT(powerof2(align + 1), ("invalid zone alignment %d for \"%s\"", 2125 align, name)); 2126 2127 /* This stuff is essential for the zone ctor */ 2128 memset(&args, 0, sizeof(args)); 2129 args.name = name; 2130 args.size = size; 2131 args.ctor = ctor; 2132 args.dtor = dtor; 2133 args.uminit = uminit; 2134 args.fini = fini; 2135 #ifdef INVARIANTS 2136 /* 2137 * If a zone is being created with an empty constructor and 2138 * destructor, pass UMA constructor/destructor which checks for 2139 * memory use after free. 2140 */ 2141 if ((!(flags & (UMA_ZONE_ZINIT | UMA_ZONE_NOFREE))) && 2142 ctor == NULL && dtor == NULL && uminit == NULL && fini == NULL) { 2143 args.ctor = trash_ctor; 2144 args.dtor = trash_dtor; 2145 args.uminit = trash_init; 2146 args.fini = trash_fini; 2147 } 2148 #endif 2149 args.align = align; 2150 args.flags = flags; 2151 args.keg = NULL; 2152 2153 if (booted < BOOT_BUCKETS) { 2154 locked = false; 2155 } else { 2156 sx_slock(&uma_drain_lock); 2157 locked = true; 2158 } 2159 res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK); 2160 if (locked) 2161 sx_sunlock(&uma_drain_lock); 2162 return (res); 2163 } 2164 2165 /* See uma.h */ 2166 uma_zone_t 2167 uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor, 2168 uma_init zinit, uma_fini zfini, uma_zone_t master) 2169 { 2170 struct uma_zctor_args args; 2171 uma_keg_t keg; 2172 uma_zone_t res; 2173 bool locked; 2174 2175 keg = zone_first_keg(master); 2176 memset(&args, 0, sizeof(args)); 2177 args.name = name; 2178 args.size = keg->uk_size; 2179 args.ctor = ctor; 2180 args.dtor = dtor; 2181 args.uminit = zinit; 2182 args.fini = zfini; 2183 args.align = keg->uk_align; 2184 args.flags = keg->uk_flags | UMA_ZONE_SECONDARY; 2185 args.keg = keg; 2186 2187 if (booted < BOOT_BUCKETS) { 2188 locked = false; 2189 } else { 2190 sx_slock(&uma_drain_lock); 2191 locked = true; 2192 } 2193 /* XXX Attaches only one keg of potentially many. 
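 *
 * As a hedged usage sketch (the "obj" names below are hypothetical and
 * not part of this file), a secondary zone reuses its master's keg, so
 * both zones carve items out of the same slabs while applying their own
 * ctor/dtor and zinit/zfini pairs:
 *
 *	master = uma_zcreate("obj raw", sizeof(struct obj),
 *	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
 *	cooked = uma_zsecond_create("obj cooked", obj_ctor, obj_dtor,
 *	    obj_zinit, obj_zfini, master);
 *
 * Further kegs may be attached to such a zone later with
 * uma_zsecond_add().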
*/ 2194 res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK); 2195 if (locked) 2196 sx_sunlock(&uma_drain_lock); 2197 return (res); 2198 } 2199 2200 /* See uma.h */ 2201 uma_zone_t 2202 uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor, 2203 uma_init zinit, uma_fini zfini, uma_import zimport, 2204 uma_release zrelease, void *arg, int flags) 2205 { 2206 struct uma_zctor_args args; 2207 2208 memset(&args, 0, sizeof(args)); 2209 args.name = name; 2210 args.size = size; 2211 args.ctor = ctor; 2212 args.dtor = dtor; 2213 args.uminit = zinit; 2214 args.fini = zfini; 2215 args.import = zimport; 2216 args.release = zrelease; 2217 args.arg = arg; 2218 args.align = 0; 2219 args.flags = flags; 2220 2221 return (zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK)); 2222 } 2223 2224 static void 2225 zone_lock_pair(uma_zone_t a, uma_zone_t b) 2226 { 2227 if (a < b) { 2228 ZONE_LOCK(a); 2229 mtx_lock_flags(b->uz_lockptr, MTX_DUPOK); 2230 } else { 2231 ZONE_LOCK(b); 2232 mtx_lock_flags(a->uz_lockptr, MTX_DUPOK); 2233 } 2234 } 2235 2236 static void 2237 zone_unlock_pair(uma_zone_t a, uma_zone_t b) 2238 { 2239 2240 ZONE_UNLOCK(a); 2241 ZONE_UNLOCK(b); 2242 } 2243 2244 int 2245 uma_zsecond_add(uma_zone_t zone, uma_zone_t master) 2246 { 2247 uma_klink_t klink; 2248 uma_klink_t kl; 2249 int error; 2250 2251 error = 0; 2252 klink = malloc(sizeof(*klink), M_TEMP, M_WAITOK | M_ZERO); 2253 2254 zone_lock_pair(zone, master); 2255 /* 2256 * zone must use vtoslab() to resolve objects and must already be 2257 * a secondary. 2258 */ 2259 if ((zone->uz_flags & (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) 2260 != (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) { 2261 error = EINVAL; 2262 goto out; 2263 } 2264 /* 2265 * The new master must also use vtoslab(). 2266 */ 2267 if ((zone->uz_flags & UMA_ZONE_VTOSLAB) != UMA_ZONE_VTOSLAB) { 2268 error = EINVAL; 2269 goto out; 2270 } 2271 2272 /* 2273 * The underlying object must be the same size. rsize 2274 * may be different. 2275 */ 2276 if (master->uz_size != zone->uz_size) { 2277 error = E2BIG; 2278 goto out; 2279 } 2280 /* 2281 * Put it at the end of the list. 2282 */ 2283 klink->kl_keg = zone_first_keg(master); 2284 LIST_FOREACH(kl, &zone->uz_kegs, kl_link) { 2285 if (LIST_NEXT(kl, kl_link) == NULL) { 2286 LIST_INSERT_AFTER(kl, klink, kl_link); 2287 break; 2288 } 2289 } 2290 klink = NULL; 2291 zone->uz_flags |= UMA_ZFLAG_MULTI; 2292 zone->uz_slab = zone_fetch_slab_multi; 2293 2294 out: 2295 zone_unlock_pair(zone, master); 2296 if (klink != NULL) 2297 free(klink, M_TEMP); 2298 2299 return (error); 2300 } 2301 2302 2303 /* See uma.h */ 2304 void 2305 uma_zdestroy(uma_zone_t zone) 2306 { 2307 2308 sx_slock(&uma_drain_lock); 2309 zone_free_item(zones, zone, NULL, SKIP_NONE); 2310 sx_sunlock(&uma_drain_lock); 2311 } 2312 2313 void 2314 uma_zwait(uma_zone_t zone) 2315 { 2316 void *item; 2317 2318 item = uma_zalloc_arg(zone, NULL, M_WAITOK); 2319 uma_zfree(zone, item); 2320 } 2321 2322 void * 2323 uma_zalloc_pcpu_arg(uma_zone_t zone, void *udata, int flags) 2324 { 2325 void *item; 2326 #ifdef SMP 2327 int i; 2328 2329 MPASS(zone->uz_flags & UMA_ZONE_PCPU); 2330 #endif 2331 item = uma_zalloc_arg(zone, udata, flags &~ M_ZERO); 2332 if (item != NULL && (flags & M_ZERO)) { 2333 #ifdef SMP 2334 CPU_FOREACH(i) 2335 bzero(zpcpu_get_cpu(item, i), zone->uz_size); 2336 #else 2337 bzero(item, zone->uz_size); 2338 #endif 2339 } 2340 return (item); 2341 } 2342 2343 /* 2344 * A stub while both regular and pcpu cases are identical. 
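 *
 * A hedged usage sketch for the pcpu entry points (the zone and
 * variable names are hypothetical): such zones are created with
 * UMA_ZONE_PCPU, and each allocation returns a per-CPU replicated
 * buffer that is addressed per CPU (see the zpcpu_get_cpu() use above):
 *
 *	pcpu64_zone = uma_zcreate("pcpu-64", sizeof(uint64_t),
 *	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_PCPU);
 *	p = uma_zalloc_pcpu_arg(pcpu64_zone, NULL, M_WAITOK | M_ZERO);
 *	...
 *	uma_zfree_pcpu_arg(pcpu64_zone, p, NULL);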
2345 */ 2346 void 2347 uma_zfree_pcpu_arg(uma_zone_t zone, void *item, void *udata) 2348 { 2349 2350 #ifdef SMP 2351 MPASS(zone->uz_flags & UMA_ZONE_PCPU); 2352 #endif 2353 uma_zfree_arg(zone, item, udata); 2354 } 2355 2356 /* See uma.h */ 2357 void * 2358 uma_zalloc_arg(uma_zone_t zone, void *udata, int flags) 2359 { 2360 uma_zone_domain_t zdom; 2361 uma_bucket_t bucket; 2362 uma_cache_t cache; 2363 void *item; 2364 int cpu, domain, lockfail; 2365 #ifdef INVARIANTS 2366 bool skipdbg; 2367 #endif 2368 2369 /* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */ 2370 random_harvest_fast_uma(&zone, sizeof(zone), 1, RANDOM_UMA); 2371 2372 /* This is the fast path allocation */ 2373 CTR4(KTR_UMA, "uma_zalloc_arg thread %x zone %s(%p) flags %d", 2374 curthread, zone->uz_name, zone, flags); 2375 2376 if (flags & M_WAITOK) { 2377 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 2378 "uma_zalloc_arg: zone \"%s\"", zone->uz_name); 2379 } 2380 KASSERT((flags & M_EXEC) == 0, ("uma_zalloc_arg: called with M_EXEC")); 2381 KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(), 2382 ("uma_zalloc_arg: called with spinlock or critical section held")); 2383 if (zone->uz_flags & UMA_ZONE_PCPU) 2384 KASSERT((flags & M_ZERO) == 0, ("allocating from a pcpu zone " 2385 "with M_ZERO passed")); 2386 2387 #ifdef DEBUG_MEMGUARD 2388 if (memguard_cmp_zone(zone)) { 2389 item = memguard_alloc(zone->uz_size, flags); 2390 if (item != NULL) { 2391 if (zone->uz_init != NULL && 2392 zone->uz_init(item, zone->uz_size, flags) != 0) 2393 return (NULL); 2394 if (zone->uz_ctor != NULL && 2395 zone->uz_ctor(item, zone->uz_size, udata, 2396 flags) != 0) { 2397 zone->uz_fini(item, zone->uz_size); 2398 return (NULL); 2399 } 2400 return (item); 2401 } 2402 /* This is unfortunate but should not be fatal. */ 2403 } 2404 #endif 2405 /* 2406 * If possible, allocate from the per-CPU cache. There are two 2407 * requirements for safe access to the per-CPU cache: (1) the thread 2408 * accessing the cache must not be preempted or yield during access, 2409 * and (2) the thread must not migrate CPUs without switching which 2410 * cache it accesses. We rely on a critical section to prevent 2411 * preemption and migration. We release the critical section in 2412 * order to acquire the zone mutex if we are unable to allocate from 2413 * the current cache; when we re-acquire the critical section, we 2414 * must detect and handle migration if it has occurred. 
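 *
 * In outline, the fast path below follows this discipline (shown here
 * only as an illustration of the rules above):
 *
 *	critical_enter();
 *	cache = &zone->uz_cpu[curcpu];
 *	... take an item from cache->uc_allocbucket ...
 *	critical_exit();
 *
 * and whenever the critical section has been dropped and re-entered,
 * curcpu and the cache pointer are re-read, because the thread may have
 * migrated to another CPU in the meantime.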
2415 */ 2416 critical_enter(); 2417 cpu = curcpu; 2418 cache = &zone->uz_cpu[cpu]; 2419 2420 zalloc_start: 2421 bucket = cache->uc_allocbucket; 2422 if (bucket != NULL && bucket->ub_cnt > 0) { 2423 bucket->ub_cnt--; 2424 item = bucket->ub_bucket[bucket->ub_cnt]; 2425 #ifdef INVARIANTS 2426 bucket->ub_bucket[bucket->ub_cnt] = NULL; 2427 #endif 2428 KASSERT(item != NULL, ("uma_zalloc: Bucket pointer mangled.")); 2429 cache->uc_allocs++; 2430 critical_exit(); 2431 #ifdef INVARIANTS 2432 skipdbg = uma_dbg_zskip(zone, item); 2433 #endif 2434 if (zone->uz_ctor != NULL && 2435 #ifdef INVARIANTS 2436 (!skipdbg || zone->uz_ctor != trash_ctor || 2437 zone->uz_dtor != trash_dtor) && 2438 #endif 2439 zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) { 2440 atomic_add_long(&zone->uz_fails, 1); 2441 zone_free_item(zone, item, udata, SKIP_DTOR); 2442 return (NULL); 2443 } 2444 #ifdef INVARIANTS 2445 if (!skipdbg) 2446 uma_dbg_alloc(zone, NULL, item); 2447 #endif 2448 if (flags & M_ZERO) 2449 uma_zero_item(item, zone); 2450 return (item); 2451 } 2452 2453 /* 2454 * We have run out of items in our alloc bucket. 2455 * See if we can switch with our free bucket. 2456 */ 2457 bucket = cache->uc_freebucket; 2458 if (bucket != NULL && bucket->ub_cnt > 0) { 2459 CTR2(KTR_UMA, 2460 "uma_zalloc: zone %s(%p) swapping empty with alloc", 2461 zone->uz_name, zone); 2462 cache->uc_freebucket = cache->uc_allocbucket; 2463 cache->uc_allocbucket = bucket; 2464 goto zalloc_start; 2465 } 2466 2467 /* 2468 * Discard any empty allocation bucket while we hold no locks. 2469 */ 2470 bucket = cache->uc_allocbucket; 2471 cache->uc_allocbucket = NULL; 2472 critical_exit(); 2473 if (bucket != NULL) 2474 bucket_free(zone, bucket, udata); 2475 2476 if (zone->uz_flags & UMA_ZONE_NUMA) 2477 domain = PCPU_GET(domain); 2478 else 2479 domain = UMA_ANYDOMAIN; 2480 2481 /* Short-circuit for zones without buckets and low memory. */ 2482 if (zone->uz_count == 0 || bucketdisable) 2483 goto zalloc_item; 2484 2485 /* 2486 * Attempt to retrieve the item from the per-CPU cache has failed, so 2487 * we must go back to the zone. This requires the zone lock, so we 2488 * must drop the critical section, then re-acquire it when we go back 2489 * to the cache. Since the critical section is released, we may be 2490 * preempted or migrate. As such, make sure not to maintain any 2491 * thread-local state specific to the cache from prior to releasing 2492 * the critical section. 2493 */ 2494 lockfail = 0; 2495 if (ZONE_TRYLOCK(zone) == 0) { 2496 /* Record contention to size the buckets. */ 2497 ZONE_LOCK(zone); 2498 lockfail = 1; 2499 } 2500 critical_enter(); 2501 cpu = curcpu; 2502 cache = &zone->uz_cpu[cpu]; 2503 2504 /* See if we lost the race to fill the cache. */ 2505 if (cache->uc_allocbucket != NULL) { 2506 ZONE_UNLOCK(zone); 2507 goto zalloc_start; 2508 } 2509 2510 /* 2511 * Check the zone's cache of buckets. 2512 */ 2513 if (domain == UMA_ANYDOMAIN) 2514 zdom = &zone->uz_domain[0]; 2515 else 2516 zdom = &zone->uz_domain[domain]; 2517 if ((bucket = LIST_FIRST(&zdom->uzd_buckets)) != NULL) { 2518 KASSERT(bucket->ub_cnt != 0, 2519 ("uma_zalloc_arg: Returning an empty bucket.")); 2520 2521 LIST_REMOVE(bucket, ub_link); 2522 cache->uc_allocbucket = bucket; 2523 ZONE_UNLOCK(zone); 2524 goto zalloc_start; 2525 } 2526 /* We are no longer associated with this CPU. */ 2527 critical_exit(); 2528 2529 /* 2530 * We bump the uz count when the cache size is insufficient to 2531 * handle the working set. 
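 *
 * A failed ZONE_TRYLOCK() above (lockfail) is the contention signal:
 * each contended pass may grow uz_count by one, bounded by BUCKET_MAX,
 * so subsequent bucket fills batch more items per trip to the zone.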
2532 */ 2533 if (lockfail && zone->uz_count < BUCKET_MAX) 2534 zone->uz_count++; 2535 ZONE_UNLOCK(zone); 2536 2537 /* 2538 * Now lets just fill a bucket and put it on the free list. If that 2539 * works we'll restart the allocation from the beginning and it 2540 * will use the just filled bucket. 2541 */ 2542 bucket = zone_alloc_bucket(zone, udata, domain, flags); 2543 CTR3(KTR_UMA, "uma_zalloc: zone %s(%p) bucket zone returned %p", 2544 zone->uz_name, zone, bucket); 2545 if (bucket != NULL) { 2546 ZONE_LOCK(zone); 2547 critical_enter(); 2548 cpu = curcpu; 2549 cache = &zone->uz_cpu[cpu]; 2550 /* 2551 * See if we lost the race or were migrated. Cache the 2552 * initialized bucket to make this less likely or claim 2553 * the memory directly. 2554 */ 2555 if (cache->uc_allocbucket != NULL || 2556 (zone->uz_flags & UMA_ZONE_NUMA && 2557 domain != PCPU_GET(domain))) 2558 LIST_INSERT_HEAD(&zdom->uzd_buckets, bucket, ub_link); 2559 else 2560 cache->uc_allocbucket = bucket; 2561 ZONE_UNLOCK(zone); 2562 goto zalloc_start; 2563 } 2564 2565 /* 2566 * We may not be able to get a bucket so return an actual item. 2567 */ 2568 zalloc_item: 2569 item = zone_alloc_item(zone, udata, domain, flags); 2570 2571 return (item); 2572 } 2573 2574 void * 2575 uma_zalloc_domain(uma_zone_t zone, void *udata, int domain, int flags) 2576 { 2577 2578 /* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */ 2579 random_harvest_fast_uma(&zone, sizeof(zone), 1, RANDOM_UMA); 2580 2581 /* This is the fast path allocation */ 2582 CTR5(KTR_UMA, 2583 "uma_zalloc_domain thread %x zone %s(%p) domain %d flags %d", 2584 curthread, zone->uz_name, zone, domain, flags); 2585 2586 if (flags & M_WAITOK) { 2587 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 2588 "uma_zalloc_domain: zone \"%s\"", zone->uz_name); 2589 } 2590 KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(), 2591 ("uma_zalloc_domain: called with spinlock or critical section held")); 2592 2593 return (zone_alloc_item(zone, udata, domain, flags)); 2594 } 2595 2596 /* 2597 * Find a slab with some space. Prefer slabs that are partially used over those 2598 * that are totally full. This helps to reduce fragmentation. 2599 * 2600 * If 'rr' is 1, search all domains starting from 'domain'. Otherwise check 2601 * only 'domain'. 2602 */ 2603 static uma_slab_t 2604 keg_first_slab(uma_keg_t keg, int domain, int rr) 2605 { 2606 uma_domain_t dom; 2607 uma_slab_t slab; 2608 int start; 2609 2610 KASSERT(domain >= 0 && domain < vm_ndomains, 2611 ("keg_first_slab: domain %d out of range", domain)); 2612 2613 slab = NULL; 2614 start = domain; 2615 do { 2616 dom = &keg->uk_domain[domain]; 2617 if (!LIST_EMPTY(&dom->ud_part_slab)) 2618 return (LIST_FIRST(&dom->ud_part_slab)); 2619 if (!LIST_EMPTY(&dom->ud_free_slab)) { 2620 slab = LIST_FIRST(&dom->ud_free_slab); 2621 LIST_REMOVE(slab, us_link); 2622 LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link); 2623 return (slab); 2624 } 2625 if (rr) 2626 domain = (domain + 1) % vm_ndomains; 2627 } while (domain != start); 2628 2629 return (NULL); 2630 } 2631 2632 static uma_slab_t 2633 keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int rdomain, int flags) 2634 { 2635 uma_domain_t dom; 2636 uma_slab_t slab; 2637 int allocflags, domain, reserve, rr, start; 2638 2639 mtx_assert(&keg->uk_lock, MA_OWNED); 2640 slab = NULL; 2641 reserve = 0; 2642 allocflags = flags; 2643 if ((flags & M_USE_RESERVE) == 0) 2644 reserve = keg->uk_reserve; 2645 2646 /* 2647 * Round-robin for non first-touch zones when there is more than one 2648 * domain. 
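 *
 * For example (illustrative), with vm_ndomains == 4 and uk_cursor
 * previously 2, the cursor advances to 3 and the first pass visits
 * domains 3, 0, 1, 2 without blocking; only if that pass comes up
 * empty and the caller passed M_WAITOK is the scan repeated with
 * blocking allowed.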
2649 */ 2650 if (vm_ndomains == 1) 2651 rdomain = 0; 2652 rr = rdomain == UMA_ANYDOMAIN; 2653 if (rr) { 2654 keg->uk_cursor = (keg->uk_cursor + 1) % vm_ndomains; 2655 domain = start = keg->uk_cursor; 2656 /* Only block on the second pass. */ 2657 if ((flags & (M_WAITOK | M_NOVM)) == M_WAITOK) 2658 allocflags = (allocflags & ~M_WAITOK) | M_NOWAIT; 2659 } else 2660 domain = start = rdomain; 2661 2662 again: 2663 do { 2664 if (keg->uk_free > reserve && 2665 (slab = keg_first_slab(keg, domain, rr)) != NULL) { 2666 MPASS(slab->us_keg == keg); 2667 return (slab); 2668 } 2669 2670 /* 2671 * M_NOVM means don't ask at all! 2672 */ 2673 if (flags & M_NOVM) 2674 break; 2675 2676 if (keg->uk_maxpages && keg->uk_pages >= keg->uk_maxpages) { 2677 keg->uk_flags |= UMA_ZFLAG_FULL; 2678 /* 2679 * If this is not a multi-zone, set the FULL bit. 2680 * Otherwise slab_multi() takes care of it. 2681 */ 2682 if ((zone->uz_flags & UMA_ZFLAG_MULTI) == 0) { 2683 zone->uz_flags |= UMA_ZFLAG_FULL; 2684 zone_log_warning(zone); 2685 zone_maxaction(zone); 2686 } 2687 if (flags & M_NOWAIT) 2688 return (NULL); 2689 zone->uz_sleeps++; 2690 msleep(keg, &keg->uk_lock, PVM, "keglimit", 0); 2691 continue; 2692 } 2693 slab = keg_alloc_slab(keg, zone, domain, allocflags); 2694 /* 2695 * If we got a slab here it's safe to mark it partially used 2696 * and return. We assume that the caller is going to remove 2697 * at least one item. 2698 */ 2699 if (slab) { 2700 MPASS(slab->us_keg == keg); 2701 dom = &keg->uk_domain[slab->us_domain]; 2702 LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link); 2703 return (slab); 2704 } 2705 if (rr) { 2706 keg->uk_cursor = (keg->uk_cursor + 1) % vm_ndomains; 2707 domain = keg->uk_cursor; 2708 } 2709 } while (domain != start); 2710 2711 /* Retry domain scan with blocking. */ 2712 if (allocflags != flags) { 2713 allocflags = flags; 2714 goto again; 2715 } 2716 2717 /* 2718 * We might not have been able to get a slab but another cpu 2719 * could have while we were unlocked. Check again before we 2720 * fail. 2721 */ 2722 if (keg->uk_free > reserve && 2723 (slab = keg_first_slab(keg, domain, rr)) != NULL) { 2724 MPASS(slab->us_keg == keg); 2725 return (slab); 2726 } 2727 return (NULL); 2728 } 2729 2730 static uma_slab_t 2731 zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int domain, int flags) 2732 { 2733 uma_slab_t slab; 2734 2735 if (keg == NULL) { 2736 keg = zone_first_keg(zone); 2737 KEG_LOCK(keg); 2738 } 2739 2740 for (;;) { 2741 slab = keg_fetch_slab(keg, zone, domain, flags); 2742 if (slab) 2743 return (slab); 2744 if (flags & (M_NOWAIT | M_NOVM)) 2745 break; 2746 } 2747 KEG_UNLOCK(keg); 2748 return (NULL); 2749 } 2750 2751 /* 2752 * uma_zone_fetch_slab_multi: Fetches a slab from one available keg. Returns 2753 * with the keg locked. On NULL no lock is held. 2754 * 2755 * The last pointer is used to seed the search. It is not required. 2756 */ 2757 static uma_slab_t 2758 zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int domain, int rflags) 2759 { 2760 uma_klink_t klink; 2761 uma_slab_t slab; 2762 uma_keg_t keg; 2763 int flags; 2764 int empty; 2765 int full; 2766 2767 /* 2768 * Don't wait on the first pass. This will skip limit tests 2769 * as well. We don't want to block if we can find a provider 2770 * without blocking. 2771 */ 2772 flags = (rflags & ~M_WAITOK) | M_NOWAIT; 2773 /* 2774 * Use the last slab allocated as a hint for where to start 2775 * the search. 
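 *
 * Note that 'last' is a keg, not a slab: zone_import() hands back the
 * keg of the slab it used previously, still locked, so that keg is
 * probed first and unlocked only if it cannot satisfy the request.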
2776 */ 2777 if (last != NULL) { 2778 slab = keg_fetch_slab(last, zone, domain, flags); 2779 if (slab) 2780 return (slab); 2781 KEG_UNLOCK(last); 2782 } 2783 /* 2784 * Loop until we have a slab incase of transient failures 2785 * while M_WAITOK is specified. I'm not sure this is 100% 2786 * required but we've done it for so long now. 2787 */ 2788 for (;;) { 2789 empty = 0; 2790 full = 0; 2791 /* 2792 * Search the available kegs for slabs. Be careful to hold the 2793 * correct lock while calling into the keg layer. 2794 */ 2795 LIST_FOREACH(klink, &zone->uz_kegs, kl_link) { 2796 keg = klink->kl_keg; 2797 KEG_LOCK(keg); 2798 if ((keg->uk_flags & UMA_ZFLAG_FULL) == 0) { 2799 slab = keg_fetch_slab(keg, zone, domain, flags); 2800 if (slab) 2801 return (slab); 2802 } 2803 if (keg->uk_flags & UMA_ZFLAG_FULL) 2804 full++; 2805 else 2806 empty++; 2807 KEG_UNLOCK(keg); 2808 } 2809 if (rflags & (M_NOWAIT | M_NOVM)) 2810 break; 2811 flags = rflags; 2812 /* 2813 * All kegs are full. XXX We can't atomically check all kegs 2814 * and sleep so just sleep for a short period and retry. 2815 */ 2816 if (full && !empty) { 2817 ZONE_LOCK(zone); 2818 zone->uz_flags |= UMA_ZFLAG_FULL; 2819 zone->uz_sleeps++; 2820 zone_log_warning(zone); 2821 zone_maxaction(zone); 2822 msleep(zone, zone->uz_lockptr, PVM, 2823 "zonelimit", hz/100); 2824 zone->uz_flags &= ~UMA_ZFLAG_FULL; 2825 ZONE_UNLOCK(zone); 2826 continue; 2827 } 2828 } 2829 return (NULL); 2830 } 2831 2832 static void * 2833 slab_alloc_item(uma_keg_t keg, uma_slab_t slab) 2834 { 2835 uma_domain_t dom; 2836 void *item; 2837 uint8_t freei; 2838 2839 MPASS(keg == slab->us_keg); 2840 mtx_assert(&keg->uk_lock, MA_OWNED); 2841 2842 freei = BIT_FFS(SLAB_SETSIZE, &slab->us_free) - 1; 2843 BIT_CLR(SLAB_SETSIZE, freei, &slab->us_free); 2844 item = slab->us_data + (keg->uk_rsize * freei); 2845 slab->us_freecount--; 2846 keg->uk_free--; 2847 2848 /* Move this slab to the full list */ 2849 if (slab->us_freecount == 0) { 2850 LIST_REMOVE(slab, us_link); 2851 dom = &keg->uk_domain[slab->us_domain]; 2852 LIST_INSERT_HEAD(&dom->ud_full_slab, slab, us_link); 2853 } 2854 2855 return (item); 2856 } 2857 2858 static int 2859 zone_import(uma_zone_t zone, void **bucket, int max, int domain, int flags) 2860 { 2861 uma_slab_t slab; 2862 uma_keg_t keg; 2863 #ifdef NUMA 2864 int stripe; 2865 #endif 2866 int i; 2867 2868 slab = NULL; 2869 keg = NULL; 2870 /* Try to keep the buckets totally full */ 2871 for (i = 0; i < max; ) { 2872 if ((slab = zone->uz_slab(zone, keg, domain, flags)) == NULL) 2873 break; 2874 keg = slab->us_keg; 2875 #ifdef NUMA 2876 stripe = howmany(max, vm_ndomains); 2877 #endif 2878 while (slab->us_freecount && i < max) { 2879 bucket[i++] = slab_alloc_item(keg, slab); 2880 if (keg->uk_free <= keg->uk_reserve) 2881 break; 2882 #ifdef NUMA 2883 /* 2884 * If the zone is striped we pick a new slab for every 2885 * N allocations. Eliminating this conditional will 2886 * instead pick a new domain for each bucket rather 2887 * than stripe within each bucket. The current option 2888 * produces more fragmentation and requires more cpu 2889 * time but yields better distribution. 2890 */ 2891 if ((zone->uz_flags & UMA_ZONE_NUMA) == 0 && 2892 vm_ndomains > 1 && --stripe == 0) 2893 break; 2894 #endif 2895 } 2896 /* Don't block if we allocated any successfully. 
*/ 2897 flags &= ~M_WAITOK; 2898 flags |= M_NOWAIT; 2899 } 2900 if (slab != NULL) 2901 KEG_UNLOCK(keg); 2902 2903 return i; 2904 } 2905 2906 static uma_bucket_t 2907 zone_alloc_bucket(uma_zone_t zone, void *udata, int domain, int flags) 2908 { 2909 uma_bucket_t bucket; 2910 int max; 2911 2912 /* Don't wait for buckets, preserve caller's NOVM setting. */ 2913 bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM)); 2914 if (bucket == NULL) 2915 return (NULL); 2916 2917 max = MIN(bucket->ub_entries, zone->uz_count); 2918 bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket, 2919 max, domain, flags); 2920 2921 /* 2922 * Initialize the memory if necessary. 2923 */ 2924 if (bucket->ub_cnt != 0 && zone->uz_init != NULL) { 2925 int i; 2926 2927 for (i = 0; i < bucket->ub_cnt; i++) 2928 if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size, 2929 flags) != 0) 2930 break; 2931 /* 2932 * If we couldn't initialize the whole bucket, put the 2933 * rest back onto the freelist. 2934 */ 2935 if (i != bucket->ub_cnt) { 2936 zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i], 2937 bucket->ub_cnt - i); 2938 #ifdef INVARIANTS 2939 bzero(&bucket->ub_bucket[i], 2940 sizeof(void *) * (bucket->ub_cnt - i)); 2941 #endif 2942 bucket->ub_cnt = i; 2943 } 2944 } 2945 2946 if (bucket->ub_cnt == 0) { 2947 bucket_free(zone, bucket, udata); 2948 atomic_add_long(&zone->uz_fails, 1); 2949 return (NULL); 2950 } 2951 2952 return (bucket); 2953 } 2954 2955 /* 2956 * Allocates a single item from a zone. 2957 * 2958 * Arguments 2959 * zone The zone to alloc for. 2960 * udata The data to be passed to the constructor. 2961 * domain The domain to allocate from or UMA_ANYDOMAIN. 2962 * flags M_WAITOK, M_NOWAIT, M_ZERO. 2963 * 2964 * Returns 2965 * NULL if there is no memory and M_NOWAIT is set 2966 * An item if successful 2967 */ 2968 2969 static void * 2970 zone_alloc_item(uma_zone_t zone, void *udata, int domain, int flags) 2971 { 2972 void *item; 2973 #ifdef INVARIANTS 2974 bool skipdbg; 2975 #endif 2976 2977 item = NULL; 2978 2979 if (zone->uz_import(zone->uz_arg, &item, 1, domain, flags) != 1) 2980 goto fail; 2981 atomic_add_long(&zone->uz_allocs, 1); 2982 2983 #ifdef INVARIANTS 2984 skipdbg = uma_dbg_zskip(zone, item); 2985 #endif 2986 /* 2987 * We have to call both the zone's init (not the keg's init) 2988 * and the zone's ctor. This is because the item is going from 2989 * a keg slab directly to the user, and the user is expecting it 2990 * to be both zone-init'd as well as zone-ctor'd. 
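 *
 * For a secondary zone created with uma_zsecond_create(), for example,
 * the backing keg's uk_init (if any) already ran when the slab was
 * populated, so only the secondary zone's own zinit and ctor are
 * applied at this point before the item reaches the caller.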
2991 */ 2992 if (zone->uz_init != NULL) { 2993 if (zone->uz_init(item, zone->uz_size, flags) != 0) { 2994 zone_free_item(zone, item, udata, SKIP_FINI); 2995 goto fail; 2996 } 2997 } 2998 if (zone->uz_ctor != NULL && 2999 #ifdef INVARIANTS 3000 (!skipdbg || zone->uz_ctor != trash_ctor || 3001 zone->uz_dtor != trash_dtor) && 3002 #endif 3003 zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) { 3004 zone_free_item(zone, item, udata, SKIP_DTOR); 3005 goto fail; 3006 } 3007 #ifdef INVARIANTS 3008 if (!skipdbg) 3009 uma_dbg_alloc(zone, NULL, item); 3010 #endif 3011 if (flags & M_ZERO) 3012 uma_zero_item(item, zone); 3013 3014 CTR3(KTR_UMA, "zone_alloc_item item %p from %s(%p)", item, 3015 zone->uz_name, zone); 3016 3017 return (item); 3018 3019 fail: 3020 CTR2(KTR_UMA, "zone_alloc_item failed from %s(%p)", 3021 zone->uz_name, zone); 3022 atomic_add_long(&zone->uz_fails, 1); 3023 return (NULL); 3024 } 3025 3026 /* See uma.h */ 3027 void 3028 uma_zfree_arg(uma_zone_t zone, void *item, void *udata) 3029 { 3030 uma_cache_t cache; 3031 uma_bucket_t bucket; 3032 uma_zone_domain_t zdom; 3033 int cpu, domain, lockfail; 3034 #ifdef INVARIANTS 3035 bool skipdbg; 3036 #endif 3037 3038 /* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */ 3039 random_harvest_fast_uma(&zone, sizeof(zone), 1, RANDOM_UMA); 3040 3041 CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread, 3042 zone->uz_name); 3043 3044 KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(), 3045 ("uma_zfree_arg: called with spinlock or critical section held")); 3046 3047 /* uma_zfree(..., NULL) does nothing, to match free(9). */ 3048 if (item == NULL) 3049 return; 3050 #ifdef DEBUG_MEMGUARD 3051 if (is_memguard_addr(item)) { 3052 if (zone->uz_dtor != NULL) 3053 zone->uz_dtor(item, zone->uz_size, udata); 3054 if (zone->uz_fini != NULL) 3055 zone->uz_fini(item, zone->uz_size); 3056 memguard_free(item); 3057 return; 3058 } 3059 #endif 3060 #ifdef INVARIANTS 3061 skipdbg = uma_dbg_zskip(zone, item); 3062 if (skipdbg == false) { 3063 if (zone->uz_flags & UMA_ZONE_MALLOC) 3064 uma_dbg_free(zone, udata, item); 3065 else 3066 uma_dbg_free(zone, NULL, item); 3067 } 3068 if (zone->uz_dtor != NULL && (!skipdbg || 3069 zone->uz_dtor != trash_dtor || zone->uz_ctor != trash_ctor)) 3070 #else 3071 if (zone->uz_dtor != NULL) 3072 #endif 3073 zone->uz_dtor(item, zone->uz_size, udata); 3074 3075 /* 3076 * The race here is acceptable. If we miss it we'll just have to wait 3077 * a little longer for the limits to be reset. 3078 */ 3079 if (zone->uz_flags & UMA_ZFLAG_FULL) 3080 goto zfree_item; 3081 3082 /* 3083 * If possible, free to the per-CPU cache. There are two 3084 * requirements for safe access to the per-CPU cache: (1) the thread 3085 * accessing the cache must not be preempted or yield during access, 3086 * and (2) the thread must not migrate CPUs without switching which 3087 * cache it accesses. We rely on a critical section to prevent 3088 * preemption and migration. We release the critical section in 3089 * order to acquire the zone mutex if we are unable to free to the 3090 * current cache; when we re-acquire the critical section, we must 3091 * detect and handle migration if it has occurred. 3092 */ 3093 zfree_restart: 3094 critical_enter(); 3095 cpu = curcpu; 3096 cache = &zone->uz_cpu[cpu]; 3097 3098 zfree_start: 3099 /* 3100 * Try to free into the allocbucket first to give LIFO ordering 3101 * for cache-hot datastructures. Spill over into the freebucket 3102 * if necessary. Alloc will swap them if one runs dry. 
3103 */ 3104 bucket = cache->uc_allocbucket; 3105 if (bucket == NULL || bucket->ub_cnt >= bucket->ub_entries) 3106 bucket = cache->uc_freebucket; 3107 if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) { 3108 KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL, 3109 ("uma_zfree: Freeing to non free bucket index.")); 3110 bucket->ub_bucket[bucket->ub_cnt] = item; 3111 bucket->ub_cnt++; 3112 cache->uc_frees++; 3113 critical_exit(); 3114 return; 3115 } 3116 3117 /* 3118 * We must go back the zone, which requires acquiring the zone lock, 3119 * which in turn means we must release and re-acquire the critical 3120 * section. Since the critical section is released, we may be 3121 * preempted or migrate. As such, make sure not to maintain any 3122 * thread-local state specific to the cache from prior to releasing 3123 * the critical section. 3124 */ 3125 critical_exit(); 3126 if (zone->uz_count == 0 || bucketdisable) 3127 goto zfree_item; 3128 3129 lockfail = 0; 3130 if (ZONE_TRYLOCK(zone) == 0) { 3131 /* Record contention to size the buckets. */ 3132 ZONE_LOCK(zone); 3133 lockfail = 1; 3134 } 3135 critical_enter(); 3136 cpu = curcpu; 3137 cache = &zone->uz_cpu[cpu]; 3138 3139 /* 3140 * Since we have locked the zone we may as well send back our stats. 3141 */ 3142 atomic_add_long(&zone->uz_allocs, cache->uc_allocs); 3143 atomic_add_long(&zone->uz_frees, cache->uc_frees); 3144 cache->uc_allocs = 0; 3145 cache->uc_frees = 0; 3146 3147 bucket = cache->uc_freebucket; 3148 if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) { 3149 ZONE_UNLOCK(zone); 3150 goto zfree_start; 3151 } 3152 cache->uc_freebucket = NULL; 3153 /* We are no longer associated with this CPU. */ 3154 critical_exit(); 3155 3156 if ((zone->uz_flags & UMA_ZONE_NUMA) != 0) 3157 domain = PCPU_GET(domain); 3158 else 3159 domain = 0; 3160 zdom = &zone->uz_domain[0]; 3161 3162 /* Can we throw this on the zone full list? */ 3163 if (bucket != NULL) { 3164 CTR3(KTR_UMA, 3165 "uma_zfree: zone %s(%p) putting bucket %p on free list", 3166 zone->uz_name, zone, bucket); 3167 /* ub_cnt is pointing to the last free item */ 3168 KASSERT(bucket->ub_cnt != 0, 3169 ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n")); 3170 if ((zone->uz_flags & UMA_ZONE_NOBUCKETCACHE) != 0) { 3171 ZONE_UNLOCK(zone); 3172 bucket_drain(zone, bucket); 3173 bucket_free(zone, bucket, udata); 3174 goto zfree_restart; 3175 } else 3176 LIST_INSERT_HEAD(&zdom->uzd_buckets, bucket, ub_link); 3177 } 3178 3179 /* 3180 * We bump the uz count when the cache size is insufficient to 3181 * handle the working set. 3182 */ 3183 if (lockfail && zone->uz_count < BUCKET_MAX) 3184 zone->uz_count++; 3185 ZONE_UNLOCK(zone); 3186 3187 bucket = bucket_alloc(zone, udata, M_NOWAIT); 3188 CTR3(KTR_UMA, "uma_zfree: zone %s(%p) allocated bucket %p", 3189 zone->uz_name, zone, bucket); 3190 if (bucket) { 3191 critical_enter(); 3192 cpu = curcpu; 3193 cache = &zone->uz_cpu[cpu]; 3194 if (cache->uc_freebucket == NULL && 3195 ((zone->uz_flags & UMA_ZONE_NUMA) == 0 || 3196 domain == PCPU_GET(domain))) { 3197 cache->uc_freebucket = bucket; 3198 goto zfree_start; 3199 } 3200 /* 3201 * We lost the race, start over. We have to drop our 3202 * critical section to free the bucket. 3203 */ 3204 critical_exit(); 3205 bucket_free(zone, bucket, udata); 3206 goto zfree_restart; 3207 } 3208 3209 /* 3210 * If nothing else caught this, we'll just do an internal free. 
3211 */ 3212 zfree_item: 3213 zone_free_item(zone, item, udata, SKIP_DTOR); 3214 3215 return; 3216 } 3217 3218 void 3219 uma_zfree_domain(uma_zone_t zone, void *item, void *udata) 3220 { 3221 3222 /* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */ 3223 random_harvest_fast_uma(&zone, sizeof(zone), 1, RANDOM_UMA); 3224 3225 CTR2(KTR_UMA, "uma_zfree_domain thread %x zone %s", curthread, 3226 zone->uz_name); 3227 3228 KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(), 3229 ("uma_zfree_domain: called with spinlock or critical section held")); 3230 3231 /* uma_zfree(..., NULL) does nothing, to match free(9). */ 3232 if (item == NULL) 3233 return; 3234 zone_free_item(zone, item, udata, SKIP_NONE); 3235 } 3236 3237 static void 3238 slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item) 3239 { 3240 uma_domain_t dom; 3241 uint8_t freei; 3242 3243 mtx_assert(&keg->uk_lock, MA_OWNED); 3244 MPASS(keg == slab->us_keg); 3245 3246 dom = &keg->uk_domain[slab->us_domain]; 3247 3248 /* Do we need to remove from any lists? */ 3249 if (slab->us_freecount+1 == keg->uk_ipers) { 3250 LIST_REMOVE(slab, us_link); 3251 LIST_INSERT_HEAD(&dom->ud_free_slab, slab, us_link); 3252 } else if (slab->us_freecount == 0) { 3253 LIST_REMOVE(slab, us_link); 3254 LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link); 3255 } 3256 3257 /* Slab management. */ 3258 freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize; 3259 BIT_SET(SLAB_SETSIZE, freei, &slab->us_free); 3260 slab->us_freecount++; 3261 3262 /* Keg statistics. */ 3263 keg->uk_free++; 3264 } 3265 3266 static void 3267 zone_release(uma_zone_t zone, void **bucket, int cnt) 3268 { 3269 void *item; 3270 uma_slab_t slab; 3271 uma_keg_t keg; 3272 uint8_t *mem; 3273 int clearfull; 3274 int i; 3275 3276 clearfull = 0; 3277 keg = zone_first_keg(zone); 3278 KEG_LOCK(keg); 3279 for (i = 0; i < cnt; i++) { 3280 item = bucket[i]; 3281 if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) { 3282 mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK)); 3283 if (zone->uz_flags & UMA_ZONE_HASH) { 3284 slab = hash_sfind(&keg->uk_hash, mem); 3285 } else { 3286 mem += keg->uk_pgoff; 3287 slab = (uma_slab_t)mem; 3288 } 3289 } else { 3290 slab = vtoslab((vm_offset_t)item); 3291 if (slab->us_keg != keg) { 3292 KEG_UNLOCK(keg); 3293 keg = slab->us_keg; 3294 KEG_LOCK(keg); 3295 } 3296 } 3297 slab_free_item(keg, slab, item); 3298 if (keg->uk_flags & UMA_ZFLAG_FULL) { 3299 if (keg->uk_pages < keg->uk_maxpages) { 3300 keg->uk_flags &= ~UMA_ZFLAG_FULL; 3301 clearfull = 1; 3302 } 3303 3304 /* 3305 * We can handle one more allocation. Since we're 3306 * clearing ZFLAG_FULL, wake up all procs blocked 3307 * on pages. This should be uncommon, so keeping this 3308 * simple for now (rather than adding count of blocked 3309 * threads etc). 3310 */ 3311 wakeup(keg); 3312 } 3313 } 3314 KEG_UNLOCK(keg); 3315 if (clearfull) { 3316 ZONE_LOCK(zone); 3317 zone->uz_flags &= ~UMA_ZFLAG_FULL; 3318 wakeup(zone); 3319 ZONE_UNLOCK(zone); 3320 } 3321 3322 } 3323 3324 /* 3325 * Frees a single item to any zone. 
3326 * 3327 * Arguments: 3328 * zone The zone to free to 3329 * item The item we're freeing 3330 * udata User supplied data for the dtor 3331 * skip Skip dtors and finis 3332 */ 3333 static void 3334 zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip) 3335 { 3336 #ifdef INVARIANTS 3337 bool skipdbg; 3338 3339 skipdbg = uma_dbg_zskip(zone, item); 3340 if (skip == SKIP_NONE && !skipdbg) { 3341 if (zone->uz_flags & UMA_ZONE_MALLOC) 3342 uma_dbg_free(zone, udata, item); 3343 else 3344 uma_dbg_free(zone, NULL, item); 3345 } 3346 3347 if (skip < SKIP_DTOR && zone->uz_dtor != NULL && 3348 (!skipdbg || zone->uz_dtor != trash_dtor || 3349 zone->uz_ctor != trash_ctor)) 3350 #else 3351 if (skip < SKIP_DTOR && zone->uz_dtor != NULL) 3352 #endif 3353 zone->uz_dtor(item, zone->uz_size, udata); 3354 3355 if (skip < SKIP_FINI && zone->uz_fini) 3356 zone->uz_fini(item, zone->uz_size); 3357 3358 atomic_add_long(&zone->uz_frees, 1); 3359 zone->uz_release(zone->uz_arg, &item, 1); 3360 } 3361 3362 /* See uma.h */ 3363 int 3364 uma_zone_set_max(uma_zone_t zone, int nitems) 3365 { 3366 uma_keg_t keg; 3367 3368 keg = zone_first_keg(zone); 3369 if (keg == NULL) 3370 return (0); 3371 KEG_LOCK(keg); 3372 keg->uk_maxpages = (nitems / keg->uk_ipers) * keg->uk_ppera; 3373 if (keg->uk_maxpages * keg->uk_ipers < nitems) 3374 keg->uk_maxpages += keg->uk_ppera; 3375 nitems = (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers; 3376 KEG_UNLOCK(keg); 3377 3378 return (nitems); 3379 } 3380 3381 /* See uma.h */ 3382 int 3383 uma_zone_get_max(uma_zone_t zone) 3384 { 3385 int nitems; 3386 uma_keg_t keg; 3387 3388 keg = zone_first_keg(zone); 3389 if (keg == NULL) 3390 return (0); 3391 KEG_LOCK(keg); 3392 nitems = (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers; 3393 KEG_UNLOCK(keg); 3394 3395 return (nitems); 3396 } 3397 3398 /* See uma.h */ 3399 void 3400 uma_zone_set_warning(uma_zone_t zone, const char *warning) 3401 { 3402 3403 ZONE_LOCK(zone); 3404 zone->uz_warning = warning; 3405 ZONE_UNLOCK(zone); 3406 } 3407 3408 /* See uma.h */ 3409 void 3410 uma_zone_set_maxaction(uma_zone_t zone, uma_maxaction_t maxaction) 3411 { 3412 3413 ZONE_LOCK(zone); 3414 TASK_INIT(&zone->uz_maxaction, 0, (task_fn_t *)maxaction, zone); 3415 ZONE_UNLOCK(zone); 3416 } 3417 3418 /* See uma.h */ 3419 int 3420 uma_zone_get_cur(uma_zone_t zone) 3421 { 3422 int64_t nitems; 3423 u_int i; 3424 3425 ZONE_LOCK(zone); 3426 nitems = zone->uz_allocs - zone->uz_frees; 3427 CPU_FOREACH(i) { 3428 /* 3429 * See the comment in sysctl_vm_zone_stats() regarding the 3430 * safety of accessing the per-cpu caches. With the zone lock 3431 * held, it is safe, but can potentially result in stale data. 3432 */ 3433 nitems += zone->uz_cpu[i].uc_allocs - 3434 zone->uz_cpu[i].uc_frees; 3435 } 3436 ZONE_UNLOCK(zone); 3437 3438 return (nitems < 0 ? 
0 : nitems); 3439 } 3440 3441 /* See uma.h */ 3442 void 3443 uma_zone_set_init(uma_zone_t zone, uma_init uminit) 3444 { 3445 uma_keg_t keg; 3446 3447 keg = zone_first_keg(zone); 3448 KASSERT(keg != NULL, ("uma_zone_set_init: Invalid zone type")); 3449 KEG_LOCK(keg); 3450 KASSERT(keg->uk_pages == 0, 3451 ("uma_zone_set_init on non-empty keg")); 3452 keg->uk_init = uminit; 3453 KEG_UNLOCK(keg); 3454 } 3455 3456 /* See uma.h */ 3457 void 3458 uma_zone_set_fini(uma_zone_t zone, uma_fini fini) 3459 { 3460 uma_keg_t keg; 3461 3462 keg = zone_first_keg(zone); 3463 KASSERT(keg != NULL, ("uma_zone_set_fini: Invalid zone type")); 3464 KEG_LOCK(keg); 3465 KASSERT(keg->uk_pages == 0, 3466 ("uma_zone_set_fini on non-empty keg")); 3467 keg->uk_fini = fini; 3468 KEG_UNLOCK(keg); 3469 } 3470 3471 /* See uma.h */ 3472 void 3473 uma_zone_set_zinit(uma_zone_t zone, uma_init zinit) 3474 { 3475 3476 ZONE_LOCK(zone); 3477 KASSERT(zone_first_keg(zone)->uk_pages == 0, 3478 ("uma_zone_set_zinit on non-empty keg")); 3479 zone->uz_init = zinit; 3480 ZONE_UNLOCK(zone); 3481 } 3482 3483 /* See uma.h */ 3484 void 3485 uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini) 3486 { 3487 3488 ZONE_LOCK(zone); 3489 KASSERT(zone_first_keg(zone)->uk_pages == 0, 3490 ("uma_zone_set_zfini on non-empty keg")); 3491 zone->uz_fini = zfini; 3492 ZONE_UNLOCK(zone); 3493 } 3494 3495 /* See uma.h */ 3496 /* XXX uk_freef is not actually used with the zone locked */ 3497 void 3498 uma_zone_set_freef(uma_zone_t zone, uma_free freef) 3499 { 3500 uma_keg_t keg; 3501 3502 keg = zone_first_keg(zone); 3503 KASSERT(keg != NULL, ("uma_zone_set_freef: Invalid zone type")); 3504 KEG_LOCK(keg); 3505 keg->uk_freef = freef; 3506 KEG_UNLOCK(keg); 3507 } 3508 3509 /* See uma.h */ 3510 /* XXX uk_allocf is not actually used with the zone locked */ 3511 void 3512 uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf) 3513 { 3514 uma_keg_t keg; 3515 3516 keg = zone_first_keg(zone); 3517 KEG_LOCK(keg); 3518 keg->uk_allocf = allocf; 3519 KEG_UNLOCK(keg); 3520 } 3521 3522 /* See uma.h */ 3523 void 3524 uma_zone_reserve(uma_zone_t zone, int items) 3525 { 3526 uma_keg_t keg; 3527 3528 keg = zone_first_keg(zone); 3529 if (keg == NULL) 3530 return; 3531 KEG_LOCK(keg); 3532 keg->uk_reserve = items; 3533 KEG_UNLOCK(keg); 3534 3535 return; 3536 } 3537 3538 /* See uma.h */ 3539 int 3540 uma_zone_reserve_kva(uma_zone_t zone, int count) 3541 { 3542 uma_keg_t keg; 3543 vm_offset_t kva; 3544 u_int pages; 3545 3546 keg = zone_first_keg(zone); 3547 if (keg == NULL) 3548 return (0); 3549 pages = count / keg->uk_ipers; 3550 3551 if (pages * keg->uk_ipers < count) 3552 pages++; 3553 pages *= keg->uk_ppera; 3554 3555 #ifdef UMA_MD_SMALL_ALLOC 3556 if (keg->uk_ppera > 1) { 3557 #else 3558 if (1) { 3559 #endif 3560 kva = kva_alloc((vm_size_t)pages * PAGE_SIZE); 3561 if (kva == 0) 3562 return (0); 3563 } else 3564 kva = 0; 3565 KEG_LOCK(keg); 3566 keg->uk_kva = kva; 3567 keg->uk_offset = 0; 3568 keg->uk_maxpages = pages; 3569 #ifdef UMA_MD_SMALL_ALLOC 3570 keg->uk_allocf = (keg->uk_ppera > 1) ? 
noobj_alloc : uma_small_alloc; 3571 #else 3572 keg->uk_allocf = noobj_alloc; 3573 #endif 3574 keg->uk_flags |= UMA_ZONE_NOFREE; 3575 KEG_UNLOCK(keg); 3576 3577 return (1); 3578 } 3579 3580 /* See uma.h */ 3581 void 3582 uma_prealloc(uma_zone_t zone, int items) 3583 { 3584 uma_domain_t dom; 3585 uma_slab_t slab; 3586 uma_keg_t keg; 3587 int domain, slabs; 3588 3589 keg = zone_first_keg(zone); 3590 if (keg == NULL) 3591 return; 3592 KEG_LOCK(keg); 3593 slabs = items / keg->uk_ipers; 3594 domain = 0; 3595 if (slabs * keg->uk_ipers < items) 3596 slabs++; 3597 while (slabs > 0) { 3598 slab = keg_alloc_slab(keg, zone, domain, M_WAITOK); 3599 if (slab == NULL) 3600 break; 3601 MPASS(slab->us_keg == keg); 3602 dom = &keg->uk_domain[slab->us_domain]; 3603 LIST_INSERT_HEAD(&dom->ud_free_slab, slab, us_link); 3604 slabs--; 3605 domain = (domain + 1) % vm_ndomains; 3606 } 3607 KEG_UNLOCK(keg); 3608 } 3609 3610 /* See uma.h */ 3611 static void 3612 uma_reclaim_locked(bool kmem_danger) 3613 { 3614 3615 CTR0(KTR_UMA, "UMA: vm asked us to release pages!"); 3616 sx_assert(&uma_drain_lock, SA_XLOCKED); 3617 bucket_enable(); 3618 zone_foreach(zone_drain); 3619 if (vm_page_count_min() || kmem_danger) { 3620 cache_drain_safe(NULL); 3621 zone_foreach(zone_drain); 3622 } 3623 /* 3624 * Some slabs may have been freed but this zone will be visited early 3625 * we visit again so that we can free pages that are empty once other 3626 * zones are drained. We have to do the same for buckets. 3627 */ 3628 zone_drain(slabzone); 3629 bucket_zone_drain(); 3630 } 3631 3632 void 3633 uma_reclaim(void) 3634 { 3635 3636 sx_xlock(&uma_drain_lock); 3637 uma_reclaim_locked(false); 3638 sx_xunlock(&uma_drain_lock); 3639 } 3640 3641 static volatile int uma_reclaim_needed; 3642 3643 void 3644 uma_reclaim_wakeup(void) 3645 { 3646 3647 if (atomic_fetchadd_int(&uma_reclaim_needed, 1) == 0) 3648 wakeup(uma_reclaim); 3649 } 3650 3651 void 3652 uma_reclaim_worker(void *arg __unused) 3653 { 3654 3655 for (;;) { 3656 sx_xlock(&uma_drain_lock); 3657 while (atomic_load_int(&uma_reclaim_needed) == 0) 3658 sx_sleep(uma_reclaim, &uma_drain_lock, PVM, "umarcl", 3659 hz); 3660 sx_xunlock(&uma_drain_lock); 3661 EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_KMEM); 3662 sx_xlock(&uma_drain_lock); 3663 uma_reclaim_locked(true); 3664 atomic_store_int(&uma_reclaim_needed, 0); 3665 sx_xunlock(&uma_drain_lock); 3666 /* Don't fire more than once per-second. 
*/ 3667 pause("umarclslp", hz); 3668 } 3669 } 3670 3671 /* See uma.h */ 3672 int 3673 uma_zone_exhausted(uma_zone_t zone) 3674 { 3675 int full; 3676 3677 ZONE_LOCK(zone); 3678 full = (zone->uz_flags & UMA_ZFLAG_FULL); 3679 ZONE_UNLOCK(zone); 3680 return (full); 3681 } 3682 3683 int 3684 uma_zone_exhausted_nolock(uma_zone_t zone) 3685 { 3686 return (zone->uz_flags & UMA_ZFLAG_FULL); 3687 } 3688 3689 void * 3690 uma_large_malloc_domain(vm_size_t size, int domain, int wait) 3691 { 3692 struct vmem *arena; 3693 vm_offset_t addr; 3694 uma_slab_t slab; 3695 3696 #if VM_NRESERVLEVEL > 0 3697 if (__predict_true((wait & M_EXEC) == 0)) 3698 arena = kernel_arena; 3699 else 3700 arena = kernel_rwx_arena; 3701 #else 3702 arena = kernel_arena; 3703 #endif 3704 3705 slab = zone_alloc_item(slabzone, NULL, domain, wait); 3706 if (slab == NULL) 3707 return (NULL); 3708 if (domain == UMA_ANYDOMAIN) 3709 addr = kmem_malloc(arena, size, wait); 3710 else 3711 addr = kmem_malloc_domain(arena, domain, size, wait); 3712 if (addr != 0) { 3713 vsetslab(addr, slab); 3714 slab->us_data = (void *)addr; 3715 slab->us_flags = UMA_SLAB_KERNEL | UMA_SLAB_MALLOC; 3716 #if VM_NRESERVLEVEL > 0 3717 if (__predict_false(arena == kernel_rwx_arena)) 3718 slab->us_flags |= UMA_SLAB_KRWX; 3719 #endif 3720 slab->us_size = size; 3721 slab->us_domain = vm_phys_domain(PHYS_TO_VM_PAGE( 3722 pmap_kextract(addr))); 3723 uma_total_inc(size); 3724 } else { 3725 zone_free_item(slabzone, slab, NULL, SKIP_NONE); 3726 } 3727 3728 return ((void *)addr); 3729 } 3730 3731 void * 3732 uma_large_malloc(vm_size_t size, int wait) 3733 { 3734 3735 return uma_large_malloc_domain(size, UMA_ANYDOMAIN, wait); 3736 } 3737 3738 void 3739 uma_large_free(uma_slab_t slab) 3740 { 3741 struct vmem *arena; 3742 3743 KASSERT((slab->us_flags & UMA_SLAB_KERNEL) != 0, 3744 ("uma_large_free: Memory not allocated with uma_large_malloc.")); 3745 #if VM_NRESERVLEVEL > 0 3746 if (__predict_true((slab->us_flags & UMA_SLAB_KRWX) == 0)) 3747 arena = kernel_arena; 3748 else 3749 arena = kernel_rwx_arena; 3750 #else 3751 arena = kernel_arena; 3752 #endif 3753 kmem_free(arena, (vm_offset_t)slab->us_data, slab->us_size); 3754 uma_total_dec(slab->us_size); 3755 zone_free_item(slabzone, slab, NULL, SKIP_NONE); 3756 } 3757 3758 static void 3759 uma_zero_item(void *item, uma_zone_t zone) 3760 { 3761 3762 bzero(item, zone->uz_size); 3763 } 3764 3765 unsigned long 3766 uma_limit(void) 3767 { 3768 3769 return (uma_kmem_limit); 3770 } 3771 3772 void 3773 uma_set_limit(unsigned long limit) 3774 { 3775 3776 uma_kmem_limit = limit; 3777 } 3778 3779 unsigned long 3780 uma_size(void) 3781 { 3782 3783 return (uma_kmem_total); 3784 } 3785 3786 long 3787 uma_avail(void) 3788 { 3789 3790 return (uma_kmem_limit - uma_kmem_total); 3791 } 3792 3793 void 3794 uma_print_stats(void) 3795 { 3796 zone_foreach(uma_print_zone); 3797 } 3798 3799 static void 3800 slab_print(uma_slab_t slab) 3801 { 3802 printf("slab: keg %p, data %p, freecount %d\n", 3803 slab->us_keg, slab->us_data, slab->us_freecount); 3804 } 3805 3806 static void 3807 cache_print(uma_cache_t cache) 3808 { 3809 printf("alloc: %p(%d), free: %p(%d)\n", 3810 cache->uc_allocbucket, 3811 cache->uc_allocbucket?cache->uc_allocbucket->ub_cnt:0, 3812 cache->uc_freebucket, 3813 cache->uc_freebucket?cache->uc_freebucket->ub_cnt:0); 3814 } 3815 3816 static void 3817 uma_print_keg(uma_keg_t keg) 3818 { 3819 uma_domain_t dom; 3820 uma_slab_t slab; 3821 int i; 3822 3823 printf("keg: %s(%p) size %d(%d) flags %#x ipers %d ppera %d " 3824 "out %d free %d 
limit %d\n", 3825 keg->uk_name, keg, keg->uk_size, keg->uk_rsize, keg->uk_flags, 3826 keg->uk_ipers, keg->uk_ppera, 3827 (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free, 3828 keg->uk_free, (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers); 3829 for (i = 0; i < vm_ndomains; i++) { 3830 dom = &keg->uk_domain[i]; 3831 printf("Part slabs:\n"); 3832 LIST_FOREACH(slab, &dom->ud_part_slab, us_link) 3833 slab_print(slab); 3834 printf("Free slabs:\n"); 3835 LIST_FOREACH(slab, &dom->ud_free_slab, us_link) 3836 slab_print(slab); 3837 printf("Full slabs:\n"); 3838 LIST_FOREACH(slab, &dom->ud_full_slab, us_link) 3839 slab_print(slab); 3840 } 3841 } 3842 3843 void 3844 uma_print_zone(uma_zone_t zone) 3845 { 3846 uma_cache_t cache; 3847 uma_klink_t kl; 3848 int i; 3849 3850 printf("zone: %s(%p) size %d flags %#x\n", 3851 zone->uz_name, zone, zone->uz_size, zone->uz_flags); 3852 LIST_FOREACH(kl, &zone->uz_kegs, kl_link) 3853 uma_print_keg(kl->kl_keg); 3854 CPU_FOREACH(i) { 3855 cache = &zone->uz_cpu[i]; 3856 printf("CPU %d Cache:\n", i); 3857 cache_print(cache); 3858 } 3859 } 3860 3861 #ifdef DDB 3862 /* 3863 * Generate statistics across both the zone and its per-cpu cache's. Return 3864 * desired statistics if the pointer is non-NULL for that statistic. 3865 * 3866 * Note: does not update the zone statistics, as it can't safely clear the 3867 * per-CPU cache statistic. 3868 * 3869 * XXXRW: Following the uc_allocbucket and uc_freebucket pointers here isn't 3870 * safe from off-CPU; we should modify the caches to track this information 3871 * directly so that we don't have to. 3872 */ 3873 static void 3874 uma_zone_sumstat(uma_zone_t z, int *cachefreep, uint64_t *allocsp, 3875 uint64_t *freesp, uint64_t *sleepsp) 3876 { 3877 uma_cache_t cache; 3878 uint64_t allocs, frees, sleeps; 3879 int cachefree, cpu; 3880 3881 allocs = frees = sleeps = 0; 3882 cachefree = 0; 3883 CPU_FOREACH(cpu) { 3884 cache = &z->uz_cpu[cpu]; 3885 if (cache->uc_allocbucket != NULL) 3886 cachefree += cache->uc_allocbucket->ub_cnt; 3887 if (cache->uc_freebucket != NULL) 3888 cachefree += cache->uc_freebucket->ub_cnt; 3889 allocs += cache->uc_allocs; 3890 frees += cache->uc_frees; 3891 } 3892 allocs += z->uz_allocs; 3893 frees += z->uz_frees; 3894 sleeps += z->uz_sleeps; 3895 if (cachefreep != NULL) 3896 *cachefreep = cachefree; 3897 if (allocsp != NULL) 3898 *allocsp = allocs; 3899 if (freesp != NULL) 3900 *freesp = frees; 3901 if (sleepsp != NULL) 3902 *sleepsp = sleeps; 3903 } 3904 #endif /* DDB */ 3905 3906 static int 3907 sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS) 3908 { 3909 uma_keg_t kz; 3910 uma_zone_t z; 3911 int count; 3912 3913 count = 0; 3914 rw_rlock(&uma_rwlock); 3915 LIST_FOREACH(kz, &uma_kegs, uk_link) { 3916 LIST_FOREACH(z, &kz->uk_zones, uz_link) 3917 count++; 3918 } 3919 rw_runlock(&uma_rwlock); 3920 return (sysctl_handle_int(oidp, &count, 0, req)); 3921 } 3922 3923 static int 3924 sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS) 3925 { 3926 struct uma_stream_header ush; 3927 struct uma_type_header uth; 3928 struct uma_percpu_stat *ups; 3929 uma_bucket_t bucket; 3930 uma_zone_domain_t zdom; 3931 struct sbuf sbuf; 3932 uma_cache_t cache; 3933 uma_klink_t kl; 3934 uma_keg_t kz; 3935 uma_zone_t z; 3936 uma_keg_t k; 3937 int count, error, i; 3938 3939 error = sysctl_wire_old_buffer(req, 0); 3940 if (error != 0) 3941 return (error); 3942 sbuf_new_for_sysctl(&sbuf, NULL, 128, req); 3943 sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL); 3944 ups = malloc((mp_maxid + 1) * sizeof(*ups), M_TEMP, M_WAITOK); 3945 3946 count = 0; 
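	/*
	 * First pass: count the zones while holding the keg list lock so
	 * that the stream header emitted below can tell userland how many
	 * per-zone records will follow.
	 */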
3947 rw_rlock(&uma_rwlock);
3948 LIST_FOREACH(kz, &uma_kegs, uk_link) {
3949 LIST_FOREACH(z, &kz->uk_zones, uz_link)
3950 count++;
3951 }
3952
3953 /*
3954 * Insert stream header.
3955 */
3956 bzero(&ush, sizeof(ush));
3957 ush.ush_version = UMA_STREAM_VERSION;
3958 ush.ush_maxcpus = (mp_maxid + 1);
3959 ush.ush_count = count;
3960 (void)sbuf_bcat(&sbuf, &ush, sizeof(ush));
3961
3962 LIST_FOREACH(kz, &uma_kegs, uk_link) {
3963 LIST_FOREACH(z, &kz->uk_zones, uz_link) {
3964 bzero(&uth, sizeof(uth));
3965 ZONE_LOCK(z);
3966 strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
3967 uth.uth_align = kz->uk_align;
3968 uth.uth_size = kz->uk_size;
3969 uth.uth_rsize = kz->uk_rsize;
3970 LIST_FOREACH(kl, &z->uz_kegs, kl_link) {
3971 k = kl->kl_keg;
3972 uth.uth_maxpages += k->uk_maxpages;
3973 uth.uth_pages += k->uk_pages;
3974 uth.uth_keg_free += k->uk_free;
3975 uth.uth_limit = (k->uk_maxpages / k->uk_ppera)
3976 * k->uk_ipers;
3977 }
3978
3979 /*
3980 * A zone is secondary if it is not the first entry
3981 * on the keg's zone list.
3982 */
3983 if ((z->uz_flags & UMA_ZONE_SECONDARY) &&
3984 (LIST_FIRST(&kz->uk_zones) != z))
3985 uth.uth_zone_flags = UTH_ZONE_SECONDARY;
3986
3987 for (i = 0; i < vm_ndomains; i++) {
3988 zdom = &z->uz_domain[i];
3989 LIST_FOREACH(bucket, &zdom->uzd_buckets,
3990 ub_link)
3991 uth.uth_zone_free += bucket->ub_cnt;
3992 }
3993 uth.uth_allocs = z->uz_allocs;
3994 uth.uth_frees = z->uz_frees;
3995 uth.uth_fails = z->uz_fails;
3996 uth.uth_sleeps = z->uz_sleeps;
3997 /*
3998 * While it is not normally safe to access the cache
3999 * bucket pointers while not on the CPU that owns the
4000 * cache, we only allow the pointers to be exchanged
4001 * without the zone lock held, not invalidated, so
4002 * accept the possible race associated with bucket
4003 * exchange during monitoring.
4004 */ 4005 for (i = 0; i < mp_maxid + 1; i++) { 4006 bzero(&ups[i], sizeof(*ups)); 4007 if (kz->uk_flags & UMA_ZFLAG_INTERNAL || 4008 CPU_ABSENT(i)) 4009 continue; 4010 cache = &z->uz_cpu[i]; 4011 if (cache->uc_allocbucket != NULL) 4012 ups[i].ups_cache_free += 4013 cache->uc_allocbucket->ub_cnt; 4014 if (cache->uc_freebucket != NULL) 4015 ups[i].ups_cache_free += 4016 cache->uc_freebucket->ub_cnt; 4017 ups[i].ups_allocs = cache->uc_allocs; 4018 ups[i].ups_frees = cache->uc_frees; 4019 } 4020 ZONE_UNLOCK(z); 4021 (void)sbuf_bcat(&sbuf, &uth, sizeof(uth)); 4022 for (i = 0; i < mp_maxid + 1; i++) 4023 (void)sbuf_bcat(&sbuf, &ups[i], sizeof(ups[i])); 4024 } 4025 } 4026 rw_runlock(&uma_rwlock); 4027 error = sbuf_finish(&sbuf); 4028 sbuf_delete(&sbuf); 4029 free(ups, M_TEMP); 4030 return (error); 4031 } 4032 4033 int 4034 sysctl_handle_uma_zone_max(SYSCTL_HANDLER_ARGS) 4035 { 4036 uma_zone_t zone = *(uma_zone_t *)arg1; 4037 int error, max; 4038 4039 max = uma_zone_get_max(zone); 4040 error = sysctl_handle_int(oidp, &max, 0, req); 4041 if (error || !req->newptr) 4042 return (error); 4043 4044 uma_zone_set_max(zone, max); 4045 4046 return (0); 4047 } 4048 4049 int 4050 sysctl_handle_uma_zone_cur(SYSCTL_HANDLER_ARGS) 4051 { 4052 uma_zone_t zone = *(uma_zone_t *)arg1; 4053 int cur; 4054 4055 cur = uma_zone_get_cur(zone); 4056 return (sysctl_handle_int(oidp, &cur, 0, req)); 4057 } 4058 4059 #ifdef INVARIANTS 4060 static uma_slab_t 4061 uma_dbg_getslab(uma_zone_t zone, void *item) 4062 { 4063 uma_slab_t slab; 4064 uma_keg_t keg; 4065 uint8_t *mem; 4066 4067 mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK)); 4068 if (zone->uz_flags & UMA_ZONE_VTOSLAB) { 4069 slab = vtoslab((vm_offset_t)mem); 4070 } else { 4071 /* 4072 * It is safe to return the slab here even though the 4073 * zone is unlocked because the item's allocation state 4074 * essentially holds a reference. 4075 */ 4076 ZONE_LOCK(zone); 4077 keg = LIST_FIRST(&zone->uz_kegs)->kl_keg; 4078 if (keg->uk_flags & UMA_ZONE_HASH) 4079 slab = hash_sfind(&keg->uk_hash, mem); 4080 else 4081 slab = (uma_slab_t)(mem + keg->uk_pgoff); 4082 ZONE_UNLOCK(zone); 4083 } 4084 4085 return (slab); 4086 } 4087 4088 static bool 4089 uma_dbg_zskip(uma_zone_t zone, void *mem) 4090 { 4091 uma_keg_t keg; 4092 4093 if ((keg = zone_first_keg(zone)) == NULL) 4094 return (true); 4095 4096 return (uma_dbg_kskip(keg, mem)); 4097 } 4098 4099 static bool 4100 uma_dbg_kskip(uma_keg_t keg, void *mem) 4101 { 4102 uintptr_t idx; 4103 4104 if (dbg_divisor == 0) 4105 return (true); 4106 4107 if (dbg_divisor == 1) 4108 return (false); 4109 4110 idx = (uintptr_t)mem >> PAGE_SHIFT; 4111 if (keg->uk_ipers > 1) { 4112 idx *= keg->uk_ipers; 4113 idx += ((uintptr_t)mem & PAGE_MASK) / keg->uk_rsize; 4114 } 4115 4116 if ((idx / dbg_divisor) * dbg_divisor != idx) { 4117 counter_u64_add(uma_skip_cnt, 1); 4118 return (true); 4119 } 4120 counter_u64_add(uma_dbg_cnt, 1); 4121 4122 return (false); 4123 } 4124 4125 /* 4126 * Set up the slab's freei data such that uma_dbg_free can function. 
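 *
 * Each slab carries an us_debugfree bitset: uma_dbg_alloc() sets the
 * item's bit and uma_dbg_free() clears it, so a duplicate allocation
 * or a duplicate free of the same item trips the panics below.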

#ifdef INVARIANTS
/*
 * Find the slab backing an allocated item so the debugging code can
 * locate its per-item free bitmap.
 */
static uma_slab_t
uma_dbg_getslab(uma_zone_t zone, void *item)
{
	uma_slab_t slab;
	uma_keg_t keg;
	uint8_t *mem;

	mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
	if (zone->uz_flags & UMA_ZONE_VTOSLAB) {
		slab = vtoslab((vm_offset_t)mem);
	} else {
		/*
		 * It is safe to return the slab here even though the
		 * zone is unlocked because the item's allocation state
		 * essentially holds a reference.
		 */
		ZONE_LOCK(zone);
		keg = LIST_FIRST(&zone->uz_kegs)->kl_keg;
		if (keg->uk_flags & UMA_ZONE_HASH)
			slab = hash_sfind(&keg->uk_hash, mem);
		else
			slab = (uma_slab_t)(mem + keg->uk_pgoff);
		ZONE_UNLOCK(zone);
	}

	return (slab);
}

/*
 * As uma_dbg_kskip(), but keyed by zone; cache-only zones (which have no
 * keg) are always skipped.
 */
static bool
uma_dbg_zskip(uma_zone_t zone, void *mem)
{
	uma_keg_t keg;

	if ((keg = zone_first_keg(zone)) == NULL)
		return (true);

	return (uma_dbg_kskip(keg, mem));
}

/*
 * Decide whether the per-item debugging checks should be skipped for this
 * item.  A dbg_divisor of 0 disables the checks entirely, 1 audits every
 * item, and larger values audit only every dbg_divisor'th item, counted
 * by the item's index within the keg.
 */
static bool
uma_dbg_kskip(uma_keg_t keg, void *mem)
{
	uintptr_t idx;

	if (dbg_divisor == 0)
		return (true);

	if (dbg_divisor == 1)
		return (false);

	idx = (uintptr_t)mem >> PAGE_SHIFT;
	if (keg->uk_ipers > 1) {
		idx *= keg->uk_ipers;
		idx += ((uintptr_t)mem & PAGE_MASK) / keg->uk_rsize;
	}

	if ((idx / dbg_divisor) * dbg_divisor != idx) {
		counter_u64_add(uma_skip_cnt, 1);
		return (true);
	}
	counter_u64_add(uma_dbg_cnt, 1);

	return (false);
}

/*
 * Set up the slab's freei data such that uma_dbg_free can function.
 */
static void
uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item)
{
	uma_keg_t keg;
	int freei;

	if (slab == NULL) {
		slab = uma_dbg_getslab(zone, item);
		if (slab == NULL)
			panic("uma: item %p did not belong to zone %s\n",
			    item, zone->uz_name);
	}
	keg = slab->us_keg;
	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;

	if (BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree))
		panic("Duplicate alloc of %p from zone %p(%s) slab %p(%d)\n",
		    item, zone, zone->uz_name, slab, freei);
	BIT_SET_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree);

	return;
}

/*
 * Verifies freed addresses.  Checks for alignment, valid slab membership
 * and duplicate frees.
 */
static void
uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
{
	uma_keg_t keg;
	int freei;

	if (slab == NULL) {
		slab = uma_dbg_getslab(zone, item);
		if (slab == NULL)
			panic("uma: Freed item %p did not belong to zone %s\n",
			    item, zone->uz_name);
	}
	keg = slab->us_keg;
	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;

	if (freei >= keg->uk_ipers)
		panic("Invalid free of %p from zone %p(%s) slab %p(%d)\n",
		    item, zone, zone->uz_name, slab, freei);

	if (((freei * keg->uk_rsize) + slab->us_data) != item)
		panic("Unaligned free of %p from zone %p(%s) slab %p(%d)\n",
		    item, zone, zone->uz_name, slab, freei);

	if (!BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree))
		panic("Duplicate free of %p from zone %p(%s) slab %p(%d)\n",
		    item, zone, zone->uz_name, slab, freei);

	BIT_CLR_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree);
}
#endif /* INVARIANTS */
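
/*
 * The DDB commands below are reached from the in-kernel debugger as
 * "show uma" and "show umacache"; each prints a one-line usage summary
 * per zone.  They deliberately take no locks, which is acceptable
 * because ddb(4) runs with the rest of the system stopped.
 */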

#ifdef DDB
DB_SHOW_COMMAND(uma, db_show_uma)
{
	uma_bucket_t bucket;
	uma_keg_t kz;
	uma_zone_t z;
	uma_zone_domain_t zdom;
	uint64_t allocs, frees, sleeps;
	int cachefree, i;

	db_printf("%18s %8s %8s %8s %12s %8s %8s\n", "Zone", "Size", "Used",
	    "Free", "Requests", "Sleeps", "Bucket");
	LIST_FOREACH(kz, &uma_kegs, uk_link) {
		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
			if (kz->uk_flags & UMA_ZFLAG_INTERNAL) {
				allocs = z->uz_allocs;
				frees = z->uz_frees;
				sleeps = z->uz_sleeps;
				cachefree = 0;
			} else
				uma_zone_sumstat(z, &cachefree, &allocs,
				    &frees, &sleeps);
			if (!((z->uz_flags & UMA_ZONE_SECONDARY) &&
			    (LIST_FIRST(&kz->uk_zones) != z)))
				cachefree += kz->uk_free;
			for (i = 0; i < vm_ndomains; i++) {
				zdom = &z->uz_domain[i];
				LIST_FOREACH(bucket, &zdom->uzd_buckets,
				    ub_link)
					cachefree += bucket->ub_cnt;
			}
			db_printf("%18s %8ju %8jd %8d %12ju %8ju %8u\n",
			    z->uz_name, (uintmax_t)kz->uk_size,
			    (intmax_t)(allocs - frees), cachefree,
			    (uintmax_t)allocs, sleeps, z->uz_count);
			if (db_pager_quit)
				return;
		}
	}
}

DB_SHOW_COMMAND(umacache, db_show_umacache)
{
	uma_bucket_t bucket;
	uma_zone_t z;
	uma_zone_domain_t zdom;
	uint64_t allocs, frees;
	int cachefree, i;

	db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free",
	    "Requests", "Bucket");
	LIST_FOREACH(z, &uma_cachezones, uz_link) {
		uma_zone_sumstat(z, &cachefree, &allocs, &frees, NULL);
		for (i = 0; i < vm_ndomains; i++) {
			zdom = &z->uz_domain[i];
			LIST_FOREACH(bucket, &zdom->uzd_buckets, ub_link)
				cachefree += bucket->ub_cnt;
		}
		db_printf("%18s %8ju %8jd %8d %12ju %8u\n",
		    z->uz_name, (uintmax_t)z->uz_size,
		    (intmax_t)(allocs - frees), cachefree,
		    (uintmax_t)allocs, z->uz_count);
		if (db_pager_quit)
			return;
	}
}
#endif /* DDB */