/*-
 * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson <jeff@FreeBSD.org>
 * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
 * Copyright (c) 2004-2006 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uma_core.c  Implementation of the Universal Memory allocator
 *
 * This allocator is intended to replace the multitude of similar object caches
 * in the standard FreeBSD kernel.  The intent is to be flexible as well as
 * efficient.  A primary design goal is to return unused memory to the rest of
 * the system.  This will make the system as a whole more flexible due to the
 * ability to move memory to subsystems which most need it instead of leaving
 * pools of reserved memory unused.
 *
 * The basic ideas stem from similar slab/zone based allocators whose
 * algorithms are well known.
 */

/*
 * TODO:
 *	- Improve memory usage for large allocations
 *	- Investigate cache size adjustments
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/* I should really use ktr.. */
/*
#define UMA_DEBUG 1
#define UMA_DEBUG_ALLOC 1
#define UMA_DEBUG_ALLOC_1 1
*/

#include "opt_ddb.h"
#include "opt_param.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitset.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

#include <ddb/ddb.h>

#ifdef DEBUG_MEMGUARD
#include <vm/memguard.h>
#endif

/*
 * This is the zone and keg from which all zones are spawned.  The idea is that
 * even the zone & keg heads are allocated from the allocator, so we use the
 * bss section to bootstrap us.
 */
static struct uma_keg masterkeg;
static struct uma_zone masterzone_k;
static struct uma_zone masterzone_z;
static uma_zone_t kegs = &masterzone_k;
static uma_zone_t zones = &masterzone_z;

/* This is the zone from which all of uma_slab_t's are allocated. */
static uma_zone_t slabzone;
static uma_zone_t slabrefzone;	/* With refcounters (for UMA_ZONE_REFCNT) */

/*
 * The initial hash tables come out of this zone so they can be allocated
 * prior to malloc coming up.
 */
static uma_zone_t hashzone;

/* The boot-time adjusted value for cache line alignment. */
int uma_align_cache = 64 - 1;

static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");

/*
 * Are we allowed to allocate buckets?
 */
static int bucketdisable = 1;

/* Linked list of all kegs in the system */
static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);

/* This mutex protects the keg list */
static struct mtx_padalign uma_mtx;

/* Linked list of boot time pages */
static LIST_HEAD(,uma_slab) uma_boot_pages =
    LIST_HEAD_INITIALIZER(uma_boot_pages);

/* This mutex protects the boot time pages list */
static struct mtx_padalign uma_boot_pages_mtx;

/* Is the VM done starting up? */
static int booted = 0;
#define	UMA_STARTUP	1
#define	UMA_STARTUP2	2

/* Maximum number of allowed items-per-slab if the slab header is OFFPAGE */
static const u_int uma_max_ipers = SLAB_SETSIZE;

/*
 * Only mbuf clusters use ref zones.  Just provide enough references
 * to support the one user.  New code should not use the ref facility.
 */
static const u_int uma_max_ipers_ref = PAGE_SIZE / MCLBYTES;

/*
 * This is the handle used to schedule events that need to happen
 * outside of the allocation fast path.
 */
static struct callout uma_callout;
#define	UMA_TIMEOUT	20		/* Seconds for callout interval. */

/*
 * This structure is passed as the zone ctor arg so that I don't have to create
 * a special allocation function just for zones.
 */
struct uma_zctor_args {
	const char *name;
	size_t size;
	uma_ctor ctor;
	uma_dtor dtor;
	uma_init uminit;
	uma_fini fini;
	uma_import import;
	uma_release release;
	void *arg;
	uma_keg_t keg;
	int align;
	uint32_t flags;
};

struct uma_kctor_args {
	uma_zone_t zone;
	size_t size;
	uma_init uminit;
	uma_fini fini;
	int align;
	uint32_t flags;
};

struct uma_bucket_zone {
	uma_zone_t	ubz_zone;
	char		*ubz_name;
	int		ubz_entries;	/* Number of items it can hold. */
	int		ubz_maxsize;	/* Maximum allocation size per-item. */
};

/*
 * Compute the actual number of bucket entries to pack them in power
 * of two sizes for more efficient space utilization.
 */
#define	BUCKET_SIZE(n)						\
    (((sizeof(void *) * (n)) - sizeof(struct uma_bucket)) / sizeof(void *))

#define	BUCKET_MAX	BUCKET_SIZE(128)

struct uma_bucket_zone bucket_zones[] = {
	{ NULL, "4 Bucket", BUCKET_SIZE(4), 4096 },
	{ NULL, "8 Bucket", BUCKET_SIZE(8), 2048 },
	{ NULL, "16 Bucket", BUCKET_SIZE(16), 1024 },
	{ NULL, "32 Bucket", BUCKET_SIZE(32), 512 },
	{ NULL, "64 Bucket", BUCKET_SIZE(64), 256 },
	{ NULL, "128 Bucket", BUCKET_SIZE(128), 128 },
	{ NULL, NULL, 0}
};

/*
 * Flags and enumerations to be passed to internal functions.
 */
enum zfreeskip { SKIP_NONE = 0, SKIP_DTOR, SKIP_FINI };

/* Prototypes.. */

static void *noobj_alloc(uma_zone_t, int, uint8_t *, int);
static void *page_alloc(uma_zone_t, int, uint8_t *, int);
static void *startup_alloc(uma_zone_t, int, uint8_t *, int);
static void page_free(void *, int, uint8_t);
static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int);
static void cache_drain(uma_zone_t);
static void bucket_drain(uma_zone_t, uma_bucket_t);
static void bucket_cache_drain(uma_zone_t zone);
static int keg_ctor(void *, int, void *, int);
static void keg_dtor(void *, int, void *);
static int zone_ctor(void *, int, void *, int);
static void zone_dtor(void *, int, void *);
static int zero_init(void *, int, int);
static void keg_small_init(uma_keg_t keg);
static void keg_large_init(uma_keg_t keg);
static void zone_foreach(void (*zfunc)(uma_zone_t));
static void zone_timeout(uma_zone_t zone);
static int hash_alloc(struct uma_hash *);
static int hash_expand(struct uma_hash *, struct uma_hash *);
static void hash_free(struct uma_hash *hash);
static void uma_timeout(void *);
static void uma_startup3(void);
static void *zone_alloc_item(uma_zone_t, void *, int);
static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip);
static void bucket_enable(void);
static void bucket_init(void);
static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int);
static void bucket_free(uma_zone_t zone, uma_bucket_t, void *);
static void bucket_zone_drain(void);
static uma_bucket_t zone_alloc_bucket(uma_zone_t zone, void *, int flags);
static uma_slab_t zone_fetch_slab(uma_zone_t zone, uma_keg_t last, int flags);
static uma_slab_t zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int flags);
static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
static void slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item);
static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
    uma_fini fini, int align, uint32_t flags);
static int zone_import(uma_zone_t zone, void **bucket, int max, int flags);
static void zone_release(uma_zone_t zone, void **bucket, int cnt);

void uma_print_zone(uma_zone_t);
void uma_print_stats(void);
static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);

SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);

SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLTYPE_INT,
    0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones");

SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
    0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");

static int zone_warnings = 1;
TUNABLE_INT("vm.zone_warnings", &zone_warnings);
SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RW, &zone_warnings, 0,
    "Warn when UMA zones become full");

/*
 * This routine checks to see whether or not it's safe to enable buckets.
 */
static void
bucket_enable(void)
{
	bucketdisable = vm_page_count_min();
}

/*
 * Initialize bucket_zones, the array of zones of buckets of various sizes.
 *
 * For each zone, calculate the memory required for each bucket, consisting
 * of the header and an array of pointers.
 */
static void
bucket_init(void)
{
	struct uma_bucket_zone *ubz;
	int size;
	int i;

	for (i = 0, ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) {
		size = roundup(sizeof(struct uma_bucket), sizeof(void *));
		size += sizeof(void *) * ubz->ubz_entries;
		ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
		    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
		    UMA_ZONE_MTXCLASS | UMA_ZFLAG_BUCKET);
	}
}

/*
 * Given a desired number of entries for a bucket, return the zone from which
 * to allocate the bucket.
 */
static struct uma_bucket_zone *
bucket_zone_lookup(int entries)
{
	struct uma_bucket_zone *ubz;

	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
		if (ubz->ubz_entries >= entries)
			return (ubz);
	ubz--;
	return (ubz);
}

static int
bucket_select(int size)
{
	struct uma_bucket_zone *ubz;

	ubz = &bucket_zones[0];
	if (size > ubz->ubz_maxsize)
		return MAX((ubz->ubz_maxsize * ubz->ubz_entries) / size, 1);

	for (; ubz->ubz_entries != 0; ubz++)
		if (ubz->ubz_maxsize < size)
			break;
	ubz--;
	return (ubz->ubz_entries);
}

static uma_bucket_t
bucket_alloc(uma_zone_t zone, void *udata, int flags)
{
	struct uma_bucket_zone *ubz;
	uma_bucket_t bucket;

	/*
	 * This is to stop us from allocating per cpu buckets while we're
	 * running out of vm.boot_pages.  Otherwise, we would exhaust the
	 * boot pages.  This also prevents us from allocating buckets in
	 * low memory situations.
	 */
	if (bucketdisable)
		return (NULL);
	/*
	 * To limit bucket recursion we store the original zone flags
	 * in a cookie passed via zalloc_arg/zfree_arg.  This allows the
	 * NOVM flag to persist even through deep recursions.  We also
	 * store ZFLAG_BUCKET once we have recursed attempting to allocate
	 * a bucket for a bucket zone so we do not allow infinite bucket
	 * recursion.  This cookie will even persist to frees of unused
	 * buckets via the allocation path or bucket allocations in the
	 * free path.
	 */
	if ((uintptr_t)udata & UMA_ZFLAG_BUCKET)
		return (NULL);
	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
		udata = (void *)(uintptr_t)zone->uz_flags;
	else
		udata = (void *)((uintptr_t)udata | UMA_ZFLAG_BUCKET);
	if ((uintptr_t)udata & UMA_ZFLAG_CACHEONLY)
		flags |= M_NOVM;
	ubz = bucket_zone_lookup(zone->uz_count);
	bucket = uma_zalloc_arg(ubz->ubz_zone, udata, flags);
	if (bucket) {
#ifdef INVARIANTS
		bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
#endif
		bucket->ub_cnt = 0;
		bucket->ub_entries = ubz->ubz_entries;
	}

	return (bucket);
}

static void
bucket_free(uma_zone_t zone, uma_bucket_t bucket, void *udata)
{
	struct uma_bucket_zone *ubz;

	KASSERT(bucket->ub_cnt == 0,
	    ("bucket_free: Freeing a non free bucket."));
	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
		udata = (void *)(uintptr_t)zone->uz_flags;
	ubz = bucket_zone_lookup(bucket->ub_entries);
	uma_zfree_arg(ubz->ubz_zone, bucket, udata);
}

static void
bucket_zone_drain(void)
{
	struct uma_bucket_zone *ubz;

	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
		zone_drain(ubz->ubz_zone);
}

static void
zone_log_warning(uma_zone_t zone)
{
	static const struct timeval warninterval = { 300, 0 };

	if (!zone_warnings || zone->uz_warning == NULL)
		return;

	if (ratecheck(&zone->uz_ratecheck, &warninterval))
		printf("[zone: %s] %s\n", zone->uz_name, zone->uz_warning);
}

static void
zone_foreach_keg(uma_zone_t zone, void (*kegfn)(uma_keg_t))
{
	uma_klink_t klink;

	LIST_FOREACH(klink, &zone->uz_kegs, kl_link)
		kegfn(klink->kl_keg);
}

/*
 * Routine called by timeout which is used to fire off some time interval
 * based calculations.  (stats, hash size, etc.)
 *
 * Arguments:
 *	arg   Unused
 *
 * Returns:
 *	Nothing
 */
static void
uma_timeout(void *unused)
{
	bucket_enable();
	zone_foreach(zone_timeout);

	/* Reschedule this event */
	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
}

/*
 * Routine to perform timeout driven calculations.  This expands the
 * hashes and does per cpu statistics aggregation.
 *
 * Returns nothing.
 */
static void
keg_timeout(uma_keg_t keg)
{

	KEG_LOCK(keg);
	/*
	 * Expand the keg hash table.
	 *
	 * This is done if the number of slabs is larger than the hash size.
	 * What I'm trying to do here is eliminate collisions entirely.  This
	 * may be a little aggressive.  Should I allow for two collisions max?
	 */
	if (keg->uk_flags & UMA_ZONE_HASH &&
	    keg->uk_pages / keg->uk_ppera >= keg->uk_hash.uh_hashsize) {
		struct uma_hash newhash;
		struct uma_hash oldhash;
		int ret;

		/*
		 * This is so involved because allocating and freeing
		 * while the keg lock is held will lead to deadlock.
		 * I have to do everything in stages and check for
		 * races.
		 */
		newhash = keg->uk_hash;
		KEG_UNLOCK(keg);
		ret = hash_alloc(&newhash);
		KEG_LOCK(keg);
		if (ret) {
			if (hash_expand(&keg->uk_hash, &newhash)) {
				oldhash = keg->uk_hash;
				keg->uk_hash = newhash;
			} else
				oldhash = newhash;

			KEG_UNLOCK(keg);
			hash_free(&oldhash);
			return;
		}
	}
	KEG_UNLOCK(keg);
}

static void
zone_timeout(uma_zone_t zone)
{

	zone_foreach_keg(zone, &keg_timeout);
}

/*
 * Allocate and zero fill the next sized hash table from the appropriate
 * backing store.
 *
 * Arguments:
 *	hash  A new hash structure with the old hash size in uh_hashsize
 *
 * Returns:
 *	1 on success and 0 on failure.
 */
static int
hash_alloc(struct uma_hash *hash)
{
	int oldsize;
	int alloc;

	oldsize = hash->uh_hashsize;

	/* We're just going to go to a power of two greater */
	if (oldsize) {
		hash->uh_hashsize = oldsize * 2;
		alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
		hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
		    M_UMAHASH, M_NOWAIT);
	} else {
		alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
		hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
		    M_WAITOK);
		hash->uh_hashsize = UMA_HASH_SIZE_INIT;
	}
	if (hash->uh_slab_hash) {
		bzero(hash->uh_slab_hash, alloc);
		hash->uh_hashmask = hash->uh_hashsize - 1;
		return (1);
	}

	return (0);
}

/*
 * Expands the hash table for HASH zones.  This is done from zone_timeout
 * to reduce collisions.  This must not be done in the regular allocation
 * path, otherwise, we can recurse on the vm while allocating pages.
 *
 * Arguments:
 *	oldhash  The hash you want to expand
 *	newhash  The hash structure for the new table
 *
 * Returns:
 *	1 on success and 0 on failure.
 *
 * Discussion:
 */
static int
hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
{
	uma_slab_t slab;
	int hval;
	int i;

	if (!newhash->uh_slab_hash)
		return (0);

	if (oldhash->uh_hashsize >= newhash->uh_hashsize)
		return (0);

	/*
	 * I need to investigate hash algorithms for resizing without a
	 * full rehash.
	 */

	for (i = 0; i < oldhash->uh_hashsize; i++)
		while (!SLIST_EMPTY(&oldhash->uh_slab_hash[i])) {
			slab = SLIST_FIRST(&oldhash->uh_slab_hash[i]);
			SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[i], us_hlink);
			hval = UMA_HASH(newhash, slab->us_data);
			SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
			    slab, us_hlink);
		}

	return (1);
}

/*
 * Free the hash bucket to the appropriate backing store.
 *
 * Arguments:
 *	hash  The hash structure whose backing storage we're freeing
 *
 * Returns:
 *	Nothing
 */
static void
hash_free(struct uma_hash *hash)
{
	if (hash->uh_slab_hash == NULL)
		return;
	if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
		zone_free_item(hashzone, hash->uh_slab_hash, NULL, SKIP_NONE);
	else
		free(hash->uh_slab_hash, M_UMAHASH);
}

/*
 * Frees all outstanding items in a bucket
 *
 * Arguments:
 *	zone   The zone to free to, must be unlocked.
 *	bucket The free/alloc bucket with items, cpu queue must be locked.
 *
 * Returns:
 *	Nothing
 */

static void
bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
{
	int i;

	if (bucket == NULL)
		return;

	if (zone->uz_fini)
		for (i = 0; i < bucket->ub_cnt; i++)
			zone->uz_fini(bucket->ub_bucket[i], zone->uz_size);
	zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt);
	bucket->ub_cnt = 0;
}

/*
 * Drains the per cpu caches for a zone.
 *
 * NOTE: This may only be called while the zone is being torn down, and not
 * during normal operation.  This is necessary in order that we do not have
 * to migrate CPUs to drain the per-CPU caches.
 *
 * Arguments:
 *	zone     The zone to drain, must be unlocked.
 *
 * Returns:
 *	Nothing
 */
static void
cache_drain(uma_zone_t zone)
{
	uma_cache_t cache;
	int cpu;

	/*
	 * XXX: It is safe to not lock the per-CPU caches, because we're
	 * tearing down the zone anyway.  I.e., there will be no further use
	 * of the caches at this point.
	 *
	 * XXX: It would be good to be able to assert that the zone is being
	 * torn down to prevent improper use of cache_drain().
	 *
	 * XXX: We lock the zone before passing into bucket_cache_drain() as
	 * it is used elsewhere.  Should the tear-down path be made special
	 * there in some form?
	 */
	CPU_FOREACH(cpu) {
		cache = &zone->uz_cpu[cpu];
		bucket_drain(zone, cache->uc_allocbucket);
		bucket_drain(zone, cache->uc_freebucket);
		if (cache->uc_allocbucket != NULL)
			bucket_free(zone, cache->uc_allocbucket, NULL);
		if (cache->uc_freebucket != NULL)
			bucket_free(zone, cache->uc_freebucket, NULL);
		cache->uc_allocbucket = cache->uc_freebucket = NULL;
	}
	ZONE_LOCK(zone);
	bucket_cache_drain(zone);
	ZONE_UNLOCK(zone);
}

/*
 * Drain the cached buckets from a zone.  Expects a locked zone on entry.
 */
static void
bucket_cache_drain(uma_zone_t zone)
{
	uma_bucket_t bucket;

	/*
	 * Drain the bucket queues and free the buckets, we just keep two per
	 * cpu (alloc/free).
	 */
	while ((bucket = LIST_FIRST(&zone->uz_buckets)) != NULL) {
		LIST_REMOVE(bucket, ub_link);
		ZONE_UNLOCK(zone);
		bucket_drain(zone, bucket);
		bucket_free(zone, bucket, NULL);
		ZONE_LOCK(zone);
	}
}

static void
keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
{
	uint8_t *mem;
	int i;
	uint8_t flags;

	mem = slab->us_data;
	flags = slab->us_flags;
	i = start;
	if (keg->uk_fini != NULL) {
		for (i--; i > -1; i--)
			keg->uk_fini(slab->us_data + (keg->uk_rsize * i),
			    keg->uk_size);
	}
	if (keg->uk_flags & UMA_ZONE_VTOSLAB) {
		vm_object_t obj;

		if (flags & UMA_SLAB_KMEM)
			obj = kmem_object;
		else if (flags & UMA_SLAB_KERNEL)
			obj = kernel_object;
		else
			obj = NULL;
		for (i = 0; i < keg->uk_ppera; i++)
			vsetobj((vm_offset_t)mem + (i * PAGE_SIZE), obj);
	}
	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
		zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
#ifdef UMA_DEBUG
	printf("%s: Returning %d bytes.\n", keg->uk_name,
	    PAGE_SIZE * keg->uk_ppera);
#endif
	keg->uk_freef(mem, PAGE_SIZE * keg->uk_ppera, flags);
}

/*
 * Frees pages from a keg back to the system.  This is done on demand from
 * the pageout daemon.
 *
 * Returns nothing.
 */
static void
keg_drain(uma_keg_t keg)
{
	struct slabhead freeslabs = { 0 };
	uma_slab_t slab;
	uma_slab_t n;

	/*
	 * We don't want to take pages from statically allocated kegs at this
	 * time.
	 */
	if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
		return;

#ifdef UMA_DEBUG
	printf("%s free items: %u\n", keg->uk_name, keg->uk_free);
#endif
	KEG_LOCK(keg);
	if (keg->uk_free == 0)
		goto finished;

	slab = LIST_FIRST(&keg->uk_free_slab);
	while (slab) {
		n = LIST_NEXT(slab, us_link);

		/* We have nowhere to free these to */
		if (slab->us_flags & UMA_SLAB_BOOT) {
			slab = n;
			continue;
		}

		LIST_REMOVE(slab, us_link);
		keg->uk_pages -= keg->uk_ppera;
		keg->uk_free -= keg->uk_ipers;

		if (keg->uk_flags & UMA_ZONE_HASH)
			UMA_HASH_REMOVE(&keg->uk_hash, slab, slab->us_data);

		SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);

		slab = n;
	}
finished:
	KEG_UNLOCK(keg);

	while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
		SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
		keg_free_slab(keg, slab, 0);
	}
}

static void
zone_drain_wait(uma_zone_t zone, int waitok)
{

	/*
	 * Set draining to interlock with zone_dtor() so we can release our
	 * locks as we go.  Only dtor() should do a WAITOK call since it
	 * is the only call that knows the structure will still be available
	 * when it wakes up.
	 */
	ZONE_LOCK(zone);
	while (zone->uz_flags & UMA_ZFLAG_DRAINING) {
		if (waitok == M_NOWAIT)
			goto out;
		mtx_unlock(&uma_mtx);
		msleep(zone, zone->uz_lockptr, PVM, "zonedrain", 1);
		mtx_lock(&uma_mtx);
	}
	zone->uz_flags |= UMA_ZFLAG_DRAINING;
	bucket_cache_drain(zone);
	ZONE_UNLOCK(zone);
	/*
	 * The DRAINING flag protects us from being freed while
	 * we're running.  Normally the uma_mtx would protect us but we
	 * must be able to release and acquire the right lock for each keg.
	 */
	zone_foreach_keg(zone, &keg_drain);
	ZONE_LOCK(zone);
	zone->uz_flags &= ~UMA_ZFLAG_DRAINING;
	wakeup(zone);
out:
	ZONE_UNLOCK(zone);
}

void
zone_drain(uma_zone_t zone)
{

	zone_drain_wait(zone, M_NOWAIT);
}

/*
 * Allocate a new slab for a keg.  This does not insert the slab onto a list.
 *
 * Arguments:
 *	wait  Shall we wait?
 *
 * Returns:
 *	The slab that was allocated or NULL if there is no memory and the
 *	caller specified M_NOWAIT.
 */
static uma_slab_t
keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int wait)
{
	uma_slabrefcnt_t slabref;
	uma_alloc allocf;
	uma_slab_t slab;
	uint8_t *mem;
	uint8_t flags;
	int i;

	mtx_assert(&keg->uk_lock, MA_OWNED);
	slab = NULL;
	mem = NULL;

#ifdef UMA_DEBUG
	printf("alloc_slab:  Allocating a new slab for %s\n", keg->uk_name);
#endif
	allocf = keg->uk_allocf;
	KEG_UNLOCK(keg);

	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
		slab = zone_alloc_item(keg->uk_slabzone, NULL, wait);
		if (slab == NULL)
			goto out;
	}

	/*
	 * This reproduces the old vm_zone behavior of zero filling pages the
	 * first time they are added to a zone.
	 *
	 * Malloced items are zeroed in uma_zalloc.
	 */

	if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
		wait |= M_ZERO;
	else
		wait &= ~M_ZERO;

	if (keg->uk_flags & UMA_ZONE_NODUMP)
		wait |= M_NODUMP;

	/* zone is passed for legacy reasons. */
	mem = allocf(zone, keg->uk_ppera * PAGE_SIZE, &flags, wait);
	if (mem == NULL) {
		if (keg->uk_flags & UMA_ZONE_OFFPAGE)
			zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
		slab = NULL;
		goto out;
	}

	/* Point the slab into the allocated memory */
	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
		slab = (uma_slab_t)(mem + keg->uk_pgoff);

	if (keg->uk_flags & UMA_ZONE_VTOSLAB)
		for (i = 0; i < keg->uk_ppera; i++)
			vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);

	slab->us_keg = keg;
	slab->us_data = mem;
	slab->us_freecount = keg->uk_ipers;
	slab->us_flags = flags;
	BIT_FILL(SLAB_SETSIZE, &slab->us_free);
#ifdef INVARIANTS
	BIT_ZERO(SLAB_SETSIZE, &slab->us_debugfree);
#endif
	if (keg->uk_flags & UMA_ZONE_REFCNT) {
		slabref = (uma_slabrefcnt_t)slab;
		for (i = 0; i < keg->uk_ipers; i++)
			slabref->us_refcnt[i] = 0;
	}

	if (keg->uk_init != NULL) {
		for (i = 0; i < keg->uk_ipers; i++)
			if (keg->uk_init(slab->us_data + (keg->uk_rsize * i),
			    keg->uk_size, wait) != 0)
				break;
		if (i != keg->uk_ipers) {
			keg_free_slab(keg, slab, i);
			slab = NULL;
			goto out;
		}
	}
out:
	KEG_LOCK(keg);

	if (slab != NULL) {
		if (keg->uk_flags & UMA_ZONE_HASH)
			UMA_HASH_INSERT(&keg->uk_hash, slab, mem);

		keg->uk_pages += keg->uk_ppera;
		keg->uk_free += keg->uk_ipers;
	}

	return (slab);
}

/*
 * This function is intended to be used early on in place of page_alloc() so
 * that we may use the boot time page cache to satisfy allocations before
 * the VM is ready.
 */
static void *
startup_alloc(uma_zone_t zone, int bytes, uint8_t *pflag, int wait)
{
	uma_keg_t keg;
	uma_slab_t tmps;
	int pages, check_pages;

	keg = zone_first_keg(zone);
	pages = howmany(bytes, PAGE_SIZE);
	check_pages = pages - 1;
	KASSERT(pages > 0, ("startup_alloc can't reserve 0 pages\n"));

	/*
	 * Check our small startup cache to see if it has pages remaining.
	 */
	mtx_lock(&uma_boot_pages_mtx);

	/* First check if we have enough room. */
	tmps = LIST_FIRST(&uma_boot_pages);
	while (tmps != NULL && check_pages-- > 0)
		tmps = LIST_NEXT(tmps, us_link);
	if (tmps != NULL) {
		/*
		 * It's ok to lose tmps references.  The last one will
		 * have tmps->us_data pointing to the start address of
		 * "pages" contiguous pages of memory.
		 */
		while (pages-- > 0) {
			tmps = LIST_FIRST(&uma_boot_pages);
			LIST_REMOVE(tmps, us_link);
		}
		mtx_unlock(&uma_boot_pages_mtx);
		*pflag = tmps->us_flags;
		return (tmps->us_data);
	}
	mtx_unlock(&uma_boot_pages_mtx);
	if (booted < UMA_STARTUP2)
		panic("UMA: Increase vm.boot_pages");
	/*
	 * Now that we've booted reset these users to their real allocator.
	 */
#ifdef UMA_MD_SMALL_ALLOC
	keg->uk_allocf = (keg->uk_ppera > 1) ? page_alloc : uma_small_alloc;
#else
	keg->uk_allocf = page_alloc;
#endif
	return keg->uk_allocf(zone, bytes, pflag, wait);
}

/*
 * Allocates a number of pages from the system
 *
 * Arguments:
 *	bytes  The number of bytes requested
 *	wait   Shall we wait?
 *
 * Returns:
 *	A pointer to the allocated memory or possibly
 *	NULL if M_NOWAIT is set.
 */
static void *
page_alloc(uma_zone_t zone, int bytes, uint8_t *pflag, int wait)
{
	void *p;	/* Returned page */

	*pflag = UMA_SLAB_KMEM;
	p = (void *) kmem_malloc(kmem_map, bytes, wait);

	return (p);
}

/*
 * Allocates a number of pages from within an object
 *
 * Arguments:
 *	bytes  The number of bytes requested
 *	wait   Shall we wait?
 *
 * Returns:
 *	A pointer to the allocated memory or possibly
 *	NULL if M_NOWAIT is set.
 */
static void *
noobj_alloc(uma_zone_t zone, int bytes, uint8_t *flags, int wait)
{
	TAILQ_HEAD(, vm_page) alloctail;
	u_long npages;
	vm_offset_t retkva, zkva;
	vm_page_t p, p_next;
	uma_keg_t keg;

	TAILQ_INIT(&alloctail);
	keg = zone_first_keg(zone);

	npages = howmany(bytes, PAGE_SIZE);
	while (npages > 0) {
		p = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT |
		    VM_ALLOC_WIRED | VM_ALLOC_NOOBJ);
		if (p != NULL) {
			/*
			 * Since the page does not belong to an object, its
			 * listq is unused.
			 */
			TAILQ_INSERT_TAIL(&alloctail, p, listq);
			npages--;
			continue;
		}
		if (wait & M_WAITOK) {
			VM_WAIT;
			continue;
		}

		/*
		 * Page allocation failed, free intermediate pages and
		 * exit.
		 */
		TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
			vm_page_unwire(p, 0);
			vm_page_free(p);
		}
		return (NULL);
	}
	*flags = UMA_SLAB_PRIV;
	zkva = keg->uk_kva +
	    atomic_fetchadd_long(&keg->uk_offset, round_page(bytes));
	retkva = zkva;
	TAILQ_FOREACH(p, &alloctail, listq) {
		pmap_qenter(zkva, &p, 1);
		zkva += PAGE_SIZE;
	}

	return ((void *)retkva);
}

/*
 * Frees a number of pages to the system
 *
 * Arguments:
 *	mem   A pointer to the memory to be freed
 *	size  The size of the memory being freed
 *	flags The original p->us_flags field
 *
 * Returns:
 *	Nothing
 */
static void
page_free(void *mem, int size, uint8_t flags)
{
	vm_map_t map;

	if (flags & UMA_SLAB_KMEM)
		map = kmem_map;
	else if (flags & UMA_SLAB_KERNEL)
		map = kernel_map;
	else
		panic("UMA: page_free used with invalid flags %d", flags);

	kmem_free(map, (vm_offset_t)mem, size);
}

/*
 * Zero fill initializer
 *
 * Arguments/Returns follow uma_init specifications
 */
static int
zero_init(void *mem, int size, int flags)
{
	bzero(mem, size);
	return (0);
}

/*
 * Finish creating a small uma keg.  This calculates ipers, and the keg size.
 *
 * Arguments
 *	keg  The keg we should initialize
 *
 * Returns
 *	Nothing
 */
static void
keg_small_init(uma_keg_t keg)
{
	u_int rsize;
	u_int memused;
	u_int wastedspace;
	u_int shsize;

	if (keg->uk_flags & UMA_ZONE_PCPU) {
		KASSERT(mp_ncpus > 0, ("%s: ncpus %d\n", __func__, mp_ncpus));
		keg->uk_slabsize = sizeof(struct pcpu);
		keg->uk_ppera = howmany(mp_ncpus * sizeof(struct pcpu),
		    PAGE_SIZE);
	} else {
		keg->uk_slabsize = UMA_SLAB_SIZE;
		keg->uk_ppera = 1;
	}

	/*
	 * Calculate the size of each allocation (rsize) according to
	 * alignment.
	 * If the requested size is smaller than we have
	 * allocation bits for we round it up.
	 */
	rsize = keg->uk_size;
	if (rsize < keg->uk_slabsize / SLAB_SETSIZE)
		rsize = keg->uk_slabsize / SLAB_SETSIZE;
	if (rsize & keg->uk_align)
		rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1);
	keg->uk_rsize = rsize;

	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0 ||
	    keg->uk_rsize < sizeof(struct pcpu),
	    ("%s: size %u too large", __func__, keg->uk_rsize));

	if (keg->uk_flags & UMA_ZONE_REFCNT)
		rsize += sizeof(uint32_t);

	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
		shsize = 0;
	else
		shsize = sizeof(struct uma_slab);

	keg->uk_ipers = (keg->uk_slabsize - shsize) / rsize;
	KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
	    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));

	memused = keg->uk_ipers * rsize + shsize;
	wastedspace = keg->uk_slabsize - memused;

	/*
	 * We can't do OFFPAGE if we're internal or if we've been
	 * asked to not go to the VM for buckets.  If we do this we
	 * may end up going to the VM for slabs which we do not
	 * want to do if we're UMA_ZFLAG_CACHEONLY as a result
	 * of UMA_ZONE_VM, which clearly forbids it.
	 */
	if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
	    (keg->uk_flags & UMA_ZFLAG_CACHEONLY))
		return;

	/*
	 * See if using an OFFPAGE slab will limit our waste.  Only do
	 * this if it permits more items per-slab.
	 *
	 * XXX We could try growing slabsize to limit max waste as well.
	 *     Historically this was not done because the VM could not
	 *     efficiently handle contiguous allocations.
	 */
	if ((wastedspace >= keg->uk_slabsize / UMA_MAX_WASTE) &&
	    (keg->uk_ipers < (keg->uk_slabsize / keg->uk_rsize))) {
		keg->uk_ipers = keg->uk_slabsize / keg->uk_rsize;
		KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
		    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
#ifdef UMA_DEBUG
		printf("UMA decided we need offpage slab headers for "
		    "keg: %s, calculated wastedspace = %d, "
		    "maximum wasted space allowed = %d, "
		    "calculated ipers = %d, "
		    "new wasted space = %d\n", keg->uk_name, wastedspace,
		    keg->uk_slabsize / UMA_MAX_WASTE, keg->uk_ipers,
		    keg->uk_slabsize - keg->uk_ipers * keg->uk_rsize);
#endif
		keg->uk_flags |= UMA_ZONE_OFFPAGE;
	}

	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
	    (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
		keg->uk_flags |= UMA_ZONE_HASH;
}

/*
 * Finish creating a large (> UMA_SLAB_SIZE) uma keg.  Just give in and do
 * OFFPAGE for now.  When I can allow for more dynamic slab sizes this will be
 * more complicated.
 *
 * Arguments
 *	keg  The keg we should initialize
 *
 * Returns
 *	Nothing
 */
static void
keg_large_init(uma_keg_t keg)
{

	KASSERT(keg != NULL, ("Keg is null in keg_large_init"));
	KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0,
	    ("keg_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY keg"));
	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
	    ("%s: Cannot large-init a UMA_ZONE_PCPU keg", __func__));

	keg->uk_ppera = howmany(keg->uk_size, PAGE_SIZE);
	keg->uk_slabsize = keg->uk_ppera * PAGE_SIZE;
	keg->uk_ipers = 1;
	keg->uk_rsize = keg->uk_size;

	/* We can't do OFFPAGE if we're internal, bail out here. */
	if (keg->uk_flags & UMA_ZFLAG_INTERNAL)
		return;

	keg->uk_flags |= UMA_ZONE_OFFPAGE;
	if ((keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
		keg->uk_flags |= UMA_ZONE_HASH;
}

static void
keg_cachespread_init(uma_keg_t keg)
{
	int alignsize;
	int trailer;
	int pages;
	int rsize;

	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
	    ("%s: Cannot cachespread-init a UMA_ZONE_PCPU keg", __func__));

	alignsize = keg->uk_align + 1;
	rsize = keg->uk_size;
	/*
	 * We want one item to start on every align boundary in a page.  To
	 * do this we will span pages.  We will also extend the item by the
	 * size of align if it is an even multiple of align.  Otherwise, it
	 * would fall on the same boundary every time.
	 */
	if (rsize & keg->uk_align)
		rsize = (rsize & ~keg->uk_align) + alignsize;
	if ((rsize & alignsize) == 0)
		rsize += alignsize;
	trailer = rsize - keg->uk_size;
	pages = (rsize * (PAGE_SIZE / alignsize)) / PAGE_SIZE;
	pages = MIN(pages, (128 * 1024) / PAGE_SIZE);
	keg->uk_rsize = rsize;
	keg->uk_ppera = pages;
	keg->uk_slabsize = UMA_SLAB_SIZE;
	keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize;
	keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
	KASSERT(keg->uk_ipers <= uma_max_ipers,
	    ("%s: keg->uk_ipers too high(%d) increase max_ipers", __func__,
	    keg->uk_ipers));
}

/*
 * Keg header ctor.  This initializes all fields, locks, etc.  And inserts
 * the keg onto the global keg list.
 *
 * Arguments/Returns follow uma_ctor specifications
 *	udata  Actually uma_kctor_args
 */
static int
keg_ctor(void *mem, int size, void *udata, int flags)
{
	struct uma_kctor_args *arg = udata;
	uma_keg_t keg = mem;
	uma_zone_t zone;

	bzero(keg, size);
	keg->uk_size = arg->size;
	keg->uk_init = arg->uminit;
	keg->uk_fini = arg->fini;
	keg->uk_align = arg->align;
	keg->uk_free = 0;
	keg->uk_reserve = 0;
	keg->uk_pages = 0;
	keg->uk_flags = arg->flags;
	keg->uk_allocf = page_alloc;
	keg->uk_freef = page_free;
	keg->uk_slabzone = NULL;

	/*
	 * The master zone is passed to us at keg-creation time.
	 */
	zone = arg->zone;
	keg->uk_name = zone->uz_name;

	if (arg->flags & UMA_ZONE_VM)
		keg->uk_flags |= UMA_ZFLAG_CACHEONLY;

	if (arg->flags & UMA_ZONE_ZINIT)
		keg->uk_init = zero_init;

	if (arg->flags & UMA_ZONE_REFCNT || arg->flags & UMA_ZONE_MALLOC)
		keg->uk_flags |= UMA_ZONE_VTOSLAB;

	if (arg->flags & UMA_ZONE_PCPU)
#ifdef SMP
		keg->uk_flags |= UMA_ZONE_OFFPAGE;
#else
		keg->uk_flags &= ~UMA_ZONE_PCPU;
#endif

	if (keg->uk_flags & UMA_ZONE_CACHESPREAD) {
		keg_cachespread_init(keg);
	} else if (keg->uk_flags & UMA_ZONE_REFCNT) {
		if (keg->uk_size >
		    (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt) -
		    sizeof(uint32_t)))
			keg_large_init(keg);
		else
			keg_small_init(keg);
	} else {
		if (keg->uk_size > (UMA_SLAB_SIZE - sizeof(struct uma_slab)))
			keg_large_init(keg);
		else
			keg_small_init(keg);
	}

	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
		if (keg->uk_flags & UMA_ZONE_REFCNT) {
			if (keg->uk_ipers > uma_max_ipers_ref)
				panic("Too many ref items per zone: %d > %d\n",
				    keg->uk_ipers, uma_max_ipers_ref);
			keg->uk_slabzone = slabrefzone;
		} else
			keg->uk_slabzone = slabzone;
	}

	/*
	 * If we haven't booted yet we need allocations to go through the
	 * startup cache until the vm is ready.
	 */
	if (keg->uk_ppera == 1) {
#ifdef UMA_MD_SMALL_ALLOC
		keg->uk_allocf = uma_small_alloc;
		keg->uk_freef = uma_small_free;

		if (booted < UMA_STARTUP)
			keg->uk_allocf = startup_alloc;
#else
		if (booted < UMA_STARTUP2)
			keg->uk_allocf = startup_alloc;
#endif
	} else if (booted < UMA_STARTUP2 &&
	    (keg->uk_flags & UMA_ZFLAG_INTERNAL))
		keg->uk_allocf = startup_alloc;

	/*
	 * Initialize keg's lock
	 */
	KEG_LOCK_INIT(keg, (arg->flags & UMA_ZONE_MTXCLASS));

	/*
	 * If we're putting the slab header in the actual page we need to
	 * figure out where in each page it goes.  This calculates a right
	 * justified offset into the memory on an ALIGN_PTR boundary.
	 */
	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) {
		u_int totsize;

		/* Size of the slab struct and free list */
		totsize = sizeof(struct uma_slab);

		/* Size of the reference counts. */
		if (keg->uk_flags & UMA_ZONE_REFCNT)
			totsize += keg->uk_ipers * sizeof(uint32_t);

		if (totsize & UMA_ALIGN_PTR)
			totsize = (totsize & ~UMA_ALIGN_PTR) +
			    (UMA_ALIGN_PTR + 1);
		keg->uk_pgoff = (PAGE_SIZE * keg->uk_ppera) - totsize;

		/*
		 * The only way the following is possible is if our
		 * UMA_ALIGN_PTR adjustments have made us bigger than
		 * UMA_SLAB_SIZE.  I haven't checked whether this is
		 * mathematically possible for all cases, so we make
		 * sure here anyway.
		 */
		totsize = keg->uk_pgoff + sizeof(struct uma_slab);
		if (keg->uk_flags & UMA_ZONE_REFCNT)
			totsize += keg->uk_ipers * sizeof(uint32_t);
		if (totsize > PAGE_SIZE * keg->uk_ppera) {
			printf("zone %s ipers %d rsize %d size %d\n",
			    zone->uz_name, keg->uk_ipers, keg->uk_rsize,
			    keg->uk_size);
			panic("UMA slab won't fit.");
		}
	}

	if (keg->uk_flags & UMA_ZONE_HASH)
		hash_alloc(&keg->uk_hash);

#ifdef UMA_DEBUG
	printf("UMA: %s(%p) size %d(%d) flags %#x ipers %d ppera %d out %d free %d\n",
	    zone->uz_name, zone, keg->uk_size, keg->uk_rsize, keg->uk_flags,
	    keg->uk_ipers, keg->uk_ppera,
	    (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free);
#endif

	LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);

	mtx_lock(&uma_mtx);
	LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
	mtx_unlock(&uma_mtx);
	return (0);
}

/*
 * Zone header ctor.  This initializes all fields, locks, etc.
 *
 * Arguments/Returns follow uma_ctor specifications
 *	udata  Actually uma_zctor_args
 */
static int
zone_ctor(void *mem, int size, void *udata, int flags)
{
	struct uma_zctor_args *arg = udata;
	uma_zone_t zone = mem;
	uma_zone_t z;
	uma_keg_t keg;

	bzero(zone, size);
	zone->uz_name = arg->name;
	zone->uz_ctor = arg->ctor;
	zone->uz_dtor = arg->dtor;
	zone->uz_slab = zone_fetch_slab;
	zone->uz_init = NULL;
	zone->uz_fini = NULL;
	zone->uz_allocs = 0;
	zone->uz_frees = 0;
	zone->uz_fails = 0;
	zone->uz_sleeps = 0;
	zone->uz_count = 0;
	zone->uz_flags = 0;
	zone->uz_warning = NULL;
	timevalclear(&zone->uz_ratecheck);
	keg = arg->keg;

	ZONE_LOCK_INIT(zone, (arg->flags & UMA_ZONE_MTXCLASS));

	/*
	 * This is a pure cache zone, no kegs.
	 */
	if (arg->import) {
		if (arg->flags & UMA_ZONE_VM)
			arg->flags |= UMA_ZFLAG_CACHEONLY;
		zone->uz_flags = arg->flags;
		zone->uz_size = arg->size;
		zone->uz_import = arg->import;
		zone->uz_release = arg->release;
		zone->uz_arg = arg->arg;
		zone->uz_lockptr = &zone->uz_lock;
		goto out;
	}

	/*
	 * Use the regular zone/keg/slab allocator.
	 */
	zone->uz_import = (uma_import)zone_import;
	zone->uz_release = (uma_release)zone_release;
	zone->uz_arg = zone;

	if (arg->flags & UMA_ZONE_SECONDARY) {
		KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
		zone->uz_init = arg->uminit;
		zone->uz_fini = arg->fini;
		zone->uz_lockptr = &keg->uk_lock;
		zone->uz_flags |= UMA_ZONE_SECONDARY;
		mtx_lock(&uma_mtx);
		ZONE_LOCK(zone);
		LIST_FOREACH(z, &keg->uk_zones, uz_link) {
			if (LIST_NEXT(z, uz_link) == NULL) {
				LIST_INSERT_AFTER(z, zone, uz_link);
				break;
			}
		}
		ZONE_UNLOCK(zone);
		mtx_unlock(&uma_mtx);
	} else if (keg == NULL) {
		if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
		    arg->align, arg->flags)) == NULL)
			return (ENOMEM);
	} else {
		struct uma_kctor_args karg;
		int error;

		/* We should only be here from uma_startup() */
		karg.size = arg->size;
		karg.uminit = arg->uminit;
		karg.fini = arg->fini;
		karg.align = arg->align;
		karg.flags = arg->flags;
		karg.zone = zone;
		error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
		    flags);
		if (error)
			return (error);
	}

	/*
	 * Link in the first keg.
	 */
	zone->uz_klink.kl_keg = keg;
	LIST_INSERT_HEAD(&zone->uz_kegs, &zone->uz_klink, kl_link);
	zone->uz_lockptr = &keg->uk_lock;
	zone->uz_size = keg->uk_size;
	zone->uz_flags |= (keg->uk_flags &
	    (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT));

	/*
	 * Some internal zones don't have room allocated for the per cpu
	 * caches.  If we're internal, bail out here.
	 */
	if (keg->uk_flags & UMA_ZFLAG_INTERNAL) {
		KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0,
		    ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
		return (0);
	}

out:
	if ((arg->flags & UMA_ZONE_MAXBUCKET) == 0)
		zone->uz_count = bucket_select(zone->uz_size);
	else
		zone->uz_count = BUCKET_MAX;

	return (0);
}

/*
 * Keg header dtor.  This frees all data, destroys locks, frees the hash
 * table and removes the keg from the global list.
 *
 * Arguments/Returns follow uma_dtor specifications
 *	udata  unused
 */
static void
keg_dtor(void *arg, int size, void *udata)
{
	uma_keg_t keg;

	keg = (uma_keg_t)arg;
	KEG_LOCK(keg);
	if (keg->uk_free != 0) {
		printf("Freed UMA keg was not empty (%d items). "
		    " Lost %d pages of memory.\n",
		    keg->uk_free, keg->uk_pages);
	}
	KEG_UNLOCK(keg);

	hash_free(&keg->uk_hash);

	KEG_LOCK_FINI(keg);
}

/*
 * Zone header dtor.
 *
 * Arguments/Returns follow uma_dtor specifications
 *	udata  unused
 */
static void
zone_dtor(void *arg, int size, void *udata)
{
	uma_klink_t klink;
	uma_zone_t zone;
	uma_keg_t keg;

	zone = (uma_zone_t)arg;
	keg = zone_first_keg(zone);

	if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
		cache_drain(zone);

	mtx_lock(&uma_mtx);
	LIST_REMOVE(zone, uz_link);
	mtx_unlock(&uma_mtx);
	/*
	 * XXX there are some races here where the zone can be drained but
	 * the zone lock released and then refilled before we remove it...
	 * we don't care for now.
	 */
	zone_drain_wait(zone, M_WAITOK);
	/*
	 * Unlink all of our kegs.
	 */
	while ((klink = LIST_FIRST(&zone->uz_kegs)) != NULL) {
		klink->kl_keg = NULL;
		LIST_REMOVE(klink, kl_link);
		if (klink == &zone->uz_klink)
			continue;
		free(klink, M_TEMP);
	}
	/*
	 * We only destroy kegs from non secondary zones.
	 */
	if (keg != NULL && (zone->uz_flags & UMA_ZONE_SECONDARY) == 0) {
		mtx_lock(&uma_mtx);
		LIST_REMOVE(keg, uk_link);
		mtx_unlock(&uma_mtx);
		zone_free_item(kegs, keg, NULL, SKIP_NONE);
	}
	ZONE_LOCK_FINI(zone);
}

/*
 * Traverses every zone in the system and calls a callback
 *
 * Arguments:
 *	zfunc  A pointer to a function which accepts a zone
 *		as an argument.
 *
 * Returns:
 *	Nothing
 */
static void
zone_foreach(void (*zfunc)(uma_zone_t))
{
	uma_keg_t keg;
	uma_zone_t zone;

	mtx_lock(&uma_mtx);
	LIST_FOREACH(keg, &uma_kegs, uk_link) {
		LIST_FOREACH(zone, &keg->uk_zones, uz_link)
			zfunc(zone);
	}
	mtx_unlock(&uma_mtx);
}

/* Public functions */
/* See uma.h */
void
uma_startup(void *bootmem, int boot_pages)
{
	struct uma_zctor_args args;
	uma_slab_t slab;
	u_int slabsize;
	int i;

#ifdef UMA_DEBUG
	printf("Creating uma keg headers zone and keg.\n");
#endif
	mtx_init(&uma_mtx, "UMA lock", NULL, MTX_DEF);

	/* "manually" create the initial zone */
	memset(&args, 0, sizeof(args));
	args.name = "UMA Kegs";
	args.size = sizeof(struct uma_keg);
	args.ctor = keg_ctor;
	args.dtor = keg_dtor;
	args.uminit = zero_init;
	args.fini = NULL;
	args.keg = &masterkeg;
	args.align = 32 - 1;
	args.flags = UMA_ZFLAG_INTERNAL;
	/* The initial zone has no per-cpu queues so it's smaller */
	zone_ctor(kegs, sizeof(struct uma_zone), &args, M_WAITOK);

#ifdef UMA_DEBUG
	printf("Filling boot free list.\n");
#endif
	for (i = 0; i < boot_pages; i++) {
		slab = (uma_slab_t)((uint8_t *)bootmem + (i * UMA_SLAB_SIZE));
		slab->us_data = (uint8_t *)slab;
		slab->us_flags = UMA_SLAB_BOOT;
		LIST_INSERT_HEAD(&uma_boot_pages, slab, us_link);
	}
	mtx_init(&uma_boot_pages_mtx, "UMA boot pages", NULL, MTX_DEF);

#ifdef UMA_DEBUG
	printf("Creating uma zone headers zone and keg.\n");
#endif
	args.name = "UMA Zones";
	args.size = sizeof(struct uma_zone) +
	    (sizeof(struct uma_cache) * (mp_maxid + 1));
	args.ctor = zone_ctor;
	args.dtor = zone_dtor;
	args.uminit = zero_init;
	args.fini = NULL;
	args.keg = NULL;
	args.align = 32 - 1;
	args.flags = UMA_ZFLAG_INTERNAL;
	/* The initial zone has no per-cpu queues so it's smaller */
	zone_ctor(zones, sizeof(struct uma_zone), &args, M_WAITOK);

#ifdef UMA_DEBUG
	printf("Initializing pcpu cache locks.\n");
#endif
#ifdef UMA_DEBUG
	printf("Creating slab and hash zones.\n");
#endif

	/* Now make a zone for slab headers */
	slabzone = uma_zcreate("UMA Slabs",
	    sizeof(struct uma_slab),
	    NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);

	/*
	 * We also create a zone for the bigger slabs with reference
	 * counts in them, to accommodate UMA_ZONE_REFCNT zones.
	 */
	slabsize = sizeof(struct uma_slab_refcnt);
	slabsize += uma_max_ipers_ref * sizeof(uint32_t);
	slabrefzone = uma_zcreate("UMA RCntSlabs",
	    slabsize,
	    NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR,
	    UMA_ZFLAG_INTERNAL);

	hashzone = uma_zcreate("UMA Hash",
	    sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
	    NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);

	bucket_init();

	booted = UMA_STARTUP;

#ifdef UMA_DEBUG
	printf("UMA startup complete.\n");
#endif
}

/* see uma.h */
void
uma_startup2(void)
{
	booted = UMA_STARTUP2;
	bucket_enable();
#ifdef UMA_DEBUG
	printf("UMA startup2 complete.\n");
#endif
}

/*
 * Initialize our callout handle
 *
 */
static void
uma_startup3(void)
{
#ifdef UMA_DEBUG
	printf("Starting callout.\n");
#endif
	callout_init(&uma_callout, CALLOUT_MPSAFE);
	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
#ifdef UMA_DEBUG
	printf("UMA startup3 complete.\n");
#endif
}

static uma_keg_t
uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
    int align, uint32_t flags)
{
	struct uma_kctor_args args;

	args.size = size;
	args.uminit = uminit;
	args.fini = fini;
	args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align;
	args.flags = flags;
	args.zone = zone;
	return (zone_alloc_item(kegs, &args, M_WAITOK));
}

/* See uma.h */
void
uma_set_align(int align)
{

	if (align != UMA_ALIGN_CACHE)
		uma_align_cache = align;
}

/* See uma.h */
uma_zone_t
uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
    uma_init uminit, uma_fini fini, int align, uint32_t flags)
{
	struct uma_zctor_args args;

	/* This stuff is essential for the zone ctor */
	memset(&args, 0, sizeof(args));
	args.name = name;
	args.size = size;
	args.ctor = ctor;
	args.dtor = dtor;
	args.uminit = uminit;
	args.fini = fini;
	args.align = align;
	args.flags = flags;
	args.keg = NULL;

	return (zone_alloc_item(zones, &args, M_WAITOK));
}

/* See uma.h */
uma_zone_t
uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
    uma_init zinit, uma_fini zfini, uma_zone_t master)
{
	struct uma_zctor_args args;
	uma_keg_t keg;

	keg = zone_first_keg(master);
	memset(&args, 0, sizeof(args));
	args.name = name;
	args.size = keg->uk_size;
	args.ctor = ctor;
	args.dtor = dtor;
	args.uminit = zinit;
	args.fini = zfini;
	args.align = keg->uk_align;
	args.flags = keg->uk_flags | UMA_ZONE_SECONDARY;
	args.keg = keg;

	/* XXX Attaches only one keg of potentially many. */
	return (zone_alloc_item(zones, &args, M_WAITOK));
}

/* See uma.h */
uma_zone_t
uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor,
    uma_init zinit, uma_fini zfini, uma_import zimport,
    uma_release zrelease, void *arg, int flags)
{
	struct uma_zctor_args args;

	memset(&args, 0, sizeof(args));
	args.name = name;
	args.size = size;
	args.ctor = ctor;
	args.dtor = dtor;
	args.uminit = zinit;
	args.fini = zfini;
	args.import = zimport;
	args.release = zrelease;
	args.arg = arg;
	args.align = 0;
	args.flags = flags;

	return (zone_alloc_item(zones, &args, M_WAITOK));
}

static void
zone_lock_pair(uma_zone_t a, uma_zone_t b)
{
	if (a < b) {
		ZONE_LOCK(a);
		mtx_lock_flags(b->uz_lockptr, MTX_DUPOK);
	} else {
		ZONE_LOCK(b);
		mtx_lock_flags(a->uz_lockptr, MTX_DUPOK);
	}
}

static void
zone_unlock_pair(uma_zone_t a, uma_zone_t b)
{

	ZONE_UNLOCK(a);
	ZONE_UNLOCK(b);
}

int
uma_zsecond_add(uma_zone_t zone, uma_zone_t master)
{
	uma_klink_t klink;
	uma_klink_t kl;
	int error;

	error = 0;
	klink = malloc(sizeof(*klink), M_TEMP, M_WAITOK | M_ZERO);

	zone_lock_pair(zone, master);
	/*
	 * zone must use vtoslab() to resolve objects and must already be
	 * a secondary.
	 */
	if ((zone->uz_flags & (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY))
	    != (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) {
		error = EINVAL;
		goto out;
	}
	/*
	 * The new master must also use vtoslab().
	 */
	if ((zone->uz_flags & UMA_ZONE_VTOSLAB) != UMA_ZONE_VTOSLAB) {
		error = EINVAL;
		goto out;
	}
	/*
	 * Both must either be refcnt, or not be refcnt.
	 */
	if ((zone->uz_flags & UMA_ZONE_REFCNT) !=
	    (master->uz_flags & UMA_ZONE_REFCNT)) {
		error = EINVAL;
		goto out;
	}
	/*
	 * The underlying object must be the same size.  rsize
	 * may be different.
	 */
	if (master->uz_size != zone->uz_size) {
		error = E2BIG;
		goto out;
	}
	/*
	 * Put it at the end of the list.
	 */
	klink->kl_keg = zone_first_keg(master);
	LIST_FOREACH(kl, &zone->uz_kegs, kl_link) {
		if (LIST_NEXT(kl, kl_link) == NULL) {
			LIST_INSERT_AFTER(kl, klink, kl_link);
			break;
		}
	}
	klink = NULL;
	zone->uz_flags |= UMA_ZFLAG_MULTI;
	zone->uz_slab = zone_fetch_slab_multi;

out:
	zone_unlock_pair(zone, master);
	if (klink != NULL)
		free(klink, M_TEMP);

	return (error);
}

/* See uma.h */
void
uma_zdestroy(uma_zone_t zone)
{

	zone_free_item(zones, zone, NULL, SKIP_NONE);
}

/* See uma.h */
void *
uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
{
	void *item;
	uma_cache_t cache;
	uma_bucket_t bucket;
	int lockfail;
	int cpu;

	/* This is the fast path allocation */
#ifdef UMA_DEBUG_ALLOC_1
	printf("Allocating one item from %s(%p)\n", zone->uz_name, zone);
#endif
	CTR3(KTR_UMA, "uma_zalloc_arg thread %x zone %s flags %d", curthread,
	    zone->uz_name, flags);

	if (flags & M_WAITOK) {
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
		    "uma_zalloc_arg: zone \"%s\"", zone->uz_name);
	}
#ifdef DEBUG_MEMGUARD
	if (memguard_cmp_zone(zone)) {
		item = memguard_alloc(zone->uz_size, flags);
		if (item != NULL) {
			/*
			 * Avoid conflict with the use-after-free
			 * protecting infrastructure from INVARIANTS.
			 */
			if (zone->uz_init != NULL &&
			    zone->uz_init != mtrash_init &&
			    zone->uz_init(item, zone->uz_size, flags) != 0)
				return (NULL);
			if (zone->uz_ctor != NULL &&
			    zone->uz_ctor != mtrash_ctor &&
			    zone->uz_ctor(item, zone->uz_size, udata,
			    flags) != 0) {
				zone->uz_fini(item, zone->uz_size);
				return (NULL);
			}
			return (item);
		}
		/* This is unfortunate but should not be fatal. */
	}
#endif
	/*
	 * If possible, allocate from the per-CPU cache.  There are two
	 * requirements for safe access to the per-CPU cache: (1) the thread
	 * accessing the cache must not be preempted or yield during access,
	 * and (2) the thread must not migrate CPUs without switching which
	 * cache it accesses.  We rely on a critical section to prevent
	 * preemption and migration.  We release the critical section in
	 * order to acquire the zone mutex if we are unable to allocate from
	 * the current cache; when we re-acquire the critical section, we
	 * must detect and handle migration if it has occurred.
	 */
	critical_enter();
	cpu = curcpu;
	cache = &zone->uz_cpu[cpu];

zalloc_start:
	bucket = cache->uc_allocbucket;
	if (bucket != NULL && bucket->ub_cnt > 0) {
		bucket->ub_cnt--;
		item = bucket->ub_bucket[bucket->ub_cnt];
#ifdef INVARIANTS
		bucket->ub_bucket[bucket->ub_cnt] = NULL;
#endif
		KASSERT(item != NULL, ("uma_zalloc: Bucket pointer mangled."));
		cache->uc_allocs++;
		critical_exit();
		if (zone->uz_ctor != NULL &&
		    zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
			atomic_add_long(&zone->uz_fails, 1);
			zone_free_item(zone, item, udata, SKIP_DTOR);
			return (NULL);
		}
#ifdef INVARIANTS
		uma_dbg_alloc(zone, NULL, item);
#endif
		if (flags & M_ZERO)
			bzero(item, zone->uz_size);
		return (item);
	}

	/*
	 * We have run out of items in our alloc bucket.
	 * See if we can switch with our free bucket.
2072 */ 2073 bucket = cache->uc_freebucket; 2074 if (bucket != NULL && bucket->ub_cnt > 0) { 2075 #ifdef UMA_DEBUG_ALLOC 2076 printf("uma_zalloc: Swapping empty with alloc.\n"); 2077 #endif 2078 cache->uc_freebucket = cache->uc_allocbucket; 2079 cache->uc_allocbucket = bucket; 2080 goto zalloc_start; 2081 } 2082 2083 /* 2084 * Discard any empty allocation bucket while we hold no locks. 2085 */ 2086 bucket = cache->uc_allocbucket; 2087 cache->uc_allocbucket = NULL; 2088 critical_exit(); 2089 if (bucket != NULL) 2090 bucket_free(zone, bucket, udata); 2091 2092 /* Short-circuit for zones without buckets and low memory. */ 2093 if (zone->uz_count == 0 || bucketdisable) 2094 goto zalloc_item; 2095 2096 /* 2097 * Attempt to retrieve the item from the per-CPU cache has failed, so 2098 * we must go back to the zone. This requires the zone lock, so we 2099 * must drop the critical section, then re-acquire it when we go back 2100 * to the cache. Since the critical section is released, we may be 2101 * preempted or migrate. As such, make sure not to maintain any 2102 * thread-local state specific to the cache from prior to releasing 2103 * the critical section. 2104 */ 2105 lockfail = 0; 2106 if (ZONE_TRYLOCK(zone) == 0) { 2107 /* Record contention to size the buckets. */ 2108 ZONE_LOCK(zone); 2109 lockfail = 1; 2110 } 2111 critical_enter(); 2112 cpu = curcpu; 2113 cache = &zone->uz_cpu[cpu]; 2114 2115 /* 2116 * Since we have locked the zone we may as well send back our stats. 2117 */ 2118 atomic_add_long(&zone->uz_allocs, cache->uc_allocs); 2119 atomic_add_long(&zone->uz_frees, cache->uc_frees); 2120 cache->uc_allocs = 0; 2121 cache->uc_frees = 0; 2122 2123 /* See if we lost the race to fill the cache. */ 2124 if (cache->uc_allocbucket != NULL) { 2125 ZONE_UNLOCK(zone); 2126 goto zalloc_start; 2127 } 2128 2129 /* 2130 * Check the zone's cache of buckets. 2131 */ 2132 if ((bucket = LIST_FIRST(&zone->uz_buckets)) != NULL) { 2133 KASSERT(bucket->ub_cnt != 0, 2134 ("uma_zalloc_arg: Returning an empty bucket.")); 2135 2136 LIST_REMOVE(bucket, ub_link); 2137 cache->uc_allocbucket = bucket; 2138 ZONE_UNLOCK(zone); 2139 goto zalloc_start; 2140 } 2141 /* We are no longer associated with this CPU. */ 2142 critical_exit(); 2143 2144 /* 2145 * We bump the uz count when the cache size is insufficient to 2146 * handle the working set. 2147 */ 2148 if (lockfail && zone->uz_count < BUCKET_MAX) 2149 zone->uz_count++; 2150 ZONE_UNLOCK(zone); 2151 2152 /* 2153 * Now lets just fill a bucket and put it on the free list. If that 2154 * works we'll restart the allocation from the begining and it 2155 * will use the just filled bucket. 2156 */ 2157 bucket = zone_alloc_bucket(zone, udata, flags); 2158 if (bucket != NULL) { 2159 ZONE_LOCK(zone); 2160 critical_enter(); 2161 cpu = curcpu; 2162 cache = &zone->uz_cpu[cpu]; 2163 /* 2164 * See if we lost the race or were migrated. Cache the 2165 * initialized bucket to make this less likely or claim 2166 * the memory directly. 2167 */ 2168 if (cache->uc_allocbucket == NULL) 2169 cache->uc_allocbucket = bucket; 2170 else 2171 LIST_INSERT_HEAD(&zone->uz_buckets, bucket, ub_link); 2172 ZONE_UNLOCK(zone); 2173 goto zalloc_start; 2174 } 2175 2176 /* 2177 * We may not be able to get a bucket so return an actual item. 
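 * (Every bucket-based path has failed by the time we get here: the
 * per-CPU buckets, the zone's bucket list and zone_alloc_bucket()
 * itself, so fall through to zone_alloc_item() and import a single
 * item directly from the keg layer.)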
2178 */ 2179 #ifdef UMA_DEBUG 2180 printf("uma_zalloc_arg: Bucketzone returned NULL\n"); 2181 #endif 2182 2183 zalloc_item: 2184 item = zone_alloc_item(zone, udata, flags); 2185 2186 return (item); 2187 } 2188 2189 static uma_slab_t 2190 keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int flags) 2191 { 2192 uma_slab_t slab; 2193 int reserve; 2194 2195 mtx_assert(&keg->uk_lock, MA_OWNED); 2196 slab = NULL; 2197 reserve = 0; 2198 if ((flags & M_USE_RESERVE) == 0) 2199 reserve = keg->uk_reserve; 2200 2201 for (;;) { 2202 /* 2203 * Find a slab with some space. Prefer slabs that are partially 2204 * used over those that are totally full. This helps to reduce 2205 * fragmentation. 2206 */ 2207 if (keg->uk_free > reserve) { 2208 if (!LIST_EMPTY(&keg->uk_part_slab)) { 2209 slab = LIST_FIRST(&keg->uk_part_slab); 2210 } else { 2211 slab = LIST_FIRST(&keg->uk_free_slab); 2212 LIST_REMOVE(slab, us_link); 2213 LIST_INSERT_HEAD(&keg->uk_part_slab, slab, 2214 us_link); 2215 } 2216 MPASS(slab->us_keg == keg); 2217 return (slab); 2218 } 2219 2220 /* 2221 * M_NOVM means don't ask at all! 2222 */ 2223 if (flags & M_NOVM) 2224 break; 2225 2226 if (keg->uk_maxpages && keg->uk_pages >= keg->uk_maxpages) { 2227 keg->uk_flags |= UMA_ZFLAG_FULL; 2228 /* 2229 * If this is not a multi-zone, set the FULL bit. 2230 * Otherwise slab_multi() takes care of it. 2231 */ 2232 if ((zone->uz_flags & UMA_ZFLAG_MULTI) == 0) { 2233 zone->uz_flags |= UMA_ZFLAG_FULL; 2234 zone_log_warning(zone); 2235 } 2236 if (flags & M_NOWAIT) 2237 break; 2238 zone->uz_sleeps++; 2239 msleep(keg, &keg->uk_lock, PVM, "keglimit", 0); 2240 continue; 2241 } 2242 slab = keg_alloc_slab(keg, zone, flags); 2243 /* 2244 * If we got a slab here it's safe to mark it partially used 2245 * and return. We assume that the caller is going to remove 2246 * at least one item. 2247 */ 2248 if (slab) { 2249 MPASS(slab->us_keg == keg); 2250 LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link); 2251 return (slab); 2252 } 2253 /* 2254 * We might not have been able to get a slab but another cpu 2255 * could have while we were unlocked. Check again before we 2256 * fail. 2257 */ 2258 flags |= M_NOVM; 2259 } 2260 return (slab); 2261 } 2262 2263 static uma_slab_t 2264 zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int flags) 2265 { 2266 uma_slab_t slab; 2267 2268 if (keg == NULL) { 2269 keg = zone_first_keg(zone); 2270 KEG_LOCK(keg); 2271 } 2272 2273 for (;;) { 2274 slab = keg_fetch_slab(keg, zone, flags); 2275 if (slab) 2276 return (slab); 2277 if (flags & (M_NOWAIT | M_NOVM)) 2278 break; 2279 } 2280 KEG_UNLOCK(keg); 2281 return (NULL); 2282 } 2283 2284 /* 2285 * uma_zone_fetch_slab_multi: Fetches a slab from one available keg. Returns 2286 * with the keg locked. On NULL no lock is held. 2287 * 2288 * The last pointer is used to seed the search. It is not required. 2289 */ 2290 static uma_slab_t 2291 zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int rflags) 2292 { 2293 uma_klink_t klink; 2294 uma_slab_t slab; 2295 uma_keg_t keg; 2296 int flags; 2297 int empty; 2298 int full; 2299 2300 /* 2301 * Don't wait on the first pass. This will skip limit tests 2302 * as well. We don't want to block if we can find a provider 2303 * without blocking. 2304 */ 2305 flags = (rflags & ~M_WAITOK) | M_NOWAIT; 2306 /* 2307 * Use the last slab allocated as a hint for where to start 2308 * the search. 
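 * (If the hinted keg can satisfy the request we return immediately with
 * that keg still locked, which preserves the locking contract described
 * in the function header; otherwise the hint keg is unlocked before the
 * full keg list is scanned.)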
2309 */ 2310 if (last != NULL) { 2311 slab = keg_fetch_slab(last, zone, flags); 2312 if (slab) 2313 return (slab); 2314 KEG_UNLOCK(last); 2315 } 2316 /* 2317 * Loop until we have a slab incase of transient failures 2318 * while M_WAITOK is specified. I'm not sure this is 100% 2319 * required but we've done it for so long now. 2320 */ 2321 for (;;) { 2322 empty = 0; 2323 full = 0; 2324 /* 2325 * Search the available kegs for slabs. Be careful to hold the 2326 * correct lock while calling into the keg layer. 2327 */ 2328 LIST_FOREACH(klink, &zone->uz_kegs, kl_link) { 2329 keg = klink->kl_keg; 2330 KEG_LOCK(keg); 2331 if ((keg->uk_flags & UMA_ZFLAG_FULL) == 0) { 2332 slab = keg_fetch_slab(keg, zone, flags); 2333 if (slab) 2334 return (slab); 2335 } 2336 if (keg->uk_flags & UMA_ZFLAG_FULL) 2337 full++; 2338 else 2339 empty++; 2340 KEG_UNLOCK(keg); 2341 } 2342 if (rflags & (M_NOWAIT | M_NOVM)) 2343 break; 2344 flags = rflags; 2345 /* 2346 * All kegs are full. XXX We can't atomically check all kegs 2347 * and sleep so just sleep for a short period and retry. 2348 */ 2349 if (full && !empty) { 2350 ZONE_LOCK(zone); 2351 zone->uz_flags |= UMA_ZFLAG_FULL; 2352 zone->uz_sleeps++; 2353 zone_log_warning(zone); 2354 msleep(zone, zone->uz_lockptr, PVM, 2355 "zonelimit", hz/100); 2356 zone->uz_flags &= ~UMA_ZFLAG_FULL; 2357 ZONE_UNLOCK(zone); 2358 continue; 2359 } 2360 } 2361 return (NULL); 2362 } 2363 2364 static void * 2365 slab_alloc_item(uma_keg_t keg, uma_slab_t slab) 2366 { 2367 void *item; 2368 uint8_t freei; 2369 2370 MPASS(keg == slab->us_keg); 2371 mtx_assert(&keg->uk_lock, MA_OWNED); 2372 2373 freei = BIT_FFS(SLAB_SETSIZE, &slab->us_free) - 1; 2374 BIT_CLR(SLAB_SETSIZE, freei, &slab->us_free); 2375 item = slab->us_data + (keg->uk_rsize * freei); 2376 slab->us_freecount--; 2377 keg->uk_free--; 2378 2379 /* Move this slab to the full list */ 2380 if (slab->us_freecount == 0) { 2381 LIST_REMOVE(slab, us_link); 2382 LIST_INSERT_HEAD(&keg->uk_full_slab, slab, us_link); 2383 } 2384 2385 return (item); 2386 } 2387 2388 static int 2389 zone_import(uma_zone_t zone, void **bucket, int max, int flags) 2390 { 2391 uma_slab_t slab; 2392 uma_keg_t keg; 2393 int i; 2394 2395 slab = NULL; 2396 keg = NULL; 2397 /* Try to keep the buckets totally full */ 2398 for (i = 0; i < max; ) { 2399 if ((slab = zone->uz_slab(zone, keg, flags)) == NULL) 2400 break; 2401 keg = slab->us_keg; 2402 while (slab->us_freecount && i < max) { 2403 bucket[i++] = slab_alloc_item(keg, slab); 2404 if (keg->uk_free <= keg->uk_reserve) 2405 break; 2406 } 2407 /* Don't grab more than one slab at a time. */ 2408 flags &= ~M_WAITOK; 2409 flags |= M_NOWAIT; 2410 } 2411 if (slab != NULL) 2412 KEG_UNLOCK(keg); 2413 2414 return i; 2415 } 2416 2417 static uma_bucket_t 2418 zone_alloc_bucket(uma_zone_t zone, void *udata, int flags) 2419 { 2420 uma_bucket_t bucket; 2421 int max; 2422 2423 /* Don't wait for buckets, preserve caller's NOVM setting. */ 2424 bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM)); 2425 if (bucket == NULL) 2426 goto out; 2427 2428 max = MIN(bucket->ub_entries, zone->uz_count); 2429 bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket, 2430 max, flags); 2431 2432 /* 2433 * Initialize the memory if necessary. 
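 * (uz_init has the uma_init signature, roughly an
 *	int foo_init(void *mem, int size, int flags)
 * for a hypothetical zone "foo"; it runs once per item imported into
 * the bucket here, while the ctor runs later, on every allocation, in
 * uma_zalloc_arg().)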
2434 */ 2435 if (bucket->ub_cnt != 0 && zone->uz_init != NULL) { 2436 int i; 2437 2438 for (i = 0; i < bucket->ub_cnt; i++) 2439 if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size, 2440 flags) != 0) 2441 break; 2442 /* 2443 * If we couldn't initialize the whole bucket, put the 2444 * rest back onto the freelist. 2445 */ 2446 if (i != bucket->ub_cnt) { 2447 zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i], 2448 bucket->ub_cnt - i); 2449 #ifdef INVARIANTS 2450 bzero(&bucket->ub_bucket[i], 2451 sizeof(void *) * (bucket->ub_cnt - i)); 2452 #endif 2453 bucket->ub_cnt = i; 2454 } 2455 } 2456 2457 out: 2458 if (bucket == NULL || bucket->ub_cnt == 0) { 2459 if (bucket != NULL) 2460 bucket_free(zone, bucket, udata); 2461 atomic_add_long(&zone->uz_fails, 1); 2462 return (NULL); 2463 } 2464 2465 return (bucket); 2466 } 2467 2468 /* 2469 * Allocates a single item from a zone. 2470 * 2471 * Arguments 2472 * zone The zone to alloc for. 2473 * udata The data to be passed to the constructor. 2474 * flags M_WAITOK, M_NOWAIT, M_ZERO. 2475 * 2476 * Returns 2477 * NULL if there is no memory and M_NOWAIT is set 2478 * An item if successful 2479 */ 2480 2481 static void * 2482 zone_alloc_item(uma_zone_t zone, void *udata, int flags) 2483 { 2484 void *item; 2485 2486 item = NULL; 2487 2488 #ifdef UMA_DEBUG_ALLOC 2489 printf("INTERNAL: Allocating one item from %s(%p)\n", zone->uz_name, zone); 2490 #endif 2491 if (zone->uz_import(zone->uz_arg, &item, 1, flags) != 1) 2492 goto fail; 2493 atomic_add_long(&zone->uz_allocs, 1); 2494 2495 /* 2496 * We have to call both the zone's init (not the keg's init) 2497 * and the zone's ctor. This is because the item is going from 2498 * a keg slab directly to the user, and the user is expecting it 2499 * to be both zone-init'd as well as zone-ctor'd. 2500 */ 2501 if (zone->uz_init != NULL) { 2502 if (zone->uz_init(item, zone->uz_size, flags) != 0) { 2503 zone_free_item(zone, item, udata, SKIP_FINI); 2504 goto fail; 2505 } 2506 } 2507 if (zone->uz_ctor != NULL) { 2508 if (zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) { 2509 zone_free_item(zone, item, udata, SKIP_DTOR); 2510 goto fail; 2511 } 2512 } 2513 #ifdef INVARIANTS 2514 uma_dbg_alloc(zone, NULL, item); 2515 #endif 2516 if (flags & M_ZERO) 2517 bzero(item, zone->uz_size); 2518 2519 return (item); 2520 2521 fail: 2522 atomic_add_long(&zone->uz_fails, 1); 2523 return (NULL); 2524 } 2525 2526 /* See uma.h */ 2527 void 2528 uma_zfree_arg(uma_zone_t zone, void *item, void *udata) 2529 { 2530 uma_cache_t cache; 2531 uma_bucket_t bucket; 2532 int cpu; 2533 2534 #ifdef UMA_DEBUG_ALLOC_1 2535 printf("Freeing item %p to %s(%p)\n", item, zone->uz_name, zone); 2536 #endif 2537 CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread, 2538 zone->uz_name); 2539 2540 /* uma_zfree(..., NULL) does nothing, to match free(9). */ 2541 if (item == NULL) 2542 return; 2543 #ifdef DEBUG_MEMGUARD 2544 if (is_memguard_addr(item)) { 2545 if (zone->uz_dtor != NULL && zone->uz_dtor != mtrash_dtor) 2546 zone->uz_dtor(item, zone->uz_size, udata); 2547 if (zone->uz_fini != NULL && zone->uz_fini != mtrash_fini) 2548 zone->uz_fini(item, zone->uz_size); 2549 memguard_free(item); 2550 return; 2551 } 2552 #endif 2553 #ifdef INVARIANTS 2554 if (zone->uz_flags & UMA_ZONE_MALLOC) 2555 uma_dbg_free(zone, udata, item); 2556 else 2557 uma_dbg_free(zone, NULL, item); 2558 #endif 2559 if (zone->uz_dtor != NULL) 2560 zone->uz_dtor(item, zone->uz_size, udata); 2561 2562 /* 2563 * The race here is acceptable. 
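 * (The unlocked test of UMA_ZFLAG_FULL below can be stale in either
 * direction; the flag is set in keg_fetch_slab() and cleared in
 * zone_release(), for example.)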
If we miss it we'll just have to wait 2564 * a little longer for the limits to be reset. 2565 */ 2566 if (zone->uz_flags & UMA_ZFLAG_FULL) 2567 goto zfree_item; 2568 2569 /* 2570 * If possible, free to the per-CPU cache. There are two 2571 * requirements for safe access to the per-CPU cache: (1) the thread 2572 * accessing the cache must not be preempted or yield during access, 2573 * and (2) the thread must not migrate CPUs without switching which 2574 * cache it accesses. We rely on a critical section to prevent 2575 * preemption and migration. We release the critical section in 2576 * order to acquire the zone mutex if we are unable to free to the 2577 * current cache; when we re-acquire the critical section, we must 2578 * detect and handle migration if it has occurred. 2579 */ 2580 zfree_restart: 2581 critical_enter(); 2582 cpu = curcpu; 2583 cache = &zone->uz_cpu[cpu]; 2584 2585 zfree_start: 2586 /* 2587 * Try to free into the allocbucket first to give LIFO ordering 2588 * for cache-hot data structures. Spill over into the freebucket 2589 * if necessary. Alloc will swap them if one runs dry. 2590 */ 2591 bucket = cache->uc_allocbucket; 2592 if (bucket == NULL || bucket->ub_cnt >= bucket->ub_entries) 2593 bucket = cache->uc_freebucket; 2594 if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) { 2595 KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL, 2596 ("uma_zfree: Freeing to non free bucket index.")); 2597 bucket->ub_bucket[bucket->ub_cnt] = item; 2598 bucket->ub_cnt++; 2599 cache->uc_frees++; 2600 critical_exit(); 2601 return; 2602 } 2603 2604 /* 2605 * We must go back to the zone, which requires acquiring the zone lock, 2606 * which in turn means we must release and re-acquire the critical 2607 * section. Since the critical section is released, we may be 2608 * preempted or migrate. As such, make sure not to maintain any 2609 * thread-local state specific to the cache from prior to releasing 2610 * the critical section. 2611 */ 2612 critical_exit(); 2613 if (zone->uz_count == 0 || bucketdisable) 2614 goto zfree_item; 2615 2616 ZONE_LOCK(zone); 2617 critical_enter(); 2618 cpu = curcpu; 2619 cache = &zone->uz_cpu[cpu]; 2620 2621 /* 2622 * Since we have locked the zone we may as well send back our stats. 2623 */ 2624 atomic_add_long(&zone->uz_allocs, cache->uc_allocs); 2625 atomic_add_long(&zone->uz_frees, cache->uc_frees); 2626 cache->uc_allocs = 0; 2627 cache->uc_frees = 0; 2628 2629 bucket = cache->uc_freebucket; 2630 if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) { 2631 ZONE_UNLOCK(zone); 2632 goto zfree_start; 2633 } 2634 cache->uc_freebucket = NULL; 2635 2636 /* Can we throw this on the zone full list? */ 2637 if (bucket != NULL) { 2638 #ifdef UMA_DEBUG_ALLOC 2639 printf("uma_zfree: Putting old bucket on the free list.\n"); 2640 #endif 2641 /* ub_cnt is pointing to the last free item */ 2642 KASSERT(bucket->ub_cnt != 0, 2643 ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n")); 2644 LIST_INSERT_HEAD(&zone->uz_buckets, bucket, ub_link); 2645 } 2646 2647 /* We are no longer associated with this CPU. */ 2648 critical_exit(); 2649 2650 /* And the zone..
*/ 2651 ZONE_UNLOCK(zone); 2652 2653 #ifdef UMA_DEBUG_ALLOC 2654 printf("uma_zfree: Allocating new free bucket.\n"); 2655 #endif 2656 bucket = bucket_alloc(zone, udata, M_NOWAIT); 2657 if (bucket) { 2658 critical_enter(); 2659 cpu = curcpu; 2660 cache = &zone->uz_cpu[cpu]; 2661 if (cache->uc_freebucket == NULL) { 2662 cache->uc_freebucket = bucket; 2663 goto zfree_start; 2664 } 2665 /* 2666 * We lost the race, start over. We have to drop our 2667 * critical section to free the bucket. 2668 */ 2669 critical_exit(); 2670 bucket_free(zone, bucket, udata); 2671 goto zfree_restart; 2672 } 2673 2674 /* 2675 * If nothing else caught this, we'll just do an internal free. 2676 */ 2677 zfree_item: 2678 zone_free_item(zone, item, udata, SKIP_DTOR); 2679 2680 return; 2681 } 2682 2683 static void 2684 slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item) 2685 { 2686 uint8_t freei; 2687 2688 mtx_assert(&keg->uk_lock, MA_OWNED); 2689 MPASS(keg == slab->us_keg); 2690 2691 /* Do we need to remove from any lists? */ 2692 if (slab->us_freecount+1 == keg->uk_ipers) { 2693 LIST_REMOVE(slab, us_link); 2694 LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link); 2695 } else if (slab->us_freecount == 0) { 2696 LIST_REMOVE(slab, us_link); 2697 LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link); 2698 } 2699 2700 /* Slab management. */ 2701 freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize; 2702 BIT_SET(SLAB_SETSIZE, freei, &slab->us_free); 2703 slab->us_freecount++; 2704 2705 /* Keg statistics. */ 2706 keg->uk_free++; 2707 } 2708 2709 static void 2710 zone_release(uma_zone_t zone, void **bucket, int cnt) 2711 { 2712 void *item; 2713 uma_slab_t slab; 2714 uma_keg_t keg; 2715 uint8_t *mem; 2716 int clearfull; 2717 int i; 2718 2719 clearfull = 0; 2720 keg = zone_first_keg(zone); 2721 KEG_LOCK(keg); 2722 for (i = 0; i < cnt; i++) { 2723 item = bucket[i]; 2724 if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) { 2725 mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK)); 2726 if (zone->uz_flags & UMA_ZONE_HASH) { 2727 slab = hash_sfind(&keg->uk_hash, mem); 2728 } else { 2729 mem += keg->uk_pgoff; 2730 slab = (uma_slab_t)mem; 2731 } 2732 } else { 2733 slab = vtoslab((vm_offset_t)item); 2734 if (slab->us_keg != keg) { 2735 KEG_UNLOCK(keg); 2736 keg = slab->us_keg; 2737 KEG_LOCK(keg); 2738 } 2739 } 2740 slab_free_item(keg, slab, item); 2741 if (keg->uk_flags & UMA_ZFLAG_FULL) { 2742 if (keg->uk_pages < keg->uk_maxpages) { 2743 keg->uk_flags &= ~UMA_ZFLAG_FULL; 2744 clearfull = 1; 2745 } 2746 2747 /* 2748 * We can handle one more allocation. Since we're 2749 * clearing ZFLAG_FULL, wake up all procs blocked 2750 * on pages. This should be uncommon, so keeping this 2751 * simple for now (rather than adding count of blocked 2752 * threads etc). 2753 */ 2754 wakeup(keg); 2755 } 2756 } 2757 KEG_UNLOCK(keg); 2758 if (clearfull) { 2759 ZONE_LOCK(zone); 2760 zone->uz_flags &= ~UMA_ZFLAG_FULL; 2761 wakeup(zone); 2762 ZONE_UNLOCK(zone); 2763 } 2764 2765 } 2766 2767 /* 2768 * Frees a single item to any zone. 
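 * This is the internal slow path: it applies the destructor and fini as
 * allowed by 'skip' and then hands the item back through uz_release,
 * which is zone_release() for ordinary keg-backed zones.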
2769 * 2770 * Arguments: 2771 * zone The zone to free to 2772 * item The item we're freeing 2773 * udata User supplied data for the dtor 2774 * skip Skip dtors and finis 2775 */ 2776 static void 2777 zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip) 2778 { 2779 2780 #ifdef INVARIANTS 2781 if (skip == SKIP_NONE) { 2782 if (zone->uz_flags & UMA_ZONE_MALLOC) 2783 uma_dbg_free(zone, udata, item); 2784 else 2785 uma_dbg_free(zone, NULL, item); 2786 } 2787 #endif 2788 if (skip < SKIP_DTOR && zone->uz_dtor) 2789 zone->uz_dtor(item, zone->uz_size, udata); 2790 2791 if (skip < SKIP_FINI && zone->uz_fini) 2792 zone->uz_fini(item, zone->uz_size); 2793 2794 atomic_add_long(&zone->uz_frees, 1); 2795 zone->uz_release(zone->uz_arg, &item, 1); 2796 } 2797 2798 /* See uma.h */ 2799 int 2800 uma_zone_set_max(uma_zone_t zone, int nitems) 2801 { 2802 uma_keg_t keg; 2803 2804 keg = zone_first_keg(zone); 2805 if (keg == NULL) 2806 return (0); 2807 KEG_LOCK(keg); 2808 keg->uk_maxpages = (nitems / keg->uk_ipers) * keg->uk_ppera; 2809 if (keg->uk_maxpages * keg->uk_ipers < nitems) 2810 keg->uk_maxpages += keg->uk_ppera; 2811 nitems = keg->uk_maxpages * keg->uk_ipers; 2812 KEG_UNLOCK(keg); 2813 2814 return (nitems); 2815 } 2816 2817 /* See uma.h */ 2818 int 2819 uma_zone_get_max(uma_zone_t zone) 2820 { 2821 int nitems; 2822 uma_keg_t keg; 2823 2824 keg = zone_first_keg(zone); 2825 if (keg == NULL) 2826 return (0); 2827 KEG_LOCK(keg); 2828 nitems = keg->uk_maxpages * keg->uk_ipers; 2829 KEG_UNLOCK(keg); 2830 2831 return (nitems); 2832 } 2833 2834 /* See uma.h */ 2835 void 2836 uma_zone_set_warning(uma_zone_t zone, const char *warning) 2837 { 2838 2839 ZONE_LOCK(zone); 2840 zone->uz_warning = warning; 2841 ZONE_UNLOCK(zone); 2842 } 2843 2844 /* See uma.h */ 2845 int 2846 uma_zone_get_cur(uma_zone_t zone) 2847 { 2848 int64_t nitems; 2849 u_int i; 2850 2851 ZONE_LOCK(zone); 2852 nitems = zone->uz_allocs - zone->uz_frees; 2853 CPU_FOREACH(i) { 2854 /* 2855 * See the comment in sysctl_vm_zone_stats() regarding the 2856 * safety of accessing the per-cpu caches. With the zone lock 2857 * held, it is safe, but can potentially result in stale data. 2858 */ 2859 nitems += zone->uz_cpu[i].uc_allocs - 2860 zone->uz_cpu[i].uc_frees; 2861 } 2862 ZONE_UNLOCK(zone); 2863 2864 return (nitems < 0 ? 
0 : nitems); 2865 } 2866 2867 /* See uma.h */ 2868 void 2869 uma_zone_set_init(uma_zone_t zone, uma_init uminit) 2870 { 2871 uma_keg_t keg; 2872 2873 keg = zone_first_keg(zone); 2874 KASSERT(keg != NULL, ("uma_zone_set_init: Invalid zone type")); 2875 KEG_LOCK(keg); 2876 KASSERT(keg->uk_pages == 0, 2877 ("uma_zone_set_init on non-empty keg")); 2878 keg->uk_init = uminit; 2879 KEG_UNLOCK(keg); 2880 } 2881 2882 /* See uma.h */ 2883 void 2884 uma_zone_set_fini(uma_zone_t zone, uma_fini fini) 2885 { 2886 uma_keg_t keg; 2887 2888 keg = zone_first_keg(zone); 2889 KASSERT(keg != NULL, ("uma_zone_set_init: Invalid zone type")); 2890 KEG_LOCK(keg); 2891 KASSERT(keg->uk_pages == 0, 2892 ("uma_zone_set_fini on non-empty keg")); 2893 keg->uk_fini = fini; 2894 KEG_UNLOCK(keg); 2895 } 2896 2897 /* See uma.h */ 2898 void 2899 uma_zone_set_zinit(uma_zone_t zone, uma_init zinit) 2900 { 2901 2902 ZONE_LOCK(zone); 2903 KASSERT(zone_first_keg(zone)->uk_pages == 0, 2904 ("uma_zone_set_zinit on non-empty keg")); 2905 zone->uz_init = zinit; 2906 ZONE_UNLOCK(zone); 2907 } 2908 2909 /* See uma.h */ 2910 void 2911 uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini) 2912 { 2913 2914 ZONE_LOCK(zone); 2915 KASSERT(zone_first_keg(zone)->uk_pages == 0, 2916 ("uma_zone_set_zfini on non-empty keg")); 2917 zone->uz_fini = zfini; 2918 ZONE_UNLOCK(zone); 2919 } 2920 2921 /* See uma.h */ 2922 /* XXX uk_freef is not actually used with the zone locked */ 2923 void 2924 uma_zone_set_freef(uma_zone_t zone, uma_free freef) 2925 { 2926 uma_keg_t keg; 2927 2928 keg = zone_first_keg(zone); 2929 KASSERT(keg != NULL, ("uma_zone_set_init: Invalid zone type")); 2930 KEG_LOCK(keg); 2931 keg->uk_freef = freef; 2932 KEG_UNLOCK(keg); 2933 } 2934 2935 /* See uma.h */ 2936 /* XXX uk_allocf is not actually used with the zone locked */ 2937 void 2938 uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf) 2939 { 2940 uma_keg_t keg; 2941 2942 keg = zone_first_keg(zone); 2943 KEG_LOCK(keg); 2944 keg->uk_allocf = allocf; 2945 KEG_UNLOCK(keg); 2946 } 2947 2948 /* See uma.h */ 2949 void 2950 uma_zone_reserve(uma_zone_t zone, int items) 2951 { 2952 uma_keg_t keg; 2953 2954 keg = zone_first_keg(zone); 2955 if (keg == NULL) 2956 return; 2957 KEG_LOCK(keg); 2958 keg->uk_reserve = items; 2959 KEG_UNLOCK(keg); 2960 2961 return; 2962 } 2963 2964 /* See uma.h */ 2965 int 2966 uma_zone_reserve_kva(uma_zone_t zone, int count) 2967 { 2968 uma_keg_t keg; 2969 vm_offset_t kva; 2970 int pages; 2971 2972 keg = zone_first_keg(zone); 2973 if (keg == NULL) 2974 return (0); 2975 pages = count / keg->uk_ipers; 2976 2977 if (pages * keg->uk_ipers < count) 2978 pages++; 2979 2980 #ifdef UMA_MD_SMALL_ALLOC 2981 if (keg->uk_ppera > 1) { 2982 #else 2983 if (1) { 2984 #endif 2985 kva = kmem_alloc_nofault(kernel_map, pages * UMA_SLAB_SIZE); 2986 if (kva == 0) 2987 return (0); 2988 } else 2989 kva = 0; 2990 KEG_LOCK(keg); 2991 keg->uk_kva = kva; 2992 keg->uk_offset = 0; 2993 keg->uk_maxpages = pages; 2994 #ifdef UMA_MD_SMALL_ALLOC 2995 keg->uk_allocf = (keg->uk_ppera > 1) ? 
noobj_alloc : uma_small_alloc; 2996 #else 2997 keg->uk_allocf = noobj_alloc; 2998 #endif 2999 keg->uk_flags |= UMA_ZONE_NOFREE; 3000 KEG_UNLOCK(keg); 3001 3002 return (1); 3003 } 3004 3005 /* See uma.h */ 3006 void 3007 uma_prealloc(uma_zone_t zone, int items) 3008 { 3009 int slabs; 3010 uma_slab_t slab; 3011 uma_keg_t keg; 3012 3013 keg = zone_first_keg(zone); 3014 if (keg == NULL) 3015 return; 3016 KEG_LOCK(keg); 3017 slabs = items / keg->uk_ipers; 3018 if (slabs * keg->uk_ipers < items) 3019 slabs++; 3020 while (slabs > 0) { 3021 slab = keg_alloc_slab(keg, zone, M_WAITOK); 3022 if (slab == NULL) 3023 break; 3024 MPASS(slab->us_keg == keg); 3025 LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link); 3026 slabs--; 3027 } 3028 KEG_UNLOCK(keg); 3029 } 3030 3031 /* See uma.h */ 3032 uint32_t * 3033 uma_find_refcnt(uma_zone_t zone, void *item) 3034 { 3035 uma_slabrefcnt_t slabref; 3036 uma_slab_t slab; 3037 uma_keg_t keg; 3038 uint32_t *refcnt; 3039 int idx; 3040 3041 slab = vtoslab((vm_offset_t)item & (~UMA_SLAB_MASK)); 3042 slabref = (uma_slabrefcnt_t)slab; 3043 keg = slab->us_keg; 3044 KASSERT(keg->uk_flags & UMA_ZONE_REFCNT, 3045 ("uma_find_refcnt(): zone possibly not UMA_ZONE_REFCNT")); 3046 idx = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize; 3047 refcnt = &slabref->us_refcnt[idx]; 3048 return refcnt; 3049 } 3050 3051 /* See uma.h */ 3052 void 3053 uma_reclaim(void) 3054 { 3055 #ifdef UMA_DEBUG 3056 printf("UMA: vm asked us to release pages!\n"); 3057 #endif 3058 bucket_enable(); 3059 zone_foreach(zone_drain); 3060 /* 3061 * Some slabs may have been freed but this zone will be visited early 3062 * we visit again so that we can free pages that are empty once other 3063 * zones are drained. We have to do the same for buckets. 3064 */ 3065 zone_drain(slabzone); 3066 zone_drain(slabrefzone); 3067 bucket_zone_drain(); 3068 } 3069 3070 /* See uma.h */ 3071 int 3072 uma_zone_exhausted(uma_zone_t zone) 3073 { 3074 int full; 3075 3076 ZONE_LOCK(zone); 3077 full = (zone->uz_flags & UMA_ZFLAG_FULL); 3078 ZONE_UNLOCK(zone); 3079 return (full); 3080 } 3081 3082 int 3083 uma_zone_exhausted_nolock(uma_zone_t zone) 3084 { 3085 return (zone->uz_flags & UMA_ZFLAG_FULL); 3086 } 3087 3088 void * 3089 uma_large_malloc(int size, int wait) 3090 { 3091 void *mem; 3092 uma_slab_t slab; 3093 uint8_t flags; 3094 3095 slab = zone_alloc_item(slabzone, NULL, wait); 3096 if (slab == NULL) 3097 return (NULL); 3098 mem = page_alloc(NULL, size, &flags, wait); 3099 if (mem) { 3100 vsetslab((vm_offset_t)mem, slab); 3101 slab->us_data = mem; 3102 slab->us_flags = flags | UMA_SLAB_MALLOC; 3103 slab->us_size = size; 3104 } else { 3105 zone_free_item(slabzone, slab, NULL, SKIP_NONE); 3106 } 3107 3108 return (mem); 3109 } 3110 3111 void 3112 uma_large_free(uma_slab_t slab) 3113 { 3114 vsetobj((vm_offset_t)slab->us_data, kmem_object); 3115 page_free(slab->us_data, slab->us_size, slab->us_flags); 3116 zone_free_item(slabzone, slab, NULL, SKIP_NONE); 3117 } 3118 3119 void 3120 uma_print_stats(void) 3121 { 3122 zone_foreach(uma_print_zone); 3123 } 3124 3125 static void 3126 slab_print(uma_slab_t slab) 3127 { 3128 printf("slab: keg %p, data %p, freecount %d\n", 3129 slab->us_keg, slab->us_data, slab->us_freecount); 3130 } 3131 3132 static void 3133 cache_print(uma_cache_t cache) 3134 { 3135 printf("alloc: %p(%d), free: %p(%d)\n", 3136 cache->uc_allocbucket, 3137 cache->uc_allocbucket?cache->uc_allocbucket->ub_cnt:0, 3138 cache->uc_freebucket, 3139 cache->uc_freebucket?cache->uc_freebucket->ub_cnt:0); 3140 } 3141 3142 
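/*
 * Illustrative consumer-side sketch (not part of this file) showing how the
 * reservation interfaces above fit together; the zone name, structure and
 * counts are hypothetical:
 *
 *	zone = uma_zcreate("foodata", sizeof(struct foodata), NULL, NULL,
 *	    NULL, NULL, UMA_ALIGN_PTR, 0);
 *	uma_zone_reserve(zone, 32);	(set aside 32 items)
 *	uma_prealloc(zone, 32);		(back the reserve with slabs up front)
 *
 * Ordinary allocations then stop short of the reserved items, while
 * critical paths may pass M_USE_RESERVE to uma_zalloc() to dip into them,
 * as implemented by the reserve check in keg_fetch_slab() above.
 */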
static void 3143 uma_print_keg(uma_keg_t keg) 3144 { 3145 uma_slab_t slab; 3146 3147 printf("keg: %s(%p) size %d(%d) flags %#x ipers %d ppera %d " 3148 "out %d free %d limit %d\n", 3149 keg->uk_name, keg, keg->uk_size, keg->uk_rsize, keg->uk_flags, 3150 keg->uk_ipers, keg->uk_ppera, 3151 (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free, 3152 (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers); 3153 printf("Part slabs:\n"); 3154 LIST_FOREACH(slab, &keg->uk_part_slab, us_link) 3155 slab_print(slab); 3156 printf("Free slabs:\n"); 3157 LIST_FOREACH(slab, &keg->uk_free_slab, us_link) 3158 slab_print(slab); 3159 printf("Full slabs:\n"); 3160 LIST_FOREACH(slab, &keg->uk_full_slab, us_link) 3161 slab_print(slab); 3162 } 3163 3164 void 3165 uma_print_zone(uma_zone_t zone) 3166 { 3167 uma_cache_t cache; 3168 uma_klink_t kl; 3169 int i; 3170 3171 printf("zone: %s(%p) size %d flags %#x\n", 3172 zone->uz_name, zone, zone->uz_size, zone->uz_flags); 3173 LIST_FOREACH(kl, &zone->uz_kegs, kl_link) 3174 uma_print_keg(kl->kl_keg); 3175 CPU_FOREACH(i) { 3176 cache = &zone->uz_cpu[i]; 3177 printf("CPU %d Cache:\n", i); 3178 cache_print(cache); 3179 } 3180 } 3181 3182 #ifdef DDB 3183 /* 3184 * Generate statistics across both the zone and its per-cpu cache's. Return 3185 * desired statistics if the pointer is non-NULL for that statistic. 3186 * 3187 * Note: does not update the zone statistics, as it can't safely clear the 3188 * per-CPU cache statistic. 3189 * 3190 * XXXRW: Following the uc_allocbucket and uc_freebucket pointers here isn't 3191 * safe from off-CPU; we should modify the caches to track this information 3192 * directly so that we don't have to. 3193 */ 3194 static void 3195 uma_zone_sumstat(uma_zone_t z, int *cachefreep, uint64_t *allocsp, 3196 uint64_t *freesp, uint64_t *sleepsp) 3197 { 3198 uma_cache_t cache; 3199 uint64_t allocs, frees, sleeps; 3200 int cachefree, cpu; 3201 3202 allocs = frees = sleeps = 0; 3203 cachefree = 0; 3204 CPU_FOREACH(cpu) { 3205 cache = &z->uz_cpu[cpu]; 3206 if (cache->uc_allocbucket != NULL) 3207 cachefree += cache->uc_allocbucket->ub_cnt; 3208 if (cache->uc_freebucket != NULL) 3209 cachefree += cache->uc_freebucket->ub_cnt; 3210 allocs += cache->uc_allocs; 3211 frees += cache->uc_frees; 3212 } 3213 allocs += z->uz_allocs; 3214 frees += z->uz_frees; 3215 sleeps += z->uz_sleeps; 3216 if (cachefreep != NULL) 3217 *cachefreep = cachefree; 3218 if (allocsp != NULL) 3219 *allocsp = allocs; 3220 if (freesp != NULL) 3221 *freesp = frees; 3222 if (sleepsp != NULL) 3223 *sleepsp = sleeps; 3224 } 3225 #endif /* DDB */ 3226 3227 static int 3228 sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS) 3229 { 3230 uma_keg_t kz; 3231 uma_zone_t z; 3232 int count; 3233 3234 count = 0; 3235 mtx_lock(&uma_mtx); 3236 LIST_FOREACH(kz, &uma_kegs, uk_link) { 3237 LIST_FOREACH(z, &kz->uk_zones, uz_link) 3238 count++; 3239 } 3240 mtx_unlock(&uma_mtx); 3241 return (sysctl_handle_int(oidp, &count, 0, req)); 3242 } 3243 3244 static int 3245 sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS) 3246 { 3247 struct uma_stream_header ush; 3248 struct uma_type_header uth; 3249 struct uma_percpu_stat ups; 3250 uma_bucket_t bucket; 3251 struct sbuf sbuf; 3252 uma_cache_t cache; 3253 uma_klink_t kl; 3254 uma_keg_t kz; 3255 uma_zone_t z; 3256 uma_keg_t k; 3257 int count, error, i; 3258 3259 error = sysctl_wire_old_buffer(req, 0); 3260 if (error != 0) 3261 return (error); 3262 sbuf_new_for_sysctl(&sbuf, NULL, 128, req); 3263 3264 count = 0; 3265 mtx_lock(&uma_mtx); 3266 LIST_FOREACH(kz, &uma_kegs, uk_link) { 3267 
LIST_FOREACH(z, &kz->uk_zones, uz_link) 3268 count++; 3269 } 3270 3271 /* 3272 * Insert stream header. 3273 */ 3274 bzero(&ush, sizeof(ush)); 3275 ush.ush_version = UMA_STREAM_VERSION; 3276 ush.ush_maxcpus = (mp_maxid + 1); 3277 ush.ush_count = count; 3278 (void)sbuf_bcat(&sbuf, &ush, sizeof(ush)); 3279 3280 LIST_FOREACH(kz, &uma_kegs, uk_link) { 3281 LIST_FOREACH(z, &kz->uk_zones, uz_link) { 3282 bzero(&uth, sizeof(uth)); 3283 ZONE_LOCK(z); 3284 strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME); 3285 uth.uth_align = kz->uk_align; 3286 uth.uth_size = kz->uk_size; 3287 uth.uth_rsize = kz->uk_rsize; 3288 LIST_FOREACH(kl, &z->uz_kegs, kl_link) { 3289 k = kl->kl_keg; 3290 uth.uth_maxpages += k->uk_maxpages; 3291 uth.uth_pages += k->uk_pages; 3292 uth.uth_keg_free += k->uk_free; 3293 uth.uth_limit = (k->uk_maxpages / k->uk_ppera) 3294 * k->uk_ipers; 3295 } 3296 3297 /* 3298 * A zone is secondary is it is not the first entry 3299 * on the keg's zone list. 3300 */ 3301 if ((z->uz_flags & UMA_ZONE_SECONDARY) && 3302 (LIST_FIRST(&kz->uk_zones) != z)) 3303 uth.uth_zone_flags = UTH_ZONE_SECONDARY; 3304 3305 LIST_FOREACH(bucket, &z->uz_buckets, ub_link) 3306 uth.uth_zone_free += bucket->ub_cnt; 3307 uth.uth_allocs = z->uz_allocs; 3308 uth.uth_frees = z->uz_frees; 3309 uth.uth_fails = z->uz_fails; 3310 uth.uth_sleeps = z->uz_sleeps; 3311 (void)sbuf_bcat(&sbuf, &uth, sizeof(uth)); 3312 /* 3313 * While it is not normally safe to access the cache 3314 * bucket pointers while not on the CPU that owns the 3315 * cache, we only allow the pointers to be exchanged 3316 * without the zone lock held, not invalidated, so 3317 * accept the possible race associated with bucket 3318 * exchange during monitoring. 3319 */ 3320 for (i = 0; i < (mp_maxid + 1); i++) { 3321 bzero(&ups, sizeof(ups)); 3322 if (kz->uk_flags & UMA_ZFLAG_INTERNAL) 3323 goto skip; 3324 if (CPU_ABSENT(i)) 3325 goto skip; 3326 cache = &z->uz_cpu[i]; 3327 if (cache->uc_allocbucket != NULL) 3328 ups.ups_cache_free += 3329 cache->uc_allocbucket->ub_cnt; 3330 if (cache->uc_freebucket != NULL) 3331 ups.ups_cache_free += 3332 cache->uc_freebucket->ub_cnt; 3333 ups.ups_allocs = cache->uc_allocs; 3334 ups.ups_frees = cache->uc_frees; 3335 skip: 3336 (void)sbuf_bcat(&sbuf, &ups, sizeof(ups)); 3337 } 3338 ZONE_UNLOCK(z); 3339 } 3340 } 3341 mtx_unlock(&uma_mtx); 3342 error = sbuf_finish(&sbuf); 3343 sbuf_delete(&sbuf); 3344 return (error); 3345 } 3346 3347 #ifdef DDB 3348 DB_SHOW_COMMAND(uma, db_show_uma) 3349 { 3350 uint64_t allocs, frees, sleeps; 3351 uma_bucket_t bucket; 3352 uma_keg_t kz; 3353 uma_zone_t z; 3354 int cachefree; 3355 3356 db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free", 3357 "Requests", "Sleeps"); 3358 LIST_FOREACH(kz, &uma_kegs, uk_link) { 3359 LIST_FOREACH(z, &kz->uk_zones, uz_link) { 3360 if (kz->uk_flags & UMA_ZFLAG_INTERNAL) { 3361 allocs = z->uz_allocs; 3362 frees = z->uz_frees; 3363 sleeps = z->uz_sleeps; 3364 cachefree = 0; 3365 } else 3366 uma_zone_sumstat(z, &cachefree, &allocs, 3367 &frees, &sleeps); 3368 if (!((z->uz_flags & UMA_ZONE_SECONDARY) && 3369 (LIST_FIRST(&kz->uk_zones) != z))) 3370 cachefree += kz->uk_free; 3371 LIST_FOREACH(bucket, &z->uz_buckets, ub_link) 3372 cachefree += bucket->ub_cnt; 3373 db_printf("%18s %8ju %8jd %8d %12ju %8ju\n", z->uz_name, 3374 (uintmax_t)kz->uk_size, 3375 (intmax_t)(allocs - frees), cachefree, 3376 (uintmax_t)allocs, sleeps); 3377 if (db_pager_quit) 3378 return; 3379 } 3380 } 3381 } 3382 #endif 3383
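/*
 * The DDB command defined above can be used from the kernel debugger to
 * inspect this state, e.g.:
 *
 *	db> show uma
 *
 * which walks every keg and zone and prints the Size, Used, Free, Requests
 * and Sleeps columns produced by db_show_uma().  Userland normally obtains
 * the same information from the stream emitted by sysctl_vm_zone_stats(),
 * typically through libmemstat(3) rather than by parsing the raw
 * uma_stream_header/uma_type_header records itself.
 */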