/*-
 * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson <jeff@FreeBSD.org>
 * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
 * Copyright (c) 2004-2006 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uma_core.c  Implementation of the Universal Memory allocator
 *
 * This allocator is intended to replace the multitude of similar object caches
 * in the standard FreeBSD kernel.  The intent is to be flexible as well as
 * efficient.  A primary design goal is to return unused memory to the rest of
 * the system.  This will make the system as a whole more flexible due to the
 * ability to move memory to subsystems which most need it instead of leaving
 * pools of reserved memory unused.
 *
 * The basic ideas stem from similar slab/zone based allocators whose algorithms
 * are well known.
 *
 */

/*
 * TODO:
 *	- Improve memory usage for large allocations
 *	- Investigate cache size adjustments
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/* I should really use ktr.. */
/*
#define UMA_DEBUG 1
#define UMA_DEBUG_ALLOC 1
#define UMA_DEBUG_ALLOC_1 1
*/

#include "opt_ddb.h"
#include "opt_param.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitset.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

#include <ddb/ddb.h>

#ifdef DEBUG_MEMGUARD
#include <vm/memguard.h>
#endif

/*
 * This is the zone and keg from which all zones are spawned.  The idea is that
 * even the zone & keg heads are allocated from the allocator, so we use the
 * bss section to bootstrap us.
 */
static struct uma_keg masterkeg;
static struct uma_zone masterzone_k;
static struct uma_zone masterzone_z;
static uma_zone_t kegs = &masterzone_k;
static uma_zone_t zones = &masterzone_z;

/* This is the zone from which all of uma_slab_t's are allocated. */
static uma_zone_t slabzone;
static uma_zone_t slabrefzone;	/* With refcounters (for UMA_ZONE_REFCNT) */

/*
 * The initial hash tables come out of this zone so they can be allocated
 * prior to malloc coming up.
 */
static uma_zone_t hashzone;

/* The boot-time adjusted value for cache line alignment. */
int uma_align_cache = 64 - 1;

static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");

/*
 * Are we allowed to allocate buckets?
 */
static int bucketdisable = 1;

/* Linked list of all kegs in the system */
static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);

/* This mutex protects the keg list */
static struct mtx_padalign uma_mtx;

/* Linked list of boot time pages */
static LIST_HEAD(,uma_slab) uma_boot_pages =
    LIST_HEAD_INITIALIZER(uma_boot_pages);

/* This mutex protects the boot time pages list */
static struct mtx_padalign uma_boot_pages_mtx;

/* Is the VM done starting up? */
static int booted = 0;
#define	UMA_STARTUP	1
#define	UMA_STARTUP2	2

/* Maximum number of allowed items-per-slab if the slab header is OFFPAGE */
static const u_int uma_max_ipers = SLAB_SETSIZE;

/*
 * Only mbuf clusters use ref zones.  Just provide enough references
 * to support the one user.  New code should not use the ref facility.
 */
static const u_int uma_max_ipers_ref = PAGE_SIZE / MCLBYTES;

/*
 * This is the handle used to schedule events that need to happen
 * outside of the allocation fast path.
 */
static struct callout uma_callout;
#define	UMA_TIMEOUT	20		/* Seconds for callout interval. */

/*
 * This structure is passed as the zone ctor arg so that I don't have to create
 * a special allocation function just for zones.
 */
struct uma_zctor_args {
	const char *name;
	size_t size;
	uma_ctor ctor;
	uma_dtor dtor;
	uma_init uminit;
	uma_fini fini;
	uma_import import;
	uma_release release;
	void *arg;
	uma_keg_t keg;
	int align;
	uint32_t flags;
};

struct uma_kctor_args {
	uma_zone_t zone;
	size_t size;
	uma_init uminit;
	uma_fini fini;
	int align;
	uint32_t flags;
};

struct uma_bucket_zone {
	uma_zone_t	ubz_zone;
	char		*ubz_name;
	int		ubz_entries;	/* Number of items it can hold. */
	int		ubz_maxsize;	/* Maximum allocation size per-item. */
};

/*
 * Compute the actual number of bucket entries to pack them in power
 * of two sizes for more efficient space utilization.
 */
#define	BUCKET_SIZE(n)						\
    (((sizeof(void *) * (n)) - sizeof(struct uma_bucket)) / sizeof(void *))

#define	BUCKET_MAX	BUCKET_SIZE(128)

struct uma_bucket_zone bucket_zones[] = {
	{ NULL, "4 Bucket", BUCKET_SIZE(4), 4096 },
	{ NULL, "8 Bucket", BUCKET_SIZE(8), 2048 },
	{ NULL, "16 Bucket", BUCKET_SIZE(16), 1024 },
	{ NULL, "32 Bucket", BUCKET_SIZE(32), 512 },
	{ NULL, "64 Bucket", BUCKET_SIZE(64), 256 },
	{ NULL, "128 Bucket", BUCKET_SIZE(128), 128 },
	{ NULL, NULL, 0}
};

/*
 * Flags and enumerations to be passed to internal functions.
 */
enum zfreeskip { SKIP_NONE = 0, SKIP_DTOR, SKIP_FINI };

/* Prototypes.. */

static void	*noobj_alloc(uma_zone_t, int, uint8_t *, int);
static void	*page_alloc(uma_zone_t, int, uint8_t *, int);
static void	*startup_alloc(uma_zone_t, int, uint8_t *, int);
static void	page_free(void *, int, uint8_t);
static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int);
static void	cache_drain(uma_zone_t);
static void	bucket_drain(uma_zone_t, uma_bucket_t);
static void	bucket_cache_drain(uma_zone_t zone);
static int	keg_ctor(void *, int, void *, int);
static void	keg_dtor(void *, int, void *);
static int	zone_ctor(void *, int, void *, int);
static void	zone_dtor(void *, int, void *);
static int	zero_init(void *, int, int);
static void	keg_small_init(uma_keg_t keg);
static void	keg_large_init(uma_keg_t keg);
static void	zone_foreach(void (*zfunc)(uma_zone_t));
static void	zone_timeout(uma_zone_t zone);
static int	hash_alloc(struct uma_hash *);
static int	hash_expand(struct uma_hash *, struct uma_hash *);
static void	hash_free(struct uma_hash *hash);
static void	uma_timeout(void *);
static void	uma_startup3(void);
static void	*zone_alloc_item(uma_zone_t, void *, int);
static void	zone_free_item(uma_zone_t, void *, void *, enum zfreeskip);
static void	bucket_enable(void);
static void	bucket_init(void);
static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int);
static void	bucket_free(uma_zone_t zone, uma_bucket_t, void *);
static void	bucket_zone_drain(void);
static uma_bucket_t zone_alloc_bucket(uma_zone_t zone, void *, int flags);
static uma_slab_t zone_fetch_slab(uma_zone_t zone, uma_keg_t last, int flags);
static uma_slab_t zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int flags);
static void	*slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
static void	slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item);
static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
    uma_fini fini, int align, uint32_t flags);
static int	zone_import(uma_zone_t zone, void **bucket, int max, int flags);
static void	zone_release(uma_zone_t zone, void **bucket, int cnt);

void uma_print_zone(uma_zone_t);
void uma_print_stats(void);
static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);

SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);

SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLTYPE_INT,
    0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones");

SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
    0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");

static int zone_warnings = 1;
TUNABLE_INT("vm.zone_warnings", &zone_warnings);
SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RW, &zone_warnings, 0,
    "Warn when UMA zones become full");

/*
 * This routine checks to see whether or not it's safe to enable buckets.
 */
static void
bucket_enable(void)
{
	bucketdisable = vm_page_count_min();
}

/*
 * Initialize bucket_zones, the array of zones of buckets of various sizes.
 *
 * For each zone, calculate the memory required for each bucket, consisting
 * of the header and an array of pointers.
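 *
 * As an illustration of the sizing below: each bucket zone is created with
 * roundup(sizeof(struct uma_bucket), sizeof(void *)) bytes of header plus
 * ubz_entries pointer slots, where ubz_entries was computed by BUCKET_SIZE()
 * above so that the header and the item array together pack into the
 * power-of-two budget chosen in bucket_zones[].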
 */
static void
bucket_init(void)
{
	struct uma_bucket_zone *ubz;
	int size;
	int i;

	for (i = 0, ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) {
		size = roundup(sizeof(struct uma_bucket), sizeof(void *));
		size += sizeof(void *) * ubz->ubz_entries;
		ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
		    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
		    UMA_ZONE_MTXCLASS | UMA_ZFLAG_BUCKET);
	}
}

/*
 * Given a desired number of entries for a bucket, return the zone from which
 * to allocate the bucket.
 */
static struct uma_bucket_zone *
bucket_zone_lookup(int entries)
{
	struct uma_bucket_zone *ubz;

	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
		if (ubz->ubz_entries >= entries)
			return (ubz);
	ubz--;
	return (ubz);
}

static int
bucket_select(int size)
{
	struct uma_bucket_zone *ubz;

	ubz = &bucket_zones[0];
	if (size > ubz->ubz_maxsize)
		return MAX((ubz->ubz_maxsize * ubz->ubz_entries) / size, 1);

	for (; ubz->ubz_entries != 0; ubz++)
		if (ubz->ubz_maxsize < size)
			break;
	ubz--;
	return (ubz->ubz_entries);
}

static uma_bucket_t
bucket_alloc(uma_zone_t zone, void *udata, int flags)
{
	struct uma_bucket_zone *ubz;
	uma_bucket_t bucket;

	/*
	 * This is to stop us from allocating per cpu buckets while we're
	 * running out of vm.boot_pages.  Otherwise, we would exhaust the
	 * boot pages.  This also prevents us from allocating buckets in
	 * low memory situations.
	 */
	if (bucketdisable)
		return (NULL);
	/*
	 * To limit bucket recursion we store the original zone flags
	 * in a cookie passed via zalloc_arg/zfree_arg.  This allows the
	 * NOVM flag to persist even through deep recursions.  We also
	 * store ZFLAG_BUCKET once we have recursed attempting to allocate
	 * a bucket for a bucket zone so we do not allow infinite bucket
	 * recursion.  This cookie will even persist to frees of unused
	 * buckets via the allocation path or bucket allocations in the
	 * free path.
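	 *
	 * Concretely, in the code below a regular zone replaces udata with
	 * its own uz_flags, while a bucket zone ORs UMA_ZFLAG_BUCKET into the
	 * cookie; the first test below bails out if that flag is already
	 * present, bounding the recursion to a single level.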
	 */
	if ((uintptr_t)udata & UMA_ZFLAG_BUCKET)
		return (NULL);
	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
		udata = (void *)(uintptr_t)zone->uz_flags;
	else
		udata = (void *)((uintptr_t)udata | UMA_ZFLAG_BUCKET);
	if ((uintptr_t)udata & UMA_ZFLAG_CACHEONLY)
		flags |= M_NOVM;
	ubz = bucket_zone_lookup(zone->uz_count);
	bucket = uma_zalloc_arg(ubz->ubz_zone, udata, flags);
	if (bucket) {
#ifdef INVARIANTS
		bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
#endif
		bucket->ub_cnt = 0;
		bucket->ub_entries = ubz->ubz_entries;
	}

	return (bucket);
}

static void
bucket_free(uma_zone_t zone, uma_bucket_t bucket, void *udata)
{
	struct uma_bucket_zone *ubz;

	KASSERT(bucket->ub_cnt == 0,
	    ("bucket_free: Freeing a non free bucket."));
	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
		udata = (void *)(uintptr_t)zone->uz_flags;
	ubz = bucket_zone_lookup(bucket->ub_entries);
	uma_zfree_arg(ubz->ubz_zone, bucket, udata);
}

static void
bucket_zone_drain(void)
{
	struct uma_bucket_zone *ubz;

	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
		zone_drain(ubz->ubz_zone);
}

static void
zone_log_warning(uma_zone_t zone)
{
	static const struct timeval warninterval = { 300, 0 };

	if (!zone_warnings || zone->uz_warning == NULL)
		return;

	if (ratecheck(&zone->uz_ratecheck, &warninterval))
		printf("[zone: %s] %s\n", zone->uz_name, zone->uz_warning);
}

static void
zone_foreach_keg(uma_zone_t zone, void (*kegfn)(uma_keg_t))
{
	uma_klink_t klink;

	LIST_FOREACH(klink, &zone->uz_kegs, kl_link)
		kegfn(klink->kl_keg);
}

/*
 * Routine called by timeout which is used to fire off some time interval
 * based calculations.  (stats, hash size, etc.)
 *
 * Arguments:
 *	arg   Unused
 *
 * Returns:
 *	Nothing
 */
static void
uma_timeout(void *unused)
{
	bucket_enable();
	zone_foreach(zone_timeout);

	/* Reschedule this event */
	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
}

/*
 * Routine to perform timeout driven calculations.  This expands the
 * hashes and does per cpu statistics aggregation.
 *
 * Returns nothing.
 */
static void
keg_timeout(uma_keg_t keg)
{

	KEG_LOCK(keg);
	/*
	 * Expand the keg hash table.
	 *
	 * This is done if the number of slabs is larger than the hash size.
	 * What I'm trying to do here is completely reduce collisions.  This
	 * may be a little aggressive.  Should I allow for two collisions max?
	 */
	if (keg->uk_flags & UMA_ZONE_HASH &&
	    keg->uk_pages / keg->uk_ppera >= keg->uk_hash.uh_hashsize) {
		struct uma_hash newhash;
		struct uma_hash oldhash;
		int ret;

		/*
		 * This is so involved because allocating and freeing
		 * while the keg lock is held will lead to deadlock.
		 * I have to do everything in stages and check for
		 * races.
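		 *
		 * The stages below are: snapshot the current hash (so
		 * hash_alloc() can see the old size), drop the keg lock and
		 * allocate the larger table, re-take the lock, migrate the
		 * entries with hash_expand(), and finally free whichever
		 * table is no longer needed once the lock is dropped again.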
		 */
		newhash = keg->uk_hash;
		KEG_UNLOCK(keg);
		ret = hash_alloc(&newhash);
		KEG_LOCK(keg);
		if (ret) {
			if (hash_expand(&keg->uk_hash, &newhash)) {
				oldhash = keg->uk_hash;
				keg->uk_hash = newhash;
			} else
				oldhash = newhash;

			KEG_UNLOCK(keg);
			hash_free(&oldhash);
			return;
		}
	}
	KEG_UNLOCK(keg);
}

static void
zone_timeout(uma_zone_t zone)
{

	zone_foreach_keg(zone, &keg_timeout);
}

/*
 * Allocate and zero fill the next sized hash table from the appropriate
 * backing store.
 *
 * Arguments:
 *	hash  A new hash structure with the old hash size in uh_hashsize
 *
 * Returns:
 *	1 on success and 0 on failure.
 */
static int
hash_alloc(struct uma_hash *hash)
{
	int oldsize;
	int alloc;

	oldsize = hash->uh_hashsize;

	/* We're just going to go to a power of two greater */
	if (oldsize) {
		hash->uh_hashsize = oldsize * 2;
		alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
		hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
		    M_UMAHASH, M_NOWAIT);
	} else {
		alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
		hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
		    M_WAITOK);
		hash->uh_hashsize = UMA_HASH_SIZE_INIT;
	}
	if (hash->uh_slab_hash) {
		bzero(hash->uh_slab_hash, alloc);
		hash->uh_hashmask = hash->uh_hashsize - 1;
		return (1);
	}

	return (0);
}

/*
 * Expands the hash table for HASH zones.  This is done from zone_timeout
 * to reduce collisions.  This must not be done in the regular allocation
 * path, otherwise, we can recurse on the vm while allocating pages.
 *
 * Arguments:
 *	oldhash  The hash you want to expand
 *	newhash  The hash structure for the new table
 *
 * Returns:
 *	1 if the entries were migrated to the new table, 0 otherwise.
 *
 * Discussion:
 */
static int
hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
{
	uma_slab_t slab;
	int hval;
	int i;

	if (!newhash->uh_slab_hash)
		return (0);

	if (oldhash->uh_hashsize >= newhash->uh_hashsize)
		return (0);

	/*
	 * I need to investigate hash algorithms for resizing without a
	 * full rehash.
	 */

	for (i = 0; i < oldhash->uh_hashsize; i++)
		while (!SLIST_EMPTY(&oldhash->uh_slab_hash[i])) {
			slab = SLIST_FIRST(&oldhash->uh_slab_hash[i]);
			SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[i], us_hlink);
			hval = UMA_HASH(newhash, slab->us_data);
			SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
			    slab, us_hlink);
		}

	return (1);
}

/*
 * Free the hash bucket to the appropriate backing store.
 *
 * Arguments:
 *	slab_hash  The hash bucket we're freeing
 *	hashsize   The number of entries in that hash bucket
 *
 * Returns:
 *	Nothing
 */
static void
hash_free(struct uma_hash *hash)
{
	if (hash->uh_slab_hash == NULL)
		return;
	if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
		zone_free_item(hashzone, hash->uh_slab_hash, NULL, SKIP_NONE);
	else
		free(hash->uh_slab_hash, M_UMAHASH);
}

/*
 * Frees all outstanding items in a bucket
 *
 * Arguments:
 *	zone   The zone to free to, must be unlocked.
 *	bucket The free/alloc bucket with items, cpu queue must be locked.
 *
 * Returns:
 *	Nothing
 */

static void
bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
{
	int i;

	if (bucket == NULL)
		return;

	if (zone->uz_fini)
		for (i = 0; i < bucket->ub_cnt; i++)
			zone->uz_fini(bucket->ub_bucket[i], zone->uz_size);
	zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt);
	bucket->ub_cnt = 0;
}

/*
 * Drains the per cpu caches for a zone.
 *
 * NOTE: This may only be called while the zone is being torn down, and not
 * during normal operation.  This is necessary in order that we do not have
 * to migrate CPUs to drain the per-CPU caches.
 *
 * Arguments:
 *	zone     The zone to drain, must be unlocked.
 *
 * Returns:
 *	Nothing
 */
static void
cache_drain(uma_zone_t zone)
{
	uma_cache_t cache;
	int cpu;

	/*
	 * XXX: It is safe to not lock the per-CPU caches, because we're
	 * tearing down the zone anyway.  I.e., there will be no further use
	 * of the caches at this point.
	 *
	 * XXX: It would be good to be able to assert that the zone is being
	 * torn down to prevent improper use of cache_drain().
	 *
	 * XXX: We lock the zone before passing into bucket_cache_drain() as
	 * it is used elsewhere.  Should the tear-down path be made special
	 * there in some form?
	 */
	CPU_FOREACH(cpu) {
		cache = &zone->uz_cpu[cpu];
		bucket_drain(zone, cache->uc_allocbucket);
		bucket_drain(zone, cache->uc_freebucket);
		if (cache->uc_allocbucket != NULL)
			bucket_free(zone, cache->uc_allocbucket, NULL);
		if (cache->uc_freebucket != NULL)
			bucket_free(zone, cache->uc_freebucket, NULL);
		cache->uc_allocbucket = cache->uc_freebucket = NULL;
	}
	ZONE_LOCK(zone);
	bucket_cache_drain(zone);
	ZONE_UNLOCK(zone);
}

/*
 * Drain the cached buckets from a zone.  Expects a locked zone on entry.
 */
static void
bucket_cache_drain(uma_zone_t zone)
{
	uma_bucket_t bucket;

	/*
	 * Drain the bucket queues and free the buckets, we just keep two per
	 * cpu (alloc/free).
	 */
	while ((bucket = LIST_FIRST(&zone->uz_buckets)) != NULL) {
		LIST_REMOVE(bucket, ub_link);
		ZONE_UNLOCK(zone);
		bucket_drain(zone, bucket);
		bucket_free(zone, bucket, NULL);
		ZONE_LOCK(zone);
	}
}

static void
keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
{
	uint8_t *mem;
	int i;
	uint8_t flags;

	mem = slab->us_data;
	flags = slab->us_flags;
	i = start;
	if (keg->uk_fini != NULL) {
		for (i--; i > -1; i--)
			keg->uk_fini(slab->us_data + (keg->uk_rsize * i),
			    keg->uk_size);
	}
	if (keg->uk_flags & UMA_ZONE_VTOSLAB) {
		vm_object_t obj;

		if (flags & UMA_SLAB_KMEM)
			obj = kmem_object;
		else if (flags & UMA_SLAB_KERNEL)
			obj = kernel_object;
		else
			obj = NULL;
		for (i = 0; i < keg->uk_ppera; i++)
			vsetobj((vm_offset_t)mem + (i * PAGE_SIZE), obj);
	}
	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
		zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
#ifdef UMA_DEBUG
	printf("%s: Returning %d bytes.\n", keg->uk_name,
	    PAGE_SIZE * keg->uk_ppera);
#endif
	keg->uk_freef(mem, PAGE_SIZE * keg->uk_ppera, flags);
}

/*
 * Frees pages from a keg back to the system.  This is done on demand from
 * the pageout daemon.
 *
 * Returns nothing.
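 *
 * The work is done in two passes: free slabs are unlinked from the keg and
 * collected on a local list while the keg lock is held (boot-time slabs are
 * skipped since they cannot be returned), and the pages themselves are only
 * handed back via keg_free_slab() after the lock has been dropped.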
 */
static void
keg_drain(uma_keg_t keg)
{
	struct slabhead freeslabs = { 0 };
	uma_slab_t slab;
	uma_slab_t n;

	/*
	 * We don't want to take pages from statically allocated kegs at this
	 * time
	 */
	if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
		return;

#ifdef UMA_DEBUG
	printf("%s free items: %u\n", keg->uk_name, keg->uk_free);
#endif
	KEG_LOCK(keg);
	if (keg->uk_free == 0)
		goto finished;

	slab = LIST_FIRST(&keg->uk_free_slab);
	while (slab) {
		n = LIST_NEXT(slab, us_link);

		/* We have nowhere to free these to */
		if (slab->us_flags & UMA_SLAB_BOOT) {
			slab = n;
			continue;
		}

		LIST_REMOVE(slab, us_link);
		keg->uk_pages -= keg->uk_ppera;
		keg->uk_free -= keg->uk_ipers;

		if (keg->uk_flags & UMA_ZONE_HASH)
			UMA_HASH_REMOVE(&keg->uk_hash, slab, slab->us_data);

		SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);

		slab = n;
	}
finished:
	KEG_UNLOCK(keg);

	while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
		SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
		keg_free_slab(keg, slab, 0);
	}
}

static void
zone_drain_wait(uma_zone_t zone, int waitok)
{

	/*
	 * Set draining to interlock with zone_dtor() so we can release our
	 * locks as we go.  Only dtor() should do a WAITOK call since it
	 * is the only call that knows the structure will still be available
	 * when it wakes up.
	 */
	ZONE_LOCK(zone);
	while (zone->uz_flags & UMA_ZFLAG_DRAINING) {
		if (waitok == M_NOWAIT)
			goto out;
		mtx_unlock(&uma_mtx);
		msleep(zone, zone->uz_lockptr, PVM, "zonedrain", 1);
		mtx_lock(&uma_mtx);
	}
	zone->uz_flags |= UMA_ZFLAG_DRAINING;
	bucket_cache_drain(zone);
	ZONE_UNLOCK(zone);
	/*
	 * The DRAINING flag protects us from being freed while
	 * we're running.  Normally the uma_mtx would protect us but we
	 * must be able to release and acquire the right lock for each keg.
	 */
	zone_foreach_keg(zone, &keg_drain);
	ZONE_LOCK(zone);
	zone->uz_flags &= ~UMA_ZFLAG_DRAINING;
	wakeup(zone);
out:
	ZONE_UNLOCK(zone);
}

void
zone_drain(uma_zone_t zone)
{

	zone_drain_wait(zone, M_NOWAIT);
}

/*
 * Allocate a new slab for a keg.  This does not insert the slab onto a list.
 *
 * Arguments:
 *	wait  Shall we wait?
 *
 * Returns:
 *	The slab that was allocated or NULL if there is no memory and the
 *	caller specified M_NOWAIT.
 */
static uma_slab_t
keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int wait)
{
	uma_slabrefcnt_t slabref;
	uma_alloc allocf;
	uma_slab_t slab;
	uint8_t *mem;
	uint8_t flags;
	int i;

	mtx_assert(&keg->uk_lock, MA_OWNED);
	slab = NULL;
	mem = NULL;

#ifdef UMA_DEBUG
	printf("alloc_slab:  Allocating a new slab for %s\n", keg->uk_name);
#endif
	allocf = keg->uk_allocf;
	KEG_UNLOCK(keg);

	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
		slab = zone_alloc_item(keg->uk_slabzone, NULL, wait);
		if (slab == NULL)
			goto out;
	}

	/*
	 * This reproduces the old vm_zone behavior of zero filling pages the
	 * first time they are added to a zone.
	 *
	 * Malloced items are zeroed in uma_zalloc.
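	 *
	 * Accordingly, M_ZERO is set below only for kegs without
	 * UMA_ZONE_MALLOC; malloc-backed kegs strip it and leave the zeroing
	 * to uma_zalloc().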
	 */

	if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
		wait |= M_ZERO;
	else
		wait &= ~M_ZERO;

	if (keg->uk_flags & UMA_ZONE_NODUMP)
		wait |= M_NODUMP;

	/* zone is passed for legacy reasons. */
	mem = allocf(zone, keg->uk_ppera * PAGE_SIZE, &flags, wait);
	if (mem == NULL) {
		if (keg->uk_flags & UMA_ZONE_OFFPAGE)
			zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
		slab = NULL;
		goto out;
	}

	/* Point the slab into the allocated memory */
	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
		slab = (uma_slab_t )(mem + keg->uk_pgoff);

	if (keg->uk_flags & UMA_ZONE_VTOSLAB)
		for (i = 0; i < keg->uk_ppera; i++)
			vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);

	slab->us_keg = keg;
	slab->us_data = mem;
	slab->us_freecount = keg->uk_ipers;
	slab->us_flags = flags;
	BIT_FILL(SLAB_SETSIZE, &slab->us_free);
#ifdef INVARIANTS
	BIT_ZERO(SLAB_SETSIZE, &slab->us_debugfree);
#endif
	if (keg->uk_flags & UMA_ZONE_REFCNT) {
		slabref = (uma_slabrefcnt_t)slab;
		for (i = 0; i < keg->uk_ipers; i++)
			slabref->us_refcnt[i] = 0;
	}

	if (keg->uk_init != NULL) {
		for (i = 0; i < keg->uk_ipers; i++)
			if (keg->uk_init(slab->us_data + (keg->uk_rsize * i),
			    keg->uk_size, wait) != 0)
				break;
		if (i != keg->uk_ipers) {
			keg_free_slab(keg, slab, i);
			slab = NULL;
			goto out;
		}
	}
out:
	KEG_LOCK(keg);

	if (slab != NULL) {
		if (keg->uk_flags & UMA_ZONE_HASH)
			UMA_HASH_INSERT(&keg->uk_hash, slab, mem);

		keg->uk_pages += keg->uk_ppera;
		keg->uk_free += keg->uk_ipers;
	}

	return (slab);
}

/*
 * This function is intended to be used early on in place of page_alloc() so
 * that we may use the boot time page cache to satisfy allocations before
 * the VM is ready.
 */
static void *
startup_alloc(uma_zone_t zone, int bytes, uint8_t *pflag, int wait)
{
	uma_keg_t keg;
	uma_slab_t tmps;
	int pages, check_pages;

	keg = zone_first_keg(zone);
	pages = howmany(bytes, PAGE_SIZE);
	check_pages = pages - 1;
	KASSERT(pages > 0, ("startup_alloc can't reserve 0 pages\n"));

	/*
	 * Check our small startup cache to see if it has pages remaining.
	 */
	mtx_lock(&uma_boot_pages_mtx);

	/* First check if we have enough room. */
	tmps = LIST_FIRST(&uma_boot_pages);
	while (tmps != NULL && check_pages-- > 0)
		tmps = LIST_NEXT(tmps, us_link);
	if (tmps != NULL) {
		/*
		 * It's ok to lose tmps references.  The last one will
		 * have tmps->us_data pointing to the start address of
		 * "pages" contiguous pages of memory.
		 */
		while (pages-- > 0) {
			tmps = LIST_FIRST(&uma_boot_pages);
			LIST_REMOVE(tmps, us_link);
		}
		mtx_unlock(&uma_boot_pages_mtx);
		*pflag = tmps->us_flags;
		return (tmps->us_data);
	}
	mtx_unlock(&uma_boot_pages_mtx);
	if (booted < UMA_STARTUP2)
		panic("UMA: Increase vm.boot_pages");
	/*
	 * Now that we've booted reset these users to their real allocator.
	 */
#ifdef UMA_MD_SMALL_ALLOC
	keg->uk_allocf = (keg->uk_ppera > 1) ? page_alloc : uma_small_alloc;
#else
	keg->uk_allocf = page_alloc;
#endif
	return keg->uk_allocf(zone, bytes, pflag, wait);
}

/*
 * Allocates a number of pages from the system
 *
 * Arguments:
 *	bytes  The number of bytes requested
 *	wait   Shall we wait?
 *
 * Returns:
 *	A pointer to the alloced memory or possibly
 *	NULL if M_NOWAIT is set.
 */
static void *
page_alloc(uma_zone_t zone, int bytes, uint8_t *pflag, int wait)
{
	void *p;	/* Returned page */

	*pflag = UMA_SLAB_KMEM;
	p = (void *) kmem_malloc(kmem_map, bytes, wait);

	return (p);
}

/*
 * Allocates a number of pages not belonging to a VM object
 *
 * Arguments:
 *	bytes  The number of bytes requested
 *	wait   Shall we wait?
 *
 * Returns:
 *	A pointer to the alloced memory or possibly
 *	NULL if M_NOWAIT is set.
 */
static void *
noobj_alloc(uma_zone_t zone, int bytes, uint8_t *flags, int wait)
{
	TAILQ_HEAD(, vm_page) alloctail;
	u_long npages;
	vm_offset_t retkva, zkva;
	vm_page_t p, p_next;
	uma_keg_t keg;

	TAILQ_INIT(&alloctail);
	keg = zone_first_keg(zone);

	npages = howmany(bytes, PAGE_SIZE);
	while (npages > 0) {
		p = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT |
		    VM_ALLOC_WIRED | VM_ALLOC_NOOBJ);
		if (p != NULL) {
			/*
			 * Since the page does not belong to an object, its
			 * listq is unused.
			 */
			TAILQ_INSERT_TAIL(&alloctail, p, listq);
			npages--;
			continue;
		}
		if (wait & M_WAITOK) {
			VM_WAIT;
			continue;
		}

		/*
		 * Page allocation failed, free intermediate pages and
		 * exit.
		 */
		TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
			vm_page_unwire(p, 0);
			vm_page_free(p);
		}
		return (NULL);
	}
	*flags = UMA_SLAB_PRIV;
	zkva = keg->uk_kva +
	    atomic_fetchadd_long(&keg->uk_offset, round_page(bytes));
	retkva = zkva;
	TAILQ_FOREACH(p, &alloctail, listq) {
		pmap_qenter(zkva, &p, 1);
		zkva += PAGE_SIZE;
	}

	return ((void *)retkva);
}

/*
 * Frees a number of pages to the system
 *
 * Arguments:
 *	mem   A pointer to the memory to be freed
 *	size  The size of the memory being freed
 *	flags The original p->us_flags field
 *
 * Returns:
 *	Nothing
 */
static void
page_free(void *mem, int size, uint8_t flags)
{
	vm_map_t map;

	if (flags & UMA_SLAB_KMEM)
		map = kmem_map;
	else if (flags & UMA_SLAB_KERNEL)
		map = kernel_map;
	else
		panic("UMA: page_free used with invalid flags %d", flags);

	kmem_free(map, (vm_offset_t)mem, size);
}

/*
 * Zero fill initializer
 *
 * Arguments/Returns follow uma_init specifications
 */
static int
zero_init(void *mem, int size, int flags)
{
	bzero(mem, size);
	return (0);
}

/*
 * Finish creating a small uma keg.  This calculates ipers, and the keg size.
 *
 * Arguments
 *	keg  The zone we should initialize
 *
 * Returns
 *	Nothing
 */
static void
keg_small_init(uma_keg_t keg)
{
	u_int rsize;
	u_int memused;
	u_int wastedspace;
	u_int shsize;

	if (keg->uk_flags & UMA_ZONE_PCPU) {
		u_int ncpus = mp_ncpus ? mp_ncpus : MAXCPU;

		keg->uk_slabsize = sizeof(struct pcpu);
		keg->uk_ppera = howmany(ncpus * sizeof(struct pcpu),
		    PAGE_SIZE);
	} else {
		keg->uk_slabsize = UMA_SLAB_SIZE;
		keg->uk_ppera = 1;
	}

	/*
	 * Calculate the size of each allocation (rsize) according to
	 * alignment.  If the requested size is smaller than we have
	 * allocation bits for we round it up.
	 */
	rsize = keg->uk_size;
	if (rsize < keg->uk_slabsize / SLAB_SETSIZE)
		rsize = keg->uk_slabsize / SLAB_SETSIZE;
	if (rsize & keg->uk_align)
		rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1);
	keg->uk_rsize = rsize;

	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0 ||
	    keg->uk_rsize < sizeof(struct pcpu),
	    ("%s: size %u too large", __func__, keg->uk_rsize));

	if (keg->uk_flags & UMA_ZONE_REFCNT)
		rsize += sizeof(uint32_t);

	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
		shsize = 0;
	else
		shsize = sizeof(struct uma_slab);

	keg->uk_ipers = (keg->uk_slabsize - shsize) / rsize;
	KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
	    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));

	memused = keg->uk_ipers * rsize + shsize;
	wastedspace = keg->uk_slabsize - memused;

	/*
	 * We can't do OFFPAGE if we're internal or if we've been
	 * asked to not go to the VM for buckets.  If we do this we
	 * may end up going to the VM for slabs which we do not
	 * want to do if we're UMA_ZFLAG_CACHEONLY as a result
	 * of UMA_ZONE_VM, which clearly forbids it.
	 */
	if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
	    (keg->uk_flags & UMA_ZFLAG_CACHEONLY))
		return;

	/*
	 * See if using an OFFPAGE slab will limit our waste.  Only do
	 * this if it permits more items per-slab.
	 *
	 * XXX We could try growing slabsize to limit max waste as well.
	 * Historically this was not done because the VM could not
	 * efficiently handle contiguous allocations.
	 */
	if ((wastedspace >= keg->uk_slabsize / UMA_MAX_WASTE) &&
	    (keg->uk_ipers < (keg->uk_slabsize / keg->uk_rsize))) {
		keg->uk_ipers = keg->uk_slabsize / keg->uk_rsize;
		KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
		    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
#ifdef UMA_DEBUG
		printf("UMA decided we need offpage slab headers for "
		    "keg: %s, calculated wastedspace = %d, "
		    "maximum wasted space allowed = %d, "
		    "calculated ipers = %d, "
		    "new wasted space = %d\n", keg->uk_name, wastedspace,
		    keg->uk_slabsize / UMA_MAX_WASTE, keg->uk_ipers,
		    keg->uk_slabsize - keg->uk_ipers * keg->uk_rsize);
#endif
		keg->uk_flags |= UMA_ZONE_OFFPAGE;
	}

	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
	    (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
		keg->uk_flags |= UMA_ZONE_HASH;
}

/*
 * Finish creating a large (> UMA_SLAB_SIZE) uma keg.  Just give in and do
 * OFFPAGE for now.  When I can allow for more dynamic slab sizes this will be
 * more complicated.
 *
 * Arguments
 *	keg  The keg we should initialize
 *
 * Returns
 *	Nothing
 */
static void
keg_large_init(uma_keg_t keg)
{

	KASSERT(keg != NULL, ("Keg is null in keg_large_init"));
	KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0,
	    ("keg_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY keg"));
	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
	    ("%s: Cannot large-init a UMA_ZONE_PCPU keg", __func__));

	keg->uk_ppera = howmany(keg->uk_size, PAGE_SIZE);
	keg->uk_slabsize = keg->uk_ppera * PAGE_SIZE;
	keg->uk_ipers = 1;
	keg->uk_rsize = keg->uk_size;

	/* We can't do OFFPAGE if we're internal, bail out here. */
	if (keg->uk_flags & UMA_ZFLAG_INTERNAL)
		return;

	keg->uk_flags |= UMA_ZONE_OFFPAGE;
	if ((keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
		keg->uk_flags |= UMA_ZONE_HASH;
}

static void
keg_cachespread_init(uma_keg_t keg)
{
	int alignsize;
	int trailer;
	int pages;
	int rsize;

	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
	    ("%s: Cannot cachespread-init a UMA_ZONE_PCPU keg", __func__));

	alignsize = keg->uk_align + 1;
	rsize = keg->uk_size;
	/*
	 * We want one item to start on every align boundary in a page.  To
	 * do this we will span pages.  We will also extend the item by the
	 * size of align if it is an even multiple of align.  Otherwise, it
	 * would fall on the same boundary every time.
	 */
	if (rsize & keg->uk_align)
		rsize = (rsize & ~keg->uk_align) + alignsize;
	if ((rsize & alignsize) == 0)
		rsize += alignsize;
	trailer = rsize - keg->uk_size;
	pages = (rsize * (PAGE_SIZE / alignsize)) / PAGE_SIZE;
	pages = MIN(pages, (128 * 1024) / PAGE_SIZE);
	keg->uk_rsize = rsize;
	keg->uk_ppera = pages;
	keg->uk_slabsize = UMA_SLAB_SIZE;
	keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize;
	keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
	KASSERT(keg->uk_ipers <= uma_max_ipers,
	    ("%s: keg->uk_ipers too high(%d) increase max_ipers", __func__,
	    keg->uk_ipers));
}

/*
 * Keg header ctor.  This initializes all fields, locks, etc.  And inserts
 * the keg onto the global keg list.
 *
 * Arguments/Returns follow uma_ctor specifications
 *	udata  Actually uma_kctor_args
 */
static int
keg_ctor(void *mem, int size, void *udata, int flags)
{
	struct uma_kctor_args *arg = udata;
	uma_keg_t keg = mem;
	uma_zone_t zone;

	bzero(keg, size);
	keg->uk_size = arg->size;
	keg->uk_init = arg->uminit;
	keg->uk_fini = arg->fini;
	keg->uk_align = arg->align;
	keg->uk_free = 0;
	keg->uk_reserve = 0;
	keg->uk_pages = 0;
	keg->uk_flags = arg->flags;
	keg->uk_allocf = page_alloc;
	keg->uk_freef = page_free;
	keg->uk_slabzone = NULL;

	/*
	 * The master zone is passed to us at keg-creation time.
	 */
	zone = arg->zone;
	keg->uk_name = zone->uz_name;

	if (arg->flags & UMA_ZONE_VM)
		keg->uk_flags |= UMA_ZFLAG_CACHEONLY;

	if (arg->flags & UMA_ZONE_ZINIT)
		keg->uk_init = zero_init;

	if (arg->flags & UMA_ZONE_REFCNT || arg->flags & UMA_ZONE_MALLOC)
		keg->uk_flags |= UMA_ZONE_VTOSLAB;

	if (arg->flags & UMA_ZONE_PCPU)
#ifdef SMP
		keg->uk_flags |= UMA_ZONE_OFFPAGE;
#else
		keg->uk_flags &= ~UMA_ZONE_PCPU;
#endif

	if (keg->uk_flags & UMA_ZONE_CACHESPREAD) {
		keg_cachespread_init(keg);
	} else if (keg->uk_flags & UMA_ZONE_REFCNT) {
		if (keg->uk_size >
		    (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt) -
		    sizeof(uint32_t)))
			keg_large_init(keg);
		else
			keg_small_init(keg);
	} else {
		if (keg->uk_size > (UMA_SLAB_SIZE - sizeof(struct uma_slab)))
			keg_large_init(keg);
		else
			keg_small_init(keg);
	}

	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
		if (keg->uk_flags & UMA_ZONE_REFCNT) {
			if (keg->uk_ipers > uma_max_ipers_ref)
				panic("Too many ref items per zone: %d > %d\n",
				    keg->uk_ipers, uma_max_ipers_ref);
			keg->uk_slabzone = slabrefzone;
		} else
			keg->uk_slabzone = slabzone;
	}

	/*
	 * If we haven't booted yet we need allocations to go through the
	 * startup cache until the vm is ready.
	 */
	if (keg->uk_ppera == 1) {
#ifdef UMA_MD_SMALL_ALLOC
		keg->uk_allocf = uma_small_alloc;
		keg->uk_freef = uma_small_free;

		if (booted < UMA_STARTUP)
			keg->uk_allocf = startup_alloc;
#else
		if (booted < UMA_STARTUP2)
			keg->uk_allocf = startup_alloc;
#endif
	} else if (booted < UMA_STARTUP2 &&
	    (keg->uk_flags & UMA_ZFLAG_INTERNAL))
		keg->uk_allocf = startup_alloc;

	/*
	 * Initialize keg's lock
	 */
	KEG_LOCK_INIT(keg, (arg->flags & UMA_ZONE_MTXCLASS));

	/*
	 * If we're putting the slab header in the actual page we need to
	 * figure out where in each page it goes.  This calculates a right
	 * justified offset into the memory on an ALIGN_PTR boundary.
	 */
	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) {
		u_int totsize;

		/* Size of the slab struct and free list */
		totsize = sizeof(struct uma_slab);

		/* Size of the reference counts. */
		if (keg->uk_flags & UMA_ZONE_REFCNT)
			totsize += keg->uk_ipers * sizeof(uint32_t);

		if (totsize & UMA_ALIGN_PTR)
			totsize = (totsize & ~UMA_ALIGN_PTR) +
			    (UMA_ALIGN_PTR + 1);
		keg->uk_pgoff = (PAGE_SIZE * keg->uk_ppera) - totsize;

		/*
		 * The only way the following is possible is if with our
		 * UMA_ALIGN_PTR adjustments we are now bigger than
		 * UMA_SLAB_SIZE.  I haven't checked whether this is
		 * mathematically possible for all cases, so we make
		 * sure here anyway.
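		 *
		 * The check below recomputes the total slab footprint from
		 * the chosen uk_pgoff and panics if the header would no
		 * longer fit within the slab's pages.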
		 */
		totsize = keg->uk_pgoff + sizeof(struct uma_slab);
		if (keg->uk_flags & UMA_ZONE_REFCNT)
			totsize += keg->uk_ipers * sizeof(uint32_t);
		if (totsize > PAGE_SIZE * keg->uk_ppera) {
			printf("zone %s ipers %d rsize %d size %d\n",
			    zone->uz_name, keg->uk_ipers, keg->uk_rsize,
			    keg->uk_size);
			panic("UMA slab won't fit.");
		}
	}

	if (keg->uk_flags & UMA_ZONE_HASH)
		hash_alloc(&keg->uk_hash);

#ifdef UMA_DEBUG
	printf("UMA: %s(%p) size %d(%d) flags %#x ipers %d ppera %d out %d free %d\n",
	    zone->uz_name, zone, keg->uk_size, keg->uk_rsize, keg->uk_flags,
	    keg->uk_ipers, keg->uk_ppera,
	    (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free);
#endif

	LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);

	mtx_lock(&uma_mtx);
	LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
	mtx_unlock(&uma_mtx);
	return (0);
}

/*
 * Zone header ctor.  This initializes all fields, locks, etc.
 *
 * Arguments/Returns follow uma_ctor specifications
 *	udata  Actually uma_zctor_args
 */
static int
zone_ctor(void *mem, int size, void *udata, int flags)
{
	struct uma_zctor_args *arg = udata;
	uma_zone_t zone = mem;
	uma_zone_t z;
	uma_keg_t keg;

	bzero(zone, size);
	zone->uz_name = arg->name;
	zone->uz_ctor = arg->ctor;
	zone->uz_dtor = arg->dtor;
	zone->uz_slab = zone_fetch_slab;
	zone->uz_init = NULL;
	zone->uz_fini = NULL;
	zone->uz_allocs = 0;
	zone->uz_frees = 0;
	zone->uz_fails = 0;
	zone->uz_sleeps = 0;
	zone->uz_count = 0;
	zone->uz_flags = 0;
	zone->uz_warning = NULL;
	timevalclear(&zone->uz_ratecheck);
	keg = arg->keg;

	ZONE_LOCK_INIT(zone, (arg->flags & UMA_ZONE_MTXCLASS));

	/*
	 * This is a pure cache zone, no kegs.
	 */
	if (arg->import) {
		if (arg->flags & UMA_ZONE_VM)
			arg->flags |= UMA_ZFLAG_CACHEONLY;
		zone->uz_flags = arg->flags;
		zone->uz_size = arg->size;
		zone->uz_import = arg->import;
		zone->uz_release = arg->release;
		zone->uz_arg = arg->arg;
		zone->uz_lockptr = &zone->uz_lock;
		goto out;
	}

	/*
	 * Use the regular zone/keg/slab allocator.
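	 *
	 * zone_import() and zone_release() are installed as the backend here
	 * so that the rest of the allocator can go through the same
	 * uz_import/uz_release interface that pure cache zones created with
	 * uma_zcache_create() provide.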
	 */
	zone->uz_import = (uma_import)zone_import;
	zone->uz_release = (uma_release)zone_release;
	zone->uz_arg = zone;

	if (arg->flags & UMA_ZONE_SECONDARY) {
		KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
		zone->uz_init = arg->uminit;
		zone->uz_fini = arg->fini;
		zone->uz_lockptr = &keg->uk_lock;
		zone->uz_flags |= UMA_ZONE_SECONDARY;
		mtx_lock(&uma_mtx);
		ZONE_LOCK(zone);
		LIST_FOREACH(z, &keg->uk_zones, uz_link) {
			if (LIST_NEXT(z, uz_link) == NULL) {
				LIST_INSERT_AFTER(z, zone, uz_link);
				break;
			}
		}
		ZONE_UNLOCK(zone);
		mtx_unlock(&uma_mtx);
	} else if (keg == NULL) {
		if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
		    arg->align, arg->flags)) == NULL)
			return (ENOMEM);
	} else {
		struct uma_kctor_args karg;
		int error;

		/* We should only be here from uma_startup() */
		karg.size = arg->size;
		karg.uminit = arg->uminit;
		karg.fini = arg->fini;
		karg.align = arg->align;
		karg.flags = arg->flags;
		karg.zone = zone;
		error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
		    flags);
		if (error)
			return (error);
	}

	/*
	 * Link in the first keg.
	 */
	zone->uz_klink.kl_keg = keg;
	LIST_INSERT_HEAD(&zone->uz_kegs, &zone->uz_klink, kl_link);
	zone->uz_lockptr = &keg->uk_lock;
	zone->uz_size = keg->uk_size;
	zone->uz_flags |= (keg->uk_flags &
	    (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT));

	/*
	 * Some internal zones don't have room allocated for the per cpu
	 * caches.  If we're internal, bail out here.
	 */
	if (keg->uk_flags & UMA_ZFLAG_INTERNAL) {
		KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0,
		    ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
		return (0);
	}

out:
	if ((arg->flags & UMA_ZONE_MAXBUCKET) == 0)
		zone->uz_count = bucket_select(zone->uz_size);
	else
		zone->uz_count = BUCKET_MAX;

	return (0);
}

/*
 * Keg header dtor.  This frees all data, destroys locks, frees the hash
 * table and removes the keg from the global list.
 *
 * Arguments/Returns follow uma_dtor specifications
 *	udata  unused
 */
static void
keg_dtor(void *arg, int size, void *udata)
{
	uma_keg_t keg;

	keg = (uma_keg_t)arg;
	KEG_LOCK(keg);
	if (keg->uk_free != 0) {
		printf("Freed UMA keg was not empty (%d items). "
		    " Lost %d pages of memory.\n",
		    keg->uk_free, keg->uk_pages);
	}
	KEG_UNLOCK(keg);

	hash_free(&keg->uk_hash);

	KEG_LOCK_FINI(keg);
}

/*
 * Zone header dtor.
 *
 * Arguments/Returns follow uma_dtor specifications
 *	udata  unused
 */
static void
zone_dtor(void *arg, int size, void *udata)
{
	uma_klink_t klink;
	uma_zone_t zone;
	uma_keg_t keg;

	zone = (uma_zone_t)arg;
	keg = zone_first_keg(zone);

	if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
		cache_drain(zone);

	mtx_lock(&uma_mtx);
	LIST_REMOVE(zone, uz_link);
	mtx_unlock(&uma_mtx);
	/*
	 * XXX there are some races here where
	 * the zone can be drained but zone lock
	 * released and then refilled before we
	 * remove it... we don't care for now
	 */
	zone_drain_wait(zone, M_WAITOK);
	/*
	 * Unlink all of our kegs.
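	 *
	 * The klink embedded in the zone (uz_klink) is skipped when freeing
	 * below, since it lives inside the zone structure itself; only the
	 * links added later by uma_zsecond_add() were malloc'd.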
	 */
	while ((klink = LIST_FIRST(&zone->uz_kegs)) != NULL) {
		klink->kl_keg = NULL;
		LIST_REMOVE(klink, kl_link);
		if (klink == &zone->uz_klink)
			continue;
		free(klink, M_TEMP);
	}
	/*
	 * We only destroy kegs from non secondary zones.
	 */
	if (keg != NULL && (zone->uz_flags & UMA_ZONE_SECONDARY) == 0) {
		mtx_lock(&uma_mtx);
		LIST_REMOVE(keg, uk_link);
		mtx_unlock(&uma_mtx);
		zone_free_item(kegs, keg, NULL, SKIP_NONE);
	}
	ZONE_LOCK_FINI(zone);
}

/*
 * Traverses every zone in the system and calls a callback
 *
 * Arguments:
 *	zfunc  A pointer to a function which accepts a zone
 *		as an argument.
 *
 * Returns:
 *	Nothing
 */
static void
zone_foreach(void (*zfunc)(uma_zone_t))
{
	uma_keg_t keg;
	uma_zone_t zone;

	mtx_lock(&uma_mtx);
	LIST_FOREACH(keg, &uma_kegs, uk_link) {
		LIST_FOREACH(zone, &keg->uk_zones, uz_link)
			zfunc(zone);
	}
	mtx_unlock(&uma_mtx);
}

/* Public functions */
/* See uma.h */
void
uma_startup(void *bootmem, int boot_pages)
{
	struct uma_zctor_args args;
	uma_slab_t slab;
	u_int slabsize;
	int i;

#ifdef UMA_DEBUG
	printf("Creating uma keg headers zone and keg.\n");
#endif
	mtx_init(&uma_mtx, "UMA lock", NULL, MTX_DEF);

	/* "manually" create the initial zone */
	memset(&args, 0, sizeof(args));
	args.name = "UMA Kegs";
	args.size = sizeof(struct uma_keg);
	args.ctor = keg_ctor;
	args.dtor = keg_dtor;
	args.uminit = zero_init;
	args.fini = NULL;
	args.keg = &masterkeg;
	args.align = 32 - 1;
	args.flags = UMA_ZFLAG_INTERNAL;
	/* The initial zone has no per-CPU queues so it's smaller */
	zone_ctor(kegs, sizeof(struct uma_zone), &args, M_WAITOK);

#ifdef UMA_DEBUG
	printf("Filling boot free list.\n");
#endif
	for (i = 0; i < boot_pages; i++) {
		slab = (uma_slab_t)((uint8_t *)bootmem + (i * UMA_SLAB_SIZE));
		slab->us_data = (uint8_t *)slab;
		slab->us_flags = UMA_SLAB_BOOT;
		LIST_INSERT_HEAD(&uma_boot_pages, slab, us_link);
	}
	mtx_init(&uma_boot_pages_mtx, "UMA boot pages", NULL, MTX_DEF);

#ifdef UMA_DEBUG
	printf("Creating uma zone headers zone and keg.\n");
#endif
	args.name = "UMA Zones";
	args.size = sizeof(struct uma_zone) +
	    (sizeof(struct uma_cache) * (mp_maxid + 1));
	args.ctor = zone_ctor;
	args.dtor = zone_dtor;
	args.uminit = zero_init;
	args.fini = NULL;
	args.keg = NULL;
	args.align = 32 - 1;
	args.flags = UMA_ZFLAG_INTERNAL;
	/* The initial zone has no per-CPU queues so it's smaller */
	zone_ctor(zones, sizeof(struct uma_zone), &args, M_WAITOK);

#ifdef UMA_DEBUG
	printf("Initializing pcpu cache locks.\n");
#endif
#ifdef UMA_DEBUG
	printf("Creating slab and hash zones.\n");
#endif

	/* Now make a zone for slab headers */
	slabzone = uma_zcreate("UMA Slabs",
	    sizeof(struct uma_slab),
	    NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);

	/*
	 * We also create a zone for the bigger slabs with reference
	 * counts in them, to accommodate UMA_ZONE_REFCNT zones.
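	 *
	 * The size below is computed for the worst case of uma_max_ipers_ref
	 * reference counters per slab.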
	 */
	slabsize = sizeof(struct uma_slab_refcnt);
	slabsize += uma_max_ipers_ref * sizeof(uint32_t);
	slabrefzone = uma_zcreate("UMA RCntSlabs",
	    slabsize,
	    NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR,
	    UMA_ZFLAG_INTERNAL);

	hashzone = uma_zcreate("UMA Hash",
	    sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
	    NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);

	bucket_init();

	booted = UMA_STARTUP;

#ifdef UMA_DEBUG
	printf("UMA startup complete.\n");
#endif
}

/* see uma.h */
void
uma_startup2(void)
{
	booted = UMA_STARTUP2;
	bucket_enable();
#ifdef UMA_DEBUG
	printf("UMA startup2 complete.\n");
#endif
}

/*
 * Initialize our callout handle
 *
 */
static void
uma_startup3(void)
{
#ifdef UMA_DEBUG
	printf("Starting callout.\n");
#endif
	callout_init(&uma_callout, CALLOUT_MPSAFE);
	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
#ifdef UMA_DEBUG
	printf("UMA startup3 complete.\n");
#endif
}

static uma_keg_t
uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
    int align, uint32_t flags)
{
	struct uma_kctor_args args;

	args.size = size;
	args.uminit = uminit;
	args.fini = fini;
	args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align;
	args.flags = flags;
	args.zone = zone;
	return (zone_alloc_item(kegs, &args, M_WAITOK));
}

/* See uma.h */
void
uma_set_align(int align)
{

	if (align != UMA_ALIGN_CACHE)
		uma_align_cache = align;
}

/* See uma.h */
uma_zone_t
uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
    uma_init uminit, uma_fini fini, int align, uint32_t flags)

{
	struct uma_zctor_args args;

	/* This stuff is essential for the zone ctor */
	memset(&args, 0, sizeof(args));
	args.name = name;
	args.size = size;
	args.ctor = ctor;
	args.dtor = dtor;
	args.uminit = uminit;
	args.fini = fini;
	args.align = align;
	args.flags = flags;
	args.keg = NULL;

	return (zone_alloc_item(zones, &args, M_WAITOK));
}

/* See uma.h */
uma_zone_t
uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
    uma_init zinit, uma_fini zfini, uma_zone_t master)
{
	struct uma_zctor_args args;
	uma_keg_t keg;

	keg = zone_first_keg(master);
	memset(&args, 0, sizeof(args));
	args.name = name;
	args.size = keg->uk_size;
	args.ctor = ctor;
	args.dtor = dtor;
	args.uminit = zinit;
	args.fini = zfini;
	args.align = keg->uk_align;
	args.flags = keg->uk_flags | UMA_ZONE_SECONDARY;
	args.keg = keg;

	/* XXX Attaches only one keg of potentially many. */
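	/*
	 * Additional kegs can be attached to the resulting zone afterwards
	 * with uma_zsecond_add() below.
	 */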
	return (zone_alloc_item(zones, &args, M_WAITOK));
}

/* See uma.h */
uma_zone_t
uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor,
    uma_init zinit, uma_fini zfini, uma_import zimport,
    uma_release zrelease, void *arg, int flags)
{
	struct uma_zctor_args args;

	memset(&args, 0, sizeof(args));
	args.name = name;
	args.size = size;
	args.ctor = ctor;
	args.dtor = dtor;
	args.uminit = zinit;
	args.fini = zfini;
	args.import = zimport;
	args.release = zrelease;
	args.arg = arg;
	args.align = 0;
	args.flags = flags;

	return (zone_alloc_item(zones, &args, M_WAITOK));
}

static void
zone_lock_pair(uma_zone_t a, uma_zone_t b)
{
	if (a < b) {
		ZONE_LOCK(a);
		mtx_lock_flags(b->uz_lockptr, MTX_DUPOK);
	} else {
		ZONE_LOCK(b);
		mtx_lock_flags(a->uz_lockptr, MTX_DUPOK);
	}
}

static void
zone_unlock_pair(uma_zone_t a, uma_zone_t b)
{

	ZONE_UNLOCK(a);
	ZONE_UNLOCK(b);
}

int
uma_zsecond_add(uma_zone_t zone, uma_zone_t master)
{
	uma_klink_t klink;
	uma_klink_t kl;
	int error;

	error = 0;
	klink = malloc(sizeof(*klink), M_TEMP, M_WAITOK | M_ZERO);

	zone_lock_pair(zone, master);
	/*
	 * zone must use vtoslab() to resolve objects and must already be
	 * a secondary.
	 */
	if ((zone->uz_flags & (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY))
	    != (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) {
		error = EINVAL;
		goto out;
	}
	/*
	 * The new master must also use vtoslab().
	 */
	if ((zone->uz_flags & UMA_ZONE_VTOSLAB) != UMA_ZONE_VTOSLAB) {
		error = EINVAL;
		goto out;
	}
	/*
	 * Both must either be refcnt, or not be refcnt.
	 */
	if ((zone->uz_flags & UMA_ZONE_REFCNT) !=
	    (master->uz_flags & UMA_ZONE_REFCNT)) {
		error = EINVAL;
		goto out;
	}
	/*
	 * The underlying object must be the same size.  rsize
	 * may be different.
	 */
	if (master->uz_size != zone->uz_size) {
		error = E2BIG;
		goto out;
	}
	/*
	 * Put it at the end of the list.
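	 *
	 * The LIST has no tail pointer, so walk to the last klink and insert
	 * the new one after it; from then on zone_fetch_slab_multi() will
	 * consider the new keg as well.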
	 */
	klink->kl_keg = zone_first_keg(master);
	LIST_FOREACH(kl, &zone->uz_kegs, kl_link) {
		if (LIST_NEXT(kl, kl_link) == NULL) {
			LIST_INSERT_AFTER(kl, klink, kl_link);
			break;
		}
	}
	klink = NULL;
	zone->uz_flags |= UMA_ZFLAG_MULTI;
	zone->uz_slab = zone_fetch_slab_multi;

out:
	zone_unlock_pair(zone, master);
	if (klink != NULL)
		free(klink, M_TEMP);

	return (error);
}


/* See uma.h */
void
uma_zdestroy(uma_zone_t zone)
{

	zone_free_item(zones, zone, NULL, SKIP_NONE);
}

/* See uma.h */
void *
uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
{
	void *item;
	uma_cache_t cache;
	uma_bucket_t bucket;
	int lockfail;
	int cpu;

	/* This is the fast path allocation */
#ifdef UMA_DEBUG_ALLOC_1
	printf("Allocating one item from %s(%p)\n", zone->uz_name, zone);
#endif
	CTR3(KTR_UMA, "uma_zalloc_arg thread %x zone %s flags %d", curthread,
	    zone->uz_name, flags);

	if (flags & M_WAITOK) {
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
		    "uma_zalloc_arg: zone \"%s\"", zone->uz_name);
	}
#ifdef DEBUG_MEMGUARD
	if (memguard_cmp_zone(zone)) {
		item = memguard_alloc(zone->uz_size, flags);
		if (item != NULL) {
			/*
			 * Avoid conflict with the use-after-free
			 * protecting infrastructure from INVARIANTS.
			 */
			if (zone->uz_init != NULL &&
			    zone->uz_init != mtrash_init &&
			    zone->uz_init(item, zone->uz_size, flags) != 0)
				return (NULL);
			if (zone->uz_ctor != NULL &&
			    zone->uz_ctor != mtrash_ctor &&
			    zone->uz_ctor(item, zone->uz_size, udata,
			    flags) != 0) {
				zone->uz_fini(item, zone->uz_size);
				return (NULL);
			}
			return (item);
		}
		/* This is unfortunate but should not be fatal. */
	}
#endif
	/*
	 * If possible, allocate from the per-CPU cache.  There are two
	 * requirements for safe access to the per-CPU cache: (1) the thread
	 * accessing the cache must not be preempted or yield during access,
	 * and (2) the thread must not migrate CPUs without switching which
	 * cache it accesses.  We rely on a critical section to prevent
	 * preemption and migration.  We release the critical section in
	 * order to acquire the zone mutex if we are unable to allocate from
	 * the current cache; when we re-acquire the critical section, we
	 * must detect and handle migration if it has occurred.
	 */
	critical_enter();
	cpu = curcpu;
	cache = &zone->uz_cpu[cpu];

zalloc_start:
	bucket = cache->uc_allocbucket;
	if (bucket != NULL && bucket->ub_cnt > 0) {
		bucket->ub_cnt--;
		item = bucket->ub_bucket[bucket->ub_cnt];
#ifdef INVARIANTS
		bucket->ub_bucket[bucket->ub_cnt] = NULL;
#endif
		KASSERT(item != NULL, ("uma_zalloc: Bucket pointer mangled."));
		cache->uc_allocs++;
		critical_exit();
		if (zone->uz_ctor != NULL &&
		    zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
			atomic_add_long(&zone->uz_fails, 1);
			zone_free_item(zone, item, udata, SKIP_DTOR);
			return (NULL);
		}
#ifdef INVARIANTS
		uma_dbg_alloc(zone, NULL, item);
#endif
		if (flags & M_ZERO)
			bzero(item, zone->uz_size);
		return (item);
	}

	/*
	 * We have run out of items in our alloc bucket.
	 * See if we can switch with our free bucket.
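	 *
	 * A non-empty free bucket here means frees have outpaced allocations
	 * since the last swap, so the two buckets are simply exchanged and
	 * the fast path retried without taking the zone lock.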
	 */
	bucket = cache->uc_freebucket;
	if (bucket != NULL && bucket->ub_cnt > 0) {
#ifdef UMA_DEBUG_ALLOC
		printf("uma_zalloc: Swapping empty with alloc.\n");
#endif
		cache->uc_freebucket = cache->uc_allocbucket;
		cache->uc_allocbucket = bucket;
		goto zalloc_start;
	}

	/*
	 * Discard any empty allocation bucket while we hold no locks.
	 */
	bucket = cache->uc_allocbucket;
	cache->uc_allocbucket = NULL;
	critical_exit();
	if (bucket != NULL)
		bucket_free(zone, bucket, udata);

	/* Short-circuit for zones without buckets and low memory. */
	if (zone->uz_count == 0 || bucketdisable)
		goto zalloc_item;

	/*
	 * The attempt to retrieve the item from the per-CPU cache has
	 * failed, so we must go back to the zone.  This requires the zone
	 * lock, so we must drop the critical section, then re-acquire it
	 * when we go back to the cache.  Since the critical section is
	 * released, we may be preempted or migrate.  As such, make sure not
	 * to maintain any thread-local state specific to the cache from
	 * prior to releasing the critical section.
	 */
	lockfail = 0;
	if (ZONE_TRYLOCK(zone) == 0) {
		/* Record contention to size the buckets. */
		ZONE_LOCK(zone);
		lockfail = 1;
	}
	critical_enter();
	cpu = curcpu;
	cache = &zone->uz_cpu[cpu];

	/*
	 * Since we have locked the zone we may as well send back our stats.
	 */
	atomic_add_long(&zone->uz_allocs, cache->uc_allocs);
	atomic_add_long(&zone->uz_frees, cache->uc_frees);
	cache->uc_allocs = 0;
	cache->uc_frees = 0;

	/* See if we lost the race to fill the cache. */
	if (cache->uc_allocbucket != NULL) {
		ZONE_UNLOCK(zone);
		goto zalloc_start;
	}

	/*
	 * Check the zone's cache of buckets.
	 */
	if ((bucket = LIST_FIRST(&zone->uz_buckets)) != NULL) {
		KASSERT(bucket->ub_cnt != 0,
		    ("uma_zalloc_arg: Returning an empty bucket."));

		LIST_REMOVE(bucket, ub_link);
		cache->uc_allocbucket = bucket;
		ZONE_UNLOCK(zone);
		goto zalloc_start;
	}
	/* We are no longer associated with this CPU. */
	critical_exit();

	/*
	 * We bump the uz count when the cache size is insufficient to
	 * handle the working set.
	 */
	if (lockfail && zone->uz_count < BUCKET_MAX)
		zone->uz_count++;
	ZONE_UNLOCK(zone);

	/*
	 * Now let's just fill a bucket and put it on the free list.  If that
	 * works we'll restart the allocation from the beginning and it
	 * will use the just-filled bucket.
	 */
	bucket = zone_alloc_bucket(zone, udata, flags);
	if (bucket != NULL) {
		ZONE_LOCK(zone);
		critical_enter();
		cpu = curcpu;
		cache = &zone->uz_cpu[cpu];
		/*
		 * See if we lost the race or were migrated.  Cache the
		 * initialized bucket to make this less likely or claim
		 * the memory directly.
		 */
		if (cache->uc_allocbucket == NULL)
			cache->uc_allocbucket = bucket;
		else
			LIST_INSERT_HEAD(&zone->uz_buckets, bucket, ub_link);
		ZONE_UNLOCK(zone);
		goto zalloc_start;
	}

	/*
	 * We may not be able to get a bucket so return an actual item.
	 */
#ifdef UMA_DEBUG
	printf("uma_zalloc_arg: Bucketzone returned NULL\n");
#endif

zalloc_item:
	item = zone_alloc_item(zone, udata, flags);

	return (item);
}

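/*
 * Consumer-side sketch (illustrative only): most callers reach
 * uma_zalloc_arg()/uma_zfree_arg() through the uma_zalloc()/uma_zfree()
 * wrappers declared in uma.h, which pass NULL udata.  The zone and type
 * names below are hypothetical.
 *
 *	static uma_zone_t foo_zone;
 *
 *	foo_zone = uma_zcreate("foo", sizeof(struct foo), NULL, NULL,
 *	    NULL, NULL, UMA_ALIGN_PTR, 0);
 *
 *	p = uma_zalloc(foo_zone, M_WAITOK | M_ZERO);	- may sleep
 *	q = uma_zalloc(foo_zone, M_NOWAIT);		- may return NULL
 *	...
 *	uma_zfree(foo_zone, p);
 */
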
static uma_slab_t
keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int flags)
{
	uma_slab_t slab;
	int reserve;

	mtx_assert(&keg->uk_lock, MA_OWNED);
	slab = NULL;
	reserve = 0;
	if ((flags & M_USE_RESERVE) == 0)
		reserve = keg->uk_reserve;

	for (;;) {
		/*
		 * Find a slab with some space.  Prefer slabs that are
		 * partially used over those that are totally full.  This
		 * helps to reduce fragmentation.
		 */
		if (keg->uk_free > reserve) {
			if (!LIST_EMPTY(&keg->uk_part_slab)) {
				slab = LIST_FIRST(&keg->uk_part_slab);
			} else {
				slab = LIST_FIRST(&keg->uk_free_slab);
				LIST_REMOVE(slab, us_link);
				LIST_INSERT_HEAD(&keg->uk_part_slab, slab,
				    us_link);
			}
			MPASS(slab->us_keg == keg);
			return (slab);
		}

		/*
		 * M_NOVM means don't ask at all!
		 */
		if (flags & M_NOVM)
			break;

		if (keg->uk_maxpages && keg->uk_pages >= keg->uk_maxpages) {
			keg->uk_flags |= UMA_ZFLAG_FULL;
			/*
			 * If this is not a multi-zone, set the FULL bit.
			 * Otherwise slab_multi() takes care of it.
			 */
			if ((zone->uz_flags & UMA_ZFLAG_MULTI) == 0) {
				zone->uz_flags |= UMA_ZFLAG_FULL;
				zone_log_warning(zone);
			}
			if (flags & M_NOWAIT)
				break;
			zone->uz_sleeps++;
			msleep(keg, &keg->uk_lock, PVM, "keglimit", 0);
			continue;
		}
		slab = keg_alloc_slab(keg, zone, flags);
		/*
		 * If we got a slab here it's safe to mark it partially used
		 * and return.  We assume that the caller is going to remove
		 * at least one item.
		 */
		if (slab) {
			MPASS(slab->us_keg == keg);
			LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
			return (slab);
		}
		/*
		 * We might not have been able to get a slab but another CPU
		 * could have while we were unlocked.  Check again before we
		 * fail.
		 */
		flags |= M_NOVM;
	}
	return (slab);
}

static uma_slab_t
zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int flags)
{
	uma_slab_t slab;

	if (keg == NULL) {
		keg = zone_first_keg(zone);
		KEG_LOCK(keg);
	}

	for (;;) {
		slab = keg_fetch_slab(keg, zone, flags);
		if (slab)
			return (slab);
		if (flags & (M_NOWAIT | M_NOVM))
			break;
	}
	KEG_UNLOCK(keg);
	return (NULL);
}

/*
 * zone_fetch_slab_multi:  Fetches a slab from one available keg.  Returns
 * with the keg locked.  If NULL is returned, no lock is held.
 *
 * The last pointer is used to seed the search.  It is not required.
 */
static uma_slab_t
zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int rflags)
{
	uma_klink_t klink;
	uma_slab_t slab;
	uma_keg_t keg;
	int flags;
	int empty;
	int full;

	/*
	 * Don't wait on the first pass.  This will skip limit tests
	 * as well.  We don't want to block if we can find a provider
	 * without blocking.
	 */
	flags = (rflags & ~M_WAITOK) | M_NOWAIT;
	/*
	 * Use the last slab allocated as a hint for where to start
	 * the search.
	 */
	if (last != NULL) {
		slab = keg_fetch_slab(last, zone, flags);
		if (slab)
			return (slab);
		KEG_UNLOCK(last);
	}
	/*
	 * Loop until we have a slab in case of transient failures
	 * while M_WAITOK is specified.  I'm not sure this is 100%
	 * required but we've done it for so long now.
	 */
	for (;;) {
		empty = 0;
		full = 0;
		/*
		 * Search the available kegs for slabs.  Be careful to hold the
		 * correct lock while calling into the keg layer.
		 */
		LIST_FOREACH(klink, &zone->uz_kegs, kl_link) {
			keg = klink->kl_keg;
			KEG_LOCK(keg);
			if ((keg->uk_flags & UMA_ZFLAG_FULL) == 0) {
				slab = keg_fetch_slab(keg, zone, flags);
				if (slab)
					return (slab);
			}
			if (keg->uk_flags & UMA_ZFLAG_FULL)
				full++;
			else
				empty++;
			KEG_UNLOCK(keg);
		}
		if (rflags & (M_NOWAIT | M_NOVM))
			break;
		flags = rflags;
		/*
		 * All kegs are full.  XXX We can't atomically check all kegs
		 * and sleep so just sleep for a short period and retry.
		 */
		if (full && !empty) {
			ZONE_LOCK(zone);
			zone->uz_flags |= UMA_ZFLAG_FULL;
			zone->uz_sleeps++;
			zone_log_warning(zone);
			msleep(zone, zone->uz_lockptr, PVM,
			    "zonelimit", hz/100);
			zone->uz_flags &= ~UMA_ZFLAG_FULL;
			ZONE_UNLOCK(zone);
			continue;
		}
	}
	return (NULL);
}

static void *
slab_alloc_item(uma_keg_t keg, uma_slab_t slab)
{
	void *item;
	uint8_t freei;

	MPASS(keg == slab->us_keg);
	mtx_assert(&keg->uk_lock, MA_OWNED);

	freei = BIT_FFS(SLAB_SETSIZE, &slab->us_free) - 1;
	BIT_CLR(SLAB_SETSIZE, freei, &slab->us_free);
	item = slab->us_data + (keg->uk_rsize * freei);
	slab->us_freecount--;
	keg->uk_free--;

	/* Move this slab to the full list */
	if (slab->us_freecount == 0) {
		LIST_REMOVE(slab, us_link);
		LIST_INSERT_HEAD(&keg->uk_full_slab, slab, us_link);
	}

	return (item);
}

static int
zone_import(uma_zone_t zone, void **bucket, int max, int flags)
{
	uma_slab_t slab;
	uma_keg_t keg;
	int i;

	slab = NULL;
	keg = NULL;
	/* Try to keep the buckets totally full */
	for (i = 0; i < max; ) {
		if ((slab = zone->uz_slab(zone, keg, flags)) == NULL)
			break;
		keg = slab->us_keg;
		while (slab->us_freecount && i < max) {
			bucket[i++] = slab_alloc_item(keg, slab);
			if (keg->uk_free <= keg->uk_reserve)
				break;
		}
		/* Don't grab more than one slab at a time. */
		flags &= ~M_WAITOK;
		flags |= M_NOWAIT;
	}
	if (slab != NULL)
		KEG_UNLOCK(keg);

	return (i);
}

static uma_bucket_t
zone_alloc_bucket(uma_zone_t zone, void *udata, int flags)
{
	uma_bucket_t bucket;
	int max;

	/* Don't wait for buckets, preserve caller's NOVM setting. */
	bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM));
	if (bucket == NULL)
		goto out;

	max = MIN(bucket->ub_entries, zone->uz_count);
	bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket,
	    max, flags);

	/*
	 * Initialize the memory if necessary.
	 */
	if (bucket->ub_cnt != 0 && zone->uz_init != NULL) {
		int i;

		for (i = 0; i < bucket->ub_cnt; i++)
			if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size,
			    flags) != 0)
				break;
		/*
		 * If we couldn't initialize the whole bucket, put the
		 * rest back onto the freelist.
		 */
		if (i != bucket->ub_cnt) {
			zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i],
			    bucket->ub_cnt - i);
#ifdef INVARIANTS
			bzero(&bucket->ub_bucket[i],
			    sizeof(void *) * (bucket->ub_cnt - i));
#endif
			bucket->ub_cnt = i;
		}
	}

out:
	if (bucket == NULL || bucket->ub_cnt == 0) {
		if (bucket != NULL)
			bucket_free(zone, bucket, udata);
		atomic_add_long(&zone->uz_fails, 1);
		return (NULL);
	}

	return (bucket);
}

/*
 * Allocates a single item from a zone.
 *
 * Arguments
 *	zone   The zone to alloc for.
 *	udata  The data to be passed to the constructor.
 *	flags  M_WAITOK, M_NOWAIT, M_ZERO.
 *
 * Returns
 *	NULL if there is no memory and M_NOWAIT is set
 *	An item if successful
 */

static void *
zone_alloc_item(uma_zone_t zone, void *udata, int flags)
{
	void *item;

	item = NULL;

#ifdef UMA_DEBUG_ALLOC
	printf("INTERNAL: Allocating one item from %s(%p)\n", zone->uz_name, zone);
#endif
	if (zone->uz_import(zone->uz_arg, &item, 1, flags) != 1)
		goto fail;
	atomic_add_long(&zone->uz_allocs, 1);

	/*
	 * We have to call both the zone's init (not the keg's init)
	 * and the zone's ctor.  This is because the item is going from
	 * a keg slab directly to the user, and the user is expecting it
	 * to be both zone-init'd as well as zone-ctor'd.
	 */
	if (zone->uz_init != NULL) {
		if (zone->uz_init(item, zone->uz_size, flags) != 0) {
			zone_free_item(zone, item, udata, SKIP_FINI);
			goto fail;
		}
	}
	if (zone->uz_ctor != NULL) {
		if (zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
			zone_free_item(zone, item, udata, SKIP_DTOR);
			goto fail;
		}
	}
#ifdef INVARIANTS
	uma_dbg_alloc(zone, NULL, item);
#endif
	if (flags & M_ZERO)
		bzero(item, zone->uz_size);

	return (item);

fail:
	atomic_add_long(&zone->uz_fails, 1);
	return (NULL);
}

/* See uma.h */
void
uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
{
	uma_cache_t cache;
	uma_bucket_t bucket;
	int cpu;

#ifdef UMA_DEBUG_ALLOC_1
	printf("Freeing item %p to %s(%p)\n", item, zone->uz_name, zone);
#endif
	CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread,
	    zone->uz_name);

	/* uma_zfree(..., NULL) does nothing, to match free(9). */
	if (item == NULL)
		return;
#ifdef DEBUG_MEMGUARD
	if (is_memguard_addr(item)) {
		if (zone->uz_dtor != NULL && zone->uz_dtor != mtrash_dtor)
			zone->uz_dtor(item, zone->uz_size, udata);
		if (zone->uz_fini != NULL && zone->uz_fini != mtrash_fini)
			zone->uz_fini(item, zone->uz_size);
		memguard_free(item);
		return;
	}
#endif
#ifdef INVARIANTS
	if (zone->uz_flags & UMA_ZONE_MALLOC)
		uma_dbg_free(zone, udata, item);
	else
		uma_dbg_free(zone, NULL, item);
#endif
	if (zone->uz_dtor != NULL)
		zone->uz_dtor(item, zone->uz_size, udata);

	/*
	 * The race here is acceptable.  If we miss it we'll just have to wait
	 * a little longer for the limits to be reset.
	 */
	if (zone->uz_flags & UMA_ZFLAG_FULL)
		goto zfree_item;

	/*
	 * If possible, free to the per-CPU cache.  There are two
	 * requirements for safe access to the per-CPU cache: (1) the thread
	 * accessing the cache must not be preempted or yield during access,
	 * and (2) the thread must not migrate CPUs without switching which
	 * cache it accesses.  We rely on a critical section to prevent
	 * preemption and migration.  We release the critical section in
	 * order to acquire the zone mutex if we are unable to free to the
	 * current cache; when we re-acquire the critical section, we must
	 * detect and handle migration if it has occurred.
	 */
zfree_restart:
	critical_enter();
	cpu = curcpu;
	cache = &zone->uz_cpu[cpu];

zfree_start:
	/*
	 * Try to free into the allocbucket first to give LIFO ordering
	 * for cache-hot data structures.  Spill over into the freebucket
	 * if necessary.  Alloc will swap them if one runs dry.
	 */
	bucket = cache->uc_allocbucket;
	if (bucket == NULL || bucket->ub_cnt >= bucket->ub_entries)
		bucket = cache->uc_freebucket;
	if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
		KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL,
		    ("uma_zfree: Freeing to non free bucket index."));
		bucket->ub_bucket[bucket->ub_cnt] = item;
		bucket->ub_cnt++;
		cache->uc_frees++;
		critical_exit();
		return;
	}

	/*
	 * We must go back to the zone, which requires acquiring the zone
	 * lock, which in turn means we must release and re-acquire the
	 * critical section.  Since the critical section is released, we may
	 * be preempted or migrate.  As such, make sure not to maintain any
	 * thread-local state specific to the cache from prior to releasing
	 * the critical section.
	 */
	critical_exit();
	if (zone->uz_count == 0 || bucketdisable)
		goto zfree_item;

	ZONE_LOCK(zone);
	critical_enter();
	cpu = curcpu;
	cache = &zone->uz_cpu[cpu];

	/*
	 * Since we have locked the zone we may as well send back our stats.
	 */
	atomic_add_long(&zone->uz_allocs, cache->uc_allocs);
	atomic_add_long(&zone->uz_frees, cache->uc_frees);
	cache->uc_allocs = 0;
	cache->uc_frees = 0;

	bucket = cache->uc_freebucket;
	if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
		ZONE_UNLOCK(zone);
		goto zfree_start;
	}
	cache->uc_freebucket = NULL;

	/* Can we throw this on the zone full list? */
	if (bucket != NULL) {
#ifdef UMA_DEBUG_ALLOC
		printf("uma_zfree: Putting old bucket on the free list.\n");
#endif
		/* ub_cnt is pointing to the last free item */
		KASSERT(bucket->ub_cnt != 0,
		    ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n"));
		LIST_INSERT_HEAD(&zone->uz_buckets, bucket, ub_link);
	}

	/* We are no longer associated with this CPU. */
	critical_exit();

	/* And the zone.. */
	ZONE_UNLOCK(zone);

#ifdef UMA_DEBUG_ALLOC
	printf("uma_zfree: Allocating new free bucket.\n");
#endif
	bucket = bucket_alloc(zone, udata, M_NOWAIT);
	if (bucket) {
		critical_enter();
		cpu = curcpu;
		cache = &zone->uz_cpu[cpu];
		if (cache->uc_freebucket == NULL) {
			cache->uc_freebucket = bucket;
			goto zfree_start;
		}
		/*
		 * We lost the race, start over.  We have to drop our
		 * critical section to free the bucket.
		 */
		critical_exit();
		bucket_free(zone, bucket, udata);
		goto zfree_restart;
	}

	/*
	 * If nothing else caught this, we'll just do an internal free.
	 */
zfree_item:
	zone_free_item(zone, item, udata, SKIP_DTOR);

	return;
}

static void
slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item)
{
	uint8_t freei;

	mtx_assert(&keg->uk_lock, MA_OWNED);
	MPASS(keg == slab->us_keg);

	/* Do we need to remove from any lists? */
	if (slab->us_freecount + 1 == keg->uk_ipers) {
		LIST_REMOVE(slab, us_link);
		LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
	} else if (slab->us_freecount == 0) {
		LIST_REMOVE(slab, us_link);
		LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
	}

	/* Slab management. */
	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
	BIT_SET(SLAB_SETSIZE, freei, &slab->us_free);
	slab->us_freecount++;

	/* Keg statistics. */
	keg->uk_free++;
}

static void
zone_release(uma_zone_t zone, void **bucket, int cnt)
{
	void *item;
	uma_slab_t slab;
	uma_keg_t keg;
	uint8_t *mem;
	int clearfull;
	int i;

	clearfull = 0;
	keg = zone_first_keg(zone);
	KEG_LOCK(keg);
	for (i = 0; i < cnt; i++) {
		item = bucket[i];
		if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) {
			mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
			if (zone->uz_flags & UMA_ZONE_HASH) {
				slab = hash_sfind(&keg->uk_hash, mem);
			} else {
				mem += keg->uk_pgoff;
				slab = (uma_slab_t)mem;
			}
		} else {
			slab = vtoslab((vm_offset_t)item);
			if (slab->us_keg != keg) {
				KEG_UNLOCK(keg);
				keg = slab->us_keg;
				KEG_LOCK(keg);
			}
		}
		slab_free_item(keg, slab, item);
		if (keg->uk_flags & UMA_ZFLAG_FULL) {
			if (keg->uk_pages < keg->uk_maxpages) {
				keg->uk_flags &= ~UMA_ZFLAG_FULL;
				clearfull = 1;
			}

			/*
			 * We can handle one more allocation.  Since we're
			 * clearing ZFLAG_FULL, wake up all procs blocked
			 * on pages.  This should be uncommon, so keeping this
			 * simple for now (rather than adding count of blocked
			 * threads etc).
			 */
			wakeup(keg);
		}
	}
	KEG_UNLOCK(keg);
	if (clearfull) {
		ZONE_LOCK(zone);
		zone->uz_flags &= ~UMA_ZFLAG_FULL;
		wakeup(zone);
		ZONE_UNLOCK(zone);
	}

}

/*
 * Frees a single item to any zone.
 *
 * Arguments:
 *	zone   The zone to free to
 *	item   The item we're freeing
 *	udata  User supplied data for the dtor
 *	skip   Skip dtors and finis
 */
static void
zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip)
{

#ifdef INVARIANTS
	if (skip == SKIP_NONE) {
		if (zone->uz_flags & UMA_ZONE_MALLOC)
			uma_dbg_free(zone, udata, item);
		else
			uma_dbg_free(zone, NULL, item);
	}
#endif
	if (skip < SKIP_DTOR && zone->uz_dtor)
		zone->uz_dtor(item, zone->uz_size, udata);

	if (skip < SKIP_FINI && zone->uz_fini)
		zone->uz_fini(item, zone->uz_size);

	atomic_add_long(&zone->uz_frees, 1);
	zone->uz_release(zone->uz_arg, &item, 1);
}

/* See uma.h */
int
uma_zone_set_max(uma_zone_t zone, int nitems)
{
	uma_keg_t keg;

	keg = zone_first_keg(zone);
	if (keg == NULL)
		return (0);
	KEG_LOCK(keg);
	keg->uk_maxpages = (nitems / keg->uk_ipers) * keg->uk_ppera;
	if (keg->uk_maxpages * keg->uk_ipers < nitems)
		keg->uk_maxpages += keg->uk_ppera;
	nitems = keg->uk_maxpages * keg->uk_ipers;
	KEG_UNLOCK(keg);

	return (nitems);
}

/* See uma.h */
int
uma_zone_get_max(uma_zone_t zone)
{
	int nitems;
	uma_keg_t keg;

	keg = zone_first_keg(zone);
	if (keg == NULL)
		return (0);
	KEG_LOCK(keg);
	nitems = keg->uk_maxpages * keg->uk_ipers;
	KEG_UNLOCK(keg);

	return (nitems);
}

/* See uma.h */
void
uma_zone_set_warning(uma_zone_t zone, const char *warning)
{

	ZONE_LOCK(zone);
	zone->uz_warning = warning;
	ZONE_UNLOCK(zone);
}

/* See uma.h */
int
uma_zone_get_cur(uma_zone_t zone)
{
	int64_t nitems;
	u_int i;

	ZONE_LOCK(zone);
	nitems = zone->uz_allocs - zone->uz_frees;
	CPU_FOREACH(i) {
		/*
		 * See the comment in sysctl_vm_zone_stats() regarding the
		 * safety of accessing the per-cpu caches.  With the zone lock
		 * held, it is safe, but can potentially result in stale data.
		 */
		nitems += zone->uz_cpu[i].uc_allocs -
		    zone->uz_cpu[i].uc_frees;
	}
	ZONE_UNLOCK(zone);

	return (nitems < 0 ? 0 : nitems);
}

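/*
 * Illustrative note on the limit interface above (the figures are an
 * example, not taken from any particular keg): uma_zone_set_max() rounds
 * the requested limit up to whole slabs, so the value it returns (and the
 * value uma_zone_get_max() later reports) may exceed the request.  For a
 * keg with one page per slab (uk_ppera == 1) and uk_ipers == 202:
 *
 *	limit = uma_zone_set_max(zone, 1000);
 *	-> uk_maxpages = (1000 / 202) * 1 = 4, then rounded up to 5
 *	-> limit == 5 * 202 == 1010, not 1000
 *
 * Callers that need an exact cap must enforce it themselves.
 */
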
/* See uma.h */
void
uma_zone_set_init(uma_zone_t zone, uma_init uminit)
{
	uma_keg_t keg;

	keg = zone_first_keg(zone);
	KASSERT(keg != NULL, ("uma_zone_set_init: Invalid zone type"));
	KEG_LOCK(keg);
	KASSERT(keg->uk_pages == 0,
	    ("uma_zone_set_init on non-empty keg"));
	keg->uk_init = uminit;
	KEG_UNLOCK(keg);
}

/* See uma.h */
void
uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
{
	uma_keg_t keg;

	keg = zone_first_keg(zone);
	KASSERT(keg != NULL, ("uma_zone_set_fini: Invalid zone type"));
	KEG_LOCK(keg);
	KASSERT(keg->uk_pages == 0,
	    ("uma_zone_set_fini on non-empty keg"));
	keg->uk_fini = fini;
	KEG_UNLOCK(keg);
}

/* See uma.h */
void
uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
{

	ZONE_LOCK(zone);
	KASSERT(zone_first_keg(zone)->uk_pages == 0,
	    ("uma_zone_set_zinit on non-empty keg"));
	zone->uz_init = zinit;
	ZONE_UNLOCK(zone);
}

/* See uma.h */
void
uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
{

	ZONE_LOCK(zone);
	KASSERT(zone_first_keg(zone)->uk_pages == 0,
	    ("uma_zone_set_zfini on non-empty keg"));
	zone->uz_fini = zfini;
	ZONE_UNLOCK(zone);
}

/* See uma.h */
/* XXX uk_freef is not actually used with the zone locked */
void
uma_zone_set_freef(uma_zone_t zone, uma_free freef)
{
	uma_keg_t keg;

	keg = zone_first_keg(zone);
	KASSERT(keg != NULL, ("uma_zone_set_freef: Invalid zone type"));
	KEG_LOCK(keg);
	keg->uk_freef = freef;
	KEG_UNLOCK(keg);
}

/* See uma.h */
/* XXX uk_allocf is not actually used with the zone locked */
void
uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
{
	uma_keg_t keg;

	keg = zone_first_keg(zone);
	KEG_LOCK(keg);
	keg->uk_allocf = allocf;
	KEG_UNLOCK(keg);
}

/* See uma.h */
void
uma_zone_reserve(uma_zone_t zone, int items)
{
	uma_keg_t keg;

	keg = zone_first_keg(zone);
	if (keg == NULL)
		return;
	KEG_LOCK(keg);
	keg->uk_reserve = items;
	KEG_UNLOCK(keg);

	return;
}

/* See uma.h */
int
uma_zone_reserve_kva(uma_zone_t zone, int count)
{
	uma_keg_t keg;
	vm_offset_t kva;
	int pages;

	keg = zone_first_keg(zone);
	if (keg == NULL)
		return (0);
	pages = count / keg->uk_ipers;

	if (pages * keg->uk_ipers < count)
		pages++;

#ifdef UMA_MD_SMALL_ALLOC
	if (keg->uk_ppera > 1) {
#else
	if (1) {
#endif
		kva = kmem_alloc_nofault(kernel_map, pages * UMA_SLAB_SIZE);
		if (kva == 0)
			return (0);
	} else
		kva = 0;
	KEG_LOCK(keg);
	keg->uk_kva = kva;
	keg->uk_offset = 0;
	keg->uk_maxpages = pages;
#ifdef UMA_MD_SMALL_ALLOC
	keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc;
#else
	keg->uk_allocf = noobj_alloc;
#endif
	keg->uk_flags |= UMA_ZONE_NOFREE;
	KEG_UNLOCK(keg);

	return (1);
}

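/*
 * Usage sketch for the reservation interfaces above (illustrative only;
 * the zone name is hypothetical).  uma_zone_reserve() keeps uk_reserve
 * items worth of slab space that ordinary allocations will not dip into;
 * a caller on a critical path passes M_USE_RESERVE to bypass that floor:
 *
 *	uma_zone_reserve(foo_zone, 32);
 *	uma_prealloc(foo_zone, 32);	- back the reserve with slabs
 *	...
 *	p = uma_zalloc(foo_zone, M_NOWAIT | M_USE_RESERVE);
 *
 * uma_zone_reserve_kva() is different: it pre-reserves kernel VA for up
 * to "count" items so that later slab allocations only need physical
 * pages, and it implies UMA_ZONE_NOFREE.
 */
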
/* See uma.h */
void
uma_prealloc(uma_zone_t zone, int items)
{
	int slabs;
	uma_slab_t slab;
	uma_keg_t keg;

	keg = zone_first_keg(zone);
	if (keg == NULL)
		return;
	KEG_LOCK(keg);
	slabs = items / keg->uk_ipers;
	if (slabs * keg->uk_ipers < items)
		slabs++;
	while (slabs > 0) {
		slab = keg_alloc_slab(keg, zone, M_WAITOK);
		if (slab == NULL)
			break;
		MPASS(slab->us_keg == keg);
		LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
		slabs--;
	}
	KEG_UNLOCK(keg);
}

/* See uma.h */
uint32_t *
uma_find_refcnt(uma_zone_t zone, void *item)
{
	uma_slabrefcnt_t slabref;
	uma_slab_t slab;
	uma_keg_t keg;
	uint32_t *refcnt;
	int idx;

	slab = vtoslab((vm_offset_t)item & (~UMA_SLAB_MASK));
	slabref = (uma_slabrefcnt_t)slab;
	keg = slab->us_keg;
	KASSERT(keg->uk_flags & UMA_ZONE_REFCNT,
	    ("uma_find_refcnt(): zone possibly not UMA_ZONE_REFCNT"));
	idx = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
	refcnt = &slabref->us_refcnt[idx];
	return (refcnt);
}

/* See uma.h */
void
uma_reclaim(void)
{
#ifdef UMA_DEBUG
	printf("UMA: vm asked us to release pages!\n");
#endif
	bucket_enable();
	zone_foreach(zone_drain);
	/*
	 * Some slabs may have been freed but this zone will be visited
	 * early in the pass above, so drain it again in order to free
	 * pages that only became empty once the other zones were drained.
	 * We have to do the same for buckets.
	 */
	zone_drain(slabzone);
	zone_drain(slabrefzone);
	bucket_zone_drain();
}

/* See uma.h */
int
uma_zone_exhausted(uma_zone_t zone)
{
	int full;

	ZONE_LOCK(zone);
	full = (zone->uz_flags & UMA_ZFLAG_FULL);
	ZONE_UNLOCK(zone);
	return (full);
}

int
uma_zone_exhausted_nolock(uma_zone_t zone)
{
	return (zone->uz_flags & UMA_ZFLAG_FULL);
}

void *
uma_large_malloc(int size, int wait)
{
	void *mem;
	uma_slab_t slab;
	uint8_t flags;

	slab = zone_alloc_item(slabzone, NULL, wait);
	if (slab == NULL)
		return (NULL);
	mem = page_alloc(NULL, size, &flags, wait);
	if (mem) {
		vsetslab((vm_offset_t)mem, slab);
		slab->us_data = mem;
		slab->us_flags = flags | UMA_SLAB_MALLOC;
		slab->us_size = size;
	} else {
		zone_free_item(slabzone, slab, NULL, SKIP_NONE);
	}

	return (mem);
}

void
uma_large_free(uma_slab_t slab)
{
	vsetobj((vm_offset_t)slab->us_data, kmem_object);
	page_free(slab->us_data, slab->us_size, slab->us_flags);
	zone_free_item(slabzone, slab, NULL, SKIP_NONE);
}

void
uma_print_stats(void)
{
	zone_foreach(uma_print_zone);
}

static void
slab_print(uma_slab_t slab)
{
	printf("slab: keg %p, data %p, freecount %d\n",
	    slab->us_keg, slab->us_data, slab->us_freecount);
}

static void
cache_print(uma_cache_t cache)
{
	printf("alloc: %p(%d), free: %p(%d)\n",
	    cache->uc_allocbucket,
	    cache->uc_allocbucket ? cache->uc_allocbucket->ub_cnt : 0,
	    cache->uc_freebucket,
	    cache->uc_freebucket ? cache->uc_freebucket->ub_cnt : 0);
}

static void
uma_print_keg(uma_keg_t keg)
{
	uma_slab_t slab;

	printf("keg: %s(%p) size %d(%d) flags %#x ipers %d ppera %d "
	    "out %d free %d limit %d\n",
	    keg->uk_name, keg, keg->uk_size, keg->uk_rsize, keg->uk_flags,
	    keg->uk_ipers, keg->uk_ppera,
	    (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free,
	    (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers);
	printf("Part slabs:\n");
	LIST_FOREACH(slab, &keg->uk_part_slab, us_link)
		slab_print(slab);
	printf("Free slabs:\n");
	LIST_FOREACH(slab, &keg->uk_free_slab, us_link)
		slab_print(slab);
	printf("Full slabs:\n");
	LIST_FOREACH(slab, &keg->uk_full_slab, us_link)
		slab_print(slab);
}

void
uma_print_zone(uma_zone_t zone)
{
	uma_cache_t cache;
	uma_klink_t kl;
	int i;

	printf("zone: %s(%p) size %d flags %#x\n",
	    zone->uz_name, zone, zone->uz_size, zone->uz_flags);
	LIST_FOREACH(kl, &zone->uz_kegs, kl_link)
		uma_print_keg(kl->kl_keg);
	CPU_FOREACH(i) {
		cache = &zone->uz_cpu[i];
		printf("CPU %d Cache:\n", i);
		cache_print(cache);
	}
}

#ifdef DDB
/*
 * Generate statistics across both the zone and its per-CPU caches.  Return
 * the desired statistics if the corresponding pointer argument is non-NULL.
 *
 * Note: does not update the zone statistics, as it can't safely clear the
 * per-CPU cache statistic.
 *
 * XXXRW: Following the uc_allocbucket and uc_freebucket pointers here isn't
 * safe from off-CPU; we should modify the caches to track this information
 * directly so that we don't have to.
 */
static void
uma_zone_sumstat(uma_zone_t z, int *cachefreep, uint64_t *allocsp,
    uint64_t *freesp, uint64_t *sleepsp)
{
	uma_cache_t cache;
	uint64_t allocs, frees, sleeps;
	int cachefree, cpu;

	allocs = frees = sleeps = 0;
	cachefree = 0;
	CPU_FOREACH(cpu) {
		cache = &z->uz_cpu[cpu];
		if (cache->uc_allocbucket != NULL)
			cachefree += cache->uc_allocbucket->ub_cnt;
		if (cache->uc_freebucket != NULL)
			cachefree += cache->uc_freebucket->ub_cnt;
		allocs += cache->uc_allocs;
		frees += cache->uc_frees;
	}
	allocs += z->uz_allocs;
	frees += z->uz_frees;
	sleeps += z->uz_sleeps;
	if (cachefreep != NULL)
		*cachefreep = cachefree;
	if (allocsp != NULL)
		*allocsp = allocs;
	if (freesp != NULL)
		*freesp = frees;
	if (sleepsp != NULL)
		*sleepsp = sleeps;
}
#endif /* DDB */

static int
sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS)
{
	uma_keg_t kz;
	uma_zone_t z;
	int count;

	count = 0;
	mtx_lock(&uma_mtx);
	LIST_FOREACH(kz, &uma_kegs, uk_link) {
		LIST_FOREACH(z, &kz->uk_zones, uz_link)
			count++;
	}
	mtx_unlock(&uma_mtx);
	return (sysctl_handle_int(oidp, &count, 0, req));
}

static int
sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
{
	struct uma_stream_header ush;
	struct uma_type_header uth;
	struct uma_percpu_stat ups;
	uma_bucket_t bucket;
	struct sbuf sbuf;
	uma_cache_t cache;
	uma_klink_t kl;
	uma_keg_t kz;
	uma_zone_t z;
	uma_keg_t k;
	int count, error, i;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);

	count = 0;
	mtx_lock(&uma_mtx);
	LIST_FOREACH(kz, &uma_kegs, uk_link) {
		LIST_FOREACH(z, &kz->uk_zones, uz_link)
			count++;
	}

	/*
	 * Insert stream header.
	 */
	bzero(&ush, sizeof(ush));
	ush.ush_version = UMA_STREAM_VERSION;
	ush.ush_maxcpus = (mp_maxid + 1);
	ush.ush_count = count;
	(void)sbuf_bcat(&sbuf, &ush, sizeof(ush));

	LIST_FOREACH(kz, &uma_kegs, uk_link) {
		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
			bzero(&uth, sizeof(uth));
			ZONE_LOCK(z);
			strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
			uth.uth_align = kz->uk_align;
			uth.uth_size = kz->uk_size;
			uth.uth_rsize = kz->uk_rsize;
			LIST_FOREACH(kl, &z->uz_kegs, kl_link) {
				k = kl->kl_keg;
				uth.uth_maxpages += k->uk_maxpages;
				uth.uth_pages += k->uk_pages;
				uth.uth_keg_free += k->uk_free;
				uth.uth_limit = (k->uk_maxpages / k->uk_ppera)
				    * k->uk_ipers;
			}

			/*
			 * A zone is secondary if it is not the first entry
			 * on the keg's zone list.
			 */
			if ((z->uz_flags & UMA_ZONE_SECONDARY) &&
			    (LIST_FIRST(&kz->uk_zones) != z))
				uth.uth_zone_flags = UTH_ZONE_SECONDARY;

			LIST_FOREACH(bucket, &z->uz_buckets, ub_link)
				uth.uth_zone_free += bucket->ub_cnt;
			uth.uth_allocs = z->uz_allocs;
			uth.uth_frees = z->uz_frees;
			uth.uth_fails = z->uz_fails;
			uth.uth_sleeps = z->uz_sleeps;
			(void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
			/*
			 * While it is not normally safe to access the cache
			 * bucket pointers while not on the CPU that owns the
			 * cache, we only allow the pointers to be exchanged
			 * without the zone lock held, not invalidated, so
			 * accept the possible race associated with bucket
			 * exchange during monitoring.
			 */
			for (i = 0; i < (mp_maxid + 1); i++) {
				bzero(&ups, sizeof(ups));
				if (kz->uk_flags & UMA_ZFLAG_INTERNAL)
					goto skip;
				if (CPU_ABSENT(i))
					goto skip;
				cache = &z->uz_cpu[i];
				if (cache->uc_allocbucket != NULL)
					ups.ups_cache_free +=
					    cache->uc_allocbucket->ub_cnt;
				if (cache->uc_freebucket != NULL)
					ups.ups_cache_free +=
					    cache->uc_freebucket->ub_cnt;
				ups.ups_allocs = cache->uc_allocs;
				ups.ups_frees = cache->uc_frees;
skip:
				(void)sbuf_bcat(&sbuf, &ups, sizeof(ups));
			}
			ZONE_UNLOCK(z);
		}
	}
	mtx_unlock(&uma_mtx);
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

#ifdef DDB
DB_SHOW_COMMAND(uma, db_show_uma)
{
	uint64_t allocs, frees, sleeps;
	uma_bucket_t bucket;
	uma_keg_t kz;
	uma_zone_t z;
	int cachefree;

	db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free",
	    "Requests", "Sleeps");
	LIST_FOREACH(kz, &uma_kegs, uk_link) {
		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
			if (kz->uk_flags & UMA_ZFLAG_INTERNAL) {
				allocs = z->uz_allocs;
				frees = z->uz_frees;
				sleeps = z->uz_sleeps;
				cachefree = 0;
			} else
				uma_zone_sumstat(z, &cachefree, &allocs,
				    &frees, &sleeps);
			if (!((z->uz_flags & UMA_ZONE_SECONDARY) &&
			    (LIST_FIRST(&kz->uk_zones) != z)))
				cachefree += kz->uk_free;
			LIST_FOREACH(bucket, &z->uz_buckets, ub_link)
				cachefree += bucket->ub_cnt;
			db_printf("%18s %8ju %8jd %8d %12ju %8ju\n", z->uz_name,
			    (uintmax_t)kz->uk_size,
			    (intmax_t)(allocs - frees), cachefree,
			    (uintmax_t)allocs, sleeps);
			if (db_pager_quit)
				return;
		}
	}
}
#endif
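
/*
 * Consumption note (informational; assumes the standard sysctl
 * registration elsewhere in this file): the handlers above back the
 * vm.zone_count and vm.zone_stats sysctls, which are the stream read by
 * userland monitoring tools such as vmstat -z via libmemstat, and the
 * DDB command above can be invoked from the debugger prompt as:
 *
 *	db> show uma
 */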