/*-
 * Copyright (c) 2002-2005, 2009 Jeffrey Roberson <jeff@FreeBSD.org>
 * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
 * Copyright (c) 2004-2006 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uma_core.c  Implementation of the Universal Memory allocator
 *
 * This allocator is intended to replace the multitude of similar object caches
 * in the standard FreeBSD kernel.  The intent is to be flexible as well as
 * efficient.  A primary design goal is to return unused memory to the rest of
 * the system.  This will make the system as a whole more flexible due to the
 * ability to move memory to subsystems which most need it instead of leaving
 * pools of reserved memory unused.
 *
 * The basic ideas stem from similar slab/zone based allocators whose algorithms
 * are well known.
 *
 */

/*
 * TODO:
 *	- Improve memory usage for large allocations
 *	- Investigate cache size adjustments
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/* I should really use ktr.. */
/*
#define UMA_DEBUG 1
#define UMA_DEBUG_ALLOC 1
#define UMA_DEBUG_ALLOC_1 1
*/

#include "opt_ddb.h"
#include "opt_param.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

#include <ddb/ddb.h>

#ifdef DEBUG_MEMGUARD
#include <vm/memguard.h>
#endif

/*
 * This is the zone and keg from which all zones are spawned.  The idea is that
 * even the zone & keg heads are allocated from the allocator, so we use the
 * bss section to bootstrap us.
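 *
 * Concretely, after uma_startup() runs, the bootstrap objects below are
 * wired up as follows:
 *
 *	kegs  = &masterzone_k	zone handing out struct uma_keg headers,
 *				backed by the statically allocated masterkeg
 *	zones = &masterzone_z	zone handing out struct uma_zone headers;
 *				its keg is allocated from the "kegs" zone
 *				during startup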
101 */ 102 static struct uma_keg masterkeg; 103 static struct uma_zone masterzone_k; 104 static struct uma_zone masterzone_z; 105 static uma_zone_t kegs = &masterzone_k; 106 static uma_zone_t zones = &masterzone_z; 107 108 /* This is the zone from which all of uma_slab_t's are allocated. */ 109 static uma_zone_t slabzone; 110 static uma_zone_t slabrefzone; /* With refcounters (for UMA_ZONE_REFCNT) */ 111 112 /* 113 * The initial hash tables come out of this zone so they can be allocated 114 * prior to malloc coming up. 115 */ 116 static uma_zone_t hashzone; 117 118 /* The boot-time adjusted value for cache line alignment. */ 119 int uma_align_cache = 64 - 1; 120 121 static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets"); 122 123 /* 124 * Are we allowed to allocate buckets? 125 */ 126 static int bucketdisable = 1; 127 128 /* Linked list of all kegs in the system */ 129 static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs); 130 131 /* This mutex protects the keg list */ 132 static struct mtx uma_mtx; 133 134 /* Linked list of boot time pages */ 135 static LIST_HEAD(,uma_slab) uma_boot_pages = 136 LIST_HEAD_INITIALIZER(uma_boot_pages); 137 138 /* This mutex protects the boot time pages list */ 139 static struct mtx uma_boot_pages_mtx; 140 141 /* Is the VM done starting up? */ 142 static int booted = 0; 143 #define UMA_STARTUP 1 144 #define UMA_STARTUP2 2 145 146 /* Maximum number of allowed items-per-slab if the slab header is OFFPAGE */ 147 static u_int uma_max_ipers; 148 static u_int uma_max_ipers_ref; 149 150 /* 151 * This is the handle used to schedule events that need to happen 152 * outside of the allocation fast path. 153 */ 154 static struct callout uma_callout; 155 #define UMA_TIMEOUT 20 /* Seconds for callout interval. */ 156 157 /* 158 * This structure is passed as the zone ctor arg so that I don't have to create 159 * a special allocation function just for zones. 160 */ 161 struct uma_zctor_args { 162 const char *name; 163 size_t size; 164 uma_ctor ctor; 165 uma_dtor dtor; 166 uma_init uminit; 167 uma_fini fini; 168 uma_keg_t keg; 169 int align; 170 u_int32_t flags; 171 }; 172 173 struct uma_kctor_args { 174 uma_zone_t zone; 175 size_t size; 176 uma_init uminit; 177 uma_fini fini; 178 int align; 179 u_int32_t flags; 180 }; 181 182 struct uma_bucket_zone { 183 uma_zone_t ubz_zone; 184 char *ubz_name; 185 int ubz_entries; 186 }; 187 188 #define BUCKET_MAX 128 189 190 struct uma_bucket_zone bucket_zones[] = { 191 { NULL, "16 Bucket", 16 }, 192 { NULL, "32 Bucket", 32 }, 193 { NULL, "64 Bucket", 64 }, 194 { NULL, "128 Bucket", 128 }, 195 { NULL, NULL, 0} 196 }; 197 198 #define BUCKET_SHIFT 4 199 #define BUCKET_ZONES ((BUCKET_MAX >> BUCKET_SHIFT) + 1) 200 201 /* 202 * bucket_size[] maps requested bucket sizes to zones that allocate a bucket 203 * of approximately the right size. 204 */ 205 static uint8_t bucket_size[BUCKET_ZONES]; 206 207 /* 208 * Flags and enumerations to be passed to internal functions. 209 */ 210 enum zfreeskip { SKIP_NONE, SKIP_DTOR, SKIP_FINI }; 211 212 #define ZFREE_STATFAIL 0x00000001 /* Update zone failure statistic. */ 213 #define ZFREE_STATFREE 0x00000002 /* Update zone free statistic. */ 214 215 /* Prototypes.. 
*/ 216 217 static void *noobj_alloc(uma_zone_t, int, u_int8_t *, int); 218 static void *page_alloc(uma_zone_t, int, u_int8_t *, int); 219 static void *startup_alloc(uma_zone_t, int, u_int8_t *, int); 220 static void page_free(void *, int, u_int8_t); 221 static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int); 222 static void cache_drain(uma_zone_t); 223 static void bucket_drain(uma_zone_t, uma_bucket_t); 224 static void bucket_cache_drain(uma_zone_t zone); 225 static int keg_ctor(void *, int, void *, int); 226 static void keg_dtor(void *, int, void *); 227 static int zone_ctor(void *, int, void *, int); 228 static void zone_dtor(void *, int, void *); 229 static int zero_init(void *, int, int); 230 static void keg_small_init(uma_keg_t keg); 231 static void keg_large_init(uma_keg_t keg); 232 static void zone_foreach(void (*zfunc)(uma_zone_t)); 233 static void zone_timeout(uma_zone_t zone); 234 static int hash_alloc(struct uma_hash *); 235 static int hash_expand(struct uma_hash *, struct uma_hash *); 236 static void hash_free(struct uma_hash *hash); 237 static void uma_timeout(void *); 238 static void uma_startup3(void); 239 static void *zone_alloc_item(uma_zone_t, void *, int); 240 static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip, 241 int); 242 static void bucket_enable(void); 243 static void bucket_init(void); 244 static uma_bucket_t bucket_alloc(int, int); 245 static void bucket_free(uma_bucket_t); 246 static void bucket_zone_drain(void); 247 static int zone_alloc_bucket(uma_zone_t zone, int flags); 248 static uma_slab_t zone_fetch_slab(uma_zone_t zone, uma_keg_t last, int flags); 249 static uma_slab_t zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int flags); 250 static void *slab_alloc_item(uma_zone_t zone, uma_slab_t slab); 251 static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, 252 uma_fini fini, int align, u_int32_t flags); 253 static inline void zone_relock(uma_zone_t zone, uma_keg_t keg); 254 static inline void keg_relock(uma_keg_t keg, uma_zone_t zone); 255 256 void uma_print_zone(uma_zone_t); 257 void uma_print_stats(void); 258 static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS); 259 static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS); 260 261 SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL); 262 263 SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLTYPE_INT, 264 0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones"); 265 266 SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT, 267 0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats"); 268 269 static int zone_warnings = 1; 270 TUNABLE_INT("vm.zone_warnings", &zone_warnings); 271 SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RW, &zone_warnings, 0, 272 "Warn when UMA zones becomes full"); 273 274 /* 275 * This routine checks to see whether or not it's safe to enable buckets. 276 */ 277 278 static void 279 bucket_enable(void) 280 { 281 bucketdisable = vm_page_count_min(); 282 } 283 284 /* 285 * Initialize bucket_zones, the array of zones of buckets of various sizes. 286 * 287 * For each zone, calculate the memory required for each bucket, consisting 288 * of the header and an array of pointers. Initialize bucket_size[] to point 289 * the range of appropriate bucket sizes at the zone. 
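 *
 * For example, with BUCKET_SHIFT == 4 and the bucket_zones[] table above,
 * bucket_zone_lookup() ends up mapping requested entry counts to zones as
 * follows:
 *
 *	  1..16 entries  -> "16 Bucket"
 *	 17..32 entries  -> "32 Bucket"
 *	 33..64 entries  -> "64 Bucket"
 *	65..128 entries  -> "128 Bucket"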
290 */ 291 static void 292 bucket_init(void) 293 { 294 struct uma_bucket_zone *ubz; 295 int i; 296 int j; 297 298 for (i = 0, j = 0; bucket_zones[j].ubz_entries != 0; j++) { 299 int size; 300 301 ubz = &bucket_zones[j]; 302 size = roundup(sizeof(struct uma_bucket), sizeof(void *)); 303 size += sizeof(void *) * ubz->ubz_entries; 304 ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size, 305 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 306 UMA_ZFLAG_INTERNAL | UMA_ZFLAG_BUCKET); 307 for (; i <= ubz->ubz_entries; i += (1 << BUCKET_SHIFT)) 308 bucket_size[i >> BUCKET_SHIFT] = j; 309 } 310 } 311 312 /* 313 * Given a desired number of entries for a bucket, return the zone from which 314 * to allocate the bucket. 315 */ 316 static struct uma_bucket_zone * 317 bucket_zone_lookup(int entries) 318 { 319 int idx; 320 321 idx = howmany(entries, 1 << BUCKET_SHIFT); 322 return (&bucket_zones[bucket_size[idx]]); 323 } 324 325 static uma_bucket_t 326 bucket_alloc(int entries, int bflags) 327 { 328 struct uma_bucket_zone *ubz; 329 uma_bucket_t bucket; 330 331 /* 332 * This is to stop us from allocating per cpu buckets while we're 333 * running out of vm.boot_pages. Otherwise, we would exhaust the 334 * boot pages. This also prevents us from allocating buckets in 335 * low memory situations. 336 */ 337 if (bucketdisable) 338 return (NULL); 339 340 ubz = bucket_zone_lookup(entries); 341 bucket = zone_alloc_item(ubz->ubz_zone, NULL, bflags); 342 if (bucket) { 343 #ifdef INVARIANTS 344 bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries); 345 #endif 346 bucket->ub_cnt = 0; 347 bucket->ub_entries = ubz->ubz_entries; 348 } 349 350 return (bucket); 351 } 352 353 static void 354 bucket_free(uma_bucket_t bucket) 355 { 356 struct uma_bucket_zone *ubz; 357 358 ubz = bucket_zone_lookup(bucket->ub_entries); 359 zone_free_item(ubz->ubz_zone, bucket, NULL, SKIP_NONE, 360 ZFREE_STATFREE); 361 } 362 363 static void 364 bucket_zone_drain(void) 365 { 366 struct uma_bucket_zone *ubz; 367 368 for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) 369 zone_drain(ubz->ubz_zone); 370 } 371 372 static void 373 zone_log_warning(uma_zone_t zone) 374 { 375 static const struct timeval warninterval = { 300, 0 }; 376 377 if (!zone_warnings || zone->uz_warning == NULL) 378 return; 379 380 if (ratecheck(&zone->uz_ratecheck, &warninterval)) 381 printf("[zone: %s] %s\n", zone->uz_name, zone->uz_warning); 382 } 383 384 static inline uma_keg_t 385 zone_first_keg(uma_zone_t zone) 386 { 387 388 return (LIST_FIRST(&zone->uz_kegs)->kl_keg); 389 } 390 391 static void 392 zone_foreach_keg(uma_zone_t zone, void (*kegfn)(uma_keg_t)) 393 { 394 uma_klink_t klink; 395 396 LIST_FOREACH(klink, &zone->uz_kegs, kl_link) 397 kegfn(klink->kl_keg); 398 } 399 400 /* 401 * Routine called by timeout which is used to fire off some time interval 402 * based calculations. (stats, hash size, etc.) 403 * 404 * Arguments: 405 * arg Unused 406 * 407 * Returns: 408 * Nothing 409 */ 410 static void 411 uma_timeout(void *unused) 412 { 413 bucket_enable(); 414 zone_foreach(zone_timeout); 415 416 /* Reschedule this event */ 417 callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL); 418 } 419 420 /* 421 * Routine to perform timeout driven calculations. This expands the 422 * hashes and does per cpu statistics aggregation. 423 * 424 * Returns nothing. 425 */ 426 static void 427 keg_timeout(uma_keg_t keg) 428 { 429 430 KEG_LOCK(keg); 431 /* 432 * Expand the keg hash table. 433 * 434 * This is done if the number of slabs is larger than the hash size. 
	 * What I'm trying to do here is completely reduce collisions.  This
	 * may be a little aggressive.  Should I allow for two collisions max?
	 */
	if (keg->uk_flags & UMA_ZONE_HASH &&
	    keg->uk_pages / keg->uk_ppera >= keg->uk_hash.uh_hashsize) {
		struct uma_hash newhash;
		struct uma_hash oldhash;
		int ret;

		/*
		 * This is so involved because allocating and freeing
		 * while the keg lock is held will lead to deadlock.
		 * I have to do everything in stages and check for
		 * races.
		 */
		newhash = keg->uk_hash;
		KEG_UNLOCK(keg);
		ret = hash_alloc(&newhash);
		KEG_LOCK(keg);
		if (ret) {
			if (hash_expand(&keg->uk_hash, &newhash)) {
				oldhash = keg->uk_hash;
				keg->uk_hash = newhash;
			} else
				oldhash = newhash;

			KEG_UNLOCK(keg);
			hash_free(&oldhash);
			KEG_LOCK(keg);
		}
	}
	KEG_UNLOCK(keg);
}

static void
zone_timeout(uma_zone_t zone)
{

	zone_foreach_keg(zone, &keg_timeout);
}

/*
 * Allocate and zero fill the next sized hash table from the appropriate
 * backing store.
 *
 * Arguments:
 *	hash  A new hash structure with the old hash size in uh_hashsize
 *
 * Returns:
 *	1 on success and 0 on failure.
 */
static int
hash_alloc(struct uma_hash *hash)
{
	int oldsize;
	int alloc;

	oldsize = hash->uh_hashsize;

	/* We're just going to go to a power of two greater */
	if (oldsize) {
		hash->uh_hashsize = oldsize * 2;
		alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
		hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
		    M_UMAHASH, M_NOWAIT);
	} else {
		alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
		hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
		    M_WAITOK);
		hash->uh_hashsize = UMA_HASH_SIZE_INIT;
	}
	if (hash->uh_slab_hash) {
		bzero(hash->uh_slab_hash, alloc);
		hash->uh_hashmask = hash->uh_hashsize - 1;
		return (1);
	}

	return (0);
}

/*
 * Expands the hash table for HASH zones.  This is done from zone_timeout
 * to reduce collisions.  This must not be done in the regular allocation
 * path, otherwise, we can recurse on the vm while allocating pages.
 *
 * Arguments:
 *	oldhash  The hash you want to expand
 *	newhash  The hash structure for the new table
 *
 * Returns:
 *	1 if the table was expanded and 0 otherwise.
 *
 * Discussion:
 */
static int
hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
{
	uma_slab_t slab;
	int hval;
	int i;

	if (!newhash->uh_slab_hash)
		return (0);

	if (oldhash->uh_hashsize >= newhash->uh_hashsize)
		return (0);

	/*
	 * I need to investigate hash algorithms for resizing without a
	 * full rehash.
	 */

	for (i = 0; i < oldhash->uh_hashsize; i++)
		while (!SLIST_EMPTY(&oldhash->uh_slab_hash[i])) {
			slab = SLIST_FIRST(&oldhash->uh_slab_hash[i]);
			SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[i], us_hlink);
			hval = UMA_HASH(newhash, slab->us_data);
			SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
			    slab, us_hlink);
		}

	return (1);
}

/*
 * Free the hash bucket to the appropriate backing store.
 *
 * Arguments:
 *	hash  The hash structure whose slab_hash table is being freed
 *
 * Returns:
 *	Nothing
 */
static void
hash_free(struct uma_hash *hash)
{
	if (hash->uh_slab_hash == NULL)
		return;
	if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
		zone_free_item(hashzone,
		    hash->uh_slab_hash, NULL, SKIP_NONE, ZFREE_STATFREE);
	else
		free(hash->uh_slab_hash, M_UMAHASH);
}

/*
 * Frees all outstanding items in a bucket
 *
 * Arguments:
 *	zone    The zone to free to, must be unlocked.
 *	bucket  The free/alloc bucket with items, cpu queue must be locked.
 *
 * Returns:
 *	Nothing
 */

static void
bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
{
	void *item;

	if (bucket == NULL)
		return;

	while (bucket->ub_cnt > 0) {
		bucket->ub_cnt--;
		item = bucket->ub_bucket[bucket->ub_cnt];
#ifdef INVARIANTS
		bucket->ub_bucket[bucket->ub_cnt] = NULL;
		KASSERT(item != NULL,
		    ("bucket_drain: botched ptr, item is NULL"));
#endif
		zone_free_item(zone, item, NULL, SKIP_DTOR, 0);
	}
}

/*
 * Drains the per cpu caches for a zone.
 *
 * NOTE: This may only be called while the zone is being torn down, and not
 * during normal operation.  This is necessary in order that we do not have
 * to migrate CPUs to drain the per-CPU caches.
 *
 * Arguments:
 *	zone  The zone to drain, must be unlocked.
 *
 * Returns:
 *	Nothing
 */
static void
cache_drain(uma_zone_t zone)
{
	uma_cache_t cache;
	int cpu;

	/*
	 * XXX: It is safe to not lock the per-CPU caches, because we're
	 * tearing down the zone anyway.  I.e., there will be no further use
	 * of the caches at this point.
	 *
	 * XXX: It would be good to be able to assert that the zone is being
	 * torn down to prevent improper use of cache_drain().
	 *
	 * XXX: We lock the zone before passing into bucket_cache_drain() as
	 * it is used elsewhere.  Should the tear-down path be made special
	 * there in some form?
	 */
	CPU_FOREACH(cpu) {
		cache = &zone->uz_cpu[cpu];
		bucket_drain(zone, cache->uc_allocbucket);
		bucket_drain(zone, cache->uc_freebucket);
		if (cache->uc_allocbucket != NULL)
			bucket_free(cache->uc_allocbucket);
		if (cache->uc_freebucket != NULL)
			bucket_free(cache->uc_freebucket);
		cache->uc_allocbucket = cache->uc_freebucket = NULL;
	}
	ZONE_LOCK(zone);
	bucket_cache_drain(zone);
	ZONE_UNLOCK(zone);
}

/*
 * Drain the cached buckets from a zone.  Expects a locked zone on entry.
 */
static void
bucket_cache_drain(uma_zone_t zone)
{
	uma_bucket_t bucket;

	/*
	 * Drain the bucket queues and free the buckets, we just keep two per
	 * cpu (alloc/free).
	 */
	while ((bucket = LIST_FIRST(&zone->uz_full_bucket)) != NULL) {
		LIST_REMOVE(bucket, ub_link);
		ZONE_UNLOCK(zone);
		bucket_drain(zone, bucket);
		bucket_free(bucket);
		ZONE_LOCK(zone);
	}

	/* Now we do the free queue.. */
	while ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
		LIST_REMOVE(bucket, ub_link);
		bucket_free(bucket);
	}
}

/*
 * Frees pages from a keg back to the system.  This is done on demand from
 * the pageout daemon.
 *
 * Returns nothing.
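 *
 * In outline, the routine below works in two passes so that no pages are
 * returned to the VM while the keg lock is held: under KEG_LOCK() it
 * unlinks every fully free, non-boot slab onto a local freeslabs list,
 * and only after KEG_UNLOCK() does it run uk_fini on the items, release
 * OFFPAGE slab headers and hand the pages back through keg->uk_freef().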
690 */ 691 static void 692 keg_drain(uma_keg_t keg) 693 { 694 struct slabhead freeslabs = { 0 }; 695 uma_slab_t slab; 696 uma_slab_t n; 697 u_int8_t flags; 698 u_int8_t *mem; 699 int i; 700 701 /* 702 * We don't want to take pages from statically allocated kegs at this 703 * time 704 */ 705 if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL) 706 return; 707 708 #ifdef UMA_DEBUG 709 printf("%s free items: %u\n", keg->uk_name, keg->uk_free); 710 #endif 711 KEG_LOCK(keg); 712 if (keg->uk_free == 0) 713 goto finished; 714 715 slab = LIST_FIRST(&keg->uk_free_slab); 716 while (slab) { 717 n = LIST_NEXT(slab, us_link); 718 719 /* We have no where to free these to */ 720 if (slab->us_flags & UMA_SLAB_BOOT) { 721 slab = n; 722 continue; 723 } 724 725 LIST_REMOVE(slab, us_link); 726 keg->uk_pages -= keg->uk_ppera; 727 keg->uk_free -= keg->uk_ipers; 728 729 if (keg->uk_flags & UMA_ZONE_HASH) 730 UMA_HASH_REMOVE(&keg->uk_hash, slab, slab->us_data); 731 732 SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink); 733 734 slab = n; 735 } 736 finished: 737 KEG_UNLOCK(keg); 738 739 while ((slab = SLIST_FIRST(&freeslabs)) != NULL) { 740 SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink); 741 if (keg->uk_fini) 742 for (i = 0; i < keg->uk_ipers; i++) 743 keg->uk_fini( 744 slab->us_data + (keg->uk_rsize * i), 745 keg->uk_size); 746 flags = slab->us_flags; 747 mem = slab->us_data; 748 749 if (keg->uk_flags & UMA_ZONE_VTOSLAB) { 750 vm_object_t obj; 751 752 if (flags & UMA_SLAB_KMEM) 753 obj = kmem_object; 754 else if (flags & UMA_SLAB_KERNEL) 755 obj = kernel_object; 756 else 757 obj = NULL; 758 for (i = 0; i < keg->uk_ppera; i++) 759 vsetobj((vm_offset_t)mem + (i * PAGE_SIZE), 760 obj); 761 } 762 if (keg->uk_flags & UMA_ZONE_OFFPAGE) 763 zone_free_item(keg->uk_slabzone, slab, NULL, 764 SKIP_NONE, ZFREE_STATFREE); 765 #ifdef UMA_DEBUG 766 printf("%s: Returning %d bytes.\n", 767 keg->uk_name, UMA_SLAB_SIZE * keg->uk_ppera); 768 #endif 769 keg->uk_freef(mem, UMA_SLAB_SIZE * keg->uk_ppera, flags); 770 } 771 } 772 773 static void 774 zone_drain_wait(uma_zone_t zone, int waitok) 775 { 776 777 /* 778 * Set draining to interlock with zone_dtor() so we can release our 779 * locks as we go. Only dtor() should do a WAITOK call since it 780 * is the only call that knows the structure will still be available 781 * when it wakes up. 782 */ 783 ZONE_LOCK(zone); 784 while (zone->uz_flags & UMA_ZFLAG_DRAINING) { 785 if (waitok == M_NOWAIT) 786 goto out; 787 mtx_unlock(&uma_mtx); 788 msleep(zone, zone->uz_lock, PVM, "zonedrain", 1); 789 mtx_lock(&uma_mtx); 790 } 791 zone->uz_flags |= UMA_ZFLAG_DRAINING; 792 bucket_cache_drain(zone); 793 ZONE_UNLOCK(zone); 794 /* 795 * The DRAINING flag protects us from being freed while 796 * we're running. Normally the uma_mtx would protect us but we 797 * must be able to release and acquire the right lock for each keg. 798 */ 799 zone_foreach_keg(zone, &keg_drain); 800 ZONE_LOCK(zone); 801 zone->uz_flags &= ~UMA_ZFLAG_DRAINING; 802 wakeup(zone); 803 out: 804 ZONE_UNLOCK(zone); 805 } 806 807 void 808 zone_drain(uma_zone_t zone) 809 { 810 811 zone_drain_wait(zone, M_NOWAIT); 812 } 813 814 /* 815 * Allocate a new slab for a keg. This does not insert the slab onto a list. 816 * 817 * Arguments: 818 * wait Shall we wait? 819 * 820 * Returns: 821 * The slab that was allocated or NULL if there is no memory and the 822 * caller specified M_NOWAIT. 
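 *
 * As a small worked example of the embedded free list set up below: a slab
 * with four items (uk_ipers == 4) starts out with us_firstfree = 0 and
 * us_freelist[].us_item = { 1, 2, 3, 4 }, i.e. each entry holds the index
 * of the next free item, so the allocation path can follow the chain
 * starting at us_firstfree.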
823 */ 824 static uma_slab_t 825 keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int wait) 826 { 827 uma_slabrefcnt_t slabref; 828 uma_alloc allocf; 829 uma_slab_t slab; 830 u_int8_t *mem; 831 u_int8_t flags; 832 int i; 833 834 mtx_assert(&keg->uk_lock, MA_OWNED); 835 slab = NULL; 836 837 #ifdef UMA_DEBUG 838 printf("slab_zalloc: Allocating a new slab for %s\n", keg->uk_name); 839 #endif 840 allocf = keg->uk_allocf; 841 KEG_UNLOCK(keg); 842 843 if (keg->uk_flags & UMA_ZONE_OFFPAGE) { 844 slab = zone_alloc_item(keg->uk_slabzone, NULL, wait); 845 if (slab == NULL) { 846 KEG_LOCK(keg); 847 return NULL; 848 } 849 } 850 851 /* 852 * This reproduces the old vm_zone behavior of zero filling pages the 853 * first time they are added to a zone. 854 * 855 * Malloced items are zeroed in uma_zalloc. 856 */ 857 858 if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0) 859 wait |= M_ZERO; 860 else 861 wait &= ~M_ZERO; 862 863 if (keg->uk_flags & UMA_ZONE_NODUMP) 864 wait |= M_NODUMP; 865 866 /* zone is passed for legacy reasons. */ 867 mem = allocf(zone, keg->uk_ppera * UMA_SLAB_SIZE, &flags, wait); 868 if (mem == NULL) { 869 if (keg->uk_flags & UMA_ZONE_OFFPAGE) 870 zone_free_item(keg->uk_slabzone, slab, NULL, 871 SKIP_NONE, ZFREE_STATFREE); 872 KEG_LOCK(keg); 873 return (NULL); 874 } 875 876 /* Point the slab into the allocated memory */ 877 if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) 878 slab = (uma_slab_t )(mem + keg->uk_pgoff); 879 880 if (keg->uk_flags & UMA_ZONE_VTOSLAB) 881 for (i = 0; i < keg->uk_ppera; i++) 882 vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab); 883 884 slab->us_keg = keg; 885 slab->us_data = mem; 886 slab->us_freecount = keg->uk_ipers; 887 slab->us_firstfree = 0; 888 slab->us_flags = flags; 889 890 if (keg->uk_flags & UMA_ZONE_REFCNT) { 891 slabref = (uma_slabrefcnt_t)slab; 892 for (i = 0; i < keg->uk_ipers; i++) { 893 slabref->us_freelist[i].us_refcnt = 0; 894 slabref->us_freelist[i].us_item = i+1; 895 } 896 } else { 897 for (i = 0; i < keg->uk_ipers; i++) 898 slab->us_freelist[i].us_item = i+1; 899 } 900 901 if (keg->uk_init != NULL) { 902 for (i = 0; i < keg->uk_ipers; i++) 903 if (keg->uk_init(slab->us_data + (keg->uk_rsize * i), 904 keg->uk_size, wait) != 0) 905 break; 906 if (i != keg->uk_ipers) { 907 if (keg->uk_fini != NULL) { 908 for (i--; i > -1; i--) 909 keg->uk_fini(slab->us_data + 910 (keg->uk_rsize * i), 911 keg->uk_size); 912 } 913 if (keg->uk_flags & UMA_ZONE_VTOSLAB) { 914 vm_object_t obj; 915 916 if (flags & UMA_SLAB_KMEM) 917 obj = kmem_object; 918 else if (flags & UMA_SLAB_KERNEL) 919 obj = kernel_object; 920 else 921 obj = NULL; 922 for (i = 0; i < keg->uk_ppera; i++) 923 vsetobj((vm_offset_t)mem + 924 (i * PAGE_SIZE), obj); 925 } 926 if (keg->uk_flags & UMA_ZONE_OFFPAGE) 927 zone_free_item(keg->uk_slabzone, slab, 928 NULL, SKIP_NONE, ZFREE_STATFREE); 929 keg->uk_freef(mem, UMA_SLAB_SIZE * keg->uk_ppera, 930 flags); 931 KEG_LOCK(keg); 932 return (NULL); 933 } 934 } 935 KEG_LOCK(keg); 936 937 if (keg->uk_flags & UMA_ZONE_HASH) 938 UMA_HASH_INSERT(&keg->uk_hash, slab, mem); 939 940 keg->uk_pages += keg->uk_ppera; 941 keg->uk_free += keg->uk_ipers; 942 943 return (slab); 944 } 945 946 /* 947 * This function is intended to be used early on in place of page_alloc() so 948 * that we may use the boot time page cache to satisfy allocations before 949 * the VM is ready. 
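 *
 * For instance, a keg created early in boot has uk_allocf pointed here by
 * keg_ctor(); its first slabs are carved out of the uma_boot_pages list
 * that uma_startup() filled from the vm.boot_pages reservation.  Once that
 * list can no longer satisfy a request after uma_startup2(), the code
 * below rewrites uk_allocf to page_alloc() (or, for single-page kegs on
 * UMA_MD_SMALL_ALLOC platforms, uma_small_alloc()) so that later slabs
 * come straight from the VM.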
950 */ 951 static void * 952 startup_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait) 953 { 954 uma_keg_t keg; 955 uma_slab_t tmps; 956 int pages, check_pages; 957 958 keg = zone_first_keg(zone); 959 pages = howmany(bytes, PAGE_SIZE); 960 check_pages = pages - 1; 961 KASSERT(pages > 0, ("startup_alloc can't reserve 0 pages\n")); 962 963 /* 964 * Check our small startup cache to see if it has pages remaining. 965 */ 966 mtx_lock(&uma_boot_pages_mtx); 967 968 /* First check if we have enough room. */ 969 tmps = LIST_FIRST(&uma_boot_pages); 970 while (tmps != NULL && check_pages-- > 0) 971 tmps = LIST_NEXT(tmps, us_link); 972 if (tmps != NULL) { 973 /* 974 * It's ok to lose tmps references. The last one will 975 * have tmps->us_data pointing to the start address of 976 * "pages" contiguous pages of memory. 977 */ 978 while (pages-- > 0) { 979 tmps = LIST_FIRST(&uma_boot_pages); 980 LIST_REMOVE(tmps, us_link); 981 } 982 mtx_unlock(&uma_boot_pages_mtx); 983 *pflag = tmps->us_flags; 984 return (tmps->us_data); 985 } 986 mtx_unlock(&uma_boot_pages_mtx); 987 if (booted < UMA_STARTUP2) 988 panic("UMA: Increase vm.boot_pages"); 989 /* 990 * Now that we've booted reset these users to their real allocator. 991 */ 992 #ifdef UMA_MD_SMALL_ALLOC 993 keg->uk_allocf = (keg->uk_ppera > 1) ? page_alloc : uma_small_alloc; 994 #else 995 keg->uk_allocf = page_alloc; 996 #endif 997 return keg->uk_allocf(zone, bytes, pflag, wait); 998 } 999 1000 /* 1001 * Allocates a number of pages from the system 1002 * 1003 * Arguments: 1004 * bytes The number of bytes requested 1005 * wait Shall we wait? 1006 * 1007 * Returns: 1008 * A pointer to the alloced memory or possibly 1009 * NULL if M_NOWAIT is set. 1010 */ 1011 static void * 1012 page_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait) 1013 { 1014 void *p; /* Returned page */ 1015 1016 *pflag = UMA_SLAB_KMEM; 1017 p = (void *) kmem_malloc(kmem_map, bytes, wait); 1018 1019 return (p); 1020 } 1021 1022 /* 1023 * Allocates a number of pages from within an object 1024 * 1025 * Arguments: 1026 * bytes The number of bytes requested 1027 * wait Shall we wait? 1028 * 1029 * Returns: 1030 * A pointer to the alloced memory or possibly 1031 * NULL if M_NOWAIT is set. 1032 */ 1033 static void * 1034 noobj_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait) 1035 { 1036 TAILQ_HEAD(, vm_page) alloctail; 1037 u_long npages; 1038 vm_offset_t retkva, zkva; 1039 vm_page_t p, p_next; 1040 uma_keg_t keg; 1041 1042 TAILQ_INIT(&alloctail); 1043 keg = zone_first_keg(zone); 1044 1045 npages = howmany(bytes, PAGE_SIZE); 1046 while (npages > 0) { 1047 p = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT | 1048 VM_ALLOC_WIRED | VM_ALLOC_NOOBJ); 1049 if (p != NULL) { 1050 /* 1051 * Since the page does not belong to an object, its 1052 * listq is unused. 1053 */ 1054 TAILQ_INSERT_TAIL(&alloctail, p, listq); 1055 npages--; 1056 continue; 1057 } 1058 if (wait & M_WAITOK) { 1059 VM_WAIT; 1060 continue; 1061 } 1062 1063 /* 1064 * Page allocation failed, free intermediate pages and 1065 * exit. 
1066 */ 1067 TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) { 1068 vm_page_unwire(p, 0); 1069 vm_page_free(p); 1070 } 1071 return (NULL); 1072 } 1073 *flags = UMA_SLAB_PRIV; 1074 zkva = keg->uk_kva + 1075 atomic_fetchadd_long(&keg->uk_offset, round_page(bytes)); 1076 retkva = zkva; 1077 TAILQ_FOREACH(p, &alloctail, listq) { 1078 pmap_qenter(zkva, &p, 1); 1079 zkva += PAGE_SIZE; 1080 } 1081 1082 return ((void *)retkva); 1083 } 1084 1085 /* 1086 * Frees a number of pages to the system 1087 * 1088 * Arguments: 1089 * mem A pointer to the memory to be freed 1090 * size The size of the memory being freed 1091 * flags The original p->us_flags field 1092 * 1093 * Returns: 1094 * Nothing 1095 */ 1096 static void 1097 page_free(void *mem, int size, u_int8_t flags) 1098 { 1099 vm_map_t map; 1100 1101 if (flags & UMA_SLAB_KMEM) 1102 map = kmem_map; 1103 else if (flags & UMA_SLAB_KERNEL) 1104 map = kernel_map; 1105 else 1106 panic("UMA: page_free used with invalid flags %d", flags); 1107 1108 kmem_free(map, (vm_offset_t)mem, size); 1109 } 1110 1111 /* 1112 * Zero fill initializer 1113 * 1114 * Arguments/Returns follow uma_init specifications 1115 */ 1116 static int 1117 zero_init(void *mem, int size, int flags) 1118 { 1119 bzero(mem, size); 1120 return (0); 1121 } 1122 1123 /* 1124 * Finish creating a small uma keg. This calculates ipers, and the keg size. 1125 * 1126 * Arguments 1127 * keg The zone we should initialize 1128 * 1129 * Returns 1130 * Nothing 1131 */ 1132 static void 1133 keg_small_init(uma_keg_t keg) 1134 { 1135 u_int rsize; 1136 u_int memused; 1137 u_int wastedspace; 1138 u_int shsize; 1139 1140 KASSERT(keg != NULL, ("Keg is null in keg_small_init")); 1141 rsize = keg->uk_size; 1142 1143 if (rsize < UMA_SMALLEST_UNIT) 1144 rsize = UMA_SMALLEST_UNIT; 1145 if (rsize & keg->uk_align) 1146 rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1); 1147 1148 keg->uk_rsize = rsize; 1149 keg->uk_ppera = 1; 1150 1151 if (keg->uk_flags & UMA_ZONE_OFFPAGE) { 1152 shsize = 0; 1153 } else if (keg->uk_flags & UMA_ZONE_REFCNT) { 1154 rsize += UMA_FRITMREF_SZ; /* linkage & refcnt */ 1155 shsize = sizeof(struct uma_slab_refcnt); 1156 } else { 1157 rsize += UMA_FRITM_SZ; /* Account for linkage */ 1158 shsize = sizeof(struct uma_slab); 1159 } 1160 1161 keg->uk_ipers = (UMA_SLAB_SIZE - shsize) / rsize; 1162 KASSERT(keg->uk_ipers != 0, ("keg_small_init: ipers is 0")); 1163 memused = keg->uk_ipers * rsize + shsize; 1164 wastedspace = UMA_SLAB_SIZE - memused; 1165 1166 /* 1167 * We can't do OFFPAGE if we're internal or if we've been 1168 * asked to not go to the VM for buckets. If we do this we 1169 * may end up going to the VM (kmem_map) for slabs which we 1170 * do not want to do if we're UMA_ZFLAG_CACHEONLY as a 1171 * result of UMA_ZONE_VM, which clearly forbids it. 
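	 *
	 * A purely illustrative example of the trade-off (sizes made up,
	 * not taken from uma_int.h, and the small per-item free-list
	 * linkage is ignored for simplicity): on a 4096-byte slab with a
	 * 32-byte inline header and 256-byte items, the inline layout gives
	 * ipers = (4096 - 32) / 256 = 15 and wastes 4096 - (15 * 256 + 32)
	 * = 224 bytes.  If that waste reaches UMA_MAX_WASTE and 4096 / 256
	 * = 16 items would fit without the inline header, the code below
	 * switches the keg to OFFPAGE and takes ipers = 16 instead.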
1172 */ 1173 if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) || 1174 (keg->uk_flags & UMA_ZFLAG_CACHEONLY)) 1175 return; 1176 1177 if ((wastedspace >= UMA_MAX_WASTE) && 1178 (keg->uk_ipers < (UMA_SLAB_SIZE / keg->uk_rsize))) { 1179 keg->uk_ipers = UMA_SLAB_SIZE / keg->uk_rsize; 1180 KASSERT(keg->uk_ipers <= 255, 1181 ("keg_small_init: keg->uk_ipers too high!")); 1182 #ifdef UMA_DEBUG 1183 printf("UMA decided we need offpage slab headers for " 1184 "keg: %s, calculated wastedspace = %d, " 1185 "maximum wasted space allowed = %d, " 1186 "calculated ipers = %d, " 1187 "new wasted space = %d\n", keg->uk_name, wastedspace, 1188 UMA_MAX_WASTE, keg->uk_ipers, 1189 UMA_SLAB_SIZE - keg->uk_ipers * keg->uk_rsize); 1190 #endif 1191 keg->uk_flags |= UMA_ZONE_OFFPAGE; 1192 if ((keg->uk_flags & UMA_ZONE_VTOSLAB) == 0) 1193 keg->uk_flags |= UMA_ZONE_HASH; 1194 } 1195 } 1196 1197 /* 1198 * Finish creating a large (> UMA_SLAB_SIZE) uma kegs. Just give in and do 1199 * OFFPAGE for now. When I can allow for more dynamic slab sizes this will be 1200 * more complicated. 1201 * 1202 * Arguments 1203 * keg The keg we should initialize 1204 * 1205 * Returns 1206 * Nothing 1207 */ 1208 static void 1209 keg_large_init(uma_keg_t keg) 1210 { 1211 int pages; 1212 1213 KASSERT(keg != NULL, ("Keg is null in keg_large_init")); 1214 KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0, 1215 ("keg_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY keg")); 1216 1217 pages = keg->uk_size / UMA_SLAB_SIZE; 1218 1219 /* Account for remainder */ 1220 if ((pages * UMA_SLAB_SIZE) < keg->uk_size) 1221 pages++; 1222 1223 keg->uk_ppera = pages; 1224 keg->uk_ipers = 1; 1225 keg->uk_rsize = keg->uk_size; 1226 1227 /* We can't do OFFPAGE if we're internal, bail out here. */ 1228 if (keg->uk_flags & UMA_ZFLAG_INTERNAL) 1229 return; 1230 1231 keg->uk_flags |= UMA_ZONE_OFFPAGE; 1232 if ((keg->uk_flags & UMA_ZONE_VTOSLAB) == 0) 1233 keg->uk_flags |= UMA_ZONE_HASH; 1234 } 1235 1236 static void 1237 keg_cachespread_init(uma_keg_t keg) 1238 { 1239 int alignsize; 1240 int trailer; 1241 int pages; 1242 int rsize; 1243 1244 alignsize = keg->uk_align + 1; 1245 rsize = keg->uk_size; 1246 /* 1247 * We want one item to start on every align boundary in a page. To 1248 * do this we will span pages. We will also extend the item by the 1249 * size of align if it is an even multiple of align. Otherwise, it 1250 * would fall on the same boundary every time. 1251 */ 1252 if (rsize & keg->uk_align) 1253 rsize = (rsize & ~keg->uk_align) + alignsize; 1254 if ((rsize & alignsize) == 0) 1255 rsize += alignsize; 1256 trailer = rsize - keg->uk_size; 1257 pages = (rsize * (PAGE_SIZE / alignsize)) / PAGE_SIZE; 1258 pages = MIN(pages, (128 * 1024) / PAGE_SIZE); 1259 keg->uk_rsize = rsize; 1260 keg->uk_ppera = pages; 1261 keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize; 1262 keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB; 1263 KASSERT(keg->uk_ipers <= uma_max_ipers, 1264 ("%s: keg->uk_ipers too high(%d) increase max_ipers", __func__, 1265 keg->uk_ipers)); 1266 } 1267 1268 /* 1269 * Keg header ctor. This initializes all fields, locks, etc. And inserts 1270 * the keg onto the global keg list. 
1271 * 1272 * Arguments/Returns follow uma_ctor specifications 1273 * udata Actually uma_kctor_args 1274 */ 1275 static int 1276 keg_ctor(void *mem, int size, void *udata, int flags) 1277 { 1278 struct uma_kctor_args *arg = udata; 1279 uma_keg_t keg = mem; 1280 uma_zone_t zone; 1281 1282 bzero(keg, size); 1283 keg->uk_size = arg->size; 1284 keg->uk_init = arg->uminit; 1285 keg->uk_fini = arg->fini; 1286 keg->uk_align = arg->align; 1287 keg->uk_free = 0; 1288 keg->uk_pages = 0; 1289 keg->uk_flags = arg->flags; 1290 keg->uk_allocf = page_alloc; 1291 keg->uk_freef = page_free; 1292 keg->uk_recurse = 0; 1293 keg->uk_slabzone = NULL; 1294 1295 /* 1296 * The master zone is passed to us at keg-creation time. 1297 */ 1298 zone = arg->zone; 1299 keg->uk_name = zone->uz_name; 1300 1301 if (arg->flags & UMA_ZONE_VM) 1302 keg->uk_flags |= UMA_ZFLAG_CACHEONLY; 1303 1304 if (arg->flags & UMA_ZONE_ZINIT) 1305 keg->uk_init = zero_init; 1306 1307 if (arg->flags & UMA_ZONE_REFCNT || arg->flags & UMA_ZONE_MALLOC) 1308 keg->uk_flags |= UMA_ZONE_VTOSLAB; 1309 1310 /* 1311 * The +UMA_FRITM_SZ added to uk_size is to account for the 1312 * linkage that is added to the size in keg_small_init(). If 1313 * we don't account for this here then we may end up in 1314 * keg_small_init() with a calculated 'ipers' of 0. 1315 */ 1316 if (keg->uk_flags & UMA_ZONE_REFCNT) { 1317 if (keg->uk_flags & UMA_ZONE_CACHESPREAD) 1318 keg_cachespread_init(keg); 1319 else if ((keg->uk_size+UMA_FRITMREF_SZ) > 1320 (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt))) 1321 keg_large_init(keg); 1322 else 1323 keg_small_init(keg); 1324 } else { 1325 if (keg->uk_flags & UMA_ZONE_CACHESPREAD) 1326 keg_cachespread_init(keg); 1327 else if ((keg->uk_size+UMA_FRITM_SZ) > 1328 (UMA_SLAB_SIZE - sizeof(struct uma_slab))) 1329 keg_large_init(keg); 1330 else 1331 keg_small_init(keg); 1332 } 1333 1334 if (keg->uk_flags & UMA_ZONE_OFFPAGE) { 1335 if (keg->uk_flags & UMA_ZONE_REFCNT) 1336 keg->uk_slabzone = slabrefzone; 1337 else 1338 keg->uk_slabzone = slabzone; 1339 } 1340 1341 /* 1342 * If we haven't booted yet we need allocations to go through the 1343 * startup cache until the vm is ready. 1344 */ 1345 if (keg->uk_ppera == 1) { 1346 #ifdef UMA_MD_SMALL_ALLOC 1347 keg->uk_allocf = uma_small_alloc; 1348 keg->uk_freef = uma_small_free; 1349 1350 if (booted < UMA_STARTUP) 1351 keg->uk_allocf = startup_alloc; 1352 #else 1353 if (booted < UMA_STARTUP2) 1354 keg->uk_allocf = startup_alloc; 1355 #endif 1356 } else if (booted < UMA_STARTUP2 && 1357 (keg->uk_flags & UMA_ZFLAG_INTERNAL)) 1358 keg->uk_allocf = startup_alloc; 1359 1360 /* 1361 * Initialize keg's lock (shared among zones). 1362 */ 1363 if (arg->flags & UMA_ZONE_MTXCLASS) 1364 KEG_LOCK_INIT(keg, 1); 1365 else 1366 KEG_LOCK_INIT(keg, 0); 1367 1368 /* 1369 * If we're putting the slab header in the actual page we need to 1370 * figure out where in each page it goes. This calculates a right 1371 * justified offset into the memory on an ALIGN_PTR boundary. 
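	 *
	 * A made-up example (the struct sizes are illustrative only): with
	 * a 4096-byte slab, a 32-byte struct uma_slab, 15 items and a
	 * 1-byte free-list entry per item, totsize = 32 + 15 = 47, which is
	 * rounded up to 48 on the UMA_ALIGN_PTR boundary, so uk_pgoff
	 * becomes 4096 - 48 = 4048 and the slab header occupies the last 48
	 * bytes of the page.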
1372 */ 1373 if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) { 1374 u_int totsize; 1375 1376 /* Size of the slab struct and free list */ 1377 if (keg->uk_flags & UMA_ZONE_REFCNT) 1378 totsize = sizeof(struct uma_slab_refcnt) + 1379 keg->uk_ipers * UMA_FRITMREF_SZ; 1380 else 1381 totsize = sizeof(struct uma_slab) + 1382 keg->uk_ipers * UMA_FRITM_SZ; 1383 1384 if (totsize & UMA_ALIGN_PTR) 1385 totsize = (totsize & ~UMA_ALIGN_PTR) + 1386 (UMA_ALIGN_PTR + 1); 1387 keg->uk_pgoff = (UMA_SLAB_SIZE * keg->uk_ppera) - totsize; 1388 1389 if (keg->uk_flags & UMA_ZONE_REFCNT) 1390 totsize = keg->uk_pgoff + sizeof(struct uma_slab_refcnt) 1391 + keg->uk_ipers * UMA_FRITMREF_SZ; 1392 else 1393 totsize = keg->uk_pgoff + sizeof(struct uma_slab) 1394 + keg->uk_ipers * UMA_FRITM_SZ; 1395 1396 /* 1397 * The only way the following is possible is if with our 1398 * UMA_ALIGN_PTR adjustments we are now bigger than 1399 * UMA_SLAB_SIZE. I haven't checked whether this is 1400 * mathematically possible for all cases, so we make 1401 * sure here anyway. 1402 */ 1403 if (totsize > UMA_SLAB_SIZE * keg->uk_ppera) { 1404 printf("zone %s ipers %d rsize %d size %d\n", 1405 zone->uz_name, keg->uk_ipers, keg->uk_rsize, 1406 keg->uk_size); 1407 panic("UMA slab won't fit."); 1408 } 1409 } 1410 1411 if (keg->uk_flags & UMA_ZONE_HASH) 1412 hash_alloc(&keg->uk_hash); 1413 1414 #ifdef UMA_DEBUG 1415 printf("UMA: %s(%p) size %d(%d) flags %#x ipers %d ppera %d out %d free %d\n", 1416 zone->uz_name, zone, keg->uk_size, keg->uk_rsize, keg->uk_flags, 1417 keg->uk_ipers, keg->uk_ppera, 1418 (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free); 1419 #endif 1420 1421 LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link); 1422 1423 mtx_lock(&uma_mtx); 1424 LIST_INSERT_HEAD(&uma_kegs, keg, uk_link); 1425 mtx_unlock(&uma_mtx); 1426 return (0); 1427 } 1428 1429 /* 1430 * Zone header ctor. This initializes all fields, locks, etc. 
1431 * 1432 * Arguments/Returns follow uma_ctor specifications 1433 * udata Actually uma_zctor_args 1434 */ 1435 static int 1436 zone_ctor(void *mem, int size, void *udata, int flags) 1437 { 1438 struct uma_zctor_args *arg = udata; 1439 uma_zone_t zone = mem; 1440 uma_zone_t z; 1441 uma_keg_t keg; 1442 1443 bzero(zone, size); 1444 zone->uz_name = arg->name; 1445 zone->uz_ctor = arg->ctor; 1446 zone->uz_dtor = arg->dtor; 1447 zone->uz_slab = zone_fetch_slab; 1448 zone->uz_init = NULL; 1449 zone->uz_fini = NULL; 1450 zone->uz_allocs = 0; 1451 zone->uz_frees = 0; 1452 zone->uz_fails = 0; 1453 zone->uz_sleeps = 0; 1454 zone->uz_fills = zone->uz_count = 0; 1455 zone->uz_flags = 0; 1456 zone->uz_warning = NULL; 1457 timevalclear(&zone->uz_ratecheck); 1458 keg = arg->keg; 1459 1460 if (arg->flags & UMA_ZONE_SECONDARY) { 1461 KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg")); 1462 zone->uz_init = arg->uminit; 1463 zone->uz_fini = arg->fini; 1464 zone->uz_lock = &keg->uk_lock; 1465 zone->uz_flags |= UMA_ZONE_SECONDARY; 1466 mtx_lock(&uma_mtx); 1467 ZONE_LOCK(zone); 1468 LIST_FOREACH(z, &keg->uk_zones, uz_link) { 1469 if (LIST_NEXT(z, uz_link) == NULL) { 1470 LIST_INSERT_AFTER(z, zone, uz_link); 1471 break; 1472 } 1473 } 1474 ZONE_UNLOCK(zone); 1475 mtx_unlock(&uma_mtx); 1476 } else if (keg == NULL) { 1477 if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini, 1478 arg->align, arg->flags)) == NULL) 1479 return (ENOMEM); 1480 } else { 1481 struct uma_kctor_args karg; 1482 int error; 1483 1484 /* We should only be here from uma_startup() */ 1485 karg.size = arg->size; 1486 karg.uminit = arg->uminit; 1487 karg.fini = arg->fini; 1488 karg.align = arg->align; 1489 karg.flags = arg->flags; 1490 karg.zone = zone; 1491 error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg, 1492 flags); 1493 if (error) 1494 return (error); 1495 } 1496 /* 1497 * Link in the first keg. 1498 */ 1499 zone->uz_klink.kl_keg = keg; 1500 LIST_INSERT_HEAD(&zone->uz_kegs, &zone->uz_klink, kl_link); 1501 zone->uz_lock = &keg->uk_lock; 1502 zone->uz_size = keg->uk_size; 1503 zone->uz_flags |= (keg->uk_flags & 1504 (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT)); 1505 1506 /* 1507 * Some internal zones don't have room allocated for the per cpu 1508 * caches. If we're internal, bail out here. 1509 */ 1510 if (keg->uk_flags & UMA_ZFLAG_INTERNAL) { 1511 KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0, 1512 ("Secondary zone requested UMA_ZFLAG_INTERNAL")); 1513 return (0); 1514 } 1515 1516 if (keg->uk_flags & UMA_ZONE_MAXBUCKET) 1517 zone->uz_count = BUCKET_MAX; 1518 else if (keg->uk_ipers <= BUCKET_MAX) 1519 zone->uz_count = keg->uk_ipers; 1520 else 1521 zone->uz_count = BUCKET_MAX; 1522 return (0); 1523 } 1524 1525 /* 1526 * Keg header dtor. This frees all data, destroys locks, frees the hash 1527 * table and removes the keg from the global list. 1528 * 1529 * Arguments/Returns follow uma_dtor specifications 1530 * udata unused 1531 */ 1532 static void 1533 keg_dtor(void *arg, int size, void *udata) 1534 { 1535 uma_keg_t keg; 1536 1537 keg = (uma_keg_t)arg; 1538 KEG_LOCK(keg); 1539 if (keg->uk_free != 0) { 1540 printf("Freed UMA keg was not empty (%d items). " 1541 " Lost %d pages of memory.\n", 1542 keg->uk_free, keg->uk_pages); 1543 } 1544 KEG_UNLOCK(keg); 1545 1546 hash_free(&keg->uk_hash); 1547 1548 KEG_LOCK_FINI(keg); 1549 } 1550 1551 /* 1552 * Zone header dtor. 
1553 * 1554 * Arguments/Returns follow uma_dtor specifications 1555 * udata unused 1556 */ 1557 static void 1558 zone_dtor(void *arg, int size, void *udata) 1559 { 1560 uma_klink_t klink; 1561 uma_zone_t zone; 1562 uma_keg_t keg; 1563 1564 zone = (uma_zone_t)arg; 1565 keg = zone_first_keg(zone); 1566 1567 if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL)) 1568 cache_drain(zone); 1569 1570 mtx_lock(&uma_mtx); 1571 LIST_REMOVE(zone, uz_link); 1572 mtx_unlock(&uma_mtx); 1573 /* 1574 * XXX there are some races here where 1575 * the zone can be drained but zone lock 1576 * released and then refilled before we 1577 * remove it... we dont care for now 1578 */ 1579 zone_drain_wait(zone, M_WAITOK); 1580 /* 1581 * Unlink all of our kegs. 1582 */ 1583 while ((klink = LIST_FIRST(&zone->uz_kegs)) != NULL) { 1584 klink->kl_keg = NULL; 1585 LIST_REMOVE(klink, kl_link); 1586 if (klink == &zone->uz_klink) 1587 continue; 1588 free(klink, M_TEMP); 1589 } 1590 /* 1591 * We only destroy kegs from non secondary zones. 1592 */ 1593 if ((zone->uz_flags & UMA_ZONE_SECONDARY) == 0) { 1594 mtx_lock(&uma_mtx); 1595 LIST_REMOVE(keg, uk_link); 1596 mtx_unlock(&uma_mtx); 1597 zone_free_item(kegs, keg, NULL, SKIP_NONE, 1598 ZFREE_STATFREE); 1599 } 1600 } 1601 1602 /* 1603 * Traverses every zone in the system and calls a callback 1604 * 1605 * Arguments: 1606 * zfunc A pointer to a function which accepts a zone 1607 * as an argument. 1608 * 1609 * Returns: 1610 * Nothing 1611 */ 1612 static void 1613 zone_foreach(void (*zfunc)(uma_zone_t)) 1614 { 1615 uma_keg_t keg; 1616 uma_zone_t zone; 1617 1618 mtx_lock(&uma_mtx); 1619 LIST_FOREACH(keg, &uma_kegs, uk_link) { 1620 LIST_FOREACH(zone, &keg->uk_zones, uz_link) 1621 zfunc(zone); 1622 } 1623 mtx_unlock(&uma_mtx); 1624 } 1625 1626 /* Public functions */ 1627 /* See uma.h */ 1628 void 1629 uma_startup(void *bootmem, int boot_pages) 1630 { 1631 struct uma_zctor_args args; 1632 uma_slab_t slab; 1633 u_int slabsize; 1634 u_int objsize, totsize, wsize; 1635 int i; 1636 1637 #ifdef UMA_DEBUG 1638 printf("Creating uma keg headers zone and keg.\n"); 1639 #endif 1640 mtx_init(&uma_mtx, "UMA lock", NULL, MTX_DEF); 1641 1642 /* 1643 * Figure out the maximum number of items-per-slab we'll have if 1644 * we're using the OFFPAGE slab header to track free items, given 1645 * all possible object sizes and the maximum desired wastage 1646 * (UMA_MAX_WASTE). 1647 * 1648 * We iterate until we find an object size for 1649 * which the calculated wastage in keg_small_init() will be 1650 * enough to warrant OFFPAGE. Since wastedspace versus objsize 1651 * is an overall increasing see-saw function, we find the smallest 1652 * objsize such that the wastage is always acceptable for objects 1653 * with that objsize or smaller. Since a smaller objsize always 1654 * generates a larger possible uma_max_ipers, we use this computed 1655 * objsize to calculate the largest ipers possible. Since the 1656 * ipers calculated for OFFPAGE slab headers is always larger than 1657 * the ipers initially calculated in keg_small_init(), we use 1658 * the former's equation (UMA_SLAB_SIZE / keg->uk_rsize) to 1659 * obtain the maximum ipers possible for offpage slab headers. 1660 * 1661 * It should be noted that ipers versus objsize is an inversly 1662 * proportional function which drops off rather quickly so as 1663 * long as our UMA_MAX_WASTE is such that the objsize we calculate 1664 * falls into the portion of the inverse relation AFTER the steep 1665 * falloff, then uma_max_ipers shouldn't be too high (~10 on i386). 
1666 * 1667 * Note that we have 8-bits (1 byte) to use as a freelist index 1668 * inside the actual slab header itself and this is enough to 1669 * accomodate us. In the worst case, a UMA_SMALLEST_UNIT sized 1670 * object with offpage slab header would have ipers = 1671 * UMA_SLAB_SIZE / UMA_SMALLEST_UNIT (currently = 256), which is 1672 * 1 greater than what our byte-integer freelist index can 1673 * accomodate, but we know that this situation never occurs as 1674 * for UMA_SMALLEST_UNIT-sized objects, we will never calculate 1675 * that we need to go to offpage slab headers. Or, if we do, 1676 * then we trap that condition below and panic in the INVARIANTS case. 1677 */ 1678 wsize = UMA_SLAB_SIZE - sizeof(struct uma_slab) - UMA_MAX_WASTE; 1679 totsize = wsize; 1680 objsize = UMA_SMALLEST_UNIT; 1681 while (totsize >= wsize) { 1682 totsize = (UMA_SLAB_SIZE - sizeof(struct uma_slab)) / 1683 (objsize + UMA_FRITM_SZ); 1684 totsize *= (UMA_FRITM_SZ + objsize); 1685 objsize++; 1686 } 1687 if (objsize > UMA_SMALLEST_UNIT) 1688 objsize--; 1689 uma_max_ipers = MAX(UMA_SLAB_SIZE / objsize, 64); 1690 1691 wsize = UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt) - UMA_MAX_WASTE; 1692 totsize = wsize; 1693 objsize = UMA_SMALLEST_UNIT; 1694 while (totsize >= wsize) { 1695 totsize = (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt)) / 1696 (objsize + UMA_FRITMREF_SZ); 1697 totsize *= (UMA_FRITMREF_SZ + objsize); 1698 objsize++; 1699 } 1700 if (objsize > UMA_SMALLEST_UNIT) 1701 objsize--; 1702 uma_max_ipers_ref = MAX(UMA_SLAB_SIZE / objsize, 64); 1703 1704 KASSERT((uma_max_ipers_ref <= 255) && (uma_max_ipers <= 255), 1705 ("uma_startup: calculated uma_max_ipers values too large!")); 1706 1707 #ifdef UMA_DEBUG 1708 printf("Calculated uma_max_ipers (for OFFPAGE) is %d\n", uma_max_ipers); 1709 printf("Calculated uma_max_ipers_ref (for OFFPAGE) is %d\n", 1710 uma_max_ipers_ref); 1711 #endif 1712 1713 /* "manually" create the initial zone */ 1714 args.name = "UMA Kegs"; 1715 args.size = sizeof(struct uma_keg); 1716 args.ctor = keg_ctor; 1717 args.dtor = keg_dtor; 1718 args.uminit = zero_init; 1719 args.fini = NULL; 1720 args.keg = &masterkeg; 1721 args.align = 32 - 1; 1722 args.flags = UMA_ZFLAG_INTERNAL; 1723 /* The initial zone has no Per cpu queues so it's smaller */ 1724 zone_ctor(kegs, sizeof(struct uma_zone), &args, M_WAITOK); 1725 1726 #ifdef UMA_DEBUG 1727 printf("Filling boot free list.\n"); 1728 #endif 1729 for (i = 0; i < boot_pages; i++) { 1730 slab = (uma_slab_t)((u_int8_t *)bootmem + (i * UMA_SLAB_SIZE)); 1731 slab->us_data = (u_int8_t *)slab; 1732 slab->us_flags = UMA_SLAB_BOOT; 1733 LIST_INSERT_HEAD(&uma_boot_pages, slab, us_link); 1734 } 1735 mtx_init(&uma_boot_pages_mtx, "UMA boot pages", NULL, MTX_DEF); 1736 1737 #ifdef UMA_DEBUG 1738 printf("Creating uma zone headers zone and keg.\n"); 1739 #endif 1740 args.name = "UMA Zones"; 1741 args.size = sizeof(struct uma_zone) + 1742 (sizeof(struct uma_cache) * (mp_maxid + 1)); 1743 args.ctor = zone_ctor; 1744 args.dtor = zone_dtor; 1745 args.uminit = zero_init; 1746 args.fini = NULL; 1747 args.keg = NULL; 1748 args.align = 32 - 1; 1749 args.flags = UMA_ZFLAG_INTERNAL; 1750 /* The initial zone has no Per cpu queues so it's smaller */ 1751 zone_ctor(zones, sizeof(struct uma_zone), &args, M_WAITOK); 1752 1753 #ifdef UMA_DEBUG 1754 printf("Initializing pcpu cache locks.\n"); 1755 #endif 1756 #ifdef UMA_DEBUG 1757 printf("Creating slab and hash zones.\n"); 1758 #endif 1759 1760 /* 1761 * This is the max number of free list items we'll have with 1762 * 
offpage slabs. 1763 */ 1764 slabsize = uma_max_ipers * UMA_FRITM_SZ; 1765 slabsize += sizeof(struct uma_slab); 1766 1767 /* Now make a zone for slab headers */ 1768 slabzone = uma_zcreate("UMA Slabs", 1769 slabsize, 1770 NULL, NULL, NULL, NULL, 1771 UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL); 1772 1773 /* 1774 * We also create a zone for the bigger slabs with reference 1775 * counts in them, to accomodate UMA_ZONE_REFCNT zones. 1776 */ 1777 slabsize = uma_max_ipers_ref * UMA_FRITMREF_SZ; 1778 slabsize += sizeof(struct uma_slab_refcnt); 1779 slabrefzone = uma_zcreate("UMA RCntSlabs", 1780 slabsize, 1781 NULL, NULL, NULL, NULL, 1782 UMA_ALIGN_PTR, 1783 UMA_ZFLAG_INTERNAL); 1784 1785 hashzone = uma_zcreate("UMA Hash", 1786 sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT, 1787 NULL, NULL, NULL, NULL, 1788 UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL); 1789 1790 bucket_init(); 1791 1792 booted = UMA_STARTUP; 1793 1794 #ifdef UMA_DEBUG 1795 printf("UMA startup complete.\n"); 1796 #endif 1797 } 1798 1799 /* see uma.h */ 1800 void 1801 uma_startup2(void) 1802 { 1803 booted = UMA_STARTUP2; 1804 bucket_enable(); 1805 #ifdef UMA_DEBUG 1806 printf("UMA startup2 complete.\n"); 1807 #endif 1808 } 1809 1810 /* 1811 * Initialize our callout handle 1812 * 1813 */ 1814 1815 static void 1816 uma_startup3(void) 1817 { 1818 #ifdef UMA_DEBUG 1819 printf("Starting callout.\n"); 1820 #endif 1821 callout_init(&uma_callout, CALLOUT_MPSAFE); 1822 callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL); 1823 #ifdef UMA_DEBUG 1824 printf("UMA startup3 complete.\n"); 1825 #endif 1826 } 1827 1828 static uma_keg_t 1829 uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini, 1830 int align, u_int32_t flags) 1831 { 1832 struct uma_kctor_args args; 1833 1834 args.size = size; 1835 args.uminit = uminit; 1836 args.fini = fini; 1837 args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align; 1838 args.flags = flags; 1839 args.zone = zone; 1840 return (zone_alloc_item(kegs, &args, M_WAITOK)); 1841 } 1842 1843 /* See uma.h */ 1844 void 1845 uma_set_align(int align) 1846 { 1847 1848 if (align != UMA_ALIGN_CACHE) 1849 uma_align_cache = align; 1850 } 1851 1852 /* See uma.h */ 1853 uma_zone_t 1854 uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor, 1855 uma_init uminit, uma_fini fini, int align, u_int32_t flags) 1856 1857 { 1858 struct uma_zctor_args args; 1859 1860 /* This stuff is essential for the zone ctor */ 1861 args.name = name; 1862 args.size = size; 1863 args.ctor = ctor; 1864 args.dtor = dtor; 1865 args.uminit = uminit; 1866 args.fini = fini; 1867 args.align = align; 1868 args.flags = flags; 1869 args.keg = NULL; 1870 1871 return (zone_alloc_item(zones, &args, M_WAITOK)); 1872 } 1873 1874 /* See uma.h */ 1875 uma_zone_t 1876 uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor, 1877 uma_init zinit, uma_fini zfini, uma_zone_t master) 1878 { 1879 struct uma_zctor_args args; 1880 uma_keg_t keg; 1881 1882 keg = zone_first_keg(master); 1883 args.name = name; 1884 args.size = keg->uk_size; 1885 args.ctor = ctor; 1886 args.dtor = dtor; 1887 args.uminit = zinit; 1888 args.fini = zfini; 1889 args.align = keg->uk_align; 1890 args.flags = keg->uk_flags | UMA_ZONE_SECONDARY; 1891 args.keg = keg; 1892 1893 /* XXX Attaches only one keg of potentially many. 
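	 *
	 * Additional kegs can be attached to the zone afterwards with
	 * uma_zsecond_add().  A hypothetical consumer sequence (all names
	 * invented for illustration):
	 *
	 *	z = uma_zsecond_create("foo cache", foo_ctor, foo_dtor,
	 *	    NULL, NULL, foo_master_zone);
	 *	error = uma_zsecond_add(z, foo_other_master_zone);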
*/ 1894 return (zone_alloc_item(zones, &args, M_WAITOK)); 1895 } 1896 1897 static void 1898 zone_lock_pair(uma_zone_t a, uma_zone_t b) 1899 { 1900 if (a < b) { 1901 ZONE_LOCK(a); 1902 mtx_lock_flags(b->uz_lock, MTX_DUPOK); 1903 } else { 1904 ZONE_LOCK(b); 1905 mtx_lock_flags(a->uz_lock, MTX_DUPOK); 1906 } 1907 } 1908 1909 static void 1910 zone_unlock_pair(uma_zone_t a, uma_zone_t b) 1911 { 1912 1913 ZONE_UNLOCK(a); 1914 ZONE_UNLOCK(b); 1915 } 1916 1917 int 1918 uma_zsecond_add(uma_zone_t zone, uma_zone_t master) 1919 { 1920 uma_klink_t klink; 1921 uma_klink_t kl; 1922 int error; 1923 1924 error = 0; 1925 klink = malloc(sizeof(*klink), M_TEMP, M_WAITOK | M_ZERO); 1926 1927 zone_lock_pair(zone, master); 1928 /* 1929 * zone must use vtoslab() to resolve objects and must already be 1930 * a secondary. 1931 */ 1932 if ((zone->uz_flags & (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) 1933 != (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) { 1934 error = EINVAL; 1935 goto out; 1936 } 1937 /* 1938 * The new master must also use vtoslab(). 1939 */ 1940 if ((zone->uz_flags & UMA_ZONE_VTOSLAB) != UMA_ZONE_VTOSLAB) { 1941 error = EINVAL; 1942 goto out; 1943 } 1944 /* 1945 * Both must either be refcnt, or not be refcnt. 1946 */ 1947 if ((zone->uz_flags & UMA_ZONE_REFCNT) != 1948 (master->uz_flags & UMA_ZONE_REFCNT)) { 1949 error = EINVAL; 1950 goto out; 1951 } 1952 /* 1953 * The underlying object must be the same size. rsize 1954 * may be different. 1955 */ 1956 if (master->uz_size != zone->uz_size) { 1957 error = E2BIG; 1958 goto out; 1959 } 1960 /* 1961 * Put it at the end of the list. 1962 */ 1963 klink->kl_keg = zone_first_keg(master); 1964 LIST_FOREACH(kl, &zone->uz_kegs, kl_link) { 1965 if (LIST_NEXT(kl, kl_link) == NULL) { 1966 LIST_INSERT_AFTER(kl, klink, kl_link); 1967 break; 1968 } 1969 } 1970 klink = NULL; 1971 zone->uz_flags |= UMA_ZFLAG_MULTI; 1972 zone->uz_slab = zone_fetch_slab_multi; 1973 1974 out: 1975 zone_unlock_pair(zone, master); 1976 if (klink != NULL) 1977 free(klink, M_TEMP); 1978 1979 return (error); 1980 } 1981 1982 1983 /* See uma.h */ 1984 void 1985 uma_zdestroy(uma_zone_t zone) 1986 { 1987 1988 zone_free_item(zones, zone, NULL, SKIP_NONE, ZFREE_STATFREE); 1989 } 1990 1991 /* See uma.h */ 1992 void * 1993 uma_zalloc_arg(uma_zone_t zone, void *udata, int flags) 1994 { 1995 void *item; 1996 uma_cache_t cache; 1997 uma_bucket_t bucket; 1998 int cpu; 1999 2000 /* This is the fast path allocation */ 2001 #ifdef UMA_DEBUG_ALLOC_1 2002 printf("Allocating one item from %s(%p)\n", zone->uz_name, zone); 2003 #endif 2004 CTR3(KTR_UMA, "uma_zalloc_arg thread %x zone %s flags %d", curthread, 2005 zone->uz_name, flags); 2006 2007 if (flags & M_WAITOK) { 2008 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 2009 "uma_zalloc_arg: zone \"%s\"", zone->uz_name); 2010 } 2011 #ifdef DEBUG_MEMGUARD 2012 if (memguard_cmp_zone(zone)) { 2013 item = memguard_alloc(zone->uz_size, flags); 2014 if (item != NULL) { 2015 /* 2016 * Avoid conflict with the use-after-free 2017 * protecting infrastructure from INVARIANTS. 2018 */ 2019 if (zone->uz_init != NULL && 2020 zone->uz_init != mtrash_init && 2021 zone->uz_init(item, zone->uz_size, flags) != 0) 2022 return (NULL); 2023 if (zone->uz_ctor != NULL && 2024 zone->uz_ctor != mtrash_ctor && 2025 zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) { 2026 zone->uz_fini(item, zone->uz_size); 2027 return (NULL); 2028 } 2029 return (item); 2030 } 2031 /* This is unfortunate but should not be fatal. 
*/ 2032 } 2033 #endif 2034 /* 2035 * If possible, allocate from the per-CPU cache. There are two 2036 * requirements for safe access to the per-CPU cache: (1) the thread 2037 * accessing the cache must not be preempted or yield during access, 2038 * and (2) the thread must not migrate CPUs without switching which 2039 * cache it accesses. We rely on a critical section to prevent 2040 * preemption and migration. We release the critical section in 2041 * order to acquire the zone mutex if we are unable to allocate from 2042 * the current cache; when we re-acquire the critical section, we 2043 * must detect and handle migration if it has occurred. 2044 */ 2045 zalloc_restart: 2046 critical_enter(); 2047 cpu = curcpu; 2048 cache = &zone->uz_cpu[cpu]; 2049 2050 zalloc_start: 2051 bucket = cache->uc_allocbucket; 2052 2053 if (bucket) { 2054 if (bucket->ub_cnt > 0) { 2055 bucket->ub_cnt--; 2056 item = bucket->ub_bucket[bucket->ub_cnt]; 2057 #ifdef INVARIANTS 2058 bucket->ub_bucket[bucket->ub_cnt] = NULL; 2059 #endif 2060 KASSERT(item != NULL, 2061 ("uma_zalloc: Bucket pointer mangled.")); 2062 cache->uc_allocs++; 2063 critical_exit(); 2064 #ifdef INVARIANTS 2065 ZONE_LOCK(zone); 2066 uma_dbg_alloc(zone, NULL, item); 2067 ZONE_UNLOCK(zone); 2068 #endif 2069 if (zone->uz_ctor != NULL) { 2070 if (zone->uz_ctor(item, zone->uz_size, 2071 udata, flags) != 0) { 2072 zone_free_item(zone, item, udata, 2073 SKIP_DTOR, ZFREE_STATFAIL | 2074 ZFREE_STATFREE); 2075 return (NULL); 2076 } 2077 } 2078 if (flags & M_ZERO) 2079 bzero(item, zone->uz_size); 2080 return (item); 2081 } else if (cache->uc_freebucket) { 2082 /* 2083 * We have run out of items in our allocbucket. 2084 * See if we can switch with our free bucket. 2085 */ 2086 if (cache->uc_freebucket->ub_cnt > 0) { 2087 #ifdef UMA_DEBUG_ALLOC 2088 printf("uma_zalloc: Swapping empty with" 2089 " alloc.\n"); 2090 #endif 2091 bucket = cache->uc_freebucket; 2092 cache->uc_freebucket = cache->uc_allocbucket; 2093 cache->uc_allocbucket = bucket; 2094 2095 goto zalloc_start; 2096 } 2097 } 2098 } 2099 /* 2100 * Attempt to retrieve the item from the per-CPU cache has failed, so 2101 * we must go back to the zone. This requires the zone lock, so we 2102 * must drop the critical section, then re-acquire it when we go back 2103 * to the cache. Since the critical section is released, we may be 2104 * preempted or migrate. As such, make sure not to maintain any 2105 * thread-local state specific to the cache from prior to releasing 2106 * the critical section. 
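 *
 * In outline, the fast path below is roughly (a simplified sketch of
 * the code that follows, not a separate implementation):
 *
 *	critical_enter();
 *	cache = &zone->uz_cpu[curcpu];
 *	... pop an item from cache->uc_allocbucket if one is available ...
 *	critical_exit();
 *
 * On a cache miss the critical section is dropped, ZONE_LOCK() is
 * taken, and the critical section is re-entered with curcpu re-read,
 * since the thread may have migrated in the meantime.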
2107 */ 2108 critical_exit(); 2109 ZONE_LOCK(zone); 2110 critical_enter(); 2111 cpu = curcpu; 2112 cache = &zone->uz_cpu[cpu]; 2113 bucket = cache->uc_allocbucket; 2114 if (bucket != NULL) { 2115 if (bucket->ub_cnt > 0) { 2116 ZONE_UNLOCK(zone); 2117 goto zalloc_start; 2118 } 2119 bucket = cache->uc_freebucket; 2120 if (bucket != NULL && bucket->ub_cnt > 0) { 2121 ZONE_UNLOCK(zone); 2122 goto zalloc_start; 2123 } 2124 } 2125 2126 /* Since we have locked the zone we may as well send back our stats */ 2127 zone->uz_allocs += cache->uc_allocs; 2128 cache->uc_allocs = 0; 2129 zone->uz_frees += cache->uc_frees; 2130 cache->uc_frees = 0; 2131 2132 /* Our old one is now a free bucket */ 2133 if (cache->uc_allocbucket) { 2134 KASSERT(cache->uc_allocbucket->ub_cnt == 0, 2135 ("uma_zalloc_arg: Freeing a non free bucket.")); 2136 LIST_INSERT_HEAD(&zone->uz_free_bucket, 2137 cache->uc_allocbucket, ub_link); 2138 cache->uc_allocbucket = NULL; 2139 } 2140 2141 /* Check the free list for a new alloc bucket */ 2142 if ((bucket = LIST_FIRST(&zone->uz_full_bucket)) != NULL) { 2143 KASSERT(bucket->ub_cnt != 0, 2144 ("uma_zalloc_arg: Returning an empty bucket.")); 2145 2146 LIST_REMOVE(bucket, ub_link); 2147 cache->uc_allocbucket = bucket; 2148 ZONE_UNLOCK(zone); 2149 goto zalloc_start; 2150 } 2151 /* We are no longer associated with this CPU. */ 2152 critical_exit(); 2153 2154 /* Bump up our uz_count so we get here less */ 2155 if (zone->uz_count < BUCKET_MAX) 2156 zone->uz_count++; 2157 2158 /* 2159 * Now lets just fill a bucket and put it on the free list. If that 2160 * works we'll restart the allocation from the begining. 2161 */ 2162 if (zone_alloc_bucket(zone, flags)) { 2163 ZONE_UNLOCK(zone); 2164 goto zalloc_restart; 2165 } 2166 ZONE_UNLOCK(zone); 2167 /* 2168 * We may not be able to get a bucket so return an actual item. 2169 */ 2170 #ifdef UMA_DEBUG 2171 printf("uma_zalloc_arg: Bucketzone returned NULL\n"); 2172 #endif 2173 2174 item = zone_alloc_item(zone, udata, flags); 2175 return (item); 2176 } 2177 2178 static uma_slab_t 2179 keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int flags) 2180 { 2181 uma_slab_t slab; 2182 2183 mtx_assert(&keg->uk_lock, MA_OWNED); 2184 slab = NULL; 2185 2186 for (;;) { 2187 /* 2188 * Find a slab with some space. Prefer slabs that are partially 2189 * used over those that are totally full. This helps to reduce 2190 * fragmentation. 2191 */ 2192 if (keg->uk_free != 0) { 2193 if (!LIST_EMPTY(&keg->uk_part_slab)) { 2194 slab = LIST_FIRST(&keg->uk_part_slab); 2195 } else { 2196 slab = LIST_FIRST(&keg->uk_free_slab); 2197 LIST_REMOVE(slab, us_link); 2198 LIST_INSERT_HEAD(&keg->uk_part_slab, slab, 2199 us_link); 2200 } 2201 MPASS(slab->us_keg == keg); 2202 return (slab); 2203 } 2204 2205 /* 2206 * M_NOVM means don't ask at all! 2207 */ 2208 if (flags & M_NOVM) 2209 break; 2210 2211 if (keg->uk_maxpages && keg->uk_pages >= keg->uk_maxpages) { 2212 keg->uk_flags |= UMA_ZFLAG_FULL; 2213 /* 2214 * If this is not a multi-zone, set the FULL bit. 2215 * Otherwise slab_multi() takes care of it. 2216 */ 2217 if ((zone->uz_flags & UMA_ZFLAG_MULTI) == 0) { 2218 zone->uz_flags |= UMA_ZFLAG_FULL; 2219 zone_log_warning(zone); 2220 } 2221 if (flags & M_NOWAIT) 2222 break; 2223 zone->uz_sleeps++; 2224 msleep(keg, &keg->uk_lock, PVM, "keglimit", 0); 2225 continue; 2226 } 2227 keg->uk_recurse++; 2228 slab = keg_alloc_slab(keg, zone, flags); 2229 keg->uk_recurse--; 2230 /* 2231 * If we got a slab here it's safe to mark it partially used 2232 * and return. 
We assume that the caller is going to remove 2233 * at least one item. 2234 */ 2235 if (slab) { 2236 MPASS(slab->us_keg == keg); 2237 LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link); 2238 return (slab); 2239 } 2240 /* 2241 * We might not have been able to get a slab but another cpu 2242 * could have while we were unlocked. Check again before we 2243 * fail. 2244 */ 2245 flags |= M_NOVM; 2246 } 2247 return (slab); 2248 } 2249 2250 static inline void 2251 zone_relock(uma_zone_t zone, uma_keg_t keg) 2252 { 2253 if (zone->uz_lock != &keg->uk_lock) { 2254 KEG_UNLOCK(keg); 2255 ZONE_LOCK(zone); 2256 } 2257 } 2258 2259 static inline void 2260 keg_relock(uma_keg_t keg, uma_zone_t zone) 2261 { 2262 if (zone->uz_lock != &keg->uk_lock) { 2263 ZONE_UNLOCK(zone); 2264 KEG_LOCK(keg); 2265 } 2266 } 2267 2268 static uma_slab_t 2269 zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int flags) 2270 { 2271 uma_slab_t slab; 2272 2273 if (keg == NULL) 2274 keg = zone_first_keg(zone); 2275 /* 2276 * This is to prevent us from recursively trying to allocate 2277 * buckets. The problem is that if an allocation forces us to 2278 * grab a new bucket we will call page_alloc, which will go off 2279 * and cause the vm to allocate vm_map_entries. If we need new 2280 * buckets there too we will recurse in kmem_alloc and bad 2281 * things happen. So instead we return a NULL bucket, and make 2282 * the code that allocates buckets smart enough to deal with it 2283 */ 2284 if (keg->uk_flags & UMA_ZFLAG_BUCKET && keg->uk_recurse != 0) 2285 return (NULL); 2286 2287 for (;;) { 2288 slab = keg_fetch_slab(keg, zone, flags); 2289 if (slab) 2290 return (slab); 2291 if (flags & (M_NOWAIT | M_NOVM)) 2292 break; 2293 } 2294 return (NULL); 2295 } 2296 2297 /* 2298 * uma_zone_fetch_slab_multi: Fetches a slab from one available keg. Returns 2299 * with the keg locked. Caller must call zone_relock() afterwards if the 2300 * zone lock is required. On NULL the zone lock is held. 2301 * 2302 * The last pointer is used to seed the search. It is not required. 2303 */ 2304 static uma_slab_t 2305 zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int rflags) 2306 { 2307 uma_klink_t klink; 2308 uma_slab_t slab; 2309 uma_keg_t keg; 2310 int flags; 2311 int empty; 2312 int full; 2313 2314 /* 2315 * Don't wait on the first pass. This will skip limit tests 2316 * as well. We don't want to block if we can find a provider 2317 * without blocking. 2318 */ 2319 flags = (rflags & ~M_WAITOK) | M_NOWAIT; 2320 /* 2321 * Use the last slab allocated as a hint for where to start 2322 * the search. 2323 */ 2324 if (last) { 2325 slab = keg_fetch_slab(last, zone, flags); 2326 if (slab) 2327 return (slab); 2328 zone_relock(zone, last); 2329 last = NULL; 2330 } 2331 /* 2332 * Loop until we have a slab incase of transient failures 2333 * while M_WAITOK is specified. I'm not sure this is 100% 2334 * required but we've done it for so long now. 2335 */ 2336 for (;;) { 2337 empty = 0; 2338 full = 0; 2339 /* 2340 * Search the available kegs for slabs. Be careful to hold the 2341 * correct lock while calling into the keg layer. 
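 * (keg_relock() below swaps the zone lock for the keg lock and
 * zone_relock() swaps back; when the zone shares its lock with its
 * only keg, both are no-ops.)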
2342 */ 2343 LIST_FOREACH(klink, &zone->uz_kegs, kl_link) { 2344 keg = klink->kl_keg; 2345 keg_relock(keg, zone); 2346 if ((keg->uk_flags & UMA_ZFLAG_FULL) == 0) { 2347 slab = keg_fetch_slab(keg, zone, flags); 2348 if (slab) 2349 return (slab); 2350 } 2351 if (keg->uk_flags & UMA_ZFLAG_FULL) 2352 full++; 2353 else 2354 empty++; 2355 zone_relock(zone, keg); 2356 } 2357 if (rflags & (M_NOWAIT | M_NOVM)) 2358 break; 2359 flags = rflags; 2360 /* 2361 * All kegs are full. XXX We can't atomically check all kegs 2362 * and sleep so just sleep for a short period and retry. 2363 */ 2364 if (full && !empty) { 2365 zone->uz_flags |= UMA_ZFLAG_FULL; 2366 zone->uz_sleeps++; 2367 zone_log_warning(zone); 2368 msleep(zone, zone->uz_lock, PVM, "zonelimit", hz/100); 2369 zone->uz_flags &= ~UMA_ZFLAG_FULL; 2370 continue; 2371 } 2372 } 2373 return (NULL); 2374 } 2375 2376 static void * 2377 slab_alloc_item(uma_zone_t zone, uma_slab_t slab) 2378 { 2379 uma_keg_t keg; 2380 uma_slabrefcnt_t slabref; 2381 void *item; 2382 u_int8_t freei; 2383 2384 keg = slab->us_keg; 2385 mtx_assert(&keg->uk_lock, MA_OWNED); 2386 2387 freei = slab->us_firstfree; 2388 if (keg->uk_flags & UMA_ZONE_REFCNT) { 2389 slabref = (uma_slabrefcnt_t)slab; 2390 slab->us_firstfree = slabref->us_freelist[freei].us_item; 2391 } else { 2392 slab->us_firstfree = slab->us_freelist[freei].us_item; 2393 } 2394 item = slab->us_data + (keg->uk_rsize * freei); 2395 2396 slab->us_freecount--; 2397 keg->uk_free--; 2398 #ifdef INVARIANTS 2399 uma_dbg_alloc(zone, slab, item); 2400 #endif 2401 /* Move this slab to the full list */ 2402 if (slab->us_freecount == 0) { 2403 LIST_REMOVE(slab, us_link); 2404 LIST_INSERT_HEAD(&keg->uk_full_slab, slab, us_link); 2405 } 2406 2407 return (item); 2408 } 2409 2410 static int 2411 zone_alloc_bucket(uma_zone_t zone, int flags) 2412 { 2413 uma_bucket_t bucket; 2414 uma_slab_t slab; 2415 uma_keg_t keg; 2416 int16_t saved; 2417 int max, origflags = flags; 2418 2419 /* 2420 * Try this zone's free list first so we don't allocate extra buckets. 2421 */ 2422 if ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) { 2423 KASSERT(bucket->ub_cnt == 0, 2424 ("zone_alloc_bucket: Bucket on free list is not empty.")); 2425 LIST_REMOVE(bucket, ub_link); 2426 } else { 2427 int bflags; 2428 2429 bflags = (flags & ~M_ZERO); 2430 if (zone->uz_flags & UMA_ZFLAG_CACHEONLY) 2431 bflags |= M_NOVM; 2432 2433 ZONE_UNLOCK(zone); 2434 bucket = bucket_alloc(zone->uz_count, bflags); 2435 ZONE_LOCK(zone); 2436 } 2437 2438 if (bucket == NULL) { 2439 return (0); 2440 } 2441 2442 #ifdef SMP 2443 /* 2444 * This code is here to limit the number of simultaneous bucket fills 2445 * for any given zone to the number of per cpu caches in this zone. This 2446 * is done so that we don't allocate more memory than we really need. 2447 */ 2448 if (zone->uz_fills >= mp_ncpus) 2449 goto done; 2450 2451 #endif 2452 zone->uz_fills++; 2453 2454 max = MIN(bucket->ub_entries, zone->uz_count); 2455 /* Try to keep the buckets totally full */ 2456 saved = bucket->ub_cnt; 2457 slab = NULL; 2458 keg = NULL; 2459 while (bucket->ub_cnt < max && 2460 (slab = zone->uz_slab(zone, keg, flags)) != NULL) { 2461 keg = slab->us_keg; 2462 while (slab->us_freecount && bucket->ub_cnt < max) { 2463 bucket->ub_bucket[bucket->ub_cnt++] = 2464 slab_alloc_item(zone, slab); 2465 } 2466 2467 /* Don't block on the next fill */ 2468 flags |= M_NOWAIT; 2469 } 2470 if (slab) 2471 zone_relock(zone, keg); 2472 2473 /* 2474 * We unlock here because we need to call the zone's init. 
2475 * It should be safe to unlock because the slab dealt with 2476 * above is already on the appropriate list within the keg 2477 * and the bucket we filled is not yet on any list, so we 2478 * own it. 2479 */ 2480 if (zone->uz_init != NULL) { 2481 int i; 2482 2483 ZONE_UNLOCK(zone); 2484 for (i = saved; i < bucket->ub_cnt; i++) 2485 if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size, 2486 origflags) != 0) 2487 break; 2488 /* 2489 * If we couldn't initialize the whole bucket, put the 2490 * rest back onto the freelist. 2491 */ 2492 if (i != bucket->ub_cnt) { 2493 int j; 2494 2495 for (j = i; j < bucket->ub_cnt; j++) { 2496 zone_free_item(zone, bucket->ub_bucket[j], 2497 NULL, SKIP_FINI, 0); 2498 #ifdef INVARIANTS 2499 bucket->ub_bucket[j] = NULL; 2500 #endif 2501 } 2502 bucket->ub_cnt = i; 2503 } 2504 ZONE_LOCK(zone); 2505 } 2506 2507 zone->uz_fills--; 2508 if (bucket->ub_cnt != 0) { 2509 LIST_INSERT_HEAD(&zone->uz_full_bucket, 2510 bucket, ub_link); 2511 return (1); 2512 } 2513 #ifdef SMP 2514 done: 2515 #endif 2516 bucket_free(bucket); 2517 2518 return (0); 2519 } 2520 /* 2521 * Allocates an item for an internal zone 2522 * 2523 * Arguments 2524 * zone The zone to alloc for. 2525 * udata The data to be passed to the constructor. 2526 * flags M_WAITOK, M_NOWAIT, M_ZERO. 2527 * 2528 * Returns 2529 * NULL if there is no memory and M_NOWAIT is set 2530 * An item if successful 2531 */ 2532 2533 static void * 2534 zone_alloc_item(uma_zone_t zone, void *udata, int flags) 2535 { 2536 uma_slab_t slab; 2537 void *item; 2538 2539 item = NULL; 2540 2541 #ifdef UMA_DEBUG_ALLOC 2542 printf("INTERNAL: Allocating one item from %s(%p)\n", zone->uz_name, zone); 2543 #endif 2544 ZONE_LOCK(zone); 2545 2546 slab = zone->uz_slab(zone, NULL, flags); 2547 if (slab == NULL) { 2548 zone->uz_fails++; 2549 ZONE_UNLOCK(zone); 2550 return (NULL); 2551 } 2552 2553 item = slab_alloc_item(zone, slab); 2554 2555 zone_relock(zone, slab->us_keg); 2556 zone->uz_allocs++; 2557 ZONE_UNLOCK(zone); 2558 2559 /* 2560 * We have to call both the zone's init (not the keg's init) 2561 * and the zone's ctor. This is because the item is going from 2562 * a keg slab directly to the user, and the user is expecting it 2563 * to be both zone-init'd as well as zone-ctor'd. 2564 */ 2565 if (zone->uz_init != NULL) { 2566 if (zone->uz_init(item, zone->uz_size, flags) != 0) { 2567 zone_free_item(zone, item, udata, SKIP_FINI, 2568 ZFREE_STATFAIL | ZFREE_STATFREE); 2569 return (NULL); 2570 } 2571 } 2572 if (zone->uz_ctor != NULL) { 2573 if (zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) { 2574 zone_free_item(zone, item, udata, SKIP_DTOR, 2575 ZFREE_STATFAIL | ZFREE_STATFREE); 2576 return (NULL); 2577 } 2578 } 2579 if (flags & M_ZERO) 2580 bzero(item, zone->uz_size); 2581 2582 return (item); 2583 } 2584 2585 /* See uma.h */ 2586 void 2587 uma_zfree_arg(uma_zone_t zone, void *item, void *udata) 2588 { 2589 uma_cache_t cache; 2590 uma_bucket_t bucket; 2591 int bflags; 2592 int cpu; 2593 2594 #ifdef UMA_DEBUG_ALLOC_1 2595 printf("Freeing item %p to %s(%p)\n", item, zone->uz_name, zone); 2596 #endif 2597 CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread, 2598 zone->uz_name); 2599 2600 /* uma_zfree(..., NULL) does nothing, to match free(9). 
*/ 2601 if (item == NULL) 2602 return; 2603 #ifdef DEBUG_MEMGUARD 2604 if (is_memguard_addr(item)) { 2605 if (zone->uz_dtor != NULL && zone->uz_dtor != mtrash_dtor) 2606 zone->uz_dtor(item, zone->uz_size, udata); 2607 if (zone->uz_fini != NULL && zone->uz_fini != mtrash_fini) 2608 zone->uz_fini(item, zone->uz_size); 2609 memguard_free(item); 2610 return; 2611 } 2612 #endif 2613 if (zone->uz_dtor) 2614 zone->uz_dtor(item, zone->uz_size, udata); 2615 2616 #ifdef INVARIANTS 2617 ZONE_LOCK(zone); 2618 if (zone->uz_flags & UMA_ZONE_MALLOC) 2619 uma_dbg_free(zone, udata, item); 2620 else 2621 uma_dbg_free(zone, NULL, item); 2622 ZONE_UNLOCK(zone); 2623 #endif 2624 /* 2625 * The race here is acceptable. If we miss it we'll just have to wait 2626 * a little longer for the limits to be reset. 2627 */ 2628 if (zone->uz_flags & UMA_ZFLAG_FULL) 2629 goto zfree_internal; 2630 2631 /* 2632 * If possible, free to the per-CPU cache. There are two 2633 * requirements for safe access to the per-CPU cache: (1) the thread 2634 * accessing the cache must not be preempted or yield during access, 2635 * and (2) the thread must not migrate CPUs without switching which 2636 * cache it accesses. We rely on a critical section to prevent 2637 * preemption and migration. We release the critical section in 2638 * order to acquire the zone mutex if we are unable to free to the 2639 * current cache; when we re-acquire the critical section, we must 2640 * detect and handle migration if it has occurred. 2641 */ 2642 zfree_restart: 2643 critical_enter(); 2644 cpu = curcpu; 2645 cache = &zone->uz_cpu[cpu]; 2646 2647 zfree_start: 2648 bucket = cache->uc_freebucket; 2649 2650 if (bucket) { 2651 /* 2652 * Do we have room in our bucket? It is OK for this uz count 2653 * check to be slightly out of sync. 2654 */ 2655 2656 if (bucket->ub_cnt < bucket->ub_entries) { 2657 KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL, 2658 ("uma_zfree: Freeing to non free bucket index.")); 2659 bucket->ub_bucket[bucket->ub_cnt] = item; 2660 bucket->ub_cnt++; 2661 cache->uc_frees++; 2662 critical_exit(); 2663 return; 2664 } else if (cache->uc_allocbucket) { 2665 #ifdef UMA_DEBUG_ALLOC 2666 printf("uma_zfree: Swapping buckets.\n"); 2667 #endif 2668 /* 2669 * We have run out of space in our freebucket. 2670 * See if we can switch with our alloc bucket. 2671 */ 2672 if (cache->uc_allocbucket->ub_cnt < 2673 cache->uc_freebucket->ub_cnt) { 2674 bucket = cache->uc_freebucket; 2675 cache->uc_freebucket = cache->uc_allocbucket; 2676 cache->uc_allocbucket = bucket; 2677 goto zfree_start; 2678 } 2679 } 2680 } 2681 /* 2682 * We can get here for two reasons: 2683 * 2684 * 1) The buckets are NULL 2685 * 2) The alloc and free buckets are both somewhat full. 2686 * 2687 * We must go back the zone, which requires acquiring the zone lock, 2688 * which in turn means we must release and re-acquire the critical 2689 * section. Since the critical section is released, we may be 2690 * preempted or migrate. As such, make sure not to maintain any 2691 * thread-local state specific to the cache from prior to releasing 2692 * the critical section. 
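 *
 * The fallback order in the code below is roughly:
 *
 *	1) retry the per-CPU free bucket (possibly after swapping it
 *	   with the alloc bucket) once the zone lock is held;
 *	2) push the old, full free bucket onto uz_full_bucket and pull
 *	   an empty bucket from uz_free_bucket;
 *	3) allocate a brand new bucket with M_NOWAIT and restart;
 *	4) failing all of that, hand the item to zone_free_item().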
2693 */ 2694 critical_exit(); 2695 ZONE_LOCK(zone); 2696 critical_enter(); 2697 cpu = curcpu; 2698 cache = &zone->uz_cpu[cpu]; 2699 if (cache->uc_freebucket != NULL) { 2700 if (cache->uc_freebucket->ub_cnt < 2701 cache->uc_freebucket->ub_entries) { 2702 ZONE_UNLOCK(zone); 2703 goto zfree_start; 2704 } 2705 if (cache->uc_allocbucket != NULL && 2706 (cache->uc_allocbucket->ub_cnt < 2707 cache->uc_freebucket->ub_cnt)) { 2708 ZONE_UNLOCK(zone); 2709 goto zfree_start; 2710 } 2711 } 2712 2713 /* Since we have locked the zone we may as well send back our stats */ 2714 zone->uz_allocs += cache->uc_allocs; 2715 cache->uc_allocs = 0; 2716 zone->uz_frees += cache->uc_frees; 2717 cache->uc_frees = 0; 2718 2719 bucket = cache->uc_freebucket; 2720 cache->uc_freebucket = NULL; 2721 2722 /* Can we throw this on the zone full list? */ 2723 if (bucket != NULL) { 2724 #ifdef UMA_DEBUG_ALLOC 2725 printf("uma_zfree: Putting old bucket on the free list.\n"); 2726 #endif 2727 /* ub_cnt is pointing to the last free item */ 2728 KASSERT(bucket->ub_cnt != 0, 2729 ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n")); 2730 LIST_INSERT_HEAD(&zone->uz_full_bucket, 2731 bucket, ub_link); 2732 } 2733 if ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) { 2734 LIST_REMOVE(bucket, ub_link); 2735 ZONE_UNLOCK(zone); 2736 cache->uc_freebucket = bucket; 2737 goto zfree_start; 2738 } 2739 /* We are no longer associated with this CPU. */ 2740 critical_exit(); 2741 2742 /* And the zone.. */ 2743 ZONE_UNLOCK(zone); 2744 2745 #ifdef UMA_DEBUG_ALLOC 2746 printf("uma_zfree: Allocating new free bucket.\n"); 2747 #endif 2748 bflags = M_NOWAIT; 2749 2750 if (zone->uz_flags & UMA_ZFLAG_CACHEONLY) 2751 bflags |= M_NOVM; 2752 bucket = bucket_alloc(zone->uz_count, bflags); 2753 if (bucket) { 2754 ZONE_LOCK(zone); 2755 LIST_INSERT_HEAD(&zone->uz_free_bucket, 2756 bucket, ub_link); 2757 ZONE_UNLOCK(zone); 2758 goto zfree_restart; 2759 } 2760 2761 /* 2762 * If nothing else caught this, we'll just do an internal free. 2763 */ 2764 zfree_internal: 2765 zone_free_item(zone, item, udata, SKIP_DTOR, ZFREE_STATFREE); 2766 2767 return; 2768 } 2769 2770 /* 2771 * Frees an item to an INTERNAL zone or allocates a free bucket 2772 * 2773 * Arguments: 2774 * zone The zone to free to 2775 * item The item we're freeing 2776 * udata User supplied data for the dtor 2777 * skip Skip dtors and finis 2778 */ 2779 static void 2780 zone_free_item(uma_zone_t zone, void *item, void *udata, 2781 enum zfreeskip skip, int flags) 2782 { 2783 uma_slab_t slab; 2784 uma_slabrefcnt_t slabref; 2785 uma_keg_t keg; 2786 u_int8_t *mem; 2787 u_int8_t freei; 2788 int clearfull; 2789 2790 if (skip < SKIP_DTOR && zone->uz_dtor) 2791 zone->uz_dtor(item, zone->uz_size, udata); 2792 2793 if (skip < SKIP_FINI && zone->uz_fini) 2794 zone->uz_fini(item, zone->uz_size); 2795 2796 ZONE_LOCK(zone); 2797 2798 if (flags & ZFREE_STATFAIL) 2799 zone->uz_fails++; 2800 if (flags & ZFREE_STATFREE) 2801 zone->uz_frees++; 2802 2803 if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) { 2804 mem = (u_int8_t *)((unsigned long)item & (~UMA_SLAB_MASK)); 2805 keg = zone_first_keg(zone); /* Must only be one. */ 2806 if (zone->uz_flags & UMA_ZONE_HASH) { 2807 slab = hash_sfind(&keg->uk_hash, mem); 2808 } else { 2809 mem += keg->uk_pgoff; 2810 slab = (uma_slab_t)mem; 2811 } 2812 } else { 2813 /* This prevents redundant lookups via free(). 
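 * (free(9) has already looked the slab up and passes it in udata, so
 * for UMA_ZONE_MALLOC zones we can use it directly instead of calling
 * vtoslab() a second time.)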
*/ 2814 if ((zone->uz_flags & UMA_ZONE_MALLOC) && udata != NULL) 2815 slab = (uma_slab_t)udata; 2816 else 2817 slab = vtoslab((vm_offset_t)item); 2818 keg = slab->us_keg; 2819 keg_relock(keg, zone); 2820 } 2821 MPASS(keg == slab->us_keg); 2822 2823 /* Do we need to remove from any lists? */ 2824 if (slab->us_freecount+1 == keg->uk_ipers) { 2825 LIST_REMOVE(slab, us_link); 2826 LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link); 2827 } else if (slab->us_freecount == 0) { 2828 LIST_REMOVE(slab, us_link); 2829 LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link); 2830 } 2831 2832 /* Slab management stuff */ 2833 freei = ((unsigned long)item - (unsigned long)slab->us_data) 2834 / keg->uk_rsize; 2835 2836 #ifdef INVARIANTS 2837 if (!skip) 2838 uma_dbg_free(zone, slab, item); 2839 #endif 2840 2841 if (keg->uk_flags & UMA_ZONE_REFCNT) { 2842 slabref = (uma_slabrefcnt_t)slab; 2843 slabref->us_freelist[freei].us_item = slab->us_firstfree; 2844 } else { 2845 slab->us_freelist[freei].us_item = slab->us_firstfree; 2846 } 2847 slab->us_firstfree = freei; 2848 slab->us_freecount++; 2849 2850 /* Zone statistics */ 2851 keg->uk_free++; 2852 2853 clearfull = 0; 2854 if (keg->uk_flags & UMA_ZFLAG_FULL) { 2855 if (keg->uk_pages < keg->uk_maxpages) { 2856 keg->uk_flags &= ~UMA_ZFLAG_FULL; 2857 clearfull = 1; 2858 } 2859 2860 /* 2861 * We can handle one more allocation. Since we're clearing ZFLAG_FULL, 2862 * wake up all procs blocked on pages. This should be uncommon, so 2863 * keeping this simple for now (rather than adding count of blocked 2864 * threads etc). 2865 */ 2866 wakeup(keg); 2867 } 2868 if (clearfull) { 2869 zone_relock(zone, keg); 2870 zone->uz_flags &= ~UMA_ZFLAG_FULL; 2871 wakeup(zone); 2872 ZONE_UNLOCK(zone); 2873 } else 2874 KEG_UNLOCK(keg); 2875 } 2876 2877 /* See uma.h */ 2878 int 2879 uma_zone_set_max(uma_zone_t zone, int nitems) 2880 { 2881 uma_keg_t keg; 2882 2883 ZONE_LOCK(zone); 2884 keg = zone_first_keg(zone); 2885 keg->uk_maxpages = (nitems / keg->uk_ipers) * keg->uk_ppera; 2886 if (keg->uk_maxpages * keg->uk_ipers < nitems) 2887 keg->uk_maxpages += keg->uk_ppera; 2888 nitems = keg->uk_maxpages * keg->uk_ipers; 2889 ZONE_UNLOCK(zone); 2890 2891 return (nitems); 2892 } 2893 2894 /* See uma.h */ 2895 int 2896 uma_zone_get_max(uma_zone_t zone) 2897 { 2898 int nitems; 2899 uma_keg_t keg; 2900 2901 ZONE_LOCK(zone); 2902 keg = zone_first_keg(zone); 2903 nitems = keg->uk_maxpages * keg->uk_ipers; 2904 ZONE_UNLOCK(zone); 2905 2906 return (nitems); 2907 } 2908 2909 /* See uma.h */ 2910 void 2911 uma_zone_set_warning(uma_zone_t zone, const char *warning) 2912 { 2913 2914 ZONE_LOCK(zone); 2915 zone->uz_warning = warning; 2916 ZONE_UNLOCK(zone); 2917 } 2918 2919 /* See uma.h */ 2920 int 2921 uma_zone_get_cur(uma_zone_t zone) 2922 { 2923 int64_t nitems; 2924 u_int i; 2925 2926 ZONE_LOCK(zone); 2927 nitems = zone->uz_allocs - zone->uz_frees; 2928 CPU_FOREACH(i) { 2929 /* 2930 * See the comment in sysctl_vm_zone_stats() regarding the 2931 * safety of accessing the per-cpu caches. With the zone lock 2932 * held, it is safe, but can potentially result in stale data. 2933 */ 2934 nitems += zone->uz_cpu[i].uc_allocs - 2935 zone->uz_cpu[i].uc_frees; 2936 } 2937 ZONE_UNLOCK(zone); 2938 2939 return (nitems < 0 ? 
0 : nitems); 2940 } 2941 2942 /* See uma.h */ 2943 void 2944 uma_zone_set_init(uma_zone_t zone, uma_init uminit) 2945 { 2946 uma_keg_t keg; 2947 2948 ZONE_LOCK(zone); 2949 keg = zone_first_keg(zone); 2950 KASSERT(keg->uk_pages == 0, 2951 ("uma_zone_set_init on non-empty keg")); 2952 keg->uk_init = uminit; 2953 ZONE_UNLOCK(zone); 2954 } 2955 2956 /* See uma.h */ 2957 void 2958 uma_zone_set_fini(uma_zone_t zone, uma_fini fini) 2959 { 2960 uma_keg_t keg; 2961 2962 ZONE_LOCK(zone); 2963 keg = zone_first_keg(zone); 2964 KASSERT(keg->uk_pages == 0, 2965 ("uma_zone_set_fini on non-empty keg")); 2966 keg->uk_fini = fini; 2967 ZONE_UNLOCK(zone); 2968 } 2969 2970 /* See uma.h */ 2971 void 2972 uma_zone_set_zinit(uma_zone_t zone, uma_init zinit) 2973 { 2974 ZONE_LOCK(zone); 2975 KASSERT(zone_first_keg(zone)->uk_pages == 0, 2976 ("uma_zone_set_zinit on non-empty keg")); 2977 zone->uz_init = zinit; 2978 ZONE_UNLOCK(zone); 2979 } 2980 2981 /* See uma.h */ 2982 void 2983 uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini) 2984 { 2985 ZONE_LOCK(zone); 2986 KASSERT(zone_first_keg(zone)->uk_pages == 0, 2987 ("uma_zone_set_zfini on non-empty keg")); 2988 zone->uz_fini = zfini; 2989 ZONE_UNLOCK(zone); 2990 } 2991 2992 /* See uma.h */ 2993 /* XXX uk_freef is not actually used with the zone locked */ 2994 void 2995 uma_zone_set_freef(uma_zone_t zone, uma_free freef) 2996 { 2997 2998 ZONE_LOCK(zone); 2999 zone_first_keg(zone)->uk_freef = freef; 3000 ZONE_UNLOCK(zone); 3001 } 3002 3003 /* See uma.h */ 3004 /* XXX uk_allocf is not actually used with the zone locked */ 3005 void 3006 uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf) 3007 { 3008 uma_keg_t keg; 3009 3010 ZONE_LOCK(zone); 3011 keg = zone_first_keg(zone); 3012 keg->uk_flags |= UMA_ZFLAG_PRIVALLOC; 3013 keg->uk_allocf = allocf; 3014 ZONE_UNLOCK(zone); 3015 } 3016 3017 /* See uma.h */ 3018 int 3019 uma_zone_reserve_kva(uma_zone_t zone, int count) 3020 { 3021 uma_keg_t keg; 3022 vm_offset_t kva; 3023 int pages; 3024 3025 keg = zone_first_keg(zone); 3026 pages = count / keg->uk_ipers; 3027 3028 if (pages * keg->uk_ipers < count) 3029 pages++; 3030 3031 #ifdef UMA_MD_SMALL_ALLOC 3032 if (keg->uk_ppera > 1) { 3033 #else 3034 if (1) { 3035 #endif 3036 kva = kmem_alloc_nofault(kernel_map, pages * UMA_SLAB_SIZE); 3037 if (kva == 0) 3038 return (0); 3039 } else 3040 kva = 0; 3041 ZONE_LOCK(zone); 3042 keg->uk_kva = kva; 3043 keg->uk_offset = 0; 3044 keg->uk_maxpages = pages; 3045 #ifdef UMA_MD_SMALL_ALLOC 3046 keg->uk_allocf = (keg->uk_ppera > 1) ? 
noobj_alloc : uma_small_alloc; 3047 #else 3048 keg->uk_allocf = noobj_alloc; 3049 #endif 3050 keg->uk_flags |= UMA_ZONE_NOFREE | UMA_ZFLAG_PRIVALLOC; 3051 ZONE_UNLOCK(zone); 3052 return (1); 3053 } 3054 3055 /* See uma.h */ 3056 void 3057 uma_prealloc(uma_zone_t zone, int items) 3058 { 3059 int slabs; 3060 uma_slab_t slab; 3061 uma_keg_t keg; 3062 3063 keg = zone_first_keg(zone); 3064 ZONE_LOCK(zone); 3065 slabs = items / keg->uk_ipers; 3066 if (slabs * keg->uk_ipers < items) 3067 slabs++; 3068 while (slabs > 0) { 3069 slab = keg_alloc_slab(keg, zone, M_WAITOK); 3070 if (slab == NULL) 3071 break; 3072 MPASS(slab->us_keg == keg); 3073 LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link); 3074 slabs--; 3075 } 3076 ZONE_UNLOCK(zone); 3077 } 3078 3079 /* See uma.h */ 3080 u_int32_t * 3081 uma_find_refcnt(uma_zone_t zone, void *item) 3082 { 3083 uma_slabrefcnt_t slabref; 3084 uma_keg_t keg; 3085 u_int32_t *refcnt; 3086 int idx; 3087 3088 slabref = (uma_slabrefcnt_t)vtoslab((vm_offset_t)item & 3089 (~UMA_SLAB_MASK)); 3090 keg = slabref->us_keg; 3091 KASSERT(slabref != NULL && slabref->us_keg->uk_flags & UMA_ZONE_REFCNT, 3092 ("uma_find_refcnt(): zone possibly not UMA_ZONE_REFCNT")); 3093 idx = ((unsigned long)item - (unsigned long)slabref->us_data) 3094 / keg->uk_rsize; 3095 refcnt = &slabref->us_freelist[idx].us_refcnt; 3096 return refcnt; 3097 } 3098 3099 /* See uma.h */ 3100 void 3101 uma_reclaim(void) 3102 { 3103 #ifdef UMA_DEBUG 3104 printf("UMA: vm asked us to release pages!\n"); 3105 #endif 3106 bucket_enable(); 3107 zone_foreach(zone_drain); 3108 /* 3109 * Some slabs may have been freed but this zone will be visited early 3110 * we visit again so that we can free pages that are empty once other 3111 * zones are drained. We have to do the same for buckets. 
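 * Concretely, slabzone and slabrefzone are drained a second time
 * below, followed by the bucket zones.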
3112 */ 3113 zone_drain(slabzone); 3114 zone_drain(slabrefzone); 3115 bucket_zone_drain(); 3116 } 3117 3118 /* See uma.h */ 3119 int 3120 uma_zone_exhausted(uma_zone_t zone) 3121 { 3122 int full; 3123 3124 ZONE_LOCK(zone); 3125 full = (zone->uz_flags & UMA_ZFLAG_FULL); 3126 ZONE_UNLOCK(zone); 3127 return (full); 3128 } 3129 3130 int 3131 uma_zone_exhausted_nolock(uma_zone_t zone) 3132 { 3133 return (zone->uz_flags & UMA_ZFLAG_FULL); 3134 } 3135 3136 void * 3137 uma_large_malloc(int size, int wait) 3138 { 3139 void *mem; 3140 uma_slab_t slab; 3141 u_int8_t flags; 3142 3143 slab = zone_alloc_item(slabzone, NULL, wait); 3144 if (slab == NULL) 3145 return (NULL); 3146 mem = page_alloc(NULL, size, &flags, wait); 3147 if (mem) { 3148 vsetslab((vm_offset_t)mem, slab); 3149 slab->us_data = mem; 3150 slab->us_flags = flags | UMA_SLAB_MALLOC; 3151 slab->us_size = size; 3152 } else { 3153 zone_free_item(slabzone, slab, NULL, SKIP_NONE, 3154 ZFREE_STATFAIL | ZFREE_STATFREE); 3155 } 3156 3157 return (mem); 3158 } 3159 3160 void 3161 uma_large_free(uma_slab_t slab) 3162 { 3163 vsetobj((vm_offset_t)slab->us_data, kmem_object); 3164 page_free(slab->us_data, slab->us_size, slab->us_flags); 3165 zone_free_item(slabzone, slab, NULL, SKIP_NONE, ZFREE_STATFREE); 3166 } 3167 3168 void 3169 uma_print_stats(void) 3170 { 3171 zone_foreach(uma_print_zone); 3172 } 3173 3174 static void 3175 slab_print(uma_slab_t slab) 3176 { 3177 printf("slab: keg %p, data %p, freecount %d, firstfree %d\n", 3178 slab->us_keg, slab->us_data, slab->us_freecount, 3179 slab->us_firstfree); 3180 } 3181 3182 static void 3183 cache_print(uma_cache_t cache) 3184 { 3185 printf("alloc: %p(%d), free: %p(%d)\n", 3186 cache->uc_allocbucket, 3187 cache->uc_allocbucket?cache->uc_allocbucket->ub_cnt:0, 3188 cache->uc_freebucket, 3189 cache->uc_freebucket?cache->uc_freebucket->ub_cnt:0); 3190 } 3191 3192 static void 3193 uma_print_keg(uma_keg_t keg) 3194 { 3195 uma_slab_t slab; 3196 3197 printf("keg: %s(%p) size %d(%d) flags %#x ipers %d ppera %d " 3198 "out %d free %d limit %d\n", 3199 keg->uk_name, keg, keg->uk_size, keg->uk_rsize, keg->uk_flags, 3200 keg->uk_ipers, keg->uk_ppera, 3201 (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free, 3202 (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers); 3203 printf("Part slabs:\n"); 3204 LIST_FOREACH(slab, &keg->uk_part_slab, us_link) 3205 slab_print(slab); 3206 printf("Free slabs:\n"); 3207 LIST_FOREACH(slab, &keg->uk_free_slab, us_link) 3208 slab_print(slab); 3209 printf("Full slabs:\n"); 3210 LIST_FOREACH(slab, &keg->uk_full_slab, us_link) 3211 slab_print(slab); 3212 } 3213 3214 void 3215 uma_print_zone(uma_zone_t zone) 3216 { 3217 uma_cache_t cache; 3218 uma_klink_t kl; 3219 int i; 3220 3221 printf("zone: %s(%p) size %d flags %#x\n", 3222 zone->uz_name, zone, zone->uz_size, zone->uz_flags); 3223 LIST_FOREACH(kl, &zone->uz_kegs, kl_link) 3224 uma_print_keg(kl->kl_keg); 3225 CPU_FOREACH(i) { 3226 cache = &zone->uz_cpu[i]; 3227 printf("CPU %d Cache:\n", i); 3228 cache_print(cache); 3229 } 3230 } 3231 3232 #ifdef DDB 3233 /* 3234 * Generate statistics across both the zone and its per-cpu cache's. Return 3235 * desired statistics if the pointer is non-NULL for that statistic. 3236 * 3237 * Note: does not update the zone statistics, as it can't safely clear the 3238 * per-CPU cache statistic. 3239 * 3240 * XXXRW: Following the uc_allocbucket and uc_freebucket pointers here isn't 3241 * safe from off-CPU; we should modify the caches to track this information 3242 * directly so that we don't have to. 
3243 */ 3244 static void 3245 uma_zone_sumstat(uma_zone_t z, int *cachefreep, u_int64_t *allocsp, 3246 u_int64_t *freesp, u_int64_t *sleepsp) 3247 { 3248 uma_cache_t cache; 3249 u_int64_t allocs, frees, sleeps; 3250 int cachefree, cpu; 3251 3252 allocs = frees = sleeps = 0; 3253 cachefree = 0; 3254 CPU_FOREACH(cpu) { 3255 cache = &z->uz_cpu[cpu]; 3256 if (cache->uc_allocbucket != NULL) 3257 cachefree += cache->uc_allocbucket->ub_cnt; 3258 if (cache->uc_freebucket != NULL) 3259 cachefree += cache->uc_freebucket->ub_cnt; 3260 allocs += cache->uc_allocs; 3261 frees += cache->uc_frees; 3262 } 3263 allocs += z->uz_allocs; 3264 frees += z->uz_frees; 3265 sleeps += z->uz_sleeps; 3266 if (cachefreep != NULL) 3267 *cachefreep = cachefree; 3268 if (allocsp != NULL) 3269 *allocsp = allocs; 3270 if (freesp != NULL) 3271 *freesp = frees; 3272 if (sleepsp != NULL) 3273 *sleepsp = sleeps; 3274 } 3275 #endif /* DDB */ 3276 3277 static int 3278 sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS) 3279 { 3280 uma_keg_t kz; 3281 uma_zone_t z; 3282 int count; 3283 3284 count = 0; 3285 mtx_lock(&uma_mtx); 3286 LIST_FOREACH(kz, &uma_kegs, uk_link) { 3287 LIST_FOREACH(z, &kz->uk_zones, uz_link) 3288 count++; 3289 } 3290 mtx_unlock(&uma_mtx); 3291 return (sysctl_handle_int(oidp, &count, 0, req)); 3292 } 3293 3294 static int 3295 sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS) 3296 { 3297 struct uma_stream_header ush; 3298 struct uma_type_header uth; 3299 struct uma_percpu_stat ups; 3300 uma_bucket_t bucket; 3301 struct sbuf sbuf; 3302 uma_cache_t cache; 3303 uma_klink_t kl; 3304 uma_keg_t kz; 3305 uma_zone_t z; 3306 uma_keg_t k; 3307 int count, error, i; 3308 3309 error = sysctl_wire_old_buffer(req, 0); 3310 if (error != 0) 3311 return (error); 3312 sbuf_new_for_sysctl(&sbuf, NULL, 128, req); 3313 3314 count = 0; 3315 mtx_lock(&uma_mtx); 3316 LIST_FOREACH(kz, &uma_kegs, uk_link) { 3317 LIST_FOREACH(z, &kz->uk_zones, uz_link) 3318 count++; 3319 } 3320 3321 /* 3322 * Insert stream header. 3323 */ 3324 bzero(&ush, sizeof(ush)); 3325 ush.ush_version = UMA_STREAM_VERSION; 3326 ush.ush_maxcpus = (mp_maxid + 1); 3327 ush.ush_count = count; 3328 (void)sbuf_bcat(&sbuf, &ush, sizeof(ush)); 3329 3330 LIST_FOREACH(kz, &uma_kegs, uk_link) { 3331 LIST_FOREACH(z, &kz->uk_zones, uz_link) { 3332 bzero(&uth, sizeof(uth)); 3333 ZONE_LOCK(z); 3334 strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME); 3335 uth.uth_align = kz->uk_align; 3336 uth.uth_size = kz->uk_size; 3337 uth.uth_rsize = kz->uk_rsize; 3338 LIST_FOREACH(kl, &z->uz_kegs, kl_link) { 3339 k = kl->kl_keg; 3340 uth.uth_maxpages += k->uk_maxpages; 3341 uth.uth_pages += k->uk_pages; 3342 uth.uth_keg_free += k->uk_free; 3343 uth.uth_limit = (k->uk_maxpages / k->uk_ppera) 3344 * k->uk_ipers; 3345 } 3346 3347 /* 3348 * A zone is secondary is it is not the first entry 3349 * on the keg's zone list. 
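 * (Secondary zones are the ones created with uma_zsecond_create(),
 * which attach to an existing keg and set UMA_ZONE_SECONDARY.)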
3350 */ 3351 if ((z->uz_flags & UMA_ZONE_SECONDARY) && 3352 (LIST_FIRST(&kz->uk_zones) != z)) 3353 uth.uth_zone_flags = UTH_ZONE_SECONDARY; 3354 3355 LIST_FOREACH(bucket, &z->uz_full_bucket, ub_link) 3356 uth.uth_zone_free += bucket->ub_cnt; 3357 uth.uth_allocs = z->uz_allocs; 3358 uth.uth_frees = z->uz_frees; 3359 uth.uth_fails = z->uz_fails; 3360 uth.uth_sleeps = z->uz_sleeps; 3361 (void)sbuf_bcat(&sbuf, &uth, sizeof(uth)); 3362 /* 3363 * While it is not normally safe to access the cache 3364 * bucket pointers while not on the CPU that owns the 3365 * cache, we only allow the pointers to be exchanged 3366 * without the zone lock held, not invalidated, so 3367 * accept the possible race associated with bucket 3368 * exchange during monitoring. 3369 */ 3370 for (i = 0; i < (mp_maxid + 1); i++) { 3371 bzero(&ups, sizeof(ups)); 3372 if (kz->uk_flags & UMA_ZFLAG_INTERNAL) 3373 goto skip; 3374 if (CPU_ABSENT(i)) 3375 goto skip; 3376 cache = &z->uz_cpu[i]; 3377 if (cache->uc_allocbucket != NULL) 3378 ups.ups_cache_free += 3379 cache->uc_allocbucket->ub_cnt; 3380 if (cache->uc_freebucket != NULL) 3381 ups.ups_cache_free += 3382 cache->uc_freebucket->ub_cnt; 3383 ups.ups_allocs = cache->uc_allocs; 3384 ups.ups_frees = cache->uc_frees; 3385 skip: 3386 (void)sbuf_bcat(&sbuf, &ups, sizeof(ups)); 3387 } 3388 ZONE_UNLOCK(z); 3389 } 3390 } 3391 mtx_unlock(&uma_mtx); 3392 error = sbuf_finish(&sbuf); 3393 sbuf_delete(&sbuf); 3394 return (error); 3395 } 3396 3397 #ifdef DDB 3398 DB_SHOW_COMMAND(uma, db_show_uma) 3399 { 3400 u_int64_t allocs, frees, sleeps; 3401 uma_bucket_t bucket; 3402 uma_keg_t kz; 3403 uma_zone_t z; 3404 int cachefree; 3405 3406 db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free", 3407 "Requests", "Sleeps"); 3408 LIST_FOREACH(kz, &uma_kegs, uk_link) { 3409 LIST_FOREACH(z, &kz->uk_zones, uz_link) { 3410 if (kz->uk_flags & UMA_ZFLAG_INTERNAL) { 3411 allocs = z->uz_allocs; 3412 frees = z->uz_frees; 3413 sleeps = z->uz_sleeps; 3414 cachefree = 0; 3415 } else 3416 uma_zone_sumstat(z, &cachefree, &allocs, 3417 &frees, &sleeps); 3418 if (!((z->uz_flags & UMA_ZONE_SECONDARY) && 3419 (LIST_FIRST(&kz->uk_zones) != z))) 3420 cachefree += kz->uk_free; 3421 LIST_FOREACH(bucket, &z->uz_full_bucket, ub_link) 3422 cachefree += bucket->ub_cnt; 3423 db_printf("%18s %8ju %8jd %8d %12ju %8ju\n", z->uz_name, 3424 (uintmax_t)kz->uk_size, 3425 (intmax_t)(allocs - frees), cachefree, 3426 (uintmax_t)allocs, sleeps); 3427 if (db_pager_quit) 3428 return; 3429 } 3430 } 3431 } 3432 #endif 3433
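/*
 * Example usage of the public allocation API implemented above (purely
 * illustrative and not compiled; "struct foo" and "foo_zone" are
 * hypothetical names, and error handling is omitted):
 *
 *	static uma_zone_t foo_zone;
 *
 *	foo_zone = uma_zcreate("foo", sizeof(struct foo),
 *	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
 *	uma_zone_set_max(foo_zone, 1024);
 *
 *	struct foo *fp;
 *
 *	fp = uma_zalloc(foo_zone, M_WAITOK | M_ZERO);
 *	...
 *	uma_zfree(foo_zone, fp);
 *	uma_zdestroy(foo_zone);
 */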