1 /* 2 * Copyright (c) 2002, Jeffrey Roberson <jeff@freebsd.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice unmodified, this list of conditions, and the following 10 * disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 */ 26 27 /* 28 * uma_core.c Implementation of the Universal Memory allocator 29 * 30 * This allocator is intended to replace the multitude of similar object caches 31 * in the standard FreeBSD kernel. The intent is to be flexible as well as 32 * effecient. A primary design goal is to return unused memory to the rest of 33 * the system. This will make the system as a whole more flexible due to the 34 * ability to move memory to subsystems which most need it instead of leaving 35 * pools of reserved memory unused. 36 * 37 * The basic ideas stem from similar slab/zone based allocators whose algorithms 38 * are well known. 39 * 40 */ 41 42 /* 43 * TODO: 44 * - Improve memory usage for large allocations 45 * - Investigate cache size adjustments 46 */ 47 48 #include <sys/cdefs.h> 49 __FBSDID("$FreeBSD$"); 50 51 /* I should really use ktr.. */ 52 /* 53 #define UMA_DEBUG 1 54 #define UMA_DEBUG_ALLOC 1 55 #define UMA_DEBUG_ALLOC_1 1 56 */ 57 58 #include "opt_param.h" 59 #include <sys/param.h> 60 #include <sys/systm.h> 61 #include <sys/kernel.h> 62 #include <sys/types.h> 63 #include <sys/queue.h> 64 #include <sys/malloc.h> 65 #include <sys/ktr.h> 66 #include <sys/lock.h> 67 #include <sys/sysctl.h> 68 #include <sys/mutex.h> 69 #include <sys/proc.h> 70 #include <sys/smp.h> 71 #include <sys/vmmeter.h> 72 73 #include <vm/vm.h> 74 #include <vm/vm_object.h> 75 #include <vm/vm_page.h> 76 #include <vm/vm_param.h> 77 #include <vm/vm_map.h> 78 #include <vm/vm_kern.h> 79 #include <vm/vm_extern.h> 80 #include <vm/uma.h> 81 #include <vm/uma_int.h> 82 #include <vm/uma_dbg.h> 83 84 #include <machine/vmparam.h> 85 86 /* 87 * This is the zone and keg from which all zones are spawned. The idea is that 88 * even the zone & keg heads are allocated from the allocator, so we use the 89 * bss section to bootstrap us. 90 */ 91 static struct uma_keg masterkeg; 92 static struct uma_zone masterzone_k; 93 static struct uma_zone masterzone_z; 94 static uma_zone_t kegs = &masterzone_k; 95 static uma_zone_t zones = &masterzone_z; 96 97 /* This is the zone from which all of uma_slab_t's are allocated. */ 98 static uma_zone_t slabzone; 99 static uma_zone_t slabrefzone; /* With refcounters (for UMA_ZONE_REFCNT) */ 100 101 /* 102 * The initial hash tables come out of this zone so they can be allocated 103 * prior to malloc coming up. 104 */ 105 static uma_zone_t hashzone; 106 107 static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets"); 108 109 /* 110 * Are we allowed to allocate buckets? 111 */ 112 static int bucketdisable = 1; 113 114 /* Linked list of all kegs in the system */ 115 static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(&uma_kegs); 116 117 /* This mutex protects the keg list */ 118 static struct mtx uma_mtx; 119 120 /* These are the pcpu cache locks */ 121 static struct mtx uma_pcpu_mtx[MAXCPU]; 122 123 /* Linked list of boot time pages */ 124 static LIST_HEAD(,uma_slab) uma_boot_pages = 125 LIST_HEAD_INITIALIZER(&uma_boot_pages); 126 127 /* Count of free boottime pages */ 128 static int uma_boot_free = 0; 129 130 /* Is the VM done starting up? */ 131 static int booted = 0; 132 133 /* Maximum number of allowed items-per-slab if the slab header is OFFPAGE */ 134 static u_int uma_max_ipers; 135 static u_int uma_max_ipers_ref; 136 137 /* 138 * This is the handle used to schedule events that need to happen 139 * outside of the allocation fast path. 140 */ 141 static struct callout uma_callout; 142 #define UMA_TIMEOUT 20 /* Seconds for callout interval. */ 143 144 /* 145 * This structure is passed as the zone ctor arg so that I don't have to create 146 * a special allocation function just for zones. 147 */ 148 struct uma_zctor_args { 149 char *name; 150 size_t size; 151 uma_ctor ctor; 152 uma_dtor dtor; 153 uma_init uminit; 154 uma_fini fini; 155 uma_keg_t keg; 156 int align; 157 u_int16_t flags; 158 }; 159 160 struct uma_kctor_args { 161 uma_zone_t zone; 162 size_t size; 163 uma_init uminit; 164 uma_fini fini; 165 int align; 166 u_int16_t flags; 167 }; 168 169 struct uma_bucket_zone { 170 uma_zone_t ubz_zone; 171 char *ubz_name; 172 int ubz_entries; 173 }; 174 175 #define BUCKET_MAX 128 176 177 struct uma_bucket_zone bucket_zones[] = { 178 { NULL, "16 Bucket", 16 }, 179 { NULL, "32 Bucket", 32 }, 180 { NULL, "64 Bucket", 64 }, 181 { NULL, "128 Bucket", 128 }, 182 { NULL, NULL, 0} 183 }; 184 185 #define BUCKET_SHIFT 4 186 #define BUCKET_ZONES ((BUCKET_MAX >> BUCKET_SHIFT) + 1) 187 188 /* 189 * bucket_size[] maps requested bucket sizes to zones that allocate a bucket 190 * of approximately the right size. 191 */ 192 static uint8_t bucket_size[BUCKET_ZONES]; 193 194 enum zfreeskip { SKIP_NONE, SKIP_DTOR, SKIP_FINI }; 195 196 /* Prototypes.. */ 197 198 static void *obj_alloc(uma_zone_t, int, u_int8_t *, int); 199 static void *page_alloc(uma_zone_t, int, u_int8_t *, int); 200 static void *startup_alloc(uma_zone_t, int, u_int8_t *, int); 201 static void page_free(void *, int, u_int8_t); 202 static uma_slab_t slab_zalloc(uma_zone_t, int); 203 static void cache_drain(uma_zone_t); 204 static void bucket_drain(uma_zone_t, uma_bucket_t); 205 static void bucket_cache_drain(uma_zone_t zone); 206 static int keg_ctor(void *, int, void *, int); 207 static void keg_dtor(void *, int, void *); 208 static int zone_ctor(void *, int, void *, int); 209 static void zone_dtor(void *, int, void *); 210 static int zero_init(void *, int, int); 211 static void zone_small_init(uma_zone_t zone); 212 static void zone_large_init(uma_zone_t zone); 213 static void zone_foreach(void (*zfunc)(uma_zone_t)); 214 static void zone_timeout(uma_zone_t zone); 215 static int hash_alloc(struct uma_hash *); 216 static int hash_expand(struct uma_hash *, struct uma_hash *); 217 static void hash_free(struct uma_hash *hash); 218 static void uma_timeout(void *); 219 static void uma_startup3(void); 220 static void *uma_zalloc_internal(uma_zone_t, void *, int); 221 static void uma_zfree_internal(uma_zone_t, void *, void *, enum zfreeskip); 222 static void bucket_enable(void); 223 static void bucket_init(void); 224 static uma_bucket_t bucket_alloc(int, int); 225 static void bucket_free(uma_bucket_t); 226 static void bucket_zone_drain(void); 227 static int uma_zalloc_bucket(uma_zone_t zone, int flags); 228 static uma_slab_t uma_zone_slab(uma_zone_t zone, int flags); 229 static void *uma_slab_alloc(uma_zone_t zone, uma_slab_t slab); 230 static void zone_drain(uma_zone_t); 231 static uma_zone_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, 232 uma_fini fini, int align, u_int16_t flags); 233 234 void uma_print_zone(uma_zone_t); 235 void uma_print_stats(void); 236 static int sysctl_vm_zone(SYSCTL_HANDLER_ARGS); 237 238 #ifdef WITNESS 239 static int nosleepwithlocks = 1; 240 SYSCTL_INT(_debug, OID_AUTO, nosleepwithlocks, CTLFLAG_RW, &nosleepwithlocks, 241 0, "Convert M_WAITOK to M_NOWAIT to avoid lock-held-across-sleep paths"); 242 #else 243 static int nosleepwithlocks = 0; 244 SYSCTL_INT(_debug, OID_AUTO, nosleepwithlocks, CTLFLAG_RW, &nosleepwithlocks, 245 0, "Convert M_WAITOK to M_NOWAIT to avoid lock-held-across-sleep paths"); 246 #endif 247 SYSCTL_OID(_vm, OID_AUTO, zone, CTLTYPE_STRING|CTLFLAG_RD, 248 NULL, 0, sysctl_vm_zone, "A", "Zone Info"); 249 SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL); 250 251 /* 252 * This routine checks to see whether or not it's safe to enable buckets. 253 */ 254 255 static void 256 bucket_enable(void) 257 { 258 if (cnt.v_free_count < cnt.v_free_min) 259 bucketdisable = 1; 260 else 261 bucketdisable = 0; 262 } 263 264 /* 265 * Initialize bucket_zones, the array of zones of buckets of various sizes. 266 * 267 * For each zone, calculate the memory required for each bucket, consisting 268 * of the header and an array of pointers. Initialize bucket_size[] to point 269 * the range of appropriate bucket sizes at the zone. 270 */ 271 static void 272 bucket_init(void) 273 { 274 struct uma_bucket_zone *ubz; 275 int i; 276 int j; 277 278 for (i = 0, j = 0; bucket_zones[j].ubz_entries != 0; j++) { 279 int size; 280 281 ubz = &bucket_zones[j]; 282 size = roundup(sizeof(struct uma_bucket), sizeof(void *)); 283 size += sizeof(void *) * ubz->ubz_entries; 284 ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size, 285 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL); 286 for (; i <= ubz->ubz_entries; i += (1 << BUCKET_SHIFT)) 287 bucket_size[i >> BUCKET_SHIFT] = j; 288 } 289 } 290 291 /* 292 * Given a desired number of entries for a bucket, return the zone from which 293 * to allocate the bucket. 294 */ 295 static struct uma_bucket_zone * 296 bucket_zone_lookup(int entries) 297 { 298 int idx; 299 300 idx = howmany(entries, 1 << BUCKET_SHIFT); 301 return (&bucket_zones[bucket_size[idx]]); 302 } 303 304 static uma_bucket_t 305 bucket_alloc(int entries, int bflags) 306 { 307 struct uma_bucket_zone *ubz; 308 uma_bucket_t bucket; 309 310 /* 311 * This is to stop us from allocating per cpu buckets while we're 312 * running out of UMA_BOOT_PAGES. Otherwise, we would exhaust the 313 * boot pages. This also prevents us from allocating buckets in 314 * low memory situations. 315 */ 316 if (bucketdisable) 317 return (NULL); 318 319 ubz = bucket_zone_lookup(entries); 320 bucket = uma_zalloc_internal(ubz->ubz_zone, NULL, bflags); 321 if (bucket) { 322 #ifdef INVARIANTS 323 bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries); 324 #endif 325 bucket->ub_cnt = 0; 326 bucket->ub_entries = ubz->ubz_entries; 327 } 328 329 return (bucket); 330 } 331 332 static void 333 bucket_free(uma_bucket_t bucket) 334 { 335 struct uma_bucket_zone *ubz; 336 337 ubz = bucket_zone_lookup(bucket->ub_entries); 338 uma_zfree_internal(ubz->ubz_zone, bucket, NULL, SKIP_NONE); 339 } 340 341 static void 342 bucket_zone_drain(void) 343 { 344 struct uma_bucket_zone *ubz; 345 346 for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) 347 zone_drain(ubz->ubz_zone); 348 } 349 350 351 /* 352 * Routine called by timeout which is used to fire off some time interval 353 * based calculations. (stats, hash size, etc.) 354 * 355 * Arguments: 356 * arg Unused 357 * 358 * Returns: 359 * Nothing 360 */ 361 static void 362 uma_timeout(void *unused) 363 { 364 bucket_enable(); 365 zone_foreach(zone_timeout); 366 367 /* Reschedule this event */ 368 callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL); 369 } 370 371 /* 372 * Routine to perform timeout driven calculations. This expands the 373 * hashes and does per cpu statistics aggregation. 374 * 375 * Arguments: 376 * zone The zone to operate on 377 * 378 * Returns: 379 * Nothing 380 */ 381 static void 382 zone_timeout(uma_zone_t zone) 383 { 384 uma_keg_t keg; 385 uma_cache_t cache; 386 u_int64_t alloc; 387 int cpu; 388 389 keg = zone->uz_keg; 390 alloc = 0; 391 392 /* 393 * Aggregate per cpu cache statistics back to the zone. 394 * 395 * XXX This should be done in the sysctl handler. 396 * 397 * I may rewrite this to set a flag in the per cpu cache instead of 398 * locking. If the flag is not cleared on the next round I will have 399 * to lock and do it here instead so that the statistics don't get too 400 * far out of sync. 401 */ 402 if (!(keg->uk_flags & UMA_ZFLAG_INTERNAL)) { 403 for (cpu = 0; cpu <= mp_maxid; cpu++) { 404 if (CPU_ABSENT(cpu)) 405 continue; 406 CPU_LOCK(cpu); 407 cache = &zone->uz_cpu[cpu]; 408 /* Add them up, and reset */ 409 alloc += cache->uc_allocs; 410 cache->uc_allocs = 0; 411 CPU_UNLOCK(cpu); 412 } 413 } 414 415 /* Now push these stats back into the zone.. */ 416 ZONE_LOCK(zone); 417 zone->uz_allocs += alloc; 418 419 /* 420 * Expand the zone hash table. 421 * 422 * This is done if the number of slabs is larger than the hash size. 423 * What I'm trying to do here is completely reduce collisions. This 424 * may be a little aggressive. Should I allow for two collisions max? 425 */ 426 427 if (keg->uk_flags & UMA_ZONE_HASH && 428 keg->uk_pages / keg->uk_ppera >= keg->uk_hash.uh_hashsize) { 429 struct uma_hash newhash; 430 struct uma_hash oldhash; 431 int ret; 432 433 /* 434 * This is so involved because allocating and freeing 435 * while the zone lock is held will lead to deadlock. 436 * I have to do everything in stages and check for 437 * races. 438 */ 439 newhash = keg->uk_hash; 440 ZONE_UNLOCK(zone); 441 ret = hash_alloc(&newhash); 442 ZONE_LOCK(zone); 443 if (ret) { 444 if (hash_expand(&keg->uk_hash, &newhash)) { 445 oldhash = keg->uk_hash; 446 keg->uk_hash = newhash; 447 } else 448 oldhash = newhash; 449 450 ZONE_UNLOCK(zone); 451 hash_free(&oldhash); 452 ZONE_LOCK(zone); 453 } 454 } 455 ZONE_UNLOCK(zone); 456 } 457 458 /* 459 * Allocate and zero fill the next sized hash table from the appropriate 460 * backing store. 461 * 462 * Arguments: 463 * hash A new hash structure with the old hash size in uh_hashsize 464 * 465 * Returns: 466 * 1 on sucess and 0 on failure. 467 */ 468 static int 469 hash_alloc(struct uma_hash *hash) 470 { 471 int oldsize; 472 int alloc; 473 474 oldsize = hash->uh_hashsize; 475 476 /* We're just going to go to a power of two greater */ 477 if (oldsize) { 478 hash->uh_hashsize = oldsize * 2; 479 alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize; 480 hash->uh_slab_hash = (struct slabhead *)malloc(alloc, 481 M_UMAHASH, M_NOWAIT); 482 } else { 483 alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT; 484 hash->uh_slab_hash = uma_zalloc_internal(hashzone, NULL, 485 M_WAITOK); 486 hash->uh_hashsize = UMA_HASH_SIZE_INIT; 487 } 488 if (hash->uh_slab_hash) { 489 bzero(hash->uh_slab_hash, alloc); 490 hash->uh_hashmask = hash->uh_hashsize - 1; 491 return (1); 492 } 493 494 return (0); 495 } 496 497 /* 498 * Expands the hash table for HASH zones. This is done from zone_timeout 499 * to reduce collisions. This must not be done in the regular allocation 500 * path, otherwise, we can recurse on the vm while allocating pages. 501 * 502 * Arguments: 503 * oldhash The hash you want to expand 504 * newhash The hash structure for the new table 505 * 506 * Returns: 507 * Nothing 508 * 509 * Discussion: 510 */ 511 static int 512 hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash) 513 { 514 uma_slab_t slab; 515 int hval; 516 int i; 517 518 if (!newhash->uh_slab_hash) 519 return (0); 520 521 if (oldhash->uh_hashsize >= newhash->uh_hashsize) 522 return (0); 523 524 /* 525 * I need to investigate hash algorithms for resizing without a 526 * full rehash. 527 */ 528 529 for (i = 0; i < oldhash->uh_hashsize; i++) 530 while (!SLIST_EMPTY(&oldhash->uh_slab_hash[i])) { 531 slab = SLIST_FIRST(&oldhash->uh_slab_hash[i]); 532 SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[i], us_hlink); 533 hval = UMA_HASH(newhash, slab->us_data); 534 SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval], 535 slab, us_hlink); 536 } 537 538 return (1); 539 } 540 541 /* 542 * Free the hash bucket to the appropriate backing store. 543 * 544 * Arguments: 545 * slab_hash The hash bucket we're freeing 546 * hashsize The number of entries in that hash bucket 547 * 548 * Returns: 549 * Nothing 550 */ 551 static void 552 hash_free(struct uma_hash *hash) 553 { 554 if (hash->uh_slab_hash == NULL) 555 return; 556 if (hash->uh_hashsize == UMA_HASH_SIZE_INIT) 557 uma_zfree_internal(hashzone, 558 hash->uh_slab_hash, NULL, SKIP_NONE); 559 else 560 free(hash->uh_slab_hash, M_UMAHASH); 561 } 562 563 /* 564 * Frees all outstanding items in a bucket 565 * 566 * Arguments: 567 * zone The zone to free to, must be unlocked. 568 * bucket The free/alloc bucket with items, cpu queue must be locked. 569 * 570 * Returns: 571 * Nothing 572 */ 573 574 static void 575 bucket_drain(uma_zone_t zone, uma_bucket_t bucket) 576 { 577 uma_slab_t slab; 578 int mzone; 579 void *item; 580 581 if (bucket == NULL) 582 return; 583 584 slab = NULL; 585 mzone = 0; 586 587 /* We have to lookup the slab again for malloc.. */ 588 if (zone->uz_keg->uk_flags & UMA_ZONE_MALLOC) 589 mzone = 1; 590 591 while (bucket->ub_cnt > 0) { 592 bucket->ub_cnt--; 593 item = bucket->ub_bucket[bucket->ub_cnt]; 594 #ifdef INVARIANTS 595 bucket->ub_bucket[bucket->ub_cnt] = NULL; 596 KASSERT(item != NULL, 597 ("bucket_drain: botched ptr, item is NULL")); 598 #endif 599 /* 600 * This is extremely inefficient. The slab pointer was passed 601 * to uma_zfree_arg, but we lost it because the buckets don't 602 * hold them. This will go away when free() gets a size passed 603 * to it. 604 */ 605 if (mzone) 606 slab = vtoslab((vm_offset_t)item & (~UMA_SLAB_MASK)); 607 uma_zfree_internal(zone, item, slab, SKIP_DTOR); 608 } 609 } 610 611 /* 612 * Drains the per cpu caches for a zone. 613 * 614 * Arguments: 615 * zone The zone to drain, must be unlocked. 616 * 617 * Returns: 618 * Nothing 619 */ 620 static void 621 cache_drain(uma_zone_t zone) 622 { 623 uma_cache_t cache; 624 int cpu; 625 626 /* 627 * We have to lock each cpu cache before locking the zone 628 */ 629 for (cpu = 0; cpu <= mp_maxid; cpu++) { 630 if (CPU_ABSENT(cpu)) 631 continue; 632 CPU_LOCK(cpu); 633 cache = &zone->uz_cpu[cpu]; 634 bucket_drain(zone, cache->uc_allocbucket); 635 bucket_drain(zone, cache->uc_freebucket); 636 if (cache->uc_allocbucket != NULL) 637 bucket_free(cache->uc_allocbucket); 638 if (cache->uc_freebucket != NULL) 639 bucket_free(cache->uc_freebucket); 640 cache->uc_allocbucket = cache->uc_freebucket = NULL; 641 } 642 ZONE_LOCK(zone); 643 bucket_cache_drain(zone); 644 ZONE_UNLOCK(zone); 645 for (cpu = 0; cpu <= mp_maxid; cpu++) { 646 if (CPU_ABSENT(cpu)) 647 continue; 648 CPU_UNLOCK(cpu); 649 } 650 } 651 652 /* 653 * Drain the cached buckets from a zone. Expects a locked zone on entry. 654 */ 655 static void 656 bucket_cache_drain(uma_zone_t zone) 657 { 658 uma_bucket_t bucket; 659 660 /* 661 * Drain the bucket queues and free the buckets, we just keep two per 662 * cpu (alloc/free). 663 */ 664 while ((bucket = LIST_FIRST(&zone->uz_full_bucket)) != NULL) { 665 LIST_REMOVE(bucket, ub_link); 666 ZONE_UNLOCK(zone); 667 bucket_drain(zone, bucket); 668 bucket_free(bucket); 669 ZONE_LOCK(zone); 670 } 671 672 /* Now we do the free queue.. */ 673 while ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) { 674 LIST_REMOVE(bucket, ub_link); 675 bucket_free(bucket); 676 } 677 } 678 679 /* 680 * Frees pages from a zone back to the system. This is done on demand from 681 * the pageout daemon. 682 * 683 * Arguments: 684 * zone The zone to free pages from 685 * all Should we drain all items? 686 * 687 * Returns: 688 * Nothing. 689 */ 690 static void 691 zone_drain(uma_zone_t zone) 692 { 693 struct slabhead freeslabs = {}; 694 uma_keg_t keg; 695 uma_slab_t slab; 696 uma_slab_t n; 697 u_int8_t flags; 698 u_int8_t *mem; 699 int i; 700 701 keg = zone->uz_keg; 702 703 /* 704 * We don't want to take pages from statically allocated zones at this 705 * time 706 */ 707 if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL) 708 return; 709 710 ZONE_LOCK(zone); 711 712 #ifdef UMA_DEBUG 713 printf("%s free items: %u\n", zone->uz_name, keg->uk_free); 714 #endif 715 bucket_cache_drain(zone); 716 if (keg->uk_free == 0) 717 goto finished; 718 719 slab = LIST_FIRST(&keg->uk_free_slab); 720 while (slab) { 721 n = LIST_NEXT(slab, us_link); 722 723 /* We have no where to free these to */ 724 if (slab->us_flags & UMA_SLAB_BOOT) { 725 slab = n; 726 continue; 727 } 728 729 LIST_REMOVE(slab, us_link); 730 keg->uk_pages -= keg->uk_ppera; 731 keg->uk_free -= keg->uk_ipers; 732 733 if (keg->uk_flags & UMA_ZONE_HASH) 734 UMA_HASH_REMOVE(&keg->uk_hash, slab, slab->us_data); 735 736 SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink); 737 738 slab = n; 739 } 740 finished: 741 ZONE_UNLOCK(zone); 742 743 while ((slab = SLIST_FIRST(&freeslabs)) != NULL) { 744 SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink); 745 if (keg->uk_fini) 746 for (i = 0; i < keg->uk_ipers; i++) 747 keg->uk_fini( 748 slab->us_data + (keg->uk_rsize * i), 749 keg->uk_size); 750 flags = slab->us_flags; 751 mem = slab->us_data; 752 753 if ((keg->uk_flags & UMA_ZONE_MALLOC) || 754 (keg->uk_flags & UMA_ZONE_REFCNT)) { 755 vm_object_t obj; 756 757 if (flags & UMA_SLAB_KMEM) 758 obj = kmem_object; 759 else 760 obj = NULL; 761 for (i = 0; i < keg->uk_ppera; i++) 762 vsetobj((vm_offset_t)mem + (i * PAGE_SIZE), 763 obj); 764 } 765 if (keg->uk_flags & UMA_ZONE_OFFPAGE) 766 uma_zfree_internal(keg->uk_slabzone, slab, NULL, 767 SKIP_NONE); 768 #ifdef UMA_DEBUG 769 printf("%s: Returning %d bytes.\n", 770 zone->uz_name, UMA_SLAB_SIZE * keg->uk_ppera); 771 #endif 772 keg->uk_freef(mem, UMA_SLAB_SIZE * keg->uk_ppera, flags); 773 } 774 } 775 776 /* 777 * Allocate a new slab for a zone. This does not insert the slab onto a list. 778 * 779 * Arguments: 780 * zone The zone to allocate slabs for 781 * wait Shall we wait? 782 * 783 * Returns: 784 * The slab that was allocated or NULL if there is no memory and the 785 * caller specified M_NOWAIT. 786 */ 787 static uma_slab_t 788 slab_zalloc(uma_zone_t zone, int wait) 789 { 790 uma_slabrefcnt_t slabref; 791 uma_slab_t slab; 792 uma_keg_t keg; 793 u_int8_t *mem; 794 u_int8_t flags; 795 int i; 796 797 slab = NULL; 798 keg = zone->uz_keg; 799 800 #ifdef UMA_DEBUG 801 printf("slab_zalloc: Allocating a new slab for %s\n", zone->uz_name); 802 #endif 803 ZONE_UNLOCK(zone); 804 805 if (keg->uk_flags & UMA_ZONE_OFFPAGE) { 806 slab = uma_zalloc_internal(keg->uk_slabzone, NULL, wait); 807 if (slab == NULL) { 808 ZONE_LOCK(zone); 809 return NULL; 810 } 811 } 812 813 /* 814 * This reproduces the old vm_zone behavior of zero filling pages the 815 * first time they are added to a zone. 816 * 817 * Malloced items are zeroed in uma_zalloc. 818 */ 819 820 if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0) 821 wait |= M_ZERO; 822 else 823 wait &= ~M_ZERO; 824 825 mem = keg->uk_allocf(zone, keg->uk_ppera * UMA_SLAB_SIZE, 826 &flags, wait); 827 if (mem == NULL) { 828 if (keg->uk_flags & UMA_ZONE_OFFPAGE) 829 uma_zfree_internal(keg->uk_slabzone, slab, NULL, 0); 830 ZONE_LOCK(zone); 831 return (NULL); 832 } 833 834 /* Point the slab into the allocated memory */ 835 if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) 836 slab = (uma_slab_t )(mem + keg->uk_pgoff); 837 838 if ((keg->uk_flags & UMA_ZONE_MALLOC) || 839 (keg->uk_flags & UMA_ZONE_REFCNT)) 840 for (i = 0; i < keg->uk_ppera; i++) 841 vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab); 842 843 slab->us_keg = keg; 844 slab->us_data = mem; 845 slab->us_freecount = keg->uk_ipers; 846 slab->us_firstfree = 0; 847 slab->us_flags = flags; 848 849 if (keg->uk_flags & UMA_ZONE_REFCNT) { 850 slabref = (uma_slabrefcnt_t)slab; 851 for (i = 0; i < keg->uk_ipers; i++) { 852 slabref->us_freelist[i].us_refcnt = 0; 853 slabref->us_freelist[i].us_item = i+1; 854 } 855 } else { 856 for (i = 0; i < keg->uk_ipers; i++) 857 slab->us_freelist[i].us_item = i+1; 858 } 859 860 if (keg->uk_init != NULL) { 861 for (i = 0; i < keg->uk_ipers; i++) 862 if (keg->uk_init(slab->us_data + (keg->uk_rsize * i), 863 keg->uk_size, wait) != 0) 864 break; 865 if (i != keg->uk_ipers) { 866 if (keg->uk_fini != NULL) { 867 for (i--; i > -1; i--) 868 keg->uk_fini(slab->us_data + 869 (keg->uk_rsize * i), 870 keg->uk_size); 871 } 872 if ((keg->uk_flags & UMA_ZONE_MALLOC) || 873 (keg->uk_flags & UMA_ZONE_REFCNT)) 874 for (i = 0; i < keg->uk_ppera; i++) 875 vsetobj((vm_offset_t)mem + 876 (i * PAGE_SIZE), NULL); 877 if (keg->uk_flags & UMA_ZONE_OFFPAGE) 878 uma_zfree_internal(keg->uk_slabzone, slab, 879 NULL, SKIP_NONE); 880 keg->uk_freef(mem, UMA_SLAB_SIZE * keg->uk_ppera, 881 flags); 882 ZONE_LOCK(zone); 883 return (NULL); 884 } 885 } 886 ZONE_LOCK(zone); 887 888 if (keg->uk_flags & UMA_ZONE_HASH) 889 UMA_HASH_INSERT(&keg->uk_hash, slab, mem); 890 891 keg->uk_pages += keg->uk_ppera; 892 keg->uk_free += keg->uk_ipers; 893 894 return (slab); 895 } 896 897 /* 898 * This function is intended to be used early on in place of page_alloc() so 899 * that we may use the boot time page cache to satisfy allocations before 900 * the VM is ready. 901 */ 902 static void * 903 startup_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait) 904 { 905 uma_keg_t keg; 906 907 keg = zone->uz_keg; 908 909 /* 910 * Check our small startup cache to see if it has pages remaining. 911 */ 912 mtx_lock(&uma_mtx); 913 if (uma_boot_free != 0) { 914 uma_slab_t tmps; 915 916 tmps = LIST_FIRST(&uma_boot_pages); 917 LIST_REMOVE(tmps, us_link); 918 uma_boot_free--; 919 mtx_unlock(&uma_mtx); 920 *pflag = tmps->us_flags; 921 return (tmps->us_data); 922 } 923 mtx_unlock(&uma_mtx); 924 if (booted == 0) 925 panic("UMA: Increase UMA_BOOT_PAGES"); 926 /* 927 * Now that we've booted reset these users to their real allocator. 928 */ 929 #ifdef UMA_MD_SMALL_ALLOC 930 keg->uk_allocf = uma_small_alloc; 931 #else 932 keg->uk_allocf = page_alloc; 933 #endif 934 return keg->uk_allocf(zone, bytes, pflag, wait); 935 } 936 937 /* 938 * Allocates a number of pages from the system 939 * 940 * Arguments: 941 * zone Unused 942 * bytes The number of bytes requested 943 * wait Shall we wait? 944 * 945 * Returns: 946 * A pointer to the alloced memory or possibly 947 * NULL if M_NOWAIT is set. 948 */ 949 static void * 950 page_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait) 951 { 952 void *p; /* Returned page */ 953 954 *pflag = UMA_SLAB_KMEM; 955 p = (void *) kmem_malloc(kmem_map, bytes, wait); 956 957 return (p); 958 } 959 960 /* 961 * Allocates a number of pages from within an object 962 * 963 * Arguments: 964 * zone Unused 965 * bytes The number of bytes requested 966 * wait Shall we wait? 967 * 968 * Returns: 969 * A pointer to the alloced memory or possibly 970 * NULL if M_NOWAIT is set. 971 */ 972 static void * 973 obj_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait) 974 { 975 vm_object_t object; 976 vm_offset_t retkva, zkva; 977 vm_page_t p; 978 int pages, startpages; 979 980 object = zone->uz_keg->uk_obj; 981 retkva = 0; 982 983 /* 984 * This looks a little weird since we're getting one page at a time. 985 */ 986 VM_OBJECT_LOCK(object); 987 p = TAILQ_LAST(&object->memq, pglist); 988 pages = p != NULL ? p->pindex + 1 : 0; 989 startpages = pages; 990 zkva = zone->uz_keg->uk_kva + pages * PAGE_SIZE; 991 for (; bytes > 0; bytes -= PAGE_SIZE) { 992 p = vm_page_alloc(object, pages, 993 VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED); 994 if (p == NULL) { 995 if (pages != startpages) 996 pmap_qremove(retkva, pages - startpages); 997 while (pages != startpages) { 998 pages--; 999 p = TAILQ_LAST(&object->memq, pglist); 1000 vm_page_lock_queues(); 1001 vm_page_unwire(p, 0); 1002 vm_page_free(p); 1003 vm_page_unlock_queues(); 1004 } 1005 retkva = 0; 1006 goto done; 1007 } 1008 pmap_qenter(zkva, &p, 1); 1009 if (retkva == 0) 1010 retkva = zkva; 1011 zkva += PAGE_SIZE; 1012 pages += 1; 1013 } 1014 done: 1015 VM_OBJECT_UNLOCK(object); 1016 *flags = UMA_SLAB_PRIV; 1017 1018 return ((void *)retkva); 1019 } 1020 1021 /* 1022 * Frees a number of pages to the system 1023 * 1024 * Arguments: 1025 * mem A pointer to the memory to be freed 1026 * size The size of the memory being freed 1027 * flags The original p->us_flags field 1028 * 1029 * Returns: 1030 * Nothing 1031 */ 1032 static void 1033 page_free(void *mem, int size, u_int8_t flags) 1034 { 1035 vm_map_t map; 1036 1037 if (flags & UMA_SLAB_KMEM) 1038 map = kmem_map; 1039 else 1040 panic("UMA: page_free used with invalid flags %d\n", flags); 1041 1042 kmem_free(map, (vm_offset_t)mem, size); 1043 } 1044 1045 /* 1046 * Zero fill initializer 1047 * 1048 * Arguments/Returns follow uma_init specifications 1049 */ 1050 static int 1051 zero_init(void *mem, int size, int flags) 1052 { 1053 bzero(mem, size); 1054 return (0); 1055 } 1056 1057 /* 1058 * Finish creating a small uma zone. This calculates ipers, and the zone size. 1059 * 1060 * Arguments 1061 * zone The zone we should initialize 1062 * 1063 * Returns 1064 * Nothing 1065 */ 1066 static void 1067 zone_small_init(uma_zone_t zone) 1068 { 1069 uma_keg_t keg; 1070 u_int rsize; 1071 u_int memused; 1072 u_int wastedspace; 1073 u_int shsize; 1074 1075 keg = zone->uz_keg; 1076 KASSERT(keg != NULL, ("Keg is null in zone_small_init")); 1077 rsize = keg->uk_size; 1078 1079 if (rsize < UMA_SMALLEST_UNIT) 1080 rsize = UMA_SMALLEST_UNIT; 1081 if (rsize & keg->uk_align) 1082 rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1); 1083 1084 keg->uk_rsize = rsize; 1085 keg->uk_ppera = 1; 1086 1087 if (keg->uk_flags & UMA_ZONE_REFCNT) { 1088 rsize += UMA_FRITMREF_SZ; /* linkage & refcnt */ 1089 shsize = sizeof(struct uma_slab_refcnt); 1090 } else { 1091 rsize += UMA_FRITM_SZ; /* Account for linkage */ 1092 shsize = sizeof(struct uma_slab); 1093 } 1094 1095 keg->uk_ipers = (UMA_SLAB_SIZE - shsize) / rsize; 1096 KASSERT(keg->uk_ipers != 0, ("zone_small_init: ipers is 0")); 1097 memused = keg->uk_ipers * rsize + shsize; 1098 wastedspace = UMA_SLAB_SIZE - memused; 1099 1100 /* 1101 * We can't do OFFPAGE if we're internal or if we've been 1102 * asked to not go to the VM for buckets. If we do this we 1103 * may end up going to the VM (kmem_map) for slabs which we 1104 * do not want to do if we're UMA_ZFLAG_CACHEONLY as a 1105 * result of UMA_ZONE_VM, which clearly forbids it. 1106 */ 1107 if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) || 1108 (keg->uk_flags & UMA_ZFLAG_CACHEONLY)) 1109 return; 1110 1111 if ((wastedspace >= UMA_MAX_WASTE) && 1112 (keg->uk_ipers < (UMA_SLAB_SIZE / keg->uk_rsize))) { 1113 keg->uk_ipers = UMA_SLAB_SIZE / keg->uk_rsize; 1114 KASSERT(keg->uk_ipers <= 255, 1115 ("zone_small_init: keg->uk_ipers too high!")); 1116 #ifdef UMA_DEBUG 1117 printf("UMA decided we need offpage slab headers for " 1118 "zone: %s, calculated wastedspace = %d, " 1119 "maximum wasted space allowed = %d, " 1120 "calculated ipers = %d, " 1121 "new wasted space = %d\n", zone->uz_name, wastedspace, 1122 UMA_MAX_WASTE, keg->uk_ipers, 1123 UMA_SLAB_SIZE - keg->uk_ipers * keg->uk_rsize); 1124 #endif 1125 keg->uk_flags |= UMA_ZONE_OFFPAGE; 1126 if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0) 1127 keg->uk_flags |= UMA_ZONE_HASH; 1128 } 1129 } 1130 1131 /* 1132 * Finish creating a large (> UMA_SLAB_SIZE) uma zone. Just give in and do 1133 * OFFPAGE for now. When I can allow for more dynamic slab sizes this will be 1134 * more complicated. 1135 * 1136 * Arguments 1137 * zone The zone we should initialize 1138 * 1139 * Returns 1140 * Nothing 1141 */ 1142 static void 1143 zone_large_init(uma_zone_t zone) 1144 { 1145 uma_keg_t keg; 1146 int pages; 1147 1148 keg = zone->uz_keg; 1149 1150 KASSERT(keg != NULL, ("Keg is null in zone_large_init")); 1151 KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0, 1152 ("zone_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY zone")); 1153 1154 pages = keg->uk_size / UMA_SLAB_SIZE; 1155 1156 /* Account for remainder */ 1157 if ((pages * UMA_SLAB_SIZE) < keg->uk_size) 1158 pages++; 1159 1160 keg->uk_ppera = pages; 1161 keg->uk_ipers = 1; 1162 1163 keg->uk_flags |= UMA_ZONE_OFFPAGE; 1164 if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0) 1165 keg->uk_flags |= UMA_ZONE_HASH; 1166 1167 keg->uk_rsize = keg->uk_size; 1168 } 1169 1170 /* 1171 * Keg header ctor. This initializes all fields, locks, etc. And inserts 1172 * the keg onto the global keg list. 1173 * 1174 * Arguments/Returns follow uma_ctor specifications 1175 * udata Actually uma_kctor_args 1176 */ 1177 static int 1178 keg_ctor(void *mem, int size, void *udata, int flags) 1179 { 1180 struct uma_kctor_args *arg = udata; 1181 uma_keg_t keg = mem; 1182 uma_zone_t zone; 1183 1184 bzero(keg, size); 1185 keg->uk_size = arg->size; 1186 keg->uk_init = arg->uminit; 1187 keg->uk_fini = arg->fini; 1188 keg->uk_align = arg->align; 1189 keg->uk_free = 0; 1190 keg->uk_pages = 0; 1191 keg->uk_flags = arg->flags; 1192 keg->uk_allocf = page_alloc; 1193 keg->uk_freef = page_free; 1194 keg->uk_recurse = 0; 1195 keg->uk_slabzone = NULL; 1196 1197 /* 1198 * The master zone is passed to us at keg-creation time. 1199 */ 1200 zone = arg->zone; 1201 zone->uz_keg = keg; 1202 1203 if (arg->flags & UMA_ZONE_VM) 1204 keg->uk_flags |= UMA_ZFLAG_CACHEONLY; 1205 1206 if (arg->flags & UMA_ZONE_ZINIT) 1207 keg->uk_init = zero_init; 1208 1209 /* 1210 * The +UMA_FRITM_SZ added to uk_size is to account for the 1211 * linkage that is added to the size in zone_small_init(). If 1212 * we don't account for this here then we may end up in 1213 * zone_small_init() with a calculated 'ipers' of 0. 1214 */ 1215 if (keg->uk_flags & UMA_ZONE_REFCNT) { 1216 if ((keg->uk_size+UMA_FRITMREF_SZ) > 1217 (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt))) 1218 zone_large_init(zone); 1219 else 1220 zone_small_init(zone); 1221 } else { 1222 if ((keg->uk_size+UMA_FRITM_SZ) > 1223 (UMA_SLAB_SIZE - sizeof(struct uma_slab))) 1224 zone_large_init(zone); 1225 else 1226 zone_small_init(zone); 1227 } 1228 1229 if (keg->uk_flags & UMA_ZONE_OFFPAGE) { 1230 if (keg->uk_flags & UMA_ZONE_REFCNT) 1231 keg->uk_slabzone = slabrefzone; 1232 else 1233 keg->uk_slabzone = slabzone; 1234 } 1235 1236 /* 1237 * If we haven't booted yet we need allocations to go through the 1238 * startup cache until the vm is ready. 1239 */ 1240 if (keg->uk_ppera == 1) { 1241 #ifdef UMA_MD_SMALL_ALLOC 1242 keg->uk_allocf = uma_small_alloc; 1243 keg->uk_freef = uma_small_free; 1244 #endif 1245 if (booted == 0) 1246 keg->uk_allocf = startup_alloc; 1247 } 1248 1249 /* 1250 * Initialize keg's lock (shared among zones) through 1251 * Master zone 1252 */ 1253 zone->uz_lock = &keg->uk_lock; 1254 if (arg->flags & UMA_ZONE_MTXCLASS) 1255 ZONE_LOCK_INIT(zone, 1); 1256 else 1257 ZONE_LOCK_INIT(zone, 0); 1258 1259 /* 1260 * If we're putting the slab header in the actual page we need to 1261 * figure out where in each page it goes. This calculates a right 1262 * justified offset into the memory on an ALIGN_PTR boundary. 1263 */ 1264 if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) { 1265 u_int totsize; 1266 1267 /* Size of the slab struct and free list */ 1268 if (keg->uk_flags & UMA_ZONE_REFCNT) 1269 totsize = sizeof(struct uma_slab_refcnt) + 1270 keg->uk_ipers * UMA_FRITMREF_SZ; 1271 else 1272 totsize = sizeof(struct uma_slab) + 1273 keg->uk_ipers * UMA_FRITM_SZ; 1274 1275 if (totsize & UMA_ALIGN_PTR) 1276 totsize = (totsize & ~UMA_ALIGN_PTR) + 1277 (UMA_ALIGN_PTR + 1); 1278 keg->uk_pgoff = UMA_SLAB_SIZE - totsize; 1279 1280 if (keg->uk_flags & UMA_ZONE_REFCNT) 1281 totsize = keg->uk_pgoff + sizeof(struct uma_slab_refcnt) 1282 + keg->uk_ipers * UMA_FRITMREF_SZ; 1283 else 1284 totsize = keg->uk_pgoff + sizeof(struct uma_slab) 1285 + keg->uk_ipers * UMA_FRITM_SZ; 1286 1287 /* 1288 * The only way the following is possible is if with our 1289 * UMA_ALIGN_PTR adjustments we are now bigger than 1290 * UMA_SLAB_SIZE. I haven't checked whether this is 1291 * mathematically possible for all cases, so we make 1292 * sure here anyway. 1293 */ 1294 if (totsize > UMA_SLAB_SIZE) { 1295 printf("zone %s ipers %d rsize %d size %d\n", 1296 zone->uz_name, keg->uk_ipers, keg->uk_rsize, 1297 keg->uk_size); 1298 panic("UMA slab won't fit.\n"); 1299 } 1300 } 1301 1302 if (keg->uk_flags & UMA_ZONE_HASH) 1303 hash_alloc(&keg->uk_hash); 1304 1305 #ifdef UMA_DEBUG 1306 printf("%s(%p) size = %d ipers = %d ppera = %d pgoff = %d\n", 1307 zone->uz_name, zone, 1308 keg->uk_size, keg->uk_ipers, 1309 keg->uk_ppera, keg->uk_pgoff); 1310 #endif 1311 1312 LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link); 1313 1314 mtx_lock(&uma_mtx); 1315 LIST_INSERT_HEAD(&uma_kegs, keg, uk_link); 1316 mtx_unlock(&uma_mtx); 1317 return (0); 1318 } 1319 1320 /* 1321 * Zone header ctor. This initializes all fields, locks, etc. 1322 * 1323 * Arguments/Returns follow uma_ctor specifications 1324 * udata Actually uma_zctor_args 1325 */ 1326 1327 static int 1328 zone_ctor(void *mem, int size, void *udata, int flags) 1329 { 1330 struct uma_zctor_args *arg = udata; 1331 uma_zone_t zone = mem; 1332 uma_zone_t z; 1333 uma_keg_t keg; 1334 1335 bzero(zone, size); 1336 zone->uz_name = arg->name; 1337 zone->uz_ctor = arg->ctor; 1338 zone->uz_dtor = arg->dtor; 1339 zone->uz_init = NULL; 1340 zone->uz_fini = NULL; 1341 zone->uz_allocs = 0; 1342 zone->uz_fills = zone->uz_count = 0; 1343 1344 if (arg->flags & UMA_ZONE_SECONDARY) { 1345 KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg")); 1346 keg = arg->keg; 1347 zone->uz_keg = keg; 1348 zone->uz_init = arg->uminit; 1349 zone->uz_fini = arg->fini; 1350 zone->uz_lock = &keg->uk_lock; 1351 mtx_lock(&uma_mtx); 1352 ZONE_LOCK(zone); 1353 keg->uk_flags |= UMA_ZONE_SECONDARY; 1354 LIST_FOREACH(z, &keg->uk_zones, uz_link) { 1355 if (LIST_NEXT(z, uz_link) == NULL) { 1356 LIST_INSERT_AFTER(z, zone, uz_link); 1357 break; 1358 } 1359 } 1360 ZONE_UNLOCK(zone); 1361 mtx_unlock(&uma_mtx); 1362 } else if (arg->keg == NULL) { 1363 if (uma_kcreate(zone, arg->size, arg->uminit, arg->fini, 1364 arg->align, arg->flags) == NULL) 1365 return (ENOMEM); 1366 } else { 1367 struct uma_kctor_args karg; 1368 int error; 1369 1370 /* We should only be here from uma_startup() */ 1371 karg.size = arg->size; 1372 karg.uminit = arg->uminit; 1373 karg.fini = arg->fini; 1374 karg.align = arg->align; 1375 karg.flags = arg->flags; 1376 karg.zone = zone; 1377 error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg, 1378 flags); 1379 if (error) 1380 return (error); 1381 } 1382 keg = zone->uz_keg; 1383 zone->uz_lock = &keg->uk_lock; 1384 1385 /* 1386 * Some internal zones don't have room allocated for the per cpu 1387 * caches. If we're internal, bail out here. 1388 */ 1389 if (keg->uk_flags & UMA_ZFLAG_INTERNAL) { 1390 KASSERT((keg->uk_flags & UMA_ZONE_SECONDARY) == 0, 1391 ("Secondary zone requested UMA_ZFLAG_INTERNAL")); 1392 return (0); 1393 } 1394 1395 if (keg->uk_flags & UMA_ZONE_MAXBUCKET) 1396 zone->uz_count = BUCKET_MAX; 1397 else if (keg->uk_ipers <= BUCKET_MAX) 1398 zone->uz_count = keg->uk_ipers; 1399 else 1400 zone->uz_count = BUCKET_MAX; 1401 return (0); 1402 } 1403 1404 /* 1405 * Keg header dtor. This frees all data, destroys locks, frees the hash 1406 * table and removes the keg from the global list. 1407 * 1408 * Arguments/Returns follow uma_dtor specifications 1409 * udata unused 1410 */ 1411 static void 1412 keg_dtor(void *arg, int size, void *udata) 1413 { 1414 uma_keg_t keg; 1415 1416 keg = (uma_keg_t)arg; 1417 mtx_lock(&keg->uk_lock); 1418 if (keg->uk_free != 0) { 1419 printf("Freed UMA keg was not empty (%d items). " 1420 " Lost %d pages of memory.\n", 1421 keg->uk_free, keg->uk_pages); 1422 } 1423 mtx_unlock(&keg->uk_lock); 1424 1425 if (keg->uk_flags & UMA_ZONE_HASH) 1426 hash_free(&keg->uk_hash); 1427 1428 mtx_destroy(&keg->uk_lock); 1429 } 1430 1431 /* 1432 * Zone header dtor. 1433 * 1434 * Arguments/Returns follow uma_dtor specifications 1435 * udata unused 1436 */ 1437 static void 1438 zone_dtor(void *arg, int size, void *udata) 1439 { 1440 uma_zone_t zone; 1441 uma_keg_t keg; 1442 1443 zone = (uma_zone_t)arg; 1444 keg = zone->uz_keg; 1445 1446 if (!(keg->uk_flags & UMA_ZFLAG_INTERNAL)) 1447 cache_drain(zone); 1448 1449 mtx_lock(&uma_mtx); 1450 zone_drain(zone); 1451 if (keg->uk_flags & UMA_ZONE_SECONDARY) { 1452 LIST_REMOVE(zone, uz_link); 1453 /* 1454 * XXX there are some races here where 1455 * the zone can be drained but zone lock 1456 * released and then refilled before we 1457 * remove it... we dont care for now 1458 */ 1459 ZONE_LOCK(zone); 1460 if (LIST_EMPTY(&keg->uk_zones)) 1461 keg->uk_flags &= ~UMA_ZONE_SECONDARY; 1462 ZONE_UNLOCK(zone); 1463 mtx_unlock(&uma_mtx); 1464 } else { 1465 LIST_REMOVE(keg, uk_link); 1466 LIST_REMOVE(zone, uz_link); 1467 mtx_unlock(&uma_mtx); 1468 uma_zfree_internal(kegs, keg, NULL, SKIP_NONE); 1469 } 1470 zone->uz_keg = NULL; 1471 } 1472 1473 /* 1474 * Traverses every zone in the system and calls a callback 1475 * 1476 * Arguments: 1477 * zfunc A pointer to a function which accepts a zone 1478 * as an argument. 1479 * 1480 * Returns: 1481 * Nothing 1482 */ 1483 static void 1484 zone_foreach(void (*zfunc)(uma_zone_t)) 1485 { 1486 uma_keg_t keg; 1487 uma_zone_t zone; 1488 1489 mtx_lock(&uma_mtx); 1490 LIST_FOREACH(keg, &uma_kegs, uk_link) { 1491 LIST_FOREACH(zone, &keg->uk_zones, uz_link) 1492 zfunc(zone); 1493 } 1494 mtx_unlock(&uma_mtx); 1495 } 1496 1497 /* Public functions */ 1498 /* See uma.h */ 1499 void 1500 uma_startup(void *bootmem) 1501 { 1502 struct uma_zctor_args args; 1503 uma_slab_t slab; 1504 u_int slabsize; 1505 u_int objsize, totsize, wsize; 1506 int i; 1507 1508 #ifdef UMA_DEBUG 1509 printf("Creating uma keg headers zone and keg.\n"); 1510 #endif 1511 /* 1512 * The general UMA lock is a recursion-allowed lock because 1513 * there is a code path where, while we're still configured 1514 * to use startup_alloc() for backend page allocations, we 1515 * may end up in uma_reclaim() which calls zone_foreach(zone_drain), 1516 * which grabs uma_mtx, only to later call into startup_alloc() 1517 * because while freeing we needed to allocate a bucket. Since 1518 * startup_alloc() also takes uma_mtx, we need to be able to 1519 * recurse on it. 1520 */ 1521 mtx_init(&uma_mtx, "UMA lock", NULL, MTX_DEF | MTX_RECURSE); 1522 1523 /* 1524 * Figure out the maximum number of items-per-slab we'll have if 1525 * we're using the OFFPAGE slab header to track free items, given 1526 * all possible object sizes and the maximum desired wastage 1527 * (UMA_MAX_WASTE). 1528 * 1529 * We iterate until we find an object size for 1530 * which the calculated wastage in zone_small_init() will be 1531 * enough to warrant OFFPAGE. Since wastedspace versus objsize 1532 * is an overall increasing see-saw function, we find the smallest 1533 * objsize such that the wastage is always acceptable for objects 1534 * with that objsize or smaller. Since a smaller objsize always 1535 * generates a larger possible uma_max_ipers, we use this computed 1536 * objsize to calculate the largest ipers possible. Since the 1537 * ipers calculated for OFFPAGE slab headers is always larger than 1538 * the ipers initially calculated in zone_small_init(), we use 1539 * the former's equation (UMA_SLAB_SIZE / keg->uk_rsize) to 1540 * obtain the maximum ipers possible for offpage slab headers. 1541 * 1542 * It should be noted that ipers versus objsize is an inversly 1543 * proportional function which drops off rather quickly so as 1544 * long as our UMA_MAX_WASTE is such that the objsize we calculate 1545 * falls into the portion of the inverse relation AFTER the steep 1546 * falloff, then uma_max_ipers shouldn't be too high (~10 on i386). 1547 * 1548 * Note that we have 8-bits (1 byte) to use as a freelist index 1549 * inside the actual slab header itself and this is enough to 1550 * accomodate us. In the worst case, a UMA_SMALLEST_UNIT sized 1551 * object with offpage slab header would have ipers = 1552 * UMA_SLAB_SIZE / UMA_SMALLEST_UNIT (currently = 256), which is 1553 * 1 greater than what our byte-integer freelist index can 1554 * accomodate, but we know that this situation never occurs as 1555 * for UMA_SMALLEST_UNIT-sized objects, we will never calculate 1556 * that we need to go to offpage slab headers. Or, if we do, 1557 * then we trap that condition below and panic in the INVARIANTS case. 1558 */ 1559 wsize = UMA_SLAB_SIZE - sizeof(struct uma_slab) - UMA_MAX_WASTE; 1560 totsize = wsize; 1561 objsize = UMA_SMALLEST_UNIT; 1562 while (totsize >= wsize) { 1563 totsize = (UMA_SLAB_SIZE - sizeof(struct uma_slab)) / 1564 (objsize + UMA_FRITM_SZ); 1565 totsize *= (UMA_FRITM_SZ + objsize); 1566 objsize++; 1567 } 1568 if (objsize > UMA_SMALLEST_UNIT) 1569 objsize--; 1570 uma_max_ipers = UMA_SLAB_SIZE / objsize; 1571 1572 wsize = UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt) - UMA_MAX_WASTE; 1573 totsize = wsize; 1574 objsize = UMA_SMALLEST_UNIT; 1575 while (totsize >= wsize) { 1576 totsize = (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt)) / 1577 (objsize + UMA_FRITMREF_SZ); 1578 totsize *= (UMA_FRITMREF_SZ + objsize); 1579 objsize++; 1580 } 1581 if (objsize > UMA_SMALLEST_UNIT) 1582 objsize--; 1583 uma_max_ipers_ref = UMA_SLAB_SIZE / objsize; 1584 1585 KASSERT((uma_max_ipers_ref <= 255) && (uma_max_ipers <= 255), 1586 ("uma_startup: calculated uma_max_ipers values too large!")); 1587 1588 #ifdef UMA_DEBUG 1589 printf("Calculated uma_max_ipers (for OFFPAGE) is %d\n", uma_max_ipers); 1590 printf("Calculated uma_max_ipers_slab (for OFFPAGE) is %d\n", 1591 uma_max_ipers_ref); 1592 #endif 1593 1594 /* "manually" create the initial zone */ 1595 args.name = "UMA Kegs"; 1596 args.size = sizeof(struct uma_keg); 1597 args.ctor = keg_ctor; 1598 args.dtor = keg_dtor; 1599 args.uminit = zero_init; 1600 args.fini = NULL; 1601 args.keg = &masterkeg; 1602 args.align = 32 - 1; 1603 args.flags = UMA_ZFLAG_INTERNAL; 1604 /* The initial zone has no Per cpu queues so it's smaller */ 1605 zone_ctor(kegs, sizeof(struct uma_zone), &args, M_WAITOK); 1606 1607 #ifdef UMA_DEBUG 1608 printf("Filling boot free list.\n"); 1609 #endif 1610 for (i = 0; i < UMA_BOOT_PAGES; i++) { 1611 slab = (uma_slab_t)((u_int8_t *)bootmem + (i * UMA_SLAB_SIZE)); 1612 slab->us_data = (u_int8_t *)slab; 1613 slab->us_flags = UMA_SLAB_BOOT; 1614 LIST_INSERT_HEAD(&uma_boot_pages, slab, us_link); 1615 uma_boot_free++; 1616 } 1617 1618 #ifdef UMA_DEBUG 1619 printf("Creating uma zone headers zone and keg.\n"); 1620 #endif 1621 args.name = "UMA Zones"; 1622 args.size = sizeof(struct uma_zone) + 1623 (sizeof(struct uma_cache) * (mp_maxid + 1)); 1624 args.ctor = zone_ctor; 1625 args.dtor = zone_dtor; 1626 args.uminit = zero_init; 1627 args.fini = NULL; 1628 args.keg = NULL; 1629 args.align = 32 - 1; 1630 args.flags = UMA_ZFLAG_INTERNAL; 1631 /* The initial zone has no Per cpu queues so it's smaller */ 1632 zone_ctor(zones, sizeof(struct uma_zone), &args, M_WAITOK); 1633 1634 #ifdef UMA_DEBUG 1635 printf("Initializing pcpu cache locks.\n"); 1636 #endif 1637 /* Initialize the pcpu cache lock set once and for all */ 1638 for (i = 0; i <= mp_maxid; i++) 1639 CPU_LOCK_INIT(i); 1640 1641 #ifdef UMA_DEBUG 1642 printf("Creating slab and hash zones.\n"); 1643 #endif 1644 1645 /* 1646 * This is the max number of free list items we'll have with 1647 * offpage slabs. 1648 */ 1649 slabsize = uma_max_ipers * UMA_FRITM_SZ; 1650 slabsize += sizeof(struct uma_slab); 1651 1652 /* Now make a zone for slab headers */ 1653 slabzone = uma_zcreate("UMA Slabs", 1654 slabsize, 1655 NULL, NULL, NULL, NULL, 1656 UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL); 1657 1658 /* 1659 * We also create a zone for the bigger slabs with reference 1660 * counts in them, to accomodate UMA_ZONE_REFCNT zones. 1661 */ 1662 slabsize = uma_max_ipers_ref * UMA_FRITMREF_SZ; 1663 slabsize += sizeof(struct uma_slab_refcnt); 1664 slabrefzone = uma_zcreate("UMA RCntSlabs", 1665 slabsize, 1666 NULL, NULL, NULL, NULL, 1667 UMA_ALIGN_PTR, 1668 UMA_ZFLAG_INTERNAL); 1669 1670 hashzone = uma_zcreate("UMA Hash", 1671 sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT, 1672 NULL, NULL, NULL, NULL, 1673 UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL); 1674 1675 bucket_init(); 1676 1677 #ifdef UMA_MD_SMALL_ALLOC 1678 booted = 1; 1679 #endif 1680 1681 #ifdef UMA_DEBUG 1682 printf("UMA startup complete.\n"); 1683 #endif 1684 } 1685 1686 /* see uma.h */ 1687 void 1688 uma_startup2(void) 1689 { 1690 booted = 1; 1691 bucket_enable(); 1692 #ifdef UMA_DEBUG 1693 printf("UMA startup2 complete.\n"); 1694 #endif 1695 } 1696 1697 /* 1698 * Initialize our callout handle 1699 * 1700 */ 1701 1702 static void 1703 uma_startup3(void) 1704 { 1705 #ifdef UMA_DEBUG 1706 printf("Starting callout.\n"); 1707 #endif 1708 callout_init(&uma_callout, CALLOUT_MPSAFE); 1709 callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL); 1710 #ifdef UMA_DEBUG 1711 printf("UMA startup3 complete.\n"); 1712 #endif 1713 } 1714 1715 static uma_zone_t 1716 uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini, 1717 int align, u_int16_t flags) 1718 { 1719 struct uma_kctor_args args; 1720 1721 args.size = size; 1722 args.uminit = uminit; 1723 args.fini = fini; 1724 args.align = align; 1725 args.flags = flags; 1726 args.zone = zone; 1727 return (uma_zalloc_internal(kegs, &args, M_WAITOK)); 1728 } 1729 1730 /* See uma.h */ 1731 uma_zone_t 1732 uma_zcreate(char *name, size_t size, uma_ctor ctor, uma_dtor dtor, 1733 uma_init uminit, uma_fini fini, int align, u_int16_t flags) 1734 1735 { 1736 struct uma_zctor_args args; 1737 1738 /* This stuff is essential for the zone ctor */ 1739 args.name = name; 1740 args.size = size; 1741 args.ctor = ctor; 1742 args.dtor = dtor; 1743 args.uminit = uminit; 1744 args.fini = fini; 1745 args.align = align; 1746 args.flags = flags; 1747 args.keg = NULL; 1748 1749 return (uma_zalloc_internal(zones, &args, M_WAITOK)); 1750 } 1751 1752 /* See uma.h */ 1753 uma_zone_t 1754 uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor, 1755 uma_init zinit, uma_fini zfini, uma_zone_t master) 1756 { 1757 struct uma_zctor_args args; 1758 1759 args.name = name; 1760 args.size = master->uz_keg->uk_size; 1761 args.ctor = ctor; 1762 args.dtor = dtor; 1763 args.uminit = zinit; 1764 args.fini = zfini; 1765 args.align = master->uz_keg->uk_align; 1766 args.flags = master->uz_keg->uk_flags | UMA_ZONE_SECONDARY; 1767 args.keg = master->uz_keg; 1768 1769 return (uma_zalloc_internal(zones, &args, M_WAITOK)); 1770 } 1771 1772 /* See uma.h */ 1773 void 1774 uma_zdestroy(uma_zone_t zone) 1775 { 1776 uma_zfree_internal(zones, zone, NULL, SKIP_NONE); 1777 } 1778 1779 /* See uma.h */ 1780 void * 1781 uma_zalloc_arg(uma_zone_t zone, void *udata, int flags) 1782 { 1783 void *item; 1784 uma_cache_t cache; 1785 uma_bucket_t bucket; 1786 int cpu; 1787 int badness; 1788 1789 /* This is the fast path allocation */ 1790 #ifdef UMA_DEBUG_ALLOC_1 1791 printf("Allocating one item from %s(%p)\n", zone->uz_name, zone); 1792 #endif 1793 CTR3(KTR_UMA, "uma_zalloc_arg thread %x zone %s flags %d", curthread, 1794 zone->uz_name, flags); 1795 1796 if (!(flags & M_NOWAIT)) { 1797 KASSERT(curthread->td_intr_nesting_level == 0, 1798 ("malloc(M_WAITOK) in interrupt context")); 1799 if (nosleepwithlocks) { 1800 #ifdef WITNESS 1801 badness = WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, 1802 NULL, 1803 "malloc(M_WAITOK) of \"%s\", forcing M_NOWAIT", 1804 zone->uz_name); 1805 #else 1806 badness = 1; 1807 #endif 1808 } else { 1809 badness = 0; 1810 #ifdef WITNESS 1811 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 1812 "malloc(M_WAITOK) of \"%s\"", zone->uz_name); 1813 #endif 1814 } 1815 if (badness) { 1816 flags &= ~M_WAITOK; 1817 flags |= M_NOWAIT; 1818 } 1819 } 1820 1821 zalloc_restart: 1822 cpu = PCPU_GET(cpuid); 1823 CPU_LOCK(cpu); 1824 cache = &zone->uz_cpu[cpu]; 1825 1826 zalloc_start: 1827 bucket = cache->uc_allocbucket; 1828 1829 if (bucket) { 1830 if (bucket->ub_cnt > 0) { 1831 bucket->ub_cnt--; 1832 item = bucket->ub_bucket[bucket->ub_cnt]; 1833 #ifdef INVARIANTS 1834 bucket->ub_bucket[bucket->ub_cnt] = NULL; 1835 #endif 1836 KASSERT(item != NULL, 1837 ("uma_zalloc: Bucket pointer mangled.")); 1838 cache->uc_allocs++; 1839 #ifdef INVARIANTS 1840 ZONE_LOCK(zone); 1841 uma_dbg_alloc(zone, NULL, item); 1842 ZONE_UNLOCK(zone); 1843 #endif 1844 CPU_UNLOCK(cpu); 1845 if (zone->uz_ctor != NULL) { 1846 if (zone->uz_ctor(item, zone->uz_keg->uk_size, 1847 udata, flags) != 0) { 1848 uma_zfree_internal(zone, item, udata, 1849 SKIP_DTOR); 1850 return (NULL); 1851 } 1852 } 1853 if (flags & M_ZERO) 1854 bzero(item, zone->uz_keg->uk_size); 1855 return (item); 1856 } else if (cache->uc_freebucket) { 1857 /* 1858 * We have run out of items in our allocbucket. 1859 * See if we can switch with our free bucket. 1860 */ 1861 if (cache->uc_freebucket->ub_cnt > 0) { 1862 #ifdef UMA_DEBUG_ALLOC 1863 printf("uma_zalloc: Swapping empty with" 1864 " alloc.\n"); 1865 #endif 1866 bucket = cache->uc_freebucket; 1867 cache->uc_freebucket = cache->uc_allocbucket; 1868 cache->uc_allocbucket = bucket; 1869 1870 goto zalloc_start; 1871 } 1872 } 1873 } 1874 ZONE_LOCK(zone); 1875 /* Since we have locked the zone we may as well send back our stats */ 1876 zone->uz_allocs += cache->uc_allocs; 1877 cache->uc_allocs = 0; 1878 1879 /* Our old one is now a free bucket */ 1880 if (cache->uc_allocbucket) { 1881 KASSERT(cache->uc_allocbucket->ub_cnt == 0, 1882 ("uma_zalloc_arg: Freeing a non free bucket.")); 1883 LIST_INSERT_HEAD(&zone->uz_free_bucket, 1884 cache->uc_allocbucket, ub_link); 1885 cache->uc_allocbucket = NULL; 1886 } 1887 1888 /* Check the free list for a new alloc bucket */ 1889 if ((bucket = LIST_FIRST(&zone->uz_full_bucket)) != NULL) { 1890 KASSERT(bucket->ub_cnt != 0, 1891 ("uma_zalloc_arg: Returning an empty bucket.")); 1892 1893 LIST_REMOVE(bucket, ub_link); 1894 cache->uc_allocbucket = bucket; 1895 ZONE_UNLOCK(zone); 1896 goto zalloc_start; 1897 } 1898 /* We are no longer associated with this cpu!!! */ 1899 CPU_UNLOCK(cpu); 1900 1901 /* Bump up our uz_count so we get here less */ 1902 if (zone->uz_count < BUCKET_MAX) 1903 zone->uz_count++; 1904 1905 /* 1906 * Now lets just fill a bucket and put it on the free list. If that 1907 * works we'll restart the allocation from the begining. 1908 */ 1909 if (uma_zalloc_bucket(zone, flags)) { 1910 ZONE_UNLOCK(zone); 1911 goto zalloc_restart; 1912 } 1913 ZONE_UNLOCK(zone); 1914 /* 1915 * We may not be able to get a bucket so return an actual item. 1916 */ 1917 #ifdef UMA_DEBUG 1918 printf("uma_zalloc_arg: Bucketzone returned NULL\n"); 1919 #endif 1920 1921 return (uma_zalloc_internal(zone, udata, flags)); 1922 } 1923 1924 static uma_slab_t 1925 uma_zone_slab(uma_zone_t zone, int flags) 1926 { 1927 uma_slab_t slab; 1928 uma_keg_t keg; 1929 1930 keg = zone->uz_keg; 1931 1932 /* 1933 * This is to prevent us from recursively trying to allocate 1934 * buckets. The problem is that if an allocation forces us to 1935 * grab a new bucket we will call page_alloc, which will go off 1936 * and cause the vm to allocate vm_map_entries. If we need new 1937 * buckets there too we will recurse in kmem_alloc and bad 1938 * things happen. So instead we return a NULL bucket, and make 1939 * the code that allocates buckets smart enough to deal with it 1940 */ 1941 if (keg->uk_flags & UMA_ZFLAG_INTERNAL && keg->uk_recurse != 0) 1942 return (NULL); 1943 1944 slab = NULL; 1945 1946 for (;;) { 1947 /* 1948 * Find a slab with some space. Prefer slabs that are partially 1949 * used over those that are totally full. This helps to reduce 1950 * fragmentation. 1951 */ 1952 if (keg->uk_free != 0) { 1953 if (!LIST_EMPTY(&keg->uk_part_slab)) { 1954 slab = LIST_FIRST(&keg->uk_part_slab); 1955 } else { 1956 slab = LIST_FIRST(&keg->uk_free_slab); 1957 LIST_REMOVE(slab, us_link); 1958 LIST_INSERT_HEAD(&keg->uk_part_slab, slab, 1959 us_link); 1960 } 1961 return (slab); 1962 } 1963 1964 /* 1965 * M_NOVM means don't ask at all! 1966 */ 1967 if (flags & M_NOVM) 1968 break; 1969 1970 if (keg->uk_maxpages && 1971 keg->uk_pages >= keg->uk_maxpages) { 1972 keg->uk_flags |= UMA_ZFLAG_FULL; 1973 1974 if (flags & M_NOWAIT) 1975 break; 1976 else 1977 msleep(keg, &keg->uk_lock, PVM, 1978 "zonelimit", 0); 1979 continue; 1980 } 1981 keg->uk_recurse++; 1982 slab = slab_zalloc(zone, flags); 1983 keg->uk_recurse--; 1984 1985 /* 1986 * If we got a slab here it's safe to mark it partially used 1987 * and return. We assume that the caller is going to remove 1988 * at least one item. 1989 */ 1990 if (slab) { 1991 LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link); 1992 return (slab); 1993 } 1994 /* 1995 * We might not have been able to get a slab but another cpu 1996 * could have while we were unlocked. Check again before we 1997 * fail. 1998 */ 1999 if (flags & M_NOWAIT) 2000 flags |= M_NOVM; 2001 } 2002 return (slab); 2003 } 2004 2005 static void * 2006 uma_slab_alloc(uma_zone_t zone, uma_slab_t slab) 2007 { 2008 uma_keg_t keg; 2009 uma_slabrefcnt_t slabref; 2010 void *item; 2011 u_int8_t freei; 2012 2013 keg = zone->uz_keg; 2014 2015 freei = slab->us_firstfree; 2016 if (keg->uk_flags & UMA_ZONE_REFCNT) { 2017 slabref = (uma_slabrefcnt_t)slab; 2018 slab->us_firstfree = slabref->us_freelist[freei].us_item; 2019 } else { 2020 slab->us_firstfree = slab->us_freelist[freei].us_item; 2021 } 2022 item = slab->us_data + (keg->uk_rsize * freei); 2023 2024 slab->us_freecount--; 2025 keg->uk_free--; 2026 #ifdef INVARIANTS 2027 uma_dbg_alloc(zone, slab, item); 2028 #endif 2029 /* Move this slab to the full list */ 2030 if (slab->us_freecount == 0) { 2031 LIST_REMOVE(slab, us_link); 2032 LIST_INSERT_HEAD(&keg->uk_full_slab, slab, us_link); 2033 } 2034 2035 return (item); 2036 } 2037 2038 static int 2039 uma_zalloc_bucket(uma_zone_t zone, int flags) 2040 { 2041 uma_bucket_t bucket; 2042 uma_slab_t slab; 2043 int16_t saved; 2044 int max, origflags = flags; 2045 2046 /* 2047 * Try this zone's free list first so we don't allocate extra buckets. 2048 */ 2049 if ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) { 2050 KASSERT(bucket->ub_cnt == 0, 2051 ("uma_zalloc_bucket: Bucket on free list is not empty.")); 2052 LIST_REMOVE(bucket, ub_link); 2053 } else { 2054 int bflags; 2055 2056 bflags = (flags & ~M_ZERO); 2057 if (zone->uz_keg->uk_flags & UMA_ZFLAG_CACHEONLY) 2058 bflags |= M_NOVM; 2059 2060 ZONE_UNLOCK(zone); 2061 bucket = bucket_alloc(zone->uz_count, bflags); 2062 ZONE_LOCK(zone); 2063 } 2064 2065 if (bucket == NULL) 2066 return (0); 2067 2068 #ifdef SMP 2069 /* 2070 * This code is here to limit the number of simultaneous bucket fills 2071 * for any given zone to the number of per cpu caches in this zone. This 2072 * is done so that we don't allocate more memory than we really need. 2073 */ 2074 if (zone->uz_fills >= mp_ncpus) 2075 goto done; 2076 2077 #endif 2078 zone->uz_fills++; 2079 2080 max = MIN(bucket->ub_entries, zone->uz_count); 2081 /* Try to keep the buckets totally full */ 2082 saved = bucket->ub_cnt; 2083 while (bucket->ub_cnt < max && 2084 (slab = uma_zone_slab(zone, flags)) != NULL) { 2085 while (slab->us_freecount && bucket->ub_cnt < max) { 2086 bucket->ub_bucket[bucket->ub_cnt++] = 2087 uma_slab_alloc(zone, slab); 2088 } 2089 2090 /* Don't block on the next fill */ 2091 flags |= M_NOWAIT; 2092 } 2093 2094 /* 2095 * We unlock here because we need to call the zone's init. 2096 * It should be safe to unlock because the slab dealt with 2097 * above is already on the appropriate list within the keg 2098 * and the bucket we filled is not yet on any list, so we 2099 * own it. 2100 */ 2101 if (zone->uz_init != NULL) { 2102 int i; 2103 2104 ZONE_UNLOCK(zone); 2105 for (i = saved; i < bucket->ub_cnt; i++) 2106 if (zone->uz_init(bucket->ub_bucket[i], 2107 zone->uz_keg->uk_size, origflags) != 0) 2108 break; 2109 /* 2110 * If we couldn't initialize the whole bucket, put the 2111 * rest back onto the freelist. 2112 */ 2113 if (i != bucket->ub_cnt) { 2114 int j; 2115 2116 for (j = i; j < bucket->ub_cnt; j++) { 2117 uma_zfree_internal(zone, bucket->ub_bucket[j], 2118 NULL, SKIP_FINI); 2119 #ifdef INVARIANTS 2120 bucket->ub_bucket[j] = NULL; 2121 #endif 2122 } 2123 bucket->ub_cnt = i; 2124 } 2125 ZONE_LOCK(zone); 2126 } 2127 2128 zone->uz_fills--; 2129 if (bucket->ub_cnt != 0) { 2130 LIST_INSERT_HEAD(&zone->uz_full_bucket, 2131 bucket, ub_link); 2132 return (1); 2133 } 2134 #ifdef SMP 2135 done: 2136 #endif 2137 bucket_free(bucket); 2138 2139 return (0); 2140 } 2141 /* 2142 * Allocates an item for an internal zone 2143 * 2144 * Arguments 2145 * zone The zone to alloc for. 2146 * udata The data to be passed to the constructor. 2147 * flags M_WAITOK, M_NOWAIT, M_ZERO. 2148 * 2149 * Returns 2150 * NULL if there is no memory and M_NOWAIT is set 2151 * An item if successful 2152 */ 2153 2154 static void * 2155 uma_zalloc_internal(uma_zone_t zone, void *udata, int flags) 2156 { 2157 uma_keg_t keg; 2158 uma_slab_t slab; 2159 void *item; 2160 2161 item = NULL; 2162 keg = zone->uz_keg; 2163 2164 #ifdef UMA_DEBUG_ALLOC 2165 printf("INTERNAL: Allocating one item from %s(%p)\n", zone->uz_name, zone); 2166 #endif 2167 ZONE_LOCK(zone); 2168 2169 slab = uma_zone_slab(zone, flags); 2170 if (slab == NULL) { 2171 ZONE_UNLOCK(zone); 2172 return (NULL); 2173 } 2174 2175 item = uma_slab_alloc(zone, slab); 2176 2177 ZONE_UNLOCK(zone); 2178 2179 /* 2180 * We have to call both the zone's init (not the keg's init) 2181 * and the zone's ctor. This is because the item is going from 2182 * a keg slab directly to the user, and the user is expecting it 2183 * to be both zone-init'd as well as zone-ctor'd. 2184 */ 2185 if (zone->uz_init != NULL) { 2186 if (zone->uz_init(item, keg->uk_size, flags) != 0) { 2187 uma_zfree_internal(zone, item, udata, SKIP_FINI); 2188 return (NULL); 2189 } 2190 } 2191 if (zone->uz_ctor != NULL) { 2192 if (zone->uz_ctor(item, keg->uk_size, udata, flags) != 0) { 2193 uma_zfree_internal(zone, item, udata, SKIP_DTOR); 2194 return (NULL); 2195 } 2196 } 2197 if (flags & M_ZERO) 2198 bzero(item, keg->uk_size); 2199 2200 return (item); 2201 } 2202 2203 /* See uma.h */ 2204 void 2205 uma_zfree_arg(uma_zone_t zone, void *item, void *udata) 2206 { 2207 uma_keg_t keg; 2208 uma_cache_t cache; 2209 uma_bucket_t bucket; 2210 int bflags; 2211 int cpu; 2212 enum zfreeskip skip; 2213 2214 /* This is the fast path free */ 2215 skip = SKIP_NONE; 2216 keg = zone->uz_keg; 2217 2218 #ifdef UMA_DEBUG_ALLOC_1 2219 printf("Freeing item %p to %s(%p)\n", item, zone->uz_name, zone); 2220 #endif 2221 CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread, 2222 zone->uz_name); 2223 2224 /* 2225 * The race here is acceptable. If we miss it we'll just have to wait 2226 * a little longer for the limits to be reset. 2227 */ 2228 2229 if (keg->uk_flags & UMA_ZFLAG_FULL) 2230 goto zfree_internal; 2231 2232 if (zone->uz_dtor) { 2233 zone->uz_dtor(item, keg->uk_size, udata); 2234 skip = SKIP_DTOR; 2235 } 2236 2237 zfree_restart: 2238 cpu = PCPU_GET(cpuid); 2239 CPU_LOCK(cpu); 2240 cache = &zone->uz_cpu[cpu]; 2241 2242 zfree_start: 2243 bucket = cache->uc_freebucket; 2244 2245 if (bucket) { 2246 /* 2247 * Do we have room in our bucket? It is OK for this uz count 2248 * check to be slightly out of sync. 2249 */ 2250 2251 if (bucket->ub_cnt < bucket->ub_entries) { 2252 KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL, 2253 ("uma_zfree: Freeing to non free bucket index.")); 2254 bucket->ub_bucket[bucket->ub_cnt] = item; 2255 bucket->ub_cnt++; 2256 #ifdef INVARIANTS 2257 ZONE_LOCK(zone); 2258 if (keg->uk_flags & UMA_ZONE_MALLOC) 2259 uma_dbg_free(zone, udata, item); 2260 else 2261 uma_dbg_free(zone, NULL, item); 2262 ZONE_UNLOCK(zone); 2263 #endif 2264 CPU_UNLOCK(cpu); 2265 return; 2266 } else if (cache->uc_allocbucket) { 2267 #ifdef UMA_DEBUG_ALLOC 2268 printf("uma_zfree: Swapping buckets.\n"); 2269 #endif 2270 /* 2271 * We have run out of space in our freebucket. 2272 * See if we can switch with our alloc bucket. 2273 */ 2274 if (cache->uc_allocbucket->ub_cnt < 2275 cache->uc_freebucket->ub_cnt) { 2276 bucket = cache->uc_freebucket; 2277 cache->uc_freebucket = cache->uc_allocbucket; 2278 cache->uc_allocbucket = bucket; 2279 goto zfree_start; 2280 } 2281 } 2282 } 2283 /* 2284 * We can get here for two reasons: 2285 * 2286 * 1) The buckets are NULL 2287 * 2) The alloc and free buckets are both somewhat full. 2288 */ 2289 2290 ZONE_LOCK(zone); 2291 2292 bucket = cache->uc_freebucket; 2293 cache->uc_freebucket = NULL; 2294 2295 /* Can we throw this on the zone full list? */ 2296 if (bucket != NULL) { 2297 #ifdef UMA_DEBUG_ALLOC 2298 printf("uma_zfree: Putting old bucket on the free list.\n"); 2299 #endif 2300 /* ub_cnt is pointing to the last free item */ 2301 KASSERT(bucket->ub_cnt != 0, 2302 ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n")); 2303 LIST_INSERT_HEAD(&zone->uz_full_bucket, 2304 bucket, ub_link); 2305 } 2306 if ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) { 2307 LIST_REMOVE(bucket, ub_link); 2308 ZONE_UNLOCK(zone); 2309 cache->uc_freebucket = bucket; 2310 goto zfree_start; 2311 } 2312 /* We're done with this CPU now */ 2313 CPU_UNLOCK(cpu); 2314 2315 /* And the zone.. */ 2316 ZONE_UNLOCK(zone); 2317 2318 #ifdef UMA_DEBUG_ALLOC 2319 printf("uma_zfree: Allocating new free bucket.\n"); 2320 #endif 2321 bflags = M_NOWAIT; 2322 2323 if (keg->uk_flags & UMA_ZFLAG_CACHEONLY) 2324 bflags |= M_NOVM; 2325 bucket = bucket_alloc(zone->uz_count, bflags); 2326 if (bucket) { 2327 ZONE_LOCK(zone); 2328 LIST_INSERT_HEAD(&zone->uz_free_bucket, 2329 bucket, ub_link); 2330 ZONE_UNLOCK(zone); 2331 goto zfree_restart; 2332 } 2333 2334 /* 2335 * If nothing else caught this, we'll just do an internal free. 2336 */ 2337 2338 zfree_internal: 2339 2340 #ifdef INVARIANTS 2341 /* 2342 * If we need to skip the dtor and the uma_dbg_free in 2343 * uma_zfree_internal because we've already called the dtor 2344 * above, but we ended up here, then we need to make sure 2345 * that we take care of the uma_dbg_free immediately. 2346 */ 2347 if (skip) { 2348 ZONE_LOCK(zone); 2349 if (keg->uk_flags & UMA_ZONE_MALLOC) 2350 uma_dbg_free(zone, udata, item); 2351 else 2352 uma_dbg_free(zone, NULL, item); 2353 ZONE_UNLOCK(zone); 2354 } 2355 #endif 2356 uma_zfree_internal(zone, item, udata, skip); 2357 2358 return; 2359 } 2360 2361 /* 2362 * Frees an item to an INTERNAL zone or allocates a free bucket 2363 * 2364 * Arguments: 2365 * zone The zone to free to 2366 * item The item we're freeing 2367 * udata User supplied data for the dtor 2368 * skip Skip dtors and finis 2369 */ 2370 static void 2371 uma_zfree_internal(uma_zone_t zone, void *item, void *udata, 2372 enum zfreeskip skip) 2373 { 2374 uma_slab_t slab; 2375 uma_slabrefcnt_t slabref; 2376 uma_keg_t keg; 2377 u_int8_t *mem; 2378 u_int8_t freei; 2379 2380 keg = zone->uz_keg; 2381 2382 if (skip < SKIP_DTOR && zone->uz_dtor) 2383 zone->uz_dtor(item, keg->uk_size, udata); 2384 if (skip < SKIP_FINI && zone->uz_fini) 2385 zone->uz_fini(item, keg->uk_size); 2386 2387 ZONE_LOCK(zone); 2388 2389 if (!(keg->uk_flags & UMA_ZONE_MALLOC)) { 2390 mem = (u_int8_t *)((unsigned long)item & (~UMA_SLAB_MASK)); 2391 if (keg->uk_flags & UMA_ZONE_HASH) 2392 slab = hash_sfind(&keg->uk_hash, mem); 2393 else { 2394 mem += keg->uk_pgoff; 2395 slab = (uma_slab_t)mem; 2396 } 2397 } else { 2398 slab = (uma_slab_t)udata; 2399 } 2400 2401 /* Do we need to remove from any lists? */ 2402 if (slab->us_freecount+1 == keg->uk_ipers) { 2403 LIST_REMOVE(slab, us_link); 2404 LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link); 2405 } else if (slab->us_freecount == 0) { 2406 LIST_REMOVE(slab, us_link); 2407 LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link); 2408 } 2409 2410 /* Slab management stuff */ 2411 freei = ((unsigned long)item - (unsigned long)slab->us_data) 2412 / keg->uk_rsize; 2413 2414 #ifdef INVARIANTS 2415 if (!skip) 2416 uma_dbg_free(zone, slab, item); 2417 #endif 2418 2419 if (keg->uk_flags & UMA_ZONE_REFCNT) { 2420 slabref = (uma_slabrefcnt_t)slab; 2421 slabref->us_freelist[freei].us_item = slab->us_firstfree; 2422 } else { 2423 slab->us_freelist[freei].us_item = slab->us_firstfree; 2424 } 2425 slab->us_firstfree = freei; 2426 slab->us_freecount++; 2427 2428 /* Zone statistics */ 2429 keg->uk_free++; 2430 2431 if (keg->uk_flags & UMA_ZFLAG_FULL) { 2432 if (keg->uk_pages < keg->uk_maxpages) 2433 keg->uk_flags &= ~UMA_ZFLAG_FULL; 2434 2435 /* We can handle one more allocation */ 2436 wakeup_one(keg); 2437 } 2438 2439 ZONE_UNLOCK(zone); 2440 } 2441 2442 /* See uma.h */ 2443 void 2444 uma_zone_set_max(uma_zone_t zone, int nitems) 2445 { 2446 uma_keg_t keg; 2447 2448 keg = zone->uz_keg; 2449 ZONE_LOCK(zone); 2450 if (keg->uk_ppera > 1) 2451 keg->uk_maxpages = nitems * keg->uk_ppera; 2452 else 2453 keg->uk_maxpages = nitems / keg->uk_ipers; 2454 2455 if (keg->uk_maxpages * keg->uk_ipers < nitems) 2456 keg->uk_maxpages++; 2457 2458 ZONE_UNLOCK(zone); 2459 } 2460 2461 /* See uma.h */ 2462 void 2463 uma_zone_set_init(uma_zone_t zone, uma_init uminit) 2464 { 2465 ZONE_LOCK(zone); 2466 KASSERT(zone->uz_keg->uk_pages == 0, 2467 ("uma_zone_set_init on non-empty keg")); 2468 zone->uz_keg->uk_init = uminit; 2469 ZONE_UNLOCK(zone); 2470 } 2471 2472 /* See uma.h */ 2473 void 2474 uma_zone_set_fini(uma_zone_t zone, uma_fini fini) 2475 { 2476 ZONE_LOCK(zone); 2477 KASSERT(zone->uz_keg->uk_pages == 0, 2478 ("uma_zone_set_fini on non-empty keg")); 2479 zone->uz_keg->uk_fini = fini; 2480 ZONE_UNLOCK(zone); 2481 } 2482 2483 /* See uma.h */ 2484 void 2485 uma_zone_set_zinit(uma_zone_t zone, uma_init zinit) 2486 { 2487 ZONE_LOCK(zone); 2488 KASSERT(zone->uz_keg->uk_pages == 0, 2489 ("uma_zone_set_zinit on non-empty keg")); 2490 zone->uz_init = zinit; 2491 ZONE_UNLOCK(zone); 2492 } 2493 2494 /* See uma.h */ 2495 void 2496 uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini) 2497 { 2498 ZONE_LOCK(zone); 2499 KASSERT(zone->uz_keg->uk_pages == 0, 2500 ("uma_zone_set_zfini on non-empty keg")); 2501 zone->uz_fini = zfini; 2502 ZONE_UNLOCK(zone); 2503 } 2504 2505 /* See uma.h */ 2506 /* XXX uk_freef is not actually used with the zone locked */ 2507 void 2508 uma_zone_set_freef(uma_zone_t zone, uma_free freef) 2509 { 2510 ZONE_LOCK(zone); 2511 zone->uz_keg->uk_freef = freef; 2512 ZONE_UNLOCK(zone); 2513 } 2514 2515 /* See uma.h */ 2516 /* XXX uk_allocf is not actually used with the zone locked */ 2517 void 2518 uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf) 2519 { 2520 ZONE_LOCK(zone); 2521 zone->uz_keg->uk_flags |= UMA_ZFLAG_PRIVALLOC; 2522 zone->uz_keg->uk_allocf = allocf; 2523 ZONE_UNLOCK(zone); 2524 } 2525 2526 /* See uma.h */ 2527 int 2528 uma_zone_set_obj(uma_zone_t zone, struct vm_object *obj, int count) 2529 { 2530 uma_keg_t keg; 2531 vm_offset_t kva; 2532 int pages; 2533 2534 keg = zone->uz_keg; 2535 pages = count / keg->uk_ipers; 2536 2537 if (pages * keg->uk_ipers < count) 2538 pages++; 2539 2540 kva = kmem_alloc_nofault(kernel_map, pages * UMA_SLAB_SIZE); 2541 2542 if (kva == 0) 2543 return (0); 2544 if (obj == NULL) { 2545 obj = vm_object_allocate(OBJT_DEFAULT, 2546 pages); 2547 } else { 2548 VM_OBJECT_LOCK_INIT(obj, "uma object"); 2549 _vm_object_allocate(OBJT_DEFAULT, 2550 pages, obj); 2551 } 2552 ZONE_LOCK(zone); 2553 keg->uk_kva = kva; 2554 keg->uk_obj = obj; 2555 keg->uk_maxpages = pages; 2556 keg->uk_allocf = obj_alloc; 2557 keg->uk_flags |= UMA_ZONE_NOFREE | UMA_ZFLAG_PRIVALLOC; 2558 ZONE_UNLOCK(zone); 2559 return (1); 2560 } 2561 2562 /* See uma.h */ 2563 void 2564 uma_prealloc(uma_zone_t zone, int items) 2565 { 2566 int slabs; 2567 uma_slab_t slab; 2568 uma_keg_t keg; 2569 2570 keg = zone->uz_keg; 2571 ZONE_LOCK(zone); 2572 slabs = items / keg->uk_ipers; 2573 if (slabs * keg->uk_ipers < items) 2574 slabs++; 2575 while (slabs > 0) { 2576 slab = slab_zalloc(zone, M_WAITOK); 2577 LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link); 2578 slabs--; 2579 } 2580 ZONE_UNLOCK(zone); 2581 } 2582 2583 /* See uma.h */ 2584 u_int32_t * 2585 uma_find_refcnt(uma_zone_t zone, void *item) 2586 { 2587 uma_slabrefcnt_t slabref; 2588 uma_keg_t keg; 2589 u_int32_t *refcnt; 2590 int idx; 2591 2592 keg = zone->uz_keg; 2593 slabref = (uma_slabrefcnt_t)vtoslab((vm_offset_t)item & 2594 (~UMA_SLAB_MASK)); 2595 KASSERT(slabref != NULL && slabref->us_keg->uk_flags & UMA_ZONE_REFCNT, 2596 ("uma_find_refcnt(): zone possibly not UMA_ZONE_REFCNT")); 2597 idx = ((unsigned long)item - (unsigned long)slabref->us_data) 2598 / keg->uk_rsize; 2599 refcnt = &slabref->us_freelist[idx].us_refcnt; 2600 return refcnt; 2601 } 2602 2603 /* See uma.h */ 2604 void 2605 uma_reclaim(void) 2606 { 2607 #ifdef UMA_DEBUG 2608 printf("UMA: vm asked us to release pages!\n"); 2609 #endif 2610 bucket_enable(); 2611 zone_foreach(zone_drain); 2612 /* 2613 * Some slabs may have been freed but this zone will be visited early 2614 * we visit again so that we can free pages that are empty once other 2615 * zones are drained. We have to do the same for buckets. 2616 */ 2617 zone_drain(slabzone); 2618 zone_drain(slabrefzone); 2619 bucket_zone_drain(); 2620 } 2621 2622 void * 2623 uma_large_malloc(int size, int wait) 2624 { 2625 void *mem; 2626 uma_slab_t slab; 2627 u_int8_t flags; 2628 2629 slab = uma_zalloc_internal(slabzone, NULL, wait); 2630 if (slab == NULL) 2631 return (NULL); 2632 mem = page_alloc(NULL, size, &flags, wait); 2633 if (mem) { 2634 vsetslab((vm_offset_t)mem, slab); 2635 slab->us_data = mem; 2636 slab->us_flags = flags | UMA_SLAB_MALLOC; 2637 slab->us_size = size; 2638 } else { 2639 uma_zfree_internal(slabzone, slab, NULL, 0); 2640 } 2641 2642 return (mem); 2643 } 2644 2645 void 2646 uma_large_free(uma_slab_t slab) 2647 { 2648 vsetobj((vm_offset_t)slab->us_data, kmem_object); 2649 page_free(slab->us_data, slab->us_size, slab->us_flags); 2650 uma_zfree_internal(slabzone, slab, NULL, 0); 2651 } 2652 2653 void 2654 uma_print_stats(void) 2655 { 2656 zone_foreach(uma_print_zone); 2657 } 2658 2659 static void 2660 slab_print(uma_slab_t slab) 2661 { 2662 printf("slab: keg %p, data %p, freecount %d, firstfree %d\n", 2663 slab->us_keg, slab->us_data, slab->us_freecount, 2664 slab->us_firstfree); 2665 } 2666 2667 static void 2668 cache_print(uma_cache_t cache) 2669 { 2670 printf("alloc: %p(%d), free: %p(%d)\n", 2671 cache->uc_allocbucket, 2672 cache->uc_allocbucket?cache->uc_allocbucket->ub_cnt:0, 2673 cache->uc_freebucket, 2674 cache->uc_freebucket?cache->uc_freebucket->ub_cnt:0); 2675 } 2676 2677 void 2678 uma_print_zone(uma_zone_t zone) 2679 { 2680 uma_cache_t cache; 2681 uma_keg_t keg; 2682 uma_slab_t slab; 2683 int i; 2684 2685 keg = zone->uz_keg; 2686 printf("%s(%p) size %d(%d) flags %d ipers %d ppera %d out %d free %d\n", 2687 zone->uz_name, zone, keg->uk_size, keg->uk_rsize, keg->uk_flags, 2688 keg->uk_ipers, keg->uk_ppera, 2689 (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free); 2690 printf("Part slabs:\n"); 2691 LIST_FOREACH(slab, &keg->uk_part_slab, us_link) 2692 slab_print(slab); 2693 printf("Free slabs:\n"); 2694 LIST_FOREACH(slab, &keg->uk_free_slab, us_link) 2695 slab_print(slab); 2696 printf("Full slabs:\n"); 2697 LIST_FOREACH(slab, &keg->uk_full_slab, us_link) 2698 slab_print(slab); 2699 for (i = 0; i <= mp_maxid; i++) { 2700 if (CPU_ABSENT(i)) 2701 continue; 2702 cache = &zone->uz_cpu[i]; 2703 printf("CPU %d Cache:\n", i); 2704 cache_print(cache); 2705 } 2706 } 2707 2708 /* 2709 * Sysctl handler for vm.zone 2710 * 2711 * stolen from vm_zone.c 2712 */ 2713 static int 2714 sysctl_vm_zone(SYSCTL_HANDLER_ARGS) 2715 { 2716 int error, len, cnt; 2717 const int linesize = 128; /* conservative */ 2718 int totalfree; 2719 char *tmpbuf, *offset; 2720 uma_zone_t z; 2721 uma_keg_t zk; 2722 char *p; 2723 int cpu; 2724 int cachefree; 2725 uma_bucket_t bucket; 2726 uma_cache_t cache; 2727 2728 cnt = 0; 2729 mtx_lock(&uma_mtx); 2730 LIST_FOREACH(zk, &uma_kegs, uk_link) { 2731 LIST_FOREACH(z, &zk->uk_zones, uz_link) 2732 cnt++; 2733 } 2734 mtx_unlock(&uma_mtx); 2735 MALLOC(tmpbuf, char *, (cnt == 0 ? 1 : cnt) * linesize, 2736 M_TEMP, M_WAITOK); 2737 len = snprintf(tmpbuf, linesize, 2738 "\nITEM SIZE LIMIT USED FREE REQUESTS\n\n"); 2739 if (cnt == 0) 2740 tmpbuf[len - 1] = '\0'; 2741 error = SYSCTL_OUT(req, tmpbuf, cnt == 0 ? len-1 : len); 2742 if (error || cnt == 0) 2743 goto out; 2744 offset = tmpbuf; 2745 mtx_lock(&uma_mtx); 2746 LIST_FOREACH(zk, &uma_kegs, uk_link) { 2747 LIST_FOREACH(z, &zk->uk_zones, uz_link) { 2748 if (cnt == 0) /* list may have changed size */ 2749 break; 2750 if (!(zk->uk_flags & UMA_ZFLAG_INTERNAL)) { 2751 for (cpu = 0; cpu <= mp_maxid; cpu++) { 2752 if (CPU_ABSENT(cpu)) 2753 continue; 2754 CPU_LOCK(cpu); 2755 } 2756 } 2757 ZONE_LOCK(z); 2758 cachefree = 0; 2759 if (!(zk->uk_flags & UMA_ZFLAG_INTERNAL)) { 2760 for (cpu = 0; cpu <= mp_maxid; cpu++) { 2761 if (CPU_ABSENT(cpu)) 2762 continue; 2763 cache = &z->uz_cpu[cpu]; 2764 if (cache->uc_allocbucket != NULL) 2765 cachefree += cache->uc_allocbucket->ub_cnt; 2766 if (cache->uc_freebucket != NULL) 2767 cachefree += cache->uc_freebucket->ub_cnt; 2768 CPU_UNLOCK(cpu); 2769 } 2770 } 2771 LIST_FOREACH(bucket, &z->uz_full_bucket, ub_link) { 2772 cachefree += bucket->ub_cnt; 2773 } 2774 totalfree = zk->uk_free + cachefree; 2775 len = snprintf(offset, linesize, 2776 "%-12.12s %6.6u, %8.8u, %6.6u, %6.6u, %8.8llu\n", 2777 z->uz_name, zk->uk_size, 2778 zk->uk_maxpages * zk->uk_ipers, 2779 (zk->uk_ipers * (zk->uk_pages / zk->uk_ppera)) - totalfree, 2780 totalfree, 2781 (unsigned long long)z->uz_allocs); 2782 ZONE_UNLOCK(z); 2783 for (p = offset + 12; p > offset && *p == ' '; --p) 2784 /* nothing */ ; 2785 p[1] = ':'; 2786 cnt--; 2787 offset += len; 2788 } 2789 } 2790 mtx_unlock(&uma_mtx); 2791 *offset++ = '\0'; 2792 error = SYSCTL_OUT(req, tmpbuf, offset - tmpbuf); 2793 out: 2794 FREE(tmpbuf, M_TEMP); 2795 return (error); 2796 } 2797