/*
 * Copyright (c) 2002, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uma_core.c  Implementation of the Universal Memory allocator
 *
 * This allocator is intended to replace the multitude of similar object caches
 * in the standard FreeBSD kernel. The intent is to be flexible as well as
 * efficient. A primary design goal is to return unused memory to the rest of
 * the system. This will make the system as a whole more flexible due to the
 * ability to move memory to subsystems which most need it instead of leaving
 * pools of reserved memory unused.
 *
 * The basic ideas stem from similar slab/zone based allocators whose algorithms
 * are well known.
 *
 */

/*
 * TODO:
 *	- Improve memory usage for large allocations
 *	- Investigate cache size adjustments
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/* I should really use ktr.. */
/*
#define UMA_DEBUG 1
#define UMA_DEBUG_ALLOC 1
#define UMA_DEBUG_ALLOC_1 1
*/

#include "opt_param.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

#include <machine/vmparam.h>

/*
 * This is the zone and keg from which all zones are spawned. The idea is that
 * even the zone & keg heads are allocated from the allocator, so we use the
 * bss section to bootstrap us.
 */
static struct uma_keg masterkeg;
static struct uma_zone masterzone_k;
static struct uma_zone masterzone_z;
static uma_zone_t kegs = &masterzone_k;
static uma_zone_t zones = &masterzone_z;

/* This is the zone from which all of uma_slab_t's are allocated.
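 * (Only kegs carrying the OFFPAGE flag allocate their slab headers from it;
 * see keg_ctor() below.)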
*/ 98 static uma_zone_t slabzone; 99 static uma_zone_t slabrefzone; /* With refcounters (for UMA_ZONE_REFCNT) */ 100 101 /* 102 * The initial hash tables come out of this zone so they can be allocated 103 * prior to malloc coming up. 104 */ 105 static uma_zone_t hashzone; 106 107 static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets"); 108 109 /* 110 * Are we allowed to allocate buckets? 111 */ 112 static int bucketdisable = 1; 113 114 /* Linked list of all kegs in the system */ 115 static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(&uma_kegs); 116 117 /* This mutex protects the keg list */ 118 static struct mtx uma_mtx; 119 120 /* These are the pcpu cache locks */ 121 static struct mtx uma_pcpu_mtx[MAXCPU]; 122 123 /* Linked list of boot time pages */ 124 static LIST_HEAD(,uma_slab) uma_boot_pages = 125 LIST_HEAD_INITIALIZER(&uma_boot_pages); 126 127 /* Count of free boottime pages */ 128 static int uma_boot_free = 0; 129 130 /* Is the VM done starting up? */ 131 static int booted = 0; 132 133 /* Maximum number of allowed items-per-slab if the slab header is OFFPAGE */ 134 static u_int uma_max_ipers; 135 static u_int uma_max_ipers_ref; 136 137 /* 138 * This is the handle used to schedule events that need to happen 139 * outside of the allocation fast path. 140 */ 141 static struct callout uma_callout; 142 #define UMA_TIMEOUT 20 /* Seconds for callout interval. */ 143 144 /* 145 * This structure is passed as the zone ctor arg so that I don't have to create 146 * a special allocation function just for zones. 147 */ 148 struct uma_zctor_args { 149 char *name; 150 size_t size; 151 uma_ctor ctor; 152 uma_dtor dtor; 153 uma_init uminit; 154 uma_fini fini; 155 uma_keg_t keg; 156 int align; 157 u_int16_t flags; 158 }; 159 160 struct uma_kctor_args { 161 uma_zone_t zone; 162 size_t size; 163 uma_init uminit; 164 uma_fini fini; 165 int align; 166 u_int16_t flags; 167 }; 168 169 struct uma_bucket_zone { 170 uma_zone_t ubz_zone; 171 char *ubz_name; 172 int ubz_entries; 173 }; 174 175 #define BUCKET_MAX 128 176 177 struct uma_bucket_zone bucket_zones[] = { 178 { NULL, "16 Bucket", 16 }, 179 { NULL, "32 Bucket", 32 }, 180 { NULL, "64 Bucket", 64 }, 181 { NULL, "128 Bucket", 128 }, 182 { NULL, NULL, 0} 183 }; 184 185 #define BUCKET_SHIFT 4 186 #define BUCKET_ZONES ((BUCKET_MAX >> BUCKET_SHIFT) + 1) 187 188 uint8_t bucket_size[BUCKET_ZONES]; 189 190 enum zfreeskip { SKIP_NONE, SKIP_DTOR, SKIP_FINI }; 191 192 /* Prototypes.. 
*/ 193 194 static void *obj_alloc(uma_zone_t, int, u_int8_t *, int); 195 static void *page_alloc(uma_zone_t, int, u_int8_t *, int); 196 static void *startup_alloc(uma_zone_t, int, u_int8_t *, int); 197 static void page_free(void *, int, u_int8_t); 198 static uma_slab_t slab_zalloc(uma_zone_t, int); 199 static void cache_drain(uma_zone_t); 200 static void bucket_drain(uma_zone_t, uma_bucket_t); 201 static void bucket_cache_drain(uma_zone_t zone); 202 static int keg_ctor(void *, int, void *, int); 203 static void keg_dtor(void *, int, void *); 204 static int zone_ctor(void *, int, void *, int); 205 static void zone_dtor(void *, int, void *); 206 static int zero_init(void *, int, int); 207 static void zone_small_init(uma_zone_t zone); 208 static void zone_large_init(uma_zone_t zone); 209 static void zone_foreach(void (*zfunc)(uma_zone_t)); 210 static void zone_timeout(uma_zone_t zone); 211 static int hash_alloc(struct uma_hash *); 212 static int hash_expand(struct uma_hash *, struct uma_hash *); 213 static void hash_free(struct uma_hash *hash); 214 static void uma_timeout(void *); 215 static void uma_startup3(void); 216 static void *uma_zalloc_internal(uma_zone_t, void *, int); 217 static void uma_zfree_internal(uma_zone_t, void *, void *, enum zfreeskip); 218 static void bucket_enable(void); 219 static void bucket_init(void); 220 static uma_bucket_t bucket_alloc(int, int); 221 static void bucket_free(uma_bucket_t); 222 static void bucket_zone_drain(void); 223 static int uma_zalloc_bucket(uma_zone_t zone, int flags); 224 static uma_slab_t uma_zone_slab(uma_zone_t zone, int flags); 225 static void *uma_slab_alloc(uma_zone_t zone, uma_slab_t slab); 226 static void zone_drain(uma_zone_t); 227 static uma_zone_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, 228 uma_fini fini, int align, u_int16_t flags); 229 230 void uma_print_zone(uma_zone_t); 231 void uma_print_stats(void); 232 static int sysctl_vm_zone(SYSCTL_HANDLER_ARGS); 233 234 #ifdef WITNESS 235 static int nosleepwithlocks = 1; 236 SYSCTL_INT(_debug, OID_AUTO, nosleepwithlocks, CTLFLAG_RW, &nosleepwithlocks, 237 0, "Convert M_WAITOK to M_NOWAIT to avoid lock-held-across-sleep paths"); 238 #else 239 static int nosleepwithlocks = 0; 240 SYSCTL_INT(_debug, OID_AUTO, nosleepwithlocks, CTLFLAG_RW, &nosleepwithlocks, 241 0, "Convert M_WAITOK to M_NOWAIT to avoid lock-held-across-sleep paths"); 242 #endif 243 SYSCTL_OID(_vm, OID_AUTO, zone, CTLTYPE_STRING|CTLFLAG_RD, 244 NULL, 0, sysctl_vm_zone, "A", "Zone Info"); 245 SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL); 246 247 /* 248 * This routine checks to see whether or not it's safe to enable buckets. 
249 */ 250 251 static void 252 bucket_enable(void) 253 { 254 if (cnt.v_free_count < cnt.v_free_min) 255 bucketdisable = 1; 256 else 257 bucketdisable = 0; 258 } 259 260 static void 261 bucket_init(void) 262 { 263 struct uma_bucket_zone *ubz; 264 int i; 265 int j; 266 267 for (i = 0, j = 0; bucket_zones[j].ubz_entries != 0; j++) { 268 int size; 269 270 ubz = &bucket_zones[j]; 271 size = roundup(sizeof(struct uma_bucket), sizeof(void *)); 272 size += sizeof(void *) * ubz->ubz_entries; 273 ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size, 274 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL); 275 for (; i <= ubz->ubz_entries; i += (1 << BUCKET_SHIFT)) 276 bucket_size[i >> BUCKET_SHIFT] = j; 277 } 278 } 279 280 static uma_bucket_t 281 bucket_alloc(int entries, int bflags) 282 { 283 struct uma_bucket_zone *ubz; 284 uma_bucket_t bucket; 285 int idx; 286 287 /* 288 * This is to stop us from allocating per cpu buckets while we're 289 * running out of UMA_BOOT_PAGES. Otherwise, we would exhaust the 290 * boot pages. This also prevents us from allocating buckets in 291 * low memory situations. 292 */ 293 294 if (bucketdisable) 295 return (NULL); 296 idx = howmany(entries, 1 << BUCKET_SHIFT); 297 ubz = &bucket_zones[bucket_size[idx]]; 298 bucket = uma_zalloc_internal(ubz->ubz_zone, NULL, bflags); 299 if (bucket) { 300 #ifdef INVARIANTS 301 bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries); 302 #endif 303 bucket->ub_cnt = 0; 304 bucket->ub_entries = ubz->ubz_entries; 305 } 306 307 return (bucket); 308 } 309 310 static void 311 bucket_free(uma_bucket_t bucket) 312 { 313 struct uma_bucket_zone *ubz; 314 int idx; 315 316 idx = howmany(bucket->ub_entries, 1 << BUCKET_SHIFT); 317 ubz = &bucket_zones[bucket_size[idx]]; 318 uma_zfree_internal(ubz->ubz_zone, bucket, NULL, SKIP_NONE); 319 } 320 321 static void 322 bucket_zone_drain(void) 323 { 324 struct uma_bucket_zone *ubz; 325 326 for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) 327 zone_drain(ubz->ubz_zone); 328 } 329 330 331 /* 332 * Routine called by timeout which is used to fire off some time interval 333 * based calculations. (stats, hash size, etc.) 334 * 335 * Arguments: 336 * arg Unused 337 * 338 * Returns: 339 * Nothing 340 */ 341 static void 342 uma_timeout(void *unused) 343 { 344 bucket_enable(); 345 zone_foreach(zone_timeout); 346 347 /* Reschedule this event */ 348 callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL); 349 } 350 351 /* 352 * Routine to perform timeout driven calculations. This expands the 353 * hashes and does per cpu statistics aggregation. 354 * 355 * Arguments: 356 * zone The zone to operate on 357 * 358 * Returns: 359 * Nothing 360 */ 361 static void 362 zone_timeout(uma_zone_t zone) 363 { 364 uma_keg_t keg; 365 uma_cache_t cache; 366 u_int64_t alloc; 367 int cpu; 368 369 keg = zone->uz_keg; 370 alloc = 0; 371 372 /* 373 * Aggregate per cpu cache statistics back to the zone. 374 * 375 * XXX This should be done in the sysctl handler. 376 * 377 * I may rewrite this to set a flag in the per cpu cache instead of 378 * locking. If the flag is not cleared on the next round I will have 379 * to lock and do it here instead so that the statistics don't get too 380 * far out of sync. 
	 */
	if (!(keg->uk_flags & UMA_ZFLAG_INTERNAL)) {
		for (cpu = 0; cpu <= mp_maxid; cpu++) {
			if (CPU_ABSENT(cpu))
				continue;
			CPU_LOCK(cpu);
			cache = &zone->uz_cpu[cpu];
			/* Add them up, and reset */
			alloc += cache->uc_allocs;
			cache->uc_allocs = 0;
			CPU_UNLOCK(cpu);
		}
	}

	/* Now push these stats back into the zone.. */
	ZONE_LOCK(zone);
	zone->uz_allocs += alloc;

	/*
	 * Expand the zone hash table.
	 *
	 * This is done if the number of slabs is larger than the hash size.
	 * What I'm trying to do here is completely reduce collisions. This
	 * may be a little aggressive. Should I allow for two collisions max?
	 */

	if (keg->uk_flags & UMA_ZONE_HASH &&
	    keg->uk_pages / keg->uk_ppera >= keg->uk_hash.uh_hashsize) {
		struct uma_hash newhash;
		struct uma_hash oldhash;
		int ret;

		/*
		 * This is so involved because allocating and freeing
		 * while the zone lock is held will lead to deadlock.
		 * I have to do everything in stages and check for
		 * races.
		 */
		newhash = keg->uk_hash;
		ZONE_UNLOCK(zone);
		ret = hash_alloc(&newhash);
		ZONE_LOCK(zone);
		if (ret) {
			if (hash_expand(&keg->uk_hash, &newhash)) {
				oldhash = keg->uk_hash;
				keg->uk_hash = newhash;
			} else
				oldhash = newhash;

			ZONE_UNLOCK(zone);
			hash_free(&oldhash);
			ZONE_LOCK(zone);
		}
	}
	ZONE_UNLOCK(zone);
}

/*
 * Allocate and zero fill the next sized hash table from the appropriate
 * backing store.
 *
 * Arguments:
 *	hash  A new hash structure with the old hash size in uh_hashsize
 *
 * Returns:
 *	1 on success and 0 on failure.
 */
static int
hash_alloc(struct uma_hash *hash)
{
	int oldsize;
	int alloc;

	oldsize = hash->uh_hashsize;

	/* We're just going to go to a power of two greater */
	if (oldsize) {
		hash->uh_hashsize = oldsize * 2;
		alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
		hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
		    M_UMAHASH, M_NOWAIT);
	} else {
		alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
		hash->uh_slab_hash = uma_zalloc_internal(hashzone, NULL,
		    M_WAITOK);
		hash->uh_hashsize = UMA_HASH_SIZE_INIT;
	}
	if (hash->uh_slab_hash) {
		bzero(hash->uh_slab_hash, alloc);
		hash->uh_hashmask = hash->uh_hashsize - 1;
		return (1);
	}

	return (0);
}

/*
 * Expands the hash table for HASH zones. This is done from zone_timeout
 * to reduce collisions. This must not be done in the regular allocation
 * path, otherwise, we can recurse on the vm while allocating pages.
 *
 * Arguments:
 *	oldhash  The hash you want to expand
 *	newhash  The hash structure for the new table
 *
 * Returns:
 *	1 if the slabs were moved to the new table, 0 otherwise
 *
 * Discussion:
 */
static int
hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
{
	uma_slab_t slab;
	int hval;
	int i;

	if (!newhash->uh_slab_hash)
		return (0);

	if (oldhash->uh_hashsize >= newhash->uh_hashsize)
		return (0);

	/*
	 * I need to investigate hash algorithms for resizing without a
	 * full rehash.
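	 * For now the loop below simply walks every chain in the old table
	 * and rehashes each slab into the new, larger one.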
	 */

	for (i = 0; i < oldhash->uh_hashsize; i++)
		while (!SLIST_EMPTY(&oldhash->uh_slab_hash[i])) {
			slab = SLIST_FIRST(&oldhash->uh_slab_hash[i]);
			SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[i], us_hlink);
			hval = UMA_HASH(newhash, slab->us_data);
			SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
			    slab, us_hlink);
		}

	return (1);
}

/*
 * Free the hash bucket to the appropriate backing store.
 *
 * Arguments:
 *	hash  The hash structure whose slab hash table is being freed
 *
 * Returns:
 *	Nothing
 */
static void
hash_free(struct uma_hash *hash)
{
	if (hash->uh_slab_hash == NULL)
		return;
	if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
		uma_zfree_internal(hashzone,
		    hash->uh_slab_hash, NULL, SKIP_NONE);
	else
		free(hash->uh_slab_hash, M_UMAHASH);
}

/*
 * Frees all outstanding items in a bucket
 *
 * Arguments:
 *	zone    The zone to free to, must be unlocked.
 *	bucket  The free/alloc bucket with items, cpu queue must be locked.
 *
 * Returns:
 *	Nothing
 */

static void
bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
{
	uma_slab_t slab;
	int mzone;
	void *item;

	if (bucket == NULL)
		return;

	slab = NULL;
	mzone = 0;

	/* We have to lookup the slab again for malloc.. */
	if (zone->uz_keg->uk_flags & UMA_ZONE_MALLOC)
		mzone = 1;

	while (bucket->ub_cnt > 0) {
		bucket->ub_cnt--;
		item = bucket->ub_bucket[bucket->ub_cnt];
#ifdef INVARIANTS
		bucket->ub_bucket[bucket->ub_cnt] = NULL;
		KASSERT(item != NULL,
		    ("bucket_drain: botched ptr, item is NULL"));
#endif
		/*
		 * This is extremely inefficient. The slab pointer was passed
		 * to uma_zfree_arg, but we lost it because the buckets don't
		 * hold them. This will go away when free() gets a size passed
		 * to it.
		 */
		if (mzone)
			slab = vtoslab((vm_offset_t)item & (~UMA_SLAB_MASK));
		uma_zfree_internal(zone, item, slab, SKIP_DTOR);
	}
}

/*
 * Drains the per cpu caches for a zone.
 *
 * Arguments:
 *	zone  The zone to drain, must be unlocked.
 *
 * Returns:
 *	Nothing
 */
static void
cache_drain(uma_zone_t zone)
{
	uma_cache_t cache;
	int cpu;

	/*
	 * We have to lock each cpu cache before locking the zone
	 */
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (CPU_ABSENT(cpu))
			continue;
		CPU_LOCK(cpu);
		cache = &zone->uz_cpu[cpu];
		bucket_drain(zone, cache->uc_allocbucket);
		bucket_drain(zone, cache->uc_freebucket);
		if (cache->uc_allocbucket != NULL)
			bucket_free(cache->uc_allocbucket);
		if (cache->uc_freebucket != NULL)
			bucket_free(cache->uc_freebucket);
		cache->uc_allocbucket = cache->uc_freebucket = NULL;
	}
	ZONE_LOCK(zone);
	bucket_cache_drain(zone);
	ZONE_UNLOCK(zone);
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (CPU_ABSENT(cpu))
			continue;
		CPU_UNLOCK(cpu);
	}
}

/*
 * Drain the cached buckets from a zone. Expects a locked zone on entry.
 */
static void
bucket_cache_drain(uma_zone_t zone)
{
	uma_bucket_t bucket;

	/*
	 * Drain the bucket queues and free the buckets, we just keep two per
	 * cpu (alloc/free).
	 */
	while ((bucket = LIST_FIRST(&zone->uz_full_bucket)) != NULL) {
		LIST_REMOVE(bucket, ub_link);
		ZONE_UNLOCK(zone);
		bucket_drain(zone, bucket);
		bucket_free(bucket);
		ZONE_LOCK(zone);
	}

	/* Now we do the free queue.. */
	while ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
		LIST_REMOVE(bucket, ub_link);
		bucket_free(bucket);
	}
}

/*
 * Frees pages from a zone back to the system. This is done on demand from
 * the pageout daemon.
 *
 * Arguments:
 *	zone  The zone to free pages from
 *
 * Returns:
 *	Nothing.
 */
static void
zone_drain(uma_zone_t zone)
{
	struct slabhead freeslabs = {};
	uma_keg_t keg;
	uma_slab_t slab;
	uma_slab_t n;
	u_int8_t flags;
	u_int8_t *mem;
	int i;

	keg = zone->uz_keg;

	/*
	 * We don't want to take pages from statically allocated zones at this
	 * time
	 */
	if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
		return;

	ZONE_LOCK(zone);

#ifdef UMA_DEBUG
	printf("%s free items: %u\n", zone->uz_name, keg->uk_free);
#endif
	bucket_cache_drain(zone);
	if (keg->uk_free == 0)
		goto finished;

	slab = LIST_FIRST(&keg->uk_free_slab);
	while (slab) {
		n = LIST_NEXT(slab, us_link);

		/* We have nowhere to free these to */
		if (slab->us_flags & UMA_SLAB_BOOT) {
			slab = n;
			continue;
		}

		LIST_REMOVE(slab, us_link);
		keg->uk_pages -= keg->uk_ppera;
		keg->uk_free -= keg->uk_ipers;

		if (keg->uk_flags & UMA_ZONE_HASH)
			UMA_HASH_REMOVE(&keg->uk_hash, slab, slab->us_data);

		SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);

		slab = n;
	}
finished:
	ZONE_UNLOCK(zone);

	while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
		SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
		if (keg->uk_fini)
			for (i = 0; i < keg->uk_ipers; i++)
				keg->uk_fini(
				    slab->us_data + (keg->uk_rsize * i),
				    keg->uk_size);
		flags = slab->us_flags;
		mem = slab->us_data;

		if ((keg->uk_flags & UMA_ZONE_MALLOC) ||
		    (keg->uk_flags & UMA_ZONE_REFCNT)) {
			vm_object_t obj;

			if (flags & UMA_SLAB_KMEM)
				obj = kmem_object;
			else
				obj = NULL;
			for (i = 0; i < keg->uk_ppera; i++)
				vsetobj((vm_offset_t)mem + (i * PAGE_SIZE),
				    obj);
		}
		if (keg->uk_flags & UMA_ZONE_OFFPAGE)
			uma_zfree_internal(keg->uk_slabzone, slab, NULL,
			    SKIP_NONE);
#ifdef UMA_DEBUG
		printf("%s: Returning %d bytes.\n",
		    zone->uz_name, UMA_SLAB_SIZE * keg->uk_ppera);
#endif
		keg->uk_freef(mem, UMA_SLAB_SIZE * keg->uk_ppera, flags);
	}
}

/*
 * Allocate a new slab for a zone. This does not insert the slab onto a list.
 *
 * Arguments:
 *	zone  The zone to allocate slabs for
 *	wait  Shall we wait?
 *
 * Returns:
 *	The slab that was allocated or NULL if there is no memory and the
 *	caller specified M_NOWAIT.
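 *
 *	The zone must be locked on entry; the lock is dropped while the
 *	backing pages are allocated and is reacquired before returning.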
766 */ 767 static uma_slab_t 768 slab_zalloc(uma_zone_t zone, int wait) 769 { 770 uma_slabrefcnt_t slabref; 771 uma_slab_t slab; 772 uma_keg_t keg; 773 u_int8_t *mem; 774 u_int8_t flags; 775 int i; 776 777 slab = NULL; 778 keg = zone->uz_keg; 779 780 #ifdef UMA_DEBUG 781 printf("slab_zalloc: Allocating a new slab for %s\n", zone->uz_name); 782 #endif 783 ZONE_UNLOCK(zone); 784 785 if (keg->uk_flags & UMA_ZONE_OFFPAGE) { 786 slab = uma_zalloc_internal(keg->uk_slabzone, NULL, wait); 787 if (slab == NULL) { 788 ZONE_LOCK(zone); 789 return NULL; 790 } 791 } 792 793 /* 794 * This reproduces the old vm_zone behavior of zero filling pages the 795 * first time they are added to a zone. 796 * 797 * Malloced items are zeroed in uma_zalloc. 798 */ 799 800 if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0) 801 wait |= M_ZERO; 802 else 803 wait &= ~M_ZERO; 804 805 mem = keg->uk_allocf(zone, keg->uk_ppera * UMA_SLAB_SIZE, 806 &flags, wait); 807 if (mem == NULL) { 808 if (keg->uk_flags & UMA_ZONE_OFFPAGE) 809 uma_zfree_internal(keg->uk_slabzone, slab, NULL, 0); 810 ZONE_LOCK(zone); 811 return (NULL); 812 } 813 814 /* Point the slab into the allocated memory */ 815 if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) 816 slab = (uma_slab_t )(mem + keg->uk_pgoff); 817 818 if ((keg->uk_flags & UMA_ZONE_MALLOC) || 819 (keg->uk_flags & UMA_ZONE_REFCNT)) 820 for (i = 0; i < keg->uk_ppera; i++) 821 vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab); 822 823 slab->us_keg = keg; 824 slab->us_data = mem; 825 slab->us_freecount = keg->uk_ipers; 826 slab->us_firstfree = 0; 827 slab->us_flags = flags; 828 for (i = 0; i < keg->uk_ipers; i++) 829 slab->us_freelist[i].us_item = i+1; 830 831 if (keg->uk_flags & UMA_ZONE_REFCNT) { 832 slabref = (uma_slabrefcnt_t)slab; 833 for (i = 0; i < keg->uk_ipers; i++) 834 slabref->us_freelist[i].us_refcnt = 0; 835 } 836 837 if (keg->uk_init != NULL) { 838 for (i = 0; i < keg->uk_ipers; i++) 839 if (keg->uk_init(slab->us_data + (keg->uk_rsize * i), 840 keg->uk_size, wait) != 0) 841 break; 842 if (i != keg->uk_ipers) { 843 if (keg->uk_fini != NULL) { 844 for (i--; i > -1; i--) 845 keg->uk_fini(slab->us_data + 846 (keg->uk_rsize * i), 847 keg->uk_size); 848 } 849 if ((keg->uk_flags & UMA_ZONE_MALLOC) || 850 (keg->uk_flags & UMA_ZONE_REFCNT)) 851 for (i = 0; i < keg->uk_ppera; i++) 852 vsetobj((vm_offset_t)mem + 853 (i * PAGE_SIZE), NULL); 854 if (keg->uk_flags & UMA_ZONE_OFFPAGE) 855 uma_zfree_internal(keg->uk_slabzone, slab, 856 NULL, SKIP_NONE); 857 keg->uk_freef(mem, UMA_SLAB_SIZE * keg->uk_ppera, 858 flags); 859 ZONE_LOCK(zone); 860 return (NULL); 861 } 862 } 863 ZONE_LOCK(zone); 864 865 if (keg->uk_flags & UMA_ZONE_HASH) 866 UMA_HASH_INSERT(&keg->uk_hash, slab, mem); 867 868 keg->uk_pages += keg->uk_ppera; 869 keg->uk_free += keg->uk_ipers; 870 871 return (slab); 872 } 873 874 /* 875 * This function is intended to be used early on in place of page_alloc() so 876 * that we may use the boot time page cache to satisfy allocations before 877 * the VM is ready. 878 */ 879 static void * 880 startup_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait) 881 { 882 uma_keg_t keg; 883 884 keg = zone->uz_keg; 885 886 /* 887 * Check our small startup cache to see if it has pages remaining. 
888 */ 889 mtx_lock(&uma_mtx); 890 if (uma_boot_free != 0) { 891 uma_slab_t tmps; 892 893 tmps = LIST_FIRST(&uma_boot_pages); 894 LIST_REMOVE(tmps, us_link); 895 uma_boot_free--; 896 mtx_unlock(&uma_mtx); 897 *pflag = tmps->us_flags; 898 return (tmps->us_data); 899 } 900 mtx_unlock(&uma_mtx); 901 if (booted == 0) 902 panic("UMA: Increase UMA_BOOT_PAGES"); 903 /* 904 * Now that we've booted reset these users to their real allocator. 905 */ 906 #ifdef UMA_MD_SMALL_ALLOC 907 keg->uk_allocf = uma_small_alloc; 908 #else 909 keg->uk_allocf = page_alloc; 910 #endif 911 return keg->uk_allocf(zone, bytes, pflag, wait); 912 } 913 914 /* 915 * Allocates a number of pages from the system 916 * 917 * Arguments: 918 * zone Unused 919 * bytes The number of bytes requested 920 * wait Shall we wait? 921 * 922 * Returns: 923 * A pointer to the alloced memory or possibly 924 * NULL if M_NOWAIT is set. 925 */ 926 static void * 927 page_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait) 928 { 929 void *p; /* Returned page */ 930 931 *pflag = UMA_SLAB_KMEM; 932 p = (void *) kmem_malloc(kmem_map, bytes, wait); 933 934 return (p); 935 } 936 937 /* 938 * Allocates a number of pages from within an object 939 * 940 * Arguments: 941 * zone Unused 942 * bytes The number of bytes requested 943 * wait Shall we wait? 944 * 945 * Returns: 946 * A pointer to the alloced memory or possibly 947 * NULL if M_NOWAIT is set. 948 */ 949 static void * 950 obj_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait) 951 { 952 vm_object_t object; 953 vm_offset_t retkva, zkva; 954 vm_page_t p; 955 int pages, startpages; 956 957 object = zone->uz_keg->uk_obj; 958 retkva = 0; 959 960 /* 961 * This looks a little weird since we're getting one page at a time. 962 */ 963 VM_OBJECT_LOCK(object); 964 p = TAILQ_LAST(&object->memq, pglist); 965 pages = p != NULL ? p->pindex + 1 : 0; 966 startpages = pages; 967 zkva = zone->uz_keg->uk_kva + pages * PAGE_SIZE; 968 for (; bytes > 0; bytes -= PAGE_SIZE) { 969 p = vm_page_alloc(object, pages, 970 VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED); 971 if (p == NULL) { 972 if (pages != startpages) 973 pmap_qremove(retkva, pages - startpages); 974 while (pages != startpages) { 975 pages--; 976 p = TAILQ_LAST(&object->memq, pglist); 977 vm_page_lock_queues(); 978 vm_page_unwire(p, 0); 979 vm_page_free(p); 980 vm_page_unlock_queues(); 981 } 982 retkva = 0; 983 goto done; 984 } 985 pmap_qenter(zkva, &p, 1); 986 if (retkva == 0) 987 retkva = zkva; 988 zkva += PAGE_SIZE; 989 pages += 1; 990 } 991 done: 992 VM_OBJECT_UNLOCK(object); 993 *flags = UMA_SLAB_PRIV; 994 995 return ((void *)retkva); 996 } 997 998 /* 999 * Frees a number of pages to the system 1000 * 1001 * Arguments: 1002 * mem A pointer to the memory to be freed 1003 * size The size of the memory being freed 1004 * flags The original p->us_flags field 1005 * 1006 * Returns: 1007 * Nothing 1008 */ 1009 static void 1010 page_free(void *mem, int size, u_int8_t flags) 1011 { 1012 vm_map_t map; 1013 1014 if (flags & UMA_SLAB_KMEM) 1015 map = kmem_map; 1016 else 1017 panic("UMA: page_free used with invalid flags %d\n", flags); 1018 1019 kmem_free(map, (vm_offset_t)mem, size); 1020 } 1021 1022 /* 1023 * Zero fill initializer 1024 * 1025 * Arguments/Returns follow uma_init specifications 1026 */ 1027 static int 1028 zero_init(void *mem, int size, int flags) 1029 { 1030 bzero(mem, size); 1031 return (0); 1032 } 1033 1034 /* 1035 * Finish creating a small uma zone. This calculates ipers, and the zone size. 
1036 * 1037 * Arguments 1038 * zone The zone we should initialize 1039 * 1040 * Returns 1041 * Nothing 1042 */ 1043 static void 1044 zone_small_init(uma_zone_t zone) 1045 { 1046 uma_keg_t keg; 1047 u_int rsize; 1048 u_int memused; 1049 u_int wastedspace; 1050 u_int shsize; 1051 1052 keg = zone->uz_keg; 1053 KASSERT(keg != NULL, ("Keg is null in zone_small_init")); 1054 rsize = keg->uk_size; 1055 1056 if (rsize < UMA_SMALLEST_UNIT) 1057 rsize = UMA_SMALLEST_UNIT; 1058 if (rsize & keg->uk_align) 1059 rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1); 1060 1061 keg->uk_rsize = rsize; 1062 keg->uk_ppera = 1; 1063 1064 if (keg->uk_flags & UMA_ZONE_REFCNT) { 1065 rsize += UMA_FRITMREF_SZ; /* linkage & refcnt */ 1066 shsize = sizeof(struct uma_slab_refcnt); 1067 } else { 1068 rsize += UMA_FRITM_SZ; /* Account for linkage */ 1069 shsize = sizeof(struct uma_slab); 1070 } 1071 1072 keg->uk_ipers = (UMA_SLAB_SIZE - shsize) / rsize; 1073 KASSERT(keg->uk_ipers != 0, ("zone_small_init: ipers is 0")); 1074 memused = keg->uk_ipers * rsize + shsize; 1075 wastedspace = UMA_SLAB_SIZE - memused; 1076 1077 /* 1078 * We can't do OFFPAGE if we're internal or if we've been 1079 * asked to not go to the VM for buckets. If we do this we 1080 * may end up going to the VM (kmem_map) for slabs which we 1081 * do not want to do if we're UMA_ZFLAG_CACHEONLY as a 1082 * result of UMA_ZONE_VM, which clearly forbids it. 1083 */ 1084 if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) || 1085 (keg->uk_flags & UMA_ZFLAG_CACHEONLY)) 1086 return; 1087 1088 if ((wastedspace >= UMA_MAX_WASTE) && 1089 (keg->uk_ipers < (UMA_SLAB_SIZE / keg->uk_rsize))) { 1090 keg->uk_ipers = UMA_SLAB_SIZE / keg->uk_rsize; 1091 KASSERT(keg->uk_ipers <= 255, 1092 ("zone_small_init: keg->uk_ipers too high!")); 1093 #ifdef UMA_DEBUG 1094 printf("UMA decided we need offpage slab headers for " 1095 "zone: %s, calculated wastedspace = %d, " 1096 "maximum wasted space allowed = %d, " 1097 "calculated ipers = %d, " 1098 "new wasted space = %d\n", zone->uz_name, wastedspace, 1099 UMA_MAX_WASTE, keg->uk_ipers, 1100 UMA_SLAB_SIZE - keg->uk_ipers * keg->uk_rsize); 1101 #endif 1102 keg->uk_flags |= UMA_ZONE_OFFPAGE; 1103 if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0) 1104 keg->uk_flags |= UMA_ZONE_HASH; 1105 } 1106 } 1107 1108 /* 1109 * Finish creating a large (> UMA_SLAB_SIZE) uma zone. Just give in and do 1110 * OFFPAGE for now. When I can allow for more dynamic slab sizes this will be 1111 * more complicated. 1112 * 1113 * Arguments 1114 * zone The zone we should initialize 1115 * 1116 * Returns 1117 * Nothing 1118 */ 1119 static void 1120 zone_large_init(uma_zone_t zone) 1121 { 1122 uma_keg_t keg; 1123 int pages; 1124 1125 keg = zone->uz_keg; 1126 1127 KASSERT(keg != NULL, ("Keg is null in zone_large_init")); 1128 KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0, 1129 ("zone_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY zone")); 1130 1131 pages = keg->uk_size / UMA_SLAB_SIZE; 1132 1133 /* Account for remainder */ 1134 if ((pages * UMA_SLAB_SIZE) < keg->uk_size) 1135 pages++; 1136 1137 keg->uk_ppera = pages; 1138 keg->uk_ipers = 1; 1139 1140 keg->uk_flags |= UMA_ZONE_OFFPAGE; 1141 if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0) 1142 keg->uk_flags |= UMA_ZONE_HASH; 1143 1144 keg->uk_rsize = keg->uk_size; 1145 } 1146 1147 /* 1148 * Keg header ctor. This initializes all fields, locks, etc. And inserts 1149 * the keg onto the global keg list. 
1150 * 1151 * Arguments/Returns follow uma_ctor specifications 1152 * udata Actually uma_kctor_args 1153 */ 1154 static int 1155 keg_ctor(void *mem, int size, void *udata, int flags) 1156 { 1157 struct uma_kctor_args *arg = udata; 1158 uma_keg_t keg = mem; 1159 uma_zone_t zone; 1160 1161 bzero(keg, size); 1162 keg->uk_size = arg->size; 1163 keg->uk_init = arg->uminit; 1164 keg->uk_fini = arg->fini; 1165 keg->uk_align = arg->align; 1166 keg->uk_free = 0; 1167 keg->uk_pages = 0; 1168 keg->uk_flags = arg->flags; 1169 keg->uk_allocf = page_alloc; 1170 keg->uk_freef = page_free; 1171 keg->uk_recurse = 0; 1172 keg->uk_slabzone = NULL; 1173 1174 /* 1175 * The master zone is passed to us at keg-creation time. 1176 */ 1177 zone = arg->zone; 1178 zone->uz_keg = keg; 1179 1180 if (arg->flags & UMA_ZONE_VM) 1181 keg->uk_flags |= UMA_ZFLAG_CACHEONLY; 1182 1183 if (arg->flags & UMA_ZONE_ZINIT) 1184 keg->uk_init = zero_init; 1185 1186 /* 1187 * The +UMA_FRITM_SZ added to uk_size is to account for the 1188 * linkage that is added to the size in zone_small_init(). If 1189 * we don't account for this here then we may end up in 1190 * zone_small_init() with a calculated 'ipers' of 0. 1191 */ 1192 if (keg->uk_flags & UMA_ZONE_REFCNT) { 1193 if ((keg->uk_size+UMA_FRITMREF_SZ) > 1194 (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt))) 1195 zone_large_init(zone); 1196 else 1197 zone_small_init(zone); 1198 } else { 1199 if ((keg->uk_size+UMA_FRITM_SZ) > 1200 (UMA_SLAB_SIZE - sizeof(struct uma_slab))) 1201 zone_large_init(zone); 1202 else 1203 zone_small_init(zone); 1204 } 1205 1206 if (keg->uk_flags & UMA_ZONE_OFFPAGE) { 1207 if (keg->uk_flags & UMA_ZONE_REFCNT) 1208 keg->uk_slabzone = slabrefzone; 1209 else 1210 keg->uk_slabzone = slabzone; 1211 } 1212 1213 /* 1214 * If we haven't booted yet we need allocations to go through the 1215 * startup cache until the vm is ready. 1216 */ 1217 if (keg->uk_ppera == 1) { 1218 #ifdef UMA_MD_SMALL_ALLOC 1219 keg->uk_allocf = uma_small_alloc; 1220 keg->uk_freef = uma_small_free; 1221 #endif 1222 if (booted == 0) 1223 keg->uk_allocf = startup_alloc; 1224 } 1225 1226 /* 1227 * Initialize keg's lock (shared among zones) through 1228 * Master zone 1229 */ 1230 zone->uz_lock = &keg->uk_lock; 1231 if (arg->flags & UMA_ZONE_MTXCLASS) 1232 ZONE_LOCK_INIT(zone, 1); 1233 else 1234 ZONE_LOCK_INIT(zone, 0); 1235 1236 /* 1237 * If we're putting the slab header in the actual page we need to 1238 * figure out where in each page it goes. This calculates a right 1239 * justified offset into the memory on an ALIGN_PTR boundary. 1240 */ 1241 if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) { 1242 u_int totsize; 1243 1244 /* Size of the slab struct and free list */ 1245 if (keg->uk_flags & UMA_ZONE_REFCNT) 1246 totsize = sizeof(struct uma_slab_refcnt) + 1247 keg->uk_ipers * UMA_FRITMREF_SZ; 1248 else 1249 totsize = sizeof(struct uma_slab) + 1250 keg->uk_ipers * UMA_FRITM_SZ; 1251 1252 if (totsize & UMA_ALIGN_PTR) 1253 totsize = (totsize & ~UMA_ALIGN_PTR) + 1254 (UMA_ALIGN_PTR + 1); 1255 keg->uk_pgoff = UMA_SLAB_SIZE - totsize; 1256 1257 if (keg->uk_flags & UMA_ZONE_REFCNT) 1258 totsize = keg->uk_pgoff + sizeof(struct uma_slab_refcnt) 1259 + keg->uk_ipers * UMA_FRITMREF_SZ; 1260 else 1261 totsize = keg->uk_pgoff + sizeof(struct uma_slab) 1262 + keg->uk_ipers * UMA_FRITM_SZ; 1263 1264 /* 1265 * The only way the following is possible is if with our 1266 * UMA_ALIGN_PTR adjustments we are now bigger than 1267 * UMA_SLAB_SIZE. 
I haven't checked whether this is 1268 * mathematically possible for all cases, so we make 1269 * sure here anyway. 1270 */ 1271 if (totsize > UMA_SLAB_SIZE) { 1272 printf("zone %s ipers %d rsize %d size %d\n", 1273 zone->uz_name, keg->uk_ipers, keg->uk_rsize, 1274 keg->uk_size); 1275 panic("UMA slab won't fit.\n"); 1276 } 1277 } 1278 1279 if (keg->uk_flags & UMA_ZONE_HASH) 1280 hash_alloc(&keg->uk_hash); 1281 1282 #ifdef UMA_DEBUG 1283 printf("%s(%p) size = %d ipers = %d ppera = %d pgoff = %d\n", 1284 zone->uz_name, zone, 1285 keg->uk_size, keg->uk_ipers, 1286 keg->uk_ppera, keg->uk_pgoff); 1287 #endif 1288 1289 LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link); 1290 1291 mtx_lock(&uma_mtx); 1292 LIST_INSERT_HEAD(&uma_kegs, keg, uk_link); 1293 mtx_unlock(&uma_mtx); 1294 return (0); 1295 } 1296 1297 /* 1298 * Zone header ctor. This initializes all fields, locks, etc. 1299 * 1300 * Arguments/Returns follow uma_ctor specifications 1301 * udata Actually uma_zctor_args 1302 */ 1303 1304 static int 1305 zone_ctor(void *mem, int size, void *udata, int flags) 1306 { 1307 struct uma_zctor_args *arg = udata; 1308 uma_zone_t zone = mem; 1309 uma_zone_t z; 1310 uma_keg_t keg; 1311 1312 bzero(zone, size); 1313 zone->uz_name = arg->name; 1314 zone->uz_ctor = arg->ctor; 1315 zone->uz_dtor = arg->dtor; 1316 zone->uz_init = NULL; 1317 zone->uz_fini = NULL; 1318 zone->uz_allocs = 0; 1319 zone->uz_fills = zone->uz_count = 0; 1320 1321 if (arg->flags & UMA_ZONE_SECONDARY) { 1322 KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg")); 1323 keg = arg->keg; 1324 zone->uz_keg = keg; 1325 zone->uz_init = arg->uminit; 1326 zone->uz_fini = arg->fini; 1327 zone->uz_lock = &keg->uk_lock; 1328 mtx_lock(&uma_mtx); 1329 ZONE_LOCK(zone); 1330 keg->uk_flags |= UMA_ZONE_SECONDARY; 1331 LIST_FOREACH(z, &keg->uk_zones, uz_link) { 1332 if (LIST_NEXT(z, uz_link) == NULL) { 1333 LIST_INSERT_AFTER(z, zone, uz_link); 1334 break; 1335 } 1336 } 1337 ZONE_UNLOCK(zone); 1338 mtx_unlock(&uma_mtx); 1339 } else if (arg->keg == NULL) { 1340 if (uma_kcreate(zone, arg->size, arg->uminit, arg->fini, 1341 arg->align, arg->flags) == NULL) 1342 return (ENOMEM); 1343 } else { 1344 struct uma_kctor_args karg; 1345 int error; 1346 1347 /* We should only be here from uma_startup() */ 1348 karg.size = arg->size; 1349 karg.uminit = arg->uminit; 1350 karg.fini = arg->fini; 1351 karg.align = arg->align; 1352 karg.flags = arg->flags; 1353 karg.zone = zone; 1354 error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg, 1355 flags); 1356 if (error) 1357 return (error); 1358 } 1359 keg = zone->uz_keg; 1360 zone->uz_lock = &keg->uk_lock; 1361 1362 /* 1363 * Some internal zones don't have room allocated for the per cpu 1364 * caches. If we're internal, bail out here. 1365 */ 1366 if (keg->uk_flags & UMA_ZFLAG_INTERNAL) { 1367 KASSERT((keg->uk_flags & UMA_ZONE_SECONDARY) == 0, 1368 ("Secondary zone requested UMA_ZFLAG_INTERNAL")); 1369 return (0); 1370 } 1371 1372 if (keg->uk_flags & UMA_ZONE_MAXBUCKET) 1373 zone->uz_count = BUCKET_MAX; 1374 else if (keg->uk_ipers <= BUCKET_MAX) 1375 zone->uz_count = keg->uk_ipers; 1376 else 1377 zone->uz_count = BUCKET_MAX; 1378 return (0); 1379 } 1380 1381 /* 1382 * Keg header dtor. This frees all data, destroys locks, frees the hash 1383 * table and removes the keg from the global list. 
 *
 * Arguments/Returns follow uma_dtor specifications
 *	udata  unused
 */
static void
keg_dtor(void *arg, int size, void *udata)
{
	uma_keg_t keg;

	keg = (uma_keg_t)arg;
	mtx_lock(&keg->uk_lock);
	if (keg->uk_free != 0) {
		printf("Freed UMA keg was not empty (%d items). "
		    "Lost %d pages of memory.\n",
		    keg->uk_free, keg->uk_pages);
	}
	mtx_unlock(&keg->uk_lock);

	if (keg->uk_flags & UMA_ZONE_HASH)
		hash_free(&keg->uk_hash);

	mtx_destroy(&keg->uk_lock);
}

/*
 * Zone header dtor.
 *
 * Arguments/Returns follow uma_dtor specifications
 *	udata  unused
 */
static void
zone_dtor(void *arg, int size, void *udata)
{
	uma_zone_t zone;
	uma_keg_t keg;

	zone = (uma_zone_t)arg;
	keg = zone->uz_keg;

	if (!(keg->uk_flags & UMA_ZFLAG_INTERNAL))
		cache_drain(zone);

	mtx_lock(&uma_mtx);
	zone_drain(zone);
	if (keg->uk_flags & UMA_ZONE_SECONDARY) {
		LIST_REMOVE(zone, uz_link);
		/*
		 * XXX there are some races here where
		 * the zone can be drained but zone lock
		 * released and then refilled before we
		 * remove it... we don't care for now
		 */
		ZONE_LOCK(zone);
		if (LIST_EMPTY(&keg->uk_zones))
			keg->uk_flags &= ~UMA_ZONE_SECONDARY;
		ZONE_UNLOCK(zone);
		mtx_unlock(&uma_mtx);
	} else {
		LIST_REMOVE(keg, uk_link);
		LIST_REMOVE(zone, uz_link);
		mtx_unlock(&uma_mtx);
		uma_zfree_internal(kegs, keg, NULL, SKIP_NONE);
	}
	zone->uz_keg = NULL;
}

/*
 * Traverses every zone in the system and calls a callback
 *
 * Arguments:
 *	zfunc  A pointer to a function which accepts a zone
 *		as an argument.
 *
 * Returns:
 *	Nothing
 */
static void
zone_foreach(void (*zfunc)(uma_zone_t))
{
	uma_keg_t keg;
	uma_zone_t zone;

	mtx_lock(&uma_mtx);
	LIST_FOREACH(keg, &uma_kegs, uk_link) {
		LIST_FOREACH(zone, &keg->uk_zones, uz_link)
			zfunc(zone);
	}
	mtx_unlock(&uma_mtx);
}

/* Public functions */
/* See uma.h */
void
uma_startup(void *bootmem)
{
	struct uma_zctor_args args;
	uma_slab_t slab;
	u_int slabsize;
	u_int objsize, totsize, wsize;
	int i;

#ifdef UMA_DEBUG
	printf("Creating uma keg headers zone and keg.\n");
#endif
	/*
	 * The general UMA lock is a recursion-allowed lock because
	 * there is a code path where, while we're still configured
	 * to use startup_alloc() for backend page allocations, we
	 * may end up in uma_reclaim() which calls zone_foreach(zone_drain),
	 * which grabs uma_mtx, only to later call into startup_alloc()
	 * because while freeing we needed to allocate a bucket. Since
	 * startup_alloc() also takes uma_mtx, we need to be able to
	 * recurse on it.
	 */
	mtx_init(&uma_mtx, "UMA lock", NULL, MTX_DEF | MTX_RECURSE);

	/*
	 * Figure out the maximum number of items-per-slab we'll have if
	 * we're using the OFFPAGE slab header to track free items, given
	 * all possible object sizes and the maximum desired wastage
	 * (UMA_MAX_WASTE).
	 *
	 * We iterate until we find an object size for
	 * which the calculated wastage in zone_small_init() will be
	 * enough to warrant OFFPAGE.
	 * Since wastedspace versus objsize
	 * is an overall increasing see-saw function, we find the smallest
	 * objsize such that the wastage is always acceptable for objects
	 * with that objsize or smaller. Since a smaller objsize always
	 * generates a larger possible uma_max_ipers, we use this computed
	 * objsize to calculate the largest ipers possible. Since the
	 * ipers calculated for OFFPAGE slab headers is always larger than
	 * the ipers initially calculated in zone_small_init(), we use
	 * the former's equation (UMA_SLAB_SIZE / keg->uk_rsize) to
	 * obtain the maximum ipers possible for offpage slab headers.
	 *
	 * It should be noted that ipers versus objsize is an inversely
	 * proportional function which drops off rather quickly so as
	 * long as our UMA_MAX_WASTE is such that the objsize we calculate
	 * falls into the portion of the inverse relation AFTER the steep
	 * falloff, then uma_max_ipers shouldn't be too high (~10 on i386).
	 *
	 * Note that we have 8-bits (1 byte) to use as a freelist index
	 * inside the actual slab header itself and this is enough to
	 * accommodate us. In the worst case, a UMA_SMALLEST_UNIT sized
	 * object with offpage slab header would have ipers =
	 * UMA_SLAB_SIZE / UMA_SMALLEST_UNIT (currently = 256), which is
	 * 1 greater than what our byte-integer freelist index can
	 * accommodate, but we know that this situation never occurs as
	 * for UMA_SMALLEST_UNIT-sized objects, we will never calculate
	 * that we need to go to offpage slab headers. Or, if we do,
	 * then we trap that condition below and panic in the INVARIANTS case.
	 */
	wsize = UMA_SLAB_SIZE - sizeof(struct uma_slab) - UMA_MAX_WASTE;
	totsize = wsize;
	objsize = UMA_SMALLEST_UNIT;
	while (totsize >= wsize) {
		totsize = (UMA_SLAB_SIZE - sizeof(struct uma_slab)) /
		    (objsize + UMA_FRITM_SZ);
		totsize *= (UMA_FRITM_SZ + objsize);
		objsize++;
	}
	if (objsize > UMA_SMALLEST_UNIT)
		objsize--;
	uma_max_ipers = UMA_SLAB_SIZE / objsize;

	wsize = UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt) - UMA_MAX_WASTE;
	totsize = wsize;
	objsize = UMA_SMALLEST_UNIT;
	while (totsize >= wsize) {
		totsize = (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt)) /
		    (objsize + UMA_FRITMREF_SZ);
		totsize *= (UMA_FRITMREF_SZ + objsize);
		objsize++;
	}
	if (objsize > UMA_SMALLEST_UNIT)
		objsize--;
	uma_max_ipers_ref = UMA_SLAB_SIZE / objsize;

	KASSERT((uma_max_ipers_ref <= 255) && (uma_max_ipers <= 255),
	    ("uma_startup: calculated uma_max_ipers values too large!"));

#ifdef UMA_DEBUG
	printf("Calculated uma_max_ipers (for OFFPAGE) is %d\n", uma_max_ipers);
	printf("Calculated uma_max_ipers_ref (for OFFPAGE) is %d\n",
	    uma_max_ipers_ref);
#endif

	/* "manually" create the initial zone */
	args.name = "UMA Kegs";
	args.size = sizeof(struct uma_keg);
	args.ctor = keg_ctor;
	args.dtor = keg_dtor;
	args.uminit = zero_init;
	args.fini = NULL;
	args.keg = &masterkeg;
	args.align = 32 - 1;
	args.flags = UMA_ZFLAG_INTERNAL;
	/* The initial zone has no Per cpu queues so it's smaller */
	zone_ctor(kegs, sizeof(struct uma_zone), &args, M_WAITOK);

#ifdef UMA_DEBUG
	printf("Filling boot free list.\n");
#endif
	for (i = 0; i < UMA_BOOT_PAGES; i++) {
		slab = (uma_slab_t)((u_int8_t *)bootmem + (i * UMA_SLAB_SIZE));
		slab->us_data = (u_int8_t *)slab;
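		/* Each boot page doubles as its own slab header and data area. */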
		slab->us_flags = UMA_SLAB_BOOT;
		LIST_INSERT_HEAD(&uma_boot_pages, slab, us_link);
		uma_boot_free++;
	}

#ifdef UMA_DEBUG
	printf("Creating uma zone headers zone and keg.\n");
#endif
	args.name = "UMA Zones";
	args.size = sizeof(struct uma_zone) +
	    (sizeof(struct uma_cache) * (mp_maxid + 1));
	args.ctor = zone_ctor;
	args.dtor = zone_dtor;
	args.uminit = zero_init;
	args.fini = NULL;
	args.keg = NULL;
	args.align = 32 - 1;
	args.flags = UMA_ZFLAG_INTERNAL;
	/* The initial zone has no Per cpu queues so it's smaller */
	zone_ctor(zones, sizeof(struct uma_zone), &args, M_WAITOK);

#ifdef UMA_DEBUG
	printf("Initializing pcpu cache locks.\n");
#endif
	/* Initialize the pcpu cache lock set once and for all */
	for (i = 0; i <= mp_maxid; i++)
		CPU_LOCK_INIT(i);

#ifdef UMA_DEBUG
	printf("Creating slab and hash zones.\n");
#endif

	/*
	 * This is the max number of free list items we'll have with
	 * offpage slabs.
	 */
	slabsize = uma_max_ipers * UMA_FRITM_SZ;
	slabsize += sizeof(struct uma_slab);

	/* Now make a zone for slab headers */
	slabzone = uma_zcreate("UMA Slabs",
	    slabsize,
	    NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);

	/*
	 * We also create a zone for the bigger slabs with reference
	 * counts in them, to accommodate UMA_ZONE_REFCNT zones.
	 */
	slabsize = uma_max_ipers_ref * UMA_FRITMREF_SZ;
	slabsize += sizeof(struct uma_slab_refcnt);
	slabrefzone = uma_zcreate("UMA RCntSlabs",
	    slabsize,
	    NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR,
	    UMA_ZFLAG_INTERNAL);

	hashzone = uma_zcreate("UMA Hash",
	    sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
	    NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);

	bucket_init();

#ifdef UMA_MD_SMALL_ALLOC
	booted = 1;
#endif

#ifdef UMA_DEBUG
	printf("UMA startup complete.\n");
#endif
}

/* see uma.h */
void
uma_startup2(void)
{
	booted = 1;
	bucket_enable();
#ifdef UMA_DEBUG
	printf("UMA startup2 complete.\n");
#endif
}

/*
 * Initialize our callout handle
 *
 */

static void
uma_startup3(void)
{
#ifdef UMA_DEBUG
	printf("Starting callout.\n");
#endif
	callout_init(&uma_callout, CALLOUT_MPSAFE);
	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
#ifdef UMA_DEBUG
	printf("UMA startup3 complete.\n");
#endif
}

static uma_zone_t
uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
    int align, u_int16_t flags)
{
	struct uma_kctor_args args;

	args.size = size;
	args.uminit = uminit;
	args.fini = fini;
	args.align = align;
	args.flags = flags;
	args.zone = zone;
	return (uma_zalloc_internal(kegs, &args, M_WAITOK));
}

/* See uma.h */
uma_zone_t
uma_zcreate(char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
    uma_init uminit, uma_fini fini, int align, u_int16_t flags)

{
	struct uma_zctor_args args;

	/* This stuff is essential for the zone ctor */
	args.name = name;
	args.size = size;
	args.ctor = ctor;
	args.dtor = dtor;
	args.uminit = uminit;
	args.fini = fini;
	args.align = align;
	args.flags = flags;
	args.keg = NULL;
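
	/*
	 * zone_ctor() (the constructor of the zones zone) consumes these args;
	 * since args.keg is NULL it will also create a fresh keg for the new
	 * zone via uma_kcreate().
	 */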
1725 1726 return (uma_zalloc_internal(zones, &args, M_WAITOK)); 1727 } 1728 1729 /* See uma.h */ 1730 uma_zone_t 1731 uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor, 1732 uma_init zinit, uma_fini zfini, uma_zone_t master) 1733 { 1734 struct uma_zctor_args args; 1735 1736 args.name = name; 1737 args.size = master->uz_keg->uk_size; 1738 args.ctor = ctor; 1739 args.dtor = dtor; 1740 args.uminit = zinit; 1741 args.fini = zfini; 1742 args.align = master->uz_keg->uk_align; 1743 args.flags = master->uz_keg->uk_flags | UMA_ZONE_SECONDARY; 1744 args.keg = master->uz_keg; 1745 1746 return (uma_zalloc_internal(zones, &args, M_WAITOK)); 1747 } 1748 1749 /* See uma.h */ 1750 void 1751 uma_zdestroy(uma_zone_t zone) 1752 { 1753 uma_zfree_internal(zones, zone, NULL, SKIP_NONE); 1754 } 1755 1756 /* See uma.h */ 1757 void * 1758 uma_zalloc_arg(uma_zone_t zone, void *udata, int flags) 1759 { 1760 void *item; 1761 uma_cache_t cache; 1762 uma_bucket_t bucket; 1763 int cpu; 1764 int badness; 1765 1766 /* This is the fast path allocation */ 1767 #ifdef UMA_DEBUG_ALLOC_1 1768 printf("Allocating one item from %s(%p)\n", zone->uz_name, zone); 1769 #endif 1770 CTR3(KTR_UMA, "uma_zalloc_arg thread %x zone %s flags %d", curthread, 1771 zone->uz_name, flags); 1772 1773 if (!(flags & M_NOWAIT)) { 1774 KASSERT(curthread->td_intr_nesting_level == 0, 1775 ("malloc(M_WAITOK) in interrupt context")); 1776 if (nosleepwithlocks) { 1777 #ifdef WITNESS 1778 badness = WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, 1779 NULL, 1780 "malloc(M_WAITOK) of \"%s\", forcing M_NOWAIT", 1781 zone->uz_name); 1782 #else 1783 badness = 1; 1784 #endif 1785 } else { 1786 badness = 0; 1787 #ifdef WITNESS 1788 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 1789 "malloc(M_WAITOK) of \"%s\"", zone->uz_name); 1790 #endif 1791 } 1792 if (badness) { 1793 flags &= ~M_WAITOK; 1794 flags |= M_NOWAIT; 1795 } 1796 } 1797 1798 zalloc_restart: 1799 cpu = PCPU_GET(cpuid); 1800 CPU_LOCK(cpu); 1801 cache = &zone->uz_cpu[cpu]; 1802 1803 zalloc_start: 1804 bucket = cache->uc_allocbucket; 1805 1806 if (bucket) { 1807 if (bucket->ub_cnt > 0) { 1808 bucket->ub_cnt--; 1809 item = bucket->ub_bucket[bucket->ub_cnt]; 1810 #ifdef INVARIANTS 1811 bucket->ub_bucket[bucket->ub_cnt] = NULL; 1812 #endif 1813 KASSERT(item != NULL, 1814 ("uma_zalloc: Bucket pointer mangled.")); 1815 cache->uc_allocs++; 1816 #ifdef INVARIANTS 1817 ZONE_LOCK(zone); 1818 uma_dbg_alloc(zone, NULL, item); 1819 ZONE_UNLOCK(zone); 1820 #endif 1821 CPU_UNLOCK(cpu); 1822 if (zone->uz_ctor != NULL) { 1823 if (zone->uz_ctor(item, zone->uz_keg->uk_size, 1824 udata, flags) != 0) { 1825 uma_zfree_internal(zone, item, udata, 1826 SKIP_DTOR); 1827 return (NULL); 1828 } 1829 } 1830 if (flags & M_ZERO) 1831 bzero(item, zone->uz_keg->uk_size); 1832 return (item); 1833 } else if (cache->uc_freebucket) { 1834 /* 1835 * We have run out of items in our allocbucket. 1836 * See if we can switch with our free bucket. 
			 */
			if (cache->uc_freebucket->ub_cnt > 0) {
#ifdef UMA_DEBUG_ALLOC
				printf("uma_zalloc: Swapping empty with"
				    " alloc.\n");
#endif
				bucket = cache->uc_freebucket;
				cache->uc_freebucket = cache->uc_allocbucket;
				cache->uc_allocbucket = bucket;

				goto zalloc_start;
			}
		}
	}
	ZONE_LOCK(zone);
	/* Since we have locked the zone we may as well send back our stats */
	zone->uz_allocs += cache->uc_allocs;
	cache->uc_allocs = 0;

	/* Our old one is now a free bucket */
	if (cache->uc_allocbucket) {
		KASSERT(cache->uc_allocbucket->ub_cnt == 0,
		    ("uma_zalloc_arg: Freeing a non free bucket."));
		LIST_INSERT_HEAD(&zone->uz_free_bucket,
		    cache->uc_allocbucket, ub_link);
		cache->uc_allocbucket = NULL;
	}

	/* Check the free list for a new alloc bucket */
	if ((bucket = LIST_FIRST(&zone->uz_full_bucket)) != NULL) {
		KASSERT(bucket->ub_cnt != 0,
		    ("uma_zalloc_arg: Returning an empty bucket."));

		LIST_REMOVE(bucket, ub_link);
		cache->uc_allocbucket = bucket;
		ZONE_UNLOCK(zone);
		goto zalloc_start;
	}
	/* We are no longer associated with this cpu!!! */
	CPU_UNLOCK(cpu);

	/* Bump up our uz_count so we get here less */
	if (zone->uz_count < BUCKET_MAX)
		zone->uz_count++;

	/*
	 * Now let's just fill a bucket and put it on the free list. If that
	 * works we'll restart the allocation from the beginning.
	 */
	if (uma_zalloc_bucket(zone, flags)) {
		ZONE_UNLOCK(zone);
		goto zalloc_restart;
	}
	ZONE_UNLOCK(zone);
	/*
	 * We may not be able to get a bucket so return an actual item.
	 */
#ifdef UMA_DEBUG
	printf("uma_zalloc_arg: Bucketzone returned NULL\n");
#endif

	return (uma_zalloc_internal(zone, udata, flags));
}

static uma_slab_t
uma_zone_slab(uma_zone_t zone, int flags)
{
	uma_slab_t slab;
	uma_keg_t keg;

	keg = zone->uz_keg;

	/*
	 * This is to prevent us from recursively trying to allocate
	 * buckets. The problem is that if an allocation forces us to
	 * grab a new bucket we will call page_alloc, which will go off
	 * and cause the vm to allocate vm_map_entries. If we need new
	 * buckets there too we will recurse in kmem_alloc and bad
	 * things happen. So instead we return a NULL bucket, and make
	 * the code that allocates buckets smart enough to deal with it
	 */
	if (keg->uk_flags & UMA_ZFLAG_INTERNAL && keg->uk_recurse != 0)
		return (NULL);

	slab = NULL;

	for (;;) {
		/*
		 * Find a slab with some space. Prefer slabs that are partially
		 * used over those that are totally full. This helps to reduce
		 * fragmentation.
		 */
		if (keg->uk_free != 0) {
			if (!LIST_EMPTY(&keg->uk_part_slab)) {
				slab = LIST_FIRST(&keg->uk_part_slab);
			} else {
				slab = LIST_FIRST(&keg->uk_free_slab);
				LIST_REMOVE(slab, us_link);
				LIST_INSERT_HEAD(&keg->uk_part_slab, slab,
				    us_link);
			}
			return (slab);
		}

		/*
		 * M_NOVM means don't ask at all!
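		 * Callers that set it simply get NULL back instead of forcing
		 * a fresh page allocation.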
1943 */ 1944 if (flags & M_NOVM) 1945 break; 1946 1947 if (keg->uk_maxpages && 1948 keg->uk_pages >= keg->uk_maxpages) { 1949 keg->uk_flags |= UMA_ZFLAG_FULL; 1950 1951 if (flags & M_NOWAIT) 1952 break; 1953 else 1954 msleep(keg, &keg->uk_lock, PVM, 1955 "zonelimit", 0); 1956 continue; 1957 } 1958 keg->uk_recurse++; 1959 slab = slab_zalloc(zone, flags); 1960 keg->uk_recurse--; 1961 1962 /* 1963 * If we got a slab here it's safe to mark it partially used 1964 * and return. We assume that the caller is going to remove 1965 * at least one item. 1966 */ 1967 if (slab) { 1968 LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link); 1969 return (slab); 1970 } 1971 /* 1972 * We might not have been able to get a slab but another cpu 1973 * could have while we were unlocked. Check again before we 1974 * fail. 1975 */ 1976 if (flags & M_NOWAIT) 1977 flags |= M_NOVM; 1978 } 1979 return (slab); 1980 } 1981 1982 static void * 1983 uma_slab_alloc(uma_zone_t zone, uma_slab_t slab) 1984 { 1985 uma_keg_t keg; 1986 void *item; 1987 u_int8_t freei; 1988 1989 keg = zone->uz_keg; 1990 1991 freei = slab->us_firstfree; 1992 slab->us_firstfree = slab->us_freelist[freei].us_item; 1993 item = slab->us_data + (keg->uk_rsize * freei); 1994 1995 slab->us_freecount--; 1996 keg->uk_free--; 1997 #ifdef INVARIANTS 1998 uma_dbg_alloc(zone, slab, item); 1999 #endif 2000 /* Move this slab to the full list */ 2001 if (slab->us_freecount == 0) { 2002 LIST_REMOVE(slab, us_link); 2003 LIST_INSERT_HEAD(&keg->uk_full_slab, slab, us_link); 2004 } 2005 2006 return (item); 2007 } 2008 2009 static int 2010 uma_zalloc_bucket(uma_zone_t zone, int flags) 2011 { 2012 uma_bucket_t bucket; 2013 uma_slab_t slab; 2014 int16_t saved; 2015 int max, origflags = flags; 2016 2017 /* 2018 * Try this zone's free list first so we don't allocate extra buckets. 2019 */ 2020 if ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) { 2021 KASSERT(bucket->ub_cnt == 0, 2022 ("uma_zalloc_bucket: Bucket on free list is not empty.")); 2023 LIST_REMOVE(bucket, ub_link); 2024 } else { 2025 int bflags; 2026 2027 bflags = (flags & ~M_ZERO); 2028 if (zone->uz_keg->uk_flags & UMA_ZFLAG_CACHEONLY) 2029 bflags |= M_NOVM; 2030 2031 ZONE_UNLOCK(zone); 2032 bucket = bucket_alloc(zone->uz_count, bflags); 2033 ZONE_LOCK(zone); 2034 } 2035 2036 if (bucket == NULL) 2037 return (0); 2038 2039 #ifdef SMP 2040 /* 2041 * This code is here to limit the number of simultaneous bucket fills 2042 * for any given zone to the number of per cpu caches in this zone. This 2043 * is done so that we don't allocate more memory than we really need. 2044 */ 2045 if (zone->uz_fills >= mp_ncpus) 2046 goto done; 2047 2048 #endif 2049 zone->uz_fills++; 2050 2051 max = MIN(bucket->ub_entries, zone->uz_count); 2052 /* Try to keep the buckets totally full */ 2053 saved = bucket->ub_cnt; 2054 while (bucket->ub_cnt < max && 2055 (slab = uma_zone_slab(zone, flags)) != NULL) { 2056 while (slab->us_freecount && bucket->ub_cnt < max) { 2057 bucket->ub_bucket[bucket->ub_cnt++] = 2058 uma_slab_alloc(zone, slab); 2059 } 2060 2061 /* Don't block on the next fill */ 2062 flags |= M_NOWAIT; 2063 } 2064 2065 /* 2066 * We unlock here because we need to call the zone's init. 2067 * It should be safe to unlock because the slab dealt with 2068 * above is already on the appropriate list within the keg 2069 * and the bucket we filled is not yet on any list, so we 2070 * own it. 
2071 */ 2072 if (zone->uz_init != NULL) { 2073 int i; 2074 2075 ZONE_UNLOCK(zone); 2076 for (i = saved; i < bucket->ub_cnt; i++) 2077 if (zone->uz_init(bucket->ub_bucket[i], 2078 zone->uz_keg->uk_size, origflags) != 0) 2079 break; 2080 /* 2081 * If we couldn't initialize the whole bucket, put the 2082 * rest back onto the freelist. 2083 */ 2084 if (i != bucket->ub_cnt) { 2085 int j; 2086 2087 for (j = i; j < bucket->ub_cnt; j++) 2088 uma_zfree_internal(zone, bucket->ub_bucket[j], 2089 NULL, SKIP_FINI); 2090 bucket->ub_cnt = i; 2091 } 2092 ZONE_LOCK(zone); 2093 } 2094 2095 zone->uz_fills--; 2096 if (bucket->ub_cnt != 0) { 2097 LIST_INSERT_HEAD(&zone->uz_full_bucket, 2098 bucket, ub_link); 2099 return (1); 2100 } 2101 #ifdef SMP 2102 done: 2103 #endif 2104 bucket_free(bucket); 2105 2106 return (0); 2107 } 2108 /* 2109 * Allocates an item for an internal zone 2110 * 2111 * Arguments 2112 * zone The zone to alloc for. 2113 * udata The data to be passed to the constructor. 2114 * flags M_WAITOK, M_NOWAIT, M_ZERO. 2115 * 2116 * Returns 2117 * NULL if there is no memory and M_NOWAIT is set 2118 * An item if successful 2119 */ 2120 2121 static void * 2122 uma_zalloc_internal(uma_zone_t zone, void *udata, int flags) 2123 { 2124 uma_keg_t keg; 2125 uma_slab_t slab; 2126 void *item; 2127 2128 item = NULL; 2129 keg = zone->uz_keg; 2130 2131 #ifdef UMA_DEBUG_ALLOC 2132 printf("INTERNAL: Allocating one item from %s(%p)\n", zone->uz_name, zone); 2133 #endif 2134 ZONE_LOCK(zone); 2135 2136 slab = uma_zone_slab(zone, flags); 2137 if (slab == NULL) { 2138 ZONE_UNLOCK(zone); 2139 return (NULL); 2140 } 2141 2142 item = uma_slab_alloc(zone, slab); 2143 2144 ZONE_UNLOCK(zone); 2145 2146 /* 2147 * We have to call both the zone's init (not the keg's init) 2148 * and the zone's ctor. This is because the item is going from 2149 * a keg slab directly to the user, and the user is expecting it 2150 * to be both zone-init'd as well as zone-ctor'd. 2151 */ 2152 if (zone->uz_init != NULL) { 2153 if (zone->uz_init(item, keg->uk_size, flags) != 0) { 2154 uma_zfree_internal(zone, item, udata, SKIP_FINI); 2155 return (NULL); 2156 } 2157 } 2158 if (zone->uz_ctor != NULL) { 2159 if (zone->uz_ctor(item, keg->uk_size, udata, flags) != 0) { 2160 uma_zfree_internal(zone, item, udata, SKIP_DTOR); 2161 return (NULL); 2162 } 2163 } 2164 if (flags & M_ZERO) 2165 bzero(item, keg->uk_size); 2166 2167 return (item); 2168 } 2169 2170 /* See uma.h */ 2171 void 2172 uma_zfree_arg(uma_zone_t zone, void *item, void *udata) 2173 { 2174 uma_keg_t keg; 2175 uma_cache_t cache; 2176 uma_bucket_t bucket; 2177 int bflags; 2178 int cpu; 2179 enum zfreeskip skip; 2180 2181 /* This is the fast path free */ 2182 skip = SKIP_NONE; 2183 keg = zone->uz_keg; 2184 2185 #ifdef UMA_DEBUG_ALLOC_1 2186 printf("Freeing item %p to %s(%p)\n", item, zone->uz_name, zone); 2187 #endif 2188 CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread, 2189 zone->uz_name); 2190 2191 /* 2192 * The race here is acceptable. If we miss it we'll just have to wait 2193 * a little longer for the limits to be reset. 2194 */ 2195 2196 if (keg->uk_flags & UMA_ZFLAG_FULL) 2197 goto zfree_internal; 2198 2199 if (zone->uz_dtor) { 2200 zone->uz_dtor(item, keg->uk_size, udata); 2201 skip = SKIP_DTOR; 2202 } 2203 2204 zfree_restart: 2205 cpu = PCPU_GET(cpuid); 2206 CPU_LOCK(cpu); 2207 cache = &zone->uz_cpu[cpu]; 2208 2209 zfree_start: 2210 bucket = cache->uc_freebucket; 2211 2212 if (bucket) { 2213 /* 2214 * Do we have room in our bucket? 
It is OK for this uz count 2215 * check to be slightly out of sync. 2216 */ 2217 2218 if (bucket->ub_cnt < bucket->ub_entries) { 2219 KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL, 2220 ("uma_zfree: Freeing to non free bucket index.")); 2221 bucket->ub_bucket[bucket->ub_cnt] = item; 2222 bucket->ub_cnt++; 2223 #ifdef INVARIANTS 2224 ZONE_LOCK(zone); 2225 if (keg->uk_flags & UMA_ZONE_MALLOC) 2226 uma_dbg_free(zone, udata, item); 2227 else 2228 uma_dbg_free(zone, NULL, item); 2229 ZONE_UNLOCK(zone); 2230 #endif 2231 CPU_UNLOCK(cpu); 2232 return; 2233 } else if (cache->uc_allocbucket) { 2234 #ifdef UMA_DEBUG_ALLOC 2235 printf("uma_zfree: Swapping buckets.\n"); 2236 #endif 2237 /* 2238 * We have run out of space in our freebucket. 2239 * See if we can switch with our alloc bucket. 2240 */ 2241 if (cache->uc_allocbucket->ub_cnt < 2242 cache->uc_freebucket->ub_cnt) { 2243 bucket = cache->uc_freebucket; 2244 cache->uc_freebucket = cache->uc_allocbucket; 2245 cache->uc_allocbucket = bucket; 2246 goto zfree_start; 2247 } 2248 } 2249 } 2250 /* 2251 * We can get here for two reasons: 2252 * 2253 * 1) The buckets are NULL 2254 * 2) The alloc and free buckets are both somewhat full. 2255 */ 2256 2257 ZONE_LOCK(zone); 2258 2259 bucket = cache->uc_freebucket; 2260 cache->uc_freebucket = NULL; 2261 2262 /* Can we throw this on the zone full list? */ 2263 if (bucket != NULL) { 2264 #ifdef UMA_DEBUG_ALLOC 2265 printf("uma_zfree: Putting old bucket on the free list.\n"); 2266 #endif 2267 /* ub_cnt is pointing to the last free item */ 2268 KASSERT(bucket->ub_cnt != 0, 2269 ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n")); 2270 LIST_INSERT_HEAD(&zone->uz_full_bucket, 2271 bucket, ub_link); 2272 } 2273 if ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) { 2274 LIST_REMOVE(bucket, ub_link); 2275 ZONE_UNLOCK(zone); 2276 cache->uc_freebucket = bucket; 2277 goto zfree_start; 2278 } 2279 /* We're done with this CPU now */ 2280 CPU_UNLOCK(cpu); 2281 2282 /* And the zone.. */ 2283 ZONE_UNLOCK(zone); 2284 2285 #ifdef UMA_DEBUG_ALLOC 2286 printf("uma_zfree: Allocating new free bucket.\n"); 2287 #endif 2288 bflags = M_NOWAIT; 2289 2290 if (keg->uk_flags & UMA_ZFLAG_CACHEONLY) 2291 bflags |= M_NOVM; 2292 bucket = bucket_alloc(zone->uz_count, bflags); 2293 if (bucket) { 2294 ZONE_LOCK(zone); 2295 LIST_INSERT_HEAD(&zone->uz_free_bucket, 2296 bucket, ub_link); 2297 ZONE_UNLOCK(zone); 2298 goto zfree_restart; 2299 } 2300 2301 /* 2302 * If nothing else caught this, we'll just do an internal free. 2303 */ 2304 2305 zfree_internal: 2306 2307 #ifdef INVARIANTS 2308 /* 2309 * If we need to skip the dtor and the uma_dbg_free in 2310 * uma_zfree_internal because we've already called the dtor 2311 * above, but we ended up here, then we need to make sure 2312 * that we take care of the uma_dbg_free immediately. 
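 * (uma_zfree_internal() only runs its own uma_dbg_free() when skip is
 * SKIP_NONE, so the check would otherwise be lost for items whose dtor
 * has already run.)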
2313 */ 2314 if (skip) { 2315 ZONE_LOCK(zone); 2316 if (keg->uk_flags & UMA_ZONE_MALLOC) 2317 uma_dbg_free(zone, udata, item); 2318 else 2319 uma_dbg_free(zone, NULL, item); 2320 ZONE_UNLOCK(zone); 2321 } 2322 #endif 2323 uma_zfree_internal(zone, item, udata, skip); 2324 2325 return; 2326 } 2327 2328 /* 2329 * Frees an item to an INTERNAL zone or allocates a free bucket 2330 * 2331 * Arguments: 2332 * zone The zone to free to 2333 * item The item we're freeing 2334 * udata User supplied data for the dtor 2335 * skip Skip dtors and finis 2336 */ 2337 static void 2338 uma_zfree_internal(uma_zone_t zone, void *item, void *udata, 2339 enum zfreeskip skip) 2340 { 2341 uma_slab_t slab; 2342 uma_keg_t keg; 2343 u_int8_t *mem; 2344 u_int8_t freei; 2345 2346 keg = zone->uz_keg; 2347 2348 if (skip < SKIP_DTOR && zone->uz_dtor) 2349 zone->uz_dtor(item, keg->uk_size, udata); 2350 if (skip < SKIP_FINI && zone->uz_fini) 2351 zone->uz_fini(item, keg->uk_size); 2352 2353 ZONE_LOCK(zone); 2354 2355 if (!(keg->uk_flags & UMA_ZONE_MALLOC)) { 2356 mem = (u_int8_t *)((unsigned long)item & (~UMA_SLAB_MASK)); 2357 if (keg->uk_flags & UMA_ZONE_HASH) 2358 slab = hash_sfind(&keg->uk_hash, mem); 2359 else { 2360 mem += keg->uk_pgoff; 2361 slab = (uma_slab_t)mem; 2362 } 2363 } else { 2364 slab = (uma_slab_t)udata; 2365 } 2366 2367 /* Do we need to remove from any lists? */ 2368 if (slab->us_freecount+1 == keg->uk_ipers) { 2369 LIST_REMOVE(slab, us_link); 2370 LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link); 2371 } else if (slab->us_freecount == 0) { 2372 LIST_REMOVE(slab, us_link); 2373 LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link); 2374 } 2375 2376 /* Slab management stuff */ 2377 freei = ((unsigned long)item - (unsigned long)slab->us_data) 2378 / keg->uk_rsize; 2379 2380 #ifdef INVARIANTS 2381 if (!skip) 2382 uma_dbg_free(zone, slab, item); 2383 #endif 2384 2385 slab->us_freelist[freei].us_item = slab->us_firstfree; 2386 slab->us_firstfree = freei; 2387 slab->us_freecount++; 2388 2389 /* Zone statistics */ 2390 keg->uk_free++; 2391 2392 if (keg->uk_flags & UMA_ZFLAG_FULL) { 2393 if (keg->uk_pages < keg->uk_maxpages) 2394 keg->uk_flags &= ~UMA_ZFLAG_FULL; 2395 2396 /* We can handle one more allocation */ 2397 wakeup_one(keg); 2398 } 2399 2400 ZONE_UNLOCK(zone); 2401 } 2402 2403 /* See uma.h */ 2404 void 2405 uma_zone_set_max(uma_zone_t zone, int nitems) 2406 { 2407 uma_keg_t keg; 2408 2409 keg = zone->uz_keg; 2410 ZONE_LOCK(zone); 2411 if (keg->uk_ppera > 1) 2412 keg->uk_maxpages = nitems * keg->uk_ppera; 2413 else 2414 keg->uk_maxpages = nitems / keg->uk_ipers; 2415 2416 if (keg->uk_maxpages * keg->uk_ipers < nitems) 2417 keg->uk_maxpages++; 2418 2419 ZONE_UNLOCK(zone); 2420 } 2421 2422 /* See uma.h */ 2423 void 2424 uma_zone_set_init(uma_zone_t zone, uma_init uminit) 2425 { 2426 ZONE_LOCK(zone); 2427 KASSERT(zone->uz_keg->uk_pages == 0, 2428 ("uma_zone_set_init on non-empty keg")); 2429 zone->uz_keg->uk_init = uminit; 2430 ZONE_UNLOCK(zone); 2431 } 2432 2433 /* See uma.h */ 2434 void 2435 uma_zone_set_fini(uma_zone_t zone, uma_fini fini) 2436 { 2437 ZONE_LOCK(zone); 2438 KASSERT(zone->uz_keg->uk_pages == 0, 2439 ("uma_zone_set_fini on non-empty keg")); 2440 zone->uz_keg->uk_fini = fini; 2441 ZONE_UNLOCK(zone); 2442 } 2443 2444 /* See uma.h */ 2445 void 2446 uma_zone_set_zinit(uma_zone_t zone, uma_init zinit) 2447 { 2448 ZONE_LOCK(zone); 2449 KASSERT(zone->uz_keg->uk_pages == 0, 2450 ("uma_zone_set_zinit on non-empty keg")); 2451 zone->uz_init = zinit; 2452 ZONE_UNLOCK(zone); 2453 } 2454 2455 /* See uma.h 
*/
2456 void
2457 uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
2458 {
2459 ZONE_LOCK(zone);
2460 KASSERT(zone->uz_keg->uk_pages == 0,
2461 ("uma_zone_set_zfini on non-empty keg"));
2462 zone->uz_fini = zfini;
2463 ZONE_UNLOCK(zone);
2464 }
2465
2466 /* See uma.h */
2467 /* XXX uk_freef is not actually used with the zone locked */
2468 void
2469 uma_zone_set_freef(uma_zone_t zone, uma_free freef)
2470 {
2471 ZONE_LOCK(zone);
2472 zone->uz_keg->uk_freef = freef;
2473 ZONE_UNLOCK(zone);
2474 }
2475
2476 /* See uma.h */
2477 /* XXX uk_allocf is not actually used with the zone locked */
2478 void
2479 uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
2480 {
2481 ZONE_LOCK(zone);
2482 zone->uz_keg->uk_flags |= UMA_ZFLAG_PRIVALLOC;
2483 zone->uz_keg->uk_allocf = allocf;
2484 ZONE_UNLOCK(zone);
2485 }
2486
2487 /* See uma.h */
2488 int
2489 uma_zone_set_obj(uma_zone_t zone, struct vm_object *obj, int count)
2490 {
2491 uma_keg_t keg;
2492 vm_offset_t kva;
2493 int pages;
2494
2495 keg = zone->uz_keg;
2496 pages = count / keg->uk_ipers;
2497
2498 if (pages * keg->uk_ipers < count)
2499 pages++;
2500
2501 kva = kmem_alloc_nofault(kernel_map, pages * UMA_SLAB_SIZE);
2502
2503 if (kva == 0)
2504 return (0);
2505 if (obj == NULL) {
2506 obj = vm_object_allocate(OBJT_DEFAULT,
2507 pages);
2508 } else {
2509 VM_OBJECT_LOCK_INIT(obj, "uma object");
2510 _vm_object_allocate(OBJT_DEFAULT,
2511 pages, obj);
2512 }
2513 ZONE_LOCK(zone);
2514 keg->uk_kva = kva;
2515 keg->uk_obj = obj;
2516 keg->uk_maxpages = pages;
2517 keg->uk_allocf = obj_alloc;
2518 keg->uk_flags |= UMA_ZONE_NOFREE | UMA_ZFLAG_PRIVALLOC;
2519 ZONE_UNLOCK(zone);
2520 return (1);
2521 }
2522
2523 /* See uma.h */
2524 void
2525 uma_prealloc(uma_zone_t zone, int items)
2526 {
2527 int slabs;
2528 uma_slab_t slab;
2529 uma_keg_t keg;
2530
2531 keg = zone->uz_keg;
2532 ZONE_LOCK(zone);
2533 slabs = items / keg->uk_ipers;
2534 if (slabs * keg->uk_ipers < items)
2535 slabs++;
2536 while (slabs > 0) {
2537 slab = slab_zalloc(zone, M_WAITOK);
2538 LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
2539 slabs--;
2540 }
2541 ZONE_UNLOCK(zone);
2542 }
2543
2544 /* See uma.h */
2545 u_int32_t *
2546 uma_find_refcnt(uma_zone_t zone, void *item)
2547 {
2548 uma_slabrefcnt_t slab;
2549 uma_keg_t keg;
2550 u_int32_t *refcnt;
2551 int idx;
2552
2553 keg = zone->uz_keg;
2554 slab = (uma_slabrefcnt_t)vtoslab((vm_offset_t)item & (~UMA_SLAB_MASK));
2555 KASSERT(slab != NULL,
2556 ("uma_find_refcnt(): zone possibly not UMA_ZONE_REFCNT"));
2557 idx = ((unsigned long)item - (unsigned long)slab->us_data)
2558 / keg->uk_rsize;
2559 refcnt = &(slab->us_freelist[idx].us_refcnt);
2560 return (refcnt);
2561 }
2562
2563 /* See uma.h */
2564 void
2565 uma_reclaim(void)
2566 {
2567 #ifdef UMA_DEBUG
2568 printf("UMA: vm asked us to release pages!\n");
2569 #endif
2570 bucket_enable();
2571 zone_foreach(zone_drain);
2572 /*
2573 * Some slabs may have been freed, but this zone was visited early in the
2574 * pass above; drain it again so that pages which became empty once the
2575 * other zones were drained can be freed. We have to do the same for buckets.
2576 */ 2577 zone_drain(slabzone); 2578 zone_drain(slabrefzone); 2579 bucket_zone_drain(); 2580 } 2581 2582 void * 2583 uma_large_malloc(int size, int wait) 2584 { 2585 void *mem; 2586 uma_slab_t slab; 2587 u_int8_t flags; 2588 2589 slab = uma_zalloc_internal(slabzone, NULL, wait); 2590 if (slab == NULL) 2591 return (NULL); 2592 mem = page_alloc(NULL, size, &flags, wait); 2593 if (mem) { 2594 vsetslab((vm_offset_t)mem, slab); 2595 slab->us_data = mem; 2596 slab->us_flags = flags | UMA_SLAB_MALLOC; 2597 slab->us_size = size; 2598 } else { 2599 uma_zfree_internal(slabzone, slab, NULL, 0); 2600 } 2601 2602 return (mem); 2603 } 2604 2605 void 2606 uma_large_free(uma_slab_t slab) 2607 { 2608 vsetobj((vm_offset_t)slab->us_data, kmem_object); 2609 page_free(slab->us_data, slab->us_size, slab->us_flags); 2610 uma_zfree_internal(slabzone, slab, NULL, 0); 2611 } 2612 2613 void 2614 uma_print_stats(void) 2615 { 2616 zone_foreach(uma_print_zone); 2617 } 2618 2619 static void 2620 slab_print(uma_slab_t slab) 2621 { 2622 printf("slab: keg %p, data %p, freecount %d, firstfree %d\n", 2623 slab->us_keg, slab->us_data, slab->us_freecount, 2624 slab->us_firstfree); 2625 } 2626 2627 static void 2628 cache_print(uma_cache_t cache) 2629 { 2630 printf("alloc: %p(%d), free: %p(%d)\n", 2631 cache->uc_allocbucket, 2632 cache->uc_allocbucket?cache->uc_allocbucket->ub_cnt:0, 2633 cache->uc_freebucket, 2634 cache->uc_freebucket?cache->uc_freebucket->ub_cnt:0); 2635 } 2636 2637 void 2638 uma_print_zone(uma_zone_t zone) 2639 { 2640 uma_cache_t cache; 2641 uma_keg_t keg; 2642 uma_slab_t slab; 2643 int i; 2644 2645 keg = zone->uz_keg; 2646 printf("%s(%p) size %d(%d) flags %d ipers %d ppera %d out %d free %d\n", 2647 zone->uz_name, zone, keg->uk_size, keg->uk_rsize, keg->uk_flags, 2648 keg->uk_ipers, keg->uk_ppera, 2649 (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free); 2650 printf("Part slabs:\n"); 2651 LIST_FOREACH(slab, &keg->uk_part_slab, us_link) 2652 slab_print(slab); 2653 printf("Free slabs:\n"); 2654 LIST_FOREACH(slab, &keg->uk_free_slab, us_link) 2655 slab_print(slab); 2656 printf("Full slabs:\n"); 2657 LIST_FOREACH(slab, &keg->uk_full_slab, us_link) 2658 slab_print(slab); 2659 for (i = 0; i <= mp_maxid; i++) { 2660 if (CPU_ABSENT(i)) 2661 continue; 2662 cache = &zone->uz_cpu[i]; 2663 printf("CPU %d Cache:\n", i); 2664 cache_print(cache); 2665 } 2666 } 2667 2668 /* 2669 * Sysctl handler for vm.zone 2670 * 2671 * stolen from vm_zone.c 2672 */ 2673 static int 2674 sysctl_vm_zone(SYSCTL_HANDLER_ARGS) 2675 { 2676 int error, len, cnt; 2677 const int linesize = 128; /* conservative */ 2678 int totalfree; 2679 char *tmpbuf, *offset; 2680 uma_zone_t z; 2681 uma_keg_t zk; 2682 char *p; 2683 int cpu; 2684 int cachefree; 2685 uma_bucket_t bucket; 2686 uma_cache_t cache; 2687 2688 cnt = 0; 2689 mtx_lock(&uma_mtx); 2690 LIST_FOREACH(zk, &uma_kegs, uk_link) { 2691 LIST_FOREACH(z, &zk->uk_zones, uz_link) 2692 cnt++; 2693 } 2694 mtx_unlock(&uma_mtx); 2695 MALLOC(tmpbuf, char *, (cnt == 0 ? 1 : cnt) * linesize, 2696 M_TEMP, M_WAITOK); 2697 len = snprintf(tmpbuf, linesize, 2698 "\nITEM SIZE LIMIT USED FREE REQUESTS\n\n"); 2699 if (cnt == 0) 2700 tmpbuf[len - 1] = '\0'; 2701 error = SYSCTL_OUT(req, tmpbuf, cnt == 0 ? 
len-1 : len); 2702 if (error || cnt == 0) 2703 goto out; 2704 offset = tmpbuf; 2705 mtx_lock(&uma_mtx); 2706 LIST_FOREACH(zk, &uma_kegs, uk_link) { 2707 LIST_FOREACH(z, &zk->uk_zones, uz_link) { 2708 if (cnt == 0) /* list may have changed size */ 2709 break; 2710 if (!(zk->uk_flags & UMA_ZFLAG_INTERNAL)) { 2711 for (cpu = 0; cpu <= mp_maxid; cpu++) { 2712 if (CPU_ABSENT(cpu)) 2713 continue; 2714 CPU_LOCK(cpu); 2715 } 2716 } 2717 ZONE_LOCK(z); 2718 cachefree = 0; 2719 if (!(zk->uk_flags & UMA_ZFLAG_INTERNAL)) { 2720 for (cpu = 0; cpu <= mp_maxid; cpu++) { 2721 if (CPU_ABSENT(cpu)) 2722 continue; 2723 cache = &z->uz_cpu[cpu]; 2724 if (cache->uc_allocbucket != NULL) 2725 cachefree += cache->uc_allocbucket->ub_cnt; 2726 if (cache->uc_freebucket != NULL) 2727 cachefree += cache->uc_freebucket->ub_cnt; 2728 CPU_UNLOCK(cpu); 2729 } 2730 } 2731 LIST_FOREACH(bucket, &z->uz_full_bucket, ub_link) { 2732 cachefree += bucket->ub_cnt; 2733 } 2734 totalfree = zk->uk_free + cachefree; 2735 len = snprintf(offset, linesize, 2736 "%-12.12s %6.6u, %8.8u, %6.6u, %6.6u, %8.8llu\n", 2737 z->uz_name, zk->uk_size, 2738 zk->uk_maxpages * zk->uk_ipers, 2739 (zk->uk_ipers * (zk->uk_pages / zk->uk_ppera)) - totalfree, 2740 totalfree, 2741 (unsigned long long)z->uz_allocs); 2742 ZONE_UNLOCK(z); 2743 for (p = offset + 12; p > offset && *p == ' '; --p) 2744 /* nothing */ ; 2745 p[1] = ':'; 2746 cnt--; 2747 offset += len; 2748 } 2749 } 2750 mtx_unlock(&uma_mtx); 2751 *offset++ = '\0'; 2752 error = SYSCTL_OUT(req, tmpbuf, offset - tmpbuf); 2753 out: 2754 FREE(tmpbuf, M_TEMP); 2755 return (error); 2756 } 2757
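
/*
 * Rough usage sketch ("foo" names are placeholders): a consumer typically
 * creates a zone with uma_zcreate() and then applies the knobs defined
 * above before allocating from it, e.g.
 *
 *	foo_zone = uma_zcreate("foo", sizeof(struct foo), foo_ctor, foo_dtor,
 *	    NULL, NULL, UMA_ALIGN_PTR, 0);
 *	uma_zone_set_max(foo_zone, maxfoo);
 *	uma_prealloc(foo_zone, 64);
 *
 *	p = uma_zalloc(foo_zone, M_WAITOK | M_ZERO);
 *	...
 *	uma_zfree(foo_zone, p);
 */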