1 /*- 2 * Copyright (c) 2004-2005 Robert N. M. Watson 3 * Copyright (c) 2004, 2005, 4 * Bosko Milekic <bmilekic@FreeBSD.org>. All rights reserved. 5 * Copyright (c) 2002, 2003, 2004, 2005, 6 * Jeffrey Roberson <jeff@FreeBSD.org>. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice unmodified, this list of conditions, and the following 13 * disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 */ 29 30 /* 31 * uma_core.c Implementation of the Universal Memory allocator 32 * 33 * This allocator is intended to replace the multitude of similar object caches 34 * in the standard FreeBSD kernel. The intent is to be flexible as well as 35 * effecient. A primary design goal is to return unused memory to the rest of 36 * the system. This will make the system as a whole more flexible due to the 37 * ability to move memory to subsystems which most need it instead of leaving 38 * pools of reserved memory unused. 39 * 40 * The basic ideas stem from similar slab/zone based allocators whose algorithms 41 * are well known. 42 * 43 */ 44 45 /* 46 * TODO: 47 * - Improve memory usage for large allocations 48 * - Investigate cache size adjustments 49 */ 50 51 #include <sys/cdefs.h> 52 __FBSDID("$FreeBSD$"); 53 54 /* I should really use ktr.. */ 55 /* 56 #define UMA_DEBUG 1 57 #define UMA_DEBUG_ALLOC 1 58 #define UMA_DEBUG_ALLOC_1 1 59 */ 60 61 #include "opt_param.h" 62 #include <sys/param.h> 63 #include <sys/systm.h> 64 #include <sys/kernel.h> 65 #include <sys/types.h> 66 #include <sys/queue.h> 67 #include <sys/malloc.h> 68 #include <sys/ktr.h> 69 #include <sys/lock.h> 70 #include <sys/sysctl.h> 71 #include <sys/mutex.h> 72 #include <sys/proc.h> 73 #include <sys/smp.h> 74 #include <sys/vmmeter.h> 75 76 #include <vm/vm.h> 77 #include <vm/vm_object.h> 78 #include <vm/vm_page.h> 79 #include <vm/vm_param.h> 80 #include <vm/vm_map.h> 81 #include <vm/vm_kern.h> 82 #include <vm/vm_extern.h> 83 #include <vm/uma.h> 84 #include <vm/uma_int.h> 85 #include <vm/uma_dbg.h> 86 87 #include <machine/vmparam.h> 88 89 /* 90 * This is the zone and keg from which all zones are spawned. The idea is that 91 * even the zone & keg heads are allocated from the allocator, so we use the 92 * bss section to bootstrap us. 93 */ 94 static struct uma_keg masterkeg; 95 static struct uma_zone masterzone_k; 96 static struct uma_zone masterzone_z; 97 static uma_zone_t kegs = &masterzone_k; 98 static uma_zone_t zones = &masterzone_z; 99 100 /* This is the zone from which all of uma_slab_t's are allocated. */ 101 static uma_zone_t slabzone; 102 static uma_zone_t slabrefzone; /* With refcounters (for UMA_ZONE_REFCNT) */ 103 104 /* 105 * The initial hash tables come out of this zone so they can be allocated 106 * prior to malloc coming up. 107 */ 108 static uma_zone_t hashzone; 109 110 static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets"); 111 112 /* 113 * Are we allowed to allocate buckets? 114 */ 115 static int bucketdisable = 1; 116 117 /* Linked list of all kegs in the system */ 118 static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(&uma_kegs); 119 120 /* This mutex protects the keg list */ 121 static struct mtx uma_mtx; 122 123 /* Linked list of boot time pages */ 124 static LIST_HEAD(,uma_slab) uma_boot_pages = 125 LIST_HEAD_INITIALIZER(&uma_boot_pages); 126 127 /* Count of free boottime pages */ 128 static int uma_boot_free = 0; 129 130 /* Is the VM done starting up? */ 131 static int booted = 0; 132 133 /* Maximum number of allowed items-per-slab if the slab header is OFFPAGE */ 134 static u_int uma_max_ipers; 135 static u_int uma_max_ipers_ref; 136 137 /* 138 * This is the handle used to schedule events that need to happen 139 * outside of the allocation fast path. 140 */ 141 static struct callout uma_callout; 142 #define UMA_TIMEOUT 20 /* Seconds for callout interval. */ 143 144 /* 145 * This structure is passed as the zone ctor arg so that I don't have to create 146 * a special allocation function just for zones. 147 */ 148 struct uma_zctor_args { 149 char *name; 150 size_t size; 151 uma_ctor ctor; 152 uma_dtor dtor; 153 uma_init uminit; 154 uma_fini fini; 155 uma_keg_t keg; 156 int align; 157 u_int16_t flags; 158 }; 159 160 struct uma_kctor_args { 161 uma_zone_t zone; 162 size_t size; 163 uma_init uminit; 164 uma_fini fini; 165 int align; 166 u_int16_t flags; 167 }; 168 169 struct uma_bucket_zone { 170 uma_zone_t ubz_zone; 171 char *ubz_name; 172 int ubz_entries; 173 }; 174 175 #define BUCKET_MAX 128 176 177 struct uma_bucket_zone bucket_zones[] = { 178 { NULL, "16 Bucket", 16 }, 179 { NULL, "32 Bucket", 32 }, 180 { NULL, "64 Bucket", 64 }, 181 { NULL, "128 Bucket", 128 }, 182 { NULL, NULL, 0} 183 }; 184 185 #define BUCKET_SHIFT 4 186 #define BUCKET_ZONES ((BUCKET_MAX >> BUCKET_SHIFT) + 1) 187 188 /* 189 * bucket_size[] maps requested bucket sizes to zones that allocate a bucket 190 * of approximately the right size. 191 */ 192 static uint8_t bucket_size[BUCKET_ZONES]; 193 194 enum zfreeskip { SKIP_NONE, SKIP_DTOR, SKIP_FINI }; 195 196 /* Prototypes.. */ 197 198 static void *obj_alloc(uma_zone_t, int, u_int8_t *, int); 199 static void *page_alloc(uma_zone_t, int, u_int8_t *, int); 200 static void *startup_alloc(uma_zone_t, int, u_int8_t *, int); 201 static void page_free(void *, int, u_int8_t); 202 static uma_slab_t slab_zalloc(uma_zone_t, int); 203 static void cache_drain(uma_zone_t); 204 static void bucket_drain(uma_zone_t, uma_bucket_t); 205 static void bucket_cache_drain(uma_zone_t zone); 206 static int keg_ctor(void *, int, void *, int); 207 static void keg_dtor(void *, int, void *); 208 static int zone_ctor(void *, int, void *, int); 209 static void zone_dtor(void *, int, void *); 210 static int zero_init(void *, int, int); 211 static void zone_small_init(uma_zone_t zone); 212 static void zone_large_init(uma_zone_t zone); 213 static void zone_foreach(void (*zfunc)(uma_zone_t)); 214 static void zone_timeout(uma_zone_t zone); 215 static int hash_alloc(struct uma_hash *); 216 static int hash_expand(struct uma_hash *, struct uma_hash *); 217 static void hash_free(struct uma_hash *hash); 218 static void uma_timeout(void *); 219 static void uma_startup3(void); 220 static void *uma_zalloc_internal(uma_zone_t, void *, int); 221 static void uma_zfree_internal(uma_zone_t, void *, void *, enum zfreeskip); 222 static void bucket_enable(void); 223 static void bucket_init(void); 224 static uma_bucket_t bucket_alloc(int, int); 225 static void bucket_free(uma_bucket_t); 226 static void bucket_zone_drain(void); 227 static int uma_zalloc_bucket(uma_zone_t zone, int flags); 228 static uma_slab_t uma_zone_slab(uma_zone_t zone, int flags); 229 static void *uma_slab_alloc(uma_zone_t zone, uma_slab_t slab); 230 static void zone_drain(uma_zone_t); 231 static uma_zone_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, 232 uma_fini fini, int align, u_int16_t flags); 233 234 void uma_print_zone(uma_zone_t); 235 void uma_print_stats(void); 236 static int sysctl_vm_zone(SYSCTL_HANDLER_ARGS); 237 238 #ifdef WITNESS 239 static int nosleepwithlocks = 1; 240 SYSCTL_INT(_debug, OID_AUTO, nosleepwithlocks, CTLFLAG_RW, &nosleepwithlocks, 241 0, "Convert M_WAITOK to M_NOWAIT to avoid lock-held-across-sleep paths"); 242 #else 243 static int nosleepwithlocks = 0; 244 SYSCTL_INT(_debug, OID_AUTO, nosleepwithlocks, CTLFLAG_RW, &nosleepwithlocks, 245 0, "Convert M_WAITOK to M_NOWAIT to avoid lock-held-across-sleep paths"); 246 #endif 247 SYSCTL_OID(_vm, OID_AUTO, zone, CTLTYPE_STRING|CTLFLAG_RD, 248 NULL, 0, sysctl_vm_zone, "A", "Zone Info"); 249 SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL); 250 251 /* 252 * This routine checks to see whether or not it's safe to enable buckets. 253 */ 254 255 static void 256 bucket_enable(void) 257 { 258 if (cnt.v_free_count < cnt.v_free_min) 259 bucketdisable = 1; 260 else 261 bucketdisable = 0; 262 } 263 264 /* 265 * Initialize bucket_zones, the array of zones of buckets of various sizes. 266 * 267 * For each zone, calculate the memory required for each bucket, consisting 268 * of the header and an array of pointers. Initialize bucket_size[] to point 269 * the range of appropriate bucket sizes at the zone. 270 */ 271 static void 272 bucket_init(void) 273 { 274 struct uma_bucket_zone *ubz; 275 int i; 276 int j; 277 278 for (i = 0, j = 0; bucket_zones[j].ubz_entries != 0; j++) { 279 int size; 280 281 ubz = &bucket_zones[j]; 282 size = roundup(sizeof(struct uma_bucket), sizeof(void *)); 283 size += sizeof(void *) * ubz->ubz_entries; 284 ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size, 285 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL); 286 for (; i <= ubz->ubz_entries; i += (1 << BUCKET_SHIFT)) 287 bucket_size[i >> BUCKET_SHIFT] = j; 288 } 289 } 290 291 /* 292 * Given a desired number of entries for a bucket, return the zone from which 293 * to allocate the bucket. 294 */ 295 static struct uma_bucket_zone * 296 bucket_zone_lookup(int entries) 297 { 298 int idx; 299 300 idx = howmany(entries, 1 << BUCKET_SHIFT); 301 return (&bucket_zones[bucket_size[idx]]); 302 } 303 304 static uma_bucket_t 305 bucket_alloc(int entries, int bflags) 306 { 307 struct uma_bucket_zone *ubz; 308 uma_bucket_t bucket; 309 310 /* 311 * This is to stop us from allocating per cpu buckets while we're 312 * running out of UMA_BOOT_PAGES. Otherwise, we would exhaust the 313 * boot pages. This also prevents us from allocating buckets in 314 * low memory situations. 315 */ 316 if (bucketdisable) 317 return (NULL); 318 319 ubz = bucket_zone_lookup(entries); 320 bucket = uma_zalloc_internal(ubz->ubz_zone, NULL, bflags); 321 if (bucket) { 322 #ifdef INVARIANTS 323 bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries); 324 #endif 325 bucket->ub_cnt = 0; 326 bucket->ub_entries = ubz->ubz_entries; 327 } 328 329 return (bucket); 330 } 331 332 static void 333 bucket_free(uma_bucket_t bucket) 334 { 335 struct uma_bucket_zone *ubz; 336 337 ubz = bucket_zone_lookup(bucket->ub_entries); 338 uma_zfree_internal(ubz->ubz_zone, bucket, NULL, SKIP_NONE); 339 } 340 341 static void 342 bucket_zone_drain(void) 343 { 344 struct uma_bucket_zone *ubz; 345 346 for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) 347 zone_drain(ubz->ubz_zone); 348 } 349 350 351 /* 352 * Routine called by timeout which is used to fire off some time interval 353 * based calculations. (stats, hash size, etc.) 354 * 355 * Arguments: 356 * arg Unused 357 * 358 * Returns: 359 * Nothing 360 */ 361 static void 362 uma_timeout(void *unused) 363 { 364 bucket_enable(); 365 zone_foreach(zone_timeout); 366 367 /* Reschedule this event */ 368 callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL); 369 } 370 371 /* 372 * Routine to perform timeout driven calculations. This expands the 373 * hashes and does per cpu statistics aggregation. 374 * 375 * Arguments: 376 * zone The zone to operate on 377 * 378 * Returns: 379 * Nothing 380 */ 381 static void 382 zone_timeout(uma_zone_t zone) 383 { 384 uma_keg_t keg; 385 u_int64_t alloc; 386 387 keg = zone->uz_keg; 388 alloc = 0; 389 390 /* 391 * Expand the zone hash table. 392 * 393 * This is done if the number of slabs is larger than the hash size. 394 * What I'm trying to do here is completely reduce collisions. This 395 * may be a little aggressive. Should I allow for two collisions max? 396 */ 397 ZONE_LOCK(zone); 398 if (keg->uk_flags & UMA_ZONE_HASH && 399 keg->uk_pages / keg->uk_ppera >= keg->uk_hash.uh_hashsize) { 400 struct uma_hash newhash; 401 struct uma_hash oldhash; 402 int ret; 403 404 /* 405 * This is so involved because allocating and freeing 406 * while the zone lock is held will lead to deadlock. 407 * I have to do everything in stages and check for 408 * races. 409 */ 410 newhash = keg->uk_hash; 411 ZONE_UNLOCK(zone); 412 ret = hash_alloc(&newhash); 413 ZONE_LOCK(zone); 414 if (ret) { 415 if (hash_expand(&keg->uk_hash, &newhash)) { 416 oldhash = keg->uk_hash; 417 keg->uk_hash = newhash; 418 } else 419 oldhash = newhash; 420 421 ZONE_UNLOCK(zone); 422 hash_free(&oldhash); 423 ZONE_LOCK(zone); 424 } 425 } 426 ZONE_UNLOCK(zone); 427 } 428 429 /* 430 * Allocate and zero fill the next sized hash table from the appropriate 431 * backing store. 432 * 433 * Arguments: 434 * hash A new hash structure with the old hash size in uh_hashsize 435 * 436 * Returns: 437 * 1 on sucess and 0 on failure. 438 */ 439 static int 440 hash_alloc(struct uma_hash *hash) 441 { 442 int oldsize; 443 int alloc; 444 445 oldsize = hash->uh_hashsize; 446 447 /* We're just going to go to a power of two greater */ 448 if (oldsize) { 449 hash->uh_hashsize = oldsize * 2; 450 alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize; 451 hash->uh_slab_hash = (struct slabhead *)malloc(alloc, 452 M_UMAHASH, M_NOWAIT); 453 } else { 454 alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT; 455 hash->uh_slab_hash = uma_zalloc_internal(hashzone, NULL, 456 M_WAITOK); 457 hash->uh_hashsize = UMA_HASH_SIZE_INIT; 458 } 459 if (hash->uh_slab_hash) { 460 bzero(hash->uh_slab_hash, alloc); 461 hash->uh_hashmask = hash->uh_hashsize - 1; 462 return (1); 463 } 464 465 return (0); 466 } 467 468 /* 469 * Expands the hash table for HASH zones. This is done from zone_timeout 470 * to reduce collisions. This must not be done in the regular allocation 471 * path, otherwise, we can recurse on the vm while allocating pages. 472 * 473 * Arguments: 474 * oldhash The hash you want to expand 475 * newhash The hash structure for the new table 476 * 477 * Returns: 478 * Nothing 479 * 480 * Discussion: 481 */ 482 static int 483 hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash) 484 { 485 uma_slab_t slab; 486 int hval; 487 int i; 488 489 if (!newhash->uh_slab_hash) 490 return (0); 491 492 if (oldhash->uh_hashsize >= newhash->uh_hashsize) 493 return (0); 494 495 /* 496 * I need to investigate hash algorithms for resizing without a 497 * full rehash. 498 */ 499 500 for (i = 0; i < oldhash->uh_hashsize; i++) 501 while (!SLIST_EMPTY(&oldhash->uh_slab_hash[i])) { 502 slab = SLIST_FIRST(&oldhash->uh_slab_hash[i]); 503 SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[i], us_hlink); 504 hval = UMA_HASH(newhash, slab->us_data); 505 SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval], 506 slab, us_hlink); 507 } 508 509 return (1); 510 } 511 512 /* 513 * Free the hash bucket to the appropriate backing store. 514 * 515 * Arguments: 516 * slab_hash The hash bucket we're freeing 517 * hashsize The number of entries in that hash bucket 518 * 519 * Returns: 520 * Nothing 521 */ 522 static void 523 hash_free(struct uma_hash *hash) 524 { 525 if (hash->uh_slab_hash == NULL) 526 return; 527 if (hash->uh_hashsize == UMA_HASH_SIZE_INIT) 528 uma_zfree_internal(hashzone, 529 hash->uh_slab_hash, NULL, SKIP_NONE); 530 else 531 free(hash->uh_slab_hash, M_UMAHASH); 532 } 533 534 /* 535 * Frees all outstanding items in a bucket 536 * 537 * Arguments: 538 * zone The zone to free to, must be unlocked. 539 * bucket The free/alloc bucket with items, cpu queue must be locked. 540 * 541 * Returns: 542 * Nothing 543 */ 544 545 static void 546 bucket_drain(uma_zone_t zone, uma_bucket_t bucket) 547 { 548 uma_slab_t slab; 549 int mzone; 550 void *item; 551 552 if (bucket == NULL) 553 return; 554 555 slab = NULL; 556 mzone = 0; 557 558 /* We have to lookup the slab again for malloc.. */ 559 if (zone->uz_keg->uk_flags & UMA_ZONE_MALLOC) 560 mzone = 1; 561 562 while (bucket->ub_cnt > 0) { 563 bucket->ub_cnt--; 564 item = bucket->ub_bucket[bucket->ub_cnt]; 565 #ifdef INVARIANTS 566 bucket->ub_bucket[bucket->ub_cnt] = NULL; 567 KASSERT(item != NULL, 568 ("bucket_drain: botched ptr, item is NULL")); 569 #endif 570 /* 571 * This is extremely inefficient. The slab pointer was passed 572 * to uma_zfree_arg, but we lost it because the buckets don't 573 * hold them. This will go away when free() gets a size passed 574 * to it. 575 */ 576 if (mzone) 577 slab = vtoslab((vm_offset_t)item & (~UMA_SLAB_MASK)); 578 uma_zfree_internal(zone, item, slab, SKIP_DTOR); 579 } 580 } 581 582 /* 583 * Drains the per cpu caches for a zone. 584 * 585 * NOTE: This may only be called while the zone is being turn down, and not 586 * during normal operation. This is necessary in order that we do not have 587 * to migrate CPUs to drain the per-CPU caches. 588 * 589 * Arguments: 590 * zone The zone to drain, must be unlocked. 591 * 592 * Returns: 593 * Nothing 594 */ 595 static void 596 cache_drain(uma_zone_t zone) 597 { 598 uma_cache_t cache; 599 int cpu; 600 601 /* 602 * XXX: It is safe to not lock the per-CPU caches, because we're 603 * tearing down the zone anyway. I.e., there will be no further use 604 * of the caches at this point. 605 * 606 * XXX: It would good to be able to assert that the zone is being 607 * torn down to prevent improper use of cache_drain(). 608 * 609 * XXX: We lock the zone before passing into bucket_cache_drain() as 610 * it is used elsewhere. Should the tear-down path be made special 611 * there in some form? 612 */ 613 for (cpu = 0; cpu <= mp_maxid; cpu++) { 614 if (CPU_ABSENT(cpu)) 615 continue; 616 cache = &zone->uz_cpu[cpu]; 617 bucket_drain(zone, cache->uc_allocbucket); 618 bucket_drain(zone, cache->uc_freebucket); 619 if (cache->uc_allocbucket != NULL) 620 bucket_free(cache->uc_allocbucket); 621 if (cache->uc_freebucket != NULL) 622 bucket_free(cache->uc_freebucket); 623 cache->uc_allocbucket = cache->uc_freebucket = NULL; 624 } 625 ZONE_LOCK(zone); 626 bucket_cache_drain(zone); 627 ZONE_UNLOCK(zone); 628 } 629 630 /* 631 * Drain the cached buckets from a zone. Expects a locked zone on entry. 632 */ 633 static void 634 bucket_cache_drain(uma_zone_t zone) 635 { 636 uma_bucket_t bucket; 637 638 /* 639 * Drain the bucket queues and free the buckets, we just keep two per 640 * cpu (alloc/free). 641 */ 642 while ((bucket = LIST_FIRST(&zone->uz_full_bucket)) != NULL) { 643 LIST_REMOVE(bucket, ub_link); 644 ZONE_UNLOCK(zone); 645 bucket_drain(zone, bucket); 646 bucket_free(bucket); 647 ZONE_LOCK(zone); 648 } 649 650 /* Now we do the free queue.. */ 651 while ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) { 652 LIST_REMOVE(bucket, ub_link); 653 bucket_free(bucket); 654 } 655 } 656 657 /* 658 * Frees pages from a zone back to the system. This is done on demand from 659 * the pageout daemon. 660 * 661 * Arguments: 662 * zone The zone to free pages from 663 * all Should we drain all items? 664 * 665 * Returns: 666 * Nothing. 667 */ 668 static void 669 zone_drain(uma_zone_t zone) 670 { 671 struct slabhead freeslabs = { 0 }; 672 uma_keg_t keg; 673 uma_slab_t slab; 674 uma_slab_t n; 675 u_int8_t flags; 676 u_int8_t *mem; 677 int i; 678 679 keg = zone->uz_keg; 680 681 /* 682 * We don't want to take pages from statically allocated zones at this 683 * time 684 */ 685 if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL) 686 return; 687 688 ZONE_LOCK(zone); 689 690 #ifdef UMA_DEBUG 691 printf("%s free items: %u\n", zone->uz_name, keg->uk_free); 692 #endif 693 bucket_cache_drain(zone); 694 if (keg->uk_free == 0) 695 goto finished; 696 697 slab = LIST_FIRST(&keg->uk_free_slab); 698 while (slab) { 699 n = LIST_NEXT(slab, us_link); 700 701 /* We have no where to free these to */ 702 if (slab->us_flags & UMA_SLAB_BOOT) { 703 slab = n; 704 continue; 705 } 706 707 LIST_REMOVE(slab, us_link); 708 keg->uk_pages -= keg->uk_ppera; 709 keg->uk_free -= keg->uk_ipers; 710 711 if (keg->uk_flags & UMA_ZONE_HASH) 712 UMA_HASH_REMOVE(&keg->uk_hash, slab, slab->us_data); 713 714 SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink); 715 716 slab = n; 717 } 718 finished: 719 ZONE_UNLOCK(zone); 720 721 while ((slab = SLIST_FIRST(&freeslabs)) != NULL) { 722 SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink); 723 if (keg->uk_fini) 724 for (i = 0; i < keg->uk_ipers; i++) 725 keg->uk_fini( 726 slab->us_data + (keg->uk_rsize * i), 727 keg->uk_size); 728 flags = slab->us_flags; 729 mem = slab->us_data; 730 731 if ((keg->uk_flags & UMA_ZONE_MALLOC) || 732 (keg->uk_flags & UMA_ZONE_REFCNT)) { 733 vm_object_t obj; 734 735 if (flags & UMA_SLAB_KMEM) 736 obj = kmem_object; 737 else 738 obj = NULL; 739 for (i = 0; i < keg->uk_ppera; i++) 740 vsetobj((vm_offset_t)mem + (i * PAGE_SIZE), 741 obj); 742 } 743 if (keg->uk_flags & UMA_ZONE_OFFPAGE) 744 uma_zfree_internal(keg->uk_slabzone, slab, NULL, 745 SKIP_NONE); 746 #ifdef UMA_DEBUG 747 printf("%s: Returning %d bytes.\n", 748 zone->uz_name, UMA_SLAB_SIZE * keg->uk_ppera); 749 #endif 750 keg->uk_freef(mem, UMA_SLAB_SIZE * keg->uk_ppera, flags); 751 } 752 } 753 754 /* 755 * Allocate a new slab for a zone. This does not insert the slab onto a list. 756 * 757 * Arguments: 758 * zone The zone to allocate slabs for 759 * wait Shall we wait? 760 * 761 * Returns: 762 * The slab that was allocated or NULL if there is no memory and the 763 * caller specified M_NOWAIT. 764 */ 765 static uma_slab_t 766 slab_zalloc(uma_zone_t zone, int wait) 767 { 768 uma_slabrefcnt_t slabref; 769 uma_slab_t slab; 770 uma_keg_t keg; 771 u_int8_t *mem; 772 u_int8_t flags; 773 int i; 774 775 slab = NULL; 776 keg = zone->uz_keg; 777 778 #ifdef UMA_DEBUG 779 printf("slab_zalloc: Allocating a new slab for %s\n", zone->uz_name); 780 #endif 781 ZONE_UNLOCK(zone); 782 783 if (keg->uk_flags & UMA_ZONE_OFFPAGE) { 784 slab = uma_zalloc_internal(keg->uk_slabzone, NULL, wait); 785 if (slab == NULL) { 786 ZONE_LOCK(zone); 787 return NULL; 788 } 789 } 790 791 /* 792 * This reproduces the old vm_zone behavior of zero filling pages the 793 * first time they are added to a zone. 794 * 795 * Malloced items are zeroed in uma_zalloc. 796 */ 797 798 if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0) 799 wait |= M_ZERO; 800 else 801 wait &= ~M_ZERO; 802 803 mem = keg->uk_allocf(zone, keg->uk_ppera * UMA_SLAB_SIZE, 804 &flags, wait); 805 if (mem == NULL) { 806 if (keg->uk_flags & UMA_ZONE_OFFPAGE) 807 uma_zfree_internal(keg->uk_slabzone, slab, NULL, 808 SKIP_NONE); 809 ZONE_LOCK(zone); 810 return (NULL); 811 } 812 813 /* Point the slab into the allocated memory */ 814 if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) 815 slab = (uma_slab_t )(mem + keg->uk_pgoff); 816 817 if ((keg->uk_flags & UMA_ZONE_MALLOC) || 818 (keg->uk_flags & UMA_ZONE_REFCNT)) 819 for (i = 0; i < keg->uk_ppera; i++) 820 vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab); 821 822 slab->us_keg = keg; 823 slab->us_data = mem; 824 slab->us_freecount = keg->uk_ipers; 825 slab->us_firstfree = 0; 826 slab->us_flags = flags; 827 828 if (keg->uk_flags & UMA_ZONE_REFCNT) { 829 slabref = (uma_slabrefcnt_t)slab; 830 for (i = 0; i < keg->uk_ipers; i++) { 831 slabref->us_freelist[i].us_refcnt = 0; 832 slabref->us_freelist[i].us_item = i+1; 833 } 834 } else { 835 for (i = 0; i < keg->uk_ipers; i++) 836 slab->us_freelist[i].us_item = i+1; 837 } 838 839 if (keg->uk_init != NULL) { 840 for (i = 0; i < keg->uk_ipers; i++) 841 if (keg->uk_init(slab->us_data + (keg->uk_rsize * i), 842 keg->uk_size, wait) != 0) 843 break; 844 if (i != keg->uk_ipers) { 845 if (keg->uk_fini != NULL) { 846 for (i--; i > -1; i--) 847 keg->uk_fini(slab->us_data + 848 (keg->uk_rsize * i), 849 keg->uk_size); 850 } 851 if ((keg->uk_flags & UMA_ZONE_MALLOC) || 852 (keg->uk_flags & UMA_ZONE_REFCNT)) { 853 vm_object_t obj; 854 855 if (flags & UMA_SLAB_KMEM) 856 obj = kmem_object; 857 else 858 obj = NULL; 859 for (i = 0; i < keg->uk_ppera; i++) 860 vsetobj((vm_offset_t)mem + 861 (i * PAGE_SIZE), obj); 862 } 863 if (keg->uk_flags & UMA_ZONE_OFFPAGE) 864 uma_zfree_internal(keg->uk_slabzone, slab, 865 NULL, SKIP_NONE); 866 keg->uk_freef(mem, UMA_SLAB_SIZE * keg->uk_ppera, 867 flags); 868 ZONE_LOCK(zone); 869 return (NULL); 870 } 871 } 872 ZONE_LOCK(zone); 873 874 if (keg->uk_flags & UMA_ZONE_HASH) 875 UMA_HASH_INSERT(&keg->uk_hash, slab, mem); 876 877 keg->uk_pages += keg->uk_ppera; 878 keg->uk_free += keg->uk_ipers; 879 880 return (slab); 881 } 882 883 /* 884 * This function is intended to be used early on in place of page_alloc() so 885 * that we may use the boot time page cache to satisfy allocations before 886 * the VM is ready. 887 */ 888 static void * 889 startup_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait) 890 { 891 uma_keg_t keg; 892 893 keg = zone->uz_keg; 894 895 /* 896 * Check our small startup cache to see if it has pages remaining. 897 */ 898 mtx_lock(&uma_mtx); 899 if (uma_boot_free != 0) { 900 uma_slab_t tmps; 901 902 tmps = LIST_FIRST(&uma_boot_pages); 903 LIST_REMOVE(tmps, us_link); 904 uma_boot_free--; 905 mtx_unlock(&uma_mtx); 906 *pflag = tmps->us_flags; 907 return (tmps->us_data); 908 } 909 mtx_unlock(&uma_mtx); 910 if (booted == 0) 911 panic("UMA: Increase UMA_BOOT_PAGES"); 912 /* 913 * Now that we've booted reset these users to their real allocator. 914 */ 915 #ifdef UMA_MD_SMALL_ALLOC 916 keg->uk_allocf = uma_small_alloc; 917 #else 918 keg->uk_allocf = page_alloc; 919 #endif 920 return keg->uk_allocf(zone, bytes, pflag, wait); 921 } 922 923 /* 924 * Allocates a number of pages from the system 925 * 926 * Arguments: 927 * zone Unused 928 * bytes The number of bytes requested 929 * wait Shall we wait? 930 * 931 * Returns: 932 * A pointer to the alloced memory or possibly 933 * NULL if M_NOWAIT is set. 934 */ 935 static void * 936 page_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait) 937 { 938 void *p; /* Returned page */ 939 940 *pflag = UMA_SLAB_KMEM; 941 p = (void *) kmem_malloc(kmem_map, bytes, wait); 942 943 return (p); 944 } 945 946 /* 947 * Allocates a number of pages from within an object 948 * 949 * Arguments: 950 * zone Unused 951 * bytes The number of bytes requested 952 * wait Shall we wait? 953 * 954 * Returns: 955 * A pointer to the alloced memory or possibly 956 * NULL if M_NOWAIT is set. 957 */ 958 static void * 959 obj_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait) 960 { 961 vm_object_t object; 962 vm_offset_t retkva, zkva; 963 vm_page_t p; 964 int pages, startpages; 965 966 object = zone->uz_keg->uk_obj; 967 retkva = 0; 968 969 /* 970 * This looks a little weird since we're getting one page at a time. 971 */ 972 VM_OBJECT_LOCK(object); 973 p = TAILQ_LAST(&object->memq, pglist); 974 pages = p != NULL ? p->pindex + 1 : 0; 975 startpages = pages; 976 zkva = zone->uz_keg->uk_kva + pages * PAGE_SIZE; 977 for (; bytes > 0; bytes -= PAGE_SIZE) { 978 p = vm_page_alloc(object, pages, 979 VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED); 980 if (p == NULL) { 981 if (pages != startpages) 982 pmap_qremove(retkva, pages - startpages); 983 while (pages != startpages) { 984 pages--; 985 p = TAILQ_LAST(&object->memq, pglist); 986 vm_page_lock_queues(); 987 vm_page_unwire(p, 0); 988 vm_page_free(p); 989 vm_page_unlock_queues(); 990 } 991 retkva = 0; 992 goto done; 993 } 994 pmap_qenter(zkva, &p, 1); 995 if (retkva == 0) 996 retkva = zkva; 997 zkva += PAGE_SIZE; 998 pages += 1; 999 } 1000 done: 1001 VM_OBJECT_UNLOCK(object); 1002 *flags = UMA_SLAB_PRIV; 1003 1004 return ((void *)retkva); 1005 } 1006 1007 /* 1008 * Frees a number of pages to the system 1009 * 1010 * Arguments: 1011 * mem A pointer to the memory to be freed 1012 * size The size of the memory being freed 1013 * flags The original p->us_flags field 1014 * 1015 * Returns: 1016 * Nothing 1017 */ 1018 static void 1019 page_free(void *mem, int size, u_int8_t flags) 1020 { 1021 vm_map_t map; 1022 1023 if (flags & UMA_SLAB_KMEM) 1024 map = kmem_map; 1025 else 1026 panic("UMA: page_free used with invalid flags %d\n", flags); 1027 1028 kmem_free(map, (vm_offset_t)mem, size); 1029 } 1030 1031 /* 1032 * Zero fill initializer 1033 * 1034 * Arguments/Returns follow uma_init specifications 1035 */ 1036 static int 1037 zero_init(void *mem, int size, int flags) 1038 { 1039 bzero(mem, size); 1040 return (0); 1041 } 1042 1043 /* 1044 * Finish creating a small uma zone. This calculates ipers, and the zone size. 1045 * 1046 * Arguments 1047 * zone The zone we should initialize 1048 * 1049 * Returns 1050 * Nothing 1051 */ 1052 static void 1053 zone_small_init(uma_zone_t zone) 1054 { 1055 uma_keg_t keg; 1056 u_int rsize; 1057 u_int memused; 1058 u_int wastedspace; 1059 u_int shsize; 1060 1061 keg = zone->uz_keg; 1062 KASSERT(keg != NULL, ("Keg is null in zone_small_init")); 1063 rsize = keg->uk_size; 1064 1065 if (rsize < UMA_SMALLEST_UNIT) 1066 rsize = UMA_SMALLEST_UNIT; 1067 if (rsize & keg->uk_align) 1068 rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1); 1069 1070 keg->uk_rsize = rsize; 1071 keg->uk_ppera = 1; 1072 1073 if (keg->uk_flags & UMA_ZONE_REFCNT) { 1074 rsize += UMA_FRITMREF_SZ; /* linkage & refcnt */ 1075 shsize = sizeof(struct uma_slab_refcnt); 1076 } else { 1077 rsize += UMA_FRITM_SZ; /* Account for linkage */ 1078 shsize = sizeof(struct uma_slab); 1079 } 1080 1081 keg->uk_ipers = (UMA_SLAB_SIZE - shsize) / rsize; 1082 KASSERT(keg->uk_ipers != 0, ("zone_small_init: ipers is 0")); 1083 memused = keg->uk_ipers * rsize + shsize; 1084 wastedspace = UMA_SLAB_SIZE - memused; 1085 1086 /* 1087 * We can't do OFFPAGE if we're internal or if we've been 1088 * asked to not go to the VM for buckets. If we do this we 1089 * may end up going to the VM (kmem_map) for slabs which we 1090 * do not want to do if we're UMA_ZFLAG_CACHEONLY as a 1091 * result of UMA_ZONE_VM, which clearly forbids it. 1092 */ 1093 if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) || 1094 (keg->uk_flags & UMA_ZFLAG_CACHEONLY)) 1095 return; 1096 1097 if ((wastedspace >= UMA_MAX_WASTE) && 1098 (keg->uk_ipers < (UMA_SLAB_SIZE / keg->uk_rsize))) { 1099 keg->uk_ipers = UMA_SLAB_SIZE / keg->uk_rsize; 1100 KASSERT(keg->uk_ipers <= 255, 1101 ("zone_small_init: keg->uk_ipers too high!")); 1102 #ifdef UMA_DEBUG 1103 printf("UMA decided we need offpage slab headers for " 1104 "zone: %s, calculated wastedspace = %d, " 1105 "maximum wasted space allowed = %d, " 1106 "calculated ipers = %d, " 1107 "new wasted space = %d\n", zone->uz_name, wastedspace, 1108 UMA_MAX_WASTE, keg->uk_ipers, 1109 UMA_SLAB_SIZE - keg->uk_ipers * keg->uk_rsize); 1110 #endif 1111 keg->uk_flags |= UMA_ZONE_OFFPAGE; 1112 if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0) 1113 keg->uk_flags |= UMA_ZONE_HASH; 1114 } 1115 } 1116 1117 /* 1118 * Finish creating a large (> UMA_SLAB_SIZE) uma zone. Just give in and do 1119 * OFFPAGE for now. When I can allow for more dynamic slab sizes this will be 1120 * more complicated. 1121 * 1122 * Arguments 1123 * zone The zone we should initialize 1124 * 1125 * Returns 1126 * Nothing 1127 */ 1128 static void 1129 zone_large_init(uma_zone_t zone) 1130 { 1131 uma_keg_t keg; 1132 int pages; 1133 1134 keg = zone->uz_keg; 1135 1136 KASSERT(keg != NULL, ("Keg is null in zone_large_init")); 1137 KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0, 1138 ("zone_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY zone")); 1139 1140 pages = keg->uk_size / UMA_SLAB_SIZE; 1141 1142 /* Account for remainder */ 1143 if ((pages * UMA_SLAB_SIZE) < keg->uk_size) 1144 pages++; 1145 1146 keg->uk_ppera = pages; 1147 keg->uk_ipers = 1; 1148 1149 keg->uk_flags |= UMA_ZONE_OFFPAGE; 1150 if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0) 1151 keg->uk_flags |= UMA_ZONE_HASH; 1152 1153 keg->uk_rsize = keg->uk_size; 1154 } 1155 1156 /* 1157 * Keg header ctor. This initializes all fields, locks, etc. And inserts 1158 * the keg onto the global keg list. 1159 * 1160 * Arguments/Returns follow uma_ctor specifications 1161 * udata Actually uma_kctor_args 1162 */ 1163 static int 1164 keg_ctor(void *mem, int size, void *udata, int flags) 1165 { 1166 struct uma_kctor_args *arg = udata; 1167 uma_keg_t keg = mem; 1168 uma_zone_t zone; 1169 1170 bzero(keg, size); 1171 keg->uk_size = arg->size; 1172 keg->uk_init = arg->uminit; 1173 keg->uk_fini = arg->fini; 1174 keg->uk_align = arg->align; 1175 keg->uk_free = 0; 1176 keg->uk_pages = 0; 1177 keg->uk_flags = arg->flags; 1178 keg->uk_allocf = page_alloc; 1179 keg->uk_freef = page_free; 1180 keg->uk_recurse = 0; 1181 keg->uk_slabzone = NULL; 1182 1183 /* 1184 * The master zone is passed to us at keg-creation time. 1185 */ 1186 zone = arg->zone; 1187 zone->uz_keg = keg; 1188 1189 if (arg->flags & UMA_ZONE_VM) 1190 keg->uk_flags |= UMA_ZFLAG_CACHEONLY; 1191 1192 if (arg->flags & UMA_ZONE_ZINIT) 1193 keg->uk_init = zero_init; 1194 1195 /* 1196 * The +UMA_FRITM_SZ added to uk_size is to account for the 1197 * linkage that is added to the size in zone_small_init(). If 1198 * we don't account for this here then we may end up in 1199 * zone_small_init() with a calculated 'ipers' of 0. 1200 */ 1201 if (keg->uk_flags & UMA_ZONE_REFCNT) { 1202 if ((keg->uk_size+UMA_FRITMREF_SZ) > 1203 (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt))) 1204 zone_large_init(zone); 1205 else 1206 zone_small_init(zone); 1207 } else { 1208 if ((keg->uk_size+UMA_FRITM_SZ) > 1209 (UMA_SLAB_SIZE - sizeof(struct uma_slab))) 1210 zone_large_init(zone); 1211 else 1212 zone_small_init(zone); 1213 } 1214 1215 if (keg->uk_flags & UMA_ZONE_OFFPAGE) { 1216 if (keg->uk_flags & UMA_ZONE_REFCNT) 1217 keg->uk_slabzone = slabrefzone; 1218 else 1219 keg->uk_slabzone = slabzone; 1220 } 1221 1222 /* 1223 * If we haven't booted yet we need allocations to go through the 1224 * startup cache until the vm is ready. 1225 */ 1226 if (keg->uk_ppera == 1) { 1227 #ifdef UMA_MD_SMALL_ALLOC 1228 keg->uk_allocf = uma_small_alloc; 1229 keg->uk_freef = uma_small_free; 1230 #endif 1231 if (booted == 0) 1232 keg->uk_allocf = startup_alloc; 1233 } 1234 1235 /* 1236 * Initialize keg's lock (shared among zones) through 1237 * Master zone 1238 */ 1239 zone->uz_lock = &keg->uk_lock; 1240 if (arg->flags & UMA_ZONE_MTXCLASS) 1241 ZONE_LOCK_INIT(zone, 1); 1242 else 1243 ZONE_LOCK_INIT(zone, 0); 1244 1245 /* 1246 * If we're putting the slab header in the actual page we need to 1247 * figure out where in each page it goes. This calculates a right 1248 * justified offset into the memory on an ALIGN_PTR boundary. 1249 */ 1250 if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) { 1251 u_int totsize; 1252 1253 /* Size of the slab struct and free list */ 1254 if (keg->uk_flags & UMA_ZONE_REFCNT) 1255 totsize = sizeof(struct uma_slab_refcnt) + 1256 keg->uk_ipers * UMA_FRITMREF_SZ; 1257 else 1258 totsize = sizeof(struct uma_slab) + 1259 keg->uk_ipers * UMA_FRITM_SZ; 1260 1261 if (totsize & UMA_ALIGN_PTR) 1262 totsize = (totsize & ~UMA_ALIGN_PTR) + 1263 (UMA_ALIGN_PTR + 1); 1264 keg->uk_pgoff = UMA_SLAB_SIZE - totsize; 1265 1266 if (keg->uk_flags & UMA_ZONE_REFCNT) 1267 totsize = keg->uk_pgoff + sizeof(struct uma_slab_refcnt) 1268 + keg->uk_ipers * UMA_FRITMREF_SZ; 1269 else 1270 totsize = keg->uk_pgoff + sizeof(struct uma_slab) 1271 + keg->uk_ipers * UMA_FRITM_SZ; 1272 1273 /* 1274 * The only way the following is possible is if with our 1275 * UMA_ALIGN_PTR adjustments we are now bigger than 1276 * UMA_SLAB_SIZE. I haven't checked whether this is 1277 * mathematically possible for all cases, so we make 1278 * sure here anyway. 1279 */ 1280 if (totsize > UMA_SLAB_SIZE) { 1281 printf("zone %s ipers %d rsize %d size %d\n", 1282 zone->uz_name, keg->uk_ipers, keg->uk_rsize, 1283 keg->uk_size); 1284 panic("UMA slab won't fit.\n"); 1285 } 1286 } 1287 1288 if (keg->uk_flags & UMA_ZONE_HASH) 1289 hash_alloc(&keg->uk_hash); 1290 1291 #ifdef UMA_DEBUG 1292 printf("%s(%p) size = %d ipers = %d ppera = %d pgoff = %d\n", 1293 zone->uz_name, zone, 1294 keg->uk_size, keg->uk_ipers, 1295 keg->uk_ppera, keg->uk_pgoff); 1296 #endif 1297 1298 LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link); 1299 1300 mtx_lock(&uma_mtx); 1301 LIST_INSERT_HEAD(&uma_kegs, keg, uk_link); 1302 mtx_unlock(&uma_mtx); 1303 return (0); 1304 } 1305 1306 /* 1307 * Zone header ctor. This initializes all fields, locks, etc. 1308 * 1309 * Arguments/Returns follow uma_ctor specifications 1310 * udata Actually uma_zctor_args 1311 */ 1312 1313 static int 1314 zone_ctor(void *mem, int size, void *udata, int flags) 1315 { 1316 struct uma_zctor_args *arg = udata; 1317 uma_zone_t zone = mem; 1318 uma_zone_t z; 1319 uma_keg_t keg; 1320 1321 bzero(zone, size); 1322 zone->uz_name = arg->name; 1323 zone->uz_ctor = arg->ctor; 1324 zone->uz_dtor = arg->dtor; 1325 zone->uz_init = NULL; 1326 zone->uz_fini = NULL; 1327 zone->uz_allocs = 0; 1328 zone->uz_fills = zone->uz_count = 0; 1329 1330 if (arg->flags & UMA_ZONE_SECONDARY) { 1331 KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg")); 1332 keg = arg->keg; 1333 zone->uz_keg = keg; 1334 zone->uz_init = arg->uminit; 1335 zone->uz_fini = arg->fini; 1336 zone->uz_lock = &keg->uk_lock; 1337 mtx_lock(&uma_mtx); 1338 ZONE_LOCK(zone); 1339 keg->uk_flags |= UMA_ZONE_SECONDARY; 1340 LIST_FOREACH(z, &keg->uk_zones, uz_link) { 1341 if (LIST_NEXT(z, uz_link) == NULL) { 1342 LIST_INSERT_AFTER(z, zone, uz_link); 1343 break; 1344 } 1345 } 1346 ZONE_UNLOCK(zone); 1347 mtx_unlock(&uma_mtx); 1348 } else if (arg->keg == NULL) { 1349 if (uma_kcreate(zone, arg->size, arg->uminit, arg->fini, 1350 arg->align, arg->flags) == NULL) 1351 return (ENOMEM); 1352 } else { 1353 struct uma_kctor_args karg; 1354 int error; 1355 1356 /* We should only be here from uma_startup() */ 1357 karg.size = arg->size; 1358 karg.uminit = arg->uminit; 1359 karg.fini = arg->fini; 1360 karg.align = arg->align; 1361 karg.flags = arg->flags; 1362 karg.zone = zone; 1363 error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg, 1364 flags); 1365 if (error) 1366 return (error); 1367 } 1368 keg = zone->uz_keg; 1369 zone->uz_lock = &keg->uk_lock; 1370 1371 /* 1372 * Some internal zones don't have room allocated for the per cpu 1373 * caches. If we're internal, bail out here. 1374 */ 1375 if (keg->uk_flags & UMA_ZFLAG_INTERNAL) { 1376 KASSERT((keg->uk_flags & UMA_ZONE_SECONDARY) == 0, 1377 ("Secondary zone requested UMA_ZFLAG_INTERNAL")); 1378 return (0); 1379 } 1380 1381 if (keg->uk_flags & UMA_ZONE_MAXBUCKET) 1382 zone->uz_count = BUCKET_MAX; 1383 else if (keg->uk_ipers <= BUCKET_MAX) 1384 zone->uz_count = keg->uk_ipers; 1385 else 1386 zone->uz_count = BUCKET_MAX; 1387 return (0); 1388 } 1389 1390 /* 1391 * Keg header dtor. This frees all data, destroys locks, frees the hash 1392 * table and removes the keg from the global list. 1393 * 1394 * Arguments/Returns follow uma_dtor specifications 1395 * udata unused 1396 */ 1397 static void 1398 keg_dtor(void *arg, int size, void *udata) 1399 { 1400 uma_keg_t keg; 1401 1402 keg = (uma_keg_t)arg; 1403 mtx_lock(&keg->uk_lock); 1404 if (keg->uk_free != 0) { 1405 printf("Freed UMA keg was not empty (%d items). " 1406 " Lost %d pages of memory.\n", 1407 keg->uk_free, keg->uk_pages); 1408 } 1409 mtx_unlock(&keg->uk_lock); 1410 1411 if (keg->uk_flags & UMA_ZONE_HASH) 1412 hash_free(&keg->uk_hash); 1413 1414 mtx_destroy(&keg->uk_lock); 1415 } 1416 1417 /* 1418 * Zone header dtor. 1419 * 1420 * Arguments/Returns follow uma_dtor specifications 1421 * udata unused 1422 */ 1423 static void 1424 zone_dtor(void *arg, int size, void *udata) 1425 { 1426 uma_zone_t zone; 1427 uma_keg_t keg; 1428 1429 zone = (uma_zone_t)arg; 1430 keg = zone->uz_keg; 1431 1432 if (!(keg->uk_flags & UMA_ZFLAG_INTERNAL)) 1433 cache_drain(zone); 1434 1435 mtx_lock(&uma_mtx); 1436 zone_drain(zone); 1437 if (keg->uk_flags & UMA_ZONE_SECONDARY) { 1438 LIST_REMOVE(zone, uz_link); 1439 /* 1440 * XXX there are some races here where 1441 * the zone can be drained but zone lock 1442 * released and then refilled before we 1443 * remove it... we dont care for now 1444 */ 1445 ZONE_LOCK(zone); 1446 if (LIST_EMPTY(&keg->uk_zones)) 1447 keg->uk_flags &= ~UMA_ZONE_SECONDARY; 1448 ZONE_UNLOCK(zone); 1449 mtx_unlock(&uma_mtx); 1450 } else { 1451 LIST_REMOVE(keg, uk_link); 1452 LIST_REMOVE(zone, uz_link); 1453 mtx_unlock(&uma_mtx); 1454 uma_zfree_internal(kegs, keg, NULL, SKIP_NONE); 1455 } 1456 zone->uz_keg = NULL; 1457 } 1458 1459 /* 1460 * Traverses every zone in the system and calls a callback 1461 * 1462 * Arguments: 1463 * zfunc A pointer to a function which accepts a zone 1464 * as an argument. 1465 * 1466 * Returns: 1467 * Nothing 1468 */ 1469 static void 1470 zone_foreach(void (*zfunc)(uma_zone_t)) 1471 { 1472 uma_keg_t keg; 1473 uma_zone_t zone; 1474 1475 mtx_lock(&uma_mtx); 1476 LIST_FOREACH(keg, &uma_kegs, uk_link) { 1477 LIST_FOREACH(zone, &keg->uk_zones, uz_link) 1478 zfunc(zone); 1479 } 1480 mtx_unlock(&uma_mtx); 1481 } 1482 1483 /* Public functions */ 1484 /* See uma.h */ 1485 void 1486 uma_startup(void *bootmem) 1487 { 1488 struct uma_zctor_args args; 1489 uma_slab_t slab; 1490 u_int slabsize; 1491 u_int objsize, totsize, wsize; 1492 int i; 1493 1494 #ifdef UMA_DEBUG 1495 printf("Creating uma keg headers zone and keg.\n"); 1496 #endif 1497 /* 1498 * The general UMA lock is a recursion-allowed lock because 1499 * there is a code path where, while we're still configured 1500 * to use startup_alloc() for backend page allocations, we 1501 * may end up in uma_reclaim() which calls zone_foreach(zone_drain), 1502 * which grabs uma_mtx, only to later call into startup_alloc() 1503 * because while freeing we needed to allocate a bucket. Since 1504 * startup_alloc() also takes uma_mtx, we need to be able to 1505 * recurse on it. 1506 */ 1507 mtx_init(&uma_mtx, "UMA lock", NULL, MTX_DEF | MTX_RECURSE); 1508 1509 /* 1510 * Figure out the maximum number of items-per-slab we'll have if 1511 * we're using the OFFPAGE slab header to track free items, given 1512 * all possible object sizes and the maximum desired wastage 1513 * (UMA_MAX_WASTE). 1514 * 1515 * We iterate until we find an object size for 1516 * which the calculated wastage in zone_small_init() will be 1517 * enough to warrant OFFPAGE. Since wastedspace versus objsize 1518 * is an overall increasing see-saw function, we find the smallest 1519 * objsize such that the wastage is always acceptable for objects 1520 * with that objsize or smaller. Since a smaller objsize always 1521 * generates a larger possible uma_max_ipers, we use this computed 1522 * objsize to calculate the largest ipers possible. Since the 1523 * ipers calculated for OFFPAGE slab headers is always larger than 1524 * the ipers initially calculated in zone_small_init(), we use 1525 * the former's equation (UMA_SLAB_SIZE / keg->uk_rsize) to 1526 * obtain the maximum ipers possible for offpage slab headers. 1527 * 1528 * It should be noted that ipers versus objsize is an inversly 1529 * proportional function which drops off rather quickly so as 1530 * long as our UMA_MAX_WASTE is such that the objsize we calculate 1531 * falls into the portion of the inverse relation AFTER the steep 1532 * falloff, then uma_max_ipers shouldn't be too high (~10 on i386). 1533 * 1534 * Note that we have 8-bits (1 byte) to use as a freelist index 1535 * inside the actual slab header itself and this is enough to 1536 * accomodate us. In the worst case, a UMA_SMALLEST_UNIT sized 1537 * object with offpage slab header would have ipers = 1538 * UMA_SLAB_SIZE / UMA_SMALLEST_UNIT (currently = 256), which is 1539 * 1 greater than what our byte-integer freelist index can 1540 * accomodate, but we know that this situation never occurs as 1541 * for UMA_SMALLEST_UNIT-sized objects, we will never calculate 1542 * that we need to go to offpage slab headers. Or, if we do, 1543 * then we trap that condition below and panic in the INVARIANTS case. 1544 */ 1545 wsize = UMA_SLAB_SIZE - sizeof(struct uma_slab) - UMA_MAX_WASTE; 1546 totsize = wsize; 1547 objsize = UMA_SMALLEST_UNIT; 1548 while (totsize >= wsize) { 1549 totsize = (UMA_SLAB_SIZE - sizeof(struct uma_slab)) / 1550 (objsize + UMA_FRITM_SZ); 1551 totsize *= (UMA_FRITM_SZ + objsize); 1552 objsize++; 1553 } 1554 if (objsize > UMA_SMALLEST_UNIT) 1555 objsize--; 1556 uma_max_ipers = UMA_SLAB_SIZE / objsize; 1557 1558 wsize = UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt) - UMA_MAX_WASTE; 1559 totsize = wsize; 1560 objsize = UMA_SMALLEST_UNIT; 1561 while (totsize >= wsize) { 1562 totsize = (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt)) / 1563 (objsize + UMA_FRITMREF_SZ); 1564 totsize *= (UMA_FRITMREF_SZ + objsize); 1565 objsize++; 1566 } 1567 if (objsize > UMA_SMALLEST_UNIT) 1568 objsize--; 1569 uma_max_ipers_ref = UMA_SLAB_SIZE / objsize; 1570 1571 KASSERT((uma_max_ipers_ref <= 255) && (uma_max_ipers <= 255), 1572 ("uma_startup: calculated uma_max_ipers values too large!")); 1573 1574 #ifdef UMA_DEBUG 1575 printf("Calculated uma_max_ipers (for OFFPAGE) is %d\n", uma_max_ipers); 1576 printf("Calculated uma_max_ipers_slab (for OFFPAGE) is %d\n", 1577 uma_max_ipers_ref); 1578 #endif 1579 1580 /* "manually" create the initial zone */ 1581 args.name = "UMA Kegs"; 1582 args.size = sizeof(struct uma_keg); 1583 args.ctor = keg_ctor; 1584 args.dtor = keg_dtor; 1585 args.uminit = zero_init; 1586 args.fini = NULL; 1587 args.keg = &masterkeg; 1588 args.align = 32 - 1; 1589 args.flags = UMA_ZFLAG_INTERNAL; 1590 /* The initial zone has no Per cpu queues so it's smaller */ 1591 zone_ctor(kegs, sizeof(struct uma_zone), &args, M_WAITOK); 1592 1593 #ifdef UMA_DEBUG 1594 printf("Filling boot free list.\n"); 1595 #endif 1596 for (i = 0; i < UMA_BOOT_PAGES; i++) { 1597 slab = (uma_slab_t)((u_int8_t *)bootmem + (i * UMA_SLAB_SIZE)); 1598 slab->us_data = (u_int8_t *)slab; 1599 slab->us_flags = UMA_SLAB_BOOT; 1600 LIST_INSERT_HEAD(&uma_boot_pages, slab, us_link); 1601 uma_boot_free++; 1602 } 1603 1604 #ifdef UMA_DEBUG 1605 printf("Creating uma zone headers zone and keg.\n"); 1606 #endif 1607 args.name = "UMA Zones"; 1608 args.size = sizeof(struct uma_zone) + 1609 (sizeof(struct uma_cache) * (mp_maxid + 1)); 1610 args.ctor = zone_ctor; 1611 args.dtor = zone_dtor; 1612 args.uminit = zero_init; 1613 args.fini = NULL; 1614 args.keg = NULL; 1615 args.align = 32 - 1; 1616 args.flags = UMA_ZFLAG_INTERNAL; 1617 /* The initial zone has no Per cpu queues so it's smaller */ 1618 zone_ctor(zones, sizeof(struct uma_zone), &args, M_WAITOK); 1619 1620 #ifdef UMA_DEBUG 1621 printf("Initializing pcpu cache locks.\n"); 1622 #endif 1623 #ifdef UMA_DEBUG 1624 printf("Creating slab and hash zones.\n"); 1625 #endif 1626 1627 /* 1628 * This is the max number of free list items we'll have with 1629 * offpage slabs. 1630 */ 1631 slabsize = uma_max_ipers * UMA_FRITM_SZ; 1632 slabsize += sizeof(struct uma_slab); 1633 1634 /* Now make a zone for slab headers */ 1635 slabzone = uma_zcreate("UMA Slabs", 1636 slabsize, 1637 NULL, NULL, NULL, NULL, 1638 UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL); 1639 1640 /* 1641 * We also create a zone for the bigger slabs with reference 1642 * counts in them, to accomodate UMA_ZONE_REFCNT zones. 1643 */ 1644 slabsize = uma_max_ipers_ref * UMA_FRITMREF_SZ; 1645 slabsize += sizeof(struct uma_slab_refcnt); 1646 slabrefzone = uma_zcreate("UMA RCntSlabs", 1647 slabsize, 1648 NULL, NULL, NULL, NULL, 1649 UMA_ALIGN_PTR, 1650 UMA_ZFLAG_INTERNAL); 1651 1652 hashzone = uma_zcreate("UMA Hash", 1653 sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT, 1654 NULL, NULL, NULL, NULL, 1655 UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL); 1656 1657 bucket_init(); 1658 1659 #ifdef UMA_MD_SMALL_ALLOC 1660 booted = 1; 1661 #endif 1662 1663 #ifdef UMA_DEBUG 1664 printf("UMA startup complete.\n"); 1665 #endif 1666 } 1667 1668 /* see uma.h */ 1669 void 1670 uma_startup2(void) 1671 { 1672 booted = 1; 1673 bucket_enable(); 1674 #ifdef UMA_DEBUG 1675 printf("UMA startup2 complete.\n"); 1676 #endif 1677 } 1678 1679 /* 1680 * Initialize our callout handle 1681 * 1682 */ 1683 1684 static void 1685 uma_startup3(void) 1686 { 1687 #ifdef UMA_DEBUG 1688 printf("Starting callout.\n"); 1689 #endif 1690 callout_init(&uma_callout, CALLOUT_MPSAFE); 1691 callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL); 1692 #ifdef UMA_DEBUG 1693 printf("UMA startup3 complete.\n"); 1694 #endif 1695 } 1696 1697 static uma_zone_t 1698 uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini, 1699 int align, u_int16_t flags) 1700 { 1701 struct uma_kctor_args args; 1702 1703 args.size = size; 1704 args.uminit = uminit; 1705 args.fini = fini; 1706 args.align = align; 1707 args.flags = flags; 1708 args.zone = zone; 1709 return (uma_zalloc_internal(kegs, &args, M_WAITOK)); 1710 } 1711 1712 /* See uma.h */ 1713 uma_zone_t 1714 uma_zcreate(char *name, size_t size, uma_ctor ctor, uma_dtor dtor, 1715 uma_init uminit, uma_fini fini, int align, u_int16_t flags) 1716 1717 { 1718 struct uma_zctor_args args; 1719 1720 /* This stuff is essential for the zone ctor */ 1721 args.name = name; 1722 args.size = size; 1723 args.ctor = ctor; 1724 args.dtor = dtor; 1725 args.uminit = uminit; 1726 args.fini = fini; 1727 args.align = align; 1728 args.flags = flags; 1729 args.keg = NULL; 1730 1731 return (uma_zalloc_internal(zones, &args, M_WAITOK)); 1732 } 1733 1734 /* See uma.h */ 1735 uma_zone_t 1736 uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor, 1737 uma_init zinit, uma_fini zfini, uma_zone_t master) 1738 { 1739 struct uma_zctor_args args; 1740 1741 args.name = name; 1742 args.size = master->uz_keg->uk_size; 1743 args.ctor = ctor; 1744 args.dtor = dtor; 1745 args.uminit = zinit; 1746 args.fini = zfini; 1747 args.align = master->uz_keg->uk_align; 1748 args.flags = master->uz_keg->uk_flags | UMA_ZONE_SECONDARY; 1749 args.keg = master->uz_keg; 1750 1751 return (uma_zalloc_internal(zones, &args, M_WAITOK)); 1752 } 1753 1754 /* See uma.h */ 1755 void 1756 uma_zdestroy(uma_zone_t zone) 1757 { 1758 uma_zfree_internal(zones, zone, NULL, SKIP_NONE); 1759 } 1760 1761 /* See uma.h */ 1762 void * 1763 uma_zalloc_arg(uma_zone_t zone, void *udata, int flags) 1764 { 1765 void *item; 1766 uma_cache_t cache; 1767 uma_bucket_t bucket; 1768 int cpu; 1769 int badness; 1770 1771 /* This is the fast path allocation */ 1772 #ifdef UMA_DEBUG_ALLOC_1 1773 printf("Allocating one item from %s(%p)\n", zone->uz_name, zone); 1774 #endif 1775 CTR3(KTR_UMA, "uma_zalloc_arg thread %x zone %s flags %d", curthread, 1776 zone->uz_name, flags); 1777 1778 if (!(flags & M_NOWAIT)) { 1779 KASSERT(curthread->td_intr_nesting_level == 0, 1780 ("malloc(M_WAITOK) in interrupt context")); 1781 if (nosleepwithlocks) { 1782 #ifdef WITNESS 1783 badness = WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, 1784 NULL, 1785 "malloc(M_WAITOK) of \"%s\", forcing M_NOWAIT", 1786 zone->uz_name); 1787 #else 1788 badness = 1; 1789 #endif 1790 } else { 1791 badness = 0; 1792 #ifdef WITNESS 1793 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 1794 "malloc(M_WAITOK) of \"%s\"", zone->uz_name); 1795 #endif 1796 } 1797 if (badness) { 1798 flags &= ~M_WAITOK; 1799 flags |= M_NOWAIT; 1800 } 1801 } 1802 1803 /* 1804 * If possible, allocate from the per-CPU cache. There are two 1805 * requirements for safe access to the per-CPU cache: (1) the thread 1806 * accessing the cache must not be preempted or yield during access, 1807 * and (2) the thread must not migrate CPUs without switching which 1808 * cache it accesses. We rely on a critical section to prevent 1809 * preemption and migration. We release the critical section in 1810 * order to acquire the zone mutex if we are unable to allocate from 1811 * the current cache; when we re-acquire the critical section, we 1812 * must detect and handle migration if it has occurred. 1813 */ 1814 zalloc_restart: 1815 critical_enter(); 1816 cpu = curcpu; 1817 cache = &zone->uz_cpu[cpu]; 1818 1819 zalloc_start: 1820 bucket = cache->uc_allocbucket; 1821 1822 if (bucket) { 1823 if (bucket->ub_cnt > 0) { 1824 bucket->ub_cnt--; 1825 item = bucket->ub_bucket[bucket->ub_cnt]; 1826 #ifdef INVARIANTS 1827 bucket->ub_bucket[bucket->ub_cnt] = NULL; 1828 #endif 1829 KASSERT(item != NULL, 1830 ("uma_zalloc: Bucket pointer mangled.")); 1831 cache->uc_allocs++; 1832 critical_exit(); 1833 #ifdef INVARIANTS 1834 ZONE_LOCK(zone); 1835 uma_dbg_alloc(zone, NULL, item); 1836 ZONE_UNLOCK(zone); 1837 #endif 1838 if (zone->uz_ctor != NULL) { 1839 if (zone->uz_ctor(item, zone->uz_keg->uk_size, 1840 udata, flags) != 0) { 1841 uma_zfree_internal(zone, item, udata, 1842 SKIP_DTOR); 1843 return (NULL); 1844 } 1845 } 1846 if (flags & M_ZERO) 1847 bzero(item, zone->uz_keg->uk_size); 1848 return (item); 1849 } else if (cache->uc_freebucket) { 1850 /* 1851 * We have run out of items in our allocbucket. 1852 * See if we can switch with our free bucket. 1853 */ 1854 if (cache->uc_freebucket->ub_cnt > 0) { 1855 #ifdef UMA_DEBUG_ALLOC 1856 printf("uma_zalloc: Swapping empty with" 1857 " alloc.\n"); 1858 #endif 1859 bucket = cache->uc_freebucket; 1860 cache->uc_freebucket = cache->uc_allocbucket; 1861 cache->uc_allocbucket = bucket; 1862 1863 goto zalloc_start; 1864 } 1865 } 1866 } 1867 /* 1868 * Attempt to retrieve the item from the per-CPU cache has failed, so 1869 * we must go back to the zone. This requires the zone lock, so we 1870 * must drop the critical section, then re-acquire it when we go back 1871 * to the cache. Since the critical section is released, we may be 1872 * preempted or migrate. As such, make sure not to maintain any 1873 * thread-local state specific to the cache from prior to releasing 1874 * the critical section. 1875 */ 1876 critical_exit(); 1877 ZONE_LOCK(zone); 1878 critical_enter(); 1879 cpu = curcpu; 1880 cache = &zone->uz_cpu[cpu]; 1881 bucket = cache->uc_allocbucket; 1882 if (bucket != NULL) { 1883 if (bucket->ub_cnt > 0) { 1884 ZONE_UNLOCK(zone); 1885 goto zalloc_start; 1886 } 1887 bucket = cache->uc_freebucket; 1888 if (bucket != NULL && bucket->ub_cnt > 0) { 1889 ZONE_UNLOCK(zone); 1890 goto zalloc_start; 1891 } 1892 } 1893 1894 /* Since we have locked the zone we may as well send back our stats */ 1895 zone->uz_allocs += cache->uc_allocs; 1896 cache->uc_allocs = 0; 1897 1898 /* Our old one is now a free bucket */ 1899 if (cache->uc_allocbucket) { 1900 KASSERT(cache->uc_allocbucket->ub_cnt == 0, 1901 ("uma_zalloc_arg: Freeing a non free bucket.")); 1902 LIST_INSERT_HEAD(&zone->uz_free_bucket, 1903 cache->uc_allocbucket, ub_link); 1904 cache->uc_allocbucket = NULL; 1905 } 1906 1907 /* Check the free list for a new alloc bucket */ 1908 if ((bucket = LIST_FIRST(&zone->uz_full_bucket)) != NULL) { 1909 KASSERT(bucket->ub_cnt != 0, 1910 ("uma_zalloc_arg: Returning an empty bucket.")); 1911 1912 LIST_REMOVE(bucket, ub_link); 1913 cache->uc_allocbucket = bucket; 1914 ZONE_UNLOCK(zone); 1915 goto zalloc_start; 1916 } 1917 /* We are no longer associated with this CPU. */ 1918 critical_exit(); 1919 1920 /* Bump up our uz_count so we get here less */ 1921 if (zone->uz_count < BUCKET_MAX) 1922 zone->uz_count++; 1923 1924 /* 1925 * Now lets just fill a bucket and put it on the free list. If that 1926 * works we'll restart the allocation from the begining. 1927 */ 1928 if (uma_zalloc_bucket(zone, flags)) { 1929 ZONE_UNLOCK(zone); 1930 goto zalloc_restart; 1931 } 1932 ZONE_UNLOCK(zone); 1933 /* 1934 * We may not be able to get a bucket so return an actual item. 1935 */ 1936 #ifdef UMA_DEBUG 1937 printf("uma_zalloc_arg: Bucketzone returned NULL\n"); 1938 #endif 1939 1940 return (uma_zalloc_internal(zone, udata, flags)); 1941 } 1942 1943 static uma_slab_t 1944 uma_zone_slab(uma_zone_t zone, int flags) 1945 { 1946 uma_slab_t slab; 1947 uma_keg_t keg; 1948 1949 keg = zone->uz_keg; 1950 1951 /* 1952 * This is to prevent us from recursively trying to allocate 1953 * buckets. The problem is that if an allocation forces us to 1954 * grab a new bucket we will call page_alloc, which will go off 1955 * and cause the vm to allocate vm_map_entries. If we need new 1956 * buckets there too we will recurse in kmem_alloc and bad 1957 * things happen. So instead we return a NULL bucket, and make 1958 * the code that allocates buckets smart enough to deal with it 1959 * 1960 * XXX: While we want this protection for the bucket zones so that 1961 * recursion from the VM is handled (and the calling code that 1962 * allocates buckets knows how to deal with it), we do not want 1963 * to prevent allocation from the slab header zones (slabzone 1964 * and slabrefzone) if uk_recurse is not zero for them. The 1965 * reason is that it could lead to NULL being returned for 1966 * slab header allocations even in the M_WAITOK case, and the 1967 * caller can't handle that. 1968 */ 1969 if (keg->uk_flags & UMA_ZFLAG_INTERNAL && keg->uk_recurse != 0) 1970 if ((zone != slabzone) && (zone != slabrefzone)) 1971 return (NULL); 1972 1973 slab = NULL; 1974 1975 for (;;) { 1976 /* 1977 * Find a slab with some space. Prefer slabs that are partially 1978 * used over those that are totally full. This helps to reduce 1979 * fragmentation. 1980 */ 1981 if (keg->uk_free != 0) { 1982 if (!LIST_EMPTY(&keg->uk_part_slab)) { 1983 slab = LIST_FIRST(&keg->uk_part_slab); 1984 } else { 1985 slab = LIST_FIRST(&keg->uk_free_slab); 1986 LIST_REMOVE(slab, us_link); 1987 LIST_INSERT_HEAD(&keg->uk_part_slab, slab, 1988 us_link); 1989 } 1990 return (slab); 1991 } 1992 1993 /* 1994 * M_NOVM means don't ask at all! 1995 */ 1996 if (flags & M_NOVM) 1997 break; 1998 1999 if (keg->uk_maxpages && 2000 keg->uk_pages >= keg->uk_maxpages) { 2001 keg->uk_flags |= UMA_ZFLAG_FULL; 2002 2003 if (flags & M_NOWAIT) 2004 break; 2005 else 2006 msleep(keg, &keg->uk_lock, PVM, 2007 "zonelimit", 0); 2008 continue; 2009 } 2010 keg->uk_recurse++; 2011 slab = slab_zalloc(zone, flags); 2012 keg->uk_recurse--; 2013 2014 /* 2015 * If we got a slab here it's safe to mark it partially used 2016 * and return. We assume that the caller is going to remove 2017 * at least one item. 2018 */ 2019 if (slab) { 2020 LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link); 2021 return (slab); 2022 } 2023 /* 2024 * We might not have been able to get a slab but another cpu 2025 * could have while we were unlocked. Check again before we 2026 * fail. 2027 */ 2028 if (flags & M_NOWAIT) 2029 flags |= M_NOVM; 2030 } 2031 return (slab); 2032 } 2033 2034 static void * 2035 uma_slab_alloc(uma_zone_t zone, uma_slab_t slab) 2036 { 2037 uma_keg_t keg; 2038 uma_slabrefcnt_t slabref; 2039 void *item; 2040 u_int8_t freei; 2041 2042 keg = zone->uz_keg; 2043 2044 freei = slab->us_firstfree; 2045 if (keg->uk_flags & UMA_ZONE_REFCNT) { 2046 slabref = (uma_slabrefcnt_t)slab; 2047 slab->us_firstfree = slabref->us_freelist[freei].us_item; 2048 } else { 2049 slab->us_firstfree = slab->us_freelist[freei].us_item; 2050 } 2051 item = slab->us_data + (keg->uk_rsize * freei); 2052 2053 slab->us_freecount--; 2054 keg->uk_free--; 2055 #ifdef INVARIANTS 2056 uma_dbg_alloc(zone, slab, item); 2057 #endif 2058 /* Move this slab to the full list */ 2059 if (slab->us_freecount == 0) { 2060 LIST_REMOVE(slab, us_link); 2061 LIST_INSERT_HEAD(&keg->uk_full_slab, slab, us_link); 2062 } 2063 2064 return (item); 2065 } 2066 2067 static int 2068 uma_zalloc_bucket(uma_zone_t zone, int flags) 2069 { 2070 uma_bucket_t bucket; 2071 uma_slab_t slab; 2072 int16_t saved; 2073 int max, origflags = flags; 2074 2075 /* 2076 * Try this zone's free list first so we don't allocate extra buckets. 2077 */ 2078 if ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) { 2079 KASSERT(bucket->ub_cnt == 0, 2080 ("uma_zalloc_bucket: Bucket on free list is not empty.")); 2081 LIST_REMOVE(bucket, ub_link); 2082 } else { 2083 int bflags; 2084 2085 bflags = (flags & ~M_ZERO); 2086 if (zone->uz_keg->uk_flags & UMA_ZFLAG_CACHEONLY) 2087 bflags |= M_NOVM; 2088 2089 ZONE_UNLOCK(zone); 2090 bucket = bucket_alloc(zone->uz_count, bflags); 2091 ZONE_LOCK(zone); 2092 } 2093 2094 if (bucket == NULL) 2095 return (0); 2096 2097 #ifdef SMP 2098 /* 2099 * This code is here to limit the number of simultaneous bucket fills 2100 * for any given zone to the number of per cpu caches in this zone. This 2101 * is done so that we don't allocate more memory than we really need. 2102 */ 2103 if (zone->uz_fills >= mp_ncpus) 2104 goto done; 2105 2106 #endif 2107 zone->uz_fills++; 2108 2109 max = MIN(bucket->ub_entries, zone->uz_count); 2110 /* Try to keep the buckets totally full */ 2111 saved = bucket->ub_cnt; 2112 while (bucket->ub_cnt < max && 2113 (slab = uma_zone_slab(zone, flags)) != NULL) { 2114 while (slab->us_freecount && bucket->ub_cnt < max) { 2115 bucket->ub_bucket[bucket->ub_cnt++] = 2116 uma_slab_alloc(zone, slab); 2117 } 2118 2119 /* Don't block on the next fill */ 2120 flags |= M_NOWAIT; 2121 } 2122 2123 /* 2124 * We unlock here because we need to call the zone's init. 2125 * It should be safe to unlock because the slab dealt with 2126 * above is already on the appropriate list within the keg 2127 * and the bucket we filled is not yet on any list, so we 2128 * own it. 2129 */ 2130 if (zone->uz_init != NULL) { 2131 int i; 2132 2133 ZONE_UNLOCK(zone); 2134 for (i = saved; i < bucket->ub_cnt; i++) 2135 if (zone->uz_init(bucket->ub_bucket[i], 2136 zone->uz_keg->uk_size, origflags) != 0) 2137 break; 2138 /* 2139 * If we couldn't initialize the whole bucket, put the 2140 * rest back onto the freelist. 2141 */ 2142 if (i != bucket->ub_cnt) { 2143 int j; 2144 2145 for (j = i; j < bucket->ub_cnt; j++) { 2146 uma_zfree_internal(zone, bucket->ub_bucket[j], 2147 NULL, SKIP_FINI); 2148 #ifdef INVARIANTS 2149 bucket->ub_bucket[j] = NULL; 2150 #endif 2151 } 2152 bucket->ub_cnt = i; 2153 } 2154 ZONE_LOCK(zone); 2155 } 2156 2157 zone->uz_fills--; 2158 if (bucket->ub_cnt != 0) { 2159 LIST_INSERT_HEAD(&zone->uz_full_bucket, 2160 bucket, ub_link); 2161 return (1); 2162 } 2163 #ifdef SMP 2164 done: 2165 #endif 2166 bucket_free(bucket); 2167 2168 return (0); 2169 } 2170 /* 2171 * Allocates an item for an internal zone 2172 * 2173 * Arguments 2174 * zone The zone to alloc for. 2175 * udata The data to be passed to the constructor. 2176 * flags M_WAITOK, M_NOWAIT, M_ZERO. 2177 * 2178 * Returns 2179 * NULL if there is no memory and M_NOWAIT is set 2180 * An item if successful 2181 */ 2182 2183 static void * 2184 uma_zalloc_internal(uma_zone_t zone, void *udata, int flags) 2185 { 2186 uma_keg_t keg; 2187 uma_slab_t slab; 2188 void *item; 2189 2190 item = NULL; 2191 keg = zone->uz_keg; 2192 2193 #ifdef UMA_DEBUG_ALLOC 2194 printf("INTERNAL: Allocating one item from %s(%p)\n", zone->uz_name, zone); 2195 #endif 2196 ZONE_LOCK(zone); 2197 2198 slab = uma_zone_slab(zone, flags); 2199 if (slab == NULL) { 2200 ZONE_UNLOCK(zone); 2201 return (NULL); 2202 } 2203 2204 item = uma_slab_alloc(zone, slab); 2205 2206 ZONE_UNLOCK(zone); 2207 2208 /* 2209 * We have to call both the zone's init (not the keg's init) 2210 * and the zone's ctor. This is because the item is going from 2211 * a keg slab directly to the user, and the user is expecting it 2212 * to be both zone-init'd as well as zone-ctor'd. 2213 */ 2214 if (zone->uz_init != NULL) { 2215 if (zone->uz_init(item, keg->uk_size, flags) != 0) { 2216 uma_zfree_internal(zone, item, udata, SKIP_FINI); 2217 return (NULL); 2218 } 2219 } 2220 if (zone->uz_ctor != NULL) { 2221 if (zone->uz_ctor(item, keg->uk_size, udata, flags) != 0) { 2222 uma_zfree_internal(zone, item, udata, SKIP_DTOR); 2223 return (NULL); 2224 } 2225 } 2226 if (flags & M_ZERO) 2227 bzero(item, keg->uk_size); 2228 2229 return (item); 2230 } 2231 2232 /* See uma.h */ 2233 void 2234 uma_zfree_arg(uma_zone_t zone, void *item, void *udata) 2235 { 2236 uma_keg_t keg; 2237 uma_cache_t cache; 2238 uma_bucket_t bucket; 2239 int bflags; 2240 int cpu; 2241 2242 keg = zone->uz_keg; 2243 2244 #ifdef UMA_DEBUG_ALLOC_1 2245 printf("Freeing item %p to %s(%p)\n", item, zone->uz_name, zone); 2246 #endif 2247 CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread, 2248 zone->uz_name); 2249 2250 if (zone->uz_dtor) 2251 zone->uz_dtor(item, keg->uk_size, udata); 2252 #ifdef INVARIANTS 2253 ZONE_LOCK(zone); 2254 if (keg->uk_flags & UMA_ZONE_MALLOC) 2255 uma_dbg_free(zone, udata, item); 2256 else 2257 uma_dbg_free(zone, NULL, item); 2258 ZONE_UNLOCK(zone); 2259 #endif 2260 /* 2261 * The race here is acceptable. If we miss it we'll just have to wait 2262 * a little longer for the limits to be reset. 2263 */ 2264 if (keg->uk_flags & UMA_ZFLAG_FULL) 2265 goto zfree_internal; 2266 2267 /* 2268 * If possible, free to the per-CPU cache. There are two 2269 * requirements for safe access to the per-CPU cache: (1) the thread 2270 * accessing the cache must not be preempted or yield during access, 2271 * and (2) the thread must not migrate CPUs without switching which 2272 * cache it accesses. We rely on a critical section to prevent 2273 * preemption and migration. We release the critical section in 2274 * order to acquire the zone mutex if we are unable to free to the 2275 * current cache; when we re-acquire the critical section, we must 2276 * detect and handle migration if it has occurred. 2277 */ 2278 zfree_restart: 2279 critical_enter(); 2280 cpu = curcpu; 2281 cache = &zone->uz_cpu[cpu]; 2282 2283 zfree_start: 2284 bucket = cache->uc_freebucket; 2285 2286 if (bucket) { 2287 /* 2288 * Do we have room in our bucket? It is OK for this uz count 2289 * check to be slightly out of sync. 2290 */ 2291 2292 if (bucket->ub_cnt < bucket->ub_entries) { 2293 KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL, 2294 ("uma_zfree: Freeing to non free bucket index.")); 2295 bucket->ub_bucket[bucket->ub_cnt] = item; 2296 bucket->ub_cnt++; 2297 critical_exit(); 2298 return; 2299 } else if (cache->uc_allocbucket) { 2300 #ifdef UMA_DEBUG_ALLOC 2301 printf("uma_zfree: Swapping buckets.\n"); 2302 #endif 2303 /* 2304 * We have run out of space in our freebucket. 2305 * See if we can switch with our alloc bucket. 2306 */ 2307 if (cache->uc_allocbucket->ub_cnt < 2308 cache->uc_freebucket->ub_cnt) { 2309 bucket = cache->uc_freebucket; 2310 cache->uc_freebucket = cache->uc_allocbucket; 2311 cache->uc_allocbucket = bucket; 2312 goto zfree_start; 2313 } 2314 } 2315 } 2316 /* 2317 * We can get here for two reasons: 2318 * 2319 * 1) The buckets are NULL 2320 * 2) The alloc and free buckets are both somewhat full. 2321 * 2322 * We must go back the zone, which requires acquiring the zone lock, 2323 * which in turn means we must release and re-acquire the critical 2324 * section. Since the critical section is released, we may be 2325 * preempted or migrate. As such, make sure not to maintain any 2326 * thread-local state specific to the cache from prior to releasing 2327 * the critical section. 2328 */ 2329 critical_exit(); 2330 ZONE_LOCK(zone); 2331 critical_enter(); 2332 cpu = curcpu; 2333 cache = &zone->uz_cpu[cpu]; 2334 if (cache->uc_freebucket != NULL) { 2335 if (cache->uc_freebucket->ub_cnt < 2336 cache->uc_freebucket->ub_entries) { 2337 ZONE_UNLOCK(zone); 2338 goto zfree_start; 2339 } 2340 if (cache->uc_allocbucket != NULL && 2341 (cache->uc_allocbucket->ub_cnt < 2342 cache->uc_freebucket->ub_cnt)) { 2343 ZONE_UNLOCK(zone); 2344 goto zfree_start; 2345 } 2346 } 2347 2348 bucket = cache->uc_freebucket; 2349 cache->uc_freebucket = NULL; 2350 2351 /* Can we throw this on the zone full list? */ 2352 if (bucket != NULL) { 2353 #ifdef UMA_DEBUG_ALLOC 2354 printf("uma_zfree: Putting old bucket on the free list.\n"); 2355 #endif 2356 /* ub_cnt is pointing to the last free item */ 2357 KASSERT(bucket->ub_cnt != 0, 2358 ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n")); 2359 LIST_INSERT_HEAD(&zone->uz_full_bucket, 2360 bucket, ub_link); 2361 } 2362 if ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) { 2363 LIST_REMOVE(bucket, ub_link); 2364 ZONE_UNLOCK(zone); 2365 cache->uc_freebucket = bucket; 2366 goto zfree_start; 2367 } 2368 /* We are no longer associated with this CPU. */ 2369 critical_exit(); 2370 2371 /* And the zone.. */ 2372 ZONE_UNLOCK(zone); 2373 2374 #ifdef UMA_DEBUG_ALLOC 2375 printf("uma_zfree: Allocating new free bucket.\n"); 2376 #endif 2377 bflags = M_NOWAIT; 2378 2379 if (keg->uk_flags & UMA_ZFLAG_CACHEONLY) 2380 bflags |= M_NOVM; 2381 bucket = bucket_alloc(zone->uz_count, bflags); 2382 if (bucket) { 2383 ZONE_LOCK(zone); 2384 LIST_INSERT_HEAD(&zone->uz_free_bucket, 2385 bucket, ub_link); 2386 ZONE_UNLOCK(zone); 2387 goto zfree_restart; 2388 } 2389 2390 /* 2391 * If nothing else caught this, we'll just do an internal free. 2392 */ 2393 zfree_internal: 2394 uma_zfree_internal(zone, item, udata, SKIP_DTOR); 2395 2396 return; 2397 } 2398 2399 /* 2400 * Frees an item to an INTERNAL zone or allocates a free bucket 2401 * 2402 * Arguments: 2403 * zone The zone to free to 2404 * item The item we're freeing 2405 * udata User supplied data for the dtor 2406 * skip Skip dtors and finis 2407 */ 2408 static void 2409 uma_zfree_internal(uma_zone_t zone, void *item, void *udata, 2410 enum zfreeskip skip) 2411 { 2412 uma_slab_t slab; 2413 uma_slabrefcnt_t slabref; 2414 uma_keg_t keg; 2415 u_int8_t *mem; 2416 u_int8_t freei; 2417 2418 keg = zone->uz_keg; 2419 2420 if (skip < SKIP_DTOR && zone->uz_dtor) 2421 zone->uz_dtor(item, keg->uk_size, udata); 2422 if (skip < SKIP_FINI && zone->uz_fini) 2423 zone->uz_fini(item, keg->uk_size); 2424 2425 ZONE_LOCK(zone); 2426 2427 if (!(keg->uk_flags & UMA_ZONE_MALLOC)) { 2428 mem = (u_int8_t *)((unsigned long)item & (~UMA_SLAB_MASK)); 2429 if (keg->uk_flags & UMA_ZONE_HASH) 2430 slab = hash_sfind(&keg->uk_hash, mem); 2431 else { 2432 mem += keg->uk_pgoff; 2433 slab = (uma_slab_t)mem; 2434 } 2435 } else { 2436 slab = (uma_slab_t)udata; 2437 } 2438 2439 /* Do we need to remove from any lists? */ 2440 if (slab->us_freecount+1 == keg->uk_ipers) { 2441 LIST_REMOVE(slab, us_link); 2442 LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link); 2443 } else if (slab->us_freecount == 0) { 2444 LIST_REMOVE(slab, us_link); 2445 LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link); 2446 } 2447 2448 /* Slab management stuff */ 2449 freei = ((unsigned long)item - (unsigned long)slab->us_data) 2450 / keg->uk_rsize; 2451 2452 #ifdef INVARIANTS 2453 if (!skip) 2454 uma_dbg_free(zone, slab, item); 2455 #endif 2456 2457 if (keg->uk_flags & UMA_ZONE_REFCNT) { 2458 slabref = (uma_slabrefcnt_t)slab; 2459 slabref->us_freelist[freei].us_item = slab->us_firstfree; 2460 } else { 2461 slab->us_freelist[freei].us_item = slab->us_firstfree; 2462 } 2463 slab->us_firstfree = freei; 2464 slab->us_freecount++; 2465 2466 /* Zone statistics */ 2467 keg->uk_free++; 2468 2469 if (keg->uk_flags & UMA_ZFLAG_FULL) { 2470 if (keg->uk_pages < keg->uk_maxpages) 2471 keg->uk_flags &= ~UMA_ZFLAG_FULL; 2472 2473 /* We can handle one more allocation */ 2474 wakeup_one(keg); 2475 } 2476 2477 ZONE_UNLOCK(zone); 2478 } 2479 2480 /* See uma.h */ 2481 void 2482 uma_zone_set_max(uma_zone_t zone, int nitems) 2483 { 2484 uma_keg_t keg; 2485 2486 keg = zone->uz_keg; 2487 ZONE_LOCK(zone); 2488 if (keg->uk_ppera > 1) 2489 keg->uk_maxpages = nitems * keg->uk_ppera; 2490 else 2491 keg->uk_maxpages = nitems / keg->uk_ipers; 2492 2493 if (keg->uk_maxpages * keg->uk_ipers < nitems) 2494 keg->uk_maxpages++; 2495 2496 ZONE_UNLOCK(zone); 2497 } 2498 2499 /* See uma.h */ 2500 void 2501 uma_zone_set_init(uma_zone_t zone, uma_init uminit) 2502 { 2503 ZONE_LOCK(zone); 2504 KASSERT(zone->uz_keg->uk_pages == 0, 2505 ("uma_zone_set_init on non-empty keg")); 2506 zone->uz_keg->uk_init = uminit; 2507 ZONE_UNLOCK(zone); 2508 } 2509 2510 /* See uma.h */ 2511 void 2512 uma_zone_set_fini(uma_zone_t zone, uma_fini fini) 2513 { 2514 ZONE_LOCK(zone); 2515 KASSERT(zone->uz_keg->uk_pages == 0, 2516 ("uma_zone_set_fini on non-empty keg")); 2517 zone->uz_keg->uk_fini = fini; 2518 ZONE_UNLOCK(zone); 2519 } 2520 2521 /* See uma.h */ 2522 void 2523 uma_zone_set_zinit(uma_zone_t zone, uma_init zinit) 2524 { 2525 ZONE_LOCK(zone); 2526 KASSERT(zone->uz_keg->uk_pages == 0, 2527 ("uma_zone_set_zinit on non-empty keg")); 2528 zone->uz_init = zinit; 2529 ZONE_UNLOCK(zone); 2530 } 2531 2532 /* See uma.h */ 2533 void 2534 uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini) 2535 { 2536 ZONE_LOCK(zone); 2537 KASSERT(zone->uz_keg->uk_pages == 0, 2538 ("uma_zone_set_zfini on non-empty keg")); 2539 zone->uz_fini = zfini; 2540 ZONE_UNLOCK(zone); 2541 } 2542 2543 /* See uma.h */ 2544 /* XXX uk_freef is not actually used with the zone locked */ 2545 void 2546 uma_zone_set_freef(uma_zone_t zone, uma_free freef) 2547 { 2548 ZONE_LOCK(zone); 2549 zone->uz_keg->uk_freef = freef; 2550 ZONE_UNLOCK(zone); 2551 } 2552 2553 /* See uma.h */ 2554 /* XXX uk_allocf is not actually used with the zone locked */ 2555 void 2556 uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf) 2557 { 2558 ZONE_LOCK(zone); 2559 zone->uz_keg->uk_flags |= UMA_ZFLAG_PRIVALLOC; 2560 zone->uz_keg->uk_allocf = allocf; 2561 ZONE_UNLOCK(zone); 2562 } 2563 2564 /* See uma.h */ 2565 int 2566 uma_zone_set_obj(uma_zone_t zone, struct vm_object *obj, int count) 2567 { 2568 uma_keg_t keg; 2569 vm_offset_t kva; 2570 int pages; 2571 2572 keg = zone->uz_keg; 2573 pages = count / keg->uk_ipers; 2574 2575 if (pages * keg->uk_ipers < count) 2576 pages++; 2577 2578 kva = kmem_alloc_nofault(kernel_map, pages * UMA_SLAB_SIZE); 2579 2580 if (kva == 0) 2581 return (0); 2582 if (obj == NULL) { 2583 obj = vm_object_allocate(OBJT_DEFAULT, 2584 pages); 2585 } else { 2586 VM_OBJECT_LOCK_INIT(obj, "uma object"); 2587 _vm_object_allocate(OBJT_DEFAULT, 2588 pages, obj); 2589 } 2590 ZONE_LOCK(zone); 2591 keg->uk_kva = kva; 2592 keg->uk_obj = obj; 2593 keg->uk_maxpages = pages; 2594 keg->uk_allocf = obj_alloc; 2595 keg->uk_flags |= UMA_ZONE_NOFREE | UMA_ZFLAG_PRIVALLOC; 2596 ZONE_UNLOCK(zone); 2597 return (1); 2598 } 2599 2600 /* See uma.h */ 2601 void 2602 uma_prealloc(uma_zone_t zone, int items) 2603 { 2604 int slabs; 2605 uma_slab_t slab; 2606 uma_keg_t keg; 2607 2608 keg = zone->uz_keg; 2609 ZONE_LOCK(zone); 2610 slabs = items / keg->uk_ipers; 2611 if (slabs * keg->uk_ipers < items) 2612 slabs++; 2613 while (slabs > 0) { 2614 slab = slab_zalloc(zone, M_WAITOK); 2615 LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link); 2616 slabs--; 2617 } 2618 ZONE_UNLOCK(zone); 2619 } 2620 2621 /* See uma.h */ 2622 u_int32_t * 2623 uma_find_refcnt(uma_zone_t zone, void *item) 2624 { 2625 uma_slabrefcnt_t slabref; 2626 uma_keg_t keg; 2627 u_int32_t *refcnt; 2628 int idx; 2629 2630 keg = zone->uz_keg; 2631 slabref = (uma_slabrefcnt_t)vtoslab((vm_offset_t)item & 2632 (~UMA_SLAB_MASK)); 2633 KASSERT(slabref != NULL && slabref->us_keg->uk_flags & UMA_ZONE_REFCNT, 2634 ("uma_find_refcnt(): zone possibly not UMA_ZONE_REFCNT")); 2635 idx = ((unsigned long)item - (unsigned long)slabref->us_data) 2636 / keg->uk_rsize; 2637 refcnt = &slabref->us_freelist[idx].us_refcnt; 2638 return refcnt; 2639 } 2640 2641 /* See uma.h */ 2642 void 2643 uma_reclaim(void) 2644 { 2645 #ifdef UMA_DEBUG 2646 printf("UMA: vm asked us to release pages!\n"); 2647 #endif 2648 bucket_enable(); 2649 zone_foreach(zone_drain); 2650 /* 2651 * Some slabs may have been freed but this zone will be visited early 2652 * we visit again so that we can free pages that are empty once other 2653 * zones are drained. We have to do the same for buckets. 2654 */ 2655 zone_drain(slabzone); 2656 zone_drain(slabrefzone); 2657 bucket_zone_drain(); 2658 } 2659 2660 void * 2661 uma_large_malloc(int size, int wait) 2662 { 2663 void *mem; 2664 uma_slab_t slab; 2665 u_int8_t flags; 2666 2667 slab = uma_zalloc_internal(slabzone, NULL, wait); 2668 if (slab == NULL) 2669 return (NULL); 2670 mem = page_alloc(NULL, size, &flags, wait); 2671 if (mem) { 2672 vsetslab((vm_offset_t)mem, slab); 2673 slab->us_data = mem; 2674 slab->us_flags = flags | UMA_SLAB_MALLOC; 2675 slab->us_size = size; 2676 } else { 2677 uma_zfree_internal(slabzone, slab, NULL, SKIP_NONE); 2678 } 2679 2680 return (mem); 2681 } 2682 2683 void 2684 uma_large_free(uma_slab_t slab) 2685 { 2686 vsetobj((vm_offset_t)slab->us_data, kmem_object); 2687 page_free(slab->us_data, slab->us_size, slab->us_flags); 2688 uma_zfree_internal(slabzone, slab, NULL, SKIP_NONE); 2689 } 2690 2691 void 2692 uma_print_stats(void) 2693 { 2694 zone_foreach(uma_print_zone); 2695 } 2696 2697 static void 2698 slab_print(uma_slab_t slab) 2699 { 2700 printf("slab: keg %p, data %p, freecount %d, firstfree %d\n", 2701 slab->us_keg, slab->us_data, slab->us_freecount, 2702 slab->us_firstfree); 2703 } 2704 2705 static void 2706 cache_print(uma_cache_t cache) 2707 { 2708 printf("alloc: %p(%d), free: %p(%d)\n", 2709 cache->uc_allocbucket, 2710 cache->uc_allocbucket?cache->uc_allocbucket->ub_cnt:0, 2711 cache->uc_freebucket, 2712 cache->uc_freebucket?cache->uc_freebucket->ub_cnt:0); 2713 } 2714 2715 void 2716 uma_print_zone(uma_zone_t zone) 2717 { 2718 uma_cache_t cache; 2719 uma_keg_t keg; 2720 uma_slab_t slab; 2721 int i; 2722 2723 keg = zone->uz_keg; 2724 printf("%s(%p) size %d(%d) flags %d ipers %d ppera %d out %d free %d\n", 2725 zone->uz_name, zone, keg->uk_size, keg->uk_rsize, keg->uk_flags, 2726 keg->uk_ipers, keg->uk_ppera, 2727 (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free); 2728 printf("Part slabs:\n"); 2729 LIST_FOREACH(slab, &keg->uk_part_slab, us_link) 2730 slab_print(slab); 2731 printf("Free slabs:\n"); 2732 LIST_FOREACH(slab, &keg->uk_free_slab, us_link) 2733 slab_print(slab); 2734 printf("Full slabs:\n"); 2735 LIST_FOREACH(slab, &keg->uk_full_slab, us_link) 2736 slab_print(slab); 2737 for (i = 0; i <= mp_maxid; i++) { 2738 if (CPU_ABSENT(i)) 2739 continue; 2740 cache = &zone->uz_cpu[i]; 2741 printf("CPU %d Cache:\n", i); 2742 cache_print(cache); 2743 } 2744 } 2745 2746 /* 2747 * Sysctl handler for vm.zone 2748 * 2749 * stolen from vm_zone.c 2750 */ 2751 static int 2752 sysctl_vm_zone(SYSCTL_HANDLER_ARGS) 2753 { 2754 int error, len, cnt; 2755 const int linesize = 128; /* conservative */ 2756 int totalfree; 2757 char *tmpbuf, *offset; 2758 uma_zone_t z; 2759 uma_keg_t zk; 2760 char *p; 2761 int cpu; 2762 int cachefree; 2763 uma_bucket_t bucket; 2764 uma_cache_t cache; 2765 u_int64_t alloc; 2766 2767 cnt = 0; 2768 mtx_lock(&uma_mtx); 2769 LIST_FOREACH(zk, &uma_kegs, uk_link) { 2770 LIST_FOREACH(z, &zk->uk_zones, uz_link) 2771 cnt++; 2772 } 2773 mtx_unlock(&uma_mtx); 2774 MALLOC(tmpbuf, char *, (cnt == 0 ? 1 : cnt) * linesize, 2775 M_TEMP, M_WAITOK); 2776 len = snprintf(tmpbuf, linesize, 2777 "\nITEM SIZE LIMIT USED FREE REQUESTS\n\n"); 2778 if (cnt == 0) 2779 tmpbuf[len - 1] = '\0'; 2780 error = SYSCTL_OUT(req, tmpbuf, cnt == 0 ? len-1 : len); 2781 if (error || cnt == 0) 2782 goto out; 2783 offset = tmpbuf; 2784 mtx_lock(&uma_mtx); 2785 LIST_FOREACH(zk, &uma_kegs, uk_link) { 2786 LIST_FOREACH(z, &zk->uk_zones, uz_link) { 2787 if (cnt == 0) /* list may have changed size */ 2788 break; 2789 ZONE_LOCK(z); 2790 cachefree = 0; 2791 alloc = 0; 2792 if (!(zk->uk_flags & UMA_ZFLAG_INTERNAL)) { 2793 for (cpu = 0; cpu <= mp_maxid; cpu++) { 2794 if (CPU_ABSENT(cpu)) 2795 continue; 2796 cache = &z->uz_cpu[cpu]; 2797 if (cache->uc_allocbucket != NULL) 2798 cachefree += cache->uc_allocbucket->ub_cnt; 2799 if (cache->uc_freebucket != NULL) 2800 cachefree += cache->uc_freebucket->ub_cnt; 2801 alloc += cache->uc_allocs; 2802 cache->uc_allocs = 0; 2803 } 2804 } 2805 alloc += z->uz_allocs; 2806 2807 LIST_FOREACH(bucket, &z->uz_full_bucket, ub_link) { 2808 cachefree += bucket->ub_cnt; 2809 } 2810 totalfree = zk->uk_free + cachefree; 2811 len = snprintf(offset, linesize, 2812 "%-12.12s %6.6u, %8.8u, %6.6u, %6.6u, %8.8llu\n", 2813 z->uz_name, zk->uk_size, 2814 zk->uk_maxpages * zk->uk_ipers, 2815 (zk->uk_ipers * (zk->uk_pages / zk->uk_ppera)) - totalfree, 2816 totalfree, 2817 (unsigned long long)alloc); 2818 ZONE_UNLOCK(z); 2819 for (p = offset + 12; p > offset && *p == ' '; --p) 2820 /* nothing */ ; 2821 p[1] = ':'; 2822 cnt--; 2823 offset += len; 2824 } 2825 } 2826 mtx_unlock(&uma_mtx); 2827 *offset++ = '\0'; 2828 error = SYSCTL_OUT(req, tmpbuf, offset - tmpbuf); 2829 out: 2830 FREE(tmpbuf, M_TEMP); 2831 return (error); 2832 } 2833