/*-
 * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson <jeff@FreeBSD.org>
 * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
 * Copyright (c) 2004-2006 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uma_core.c  Implementation of the Universal Memory allocator
 *
 * This allocator is intended to replace the multitude of similar object caches
 * in the standard FreeBSD kernel.  The intent is to be flexible as well as
 * efficient.  A primary design goal is to return unused memory to the rest of
 * the system.  This will make the system as a whole more flexible due to the
 * ability to move memory to subsystems which most need it instead of leaving
 * pools of reserved memory unused.
 *
 * The basic ideas stem from similar slab/zone based allocators whose
 * algorithms are well known.
 */

/*
 * TODO:
 *	- Improve memory usage for large allocations
 *	- Investigate cache size adjustments
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/* I should really use ktr.. */
/*
#define UMA_DEBUG 1
#define UMA_DEBUG_ALLOC 1
#define UMA_DEBUG_ALLOC_1 1
*/

#include "opt_ddb.h"
#include "opt_param.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitset.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

#include <ddb/ddb.h>

#ifdef DEBUG_MEMGUARD
#include <vm/memguard.h>
#endif

/*
 * This is the zone and keg from which all zones are spawned.
 * The idea is that even the zone & keg heads are allocated from the
 * allocator, so we use the bss section to bootstrap us.
 */
static struct uma_keg masterkeg;
static struct uma_zone masterzone_k;
static struct uma_zone masterzone_z;
static uma_zone_t kegs = &masterzone_k;
static uma_zone_t zones = &masterzone_z;

/* This is the zone from which all of uma_slab_t's are allocated. */
static uma_zone_t slabzone;
static uma_zone_t slabrefzone;	/* With refcounters (for UMA_ZONE_REFCNT) */

/*
 * The initial hash tables come out of this zone so they can be allocated
 * prior to malloc coming up.
 */
static uma_zone_t hashzone;

/* The boot-time adjusted value for cache line alignment. */
int uma_align_cache = 64 - 1;

static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");

/*
 * Are we allowed to allocate buckets?
 */
static int bucketdisable = 1;

/* Linked list of all kegs in the system */
static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);

/* Linked list of all cache-only zones in the system */
static LIST_HEAD(,uma_zone) uma_cachezones =
    LIST_HEAD_INITIALIZER(uma_cachezones);

/* This RW lock protects the keg list */
static struct rwlock_padalign uma_rwlock;

/* Linked list of boot time pages */
static LIST_HEAD(,uma_slab) uma_boot_pages =
    LIST_HEAD_INITIALIZER(uma_boot_pages);

/* This mutex protects the boot time pages list */
static struct mtx_padalign uma_boot_pages_mtx;

static struct sx uma_drain_lock;

/* Is the VM done starting up? */
static int booted = 0;
#define	UMA_STARTUP	1
#define	UMA_STARTUP2	2

/*
 * Only mbuf clusters use ref zones.  Just provide enough references
 * to support the one user.  New code should not use the ref facility.
 */
static const u_int uma_max_ipers_ref = PAGE_SIZE / MCLBYTES;

/*
 * This is the handle used to schedule events that need to happen
 * outside of the allocation fast path.
 */
static struct callout uma_callout;
#define	UMA_TIMEOUT	20		/* Seconds for callout interval. */

/*
 * This structure is passed as the zone ctor arg so that I don't have to create
 * a special allocation function just for zones.
 */
struct uma_zctor_args {
	const char *name;
	size_t size;
	uma_ctor ctor;
	uma_dtor dtor;
	uma_init uminit;
	uma_fini fini;
	uma_import import;
	uma_release release;
	void *arg;
	uma_keg_t keg;
	int align;
	uint32_t flags;
};

struct uma_kctor_args {
	uma_zone_t zone;
	size_t size;
	uma_init uminit;
	uma_fini fini;
	int align;
	uint32_t flags;
};

struct uma_bucket_zone {
	uma_zone_t	ubz_zone;
	char		*ubz_name;
	int		ubz_entries;	/* Number of items it can hold. */
	int		ubz_maxsize;	/* Maximum allocation size per-item. */
};

/*
 * Compute the actual number of bucket entries to pack them in power
 * of two sizes for more efficient space utilization.
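 *
 * For example, on a hypothetical LP64 machine where the bucket header
 * (struct uma_bucket) occupies 24 bytes: BUCKET_SIZE(4) below works out
 * to (4 * 8 - 24) / 8 = 1 entry, so the header plus the entry array pack
 * exactly into a 32-byte (four pointer) allocation.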
 */
#define	BUCKET_SIZE(n)						\
    (((sizeof(void *) * (n)) - sizeof(struct uma_bucket)) / sizeof(void *))

#define	BUCKET_MAX	BUCKET_SIZE(256)

struct uma_bucket_zone bucket_zones[] = {
	{ NULL, "4 Bucket", BUCKET_SIZE(4), 4096 },
	{ NULL, "6 Bucket", BUCKET_SIZE(6), 3072 },
	{ NULL, "8 Bucket", BUCKET_SIZE(8), 2048 },
	{ NULL, "12 Bucket", BUCKET_SIZE(12), 1536 },
	{ NULL, "16 Bucket", BUCKET_SIZE(16), 1024 },
	{ NULL, "32 Bucket", BUCKET_SIZE(32), 512 },
	{ NULL, "64 Bucket", BUCKET_SIZE(64), 256 },
	{ NULL, "128 Bucket", BUCKET_SIZE(128), 128 },
	{ NULL, "256 Bucket", BUCKET_SIZE(256), 64 },
	{ NULL, NULL, 0}
};

/*
 * Flags and enumerations to be passed to internal functions.
 */
enum zfreeskip { SKIP_NONE = 0, SKIP_DTOR, SKIP_FINI };

/* Prototypes.. */

static void *noobj_alloc(uma_zone_t, int, uint8_t *, int);
static void *page_alloc(uma_zone_t, int, uint8_t *, int);
static void *startup_alloc(uma_zone_t, int, uint8_t *, int);
static void page_free(void *, int, uint8_t);
static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int);
static void cache_drain(uma_zone_t);
static void bucket_drain(uma_zone_t, uma_bucket_t);
static void bucket_cache_drain(uma_zone_t zone);
static int keg_ctor(void *, int, void *, int);
static void keg_dtor(void *, int, void *);
static int zone_ctor(void *, int, void *, int);
static void zone_dtor(void *, int, void *);
static int zero_init(void *, int, int);
static void keg_small_init(uma_keg_t keg);
static void keg_large_init(uma_keg_t keg);
static void zone_foreach(void (*zfunc)(uma_zone_t));
static void zone_timeout(uma_zone_t zone);
static int hash_alloc(struct uma_hash *);
static int hash_expand(struct uma_hash *, struct uma_hash *);
static void hash_free(struct uma_hash *hash);
static void uma_timeout(void *);
static void uma_startup3(void);
static void *zone_alloc_item(uma_zone_t, void *, int);
static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip);
static void bucket_enable(void);
static void bucket_init(void);
static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int);
static void bucket_free(uma_zone_t zone, uma_bucket_t, void *);
static void bucket_zone_drain(void);
static uma_bucket_t zone_alloc_bucket(uma_zone_t zone, void *, int flags);
static uma_slab_t zone_fetch_slab(uma_zone_t zone, uma_keg_t last, int flags);
static uma_slab_t zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int flags);
static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
static void slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item);
static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
    uma_fini fini, int align, uint32_t flags);
static int zone_import(uma_zone_t zone, void **bucket, int max, int flags);
static void zone_release(uma_zone_t zone, void **bucket, int cnt);
static void uma_zero_item(void *item, uma_zone_t zone);

void uma_print_zone(uma_zone_t);
void uma_print_stats(void);
static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);

SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);

SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLTYPE_INT,
    0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones");

SYSCTL_PROC(_vm, OID_AUTO, zone_stats,
    CTLFLAG_RD|CTLTYPE_STRUCT,
    0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");

static int zone_warnings = 1;
SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RWTUN, &zone_warnings, 0,
    "Warn when UMA zones become full");

/*
 * This routine checks to see whether or not it's safe to enable buckets.
 */
static void
bucket_enable(void)
{

	bucketdisable = vm_page_count_min();
}

/*
 * Initialize bucket_zones, the array of zones of buckets of various sizes.
 *
 * For each zone, calculate the memory required for each bucket, consisting
 * of the header and an array of pointers.
 */
static void
bucket_init(void)
{
	struct uma_bucket_zone *ubz;
	int size;
	int i;

	for (i = 0, ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) {
		size = roundup(sizeof(struct uma_bucket), sizeof(void *));
		size += sizeof(void *) * ubz->ubz_entries;
		ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
		    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
		    UMA_ZONE_MTXCLASS | UMA_ZFLAG_BUCKET);
	}
}

/*
 * Given a desired number of entries for a bucket, return the zone from which
 * to allocate the bucket.
 */
static struct uma_bucket_zone *
bucket_zone_lookup(int entries)
{
	struct uma_bucket_zone *ubz;

	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
		if (ubz->ubz_entries >= entries)
			return (ubz);
	ubz--;
	return (ubz);
}

static int
bucket_select(int size)
{
	struct uma_bucket_zone *ubz;

	ubz = &bucket_zones[0];
	if (size > ubz->ubz_maxsize)
		return MAX((ubz->ubz_maxsize * ubz->ubz_entries) / size, 1);

	for (; ubz->ubz_entries != 0; ubz++)
		if (ubz->ubz_maxsize < size)
			break;
	ubz--;
	return (ubz->ubz_entries);
}

static uma_bucket_t
bucket_alloc(uma_zone_t zone, void *udata, int flags)
{
	struct uma_bucket_zone *ubz;
	uma_bucket_t bucket;

	/*
	 * This is to stop us from allocating per cpu buckets while we're
	 * running out of vm.boot_pages.  Otherwise, we would exhaust the
	 * boot pages.  This also prevents us from allocating buckets in
	 * low memory situations.
	 */
	if (bucketdisable)
		return (NULL);
	/*
	 * To limit bucket recursion we store the original zone flags
	 * in a cookie passed via zalloc_arg/zfree_arg.  This allows the
	 * NOVM flag to persist even through deep recursions.  We also
	 * store ZFLAG_BUCKET once we have recursed attempting to allocate
	 * a bucket for a bucket zone so we do not allow infinite bucket
	 * recursion.  This cookie will even persist to frees of unused
	 * buckets via the allocation path or bucket allocations in the
	 * free path.
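	 *
	 * A sketch of the encoding (relying, as the casts below do, on the
	 * flag bits fitting in a uintptr_t): the first level passes
	 * udata = (void *)zone->uz_flags; if that allocation recurses into
	 * a bucket zone, UMA_ZFLAG_BUCKET is OR'd into the cookie, and any
	 * deeper attempt sees the bit set and returns NULL rather than
	 * recursing again.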
	 */
	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
		udata = (void *)(uintptr_t)zone->uz_flags;
	else {
		if ((uintptr_t)udata & UMA_ZFLAG_BUCKET)
			return (NULL);
		udata = (void *)((uintptr_t)udata | UMA_ZFLAG_BUCKET);
	}
	if ((uintptr_t)udata & UMA_ZFLAG_CACHEONLY)
		flags |= M_NOVM;
	ubz = bucket_zone_lookup(zone->uz_count);
	if (ubz->ubz_zone == zone && (ubz + 1)->ubz_entries != 0)
		ubz++;
	bucket = uma_zalloc_arg(ubz->ubz_zone, udata, flags);
	if (bucket) {
#ifdef INVARIANTS
		bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
#endif
		bucket->ub_cnt = 0;
		bucket->ub_entries = ubz->ubz_entries;
	}

	return (bucket);
}

static void
bucket_free(uma_zone_t zone, uma_bucket_t bucket, void *udata)
{
	struct uma_bucket_zone *ubz;

	KASSERT(bucket->ub_cnt == 0,
	    ("bucket_free: Freeing a non free bucket."));
	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
		udata = (void *)(uintptr_t)zone->uz_flags;
	ubz = bucket_zone_lookup(bucket->ub_entries);
	uma_zfree_arg(ubz->ubz_zone, bucket, udata);
}

static void
bucket_zone_drain(void)
{
	struct uma_bucket_zone *ubz;

	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
		zone_drain(ubz->ubz_zone);
}

static void
zone_log_warning(uma_zone_t zone)
{
	static const struct timeval warninterval = { 300, 0 };

	if (!zone_warnings || zone->uz_warning == NULL)
		return;

	if (ratecheck(&zone->uz_ratecheck, &warninterval))
		printf("[zone: %s] %s\n", zone->uz_name, zone->uz_warning);
}

static void
zone_foreach_keg(uma_zone_t zone, void (*kegfn)(uma_keg_t))
{
	uma_klink_t klink;

	LIST_FOREACH(klink, &zone->uz_kegs, kl_link)
		kegfn(klink->kl_keg);
}

/*
 * Routine called by timeout which is used to fire off some time interval
 * based calculations.  (stats, hash size, etc.)
 *
 * Arguments:
 *	arg   Unused
 *
 * Returns:
 *	Nothing
 */
static void
uma_timeout(void *unused)
{
	bucket_enable();
	zone_foreach(zone_timeout);

	/* Reschedule this event */
	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
}

/*
 * Routine to perform timeout driven calculations.  This expands the
 * hashes and does per cpu statistics aggregation.
 *
 * Returns nothing.
 */
static void
keg_timeout(uma_keg_t keg)
{

	KEG_LOCK(keg);
	/*
	 * Expand the keg hash table.
	 *
	 * This is done if the number of slabs is larger than the hash size.
	 * What I'm trying to do here is eliminate collisions entirely.  This
	 * may be a little aggressive.  Should I allow for two collisions max?
	 */
	if (keg->uk_flags & UMA_ZONE_HASH &&
	    keg->uk_pages / keg->uk_ppera >= keg->uk_hash.uh_hashsize) {
		struct uma_hash newhash;
		struct uma_hash oldhash;
		int ret;

		/*
		 * This is so involved because allocating and freeing
		 * while the keg lock is held will lead to deadlock.
		 * I have to do everything in stages and check for
		 * races.
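		 *
		 * The stages below: snapshot the current hash (to carry its
		 * size), drop the keg lock, allocate the larger table, then
		 * retake the lock and splice the new table in only if it is
		 * still bigger; whichever table loses the race is freed
		 * after the lock is dropped again.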
		 */
		newhash = keg->uk_hash;
		KEG_UNLOCK(keg);
		ret = hash_alloc(&newhash);
		KEG_LOCK(keg);
		if (ret) {
			if (hash_expand(&keg->uk_hash, &newhash)) {
				oldhash = keg->uk_hash;
				keg->uk_hash = newhash;
			} else
				oldhash = newhash;

			KEG_UNLOCK(keg);
			hash_free(&oldhash);
			return;
		}
	}
	KEG_UNLOCK(keg);
}

static void
zone_timeout(uma_zone_t zone)
{

	zone_foreach_keg(zone, &keg_timeout);
}

/*
 * Allocate and zero fill the next sized hash table from the appropriate
 * backing store.
 *
 * Arguments:
 *	hash  A new hash structure with the old hash size in uh_hashsize
 *
 * Returns:
 *	1 on success and 0 on failure.
 */
static int
hash_alloc(struct uma_hash *hash)
{
	int oldsize;
	int alloc;

	oldsize = hash->uh_hashsize;

	/* We're just going to go to a power of two greater */
	if (oldsize) {
		hash->uh_hashsize = oldsize * 2;
		alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
		hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
		    M_UMAHASH, M_NOWAIT);
	} else {
		alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
		hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
		    M_WAITOK);
		hash->uh_hashsize = UMA_HASH_SIZE_INIT;
	}
	if (hash->uh_slab_hash) {
		bzero(hash->uh_slab_hash, alloc);
		hash->uh_hashmask = hash->uh_hashsize - 1;
		return (1);
	}

	return (0);
}

/*
 * Expands the hash table for HASH zones.  This is done from zone_timeout
 * to reduce collisions.  This must not be done in the regular allocation
 * path, otherwise, we can recurse on the vm while allocating pages.
 *
 * Arguments:
 *	oldhash  The hash you want to expand
 *	newhash  The hash structure for the new table
 *
 * Returns:
 *	1 if the items were migrated to the new table, 0 otherwise.
 */
static int
hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
{
	uma_slab_t slab;
	int hval;
	int i;

	if (!newhash->uh_slab_hash)
		return (0);

	if (oldhash->uh_hashsize >= newhash->uh_hashsize)
		return (0);

	/*
	 * I need to investigate hash algorithms for resizing without a
	 * full rehash.
	 */

	for (i = 0; i < oldhash->uh_hashsize; i++)
		while (!SLIST_EMPTY(&oldhash->uh_slab_hash[i])) {
			slab = SLIST_FIRST(&oldhash->uh_slab_hash[i]);
			SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[i], us_hlink);
			hval = UMA_HASH(newhash, slab->us_data);
			SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
			    slab, us_hlink);
		}

	return (1);
}

/*
 * Free the hash bucket to the appropriate backing store.
 *
 * Arguments:
 *	hash  The hash structure whose table is being freed
 *
 * Returns:
 *	Nothing
 */
static void
hash_free(struct uma_hash *hash)
{
	if (hash->uh_slab_hash == NULL)
		return;
	if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
		zone_free_item(hashzone, hash->uh_slab_hash, NULL, SKIP_NONE);
	else
		free(hash->uh_slab_hash, M_UMAHASH);
}

/*
 * Frees all outstanding items in a bucket
 *
 * Arguments:
 *	zone   The zone to free to, must be unlocked.
 *	bucket The free/alloc bucket with items, cpu queue must be locked.
 *
 * Returns:
 *	Nothing
 */

static void
bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
{
	int i;

	if (bucket == NULL)
		return;

	if (zone->uz_fini)
		for (i = 0; i < bucket->ub_cnt; i++)
			zone->uz_fini(bucket->ub_bucket[i], zone->uz_size);
	zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt);
	bucket->ub_cnt = 0;
}

/*
 * Drains the per cpu caches for a zone.
 *
 * NOTE: This may only be called while the zone is being torn down, and not
 * during normal operation.  This is necessary in order that we do not have
 * to migrate CPUs to drain the per-CPU caches.
 *
 * Arguments:
 *	zone     The zone to drain, must be unlocked.
 *
 * Returns:
 *	Nothing
 */
static void
cache_drain(uma_zone_t zone)
{
	uma_cache_t cache;
	int cpu;

	/*
	 * XXX: It is safe to not lock the per-CPU caches, because we're
	 * tearing down the zone anyway.  I.e., there will be no further use
	 * of the caches at this point.
	 *
	 * XXX: It would be good to be able to assert that the zone is being
	 * torn down to prevent improper use of cache_drain().
	 *
	 * XXX: We lock the zone before passing into bucket_cache_drain() as
	 * it is used elsewhere.  Should the tear-down path be made special
	 * there in some form?
	 */
	CPU_FOREACH(cpu) {
		cache = &zone->uz_cpu[cpu];
		bucket_drain(zone, cache->uc_allocbucket);
		bucket_drain(zone, cache->uc_freebucket);
		if (cache->uc_allocbucket != NULL)
			bucket_free(zone, cache->uc_allocbucket, NULL);
		if (cache->uc_freebucket != NULL)
			bucket_free(zone, cache->uc_freebucket, NULL);
		cache->uc_allocbucket = cache->uc_freebucket = NULL;
	}
	ZONE_LOCK(zone);
	bucket_cache_drain(zone);
	ZONE_UNLOCK(zone);
}

static void
cache_shrink(uma_zone_t zone)
{

	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
		return;

	ZONE_LOCK(zone);
	zone->uz_count = (zone->uz_count_min + zone->uz_count) / 2;
	ZONE_UNLOCK(zone);
}

static void
cache_drain_safe_cpu(uma_zone_t zone)
{
	uma_cache_t cache;
	uma_bucket_t b1, b2;

	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
		return;

	b1 = b2 = NULL;
	ZONE_LOCK(zone);
	critical_enter();
	cache = &zone->uz_cpu[curcpu];
	if (cache->uc_allocbucket) {
		if (cache->uc_allocbucket->ub_cnt != 0)
			LIST_INSERT_HEAD(&zone->uz_buckets,
			    cache->uc_allocbucket, ub_link);
		else
			b1 = cache->uc_allocbucket;
		cache->uc_allocbucket = NULL;
	}
	if (cache->uc_freebucket) {
		if (cache->uc_freebucket->ub_cnt != 0)
			LIST_INSERT_HEAD(&zone->uz_buckets,
			    cache->uc_freebucket, ub_link);
		else
			b2 = cache->uc_freebucket;
		cache->uc_freebucket = NULL;
	}
	critical_exit();
	ZONE_UNLOCK(zone);
	if (b1)
		bucket_free(zone, b1, NULL);
	if (b2)
		bucket_free(zone, b2, NULL);
}

/*
 * Safely drain per-CPU caches of a zone(s) to alloc bucket.
 * This is an expensive call because it needs to bind to all CPUs
 * one by one and enter a critical section on each of them in order
 * to safely access their cache buckets.
 * Zone lock must not be held when calling this function.
 */
static void
cache_drain_safe(uma_zone_t zone)
{
	int cpu;

	/*
	 * Polite bucket sizes shrinking was not enough, shrink aggressively.
	 */
	if (zone)
		cache_shrink(zone);
	else
		zone_foreach(cache_shrink);

	CPU_FOREACH(cpu) {
		thread_lock(curthread);
		sched_bind(curthread, cpu);
		thread_unlock(curthread);

		if (zone)
			cache_drain_safe_cpu(zone);
		else
			zone_foreach(cache_drain_safe_cpu);
	}
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);
}

/*
 * Drain the cached buckets from a zone.  Expects a locked zone on entry.
 */
static void
bucket_cache_drain(uma_zone_t zone)
{
	uma_bucket_t bucket;

	/*
	 * Drain the bucket queues and free the buckets, we just keep two per
	 * cpu (alloc/free).
	 */
	while ((bucket = LIST_FIRST(&zone->uz_buckets)) != NULL) {
		LIST_REMOVE(bucket, ub_link);
		ZONE_UNLOCK(zone);
		bucket_drain(zone, bucket);
		bucket_free(zone, bucket, NULL);
		ZONE_LOCK(zone);
	}

	/*
	 * Shrink further bucket sizes.  Price of single zone lock collision
	 * is probably lower than price of global cache drain.
	 */
	if (zone->uz_count > zone->uz_count_min)
		zone->uz_count--;
}

static void
keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
{
	uint8_t *mem;
	int i;
	uint8_t flags;

	mem = slab->us_data;
	flags = slab->us_flags;
	i = start;
	if (keg->uk_fini != NULL) {
		for (i--; i > -1; i--)
			keg->uk_fini(slab->us_data + (keg->uk_rsize * i),
			    keg->uk_size);
	}
	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
		zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
#ifdef UMA_DEBUG
	printf("%s: Returning %d bytes.\n", keg->uk_name,
	    PAGE_SIZE * keg->uk_ppera);
#endif
	keg->uk_freef(mem, PAGE_SIZE * keg->uk_ppera, flags);
}

/*
 * Frees pages from a keg back to the system.  This is done on demand from
 * the pageout daemon.
 *
 * Returns nothing.
 */
static void
keg_drain(uma_keg_t keg)
{
	struct slabhead freeslabs = { 0 };
	uma_slab_t slab;
	uma_slab_t n;

	/*
	 * We don't want to take pages from statically allocated kegs at this
	 * time
	 */
	if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
		return;

#ifdef UMA_DEBUG
	printf("%s free items: %u\n", keg->uk_name, keg->uk_free);
#endif
	KEG_LOCK(keg);
	if (keg->uk_free == 0)
		goto finished;

	slab = LIST_FIRST(&keg->uk_free_slab);
	while (slab) {
		n = LIST_NEXT(slab, us_link);

		/* We have nowhere to free these to */
		if (slab->us_flags & UMA_SLAB_BOOT) {
			slab = n;
			continue;
		}

		LIST_REMOVE(slab, us_link);
		keg->uk_pages -= keg->uk_ppera;
		keg->uk_free -= keg->uk_ipers;

		if (keg->uk_flags & UMA_ZONE_HASH)
			UMA_HASH_REMOVE(&keg->uk_hash, slab, slab->us_data);

		SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);

		slab = n;
	}
finished:
	KEG_UNLOCK(keg);

	while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
		SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
		keg_free_slab(keg, slab, keg->uk_ipers);
	}
}

static void
zone_drain_wait(uma_zone_t zone, int waitok)
{

	/*
	 * Set draining to interlock with zone_dtor() so we can release our
	 * locks as we go.  Only dtor() should do a WAITOK call since it
	 * is the only call that knows the structure will still be available
	 * when it wakes up.
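	 *
	 * M_NOWAIT callers (zone_drain() below) bail out instead of
	 * sleeping if another drain is already in progress.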
	 */
	ZONE_LOCK(zone);
	while (zone->uz_flags & UMA_ZFLAG_DRAINING) {
		if (waitok == M_NOWAIT)
			goto out;
		msleep(zone, zone->uz_lockptr, PVM, "zonedrain", 1);
	}
	zone->uz_flags |= UMA_ZFLAG_DRAINING;
	bucket_cache_drain(zone);
	ZONE_UNLOCK(zone);
	/*
	 * The DRAINING flag protects us from being freed while
	 * we're running.  Normally the uma_rwlock would protect us but we
	 * must be able to release and acquire the right lock for each keg.
	 */
	zone_foreach_keg(zone, &keg_drain);
	ZONE_LOCK(zone);
	zone->uz_flags &= ~UMA_ZFLAG_DRAINING;
	wakeup(zone);
out:
	ZONE_UNLOCK(zone);
}

void
zone_drain(uma_zone_t zone)
{

	zone_drain_wait(zone, M_NOWAIT);
}

/*
 * Allocate a new slab for a keg.  This does not insert the slab onto a list.
 *
 * Arguments:
 *	wait  Shall we wait?
 *
 * Returns:
 *	The slab that was allocated or NULL if there is no memory and the
 *	caller specified M_NOWAIT.
 */
static uma_slab_t
keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int wait)
{
	uma_slabrefcnt_t slabref;
	uma_alloc allocf;
	uma_slab_t slab;
	uint8_t *mem;
	uint8_t flags;
	int i;

	mtx_assert(&keg->uk_lock, MA_OWNED);
	slab = NULL;
	mem = NULL;

#ifdef UMA_DEBUG
	printf("alloc_slab:  Allocating a new slab for %s\n", keg->uk_name);
#endif
	allocf = keg->uk_allocf;
	KEG_UNLOCK(keg);

	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
		slab = zone_alloc_item(keg->uk_slabzone, NULL, wait);
		if (slab == NULL)
			goto out;
	}

	/*
	 * This reproduces the old vm_zone behavior of zero filling pages the
	 * first time they are added to a zone.
	 *
	 * Malloced items are zeroed in uma_zalloc.
	 */

	if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
		wait |= M_ZERO;
	else
		wait &= ~M_ZERO;

	if (keg->uk_flags & UMA_ZONE_NODUMP)
		wait |= M_NODUMP;
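
	/*
	 * (Note that the allocf callbacks take the zone, not the keg;
	 * back-ends that need the keg, such as startup_alloc() and
	 * noobj_alloc(), recover it with zone_first_keg().)
	 */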
	/* zone is passed for legacy reasons. */
	mem = allocf(zone, keg->uk_ppera * PAGE_SIZE, &flags, wait);
	if (mem == NULL) {
		if (keg->uk_flags & UMA_ZONE_OFFPAGE)
			zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
		slab = NULL;
		goto out;
	}

	/* Point the slab into the allocated memory */
	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
		slab = (uma_slab_t )(mem + keg->uk_pgoff);

	if (keg->uk_flags & UMA_ZONE_VTOSLAB)
		for (i = 0; i < keg->uk_ppera; i++)
			vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);

	slab->us_keg = keg;
	slab->us_data = mem;
	slab->us_freecount = keg->uk_ipers;
	slab->us_flags = flags;
	BIT_FILL(SLAB_SETSIZE, &slab->us_free);
#ifdef INVARIANTS
	BIT_ZERO(SLAB_SETSIZE, &slab->us_debugfree);
#endif
	if (keg->uk_flags & UMA_ZONE_REFCNT) {
		slabref = (uma_slabrefcnt_t)slab;
		for (i = 0; i < keg->uk_ipers; i++)
			slabref->us_refcnt[i] = 0;
	}

	if (keg->uk_init != NULL) {
		for (i = 0; i < keg->uk_ipers; i++)
			if (keg->uk_init(slab->us_data + (keg->uk_rsize * i),
			    keg->uk_size, wait) != 0)
				break;
		if (i != keg->uk_ipers) {
			keg_free_slab(keg, slab, i);
			slab = NULL;
			goto out;
		}
	}
out:
	KEG_LOCK(keg);

	if (slab != NULL) {
		if (keg->uk_flags & UMA_ZONE_HASH)
			UMA_HASH_INSERT(&keg->uk_hash, slab, mem);

		keg->uk_pages += keg->uk_ppera;
		keg->uk_free += keg->uk_ipers;
	}

	return (slab);
}

/*
 * This function is intended to be used early on in place of page_alloc() so
 * that we may use the boot time page cache to satisfy allocations before
 * the VM is ready.
 */
static void *
startup_alloc(uma_zone_t zone, int bytes, uint8_t *pflag, int wait)
{
	uma_keg_t keg;
	uma_slab_t tmps;
	int pages, check_pages;

	keg = zone_first_keg(zone);
	pages = howmany(bytes, PAGE_SIZE);
	check_pages = pages - 1;
	KASSERT(pages > 0, ("startup_alloc can't reserve 0 pages\n"));

	/*
	 * Check our small startup cache to see if it has pages remaining.
	 */
	mtx_lock(&uma_boot_pages_mtx);

	/* First check if we have enough room. */
	tmps = LIST_FIRST(&uma_boot_pages);
	while (tmps != NULL && check_pages-- > 0)
		tmps = LIST_NEXT(tmps, us_link);
	if (tmps != NULL) {
		/*
		 * It's ok to lose tmps references.  The last one will
		 * have tmps->us_data pointing to the start address of
		 * "pages" contiguous pages of memory.
		 */
		while (pages-- > 0) {
			tmps = LIST_FIRST(&uma_boot_pages);
			LIST_REMOVE(tmps, us_link);
		}
		mtx_unlock(&uma_boot_pages_mtx);
		*pflag = tmps->us_flags;
		return (tmps->us_data);
	}
	mtx_unlock(&uma_boot_pages_mtx);
	if (booted < UMA_STARTUP2)
		panic("UMA: Increase vm.boot_pages");
	/*
	 * Now that we've booted, reset these users to their real allocator.
	 */
#ifdef UMA_MD_SMALL_ALLOC
	keg->uk_allocf = (keg->uk_ppera > 1) ? page_alloc : uma_small_alloc;
#else
	keg->uk_allocf = page_alloc;
#endif
	return keg->uk_allocf(zone, bytes, pflag, wait);
}

/*
 * Allocates a number of pages from the system
 *
 * Arguments:
 *	bytes  The number of bytes requested
 *	wait   Shall we wait?
 *
 * Returns:
 *	A pointer to the allocated memory or possibly
 *	NULL if M_NOWAIT is set.
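 *
 * Pages are taken from kmem_arena and tagged UMA_SLAB_KMEM in *pflag
 * so that page_free() can return them to the matching arena later.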
 */
static void *
page_alloc(uma_zone_t zone, int bytes, uint8_t *pflag, int wait)
{
	void *p;	/* Returned page */

	*pflag = UMA_SLAB_KMEM;
	p = (void *) kmem_malloc(kmem_arena, bytes, wait);

	return (p);
}

/*
 * Allocates a number of pages not belonging to a VM object
 *
 * Arguments:
 *	bytes  The number of bytes requested
 *	wait   Shall we wait?
 *
 * Returns:
 *	A pointer to the allocated memory or possibly
 *	NULL if M_NOWAIT is set.
 */
static void *
noobj_alloc(uma_zone_t zone, int bytes, uint8_t *flags, int wait)
{
	TAILQ_HEAD(, vm_page) alloctail;
	u_long npages;
	vm_offset_t retkva, zkva;
	vm_page_t p, p_next;
	uma_keg_t keg;

	TAILQ_INIT(&alloctail);
	keg = zone_first_keg(zone);

	npages = howmany(bytes, PAGE_SIZE);
	while (npages > 0) {
		p = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT |
		    VM_ALLOC_WIRED | VM_ALLOC_NOOBJ);
		if (p != NULL) {
			/*
			 * Since the page does not belong to an object, its
			 * listq is unused.
			 */
			TAILQ_INSERT_TAIL(&alloctail, p, listq);
			npages--;
			continue;
		}
		if (wait & M_WAITOK) {
			VM_WAIT;
			continue;
		}

		/*
		 * Page allocation failed, free intermediate pages and
		 * exit.
		 */
		TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
			vm_page_unwire(p, PQ_INACTIVE);
			vm_page_free(p);
		}
		return (NULL);
	}
	*flags = UMA_SLAB_PRIV;
	zkva = keg->uk_kva +
	    atomic_fetchadd_long(&keg->uk_offset, round_page(bytes));
	retkva = zkva;
	TAILQ_FOREACH(p, &alloctail, listq) {
		pmap_qenter(zkva, &p, 1);
		zkva += PAGE_SIZE;
	}

	return ((void *)retkva);
}

/*
 * Frees a number of pages to the system
 *
 * Arguments:
 *	mem   A pointer to the memory to be freed
 *	size  The size of the memory being freed
 *	flags The original p->us_flags field
 *
 * Returns:
 *	Nothing
 */
static void
page_free(void *mem, int size, uint8_t flags)
{
	struct vmem *vmem;

	if (flags & UMA_SLAB_KMEM)
		vmem = kmem_arena;
	else if (flags & UMA_SLAB_KERNEL)
		vmem = kernel_arena;
	else
		panic("UMA: page_free used with invalid flags %d", flags);

	kmem_free(vmem, (vm_offset_t)mem, size);
}

/*
 * Zero fill initializer
 *
 * Arguments/Returns follow uma_init specifications
 */
static int
zero_init(void *mem, int size, int flags)
{
	bzero(mem, size);
	return (0);
}

/*
 * Finish creating a small uma keg.  This calculates ipers, and the keg size.
 *
 * Arguments
 *	keg  The keg we should initialize
 *
 * Returns
 *	Nothing
 */
static void
keg_small_init(uma_keg_t keg)
{
	u_int rsize;
	u_int memused;
	u_int wastedspace;
	u_int shsize;

	if (keg->uk_flags & UMA_ZONE_PCPU) {
		u_int ncpus = mp_ncpus ? mp_ncpus : MAXCPU;

		keg->uk_slabsize = sizeof(struct pcpu);
		keg->uk_ppera = howmany(ncpus * sizeof(struct pcpu),
		    PAGE_SIZE);
	} else {
		keg->uk_slabsize = UMA_SLAB_SIZE;
		keg->uk_ppera = 1;
	}

	/*
	 * Calculate the size of each allocation (rsize) according to
	 * alignment.  If the requested size is smaller than we have
	 * allocation bits for, we round it up.
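	 *
	 * As an illustration (made-up numbers): with a 16-byte alignment
	 * mask (uk_align = 15) and a 20-byte request, the round-up below
	 * gives rsize = (20 & ~15) + 16 = 32 bytes per item.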
	 */
	rsize = keg->uk_size;
	if (rsize < keg->uk_slabsize / SLAB_SETSIZE)
		rsize = keg->uk_slabsize / SLAB_SETSIZE;
	if (rsize & keg->uk_align)
		rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1);
	keg->uk_rsize = rsize;

	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0 ||
	    keg->uk_rsize < sizeof(struct pcpu),
	    ("%s: size %u too large", __func__, keg->uk_rsize));

	if (keg->uk_flags & UMA_ZONE_REFCNT)
		rsize += sizeof(uint32_t);

	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
		shsize = 0;
	else
		shsize = sizeof(struct uma_slab);

	keg->uk_ipers = (keg->uk_slabsize - shsize) / rsize;
	KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
	    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));

	memused = keg->uk_ipers * rsize + shsize;
	wastedspace = keg->uk_slabsize - memused;

	/*
	 * We can't do OFFPAGE if we're internal or if we've been
	 * asked to not go to the VM for buckets.  If we do this we
	 * may end up going to the VM for slabs which we do not
	 * want to do if we're UMA_ZFLAG_CACHEONLY as a result
	 * of UMA_ZONE_VM, which clearly forbids it.
	 */
	if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
	    (keg->uk_flags & UMA_ZFLAG_CACHEONLY))
		return;

	/*
	 * See if using an OFFPAGE slab will limit our waste.  Only do
	 * this if it permits more items per-slab.
	 *
	 * XXX We could try growing slabsize to limit max waste as well.
	 *     Historically this was not done because the VM could not
	 *     efficiently handle contiguous allocations.
	 */
	if ((wastedspace >= keg->uk_slabsize / UMA_MAX_WASTE) &&
	    (keg->uk_ipers < (keg->uk_slabsize / keg->uk_rsize))) {
		keg->uk_ipers = keg->uk_slabsize / keg->uk_rsize;
		KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
		    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
#ifdef UMA_DEBUG
		printf("UMA decided we need offpage slab headers for "
		    "keg: %s, calculated wastedspace = %d, "
		    "maximum wasted space allowed = %d, "
		    "calculated ipers = %d, "
		    "new wasted space = %d\n", keg->uk_name, wastedspace,
		    keg->uk_slabsize / UMA_MAX_WASTE, keg->uk_ipers,
		    keg->uk_slabsize - keg->uk_ipers * keg->uk_rsize);
#endif
		keg->uk_flags |= UMA_ZONE_OFFPAGE;
	}

	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
	    (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
		keg->uk_flags |= UMA_ZONE_HASH;
}

/*
 * Finish creating a large (> UMA_SLAB_SIZE) uma keg.  Just give in and do
 * OFFPAGE for now.  When I can allow for more dynamic slab sizes this will be
 * more complicated.
 *
 * Arguments
 *	keg  The keg we should initialize
 *
 * Returns
 *	Nothing
 */
static void
keg_large_init(uma_keg_t keg)
{
	u_int shsize;

	KASSERT(keg != NULL, ("Keg is null in keg_large_init"));
	KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0,
	    ("keg_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY keg"));
	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
	    ("%s: Cannot large-init a UMA_ZONE_PCPU keg", __func__));

	keg->uk_ppera = howmany(keg->uk_size, PAGE_SIZE);
	keg->uk_slabsize = keg->uk_ppera * PAGE_SIZE;
	keg->uk_ipers = 1;
	keg->uk_rsize = keg->uk_size;

	/*
	 * We can't do OFFPAGE if we're internal, bail out here.
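	 * (Presumably the concern mirrors keg_small_init() above: an
	 * OFFPAGE internal keg would have to allocate its slab headers
	 * from slabzone, recursing into UMA itself.)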
	 */
	if (keg->uk_flags & UMA_ZFLAG_INTERNAL)
		return;

	/* Check whether we have enough space to not do OFFPAGE. */
	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) == 0) {
		shsize = sizeof(struct uma_slab);
		if (keg->uk_flags & UMA_ZONE_REFCNT)
			shsize += keg->uk_ipers * sizeof(uint32_t);
		if (shsize & UMA_ALIGN_PTR)
			shsize = (shsize & ~UMA_ALIGN_PTR) +
			    (UMA_ALIGN_PTR + 1);

		if ((PAGE_SIZE * keg->uk_ppera) - keg->uk_rsize < shsize)
			keg->uk_flags |= UMA_ZONE_OFFPAGE;
	}

	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
	    (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
		keg->uk_flags |= UMA_ZONE_HASH;
}

static void
keg_cachespread_init(uma_keg_t keg)
{
	int alignsize;
	int trailer;
	int pages;
	int rsize;

	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
	    ("%s: Cannot cachespread-init a UMA_ZONE_PCPU keg", __func__));

	alignsize = keg->uk_align + 1;
	rsize = keg->uk_size;
	/*
	 * We want one item to start on every align boundary in a page.  To
	 * do this we will span pages.  We will also extend the item by the
	 * size of align if it is an even multiple of align.  Otherwise, it
	 * would fall on the same boundary every time.
	 */
	if (rsize & keg->uk_align)
		rsize = (rsize & ~keg->uk_align) + alignsize;
	if ((rsize & alignsize) == 0)
		rsize += alignsize;
	trailer = rsize - keg->uk_size;
	pages = (rsize * (PAGE_SIZE / alignsize)) / PAGE_SIZE;
	pages = MIN(pages, (128 * 1024) / PAGE_SIZE);
	keg->uk_rsize = rsize;
	keg->uk_ppera = pages;
	keg->uk_slabsize = UMA_SLAB_SIZE;
	keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize;
	keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
	KASSERT(keg->uk_ipers <= SLAB_SETSIZE,
	    ("%s: keg->uk_ipers too high(%d) increase max_ipers", __func__,
	    keg->uk_ipers));
}

/*
 * Keg header ctor.  This initializes all fields, locks, etc., and inserts
 * the keg onto the global keg list.
 *
 * Arguments/Returns follow uma_ctor specifications
 *	udata  Actually uma_kctor_args
 */
static int
keg_ctor(void *mem, int size, void *udata, int flags)
{
	struct uma_kctor_args *arg = udata;
	uma_keg_t keg = mem;
	uma_zone_t zone;

	bzero(keg, size);
	keg->uk_size = arg->size;
	keg->uk_init = arg->uminit;
	keg->uk_fini = arg->fini;
	keg->uk_align = arg->align;
	keg->uk_free = 0;
	keg->uk_reserve = 0;
	keg->uk_pages = 0;
	keg->uk_flags = arg->flags;
	keg->uk_allocf = page_alloc;
	keg->uk_freef = page_free;
	keg->uk_slabzone = NULL;

	/*
	 * The master zone is passed to us at keg-creation time.
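	 * The keg takes its name from that first zone; see the uk_name
	 * assignment below.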
	 */
	zone = arg->zone;
	keg->uk_name = zone->uz_name;

	if (arg->flags & UMA_ZONE_VM)
		keg->uk_flags |= UMA_ZFLAG_CACHEONLY;

	if (arg->flags & UMA_ZONE_ZINIT)
		keg->uk_init = zero_init;

	if (arg->flags & UMA_ZONE_REFCNT || arg->flags & UMA_ZONE_MALLOC)
		keg->uk_flags |= UMA_ZONE_VTOSLAB;

	if (arg->flags & UMA_ZONE_PCPU)
#ifdef SMP
		keg->uk_flags |= UMA_ZONE_OFFPAGE;
#else
		keg->uk_flags &= ~UMA_ZONE_PCPU;
#endif

	if (keg->uk_flags & UMA_ZONE_CACHESPREAD) {
		keg_cachespread_init(keg);
	} else if (keg->uk_flags & UMA_ZONE_REFCNT) {
		if (keg->uk_size >
		    (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt) -
		    sizeof(uint32_t)))
			keg_large_init(keg);
		else
			keg_small_init(keg);
	} else {
		if (keg->uk_size > (UMA_SLAB_SIZE - sizeof(struct uma_slab)))
			keg_large_init(keg);
		else
			keg_small_init(keg);
	}

	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
		if (keg->uk_flags & UMA_ZONE_REFCNT) {
			if (keg->uk_ipers > uma_max_ipers_ref)
				panic("Too many ref items per zone: %d > %d\n",
				    keg->uk_ipers, uma_max_ipers_ref);
			keg->uk_slabzone = slabrefzone;
		} else
			keg->uk_slabzone = slabzone;
	}

	/*
	 * If we haven't booted yet we need allocations to go through the
	 * startup cache until the vm is ready.
	 */
	if (keg->uk_ppera == 1) {
#ifdef UMA_MD_SMALL_ALLOC
		keg->uk_allocf = uma_small_alloc;
		keg->uk_freef = uma_small_free;

		if (booted < UMA_STARTUP)
			keg->uk_allocf = startup_alloc;
#else
		if (booted < UMA_STARTUP2)
			keg->uk_allocf = startup_alloc;
#endif
	} else if (booted < UMA_STARTUP2 &&
	    (keg->uk_flags & UMA_ZFLAG_INTERNAL))
		keg->uk_allocf = startup_alloc;

	/*
	 * Initialize keg's lock
	 */
	KEG_LOCK_INIT(keg, (arg->flags & UMA_ZONE_MTXCLASS));

	/*
	 * If we're putting the slab header in the actual page we need to
	 * figure out where in each page it goes.  This calculates a right
	 * justified offset into the memory on an ALIGN_PTR boundary.
	 */
	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) {
		u_int totsize;

		/* Size of the slab struct and free list */
		totsize = sizeof(struct uma_slab);

		/* Size of the reference counts. */
		if (keg->uk_flags & UMA_ZONE_REFCNT)
			totsize += keg->uk_ipers * sizeof(uint32_t);

		if (totsize & UMA_ALIGN_PTR)
			totsize = (totsize & ~UMA_ALIGN_PTR) +
			    (UMA_ALIGN_PTR + 1);
		keg->uk_pgoff = (PAGE_SIZE * keg->uk_ppera) - totsize;

		/*
		 * The only way the following is possible is if with our
		 * UMA_ALIGN_PTR adjustments we are now bigger than
		 * UMA_SLAB_SIZE.  I haven't checked whether this is
		 * mathematically possible for all cases, so we make
		 * sure here anyway.
		 */
		totsize = keg->uk_pgoff + sizeof(struct uma_slab);
		if (keg->uk_flags & UMA_ZONE_REFCNT)
			totsize += keg->uk_ipers * sizeof(uint32_t);
		if (totsize > PAGE_SIZE * keg->uk_ppera) {
			printf("zone %s ipers %d rsize %d size %d\n",
			    zone->uz_name, keg->uk_ipers, keg->uk_rsize,
			    keg->uk_size);
			panic("UMA slab won't fit.");
		}
	}

	if (keg->uk_flags & UMA_ZONE_HASH)
		hash_alloc(&keg->uk_hash);

#ifdef UMA_DEBUG
	printf("UMA: %s(%p) size %d(%d) flags %#x ipers %d ppera %d out %d free %d\n",
	    zone->uz_name, zone, keg->uk_size, keg->uk_rsize, keg->uk_flags,
	    keg->uk_ipers, keg->uk_ppera,
	    (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free);
#endif

	LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);

	rw_wlock(&uma_rwlock);
	LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
	rw_wunlock(&uma_rwlock);
	return (0);
}

/*
 * Zone header ctor.  This initializes all fields, locks, etc.
 *
 * Arguments/Returns follow uma_ctor specifications
 *	udata  Actually uma_zctor_args
 */
static int
zone_ctor(void *mem, int size, void *udata, int flags)
{
	struct uma_zctor_args *arg = udata;
	uma_zone_t zone = mem;
	uma_zone_t z;
	uma_keg_t keg;

	bzero(zone, size);
	zone->uz_name = arg->name;
	zone->uz_ctor = arg->ctor;
	zone->uz_dtor = arg->dtor;
	zone->uz_slab = zone_fetch_slab;
	zone->uz_init = NULL;
	zone->uz_fini = NULL;
	zone->uz_allocs = 0;
	zone->uz_frees = 0;
	zone->uz_fails = 0;
	zone->uz_sleeps = 0;
	zone->uz_count = 0;
	zone->uz_count_min = 0;
	zone->uz_flags = 0;
	zone->uz_warning = NULL;
	timevalclear(&zone->uz_ratecheck);
	keg = arg->keg;

	ZONE_LOCK_INIT(zone, (arg->flags & UMA_ZONE_MTXCLASS));

	/*
	 * This is a pure cache zone, no kegs.
	 */
	if (arg->import) {
		if (arg->flags & UMA_ZONE_VM)
			arg->flags |= UMA_ZFLAG_CACHEONLY;
		zone->uz_flags = arg->flags;
		zone->uz_size = arg->size;
		zone->uz_import = arg->import;
		zone->uz_release = arg->release;
		zone->uz_arg = arg->arg;
		zone->uz_lockptr = &zone->uz_lock;
		rw_wlock(&uma_rwlock);
		LIST_INSERT_HEAD(&uma_cachezones, zone, uz_link);
		rw_wunlock(&uma_rwlock);
		goto out;
	}

	/*
	 * Use the regular zone/keg/slab allocator.
	 */
	zone->uz_import = (uma_import)zone_import;
	zone->uz_release = (uma_release)zone_release;
	zone->uz_arg = zone;

	if (arg->flags & UMA_ZONE_SECONDARY) {
		KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
		zone->uz_init = arg->uminit;
		zone->uz_fini = arg->fini;
		zone->uz_lockptr = &keg->uk_lock;
		zone->uz_flags |= UMA_ZONE_SECONDARY;
		rw_wlock(&uma_rwlock);
		ZONE_LOCK(zone);
		LIST_FOREACH(z, &keg->uk_zones, uz_link) {
			if (LIST_NEXT(z, uz_link) == NULL) {
				LIST_INSERT_AFTER(z, zone, uz_link);
				break;
			}
		}
		ZONE_UNLOCK(zone);
		rw_wunlock(&uma_rwlock);
	} else if (keg == NULL) {
		if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
		    arg->align, arg->flags)) == NULL)
			return (ENOMEM);
	} else {
		struct uma_kctor_args karg;
		int error;

		/* We should only be here from uma_startup() */
		karg.size = arg->size;
		karg.uminit = arg->uminit;
		karg.fini = arg->fini;
		karg.align = arg->align;
		karg.flags = arg->flags;
		karg.zone = zone;
		error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
		    flags);
		if (error)
			return (error);
	}

	/*
	 * Link in the first keg.
	 */
	zone->uz_klink.kl_keg = keg;
	LIST_INSERT_HEAD(&zone->uz_kegs, &zone->uz_klink, kl_link);
	zone->uz_lockptr = &keg->uk_lock;
	zone->uz_size = keg->uk_size;
	zone->uz_flags |= (keg->uk_flags &
	    (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT));

	/*
	 * Some internal zones don't have room allocated for the per cpu
	 * caches.  If we're internal, bail out here.
	 */
	if (keg->uk_flags & UMA_ZFLAG_INTERNAL) {
		KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0,
		    ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
		return (0);
	}

out:
	if ((arg->flags & UMA_ZONE_MAXBUCKET) == 0)
		zone->uz_count = bucket_select(zone->uz_size);
	else
		zone->uz_count = BUCKET_MAX;
	zone->uz_count_min = zone->uz_count;

	return (0);
}

/*
 * Keg header dtor.  This frees all data, destroys locks, frees the hash
 * table and removes the keg from the global list.
 *
 * Arguments/Returns follow uma_dtor specifications
 *	udata  unused
 */
static void
keg_dtor(void *arg, int size, void *udata)
{
	uma_keg_t keg;

	keg = (uma_keg_t)arg;
	KEG_LOCK(keg);
	if (keg->uk_free != 0) {
		printf("Freed UMA keg (%s) was not empty (%d items). "
		    " Lost %d pages of memory.\n",
		    keg->uk_name ? keg->uk_name : "",
		    keg->uk_free, keg->uk_pages);
	}
	KEG_UNLOCK(keg);

	hash_free(&keg->uk_hash);

	KEG_LOCK_FINI(keg);
}

/*
 * Zone header dtor.
 *
 * Arguments/Returns follow uma_dtor specifications
 *	udata  unused
 */
static void
zone_dtor(void *arg, int size, void *udata)
{
	uma_klink_t klink;
	uma_zone_t zone;
	uma_keg_t keg;

	zone = (uma_zone_t)arg;
	keg = zone_first_keg(zone);

	if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
		cache_drain(zone);

	rw_wlock(&uma_rwlock);
	LIST_REMOVE(zone, uz_link);
	rw_wunlock(&uma_rwlock);
	/*
	 * XXX there are some races here where the zone can be drained but
	 * zone lock released and then refilled before we remove it...  we
	 * don't care for now
	 */
	zone_drain_wait(zone, M_WAITOK);
	/*
	 * Unlink all of our kegs.
	 */
	while ((klink = LIST_FIRST(&zone->uz_kegs)) != NULL) {
		klink->kl_keg = NULL;
		LIST_REMOVE(klink, kl_link);
		if (klink == &zone->uz_klink)
			continue;
		free(klink, M_TEMP);
	}
	/*
	 * We only destroy kegs from non secondary zones.
	 */
	if (keg != NULL && (zone->uz_flags & UMA_ZONE_SECONDARY) == 0) {
		rw_wlock(&uma_rwlock);
		LIST_REMOVE(keg, uk_link);
		rw_wunlock(&uma_rwlock);
		zone_free_item(kegs, keg, NULL, SKIP_NONE);
	}
	ZONE_LOCK_FINI(zone);
}

/*
 * Traverses every zone in the system and calls a callback
 *
 * Arguments:
 *	zfunc  A pointer to a function which accepts a zone
 *		as an argument.
 *
 * Returns:
 *	Nothing
 */
static void
zone_foreach(void (*zfunc)(uma_zone_t))
{
	uma_keg_t keg;
	uma_zone_t zone;

	rw_rlock(&uma_rwlock);
	LIST_FOREACH(keg, &uma_kegs, uk_link) {
		LIST_FOREACH(zone, &keg->uk_zones, uz_link)
			zfunc(zone);
	}
	rw_runlock(&uma_rwlock);
}

/* Public functions */
/* See uma.h */
void
uma_startup(void *bootmem, int boot_pages)
{
	struct uma_zctor_args args;
	uma_slab_t slab;
	u_int slabsize;
	int i;

#ifdef UMA_DEBUG
	printf("Creating uma keg headers zone and keg.\n");
#endif
	rw_init(&uma_rwlock, "UMA lock");

	/* "manually" create the initial zone */
	memset(&args, 0, sizeof(args));
	args.name = "UMA Kegs";
	args.size = sizeof(struct uma_keg);
	args.ctor = keg_ctor;
	args.dtor = keg_dtor;
	args.uminit = zero_init;
	args.fini = NULL;
	args.keg = &masterkeg;
	args.align = 32 - 1;
	args.flags = UMA_ZFLAG_INTERNAL;
	/* The initial zone has no per-CPU queues so it's smaller */
	zone_ctor(kegs, sizeof(struct uma_zone), &args, M_WAITOK);

#ifdef UMA_DEBUG
	printf("Filling boot free list.\n");
#endif
	for (i = 0; i < boot_pages; i++) {
		slab = (uma_slab_t)((uint8_t *)bootmem + (i * UMA_SLAB_SIZE));
		slab->us_data = (uint8_t *)slab;
		slab->us_flags = UMA_SLAB_BOOT;
		LIST_INSERT_HEAD(&uma_boot_pages, slab, us_link);
	}
	mtx_init(&uma_boot_pages_mtx, "UMA boot pages", NULL, MTX_DEF);

#ifdef UMA_DEBUG
	printf("Creating uma zone headers zone and keg.\n");
#endif
	args.name = "UMA Zones";
	args.size = sizeof(struct uma_zone) +
	    (sizeof(struct uma_cache) * (mp_maxid + 1));
	args.ctor = zone_ctor;
	args.dtor = zone_dtor;
	args.uminit = zero_init;
	args.fini = NULL;
	args.keg = NULL;
	args.align = 32 - 1;
	args.flags = UMA_ZFLAG_INTERNAL;
	/* The initial zone has no per-CPU queues so it's smaller */
	zone_ctor(zones, sizeof(struct uma_zone), &args, M_WAITOK);

#ifdef UMA_DEBUG
	printf("Creating slab and hash zones.\n");
#endif

	/* Now make a zone for slab headers */
	slabzone = uma_zcreate("UMA Slabs",
	    sizeof(struct uma_slab),
	    NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);

	/*
	 * We also create a zone for the bigger slabs with reference
	 * counts in them, to accommodate UMA_ZONE_REFCNT zones.
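	 *
	 * The item size is the refcnt slab header plus one uint32_t per
	 * possible item; with uma_max_ipers_ref = PAGE_SIZE / MCLBYTES
	 * (e.g. 4096 / 2048 = 2 on a common configuration) that is just
	 * enough for mbuf clusters, the one remaining user.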
	 */
	slabsize = sizeof(struct uma_slab_refcnt);
	slabsize += uma_max_ipers_ref * sizeof(uint32_t);
	slabrefzone = uma_zcreate("UMA RCntSlabs",
	    slabsize,
	    NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR,
	    UMA_ZFLAG_INTERNAL);

	hashzone = uma_zcreate("UMA Hash",
	    sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
	    NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);

	bucket_init();

	booted = UMA_STARTUP;

#ifdef UMA_DEBUG
	printf("UMA startup complete.\n");
#endif
}

/* see uma.h */
void
uma_startup2(void)
{
	booted = UMA_STARTUP2;
	bucket_enable();
	sx_init(&uma_drain_lock, "umadrain");
#ifdef UMA_DEBUG
	printf("UMA startup2 complete.\n");
#endif
}

/*
 * Initialize our callout handle
 *
 */
static void
uma_startup3(void)
{
#ifdef UMA_DEBUG
	printf("Starting callout.\n");
#endif
	callout_init(&uma_callout, CALLOUT_MPSAFE);
	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
#ifdef UMA_DEBUG
	printf("UMA startup3 complete.\n");
#endif
}

static uma_keg_t
uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
    int align, uint32_t flags)
{
	struct uma_kctor_args args;

	args.size = size;
	args.uminit = uminit;
	args.fini = fini;
	args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align;
	args.flags = flags;
	args.zone = zone;
	return (zone_alloc_item(kegs, &args, M_WAITOK));
}

/* See uma.h */
void
uma_set_align(int align)
{

	if (align != UMA_ALIGN_CACHE)
		uma_align_cache = align;
}

/* See uma.h */
uma_zone_t
uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
    uma_init uminit, uma_fini fini, int align, uint32_t flags)
{
	struct uma_zctor_args args;
	uma_zone_t res;
	bool locked;

	/* This stuff is essential for the zone ctor */
	memset(&args, 0, sizeof(args));
	args.name = name;
	args.size = size;
	args.ctor = ctor;
	args.dtor = dtor;
	args.uminit = uminit;
	args.fini = fini;
	args.align = align;
	args.flags = flags;
	args.keg = NULL;

	if (booted < UMA_STARTUP2) {
		locked = false;
	} else {
		sx_slock(&uma_drain_lock);
		locked = true;
	}
	res = zone_alloc_item(zones, &args, M_WAITOK);
	if (locked)
		sx_sunlock(&uma_drain_lock);
	return (res);
}

/* See uma.h */
uma_zone_t
uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
    uma_init zinit, uma_fini zfini, uma_zone_t master)
{
	struct uma_zctor_args args;
	uma_keg_t keg;
	uma_zone_t res;
	bool locked;

	keg = zone_first_keg(master);
	memset(&args, 0, sizeof(args));
	args.name = name;
	args.size = keg->uk_size;
	args.ctor = ctor;
	args.dtor = dtor;
	args.uminit = zinit;
	args.fini = zfini;
	args.align = keg->uk_align;
	args.flags = keg->uk_flags | UMA_ZONE_SECONDARY;
	args.keg = keg;

	if (booted < UMA_STARTUP2) {
		locked = false;
	} else {
		sx_slock(&uma_drain_lock);
		locked = true;
	}
	/*
	 * XXX Attaches only one keg of potentially many.
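	 * Additional kegs can be attached to the secondary zone afterwards
	 * with uma_zsecond_add().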
*/ 1989 res = zone_alloc_item(zones, &args, M_WAITOK); 1990 if (locked) 1991 sx_sunlock(&uma_drain_lock); 1992 return (res); 1993 } 1994 1995 /* See uma.h */ 1996 uma_zone_t 1997 uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor, 1998 uma_init zinit, uma_fini zfini, uma_import zimport, 1999 uma_release zrelease, void *arg, int flags) 2000 { 2001 struct uma_zctor_args args; 2002 2003 memset(&args, 0, sizeof(args)); 2004 args.name = name; 2005 args.size = size; 2006 args.ctor = ctor; 2007 args.dtor = dtor; 2008 args.uminit = zinit; 2009 args.fini = zfini; 2010 args.import = zimport; 2011 args.release = zrelease; 2012 args.arg = arg; 2013 args.align = 0; 2014 args.flags = flags; 2015 2016 return (zone_alloc_item(zones, &args, M_WAITOK)); 2017 } 2018 2019 static void 2020 zone_lock_pair(uma_zone_t a, uma_zone_t b) 2021 { 2022 if (a < b) { 2023 ZONE_LOCK(a); 2024 mtx_lock_flags(b->uz_lockptr, MTX_DUPOK); 2025 } else { 2026 ZONE_LOCK(b); 2027 mtx_lock_flags(a->uz_lockptr, MTX_DUPOK); 2028 } 2029 } 2030 2031 static void 2032 zone_unlock_pair(uma_zone_t a, uma_zone_t b) 2033 { 2034 2035 ZONE_UNLOCK(a); 2036 ZONE_UNLOCK(b); 2037 } 2038 2039 int 2040 uma_zsecond_add(uma_zone_t zone, uma_zone_t master) 2041 { 2042 uma_klink_t klink; 2043 uma_klink_t kl; 2044 int error; 2045 2046 error = 0; 2047 klink = malloc(sizeof(*klink), M_TEMP, M_WAITOK | M_ZERO); 2048 2049 zone_lock_pair(zone, master); 2050 /* 2051 * The zone must use vtoslab() to resolve objects and must already be 2052 * a secondary zone. 2053 */ 2054 if ((zone->uz_flags & (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) 2055 != (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) { 2056 error = EINVAL; 2057 goto out; 2058 } 2059 /* 2060 * The new master must also use vtoslab(). 2061 */ 2062 if ((master->uz_flags & UMA_ZONE_VTOSLAB) != UMA_ZONE_VTOSLAB) { 2063 error = EINVAL; 2064 goto out; 2065 } 2066 /* 2067 * Both zones must either use refcnts, or neither may. 2068 */ 2069 if ((zone->uz_flags & UMA_ZONE_REFCNT) != 2070 (master->uz_flags & UMA_ZONE_REFCNT)) { 2071 error = EINVAL; 2072 goto out; 2073 } 2074 /* 2075 * The underlying object must be the same size. rsize 2076 * may be different. 2077 */ 2078 if (master->uz_size != zone->uz_size) { 2079 error = E2BIG; 2080 goto out; 2081 } 2082 /* 2083 * Put it at the end of the list. 2084 */ 2085 klink->kl_keg = zone_first_keg(master); 2086 LIST_FOREACH(kl, &zone->uz_kegs, kl_link) { 2087 if (LIST_NEXT(kl, kl_link) == NULL) { 2088 LIST_INSERT_AFTER(kl, klink, kl_link); 2089 break; 2090 } 2091 } 2092 klink = NULL; 2093 zone->uz_flags |= UMA_ZFLAG_MULTI; 2094 zone->uz_slab = zone_fetch_slab_multi; 2095 2096 out: 2097 zone_unlock_pair(zone, master); 2098 if (klink != NULL) 2099 free(klink, M_TEMP); 2100 2101 return (error); 2102 } 2103 2104 2105 /* See uma.h */ 2106 void 2107 uma_zdestroy(uma_zone_t zone) 2108 { 2109 2110 sx_slock(&uma_drain_lock); 2111 zone_free_item(zones, zone, NULL, SKIP_NONE); 2112 sx_sunlock(&uma_drain_lock); 2113 } 2114 2115 /* See uma.h */ 2116 void * 2117 uma_zalloc_arg(uma_zone_t zone, void *udata, int flags) 2118 { 2119 void *item; 2120 uma_cache_t cache; 2121 uma_bucket_t bucket; 2122 int lockfail; 2123 int cpu; 2124 2125 #if 0 2126 /* XXX: FIX!! Do not enable this in CURRENT!!
MarkM */ 2127 /* The entropy here is desirable, but the harvesting is expensive */ 2128 random_harvest(&(zone->uz_name), sizeof(void *), 1, RANDOM_UMA_ALLOC); 2129 #endif 2130 2131 /* This is the fast path allocation */ 2132 #ifdef UMA_DEBUG_ALLOC_1 2133 printf("Allocating one item from %s(%p)\n", zone->uz_name, zone); 2134 #endif 2135 CTR3(KTR_UMA, "uma_zalloc_arg thread %x zone %s flags %d", curthread, 2136 zone->uz_name, flags); 2137 2138 if (flags & M_WAITOK) { 2139 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 2140 "uma_zalloc_arg: zone \"%s\"", zone->uz_name); 2141 } 2142 #ifdef DEBUG_MEMGUARD 2143 if (memguard_cmp_zone(zone)) { 2144 item = memguard_alloc(zone->uz_size, flags); 2145 if (item != NULL) { 2146 /* 2147 * Avoid conflict with the use-after-free 2148 * protecting infrastructure from INVARIANTS. 2149 */ 2150 if (zone->uz_init != NULL && 2151 zone->uz_init != mtrash_init && 2152 zone->uz_init(item, zone->uz_size, flags) != 0) 2153 return (NULL); 2154 if (zone->uz_ctor != NULL && 2155 zone->uz_ctor != mtrash_ctor && 2156 zone->uz_ctor(item, zone->uz_size, udata, 2157 flags) != 0) { 2158 zone->uz_fini(item, zone->uz_size); 2159 return (NULL); 2160 } 2161 #if 0 2162 /* XXX: FIX!! Do not enable this in CURRENT!! MarkM */ 2163 /* The entropy here is desirable, but the harvesting is expensive */ 2164 random_harvest(&item, sizeof(void *), 1, RANDOM_UMA_ALLOC); 2165 #endif 2166 return (item); 2167 } 2168 /* This is unfortunate but should not be fatal. */ 2169 } 2170 #endif 2171 /* 2172 * If possible, allocate from the per-CPU cache. There are two 2173 * requirements for safe access to the per-CPU cache: (1) the thread 2174 * accessing the cache must not be preempted or yield during access, 2175 * and (2) the thread must not migrate CPUs without switching which 2176 * cache it accesses. We rely on a critical section to prevent 2177 * preemption and migration. We release the critical section in 2178 * order to acquire the zone mutex if we are unable to allocate from 2179 * the current cache; when we re-acquire the critical section, we 2180 * must detect and handle migration if it has occurred. 2181 */ 2182 critical_enter(); 2183 cpu = curcpu; 2184 cache = &zone->uz_cpu[cpu]; 2185 2186 zalloc_start: 2187 bucket = cache->uc_allocbucket; 2188 if (bucket != NULL && bucket->ub_cnt > 0) { 2189 bucket->ub_cnt--; 2190 item = bucket->ub_bucket[bucket->ub_cnt]; 2191 #ifdef INVARIANTS 2192 bucket->ub_bucket[bucket->ub_cnt] = NULL; 2193 #endif 2194 KASSERT(item != NULL, ("uma_zalloc: Bucket pointer mangled.")); 2195 cache->uc_allocs++; 2196 critical_exit(); 2197 if (zone->uz_ctor != NULL && 2198 zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) { 2199 atomic_add_long(&zone->uz_fails, 1); 2200 zone_free_item(zone, item, udata, SKIP_DTOR); 2201 return (NULL); 2202 } 2203 #ifdef INVARIANTS 2204 uma_dbg_alloc(zone, NULL, item); 2205 #endif 2206 if (flags & M_ZERO) 2207 uma_zero_item(item, zone); 2208 #if 0 2209 /* XXX: FIX!! Do not enable this in CURRENT!! MarkM */ 2210 /* The entropy here is desirable, but the harvesting is expensive */ 2211 random_harvest(&item, sizeof(void *), 1, RANDOM_UMA_ALLOC); 2212 #endif 2213 return (item); 2214 } 2215 2216 /* 2217 * We have run out of items in our alloc bucket. 2218 * See if we can switch with our free bucket. 
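* (Each per-CPU cache keeps two buckets precisely so that a burst of frees followed by a burst of allocations can both be served without taking the zone lock; the swap below is the only bookkeeping needed.)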
2219 */ 2220 bucket = cache->uc_freebucket; 2221 if (bucket != NULL && bucket->ub_cnt > 0) { 2222 #ifdef UMA_DEBUG_ALLOC 2223 printf("uma_zalloc: Swapping empty with alloc.\n"); 2224 #endif 2225 cache->uc_freebucket = cache->uc_allocbucket; 2226 cache->uc_allocbucket = bucket; 2227 goto zalloc_start; 2228 } 2229 2230 /* 2231 * Discard any empty allocation bucket while we hold no locks. 2232 */ 2233 bucket = cache->uc_allocbucket; 2234 cache->uc_allocbucket = NULL; 2235 critical_exit(); 2236 if (bucket != NULL) 2237 bucket_free(zone, bucket, udata); 2238 2239 /* Short-circuit for zones without buckets and low memory. */ 2240 if (zone->uz_count == 0 || bucketdisable) 2241 goto zalloc_item; 2242 2243 /* 2244 * The attempt to retrieve the item from the per-CPU cache has failed, 2245 * so we must go back to the zone. This requires the zone lock, so we 2246 * must drop the critical section, then re-acquire it when we go back 2247 * to the cache. Since the critical section is released, we may be 2248 * preempted or migrate. As such, make sure not to maintain any 2249 * thread-local state specific to the cache from prior to releasing 2250 * the critical section. 2251 */ 2252 lockfail = 0; 2253 if (ZONE_TRYLOCK(zone) == 0) { 2254 /* Record contention to size the buckets. */ 2255 ZONE_LOCK(zone); 2256 lockfail = 1; 2257 } 2258 critical_enter(); 2259 cpu = curcpu; 2260 cache = &zone->uz_cpu[cpu]; 2261 2262 /* 2263 * Since we have locked the zone we may as well send back our stats. 2264 */ 2265 atomic_add_long(&zone->uz_allocs, cache->uc_allocs); 2266 atomic_add_long(&zone->uz_frees, cache->uc_frees); 2267 cache->uc_allocs = 0; 2268 cache->uc_frees = 0; 2269 2270 /* See if we lost the race to fill the cache. */ 2271 if (cache->uc_allocbucket != NULL) { 2272 ZONE_UNLOCK(zone); 2273 goto zalloc_start; 2274 } 2275 2276 /* 2277 * Check the zone's cache of buckets. 2278 */ 2279 if ((bucket = LIST_FIRST(&zone->uz_buckets)) != NULL) { 2280 KASSERT(bucket->ub_cnt != 0, 2281 ("uma_zalloc_arg: Returning an empty bucket.")); 2282 2283 LIST_REMOVE(bucket, ub_link); 2284 cache->uc_allocbucket = bucket; 2285 ZONE_UNLOCK(zone); 2286 goto zalloc_start; 2287 } 2288 /* We are no longer associated with this CPU. */ 2289 critical_exit(); 2290 2291 /* 2292 * We bump the uz count when the cache size is insufficient to 2293 * handle the working set. 2294 */ 2295 if (lockfail && zone->uz_count < BUCKET_MAX) 2296 zone->uz_count++; 2297 ZONE_UNLOCK(zone); 2298 2299 /* 2300 * Now let's just fill a bucket and put it on the free list. If that 2301 * works we'll restart the allocation from the beginning and it 2302 * will use the just-filled bucket. 2303 */ 2304 bucket = zone_alloc_bucket(zone, udata, flags); 2305 if (bucket != NULL) { 2306 ZONE_LOCK(zone); 2307 critical_enter(); 2308 cpu = curcpu; 2309 cache = &zone->uz_cpu[cpu]; 2310 /* 2311 * See if we lost the race or were migrated. Cache the 2312 * initialized bucket to make this less likely or claim 2313 * the memory directly. 2314 */ 2315 if (cache->uc_allocbucket == NULL) 2316 cache->uc_allocbucket = bucket; 2317 else 2318 LIST_INSERT_HEAD(&zone->uz_buckets, bucket, ub_link); 2319 ZONE_UNLOCK(zone); 2320 goto zalloc_start; 2321 } 2322 2323 /* 2324 * We may not be able to get a bucket so return an actual item. 2325 */ 2326 #ifdef UMA_DEBUG 2327 printf("uma_zalloc_arg: Bucketzone returned NULL\n"); 2328 #endif 2329 2330 zalloc_item: 2331 item = zone_alloc_item(zone, udata, flags); 2332 2333 #if 0 2334 /* XXX: FIX!! Do not enable this in CURRENT!!
MarkM */ 2335 /* The entropy here is desirable, but the harvesting is expensive */ 2336 random_harvest(&item, sizeof(void *), 1, RANDOM_UMA_ALLOC); 2337 #endif 2338 return (item); 2339 } 2340 2341 static uma_slab_t 2342 keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int flags) 2343 { 2344 uma_slab_t slab; 2345 int reserve; 2346 2347 mtx_assert(&keg->uk_lock, MA_OWNED); 2348 slab = NULL; 2349 reserve = 0; 2350 if ((flags & M_USE_RESERVE) == 0) 2351 reserve = keg->uk_reserve; 2352 2353 for (;;) { 2354 /* 2355 * Find a slab with some space. Prefer slabs that are partially 2356 * used over those that are totally full. This helps to reduce 2357 * fragmentation. 2358 */ 2359 if (keg->uk_free > reserve) { 2360 if (!LIST_EMPTY(&keg->uk_part_slab)) { 2361 slab = LIST_FIRST(&keg->uk_part_slab); 2362 } else { 2363 slab = LIST_FIRST(&keg->uk_free_slab); 2364 LIST_REMOVE(slab, us_link); 2365 LIST_INSERT_HEAD(&keg->uk_part_slab, slab, 2366 us_link); 2367 } 2368 MPASS(slab->us_keg == keg); 2369 return (slab); 2370 } 2371 2372 /* 2373 * M_NOVM means don't ask at all! 2374 */ 2375 if (flags & M_NOVM) 2376 break; 2377 2378 if (keg->uk_maxpages && keg->uk_pages >= keg->uk_maxpages) { 2379 keg->uk_flags |= UMA_ZFLAG_FULL; 2380 /* 2381 * If this is not a multi-zone, set the FULL bit. 2382 * Otherwise slab_multi() takes care of it. 2383 */ 2384 if ((zone->uz_flags & UMA_ZFLAG_MULTI) == 0) { 2385 zone->uz_flags |= UMA_ZFLAG_FULL; 2386 zone_log_warning(zone); 2387 } 2388 if (flags & M_NOWAIT) 2389 break; 2390 zone->uz_sleeps++; 2391 msleep(keg, &keg->uk_lock, PVM, "keglimit", 0); 2392 continue; 2393 } 2394 slab = keg_alloc_slab(keg, zone, flags); 2395 /* 2396 * If we got a slab here it's safe to mark it partially used 2397 * and return. We assume that the caller is going to remove 2398 * at least one item. 2399 */ 2400 if (slab) { 2401 MPASS(slab->us_keg == keg); 2402 LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link); 2403 return (slab); 2404 } 2405 /* 2406 * We might not have been able to get a slab but another CPU 2407 * could have while we were unlocked. Check again before we 2408 * fail. 2409 */ 2410 flags |= M_NOVM; 2411 } 2412 return (slab); 2413 } 2414 2415 static uma_slab_t 2416 zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int flags) 2417 { 2418 uma_slab_t slab; 2419 2420 if (keg == NULL) { 2421 keg = zone_first_keg(zone); 2422 KEG_LOCK(keg); 2423 } 2424 2425 for (;;) { 2426 slab = keg_fetch_slab(keg, zone, flags); 2427 if (slab) 2428 return (slab); 2429 if (flags & (M_NOWAIT | M_NOVM)) 2430 break; 2431 } 2432 KEG_UNLOCK(keg); 2433 return (NULL); 2434 } 2435 2436 /* 2437 * zone_fetch_slab_multi: Fetches a slab from one available keg. Returns 2438 * with the keg locked. If NULL is returned, no lock is held. 2439 * 2440 * The last pointer is used to seed the search. It is not required. 2441 */ 2442 static uma_slab_t 2443 zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int rflags) 2444 { 2445 uma_klink_t klink; 2446 uma_slab_t slab; 2447 uma_keg_t keg; 2448 int flags; 2449 int empty; 2450 int full; 2451 2452 /* 2453 * Don't wait on the first pass. This will skip limit tests 2454 * as well. We don't want to block if we can find a provider 2455 * without blocking. 2456 */ 2457 flags = (rflags & ~M_WAITOK) | M_NOWAIT; 2458 /* 2459 * Use the last slab allocated as a hint for where to start 2460 * the search.
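* (The hint keg satisfied the previous request, so it is the keg most likely to still have a partially used slab available.)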
2461 */ 2462 if (last != NULL) { 2463 slab = keg_fetch_slab(last, zone, flags); 2464 if (slab) 2465 return (slab); 2466 KEG_UNLOCK(last); 2467 } 2468 /* 2469 * Loop until we have a slab in case of transient failures 2470 * while M_WAITOK is specified. I'm not sure this is 100% 2471 * required, but we've done it for so long now. 2472 */ 2473 for (;;) { 2474 empty = 0; 2475 full = 0; 2476 /* 2477 * Search the available kegs for slabs. Be careful to hold the 2478 * correct lock while calling into the keg layer. 2479 */ 2480 LIST_FOREACH(klink, &zone->uz_kegs, kl_link) { 2481 keg = klink->kl_keg; 2482 KEG_LOCK(keg); 2483 if ((keg->uk_flags & UMA_ZFLAG_FULL) == 0) { 2484 slab = keg_fetch_slab(keg, zone, flags); 2485 if (slab) 2486 return (slab); 2487 } 2488 if (keg->uk_flags & UMA_ZFLAG_FULL) 2489 full++; 2490 else 2491 empty++; 2492 KEG_UNLOCK(keg); 2493 } 2494 if (rflags & (M_NOWAIT | M_NOVM)) 2495 break; 2496 flags = rflags; 2497 /* 2498 * All kegs are full. XXX We can't atomically check all kegs 2499 * and sleep so just sleep for a short period and retry. 2500 */ 2501 if (full && !empty) { 2502 ZONE_LOCK(zone); 2503 zone->uz_flags |= UMA_ZFLAG_FULL; 2504 zone->uz_sleeps++; 2505 zone_log_warning(zone); 2506 msleep(zone, zone->uz_lockptr, PVM, 2507 "zonelimit", hz/100); 2508 zone->uz_flags &= ~UMA_ZFLAG_FULL; 2509 ZONE_UNLOCK(zone); 2510 continue; 2511 } 2512 } 2513 return (NULL); 2514 } 2515 2516 static void * 2517 slab_alloc_item(uma_keg_t keg, uma_slab_t slab) 2518 { 2519 void *item; 2520 uint8_t freei; 2521 2522 MPASS(keg == slab->us_keg); 2523 mtx_assert(&keg->uk_lock, MA_OWNED); 2524 2525 freei = BIT_FFS(SLAB_SETSIZE, &slab->us_free) - 1; 2526 BIT_CLR(SLAB_SETSIZE, freei, &slab->us_free); 2527 item = slab->us_data + (keg->uk_rsize * freei); 2528 slab->us_freecount--; 2529 keg->uk_free--; 2530 2531 /* Move this slab to the full list */ 2532 if (slab->us_freecount == 0) { 2533 LIST_REMOVE(slab, us_link); 2534 LIST_INSERT_HEAD(&keg->uk_full_slab, slab, us_link); 2535 } 2536 2537 return (item); 2538 } 2539 2540 static int 2541 zone_import(uma_zone_t zone, void **bucket, int max, int flags) 2542 { 2543 uma_slab_t slab; 2544 uma_keg_t keg; 2545 int i; 2546 2547 slab = NULL; 2548 keg = NULL; 2549 /* Try to keep the buckets totally full */ 2550 for (i = 0; i < max; ) { 2551 if ((slab = zone->uz_slab(zone, keg, flags)) == NULL) 2552 break; 2553 keg = slab->us_keg; 2554 while (slab->us_freecount && i < max) { 2555 bucket[i++] = slab_alloc_item(keg, slab); 2556 if (keg->uk_free <= keg->uk_reserve) 2557 break; 2558 } 2559 /* Don't grab more than one slab at a time. */ 2560 flags &= ~M_WAITOK; 2561 flags |= M_NOWAIT; 2562 } 2563 if (slab != NULL) 2564 KEG_UNLOCK(keg); 2565 2566 return (i); 2567 } 2568 2569 static uma_bucket_t 2570 zone_alloc_bucket(uma_zone_t zone, void *udata, int flags) 2571 { 2572 uma_bucket_t bucket; 2573 int max; 2574 2575 /* Don't wait for buckets, preserve caller's NOVM setting. */ 2576 bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM)); 2577 if (bucket == NULL) 2578 return (NULL); 2579 2580 max = MIN(bucket->ub_entries, zone->uz_count); 2581 bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket, 2582 max, flags); 2583 2584 /* 2585 * Initialize the memory if necessary.
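* Items imported from the keg have only had the keg-level init applied when their slab was created; the zone-level uz_init still has to run here, once per item.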
2586 */ 2587 if (bucket->ub_cnt != 0 && zone->uz_init != NULL) { 2588 int i; 2589 2590 for (i = 0; i < bucket->ub_cnt; i++) 2591 if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size, 2592 flags) != 0) 2593 break; 2594 /* 2595 * If we couldn't initialize the whole bucket, put the 2596 * rest back onto the freelist. 2597 */ 2598 if (i != bucket->ub_cnt) { 2599 zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i], 2600 bucket->ub_cnt - i); 2601 #ifdef INVARIANTS 2602 bzero(&bucket->ub_bucket[i], 2603 sizeof(void *) * (bucket->ub_cnt - i)); 2604 #endif 2605 bucket->ub_cnt = i; 2606 } 2607 } 2608 2609 if (bucket->ub_cnt == 0) { 2610 bucket_free(zone, bucket, udata); 2611 atomic_add_long(&zone->uz_fails, 1); 2612 return (NULL); 2613 } 2614 2615 return (bucket); 2616 } 2617 2618 /* 2619 * Allocates a single item from a zone. 2620 * 2621 * Arguments 2622 * zone The zone to alloc for. 2623 * udata The data to be passed to the constructor. 2624 * flags M_WAITOK, M_NOWAIT, M_ZERO. 2625 * 2626 * Returns 2627 * NULL if there is no memory and M_NOWAIT is set 2628 * An item if successful 2629 */ 2630 2631 static void * 2632 zone_alloc_item(uma_zone_t zone, void *udata, int flags) 2633 { 2634 void *item; 2635 2636 item = NULL; 2637 2638 #ifdef UMA_DEBUG_ALLOC 2639 printf("INTERNAL: Allocating one item from %s(%p)\n", zone->uz_name, zone); 2640 #endif 2641 if (zone->uz_import(zone->uz_arg, &item, 1, flags) != 1) 2642 goto fail; 2643 atomic_add_long(&zone->uz_allocs, 1); 2644 2645 /* 2646 * We have to call both the zone's init (not the keg's init) 2647 * and the zone's ctor. This is because the item is going from 2648 * a keg slab directly to the user, and the user is expecting it 2649 * to be both zone-init'd as well as zone-ctor'd. 2650 */ 2651 if (zone->uz_init != NULL) { 2652 if (zone->uz_init(item, zone->uz_size, flags) != 0) { 2653 zone_free_item(zone, item, udata, SKIP_FINI); 2654 goto fail; 2655 } 2656 } 2657 if (zone->uz_ctor != NULL) { 2658 if (zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) { 2659 zone_free_item(zone, item, udata, SKIP_DTOR); 2660 goto fail; 2661 } 2662 } 2663 #ifdef INVARIANTS 2664 uma_dbg_alloc(zone, NULL, item); 2665 #endif 2666 if (flags & M_ZERO) 2667 uma_zero_item(item, zone); 2668 2669 return (item); 2670 2671 fail: 2672 atomic_add_long(&zone->uz_fails, 1); 2673 return (NULL); 2674 } 2675 2676 /* See uma.h */ 2677 void 2678 uma_zfree_arg(uma_zone_t zone, void *item, void *udata) 2679 { 2680 uma_cache_t cache; 2681 uma_bucket_t bucket; 2682 int lockfail; 2683 int cpu; 2684 2685 #if 0 2686 /* XXX: FIX!! Do not enable this in CURRENT!! MarkM */ 2687 /* The entropy here is desirable, but the harvesting is expensive */ 2688 struct entropy { 2689 const void *uz_name; 2690 const void *item; 2691 } entropy; 2692 2693 entropy.uz_name = zone->uz_name; 2694 entropy.item = item; 2695 random_harvest(&entropy, sizeof(struct entropy), 2, RANDOM_UMA_ALLOC); 2696 #endif 2697 2698 #ifdef UMA_DEBUG_ALLOC_1 2699 printf("Freeing item %p to %s(%p)\n", item, zone->uz_name, zone); 2700 #endif 2701 CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread, 2702 zone->uz_name); 2703 2704 /* uma_zfree(..., NULL) does nothing, to match free(9). 
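* (Error paths can therefore call uma_zfree() unconditionally on a pointer that may be NULL.)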
*/ 2705 if (item == NULL) 2706 return; 2707 #ifdef DEBUG_MEMGUARD 2708 if (is_memguard_addr(item)) { 2709 if (zone->uz_dtor != NULL && zone->uz_dtor != mtrash_dtor) 2710 zone->uz_dtor(item, zone->uz_size, udata); 2711 if (zone->uz_fini != NULL && zone->uz_fini != mtrash_fini) 2712 zone->uz_fini(item, zone->uz_size); 2713 memguard_free(item); 2714 return; 2715 } 2716 #endif 2717 #ifdef INVARIANTS 2718 if (zone->uz_flags & UMA_ZONE_MALLOC) 2719 uma_dbg_free(zone, udata, item); 2720 else 2721 uma_dbg_free(zone, NULL, item); 2722 #endif 2723 if (zone->uz_dtor != NULL) 2724 zone->uz_dtor(item, zone->uz_size, udata); 2725 2726 /* 2727 * The race here is acceptable. If we miss it we'll just have to wait 2728 * a little longer for the limits to be reset. 2729 */ 2730 if (zone->uz_flags & UMA_ZFLAG_FULL) 2731 goto zfree_item; 2732 2733 /* 2734 * If possible, free to the per-CPU cache. There are two 2735 * requirements for safe access to the per-CPU cache: (1) the thread 2736 * accessing the cache must not be preempted or yield during access, 2737 * and (2) the thread must not migrate CPUs without switching which 2738 * cache it accesses. We rely on a critical section to prevent 2739 * preemption and migration. We release the critical section in 2740 * order to acquire the zone mutex if we are unable to free to the 2741 * current cache; when we re-acquire the critical section, we must 2742 * detect and handle migration if it has occurred. 2743 */ 2744 zfree_restart: 2745 critical_enter(); 2746 cpu = curcpu; 2747 cache = &zone->uz_cpu[cpu]; 2748 2749 zfree_start: 2750 /* 2751 * Try to free into the allocbucket first to give LIFO ordering 2752 * for cache-hot data structures. Spill over into the freebucket 2753 * if necessary. Alloc will swap them if one runs dry. 2754 */ 2755 bucket = cache->uc_allocbucket; 2756 if (bucket == NULL || bucket->ub_cnt >= bucket->ub_entries) 2757 bucket = cache->uc_freebucket; 2758 if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) { 2759 KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL, 2760 ("uma_zfree: Freeing to non free bucket index.")); 2761 bucket->ub_bucket[bucket->ub_cnt] = item; 2762 bucket->ub_cnt++; 2763 cache->uc_frees++; 2764 critical_exit(); 2765 return; 2766 } 2767 2768 /* 2769 * We must go back to the zone, which requires acquiring the zone lock, 2770 * which in turn means we must release and re-acquire the critical 2771 * section. Since the critical section is released, we may be 2772 * preempted or migrate. As such, make sure not to maintain any 2773 * thread-local state specific to the cache from prior to releasing 2774 * the critical section. 2775 */ 2776 critical_exit(); 2777 if (zone->uz_count == 0 || bucketdisable) 2778 goto zfree_item; 2779 2780 lockfail = 0; 2781 if (ZONE_TRYLOCK(zone) == 0) { 2782 /* Record contention to size the buckets. */ 2783 ZONE_LOCK(zone); 2784 lockfail = 1; 2785 } 2786 critical_enter(); 2787 cpu = curcpu; 2788 cache = &zone->uz_cpu[cpu]; 2789 2790 /* 2791 * Since we have locked the zone we may as well send back our stats. 2792 */ 2793 atomic_add_long(&zone->uz_allocs, cache->uc_allocs); 2794 atomic_add_long(&zone->uz_frees, cache->uc_frees); 2795 cache->uc_allocs = 0; 2796 cache->uc_frees = 0; 2797 2798 bucket = cache->uc_freebucket; 2799 if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) { 2800 ZONE_UNLOCK(zone); 2801 goto zfree_start; 2802 } 2803 cache->uc_freebucket = NULL; 2804 2805 /* Can we throw this on the zone full list?
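* (A full bucket parked on the zone's bucket list can later be handed to whichever CPU cache runs dry first; see the zone bucket check in uma_zalloc_arg() above.)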
*/ 2806 if (bucket != NULL) { 2807 #ifdef UMA_DEBUG_ALLOC 2808 printf("uma_zfree: Putting old bucket on the free list.\n"); 2809 #endif 2810 /* ub_cnt is pointing to the last free item */ 2811 KASSERT(bucket->ub_cnt != 0, 2812 ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n")); 2813 LIST_INSERT_HEAD(&zone->uz_buckets, bucket, ub_link); 2814 } 2815 2816 /* We are no longer associated with this CPU. */ 2817 critical_exit(); 2818 2819 /* 2820 * We bump the uz count when the cache size is insufficient to 2821 * handle the working set. 2822 */ 2823 if (lockfail && zone->uz_count < BUCKET_MAX) 2824 zone->uz_count++; 2825 ZONE_UNLOCK(zone); 2826 2827 #ifdef UMA_DEBUG_ALLOC 2828 printf("uma_zfree: Allocating new free bucket.\n"); 2829 #endif 2830 bucket = bucket_alloc(zone, udata, M_NOWAIT); 2831 if (bucket) { 2832 critical_enter(); 2833 cpu = curcpu; 2834 cache = &zone->uz_cpu[cpu]; 2835 if (cache->uc_freebucket == NULL) { 2836 cache->uc_freebucket = bucket; 2837 goto zfree_start; 2838 } 2839 /* 2840 * We lost the race, start over. We have to drop our 2841 * critical section to free the bucket. 2842 */ 2843 critical_exit(); 2844 bucket_free(zone, bucket, udata); 2845 goto zfree_restart; 2846 } 2847 2848 /* 2849 * If nothing else caught this, we'll just do an internal free. 2850 */ 2851 zfree_item: 2852 zone_free_item(zone, item, udata, SKIP_DTOR); 2853 2854 return; 2855 } 2856 2857 static void 2858 slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item) 2859 { 2860 uint8_t freei; 2861 2862 mtx_assert(&keg->uk_lock, MA_OWNED); 2863 MPASS(keg == slab->us_keg); 2864 2865 /* Do we need to remove from any lists? */ 2866 if (slab->us_freecount+1 == keg->uk_ipers) { 2867 LIST_REMOVE(slab, us_link); 2868 LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link); 2869 } else if (slab->us_freecount == 0) { 2870 LIST_REMOVE(slab, us_link); 2871 LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link); 2872 } 2873 2874 /* Slab management. */ 2875 freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize; 2876 BIT_SET(SLAB_SETSIZE, freei, &slab->us_free); 2877 slab->us_freecount++; 2878 2879 /* Keg statistics. */ 2880 keg->uk_free++; 2881 } 2882 2883 static void 2884 zone_release(uma_zone_t zone, void **bucket, int cnt) 2885 { 2886 void *item; 2887 uma_slab_t slab; 2888 uma_keg_t keg; 2889 uint8_t *mem; 2890 int clearfull; 2891 int i; 2892 2893 clearfull = 0; 2894 keg = zone_first_keg(zone); 2895 KEG_LOCK(keg); 2896 for (i = 0; i < cnt; i++) { 2897 item = bucket[i]; 2898 if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) { 2899 mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK)); 2900 if (zone->uz_flags & UMA_ZONE_HASH) { 2901 slab = hash_sfind(&keg->uk_hash, mem); 2902 } else { 2903 mem += keg->uk_pgoff; 2904 slab = (uma_slab_t)mem; 2905 } 2906 } else { 2907 slab = vtoslab((vm_offset_t)item); 2908 if (slab->us_keg != keg) { 2909 KEG_UNLOCK(keg); 2910 keg = slab->us_keg; 2911 KEG_LOCK(keg); 2912 } 2913 } 2914 slab_free_item(keg, slab, item); 2915 if (keg->uk_flags & UMA_ZFLAG_FULL) { 2916 if (keg->uk_pages < keg->uk_maxpages) { 2917 keg->uk_flags &= ~UMA_ZFLAG_FULL; 2918 clearfull = 1; 2919 } 2920 2921 /* 2922 * We can handle one more allocation. Since we're 2923 * clearing ZFLAG_FULL, wake up all procs blocked 2924 * on pages. This should be uncommon, so keeping this 2925 * simple for now (rather than adding count of blocked 2926 * threads etc). 
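* The wakeup() on the keg address below pairs with the msleep() in keg_fetch_slab() above.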
2927 */ 2928 wakeup(keg); 2929 } 2930 } 2931 KEG_UNLOCK(keg); 2932 if (clearfull) { 2933 ZONE_LOCK(zone); 2934 zone->uz_flags &= ~UMA_ZFLAG_FULL; 2935 wakeup(zone); 2936 ZONE_UNLOCK(zone); 2937 } 2938 2939 } 2940 2941 /* 2942 * Frees a single item to any zone. 2943 * 2944 * Arguments: 2945 * zone The zone to free to 2946 * item The item we're freeing 2947 * udata User supplied data for the dtor 2948 * skip Skip dtors and finis 2949 */ 2950 static void 2951 zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip) 2952 { 2953 2954 #ifdef INVARIANTS 2955 if (skip == SKIP_NONE) { 2956 if (zone->uz_flags & UMA_ZONE_MALLOC) 2957 uma_dbg_free(zone, udata, item); 2958 else 2959 uma_dbg_free(zone, NULL, item); 2960 } 2961 #endif 2962 if (skip < SKIP_DTOR && zone->uz_dtor) 2963 zone->uz_dtor(item, zone->uz_size, udata); 2964 2965 if (skip < SKIP_FINI && zone->uz_fini) 2966 zone->uz_fini(item, zone->uz_size); 2967 2968 atomic_add_long(&zone->uz_frees, 1); 2969 zone->uz_release(zone->uz_arg, &item, 1); 2970 } 2971 2972 /* See uma.h */ 2973 int 2974 uma_zone_set_max(uma_zone_t zone, int nitems) 2975 { 2976 uma_keg_t keg; 2977 2978 keg = zone_first_keg(zone); 2979 if (keg == NULL) 2980 return (0); 2981 KEG_LOCK(keg); 2982 keg->uk_maxpages = (nitems / keg->uk_ipers) * keg->uk_ppera; 2983 if (keg->uk_maxpages * keg->uk_ipers < nitems) 2984 keg->uk_maxpages += keg->uk_ppera; 2985 nitems = keg->uk_maxpages * keg->uk_ipers; 2986 KEG_UNLOCK(keg); 2987 2988 return (nitems); 2989 } 2990 2991 /* See uma.h */ 2992 int 2993 uma_zone_get_max(uma_zone_t zone) 2994 { 2995 int nitems; 2996 uma_keg_t keg; 2997 2998 keg = zone_first_keg(zone); 2999 if (keg == NULL) 3000 return (0); 3001 KEG_LOCK(keg); 3002 nitems = keg->uk_maxpages * keg->uk_ipers; 3003 KEG_UNLOCK(keg); 3004 3005 return (nitems); 3006 } 3007 3008 /* See uma.h */ 3009 void 3010 uma_zone_set_warning(uma_zone_t zone, const char *warning) 3011 { 3012 3013 ZONE_LOCK(zone); 3014 zone->uz_warning = warning; 3015 ZONE_UNLOCK(zone); 3016 } 3017 3018 /* See uma.h */ 3019 int 3020 uma_zone_get_cur(uma_zone_t zone) 3021 { 3022 int64_t nitems; 3023 u_int i; 3024 3025 ZONE_LOCK(zone); 3026 nitems = zone->uz_allocs - zone->uz_frees; 3027 CPU_FOREACH(i) { 3028 /* 3029 * See the comment in sysctl_vm_zone_stats() regarding the 3030 * safety of accessing the per-cpu caches. With the zone lock 3031 * held, it is safe, but can potentially result in stale data. 3032 */ 3033 nitems += zone->uz_cpu[i].uc_allocs - 3034 zone->uz_cpu[i].uc_frees; 3035 } 3036 ZONE_UNLOCK(zone); 3037 3038 return (nitems < 0 ? 
0 : nitems); 3039 } 3040 3041 /* See uma.h */ 3042 void 3043 uma_zone_set_init(uma_zone_t zone, uma_init uminit) 3044 { 3045 uma_keg_t keg; 3046 3047 keg = zone_first_keg(zone); 3048 KASSERT(keg != NULL, ("uma_zone_set_init: Invalid zone type")); 3049 KEG_LOCK(keg); 3050 KASSERT(keg->uk_pages == 0, 3051 ("uma_zone_set_init on non-empty keg")); 3052 keg->uk_init = uminit; 3053 KEG_UNLOCK(keg); 3054 } 3055 3056 /* See uma.h */ 3057 void 3058 uma_zone_set_fini(uma_zone_t zone, uma_fini fini) 3059 { 3060 uma_keg_t keg; 3061 3062 keg = zone_first_keg(zone); 3063 KASSERT(keg != NULL, ("uma_zone_set_fini: Invalid zone type")); 3064 KEG_LOCK(keg); 3065 KASSERT(keg->uk_pages == 0, 3066 ("uma_zone_set_fini on non-empty keg")); 3067 keg->uk_fini = fini; 3068 KEG_UNLOCK(keg); 3069 } 3070 3071 /* See uma.h */ 3072 void 3073 uma_zone_set_zinit(uma_zone_t zone, uma_init zinit) 3074 { 3075 3076 ZONE_LOCK(zone); 3077 KASSERT(zone_first_keg(zone)->uk_pages == 0, 3078 ("uma_zone_set_zinit on non-empty keg")); 3079 zone->uz_init = zinit; 3080 ZONE_UNLOCK(zone); 3081 } 3082 3083 /* See uma.h */ 3084 void 3085 uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini) 3086 { 3087 3088 ZONE_LOCK(zone); 3089 KASSERT(zone_first_keg(zone)->uk_pages == 0, 3090 ("uma_zone_set_zfini on non-empty keg")); 3091 zone->uz_fini = zfini; 3092 ZONE_UNLOCK(zone); 3093 } 3094 3095 /* See uma.h */ 3096 /* XXX uk_freef is not actually used with the zone locked */ 3097 void 3098 uma_zone_set_freef(uma_zone_t zone, uma_free freef) 3099 { 3100 uma_keg_t keg; 3101 3102 keg = zone_first_keg(zone); 3103 KASSERT(keg != NULL, ("uma_zone_set_freef: Invalid zone type")); 3104 KEG_LOCK(keg); 3105 keg->uk_freef = freef; 3106 KEG_UNLOCK(keg); 3107 } 3108 3109 /* See uma.h */ 3110 /* XXX uk_allocf is not actually used with the zone locked */ 3111 void 3112 uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf) 3113 { 3114 uma_keg_t keg; 3115 3116 keg = zone_first_keg(zone); 3117 KEG_LOCK(keg); 3118 keg->uk_allocf = allocf; 3119 KEG_UNLOCK(keg); 3120 } 3121 3122 /* See uma.h */ 3123 void 3124 uma_zone_reserve(uma_zone_t zone, int items) 3125 { 3126 uma_keg_t keg; 3127 3128 keg = zone_first_keg(zone); 3129 if (keg == NULL) 3130 return; 3131 KEG_LOCK(keg); 3132 keg->uk_reserve = items; 3133 KEG_UNLOCK(keg); 3134 3135 return; 3136 } 3137 3138 /* See uma.h */ 3139 int 3140 uma_zone_reserve_kva(uma_zone_t zone, int count) 3141 { 3142 uma_keg_t keg; 3143 vm_offset_t kva; 3144 int pages; 3145 3146 keg = zone_first_keg(zone); 3147 if (keg == NULL) 3148 return (0); 3149 pages = count / keg->uk_ipers; 3150 3151 if (pages * keg->uk_ipers < count) 3152 pages++; 3153 3154 #ifdef UMA_MD_SMALL_ALLOC 3155 if (keg->uk_ppera > 1) { 3156 #else 3157 if (1) { 3158 #endif 3159 kva = kva_alloc(pages * UMA_SLAB_SIZE); 3160 if (kva == 0) 3161 return (0); 3162 } else 3163 kva = 0; 3164 KEG_LOCK(keg); 3165 keg->uk_kva = kva; 3166 keg->uk_offset = 0; 3167 keg->uk_maxpages = pages; 3168 #ifdef UMA_MD_SMALL_ALLOC 3169 keg->uk_allocf = (keg->uk_ppera > 1) ?
noobj_alloc : uma_small_alloc; 3170 #else 3171 keg->uk_allocf = noobj_alloc; 3172 #endif 3173 keg->uk_flags |= UMA_ZONE_NOFREE; 3174 KEG_UNLOCK(keg); 3175 3176 return (1); 3177 } 3178 3179 /* See uma.h */ 3180 void 3181 uma_prealloc(uma_zone_t zone, int items) 3182 { 3183 int slabs; 3184 uma_slab_t slab; 3185 uma_keg_t keg; 3186 3187 keg = zone_first_keg(zone); 3188 if (keg == NULL) 3189 return; 3190 KEG_LOCK(keg); 3191 slabs = items / keg->uk_ipers; 3192 if (slabs * keg->uk_ipers < items) 3193 slabs++; 3194 while (slabs > 0) { 3195 slab = keg_alloc_slab(keg, zone, M_WAITOK); 3196 if (slab == NULL) 3197 break; 3198 MPASS(slab->us_keg == keg); 3199 LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link); 3200 slabs--; 3201 } 3202 KEG_UNLOCK(keg); 3203 } 3204 3205 /* See uma.h */ 3206 uint32_t * 3207 uma_find_refcnt(uma_zone_t zone, void *item) 3208 { 3209 uma_slabrefcnt_t slabref; 3210 uma_slab_t slab; 3211 uma_keg_t keg; 3212 uint32_t *refcnt; 3213 int idx; 3214 3215 slab = vtoslab((vm_offset_t)item & (~UMA_SLAB_MASK)); 3216 slabref = (uma_slabrefcnt_t)slab; 3217 keg = slab->us_keg; 3218 KASSERT(keg->uk_flags & UMA_ZONE_REFCNT, 3219 ("uma_find_refcnt(): zone possibly not UMA_ZONE_REFCNT")); 3220 idx = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize; 3221 refcnt = &slabref->us_refcnt[idx]; 3222 return (refcnt); 3223 } 3224 3225 /* See uma.h */ 3226 void 3227 uma_reclaim(void) 3228 { 3229 #ifdef UMA_DEBUG 3230 printf("UMA: vm asked us to release pages!\n"); 3231 #endif 3232 sx_xlock(&uma_drain_lock); 3233 bucket_enable(); 3234 zone_foreach(zone_drain); 3235 if (vm_page_count_min()) { 3236 cache_drain_safe(NULL); 3237 zone_foreach(zone_drain); 3238 } 3239 /* 3240 * Some slabs may have been freed, but the slab zones were visited 3241 * early in the pass; visit them again so that we can free pages that 3242 * became empty once the other zones were drained. We have to do the same for buckets.
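* (Draining an offpage zone returns its slab headers to slabzone and slabrefzone, so those zones can only shrink after their client zones have been drained.)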
3243 */ 3244 zone_drain(slabzone); 3245 zone_drain(slabrefzone); 3246 bucket_zone_drain(); 3247 sx_xunlock(&uma_drain_lock); 3248 } 3249 3250 /* See uma.h */ 3251 int 3252 uma_zone_exhausted(uma_zone_t zone) 3253 { 3254 int full; 3255 3256 ZONE_LOCK(zone); 3257 full = (zone->uz_flags & UMA_ZFLAG_FULL); 3258 ZONE_UNLOCK(zone); 3259 return (full); 3260 } 3261 3262 int 3263 uma_zone_exhausted_nolock(uma_zone_t zone) 3264 { 3265 return (zone->uz_flags & UMA_ZFLAG_FULL); 3266 } 3267 3268 void * 3269 uma_large_malloc(int size, int wait) 3270 { 3271 void *mem; 3272 uma_slab_t slab; 3273 uint8_t flags; 3274 3275 slab = zone_alloc_item(slabzone, NULL, wait); 3276 if (slab == NULL) 3277 return (NULL); 3278 mem = page_alloc(NULL, size, &flags, wait); 3279 if (mem) { 3280 vsetslab((vm_offset_t)mem, slab); 3281 slab->us_data = mem; 3282 slab->us_flags = flags | UMA_SLAB_MALLOC; 3283 slab->us_size = size; 3284 } else { 3285 zone_free_item(slabzone, slab, NULL, SKIP_NONE); 3286 } 3287 3288 return (mem); 3289 } 3290 3291 void 3292 uma_large_free(uma_slab_t slab) 3293 { 3294 3295 page_free(slab->us_data, slab->us_size, slab->us_flags); 3296 zone_free_item(slabzone, slab, NULL, SKIP_NONE); 3297 } 3298 3299 static void 3300 uma_zero_item(void *item, uma_zone_t zone) 3301 { 3302 3303 if (zone->uz_flags & UMA_ZONE_PCPU) { 3304 for (int i = 0; i < mp_ncpus; i++) 3305 bzero(zpcpu_get_cpu(item, i), zone->uz_size); 3306 } else 3307 bzero(item, zone->uz_size); 3308 } 3309 3310 void 3311 uma_print_stats(void) 3312 { 3313 zone_foreach(uma_print_zone); 3314 } 3315 3316 static void 3317 slab_print(uma_slab_t slab) 3318 { 3319 printf("slab: keg %p, data %p, freecount %d\n", 3320 slab->us_keg, slab->us_data, slab->us_freecount); 3321 } 3322 3323 static void 3324 cache_print(uma_cache_t cache) 3325 { 3326 printf("alloc: %p(%d), free: %p(%d)\n", 3327 cache->uc_allocbucket, 3328 cache->uc_allocbucket ? cache->uc_allocbucket->ub_cnt : 0, 3329 cache->uc_freebucket, 3330 cache->uc_freebucket ? cache->uc_freebucket->ub_cnt : 0); 3331 } 3332 3333 static void 3334 uma_print_keg(uma_keg_t keg) 3335 { 3336 uma_slab_t slab; 3337 3338 printf("keg: %s(%p) size %d(%d) flags %#x ipers %d ppera %d " 3339 "out %d free %d limit %d\n", 3340 keg->uk_name, keg, keg->uk_size, keg->uk_rsize, keg->uk_flags, 3341 keg->uk_ipers, keg->uk_ppera, 3342 (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free, 3343 (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers); 3344 printf("Part slabs:\n"); 3345 LIST_FOREACH(slab, &keg->uk_part_slab, us_link) 3346 slab_print(slab); 3347 printf("Free slabs:\n"); 3348 LIST_FOREACH(slab, &keg->uk_free_slab, us_link) 3349 slab_print(slab); 3350 printf("Full slabs:\n"); 3351 LIST_FOREACH(slab, &keg->uk_full_slab, us_link) 3352 slab_print(slab); 3353 } 3354 3355 void 3356 uma_print_zone(uma_zone_t zone) 3357 { 3358 uma_cache_t cache; 3359 uma_klink_t kl; 3360 int i; 3361 3362 printf("zone: %s(%p) size %d flags %#x\n", 3363 zone->uz_name, zone, zone->uz_size, zone->uz_flags); 3364 LIST_FOREACH(kl, &zone->uz_kegs, kl_link) 3365 uma_print_keg(kl->kl_keg); 3366 CPU_FOREACH(i) { 3367 cache = &zone->uz_cpu[i]; 3368 printf("CPU %d Cache:\n", i); 3369 cache_print(cache); 3370 } 3371 } 3372 3373 #ifdef DDB 3374 /* 3375 * Generate statistics across both the zone and its per-CPU caches. Return 3376 * the desired statistics if the pointer is non-NULL for that statistic. 3377 * 3378 * Note: does not update the zone statistics, as it can't safely clear the 3379 * per-CPU cache statistic.
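* The unsynchronized per-CPU reads below may be slightly stale with respect to one another, so the result is only an approximate snapshot.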
3380 * 3381 * XXXRW: Following the uc_allocbucket and uc_freebucket pointers here isn't 3382 * safe from off-CPU; we should modify the caches to track this information 3383 * directly so that we don't have to. 3384 */ 3385 static void 3386 uma_zone_sumstat(uma_zone_t z, int *cachefreep, uint64_t *allocsp, 3387 uint64_t *freesp, uint64_t *sleepsp) 3388 { 3389 uma_cache_t cache; 3390 uint64_t allocs, frees, sleeps; 3391 int cachefree, cpu; 3392 3393 allocs = frees = sleeps = 0; 3394 cachefree = 0; 3395 CPU_FOREACH(cpu) { 3396 cache = &z->uz_cpu[cpu]; 3397 if (cache->uc_allocbucket != NULL) 3398 cachefree += cache->uc_allocbucket->ub_cnt; 3399 if (cache->uc_freebucket != NULL) 3400 cachefree += cache->uc_freebucket->ub_cnt; 3401 allocs += cache->uc_allocs; 3402 frees += cache->uc_frees; 3403 } 3404 allocs += z->uz_allocs; 3405 frees += z->uz_frees; 3406 sleeps += z->uz_sleeps; 3407 if (cachefreep != NULL) 3408 *cachefreep = cachefree; 3409 if (allocsp != NULL) 3410 *allocsp = allocs; 3411 if (freesp != NULL) 3412 *freesp = frees; 3413 if (sleepsp != NULL) 3414 *sleepsp = sleeps; 3415 } 3416 #endif /* DDB */ 3417 3418 static int 3419 sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS) 3420 { 3421 uma_keg_t kz; 3422 uma_zone_t z; 3423 int count; 3424 3425 count = 0; 3426 rw_rlock(&uma_rwlock); 3427 LIST_FOREACH(kz, &uma_kegs, uk_link) { 3428 LIST_FOREACH(z, &kz->uk_zones, uz_link) 3429 count++; 3430 } 3431 rw_runlock(&uma_rwlock); 3432 return (sysctl_handle_int(oidp, &count, 0, req)); 3433 } 3434 3435 static int 3436 sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS) 3437 { 3438 struct uma_stream_header ush; 3439 struct uma_type_header uth; 3440 struct uma_percpu_stat ups; 3441 uma_bucket_t bucket; 3442 struct sbuf sbuf; 3443 uma_cache_t cache; 3444 uma_klink_t kl; 3445 uma_keg_t kz; 3446 uma_zone_t z; 3447 uma_keg_t k; 3448 int count, error, i; 3449 3450 error = sysctl_wire_old_buffer(req, 0); 3451 if (error != 0) 3452 return (error); 3453 sbuf_new_for_sysctl(&sbuf, NULL, 128, req); 3454 3455 count = 0; 3456 rw_rlock(&uma_rwlock); 3457 LIST_FOREACH(kz, &uma_kegs, uk_link) { 3458 LIST_FOREACH(z, &kz->uk_zones, uz_link) 3459 count++; 3460 } 3461 3462 /* 3463 * Insert stream header. 3464 */ 3465 bzero(&ush, sizeof(ush)); 3466 ush.ush_version = UMA_STREAM_VERSION; 3467 ush.ush_maxcpus = (mp_maxid + 1); 3468 ush.ush_count = count; 3469 (void)sbuf_bcat(&sbuf, &ush, sizeof(ush)); 3470 3471 LIST_FOREACH(kz, &uma_kegs, uk_link) { 3472 LIST_FOREACH(z, &kz->uk_zones, uz_link) { 3473 bzero(&uth, sizeof(uth)); 3474 ZONE_LOCK(z); 3475 strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME); 3476 uth.uth_align = kz->uk_align; 3477 uth.uth_size = kz->uk_size; 3478 uth.uth_rsize = kz->uk_rsize; 3479 LIST_FOREACH(kl, &z->uz_kegs, kl_link) { 3480 k = kl->kl_keg; 3481 uth.uth_maxpages += k->uk_maxpages; 3482 uth.uth_pages += k->uk_pages; 3483 uth.uth_keg_free += k->uk_free; 3484 uth.uth_limit = (k->uk_maxpages / k->uk_ppera) 3485 * k->uk_ipers; 3486 } 3487 3488 /* 3489 * A zone is secondary is it is not the first entry 3490 * on the keg's zone list. 
3491 */ 3492 if ((z->uz_flags & UMA_ZONE_SECONDARY) && 3493 (LIST_FIRST(&kz->uk_zones) != z)) 3494 uth.uth_zone_flags = UTH_ZONE_SECONDARY; 3495 3496 LIST_FOREACH(bucket, &z->uz_buckets, ub_link) 3497 uth.uth_zone_free += bucket->ub_cnt; 3498 uth.uth_allocs = z->uz_allocs; 3499 uth.uth_frees = z->uz_frees; 3500 uth.uth_fails = z->uz_fails; 3501 uth.uth_sleeps = z->uz_sleeps; 3502 (void)sbuf_bcat(&sbuf, &uth, sizeof(uth)); 3503 /* 3504 * While it is not normally safe to access the cache 3505 * bucket pointers while not on the CPU that owns the 3506 * cache, we only allow the pointers to be exchanged 3507 * without the zone lock held, not invalidated, so 3508 * accept the possible race associated with bucket 3509 * exchange during monitoring. 3510 */ 3511 for (i = 0; i < (mp_maxid + 1); i++) { 3512 bzero(&ups, sizeof(ups)); 3513 if (kz->uk_flags & UMA_ZFLAG_INTERNAL) 3514 goto skip; 3515 if (CPU_ABSENT(i)) 3516 goto skip; 3517 cache = &z->uz_cpu[i]; 3518 if (cache->uc_allocbucket != NULL) 3519 ups.ups_cache_free += 3520 cache->uc_allocbucket->ub_cnt; 3521 if (cache->uc_freebucket != NULL) 3522 ups.ups_cache_free += 3523 cache->uc_freebucket->ub_cnt; 3524 ups.ups_allocs = cache->uc_allocs; 3525 ups.ups_frees = cache->uc_frees; 3526 skip: 3527 (void)sbuf_bcat(&sbuf, &ups, sizeof(ups)); 3528 } 3529 ZONE_UNLOCK(z); 3530 } 3531 } 3532 rw_runlock(&uma_rwlock); 3533 error = sbuf_finish(&sbuf); 3534 sbuf_delete(&sbuf); 3535 return (error); 3536 } 3537 3538 int 3539 sysctl_handle_uma_zone_max(SYSCTL_HANDLER_ARGS) 3540 { 3541 uma_zone_t zone = *(uma_zone_t *)arg1; 3542 int error, max, old; 3543 3544 old = max = uma_zone_get_max(zone); 3545 error = sysctl_handle_int(oidp, &max, 0, req); 3546 if (error || !req->newptr) 3547 return (error); 3548 3549 if (max < old) 3550 return (EINVAL); 3551 3552 uma_zone_set_max(zone, max); 3553 3554 return (0); 3555 } 3556 3557 int 3558 sysctl_handle_uma_zone_cur(SYSCTL_HANDLER_ARGS) 3559 { 3560 uma_zone_t zone = *(uma_zone_t *)arg1; 3561 int cur; 3562 3563 cur = uma_zone_get_cur(zone); 3564 return (sysctl_handle_int(oidp, &cur, 0, req)); 3565 } 3566 3567 #ifdef DDB 3568 DB_SHOW_COMMAND(uma, db_show_uma) 3569 { 3570 uint64_t allocs, frees, sleeps; 3571 uma_bucket_t bucket; 3572 uma_keg_t kz; 3573 uma_zone_t z; 3574 int cachefree; 3575 3576 db_printf("%18s %8s %8s %8s %12s %8s %8s\n", "Zone", "Size", "Used", 3577 "Free", "Requests", "Sleeps", "Bucket"); 3578 LIST_FOREACH(kz, &uma_kegs, uk_link) { 3579 LIST_FOREACH(z, &kz->uk_zones, uz_link) { 3580 if (kz->uk_flags & UMA_ZFLAG_INTERNAL) { 3581 allocs = z->uz_allocs; 3582 frees = z->uz_frees; 3583 sleeps = z->uz_sleeps; 3584 cachefree = 0; 3585 } else 3586 uma_zone_sumstat(z, &cachefree, &allocs, 3587 &frees, &sleeps); 3588 if (!((z->uz_flags & UMA_ZONE_SECONDARY) && 3589 (LIST_FIRST(&kz->uk_zones) != z))) 3590 cachefree += kz->uk_free; 3591 LIST_FOREACH(bucket, &z->uz_buckets, ub_link) 3592 cachefree += bucket->ub_cnt; 3593 db_printf("%18s %8ju %8jd %8d %12ju %8ju %8u\n", 3594 z->uz_name, (uintmax_t)kz->uk_size, 3595 (intmax_t)(allocs - frees), cachefree, 3596 (uintmax_t)allocs, sleeps, z->uz_count); 3597 if (db_pager_quit) 3598 return; 3599 } 3600 } 3601 } 3602 3603 DB_SHOW_COMMAND(umacache, db_show_umacache) 3604 { 3605 uint64_t allocs, frees; 3606 uma_bucket_t bucket; 3607 uma_zone_t z; 3608 int cachefree; 3609 3610 db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free", 3611 "Requests", "Bucket"); 3612 LIST_FOREACH(z, &uma_cachezones, uz_link) { 3613 uma_zone_sumstat(z, &cachefree, &allocs, &frees, 
NULL); 3614 LIST_FOREACH(bucket, &z->uz_buckets, ub_link) 3615 cachefree += bucket->ub_cnt; 3616 db_printf("%18s %8ju %8jd %8d %12ju %8u\n", 3617 z->uz_name, (uintmax_t)z->uz_size, 3618 (intmax_t)(allocs - frees), cachefree, 3619 (uintmax_t)allocs, z->uz_count); 3620 if (db_pager_quit) 3621 return; 3622 } 3623 } 3624 #endif 3625
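/*
 * Illustrative usage of the public interface above. This is a sketch,
 * not part of the allocator; "struct foo" and "foo_zone" are
 * hypothetical names.
 *
 *	static uma_zone_t foo_zone;
 *
 *	foo_zone = uma_zcreate("foo", sizeof(struct foo),
 *	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
 *	uma_zone_set_max(foo_zone, 1024);	(optional item limit)
 *
 *	struct foo *fp = uma_zalloc(foo_zone, M_WAITOK | M_ZERO);
 *	...
 *	uma_zfree(foo_zone, fp);
 *
 *	uma_zdestroy(foo_zone);
 */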