/*-
 * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson <jeff@FreeBSD.org>
 * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
 * Copyright (c) 2004-2006 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uma_core.c  Implementation of the Universal Memory allocator
 *
 * This allocator is intended to replace the multitude of similar object caches
 * in the standard FreeBSD kernel.  The intent is to be flexible as well as
 * efficient.  A primary design goal is to return unused memory to the rest of
 * the system.  This will make the system as a whole more flexible due to the
 * ability to move memory to subsystems which most need it instead of leaving
 * pools of reserved memory unused.
 *
 * The basic ideas stem from similar slab/zone based allocators whose
 * algorithms are well known.
 */

/*
 * TODO:
 *	- Improve memory usage for large allocations
 *	- Investigate cache size adjustments
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/* I should really use ktr.. */
/*
#define UMA_DEBUG 1
#define UMA_DEBUG_ALLOC 1
#define UMA_DEBUG_ALLOC_1 1
*/

#include "opt_ddb.h"
#include "opt_param.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitset.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

#include <ddb/ddb.h>

#ifdef DEBUG_MEMGUARD
#include <vm/memguard.h>
#endif

/*
 * This is the zone and keg from which all zones are spawned.
The idea is that 103 * even the zone & keg heads are allocated from the allocator, so we use the 104 * bss section to bootstrap us. 105 */ 106 static struct uma_keg masterkeg; 107 static struct uma_zone masterzone_k; 108 static struct uma_zone masterzone_z; 109 static uma_zone_t kegs = &masterzone_k; 110 static uma_zone_t zones = &masterzone_z; 111 112 /* This is the zone from which all of uma_slab_t's are allocated. */ 113 static uma_zone_t slabzone; 114 static uma_zone_t slabrefzone; /* With refcounters (for UMA_ZONE_REFCNT) */ 115 116 /* 117 * The initial hash tables come out of this zone so they can be allocated 118 * prior to malloc coming up. 119 */ 120 static uma_zone_t hashzone; 121 122 /* The boot-time adjusted value for cache line alignment. */ 123 int uma_align_cache = 64 - 1; 124 125 static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets"); 126 127 /* 128 * Are we allowed to allocate buckets? 129 */ 130 static int bucketdisable = 1; 131 132 /* Linked list of all kegs in the system */ 133 static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs); 134 135 /* Linked list of all cache-only zones in the system */ 136 static LIST_HEAD(,uma_zone) uma_cachezones = 137 LIST_HEAD_INITIALIZER(uma_cachezones); 138 139 /* This RW lock protects the keg list */ 140 static struct rwlock_padalign uma_rwlock; 141 142 /* Linked list of boot time pages */ 143 static LIST_HEAD(,uma_slab) uma_boot_pages = 144 LIST_HEAD_INITIALIZER(uma_boot_pages); 145 146 /* This mutex protects the boot time pages list */ 147 static struct mtx_padalign uma_boot_pages_mtx; 148 149 /* Is the VM done starting up? */ 150 static int booted = 0; 151 #define UMA_STARTUP 1 152 #define UMA_STARTUP2 2 153 154 /* 155 * Only mbuf clusters use ref zones. Just provide enough references 156 * to support the one user. New code should not use the ref facility. 157 */ 158 static const u_int uma_max_ipers_ref = PAGE_SIZE / MCLBYTES; 159 160 /* 161 * This is the handle used to schedule events that need to happen 162 * outside of the allocation fast path. 163 */ 164 static struct callout uma_callout; 165 #define UMA_TIMEOUT 20 /* Seconds for callout interval. */ 166 167 /* 168 * This structure is passed as the zone ctor arg so that I don't have to create 169 * a special allocation function just for zones. 170 */ 171 struct uma_zctor_args { 172 const char *name; 173 size_t size; 174 uma_ctor ctor; 175 uma_dtor dtor; 176 uma_init uminit; 177 uma_fini fini; 178 uma_import import; 179 uma_release release; 180 void *arg; 181 uma_keg_t keg; 182 int align; 183 uint32_t flags; 184 }; 185 186 struct uma_kctor_args { 187 uma_zone_t zone; 188 size_t size; 189 uma_init uminit; 190 uma_fini fini; 191 int align; 192 uint32_t flags; 193 }; 194 195 struct uma_bucket_zone { 196 uma_zone_t ubz_zone; 197 char *ubz_name; 198 int ubz_entries; /* Number of items it can hold. */ 199 int ubz_maxsize; /* Maximum allocation size per-item. */ 200 }; 201 202 /* 203 * Compute the actual number of bucket entries to pack them in power 204 * of two sizes for more efficient space utilization. 
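 *
 * As a rough illustration (the header size varies by architecture): with
 * 8-byte pointers and, say, a 24-byte struct uma_bucket, BUCKET_SIZE(128)
 * is ((8 * 128) - 24) / 8 = 125 entries, so the item allocated for it in
 * bucket_init() is 24 + 125 * 8 = 1024 bytes, i.e. exactly 128
 * pointer-sized words.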
205 */ 206 #define BUCKET_SIZE(n) \ 207 (((sizeof(void *) * (n)) - sizeof(struct uma_bucket)) / sizeof(void *)) 208 209 #define BUCKET_MAX BUCKET_SIZE(256) 210 211 struct uma_bucket_zone bucket_zones[] = { 212 { NULL, "4 Bucket", BUCKET_SIZE(4), 4096 }, 213 { NULL, "6 Bucket", BUCKET_SIZE(6), 3072 }, 214 { NULL, "8 Bucket", BUCKET_SIZE(8), 2048 }, 215 { NULL, "12 Bucket", BUCKET_SIZE(12), 1536 }, 216 { NULL, "16 Bucket", BUCKET_SIZE(16), 1024 }, 217 { NULL, "32 Bucket", BUCKET_SIZE(32), 512 }, 218 { NULL, "64 Bucket", BUCKET_SIZE(64), 256 }, 219 { NULL, "128 Bucket", BUCKET_SIZE(128), 128 }, 220 { NULL, "256 Bucket", BUCKET_SIZE(256), 64 }, 221 { NULL, NULL, 0} 222 }; 223 224 /* 225 * Flags and enumerations to be passed to internal functions. 226 */ 227 enum zfreeskip { SKIP_NONE = 0, SKIP_DTOR, SKIP_FINI }; 228 229 /* Prototypes.. */ 230 231 static void *noobj_alloc(uma_zone_t, int, uint8_t *, int); 232 static void *page_alloc(uma_zone_t, int, uint8_t *, int); 233 static void *startup_alloc(uma_zone_t, int, uint8_t *, int); 234 static void page_free(void *, int, uint8_t); 235 static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int); 236 static void cache_drain(uma_zone_t); 237 static void bucket_drain(uma_zone_t, uma_bucket_t); 238 static void bucket_cache_drain(uma_zone_t zone); 239 static int keg_ctor(void *, int, void *, int); 240 static void keg_dtor(void *, int, void *); 241 static int zone_ctor(void *, int, void *, int); 242 static void zone_dtor(void *, int, void *); 243 static int zero_init(void *, int, int); 244 static void keg_small_init(uma_keg_t keg); 245 static void keg_large_init(uma_keg_t keg); 246 static void zone_foreach(void (*zfunc)(uma_zone_t)); 247 static void zone_timeout(uma_zone_t zone); 248 static int hash_alloc(struct uma_hash *); 249 static int hash_expand(struct uma_hash *, struct uma_hash *); 250 static void hash_free(struct uma_hash *hash); 251 static void uma_timeout(void *); 252 static void uma_startup3(void); 253 static void *zone_alloc_item(uma_zone_t, void *, int); 254 static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip); 255 static void bucket_enable(void); 256 static void bucket_init(void); 257 static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int); 258 static void bucket_free(uma_zone_t zone, uma_bucket_t, void *); 259 static void bucket_zone_drain(void); 260 static uma_bucket_t zone_alloc_bucket(uma_zone_t zone, void *, int flags); 261 static uma_slab_t zone_fetch_slab(uma_zone_t zone, uma_keg_t last, int flags); 262 static uma_slab_t zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int flags); 263 static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab); 264 static void slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item); 265 static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, 266 uma_fini fini, int align, uint32_t flags); 267 static int zone_import(uma_zone_t zone, void **bucket, int max, int flags); 268 static void zone_release(uma_zone_t zone, void **bucket, int cnt); 269 static void uma_zero_item(void *item, uma_zone_t zone); 270 271 void uma_print_zone(uma_zone_t); 272 void uma_print_stats(void); 273 static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS); 274 static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS); 275 276 SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL); 277 278 SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLTYPE_INT, 279 0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones"); 280 281 SYSCTL_PROC(_vm, OID_AUTO, zone_stats, 
CTLFLAG_RD|CTLTYPE_STRUCT, 282 0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats"); 283 284 static int zone_warnings = 1; 285 SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RWTUN, &zone_warnings, 0, 286 "Warn when UMA zones becomes full"); 287 288 /* 289 * This routine checks to see whether or not it's safe to enable buckets. 290 */ 291 static void 292 bucket_enable(void) 293 { 294 bucketdisable = vm_page_count_min(); 295 } 296 297 /* 298 * Initialize bucket_zones, the array of zones of buckets of various sizes. 299 * 300 * For each zone, calculate the memory required for each bucket, consisting 301 * of the header and an array of pointers. 302 */ 303 static void 304 bucket_init(void) 305 { 306 struct uma_bucket_zone *ubz; 307 int size; 308 int i; 309 310 for (i = 0, ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) { 311 size = roundup(sizeof(struct uma_bucket), sizeof(void *)); 312 size += sizeof(void *) * ubz->ubz_entries; 313 ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size, 314 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 315 UMA_ZONE_MTXCLASS | UMA_ZFLAG_BUCKET); 316 } 317 } 318 319 /* 320 * Given a desired number of entries for a bucket, return the zone from which 321 * to allocate the bucket. 322 */ 323 static struct uma_bucket_zone * 324 bucket_zone_lookup(int entries) 325 { 326 struct uma_bucket_zone *ubz; 327 328 for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) 329 if (ubz->ubz_entries >= entries) 330 return (ubz); 331 ubz--; 332 return (ubz); 333 } 334 335 static int 336 bucket_select(int size) 337 { 338 struct uma_bucket_zone *ubz; 339 340 ubz = &bucket_zones[0]; 341 if (size > ubz->ubz_maxsize) 342 return MAX((ubz->ubz_maxsize * ubz->ubz_entries) / size, 1); 343 344 for (; ubz->ubz_entries != 0; ubz++) 345 if (ubz->ubz_maxsize < size) 346 break; 347 ubz--; 348 return (ubz->ubz_entries); 349 } 350 351 static uma_bucket_t 352 bucket_alloc(uma_zone_t zone, void *udata, int flags) 353 { 354 struct uma_bucket_zone *ubz; 355 uma_bucket_t bucket; 356 357 /* 358 * This is to stop us from allocating per cpu buckets while we're 359 * running out of vm.boot_pages. Otherwise, we would exhaust the 360 * boot pages. This also prevents us from allocating buckets in 361 * low memory situations. 362 */ 363 if (bucketdisable) 364 return (NULL); 365 /* 366 * To limit bucket recursion we store the original zone flags 367 * in a cookie passed via zalloc_arg/zfree_arg. This allows the 368 * NOVM flag to persist even through deep recursions. We also 369 * store ZFLAG_BUCKET once we have recursed attempting to allocate 370 * a bucket for a bucket zone so we do not allow infinite bucket 371 * recursion. This cookie will even persist to frees of unused 372 * buckets via the allocation path or bucket allocations in the 373 * free path. 
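 *
 * Schematically (mirroring the code below): at the first level the cookie
 * is simply the zone's flag word, udata = (void *)(uintptr_t)zone->uz_flags;
 * when allocating a bucket for a bucket zone we OR in UMA_ZFLAG_BUCKET, and
 * a request that arrives already carrying UMA_ZFLAG_BUCKET fails with NULL
 * rather than recursing again.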
374 */ 375 if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0) 376 udata = (void *)(uintptr_t)zone->uz_flags; 377 else { 378 if ((uintptr_t)udata & UMA_ZFLAG_BUCKET) 379 return (NULL); 380 udata = (void *)((uintptr_t)udata | UMA_ZFLAG_BUCKET); 381 } 382 if ((uintptr_t)udata & UMA_ZFLAG_CACHEONLY) 383 flags |= M_NOVM; 384 ubz = bucket_zone_lookup(zone->uz_count); 385 if (ubz->ubz_zone == zone && (ubz + 1)->ubz_entries != 0) 386 ubz++; 387 bucket = uma_zalloc_arg(ubz->ubz_zone, udata, flags); 388 if (bucket) { 389 #ifdef INVARIANTS 390 bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries); 391 #endif 392 bucket->ub_cnt = 0; 393 bucket->ub_entries = ubz->ubz_entries; 394 } 395 396 return (bucket); 397 } 398 399 static void 400 bucket_free(uma_zone_t zone, uma_bucket_t bucket, void *udata) 401 { 402 struct uma_bucket_zone *ubz; 403 404 KASSERT(bucket->ub_cnt == 0, 405 ("bucket_free: Freeing a non free bucket.")); 406 if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0) 407 udata = (void *)(uintptr_t)zone->uz_flags; 408 ubz = bucket_zone_lookup(bucket->ub_entries); 409 uma_zfree_arg(ubz->ubz_zone, bucket, udata); 410 } 411 412 static void 413 bucket_zone_drain(void) 414 { 415 struct uma_bucket_zone *ubz; 416 417 for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) 418 zone_drain(ubz->ubz_zone); 419 } 420 421 static void 422 zone_log_warning(uma_zone_t zone) 423 { 424 static const struct timeval warninterval = { 300, 0 }; 425 426 if (!zone_warnings || zone->uz_warning == NULL) 427 return; 428 429 if (ratecheck(&zone->uz_ratecheck, &warninterval)) 430 printf("[zone: %s] %s\n", zone->uz_name, zone->uz_warning); 431 } 432 433 static void 434 zone_foreach_keg(uma_zone_t zone, void (*kegfn)(uma_keg_t)) 435 { 436 uma_klink_t klink; 437 438 LIST_FOREACH(klink, &zone->uz_kegs, kl_link) 439 kegfn(klink->kl_keg); 440 } 441 442 /* 443 * Routine called by timeout which is used to fire off some time interval 444 * based calculations. (stats, hash size, etc.) 445 * 446 * Arguments: 447 * arg Unused 448 * 449 * Returns: 450 * Nothing 451 */ 452 static void 453 uma_timeout(void *unused) 454 { 455 bucket_enable(); 456 zone_foreach(zone_timeout); 457 458 /* Reschedule this event */ 459 callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL); 460 } 461 462 /* 463 * Routine to perform timeout driven calculations. This expands the 464 * hashes and does per cpu statistics aggregation. 465 * 466 * Returns nothing. 467 */ 468 static void 469 keg_timeout(uma_keg_t keg) 470 { 471 472 KEG_LOCK(keg); 473 /* 474 * Expand the keg hash table. 475 * 476 * This is done if the number of slabs is larger than the hash size. 477 * What I'm trying to do here is completely reduce collisions. This 478 * may be a little aggressive. Should I allow for two collisions max? 479 */ 480 if (keg->uk_flags & UMA_ZONE_HASH && 481 keg->uk_pages / keg->uk_ppera >= keg->uk_hash.uh_hashsize) { 482 struct uma_hash newhash; 483 struct uma_hash oldhash; 484 int ret; 485 486 /* 487 * This is so involved because allocating and freeing 488 * while the keg lock is held will lead to deadlock. 489 * I have to do everything in stages and check for 490 * races. 
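		 *
		 * In outline: snapshot uk_hash under the keg lock, drop the
		 * lock, hash_alloc() a larger table (which may sleep), retake
		 * the lock, hash_expand() into the new table or discard it if
		 * we lost a race, and only hash_free() the stale table after
		 * the keg lock has been dropped again.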
		 */
		newhash = keg->uk_hash;
		KEG_UNLOCK(keg);
		ret = hash_alloc(&newhash);
		KEG_LOCK(keg);
		if (ret) {
			if (hash_expand(&keg->uk_hash, &newhash)) {
				oldhash = keg->uk_hash;
				keg->uk_hash = newhash;
			} else
				oldhash = newhash;

			KEG_UNLOCK(keg);
			hash_free(&oldhash);
			return;
		}
	}
	KEG_UNLOCK(keg);
}

static void
zone_timeout(uma_zone_t zone)
{

	zone_foreach_keg(zone, &keg_timeout);
}

/*
 * Allocate and zero fill the next sized hash table from the appropriate
 * backing store.
 *
 * Arguments:
 *	hash  A new hash structure with the old hash size in uh_hashsize
 *
 * Returns:
 *	1 on success and 0 on failure.
 */
static int
hash_alloc(struct uma_hash *hash)
{
	int oldsize;
	int alloc;

	oldsize = hash->uh_hashsize;

	/* We're just going to go to a power of two greater */
	if (oldsize) {
		hash->uh_hashsize = oldsize * 2;
		alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
		hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
		    M_UMAHASH, M_NOWAIT);
	} else {
		alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
		hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
		    M_WAITOK);
		hash->uh_hashsize = UMA_HASH_SIZE_INIT;
	}
	if (hash->uh_slab_hash) {
		bzero(hash->uh_slab_hash, alloc);
		hash->uh_hashmask = hash->uh_hashsize - 1;
		return (1);
	}

	return (0);
}

/*
 * Expands the hash table for HASH zones.  This is done from zone_timeout
 * to reduce collisions.  This must not be done in the regular allocation
 * path, otherwise, we can recurse on the vm while allocating pages.
 *
 * Arguments:
 *	oldhash  The hash you want to expand
 *	newhash  The hash structure for the new table
 *
 * Returns:
 *	1 if the new table was populated, 0 if it was not used.
 *
 * Discussion:
 */
static int
hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
{
	uma_slab_t slab;
	int hval;
	int i;

	if (!newhash->uh_slab_hash)
		return (0);

	if (oldhash->uh_hashsize >= newhash->uh_hashsize)
		return (0);

	/*
	 * I need to investigate hash algorithms for resizing without a
	 * full rehash.
	 */

	for (i = 0; i < oldhash->uh_hashsize; i++)
		while (!SLIST_EMPTY(&oldhash->uh_slab_hash[i])) {
			slab = SLIST_FIRST(&oldhash->uh_slab_hash[i]);
			SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[i], us_hlink);
			hval = UMA_HASH(newhash, slab->us_data);
			SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
			    slab, us_hlink);
		}

	return (1);
}

/*
 * Free the hash bucket to the appropriate backing store.
 *
 * Arguments:
 *	hash  The hash structure whose bucket is being freed
 *
 * Returns:
 *	Nothing
 */
static void
hash_free(struct uma_hash *hash)
{
	if (hash->uh_slab_hash == NULL)
		return;
	if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
		zone_free_item(hashzone, hash->uh_slab_hash, NULL, SKIP_NONE);
	else
		free(hash->uh_slab_hash, M_UMAHASH);
}

/*
 * Frees all outstanding items in a bucket
 *
 * Arguments:
 *	zone    The zone to free to, must be unlocked.
 *	bucket  The free/alloc bucket with items, cpu queue must be locked.
 *
 * Returns:
 *	Nothing
 */

static void
bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
{
	int i;

	if (bucket == NULL)
		return;

	if (zone->uz_fini)
		for (i = 0; i < bucket->ub_cnt; i++)
			zone->uz_fini(bucket->ub_bucket[i], zone->uz_size);
	zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt);
	bucket->ub_cnt = 0;
}

/*
 * Drains the per cpu caches for a zone.
 *
 * NOTE: This may only be called while the zone is being torn down, and not
 * during normal operation.  This is necessary in order that we do not have
 * to migrate CPUs to drain the per-CPU caches.
 *
 * Arguments:
 *	zone  The zone to drain, must be unlocked.
 *
 * Returns:
 *	Nothing
 */
static void
cache_drain(uma_zone_t zone)
{
	uma_cache_t cache;
	int cpu;

	/*
	 * XXX: It is safe to not lock the per-CPU caches, because we're
	 * tearing down the zone anyway.  I.e., there will be no further use
	 * of the caches at this point.
	 *
	 * XXX: It would be good to be able to assert that the zone is being
	 * torn down to prevent improper use of cache_drain().
	 *
	 * XXX: We lock the zone before passing into bucket_cache_drain() as
	 * it is used elsewhere.  Should the tear-down path be made special
	 * there in some form?
	 */
	CPU_FOREACH(cpu) {
		cache = &zone->uz_cpu[cpu];
		bucket_drain(zone, cache->uc_allocbucket);
		bucket_drain(zone, cache->uc_freebucket);
		if (cache->uc_allocbucket != NULL)
			bucket_free(zone, cache->uc_allocbucket, NULL);
		if (cache->uc_freebucket != NULL)
			bucket_free(zone, cache->uc_freebucket, NULL);
		cache->uc_allocbucket = cache->uc_freebucket = NULL;
	}
	ZONE_LOCK(zone);
	bucket_cache_drain(zone);
	ZONE_UNLOCK(zone);
}

static void
cache_shrink(uma_zone_t zone)
{

	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
		return;

	ZONE_LOCK(zone);
	zone->uz_count = (zone->uz_count_min + zone->uz_count) / 2;
	ZONE_UNLOCK(zone);
}

static void
cache_drain_safe_cpu(uma_zone_t zone)
{
	uma_cache_t cache;
	uma_bucket_t b1, b2;

	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
		return;

	b1 = b2 = NULL;
	ZONE_LOCK(zone);
	critical_enter();
	cache = &zone->uz_cpu[curcpu];
	if (cache->uc_allocbucket) {
		if (cache->uc_allocbucket->ub_cnt != 0)
			LIST_INSERT_HEAD(&zone->uz_buckets,
			    cache->uc_allocbucket, ub_link);
		else
			b1 = cache->uc_allocbucket;
		cache->uc_allocbucket = NULL;
	}
	if (cache->uc_freebucket) {
		if (cache->uc_freebucket->ub_cnt != 0)
			LIST_INSERT_HEAD(&zone->uz_buckets,
			    cache->uc_freebucket, ub_link);
		else
			b2 = cache->uc_freebucket;
		cache->uc_freebucket = NULL;
	}
	critical_exit();
	ZONE_UNLOCK(zone);
	if (b1)
		bucket_free(zone, b1, NULL);
	if (b2)
		bucket_free(zone, b2, NULL);
}

/*
 * Safely drain the per-CPU caches of a zone (or of every zone, if zone is
 * NULL) into the zone's bucket cache.  This is an expensive call because it
 * needs to bind to each CPU in turn and enter a critical section on it in
 * order to safely access that CPU's cache buckets.
 * The zone lock must not be held when calling this function.
 */
static void
cache_drain_safe(uma_zone_t zone)
{
	int cpu;

	/*
	 * Polite bucket size shrinking was not enough, shrink aggressively.
	 */
	if (zone)
		cache_shrink(zone);
	else
		zone_foreach(cache_shrink);

	CPU_FOREACH(cpu) {
		thread_lock(curthread);
		sched_bind(curthread, cpu);
		thread_unlock(curthread);

		if (zone)
			cache_drain_safe_cpu(zone);
		else
			zone_foreach(cache_drain_safe_cpu);
	}
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);
}

/*
 * Drain the cached buckets from a zone.  Expects a locked zone on entry.
 */
static void
bucket_cache_drain(uma_zone_t zone)
{
	uma_bucket_t bucket;

	/*
	 * Drain the bucket queues and free the buckets, we just keep two per
	 * cpu (alloc/free).
	 */
	while ((bucket = LIST_FIRST(&zone->uz_buckets)) != NULL) {
		LIST_REMOVE(bucket, ub_link);
		ZONE_UNLOCK(zone);
		bucket_drain(zone, bucket);
		bucket_free(zone, bucket, NULL);
		ZONE_LOCK(zone);
	}

	/*
	 * Shrink further bucket sizes.  The price of a single zone lock
	 * collision is probably lower than the price of a global cache drain.
	 */
	if (zone->uz_count > zone->uz_count_min)
		zone->uz_count--;
}

static void
keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
{
	uint8_t *mem;
	int i;
	uint8_t flags;

	mem = slab->us_data;
	flags = slab->us_flags;
	i = start;
	if (keg->uk_fini != NULL) {
		for (i--; i > -1; i--)
			keg->uk_fini(slab->us_data + (keg->uk_rsize * i),
			    keg->uk_size);
	}
	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
		zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
#ifdef UMA_DEBUG
	printf("%s: Returning %d bytes.\n", keg->uk_name,
	    PAGE_SIZE * keg->uk_ppera);
#endif
	keg->uk_freef(mem, PAGE_SIZE * keg->uk_ppera, flags);
}

/*
 * Frees pages from a keg back to the system.  This is done on demand from
 * the pageout daemon.
 *
 * Returns nothing.
 */
static void
keg_drain(uma_keg_t keg)
{
	struct slabhead freeslabs = { 0 };
	uma_slab_t slab;
	uma_slab_t n;

	/*
	 * We don't want to take pages from statically allocated kegs at this
	 * time.
	 */
	if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
		return;

#ifdef UMA_DEBUG
	printf("%s free items: %u\n", keg->uk_name, keg->uk_free);
#endif
	KEG_LOCK(keg);
	if (keg->uk_free == 0)
		goto finished;

	slab = LIST_FIRST(&keg->uk_free_slab);
	while (slab) {
		n = LIST_NEXT(slab, us_link);

		/* We have nowhere to free these to. */
		if (slab->us_flags & UMA_SLAB_BOOT) {
			slab = n;
			continue;
		}

		LIST_REMOVE(slab, us_link);
		keg->uk_pages -= keg->uk_ppera;
		keg->uk_free -= keg->uk_ipers;

		if (keg->uk_flags & UMA_ZONE_HASH)
			UMA_HASH_REMOVE(&keg->uk_hash, slab, slab->us_data);

		SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);

		slab = n;
	}
finished:
	KEG_UNLOCK(keg);

	while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
		SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
		keg_free_slab(keg, slab, keg->uk_ipers);
	}
}

static void
zone_drain_wait(uma_zone_t zone, int waitok)
{

	/*
	 * Set draining to interlock with zone_dtor() so we can release our
	 * locks as we go.  Only dtor() should do a WAITOK call since it
	 * is the only call that knows the structure will still be available
	 * when it wakes up.
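	 * zone_drain() below is simply this routine called with M_NOWAIT, so
	 * ordinary drain requests never sleep here; only the zone teardown
	 * path passes M_WAITOK.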
896 */ 897 ZONE_LOCK(zone); 898 while (zone->uz_flags & UMA_ZFLAG_DRAINING) { 899 if (waitok == M_NOWAIT) 900 goto out; 901 msleep(zone, zone->uz_lockptr, PVM, "zonedrain", 1); 902 } 903 zone->uz_flags |= UMA_ZFLAG_DRAINING; 904 bucket_cache_drain(zone); 905 ZONE_UNLOCK(zone); 906 /* 907 * The DRAINING flag protects us from being freed while 908 * we're running. Normally the uma_rwlock would protect us but we 909 * must be able to release and acquire the right lock for each keg. 910 */ 911 zone_foreach_keg(zone, &keg_drain); 912 ZONE_LOCK(zone); 913 zone->uz_flags &= ~UMA_ZFLAG_DRAINING; 914 wakeup(zone); 915 out: 916 ZONE_UNLOCK(zone); 917 } 918 919 void 920 zone_drain(uma_zone_t zone) 921 { 922 923 zone_drain_wait(zone, M_NOWAIT); 924 } 925 926 /* 927 * Allocate a new slab for a keg. This does not insert the slab onto a list. 928 * 929 * Arguments: 930 * wait Shall we wait? 931 * 932 * Returns: 933 * The slab that was allocated or NULL if there is no memory and the 934 * caller specified M_NOWAIT. 935 */ 936 static uma_slab_t 937 keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int wait) 938 { 939 uma_slabrefcnt_t slabref; 940 uma_alloc allocf; 941 uma_slab_t slab; 942 uint8_t *mem; 943 uint8_t flags; 944 int i; 945 946 mtx_assert(&keg->uk_lock, MA_OWNED); 947 slab = NULL; 948 mem = NULL; 949 950 #ifdef UMA_DEBUG 951 printf("alloc_slab: Allocating a new slab for %s\n", keg->uk_name); 952 #endif 953 allocf = keg->uk_allocf; 954 KEG_UNLOCK(keg); 955 956 if (keg->uk_flags & UMA_ZONE_OFFPAGE) { 957 slab = zone_alloc_item(keg->uk_slabzone, NULL, wait); 958 if (slab == NULL) 959 goto out; 960 } 961 962 /* 963 * This reproduces the old vm_zone behavior of zero filling pages the 964 * first time they are added to a zone. 965 * 966 * Malloced items are zeroed in uma_zalloc. 967 */ 968 969 if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0) 970 wait |= M_ZERO; 971 else 972 wait &= ~M_ZERO; 973 974 if (keg->uk_flags & UMA_ZONE_NODUMP) 975 wait |= M_NODUMP; 976 977 /* zone is passed for legacy reasons. 
*/ 978 mem = allocf(zone, keg->uk_ppera * PAGE_SIZE, &flags, wait); 979 if (mem == NULL) { 980 if (keg->uk_flags & UMA_ZONE_OFFPAGE) 981 zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE); 982 slab = NULL; 983 goto out; 984 } 985 986 /* Point the slab into the allocated memory */ 987 if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) 988 slab = (uma_slab_t )(mem + keg->uk_pgoff); 989 990 if (keg->uk_flags & UMA_ZONE_VTOSLAB) 991 for (i = 0; i < keg->uk_ppera; i++) 992 vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab); 993 994 slab->us_keg = keg; 995 slab->us_data = mem; 996 slab->us_freecount = keg->uk_ipers; 997 slab->us_flags = flags; 998 BIT_FILL(SLAB_SETSIZE, &slab->us_free); 999 #ifdef INVARIANTS 1000 BIT_ZERO(SLAB_SETSIZE, &slab->us_debugfree); 1001 #endif 1002 if (keg->uk_flags & UMA_ZONE_REFCNT) { 1003 slabref = (uma_slabrefcnt_t)slab; 1004 for (i = 0; i < keg->uk_ipers; i++) 1005 slabref->us_refcnt[i] = 0; 1006 } 1007 1008 if (keg->uk_init != NULL) { 1009 for (i = 0; i < keg->uk_ipers; i++) 1010 if (keg->uk_init(slab->us_data + (keg->uk_rsize * i), 1011 keg->uk_size, wait) != 0) 1012 break; 1013 if (i != keg->uk_ipers) { 1014 keg_free_slab(keg, slab, i); 1015 slab = NULL; 1016 goto out; 1017 } 1018 } 1019 out: 1020 KEG_LOCK(keg); 1021 1022 if (slab != NULL) { 1023 if (keg->uk_flags & UMA_ZONE_HASH) 1024 UMA_HASH_INSERT(&keg->uk_hash, slab, mem); 1025 1026 keg->uk_pages += keg->uk_ppera; 1027 keg->uk_free += keg->uk_ipers; 1028 } 1029 1030 return (slab); 1031 } 1032 1033 /* 1034 * This function is intended to be used early on in place of page_alloc() so 1035 * that we may use the boot time page cache to satisfy allocations before 1036 * the VM is ready. 1037 */ 1038 static void * 1039 startup_alloc(uma_zone_t zone, int bytes, uint8_t *pflag, int wait) 1040 { 1041 uma_keg_t keg; 1042 uma_slab_t tmps; 1043 int pages, check_pages; 1044 1045 keg = zone_first_keg(zone); 1046 pages = howmany(bytes, PAGE_SIZE); 1047 check_pages = pages - 1; 1048 KASSERT(pages > 0, ("startup_alloc can't reserve 0 pages\n")); 1049 1050 /* 1051 * Check our small startup cache to see if it has pages remaining. 1052 */ 1053 mtx_lock(&uma_boot_pages_mtx); 1054 1055 /* First check if we have enough room. */ 1056 tmps = LIST_FIRST(&uma_boot_pages); 1057 while (tmps != NULL && check_pages-- > 0) 1058 tmps = LIST_NEXT(tmps, us_link); 1059 if (tmps != NULL) { 1060 /* 1061 * It's ok to lose tmps references. The last one will 1062 * have tmps->us_data pointing to the start address of 1063 * "pages" contiguous pages of memory. 1064 */ 1065 while (pages-- > 0) { 1066 tmps = LIST_FIRST(&uma_boot_pages); 1067 LIST_REMOVE(tmps, us_link); 1068 } 1069 mtx_unlock(&uma_boot_pages_mtx); 1070 *pflag = tmps->us_flags; 1071 return (tmps->us_data); 1072 } 1073 mtx_unlock(&uma_boot_pages_mtx); 1074 if (booted < UMA_STARTUP2) 1075 panic("UMA: Increase vm.boot_pages"); 1076 /* 1077 * Now that we've booted reset these users to their real allocator. 1078 */ 1079 #ifdef UMA_MD_SMALL_ALLOC 1080 keg->uk_allocf = (keg->uk_ppera > 1) ? page_alloc : uma_small_alloc; 1081 #else 1082 keg->uk_allocf = page_alloc; 1083 #endif 1084 return keg->uk_allocf(zone, bytes, pflag, wait); 1085 } 1086 1087 /* 1088 * Allocates a number of pages from the system 1089 * 1090 * Arguments: 1091 * bytes The number of bytes requested 1092 * wait Shall we wait? 1093 * 1094 * Returns: 1095 * A pointer to the alloced memory or possibly 1096 * NULL if M_NOWAIT is set. 
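 *
 * Keg allocation hooks such as this one are invoked from keg_alloc_slab()
 * roughly as
 *
 *	mem = allocf(zone, keg->uk_ppera * PAGE_SIZE, &flags, wait);
 *
 * where the zone argument is retained only for the legacy interface.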
1097 */ 1098 static void * 1099 page_alloc(uma_zone_t zone, int bytes, uint8_t *pflag, int wait) 1100 { 1101 void *p; /* Returned page */ 1102 1103 *pflag = UMA_SLAB_KMEM; 1104 p = (void *) kmem_malloc(kmem_arena, bytes, wait); 1105 1106 return (p); 1107 } 1108 1109 /* 1110 * Allocates a number of pages from within an object 1111 * 1112 * Arguments: 1113 * bytes The number of bytes requested 1114 * wait Shall we wait? 1115 * 1116 * Returns: 1117 * A pointer to the alloced memory or possibly 1118 * NULL if M_NOWAIT is set. 1119 */ 1120 static void * 1121 noobj_alloc(uma_zone_t zone, int bytes, uint8_t *flags, int wait) 1122 { 1123 TAILQ_HEAD(, vm_page) alloctail; 1124 u_long npages; 1125 vm_offset_t retkva, zkva; 1126 vm_page_t p, p_next; 1127 uma_keg_t keg; 1128 1129 TAILQ_INIT(&alloctail); 1130 keg = zone_first_keg(zone); 1131 1132 npages = howmany(bytes, PAGE_SIZE); 1133 while (npages > 0) { 1134 p = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT | 1135 VM_ALLOC_WIRED | VM_ALLOC_NOOBJ); 1136 if (p != NULL) { 1137 /* 1138 * Since the page does not belong to an object, its 1139 * listq is unused. 1140 */ 1141 TAILQ_INSERT_TAIL(&alloctail, p, listq); 1142 npages--; 1143 continue; 1144 } 1145 if (wait & M_WAITOK) { 1146 VM_WAIT; 1147 continue; 1148 } 1149 1150 /* 1151 * Page allocation failed, free intermediate pages and 1152 * exit. 1153 */ 1154 TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) { 1155 vm_page_unwire(p, PQ_INACTIVE); 1156 vm_page_free(p); 1157 } 1158 return (NULL); 1159 } 1160 *flags = UMA_SLAB_PRIV; 1161 zkva = keg->uk_kva + 1162 atomic_fetchadd_long(&keg->uk_offset, round_page(bytes)); 1163 retkva = zkva; 1164 TAILQ_FOREACH(p, &alloctail, listq) { 1165 pmap_qenter(zkva, &p, 1); 1166 zkva += PAGE_SIZE; 1167 } 1168 1169 return ((void *)retkva); 1170 } 1171 1172 /* 1173 * Frees a number of pages to the system 1174 * 1175 * Arguments: 1176 * mem A pointer to the memory to be freed 1177 * size The size of the memory being freed 1178 * flags The original p->us_flags field 1179 * 1180 * Returns: 1181 * Nothing 1182 */ 1183 static void 1184 page_free(void *mem, int size, uint8_t flags) 1185 { 1186 struct vmem *vmem; 1187 1188 if (flags & UMA_SLAB_KMEM) 1189 vmem = kmem_arena; 1190 else if (flags & UMA_SLAB_KERNEL) 1191 vmem = kernel_arena; 1192 else 1193 panic("UMA: page_free used with invalid flags %d", flags); 1194 1195 kmem_free(vmem, (vm_offset_t)mem, size); 1196 } 1197 1198 /* 1199 * Zero fill initializer 1200 * 1201 * Arguments/Returns follow uma_init specifications 1202 */ 1203 static int 1204 zero_init(void *mem, int size, int flags) 1205 { 1206 bzero(mem, size); 1207 return (0); 1208 } 1209 1210 /* 1211 * Finish creating a small uma keg. This calculates ipers, and the keg size. 1212 * 1213 * Arguments 1214 * keg The zone we should initialize 1215 * 1216 * Returns 1217 * Nothing 1218 */ 1219 static void 1220 keg_small_init(uma_keg_t keg) 1221 { 1222 u_int rsize; 1223 u_int memused; 1224 u_int wastedspace; 1225 u_int shsize; 1226 1227 if (keg->uk_flags & UMA_ZONE_PCPU) { 1228 u_int ncpus = mp_ncpus ? mp_ncpus : MAXCPU; 1229 1230 keg->uk_slabsize = sizeof(struct pcpu); 1231 keg->uk_ppera = howmany(ncpus * sizeof(struct pcpu), 1232 PAGE_SIZE); 1233 } else { 1234 keg->uk_slabsize = UMA_SLAB_SIZE; 1235 keg->uk_ppera = 1; 1236 } 1237 1238 /* 1239 * Calculate the size of each allocation (rsize) according to 1240 * alignment. If the requested size is smaller than we have 1241 * allocation bits for we round it up. 
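 *
 * For example, with 8-byte alignment (uk_align == 7) a 100-byte item is
 * padded out to (100 & ~7) + 8 = 104 bytes, and if uk_slabsize /
 * SLAB_SETSIZE works out to 16 (e.g. a 4 KB slab with a 256-bit free set)
 * anything smaller than 16 bytes is first raised to that minimum.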
1242 */ 1243 rsize = keg->uk_size; 1244 if (rsize < keg->uk_slabsize / SLAB_SETSIZE) 1245 rsize = keg->uk_slabsize / SLAB_SETSIZE; 1246 if (rsize & keg->uk_align) 1247 rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1); 1248 keg->uk_rsize = rsize; 1249 1250 KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0 || 1251 keg->uk_rsize < sizeof(struct pcpu), 1252 ("%s: size %u too large", __func__, keg->uk_rsize)); 1253 1254 if (keg->uk_flags & UMA_ZONE_REFCNT) 1255 rsize += sizeof(uint32_t); 1256 1257 if (keg->uk_flags & UMA_ZONE_OFFPAGE) 1258 shsize = 0; 1259 else 1260 shsize = sizeof(struct uma_slab); 1261 1262 keg->uk_ipers = (keg->uk_slabsize - shsize) / rsize; 1263 KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE, 1264 ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers)); 1265 1266 memused = keg->uk_ipers * rsize + shsize; 1267 wastedspace = keg->uk_slabsize - memused; 1268 1269 /* 1270 * We can't do OFFPAGE if we're internal or if we've been 1271 * asked to not go to the VM for buckets. If we do this we 1272 * may end up going to the VM for slabs which we do not 1273 * want to do if we're UMA_ZFLAG_CACHEONLY as a result 1274 * of UMA_ZONE_VM, which clearly forbids it. 1275 */ 1276 if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) || 1277 (keg->uk_flags & UMA_ZFLAG_CACHEONLY)) 1278 return; 1279 1280 /* 1281 * See if using an OFFPAGE slab will limit our waste. Only do 1282 * this if it permits more items per-slab. 1283 * 1284 * XXX We could try growing slabsize to limit max waste as well. 1285 * Historically this was not done because the VM could not 1286 * efficiently handle contiguous allocations. 1287 */ 1288 if ((wastedspace >= keg->uk_slabsize / UMA_MAX_WASTE) && 1289 (keg->uk_ipers < (keg->uk_slabsize / keg->uk_rsize))) { 1290 keg->uk_ipers = keg->uk_slabsize / keg->uk_rsize; 1291 KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE, 1292 ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers)); 1293 #ifdef UMA_DEBUG 1294 printf("UMA decided we need offpage slab headers for " 1295 "keg: %s, calculated wastedspace = %d, " 1296 "maximum wasted space allowed = %d, " 1297 "calculated ipers = %d, " 1298 "new wasted space = %d\n", keg->uk_name, wastedspace, 1299 keg->uk_slabsize / UMA_MAX_WASTE, keg->uk_ipers, 1300 keg->uk_slabsize - keg->uk_ipers * keg->uk_rsize); 1301 #endif 1302 keg->uk_flags |= UMA_ZONE_OFFPAGE; 1303 } 1304 1305 if ((keg->uk_flags & UMA_ZONE_OFFPAGE) && 1306 (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0) 1307 keg->uk_flags |= UMA_ZONE_HASH; 1308 } 1309 1310 /* 1311 * Finish creating a large (> UMA_SLAB_SIZE) uma kegs. Just give in and do 1312 * OFFPAGE for now. When I can allow for more dynamic slab sizes this will be 1313 * more complicated. 1314 * 1315 * Arguments 1316 * keg The keg we should initialize 1317 * 1318 * Returns 1319 * Nothing 1320 */ 1321 static void 1322 keg_large_init(uma_keg_t keg) 1323 { 1324 u_int shsize; 1325 1326 KASSERT(keg != NULL, ("Keg is null in keg_large_init")); 1327 KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0, 1328 ("keg_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY keg")); 1329 KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0, 1330 ("%s: Cannot large-init a UMA_ZONE_PCPU keg", __func__)); 1331 1332 keg->uk_ppera = howmany(keg->uk_size, PAGE_SIZE); 1333 keg->uk_slabsize = keg->uk_ppera * PAGE_SIZE; 1334 keg->uk_ipers = 1; 1335 keg->uk_rsize = keg->uk_size; 1336 1337 /* We can't do OFFPAGE if we're internal, bail out here. 
*/ 1338 if (keg->uk_flags & UMA_ZFLAG_INTERNAL) 1339 return; 1340 1341 /* Check whether we have enough space to not do OFFPAGE. */ 1342 if ((keg->uk_flags & UMA_ZONE_OFFPAGE) == 0) { 1343 shsize = sizeof(struct uma_slab); 1344 if (keg->uk_flags & UMA_ZONE_REFCNT) 1345 shsize += keg->uk_ipers * sizeof(uint32_t); 1346 if (shsize & UMA_ALIGN_PTR) 1347 shsize = (shsize & ~UMA_ALIGN_PTR) + 1348 (UMA_ALIGN_PTR + 1); 1349 1350 if ((PAGE_SIZE * keg->uk_ppera) - keg->uk_rsize < shsize) 1351 keg->uk_flags |= UMA_ZONE_OFFPAGE; 1352 } 1353 1354 if ((keg->uk_flags & UMA_ZONE_OFFPAGE) && 1355 (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0) 1356 keg->uk_flags |= UMA_ZONE_HASH; 1357 } 1358 1359 static void 1360 keg_cachespread_init(uma_keg_t keg) 1361 { 1362 int alignsize; 1363 int trailer; 1364 int pages; 1365 int rsize; 1366 1367 KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0, 1368 ("%s: Cannot cachespread-init a UMA_ZONE_PCPU keg", __func__)); 1369 1370 alignsize = keg->uk_align + 1; 1371 rsize = keg->uk_size; 1372 /* 1373 * We want one item to start on every align boundary in a page. To 1374 * do this we will span pages. We will also extend the item by the 1375 * size of align if it is an even multiple of align. Otherwise, it 1376 * would fall on the same boundary every time. 1377 */ 1378 if (rsize & keg->uk_align) 1379 rsize = (rsize & ~keg->uk_align) + alignsize; 1380 if ((rsize & alignsize) == 0) 1381 rsize += alignsize; 1382 trailer = rsize - keg->uk_size; 1383 pages = (rsize * (PAGE_SIZE / alignsize)) / PAGE_SIZE; 1384 pages = MIN(pages, (128 * 1024) / PAGE_SIZE); 1385 keg->uk_rsize = rsize; 1386 keg->uk_ppera = pages; 1387 keg->uk_slabsize = UMA_SLAB_SIZE; 1388 keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize; 1389 keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB; 1390 KASSERT(keg->uk_ipers <= SLAB_SETSIZE, 1391 ("%s: keg->uk_ipers too high(%d) increase max_ipers", __func__, 1392 keg->uk_ipers)); 1393 } 1394 1395 /* 1396 * Keg header ctor. This initializes all fields, locks, etc. And inserts 1397 * the keg onto the global keg list. 1398 * 1399 * Arguments/Returns follow uma_ctor specifications 1400 * udata Actually uma_kctor_args 1401 */ 1402 static int 1403 keg_ctor(void *mem, int size, void *udata, int flags) 1404 { 1405 struct uma_kctor_args *arg = udata; 1406 uma_keg_t keg = mem; 1407 uma_zone_t zone; 1408 1409 bzero(keg, size); 1410 keg->uk_size = arg->size; 1411 keg->uk_init = arg->uminit; 1412 keg->uk_fini = arg->fini; 1413 keg->uk_align = arg->align; 1414 keg->uk_free = 0; 1415 keg->uk_reserve = 0; 1416 keg->uk_pages = 0; 1417 keg->uk_flags = arg->flags; 1418 keg->uk_allocf = page_alloc; 1419 keg->uk_freef = page_free; 1420 keg->uk_slabzone = NULL; 1421 1422 /* 1423 * The master zone is passed to us at keg-creation time. 
1424 */ 1425 zone = arg->zone; 1426 keg->uk_name = zone->uz_name; 1427 1428 if (arg->flags & UMA_ZONE_VM) 1429 keg->uk_flags |= UMA_ZFLAG_CACHEONLY; 1430 1431 if (arg->flags & UMA_ZONE_ZINIT) 1432 keg->uk_init = zero_init; 1433 1434 if (arg->flags & UMA_ZONE_REFCNT || arg->flags & UMA_ZONE_MALLOC) 1435 keg->uk_flags |= UMA_ZONE_VTOSLAB; 1436 1437 if (arg->flags & UMA_ZONE_PCPU) 1438 #ifdef SMP 1439 keg->uk_flags |= UMA_ZONE_OFFPAGE; 1440 #else 1441 keg->uk_flags &= ~UMA_ZONE_PCPU; 1442 #endif 1443 1444 if (keg->uk_flags & UMA_ZONE_CACHESPREAD) { 1445 keg_cachespread_init(keg); 1446 } else if (keg->uk_flags & UMA_ZONE_REFCNT) { 1447 if (keg->uk_size > 1448 (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt) - 1449 sizeof(uint32_t))) 1450 keg_large_init(keg); 1451 else 1452 keg_small_init(keg); 1453 } else { 1454 if (keg->uk_size > (UMA_SLAB_SIZE - sizeof(struct uma_slab))) 1455 keg_large_init(keg); 1456 else 1457 keg_small_init(keg); 1458 } 1459 1460 if (keg->uk_flags & UMA_ZONE_OFFPAGE) { 1461 if (keg->uk_flags & UMA_ZONE_REFCNT) { 1462 if (keg->uk_ipers > uma_max_ipers_ref) 1463 panic("Too many ref items per zone: %d > %d\n", 1464 keg->uk_ipers, uma_max_ipers_ref); 1465 keg->uk_slabzone = slabrefzone; 1466 } else 1467 keg->uk_slabzone = slabzone; 1468 } 1469 1470 /* 1471 * If we haven't booted yet we need allocations to go through the 1472 * startup cache until the vm is ready. 1473 */ 1474 if (keg->uk_ppera == 1) { 1475 #ifdef UMA_MD_SMALL_ALLOC 1476 keg->uk_allocf = uma_small_alloc; 1477 keg->uk_freef = uma_small_free; 1478 1479 if (booted < UMA_STARTUP) 1480 keg->uk_allocf = startup_alloc; 1481 #else 1482 if (booted < UMA_STARTUP2) 1483 keg->uk_allocf = startup_alloc; 1484 #endif 1485 } else if (booted < UMA_STARTUP2 && 1486 (keg->uk_flags & UMA_ZFLAG_INTERNAL)) 1487 keg->uk_allocf = startup_alloc; 1488 1489 /* 1490 * Initialize keg's lock 1491 */ 1492 KEG_LOCK_INIT(keg, (arg->flags & UMA_ZONE_MTXCLASS)); 1493 1494 /* 1495 * If we're putting the slab header in the actual page we need to 1496 * figure out where in each page it goes. This calculates a right 1497 * justified offset into the memory on an ALIGN_PTR boundary. 1498 */ 1499 if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) { 1500 u_int totsize; 1501 1502 /* Size of the slab struct and free list */ 1503 totsize = sizeof(struct uma_slab); 1504 1505 /* Size of the reference counts. */ 1506 if (keg->uk_flags & UMA_ZONE_REFCNT) 1507 totsize += keg->uk_ipers * sizeof(uint32_t); 1508 1509 if (totsize & UMA_ALIGN_PTR) 1510 totsize = (totsize & ~UMA_ALIGN_PTR) + 1511 (UMA_ALIGN_PTR + 1); 1512 keg->uk_pgoff = (PAGE_SIZE * keg->uk_ppera) - totsize; 1513 1514 /* 1515 * The only way the following is possible is if with our 1516 * UMA_ALIGN_PTR adjustments we are now bigger than 1517 * UMA_SLAB_SIZE. I haven't checked whether this is 1518 * mathematically possible for all cases, so we make 1519 * sure here anyway. 
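 *
 * Illustrative numbers: on a single 4 KB page with a slab header that
 * rounds up to, say, 88 bytes, uk_pgoff becomes 4096 - 88 = 4008, and the
 * check below simply verifies that 4008 + 88 still fits in the page.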
1520 */ 1521 totsize = keg->uk_pgoff + sizeof(struct uma_slab); 1522 if (keg->uk_flags & UMA_ZONE_REFCNT) 1523 totsize += keg->uk_ipers * sizeof(uint32_t); 1524 if (totsize > PAGE_SIZE * keg->uk_ppera) { 1525 printf("zone %s ipers %d rsize %d size %d\n", 1526 zone->uz_name, keg->uk_ipers, keg->uk_rsize, 1527 keg->uk_size); 1528 panic("UMA slab won't fit."); 1529 } 1530 } 1531 1532 if (keg->uk_flags & UMA_ZONE_HASH) 1533 hash_alloc(&keg->uk_hash); 1534 1535 #ifdef UMA_DEBUG 1536 printf("UMA: %s(%p) size %d(%d) flags %#x ipers %d ppera %d out %d free %d\n", 1537 zone->uz_name, zone, keg->uk_size, keg->uk_rsize, keg->uk_flags, 1538 keg->uk_ipers, keg->uk_ppera, 1539 (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free); 1540 #endif 1541 1542 LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link); 1543 1544 rw_wlock(&uma_rwlock); 1545 LIST_INSERT_HEAD(&uma_kegs, keg, uk_link); 1546 rw_wunlock(&uma_rwlock); 1547 return (0); 1548 } 1549 1550 /* 1551 * Zone header ctor. This initializes all fields, locks, etc. 1552 * 1553 * Arguments/Returns follow uma_ctor specifications 1554 * udata Actually uma_zctor_args 1555 */ 1556 static int 1557 zone_ctor(void *mem, int size, void *udata, int flags) 1558 { 1559 struct uma_zctor_args *arg = udata; 1560 uma_zone_t zone = mem; 1561 uma_zone_t z; 1562 uma_keg_t keg; 1563 1564 bzero(zone, size); 1565 zone->uz_name = arg->name; 1566 zone->uz_ctor = arg->ctor; 1567 zone->uz_dtor = arg->dtor; 1568 zone->uz_slab = zone_fetch_slab; 1569 zone->uz_init = NULL; 1570 zone->uz_fini = NULL; 1571 zone->uz_allocs = 0; 1572 zone->uz_frees = 0; 1573 zone->uz_fails = 0; 1574 zone->uz_sleeps = 0; 1575 zone->uz_count = 0; 1576 zone->uz_count_min = 0; 1577 zone->uz_flags = 0; 1578 zone->uz_warning = NULL; 1579 timevalclear(&zone->uz_ratecheck); 1580 keg = arg->keg; 1581 1582 ZONE_LOCK_INIT(zone, (arg->flags & UMA_ZONE_MTXCLASS)); 1583 1584 /* 1585 * This is a pure cache zone, no kegs. 1586 */ 1587 if (arg->import) { 1588 if (arg->flags & UMA_ZONE_VM) 1589 arg->flags |= UMA_ZFLAG_CACHEONLY; 1590 zone->uz_flags = arg->flags; 1591 zone->uz_size = arg->size; 1592 zone->uz_import = arg->import; 1593 zone->uz_release = arg->release; 1594 zone->uz_arg = arg->arg; 1595 zone->uz_lockptr = &zone->uz_lock; 1596 rw_wlock(&uma_rwlock); 1597 LIST_INSERT_HEAD(&uma_cachezones, zone, uz_link); 1598 rw_wunlock(&uma_rwlock); 1599 goto out; 1600 } 1601 1602 /* 1603 * Use the regular zone/keg/slab allocator. 
1604 */ 1605 zone->uz_import = (uma_import)zone_import; 1606 zone->uz_release = (uma_release)zone_release; 1607 zone->uz_arg = zone; 1608 1609 if (arg->flags & UMA_ZONE_SECONDARY) { 1610 KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg")); 1611 zone->uz_init = arg->uminit; 1612 zone->uz_fini = arg->fini; 1613 zone->uz_lockptr = &keg->uk_lock; 1614 zone->uz_flags |= UMA_ZONE_SECONDARY; 1615 rw_wlock(&uma_rwlock); 1616 ZONE_LOCK(zone); 1617 LIST_FOREACH(z, &keg->uk_zones, uz_link) { 1618 if (LIST_NEXT(z, uz_link) == NULL) { 1619 LIST_INSERT_AFTER(z, zone, uz_link); 1620 break; 1621 } 1622 } 1623 ZONE_UNLOCK(zone); 1624 rw_wunlock(&uma_rwlock); 1625 } else if (keg == NULL) { 1626 if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini, 1627 arg->align, arg->flags)) == NULL) 1628 return (ENOMEM); 1629 } else { 1630 struct uma_kctor_args karg; 1631 int error; 1632 1633 /* We should only be here from uma_startup() */ 1634 karg.size = arg->size; 1635 karg.uminit = arg->uminit; 1636 karg.fini = arg->fini; 1637 karg.align = arg->align; 1638 karg.flags = arg->flags; 1639 karg.zone = zone; 1640 error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg, 1641 flags); 1642 if (error) 1643 return (error); 1644 } 1645 1646 /* 1647 * Link in the first keg. 1648 */ 1649 zone->uz_klink.kl_keg = keg; 1650 LIST_INSERT_HEAD(&zone->uz_kegs, &zone->uz_klink, kl_link); 1651 zone->uz_lockptr = &keg->uk_lock; 1652 zone->uz_size = keg->uk_size; 1653 zone->uz_flags |= (keg->uk_flags & 1654 (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT)); 1655 1656 /* 1657 * Some internal zones don't have room allocated for the per cpu 1658 * caches. If we're internal, bail out here. 1659 */ 1660 if (keg->uk_flags & UMA_ZFLAG_INTERNAL) { 1661 KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0, 1662 ("Secondary zone requested UMA_ZFLAG_INTERNAL")); 1663 return (0); 1664 } 1665 1666 out: 1667 if ((arg->flags & UMA_ZONE_MAXBUCKET) == 0) 1668 zone->uz_count = bucket_select(zone->uz_size); 1669 else 1670 zone->uz_count = BUCKET_MAX; 1671 zone->uz_count_min = zone->uz_count; 1672 1673 return (0); 1674 } 1675 1676 /* 1677 * Keg header dtor. This frees all data, destroys locks, frees the hash 1678 * table and removes the keg from the global list. 1679 * 1680 * Arguments/Returns follow uma_dtor specifications 1681 * udata unused 1682 */ 1683 static void 1684 keg_dtor(void *arg, int size, void *udata) 1685 { 1686 uma_keg_t keg; 1687 1688 keg = (uma_keg_t)arg; 1689 KEG_LOCK(keg); 1690 if (keg->uk_free != 0) { 1691 printf("Freed UMA keg (%s) was not empty (%d items). " 1692 " Lost %d pages of memory.\n", 1693 keg->uk_name ? keg->uk_name : "", 1694 keg->uk_free, keg->uk_pages); 1695 } 1696 KEG_UNLOCK(keg); 1697 1698 hash_free(&keg->uk_hash); 1699 1700 KEG_LOCK_FINI(keg); 1701 } 1702 1703 /* 1704 * Zone header dtor. 1705 * 1706 * Arguments/Returns follow uma_dtor specifications 1707 * udata unused 1708 */ 1709 static void 1710 zone_dtor(void *arg, int size, void *udata) 1711 { 1712 uma_klink_t klink; 1713 uma_zone_t zone; 1714 uma_keg_t keg; 1715 1716 zone = (uma_zone_t)arg; 1717 keg = zone_first_keg(zone); 1718 1719 if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL)) 1720 cache_drain(zone); 1721 1722 rw_wlock(&uma_rwlock); 1723 LIST_REMOVE(zone, uz_link); 1724 rw_wunlock(&uma_rwlock); 1725 /* 1726 * XXX there are some races here where 1727 * the zone can be drained but zone lock 1728 * released and then refilled before we 1729 * remove it... we dont care for now 1730 */ 1731 zone_drain_wait(zone, M_WAITOK); 1732 /* 1733 * Unlink all of our kegs. 
1734 */ 1735 while ((klink = LIST_FIRST(&zone->uz_kegs)) != NULL) { 1736 klink->kl_keg = NULL; 1737 LIST_REMOVE(klink, kl_link); 1738 if (klink == &zone->uz_klink) 1739 continue; 1740 free(klink, M_TEMP); 1741 } 1742 /* 1743 * We only destroy kegs from non secondary zones. 1744 */ 1745 if (keg != NULL && (zone->uz_flags & UMA_ZONE_SECONDARY) == 0) { 1746 rw_wlock(&uma_rwlock); 1747 LIST_REMOVE(keg, uk_link); 1748 rw_wunlock(&uma_rwlock); 1749 zone_free_item(kegs, keg, NULL, SKIP_NONE); 1750 } 1751 ZONE_LOCK_FINI(zone); 1752 } 1753 1754 /* 1755 * Traverses every zone in the system and calls a callback 1756 * 1757 * Arguments: 1758 * zfunc A pointer to a function which accepts a zone 1759 * as an argument. 1760 * 1761 * Returns: 1762 * Nothing 1763 */ 1764 static void 1765 zone_foreach(void (*zfunc)(uma_zone_t)) 1766 { 1767 uma_keg_t keg; 1768 uma_zone_t zone; 1769 1770 rw_rlock(&uma_rwlock); 1771 LIST_FOREACH(keg, &uma_kegs, uk_link) { 1772 LIST_FOREACH(zone, &keg->uk_zones, uz_link) 1773 zfunc(zone); 1774 } 1775 rw_runlock(&uma_rwlock); 1776 } 1777 1778 /* Public functions */ 1779 /* See uma.h */ 1780 void 1781 uma_startup(void *bootmem, int boot_pages) 1782 { 1783 struct uma_zctor_args args; 1784 uma_slab_t slab; 1785 u_int slabsize; 1786 int i; 1787 1788 #ifdef UMA_DEBUG 1789 printf("Creating uma keg headers zone and keg.\n"); 1790 #endif 1791 rw_init(&uma_rwlock, "UMA lock"); 1792 1793 /* "manually" create the initial zone */ 1794 memset(&args, 0, sizeof(args)); 1795 args.name = "UMA Kegs"; 1796 args.size = sizeof(struct uma_keg); 1797 args.ctor = keg_ctor; 1798 args.dtor = keg_dtor; 1799 args.uminit = zero_init; 1800 args.fini = NULL; 1801 args.keg = &masterkeg; 1802 args.align = 32 - 1; 1803 args.flags = UMA_ZFLAG_INTERNAL; 1804 /* The initial zone has no Per cpu queues so it's smaller */ 1805 zone_ctor(kegs, sizeof(struct uma_zone), &args, M_WAITOK); 1806 1807 #ifdef UMA_DEBUG 1808 printf("Filling boot free list.\n"); 1809 #endif 1810 for (i = 0; i < boot_pages; i++) { 1811 slab = (uma_slab_t)((uint8_t *)bootmem + (i * UMA_SLAB_SIZE)); 1812 slab->us_data = (uint8_t *)slab; 1813 slab->us_flags = UMA_SLAB_BOOT; 1814 LIST_INSERT_HEAD(&uma_boot_pages, slab, us_link); 1815 } 1816 mtx_init(&uma_boot_pages_mtx, "UMA boot pages", NULL, MTX_DEF); 1817 1818 #ifdef UMA_DEBUG 1819 printf("Creating uma zone headers zone and keg.\n"); 1820 #endif 1821 args.name = "UMA Zones"; 1822 args.size = sizeof(struct uma_zone) + 1823 (sizeof(struct uma_cache) * (mp_maxid + 1)); 1824 args.ctor = zone_ctor; 1825 args.dtor = zone_dtor; 1826 args.uminit = zero_init; 1827 args.fini = NULL; 1828 args.keg = NULL; 1829 args.align = 32 - 1; 1830 args.flags = UMA_ZFLAG_INTERNAL; 1831 /* The initial zone has no Per cpu queues so it's smaller */ 1832 zone_ctor(zones, sizeof(struct uma_zone), &args, M_WAITOK); 1833 1834 #ifdef UMA_DEBUG 1835 printf("Initializing pcpu cache locks.\n"); 1836 #endif 1837 #ifdef UMA_DEBUG 1838 printf("Creating slab and hash zones.\n"); 1839 #endif 1840 1841 /* Now make a zone for slab headers */ 1842 slabzone = uma_zcreate("UMA Slabs", 1843 sizeof(struct uma_slab), 1844 NULL, NULL, NULL, NULL, 1845 UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL); 1846 1847 /* 1848 * We also create a zone for the bigger slabs with reference 1849 * counts in them, to accomodate UMA_ZONE_REFCNT zones. 
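 *
 * With 4 KB pages and 2 KB mbuf clusters (the common case) uma_max_ipers_ref
 * is PAGE_SIZE / MCLBYTES = 2, so the item below is just
 * sizeof(struct uma_slab_refcnt) plus two uint32_t reference counters.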
1850 */ 1851 slabsize = sizeof(struct uma_slab_refcnt); 1852 slabsize += uma_max_ipers_ref * sizeof(uint32_t); 1853 slabrefzone = uma_zcreate("UMA RCntSlabs", 1854 slabsize, 1855 NULL, NULL, NULL, NULL, 1856 UMA_ALIGN_PTR, 1857 UMA_ZFLAG_INTERNAL); 1858 1859 hashzone = uma_zcreate("UMA Hash", 1860 sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT, 1861 NULL, NULL, NULL, NULL, 1862 UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL); 1863 1864 bucket_init(); 1865 1866 booted = UMA_STARTUP; 1867 1868 #ifdef UMA_DEBUG 1869 printf("UMA startup complete.\n"); 1870 #endif 1871 } 1872 1873 /* see uma.h */ 1874 void 1875 uma_startup2(void) 1876 { 1877 booted = UMA_STARTUP2; 1878 bucket_enable(); 1879 #ifdef UMA_DEBUG 1880 printf("UMA startup2 complete.\n"); 1881 #endif 1882 } 1883 1884 /* 1885 * Initialize our callout handle 1886 * 1887 */ 1888 1889 static void 1890 uma_startup3(void) 1891 { 1892 #ifdef UMA_DEBUG 1893 printf("Starting callout.\n"); 1894 #endif 1895 callout_init(&uma_callout, CALLOUT_MPSAFE); 1896 callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL); 1897 #ifdef UMA_DEBUG 1898 printf("UMA startup3 complete.\n"); 1899 #endif 1900 } 1901 1902 static uma_keg_t 1903 uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini, 1904 int align, uint32_t flags) 1905 { 1906 struct uma_kctor_args args; 1907 1908 args.size = size; 1909 args.uminit = uminit; 1910 args.fini = fini; 1911 args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align; 1912 args.flags = flags; 1913 args.zone = zone; 1914 return (zone_alloc_item(kegs, &args, M_WAITOK)); 1915 } 1916 1917 /* See uma.h */ 1918 void 1919 uma_set_align(int align) 1920 { 1921 1922 if (align != UMA_ALIGN_CACHE) 1923 uma_align_cache = align; 1924 } 1925 1926 /* See uma.h */ 1927 uma_zone_t 1928 uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor, 1929 uma_init uminit, uma_fini fini, int align, uint32_t flags) 1930 1931 { 1932 struct uma_zctor_args args; 1933 1934 /* This stuff is essential for the zone ctor */ 1935 memset(&args, 0, sizeof(args)); 1936 args.name = name; 1937 args.size = size; 1938 args.ctor = ctor; 1939 args.dtor = dtor; 1940 args.uminit = uminit; 1941 args.fini = fini; 1942 args.align = align; 1943 args.flags = flags; 1944 args.keg = NULL; 1945 1946 return (zone_alloc_item(zones, &args, M_WAITOK)); 1947 } 1948 1949 /* See uma.h */ 1950 uma_zone_t 1951 uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor, 1952 uma_init zinit, uma_fini zfini, uma_zone_t master) 1953 { 1954 struct uma_zctor_args args; 1955 uma_keg_t keg; 1956 1957 keg = zone_first_keg(master); 1958 memset(&args, 0, sizeof(args)); 1959 args.name = name; 1960 args.size = keg->uk_size; 1961 args.ctor = ctor; 1962 args.dtor = dtor; 1963 args.uminit = zinit; 1964 args.fini = zfini; 1965 args.align = keg->uk_align; 1966 args.flags = keg->uk_flags | UMA_ZONE_SECONDARY; 1967 args.keg = keg; 1968 1969 /* XXX Attaches only one keg of potentially many. 
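 *
 * A hypothetical caller would layer a secondary zone over an existing
 * master so that both share a single keg, e.g.:
 *
 *	second = uma_zsecond_create("foo cache", ctor, dtor, NULL, NULL,
 *	    master);
 *
 * Additional kegs can be attached to it later with uma_zsecond_add().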
*/ 1970 return (zone_alloc_item(zones, &args, M_WAITOK)); 1971 } 1972 1973 /* See uma.h */ 1974 uma_zone_t 1975 uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor, 1976 uma_init zinit, uma_fini zfini, uma_import zimport, 1977 uma_release zrelease, void *arg, int flags) 1978 { 1979 struct uma_zctor_args args; 1980 1981 memset(&args, 0, sizeof(args)); 1982 args.name = name; 1983 args.size = size; 1984 args.ctor = ctor; 1985 args.dtor = dtor; 1986 args.uminit = zinit; 1987 args.fini = zfini; 1988 args.import = zimport; 1989 args.release = zrelease; 1990 args.arg = arg; 1991 args.align = 0; 1992 args.flags = flags; 1993 1994 return (zone_alloc_item(zones, &args, M_WAITOK)); 1995 } 1996 1997 static void 1998 zone_lock_pair(uma_zone_t a, uma_zone_t b) 1999 { 2000 if (a < b) { 2001 ZONE_LOCK(a); 2002 mtx_lock_flags(b->uz_lockptr, MTX_DUPOK); 2003 } else { 2004 ZONE_LOCK(b); 2005 mtx_lock_flags(a->uz_lockptr, MTX_DUPOK); 2006 } 2007 } 2008 2009 static void 2010 zone_unlock_pair(uma_zone_t a, uma_zone_t b) 2011 { 2012 2013 ZONE_UNLOCK(a); 2014 ZONE_UNLOCK(b); 2015 } 2016 2017 int 2018 uma_zsecond_add(uma_zone_t zone, uma_zone_t master) 2019 { 2020 uma_klink_t klink; 2021 uma_klink_t kl; 2022 int error; 2023 2024 error = 0; 2025 klink = malloc(sizeof(*klink), M_TEMP, M_WAITOK | M_ZERO); 2026 2027 zone_lock_pair(zone, master); 2028 /* 2029 * zone must use vtoslab() to resolve objects and must already be 2030 * a secondary. 2031 */ 2032 if ((zone->uz_flags & (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) 2033 != (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) { 2034 error = EINVAL; 2035 goto out; 2036 } 2037 /* 2038 * The new master must also use vtoslab(). 2039 */ 2040 if ((zone->uz_flags & UMA_ZONE_VTOSLAB) != UMA_ZONE_VTOSLAB) { 2041 error = EINVAL; 2042 goto out; 2043 } 2044 /* 2045 * Both must either be refcnt, or not be refcnt. 2046 */ 2047 if ((zone->uz_flags & UMA_ZONE_REFCNT) != 2048 (master->uz_flags & UMA_ZONE_REFCNT)) { 2049 error = EINVAL; 2050 goto out; 2051 } 2052 /* 2053 * The underlying object must be the same size. rsize 2054 * may be different. 2055 */ 2056 if (master->uz_size != zone->uz_size) { 2057 error = E2BIG; 2058 goto out; 2059 } 2060 /* 2061 * Put it at the end of the list. 2062 */ 2063 klink->kl_keg = zone_first_keg(master); 2064 LIST_FOREACH(kl, &zone->uz_kegs, kl_link) { 2065 if (LIST_NEXT(kl, kl_link) == NULL) { 2066 LIST_INSERT_AFTER(kl, klink, kl_link); 2067 break; 2068 } 2069 } 2070 klink = NULL; 2071 zone->uz_flags |= UMA_ZFLAG_MULTI; 2072 zone->uz_slab = zone_fetch_slab_multi; 2073 2074 out: 2075 zone_unlock_pair(zone, master); 2076 if (klink != NULL) 2077 free(klink, M_TEMP); 2078 2079 return (error); 2080 } 2081 2082 2083 /* See uma.h */ 2084 void 2085 uma_zdestroy(uma_zone_t zone) 2086 { 2087 2088 zone_free_item(zones, zone, NULL, SKIP_NONE); 2089 } 2090 2091 /* See uma.h */ 2092 void * 2093 uma_zalloc_arg(uma_zone_t zone, void *udata, int flags) 2094 { 2095 void *item; 2096 uma_cache_t cache; 2097 uma_bucket_t bucket; 2098 int lockfail; 2099 int cpu; 2100 2101 #if 0 2102 /* XXX: FIX!! Do not enable this in CURRENT!! 
MarkM */ 2103 /* The entropy here is desirable, but the harvesting is expensive */ 2104 random_harvest(&(zone->uz_name), sizeof(void *), 1, RANDOM_UMA_ALLOC); 2105 #endif 2106 2107 /* This is the fast path allocation */ 2108 #ifdef UMA_DEBUG_ALLOC_1 2109 printf("Allocating one item from %s(%p)\n", zone->uz_name, zone); 2110 #endif 2111 CTR3(KTR_UMA, "uma_zalloc_arg thread %x zone %s flags %d", curthread, 2112 zone->uz_name, flags); 2113 2114 if (flags & M_WAITOK) { 2115 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 2116 "uma_zalloc_arg: zone \"%s\"", zone->uz_name); 2117 } 2118 #ifdef DEBUG_MEMGUARD 2119 if (memguard_cmp_zone(zone)) { 2120 item = memguard_alloc(zone->uz_size, flags); 2121 if (item != NULL) { 2122 /* 2123 * Avoid conflict with the use-after-free 2124 * protecting infrastructure from INVARIANTS. 2125 */ 2126 if (zone->uz_init != NULL && 2127 zone->uz_init != mtrash_init && 2128 zone->uz_init(item, zone->uz_size, flags) != 0) 2129 return (NULL); 2130 if (zone->uz_ctor != NULL && 2131 zone->uz_ctor != mtrash_ctor && 2132 zone->uz_ctor(item, zone->uz_size, udata, 2133 flags) != 0) { 2134 zone->uz_fini(item, zone->uz_size); 2135 return (NULL); 2136 } 2137 #if 0 2138 /* XXX: FIX!! Do not enable this in CURRENT!! MarkM */ 2139 /* The entropy here is desirable, but the harvesting is expensive */ 2140 random_harvest(&item, sizeof(void *), 1, RANDOM_UMA_ALLOC); 2141 #endif 2142 return (item); 2143 } 2144 /* This is unfortunate but should not be fatal. */ 2145 } 2146 #endif 2147 /* 2148 * If possible, allocate from the per-CPU cache. There are two 2149 * requirements for safe access to the per-CPU cache: (1) the thread 2150 * accessing the cache must not be preempted or yield during access, 2151 * and (2) the thread must not migrate CPUs without switching which 2152 * cache it accesses. We rely on a critical section to prevent 2153 * preemption and migration. We release the critical section in 2154 * order to acquire the zone mutex if we are unable to allocate from 2155 * the current cache; when we re-acquire the critical section, we 2156 * must detect and handle migration if it has occurred. 2157 */ 2158 critical_enter(); 2159 cpu = curcpu; 2160 cache = &zone->uz_cpu[cpu]; 2161 2162 zalloc_start: 2163 bucket = cache->uc_allocbucket; 2164 if (bucket != NULL && bucket->ub_cnt > 0) { 2165 bucket->ub_cnt--; 2166 item = bucket->ub_bucket[bucket->ub_cnt]; 2167 #ifdef INVARIANTS 2168 bucket->ub_bucket[bucket->ub_cnt] = NULL; 2169 #endif 2170 KASSERT(item != NULL, ("uma_zalloc: Bucket pointer mangled.")); 2171 cache->uc_allocs++; 2172 critical_exit(); 2173 if (zone->uz_ctor != NULL && 2174 zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) { 2175 atomic_add_long(&zone->uz_fails, 1); 2176 zone_free_item(zone, item, udata, SKIP_DTOR); 2177 return (NULL); 2178 } 2179 #ifdef INVARIANTS 2180 uma_dbg_alloc(zone, NULL, item); 2181 #endif 2182 if (flags & M_ZERO) 2183 uma_zero_item(item, zone); 2184 #if 0 2185 /* XXX: FIX!! Do not enable this in CURRENT!! MarkM */ 2186 /* The entropy here is desirable, but the harvesting is expensive */ 2187 random_harvest(&item, sizeof(void *), 1, RANDOM_UMA_ALLOC); 2188 #endif 2189 return (item); 2190 } 2191 2192 /* 2193 * We have run out of items in our alloc bucket. 2194 * See if we can switch with our free bucket. 
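 * Each per-CPU cache keeps two buckets: uc_allocbucket is drained by
 * allocations while uc_freebucket is filled by frees (uma_zfree_arg()
 * prefers the allocbucket to give LIFO ordering).  Swapping them here
 * lets recently freed, cache-hot items be handed out again without
 * taking the zone lock.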
2195 */ 2196 bucket = cache->uc_freebucket; 2197 if (bucket != NULL && bucket->ub_cnt > 0) { 2198 #ifdef UMA_DEBUG_ALLOC 2199 printf("uma_zalloc: Swapping empty with alloc.\n"); 2200 #endif 2201 cache->uc_freebucket = cache->uc_allocbucket; 2202 cache->uc_allocbucket = bucket; 2203 goto zalloc_start; 2204 } 2205 2206 /* 2207 * Discard any empty allocation bucket while we hold no locks. 2208 */ 2209 bucket = cache->uc_allocbucket; 2210 cache->uc_allocbucket = NULL; 2211 critical_exit(); 2212 if (bucket != NULL) 2213 bucket_free(zone, bucket, udata); 2214 2215 /* Short-circuit for zones without buckets and low memory. */ 2216 if (zone->uz_count == 0 || bucketdisable) 2217 goto zalloc_item; 2218 2219 /* 2220 * The attempt to retrieve the item from the per-CPU cache has failed, so 2221 * we must go back to the zone. This requires the zone lock, so we 2222 * must drop the critical section, then re-acquire it when we go back 2223 * to the cache. Since the critical section is released, we may be 2224 * preempted or migrate. As such, make sure not to maintain any 2225 * thread-local state specific to the cache from prior to releasing 2226 * the critical section. 2227 */ 2228 lockfail = 0; 2229 if (ZONE_TRYLOCK(zone) == 0) { 2230 /* Record contention to size the buckets. */ 2231 ZONE_LOCK(zone); 2232 lockfail = 1; 2233 } 2234 critical_enter(); 2235 cpu = curcpu; 2236 cache = &zone->uz_cpu[cpu]; 2237 2238 /* 2239 * Since we have locked the zone we may as well send back our stats. 2240 */ 2241 atomic_add_long(&zone->uz_allocs, cache->uc_allocs); 2242 atomic_add_long(&zone->uz_frees, cache->uc_frees); 2243 cache->uc_allocs = 0; 2244 cache->uc_frees = 0; 2245 2246 /* See if we lost the race to fill the cache. */ 2247 if (cache->uc_allocbucket != NULL) { 2248 ZONE_UNLOCK(zone); 2249 goto zalloc_start; 2250 } 2251 2252 /* 2253 * Check the zone's cache of buckets. 2254 */ 2255 if ((bucket = LIST_FIRST(&zone->uz_buckets)) != NULL) { 2256 KASSERT(bucket->ub_cnt != 0, 2257 ("uma_zalloc_arg: Returning an empty bucket.")); 2258 2259 LIST_REMOVE(bucket, ub_link); 2260 cache->uc_allocbucket = bucket; 2261 ZONE_UNLOCK(zone); 2262 goto zalloc_start; 2263 } 2264 /* We are no longer associated with this CPU. */ 2265 critical_exit(); 2266 2267 /* 2268 * We bump the uz count when the cache size is insufficient to 2269 * handle the working set. 2270 */ 2271 if (lockfail && zone->uz_count < BUCKET_MAX) 2272 zone->uz_count++; 2273 ZONE_UNLOCK(zone); 2274 2275 /* 2276 * Now let's just fill a bucket and put it on the free list. If that 2277 * works we'll restart the allocation from the beginning and it 2278 * will use the just-filled bucket. 2279 */ 2280 bucket = zone_alloc_bucket(zone, udata, flags); 2281 if (bucket != NULL) { 2282 ZONE_LOCK(zone); 2283 critical_enter(); 2284 cpu = curcpu; 2285 cache = &zone->uz_cpu[cpu]; 2286 /* 2287 * See if we lost the race or were migrated. Cache the 2288 * initialized bucket to make this less likely or claim 2289 * the memory directly. 2290 */ 2291 if (cache->uc_allocbucket == NULL) 2292 cache->uc_allocbucket = bucket; 2293 else 2294 LIST_INSERT_HEAD(&zone->uz_buckets, bucket, ub_link); 2295 ZONE_UNLOCK(zone); 2296 goto zalloc_start; 2297 } 2298 2299 /* 2300 * We may not be able to get a bucket so return an actual item. 2301 */ 2302 #ifdef UMA_DEBUG 2303 printf("uma_zalloc_arg: Bucketzone returned NULL\n"); 2304 #endif 2305 2306 zalloc_item: 2307 item = zone_alloc_item(zone, udata, flags); 2308 2309 #if 0 2310 /* XXX: FIX!! Do not enable this in CURRENT!!
MarkM */ 2311 /* The entropy here is desirable, but the harvesting is expensive */ 2312 random_harvest(&item, sizeof(void *), 1, RANDOM_UMA_ALLOC); 2313 #endif 2314 return (item); 2315 } 2316 2317 static uma_slab_t 2318 keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int flags) 2319 { 2320 uma_slab_t slab; 2321 int reserve; 2322 2323 mtx_assert(&keg->uk_lock, MA_OWNED); 2324 slab = NULL; 2325 reserve = 0; 2326 if ((flags & M_USE_RESERVE) == 0) 2327 reserve = keg->uk_reserve; 2328 2329 for (;;) { 2330 /* 2331 * Find a slab with some space. Prefer slabs that are partially 2332 * used over those that are totally full. This helps to reduce 2333 * fragmentation. 2334 */ 2335 if (keg->uk_free > reserve) { 2336 if (!LIST_EMPTY(&keg->uk_part_slab)) { 2337 slab = LIST_FIRST(&keg->uk_part_slab); 2338 } else { 2339 slab = LIST_FIRST(&keg->uk_free_slab); 2340 LIST_REMOVE(slab, us_link); 2341 LIST_INSERT_HEAD(&keg->uk_part_slab, slab, 2342 us_link); 2343 } 2344 MPASS(slab->us_keg == keg); 2345 return (slab); 2346 } 2347 2348 /* 2349 * M_NOVM means don't ask at all! 2350 */ 2351 if (flags & M_NOVM) 2352 break; 2353 2354 if (keg->uk_maxpages && keg->uk_pages >= keg->uk_maxpages) { 2355 keg->uk_flags |= UMA_ZFLAG_FULL; 2356 /* 2357 * If this is not a multi-zone, set the FULL bit. 2358 * Otherwise slab_multi() takes care of it. 2359 */ 2360 if ((zone->uz_flags & UMA_ZFLAG_MULTI) == 0) { 2361 zone->uz_flags |= UMA_ZFLAG_FULL; 2362 zone_log_warning(zone); 2363 } 2364 if (flags & M_NOWAIT) 2365 break; 2366 zone->uz_sleeps++; 2367 msleep(keg, &keg->uk_lock, PVM, "keglimit", 0); 2368 continue; 2369 } 2370 slab = keg_alloc_slab(keg, zone, flags); 2371 /* 2372 * If we got a slab here it's safe to mark it partially used 2373 * and return. We assume that the caller is going to remove 2374 * at least one item. 2375 */ 2376 if (slab) { 2377 MPASS(slab->us_keg == keg); 2378 LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link); 2379 return (slab); 2380 } 2381 /* 2382 * We might not have been able to get a slab but another cpu 2383 * could have while we were unlocked. Check again before we 2384 * fail. 2385 */ 2386 flags |= M_NOVM; 2387 } 2388 return (slab); 2389 } 2390 2391 static uma_slab_t 2392 zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int flags) 2393 { 2394 uma_slab_t slab; 2395 2396 if (keg == NULL) { 2397 keg = zone_first_keg(zone); 2398 KEG_LOCK(keg); 2399 } 2400 2401 for (;;) { 2402 slab = keg_fetch_slab(keg, zone, flags); 2403 if (slab) 2404 return (slab); 2405 if (flags & (M_NOWAIT | M_NOVM)) 2406 break; 2407 } 2408 KEG_UNLOCK(keg); 2409 return (NULL); 2410 } 2411 2412 /* 2413 * uma_zone_fetch_slab_multi: Fetches a slab from one available keg. Returns 2414 * with the keg locked. On NULL no lock is held. 2415 * 2416 * The last pointer is used to seed the search. It is not required. 2417 */ 2418 static uma_slab_t 2419 zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int rflags) 2420 { 2421 uma_klink_t klink; 2422 uma_slab_t slab; 2423 uma_keg_t keg; 2424 int flags; 2425 int empty; 2426 int full; 2427 2428 /* 2429 * Don't wait on the first pass. This will skip limit tests 2430 * as well. We don't want to block if we can find a provider 2431 * without blocking. 2432 */ 2433 flags = (rflags & ~M_WAITOK) | M_NOWAIT; 2434 /* 2435 * Use the last slab allocated as a hint for where to start 2436 * the search. 
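 * The hint keg is passed in locked, left over from the previous fetch;
 * if it cannot supply a slab its lock is dropped before the full keg
 * list is scanned below.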
2437 */ 2438 if (last != NULL) { 2439 slab = keg_fetch_slab(last, zone, flags); 2440 if (slab) 2441 return (slab); 2442 KEG_UNLOCK(last); 2443 } 2444 /* 2445 * Loop until we have a slab incase of transient failures 2446 * while M_WAITOK is specified. I'm not sure this is 100% 2447 * required but we've done it for so long now. 2448 */ 2449 for (;;) { 2450 empty = 0; 2451 full = 0; 2452 /* 2453 * Search the available kegs for slabs. Be careful to hold the 2454 * correct lock while calling into the keg layer. 2455 */ 2456 LIST_FOREACH(klink, &zone->uz_kegs, kl_link) { 2457 keg = klink->kl_keg; 2458 KEG_LOCK(keg); 2459 if ((keg->uk_flags & UMA_ZFLAG_FULL) == 0) { 2460 slab = keg_fetch_slab(keg, zone, flags); 2461 if (slab) 2462 return (slab); 2463 } 2464 if (keg->uk_flags & UMA_ZFLAG_FULL) 2465 full++; 2466 else 2467 empty++; 2468 KEG_UNLOCK(keg); 2469 } 2470 if (rflags & (M_NOWAIT | M_NOVM)) 2471 break; 2472 flags = rflags; 2473 /* 2474 * All kegs are full. XXX We can't atomically check all kegs 2475 * and sleep so just sleep for a short period and retry. 2476 */ 2477 if (full && !empty) { 2478 ZONE_LOCK(zone); 2479 zone->uz_flags |= UMA_ZFLAG_FULL; 2480 zone->uz_sleeps++; 2481 zone_log_warning(zone); 2482 msleep(zone, zone->uz_lockptr, PVM, 2483 "zonelimit", hz/100); 2484 zone->uz_flags &= ~UMA_ZFLAG_FULL; 2485 ZONE_UNLOCK(zone); 2486 continue; 2487 } 2488 } 2489 return (NULL); 2490 } 2491 2492 static void * 2493 slab_alloc_item(uma_keg_t keg, uma_slab_t slab) 2494 { 2495 void *item; 2496 uint8_t freei; 2497 2498 MPASS(keg == slab->us_keg); 2499 mtx_assert(&keg->uk_lock, MA_OWNED); 2500 2501 freei = BIT_FFS(SLAB_SETSIZE, &slab->us_free) - 1; 2502 BIT_CLR(SLAB_SETSIZE, freei, &slab->us_free); 2503 item = slab->us_data + (keg->uk_rsize * freei); 2504 slab->us_freecount--; 2505 keg->uk_free--; 2506 2507 /* Move this slab to the full list */ 2508 if (slab->us_freecount == 0) { 2509 LIST_REMOVE(slab, us_link); 2510 LIST_INSERT_HEAD(&keg->uk_full_slab, slab, us_link); 2511 } 2512 2513 return (item); 2514 } 2515 2516 static int 2517 zone_import(uma_zone_t zone, void **bucket, int max, int flags) 2518 { 2519 uma_slab_t slab; 2520 uma_keg_t keg; 2521 int i; 2522 2523 slab = NULL; 2524 keg = NULL; 2525 /* Try to keep the buckets totally full */ 2526 for (i = 0; i < max; ) { 2527 if ((slab = zone->uz_slab(zone, keg, flags)) == NULL) 2528 break; 2529 keg = slab->us_keg; 2530 while (slab->us_freecount && i < max) { 2531 bucket[i++] = slab_alloc_item(keg, slab); 2532 if (keg->uk_free <= keg->uk_reserve) 2533 break; 2534 } 2535 /* Don't grab more than one slab at a time. */ 2536 flags &= ~M_WAITOK; 2537 flags |= M_NOWAIT; 2538 } 2539 if (slab != NULL) 2540 KEG_UNLOCK(keg); 2541 2542 return i; 2543 } 2544 2545 static uma_bucket_t 2546 zone_alloc_bucket(uma_zone_t zone, void *udata, int flags) 2547 { 2548 uma_bucket_t bucket; 2549 int max; 2550 2551 /* Don't wait for buckets, preserve caller's NOVM setting. */ 2552 bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM)); 2553 if (bucket == NULL) 2554 return (NULL); 2555 2556 max = MIN(bucket->ub_entries, zone->uz_count); 2557 bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket, 2558 max, flags); 2559 2560 /* 2561 * Initialize the memory if necessary. 
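 * The items imported above come straight off keg slabs, so the
 * zone-level init (uz_init) still has to run before they are handed
 * out; see the note in zone_alloc_item() about zone init vs. keg init.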
2562 */ 2563 if (bucket->ub_cnt != 0 && zone->uz_init != NULL) { 2564 int i; 2565 2566 for (i = 0; i < bucket->ub_cnt; i++) 2567 if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size, 2568 flags) != 0) 2569 break; 2570 /* 2571 * If we couldn't initialize the whole bucket, put the 2572 * rest back onto the freelist. 2573 */ 2574 if (i != bucket->ub_cnt) { 2575 zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i], 2576 bucket->ub_cnt - i); 2577 #ifdef INVARIANTS 2578 bzero(&bucket->ub_bucket[i], 2579 sizeof(void *) * (bucket->ub_cnt - i)); 2580 #endif 2581 bucket->ub_cnt = i; 2582 } 2583 } 2584 2585 if (bucket->ub_cnt == 0) { 2586 bucket_free(zone, bucket, udata); 2587 atomic_add_long(&zone->uz_fails, 1); 2588 return (NULL); 2589 } 2590 2591 return (bucket); 2592 } 2593 2594 /* 2595 * Allocates a single item from a zone. 2596 * 2597 * Arguments 2598 * zone The zone to alloc for. 2599 * udata The data to be passed to the constructor. 2600 * flags M_WAITOK, M_NOWAIT, M_ZERO. 2601 * 2602 * Returns 2603 * NULL if there is no memory and M_NOWAIT is set 2604 * An item if successful 2605 */ 2606 2607 static void * 2608 zone_alloc_item(uma_zone_t zone, void *udata, int flags) 2609 { 2610 void *item; 2611 2612 item = NULL; 2613 2614 #ifdef UMA_DEBUG_ALLOC 2615 printf("INTERNAL: Allocating one item from %s(%p)\n", zone->uz_name, zone); 2616 #endif 2617 if (zone->uz_import(zone->uz_arg, &item, 1, flags) != 1) 2618 goto fail; 2619 atomic_add_long(&zone->uz_allocs, 1); 2620 2621 /* 2622 * We have to call both the zone's init (not the keg's init) 2623 * and the zone's ctor. This is because the item is going from 2624 * a keg slab directly to the user, and the user is expecting it 2625 * to be both zone-init'd as well as zone-ctor'd. 2626 */ 2627 if (zone->uz_init != NULL) { 2628 if (zone->uz_init(item, zone->uz_size, flags) != 0) { 2629 zone_free_item(zone, item, udata, SKIP_FINI); 2630 goto fail; 2631 } 2632 } 2633 if (zone->uz_ctor != NULL) { 2634 if (zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) { 2635 zone_free_item(zone, item, udata, SKIP_DTOR); 2636 goto fail; 2637 } 2638 } 2639 #ifdef INVARIANTS 2640 uma_dbg_alloc(zone, NULL, item); 2641 #endif 2642 if (flags & M_ZERO) 2643 uma_zero_item(item, zone); 2644 2645 return (item); 2646 2647 fail: 2648 atomic_add_long(&zone->uz_fails, 1); 2649 return (NULL); 2650 } 2651 2652 /* See uma.h */ 2653 void 2654 uma_zfree_arg(uma_zone_t zone, void *item, void *udata) 2655 { 2656 uma_cache_t cache; 2657 uma_bucket_t bucket; 2658 int lockfail; 2659 int cpu; 2660 2661 #if 0 2662 /* XXX: FIX!! Do not enable this in CURRENT!! MarkM */ 2663 /* The entropy here is desirable, but the harvesting is expensive */ 2664 struct entropy { 2665 const void *uz_name; 2666 const void *item; 2667 } entropy; 2668 2669 entropy.uz_name = zone->uz_name; 2670 entropy.item = item; 2671 random_harvest(&entropy, sizeof(struct entropy), 2, RANDOM_UMA_ALLOC); 2672 #endif 2673 2674 #ifdef UMA_DEBUG_ALLOC_1 2675 printf("Freeing item %p to %s(%p)\n", item, zone->uz_name, zone); 2676 #endif 2677 CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread, 2678 zone->uz_name); 2679 2680 /* uma_zfree(..., NULL) does nothing, to match free(9). 
*/ 2681 if (item == NULL) 2682 return; 2683 #ifdef DEBUG_MEMGUARD 2684 if (is_memguard_addr(item)) { 2685 if (zone->uz_dtor != NULL && zone->uz_dtor != mtrash_dtor) 2686 zone->uz_dtor(item, zone->uz_size, udata); 2687 if (zone->uz_fini != NULL && zone->uz_fini != mtrash_fini) 2688 zone->uz_fini(item, zone->uz_size); 2689 memguard_free(item); 2690 return; 2691 } 2692 #endif 2693 #ifdef INVARIANTS 2694 if (zone->uz_flags & UMA_ZONE_MALLOC) 2695 uma_dbg_free(zone, udata, item); 2696 else 2697 uma_dbg_free(zone, NULL, item); 2698 #endif 2699 if (zone->uz_dtor != NULL) 2700 zone->uz_dtor(item, zone->uz_size, udata); 2701 2702 /* 2703 * The race here is acceptable. If we miss it we'll just have to wait 2704 * a little longer for the limits to be reset. 2705 */ 2706 if (zone->uz_flags & UMA_ZFLAG_FULL) 2707 goto zfree_item; 2708 2709 /* 2710 * If possible, free to the per-CPU cache. There are two 2711 * requirements for safe access to the per-CPU cache: (1) the thread 2712 * accessing the cache must not be preempted or yield during access, 2713 * and (2) the thread must not migrate CPUs without switching which 2714 * cache it accesses. We rely on a critical section to prevent 2715 * preemption and migration. We release the critical section in 2716 * order to acquire the zone mutex if we are unable to free to the 2717 * current cache; when we re-acquire the critical section, we must 2718 * detect and handle migration if it has occurred. 2719 */ 2720 zfree_restart: 2721 critical_enter(); 2722 cpu = curcpu; 2723 cache = &zone->uz_cpu[cpu]; 2724 2725 zfree_start: 2726 /* 2727 * Try to free into the allocbucket first to give LIFO ordering 2728 * for cache-hot data structures. Spill over into the freebucket 2729 * if necessary. Alloc will swap them if one runs dry. 2730 */ 2731 bucket = cache->uc_allocbucket; 2732 if (bucket == NULL || bucket->ub_cnt >= bucket->ub_entries) 2733 bucket = cache->uc_freebucket; 2734 if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) { 2735 KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL, 2736 ("uma_zfree: Freeing to non-free bucket index.")); 2737 bucket->ub_bucket[bucket->ub_cnt] = item; 2738 bucket->ub_cnt++; 2739 cache->uc_frees++; 2740 critical_exit(); 2741 return; 2742 } 2743 2744 /* 2745 * We must go back to the zone, which requires acquiring the zone lock, 2746 * which in turn means we must release and re-acquire the critical 2747 * section. Since the critical section is released, we may be 2748 * preempted or migrate. As such, make sure not to maintain any 2749 * thread-local state specific to the cache from prior to releasing 2750 * the critical section. 2751 */ 2752 critical_exit(); 2753 if (zone->uz_count == 0 || bucketdisable) 2754 goto zfree_item; 2755 2756 lockfail = 0; 2757 if (ZONE_TRYLOCK(zone) == 0) { 2758 /* Record contention to size the buckets. */ 2759 ZONE_LOCK(zone); 2760 lockfail = 1; 2761 } 2762 critical_enter(); 2763 cpu = curcpu; 2764 cache = &zone->uz_cpu[cpu]; 2765 2766 /* 2767 * Since we have locked the zone we may as well send back our stats. 2768 */ 2769 atomic_add_long(&zone->uz_allocs, cache->uc_allocs); 2770 atomic_add_long(&zone->uz_frees, cache->uc_frees); 2771 cache->uc_allocs = 0; 2772 cache->uc_frees = 0; 2773 2774 bucket = cache->uc_freebucket; 2775 if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) { 2776 ZONE_UNLOCK(zone); 2777 goto zfree_start; 2778 } 2779 cache->uc_freebucket = NULL; 2780 2781 /* Can we throw this on the zone full list?
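 * Only a non-empty bucket is worth publishing on uz_buckets, where
 * uma_zalloc_arg() running on any CPU may pick it up later; the
 * KASSERT below enforces that.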
*/ 2782 if (bucket != NULL) { 2783 #ifdef UMA_DEBUG_ALLOC 2784 printf("uma_zfree: Putting old bucket on the free list.\n"); 2785 #endif 2786 /* ub_cnt is pointing to the last free item */ 2787 KASSERT(bucket->ub_cnt != 0, 2788 ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n")); 2789 LIST_INSERT_HEAD(&zone->uz_buckets, bucket, ub_link); 2790 } 2791 2792 /* We are no longer associated with this CPU. */ 2793 critical_exit(); 2794 2795 /* 2796 * We bump the uz count when the cache size is insufficient to 2797 * handle the working set. 2798 */ 2799 if (lockfail && zone->uz_count < BUCKET_MAX) 2800 zone->uz_count++; 2801 ZONE_UNLOCK(zone); 2802 2803 #ifdef UMA_DEBUG_ALLOC 2804 printf("uma_zfree: Allocating new free bucket.\n"); 2805 #endif 2806 bucket = bucket_alloc(zone, udata, M_NOWAIT); 2807 if (bucket) { 2808 critical_enter(); 2809 cpu = curcpu; 2810 cache = &zone->uz_cpu[cpu]; 2811 if (cache->uc_freebucket == NULL) { 2812 cache->uc_freebucket = bucket; 2813 goto zfree_start; 2814 } 2815 /* 2816 * We lost the race, start over. We have to drop our 2817 * critical section to free the bucket. 2818 */ 2819 critical_exit(); 2820 bucket_free(zone, bucket, udata); 2821 goto zfree_restart; 2822 } 2823 2824 /* 2825 * If nothing else caught this, we'll just do an internal free. 2826 */ 2827 zfree_item: 2828 zone_free_item(zone, item, udata, SKIP_DTOR); 2829 2830 return; 2831 } 2832 2833 static void 2834 slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item) 2835 { 2836 uint8_t freei; 2837 2838 mtx_assert(&keg->uk_lock, MA_OWNED); 2839 MPASS(keg == slab->us_keg); 2840 2841 /* Do we need to remove from any lists? */ 2842 if (slab->us_freecount+1 == keg->uk_ipers) { 2843 LIST_REMOVE(slab, us_link); 2844 LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link); 2845 } else if (slab->us_freecount == 0) { 2846 LIST_REMOVE(slab, us_link); 2847 LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link); 2848 } 2849 2850 /* Slab management. */ 2851 freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize; 2852 BIT_SET(SLAB_SETSIZE, freei, &slab->us_free); 2853 slab->us_freecount++; 2854 2855 /* Keg statistics. */ 2856 keg->uk_free++; 2857 } 2858 2859 static void 2860 zone_release(uma_zone_t zone, void **bucket, int cnt) 2861 { 2862 void *item; 2863 uma_slab_t slab; 2864 uma_keg_t keg; 2865 uint8_t *mem; 2866 int clearfull; 2867 int i; 2868 2869 clearfull = 0; 2870 keg = zone_first_keg(zone); 2871 KEG_LOCK(keg); 2872 for (i = 0; i < cnt; i++) { 2873 item = bucket[i]; 2874 if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) { 2875 mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK)); 2876 if (zone->uz_flags & UMA_ZONE_HASH) { 2877 slab = hash_sfind(&keg->uk_hash, mem); 2878 } else { 2879 mem += keg->uk_pgoff; 2880 slab = (uma_slab_t)mem; 2881 } 2882 } else { 2883 slab = vtoslab((vm_offset_t)item); 2884 if (slab->us_keg != keg) { 2885 KEG_UNLOCK(keg); 2886 keg = slab->us_keg; 2887 KEG_LOCK(keg); 2888 } 2889 } 2890 slab_free_item(keg, slab, item); 2891 if (keg->uk_flags & UMA_ZFLAG_FULL) { 2892 if (keg->uk_pages < keg->uk_maxpages) { 2893 keg->uk_flags &= ~UMA_ZFLAG_FULL; 2894 clearfull = 1; 2895 } 2896 2897 /* 2898 * We can handle one more allocation. Since we're 2899 * clearing ZFLAG_FULL, wake up all procs blocked 2900 * on pages. This should be uncommon, so keeping this 2901 * simple for now (rather than adding count of blocked 2902 * threads etc). 
2903 */ 2904 wakeup(keg); 2905 } 2906 } 2907 KEG_UNLOCK(keg); 2908 if (clearfull) { 2909 ZONE_LOCK(zone); 2910 zone->uz_flags &= ~UMA_ZFLAG_FULL; 2911 wakeup(zone); 2912 ZONE_UNLOCK(zone); 2913 } 2914 2915 } 2916 2917 /* 2918 * Frees a single item to any zone. 2919 * 2920 * Arguments: 2921 * zone The zone to free to 2922 * item The item we're freeing 2923 * udata User supplied data for the dtor 2924 * skip Skip dtors and finis 2925 */ 2926 static void 2927 zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip) 2928 { 2929 2930 #ifdef INVARIANTS 2931 if (skip == SKIP_NONE) { 2932 if (zone->uz_flags & UMA_ZONE_MALLOC) 2933 uma_dbg_free(zone, udata, item); 2934 else 2935 uma_dbg_free(zone, NULL, item); 2936 } 2937 #endif 2938 if (skip < SKIP_DTOR && zone->uz_dtor) 2939 zone->uz_dtor(item, zone->uz_size, udata); 2940 2941 if (skip < SKIP_FINI && zone->uz_fini) 2942 zone->uz_fini(item, zone->uz_size); 2943 2944 atomic_add_long(&zone->uz_frees, 1); 2945 zone->uz_release(zone->uz_arg, &item, 1); 2946 } 2947 2948 /* See uma.h */ 2949 int 2950 uma_zone_set_max(uma_zone_t zone, int nitems) 2951 { 2952 uma_keg_t keg; 2953 2954 keg = zone_first_keg(zone); 2955 if (keg == NULL) 2956 return (0); 2957 KEG_LOCK(keg); 2958 keg->uk_maxpages = (nitems / keg->uk_ipers) * keg->uk_ppera; 2959 if (keg->uk_maxpages * keg->uk_ipers < nitems) 2960 keg->uk_maxpages += keg->uk_ppera; 2961 nitems = keg->uk_maxpages * keg->uk_ipers; 2962 KEG_UNLOCK(keg); 2963 2964 return (nitems); 2965 } 2966 2967 /* See uma.h */ 2968 int 2969 uma_zone_get_max(uma_zone_t zone) 2970 { 2971 int nitems; 2972 uma_keg_t keg; 2973 2974 keg = zone_first_keg(zone); 2975 if (keg == NULL) 2976 return (0); 2977 KEG_LOCK(keg); 2978 nitems = keg->uk_maxpages * keg->uk_ipers; 2979 KEG_UNLOCK(keg); 2980 2981 return (nitems); 2982 } 2983 2984 /* See uma.h */ 2985 void 2986 uma_zone_set_warning(uma_zone_t zone, const char *warning) 2987 { 2988 2989 ZONE_LOCK(zone); 2990 zone->uz_warning = warning; 2991 ZONE_UNLOCK(zone); 2992 } 2993 2994 /* See uma.h */ 2995 int 2996 uma_zone_get_cur(uma_zone_t zone) 2997 { 2998 int64_t nitems; 2999 u_int i; 3000 3001 ZONE_LOCK(zone); 3002 nitems = zone->uz_allocs - zone->uz_frees; 3003 CPU_FOREACH(i) { 3004 /* 3005 * See the comment in sysctl_vm_zone_stats() regarding the 3006 * safety of accessing the per-cpu caches. With the zone lock 3007 * held, it is safe, but can potentially result in stale data. 3008 */ 3009 nitems += zone->uz_cpu[i].uc_allocs - 3010 zone->uz_cpu[i].uc_frees; 3011 } 3012 ZONE_UNLOCK(zone); 3013 3014 return (nitems < 0 ? 
0 : nitems); 3015 } 3016 3017 /* See uma.h */ 3018 void 3019 uma_zone_set_init(uma_zone_t zone, uma_init uminit) 3020 { 3021 uma_keg_t keg; 3022 3023 keg = zone_first_keg(zone); 3024 KASSERT(keg != NULL, ("uma_zone_set_init: Invalid zone type")); 3025 KEG_LOCK(keg); 3026 KASSERT(keg->uk_pages == 0, 3027 ("uma_zone_set_init on non-empty keg")); 3028 keg->uk_init = uminit; 3029 KEG_UNLOCK(keg); 3030 } 3031 3032 /* See uma.h */ 3033 void 3034 uma_zone_set_fini(uma_zone_t zone, uma_fini fini) 3035 { 3036 uma_keg_t keg; 3037 3038 keg = zone_first_keg(zone); 3039 KASSERT(keg != NULL, ("uma_zone_set_init: Invalid zone type")); 3040 KEG_LOCK(keg); 3041 KASSERT(keg->uk_pages == 0, 3042 ("uma_zone_set_fini on non-empty keg")); 3043 keg->uk_fini = fini; 3044 KEG_UNLOCK(keg); 3045 } 3046 3047 /* See uma.h */ 3048 void 3049 uma_zone_set_zinit(uma_zone_t zone, uma_init zinit) 3050 { 3051 3052 ZONE_LOCK(zone); 3053 KASSERT(zone_first_keg(zone)->uk_pages == 0, 3054 ("uma_zone_set_zinit on non-empty keg")); 3055 zone->uz_init = zinit; 3056 ZONE_UNLOCK(zone); 3057 } 3058 3059 /* See uma.h */ 3060 void 3061 uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini) 3062 { 3063 3064 ZONE_LOCK(zone); 3065 KASSERT(zone_first_keg(zone)->uk_pages == 0, 3066 ("uma_zone_set_zfini on non-empty keg")); 3067 zone->uz_fini = zfini; 3068 ZONE_UNLOCK(zone); 3069 } 3070 3071 /* See uma.h */ 3072 /* XXX uk_freef is not actually used with the zone locked */ 3073 void 3074 uma_zone_set_freef(uma_zone_t zone, uma_free freef) 3075 { 3076 uma_keg_t keg; 3077 3078 keg = zone_first_keg(zone); 3079 KASSERT(keg != NULL, ("uma_zone_set_init: Invalid zone type")); 3080 KEG_LOCK(keg); 3081 keg->uk_freef = freef; 3082 KEG_UNLOCK(keg); 3083 } 3084 3085 /* See uma.h */ 3086 /* XXX uk_allocf is not actually used with the zone locked */ 3087 void 3088 uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf) 3089 { 3090 uma_keg_t keg; 3091 3092 keg = zone_first_keg(zone); 3093 KEG_LOCK(keg); 3094 keg->uk_allocf = allocf; 3095 KEG_UNLOCK(keg); 3096 } 3097 3098 /* See uma.h */ 3099 void 3100 uma_zone_reserve(uma_zone_t zone, int items) 3101 { 3102 uma_keg_t keg; 3103 3104 keg = zone_first_keg(zone); 3105 if (keg == NULL) 3106 return; 3107 KEG_LOCK(keg); 3108 keg->uk_reserve = items; 3109 KEG_UNLOCK(keg); 3110 3111 return; 3112 } 3113 3114 /* See uma.h */ 3115 int 3116 uma_zone_reserve_kva(uma_zone_t zone, int count) 3117 { 3118 uma_keg_t keg; 3119 vm_offset_t kva; 3120 int pages; 3121 3122 keg = zone_first_keg(zone); 3123 if (keg == NULL) 3124 return (0); 3125 pages = count / keg->uk_ipers; 3126 3127 if (pages * keg->uk_ipers < count) 3128 pages++; 3129 3130 #ifdef UMA_MD_SMALL_ALLOC 3131 if (keg->uk_ppera > 1) { 3132 #else 3133 if (1) { 3134 #endif 3135 kva = kva_alloc(pages * UMA_SLAB_SIZE); 3136 if (kva == 0) 3137 return (0); 3138 } else 3139 kva = 0; 3140 KEG_LOCK(keg); 3141 keg->uk_kva = kva; 3142 keg->uk_offset = 0; 3143 keg->uk_maxpages = pages; 3144 #ifdef UMA_MD_SMALL_ALLOC 3145 keg->uk_allocf = (keg->uk_ppera > 1) ? 
noobj_alloc : uma_small_alloc; 3146 #else 3147 keg->uk_allocf = noobj_alloc; 3148 #endif 3149 keg->uk_flags |= UMA_ZONE_NOFREE; 3150 KEG_UNLOCK(keg); 3151 3152 return (1); 3153 } 3154 3155 /* See uma.h */ 3156 void 3157 uma_prealloc(uma_zone_t zone, int items) 3158 { 3159 int slabs; 3160 uma_slab_t slab; 3161 uma_keg_t keg; 3162 3163 keg = zone_first_keg(zone); 3164 if (keg == NULL) 3165 return; 3166 KEG_LOCK(keg); 3167 slabs = items / keg->uk_ipers; 3168 if (slabs * keg->uk_ipers < items) 3169 slabs++; 3170 while (slabs > 0) { 3171 slab = keg_alloc_slab(keg, zone, M_WAITOK); 3172 if (slab == NULL) 3173 break; 3174 MPASS(slab->us_keg == keg); 3175 LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link); 3176 slabs--; 3177 } 3178 KEG_UNLOCK(keg); 3179 } 3180 3181 /* See uma.h */ 3182 uint32_t * 3183 uma_find_refcnt(uma_zone_t zone, void *item) 3184 { 3185 uma_slabrefcnt_t slabref; 3186 uma_slab_t slab; 3187 uma_keg_t keg; 3188 uint32_t *refcnt; 3189 int idx; 3190 3191 slab = vtoslab((vm_offset_t)item & (~UMA_SLAB_MASK)); 3192 slabref = (uma_slabrefcnt_t)slab; 3193 keg = slab->us_keg; 3194 KASSERT(keg->uk_flags & UMA_ZONE_REFCNT, 3195 ("uma_find_refcnt(): zone possibly not UMA_ZONE_REFCNT")); 3196 idx = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize; 3197 refcnt = &slabref->us_refcnt[idx]; 3198 return (refcnt); 3199 } 3200 3201 /* See uma.h */ 3202 void 3203 uma_reclaim(void) 3204 { 3205 #ifdef UMA_DEBUG 3206 printf("UMA: vm asked us to release pages!\n"); 3207 #endif 3208 bucket_enable(); 3209 zone_foreach(zone_drain); 3210 if (vm_page_count_min()) { 3211 cache_drain_safe(NULL); 3212 zone_foreach(zone_drain); 3213 } 3214 /* 3215 * Some slab headers may have been freed back to the slab zones while 3216 * the other zones were being drained, and the slab zones were visited 3217 * early in the pass. Drain them again below so that pages which became empty later can also be freed; we have to do the same for buckets.
3218 */ 3219 zone_drain(slabzone); 3220 zone_drain(slabrefzone); 3221 bucket_zone_drain(); 3222 } 3223 3224 /* See uma.h */ 3225 int 3226 uma_zone_exhausted(uma_zone_t zone) 3227 { 3228 int full; 3229 3230 ZONE_LOCK(zone); 3231 full = (zone->uz_flags & UMA_ZFLAG_FULL); 3232 ZONE_UNLOCK(zone); 3233 return (full); 3234 } 3235 3236 int 3237 uma_zone_exhausted_nolock(uma_zone_t zone) 3238 { 3239 return (zone->uz_flags & UMA_ZFLAG_FULL); 3240 } 3241 3242 void * 3243 uma_large_malloc(int size, int wait) 3244 { 3245 void *mem; 3246 uma_slab_t slab; 3247 uint8_t flags; 3248 3249 slab = zone_alloc_item(slabzone, NULL, wait); 3250 if (slab == NULL) 3251 return (NULL); 3252 mem = page_alloc(NULL, size, &flags, wait); 3253 if (mem) { 3254 vsetslab((vm_offset_t)mem, slab); 3255 slab->us_data = mem; 3256 slab->us_flags = flags | UMA_SLAB_MALLOC; 3257 slab->us_size = size; 3258 } else { 3259 zone_free_item(slabzone, slab, NULL, SKIP_NONE); 3260 } 3261 3262 return (mem); 3263 } 3264 3265 void 3266 uma_large_free(uma_slab_t slab) 3267 { 3268 3269 page_free(slab->us_data, slab->us_size, slab->us_flags); 3270 zone_free_item(slabzone, slab, NULL, SKIP_NONE); 3271 } 3272 3273 static void 3274 uma_zero_item(void *item, uma_zone_t zone) 3275 { 3276 3277 if (zone->uz_flags & UMA_ZONE_PCPU) { 3278 for (int i = 0; i < mp_ncpus; i++) 3279 bzero(zpcpu_get_cpu(item, i), zone->uz_size); 3280 } else 3281 bzero(item, zone->uz_size); 3282 } 3283 3284 void 3285 uma_print_stats(void) 3286 { 3287 zone_foreach(uma_print_zone); 3288 } 3289 3290 static void 3291 slab_print(uma_slab_t slab) 3292 { 3293 printf("slab: keg %p, data %p, freecount %d\n", 3294 slab->us_keg, slab->us_data, slab->us_freecount); 3295 } 3296 3297 static void 3298 cache_print(uma_cache_t cache) 3299 { 3300 printf("alloc: %p(%d), free: %p(%d)\n", 3301 cache->uc_allocbucket, 3302 cache->uc_allocbucket?cache->uc_allocbucket->ub_cnt:0, 3303 cache->uc_freebucket, 3304 cache->uc_freebucket?cache->uc_freebucket->ub_cnt:0); 3305 } 3306 3307 static void 3308 uma_print_keg(uma_keg_t keg) 3309 { 3310 uma_slab_t slab; 3311 3312 printf("keg: %s(%p) size %d(%d) flags %#x ipers %d ppera %d " 3313 "out %d free %d limit %d\n", 3314 keg->uk_name, keg, keg->uk_size, keg->uk_rsize, keg->uk_flags, 3315 keg->uk_ipers, keg->uk_ppera, 3316 (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free, 3317 (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers); 3318 printf("Part slabs:\n"); 3319 LIST_FOREACH(slab, &keg->uk_part_slab, us_link) 3320 slab_print(slab); 3321 printf("Free slabs:\n"); 3322 LIST_FOREACH(slab, &keg->uk_free_slab, us_link) 3323 slab_print(slab); 3324 printf("Full slabs:\n"); 3325 LIST_FOREACH(slab, &keg->uk_full_slab, us_link) 3326 slab_print(slab); 3327 } 3328 3329 void 3330 uma_print_zone(uma_zone_t zone) 3331 { 3332 uma_cache_t cache; 3333 uma_klink_t kl; 3334 int i; 3335 3336 printf("zone: %s(%p) size %d flags %#x\n", 3337 zone->uz_name, zone, zone->uz_size, zone->uz_flags); 3338 LIST_FOREACH(kl, &zone->uz_kegs, kl_link) 3339 uma_print_keg(kl->kl_keg); 3340 CPU_FOREACH(i) { 3341 cache = &zone->uz_cpu[i]; 3342 printf("CPU %d Cache:\n", i); 3343 cache_print(cache); 3344 } 3345 } 3346 3347 #ifdef DDB 3348 /* 3349 * Generate statistics across both the zone and its per-cpu cache's. Return 3350 * desired statistics if the pointer is non-NULL for that statistic. 3351 * 3352 * Note: does not update the zone statistics, as it can't safely clear the 3353 * per-CPU cache statistic. 
3354 * 3355 * XXXRW: Following the uc_allocbucket and uc_freebucket pointers here isn't 3356 * safe from off-CPU; we should modify the caches to track this information 3357 * directly so that we don't have to. 3358 */ 3359 static void 3360 uma_zone_sumstat(uma_zone_t z, int *cachefreep, uint64_t *allocsp, 3361 uint64_t *freesp, uint64_t *sleepsp) 3362 { 3363 uma_cache_t cache; 3364 uint64_t allocs, frees, sleeps; 3365 int cachefree, cpu; 3366 3367 allocs = frees = sleeps = 0; 3368 cachefree = 0; 3369 CPU_FOREACH(cpu) { 3370 cache = &z->uz_cpu[cpu]; 3371 if (cache->uc_allocbucket != NULL) 3372 cachefree += cache->uc_allocbucket->ub_cnt; 3373 if (cache->uc_freebucket != NULL) 3374 cachefree += cache->uc_freebucket->ub_cnt; 3375 allocs += cache->uc_allocs; 3376 frees += cache->uc_frees; 3377 } 3378 allocs += z->uz_allocs; 3379 frees += z->uz_frees; 3380 sleeps += z->uz_sleeps; 3381 if (cachefreep != NULL) 3382 *cachefreep = cachefree; 3383 if (allocsp != NULL) 3384 *allocsp = allocs; 3385 if (freesp != NULL) 3386 *freesp = frees; 3387 if (sleepsp != NULL) 3388 *sleepsp = sleeps; 3389 } 3390 #endif /* DDB */ 3391 3392 static int 3393 sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS) 3394 { 3395 uma_keg_t kz; 3396 uma_zone_t z; 3397 int count; 3398 3399 count = 0; 3400 rw_rlock(&uma_rwlock); 3401 LIST_FOREACH(kz, &uma_kegs, uk_link) { 3402 LIST_FOREACH(z, &kz->uk_zones, uz_link) 3403 count++; 3404 } 3405 rw_runlock(&uma_rwlock); 3406 return (sysctl_handle_int(oidp, &count, 0, req)); 3407 } 3408 3409 static int 3410 sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS) 3411 { 3412 struct uma_stream_header ush; 3413 struct uma_type_header uth; 3414 struct uma_percpu_stat ups; 3415 uma_bucket_t bucket; 3416 struct sbuf sbuf; 3417 uma_cache_t cache; 3418 uma_klink_t kl; 3419 uma_keg_t kz; 3420 uma_zone_t z; 3421 uma_keg_t k; 3422 int count, error, i; 3423 3424 error = sysctl_wire_old_buffer(req, 0); 3425 if (error != 0) 3426 return (error); 3427 sbuf_new_for_sysctl(&sbuf, NULL, 128, req); 3428 3429 count = 0; 3430 rw_rlock(&uma_rwlock); 3431 LIST_FOREACH(kz, &uma_kegs, uk_link) { 3432 LIST_FOREACH(z, &kz->uk_zones, uz_link) 3433 count++; 3434 } 3435 3436 /* 3437 * Insert stream header. 3438 */ 3439 bzero(&ush, sizeof(ush)); 3440 ush.ush_version = UMA_STREAM_VERSION; 3441 ush.ush_maxcpus = (mp_maxid + 1); 3442 ush.ush_count = count; 3443 (void)sbuf_bcat(&sbuf, &ush, sizeof(ush)); 3444 3445 LIST_FOREACH(kz, &uma_kegs, uk_link) { 3446 LIST_FOREACH(z, &kz->uk_zones, uz_link) { 3447 bzero(&uth, sizeof(uth)); 3448 ZONE_LOCK(z); 3449 strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME); 3450 uth.uth_align = kz->uk_align; 3451 uth.uth_size = kz->uk_size; 3452 uth.uth_rsize = kz->uk_rsize; 3453 LIST_FOREACH(kl, &z->uz_kegs, kl_link) { 3454 k = kl->kl_keg; 3455 uth.uth_maxpages += k->uk_maxpages; 3456 uth.uth_pages += k->uk_pages; 3457 uth.uth_keg_free += k->uk_free; 3458 uth.uth_limit = (k->uk_maxpages / k->uk_ppera) 3459 * k->uk_ipers; 3460 } 3461 3462 /* 3463 * A zone is secondary is it is not the first entry 3464 * on the keg's zone list. 
3465 */ 3466 if ((z->uz_flags & UMA_ZONE_SECONDARY) && 3467 (LIST_FIRST(&kz->uk_zones) != z)) 3468 uth.uth_zone_flags = UTH_ZONE_SECONDARY; 3469 3470 LIST_FOREACH(bucket, &z->uz_buckets, ub_link) 3471 uth.uth_zone_free += bucket->ub_cnt; 3472 uth.uth_allocs = z->uz_allocs; 3473 uth.uth_frees = z->uz_frees; 3474 uth.uth_fails = z->uz_fails; 3475 uth.uth_sleeps = z->uz_sleeps; 3476 (void)sbuf_bcat(&sbuf, &uth, sizeof(uth)); 3477 /* 3478 * While it is not normally safe to access the cache 3479 * bucket pointers while not on the CPU that owns the 3480 * cache, we only allow the pointers to be exchanged 3481 * without the zone lock held, not invalidated, so 3482 * accept the possible race associated with bucket 3483 * exchange during monitoring. 3484 */ 3485 for (i = 0; i < (mp_maxid + 1); i++) { 3486 bzero(&ups, sizeof(ups)); 3487 if (kz->uk_flags & UMA_ZFLAG_INTERNAL) 3488 goto skip; 3489 if (CPU_ABSENT(i)) 3490 goto skip; 3491 cache = &z->uz_cpu[i]; 3492 if (cache->uc_allocbucket != NULL) 3493 ups.ups_cache_free += 3494 cache->uc_allocbucket->ub_cnt; 3495 if (cache->uc_freebucket != NULL) 3496 ups.ups_cache_free += 3497 cache->uc_freebucket->ub_cnt; 3498 ups.ups_allocs = cache->uc_allocs; 3499 ups.ups_frees = cache->uc_frees; 3500 skip: 3501 (void)sbuf_bcat(&sbuf, &ups, sizeof(ups)); 3502 } 3503 ZONE_UNLOCK(z); 3504 } 3505 } 3506 rw_runlock(&uma_rwlock); 3507 error = sbuf_finish(&sbuf); 3508 sbuf_delete(&sbuf); 3509 return (error); 3510 } 3511 3512 int 3513 sysctl_handle_uma_zone_max(SYSCTL_HANDLER_ARGS) 3514 { 3515 uma_zone_t zone = *(uma_zone_t *)arg1; 3516 int error, max, old; 3517 3518 old = max = uma_zone_get_max(zone); 3519 error = sysctl_handle_int(oidp, &max, 0, req); 3520 if (error || !req->newptr) 3521 return (error); 3522 3523 if (max < old) 3524 return (EINVAL); 3525 3526 uma_zone_set_max(zone, max); 3527 3528 return (0); 3529 } 3530 3531 int 3532 sysctl_handle_uma_zone_cur(SYSCTL_HANDLER_ARGS) 3533 { 3534 uma_zone_t zone = *(uma_zone_t *)arg1; 3535 int cur; 3536 3537 cur = uma_zone_get_cur(zone); 3538 return (sysctl_handle_int(oidp, &cur, 0, req)); 3539 } 3540 3541 #ifdef DDB 3542 DB_SHOW_COMMAND(uma, db_show_uma) 3543 { 3544 uint64_t allocs, frees, sleeps; 3545 uma_bucket_t bucket; 3546 uma_keg_t kz; 3547 uma_zone_t z; 3548 int cachefree; 3549 3550 db_printf("%18s %8s %8s %8s %12s %8s %8s\n", "Zone", "Size", "Used", 3551 "Free", "Requests", "Sleeps", "Bucket"); 3552 LIST_FOREACH(kz, &uma_kegs, uk_link) { 3553 LIST_FOREACH(z, &kz->uk_zones, uz_link) { 3554 if (kz->uk_flags & UMA_ZFLAG_INTERNAL) { 3555 allocs = z->uz_allocs; 3556 frees = z->uz_frees; 3557 sleeps = z->uz_sleeps; 3558 cachefree = 0; 3559 } else 3560 uma_zone_sumstat(z, &cachefree, &allocs, 3561 &frees, &sleeps); 3562 if (!((z->uz_flags & UMA_ZONE_SECONDARY) && 3563 (LIST_FIRST(&kz->uk_zones) != z))) 3564 cachefree += kz->uk_free; 3565 LIST_FOREACH(bucket, &z->uz_buckets, ub_link) 3566 cachefree += bucket->ub_cnt; 3567 db_printf("%18s %8ju %8jd %8d %12ju %8ju %8u\n", 3568 z->uz_name, (uintmax_t)kz->uk_size, 3569 (intmax_t)(allocs - frees), cachefree, 3570 (uintmax_t)allocs, sleeps, z->uz_count); 3571 if (db_pager_quit) 3572 return; 3573 } 3574 } 3575 } 3576 3577 DB_SHOW_COMMAND(umacache, db_show_umacache) 3578 { 3579 uint64_t allocs, frees; 3580 uma_bucket_t bucket; 3581 uma_zone_t z; 3582 int cachefree; 3583 3584 db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free", 3585 "Requests", "Bucket"); 3586 LIST_FOREACH(z, &uma_cachezones, uz_link) { 3587 uma_zone_sumstat(z, &cachefree, &allocs, &frees, 
NULL); 3588 LIST_FOREACH(bucket, &z->uz_buckets, ub_link) 3589 cachefree += bucket->ub_cnt; 3590 db_printf("%18s %8ju %8jd %8d %12ju %8u\n", 3591 z->uz_name, (uintmax_t)z->uz_size, 3592 (intmax_t)(allocs - frees), cachefree, 3593 (uintmax_t)allocs, z->uz_count); 3594 if (db_pager_quit) 3595 return; 3596 } 3597 } 3598 #endif 3599
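/*
 * Illustrative sketch of the consumer-side API implemented above, kept
 * under "#if 0" so it is never compiled.  "struct foo", foo_zone and
 * foo_example() are hypothetical names used only for illustration; they
 * are not part of this file.
 */
#if 0
static uma_zone_t foo_zone;

struct foo {
	int	f_state;
};

static void
foo_example(void)
{
	struct foo *fp;

	/* One zone per object type; ctor/dtor/init/fini are optional. */
	foo_zone = uma_zcreate("foo", sizeof(struct foo),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);

	/* Optionally cap the zone; the rounded-up item limit is returned. */
	(void)uma_zone_set_max(foo_zone, 1024);

	/* Allocate an item; M_ZERO asks the allocator to zero it. */
	fp = uma_zalloc_arg(foo_zone, NULL, M_WAITOK | M_ZERO);

	/* ... use fp ... */

	/* Return the item to the zone and tear the zone down. */
	uma_zfree_arg(foo_zone, fp, NULL);
	uma_zdestroy(foo_zone);
}
#endif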