/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson <jeff@FreeBSD.org>
 * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
 * Copyright (c) 2004-2006 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uma_core.c  Implementation of the Universal Memory allocator
 *
 * This allocator is intended to replace the multitude of similar object caches
 * in the standard FreeBSD kernel.  The intent is to be flexible as well as
 * efficient.  A primary design goal is to return unused memory to the rest of
 * the system.  This will make the system as a whole more flexible due to the
 * ability to move memory to subsystems which most need it instead of leaving
 * pools of reserved memory unused.
 *
 * The basic ideas stem from similar slab/zone based allocators whose
 * algorithms are well known.
 */

/*
 * TODO:
 *	- Improve memory usage for large allocations
 *	- Investigate cache size adjustments
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_param.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitset.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/limits.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_param.h>
#include <vm/vm_phys.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

#include <ddb/ddb.h>

#ifdef DEBUG_MEMGUARD
#include <vm/memguard.h>
#endif

/*
 * This is the zone and keg from which all zones are spawned.
 */
static uma_zone_t kegs;
static uma_zone_t zones;

/* This is the zone from which all offpage uma_slab_ts are allocated. */
static uma_zone_t slabzone;

/*
 * The initial hash tables come out of this zone so they can be allocated
 * prior to malloc coming up.
 */
static uma_zone_t hashzone;

/* The boot-time adjusted value for cache line alignment. */
int uma_align_cache = 64 - 1;

static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");

/*
 * Are we allowed to allocate buckets?
 */
static int bucketdisable = 1;

/* Linked list of all kegs in the system */
static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);

/* Linked list of all cache-only zones in the system */
static LIST_HEAD(,uma_zone) uma_cachezones =
    LIST_HEAD_INITIALIZER(uma_cachezones);

/* This RW lock protects the keg list */
static struct rwlock_padalign __exclusive_cache_line uma_rwlock;

/*
 * Pointer to and counter for the pool of pages that is preallocated at
 * startup to bootstrap UMA.  Early zones continue to use the pool until
 * it is depleted, so allocations may happen after boot; thus we need a
 * mutex to protect it.
 */
static char *bootmem;
static int boot_pages;
static struct mtx uma_boot_pages_mtx;

static struct sx uma_drain_lock;

/* kmem soft limit. */
static unsigned long uma_kmem_limit = LONG_MAX;
static volatile unsigned long uma_kmem_total;

/* Is the VM done starting up? */
static int booted = 0;
#define	UMA_STARTUP	1
#define	UMA_STARTUP2	2

/*
 * This is the handle used to schedule events that need to happen
 * outside of the allocation fast path.
 */
static struct callout uma_callout;
#define	UMA_TIMEOUT	20		/* Seconds for callout interval. */

/*
 * This structure is passed as the zone ctor arg so that I don't have to create
 * a special allocation function just for zones.
 */
struct uma_zctor_args {
	const char *name;
	size_t size;
	uma_ctor ctor;
	uma_dtor dtor;
	uma_init uminit;
	uma_fini fini;
	uma_import import;
	uma_release release;
	void *arg;
	uma_keg_t keg;
	int align;
	uint32_t flags;
};

struct uma_kctor_args {
	uma_zone_t zone;
	size_t size;
	uma_init uminit;
	uma_fini fini;
	int align;
	uint32_t flags;
};

struct uma_bucket_zone {
	uma_zone_t	ubz_zone;
	char		*ubz_name;
	int		ubz_entries;	/* Number of items it can hold. */
	int		ubz_maxsize;	/* Maximum allocation size per-item. */
};

/*
 * Compute the actual number of bucket entries to pack them in power
 * of two sizes for more efficient space utilization.
 */
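/*
 * Illustrative arithmetic (not normative): BUCKET_SIZE(n) budgets n
 * pointer-sized words in total and returns how many item slots remain once
 * the bucket header has been carved out, i.e. n minus the header size
 * rounded up to whole words.  Assuming 8-byte pointers and a hypothetical
 * 16-byte struct uma_bucket, BUCKET_SIZE(32) = (8 * 32 - 16) / 8 = 30.
 */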
#define	BUCKET_SIZE(n)						\
    (((sizeof(void *) * (n)) - sizeof(struct uma_bucket)) / sizeof(void *))

#define	BUCKET_MAX	BUCKET_SIZE(256)

struct uma_bucket_zone bucket_zones[] = {
	{ NULL, "4 Bucket", BUCKET_SIZE(4), 4096 },
	{ NULL, "6 Bucket", BUCKET_SIZE(6), 3072 },
	{ NULL, "8 Bucket", BUCKET_SIZE(8), 2048 },
	{ NULL, "12 Bucket", BUCKET_SIZE(12), 1536 },
	{ NULL, "16 Bucket", BUCKET_SIZE(16), 1024 },
	{ NULL, "32 Bucket", BUCKET_SIZE(32), 512 },
	{ NULL, "64 Bucket", BUCKET_SIZE(64), 256 },
	{ NULL, "128 Bucket", BUCKET_SIZE(128), 128 },
	{ NULL, "256 Bucket", BUCKET_SIZE(256), 64 },
	{ NULL, NULL, 0}
};

/*
 * Flags and enumerations to be passed to internal functions.
 */
enum zfreeskip { SKIP_NONE = 0, SKIP_DTOR, SKIP_FINI };

#define	UMA_ANYDOMAIN	-1	/* Special value for domain search. */

/* Prototypes */

static void *noobj_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
static void *page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
static void *startup_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
static void page_free(void *, vm_size_t, uint8_t);
static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int, int);
static void cache_drain(uma_zone_t);
static void bucket_drain(uma_zone_t, uma_bucket_t);
static void bucket_cache_drain(uma_zone_t zone);
static int keg_ctor(void *, int, void *, int);
static void keg_dtor(void *, int, void *);
static int zone_ctor(void *, int, void *, int);
static void zone_dtor(void *, int, void *);
static int zero_init(void *, int, int);
static void keg_small_init(uma_keg_t keg);
static void keg_large_init(uma_keg_t keg);
static void zone_foreach(void (*zfunc)(uma_zone_t));
static void zone_timeout(uma_zone_t zone);
static int hash_alloc(struct uma_hash *);
static int hash_expand(struct uma_hash *, struct uma_hash *);
static void hash_free(struct uma_hash *hash);
static void uma_timeout(void *);
static void uma_startup3(void);
static void *zone_alloc_item(uma_zone_t, void *, int, int);
static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip);
static void bucket_enable(void);
static void bucket_init(void);
static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int);
static void bucket_free(uma_zone_t zone, uma_bucket_t, void *);
static void bucket_zone_drain(void);
static uma_bucket_t zone_alloc_bucket(uma_zone_t, void *, int, int);
static uma_slab_t zone_fetch_slab(uma_zone_t, uma_keg_t, int, int);
static uma_slab_t zone_fetch_slab_multi(uma_zone_t, uma_keg_t, int, int);
static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
static void slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item);
static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
    uma_fini fini, int align, uint32_t flags);
static int zone_import(uma_zone_t, void **, int, int, int);
static void zone_release(uma_zone_t, void **, int);
static void uma_zero_item(void *, uma_zone_t);

void uma_print_zone(uma_zone_t);
void uma_print_stats(void);
static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);

#ifdef INVARIANTS
static void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item);
static void uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item);
#endif

SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);

SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLTYPE_INT,
    0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones");

SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
    0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");

static int zone_warnings = 1;
SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RWTUN, &zone_warnings, 0,
    "Warn when UMA zones become full");

/* Adjust bytes under management by UMA. */
static inline void
uma_total_dec(unsigned long size)
{

	atomic_subtract_long(&uma_kmem_total, size);
}

static inline void
uma_total_inc(unsigned long size)
{

	if (atomic_fetchadd_long(&uma_kmem_total, size) > uma_kmem_limit)
		uma_reclaim_wakeup();
}

/*
 * This routine checks to see whether or not it's safe to enable buckets.
 */
static void
bucket_enable(void)
{
	bucketdisable = vm_page_count_min();
}

/*
 * Initialize bucket_zones, the array of zones of buckets of various sizes.
 *
 * For each zone, calculate the memory required for each bucket, consisting
 * of the header and an array of pointers.
 */
static void
bucket_init(void)
{
	struct uma_bucket_zone *ubz;
	int size;

	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) {
		size = roundup(sizeof(struct uma_bucket), sizeof(void *));
		size += sizeof(void *) * ubz->ubz_entries;
		ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
		    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
		    UMA_ZONE_MTXCLASS | UMA_ZFLAG_BUCKET | UMA_ZONE_NUMA);
	}
}

/*
 * Given a desired number of entries for a bucket, return the zone from which
 * to allocate the bucket.
 */
static struct uma_bucket_zone *
bucket_zone_lookup(int entries)
{
	struct uma_bucket_zone *ubz;

	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
		if (ubz->ubz_entries >= entries)
			return (ubz);
	ubz--;
	return (ubz);
}

static int
bucket_select(int size)
{
	struct uma_bucket_zone *ubz;

	ubz = &bucket_zones[0];
	if (size > ubz->ubz_maxsize)
		return MAX((ubz->ubz_maxsize * ubz->ubz_entries) / size, 1);

	for (; ubz->ubz_entries != 0; ubz++)
		if (ubz->ubz_maxsize < size)
			break;
	ubz--;
	return (ubz->ubz_entries);
}

static uma_bucket_t
bucket_alloc(uma_zone_t zone, void *udata, int flags)
{
	struct uma_bucket_zone *ubz;
	uma_bucket_t bucket;

	/*
	 * This is to stop us from allocating per cpu buckets while we're
	 * running out of vm.boot_pages.  Otherwise, we would exhaust the
	 * boot pages.  This also prevents us from allocating buckets in
	 * low memory situations.
	 */
	if (bucketdisable)
		return (NULL);
	/*
	 * To limit bucket recursion we store the original zone flags
	 * in a cookie passed via zalloc_arg/zfree_arg.  This allows the
	 * NOVM flag to persist even through deep recursions.  We also
	 * store ZFLAG_BUCKET once we have recursed attempting to allocate
	 * a bucket for a bucket zone so we do not allow infinite bucket
	 * recursion.  This cookie will even persist to frees of unused
	 * buckets via the allocation path or bucket allocations in the
	 * free path.
	 */
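	/*
	 * (Illustrative note) The cookie is simply the zone's flag word
	 * cast to a pointer, so a recursive entry from a bucket zone is
	 * detected just by testing UMA_ZFLAG_BUCKET in udata below.
	 */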
392 */ 393 if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0) 394 udata = (void *)(uintptr_t)zone->uz_flags; 395 else { 396 if ((uintptr_t)udata & UMA_ZFLAG_BUCKET) 397 return (NULL); 398 udata = (void *)((uintptr_t)udata | UMA_ZFLAG_BUCKET); 399 } 400 if ((uintptr_t)udata & UMA_ZFLAG_CACHEONLY) 401 flags |= M_NOVM; 402 ubz = bucket_zone_lookup(zone->uz_count); 403 if (ubz->ubz_zone == zone && (ubz + 1)->ubz_entries != 0) 404 ubz++; 405 bucket = uma_zalloc_arg(ubz->ubz_zone, udata, flags); 406 if (bucket) { 407 #ifdef INVARIANTS 408 bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries); 409 #endif 410 bucket->ub_cnt = 0; 411 bucket->ub_entries = ubz->ubz_entries; 412 } 413 414 return (bucket); 415 } 416 417 static void 418 bucket_free(uma_zone_t zone, uma_bucket_t bucket, void *udata) 419 { 420 struct uma_bucket_zone *ubz; 421 422 KASSERT(bucket->ub_cnt == 0, 423 ("bucket_free: Freeing a non free bucket.")); 424 if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0) 425 udata = (void *)(uintptr_t)zone->uz_flags; 426 ubz = bucket_zone_lookup(bucket->ub_entries); 427 uma_zfree_arg(ubz->ubz_zone, bucket, udata); 428 } 429 430 static void 431 bucket_zone_drain(void) 432 { 433 struct uma_bucket_zone *ubz; 434 435 for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) 436 zone_drain(ubz->ubz_zone); 437 } 438 439 static void 440 zone_log_warning(uma_zone_t zone) 441 { 442 static const struct timeval warninterval = { 300, 0 }; 443 444 if (!zone_warnings || zone->uz_warning == NULL) 445 return; 446 447 if (ratecheck(&zone->uz_ratecheck, &warninterval)) 448 printf("[zone: %s] %s\n", zone->uz_name, zone->uz_warning); 449 } 450 451 static inline void 452 zone_maxaction(uma_zone_t zone) 453 { 454 455 if (zone->uz_maxaction.ta_func != NULL) 456 taskqueue_enqueue(taskqueue_thread, &zone->uz_maxaction); 457 } 458 459 static void 460 zone_foreach_keg(uma_zone_t zone, void (*kegfn)(uma_keg_t)) 461 { 462 uma_klink_t klink; 463 464 LIST_FOREACH(klink, &zone->uz_kegs, kl_link) 465 kegfn(klink->kl_keg); 466 } 467 468 /* 469 * Routine called by timeout which is used to fire off some time interval 470 * based calculations. (stats, hash size, etc.) 471 * 472 * Arguments: 473 * arg Unused 474 * 475 * Returns: 476 * Nothing 477 */ 478 static void 479 uma_timeout(void *unused) 480 { 481 bucket_enable(); 482 zone_foreach(zone_timeout); 483 484 /* Reschedule this event */ 485 callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL); 486 } 487 488 /* 489 * Routine to perform timeout driven calculations. This expands the 490 * hashes and does per cpu statistics aggregation. 491 * 492 * Returns nothing. 493 */ 494 static void 495 keg_timeout(uma_keg_t keg) 496 { 497 498 KEG_LOCK(keg); 499 /* 500 * Expand the keg hash table. 501 * 502 * This is done if the number of slabs is larger than the hash size. 503 * What I'm trying to do here is completely reduce collisions. This 504 * may be a little aggressive. Should I allow for two collisions max? 505 */ 506 if (keg->uk_flags & UMA_ZONE_HASH && 507 keg->uk_pages / keg->uk_ppera >= keg->uk_hash.uh_hashsize) { 508 struct uma_hash newhash; 509 struct uma_hash oldhash; 510 int ret; 511 512 /* 513 * This is so involved because allocating and freeing 514 * while the keg lock is held will lead to deadlock. 515 * I have to do everything in stages and check for 516 * races. 
517 */ 518 newhash = keg->uk_hash; 519 KEG_UNLOCK(keg); 520 ret = hash_alloc(&newhash); 521 KEG_LOCK(keg); 522 if (ret) { 523 if (hash_expand(&keg->uk_hash, &newhash)) { 524 oldhash = keg->uk_hash; 525 keg->uk_hash = newhash; 526 } else 527 oldhash = newhash; 528 529 KEG_UNLOCK(keg); 530 hash_free(&oldhash); 531 return; 532 } 533 } 534 KEG_UNLOCK(keg); 535 } 536 537 static void 538 zone_timeout(uma_zone_t zone) 539 { 540 541 zone_foreach_keg(zone, &keg_timeout); 542 } 543 544 /* 545 * Allocate and zero fill the next sized hash table from the appropriate 546 * backing store. 547 * 548 * Arguments: 549 * hash A new hash structure with the old hash size in uh_hashsize 550 * 551 * Returns: 552 * 1 on success and 0 on failure. 553 */ 554 static int 555 hash_alloc(struct uma_hash *hash) 556 { 557 int oldsize; 558 int alloc; 559 560 oldsize = hash->uh_hashsize; 561 562 /* We're just going to go to a power of two greater */ 563 if (oldsize) { 564 hash->uh_hashsize = oldsize * 2; 565 alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize; 566 hash->uh_slab_hash = (struct slabhead *)malloc(alloc, 567 M_UMAHASH, M_NOWAIT); 568 } else { 569 alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT; 570 hash->uh_slab_hash = zone_alloc_item(hashzone, NULL, 571 UMA_ANYDOMAIN, M_WAITOK); 572 hash->uh_hashsize = UMA_HASH_SIZE_INIT; 573 } 574 if (hash->uh_slab_hash) { 575 bzero(hash->uh_slab_hash, alloc); 576 hash->uh_hashmask = hash->uh_hashsize - 1; 577 return (1); 578 } 579 580 return (0); 581 } 582 583 /* 584 * Expands the hash table for HASH zones. This is done from zone_timeout 585 * to reduce collisions. This must not be done in the regular allocation 586 * path, otherwise, we can recurse on the vm while allocating pages. 587 * 588 * Arguments: 589 * oldhash The hash you want to expand 590 * newhash The hash structure for the new table 591 * 592 * Returns: 593 * Nothing 594 * 595 * Discussion: 596 */ 597 static int 598 hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash) 599 { 600 uma_slab_t slab; 601 int hval; 602 int i; 603 604 if (!newhash->uh_slab_hash) 605 return (0); 606 607 if (oldhash->uh_hashsize >= newhash->uh_hashsize) 608 return (0); 609 610 /* 611 * I need to investigate hash algorithms for resizing without a 612 * full rehash. 613 */ 614 615 for (i = 0; i < oldhash->uh_hashsize; i++) 616 while (!SLIST_EMPTY(&oldhash->uh_slab_hash[i])) { 617 slab = SLIST_FIRST(&oldhash->uh_slab_hash[i]); 618 SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[i], us_hlink); 619 hval = UMA_HASH(newhash, slab->us_data); 620 SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval], 621 slab, us_hlink); 622 } 623 624 return (1); 625 } 626 627 /* 628 * Free the hash bucket to the appropriate backing store. 629 * 630 * Arguments: 631 * slab_hash The hash bucket we're freeing 632 * hashsize The number of entries in that hash bucket 633 * 634 * Returns: 635 * Nothing 636 */ 637 static void 638 hash_free(struct uma_hash *hash) 639 { 640 if (hash->uh_slab_hash == NULL) 641 return; 642 if (hash->uh_hashsize == UMA_HASH_SIZE_INIT) 643 zone_free_item(hashzone, hash->uh_slab_hash, NULL, SKIP_NONE); 644 else 645 free(hash->uh_slab_hash, M_UMAHASH); 646 } 647 648 /* 649 * Frees all outstanding items in a bucket 650 * 651 * Arguments: 652 * zone The zone to free to, must be unlocked. 653 * bucket The free/alloc bucket with items, cpu queue must be locked. 
654 * 655 * Returns: 656 * Nothing 657 */ 658 659 static void 660 bucket_drain(uma_zone_t zone, uma_bucket_t bucket) 661 { 662 int i; 663 664 if (bucket == NULL) 665 return; 666 667 if (zone->uz_fini) 668 for (i = 0; i < bucket->ub_cnt; i++) 669 zone->uz_fini(bucket->ub_bucket[i], zone->uz_size); 670 zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt); 671 bucket->ub_cnt = 0; 672 } 673 674 /* 675 * Drains the per cpu caches for a zone. 676 * 677 * NOTE: This may only be called while the zone is being turn down, and not 678 * during normal operation. This is necessary in order that we do not have 679 * to migrate CPUs to drain the per-CPU caches. 680 * 681 * Arguments: 682 * zone The zone to drain, must be unlocked. 683 * 684 * Returns: 685 * Nothing 686 */ 687 static void 688 cache_drain(uma_zone_t zone) 689 { 690 uma_cache_t cache; 691 int cpu; 692 693 /* 694 * XXX: It is safe to not lock the per-CPU caches, because we're 695 * tearing down the zone anyway. I.e., there will be no further use 696 * of the caches at this point. 697 * 698 * XXX: It would good to be able to assert that the zone is being 699 * torn down to prevent improper use of cache_drain(). 700 * 701 * XXX: We lock the zone before passing into bucket_cache_drain() as 702 * it is used elsewhere. Should the tear-down path be made special 703 * there in some form? 704 */ 705 CPU_FOREACH(cpu) { 706 cache = &zone->uz_cpu[cpu]; 707 bucket_drain(zone, cache->uc_allocbucket); 708 bucket_drain(zone, cache->uc_freebucket); 709 if (cache->uc_allocbucket != NULL) 710 bucket_free(zone, cache->uc_allocbucket, NULL); 711 if (cache->uc_freebucket != NULL) 712 bucket_free(zone, cache->uc_freebucket, NULL); 713 cache->uc_allocbucket = cache->uc_freebucket = NULL; 714 } 715 ZONE_LOCK(zone); 716 bucket_cache_drain(zone); 717 ZONE_UNLOCK(zone); 718 } 719 720 static void 721 cache_shrink(uma_zone_t zone) 722 { 723 724 if (zone->uz_flags & UMA_ZFLAG_INTERNAL) 725 return; 726 727 ZONE_LOCK(zone); 728 zone->uz_count = (zone->uz_count_min + zone->uz_count) / 2; 729 ZONE_UNLOCK(zone); 730 } 731 732 static void 733 cache_drain_safe_cpu(uma_zone_t zone) 734 { 735 uma_cache_t cache; 736 uma_bucket_t b1, b2; 737 int domain; 738 739 if (zone->uz_flags & UMA_ZFLAG_INTERNAL) 740 return; 741 742 b1 = b2 = NULL; 743 ZONE_LOCK(zone); 744 critical_enter(); 745 if (zone->uz_flags & UMA_ZONE_NUMA) 746 domain = PCPU_GET(domain); 747 else 748 domain = 0; 749 cache = &zone->uz_cpu[curcpu]; 750 if (cache->uc_allocbucket) { 751 if (cache->uc_allocbucket->ub_cnt != 0) 752 LIST_INSERT_HEAD(&zone->uz_domain[domain].uzd_buckets, 753 cache->uc_allocbucket, ub_link); 754 else 755 b1 = cache->uc_allocbucket; 756 cache->uc_allocbucket = NULL; 757 } 758 if (cache->uc_freebucket) { 759 if (cache->uc_freebucket->ub_cnt != 0) 760 LIST_INSERT_HEAD(&zone->uz_domain[domain].uzd_buckets, 761 cache->uc_freebucket, ub_link); 762 else 763 b2 = cache->uc_freebucket; 764 cache->uc_freebucket = NULL; 765 } 766 critical_exit(); 767 ZONE_UNLOCK(zone); 768 if (b1) 769 bucket_free(zone, b1, NULL); 770 if (b2) 771 bucket_free(zone, b2, NULL); 772 } 773 774 /* 775 * Safely drain per-CPU caches of a zone(s) to alloc bucket. 776 * This is an expensive call because it needs to bind to all CPUs 777 * one by one and enter a critical section on each of them in order 778 * to safely access their cache buckets. 779 * Zone lock must not be held on call this function. 
780 */ 781 static void 782 cache_drain_safe(uma_zone_t zone) 783 { 784 int cpu; 785 786 /* 787 * Polite bucket sizes shrinking was not enouth, shrink aggressively. 788 */ 789 if (zone) 790 cache_shrink(zone); 791 else 792 zone_foreach(cache_shrink); 793 794 CPU_FOREACH(cpu) { 795 thread_lock(curthread); 796 sched_bind(curthread, cpu); 797 thread_unlock(curthread); 798 799 if (zone) 800 cache_drain_safe_cpu(zone); 801 else 802 zone_foreach(cache_drain_safe_cpu); 803 } 804 thread_lock(curthread); 805 sched_unbind(curthread); 806 thread_unlock(curthread); 807 } 808 809 /* 810 * Drain the cached buckets from a zone. Expects a locked zone on entry. 811 */ 812 static void 813 bucket_cache_drain(uma_zone_t zone) 814 { 815 uma_zone_domain_t zdom; 816 uma_bucket_t bucket; 817 int i; 818 819 /* 820 * Drain the bucket queues and free the buckets. 821 */ 822 for (i = 0; i < vm_ndomains; i++) { 823 zdom = &zone->uz_domain[i]; 824 while ((bucket = LIST_FIRST(&zdom->uzd_buckets)) != NULL) { 825 LIST_REMOVE(bucket, ub_link); 826 ZONE_UNLOCK(zone); 827 bucket_drain(zone, bucket); 828 bucket_free(zone, bucket, NULL); 829 ZONE_LOCK(zone); 830 } 831 } 832 833 /* 834 * Shrink further bucket sizes. Price of single zone lock collision 835 * is probably lower then price of global cache drain. 836 */ 837 if (zone->uz_count > zone->uz_count_min) 838 zone->uz_count--; 839 } 840 841 static void 842 keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start) 843 { 844 uint8_t *mem; 845 int i; 846 uint8_t flags; 847 848 CTR4(KTR_UMA, "keg_free_slab keg %s(%p) slab %p, returning %d bytes", 849 keg->uk_name, keg, slab, PAGE_SIZE * keg->uk_ppera); 850 851 mem = slab->us_data; 852 flags = slab->us_flags; 853 i = start; 854 if (keg->uk_fini != NULL) { 855 for (i--; i > -1; i--) 856 keg->uk_fini(slab->us_data + (keg->uk_rsize * i), 857 keg->uk_size); 858 } 859 if (keg->uk_flags & UMA_ZONE_OFFPAGE) 860 zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE); 861 keg->uk_freef(mem, PAGE_SIZE * keg->uk_ppera, flags); 862 uma_total_dec(PAGE_SIZE * keg->uk_ppera); 863 } 864 865 /* 866 * Frees pages from a keg back to the system. This is done on demand from 867 * the pageout daemon. 868 * 869 * Returns nothing. 870 */ 871 static void 872 keg_drain(uma_keg_t keg) 873 { 874 struct slabhead freeslabs = { 0 }; 875 uma_domain_t dom; 876 uma_slab_t slab, tmp; 877 int i; 878 879 /* 880 * We don't want to take pages from statically allocated kegs at this 881 * time 882 */ 883 if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL) 884 return; 885 886 CTR3(KTR_UMA, "keg_drain %s(%p) free items: %u", 887 keg->uk_name, keg, keg->uk_free); 888 KEG_LOCK(keg); 889 if (keg->uk_free == 0) 890 goto finished; 891 892 for (i = 0; i < vm_ndomains; i++) { 893 dom = &keg->uk_domain[i]; 894 LIST_FOREACH_SAFE(slab, &dom->ud_free_slab, us_link, tmp) { 895 /* We have nowhere to free these to. 
			if (slab->us_flags & UMA_SLAB_BOOT)
				continue;

			LIST_REMOVE(slab, us_link);
			keg->uk_pages -= keg->uk_ppera;
			keg->uk_free -= keg->uk_ipers;

			if (keg->uk_flags & UMA_ZONE_HASH)
				UMA_HASH_REMOVE(&keg->uk_hash, slab,
				    slab->us_data);

			SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);
		}
	}

finished:
	KEG_UNLOCK(keg);

	while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
		SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
		keg_free_slab(keg, slab, keg->uk_ipers);
	}
}

static void
zone_drain_wait(uma_zone_t zone, int waitok)
{

	/*
	 * Set draining to interlock with zone_dtor() so we can release our
	 * locks as we go.  Only dtor() should do a WAITOK call since it
	 * is the only call that knows the structure will still be available
	 * when it wakes up.
	 */
	ZONE_LOCK(zone);
	while (zone->uz_flags & UMA_ZFLAG_DRAINING) {
		if (waitok == M_NOWAIT)
			goto out;
		msleep(zone, zone->uz_lockptr, PVM, "zonedrain", 1);
	}
	zone->uz_flags |= UMA_ZFLAG_DRAINING;
	bucket_cache_drain(zone);
	ZONE_UNLOCK(zone);
	/*
	 * The DRAINING flag protects us from being freed while
	 * we're running.  Normally the uma_rwlock would protect us but we
	 * must be able to release and acquire the right lock for each keg.
	 */
	zone_foreach_keg(zone, &keg_drain);
	ZONE_LOCK(zone);
	zone->uz_flags &= ~UMA_ZFLAG_DRAINING;
	wakeup(zone);
out:
	ZONE_UNLOCK(zone);
}

void
zone_drain(uma_zone_t zone)
{

	zone_drain_wait(zone, M_NOWAIT);
}

/*
 * Allocate a new slab for a keg.  This does not insert the slab onto a list.
 *
 * Arguments:
 *	wait  Shall we wait?
 *
 * Returns:
 *	The slab that was allocated or NULL if there is no memory and the
 *	caller specified M_NOWAIT.
 */
static uma_slab_t
keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int domain, int wait)
{
	uma_alloc allocf;
	uma_slab_t slab;
	unsigned long size;
	uint8_t *mem;
	uint8_t flags;
	int i;

	KASSERT(domain >= 0 && domain < vm_ndomains,
	    ("keg_alloc_slab: domain %d out of range", domain));
	mtx_assert(&keg->uk_lock, MA_OWNED);
	slab = NULL;
	mem = NULL;

	allocf = keg->uk_allocf;
	KEG_UNLOCK(keg);
	size = keg->uk_ppera * PAGE_SIZE;

	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
		slab = zone_alloc_item(keg->uk_slabzone, NULL, domain, wait);
		if (slab == NULL)
			goto out;
	}

	/*
	 * This reproduces the old vm_zone behavior of zero filling pages the
	 * first time they are added to a zone.
	 *
	 * Malloced items are zeroed in uma_zalloc.
	 */

	if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
		wait |= M_ZERO;
	else
		wait &= ~M_ZERO;

	if (keg->uk_flags & UMA_ZONE_NODUMP)
		wait |= M_NODUMP;

	/* zone is passed for legacy reasons. */
	mem = allocf(zone, size, domain, &flags, wait);
	if (mem == NULL) {
		if (keg->uk_flags & UMA_ZONE_OFFPAGE)
			zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
		slab = NULL;
		goto out;
	}
	uma_total_inc(size);

	/* Point the slab into the allocated memory */
	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
		slab = (uma_slab_t )(mem + keg->uk_pgoff);

	if (keg->uk_flags & UMA_ZONE_VTOSLAB)
		for (i = 0; i < keg->uk_ppera; i++)
			vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);

	slab->us_keg = keg;
	slab->us_data = mem;
	slab->us_freecount = keg->uk_ipers;
	slab->us_flags = flags;
	slab->us_domain = domain;
	BIT_FILL(SLAB_SETSIZE, &slab->us_free);
#ifdef INVARIANTS
	BIT_ZERO(SLAB_SETSIZE, &slab->us_debugfree);
#endif

	if (keg->uk_init != NULL) {
		for (i = 0; i < keg->uk_ipers; i++)
			if (keg->uk_init(slab->us_data + (keg->uk_rsize * i),
			    keg->uk_size, wait) != 0)
				break;
		if (i != keg->uk_ipers) {
			keg_free_slab(keg, slab, i);
			slab = NULL;
			goto out;
		}
	}
out:
	KEG_LOCK(keg);

	CTR3(KTR_UMA, "keg_alloc_slab: allocated slab %p for %s(%p)",
	    slab, keg->uk_name, keg);

	if (slab != NULL) {
		if (keg->uk_flags & UMA_ZONE_HASH)
			UMA_HASH_INSERT(&keg->uk_hash, slab, mem);

		keg->uk_pages += keg->uk_ppera;
		keg->uk_free += keg->uk_ipers;
	}

	return (slab);
}

/*
 * This function is intended to be used early on in place of page_alloc() so
 * that we may use the boot time page cache to satisfy allocations before
 * the VM is ready.
 */
static void *
startup_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
    int wait)
{
	uma_keg_t keg;
	void *mem;
	int pages;

	keg = zone_first_keg(zone);
	pages = howmany(bytes, PAGE_SIZE);
	KASSERT(pages > 0, ("startup_alloc can't reserve 0 pages\n"));

	/*
	 * Check our small startup cache to see if it has pages remaining.
	 */
	mtx_lock(&uma_boot_pages_mtx);
	if (pages <= boot_pages) {
		mem = bootmem;
		boot_pages -= pages;
		bootmem += pages * PAGE_SIZE;
		mtx_unlock(&uma_boot_pages_mtx);
		*pflag = UMA_SLAB_BOOT;
		return (mem);
	}
	mtx_unlock(&uma_boot_pages_mtx);
	if (booted < UMA_STARTUP2)
		panic("UMA: Increase vm.boot_pages");
	/*
	 * Now that we've booted, reset these users to their real allocator.
	 */
#ifdef UMA_MD_SMALL_ALLOC
	keg->uk_allocf = (keg->uk_ppera > 1) ? page_alloc : uma_small_alloc;
#else
	keg->uk_allocf = page_alloc;
#endif
	return keg->uk_allocf(zone, bytes, domain, pflag, wait);
}

/*
 * Allocates a number of pages from the system
 *
 * Arguments:
 *	bytes  The number of bytes requested
 *	wait   Shall we wait?
 *
 * Returns:
 *	A pointer to the allocated memory or possibly
 *	NULL if M_NOWAIT is set.
 */
static void *
page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
    int wait)
{
	void *p;	/* Returned page */

	*pflag = UMA_SLAB_KERNEL;
	p = (void *) kmem_malloc_domain(domain, bytes, wait);

	return (p);
}

/*
 * Allocates a number of pages from within an object
 *
 * Arguments:
 *	bytes  The number of bytes requested
 *	wait   Shall we wait?
1138 * 1139 * Returns: 1140 * A pointer to the alloced memory or possibly 1141 * NULL if M_NOWAIT is set. 1142 */ 1143 static void * 1144 noobj_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags, 1145 int wait) 1146 { 1147 TAILQ_HEAD(, vm_page) alloctail; 1148 u_long npages; 1149 vm_offset_t retkva, zkva; 1150 vm_page_t p, p_next; 1151 uma_keg_t keg; 1152 1153 TAILQ_INIT(&alloctail); 1154 keg = zone_first_keg(zone); 1155 1156 npages = howmany(bytes, PAGE_SIZE); 1157 while (npages > 0) { 1158 p = vm_page_alloc_domain(NULL, 0, domain, VM_ALLOC_INTERRUPT | 1159 VM_ALLOC_WIRED | VM_ALLOC_NOOBJ | 1160 ((wait & M_WAITOK) != 0 ? VM_ALLOC_WAITOK : 1161 VM_ALLOC_NOWAIT)); 1162 if (p != NULL) { 1163 /* 1164 * Since the page does not belong to an object, its 1165 * listq is unused. 1166 */ 1167 TAILQ_INSERT_TAIL(&alloctail, p, listq); 1168 npages--; 1169 continue; 1170 } 1171 /* 1172 * Page allocation failed, free intermediate pages and 1173 * exit. 1174 */ 1175 TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) { 1176 vm_page_unwire(p, PQ_NONE); 1177 vm_page_free(p); 1178 } 1179 return (NULL); 1180 } 1181 *flags = UMA_SLAB_PRIV; 1182 zkva = keg->uk_kva + 1183 atomic_fetchadd_long(&keg->uk_offset, round_page(bytes)); 1184 retkva = zkva; 1185 TAILQ_FOREACH(p, &alloctail, listq) { 1186 pmap_qenter(zkva, &p, 1); 1187 zkva += PAGE_SIZE; 1188 } 1189 1190 return ((void *)retkva); 1191 } 1192 1193 /* 1194 * Frees a number of pages to the system 1195 * 1196 * Arguments: 1197 * mem A pointer to the memory to be freed 1198 * size The size of the memory being freed 1199 * flags The original p->us_flags field 1200 * 1201 * Returns: 1202 * Nothing 1203 */ 1204 static void 1205 page_free(void *mem, vm_size_t size, uint8_t flags) 1206 { 1207 struct vmem *vmem; 1208 1209 if (flags & UMA_SLAB_KERNEL) 1210 vmem = kernel_arena; 1211 else 1212 panic("UMA: page_free used with invalid flags %x", flags); 1213 1214 kmem_free(vmem, (vm_offset_t)mem, size); 1215 } 1216 1217 /* 1218 * Zero fill initializer 1219 * 1220 * Arguments/Returns follow uma_init specifications 1221 */ 1222 static int 1223 zero_init(void *mem, int size, int flags) 1224 { 1225 bzero(mem, size); 1226 return (0); 1227 } 1228 1229 /* 1230 * Finish creating a small uma keg. This calculates ipers, and the keg size. 1231 * 1232 * Arguments 1233 * keg The zone we should initialize 1234 * 1235 * Returns 1236 * Nothing 1237 */ 1238 static void 1239 keg_small_init(uma_keg_t keg) 1240 { 1241 u_int rsize; 1242 u_int memused; 1243 u_int wastedspace; 1244 u_int shsize; 1245 u_int slabsize; 1246 1247 if (keg->uk_flags & UMA_ZONE_PCPU) { 1248 u_int ncpus = (mp_maxid + 1) ? (mp_maxid + 1) : MAXCPU; 1249 1250 slabsize = sizeof(struct pcpu); 1251 keg->uk_ppera = howmany(ncpus * sizeof(struct pcpu), 1252 PAGE_SIZE); 1253 } else { 1254 slabsize = UMA_SLAB_SIZE; 1255 keg->uk_ppera = 1; 1256 } 1257 1258 /* 1259 * Calculate the size of each allocation (rsize) according to 1260 * alignment. If the requested size is smaller than we have 1261 * allocation bits for we round it up. 
1262 */ 1263 rsize = keg->uk_size; 1264 if (rsize < slabsize / SLAB_SETSIZE) 1265 rsize = slabsize / SLAB_SETSIZE; 1266 if (rsize & keg->uk_align) 1267 rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1); 1268 keg->uk_rsize = rsize; 1269 1270 KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0 || 1271 keg->uk_rsize < sizeof(struct pcpu), 1272 ("%s: size %u too large", __func__, keg->uk_rsize)); 1273 1274 if (keg->uk_flags & UMA_ZONE_OFFPAGE) 1275 shsize = 0; 1276 else 1277 shsize = sizeof(struct uma_slab); 1278 1279 keg->uk_ipers = (slabsize - shsize) / rsize; 1280 KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE, 1281 ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers)); 1282 1283 memused = keg->uk_ipers * rsize + shsize; 1284 wastedspace = slabsize - memused; 1285 1286 /* 1287 * We can't do OFFPAGE if we're internal or if we've been 1288 * asked to not go to the VM for buckets. If we do this we 1289 * may end up going to the VM for slabs which we do not 1290 * want to do if we're UMA_ZFLAG_CACHEONLY as a result 1291 * of UMA_ZONE_VM, which clearly forbids it. 1292 */ 1293 if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) || 1294 (keg->uk_flags & UMA_ZFLAG_CACHEONLY)) 1295 return; 1296 1297 /* 1298 * See if using an OFFPAGE slab will limit our waste. Only do 1299 * this if it permits more items per-slab. 1300 * 1301 * XXX We could try growing slabsize to limit max waste as well. 1302 * Historically this was not done because the VM could not 1303 * efficiently handle contiguous allocations. 1304 */ 1305 if ((wastedspace >= slabsize / UMA_MAX_WASTE) && 1306 (keg->uk_ipers < (slabsize / keg->uk_rsize))) { 1307 keg->uk_ipers = slabsize / keg->uk_rsize; 1308 KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE, 1309 ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers)); 1310 CTR6(KTR_UMA, "UMA decided we need offpage slab headers for " 1311 "keg: %s(%p), calculated wastedspace = %d, " 1312 "maximum wasted space allowed = %d, " 1313 "calculated ipers = %d, " 1314 "new wasted space = %d\n", keg->uk_name, keg, wastedspace, 1315 slabsize / UMA_MAX_WASTE, keg->uk_ipers, 1316 slabsize - keg->uk_ipers * keg->uk_rsize); 1317 keg->uk_flags |= UMA_ZONE_OFFPAGE; 1318 } 1319 1320 if ((keg->uk_flags & UMA_ZONE_OFFPAGE) && 1321 (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0) 1322 keg->uk_flags |= UMA_ZONE_HASH; 1323 } 1324 1325 /* 1326 * Finish creating a large (> UMA_SLAB_SIZE) uma kegs. Just give in and do 1327 * OFFPAGE for now. When I can allow for more dynamic slab sizes this will be 1328 * more complicated. 1329 * 1330 * Arguments 1331 * keg The keg we should initialize 1332 * 1333 * Returns 1334 * Nothing 1335 */ 1336 static void 1337 keg_large_init(uma_keg_t keg) 1338 { 1339 u_int shsize; 1340 1341 KASSERT(keg != NULL, ("Keg is null in keg_large_init")); 1342 KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0, 1343 ("keg_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY keg")); 1344 KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0, 1345 ("%s: Cannot large-init a UMA_ZONE_PCPU keg", __func__)); 1346 1347 keg->uk_ppera = howmany(keg->uk_size, PAGE_SIZE); 1348 keg->uk_ipers = 1; 1349 keg->uk_rsize = keg->uk_size; 1350 1351 /* Check whether we have enough space to not do OFFPAGE. 
	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) == 0) {
		shsize = sizeof(struct uma_slab);
		if (shsize & UMA_ALIGN_PTR)
			shsize = (shsize & ~UMA_ALIGN_PTR) +
			    (UMA_ALIGN_PTR + 1);

		if (PAGE_SIZE * keg->uk_ppera - keg->uk_rsize < shsize) {
			/*
			 * We can't do OFFPAGE if we're internal, in which case
			 * we need an extra page per allocation to contain the
			 * slab header.
			 */
			if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) == 0)
				keg->uk_flags |= UMA_ZONE_OFFPAGE;
			else
				keg->uk_ppera++;
		}
	}

	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
	    (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
		keg->uk_flags |= UMA_ZONE_HASH;
}

static void
keg_cachespread_init(uma_keg_t keg)
{
	int alignsize;
	int trailer;
	int pages;
	int rsize;

	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
	    ("%s: Cannot cachespread-init a UMA_ZONE_PCPU keg", __func__));

	alignsize = keg->uk_align + 1;
	rsize = keg->uk_size;
	/*
	 * We want one item to start on every align boundary in a page.  To
	 * do this we will span pages.  We will also extend the item by the
	 * size of align if it is an even multiple of align.  Otherwise, it
	 * would fall on the same boundary every time.
	 */
	if (rsize & keg->uk_align)
		rsize = (rsize & ~keg->uk_align) + alignsize;
	if ((rsize & alignsize) == 0)
		rsize += alignsize;
	trailer = rsize - keg->uk_size;
	pages = (rsize * (PAGE_SIZE / alignsize)) / PAGE_SIZE;
	pages = MIN(pages, (128 * 1024) / PAGE_SIZE);
	keg->uk_rsize = rsize;
	keg->uk_ppera = pages;
	keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize;
	keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
	KASSERT(keg->uk_ipers <= SLAB_SETSIZE,
	    ("%s: keg->uk_ipers too high(%d) increase max_ipers", __func__,
	    keg->uk_ipers));
}

/*
 * Keg header ctor.  This initializes all fields, locks, etc., and inserts
 * the keg onto the global keg list.
 *
 * Arguments/Returns follow uma_ctor specifications
 *	udata  Actually uma_kctor_args
 */
static int
keg_ctor(void *mem, int size, void *udata, int flags)
{
	struct uma_kctor_args *arg = udata;
	uma_keg_t keg = mem;
	uma_zone_t zone;

	bzero(keg, size);
	keg->uk_size = arg->size;
	keg->uk_init = arg->uminit;
	keg->uk_fini = arg->fini;
	keg->uk_align = arg->align;
	keg->uk_cursor = 0;
	keg->uk_free = 0;
	keg->uk_reserve = 0;
	keg->uk_pages = 0;
	keg->uk_flags = arg->flags;
	keg->uk_slabzone = NULL;

	/*
	 * The master zone is passed to us at keg-creation time.
1439 */ 1440 zone = arg->zone; 1441 keg->uk_name = zone->uz_name; 1442 1443 if (arg->flags & UMA_ZONE_VM) 1444 keg->uk_flags |= UMA_ZFLAG_CACHEONLY; 1445 1446 if (arg->flags & UMA_ZONE_ZINIT) 1447 keg->uk_init = zero_init; 1448 1449 if (arg->flags & UMA_ZONE_MALLOC) 1450 keg->uk_flags |= UMA_ZONE_VTOSLAB; 1451 1452 if (arg->flags & UMA_ZONE_PCPU) 1453 #ifdef SMP 1454 keg->uk_flags |= UMA_ZONE_OFFPAGE; 1455 #else 1456 keg->uk_flags &= ~UMA_ZONE_PCPU; 1457 #endif 1458 1459 if (keg->uk_flags & UMA_ZONE_CACHESPREAD) { 1460 keg_cachespread_init(keg); 1461 } else { 1462 if (keg->uk_size > (UMA_SLAB_SIZE - sizeof(struct uma_slab))) 1463 keg_large_init(keg); 1464 else 1465 keg_small_init(keg); 1466 } 1467 1468 if (keg->uk_flags & UMA_ZONE_OFFPAGE) 1469 keg->uk_slabzone = slabzone; 1470 1471 /* 1472 * If we haven't booted yet we need allocations to go through the 1473 * startup cache until the vm is ready. 1474 */ 1475 if (booted < UMA_STARTUP2) 1476 keg->uk_allocf = startup_alloc; 1477 #ifdef UMA_MD_SMALL_ALLOC 1478 else if (keg->uk_ppera == 1) 1479 keg->uk_allocf = uma_small_alloc; 1480 #endif 1481 else 1482 keg->uk_allocf = page_alloc; 1483 #ifdef UMA_MD_SMALL_ALLOC 1484 if (keg->uk_ppera == 1) 1485 keg->uk_freef = uma_small_free; 1486 else 1487 #endif 1488 keg->uk_freef = page_free; 1489 1490 /* 1491 * Initialize keg's lock 1492 */ 1493 KEG_LOCK_INIT(keg, (arg->flags & UMA_ZONE_MTXCLASS)); 1494 1495 /* 1496 * If we're putting the slab header in the actual page we need to 1497 * figure out where in each page it goes. This calculates a right 1498 * justified offset into the memory on an ALIGN_PTR boundary. 1499 */ 1500 if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) { 1501 u_int totsize; 1502 1503 /* Size of the slab struct and free list */ 1504 totsize = sizeof(struct uma_slab); 1505 1506 if (totsize & UMA_ALIGN_PTR) 1507 totsize = (totsize & ~UMA_ALIGN_PTR) + 1508 (UMA_ALIGN_PTR + 1); 1509 keg->uk_pgoff = (PAGE_SIZE * keg->uk_ppera) - totsize; 1510 1511 /* 1512 * The only way the following is possible is if with our 1513 * UMA_ALIGN_PTR adjustments we are now bigger than 1514 * UMA_SLAB_SIZE. I haven't checked whether this is 1515 * mathematically possible for all cases, so we make 1516 * sure here anyway. 1517 */ 1518 totsize = keg->uk_pgoff + sizeof(struct uma_slab); 1519 if (totsize > PAGE_SIZE * keg->uk_ppera) { 1520 printf("zone %s ipers %d rsize %d size %d\n", 1521 zone->uz_name, keg->uk_ipers, keg->uk_rsize, 1522 keg->uk_size); 1523 panic("UMA slab won't fit."); 1524 } 1525 } 1526 1527 if (keg->uk_flags & UMA_ZONE_HASH) 1528 hash_alloc(&keg->uk_hash); 1529 1530 CTR5(KTR_UMA, "keg_ctor %p zone %s(%p) out %d free %d\n", 1531 keg, zone->uz_name, zone, 1532 (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free, 1533 keg->uk_free); 1534 1535 LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link); 1536 1537 rw_wlock(&uma_rwlock); 1538 LIST_INSERT_HEAD(&uma_kegs, keg, uk_link); 1539 rw_wunlock(&uma_rwlock); 1540 return (0); 1541 } 1542 1543 /* 1544 * Zone header ctor. This initializes all fields, locks, etc. 
1545 * 1546 * Arguments/Returns follow uma_ctor specifications 1547 * udata Actually uma_zctor_args 1548 */ 1549 static int 1550 zone_ctor(void *mem, int size, void *udata, int flags) 1551 { 1552 struct uma_zctor_args *arg = udata; 1553 uma_zone_t zone = mem; 1554 uma_zone_t z; 1555 uma_keg_t keg; 1556 1557 bzero(zone, size); 1558 zone->uz_name = arg->name; 1559 zone->uz_ctor = arg->ctor; 1560 zone->uz_dtor = arg->dtor; 1561 zone->uz_slab = zone_fetch_slab; 1562 zone->uz_init = NULL; 1563 zone->uz_fini = NULL; 1564 zone->uz_allocs = 0; 1565 zone->uz_frees = 0; 1566 zone->uz_fails = 0; 1567 zone->uz_sleeps = 0; 1568 zone->uz_count = 0; 1569 zone->uz_count_min = 0; 1570 zone->uz_flags = 0; 1571 zone->uz_warning = NULL; 1572 /* The domain structures follow the cpu structures. */ 1573 zone->uz_domain = (struct uma_zone_domain *)&zone->uz_cpu[mp_ncpus]; 1574 timevalclear(&zone->uz_ratecheck); 1575 keg = arg->keg; 1576 1577 ZONE_LOCK_INIT(zone, (arg->flags & UMA_ZONE_MTXCLASS)); 1578 1579 /* 1580 * This is a pure cache zone, no kegs. 1581 */ 1582 if (arg->import) { 1583 if (arg->flags & UMA_ZONE_VM) 1584 arg->flags |= UMA_ZFLAG_CACHEONLY; 1585 zone->uz_flags = arg->flags; 1586 zone->uz_size = arg->size; 1587 zone->uz_import = arg->import; 1588 zone->uz_release = arg->release; 1589 zone->uz_arg = arg->arg; 1590 zone->uz_lockptr = &zone->uz_lock; 1591 rw_wlock(&uma_rwlock); 1592 LIST_INSERT_HEAD(&uma_cachezones, zone, uz_link); 1593 rw_wunlock(&uma_rwlock); 1594 goto out; 1595 } 1596 1597 /* 1598 * Use the regular zone/keg/slab allocator. 1599 */ 1600 zone->uz_import = (uma_import)zone_import; 1601 zone->uz_release = (uma_release)zone_release; 1602 zone->uz_arg = zone; 1603 1604 if (arg->flags & UMA_ZONE_SECONDARY) { 1605 KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg")); 1606 zone->uz_init = arg->uminit; 1607 zone->uz_fini = arg->fini; 1608 zone->uz_lockptr = &keg->uk_lock; 1609 zone->uz_flags |= UMA_ZONE_SECONDARY; 1610 rw_wlock(&uma_rwlock); 1611 ZONE_LOCK(zone); 1612 LIST_FOREACH(z, &keg->uk_zones, uz_link) { 1613 if (LIST_NEXT(z, uz_link) == NULL) { 1614 LIST_INSERT_AFTER(z, zone, uz_link); 1615 break; 1616 } 1617 } 1618 ZONE_UNLOCK(zone); 1619 rw_wunlock(&uma_rwlock); 1620 } else if (keg == NULL) { 1621 if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini, 1622 arg->align, arg->flags)) == NULL) 1623 return (ENOMEM); 1624 } else { 1625 struct uma_kctor_args karg; 1626 int error; 1627 1628 /* We should only be here from uma_startup() */ 1629 karg.size = arg->size; 1630 karg.uminit = arg->uminit; 1631 karg.fini = arg->fini; 1632 karg.align = arg->align; 1633 karg.flags = arg->flags; 1634 karg.zone = zone; 1635 error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg, 1636 flags); 1637 if (error) 1638 return (error); 1639 } 1640 1641 /* 1642 * Link in the first keg. 1643 */ 1644 zone->uz_klink.kl_keg = keg; 1645 LIST_INSERT_HEAD(&zone->uz_kegs, &zone->uz_klink, kl_link); 1646 zone->uz_lockptr = &keg->uk_lock; 1647 zone->uz_size = keg->uk_size; 1648 zone->uz_flags |= (keg->uk_flags & 1649 (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT)); 1650 1651 /* 1652 * Some internal zones don't have room allocated for the per cpu 1653 * caches. If we're internal, bail out here. 
1654 */ 1655 if (keg->uk_flags & UMA_ZFLAG_INTERNAL) { 1656 KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0, 1657 ("Secondary zone requested UMA_ZFLAG_INTERNAL")); 1658 return (0); 1659 } 1660 1661 out: 1662 if ((arg->flags & UMA_ZONE_MAXBUCKET) == 0) 1663 zone->uz_count = bucket_select(zone->uz_size); 1664 else 1665 zone->uz_count = BUCKET_MAX; 1666 zone->uz_count_min = zone->uz_count; 1667 1668 return (0); 1669 } 1670 1671 /* 1672 * Keg header dtor. This frees all data, destroys locks, frees the hash 1673 * table and removes the keg from the global list. 1674 * 1675 * Arguments/Returns follow uma_dtor specifications 1676 * udata unused 1677 */ 1678 static void 1679 keg_dtor(void *arg, int size, void *udata) 1680 { 1681 uma_keg_t keg; 1682 1683 keg = (uma_keg_t)arg; 1684 KEG_LOCK(keg); 1685 if (keg->uk_free != 0) { 1686 printf("Freed UMA keg (%s) was not empty (%d items). " 1687 " Lost %d pages of memory.\n", 1688 keg->uk_name ? keg->uk_name : "", 1689 keg->uk_free, keg->uk_pages); 1690 } 1691 KEG_UNLOCK(keg); 1692 1693 hash_free(&keg->uk_hash); 1694 1695 KEG_LOCK_FINI(keg); 1696 } 1697 1698 /* 1699 * Zone header dtor. 1700 * 1701 * Arguments/Returns follow uma_dtor specifications 1702 * udata unused 1703 */ 1704 static void 1705 zone_dtor(void *arg, int size, void *udata) 1706 { 1707 uma_klink_t klink; 1708 uma_zone_t zone; 1709 uma_keg_t keg; 1710 1711 zone = (uma_zone_t)arg; 1712 keg = zone_first_keg(zone); 1713 1714 if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL)) 1715 cache_drain(zone); 1716 1717 rw_wlock(&uma_rwlock); 1718 LIST_REMOVE(zone, uz_link); 1719 rw_wunlock(&uma_rwlock); 1720 /* 1721 * XXX there are some races here where 1722 * the zone can be drained but zone lock 1723 * released and then refilled before we 1724 * remove it... we dont care for now 1725 */ 1726 zone_drain_wait(zone, M_WAITOK); 1727 /* 1728 * Unlink all of our kegs. 1729 */ 1730 while ((klink = LIST_FIRST(&zone->uz_kegs)) != NULL) { 1731 klink->kl_keg = NULL; 1732 LIST_REMOVE(klink, kl_link); 1733 if (klink == &zone->uz_klink) 1734 continue; 1735 free(klink, M_TEMP); 1736 } 1737 /* 1738 * We only destroy kegs from non secondary zones. 1739 */ 1740 if (keg != NULL && (zone->uz_flags & UMA_ZONE_SECONDARY) == 0) { 1741 rw_wlock(&uma_rwlock); 1742 LIST_REMOVE(keg, uk_link); 1743 rw_wunlock(&uma_rwlock); 1744 zone_free_item(kegs, keg, NULL, SKIP_NONE); 1745 } 1746 ZONE_LOCK_FINI(zone); 1747 } 1748 1749 /* 1750 * Traverses every zone in the system and calls a callback 1751 * 1752 * Arguments: 1753 * zfunc A pointer to a function which accepts a zone 1754 * as an argument. 1755 * 1756 * Returns: 1757 * Nothing 1758 */ 1759 static void 1760 zone_foreach(void (*zfunc)(uma_zone_t)) 1761 { 1762 uma_keg_t keg; 1763 uma_zone_t zone; 1764 1765 rw_rlock(&uma_rwlock); 1766 LIST_FOREACH(keg, &uma_kegs, uk_link) { 1767 LIST_FOREACH(zone, &keg->uk_zones, uz_link) 1768 zfunc(zone); 1769 } 1770 rw_runlock(&uma_rwlock); 1771 } 1772 1773 /* Public functions */ 1774 /* See uma.h */ 1775 void 1776 uma_startup(void *mem, int npages) 1777 { 1778 struct uma_zctor_args args; 1779 uma_keg_t masterkeg; 1780 uintptr_t m; 1781 int zsize; 1782 int ksize; 1783 1784 rw_init(&uma_rwlock, "UMA lock"); 1785 1786 ksize = sizeof(struct uma_keg) + 1787 (sizeof(struct uma_domain) * vm_ndomains); 1788 zsize = sizeof(struct uma_zone) + 1789 (sizeof(struct uma_cache) * mp_ncpus) + 1790 (sizeof(struct uma_zone_domain) * vm_ndomains); 1791 1792 /* Use bootpages memory for the zone of zones and zone of kegs. 
	m = (uintptr_t)mem;
	zones = (uma_zone_t)m;
	m += roundup(zsize, CACHE_LINE_SIZE);
	kegs = (uma_zone_t)m;
	m += roundup(zsize, CACHE_LINE_SIZE);
	masterkeg = (uma_keg_t)m;
	m += roundup(ksize, CACHE_LINE_SIZE);
	m = roundup(m, PAGE_SIZE);
	npages -= (m - (uintptr_t)mem) / PAGE_SIZE;
	mem = (void *)m;

	/* "manually" create the initial zone */
	memset(&args, 0, sizeof(args));
	args.name = "UMA Kegs";
	args.size = ksize;
	args.ctor = keg_ctor;
	args.dtor = keg_dtor;
	args.uminit = zero_init;
	args.fini = NULL;
	args.keg = masterkeg;
	args.align = 32 - 1;
	args.flags = UMA_ZFLAG_INTERNAL;
	zone_ctor(kegs, zsize, &args, M_WAITOK);

	mtx_init(&uma_boot_pages_mtx, "UMA boot pages", NULL, MTX_DEF);
	bootmem = mem;
	boot_pages = npages;

	args.name = "UMA Zones";
	args.size = sizeof(struct uma_zone) +
	    (sizeof(struct uma_cache) * (mp_maxid + 1)) +
	    (sizeof(struct uma_zone_domain) * vm_ndomains);
	args.ctor = zone_ctor;
	args.dtor = zone_dtor;
	args.uminit = zero_init;
	args.fini = NULL;
	args.keg = NULL;
	args.align = 32 - 1;
	args.flags = UMA_ZFLAG_INTERNAL;
	zone_ctor(zones, zsize, &args, M_WAITOK);

	/* Now make a zone for slab headers */
	slabzone = uma_zcreate("UMA Slabs",
	    sizeof(struct uma_slab),
	    NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);

	hashzone = uma_zcreate("UMA Hash",
	    sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
	    NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);

	bucket_init();

	booted = UMA_STARTUP;
}

/* see uma.h */
void
uma_startup2(void)
{
	booted = UMA_STARTUP2;
	bucket_enable();
	sx_init(&uma_drain_lock, "umadrain");
}

/*
 * Initialize our callout handle
 */
static void
uma_startup3(void)
{

	callout_init(&uma_callout, 1);
	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
}

static uma_keg_t
uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
    int align, uint32_t flags)
{
	struct uma_kctor_args args;

	args.size = size;
	args.uminit = uminit;
	args.fini = fini;
	args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align;
	args.flags = flags;
	args.zone = zone;
	return (zone_alloc_item(kegs, &args, UMA_ANYDOMAIN, M_WAITOK));
}

/* See uma.h */
void
uma_set_align(int align)
{

	if (align != UMA_ALIGN_CACHE)
		uma_align_cache = align;
}

/* See uma.h */
uma_zone_t
uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
    uma_init uminit, uma_fini fini, int align, uint32_t flags)
{
	struct uma_zctor_args args;
	uma_zone_t res;
	bool locked;

	KASSERT(powerof2(align + 1), ("invalid zone alignment %d for \"%s\"",
	    align, name));

	/* This stuff is essential for the zone ctor */
	memset(&args, 0, sizeof(args));
	args.name = name;
	args.size = size;
	args.ctor = ctor;
	args.dtor = dtor;
	args.uminit = uminit;
	args.fini = fini;
#ifdef INVARIANTS
	/*
	 * If a zone is being created with an empty constructor and
	 * destructor, pass UMA constructor/destructor which checks for
	 * memory use after free.
	 */
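	/*
	 * Illustrative note (behavior lives in vm/uma_dbg.c): trash_dtor
	 * fills a freed item with a junk pattern and trash_ctor verifies
	 * that the pattern is still intact when the item is allocated
	 * again, catching writes to freed memory.
	 */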
1922 */ 1923 if ((!(flags & (UMA_ZONE_ZINIT | UMA_ZONE_NOFREE))) && 1924 ctor == NULL && dtor == NULL && uminit == NULL && fini == NULL) { 1925 args.ctor = trash_ctor; 1926 args.dtor = trash_dtor; 1927 args.uminit = trash_init; 1928 args.fini = trash_fini; 1929 } 1930 #endif 1931 args.align = align; 1932 args.flags = flags; 1933 args.keg = NULL; 1934 1935 if (booted < UMA_STARTUP2) { 1936 locked = false; 1937 } else { 1938 sx_slock(&uma_drain_lock); 1939 locked = true; 1940 } 1941 res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK); 1942 if (locked) 1943 sx_sunlock(&uma_drain_lock); 1944 return (res); 1945 } 1946 1947 /* See uma.h */ 1948 uma_zone_t 1949 uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor, 1950 uma_init zinit, uma_fini zfini, uma_zone_t master) 1951 { 1952 struct uma_zctor_args args; 1953 uma_keg_t keg; 1954 uma_zone_t res; 1955 bool locked; 1956 1957 keg = zone_first_keg(master); 1958 memset(&args, 0, sizeof(args)); 1959 args.name = name; 1960 args.size = keg->uk_size; 1961 args.ctor = ctor; 1962 args.dtor = dtor; 1963 args.uminit = zinit; 1964 args.fini = zfini; 1965 args.align = keg->uk_align; 1966 args.flags = keg->uk_flags | UMA_ZONE_SECONDARY; 1967 args.keg = keg; 1968 1969 if (booted < UMA_STARTUP2) { 1970 locked = false; 1971 } else { 1972 sx_slock(&uma_drain_lock); 1973 locked = true; 1974 } 1975 /* XXX Attaches only one keg of potentially many. */ 1976 res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK); 1977 if (locked) 1978 sx_sunlock(&uma_drain_lock); 1979 return (res); 1980 } 1981 1982 /* See uma.h */ 1983 uma_zone_t 1984 uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor, 1985 uma_init zinit, uma_fini zfini, uma_import zimport, 1986 uma_release zrelease, void *arg, int flags) 1987 { 1988 struct uma_zctor_args args; 1989 1990 memset(&args, 0, sizeof(args)); 1991 args.name = name; 1992 args.size = size; 1993 args.ctor = ctor; 1994 args.dtor = dtor; 1995 args.uminit = zinit; 1996 args.fini = zfini; 1997 args.import = zimport; 1998 args.release = zrelease; 1999 args.arg = arg; 2000 args.align = 0; 2001 args.flags = flags; 2002 2003 return (zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK)); 2004 } 2005 2006 static void 2007 zone_lock_pair(uma_zone_t a, uma_zone_t b) 2008 { 2009 if (a < b) { 2010 ZONE_LOCK(a); 2011 mtx_lock_flags(b->uz_lockptr, MTX_DUPOK); 2012 } else { 2013 ZONE_LOCK(b); 2014 mtx_lock_flags(a->uz_lockptr, MTX_DUPOK); 2015 } 2016 } 2017 2018 static void 2019 zone_unlock_pair(uma_zone_t a, uma_zone_t b) 2020 { 2021 2022 ZONE_UNLOCK(a); 2023 ZONE_UNLOCK(b); 2024 } 2025 2026 int 2027 uma_zsecond_add(uma_zone_t zone, uma_zone_t master) 2028 { 2029 uma_klink_t klink; 2030 uma_klink_t kl; 2031 int error; 2032 2033 error = 0; 2034 klink = malloc(sizeof(*klink), M_TEMP, M_WAITOK | M_ZERO); 2035 2036 zone_lock_pair(zone, master); 2037 /* 2038 * zone must use vtoslab() to resolve objects and must already be 2039 * a secondary. 2040 */ 2041 if ((zone->uz_flags & (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) 2042 != (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) { 2043 error = EINVAL; 2044 goto out; 2045 } 2046 /* 2047 * The new master must also use vtoslab(). 2048 */ 2049 if ((zone->uz_flags & UMA_ZONE_VTOSLAB) != UMA_ZONE_VTOSLAB) { 2050 error = EINVAL; 2051 goto out; 2052 } 2053 2054 /* 2055 * The underlying object must be the same size. rsize 2056 * may be different. 2057 */ 2058 if (master->uz_size != zone->uz_size) { 2059 error = E2BIG; 2060 goto out; 2061 } 2062 /* 2063 * Put it at the end of the list. 

/* See uma.h */
uma_zone_t
uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
    uma_init zinit, uma_fini zfini, uma_zone_t master)
{
	struct uma_zctor_args args;
	uma_keg_t keg;
	uma_zone_t res;
	bool locked;

	keg = zone_first_keg(master);
	memset(&args, 0, sizeof(args));
	args.name = name;
	args.size = keg->uk_size;
	args.ctor = ctor;
	args.dtor = dtor;
	args.uminit = zinit;
	args.fini = zfini;
	args.align = keg->uk_align;
	args.flags = keg->uk_flags | UMA_ZONE_SECONDARY;
	args.keg = keg;

	if (booted < UMA_STARTUP2) {
		locked = false;
	} else {
		sx_slock(&uma_drain_lock);
		locked = true;
	}
	/* XXX Attaches only one keg of potentially many. */
	res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK);
	if (locked)
		sx_sunlock(&uma_drain_lock);
	return (res);
}

/* See uma.h */
uma_zone_t
uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor,
    uma_init zinit, uma_fini zfini, uma_import zimport,
    uma_release zrelease, void *arg, int flags)
{
	struct uma_zctor_args args;

	memset(&args, 0, sizeof(args));
	args.name = name;
	args.size = size;
	args.ctor = ctor;
	args.dtor = dtor;
	args.uminit = zinit;
	args.fini = zfini;
	args.import = zimport;
	args.release = zrelease;
	args.arg = arg;
	args.align = 0;
	args.flags = flags;

	return (zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK));
}

static void
zone_lock_pair(uma_zone_t a, uma_zone_t b)
{
	if (a < b) {
		ZONE_LOCK(a);
		mtx_lock_flags(b->uz_lockptr, MTX_DUPOK);
	} else {
		ZONE_LOCK(b);
		mtx_lock_flags(a->uz_lockptr, MTX_DUPOK);
	}
}

static void
zone_unlock_pair(uma_zone_t a, uma_zone_t b)
{

	ZONE_UNLOCK(a);
	ZONE_UNLOCK(b);
}

int
uma_zsecond_add(uma_zone_t zone, uma_zone_t master)
{
	uma_klink_t klink;
	uma_klink_t kl;
	int error;

	error = 0;
	klink = malloc(sizeof(*klink), M_TEMP, M_WAITOK | M_ZERO);

	zone_lock_pair(zone, master);
	/*
	 * zone must use vtoslab() to resolve objects and must already be
	 * a secondary.
	 */
	if ((zone->uz_flags & (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY))
	    != (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) {
		error = EINVAL;
		goto out;
	}
	/*
	 * The new master must also use vtoslab().
	 */
	if ((master->uz_flags & UMA_ZONE_VTOSLAB) != UMA_ZONE_VTOSLAB) {
		error = EINVAL;
		goto out;
	}

	/*
	 * The underlying object must be the same size.  rsize
	 * may be different.
	 */
	if (master->uz_size != zone->uz_size) {
		error = E2BIG;
		goto out;
	}
	/*
	 * Put it at the end of the list.
	 */
	klink->kl_keg = zone_first_keg(master);
	LIST_FOREACH(kl, &zone->uz_kegs, kl_link) {
		if (LIST_NEXT(kl, kl_link) == NULL) {
			LIST_INSERT_AFTER(kl, klink, kl_link);
			break;
		}
	}
	klink = NULL;
	zone->uz_flags |= UMA_ZFLAG_MULTI;
	zone->uz_slab = zone_fetch_slab_multi;

out:
	zone_unlock_pair(zone, master);
	if (klink != NULL)
		free(klink, M_TEMP);

	return (error);
}
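
/*
 * Illustrative sketch (names hypothetical): secondary zones layer a
 * second ctor/dtor pair over the storage of an existing master zone,
 * in the way the mbuf code builds its packet zone from the mbuf zone:
 *
 *	bar_zone = uma_zsecond_create("bar", bar_ctor, bar_dtor,
 *	    NULL, NULL, foo_master_zone);
 *
 * uma_zsecond_add() can later attach an additional backing keg to such
 * a zone, provided both zones use vtoslab() (UMA_ZONE_VTOSLAB) and
 * their item sizes match exactly.
 */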

/* See uma.h */
void
uma_zdestroy(uma_zone_t zone)
{

	sx_slock(&uma_drain_lock);
	zone_free_item(zones, zone, NULL, SKIP_NONE);
	sx_sunlock(&uma_drain_lock);
}

void
uma_zwait(uma_zone_t zone)
{
	void *item;

	item = uma_zalloc_arg(zone, NULL, M_WAITOK);
	uma_zfree(zone, item);
}

/* See uma.h */
void *
uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
{
	uma_zone_domain_t zdom;
	uma_bucket_t bucket;
	uma_cache_t cache;
	void *item;
	int cpu, domain, lockfail;

	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
	random_harvest_fast_uma(&zone, sizeof(zone), 1, RANDOM_UMA);

	/* This is the fast path allocation */
	CTR4(KTR_UMA, "uma_zalloc_arg thread %x zone %s(%p) flags %d",
	    curthread, zone->uz_name, zone, flags);

	if (flags & M_WAITOK) {
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
		    "uma_zalloc_arg: zone \"%s\"", zone->uz_name);
	}
	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
	    ("uma_zalloc_arg: called with spinlock or critical section held"));

#ifdef DEBUG_MEMGUARD
	if (memguard_cmp_zone(zone)) {
		item = memguard_alloc(zone->uz_size, flags);
		if (item != NULL) {
			if (zone->uz_init != NULL &&
			    zone->uz_init(item, zone->uz_size, flags) != 0)
				return (NULL);
			if (zone->uz_ctor != NULL &&
			    zone->uz_ctor(item, zone->uz_size, udata,
			    flags) != 0) {
				zone->uz_fini(item, zone->uz_size);
				return (NULL);
			}
			return (item);
		}
		/* This is unfortunate but should not be fatal. */
	}
#endif
	/*
	 * If possible, allocate from the per-CPU cache.  There are two
	 * requirements for safe access to the per-CPU cache: (1) the thread
	 * accessing the cache must not be preempted or yield during access,
	 * and (2) the thread must not migrate CPUs without switching which
	 * cache it accesses.  We rely on a critical section to prevent
	 * preemption and migration.  We release the critical section in
	 * order to acquire the zone mutex if we are unable to allocate from
	 * the current cache; when we re-acquire the critical section, we
	 * must detect and handle migration if it has occurred.
	 */
	critical_enter();
	cpu = curcpu;
	cache = &zone->uz_cpu[cpu];

zalloc_start:
	bucket = cache->uc_allocbucket;
	if (bucket != NULL && bucket->ub_cnt > 0) {
		bucket->ub_cnt--;
		item = bucket->ub_bucket[bucket->ub_cnt];
#ifdef INVARIANTS
		bucket->ub_bucket[bucket->ub_cnt] = NULL;
#endif
		KASSERT(item != NULL, ("uma_zalloc: Bucket pointer mangled."));
		cache->uc_allocs++;
		critical_exit();
		if (zone->uz_ctor != NULL &&
		    zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
			atomic_add_long(&zone->uz_fails, 1);
			zone_free_item(zone, item, udata, SKIP_DTOR);
			return (NULL);
		}
#ifdef INVARIANTS
		uma_dbg_alloc(zone, NULL, item);
#endif
		if (flags & M_ZERO)
			uma_zero_item(item, zone);
		return (item);
	}

	/*
	 * We have run out of items in our alloc bucket.
	 * See if we can switch with our free bucket.
	 */
	bucket = cache->uc_freebucket;
	if (bucket != NULL && bucket->ub_cnt > 0) {
		CTR2(KTR_UMA,
		    "uma_zalloc: zone %s(%p) swapping empty with alloc",
		    zone->uz_name, zone);
		cache->uc_freebucket = cache->uc_allocbucket;
		cache->uc_allocbucket = bucket;
		goto zalloc_start;
	}

	/*
	 * Discard any empty allocation bucket while we hold no locks.
	 */
	bucket = cache->uc_allocbucket;
	cache->uc_allocbucket = NULL;
	critical_exit();
	if (bucket != NULL)
		bucket_free(zone, bucket, udata);

	if (zone->uz_flags & UMA_ZONE_NUMA)
		domain = PCPU_GET(domain);
	else
		domain = UMA_ANYDOMAIN;

	/* Short-circuit for zones without buckets and low memory. */
	if (zone->uz_count == 0 || bucketdisable)
		goto zalloc_item;

	/*
	 * Our attempt to retrieve the item from the per-CPU cache has
	 * failed, so we must go back to the zone.  This requires the zone
	 * lock, so we must drop the critical section, then re-acquire it
	 * when we go back to the cache.  Since the critical section is
	 * released, we may be preempted or migrate.  As such, make sure not
	 * to maintain any thread-local state specific to the cache from
	 * prior to releasing the critical section.
	 */
	lockfail = 0;
	if (ZONE_TRYLOCK(zone) == 0) {
		/* Record contention to size the buckets. */
		ZONE_LOCK(zone);
		lockfail = 1;
	}
	critical_enter();
	cpu = curcpu;
	cache = &zone->uz_cpu[cpu];

	/*
	 * Since we have locked the zone we may as well send back our stats.
	 */
	atomic_add_long(&zone->uz_allocs, cache->uc_allocs);
	atomic_add_long(&zone->uz_frees, cache->uc_frees);
	cache->uc_allocs = 0;
	cache->uc_frees = 0;

	/* See if we lost the race to fill the cache. */
	if (cache->uc_allocbucket != NULL) {
		ZONE_UNLOCK(zone);
		goto zalloc_start;
	}

	/*
	 * Check the zone's cache of buckets.
	 */
	if (domain == UMA_ANYDOMAIN)
		zdom = &zone->uz_domain[0];
	else
		zdom = &zone->uz_domain[domain];
	if ((bucket = LIST_FIRST(&zdom->uzd_buckets)) != NULL) {
		KASSERT(bucket->ub_cnt != 0,
		    ("uma_zalloc_arg: Returning an empty bucket."));

		LIST_REMOVE(bucket, ub_link);
		cache->uc_allocbucket = bucket;
		ZONE_UNLOCK(zone);
		goto zalloc_start;
	}
	/* We are no longer associated with this CPU. */
	critical_exit();

	/*
	 * We bump the uz count when the cache size is insufficient to
	 * handle the working set.
	 */
	if (lockfail && zone->uz_count < BUCKET_MAX)
		zone->uz_count++;
	ZONE_UNLOCK(zone);

	/*
	 * Now let's just fill a bucket and put it on the free list.  If that
	 * works we'll restart the allocation from the beginning and it
	 * will use the just filled bucket.
	 */
	bucket = zone_alloc_bucket(zone, udata, domain, flags);
	CTR3(KTR_UMA, "uma_zalloc: zone %s(%p) bucket zone returned %p",
	    zone->uz_name, zone, bucket);
	if (bucket != NULL) {
		ZONE_LOCK(zone);
		critical_enter();
		cpu = curcpu;
		cache = &zone->uz_cpu[cpu];
		/*
		 * See if we lost the race or were migrated.  Cache the
		 * initialized bucket to make this less likely or claim
		 * the memory directly.
		 */
		if (cache->uc_allocbucket != NULL ||
		    (zone->uz_flags & UMA_ZONE_NUMA &&
		    domain != PCPU_GET(domain)))
			LIST_INSERT_HEAD(&zdom->uzd_buckets, bucket, ub_link);
		else
			cache->uc_allocbucket = bucket;
		ZONE_UNLOCK(zone);
		goto zalloc_start;
	}

	/*
	 * We may not be able to get a bucket so return an actual item.
	 */
zalloc_item:
	item = zone_alloc_item(zone, udata, domain, flags);

	return (item);
}
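
/*
 * Illustrative caller-side sketch (hypothetical zone and struct names):
 * M_NOWAIT allocations can fail and must be checked, while M_WAITOK
 * allocations may sleep and, in general, only return NULL if a ctor or
 * init callback fails:
 *
 *	struct foo *fp;
 *
 *	fp = uma_zalloc_arg(foo_zone, NULL, M_NOWAIT | M_ZERO);
 *	if (fp == NULL)
 *		return (ENOMEM);
 *
 * M_WAITOK must never be used from contexts that cannot sleep, such as
 * within a spin lock or a critical section, as the KASSERT above
 * enforces.
 */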

void *
uma_zalloc_domain(uma_zone_t zone, void *udata, int domain, int flags)
{

	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
	random_harvest_fast_uma(&zone, sizeof(zone), 1, RANDOM_UMA);

	/* This is the fast path allocation */
	CTR5(KTR_UMA,
	    "uma_zalloc_domain thread %x zone %s(%p) domain %d flags %d",
	    curthread, zone->uz_name, zone, domain, flags);

	if (flags & M_WAITOK) {
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
		    "uma_zalloc_domain: zone \"%s\"", zone->uz_name);
	}
	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
	    ("uma_zalloc_domain: called with spinlock or critical section held"));

	return (zone_alloc_item(zone, udata, domain, flags));
}

/*
 * Find a slab with some space.  Prefer slabs that are partially used over
 * those that are totally full.  This helps to reduce fragmentation.
 *
 * If 'rr' is 1, search all domains starting from 'domain'.  Otherwise check
 * only 'domain'.
 */
static uma_slab_t
keg_first_slab(uma_keg_t keg, int domain, int rr)
{
	uma_domain_t dom;
	uma_slab_t slab;
	int start;

	KASSERT(domain >= 0 && domain < vm_ndomains,
	    ("keg_first_slab: domain %d out of range", domain));

	slab = NULL;
	start = domain;
	do {
		dom = &keg->uk_domain[domain];
		if (!LIST_EMPTY(&dom->ud_part_slab))
			return (LIST_FIRST(&dom->ud_part_slab));
		if (!LIST_EMPTY(&dom->ud_free_slab)) {
			slab = LIST_FIRST(&dom->ud_free_slab);
			LIST_REMOVE(slab, us_link);
			LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
			return (slab);
		}
		if (rr)
			domain = (domain + 1) % vm_ndomains;
	} while (domain != start);

	return (NULL);
}
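
/*
 * Worked example of the round-robin scan above (illustrative): with
 * vm_ndomains == 4 and domain == 2, the loop inspects domains in the
 * order 2, 3, 0, 1 and stops after one full revolution.  With rr == 0,
 * 'domain' never advances, so only the requested domain is examined.
 */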

static uma_slab_t
keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int rdomain, int flags)
{
	uma_domain_t dom;
	uma_slab_t slab;
	int allocflags, domain, reserve, rr, start;

	mtx_assert(&keg->uk_lock, MA_OWNED);
	slab = NULL;
	reserve = 0;
	allocflags = flags;
	if ((flags & M_USE_RESERVE) == 0)
		reserve = keg->uk_reserve;

	/*
	 * Round-robin for non first-touch zones when there is more than one
	 * domain.
	 */
	if (vm_ndomains == 1)
		rdomain = 0;
	rr = rdomain == UMA_ANYDOMAIN;
	if (rr) {
		keg->uk_cursor = (keg->uk_cursor + 1) % vm_ndomains;
		domain = start = keg->uk_cursor;
		/* Only block on the second pass. */
		if ((flags & (M_WAITOK | M_NOVM)) == M_WAITOK)
			allocflags = (allocflags & ~M_WAITOK) | M_NOWAIT;
	} else
		domain = start = rdomain;

again:
	do {
		if (keg->uk_free > reserve &&
		    (slab = keg_first_slab(keg, domain, rr)) != NULL) {
			MPASS(slab->us_keg == keg);
			return (slab);
		}

		/*
		 * M_NOVM means don't ask at all!
		 */
		if (flags & M_NOVM)
			break;

		if (keg->uk_maxpages && keg->uk_pages >= keg->uk_maxpages) {
			keg->uk_flags |= UMA_ZFLAG_FULL;
			/*
			 * If this is not a multi-zone, set the FULL bit.
			 * Otherwise slab_multi() takes care of it.
			 */
			if ((zone->uz_flags & UMA_ZFLAG_MULTI) == 0) {
				zone->uz_flags |= UMA_ZFLAG_FULL;
				zone_log_warning(zone);
				zone_maxaction(zone);
			}
			if (flags & M_NOWAIT)
				return (NULL);
			zone->uz_sleeps++;
			msleep(keg, &keg->uk_lock, PVM, "keglimit", 0);
			continue;
		}
		slab = keg_alloc_slab(keg, zone, domain, allocflags);
		/*
		 * If we got a slab here it's safe to mark it partially used
		 * and return.  We assume that the caller is going to remove
		 * at least one item.
		 */
		if (slab) {
			MPASS(slab->us_keg == keg);
			dom = &keg->uk_domain[slab->us_domain];
			LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
			return (slab);
		}
		if (rr) {
			keg->uk_cursor = (keg->uk_cursor + 1) % vm_ndomains;
			domain = keg->uk_cursor;
		}
	} while (domain != start);

	/* Retry domain scan with blocking. */
	if (allocflags != flags) {
		allocflags = flags;
		goto again;
	}

	/*
	 * We might not have been able to get a slab but another cpu
	 * could have while we were unlocked.  Check again before we
	 * fail.
	 */
	if (keg->uk_free > reserve &&
	    (slab = keg_first_slab(keg, domain, rr)) != NULL) {
		MPASS(slab->us_keg == keg);
		return (slab);
	}
	return (NULL);
}

static uma_slab_t
zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int domain, int flags)
{
	uma_slab_t slab;

	if (keg == NULL) {
		keg = zone_first_keg(zone);
		KEG_LOCK(keg);
	}

	for (;;) {
		slab = keg_fetch_slab(keg, zone, domain, flags);
		if (slab)
			return (slab);
		if (flags & (M_NOWAIT | M_NOVM))
			break;
	}
	KEG_UNLOCK(keg);
	return (NULL);
}

/*
 * zone_fetch_slab_multi:  Fetches a slab from one available keg.  Returns
 * with the keg locked.  On NULL no lock is held.
 *
 * The last pointer is used to seed the search.  It is not required.
 */
static uma_slab_t
zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int domain, int rflags)
{
	uma_klink_t klink;
	uma_slab_t slab;
	uma_keg_t keg;
	int flags;
	int empty;
	int full;

	/*
	 * Don't wait on the first pass.  This will skip limit tests
	 * as well.  We don't want to block if we can find a provider
	 * without blocking.
	 */
	flags = (rflags & ~M_WAITOK) | M_NOWAIT;
	/*
	 * Use the last slab allocated as a hint for where to start
	 * the search.
	 */
	if (last != NULL) {
		slab = keg_fetch_slab(last, zone, domain, flags);
		if (slab)
			return (slab);
		KEG_UNLOCK(last);
	}
	/*
	 * Loop until we have a slab in case of transient failures
	 * while M_WAITOK is specified.  I'm not sure this is 100%
	 * required but we've done it for so long now.
	 */
	for (;;) {
		empty = 0;
		full = 0;
		/*
		 * Search the available kegs for slabs.  Be careful to hold the
		 * correct lock while calling into the keg layer.
		 */
		LIST_FOREACH(klink, &zone->uz_kegs, kl_link) {
			keg = klink->kl_keg;
			KEG_LOCK(keg);
			if ((keg->uk_flags & UMA_ZFLAG_FULL) == 0) {
				slab = keg_fetch_slab(keg, zone, domain, flags);
				if (slab)
					return (slab);
			}
			if (keg->uk_flags & UMA_ZFLAG_FULL)
				full++;
			else
				empty++;
			KEG_UNLOCK(keg);
		}
		if (rflags & (M_NOWAIT | M_NOVM))
			break;
		flags = rflags;
		/*
		 * All kegs are full.  XXX We can't atomically check all kegs
		 * and sleep so just sleep for a short period and retry.
		 */
		if (full && !empty) {
			ZONE_LOCK(zone);
			zone->uz_flags |= UMA_ZFLAG_FULL;
			zone->uz_sleeps++;
			zone_log_warning(zone);
			zone_maxaction(zone);
			msleep(zone, zone->uz_lockptr, PVM,
			    "zonelimit", hz/100);
			zone->uz_flags &= ~UMA_ZFLAG_FULL;
			ZONE_UNLOCK(zone);
			continue;
		}
	}
	return (NULL);
}

static void *
slab_alloc_item(uma_keg_t keg, uma_slab_t slab)
{
	uma_domain_t dom;
	void *item;
	uint8_t freei;

	MPASS(keg == slab->us_keg);
	mtx_assert(&keg->uk_lock, MA_OWNED);

	freei = BIT_FFS(SLAB_SETSIZE, &slab->us_free) - 1;
	BIT_CLR(SLAB_SETSIZE, freei, &slab->us_free);
	item = slab->us_data + (keg->uk_rsize * freei);
	slab->us_freecount--;
	keg->uk_free--;

	/* Move this slab to the full list */
	if (slab->us_freecount == 0) {
		LIST_REMOVE(slab, us_link);
		dom = &keg->uk_domain[slab->us_domain];
		LIST_INSERT_HEAD(&dom->ud_full_slab, slab, us_link);
	}

	return (item);
}
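
/*
 * Worked example for slab_alloc_item() (illustrative): BIT_FFS()
 * returns the one-based index of the lowest set bit, or zero if the
 * set is empty.  If only items 3 and 5 of a slab remain free, us_free
 * has bits 3 and 5 set, BIT_FFS() returns 4, and freei becomes 3, so
 * the item at offset 3 * uk_rsize within us_data is handed out first.
 */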

static int
zone_import(uma_zone_t zone, void **bucket, int max, int domain, int flags)
{
	uma_slab_t slab;
	uma_keg_t keg;
	int stripe;
	int i;

	slab = NULL;
	keg = NULL;
	/* Try to keep the buckets totally full */
	for (i = 0; i < max; ) {
		if ((slab = zone->uz_slab(zone, keg, domain, flags)) == NULL)
			break;
		keg = slab->us_keg;
		stripe = howmany(max, vm_ndomains);
		while (slab->us_freecount && i < max) {
			bucket[i++] = slab_alloc_item(keg, slab);
			if (keg->uk_free <= keg->uk_reserve)
				break;
#ifdef NUMA
			/*
			 * If the zone is striped we pick a new slab for every
			 * N allocations.  Eliminating this conditional will
			 * instead pick a new domain for each bucket rather
			 * than stripe within each bucket.  The current option
			 * produces more fragmentation and requires more cpu
			 * time but yields better distribution.
			 */
			if ((zone->uz_flags & UMA_ZONE_NUMA) == 0 &&
			    vm_ndomains > 1 && --stripe == 0)
				break;
#endif
		}
		/* Don't block if we allocated any successfully. */
		flags &= ~M_WAITOK;
		flags |= M_NOWAIT;
	}
	if (slab != NULL)
		KEG_UNLOCK(keg);

	return (i);
}

static uma_bucket_t
zone_alloc_bucket(uma_zone_t zone, void *udata, int domain, int flags)
{
	uma_bucket_t bucket;
	int max;

	/* Don't wait for buckets, preserve caller's NOVM setting. */
	bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM));
	if (bucket == NULL)
		return (NULL);

	max = MIN(bucket->ub_entries, zone->uz_count);
	bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket,
	    max, domain, flags);

	/*
	 * Initialize the memory if necessary.
	 */
	if (bucket->ub_cnt != 0 && zone->uz_init != NULL) {
		int i;

		for (i = 0; i < bucket->ub_cnt; i++)
			if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size,
			    flags) != 0)
				break;
		/*
		 * If we couldn't initialize the whole bucket, put the
		 * rest back onto the freelist.
		 */
		if (i != bucket->ub_cnt) {
			zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i],
			    bucket->ub_cnt - i);
#ifdef INVARIANTS
			bzero(&bucket->ub_bucket[i],
			    sizeof(void *) * (bucket->ub_cnt - i));
#endif
			bucket->ub_cnt = i;
		}
	}

	if (bucket->ub_cnt == 0) {
		bucket_free(zone, bucket, udata);
		atomic_add_long(&zone->uz_fails, 1);
		return (NULL);
	}

	return (bucket);
}

/*
 * Allocates a single item from a zone.
 *
 * Arguments
 *	zone   The zone to alloc for.
 *	udata  The data to be passed to the constructor.
 *	domain The domain to allocate from or UMA_ANYDOMAIN.
 *	flags  M_WAITOK, M_NOWAIT, M_ZERO.
 *
 * Returns
 *	NULL if there is no memory and M_NOWAIT is set
 *	An item if successful
 */
static void *
zone_alloc_item(uma_zone_t zone, void *udata, int domain, int flags)
{
	void *item;

	item = NULL;

	if (zone->uz_import(zone->uz_arg, &item, 1, domain, flags) != 1)
		goto fail;
	atomic_add_long(&zone->uz_allocs, 1);

	/*
	 * We have to call both the zone's init (not the keg's init)
	 * and the zone's ctor.  This is because the item is going from
	 * a keg slab directly to the user, and the user is expecting it
	 * to be both zone-init'd as well as zone-ctor'd.
	 */
	if (zone->uz_init != NULL) {
		if (zone->uz_init(item, zone->uz_size, flags) != 0) {
			zone_free_item(zone, item, udata, SKIP_FINI);
			goto fail;
		}
	}
	if (zone->uz_ctor != NULL) {
		if (zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
			zone_free_item(zone, item, udata, SKIP_DTOR);
			goto fail;
		}
	}
#ifdef INVARIANTS
	uma_dbg_alloc(zone, NULL, item);
#endif
	if (flags & M_ZERO)
		uma_zero_item(item, zone);

	CTR3(KTR_UMA, "zone_alloc_item item %p from %s(%p)", item,
	    zone->uz_name, zone);

	return (item);

fail:
	CTR2(KTR_UMA, "zone_alloc_item failed from %s(%p)",
	    zone->uz_name, zone);
	atomic_add_long(&zone->uz_fails, 1);
	return (NULL);
}
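
/*
 * Illustrative sketch of the ctor/init split (hypothetical names; the
 * prototypes match the uma_ctor/uma_init typedefs in uma.h).  An init
 * runs once when an item's backing storage is set up, while a ctor
 * runs on every allocation, so expensive setup belongs in the init:
 *
 *	static int
 *	foo_init(void *mem, int size, int flags)
 *	{
 *		struct foo *fp = mem;
 *
 *		mtx_init(&fp->foo_mtx, "foo", NULL, MTX_DEF);
 *		return (0);
 *	}
 *
 *	static int
 *	foo_ctor(void *mem, int size, void *arg, int flags)
 *	{
 *		struct foo *fp = mem;
 *
 *		fp->foo_refs = 1;
 *		return (0);
 *	}
 */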

/* See uma.h */
void
uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
{
	uma_cache_t cache;
	uma_bucket_t bucket;
	uma_zone_domain_t zdom;
	int cpu, domain, lockfail;

	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
	random_harvest_fast_uma(&zone, sizeof(zone), 1, RANDOM_UMA);

	CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread,
	    zone->uz_name);

	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
	    ("uma_zfree_arg: called with spinlock or critical section held"));

	/* uma_zfree(..., NULL) does nothing, to match free(9). */
	if (item == NULL)
		return;
#ifdef DEBUG_MEMGUARD
	if (is_memguard_addr(item)) {
		if (zone->uz_dtor != NULL)
			zone->uz_dtor(item, zone->uz_size, udata);
		if (zone->uz_fini != NULL)
			zone->uz_fini(item, zone->uz_size);
		memguard_free(item);
		return;
	}
#endif
#ifdef INVARIANTS
	if (zone->uz_flags & UMA_ZONE_MALLOC)
		uma_dbg_free(zone, udata, item);
	else
		uma_dbg_free(zone, NULL, item);
#endif
	if (zone->uz_dtor != NULL)
		zone->uz_dtor(item, zone->uz_size, udata);

	/*
	 * The race here is acceptable.  If we miss it we'll just have to wait
	 * a little longer for the limits to be reset.
	 */
	if (zone->uz_flags & UMA_ZFLAG_FULL)
		goto zfree_item;

	/*
	 * If possible, free to the per-CPU cache.  There are two
	 * requirements for safe access to the per-CPU cache: (1) the thread
	 * accessing the cache must not be preempted or yield during access,
	 * and (2) the thread must not migrate CPUs without switching which
	 * cache it accesses.  We rely on a critical section to prevent
	 * preemption and migration.  We release the critical section in
	 * order to acquire the zone mutex if we are unable to free to the
	 * current cache; when we re-acquire the critical section, we must
	 * detect and handle migration if it has occurred.
	 */
zfree_restart:
	critical_enter();
	cpu = curcpu;
	cache = &zone->uz_cpu[cpu];

zfree_start:
	/*
	 * Try to free into the allocbucket first to give LIFO ordering
	 * for cache-hot data structures.  Spill over into the freebucket
	 * if necessary.  Alloc will swap them if one runs dry.
	 */
	bucket = cache->uc_allocbucket;
	if (bucket == NULL || bucket->ub_cnt >= bucket->ub_entries)
		bucket = cache->uc_freebucket;
	if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
		KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL,
		    ("uma_zfree: Freeing to non free bucket index."));
		bucket->ub_bucket[bucket->ub_cnt] = item;
		bucket->ub_cnt++;
		cache->uc_frees++;
		critical_exit();
		return;
	}

	/*
	 * We must go back to the zone, which requires acquiring the zone
	 * lock, which in turn means we must release and re-acquire the
	 * critical section.  Since the critical section is released, we may
	 * be preempted or migrate.  As such, make sure not to maintain any
	 * thread-local state specific to the cache from prior to releasing
	 * the critical section.
	 */
	critical_exit();
	if (zone->uz_count == 0 || bucketdisable)
		goto zfree_item;

	lockfail = 0;
	if (ZONE_TRYLOCK(zone) == 0) {
		/* Record contention to size the buckets. */
		ZONE_LOCK(zone);
		lockfail = 1;
	}
	critical_enter();
	cpu = curcpu;
	cache = &zone->uz_cpu[cpu];

	/*
	 * Since we have locked the zone we may as well send back our stats.
	 */
	atomic_add_long(&zone->uz_allocs, cache->uc_allocs);
	atomic_add_long(&zone->uz_frees, cache->uc_frees);
	cache->uc_allocs = 0;
	cache->uc_frees = 0;

	bucket = cache->uc_freebucket;
	if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
		ZONE_UNLOCK(zone);
		goto zfree_start;
	}
	cache->uc_freebucket = NULL;
	/* We are no longer associated with this CPU. */
	critical_exit();

	if ((zone->uz_flags & UMA_ZONE_NUMA) != 0)
		domain = PCPU_GET(domain);
	else
		domain = 0;
	zdom = &zone->uz_domain[0];

	/* Can we throw this on the zone full list? */
	if (bucket != NULL) {
		CTR3(KTR_UMA,
		    "uma_zfree: zone %s(%p) putting bucket %p on free list",
		    zone->uz_name, zone, bucket);
		/* ub_cnt is pointing to the last free item */
		KASSERT(bucket->ub_cnt != 0,
		    ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n"));
		LIST_INSERT_HEAD(&zdom->uzd_buckets, bucket, ub_link);
	}

	/*
	 * We bump the uz count when the cache size is insufficient to
	 * handle the working set.
	 */
	if (lockfail && zone->uz_count < BUCKET_MAX)
		zone->uz_count++;
	ZONE_UNLOCK(zone);

	bucket = bucket_alloc(zone, udata, M_NOWAIT);
	CTR3(KTR_UMA, "uma_zfree: zone %s(%p) allocated bucket %p",
	    zone->uz_name, zone, bucket);
	if (bucket) {
		critical_enter();
		cpu = curcpu;
		cache = &zone->uz_cpu[cpu];
		if (cache->uc_freebucket == NULL &&
		    ((zone->uz_flags & UMA_ZONE_NUMA) == 0 ||
		    domain == PCPU_GET(domain))) {
			cache->uc_freebucket = bucket;
			goto zfree_start;
		}
		/*
		 * We lost the race, start over.  We have to drop our
		 * critical section to free the bucket.
		 */
		critical_exit();
		bucket_free(zone, bucket, udata);
		goto zfree_restart;
	}

	/*
	 * If nothing else caught this, we'll just do an internal free.
	 */
zfree_item:
	zone_free_item(zone, item, udata, SKIP_DTOR);

	return;
}

void
uma_zfree_domain(uma_zone_t zone, void *item, void *udata)
{

	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
	random_harvest_fast_uma(&zone, sizeof(zone), 1, RANDOM_UMA);

	CTR2(KTR_UMA, "uma_zfree_domain thread %x zone %s", curthread,
	    zone->uz_name);

	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
	    ("uma_zfree_domain: called with spinlock or critical section held"));

	/* uma_zfree(..., NULL) does nothing, to match free(9). */
	if (item == NULL)
		return;
	zone_free_item(zone, item, udata, SKIP_NONE);
}

static void
slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item)
{
	uma_domain_t dom;
	uint8_t freei;

	mtx_assert(&keg->uk_lock, MA_OWNED);
	MPASS(keg == slab->us_keg);

	dom = &keg->uk_domain[slab->us_domain];

	/* Do we need to remove from any lists? */
	if (slab->us_freecount + 1 == keg->uk_ipers) {
		LIST_REMOVE(slab, us_link);
		LIST_INSERT_HEAD(&dom->ud_free_slab, slab, us_link);
	} else if (slab->us_freecount == 0) {
		LIST_REMOVE(slab, us_link);
		LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
	}

	/* Slab management. */
	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
	BIT_SET(SLAB_SETSIZE, freei, &slab->us_free);
	slab->us_freecount++;

	/* Keg statistics. */
	keg->uk_free++;
}

static void
zone_release(uma_zone_t zone, void **bucket, int cnt)
{
	void *item;
	uma_slab_t slab;
	uma_keg_t keg;
	uint8_t *mem;
	int clearfull;
	int i;

	clearfull = 0;
	keg = zone_first_keg(zone);
	KEG_LOCK(keg);
	for (i = 0; i < cnt; i++) {
		item = bucket[i];
		if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) {
			mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
			if (zone->uz_flags & UMA_ZONE_HASH) {
				slab = hash_sfind(&keg->uk_hash, mem);
			} else {
				mem += keg->uk_pgoff;
				slab = (uma_slab_t)mem;
			}
		} else {
			slab = vtoslab((vm_offset_t)item);
			if (slab->us_keg != keg) {
				KEG_UNLOCK(keg);
				keg = slab->us_keg;
				KEG_LOCK(keg);
			}
		}
		slab_free_item(keg, slab, item);
		if (keg->uk_flags & UMA_ZFLAG_FULL) {
			if (keg->uk_pages < keg->uk_maxpages) {
				keg->uk_flags &= ~UMA_ZFLAG_FULL;
				clearfull = 1;
			}

			/*
			 * We can handle one more allocation.  Since we're
			 * clearing ZFLAG_FULL, wake up all procs blocked
			 * on pages.  This should be uncommon, so keeping this
			 * simple for now (rather than adding count of blocked
			 * threads etc).
			 */
			wakeup(keg);
		}
	}
	KEG_UNLOCK(keg);
	if (clearfull) {
		ZONE_LOCK(zone);
		zone->uz_flags &= ~UMA_ZFLAG_FULL;
		wakeup(zone);
		ZONE_UNLOCK(zone);
	}
}

/*
 * Frees a single item to any zone.
 *
 * Arguments:
 *	zone   The zone to free to
 *	item   The item we're freeing
 *	udata  User supplied data for the dtor
 *	skip   Skip dtors and finis
 */
static void
zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip)
{

#ifdef INVARIANTS
	if (skip == SKIP_NONE) {
		if (zone->uz_flags & UMA_ZONE_MALLOC)
			uma_dbg_free(zone, udata, item);
		else
			uma_dbg_free(zone, NULL, item);
	}
#endif
	if (skip < SKIP_DTOR && zone->uz_dtor)
		zone->uz_dtor(item, zone->uz_size, udata);

	if (skip < SKIP_FINI && zone->uz_fini)
		zone->uz_fini(item, zone->uz_size);

	atomic_add_long(&zone->uz_frees, 1);
	zone->uz_release(zone->uz_arg, &item, 1);
}

/* See uma.h */
int
uma_zone_set_max(uma_zone_t zone, int nitems)
{
	uma_keg_t keg;

	keg = zone_first_keg(zone);
	if (keg == NULL)
		return (0);
	KEG_LOCK(keg);
	keg->uk_maxpages = (nitems / keg->uk_ipers) * keg->uk_ppera;
	if (keg->uk_maxpages * keg->uk_ipers < nitems)
		keg->uk_maxpages += keg->uk_ppera;
	nitems = (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers;
	KEG_UNLOCK(keg);

	return (nitems);
}

/* See uma.h */
int
uma_zone_get_max(uma_zone_t zone)
{
	int nitems;
	uma_keg_t keg;

	keg = zone_first_keg(zone);
	if (keg == NULL)
		return (0);
	KEG_LOCK(keg);
	nitems = (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers;
	KEG_UNLOCK(keg);

	return (nitems);
}
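
/*
 * Worked example for uma_zone_set_max() (hypothetical numbers): the
 * limit is kept in pages internally, so the requested item count is
 * rounded up to a whole number of slabs.  With uk_ipers == 30 and
 * uk_ppera == 1, uma_zone_set_max(zone, 1000) sets uk_maxpages to 34
 * and returns the effective limit of 34 * 30 == 1020 items, which is
 * why callers should use the return value rather than the request.
 */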

/* See uma.h */
void
uma_zone_set_warning(uma_zone_t zone, const char *warning)
{

	ZONE_LOCK(zone);
	zone->uz_warning = warning;
	ZONE_UNLOCK(zone);
}

/* See uma.h */
void
uma_zone_set_maxaction(uma_zone_t zone, uma_maxaction_t maxaction)
{

	ZONE_LOCK(zone);
	TASK_INIT(&zone->uz_maxaction, 0, (task_fn_t *)maxaction, zone);
	ZONE_UNLOCK(zone);
}

/* See uma.h */
int
uma_zone_get_cur(uma_zone_t zone)
{
	int64_t nitems;
	u_int i;

	ZONE_LOCK(zone);
	nitems = zone->uz_allocs - zone->uz_frees;
	CPU_FOREACH(i) {
		/*
		 * See the comment in sysctl_vm_zone_stats() regarding the
		 * safety of accessing the per-cpu caches.  With the zone lock
		 * held, it is safe, but can potentially result in stale data.
		 */
		nitems += zone->uz_cpu[i].uc_allocs -
		    zone->uz_cpu[i].uc_frees;
	}
	ZONE_UNLOCK(zone);

	return (nitems < 0 ? 0 : nitems);
}

/* See uma.h */
void
uma_zone_set_init(uma_zone_t zone, uma_init uminit)
{
	uma_keg_t keg;

	keg = zone_first_keg(zone);
	KASSERT(keg != NULL, ("uma_zone_set_init: Invalid zone type"));
	KEG_LOCK(keg);
	KASSERT(keg->uk_pages == 0,
	    ("uma_zone_set_init on non-empty keg"));
	keg->uk_init = uminit;
	KEG_UNLOCK(keg);
}

/* See uma.h */
void
uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
{
	uma_keg_t keg;

	keg = zone_first_keg(zone);
	KASSERT(keg != NULL, ("uma_zone_set_fini: Invalid zone type"));
	KEG_LOCK(keg);
	KASSERT(keg->uk_pages == 0,
	    ("uma_zone_set_fini on non-empty keg"));
	keg->uk_fini = fini;
	KEG_UNLOCK(keg);
}

/* See uma.h */
void
uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
{

	ZONE_LOCK(zone);
	KASSERT(zone_first_keg(zone)->uk_pages == 0,
	    ("uma_zone_set_zinit on non-empty keg"));
	zone->uz_init = zinit;
	ZONE_UNLOCK(zone);
}

/* See uma.h */
void
uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
{

	ZONE_LOCK(zone);
	KASSERT(zone_first_keg(zone)->uk_pages == 0,
	    ("uma_zone_set_zfini on non-empty keg"));
	zone->uz_fini = zfini;
	ZONE_UNLOCK(zone);
}

/* See uma.h */
/* XXX uk_freef is not actually used with the zone locked */
void
uma_zone_set_freef(uma_zone_t zone, uma_free freef)
{
	uma_keg_t keg;

	keg = zone_first_keg(zone);
	KASSERT(keg != NULL, ("uma_zone_set_freef: Invalid zone type"));
	KEG_LOCK(keg);
	keg->uk_freef = freef;
	KEG_UNLOCK(keg);
}

/* See uma.h */
/* XXX uk_allocf is not actually used with the zone locked */
void
uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
{
	uma_keg_t keg;

	keg = zone_first_keg(zone);
	KEG_LOCK(keg);
	keg->uk_allocf = allocf;
	KEG_UNLOCK(keg);
}

/* See uma.h */
void
uma_zone_reserve(uma_zone_t zone, int items)
{
	uma_keg_t keg;

	keg = zone_first_keg(zone);
	if (keg == NULL)
		return;
	KEG_LOCK(keg);
	keg->uk_reserve = items;
	KEG_UNLOCK(keg);
}
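
/*
 * Illustrative sketch (hypothetical zone name): a subsystem that must
 * be able to allocate a few items even under memory pressure can set
 * aside a reserve at initialization time and dip into it only on its
 * critical paths:
 *
 *	uma_zone_reserve(foo_zone, 8);
 *	...
 *	fp = uma_zalloc(foo_zone, M_NOWAIT | M_USE_RESERVE);
 *
 * Ordinary allocations (without M_USE_RESERVE) fail once only the
 * reserved items remain, as enforced by the uk_reserve checks in
 * keg_fetch_slab() above.
 */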

/* See uma.h */
int
uma_zone_reserve_kva(uma_zone_t zone, int count)
{
	uma_keg_t keg;
	vm_offset_t kva;
	u_int pages;

	keg = zone_first_keg(zone);
	if (keg == NULL)
		return (0);
	pages = count / keg->uk_ipers;

	if (pages * keg->uk_ipers < count)
		pages++;
	pages *= keg->uk_ppera;

#ifdef UMA_MD_SMALL_ALLOC
	if (keg->uk_ppera > 1) {
#else
	if (1) {
#endif
		kva = kva_alloc((vm_size_t)pages * PAGE_SIZE);
		if (kva == 0)
			return (0);
	} else
		kva = 0;
	KEG_LOCK(keg);
	keg->uk_kva = kva;
	keg->uk_offset = 0;
	keg->uk_maxpages = pages;
#ifdef UMA_MD_SMALL_ALLOC
	keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc;
#else
	keg->uk_allocf = noobj_alloc;
#endif
	keg->uk_flags |= UMA_ZONE_NOFREE;
	KEG_UNLOCK(keg);

	return (1);
}

/* See uma.h */
void
uma_prealloc(uma_zone_t zone, int items)
{
	uma_domain_t dom;
	uma_slab_t slab;
	uma_keg_t keg;
	int domain, slabs;

	keg = zone_first_keg(zone);
	if (keg == NULL)
		return;
	KEG_LOCK(keg);
	slabs = items / keg->uk_ipers;
	domain = 0;
	if (slabs * keg->uk_ipers < items)
		slabs++;
	while (slabs > 0) {
		slab = keg_alloc_slab(keg, zone, domain, M_WAITOK);
		if (slab == NULL)
			break;
		MPASS(slab->us_keg == keg);
		dom = &keg->uk_domain[slab->us_domain];
		LIST_INSERT_HEAD(&dom->ud_free_slab, slab, us_link);
		slabs--;
		domain = (domain + 1) % vm_ndomains;
	}
	KEG_UNLOCK(keg);
}

static void
uma_reclaim_locked(bool kmem_danger)
{

	CTR0(KTR_UMA, "UMA: vm asked us to release pages!");
	sx_assert(&uma_drain_lock, SA_XLOCKED);
	bucket_enable();
	zone_foreach(zone_drain);
	if (vm_page_count_min() || kmem_danger) {
		cache_drain_safe(NULL);
		zone_foreach(zone_drain);
	}
	/*
	 * Some slabs may have been freed but the slab zone was visited
	 * early in the pass; visit it again so that we can free pages
	 * that became empty once the other zones were drained.  We have
	 * to do the same for buckets.
	 */
	zone_drain(slabzone);
	bucket_zone_drain();
}
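
/*
 * Illustrative sketch (hypothetical numbers): a zone whose items are
 * consumed from contexts that cannot sleep can be backed by reserved
 * KVA and warmed up at boot so that early M_NOWAIT allocations do not
 * have to go to the VM system:
 *
 *	uma_zone_reserve_kva(foo_zone, 4096);
 *	uma_prealloc(foo_zone, 256);
 *
 * Note that uma_zone_reserve_kva() also caps the zone at the given
 * item count and marks the keg UMA_ZONE_NOFREE, so its pages are
 * never returned to the system.
 */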

/* See uma.h */
void
uma_reclaim(void)
{

	sx_xlock(&uma_drain_lock);
	uma_reclaim_locked(false);
	sx_xunlock(&uma_drain_lock);
}

static volatile int uma_reclaim_needed;

void
uma_reclaim_wakeup(void)
{

	if (atomic_fetchadd_int(&uma_reclaim_needed, 1) == 0)
		wakeup(uma_reclaim);
}

void
uma_reclaim_worker(void *arg __unused)
{

	for (;;) {
		sx_xlock(&uma_drain_lock);
		while (atomic_load_int(&uma_reclaim_needed) == 0)
			sx_sleep(uma_reclaim, &uma_drain_lock, PVM, "umarcl",
			    hz);
		sx_xunlock(&uma_drain_lock);
		EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_KMEM);
		sx_xlock(&uma_drain_lock);
		uma_reclaim_locked(true);
		atomic_store_int(&uma_reclaim_needed, 0);
		sx_xunlock(&uma_drain_lock);
		/* Don't fire more than once per second. */
		pause("umarclslp", hz);
	}
}

/* See uma.h */
int
uma_zone_exhausted(uma_zone_t zone)
{
	int full;

	ZONE_LOCK(zone);
	full = (zone->uz_flags & UMA_ZFLAG_FULL);
	ZONE_UNLOCK(zone);
	return (full);
}

int
uma_zone_exhausted_nolock(uma_zone_t zone)
{
	return (zone->uz_flags & UMA_ZFLAG_FULL);
}

void *
uma_large_malloc_domain(vm_size_t size, int domain, int wait)
{
	vm_offset_t addr;
	uma_slab_t slab;

	slab = zone_alloc_item(slabzone, NULL, domain, wait);
	if (slab == NULL)
		return (NULL);
	if (domain == UMA_ANYDOMAIN)
		addr = kmem_malloc(kernel_arena, size, wait);
	else
		addr = kmem_malloc_domain(domain, size, wait);
	if (addr != 0) {
		vsetslab(addr, slab);
		slab->us_data = (void *)addr;
		slab->us_flags = UMA_SLAB_KERNEL | UMA_SLAB_MALLOC;
		slab->us_size = size;
		slab->us_domain = vm_phys_domidx(PHYS_TO_VM_PAGE(
		    pmap_kextract(addr)));
		uma_total_inc(size);
	} else {
		zone_free_item(slabzone, slab, NULL, SKIP_NONE);
	}

	return ((void *)addr);
}

void *
uma_large_malloc(vm_size_t size, int wait)
{

	return (uma_large_malloc_domain(size, UMA_ANYDOMAIN, wait));
}

void
uma_large_free(uma_slab_t slab)
{

	KASSERT((slab->us_flags & UMA_SLAB_KERNEL) != 0,
	    ("uma_large_free:  Memory not allocated with uma_large_malloc."));
	kmem_free(kernel_arena, (vm_offset_t)slab->us_data, slab->us_size);
	uma_total_dec(slab->us_size);
	zone_free_item(slabzone, slab, NULL, SKIP_NONE);
}

static void
uma_zero_item(void *item, uma_zone_t zone)
{
	int i;

	if (zone->uz_flags & UMA_ZONE_PCPU) {
		CPU_FOREACH(i)
			bzero(zpcpu_get_cpu(item, i), zone->uz_size);
	} else
		bzero(item, zone->uz_size);
}

unsigned long
uma_limit(void)
{

	return (uma_kmem_limit);
}

void
uma_set_limit(unsigned long limit)
{

	uma_kmem_limit = limit;
}

unsigned long
uma_size(void)
{

	return (uma_kmem_total);
}

long
uma_avail(void)
{

	return (uma_kmem_limit - uma_kmem_total);
}
printf("Free slabs:\n"); 3522 LIST_FOREACH(slab, &dom->ud_free_slab, us_link) 3523 slab_print(slab); 3524 printf("Full slabs:\n"); 3525 LIST_FOREACH(slab, &dom->ud_full_slab, us_link) 3526 slab_print(slab); 3527 } 3528 } 3529 3530 void 3531 uma_print_zone(uma_zone_t zone) 3532 { 3533 uma_cache_t cache; 3534 uma_klink_t kl; 3535 int i; 3536 3537 printf("zone: %s(%p) size %d flags %#x\n", 3538 zone->uz_name, zone, zone->uz_size, zone->uz_flags); 3539 LIST_FOREACH(kl, &zone->uz_kegs, kl_link) 3540 uma_print_keg(kl->kl_keg); 3541 CPU_FOREACH(i) { 3542 cache = &zone->uz_cpu[i]; 3543 printf("CPU %d Cache:\n", i); 3544 cache_print(cache); 3545 } 3546 } 3547 3548 #ifdef DDB 3549 /* 3550 * Generate statistics across both the zone and its per-cpu cache's. Return 3551 * desired statistics if the pointer is non-NULL for that statistic. 3552 * 3553 * Note: does not update the zone statistics, as it can't safely clear the 3554 * per-CPU cache statistic. 3555 * 3556 * XXXRW: Following the uc_allocbucket and uc_freebucket pointers here isn't 3557 * safe from off-CPU; we should modify the caches to track this information 3558 * directly so that we don't have to. 3559 */ 3560 static void 3561 uma_zone_sumstat(uma_zone_t z, int *cachefreep, uint64_t *allocsp, 3562 uint64_t *freesp, uint64_t *sleepsp) 3563 { 3564 uma_cache_t cache; 3565 uint64_t allocs, frees, sleeps; 3566 int cachefree, cpu; 3567 3568 allocs = frees = sleeps = 0; 3569 cachefree = 0; 3570 CPU_FOREACH(cpu) { 3571 cache = &z->uz_cpu[cpu]; 3572 if (cache->uc_allocbucket != NULL) 3573 cachefree += cache->uc_allocbucket->ub_cnt; 3574 if (cache->uc_freebucket != NULL) 3575 cachefree += cache->uc_freebucket->ub_cnt; 3576 allocs += cache->uc_allocs; 3577 frees += cache->uc_frees; 3578 } 3579 allocs += z->uz_allocs; 3580 frees += z->uz_frees; 3581 sleeps += z->uz_sleeps; 3582 if (cachefreep != NULL) 3583 *cachefreep = cachefree; 3584 if (allocsp != NULL) 3585 *allocsp = allocs; 3586 if (freesp != NULL) 3587 *freesp = frees; 3588 if (sleepsp != NULL) 3589 *sleepsp = sleeps; 3590 } 3591 #endif /* DDB */ 3592 3593 static int 3594 sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS) 3595 { 3596 uma_keg_t kz; 3597 uma_zone_t z; 3598 int count; 3599 3600 count = 0; 3601 rw_rlock(&uma_rwlock); 3602 LIST_FOREACH(kz, &uma_kegs, uk_link) { 3603 LIST_FOREACH(z, &kz->uk_zones, uz_link) 3604 count++; 3605 } 3606 rw_runlock(&uma_rwlock); 3607 return (sysctl_handle_int(oidp, &count, 0, req)); 3608 } 3609 3610 static int 3611 sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS) 3612 { 3613 struct uma_stream_header ush; 3614 struct uma_type_header uth; 3615 struct uma_percpu_stat ups; 3616 uma_bucket_t bucket; 3617 uma_zone_domain_t zdom; 3618 struct sbuf sbuf; 3619 uma_cache_t cache; 3620 uma_klink_t kl; 3621 uma_keg_t kz; 3622 uma_zone_t z; 3623 uma_keg_t k; 3624 int count, error, i; 3625 3626 error = sysctl_wire_old_buffer(req, 0); 3627 if (error != 0) 3628 return (error); 3629 sbuf_new_for_sysctl(&sbuf, NULL, 128, req); 3630 sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL); 3631 3632 count = 0; 3633 rw_rlock(&uma_rwlock); 3634 LIST_FOREACH(kz, &uma_kegs, uk_link) { 3635 LIST_FOREACH(z, &kz->uk_zones, uz_link) 3636 count++; 3637 } 3638 3639 /* 3640 * Insert stream header. 

static int
sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
{
	struct uma_stream_header ush;
	struct uma_type_header uth;
	struct uma_percpu_stat ups;
	uma_bucket_t bucket;
	uma_zone_domain_t zdom;
	struct sbuf sbuf;
	uma_cache_t cache;
	uma_klink_t kl;
	uma_keg_t kz;
	uma_zone_t z;
	uma_keg_t k;
	int count, error, i;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL);

	count = 0;
	rw_rlock(&uma_rwlock);
	LIST_FOREACH(kz, &uma_kegs, uk_link) {
		LIST_FOREACH(z, &kz->uk_zones, uz_link)
			count++;
	}

	/*
	 * Insert stream header.
	 */
	bzero(&ush, sizeof(ush));
	ush.ush_version = UMA_STREAM_VERSION;
	ush.ush_maxcpus = (mp_maxid + 1);
	ush.ush_count = count;
	(void)sbuf_bcat(&sbuf, &ush, sizeof(ush));

	LIST_FOREACH(kz, &uma_kegs, uk_link) {
		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
			bzero(&uth, sizeof(uth));
			ZONE_LOCK(z);
			strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
			uth.uth_align = kz->uk_align;
			uth.uth_size = kz->uk_size;
			uth.uth_rsize = kz->uk_rsize;
			LIST_FOREACH(kl, &z->uz_kegs, kl_link) {
				k = kl->kl_keg;
				uth.uth_maxpages += k->uk_maxpages;
				uth.uth_pages += k->uk_pages;
				uth.uth_keg_free += k->uk_free;
				uth.uth_limit = (k->uk_maxpages / k->uk_ppera)
				    * k->uk_ipers;
			}

			/*
			 * A zone is secondary if it is not the first entry
			 * on the keg's zone list.
			 */
			if ((z->uz_flags & UMA_ZONE_SECONDARY) &&
			    (LIST_FIRST(&kz->uk_zones) != z))
				uth.uth_zone_flags = UTH_ZONE_SECONDARY;

			for (i = 0; i < vm_ndomains; i++) {
				zdom = &z->uz_domain[i];
				LIST_FOREACH(bucket, &zdom->uzd_buckets,
				    ub_link)
					uth.uth_zone_free += bucket->ub_cnt;
			}
			uth.uth_allocs = z->uz_allocs;
			uth.uth_frees = z->uz_frees;
			uth.uth_fails = z->uz_fails;
			uth.uth_sleeps = z->uz_sleeps;
			(void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
			/*
			 * While it is not normally safe to access the cache
			 * bucket pointers while not on the CPU that owns the
			 * cache, we only allow the pointers to be exchanged
			 * without the zone lock held, not invalidated, so
			 * accept the possible race associated with bucket
			 * exchange during monitoring.
			 */
			for (i = 0; i < (mp_maxid + 1); i++) {
				bzero(&ups, sizeof(ups));
				if (kz->uk_flags & UMA_ZFLAG_INTERNAL)
					goto skip;
				if (CPU_ABSENT(i))
					goto skip;
				cache = &z->uz_cpu[i];
				if (cache->uc_allocbucket != NULL)
					ups.ups_cache_free +=
					    cache->uc_allocbucket->ub_cnt;
				if (cache->uc_freebucket != NULL)
					ups.ups_cache_free +=
					    cache->uc_freebucket->ub_cnt;
				ups.ups_allocs = cache->uc_allocs;
				ups.ups_frees = cache->uc_frees;
skip:
				(void)sbuf_bcat(&sbuf, &ups, sizeof(ups));
			}
			ZONE_UNLOCK(z);
		}
	}
	rw_runlock(&uma_rwlock);
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

int
sysctl_handle_uma_zone_max(SYSCTL_HANDLER_ARGS)
{
	uma_zone_t zone = *(uma_zone_t *)arg1;
	int error, max;

	max = uma_zone_get_max(zone);
	error = sysctl_handle_int(oidp, &max, 0, req);
	if (error || !req->newptr)
		return (error);

	uma_zone_set_max(zone, max);

	return (0);
}

int
sysctl_handle_uma_zone_cur(SYSCTL_HANDLER_ARGS)
{
	uma_zone_t zone = *(uma_zone_t *)arg1;
	int cur;

	cur = uma_zone_get_cur(zone);
	return (sysctl_handle_int(oidp, &cur, 0, req));
}
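
/*
 * Illustrative sketch (hypothetical names and sysctl placement): a
 * subsystem can export a zone's limit and usage through the handlers
 * above by passing a pointer to its uma_zone_t variable as arg1:
 *
 *	static uma_zone_t foo_zone;
 *
 *	SYSCTL_PROC(_kern, OID_AUTO, foo_zone_max,
 *	    CTLTYPE_INT | CTLFLAG_RW, &foo_zone, 0,
 *	    sysctl_handle_uma_zone_max, "I", "Maximum foo items");
 *	SYSCTL_PROC(_kern, OID_AUTO, foo_zone_cur,
 *	    CTLTYPE_INT | CTLFLAG_RD, &foo_zone, 0,
 *	    sysctl_handle_uma_zone_cur, "I", "Current foo items");
 *
 * The zone must have been created before the first handler invocation,
 * since both handlers dereference the zone pointer immediately.
 */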

#ifdef INVARIANTS
static uma_slab_t
uma_dbg_getslab(uma_zone_t zone, void *item)
{
	uma_slab_t slab;
	uma_keg_t keg;
	uint8_t *mem;

	mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
	if (zone->uz_flags & UMA_ZONE_VTOSLAB) {
		slab = vtoslab((vm_offset_t)mem);
	} else {
		/*
		 * It is safe to return the slab here even though the
		 * zone is unlocked because the item's allocation state
		 * essentially holds a reference.
		 */
		ZONE_LOCK(zone);
		keg = LIST_FIRST(&zone->uz_kegs)->kl_keg;
		if (keg->uk_flags & UMA_ZONE_HASH)
			slab = hash_sfind(&keg->uk_hash, mem);
		else
			slab = (uma_slab_t)(mem + keg->uk_pgoff);
		ZONE_UNLOCK(zone);
	}

	return (slab);
}

/*
 * Set up the slab's freei data such that uma_dbg_free can function.
 */
static void
uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item)
{
	uma_keg_t keg;
	int freei;

	if (zone_first_keg(zone) == NULL)
		return;
	if (slab == NULL) {
		slab = uma_dbg_getslab(zone, item);
		if (slab == NULL)
			panic("uma: item %p did not belong to zone %s\n",
			    item, zone->uz_name);
	}
	keg = slab->us_keg;
	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;

	if (BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree))
		panic("Duplicate alloc of %p from zone %p(%s) slab %p(%d)\n",
		    item, zone, zone->uz_name, slab, freei);
	BIT_SET_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree);
}

/*
 * Verifies freed addresses.  Checks for alignment, valid slab membership
 * and duplicate frees.
 */
static void
uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
{
	uma_keg_t keg;
	int freei;

	if (zone_first_keg(zone) == NULL)
		return;
	if (slab == NULL) {
		slab = uma_dbg_getslab(zone, item);
		if (slab == NULL)
			panic("uma: Freed item %p did not belong to zone %s\n",
			    item, zone->uz_name);
	}
	keg = slab->us_keg;
	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;

	if (freei >= keg->uk_ipers)
		panic("Invalid free of %p from zone %p(%s) slab %p(%d)\n",
		    item, zone, zone->uz_name, slab, freei);

	if (((freei * keg->uk_rsize) + slab->us_data) != item)
		panic("Unaligned free of %p from zone %p(%s) slab %p(%d)\n",
		    item, zone, zone->uz_name, slab, freei);

	if (!BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree))
		panic("Duplicate free of %p from zone %p(%s) slab %p(%d)\n",
		    item, zone, zone->uz_name, slab, freei);

	BIT_CLR_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree);
}
#endif /* INVARIANTS */
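
/*
 * The commands below are available from the in-kernel debugger, e.g.
 * "db> show uma" for keg-backed zones and "db> show umacache" for
 * cache-only zones; both print per-zone usage in the column format
 * given in the db_printf() headers.
 */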

#ifdef DDB
DB_SHOW_COMMAND(uma, db_show_uma)
{
	uma_bucket_t bucket;
	uma_keg_t kz;
	uma_zone_t z;
	uma_zone_domain_t zdom;
	uint64_t allocs, frees, sleeps;
	int cachefree, i;

	db_printf("%18s %8s %8s %8s %12s %8s %8s\n", "Zone", "Size", "Used",
	    "Free", "Requests", "Sleeps", "Bucket");
	LIST_FOREACH(kz, &uma_kegs, uk_link) {
		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
			if (kz->uk_flags & UMA_ZFLAG_INTERNAL) {
				allocs = z->uz_allocs;
				frees = z->uz_frees;
				sleeps = z->uz_sleeps;
				cachefree = 0;
			} else
				uma_zone_sumstat(z, &cachefree, &allocs,
				    &frees, &sleeps);
			if (!((z->uz_flags & UMA_ZONE_SECONDARY) &&
			    (LIST_FIRST(&kz->uk_zones) != z)))
				cachefree += kz->uk_free;
			for (i = 0; i < vm_ndomains; i++) {
				zdom = &z->uz_domain[i];
				LIST_FOREACH(bucket, &zdom->uzd_buckets,
				    ub_link)
					cachefree += bucket->ub_cnt;
			}
			db_printf("%18s %8ju %8jd %8d %12ju %8ju %8u\n",
			    z->uz_name, (uintmax_t)kz->uk_size,
			    (intmax_t)(allocs - frees), cachefree,
			    (uintmax_t)allocs, sleeps, z->uz_count);
			if (db_pager_quit)
				return;
		}
	}
}

DB_SHOW_COMMAND(umacache, db_show_umacache)
{
	uma_bucket_t bucket;
	uma_zone_t z;
	uma_zone_domain_t zdom;
	uint64_t allocs, frees;
	int cachefree, i;

	db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free",
	    "Requests", "Bucket");
	LIST_FOREACH(z, &uma_cachezones, uz_link) {
		uma_zone_sumstat(z, &cachefree, &allocs, &frees, NULL);
		for (i = 0; i < vm_ndomains; i++) {
			zdom = &z->uz_domain[i];
			LIST_FOREACH(bucket, &zdom->uzd_buckets, ub_link)
				cachefree += bucket->ub_cnt;
		}
		db_printf("%18s %8ju %8jd %8d %12ju %8u\n",
		    z->uz_name, (uintmax_t)z->uz_size,
		    (intmax_t)(allocs - frees), cachefree,
		    (uintmax_t)allocs, z->uz_count);
		if (db_pager_quit)
			return;
	}
}
#endif /* DDB */