/*-
 * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson <jeff@FreeBSD.org>
 * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
 * Copyright (c) 2004-2006 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uma_core.c  Implementation of the Universal Memory allocator
 *
 * This allocator is intended to replace the multitude of similar object caches
 * in the standard FreeBSD kernel.  The intent is to be flexible as well as
 * efficient.  A primary design goal is to return unused memory to the rest of
 * the system.  This will make the system as a whole more flexible due to the
 * ability to move memory to subsystems which most need it instead of leaving
 * pools of reserved memory unused.
 *
 * The basic ideas stem from similar slab/zone based allocators whose
 * algorithms are well known.
 */

/*
 * TODO:
 *	- Improve memory usage for large allocations
 *	- Investigate cache size adjustments
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/* I should really use ktr.. */
/*
#define UMA_DEBUG 1
#define UMA_DEBUG_ALLOC 1
#define UMA_DEBUG_ALLOC_1 1
*/

#include "opt_ddb.h"
#include "opt_param.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitset.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

#include <ddb/ddb.h>

#ifdef DEBUG_MEMGUARD
#include <vm/memguard.h>
#endif

/*
 * This is the zone and keg from which all zones are spawned.  The idea is that
 * even the zone & keg heads are allocated from the allocator, so we use the
 * bss section to bootstrap us.
 */
static struct uma_keg masterkeg;
static struct uma_zone masterzone_k;
static struct uma_zone masterzone_z;
static uma_zone_t kegs = &masterzone_k;
static uma_zone_t zones = &masterzone_z;

/* This is the zone from which all of uma_slab_t's are allocated. */
static uma_zone_t slabzone;

/*
 * The initial hash tables come out of this zone so they can be allocated
 * prior to malloc coming up.
 */
static uma_zone_t hashzone;

/* The boot-time adjusted value for cache line alignment. */
int uma_align_cache = 64 - 1;

static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");

/*
 * Are we allowed to allocate buckets?
 */
static int bucketdisable = 1;

/* Linked list of all kegs in the system */
static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);

/* Linked list of all cache-only zones in the system */
static LIST_HEAD(,uma_zone) uma_cachezones =
    LIST_HEAD_INITIALIZER(uma_cachezones);

/* This RW lock protects the keg list */
static struct rwlock_padalign uma_rwlock;

/* Linked list of boot time pages */
static LIST_HEAD(,uma_slab) uma_boot_pages =
    LIST_HEAD_INITIALIZER(uma_boot_pages);

/* This mutex protects the boot time pages list */
static struct mtx_padalign uma_boot_pages_mtx;

static struct sx uma_drain_lock;

/* Is the VM done starting up? */
static int booted = 0;
#define	UMA_STARTUP	1
#define	UMA_STARTUP2	2

/*
 * This is the handle used to schedule events that need to happen
 * outside of the allocation fast path.
 */
static struct callout uma_callout;
#define	UMA_TIMEOUT	20		/* Seconds for callout interval. */

/*
 * This structure is passed as the zone ctor arg so that I don't have to create
 * a special allocation function just for zones.
 */
struct uma_zctor_args {
	const char *name;
	size_t size;
	uma_ctor ctor;
	uma_dtor dtor;
	uma_init uminit;
	uma_fini fini;
	uma_import import;
	uma_release release;
	void *arg;
	uma_keg_t keg;
	int align;
	uint32_t flags;
};

struct uma_kctor_args {
	uma_zone_t zone;
	size_t size;
	uma_init uminit;
	uma_fini fini;
	int align;
	uint32_t flags;
};

struct uma_bucket_zone {
	uma_zone_t ubz_zone;
	char *ubz_name;
	int ubz_entries;	/* Number of items it can hold. */
	int ubz_maxsize;	/* Maximum allocation size per-item. */
};

/*
 * Compute the actual number of bucket entries to pack them in power
 * of two sizes for more efficient space utilization.
 */
#define	BUCKET_SIZE(n)						\
    (((sizeof(void *) * (n)) - sizeof(struct uma_bucket)) / sizeof(void *))

#define	BUCKET_MAX	BUCKET_SIZE(256)

struct uma_bucket_zone bucket_zones[] = {
	{ NULL, "4 Bucket", BUCKET_SIZE(4), 4096 },
	{ NULL, "6 Bucket", BUCKET_SIZE(6), 3072 },
	{ NULL, "8 Bucket", BUCKET_SIZE(8), 2048 },
	{ NULL, "12 Bucket", BUCKET_SIZE(12), 1536 },
	{ NULL, "16 Bucket", BUCKET_SIZE(16), 1024 },
	{ NULL, "32 Bucket", BUCKET_SIZE(32), 512 },
	{ NULL, "64 Bucket", BUCKET_SIZE(64), 256 },
	{ NULL, "128 Bucket", BUCKET_SIZE(128), 128 },
	{ NULL, "256 Bucket", BUCKET_SIZE(256), 64 },
	{ NULL, NULL, 0}
};
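
/*
 * Illustrative note (not part of the original code): "N Bucket" names a
 * bucket occupying N pointer-sized words in total, and BUCKET_SIZE(N) is
 * how many item pointers fit after the header.  Assuming LP64 and a
 * struct uma_bucket header that rounds to 24 bytes, for example:
 *
 *	BUCKET_SIZE(4)  == ((8 * 4)  - 24) / 8 == 1 item in 32 bytes
 *	BUCKET_SIZE(32) == ((8 * 32) - 24) / 8 == 29 items in 256 bytes
 */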
/*
 * Flags and enumerations to be passed to internal functions.
 */
enum zfreeskip { SKIP_NONE = 0, SKIP_DTOR, SKIP_FINI };

/* Prototypes.. */

static void *noobj_alloc(uma_zone_t, vm_size_t, uint8_t *, int);
static void *page_alloc(uma_zone_t, vm_size_t, uint8_t *, int);
static void *startup_alloc(uma_zone_t, vm_size_t, uint8_t *, int);
static void page_free(void *, vm_size_t, uint8_t);
static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int);
static void cache_drain(uma_zone_t);
static void bucket_drain(uma_zone_t, uma_bucket_t);
static void bucket_cache_drain(uma_zone_t zone);
static int keg_ctor(void *, int, void *, int);
static void keg_dtor(void *, int, void *);
static int zone_ctor(void *, int, void *, int);
static void zone_dtor(void *, int, void *);
static int zero_init(void *, int, int);
static void keg_small_init(uma_keg_t keg);
static void keg_large_init(uma_keg_t keg);
static void zone_foreach(void (*zfunc)(uma_zone_t));
static void zone_timeout(uma_zone_t zone);
static int hash_alloc(struct uma_hash *);
static int hash_expand(struct uma_hash *, struct uma_hash *);
static void hash_free(struct uma_hash *hash);
static void uma_timeout(void *);
static void uma_startup3(void);
static void *zone_alloc_item(uma_zone_t, void *, int);
static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip);
static void bucket_enable(void);
static void bucket_init(void);
static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int);
static void bucket_free(uma_zone_t zone, uma_bucket_t, void *);
static void bucket_zone_drain(void);
static uma_bucket_t zone_alloc_bucket(uma_zone_t zone, void *, int flags);
static uma_slab_t zone_fetch_slab(uma_zone_t zone, uma_keg_t last, int flags);
static uma_slab_t zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last,
    int flags);
static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
static void slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item);
static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
    uma_fini fini, int align, uint32_t flags);
static int zone_import(uma_zone_t zone, void **bucket, int max, int flags);
static void zone_release(uma_zone_t zone, void **bucket, int cnt);
static void uma_zero_item(void *item, uma_zone_t zone);

void uma_print_zone(uma_zone_t);
void uma_print_stats(void);
static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);

#ifdef INVARIANTS
static void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item);
static void uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item);
#endif

SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);

SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLTYPE_INT,
    0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones");

SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
    0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");

static int zone_warnings = 1;
SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RWTUN, &zone_warnings, 0,
    "Warn when UMA zones become full");

/*
 * This routine checks to see whether or not it's safe to enable buckets.
 */
static void
bucket_enable(void)
{
	bucketdisable = vm_page_count_min();
}
/*
 * Initialize bucket_zones, the array of zones of buckets of various sizes.
 *
 * For each zone, calculate the memory required for each bucket, consisting
 * of the header and an array of pointers.
 */
static void
bucket_init(void)
{
	struct uma_bucket_zone *ubz;
	int size;

	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) {
		size = roundup(sizeof(struct uma_bucket), sizeof(void *));
		size += sizeof(void *) * ubz->ubz_entries;
		ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
		    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
		    UMA_ZONE_MTXCLASS | UMA_ZFLAG_BUCKET);
	}
}

/*
 * Given a desired number of entries for a bucket, return the zone from which
 * to allocate the bucket.
 */
static struct uma_bucket_zone *
bucket_zone_lookup(int entries)
{
	struct uma_bucket_zone *ubz;

	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
		if (ubz->ubz_entries >= entries)
			return (ubz);
	ubz--;
	return (ubz);
}

static int
bucket_select(int size)
{
	struct uma_bucket_zone *ubz;

	ubz = &bucket_zones[0];
	if (size > ubz->ubz_maxsize)
		return MAX((ubz->ubz_maxsize * ubz->ubz_entries) / size, 1);

	for (; ubz->ubz_entries != 0; ubz++)
		if (ubz->ubz_maxsize < size)
			break;
	ubz--;
	return (ubz->ubz_entries);
}
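
/*
 * Illustrative note (not part of the original code): with the
 * bucket_zones[] table above, bucket_zone_lookup() returns the first zone
 * whose buckets hold at least the requested count, while bucket_select()
 * walks forward until ubz_maxsize drops below the item size and then steps
 * back one entry.  For example, bucket_select(600) stops at the "32 Bucket"
 * row (512 < 600) and settles on the "16 Bucket" entry count, i.e. the
 * largest bucket whose per-item budget still covers a 600-byte item.
 */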
static uma_bucket_t
bucket_alloc(uma_zone_t zone, void *udata, int flags)
{
	struct uma_bucket_zone *ubz;
	uma_bucket_t bucket;

	/*
	 * This is to stop us from allocating per cpu buckets while we're
	 * running out of vm.boot_pages.  Otherwise, we would exhaust the
	 * boot pages.  This also prevents us from allocating buckets in
	 * low memory situations.
	 */
	if (bucketdisable)
		return (NULL);
	/*
	 * To limit bucket recursion we store the original zone flags
	 * in a cookie passed via zalloc_arg/zfree_arg.  This allows the
	 * NOVM flag to persist even through deep recursions.  We also
	 * store ZFLAG_BUCKET once we have recursed attempting to allocate
	 * a bucket for a bucket zone so we do not allow infinite bucket
	 * recursion.  This cookie will even persist to frees of unused
	 * buckets via the allocation path or bucket allocations in the
	 * free path.
	 */
	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
		udata = (void *)(uintptr_t)zone->uz_flags;
	else {
		if ((uintptr_t)udata & UMA_ZFLAG_BUCKET)
			return (NULL);
		udata = (void *)((uintptr_t)udata | UMA_ZFLAG_BUCKET);
	}
	if ((uintptr_t)udata & UMA_ZFLAG_CACHEONLY)
		flags |= M_NOVM;
	ubz = bucket_zone_lookup(zone->uz_count);
	if (ubz->ubz_zone == zone && (ubz + 1)->ubz_entries != 0)
		ubz++;
	bucket = uma_zalloc_arg(ubz->ubz_zone, udata, flags);
	if (bucket) {
#ifdef INVARIANTS
		bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
#endif
		bucket->ub_cnt = 0;
		bucket->ub_entries = ubz->ubz_entries;
	}

	return (bucket);
}
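
/*
 * Illustrative note (not part of the original code): the udata cookie is
 * simply the zone's flag word cast to a pointer.  A first-level allocation
 * passes the flags down:
 *
 *	udata = (void *)(uintptr_t)zone->uz_flags;
 *
 * and a recursive call for a bucket zone adds UMA_ZFLAG_BUCKET:
 *
 *	udata = (void *)((uintptr_t)udata | UMA_ZFLAG_BUCKET);
 *
 * A second level of recursion then sees UMA_ZFLAG_BUCKET already set in
 * the cookie and fails the allocation instead of recursing again.
 */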
static void
bucket_free(uma_zone_t zone, uma_bucket_t bucket, void *udata)
{
	struct uma_bucket_zone *ubz;

	KASSERT(bucket->ub_cnt == 0,
	    ("bucket_free: Freeing a non free bucket."));
	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
		udata = (void *)(uintptr_t)zone->uz_flags;
	ubz = bucket_zone_lookup(bucket->ub_entries);
	uma_zfree_arg(ubz->ubz_zone, bucket, udata);
}

static void
bucket_zone_drain(void)
{
	struct uma_bucket_zone *ubz;

	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
		zone_drain(ubz->ubz_zone);
}

static void
zone_log_warning(uma_zone_t zone)
{
	static const struct timeval warninterval = { 300, 0 };

	if (!zone_warnings || zone->uz_warning == NULL)
		return;

	if (ratecheck(&zone->uz_ratecheck, &warninterval))
		printf("[zone: %s] %s\n", zone->uz_name, zone->uz_warning);
}

static inline void
zone_maxaction(uma_zone_t zone)
{

	if (zone->uz_maxaction.ta_func != NULL)
		taskqueue_enqueue(taskqueue_thread, &zone->uz_maxaction);
}

static void
zone_foreach_keg(uma_zone_t zone, void (*kegfn)(uma_keg_t))
{
	uma_klink_t klink;

	LIST_FOREACH(klink, &zone->uz_kegs, kl_link)
		kegfn(klink->kl_keg);
}

/*
 * Routine called by timeout which is used to fire off some time interval
 * based calculations.  (stats, hash size, etc.)
 *
 * Arguments:
 *	arg   Unused
 *
 * Returns:
 *	Nothing
 */
static void
uma_timeout(void *unused)
{
	bucket_enable();
	zone_foreach(zone_timeout);

	/* Reschedule this event */
	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
}
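
/*
 * Illustrative note (not part of the original code): uma_timeout()
 * reschedules itself, so once the callout is armed during startup the
 * maintenance pass above runs every UMA_TIMEOUT (20) seconds for the
 * lifetime of the system.
 */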
/*
 * Routine to perform timeout driven calculations.  This expands the
 * hashes and does per cpu statistics aggregation.
 *
 * Returns nothing.
 */
static void
keg_timeout(uma_keg_t keg)
{

	KEG_LOCK(keg);
	/*
	 * Expand the keg hash table.
	 *
	 * This is done if the number of slabs is larger than the hash size.
	 * What I'm trying to do here is completely reduce collisions.  This
	 * may be a little aggressive.  Should I allow for two collisions max?
	 */
	if (keg->uk_flags & UMA_ZONE_HASH &&
	    keg->uk_pages / keg->uk_ppera >= keg->uk_hash.uh_hashsize) {
		struct uma_hash newhash;
		struct uma_hash oldhash;
		int ret;

		/*
		 * This is so involved because allocating and freeing
		 * while the keg lock is held will lead to deadlock.
		 * I have to do everything in stages and check for
		 * races.
		 */
		newhash = keg->uk_hash;
		KEG_UNLOCK(keg);
		ret = hash_alloc(&newhash);
		KEG_LOCK(keg);
		if (ret) {
			if (hash_expand(&keg->uk_hash, &newhash)) {
				oldhash = keg->uk_hash;
				keg->uk_hash = newhash;
			} else
				oldhash = newhash;

			KEG_UNLOCK(keg);
			hash_free(&oldhash);
			return;
		}
	}
	KEG_UNLOCK(keg);
}

static void
zone_timeout(uma_zone_t zone)
{

	zone_foreach_keg(zone, &keg_timeout);
}

/*
 * Allocate and zero fill the next sized hash table from the appropriate
 * backing store.
 *
 * Arguments:
 *	hash  A new hash structure with the old hash size in uh_hashsize
 *
 * Returns:
 *	1 on success and 0 on failure.
 */
static int
hash_alloc(struct uma_hash *hash)
{
	int oldsize;
	int alloc;

	oldsize = hash->uh_hashsize;

	/* We're just going to go to a power of two greater */
	if (oldsize) {
		hash->uh_hashsize = oldsize * 2;
		alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
		hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
		    M_UMAHASH, M_NOWAIT);
	} else {
		alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
		hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
		    M_WAITOK);
		hash->uh_hashsize = UMA_HASH_SIZE_INIT;
	}
	if (hash->uh_slab_hash) {
		bzero(hash->uh_slab_hash, alloc);
		hash->uh_hashmask = hash->uh_hashsize - 1;
		return (1);
	}

	return (0);
}
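
/*
 * Illustrative note (not part of the original code): the staging in
 * keg_timeout() exists because hash_alloc() may sleep or call into the VM,
 * which is not safe with the keg lock held.  A snapshot of uk_hash is
 * taken under the lock, the new table is allocated at double that size
 * with the lock dropped, and hash_expand() then re-checks under the lock
 * so that a table grown concurrently by another thread wins and the stale
 * one is freed instead.
 */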
/*
 * Expands the hash table for HASH zones.  This is done from zone_timeout
 * to reduce collisions.  This must not be done in the regular allocation
 * path, otherwise, we can recurse on the vm while allocating pages.
 *
 * Arguments:
 *	oldhash  The hash you want to expand
 *	newhash  The hash structure for the new table
 *
 * Returns:
 *	1 if the table was expanded and 0 otherwise.
 *
 * Discussion:
 */
static int
hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
{
	uma_slab_t slab;
	int hval;
	int i;

	if (!newhash->uh_slab_hash)
		return (0);

	if (oldhash->uh_hashsize >= newhash->uh_hashsize)
		return (0);

	/*
	 * I need to investigate hash algorithms for resizing without a
	 * full rehash.
	 */

	for (i = 0; i < oldhash->uh_hashsize; i++)
		while (!SLIST_EMPTY(&oldhash->uh_slab_hash[i])) {
			slab = SLIST_FIRST(&oldhash->uh_slab_hash[i]);
			SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[i], us_hlink);
			hval = UMA_HASH(newhash, slab->us_data);
			SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
			    slab, us_hlink);
		}

	return (1);
}

/*
 * Free the hash bucket to the appropriate backing store.
 *
 * Arguments:
 *	hash  The hash structure whose table is being freed
 *
 * Returns:
 *	Nothing
 */
static void
hash_free(struct uma_hash *hash)
{
	if (hash->uh_slab_hash == NULL)
		return;
	if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
		zone_free_item(hashzone, hash->uh_slab_hash, NULL, SKIP_NONE);
	else
		free(hash->uh_slab_hash, M_UMAHASH);
}
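
/*
 * Illustrative note (not part of the original code): every slab must be
 * rehashed because its bucket index depends on the mask.  Assuming
 * UMA_HASH() in uma_int.h indexes by the slab's data address masked with
 * uh_hashmask, an address that fell in bucket (h & 31) of a 32-entry table
 * may belong to either (h & 63) bucket of the doubled table, so each entry
 * is recomputed against the new mask as it is moved.
 */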
/*
 * Frees all outstanding items in a bucket
 *
 * Arguments:
 *	zone   The zone to free to, must be unlocked.
 *	bucket The free/alloc bucket with items, cpu queue must be locked.
 *
 * Returns:
 *	Nothing
 */

static void
bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
{
	int i;

	if (bucket == NULL)
		return;

	if (zone->uz_fini)
		for (i = 0; i < bucket->ub_cnt; i++)
			zone->uz_fini(bucket->ub_bucket[i], zone->uz_size);
	zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt);
	bucket->ub_cnt = 0;
}

/*
 * Drains the per cpu caches for a zone.
 *
 * NOTE: This may only be called while the zone is being torn down, and not
 * during normal operation.  This is necessary in order that we do not have
 * to migrate CPUs to drain the per-CPU caches.
 *
 * Arguments:
 *	zone     The zone to drain, must be unlocked.
 *
 * Returns:
 *	Nothing
 */
static void
cache_drain(uma_zone_t zone)
{
	uma_cache_t cache;
	int cpu;

	/*
	 * XXX: It is safe to not lock the per-CPU caches, because we're
	 * tearing down the zone anyway.  I.e., there will be no further use
	 * of the caches at this point.
	 *
	 * XXX: It would be good to be able to assert that the zone is being
	 * torn down to prevent improper use of cache_drain().
	 *
	 * XXX: We lock the zone before passing into bucket_cache_drain() as
	 * it is used elsewhere.  Should the tear-down path be made special
	 * there in some form?
	 */
	CPU_FOREACH(cpu) {
		cache = &zone->uz_cpu[cpu];
		bucket_drain(zone, cache->uc_allocbucket);
		bucket_drain(zone, cache->uc_freebucket);
		if (cache->uc_allocbucket != NULL)
			bucket_free(zone, cache->uc_allocbucket, NULL);
		if (cache->uc_freebucket != NULL)
			bucket_free(zone, cache->uc_freebucket, NULL);
		cache->uc_allocbucket = cache->uc_freebucket = NULL;
	}
	ZONE_LOCK(zone);
	bucket_cache_drain(zone);
	ZONE_UNLOCK(zone);
}
static void
cache_shrink(uma_zone_t zone)
{

	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
		return;

	ZONE_LOCK(zone);
	zone->uz_count = (zone->uz_count_min + zone->uz_count) / 2;
	ZONE_UNLOCK(zone);
}

static void
cache_drain_safe_cpu(uma_zone_t zone)
{
	uma_cache_t cache;
	uma_bucket_t b1, b2;

	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
		return;

	b1 = b2 = NULL;
	ZONE_LOCK(zone);
	critical_enter();
	cache = &zone->uz_cpu[curcpu];
	if (cache->uc_allocbucket) {
		if (cache->uc_allocbucket->ub_cnt != 0)
			LIST_INSERT_HEAD(&zone->uz_buckets,
			    cache->uc_allocbucket, ub_link);
		else
			b1 = cache->uc_allocbucket;
		cache->uc_allocbucket = NULL;
	}
	if (cache->uc_freebucket) {
		if (cache->uc_freebucket->ub_cnt != 0)
			LIST_INSERT_HEAD(&zone->uz_buckets,
			    cache->uc_freebucket, ub_link);
		else
			b2 = cache->uc_freebucket;
		cache->uc_freebucket = NULL;
	}
	critical_exit();
	ZONE_UNLOCK(zone);
	if (b1)
		bucket_free(zone, b1, NULL);
	if (b2)
		bucket_free(zone, b2, NULL);
}
/*
 * Safely drain the per-CPU caches of a zone (or of all zones, if zone is
 * NULL) into each zone's bucket cache.  This is an expensive call because
 * it needs to bind to each CPU in turn and enter a critical section on it
 * in order to safely access its cache buckets.
 * The zone lock must not be held when calling this function.
 */
static void
cache_drain_safe(uma_zone_t zone)
{
	int cpu;

	/*
	 * Polite bucket size shrinking was not enough, shrink aggressively.
	 */
	if (zone)
		cache_shrink(zone);
	else
		zone_foreach(cache_shrink);

	CPU_FOREACH(cpu) {
		thread_lock(curthread);
		sched_bind(curthread, cpu);
		thread_unlock(curthread);

		if (zone)
			cache_drain_safe_cpu(zone);
		else
			zone_foreach(cache_drain_safe_cpu);
	}
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);
}

/*
 * Drain the cached buckets from a zone.  Expects a locked zone on entry.
 */
static void
bucket_cache_drain(uma_zone_t zone)
{
	uma_bucket_t bucket;

	/*
	 * Drain the bucket queues and free the buckets, we just keep two per
	 * cpu (alloc/free).
	 */
	while ((bucket = LIST_FIRST(&zone->uz_buckets)) != NULL) {
		LIST_REMOVE(bucket, ub_link);
		ZONE_UNLOCK(zone);
		bucket_drain(zone, bucket);
		bucket_free(zone, bucket, NULL);
		ZONE_LOCK(zone);
	}

	/*
	 * Shrink further bucket sizes.  Price of single zone lock collision
	 * is probably lower than price of global cache drain.
	 */
	if (zone->uz_count > zone->uz_count_min)
		zone->uz_count--;
}

static void
keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
{
	uint8_t *mem;
	int i;
	uint8_t flags;

	mem = slab->us_data;
	flags = slab->us_flags;
	i = start;
	if (keg->uk_fini != NULL) {
		for (i--; i > -1; i--)
			keg->uk_fini(slab->us_data + (keg->uk_rsize * i),
			    keg->uk_size);
	}
	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
		zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
#ifdef UMA_DEBUG
	printf("%s: Returning %d bytes.\n", keg->uk_name,
	    PAGE_SIZE * keg->uk_ppera);
#endif
	keg->uk_freef(mem, PAGE_SIZE * keg->uk_ppera, flags);
}

/*
 * Frees pages from a keg back to the system.  This is done on demand from
 * the pageout daemon.
 *
 * Returns nothing.
 */
static void
keg_drain(uma_keg_t keg)
{
	struct slabhead freeslabs = { 0 };
	uma_slab_t slab;
	uma_slab_t n;

	/*
	 * We don't want to take pages from statically allocated kegs at this
	 * time
	 */
	if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
		return;

#ifdef UMA_DEBUG
	printf("%s free items: %u\n", keg->uk_name, keg->uk_free);
#endif
	KEG_LOCK(keg);
	if (keg->uk_free == 0)
		goto finished;

	slab = LIST_FIRST(&keg->uk_free_slab);
	while (slab) {
		n = LIST_NEXT(slab, us_link);

		/* We have nowhere to free these to */
		if (slab->us_flags & UMA_SLAB_BOOT) {
			slab = n;
			continue;
		}

		LIST_REMOVE(slab, us_link);
		keg->uk_pages -= keg->uk_ppera;
		keg->uk_free -= keg->uk_ipers;

		if (keg->uk_flags & UMA_ZONE_HASH)
			UMA_HASH_REMOVE(&keg->uk_hash, slab, slab->us_data);

		SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);

		slab = n;
	}
finished:
	KEG_UNLOCK(keg);

	while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
		SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
		keg_free_slab(keg, slab, keg->uk_ipers);
	}
}
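
/*
 * Illustrative note (not part of the original code): keg_drain() detaches
 * the slabs it intends to free onto the local freeslabs list while holding
 * the keg lock, then drops the lock before calling keg_free_slab(), so
 * that the fini callbacks and page_free() run without the keg lock held.
 */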
static void
zone_drain_wait(uma_zone_t zone, int waitok)
{

	/*
	 * Set draining to interlock with zone_dtor() so we can release our
	 * locks as we go.  Only dtor() should do a WAITOK call since it
	 * is the only call that knows the structure will still be available
	 * when it wakes up.
	 */
	ZONE_LOCK(zone);
	while (zone->uz_flags & UMA_ZFLAG_DRAINING) {
		if (waitok == M_NOWAIT)
			goto out;
		msleep(zone, zone->uz_lockptr, PVM, "zonedrain", 1);
	}
	zone->uz_flags |= UMA_ZFLAG_DRAINING;
	bucket_cache_drain(zone);
	ZONE_UNLOCK(zone);
	/*
	 * The DRAINING flag protects us from being freed while
	 * we're running.  Normally the uma_rwlock would protect us but we
	 * must be able to release and acquire the right lock for each keg.
	 */
	zone_foreach_keg(zone, &keg_drain);
	ZONE_LOCK(zone);
	zone->uz_flags &= ~UMA_ZFLAG_DRAINING;
	wakeup(zone);
out:
	ZONE_UNLOCK(zone);
}

void
zone_drain(uma_zone_t zone)
{

	zone_drain_wait(zone, M_NOWAIT);
}

/*
 * Allocate a new slab for a keg.  This does not insert the slab onto a list.
 *
 * Arguments:
 *	wait  Shall we wait?
 *
 * Returns:
 *	The slab that was allocated or NULL if there is no memory and the
 *	caller specified M_NOWAIT.
 */
static uma_slab_t
keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int wait)
{
	uma_alloc allocf;
	uma_slab_t slab;
	uint8_t *mem;
	uint8_t flags;
	int i;

	mtx_assert(&keg->uk_lock, MA_OWNED);
	slab = NULL;
	mem = NULL;

#ifdef UMA_DEBUG
	printf("alloc_slab:  Allocating a new slab for %s\n", keg->uk_name);
#endif
	allocf = keg->uk_allocf;
	KEG_UNLOCK(keg);

	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
		slab = zone_alloc_item(keg->uk_slabzone, NULL, wait);
		if (slab == NULL)
			goto out;
	}

	/*
	 * This reproduces the old vm_zone behavior of zero filling pages the
	 * first time they are added to a zone.
	 *
	 * Malloced items are zeroed in uma_zalloc.
	 */

	if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
		wait |= M_ZERO;
	else
		wait &= ~M_ZERO;

	if (keg->uk_flags & UMA_ZONE_NODUMP)
		wait |= M_NODUMP;
	/* zone is passed for legacy reasons. */
	mem = allocf(zone, keg->uk_ppera * PAGE_SIZE, &flags, wait);
	if (mem == NULL) {
		if (keg->uk_flags & UMA_ZONE_OFFPAGE)
			zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
		slab = NULL;
		goto out;
	}

	/* Point the slab into the allocated memory */
	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
		slab = (uma_slab_t )(mem + keg->uk_pgoff);

	if (keg->uk_flags & UMA_ZONE_VTOSLAB)
		for (i = 0; i < keg->uk_ppera; i++)
			vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);

	slab->us_keg = keg;
	slab->us_data = mem;
	slab->us_freecount = keg->uk_ipers;
	slab->us_flags = flags;
	BIT_FILL(SLAB_SETSIZE, &slab->us_free);
#ifdef INVARIANTS
	BIT_ZERO(SLAB_SETSIZE, &slab->us_debugfree);
#endif

	if (keg->uk_init != NULL) {
		for (i = 0; i < keg->uk_ipers; i++)
			if (keg->uk_init(slab->us_data + (keg->uk_rsize * i),
			    keg->uk_size, wait) != 0)
				break;
		if (i != keg->uk_ipers) {
			keg_free_slab(keg, slab, i);
			slab = NULL;
			goto out;
		}
	}
out:
	KEG_LOCK(keg);

	if (slab != NULL) {
		if (keg->uk_flags & UMA_ZONE_HASH)
			UMA_HASH_INSERT(&keg->uk_hash, slab, mem);

		keg->uk_pages += keg->uk_ppera;
		keg->uk_free += keg->uk_ipers;
	}

	return (slab);
}
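
/*
 * Illustrative note (not part of the original code): for a non-OFFPAGE keg
 * the slab header lives inside the page run itself, at mem + uk_pgoff,
 * while an OFFPAGE keg allocates the header separately from uk_slabzone
 * and, for HASH kegs, finds it again via the address hash rather than by
 * offset.
 */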
/*
 * This function is intended to be used early on in place of page_alloc() so
 * that we may use the boot time page cache to satisfy allocations before
 * the VM is ready.
 */
static void *
startup_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *pflag, int wait)
{
	uma_keg_t keg;
	uma_slab_t tmps;
	int pages, check_pages;

	keg = zone_first_keg(zone);
	pages = howmany(bytes, PAGE_SIZE);
	check_pages = pages - 1;
	KASSERT(pages > 0, ("startup_alloc can't reserve 0 pages\n"));

	/*
	 * Check our small startup cache to see if it has pages remaining.
	 */
	mtx_lock(&uma_boot_pages_mtx);

	/* First check if we have enough room. */
	tmps = LIST_FIRST(&uma_boot_pages);
	while (tmps != NULL && check_pages-- > 0)
		tmps = LIST_NEXT(tmps, us_link);
	if (tmps != NULL) {
		/*
		 * It's ok to lose tmps references.  The last one will
		 * have tmps->us_data pointing to the start address of
		 * "pages" contiguous pages of memory.
		 */
		while (pages-- > 0) {
			tmps = LIST_FIRST(&uma_boot_pages);
			LIST_REMOVE(tmps, us_link);
		}
		mtx_unlock(&uma_boot_pages_mtx);
		*pflag = tmps->us_flags;
		return (tmps->us_data);
	}
	mtx_unlock(&uma_boot_pages_mtx);
	if (booted < UMA_STARTUP2)
		panic("UMA: Increase vm.boot_pages");
	/*
	 * Now that we've booted, reset these users to their real allocator.
	 */
#ifdef UMA_MD_SMALL_ALLOC
	keg->uk_allocf = (keg->uk_ppera > 1) ? page_alloc : uma_small_alloc;
#else
	keg->uk_allocf = page_alloc;
#endif
	return keg->uk_allocf(zone, bytes, pflag, wait);
}
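
/*
 * Illustrative note (not part of the original code): uma_boot_pages is
 * stocked during early boot with a number of pages controlled by the
 * vm.boot_pages tunable.  Once booted reaches UMA_STARTUP2, a miss here
 * simply switches the keg over to its real allocator; before that point
 * there is no fallback, which is why the panic message above suggests
 * raising vm.boot_pages.
 */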
11218355f576SJeff Roberson */ 11228355f576SJeff Roberson static void * 1123f2c2231eSRyan Stone noobj_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *flags, int wait) 11248355f576SJeff Roberson { 1125a4915c21SAttilio Rao TAILQ_HEAD(, vm_page) alloctail; 1126a4915c21SAttilio Rao u_long npages; 1127b245ac95SAlan Cox vm_offset_t retkva, zkva; 1128a4915c21SAttilio Rao vm_page_t p, p_next; 1129e20a199fSJeff Roberson uma_keg_t keg; 11308355f576SJeff Roberson 1131a4915c21SAttilio Rao TAILQ_INIT(&alloctail); 1132e20a199fSJeff Roberson keg = zone_first_keg(zone); 1133a4915c21SAttilio Rao 1134a4915c21SAttilio Rao npages = howmany(bytes, PAGE_SIZE); 1135a4915c21SAttilio Rao while (npages > 0) { 1136a4915c21SAttilio Rao p = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT | 1137a4915c21SAttilio Rao VM_ALLOC_WIRED | VM_ALLOC_NOOBJ); 1138a4915c21SAttilio Rao if (p != NULL) { 1139a4915c21SAttilio Rao /* 1140a4915c21SAttilio Rao * Since the page does not belong to an object, its 1141a4915c21SAttilio Rao * listq is unused. 1142a4915c21SAttilio Rao */ 1143a4915c21SAttilio Rao TAILQ_INSERT_TAIL(&alloctail, p, listq); 1144a4915c21SAttilio Rao npages--; 1145a4915c21SAttilio Rao continue; 1146a4915c21SAttilio Rao } 1147a4915c21SAttilio Rao if (wait & M_WAITOK) { 1148a4915c21SAttilio Rao VM_WAIT; 1149a4915c21SAttilio Rao continue; 1150a4915c21SAttilio Rao } 11518355f576SJeff Roberson 11528355f576SJeff Roberson /* 1153a4915c21SAttilio Rao * Page allocation failed, free intermediate pages and 1154a4915c21SAttilio Rao * exit. 11558355f576SJeff Roberson */ 1156a4915c21SAttilio Rao TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) { 1157087a6132SAlan Cox vm_page_unwire(p, PQ_NONE); 1158b245ac95SAlan Cox vm_page_free(p); 1159b245ac95SAlan Cox } 1160a4915c21SAttilio Rao return (NULL); 1161b245ac95SAlan Cox } 11628355f576SJeff Roberson *flags = UMA_SLAB_PRIV; 1163a4915c21SAttilio Rao zkva = keg->uk_kva + 1164a4915c21SAttilio Rao atomic_fetchadd_long(&keg->uk_offset, round_page(bytes)); 1165a4915c21SAttilio Rao retkva = zkva; 1166a4915c21SAttilio Rao TAILQ_FOREACH(p, &alloctail, listq) { 1167a4915c21SAttilio Rao pmap_qenter(zkva, &p, 1); 1168a4915c21SAttilio Rao zkva += PAGE_SIZE; 1169a4915c21SAttilio Rao } 11708355f576SJeff Roberson 11718355f576SJeff Roberson return ((void *)retkva); 11728355f576SJeff Roberson } 11738355f576SJeff Roberson 11748355f576SJeff Roberson /* 11758355f576SJeff Roberson * Frees a number of pages to the system 11768355f576SJeff Roberson * 11778355f576SJeff Roberson * Arguments: 11788355f576SJeff Roberson * mem A pointer to the memory to be freed 11798355f576SJeff Roberson * size The size of the memory being freed 11808355f576SJeff Roberson * flags The original p->us_flags field 11818355f576SJeff Roberson * 11828355f576SJeff Roberson * Returns: 11838355f576SJeff Roberson * Nothing 11848355f576SJeff Roberson */ 11858355f576SJeff Roberson static void 1186f2c2231eSRyan Stone page_free(void *mem, vm_size_t size, uint8_t flags) 11878355f576SJeff Roberson { 11885df87b21SJeff Roberson struct vmem *vmem; 11893370c5bfSJeff Roberson 11908355f576SJeff Roberson if (flags & UMA_SLAB_KMEM) 11915df87b21SJeff Roberson vmem = kmem_arena; 1192aea6e893SAlan Cox else if (flags & UMA_SLAB_KERNEL) 11935df87b21SJeff Roberson vmem = kernel_arena; 11948355f576SJeff Roberson else 1195aea6e893SAlan Cox panic("UMA: page_free used with invalid flags %d", flags); 11968355f576SJeff Roberson 11975df87b21SJeff Roberson kmem_free(vmem, (vm_offset_t)mem, size); 11988355f576SJeff Roberson } 11998355f576SJeff Roberson 12008355f576SJeff 
Roberson /* 12018355f576SJeff Roberson * Zero fill initializer 12028355f576SJeff Roberson * 12038355f576SJeff Roberson * Arguments/Returns follow uma_init specifications 12048355f576SJeff Roberson */ 1205b23f72e9SBrian Feldman static int 1206b23f72e9SBrian Feldman zero_init(void *mem, int size, int flags) 12078355f576SJeff Roberson { 12088355f576SJeff Roberson bzero(mem, size); 1209b23f72e9SBrian Feldman return (0); 12108355f576SJeff Roberson } 12118355f576SJeff Roberson 12128355f576SJeff Roberson /* 1213e20a199fSJeff Roberson * Finish creating a small uma keg. This calculates ipers and the keg size. 12148355f576SJeff Roberson * 12158355f576SJeff Roberson * Arguments 1216e20a199fSJeff Roberson * keg The keg we should initialize 12178355f576SJeff Roberson * 12188355f576SJeff Roberson * Returns 12198355f576SJeff Roberson * Nothing 12208355f576SJeff Roberson */ 12218355f576SJeff Roberson static void 1222e20a199fSJeff Roberson keg_small_init(uma_keg_t keg) 12238355f576SJeff Roberson { 1224244f4554SBosko Milekic u_int rsize; 1225244f4554SBosko Milekic u_int memused; 1226244f4554SBosko Milekic u_int wastedspace; 1227244f4554SBosko Milekic u_int shsize; 12288355f576SJeff Roberson 1229ad97af7eSGleb Smirnoff if (keg->uk_flags & UMA_ZONE_PCPU) { 1230e28a647dSGleb Smirnoff u_int ncpus = mp_ncpus ? mp_ncpus : MAXCPU; 1231e28a647dSGleb Smirnoff 1232ad97af7eSGleb Smirnoff keg->uk_slabsize = sizeof(struct pcpu); 1233e28a647dSGleb Smirnoff keg->uk_ppera = howmany(ncpus * sizeof(struct pcpu), 1234ad97af7eSGleb Smirnoff PAGE_SIZE); 1235ad97af7eSGleb Smirnoff } else { 1236ad97af7eSGleb Smirnoff keg->uk_slabsize = UMA_SLAB_SIZE; 1237ad97af7eSGleb Smirnoff keg->uk_ppera = 1; 1238ad97af7eSGleb Smirnoff } 1239ad97af7eSGleb Smirnoff 1240ef72505eSJeff Roberson /* 1241ef72505eSJeff Roberson * Calculate the size of each allocation (rsize) according to 1242ef72505eSJeff Roberson * alignment. If the requested size is smaller than we have 1243ef72505eSJeff Roberson * allocation bits for, we round it up. 1244ef72505eSJeff Roberson */ 1245099a0e58SBosko Milekic rsize = keg->uk_size; 1246ef72505eSJeff Roberson if (rsize < keg->uk_slabsize / SLAB_SETSIZE) 1247ef72505eSJeff Roberson rsize = keg->uk_slabsize / SLAB_SETSIZE; 1248099a0e58SBosko Milekic if (rsize & keg->uk_align) 1249099a0e58SBosko Milekic rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1); 1250099a0e58SBosko Milekic keg->uk_rsize = rsize; 1251ad97af7eSGleb Smirnoff 1252ad97af7eSGleb Smirnoff KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0 || 1253ad97af7eSGleb Smirnoff keg->uk_rsize < sizeof(struct pcpu), 1254ad97af7eSGleb Smirnoff ("%s: size %u too large", __func__, keg->uk_rsize)); 12558355f576SJeff Roberson 1256ef72505eSJeff Roberson if (keg->uk_flags & UMA_ZONE_OFFPAGE) 12572864dbbfSGleb Smirnoff shsize = 0; 1258ef72505eSJeff Roberson else 1259244f4554SBosko Milekic shsize = sizeof(struct uma_slab); 12608355f576SJeff Roberson 1261ad97af7eSGleb Smirnoff keg->uk_ipers = (keg->uk_slabsize - shsize) / rsize; 1262ef72505eSJeff Roberson KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE, 1263ad97af7eSGleb Smirnoff ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers)); 1264ad97af7eSGleb Smirnoff 1265244f4554SBosko Milekic memused = keg->uk_ipers * rsize + shsize; 1266ad97af7eSGleb Smirnoff wastedspace = keg->uk_slabsize - memused; 1267244f4554SBosko Milekic 126820e8e865SBosko Milekic /* 1269244f4554SBosko Milekic * We can't do OFFPAGE if we're internal or if we've been 127020e8e865SBosko Milekic * asked not to go to the VM for buckets.
If we do this we 12716fd34d6fSJeff Roberson * may end up going to the VM for slabs which we do not 12726fd34d6fSJeff Roberson * want to do if we're UMA_ZFLAG_CACHEONLY as a result 12736fd34d6fSJeff Roberson * of UMA_ZONE_VM, which clearly forbids it. 127420e8e865SBosko Milekic */ 1275099a0e58SBosko Milekic if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) || 1276099a0e58SBosko Milekic (keg->uk_flags & UMA_ZFLAG_CACHEONLY)) 12778355f576SJeff Roberson return; 1278244f4554SBosko Milekic 1279ef72505eSJeff Roberson /* 1280ef72505eSJeff Roberson * See if using an OFFPAGE slab will limit our waste. Only do 1281ef72505eSJeff Roberson * this if it permits more items per-slab. 1282ef72505eSJeff Roberson * 1283ef72505eSJeff Roberson * XXX We could try growing slabsize to limit max waste as well. 1284ef72505eSJeff Roberson * Historically this was not done because the VM could not 1285ef72505eSJeff Roberson * efficiently handle contiguous allocations. 1286ef72505eSJeff Roberson */ 1287ad97af7eSGleb Smirnoff if ((wastedspace >= keg->uk_slabsize / UMA_MAX_WASTE) && 1288ad97af7eSGleb Smirnoff (keg->uk_ipers < (keg->uk_slabsize / keg->uk_rsize))) { 1289ad97af7eSGleb Smirnoff keg->uk_ipers = keg->uk_slabsize / keg->uk_rsize; 1290ef72505eSJeff Roberson KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE, 1291ad97af7eSGleb Smirnoff ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers)); 1292244f4554SBosko Milekic #ifdef UMA_DEBUG 1293244f4554SBosko Milekic printf("UMA decided we need offpage slab headers for " 1294e20a199fSJeff Roberson "keg: %s, calculated wastedspace = %d, " 1295244f4554SBosko Milekic "maximum wasted space allowed = %d, " 1296244f4554SBosko Milekic "calculated ipers = %d, " 1297e20a199fSJeff Roberson "new wasted space = %d\n", keg->uk_name, wastedspace, 1298ad97af7eSGleb Smirnoff keg->uk_slabsize / UMA_MAX_WASTE, keg->uk_ipers, 1299ad97af7eSGleb Smirnoff keg->uk_slabsize - keg->uk_ipers * keg->uk_rsize); 1300244f4554SBosko Milekic #endif 1301099a0e58SBosko Milekic keg->uk_flags |= UMA_ZONE_OFFPAGE; 13028355f576SJeff Roberson } 1303ad97af7eSGleb Smirnoff 1304ad97af7eSGleb Smirnoff if ((keg->uk_flags & UMA_ZONE_OFFPAGE) && 1305ad97af7eSGleb Smirnoff (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0) 1306ad97af7eSGleb Smirnoff keg->uk_flags |= UMA_ZONE_HASH; 13078355f576SJeff Roberson } 13088355f576SJeff Roberson 13098355f576SJeff Roberson /* 1310e20a199fSJeff Roberson * Finish creating a large (> UMA_SLAB_SIZE) uma keg. Just give in and do 13118355f576SJeff Roberson * OFFPAGE for now. When I can allow for more dynamic slab sizes this will be 13128355f576SJeff Roberson * more complicated.
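 * (For example, assuming 4 KB pages, a 9 KB item gives uk_ppera = howmany(9216, 4096) = 3 and uk_ipers = 1: one item per three-page slab.)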
13138355f576SJeff Roberson * 13148355f576SJeff Roberson * Arguments 1315e20a199fSJeff Roberson * keg The keg we should initialize 13168355f576SJeff Roberson * 13178355f576SJeff Roberson * Returns 13188355f576SJeff Roberson * Nothing 13198355f576SJeff Roberson */ 13208355f576SJeff Roberson static void 1321e20a199fSJeff Roberson keg_large_init(uma_keg_t keg) 13228355f576SJeff Roberson { 1323cec48e00SAlexander Motin u_int shsize; 13248355f576SJeff Roberson 1325e20a199fSJeff Roberson KASSERT(keg != NULL, ("Keg is null in keg_large_init")); 1326099a0e58SBosko Milekic KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0, 1327e20a199fSJeff Roberson ("keg_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY keg")); 1328ad97af7eSGleb Smirnoff KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0, 1329ad97af7eSGleb Smirnoff ("%s: Cannot large-init a UMA_ZONE_PCPU keg", __func__)); 133020e8e865SBosko Milekic 1331ad97af7eSGleb Smirnoff keg->uk_ppera = howmany(keg->uk_size, PAGE_SIZE); 1332ad97af7eSGleb Smirnoff keg->uk_slabsize = keg->uk_ppera * PAGE_SIZE; 1333099a0e58SBosko Milekic keg->uk_ipers = 1; 1334e9a069d8SJohn Baldwin keg->uk_rsize = keg->uk_size; 1335e9a069d8SJohn Baldwin 1336e9a069d8SJohn Baldwin /* We can't do OFFPAGE if we're internal, bail out here. */ 1337e9a069d8SJohn Baldwin if (keg->uk_flags & UMA_ZFLAG_INTERNAL) 1338e9a069d8SJohn Baldwin return; 13398355f576SJeff Roberson 1340cec48e00SAlexander Motin /* Check whether we have enough space to not do OFFPAGE. */ 1341cec48e00SAlexander Motin if ((keg->uk_flags & UMA_ZONE_OFFPAGE) == 0) { 1342cec48e00SAlexander Motin shsize = sizeof(struct uma_slab); 1343cec48e00SAlexander Motin if (shsize & UMA_ALIGN_PTR) 1344cec48e00SAlexander Motin shsize = (shsize & ~UMA_ALIGN_PTR) + 1345cec48e00SAlexander Motin (UMA_ALIGN_PTR + 1); 1346cec48e00SAlexander Motin 1347cec48e00SAlexander Motin if ((PAGE_SIZE * keg->uk_ppera) - keg->uk_rsize < shsize) 1348099a0e58SBosko Milekic keg->uk_flags |= UMA_ZONE_OFFPAGE; 1349cec48e00SAlexander Motin } 1350cec48e00SAlexander Motin 1351cec48e00SAlexander Motin if ((keg->uk_flags & UMA_ZONE_OFFPAGE) && 1352cec48e00SAlexander Motin (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0) 1353099a0e58SBosko Milekic keg->uk_flags |= UMA_ZONE_HASH; 13548355f576SJeff Roberson } 13558355f576SJeff Roberson 1356e20a199fSJeff Roberson static void 1357e20a199fSJeff Roberson keg_cachespread_init(uma_keg_t keg) 1358e20a199fSJeff Roberson { 1359e20a199fSJeff Roberson int alignsize; 1360e20a199fSJeff Roberson int trailer; 1361e20a199fSJeff Roberson int pages; 1362e20a199fSJeff Roberson int rsize; 1363e20a199fSJeff Roberson 1364ad97af7eSGleb Smirnoff KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0, 1365ad97af7eSGleb Smirnoff ("%s: Cannot cachespread-init a UMA_ZONE_PCPU keg", __func__)); 1366ad97af7eSGleb Smirnoff 1367e20a199fSJeff Roberson alignsize = keg->uk_align + 1; 1368e20a199fSJeff Roberson rsize = keg->uk_size; 1369e20a199fSJeff Roberson /* 1370e20a199fSJeff Roberson * We want one item to start on every align boundary in a page. To 1371e20a199fSJeff Roberson * do this we will span pages. We will also extend the item by the 1372e20a199fSJeff Roberson * size of align if it is an even multiple of align. Otherwise, it 1373e20a199fSJeff Roberson * would fall on the same boundary every time. 
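 * (For example, assuming 64-byte cache lines, i.e. uk_align == 63, a 128-byte item is an even multiple of the line size, so rsize is extended to 192. Successive items then advance an odd number of lines and their start offsets cycle through every 64-byte boundary in a page instead of hitting only the even ones.)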
1374e20a199fSJeff Roberson */ 1375e20a199fSJeff Roberson if (rsize & keg->uk_align) 1376e20a199fSJeff Roberson rsize = (rsize & ~keg->uk_align) + alignsize; 1377e20a199fSJeff Roberson if ((rsize & alignsize) == 0) 1378e20a199fSJeff Roberson rsize += alignsize; 1379e20a199fSJeff Roberson trailer = rsize - keg->uk_size; 1380e20a199fSJeff Roberson pages = (rsize * (PAGE_SIZE / alignsize)) / PAGE_SIZE; 1381e20a199fSJeff Roberson pages = MIN(pages, (128 * 1024) / PAGE_SIZE); 1382e20a199fSJeff Roberson keg->uk_rsize = rsize; 1383e20a199fSJeff Roberson keg->uk_ppera = pages; 1384ad97af7eSGleb Smirnoff keg->uk_slabsize = UMA_SLAB_SIZE; 1385e20a199fSJeff Roberson keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize; 1386e20a199fSJeff Roberson keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB; 13872367b4ddSDimitry Andric KASSERT(keg->uk_ipers <= SLAB_SETSIZE, 138842321809SGleb Smirnoff ("%s: keg->uk_ipers too high(%d) increase max_ipers", __func__, 1389e20a199fSJeff Roberson keg->uk_ipers)); 1390e20a199fSJeff Roberson } 1391e20a199fSJeff Roberson 13928355f576SJeff Roberson /* 1393099a0e58SBosko Milekic * Keg header ctor. This initializes all fields, locks, etc., and inserts 1394099a0e58SBosko Milekic * the keg onto the global keg list. 13958355f576SJeff Roberson * 13968355f576SJeff Roberson * Arguments/Returns follow uma_ctor specifications 1397099a0e58SBosko Milekic * udata Actually uma_kctor_args 1398099a0e58SBosko Milekic */ 1399b23f72e9SBrian Feldman static int 1400b23f72e9SBrian Feldman keg_ctor(void *mem, int size, void *udata, int flags) 1401099a0e58SBosko Milekic { 1402099a0e58SBosko Milekic struct uma_kctor_args *arg = udata; 1403099a0e58SBosko Milekic uma_keg_t keg = mem; 1404099a0e58SBosko Milekic uma_zone_t zone; 1405099a0e58SBosko Milekic 1406099a0e58SBosko Milekic bzero(keg, size); 1407099a0e58SBosko Milekic keg->uk_size = arg->size; 1408099a0e58SBosko Milekic keg->uk_init = arg->uminit; 1409099a0e58SBosko Milekic keg->uk_fini = arg->fini; 1410099a0e58SBosko Milekic keg->uk_align = arg->align; 1411099a0e58SBosko Milekic keg->uk_free = 0; 14126fd34d6fSJeff Roberson keg->uk_reserve = 0; 1413099a0e58SBosko Milekic keg->uk_pages = 0; 1414099a0e58SBosko Milekic keg->uk_flags = arg->flags; 1415099a0e58SBosko Milekic keg->uk_allocf = page_alloc; 1416099a0e58SBosko Milekic keg->uk_freef = page_free; 1417099a0e58SBosko Milekic keg->uk_slabzone = NULL; 1418099a0e58SBosko Milekic 1419099a0e58SBosko Milekic /* 1420099a0e58SBosko Milekic * The master zone is passed to us at keg-creation time.
1421099a0e58SBosko Milekic */ 1422099a0e58SBosko Milekic zone = arg->zone; 1423e20a199fSJeff Roberson keg->uk_name = zone->uz_name; 1424099a0e58SBosko Milekic 1425099a0e58SBosko Milekic if (arg->flags & UMA_ZONE_VM) 1426099a0e58SBosko Milekic keg->uk_flags |= UMA_ZFLAG_CACHEONLY; 1427099a0e58SBosko Milekic 1428099a0e58SBosko Milekic if (arg->flags & UMA_ZONE_ZINIT) 1429099a0e58SBosko Milekic keg->uk_init = zero_init; 1430099a0e58SBosko Milekic 1431*cfcae3f8SGleb Smirnoff if (arg->flags & UMA_ZONE_MALLOC) 1432e20a199fSJeff Roberson keg->uk_flags |= UMA_ZONE_VTOSLAB; 1433e20a199fSJeff Roberson 1434ad97af7eSGleb Smirnoff if (arg->flags & UMA_ZONE_PCPU) 1435ad97af7eSGleb Smirnoff #ifdef SMP 1436ad97af7eSGleb Smirnoff keg->uk_flags |= UMA_ZONE_OFFPAGE; 1437ad97af7eSGleb Smirnoff #else 1438ad97af7eSGleb Smirnoff keg->uk_flags &= ~UMA_ZONE_PCPU; 1439ad97af7eSGleb Smirnoff #endif 1440ad97af7eSGleb Smirnoff 1441ef72505eSJeff Roberson if (keg->uk_flags & UMA_ZONE_CACHESPREAD) { 1442e20a199fSJeff Roberson keg_cachespread_init(keg); 1443244f4554SBosko Milekic } else { 1444ef72505eSJeff Roberson if (keg->uk_size > (UMA_SLAB_SIZE - sizeof(struct uma_slab))) 1445e20a199fSJeff Roberson keg_large_init(keg); 1446244f4554SBosko Milekic else 1447e20a199fSJeff Roberson keg_small_init(keg); 1448244f4554SBosko Milekic } 1449099a0e58SBosko Milekic 1450*cfcae3f8SGleb Smirnoff if (keg->uk_flags & UMA_ZONE_OFFPAGE) 1451099a0e58SBosko Milekic keg->uk_slabzone = slabzone; 1452099a0e58SBosko Milekic 1453099a0e58SBosko Milekic /* 1454099a0e58SBosko Milekic * If we haven't booted yet we need allocations to go through the 1455099a0e58SBosko Milekic * startup cache until the vm is ready. 1456099a0e58SBosko Milekic */ 1457099a0e58SBosko Milekic if (keg->uk_ppera == 1) { 1458099a0e58SBosko Milekic #ifdef UMA_MD_SMALL_ALLOC 1459099a0e58SBosko Milekic keg->uk_allocf = uma_small_alloc; 1460099a0e58SBosko Milekic keg->uk_freef = uma_small_free; 14618cd02d00SAlan Cox 1462342f1793SAlan Cox if (booted < UMA_STARTUP) 1463099a0e58SBosko Milekic keg->uk_allocf = startup_alloc; 14648cd02d00SAlan Cox #else 14658cd02d00SAlan Cox if (booted < UMA_STARTUP2) 14668cd02d00SAlan Cox keg->uk_allocf = startup_alloc; 14678cd02d00SAlan Cox #endif 1468342f1793SAlan Cox } else if (booted < UMA_STARTUP2 && 1469342f1793SAlan Cox (keg->uk_flags & UMA_ZFLAG_INTERNAL)) 1470e9a069d8SJohn Baldwin keg->uk_allocf = startup_alloc; 1471099a0e58SBosko Milekic 1472099a0e58SBosko Milekic /* 1473af526374SJeff Roberson * Initialize keg's lock 1474099a0e58SBosko Milekic */ 1475af526374SJeff Roberson KEG_LOCK_INIT(keg, (arg->flags & UMA_ZONE_MTXCLASS)); 1476099a0e58SBosko Milekic 1477099a0e58SBosko Milekic /* 1478099a0e58SBosko Milekic * If we're putting the slab header in the actual page we need to 1479099a0e58SBosko Milekic * figure out where in each page it goes. This calculates a right 1480099a0e58SBosko Milekic * justified offset into the memory on an ALIGN_PTR boundary. 
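 * (For example, assuming a one-page slab on 4 KB pages and a slab header that rounds up to 64 bytes under UMA_ALIGN_PTR, uk_pgoff would be 4096 - 64 = 4032: items occupy the front of the page and the header sits flush against its end.)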
1481099a0e58SBosko Milekic */ 1482099a0e58SBosko Milekic if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) { 1483244f4554SBosko Milekic u_int totsize; 1484099a0e58SBosko Milekic 1485099a0e58SBosko Milekic /* Size of the slab struct and free list */ 1486ef72505eSJeff Roberson totsize = sizeof(struct uma_slab); 1487ef72505eSJeff Roberson 1488099a0e58SBosko Milekic if (totsize & UMA_ALIGN_PTR) 1489099a0e58SBosko Milekic totsize = (totsize & ~UMA_ALIGN_PTR) + 1490099a0e58SBosko Milekic (UMA_ALIGN_PTR + 1); 1491ad97af7eSGleb Smirnoff keg->uk_pgoff = (PAGE_SIZE * keg->uk_ppera) - totsize; 1492244f4554SBosko Milekic 1493244f4554SBosko Milekic /* 1494244f4554SBosko Milekic * The only way the following is possible is if with our 1495244f4554SBosko Milekic * UMA_ALIGN_PTR adjustments we are now bigger than 1496244f4554SBosko Milekic * UMA_SLAB_SIZE. I haven't checked whether this is 1497244f4554SBosko Milekic * mathematically possible for all cases, so we make 1498244f4554SBosko Milekic * sure here anyway. 1499244f4554SBosko Milekic */ 1500ef72505eSJeff Roberson totsize = keg->uk_pgoff + sizeof(struct uma_slab); 1501ad97af7eSGleb Smirnoff if (totsize > PAGE_SIZE * keg->uk_ppera) { 1502099a0e58SBosko Milekic printf("zone %s ipers %d rsize %d size %d\n", 1503099a0e58SBosko Milekic zone->uz_name, keg->uk_ipers, keg->uk_rsize, 1504099a0e58SBosko Milekic keg->uk_size); 1505aea6e893SAlan Cox panic("UMA slab won't fit."); 1506099a0e58SBosko Milekic } 1507099a0e58SBosko Milekic } 1508099a0e58SBosko Milekic 1509099a0e58SBosko Milekic if (keg->uk_flags & UMA_ZONE_HASH) 1510099a0e58SBosko Milekic hash_alloc(&keg->uk_hash); 1511099a0e58SBosko Milekic 1512099a0e58SBosko Milekic #ifdef UMA_DEBUG 15130b80c1e4SEitan Adler printf("UMA: %s(%p) size %d(%d) flags %#x ipers %d ppera %d out %d free %d\n", 1514e20a199fSJeff Roberson zone->uz_name, zone, keg->uk_size, keg->uk_rsize, keg->uk_flags, 1515e20a199fSJeff Roberson keg->uk_ipers, keg->uk_ppera, 1516e20a199fSJeff Roberson (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free); 1517099a0e58SBosko Milekic #endif 1518099a0e58SBosko Milekic 1519099a0e58SBosko Milekic LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link); 1520099a0e58SBosko Milekic 1521111fbcd5SBryan Venteicher rw_wlock(&uma_rwlock); 1522099a0e58SBosko Milekic LIST_INSERT_HEAD(&uma_kegs, keg, uk_link); 1523111fbcd5SBryan Venteicher rw_wunlock(&uma_rwlock); 1524b23f72e9SBrian Feldman return (0); 1525099a0e58SBosko Milekic } 1526099a0e58SBosko Milekic 1527099a0e58SBosko Milekic /* 1528099a0e58SBosko Milekic * Zone header ctor. This initializes all fields, locks, etc. 
1529099a0e58SBosko Milekic * 1530099a0e58SBosko Milekic * Arguments/Returns follow uma_ctor specifications 1531099a0e58SBosko Milekic * udata Actually uma_zctor_args 15328355f576SJeff Roberson */ 1533b23f72e9SBrian Feldman static int 1534b23f72e9SBrian Feldman zone_ctor(void *mem, int size, void *udata, int flags) 15358355f576SJeff Roberson { 15368355f576SJeff Roberson struct uma_zctor_args *arg = udata; 15378355f576SJeff Roberson uma_zone_t zone = mem; 1538099a0e58SBosko Milekic uma_zone_t z; 1539099a0e58SBosko Milekic uma_keg_t keg; 15408355f576SJeff Roberson 15418355f576SJeff Roberson bzero(zone, size); 15428355f576SJeff Roberson zone->uz_name = arg->name; 15438355f576SJeff Roberson zone->uz_ctor = arg->ctor; 15448355f576SJeff Roberson zone->uz_dtor = arg->dtor; 1545e20a199fSJeff Roberson zone->uz_slab = zone_fetch_slab; 1546099a0e58SBosko Milekic zone->uz_init = NULL; 1547099a0e58SBosko Milekic zone->uz_fini = NULL; 1548099a0e58SBosko Milekic zone->uz_allocs = 0; 1549773df9abSRobert Watson zone->uz_frees = 0; 15502019094aSRobert Watson zone->uz_fails = 0; 1551bf965959SSean Bruno zone->uz_sleeps = 0; 1552fc03d22bSJeff Roberson zone->uz_count = 0; 1553ace66b56SAlexander Motin zone->uz_count_min = 0; 1554e20a199fSJeff Roberson zone->uz_flags = 0; 15552f891cd5SPawel Jakub Dawidek zone->uz_warning = NULL; 15562f891cd5SPawel Jakub Dawidek timevalclear(&zone->uz_ratecheck); 1557e20a199fSJeff Roberson keg = arg->keg; 1558099a0e58SBosko Milekic 1559af526374SJeff Roberson ZONE_LOCK_INIT(zone, (arg->flags & UMA_ZONE_MTXCLASS)); 1560af526374SJeff Roberson 15610095a784SJeff Roberson /* 15620095a784SJeff Roberson * This is a pure cache zone, no kegs. 15630095a784SJeff Roberson */ 15640095a784SJeff Roberson if (arg->import) { 15656fd34d6fSJeff Roberson if (arg->flags & UMA_ZONE_VM) 15666fd34d6fSJeff Roberson arg->flags |= UMA_ZFLAG_CACHEONLY; 15676fd34d6fSJeff Roberson zone->uz_flags = arg->flags; 1568af526374SJeff Roberson zone->uz_size = arg->size; 15690095a784SJeff Roberson zone->uz_import = arg->import; 15700095a784SJeff Roberson zone->uz_release = arg->release; 15710095a784SJeff Roberson zone->uz_arg = arg->arg; 1572af526374SJeff Roberson zone->uz_lockptr = &zone->uz_lock; 1573111fbcd5SBryan Venteicher rw_wlock(&uma_rwlock); 157403175483SAlexander Motin LIST_INSERT_HEAD(&uma_cachezones, zone, uz_link); 1575111fbcd5SBryan Venteicher rw_wunlock(&uma_rwlock); 1576af526374SJeff Roberson goto out; 15770095a784SJeff Roberson } 15780095a784SJeff Roberson 15790095a784SJeff Roberson /* 15800095a784SJeff Roberson * Use the regular zone/keg/slab allocator. 
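 * (zone_import hands items out of the keg's slabs and zone_release returns them; the pure cache zones above installed their own import/release pair instead.)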
15810095a784SJeff Roberson */ 15820095a784SJeff Roberson zone->uz_import = (uma_import)zone_import; 15830095a784SJeff Roberson zone->uz_release = (uma_release)zone_release; 15840095a784SJeff Roberson zone->uz_arg = zone; 15850095a784SJeff Roberson 1586099a0e58SBosko Milekic if (arg->flags & UMA_ZONE_SECONDARY) { 1587099a0e58SBosko Milekic KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg")); 15888355f576SJeff Roberson zone->uz_init = arg->uminit; 1589e221e841SJeff Roberson zone->uz_fini = arg->fini; 1590af526374SJeff Roberson zone->uz_lockptr = &keg->uk_lock; 1591e20a199fSJeff Roberson zone->uz_flags |= UMA_ZONE_SECONDARY; 1592111fbcd5SBryan Venteicher rw_wlock(&uma_rwlock); 1593099a0e58SBosko Milekic ZONE_LOCK(zone); 1594099a0e58SBosko Milekic LIST_FOREACH(z, &keg->uk_zones, uz_link) { 1595099a0e58SBosko Milekic if (LIST_NEXT(z, uz_link) == NULL) { 1596099a0e58SBosko Milekic LIST_INSERT_AFTER(z, zone, uz_link); 1597099a0e58SBosko Milekic break; 1598099a0e58SBosko Milekic } 1599099a0e58SBosko Milekic } 1600099a0e58SBosko Milekic ZONE_UNLOCK(zone); 1601111fbcd5SBryan Venteicher rw_wunlock(&uma_rwlock); 1602e20a199fSJeff Roberson } else if (keg == NULL) { 1603e20a199fSJeff Roberson if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini, 1604e20a199fSJeff Roberson arg->align, arg->flags)) == NULL) 1605b23f72e9SBrian Feldman return (ENOMEM); 1606099a0e58SBosko Milekic } else { 1607099a0e58SBosko Milekic struct uma_kctor_args karg; 1608b23f72e9SBrian Feldman int error; 1609099a0e58SBosko Milekic 1610099a0e58SBosko Milekic /* We should only be here from uma_startup() */ 1611099a0e58SBosko Milekic karg.size = arg->size; 1612099a0e58SBosko Milekic karg.uminit = arg->uminit; 1613099a0e58SBosko Milekic karg.fini = arg->fini; 1614099a0e58SBosko Milekic karg.align = arg->align; 1615099a0e58SBosko Milekic karg.flags = arg->flags; 1616099a0e58SBosko Milekic karg.zone = zone; 1617b23f72e9SBrian Feldman error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg, 1618b23f72e9SBrian Feldman flags); 1619b23f72e9SBrian Feldman if (error) 1620b23f72e9SBrian Feldman return (error); 1621099a0e58SBosko Milekic } 16220095a784SJeff Roberson 1623e20a199fSJeff Roberson /* 1624e20a199fSJeff Roberson * Link in the first keg. 1625e20a199fSJeff Roberson */ 1626e20a199fSJeff Roberson zone->uz_klink.kl_keg = keg; 1627e20a199fSJeff Roberson LIST_INSERT_HEAD(&zone->uz_kegs, &zone->uz_klink, kl_link); 1628af526374SJeff Roberson zone->uz_lockptr = &keg->uk_lock; 1629e20a199fSJeff Roberson zone->uz_size = keg->uk_size; 1630e20a199fSJeff Roberson zone->uz_flags |= (keg->uk_flags & 1631e20a199fSJeff Roberson (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT)); 16328355f576SJeff Roberson 16338355f576SJeff Roberson /* 16348355f576SJeff Roberson * Some internal zones don't have room allocated for the per cpu 16358355f576SJeff Roberson * caches. If we're internal, bail out here. 
16368355f576SJeff Roberson */ 1637099a0e58SBosko Milekic if (keg->uk_flags & UMA_ZFLAG_INTERNAL) { 1638e20a199fSJeff Roberson KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0, 1639099a0e58SBosko Milekic ("Secondary zone requested UMA_ZFLAG_INTERNAL")); 1640b23f72e9SBrian Feldman return (0); 1641099a0e58SBosko Milekic } 16428355f576SJeff Roberson 1643af526374SJeff Roberson out: 1644af526374SJeff Roberson if ((arg->flags & UMA_ZONE_MAXBUCKET) == 0) 1645af526374SJeff Roberson zone->uz_count = bucket_select(zone->uz_size); 16468355f576SJeff Roberson else 1647cae33c14SJeff Roberson zone->uz_count = BUCKET_MAX; 1648ace66b56SAlexander Motin zone->uz_count_min = zone->uz_count; 1649fc03d22bSJeff Roberson 1650b23f72e9SBrian Feldman return (0); 16518355f576SJeff Roberson } 16528355f576SJeff Roberson 16538355f576SJeff Roberson /* 1654099a0e58SBosko Milekic * Keg header dtor. This frees all data, destroys locks, frees the hash 1655099a0e58SBosko Milekic * table and removes the keg from the global list. 16569c2cd7e5SJeff Roberson * 16579c2cd7e5SJeff Roberson * Arguments/Returns follow uma_dtor specifications 16589c2cd7e5SJeff Roberson * udata unused 16599c2cd7e5SJeff Roberson */ 1660099a0e58SBosko Milekic static void 1661099a0e58SBosko Milekic keg_dtor(void *arg, int size, void *udata) 1662099a0e58SBosko Milekic { 1663099a0e58SBosko Milekic uma_keg_t keg; 16649c2cd7e5SJeff Roberson 1665099a0e58SBosko Milekic keg = (uma_keg_t)arg; 1666e20a199fSJeff Roberson KEG_LOCK(keg); 1667099a0e58SBosko Milekic if (keg->uk_free != 0) { 1668a3845534SCraig Rodrigues printf("Freed UMA keg (%s) was not empty (%d items). " 1669099a0e58SBosko Milekic "Lost %d pages of memory.\n", 1670a3845534SCraig Rodrigues keg->uk_name ? keg->uk_name : "", 1671099a0e58SBosko Milekic keg->uk_free, keg->uk_pages); 1672099a0e58SBosko Milekic } 1673e20a199fSJeff Roberson KEG_UNLOCK(keg); 1674099a0e58SBosko Milekic 1675099a0e58SBosko Milekic hash_free(&keg->uk_hash); 1676099a0e58SBosko Milekic 1677e20a199fSJeff Roberson KEG_LOCK_FINI(keg); 1678099a0e58SBosko Milekic } 1679099a0e58SBosko Milekic 1680099a0e58SBosko Milekic /* 1681099a0e58SBosko Milekic * Zone header dtor. 1682099a0e58SBosko Milekic * 1683099a0e58SBosko Milekic * Arguments/Returns follow uma_dtor specifications 1684099a0e58SBosko Milekic * udata unused 1685099a0e58SBosko Milekic */ 16869c2cd7e5SJeff Roberson static void 16879c2cd7e5SJeff Roberson zone_dtor(void *arg, int size, void *udata) 16889c2cd7e5SJeff Roberson { 1689e20a199fSJeff Roberson uma_klink_t klink; 16909c2cd7e5SJeff Roberson uma_zone_t zone; 1691099a0e58SBosko Milekic uma_keg_t keg; 16929c2cd7e5SJeff Roberson 16939c2cd7e5SJeff Roberson zone = (uma_zone_t)arg; 1694e20a199fSJeff Roberson keg = zone_first_keg(zone); 16959643769aSJeff Roberson 1696e20a199fSJeff Roberson if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL)) 16979643769aSJeff Roberson cache_drain(zone); 1698099a0e58SBosko Milekic 1699111fbcd5SBryan Venteicher rw_wlock(&uma_rwlock); 1700099a0e58SBosko Milekic LIST_REMOVE(zone, uz_link); 1701111fbcd5SBryan Venteicher rw_wunlock(&uma_rwlock); 1702099a0e58SBosko Milekic /* 1703099a0e58SBosko Milekic * XXX there are some races here where 1704099a0e58SBosko Milekic * the zone can be drained but zone lock 1705099a0e58SBosko Milekic * released and then refilled before we 1706099a0e58SBosko Milekic * remove it... we don't care for now 1707099a0e58SBosko Milekic */ 1708e20a199fSJeff Roberson zone_drain_wait(zone, M_WAITOK); 1709e20a199fSJeff Roberson /* 1710e20a199fSJeff Roberson * Unlink all of our kegs.
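 * (The first klink is embedded in the zone itself, which is why the loop below unlinks it but only free()s the separately allocated links.)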
1711e20a199fSJeff Roberson */ 1712e20a199fSJeff Roberson while ((klink = LIST_FIRST(&zone->uz_kegs)) != NULL) { 1713e20a199fSJeff Roberson klink->kl_keg = NULL; 1714e20a199fSJeff Roberson LIST_REMOVE(klink, kl_link); 1715e20a199fSJeff Roberson if (klink == &zone->uz_klink) 1716e20a199fSJeff Roberson continue; 1717e20a199fSJeff Roberson free(klink, M_TEMP); 1718e20a199fSJeff Roberson } 1719e20a199fSJeff Roberson /* 1720e20a199fSJeff Roberson * We only destroy kegs from non secondary zones. 1721e20a199fSJeff Roberson */ 17220095a784SJeff Roberson if (keg != NULL && (zone->uz_flags & UMA_ZONE_SECONDARY) == 0) { 1723111fbcd5SBryan Venteicher rw_wlock(&uma_rwlock); 1724099a0e58SBosko Milekic LIST_REMOVE(keg, uk_link); 1725111fbcd5SBryan Venteicher rw_wunlock(&uma_rwlock); 17260095a784SJeff Roberson zone_free_item(kegs, keg, NULL, SKIP_NONE); 17279c2cd7e5SJeff Roberson } 1728af526374SJeff Roberson ZONE_LOCK_FINI(zone); 1729099a0e58SBosko Milekic } 1730099a0e58SBosko Milekic 17319c2cd7e5SJeff Roberson /* 17328355f576SJeff Roberson * Traverses every zone in the system and calls a callback 17338355f576SJeff Roberson * 17348355f576SJeff Roberson * Arguments: 17358355f576SJeff Roberson * zfunc A pointer to a function which accepts a zone 17368355f576SJeff Roberson * as an argument. 17378355f576SJeff Roberson * 17388355f576SJeff Roberson * Returns: 17398355f576SJeff Roberson * Nothing 17408355f576SJeff Roberson */ 17418355f576SJeff Roberson static void 17428355f576SJeff Roberson zone_foreach(void (*zfunc)(uma_zone_t)) 17438355f576SJeff Roberson { 1744099a0e58SBosko Milekic uma_keg_t keg; 17458355f576SJeff Roberson uma_zone_t zone; 17468355f576SJeff Roberson 1747111fbcd5SBryan Venteicher rw_rlock(&uma_rwlock); 1748099a0e58SBosko Milekic LIST_FOREACH(keg, &uma_kegs, uk_link) { 1749099a0e58SBosko Milekic LIST_FOREACH(zone, &keg->uk_zones, uz_link) 17508355f576SJeff Roberson zfunc(zone); 1751099a0e58SBosko Milekic } 1752111fbcd5SBryan Venteicher rw_runlock(&uma_rwlock); 17538355f576SJeff Roberson } 17548355f576SJeff Roberson 17558355f576SJeff Roberson /* Public functions */ 17568355f576SJeff Roberson /* See uma.h */ 17578355f576SJeff Roberson void 17583803b26bSDag-Erling Smørgrav uma_startup(void *bootmem, int boot_pages) 17598355f576SJeff Roberson { 17608355f576SJeff Roberson struct uma_zctor_args args; 17618355f576SJeff Roberson uma_slab_t slab; 17628355f576SJeff Roberson int i; 17638355f576SJeff Roberson 17648355f576SJeff Roberson #ifdef UMA_DEBUG 1765099a0e58SBosko Milekic printf("Creating uma keg headers zone and keg.\n"); 17668355f576SJeff Roberson #endif 1767111fbcd5SBryan Venteicher rw_init(&uma_rwlock, "UMA lock"); 1768099a0e58SBosko Milekic 1769099a0e58SBosko Milekic /* "manually" create the initial zone */ 17700095a784SJeff Roberson memset(&args, 0, sizeof(args)); 1771099a0e58SBosko Milekic args.name = "UMA Kegs"; 1772099a0e58SBosko Milekic args.size = sizeof(struct uma_keg); 1773099a0e58SBosko Milekic args.ctor = keg_ctor; 1774099a0e58SBosko Milekic args.dtor = keg_dtor; 17758355f576SJeff Roberson args.uminit = zero_init; 17768355f576SJeff Roberson args.fini = NULL; 1777099a0e58SBosko Milekic args.keg = &masterkeg; 17788355f576SJeff Roberson args.align = 32 - 1; 1779b60f5b79SJeff Roberson args.flags = UMA_ZFLAG_INTERNAL; 17808355f576SJeff Roberson /* The initial zone has no Per cpu queues so it's smaller */ 1781b23f72e9SBrian Feldman zone_ctor(kegs, sizeof(struct uma_zone), &args, M_WAITOK); 17828355f576SJeff Roberson 17838355f576SJeff Roberson #ifdef UMA_DEBUG 17848355f576SJeff Roberson 
printf("Filling boot free list.\n"); 17858355f576SJeff Roberson #endif 17863803b26bSDag-Erling Smørgrav for (i = 0; i < boot_pages; i++) { 178785dcf349SGleb Smirnoff slab = (uma_slab_t)((uint8_t *)bootmem + (i * UMA_SLAB_SIZE)); 178885dcf349SGleb Smirnoff slab->us_data = (uint8_t *)slab; 17898355f576SJeff Roberson slab->us_flags = UMA_SLAB_BOOT; 17908355f576SJeff Roberson LIST_INSERT_HEAD(&uma_boot_pages, slab, us_link); 17918355f576SJeff Roberson } 1792f353d338SAlan Cox mtx_init(&uma_boot_pages_mtx, "UMA boot pages", NULL, MTX_DEF); 17938355f576SJeff Roberson 17948355f576SJeff Roberson #ifdef UMA_DEBUG 1795099a0e58SBosko Milekic printf("Creating uma zone headers zone and keg.\n"); 1796099a0e58SBosko Milekic #endif 1797099a0e58SBosko Milekic args.name = "UMA Zones"; 1798099a0e58SBosko Milekic args.size = sizeof(struct uma_zone) + 179951cfb0beSDmitry Chagin (sizeof(struct uma_cache) * (mp_maxid + 1)); 1800099a0e58SBosko Milekic args.ctor = zone_ctor; 1801099a0e58SBosko Milekic args.dtor = zone_dtor; 1802099a0e58SBosko Milekic args.uminit = zero_init; 1803099a0e58SBosko Milekic args.fini = NULL; 1804099a0e58SBosko Milekic args.keg = NULL; 1805099a0e58SBosko Milekic args.align = 32 - 1; 1806099a0e58SBosko Milekic args.flags = UMA_ZFLAG_INTERNAL; 1807099a0e58SBosko Milekic /* The initial zone has no Per cpu queues so it's smaller */ 1808b23f72e9SBrian Feldman zone_ctor(zones, sizeof(struct uma_zone), &args, M_WAITOK); 1809099a0e58SBosko Milekic 1810099a0e58SBosko Milekic #ifdef UMA_DEBUG 1811099a0e58SBosko Milekic printf("Creating slab and hash zones.\n"); 18128355f576SJeff Roberson #endif 18138355f576SJeff Roberson 18148355f576SJeff Roberson /* Now make a zone for slab headers */ 18158355f576SJeff Roberson slabzone = uma_zcreate("UMA Slabs", 1816ef72505eSJeff Roberson sizeof(struct uma_slab), 18178355f576SJeff Roberson NULL, NULL, NULL, NULL, 1818b60f5b79SJeff Roberson UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL); 18198355f576SJeff Roberson 18208355f576SJeff Roberson hashzone = uma_zcreate("UMA Hash", 18218355f576SJeff Roberson sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT, 18228355f576SJeff Roberson NULL, NULL, NULL, NULL, 1823b60f5b79SJeff Roberson UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL); 18248355f576SJeff Roberson 1825cae33c14SJeff Roberson bucket_init(); 18268355f576SJeff Roberson 1827342f1793SAlan Cox booted = UMA_STARTUP; 18288355f576SJeff Roberson 18298355f576SJeff Roberson #ifdef UMA_DEBUG 18308355f576SJeff Roberson printf("UMA startup complete.\n"); 18318355f576SJeff Roberson #endif 18328355f576SJeff Roberson } 18338355f576SJeff Roberson 18348355f576SJeff Roberson /* see uma.h */ 18358355f576SJeff Roberson void 183699571dc3SJeff Roberson uma_startup2(void) 18378355f576SJeff Roberson { 1838342f1793SAlan Cox booted = UMA_STARTUP2; 183986bbae32SJeff Roberson bucket_enable(); 184095c4bf75SKonstantin Belousov sx_init(&uma_drain_lock, "umadrain"); 18418355f576SJeff Roberson #ifdef UMA_DEBUG 18428355f576SJeff Roberson printf("UMA startup2 complete.\n"); 18438355f576SJeff Roberson #endif 18448355f576SJeff Roberson } 18458355f576SJeff Roberson 18468355f576SJeff Roberson /* 18478355f576SJeff Roberson * Initialize our callout handle 18488355f576SJeff Roberson * 18498355f576SJeff Roberson */ 18508355f576SJeff Roberson 18518355f576SJeff Roberson static void 18528355f576SJeff Roberson uma_startup3(void) 18538355f576SJeff Roberson { 18548355f576SJeff Roberson #ifdef UMA_DEBUG 18558355f576SJeff Roberson printf("Starting callout.\n"); 18568355f576SJeff Roberson #endif 1857fd90e2edSJung-uk Kim 
callout_init(&uma_callout, 1); 18589643769aSJeff Roberson callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL); 18598355f576SJeff Roberson #ifdef UMA_DEBUG 18608355f576SJeff Roberson printf("UMA startup3 complete.\n"); 18618355f576SJeff Roberson #endif 18628355f576SJeff Roberson } 18638355f576SJeff Roberson 1864e20a199fSJeff Roberson static uma_keg_t 1865099a0e58SBosko Milekic uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini, 186685dcf349SGleb Smirnoff int align, uint32_t flags) 1867099a0e58SBosko Milekic { 1868099a0e58SBosko Milekic struct uma_kctor_args args; 1869099a0e58SBosko Milekic 1870099a0e58SBosko Milekic args.size = size; 1871099a0e58SBosko Milekic args.uminit = uminit; 1872099a0e58SBosko Milekic args.fini = fini; 18731e319f6dSRobert Watson args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align; 1874099a0e58SBosko Milekic args.flags = flags; 1875099a0e58SBosko Milekic args.zone = zone; 1876e20a199fSJeff Roberson return (zone_alloc_item(kegs, &args, M_WAITOK)); 1877099a0e58SBosko Milekic } 1878099a0e58SBosko Milekic 18798355f576SJeff Roberson /* See uma.h */ 18801e319f6dSRobert Watson void 18811e319f6dSRobert Watson uma_set_align(int align) 18821e319f6dSRobert Watson { 18831e319f6dSRobert Watson 18841e319f6dSRobert Watson if (align != UMA_ALIGN_CACHE) 18851e319f6dSRobert Watson uma_align_cache = align; 18861e319f6dSRobert Watson } 18871e319f6dSRobert Watson 18881e319f6dSRobert Watson /* See uma.h */ 18898355f576SJeff Roberson uma_zone_t 1890bb196eb4SMatthew D Fleming uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor, 189185dcf349SGleb Smirnoff uma_init uminit, uma_fini fini, int align, uint32_t flags) 18928355f576SJeff Roberson 18938355f576SJeff Roberson { 18948355f576SJeff Roberson struct uma_zctor_args args; 189595c4bf75SKonstantin Belousov uma_zone_t res; 189695c4bf75SKonstantin Belousov bool locked; 18978355f576SJeff Roberson 18988355f576SJeff Roberson /* This stuff is essential for the zone ctor */ 18990095a784SJeff Roberson memset(&args, 0, sizeof(args)); 19008355f576SJeff Roberson args.name = name; 19018355f576SJeff Roberson args.size = size; 19028355f576SJeff Roberson args.ctor = ctor; 19038355f576SJeff Roberson args.dtor = dtor; 19048355f576SJeff Roberson args.uminit = uminit; 19058355f576SJeff Roberson args.fini = fini; 1906afc6dc36SJohn-Mark Gurney #ifdef INVARIANTS 1907afc6dc36SJohn-Mark Gurney /* 1908afc6dc36SJohn-Mark Gurney * If a zone is being created with an empty constructor and 1909afc6dc36SJohn-Mark Gurney * destructor, pass UMA constructor/destructor which checks for 1910afc6dc36SJohn-Mark Gurney * memory use after free. 
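 * (The trash routines fill an item with a junk pattern when it is freed and verify that the pattern is still intact when the item is next allocated.)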
1911afc6dc36SJohn-Mark Gurney */ 191219c591bfSMateusz Guzik if ((!(flags & (UMA_ZONE_ZINIT | UMA_ZONE_NOFREE))) && 191319c591bfSMateusz Guzik ctor == NULL && dtor == NULL && uminit == NULL && fini == NULL) { 1914afc6dc36SJohn-Mark Gurney args.ctor = trash_ctor; 1915afc6dc36SJohn-Mark Gurney args.dtor = trash_dtor; 1916afc6dc36SJohn-Mark Gurney args.uminit = trash_init; 1917afc6dc36SJohn-Mark Gurney args.fini = trash_fini; 1918afc6dc36SJohn-Mark Gurney } 1919afc6dc36SJohn-Mark Gurney #endif 19208355f576SJeff Roberson args.align = align; 19218355f576SJeff Roberson args.flags = flags; 1922099a0e58SBosko Milekic args.keg = NULL; 1923099a0e58SBosko Milekic 192495c4bf75SKonstantin Belousov if (booted < UMA_STARTUP2) { 192595c4bf75SKonstantin Belousov locked = false; 192695c4bf75SKonstantin Belousov } else { 192795c4bf75SKonstantin Belousov sx_slock(&uma_drain_lock); 192895c4bf75SKonstantin Belousov locked = true; 192995c4bf75SKonstantin Belousov } 193095c4bf75SKonstantin Belousov res = zone_alloc_item(zones, &args, M_WAITOK); 193195c4bf75SKonstantin Belousov if (locked) 193295c4bf75SKonstantin Belousov sx_sunlock(&uma_drain_lock); 193395c4bf75SKonstantin Belousov return (res); 1934099a0e58SBosko Milekic } 1935099a0e58SBosko Milekic 1936099a0e58SBosko Milekic /* See uma.h */ 1937099a0e58SBosko Milekic uma_zone_t 1938099a0e58SBosko Milekic uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor, 1939099a0e58SBosko Milekic uma_init zinit, uma_fini zfini, uma_zone_t master) 1940099a0e58SBosko Milekic { 1941099a0e58SBosko Milekic struct uma_zctor_args args; 1942e20a199fSJeff Roberson uma_keg_t keg; 194395c4bf75SKonstantin Belousov uma_zone_t res; 194495c4bf75SKonstantin Belousov bool locked; 1945099a0e58SBosko Milekic 1946e20a199fSJeff Roberson keg = zone_first_keg(master); 19470095a784SJeff Roberson memset(&args, 0, sizeof(args)); 1948099a0e58SBosko Milekic args.name = name; 1949e20a199fSJeff Roberson args.size = keg->uk_size; 1950099a0e58SBosko Milekic args.ctor = ctor; 1951099a0e58SBosko Milekic args.dtor = dtor; 1952099a0e58SBosko Milekic args.uminit = zinit; 1953099a0e58SBosko Milekic args.fini = zfini; 1954e20a199fSJeff Roberson args.align = keg->uk_align; 1955e20a199fSJeff Roberson args.flags = keg->uk_flags | UMA_ZONE_SECONDARY; 1956e20a199fSJeff Roberson args.keg = keg; 19578355f576SJeff Roberson 195895c4bf75SKonstantin Belousov if (booted < UMA_STARTUP2) { 195995c4bf75SKonstantin Belousov locked = false; 196095c4bf75SKonstantin Belousov } else { 196195c4bf75SKonstantin Belousov sx_slock(&uma_drain_lock); 196295c4bf75SKonstantin Belousov locked = true; 196395c4bf75SKonstantin Belousov } 1964e20a199fSJeff Roberson /* XXX Attaches only one keg of potentially many. 
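 * Further kegs can be attached to the zone later with uma_zsecond_add().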
*/ 196595c4bf75SKonstantin Belousov res = zone_alloc_item(zones, &args, M_WAITOK); 196695c4bf75SKonstantin Belousov if (locked) 196795c4bf75SKonstantin Belousov sx_sunlock(&uma_drain_lock); 196895c4bf75SKonstantin Belousov return (res); 19698355f576SJeff Roberson } 19708355f576SJeff Roberson 19710095a784SJeff Roberson /* See uma.h */ 19720095a784SJeff Roberson uma_zone_t 1973af526374SJeff Roberson uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor, 1974af526374SJeff Roberson uma_init zinit, uma_fini zfini, uma_import zimport, 1975af526374SJeff Roberson uma_release zrelease, void *arg, int flags) 19760095a784SJeff Roberson { 19770095a784SJeff Roberson struct uma_zctor_args args; 19780095a784SJeff Roberson 19790095a784SJeff Roberson memset(&args, 0, sizeof(args)); 19800095a784SJeff Roberson args.name = name; 1981af526374SJeff Roberson args.size = size; 19820095a784SJeff Roberson args.ctor = ctor; 19830095a784SJeff Roberson args.dtor = dtor; 19840095a784SJeff Roberson args.uminit = zinit; 19850095a784SJeff Roberson args.fini = zfini; 19860095a784SJeff Roberson args.import = zimport; 19870095a784SJeff Roberson args.release = zrelease; 19880095a784SJeff Roberson args.arg = arg; 19890095a784SJeff Roberson args.align = 0; 19900095a784SJeff Roberson args.flags = flags; 19910095a784SJeff Roberson 19920095a784SJeff Roberson return (zone_alloc_item(zones, &args, M_WAITOK)); 19930095a784SJeff Roberson } 19940095a784SJeff Roberson 1995e20a199fSJeff Roberson static void 1996e20a199fSJeff Roberson zone_lock_pair(uma_zone_t a, uma_zone_t b) 1997e20a199fSJeff Roberson { 1998e20a199fSJeff Roberson if (a < b) { 1999e20a199fSJeff Roberson ZONE_LOCK(a); 2000af526374SJeff Roberson mtx_lock_flags(b->uz_lockptr, MTX_DUPOK); 2001e20a199fSJeff Roberson } else { 2002e20a199fSJeff Roberson ZONE_LOCK(b); 2003af526374SJeff Roberson mtx_lock_flags(a->uz_lockptr, MTX_DUPOK); 2004e20a199fSJeff Roberson } 2005e20a199fSJeff Roberson } 2006e20a199fSJeff Roberson 2007e20a199fSJeff Roberson static void 2008e20a199fSJeff Roberson zone_unlock_pair(uma_zone_t a, uma_zone_t b) 2009e20a199fSJeff Roberson { 2010e20a199fSJeff Roberson 2011e20a199fSJeff Roberson ZONE_UNLOCK(a); 2012e20a199fSJeff Roberson ZONE_UNLOCK(b); 2013e20a199fSJeff Roberson } 2014e20a199fSJeff Roberson 2015e20a199fSJeff Roberson int 2016e20a199fSJeff Roberson uma_zsecond_add(uma_zone_t zone, uma_zone_t master) 2017e20a199fSJeff Roberson { 2018e20a199fSJeff Roberson uma_klink_t klink; 2019e20a199fSJeff Roberson uma_klink_t kl; 2020e20a199fSJeff Roberson int error; 2021e20a199fSJeff Roberson 2022e20a199fSJeff Roberson error = 0; 2023e20a199fSJeff Roberson klink = malloc(sizeof(*klink), M_TEMP, M_WAITOK | M_ZERO); 2024e20a199fSJeff Roberson 2025e20a199fSJeff Roberson zone_lock_pair(zone, master); 2026e20a199fSJeff Roberson /* 2027e20a199fSJeff Roberson * zone must use vtoslab() to resolve objects and must already be 2028e20a199fSJeff Roberson * a secondary. 2029e20a199fSJeff Roberson */ 2030e20a199fSJeff Roberson if ((zone->uz_flags & (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) 2031e20a199fSJeff Roberson != (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) { 2032e20a199fSJeff Roberson error = EINVAL; 2033e20a199fSJeff Roberson goto out; 2034e20a199fSJeff Roberson } 2035e20a199fSJeff Roberson /* 2036e20a199fSJeff Roberson * The new master must also use vtoslab(). 
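 * (The free path resolves the owning keg from an item's page via vtoslab(), so every keg linked into a multi-zone must support that lookup.)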
2037e20a199fSJeff Roberson */ 2038e20a199fSJeff Roberson if ((zone->uz_flags & UMA_ZONE_VTOSLAB) != UMA_ZONE_VTOSLAB) { 2039e20a199fSJeff Roberson error = EINVAL; 2040e20a199fSJeff Roberson goto out; 2041e20a199fSJeff Roberson } 2042*cfcae3f8SGleb Smirnoff 2043e20a199fSJeff Roberson /* 2044e20a199fSJeff Roberson * The underlying object must be the same size. rsize 2045e20a199fSJeff Roberson * may be different. 2046e20a199fSJeff Roberson */ 2047e20a199fSJeff Roberson if (master->uz_size != zone->uz_size) { 2048e20a199fSJeff Roberson error = E2BIG; 2049e20a199fSJeff Roberson goto out; 2050e20a199fSJeff Roberson } 2051e20a199fSJeff Roberson /* 2052e20a199fSJeff Roberson * Put it at the end of the list. 2053e20a199fSJeff Roberson */ 2054e20a199fSJeff Roberson klink->kl_keg = zone_first_keg(master); 2055e20a199fSJeff Roberson LIST_FOREACH(kl, &zone->uz_kegs, kl_link) { 2056e20a199fSJeff Roberson if (LIST_NEXT(kl, kl_link) == NULL) { 2057e20a199fSJeff Roberson LIST_INSERT_AFTER(kl, klink, kl_link); 2058e20a199fSJeff Roberson break; 2059e20a199fSJeff Roberson } 2060e20a199fSJeff Roberson } 2061e20a199fSJeff Roberson klink = NULL; 2062e20a199fSJeff Roberson zone->uz_flags |= UMA_ZFLAG_MULTI; 2063e20a199fSJeff Roberson zone->uz_slab = zone_fetch_slab_multi; 2064e20a199fSJeff Roberson 2065e20a199fSJeff Roberson out: 2066e20a199fSJeff Roberson zone_unlock_pair(zone, master); 2067e20a199fSJeff Roberson if (klink != NULL) 2068e20a199fSJeff Roberson free(klink, M_TEMP); 2069e20a199fSJeff Roberson 2070e20a199fSJeff Roberson return (error); 2071e20a199fSJeff Roberson } 2072e20a199fSJeff Roberson 2073e20a199fSJeff Roberson 20748355f576SJeff Roberson /* See uma.h */ 20759c2cd7e5SJeff Roberson void 20769c2cd7e5SJeff Roberson uma_zdestroy(uma_zone_t zone) 20779c2cd7e5SJeff Roberson { 2078f4ff923bSRobert Watson 207995c4bf75SKonstantin Belousov sx_slock(&uma_drain_lock); 20800095a784SJeff Roberson zone_free_item(zones, zone, NULL, SKIP_NONE); 208195c4bf75SKonstantin Belousov sx_sunlock(&uma_drain_lock); 20829c2cd7e5SJeff Roberson } 20839c2cd7e5SJeff Roberson 20849c2cd7e5SJeff Roberson /* See uma.h */ 20858355f576SJeff Roberson void * 20862cc35ff9SJeff Roberson uma_zalloc_arg(uma_zone_t zone, void *udata, int flags) 20878355f576SJeff Roberson { 20888355f576SJeff Roberson void *item; 20898355f576SJeff Roberson uma_cache_t cache; 20908355f576SJeff Roberson uma_bucket_t bucket; 2091fc03d22bSJeff Roberson int lockfail; 20928355f576SJeff Roberson int cpu; 20938355f576SJeff Roberson 2094e866d8f0SMark Murray /* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */ 2095e866d8f0SMark Murray random_harvest_fast_uma(&zone, sizeof(zone), 1, RANDOM_UMA); 209610cb2424SMark Murray 20978355f576SJeff Roberson /* This is the fast path allocation */ 20988355f576SJeff Roberson #ifdef UMA_DEBUG_ALLOC_1 20998355f576SJeff Roberson printf("Allocating one item from %s(%p)\n", zone->uz_name, zone); 21008355f576SJeff Roberson #endif 21013659f747SRobert Watson CTR3(KTR_UMA, "uma_zalloc_arg thread %x zone %s flags %d", curthread, 21023659f747SRobert Watson zone->uz_name, flags); 2103a553d4b8SJeff Roberson 2104635fd505SRobert Watson if (flags & M_WAITOK) { 2105b23f72e9SBrian Feldman WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 2106635fd505SRobert Watson "uma_zalloc_arg: zone \"%s\"", zone->uz_name); 21074c1cc01cSJohn Baldwin } 2108d9e2e68dSMark Johnston KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(), 21091067a2baSJonathan T. 
Looney ("uma_zalloc_arg: called with spinlock or critical section held")); 21101067a2baSJonathan T. Looney 21118d689e04SGleb Smirnoff #ifdef DEBUG_MEMGUARD 21128d689e04SGleb Smirnoff if (memguard_cmp_zone(zone)) { 21138d689e04SGleb Smirnoff item = memguard_alloc(zone->uz_size, flags); 21148d689e04SGleb Smirnoff if (item != NULL) { 21158d689e04SGleb Smirnoff /* 21168d689e04SGleb Smirnoff * Avoid conflict with the use-after-free 21178d689e04SGleb Smirnoff * protecting infrastructure from INVARIANTS. 21188d689e04SGleb Smirnoff */ 21198d689e04SGleb Smirnoff if (zone->uz_init != NULL && 21208d689e04SGleb Smirnoff zone->uz_init != mtrash_init && 21218d689e04SGleb Smirnoff zone->uz_init(item, zone->uz_size, flags) != 0) 21228d689e04SGleb Smirnoff return (NULL); 21238d689e04SGleb Smirnoff if (zone->uz_ctor != NULL && 21248d689e04SGleb Smirnoff zone->uz_ctor != mtrash_ctor && 2125fc03d22bSJeff Roberson zone->uz_ctor(item, zone->uz_size, udata, 2126fc03d22bSJeff Roberson flags) != 0) { 21278d689e04SGleb Smirnoff zone->uz_fini(item, zone->uz_size); 21288d689e04SGleb Smirnoff return (NULL); 21298d689e04SGleb Smirnoff } 21308d689e04SGleb Smirnoff return (item); 21318d689e04SGleb Smirnoff } 21328d689e04SGleb Smirnoff /* This is unfortunate but should not be fatal. */ 21338d689e04SGleb Smirnoff } 21348d689e04SGleb Smirnoff #endif 21355d1ae027SRobert Watson /* 21365d1ae027SRobert Watson * If possible, allocate from the per-CPU cache. There are two 21375d1ae027SRobert Watson * requirements for safe access to the per-CPU cache: (1) the thread 21385d1ae027SRobert Watson * accessing the cache must not be preempted or yield during access, 21395d1ae027SRobert Watson * and (2) the thread must not migrate CPUs without switching which 21405d1ae027SRobert Watson * cache it accesses. We rely on a critical section to prevent 21415d1ae027SRobert Watson * preemption and migration. We release the critical section in 21425d1ae027SRobert Watson * order to acquire the zone mutex if we are unable to allocate from 21435d1ae027SRobert Watson * the current cache; when we re-acquire the critical section, we 21445d1ae027SRobert Watson * must detect and handle migration if it has occurred. 
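 * (Concretely, "cpu" and "cache" are only valid between critical_enter() and critical_exit(); after every re-entry the code below reloads both before touching the cache.)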
21455d1ae027SRobert Watson */ 21465d1ae027SRobert Watson critical_enter(); 21475d1ae027SRobert Watson cpu = curcpu; 21488355f576SJeff Roberson cache = &zone->uz_cpu[cpu]; 21498355f576SJeff Roberson 21508355f576SJeff Roberson zalloc_start: 21518355f576SJeff Roberson bucket = cache->uc_allocbucket; 2152fc03d22bSJeff Roberson if (bucket != NULL && bucket->ub_cnt > 0) { 2153cae33c14SJeff Roberson bucket->ub_cnt--; 2154cae33c14SJeff Roberson item = bucket->ub_bucket[bucket->ub_cnt]; 21558355f576SJeff Roberson #ifdef INVARIANTS 2156cae33c14SJeff Roberson bucket->ub_bucket[bucket->ub_cnt] = NULL; 21578355f576SJeff Roberson #endif 2158fc03d22bSJeff Roberson KASSERT(item != NULL, ("uma_zalloc: Bucket pointer mangled.")); 21598355f576SJeff Roberson cache->uc_allocs++; 21605d1ae027SRobert Watson critical_exit(); 2161fc03d22bSJeff Roberson if (zone->uz_ctor != NULL && 2162fc03d22bSJeff Roberson zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) { 21630095a784SJeff Roberson atomic_add_long(&zone->uz_fails, 1); 2164fc03d22bSJeff Roberson zone_free_item(zone, item, udata, SKIP_DTOR); 2165b23f72e9SBrian Feldman return (NULL); 2166b23f72e9SBrian Feldman } 2167ef72505eSJeff Roberson #ifdef INVARIANTS 2168ef72505eSJeff Roberson uma_dbg_alloc(zone, NULL, item); 2169ef72505eSJeff Roberson #endif 21702cc35ff9SJeff Roberson if (flags & M_ZERO) 217148343a2fSGleb Smirnoff uma_zero_item(item, zone); 21728355f576SJeff Roberson return (item); 2173fc03d22bSJeff Roberson } 2174fc03d22bSJeff Roberson 21758355f576SJeff Roberson /* 21768355f576SJeff Roberson * We have run out of items in our alloc bucket. 21778355f576SJeff Roberson * See if we can switch with our free bucket. 21788355f576SJeff Roberson */ 2179b983089aSJeff Roberson bucket = cache->uc_freebucket; 2180fc03d22bSJeff Roberson if (bucket != NULL && bucket->ub_cnt > 0) { 2181fc03d22bSJeff Roberson #ifdef UMA_DEBUG_ALLOC 2182fc03d22bSJeff Roberson printf("uma_zalloc: Swapping empty with alloc.\n"); 2183fc03d22bSJeff Roberson #endif 21848355f576SJeff Roberson cache->uc_freebucket = cache->uc_allocbucket; 2185b983089aSJeff Roberson cache->uc_allocbucket = bucket; 21868355f576SJeff Roberson goto zalloc_start; 21878355f576SJeff Roberson } 2188fc03d22bSJeff Roberson 2189fc03d22bSJeff Roberson /* 2190fc03d22bSJeff Roberson * Discard any empty allocation bucket while we hold no locks. 2191fc03d22bSJeff Roberson */ 2192fc03d22bSJeff Roberson bucket = cache->uc_allocbucket; 2193fc03d22bSJeff Roberson cache->uc_allocbucket = NULL; 2194fc03d22bSJeff Roberson critical_exit(); 2195fc03d22bSJeff Roberson if (bucket != NULL) 21966fd34d6fSJeff Roberson bucket_free(zone, bucket, udata); 2197fc03d22bSJeff Roberson 2198fc03d22bSJeff Roberson /* Short-circuit for zones without buckets and low memory. */ 2199fc03d22bSJeff Roberson if (zone->uz_count == 0 || bucketdisable) 2200fc03d22bSJeff Roberson goto zalloc_item; 22015d1ae027SRobert Watson 22025d1ae027SRobert Watson /* 22035d1ae027SRobert Watson * The attempt to retrieve the item from the per-CPU cache has failed, so 22045d1ae027SRobert Watson * we must go back to the zone. This requires the zone lock, so we 22055d1ae027SRobert Watson * must drop the critical section, then re-acquire it when we go back 22065d1ae027SRobert Watson * to the cache. Since the critical section is released, we may be 22075d1ae027SRobert Watson * preempted or migrate. As such, make sure not to maintain any 22085d1ae027SRobert Watson * thread-local state specific to the cache from prior to releasing 22095d1ae027SRobert Watson * the critical section.
22105d1ae027SRobert Watson */ 2211fc03d22bSJeff Roberson lockfail = 0; 2212fc03d22bSJeff Roberson if (ZONE_TRYLOCK(zone) == 0) { 2213fc03d22bSJeff Roberson /* Record contention to size the buckets. */ 2214a553d4b8SJeff Roberson ZONE_LOCK(zone); 2215fc03d22bSJeff Roberson lockfail = 1; 2216fc03d22bSJeff Roberson } 22175d1ae027SRobert Watson critical_enter(); 22185d1ae027SRobert Watson cpu = curcpu; 22195d1ae027SRobert Watson cache = &zone->uz_cpu[cpu]; 22205d1ae027SRobert Watson 2221fc03d22bSJeff Roberson /* 2222fc03d22bSJeff Roberson * Since we have locked the zone we may as well send back our stats. 2223fc03d22bSJeff Roberson */ 22240095a784SJeff Roberson atomic_add_long(&zone->uz_allocs, cache->uc_allocs); 22250095a784SJeff Roberson atomic_add_long(&zone->uz_frees, cache->uc_frees); 2226a553d4b8SJeff Roberson cache->uc_allocs = 0; 2227773df9abSRobert Watson cache->uc_frees = 0; 22288355f576SJeff Roberson 2229fc03d22bSJeff Roberson /* See if we lost the race to fill the cache. */ 2230fc03d22bSJeff Roberson if (cache->uc_allocbucket != NULL) { 2231fc03d22bSJeff Roberson ZONE_UNLOCK(zone); 2232fc03d22bSJeff Roberson goto zalloc_start; 2233a553d4b8SJeff Roberson } 22348355f576SJeff Roberson 2235fc03d22bSJeff Roberson /* 2236fc03d22bSJeff Roberson * Check the zone's cache of buckets. 2237fc03d22bSJeff Roberson */ 2238fc03d22bSJeff Roberson if ((bucket = LIST_FIRST(&zone->uz_buckets)) != NULL) { 2239cae33c14SJeff Roberson KASSERT(bucket->ub_cnt != 0, 2240a553d4b8SJeff Roberson ("uma_zalloc_arg: Returning an empty bucket.")); 22418355f576SJeff Roberson 2242a553d4b8SJeff Roberson LIST_REMOVE(bucket, ub_link); 2243a553d4b8SJeff Roberson cache->uc_allocbucket = bucket; 2244a553d4b8SJeff Roberson ZONE_UNLOCK(zone); 22458355f576SJeff Roberson goto zalloc_start; 2246a553d4b8SJeff Roberson } 22475d1ae027SRobert Watson /* We are no longer associated with this CPU. */ 22485d1ae027SRobert Watson critical_exit(); 2249bbee39c6SJeff Roberson 2250fc03d22bSJeff Roberson /* 2251fc03d22bSJeff Roberson * We bump the uz count when the cache size is insufficient to 2252fc03d22bSJeff Roberson * handle the working set. 2253fc03d22bSJeff Roberson */ 22546fd34d6fSJeff Roberson if (lockfail && zone->uz_count < BUCKET_MAX) 2255a553d4b8SJeff Roberson zone->uz_count++; 2256fc03d22bSJeff Roberson ZONE_UNLOCK(zone); 2257099a0e58SBosko Milekic 22588355f576SJeff Roberson /* 2259a553d4b8SJeff Roberson * Now let's just fill a bucket and put it on the free list. If that 2260fc03d22bSJeff Roberson * works we'll restart the allocation from the beginning and it 2261fc03d22bSJeff Roberson * will use the just-filled bucket. 2262bbee39c6SJeff Roberson */ 22636fd34d6fSJeff Roberson bucket = zone_alloc_bucket(zone, udata, flags); 2264fc03d22bSJeff Roberson if (bucket != NULL) { 2265fc03d22bSJeff Roberson ZONE_LOCK(zone); 2266fc03d22bSJeff Roberson critical_enter(); 2267fc03d22bSJeff Roberson cpu = curcpu; 2268fc03d22bSJeff Roberson cache = &zone->uz_cpu[cpu]; 2269fc03d22bSJeff Roberson /* 2270fc03d22bSJeff Roberson * See if we lost the race or were migrated. Cache the 2271fc03d22bSJeff Roberson * initialized bucket to make this less likely or claim 2272fc03d22bSJeff Roberson * the memory directly.
2273fc03d22bSJeff Roberson */ 2274fc03d22bSJeff Roberson if (cache->uc_allocbucket == NULL) 2275fc03d22bSJeff Roberson cache->uc_allocbucket = bucket; 2276fc03d22bSJeff Roberson else 2277fc03d22bSJeff Roberson LIST_INSERT_HEAD(&zone->uz_buckets, bucket, ub_link); 2278bbee39c6SJeff Roberson ZONE_UNLOCK(zone); 2279fc03d22bSJeff Roberson goto zalloc_start; 2280bbee39c6SJeff Roberson } 2281fc03d22bSJeff Roberson 2282bbee39c6SJeff Roberson /* 2283bbee39c6SJeff Roberson * We may not be able to get a bucket so return an actual item. 2284bbee39c6SJeff Roberson */ 2285bbee39c6SJeff Roberson #ifdef UMA_DEBUG 2286bbee39c6SJeff Roberson printf("uma_zalloc_arg: Bucketzone returned NULL\n"); 2287bbee39c6SJeff Roberson #endif 2288bbee39c6SJeff Roberson 2289fc03d22bSJeff Roberson zalloc_item: 2290e20a199fSJeff Roberson item = zone_alloc_item(zone, udata, flags); 2291fc03d22bSJeff Roberson 2292e20a199fSJeff Roberson return (item); 2293bbee39c6SJeff Roberson } 2294bbee39c6SJeff Roberson 2295bbee39c6SJeff Roberson static uma_slab_t 2296e20a199fSJeff Roberson keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int flags) 2297bbee39c6SJeff Roberson { 2298bbee39c6SJeff Roberson uma_slab_t slab; 22996fd34d6fSJeff Roberson int reserve; 2300099a0e58SBosko Milekic 2301e20a199fSJeff Roberson mtx_assert(&keg->uk_lock, MA_OWNED); 2302bbee39c6SJeff Roberson slab = NULL; 23036fd34d6fSJeff Roberson reserve = 0; 23046fd34d6fSJeff Roberson if ((flags & M_USE_RESERVE) == 0) 23056fd34d6fSJeff Roberson reserve = keg->uk_reserve; 2306bbee39c6SJeff Roberson 2307bbee39c6SJeff Roberson for (;;) { 2308bbee39c6SJeff Roberson /* 2309bbee39c6SJeff Roberson * Find a slab with some space. Prefer slabs that are partially 2310bbee39c6SJeff Roberson * used over those that are totally full. This helps to reduce 2311bbee39c6SJeff Roberson * fragmentation. 2312bbee39c6SJeff Roberson */ 23136fd34d6fSJeff Roberson if (keg->uk_free > reserve) { 2314099a0e58SBosko Milekic if (!LIST_EMPTY(&keg->uk_part_slab)) { 2315099a0e58SBosko Milekic slab = LIST_FIRST(&keg->uk_part_slab); 2316bbee39c6SJeff Roberson } else { 2317099a0e58SBosko Milekic slab = LIST_FIRST(&keg->uk_free_slab); 2318bbee39c6SJeff Roberson LIST_REMOVE(slab, us_link); 2319099a0e58SBosko Milekic LIST_INSERT_HEAD(&keg->uk_part_slab, slab, 2320bbee39c6SJeff Roberson us_link); 2321bbee39c6SJeff Roberson } 2322e20a199fSJeff Roberson MPASS(slab->us_keg == keg); 2323bbee39c6SJeff Roberson return (slab); 2324bbee39c6SJeff Roberson } 2325bbee39c6SJeff Roberson 2326bbee39c6SJeff Roberson /* 2327bbee39c6SJeff Roberson * M_NOVM means don't ask at all! 2328bbee39c6SJeff Roberson */ 2329bbee39c6SJeff Roberson if (flags & M_NOVM) 2330bbee39c6SJeff Roberson break; 2331bbee39c6SJeff Roberson 2332e20a199fSJeff Roberson if (keg->uk_maxpages && keg->uk_pages >= keg->uk_maxpages) { 2333099a0e58SBosko Milekic keg->uk_flags |= UMA_ZFLAG_FULL; 2334e20a199fSJeff Roberson /* 2335e20a199fSJeff Roberson * If this is not a multi-zone, set the FULL bit. 2336e20a199fSJeff Roberson * Otherwise slab_multi() takes care of it. 2337e20a199fSJeff Roberson */ 23382f891cd5SPawel Jakub Dawidek if ((zone->uz_flags & UMA_ZFLAG_MULTI) == 0) { 2339e20a199fSJeff Roberson zone->uz_flags |= UMA_ZFLAG_FULL; 23402f891cd5SPawel Jakub Dawidek zone_log_warning(zone); 234154503a13SJonathan T. 
Looney zone_maxaction(zone); 23422f891cd5SPawel Jakub Dawidek } 2343ebc85edfSJeff Roberson if (flags & M_NOWAIT) 2344bbee39c6SJeff Roberson break; 2345c288b548SEitan Adler zone->uz_sleeps++; 2346e20a199fSJeff Roberson msleep(keg, &keg->uk_lock, PVM, "keglimit", 0); 2347bbee39c6SJeff Roberson continue; 2348bbee39c6SJeff Roberson } 2349e20a199fSJeff Roberson slab = keg_alloc_slab(keg, zone, flags); 2350bbee39c6SJeff Roberson /* 2351bbee39c6SJeff Roberson * If we got a slab here it's safe to mark it partially used 2352bbee39c6SJeff Roberson * and return. We assume that the caller is going to remove 2353bbee39c6SJeff Roberson * at least one item. 2354bbee39c6SJeff Roberson */ 2355bbee39c6SJeff Roberson if (slab) { 2356e20a199fSJeff Roberson MPASS(slab->us_keg == keg); 2357099a0e58SBosko Milekic LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link); 2358bbee39c6SJeff Roberson return (slab); 2359bbee39c6SJeff Roberson } 2360bbee39c6SJeff Roberson /* 2361bbee39c6SJeff Roberson * We might not have been able to get a slab but another cpu 2362bbee39c6SJeff Roberson * could have while we were unlocked. Check again before we 2363bbee39c6SJeff Roberson * fail. 2364bbee39c6SJeff Roberson */ 2365bbee39c6SJeff Roberson flags |= M_NOVM; 2366bbee39c6SJeff Roberson } 2367bbee39c6SJeff Roberson return (slab); 2368bbee39c6SJeff Roberson } 2369bbee39c6SJeff Roberson 2370e20a199fSJeff Roberson static uma_slab_t 2371e20a199fSJeff Roberson zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int flags) 2372e20a199fSJeff Roberson { 2373e20a199fSJeff Roberson uma_slab_t slab; 2374e20a199fSJeff Roberson 2375af526374SJeff Roberson if (keg == NULL) { 2376e20a199fSJeff Roberson keg = zone_first_keg(zone); 2377af526374SJeff Roberson KEG_LOCK(keg); 2378af526374SJeff Roberson } 2379e20a199fSJeff Roberson 2380e20a199fSJeff Roberson for (;;) { 2381e20a199fSJeff Roberson slab = keg_fetch_slab(keg, zone, flags); 2382e20a199fSJeff Roberson if (slab) 2383e20a199fSJeff Roberson return (slab); 2384e20a199fSJeff Roberson if (flags & (M_NOWAIT | M_NOVM)) 2385e20a199fSJeff Roberson break; 2386e20a199fSJeff Roberson } 2387af526374SJeff Roberson KEG_UNLOCK(keg); 2388e20a199fSJeff Roberson return (NULL); 2389e20a199fSJeff Roberson } 2390e20a199fSJeff Roberson 2391e20a199fSJeff Roberson /* 2392e20a199fSJeff Roberson * zone_fetch_slab_multi: Fetches a slab from one available keg. Returns 2393af526374SJeff Roberson * with the keg locked. If NULL is returned, no lock is held. 2394e20a199fSJeff Roberson * 2395e20a199fSJeff Roberson * The last pointer is used to seed the search. It is not required. 2396e20a199fSJeff Roberson */ 2397e20a199fSJeff Roberson static uma_slab_t 2398e20a199fSJeff Roberson zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int rflags) 2399e20a199fSJeff Roberson { 2400e20a199fSJeff Roberson uma_klink_t klink; 2401e20a199fSJeff Roberson uma_slab_t slab; 2402e20a199fSJeff Roberson uma_keg_t keg; 2403e20a199fSJeff Roberson int flags; 2404e20a199fSJeff Roberson int empty; 2405e20a199fSJeff Roberson int full; 2406e20a199fSJeff Roberson 2407e20a199fSJeff Roberson /* 2408e20a199fSJeff Roberson * Don't wait on the first pass. This will skip limit tests 2409e20a199fSJeff Roberson * as well. We don't want to block if we can find a provider 2410e20a199fSJeff Roberson * without blocking. 2411e20a199fSJeff Roberson */ 2412e20a199fSJeff Roberson flags = (rflags & ~M_WAITOK) | M_NOWAIT; 2413e20a199fSJeff Roberson /* 2414e20a199fSJeff Roberson * Use the last slab allocated as a hint for where to start 2415e20a199fSJeff Roberson * the search. 
2416e20a199fSJeff Roberson */ 2417af526374SJeff Roberson if (last != NULL) { 2418e20a199fSJeff Roberson slab = keg_fetch_slab(last, zone, flags); 2419e20a199fSJeff Roberson if (slab) 2420e20a199fSJeff Roberson return (slab); 2421af526374SJeff Roberson KEG_UNLOCK(last); 2422e20a199fSJeff Roberson } 2423e20a199fSJeff Roberson /* 2424e20a199fSJeff Roberson * Loop until we have a slab in case of transient failures 2425e20a199fSJeff Roberson * while M_WAITOK is specified. I'm not sure this is 100% 2426e20a199fSJeff Roberson * required but we've done it for so long now. 2427e20a199fSJeff Roberson */ 2428e20a199fSJeff Roberson for (;;) { 2429e20a199fSJeff Roberson empty = 0; 2430e20a199fSJeff Roberson full = 0; 2431e20a199fSJeff Roberson /* 2432e20a199fSJeff Roberson * Search the available kegs for slabs. Be careful to hold the 2433e20a199fSJeff Roberson * correct lock while calling into the keg layer. 2434e20a199fSJeff Roberson */ 2435e20a199fSJeff Roberson LIST_FOREACH(klink, &zone->uz_kegs, kl_link) { 2436e20a199fSJeff Roberson keg = klink->kl_keg; 2437af526374SJeff Roberson KEG_LOCK(keg); 2438e20a199fSJeff Roberson if ((keg->uk_flags & UMA_ZFLAG_FULL) == 0) { 2439e20a199fSJeff Roberson slab = keg_fetch_slab(keg, zone, flags); 2440e20a199fSJeff Roberson if (slab) 2441e20a199fSJeff Roberson return (slab); 2442e20a199fSJeff Roberson } 2443e20a199fSJeff Roberson if (keg->uk_flags & UMA_ZFLAG_FULL) 2444e20a199fSJeff Roberson full++; 2445e20a199fSJeff Roberson else 2446e20a199fSJeff Roberson empty++; 2447af526374SJeff Roberson KEG_UNLOCK(keg); 2448e20a199fSJeff Roberson } 2449e20a199fSJeff Roberson if (rflags & (M_NOWAIT | M_NOVM)) 2450e20a199fSJeff Roberson break; 2451e20a199fSJeff Roberson flags = rflags; 2452e20a199fSJeff Roberson /* 2453e20a199fSJeff Roberson * All kegs are full. XXX We can't atomically check all kegs 2454e20a199fSJeff Roberson * and sleep so just sleep for a short period and retry. 2455e20a199fSJeff Roberson */ 2456e20a199fSJeff Roberson if (full && !empty) { 2457af526374SJeff Roberson ZONE_LOCK(zone); 2458e20a199fSJeff Roberson zone->uz_flags |= UMA_ZFLAG_FULL; 2459bf965959SSean Bruno zone->uz_sleeps++; 24602f891cd5SPawel Jakub Dawidek zone_log_warning(zone); 246154503a13SJonathan T. 
Looney zone_maxaction(zone); 2462af526374SJeff Roberson msleep(zone, zone->uz_lockptr, PVM, 2463af526374SJeff Roberson "zonelimit", hz/100); 2464e20a199fSJeff Roberson zone->uz_flags &= ~UMA_ZFLAG_FULL; 2465af526374SJeff Roberson ZONE_UNLOCK(zone); 2466e20a199fSJeff Roberson continue; 2467e20a199fSJeff Roberson } 2468e20a199fSJeff Roberson } 2469e20a199fSJeff Roberson return (NULL); 2470e20a199fSJeff Roberson } 2471e20a199fSJeff Roberson 2472d56368d7SBosko Milekic static void * 24730095a784SJeff Roberson slab_alloc_item(uma_keg_t keg, uma_slab_t slab) 2474bbee39c6SJeff Roberson { 2475bbee39c6SJeff Roberson void *item; 247685dcf349SGleb Smirnoff uint8_t freei; 2477bbee39c6SJeff Roberson 24780095a784SJeff Roberson MPASS(keg == slab->us_keg); 2479e20a199fSJeff Roberson mtx_assert(&keg->uk_lock, MA_OWNED); 2480099a0e58SBosko Milekic 2481ef72505eSJeff Roberson freei = BIT_FFS(SLAB_SETSIZE, &slab->us_free) - 1; 2482ef72505eSJeff Roberson BIT_CLR(SLAB_SETSIZE, freei, &slab->us_free); 2483099a0e58SBosko Milekic item = slab->us_data + (keg->uk_rsize * freei); 2484bbee39c6SJeff Roberson slab->us_freecount--; 2485099a0e58SBosko Milekic keg->uk_free--; 2486ef72505eSJeff Roberson 2487bbee39c6SJeff Roberson /* Move this slab to the full list */ 2488bbee39c6SJeff Roberson if (slab->us_freecount == 0) { 2489bbee39c6SJeff Roberson LIST_REMOVE(slab, us_link); 2490099a0e58SBosko Milekic LIST_INSERT_HEAD(&keg->uk_full_slab, slab, us_link); 2491bbee39c6SJeff Roberson } 2492bbee39c6SJeff Roberson 2493bbee39c6SJeff Roberson return (item); 2494bbee39c6SJeff Roberson } 2495bbee39c6SJeff Roberson 2496bbee39c6SJeff Roberson static int 24970095a784SJeff Roberson zone_import(uma_zone_t zone, void **bucket, int max, int flags) 24980095a784SJeff Roberson { 24990095a784SJeff Roberson uma_slab_t slab; 25000095a784SJeff Roberson uma_keg_t keg; 25010095a784SJeff Roberson int i; 25020095a784SJeff Roberson 25030095a784SJeff Roberson slab = NULL; 25040095a784SJeff Roberson keg = NULL; 2505af526374SJeff Roberson /* Try to keep the buckets totally full */ 25060095a784SJeff Roberson for (i = 0; i < max; ) { 25070095a784SJeff Roberson if ((slab = zone->uz_slab(zone, keg, flags)) == NULL) 25080095a784SJeff Roberson break; 25090095a784SJeff Roberson keg = slab->us_keg; 25106fd34d6fSJeff Roberson while (slab->us_freecount && i < max) { 25110095a784SJeff Roberson bucket[i++] = slab_alloc_item(keg, slab); 25126fd34d6fSJeff Roberson if (keg->uk_free <= keg->uk_reserve) 25136fd34d6fSJeff Roberson break; 25146fd34d6fSJeff Roberson } 25156fd34d6fSJeff Roberson /* Don't grab more than one slab at a time. */ 25160095a784SJeff Roberson flags &= ~M_WAITOK; 25170095a784SJeff Roberson flags |= M_NOWAIT; 25180095a784SJeff Roberson } 25190095a784SJeff Roberson if (slab != NULL) 25200095a784SJeff Roberson KEG_UNLOCK(keg); 25210095a784SJeff Roberson 25220095a784SJeff Roberson return i; 25230095a784SJeff Roberson } 25240095a784SJeff Roberson 2525fc03d22bSJeff Roberson static uma_bucket_t 25266fd34d6fSJeff Roberson zone_alloc_bucket(uma_zone_t zone, void *udata, int flags) 2527bbee39c6SJeff Roberson { 2528bbee39c6SJeff Roberson uma_bucket_t bucket; 25290095a784SJeff Roberson int max; 2530bbee39c6SJeff Roberson 25316fd34d6fSJeff Roberson /* Don't wait for buckets, preserve caller's NOVM setting. 
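 *
 * The bucket is only cache plumbing: if it cannot be allocated
 * without sleeping, uma_zalloc_arg() simply falls back to
 * zone_alloc_item() and hands out items one at a time.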
*/ 25326fd34d6fSJeff Roberson bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM)); 25330095a784SJeff Roberson if (bucket == NULL) 2534f7104ccdSAlexander Motin return (NULL); 25350095a784SJeff Roberson 2536af526374SJeff Roberson max = MIN(bucket->ub_entries, zone->uz_count); 25370095a784SJeff Roberson bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket, 25380095a784SJeff Roberson max, flags); 25390095a784SJeff Roberson 25400095a784SJeff Roberson /* 25410095a784SJeff Roberson * Initialize the memory if necessary. 25420095a784SJeff Roberson */ 25430095a784SJeff Roberson if (bucket->ub_cnt != 0 && zone->uz_init != NULL) { 2544099a0e58SBosko Milekic int i; 2545bbee39c6SJeff Roberson 25460095a784SJeff Roberson for (i = 0; i < bucket->ub_cnt; i++) 2547e20a199fSJeff Roberson if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size, 25480095a784SJeff Roberson flags) != 0) 2549b23f72e9SBrian Feldman break; 2550b23f72e9SBrian Feldman /* 2551b23f72e9SBrian Feldman * If we couldn't initialize the whole bucket, put the 2552b23f72e9SBrian Feldman * rest back onto the freelist. 2553b23f72e9SBrian Feldman */ 2554b23f72e9SBrian Feldman if (i != bucket->ub_cnt) { 2555af526374SJeff Roberson zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i], 25560095a784SJeff Roberson bucket->ub_cnt - i); 2557a5a262c6SBosko Milekic #ifdef INVARIANTS 25580095a784SJeff Roberson bzero(&bucket->ub_bucket[i], 25590095a784SJeff Roberson sizeof(void *) * (bucket->ub_cnt - i)); 2560a5a262c6SBosko Milekic #endif 2561b23f72e9SBrian Feldman bucket->ub_cnt = i; 2562b23f72e9SBrian Feldman } 2563099a0e58SBosko Milekic } 2564099a0e58SBosko Milekic 2565f7104ccdSAlexander Motin if (bucket->ub_cnt == 0) { 25666fd34d6fSJeff Roberson bucket_free(zone, bucket, udata); 2567fc03d22bSJeff Roberson atomic_add_long(&zone->uz_fails, 1); 2568fc03d22bSJeff Roberson return (NULL); 2569bbee39c6SJeff Roberson } 2570fc03d22bSJeff Roberson 2571fc03d22bSJeff Roberson return (bucket); 2572fc03d22bSJeff Roberson } 2573fc03d22bSJeff Roberson 25748355f576SJeff Roberson /* 25750095a784SJeff Roberson * Allocates a single item from a zone. 25768355f576SJeff Roberson * 25778355f576SJeff Roberson * Arguments 25788355f576SJeff Roberson * zone The zone to alloc for. 25798355f576SJeff Roberson * udata The data to be passed to the constructor. 2580a163d034SWarner Losh * flags M_WAITOK, M_NOWAIT, M_ZERO. 25818355f576SJeff Roberson * 25828355f576SJeff Roberson * Returns 25838355f576SJeff Roberson * NULL if there is no memory and M_NOWAIT is set 2584bbee39c6SJeff Roberson * An item if successful 25858355f576SJeff Roberson */ 25868355f576SJeff Roberson 25878355f576SJeff Roberson static void * 2588e20a199fSJeff Roberson zone_alloc_item(uma_zone_t zone, void *udata, int flags) 25898355f576SJeff Roberson { 25908355f576SJeff Roberson void *item; 25918355f576SJeff Roberson 25928355f576SJeff Roberson item = NULL; 25938355f576SJeff Roberson 25948355f576SJeff Roberson #ifdef UMA_DEBUG_ALLOC 25958355f576SJeff Roberson printf("INTERNAL: Allocating one item from %s(%p)\n", zone->uz_name, zone); 25968355f576SJeff Roberson #endif 25970095a784SJeff Roberson if (zone->uz_import(zone->uz_arg, &item, 1, flags) != 1) 25980095a784SJeff Roberson goto fail; 25990095a784SJeff Roberson atomic_add_long(&zone->uz_allocs, 1); 26008355f576SJeff Roberson 2601099a0e58SBosko Milekic /* 2602099a0e58SBosko Milekic * We have to call both the zone's init (not the keg's init) 2603099a0e58SBosko Milekic * and the zone's ctor. 
This is because the item is going from 2604099a0e58SBosko Milekic * a keg slab directly to the user, and the user is expecting it 2605099a0e58SBosko Milekic * to be both zone-init'd as well as zone-ctor'd. 2606099a0e58SBosko Milekic */ 2607b23f72e9SBrian Feldman if (zone->uz_init != NULL) { 2608e20a199fSJeff Roberson if (zone->uz_init(item, zone->uz_size, flags) != 0) { 26090095a784SJeff Roberson zone_free_item(zone, item, udata, SKIP_FINI); 26100095a784SJeff Roberson goto fail; 2611b23f72e9SBrian Feldman } 2612b23f72e9SBrian Feldman } 2613b23f72e9SBrian Feldman if (zone->uz_ctor != NULL) { 2614e20a199fSJeff Roberson if (zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) { 26150095a784SJeff Roberson zone_free_item(zone, item, udata, SKIP_DTOR); 26160095a784SJeff Roberson goto fail; 2617b23f72e9SBrian Feldman } 2618b23f72e9SBrian Feldman } 2619ef72505eSJeff Roberson #ifdef INVARIANTS 26200095a784SJeff Roberson uma_dbg_alloc(zone, NULL, item); 2621ef72505eSJeff Roberson #endif 26222cc35ff9SJeff Roberson if (flags & M_ZERO) 262348343a2fSGleb Smirnoff uma_zero_item(item, zone); 26248355f576SJeff Roberson 26258355f576SJeff Roberson return (item); 26260095a784SJeff Roberson 26270095a784SJeff Roberson fail: 26280095a784SJeff Roberson atomic_add_long(&zone->uz_fails, 1); 26290095a784SJeff Roberson return (NULL); 26308355f576SJeff Roberson } 26318355f576SJeff Roberson 26328355f576SJeff Roberson /* See uma.h */ 26338355f576SJeff Roberson void 26348355f576SJeff Roberson uma_zfree_arg(uma_zone_t zone, void *item, void *udata) 26358355f576SJeff Roberson { 26368355f576SJeff Roberson uma_cache_t cache; 26378355f576SJeff Roberson uma_bucket_t bucket; 26384d104ba0SAlexander Motin int lockfail; 26398355f576SJeff Roberson int cpu; 26408355f576SJeff Roberson 2641e866d8f0SMark Murray /* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */ 2642e866d8f0SMark Murray random_harvest_fast_uma(&zone, sizeof(zone), 1, RANDOM_UMA); 264310cb2424SMark Murray 26448355f576SJeff Roberson #ifdef UMA_DEBUG_ALLOC_1 26458355f576SJeff Roberson printf("Freeing item %p to %s(%p)\n", item, zone->uz_name, zone); 26468355f576SJeff Roberson #endif 26473659f747SRobert Watson CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread, 26483659f747SRobert Watson zone->uz_name); 26493659f747SRobert Watson 2650d9e2e68dSMark Johnston KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(), 26511067a2baSJonathan T. Looney ("uma_zfree_arg: called with spinlock or critical section held")); 26521067a2baSJonathan T. Looney 265320ed0cb0SMatthew D Fleming /* uma_zfree(..., NULL) does nothing, to match free(9). 
*/ 265420ed0cb0SMatthew D Fleming if (item == NULL) 265520ed0cb0SMatthew D Fleming return; 26568d689e04SGleb Smirnoff #ifdef DEBUG_MEMGUARD 26578d689e04SGleb Smirnoff if (is_memguard_addr(item)) { 26588d689e04SGleb Smirnoff if (zone->uz_dtor != NULL && zone->uz_dtor != mtrash_dtor) 26598d689e04SGleb Smirnoff zone->uz_dtor(item, zone->uz_size, udata); 26608d689e04SGleb Smirnoff if (zone->uz_fini != NULL && zone->uz_fini != mtrash_fini) 26618d689e04SGleb Smirnoff zone->uz_fini(item, zone->uz_size); 26628d689e04SGleb Smirnoff memguard_free(item); 26638d689e04SGleb Smirnoff return; 26648d689e04SGleb Smirnoff } 26658d689e04SGleb Smirnoff #endif 26665d1ae027SRobert Watson #ifdef INVARIANTS 2667e20a199fSJeff Roberson if (zone->uz_flags & UMA_ZONE_MALLOC) 26685d1ae027SRobert Watson uma_dbg_free(zone, udata, item); 26695d1ae027SRobert Watson else 26705d1ae027SRobert Watson uma_dbg_free(zone, NULL, item); 26715d1ae027SRobert Watson #endif 2672fc03d22bSJeff Roberson if (zone->uz_dtor != NULL) 2673ef72505eSJeff Roberson zone->uz_dtor(item, zone->uz_size, udata); 2674ef72505eSJeff Roberson 2675af7f9b97SJeff Roberson /* 2676af7f9b97SJeff Roberson * The race here is acceptable. If we miss it we'll just have to wait 2677af7f9b97SJeff Roberson * a little longer for the limits to be reset. 2678af7f9b97SJeff Roberson */ 2679e20a199fSJeff Roberson if (zone->uz_flags & UMA_ZFLAG_FULL) 2680fc03d22bSJeff Roberson goto zfree_item; 2681af7f9b97SJeff Roberson 26825d1ae027SRobert Watson /* 26835d1ae027SRobert Watson * If possible, free to the per-CPU cache. There are two 26845d1ae027SRobert Watson * requirements for safe access to the per-CPU cache: (1) the thread 26855d1ae027SRobert Watson * accessing the cache must not be preempted or yield during access, 26865d1ae027SRobert Watson * and (2) the thread must not migrate CPUs without switching which 26875d1ae027SRobert Watson * cache it accesses. We rely on a critical section to prevent 26885d1ae027SRobert Watson * preemption and migration. We release the critical section in 26895d1ae027SRobert Watson * order to acquire the zone mutex if we are unable to free to the 26905d1ae027SRobert Watson * current cache; when we re-acquire the critical section, we must 26915d1ae027SRobert Watson * detect and handle migration if it has occurred. 26925d1ae027SRobert Watson */ 2693a553d4b8SJeff Roberson zfree_restart: 26945d1ae027SRobert Watson critical_enter(); 26955d1ae027SRobert Watson cpu = curcpu; 26968355f576SJeff Roberson cache = &zone->uz_cpu[cpu]; 26978355f576SJeff Roberson 26988355f576SJeff Roberson zfree_start: 2699a553d4b8SJeff Roberson /* 2700fc03d22bSJeff Roberson * Try to free into the allocbucket first to give LIFO ordering 2701fc03d22bSJeff Roberson * for cache-hot datastructures. Spill over into the freebucket 2702fc03d22bSJeff Roberson * if necessary. Alloc will swap them if one runs dry. 
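 *
 * For example:
 *
 *	uma_zfree(zone, a);	a lands in uc_allocbucket
 *	uma_zfree(zone, b);	b is stacked on top of a
 *	uma_zalloc(zone, ...);	returns b, the most recently freed
 *				and therefore most cache-hot item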
2703a553d4b8SJeff Roberson */ 2704fc03d22bSJeff Roberson bucket = cache->uc_allocbucket; 2705fc03d22bSJeff Roberson if (bucket == NULL || bucket->ub_cnt >= bucket->ub_entries) 2706fc03d22bSJeff Roberson bucket = cache->uc_freebucket; 2707fc03d22bSJeff Roberson if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) { 2708cae33c14SJeff Roberson KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL, 27098355f576SJeff Roberson ("uma_zfree: Freeing to non free bucket index.")); 2710cae33c14SJeff Roberson bucket->ub_bucket[bucket->ub_cnt] = item; 2711cae33c14SJeff Roberson bucket->ub_cnt++; 2712773df9abSRobert Watson cache->uc_frees++; 27135d1ae027SRobert Watson critical_exit(); 27148355f576SJeff Roberson return; 2715fc03d22bSJeff Roberson } 2716fc03d22bSJeff Roberson 27178355f576SJeff Roberson /* 27185d1ae027SRobert Watson * We must go back to the zone, which requires acquiring the zone lock, 27195d1ae027SRobert Watson * which in turn means we must release and re-acquire the critical 27205d1ae027SRobert Watson * section. Since the critical section is released, we may be 27215d1ae027SRobert Watson * preempted or migrate. As such, make sure not to maintain any 27225d1ae027SRobert Watson * thread-local state specific to the cache from prior to releasing 27235d1ae027SRobert Watson * the critical section. 27248355f576SJeff Roberson */ 27255d1ae027SRobert Watson critical_exit(); 2726fc03d22bSJeff Roberson if (zone->uz_count == 0 || bucketdisable) 2727fc03d22bSJeff Roberson goto zfree_item; 2728fc03d22bSJeff Roberson 27294d104ba0SAlexander Motin lockfail = 0; 27304d104ba0SAlexander Motin if (ZONE_TRYLOCK(zone) == 0) { 27314d104ba0SAlexander Motin /* Record contention to size the buckets. */ 27328355f576SJeff Roberson ZONE_LOCK(zone); 27334d104ba0SAlexander Motin lockfail = 1; 27344d104ba0SAlexander Motin } 27355d1ae027SRobert Watson critical_enter(); 27365d1ae027SRobert Watson cpu = curcpu; 27375d1ae027SRobert Watson cache = &zone->uz_cpu[cpu]; 27388355f576SJeff Roberson 2739fc03d22bSJeff Roberson /* 2740fc03d22bSJeff Roberson * Since we have locked the zone we may as well send back our stats. 2741fc03d22bSJeff Roberson */ 27420095a784SJeff Roberson atomic_add_long(&zone->uz_allocs, cache->uc_allocs); 27430095a784SJeff Roberson atomic_add_long(&zone->uz_frees, cache->uc_frees); 2744f4ff923bSRobert Watson cache->uc_allocs = 0; 2745f4ff923bSRobert Watson cache->uc_frees = 0; 2746f4ff923bSRobert Watson 27478355f576SJeff Roberson bucket = cache->uc_freebucket; 2748fc03d22bSJeff Roberson if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) { 2749fc03d22bSJeff Roberson ZONE_UNLOCK(zone); 2750fc03d22bSJeff Roberson goto zfree_start; 2751fc03d22bSJeff Roberson } 27528355f576SJeff Roberson cache->uc_freebucket = NULL; 27538355f576SJeff Roberson 27548355f576SJeff Roberson /* Can we throw this on the zone full list? */ 27558355f576SJeff Roberson if (bucket != NULL) { 27568355f576SJeff Roberson #ifdef UMA_DEBUG_ALLOC 27578355f576SJeff Roberson printf("uma_zfree: Putting old bucket on the free list.\n"); 27588355f576SJeff Roberson #endif 2759cae33c14SJeff Roberson /* ub_cnt is pointing to the last free item */ 2760cae33c14SJeff Roberson KASSERT(bucket->ub_cnt != 0, 27618355f576SJeff Roberson ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n")); 2762fc03d22bSJeff Roberson LIST_INSERT_HEAD(&zone->uz_buckets, bucket, ub_link); 27638355f576SJeff Roberson } 2764fc03d22bSJeff Roberson 27655d1ae027SRobert Watson /* We are no longer associated with this CPU. 
*/ 27665d1ae027SRobert Watson critical_exit(); 2767a553d4b8SJeff Roberson 27684d104ba0SAlexander Motin /* 27694d104ba0SAlexander Motin * We bump the uz count when the cache size is insufficient to 27704d104ba0SAlexander Motin * handle the working set. 27714d104ba0SAlexander Motin */ 27724d104ba0SAlexander Motin if (lockfail && zone->uz_count < BUCKET_MAX) 27734d104ba0SAlexander Motin zone->uz_count++; 2774a553d4b8SJeff Roberson ZONE_UNLOCK(zone); 2775a553d4b8SJeff Roberson 27768355f576SJeff Roberson #ifdef UMA_DEBUG_ALLOC 27778355f576SJeff Roberson printf("uma_zfree: Allocating new free bucket.\n"); 27788355f576SJeff Roberson #endif 27796fd34d6fSJeff Roberson bucket = bucket_alloc(zone, udata, M_NOWAIT); 27804741dcbfSJeff Roberson if (bucket) { 2781fc03d22bSJeff Roberson critical_enter(); 2782fc03d22bSJeff Roberson cpu = curcpu; 2783fc03d22bSJeff Roberson cache = &zone->uz_cpu[cpu]; 2784fc03d22bSJeff Roberson if (cache->uc_freebucket == NULL) { 2785fc03d22bSJeff Roberson cache->uc_freebucket = bucket; 2786fc03d22bSJeff Roberson goto zfree_start; 2787fc03d22bSJeff Roberson } 2788fc03d22bSJeff Roberson /* 2789fc03d22bSJeff Roberson * We lost the race, start over. We have to drop our 2790fc03d22bSJeff Roberson * critical section to free the bucket. 2791fc03d22bSJeff Roberson */ 2792fc03d22bSJeff Roberson critical_exit(); 27936fd34d6fSJeff Roberson bucket_free(zone, bucket, udata); 2794a553d4b8SJeff Roberson goto zfree_restart; 27958355f576SJeff Roberson } 27968355f576SJeff Roberson 2797a553d4b8SJeff Roberson /* 2798a553d4b8SJeff Roberson * If nothing else caught this, we'll just do an internal free. 2799a553d4b8SJeff Roberson */ 2800fc03d22bSJeff Roberson zfree_item: 28010095a784SJeff Roberson zone_free_item(zone, item, udata, SKIP_DTOR); 28028355f576SJeff Roberson 28038355f576SJeff Roberson return; 28048355f576SJeff Roberson } 28058355f576SJeff Roberson 28068355f576SJeff Roberson static void 28070095a784SJeff Roberson slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item) 28088355f576SJeff Roberson { 280985dcf349SGleb Smirnoff uint8_t freei; 2810099a0e58SBosko Milekic 28110095a784SJeff Roberson mtx_assert(&keg->uk_lock, MA_OWNED); 2812e20a199fSJeff Roberson MPASS(keg == slab->us_keg); 28138355f576SJeff Roberson 28148355f576SJeff Roberson /* Do we need to remove from any lists? */ 2815099a0e58SBosko Milekic if (slab->us_freecount+1 == keg->uk_ipers) { 28168355f576SJeff Roberson LIST_REMOVE(slab, us_link); 2817099a0e58SBosko Milekic LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link); 28188355f576SJeff Roberson } else if (slab->us_freecount == 0) { 28198355f576SJeff Roberson LIST_REMOVE(slab, us_link); 2820099a0e58SBosko Milekic LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link); 28218355f576SJeff Roberson } 28228355f576SJeff Roberson 2823ef72505eSJeff Roberson /* Slab management. */ 2824ef72505eSJeff Roberson freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize; 2825ef72505eSJeff Roberson BIT_SET(SLAB_SETSIZE, freei, &slab->us_free); 28268355f576SJeff Roberson slab->us_freecount++; 28278355f576SJeff Roberson 2828ef72505eSJeff Roberson /* Keg statistics. 
*/ 2829099a0e58SBosko Milekic keg->uk_free++; 28300095a784SJeff Roberson } 28310095a784SJeff Roberson 28320095a784SJeff Roberson static void 28330095a784SJeff Roberson zone_release(uma_zone_t zone, void **bucket, int cnt) 28340095a784SJeff Roberson { 28350095a784SJeff Roberson void *item; 28360095a784SJeff Roberson uma_slab_t slab; 28370095a784SJeff Roberson uma_keg_t keg; 28380095a784SJeff Roberson uint8_t *mem; 28390095a784SJeff Roberson int clearfull; 28400095a784SJeff Roberson int i; 28418355f576SJeff Roberson 2842e20a199fSJeff Roberson clearfull = 0; 28430095a784SJeff Roberson keg = zone_first_keg(zone); 2844af526374SJeff Roberson KEG_LOCK(keg); 28450095a784SJeff Roberson for (i = 0; i < cnt; i++) { 28460095a784SJeff Roberson item = bucket[i]; 28470095a784SJeff Roberson if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) { 28480095a784SJeff Roberson mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK)); 28490095a784SJeff Roberson if (zone->uz_flags & UMA_ZONE_HASH) { 28500095a784SJeff Roberson slab = hash_sfind(&keg->uk_hash, mem); 28510095a784SJeff Roberson } else { 28520095a784SJeff Roberson mem += keg->uk_pgoff; 28530095a784SJeff Roberson slab = (uma_slab_t)mem; 28540095a784SJeff Roberson } 28550095a784SJeff Roberson } else { 28560095a784SJeff Roberson slab = vtoslab((vm_offset_t)item); 28570095a784SJeff Roberson if (slab->us_keg != keg) { 28580095a784SJeff Roberson KEG_UNLOCK(keg); 28590095a784SJeff Roberson keg = slab->us_keg; 28600095a784SJeff Roberson KEG_LOCK(keg); 28610095a784SJeff Roberson } 28620095a784SJeff Roberson } 28630095a784SJeff Roberson slab_free_item(keg, slab, item); 2864099a0e58SBosko Milekic if (keg->uk_flags & UMA_ZFLAG_FULL) { 2865e20a199fSJeff Roberson if (keg->uk_pages < keg->uk_maxpages) { 2866099a0e58SBosko Milekic keg->uk_flags &= ~UMA_ZFLAG_FULL; 2867e20a199fSJeff Roberson clearfull = 1; 2868e20a199fSJeff Roberson } 2869af7f9b97SJeff Roberson 287077380291SMohan Srinivasan /* 2871ef72505eSJeff Roberson * We can handle one more allocation. Since we're 2872ef72505eSJeff Roberson * clearing ZFLAG_FULL, wake up all procs blocked 2873ef72505eSJeff Roberson * on pages. This should be uncommon, so keeping this 2874ef72505eSJeff Roberson * simple for now (rather than adding count of blocked 287577380291SMohan Srinivasan * threads etc). 287677380291SMohan Srinivasan */ 287777380291SMohan Srinivasan wakeup(keg); 2878af7f9b97SJeff Roberson } 28790095a784SJeff Roberson } 2880af526374SJeff Roberson KEG_UNLOCK(keg); 28810095a784SJeff Roberson if (clearfull) { 2882af526374SJeff Roberson ZONE_LOCK(zone); 2883e20a199fSJeff Roberson zone->uz_flags &= ~UMA_ZFLAG_FULL; 2884e20a199fSJeff Roberson wakeup(zone); 2885605cbd6aSJeff Roberson ZONE_UNLOCK(zone); 2886af526374SJeff Roberson } 2887ef72505eSJeff Roberson 28888355f576SJeff Roberson } 28898355f576SJeff Roberson 28900095a784SJeff Roberson /* 28910095a784SJeff Roberson * Frees a single item to any zone. 
28920095a784SJeff Roberson * 28930095a784SJeff Roberson * Arguments: 28940095a784SJeff Roberson * zone The zone to free to 28950095a784SJeff Roberson * item The item we're freeing 28960095a784SJeff Roberson * udata User supplied data for the dtor 28970095a784SJeff Roberson * skip Skip dtors and finis 28980095a784SJeff Roberson */ 28990095a784SJeff Roberson static void 29000095a784SJeff Roberson zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip) 29010095a784SJeff Roberson { 29020095a784SJeff Roberson 29030095a784SJeff Roberson #ifdef INVARIANTS 29040095a784SJeff Roberson if (skip == SKIP_NONE) { 29050095a784SJeff Roberson if (zone->uz_flags & UMA_ZONE_MALLOC) 29060095a784SJeff Roberson uma_dbg_free(zone, udata, item); 29070095a784SJeff Roberson else 29080095a784SJeff Roberson uma_dbg_free(zone, NULL, item); 29090095a784SJeff Roberson } 29100095a784SJeff Roberson #endif 29110095a784SJeff Roberson if (skip < SKIP_DTOR && zone->uz_dtor) 29120095a784SJeff Roberson zone->uz_dtor(item, zone->uz_size, udata); 29130095a784SJeff Roberson 29140095a784SJeff Roberson if (skip < SKIP_FINI && zone->uz_fini) 29150095a784SJeff Roberson zone->uz_fini(item, zone->uz_size); 29160095a784SJeff Roberson 29170095a784SJeff Roberson atomic_add_long(&zone->uz_frees, 1); 29180095a784SJeff Roberson zone->uz_release(zone->uz_arg, &item, 1); 29190095a784SJeff Roberson } 29200095a784SJeff Roberson 29218355f576SJeff Roberson /* See uma.h */ 29221c6cae97SLawrence Stewart int 2923736ee590SJeff Roberson uma_zone_set_max(uma_zone_t zone, int nitems) 2924736ee590SJeff Roberson { 2925099a0e58SBosko Milekic uma_keg_t keg; 2926099a0e58SBosko Milekic 2927e20a199fSJeff Roberson keg = zone_first_keg(zone); 29280095a784SJeff Roberson if (keg == NULL) 29290095a784SJeff Roberson return (0); 2930af526374SJeff Roberson KEG_LOCK(keg); 2931e20a199fSJeff Roberson keg->uk_maxpages = (nitems / keg->uk_ipers) * keg->uk_ppera; 2932099a0e58SBosko Milekic if (keg->uk_maxpages * keg->uk_ipers < nitems) 2933e20a199fSJeff Roberson keg->uk_maxpages += keg->uk_ppera; 29341c6cae97SLawrence Stewart nitems = keg->uk_maxpages * keg->uk_ipers; 2935af526374SJeff Roberson KEG_UNLOCK(keg); 29361c6cae97SLawrence Stewart 29371c6cae97SLawrence Stewart return (nitems); 2938736ee590SJeff Roberson } 2939736ee590SJeff Roberson 2940736ee590SJeff Roberson /* See uma.h */ 2941e49471b0SAndre Oppermann int 2942e49471b0SAndre Oppermann uma_zone_get_max(uma_zone_t zone) 2943e49471b0SAndre Oppermann { 2944e49471b0SAndre Oppermann int nitems; 2945e49471b0SAndre Oppermann uma_keg_t keg; 2946e49471b0SAndre Oppermann 2947e49471b0SAndre Oppermann keg = zone_first_keg(zone); 29480095a784SJeff Roberson if (keg == NULL) 29490095a784SJeff Roberson return (0); 2950af526374SJeff Roberson KEG_LOCK(keg); 2951e49471b0SAndre Oppermann nitems = keg->uk_maxpages * keg->uk_ipers; 2952af526374SJeff Roberson KEG_UNLOCK(keg); 2953e49471b0SAndre Oppermann 2954e49471b0SAndre Oppermann return (nitems); 2955e49471b0SAndre Oppermann } 2956e49471b0SAndre Oppermann 2957e49471b0SAndre Oppermann /* See uma.h */ 29582f891cd5SPawel Jakub Dawidek void 29592f891cd5SPawel Jakub Dawidek uma_zone_set_warning(uma_zone_t zone, const char *warning) 29602f891cd5SPawel Jakub Dawidek { 29612f891cd5SPawel Jakub Dawidek 29622f891cd5SPawel Jakub Dawidek ZONE_LOCK(zone); 29632f891cd5SPawel Jakub Dawidek zone->uz_warning = warning; 29642f891cd5SPawel Jakub Dawidek ZONE_UNLOCK(zone); 29652f891cd5SPawel Jakub Dawidek } 29662f891cd5SPawel Jakub Dawidek 29672f891cd5SPawel Jakub Dawidek /* See uma.h 
*/ 296854503a13SJonathan T. Looney void 296954503a13SJonathan T. Looney uma_zone_set_maxaction(uma_zone_t zone, uma_maxaction_t maxaction) 297054503a13SJonathan T. Looney { 297154503a13SJonathan T. Looney 297254503a13SJonathan T. Looney ZONE_LOCK(zone); 2973e60b2fcbSGleb Smirnoff TASK_INIT(&zone->uz_maxaction, 0, (task_fn_t *)maxaction, zone); 297454503a13SJonathan T. Looney ZONE_UNLOCK(zone); 297554503a13SJonathan T. Looney } 297654503a13SJonathan T. Looney 297754503a13SJonathan T. Looney /* See uma.h */ 2978c4ae7908SLawrence Stewart int 2979c4ae7908SLawrence Stewart uma_zone_get_cur(uma_zone_t zone) 2980c4ae7908SLawrence Stewart { 2981c4ae7908SLawrence Stewart int64_t nitems; 2982c4ae7908SLawrence Stewart u_int i; 2983c4ae7908SLawrence Stewart 2984c4ae7908SLawrence Stewart ZONE_LOCK(zone); 2985c4ae7908SLawrence Stewart nitems = zone->uz_allocs - zone->uz_frees; 2986c4ae7908SLawrence Stewart CPU_FOREACH(i) { 2987c4ae7908SLawrence Stewart /* 2988c4ae7908SLawrence Stewart * See the comment in sysctl_vm_zone_stats() regarding the 2989c4ae7908SLawrence Stewart * safety of accessing the per-cpu caches. With the zone lock 2990c4ae7908SLawrence Stewart * held, it is safe, but can potentially result in stale data. 2991c4ae7908SLawrence Stewart */ 2992c4ae7908SLawrence Stewart nitems += zone->uz_cpu[i].uc_allocs - 2993c4ae7908SLawrence Stewart zone->uz_cpu[i].uc_frees; 2994c4ae7908SLawrence Stewart } 2995c4ae7908SLawrence Stewart ZONE_UNLOCK(zone); 2996c4ae7908SLawrence Stewart 2997c4ae7908SLawrence Stewart return (nitems < 0 ? 0 : nitems); 2998c4ae7908SLawrence Stewart } 2999c4ae7908SLawrence Stewart 3000c4ae7908SLawrence Stewart /* See uma.h */ 3001736ee590SJeff Roberson void 3002099a0e58SBosko Milekic uma_zone_set_init(uma_zone_t zone, uma_init uminit) 3003099a0e58SBosko Milekic { 3004e20a199fSJeff Roberson uma_keg_t keg; 3005e20a199fSJeff Roberson 3006e20a199fSJeff Roberson keg = zone_first_keg(zone); 30070095a784SJeff Roberson KASSERT(keg != NULL, ("uma_zone_set_init: Invalid zone type")); 3008af526374SJeff Roberson KEG_LOCK(keg); 3009e20a199fSJeff Roberson KASSERT(keg->uk_pages == 0, 3010099a0e58SBosko Milekic ("uma_zone_set_init on non-empty keg")); 3011e20a199fSJeff Roberson keg->uk_init = uminit; 3012af526374SJeff Roberson KEG_UNLOCK(keg); 3013099a0e58SBosko Milekic } 3014099a0e58SBosko Milekic 3015099a0e58SBosko Milekic /* See uma.h */ 3016099a0e58SBosko Milekic void 3017099a0e58SBosko Milekic uma_zone_set_fini(uma_zone_t zone, uma_fini fini) 3018099a0e58SBosko Milekic { 3019e20a199fSJeff Roberson uma_keg_t keg; 3020e20a199fSJeff Roberson 3021e20a199fSJeff Roberson keg = zone_first_keg(zone); 30221d2c0c46SDmitry Chagin KASSERT(keg != NULL, ("uma_zone_set_fini: Invalid zone type")); 3023af526374SJeff Roberson KEG_LOCK(keg); 3024e20a199fSJeff Roberson KASSERT(keg->uk_pages == 0, 3025099a0e58SBosko Milekic ("uma_zone_set_fini on non-empty keg")); 3026e20a199fSJeff Roberson keg->uk_fini = fini; 3027af526374SJeff Roberson KEG_UNLOCK(keg); 3028099a0e58SBosko Milekic } 3029099a0e58SBosko Milekic 3030099a0e58SBosko Milekic /* See uma.h */ 3031099a0e58SBosko Milekic void 3032099a0e58SBosko Milekic uma_zone_set_zinit(uma_zone_t zone, uma_init zinit) 3033099a0e58SBosko Milekic { 3034af526374SJeff Roberson 3035099a0e58SBosko Milekic ZONE_LOCK(zone); 3036e20a199fSJeff Roberson KASSERT(zone_first_keg(zone)->uk_pages == 0, 3037099a0e58SBosko Milekic ("uma_zone_set_zinit on non-empty keg")); 3038099a0e58SBosko Milekic zone->uz_init = zinit; 3039099a0e58SBosko Milekic ZONE_UNLOCK(zone); 
3040099a0e58SBosko Milekic } 3041099a0e58SBosko Milekic 3042099a0e58SBosko Milekic /* See uma.h */ 3043099a0e58SBosko Milekic void 3044099a0e58SBosko Milekic uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini) 3045099a0e58SBosko Milekic { 3046af526374SJeff Roberson 3047099a0e58SBosko Milekic ZONE_LOCK(zone); 3048e20a199fSJeff Roberson KASSERT(zone_first_keg(zone)->uk_pages == 0, 3049099a0e58SBosko Milekic ("uma_zone_set_zfini on non-empty keg")); 3050099a0e58SBosko Milekic zone->uz_fini = zfini; 3051099a0e58SBosko Milekic ZONE_UNLOCK(zone); 3052099a0e58SBosko Milekic } 3053099a0e58SBosko Milekic 3054099a0e58SBosko Milekic /* See uma.h */ 3055b23f72e9SBrian Feldman /* XXX uk_freef is not actually used with the zone locked */ 3056099a0e58SBosko Milekic void 30578355f576SJeff Roberson uma_zone_set_freef(uma_zone_t zone, uma_free freef) 30588355f576SJeff Roberson { 30590095a784SJeff Roberson uma_keg_t keg; 3060e20a199fSJeff Roberson 30610095a784SJeff Roberson keg = zone_first_keg(zone); 30621d2c0c46SDmitry Chagin KASSERT(keg != NULL, ("uma_zone_set_freef: Invalid zone type")); 3063af526374SJeff Roberson KEG_LOCK(keg); 30640095a784SJeff Roberson keg->uk_freef = freef; 3065af526374SJeff Roberson KEG_UNLOCK(keg); 30668355f576SJeff Roberson } 30678355f576SJeff Roberson 30688355f576SJeff Roberson /* See uma.h */ 3069b23f72e9SBrian Feldman /* XXX uk_allocf is not actually used with the zone locked */ 30708355f576SJeff Roberson void 30718355f576SJeff Roberson uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf) 30728355f576SJeff Roberson { 3073e20a199fSJeff Roberson uma_keg_t keg; 3074e20a199fSJeff Roberson 3075e20a199fSJeff Roberson keg = zone_first_keg(zone); 3076af526374SJeff Roberson KEG_LOCK(keg); 3077e20a199fSJeff Roberson keg->uk_allocf = allocf; 3078af526374SJeff Roberson KEG_UNLOCK(keg); 30798355f576SJeff Roberson } 30808355f576SJeff Roberson 30818355f576SJeff Roberson /* See uma.h */ 30826fd34d6fSJeff Roberson void 30836fd34d6fSJeff Roberson uma_zone_reserve(uma_zone_t zone, int items) 30846fd34d6fSJeff Roberson { 30856fd34d6fSJeff Roberson uma_keg_t keg; 30866fd34d6fSJeff Roberson 30876fd34d6fSJeff Roberson keg = zone_first_keg(zone); 30886fd34d6fSJeff Roberson if (keg == NULL) 30896fd34d6fSJeff Roberson return; 30906fd34d6fSJeff Roberson KEG_LOCK(keg); 30916fd34d6fSJeff Roberson keg->uk_reserve = items; 30926fd34d6fSJeff Roberson KEG_UNLOCK(keg); 30936fd34d6fSJeff Roberson 30946fd34d6fSJeff Roberson return; 30956fd34d6fSJeff Roberson } 30966fd34d6fSJeff Roberson 30976fd34d6fSJeff Roberson /* See uma.h */ 30988355f576SJeff Roberson int 3099a4915c21SAttilio Rao uma_zone_reserve_kva(uma_zone_t zone, int count) 31008355f576SJeff Roberson { 3101099a0e58SBosko Milekic uma_keg_t keg; 31028355f576SJeff Roberson vm_offset_t kva; 31039ba30bcbSZbigniew Bodek u_int pages; 31048355f576SJeff Roberson 3105e20a199fSJeff Roberson keg = zone_first_keg(zone); 31060095a784SJeff Roberson if (keg == NULL) 31070095a784SJeff Roberson return (0); 3108099a0e58SBosko Milekic pages = count / keg->uk_ipers; 31098355f576SJeff Roberson 3110099a0e58SBosko Milekic if (pages * keg->uk_ipers < count) 31118355f576SJeff Roberson pages++; 3112a553d4b8SJeff Roberson 3113a4915c21SAttilio Rao #ifdef UMA_MD_SMALL_ALLOC 3114a4915c21SAttilio Rao if (keg->uk_ppera > 1) { 3115a4915c21SAttilio Rao #else 3116a4915c21SAttilio Rao if (1) { 3117a4915c21SAttilio Rao #endif 31189ba30bcbSZbigniew Bodek kva = kva_alloc((vm_size_t)pages * UMA_SLAB_SIZE); 3119d1f42ac2SAlan Cox if (kva == 0) 31208355f576SJeff Roberson return (0); 
3121a4915c21SAttilio Rao } else 3122a4915c21SAttilio Rao kva = 0; 3123af526374SJeff Roberson KEG_LOCK(keg); 3124099a0e58SBosko Milekic keg->uk_kva = kva; 3125a4915c21SAttilio Rao keg->uk_offset = 0; 3126099a0e58SBosko Milekic keg->uk_maxpages = pages; 3127a4915c21SAttilio Rao #ifdef UMA_MD_SMALL_ALLOC 3128a4915c21SAttilio Rao keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc; 3129a4915c21SAttilio Rao #else 3130a4915c21SAttilio Rao keg->uk_allocf = noobj_alloc; 3131a4915c21SAttilio Rao #endif 31326fd34d6fSJeff Roberson keg->uk_flags |= UMA_ZONE_NOFREE; 3133af526374SJeff Roberson KEG_UNLOCK(keg); 3134af526374SJeff Roberson 31358355f576SJeff Roberson return (1); 31368355f576SJeff Roberson } 31378355f576SJeff Roberson 31388355f576SJeff Roberson /* See uma.h */ 31398355f576SJeff Roberson void 31408355f576SJeff Roberson uma_prealloc(uma_zone_t zone, int items) 31418355f576SJeff Roberson { 31428355f576SJeff Roberson int slabs; 31438355f576SJeff Roberson uma_slab_t slab; 3144099a0e58SBosko Milekic uma_keg_t keg; 31458355f576SJeff Roberson 3146e20a199fSJeff Roberson keg = zone_first_keg(zone); 31470095a784SJeff Roberson if (keg == NULL) 31480095a784SJeff Roberson return; 3149af526374SJeff Roberson KEG_LOCK(keg); 3150099a0e58SBosko Milekic slabs = items / keg->uk_ipers; 3151099a0e58SBosko Milekic if (slabs * keg->uk_ipers < items) 31528355f576SJeff Roberson slabs++; 31538355f576SJeff Roberson while (slabs > 0) { 3154e20a199fSJeff Roberson slab = keg_alloc_slab(keg, zone, M_WAITOK); 3155e20a199fSJeff Roberson if (slab == NULL) 3156e20a199fSJeff Roberson break; 3157e20a199fSJeff Roberson MPASS(slab->us_keg == keg); 3158099a0e58SBosko Milekic LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link); 31598355f576SJeff Roberson slabs--; 31608355f576SJeff Roberson } 3161af526374SJeff Roberson KEG_UNLOCK(keg); 31628355f576SJeff Roberson } 31638355f576SJeff Roberson 31648355f576SJeff Roberson /* See uma.h */ 316544ec2b63SKonstantin Belousov static void 316644ec2b63SKonstantin Belousov uma_reclaim_locked(bool kmem_danger) 31678355f576SJeff Roberson { 316844ec2b63SKonstantin Belousov 31698355f576SJeff Roberson #ifdef UMA_DEBUG 31708355f576SJeff Roberson printf("UMA: vm asked us to release pages!\n"); 31718355f576SJeff Roberson #endif 317244ec2b63SKonstantin Belousov sx_assert(&uma_drain_lock, SA_XLOCKED); 317386bbae32SJeff Roberson bucket_enable(); 31748355f576SJeff Roberson zone_foreach(zone_drain); 317544ec2b63SKonstantin Belousov if (vm_page_count_min() || kmem_danger) { 3176a2de44abSAlexander Motin cache_drain_safe(NULL); 3177a2de44abSAlexander Motin zone_foreach(zone_drain); 3178a2de44abSAlexander Motin } 31798355f576SJeff Roberson /* 31808355f576SJeff Roberson * Some slabs may have been freed but this zone will be visited early; 31818355f576SJeff Roberson * we visit it again so that we can free pages that are empty once other 31828355f576SJeff Roberson * zones are drained. We have to do the same for buckets. 
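 *
 * (slabzone is where the uma_slab_t headers for the other zones come
 * from, so it can only shed pages once the zones using those headers
 * have been drained.)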
31838355f576SJeff Roberson */ 31849643769aSJeff Roberson zone_drain(slabzone); 3185cae33c14SJeff Roberson bucket_zone_drain(); 318644ec2b63SKonstantin Belousov } 318744ec2b63SKonstantin Belousov 318844ec2b63SKonstantin Belousov void 318944ec2b63SKonstantin Belousov uma_reclaim(void) 319044ec2b63SKonstantin Belousov { 319144ec2b63SKonstantin Belousov 319244ec2b63SKonstantin Belousov sx_xlock(&uma_drain_lock); 319344ec2b63SKonstantin Belousov uma_reclaim_locked(false); 319495c4bf75SKonstantin Belousov sx_xunlock(&uma_drain_lock); 31958355f576SJeff Roberson } 31968355f576SJeff Roberson 319744ec2b63SKonstantin Belousov static int uma_reclaim_needed; 319844ec2b63SKonstantin Belousov 319944ec2b63SKonstantin Belousov void 320044ec2b63SKonstantin Belousov uma_reclaim_wakeup(void) 320144ec2b63SKonstantin Belousov { 320244ec2b63SKonstantin Belousov 320344ec2b63SKonstantin Belousov uma_reclaim_needed = 1; 320444ec2b63SKonstantin Belousov wakeup(&uma_reclaim_needed); 320544ec2b63SKonstantin Belousov } 320644ec2b63SKonstantin Belousov 320744ec2b63SKonstantin Belousov void 320844ec2b63SKonstantin Belousov uma_reclaim_worker(void *arg __unused) 320944ec2b63SKonstantin Belousov { 321044ec2b63SKonstantin Belousov 321144ec2b63SKonstantin Belousov sx_xlock(&uma_drain_lock); 321244ec2b63SKonstantin Belousov for (;;) { 321344ec2b63SKonstantin Belousov sx_sleep(&uma_reclaim_needed, &uma_drain_lock, PVM, 321444ec2b63SKonstantin Belousov "umarcl", 0); 321544ec2b63SKonstantin Belousov if (uma_reclaim_needed) { 321644ec2b63SKonstantin Belousov uma_reclaim_needed = 0; 321744ec2b63SKonstantin Belousov uma_reclaim_locked(true); 321844ec2b63SKonstantin Belousov } 321944ec2b63SKonstantin Belousov } 322044ec2b63SKonstantin Belousov } 322144ec2b63SKonstantin Belousov 3222663b416fSJohn Baldwin /* See uma.h */ 3223663b416fSJohn Baldwin int 3224663b416fSJohn Baldwin uma_zone_exhausted(uma_zone_t zone) 3225663b416fSJohn Baldwin { 3226663b416fSJohn Baldwin int full; 3227663b416fSJohn Baldwin 3228663b416fSJohn Baldwin ZONE_LOCK(zone); 3229e20a199fSJeff Roberson full = (zone->uz_flags & UMA_ZFLAG_FULL); 3230663b416fSJohn Baldwin ZONE_UNLOCK(zone); 3231663b416fSJohn Baldwin return (full); 3232663b416fSJohn Baldwin } 3233663b416fSJohn Baldwin 32346c125b8dSMohan Srinivasan int 32356c125b8dSMohan Srinivasan uma_zone_exhausted_nolock(uma_zone_t zone) 32366c125b8dSMohan Srinivasan { 3237e20a199fSJeff Roberson return (zone->uz_flags & UMA_ZFLAG_FULL); 32386c125b8dSMohan Srinivasan } 32396c125b8dSMohan Srinivasan 32408355f576SJeff Roberson void * 3241f2c2231eSRyan Stone uma_large_malloc(vm_size_t size, int wait) 32428355f576SJeff Roberson { 32438355f576SJeff Roberson void *mem; 32448355f576SJeff Roberson uma_slab_t slab; 324585dcf349SGleb Smirnoff uint8_t flags; 32468355f576SJeff Roberson 3247e20a199fSJeff Roberson slab = zone_alloc_item(slabzone, NULL, wait); 32488355f576SJeff Roberson if (slab == NULL) 32498355f576SJeff Roberson return (NULL); 32508355f576SJeff Roberson mem = page_alloc(NULL, size, &flags, wait); 32518355f576SJeff Roberson if (mem) { 325299571dc3SJeff Roberson vsetslab((vm_offset_t)mem, slab); 32538355f576SJeff Roberson slab->us_data = mem; 32548355f576SJeff Roberson slab->us_flags = flags | UMA_SLAB_MALLOC; 32558355f576SJeff Roberson slab->us_size = size; 32568355f576SJeff Roberson } else { 32570095a784SJeff Roberson zone_free_item(slabzone, slab, NULL, SKIP_NONE); 32588355f576SJeff Roberson } 32598355f576SJeff Roberson 32608355f576SJeff Roberson return (mem); 32618355f576SJeff Roberson } 32628355f576SJeff Roberson 
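/*
 * Illustrative sketch (kept under #if 0, not compiled): how a
 * malloc(9)-style consumer might pair uma_large_malloc() above with
 * uma_large_free() below.  The allocation records its slab header with
 * vsetslab(), so the free side can recover it via vtoslab().  The
 * example_* names are hypothetical and not part of UMA.
 */
#if 0
static void *
example_huge_alloc(vm_size_t size)
{

	/* May still return NULL if the slab header cannot be allocated. */
	return (uma_large_malloc(size, M_WAITOK));
}

static void
example_huge_free(void *mem)
{
	uma_slab_t slab;

	slab = vtoslab((vm_offset_t)mem);
	KASSERT(slab->us_flags & UMA_SLAB_MALLOC,
	    ("example_huge_free: %p was not allocated by uma_large_malloc",
	    mem));
	uma_large_free(slab);
}
#endif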
32638355f576SJeff Roberson void 32648355f576SJeff Roberson uma_large_free(uma_slab_t slab) 32658355f576SJeff Roberson { 3266c325e866SKonstantin Belousov 32678355f576SJeff Roberson page_free(slab->us_data, slab->us_size, slab->us_flags); 32680095a784SJeff Roberson zone_free_item(slabzone, slab, NULL, SKIP_NONE); 32698355f576SJeff Roberson } 32708355f576SJeff Roberson 327148343a2fSGleb Smirnoff static void 327248343a2fSGleb Smirnoff uma_zero_item(void *item, uma_zone_t zone) 327348343a2fSGleb Smirnoff { 327448343a2fSGleb Smirnoff 327548343a2fSGleb Smirnoff if (zone->uz_flags & UMA_ZONE_PCPU) { 327648343a2fSGleb Smirnoff for (int i = 0; i < mp_ncpus; i++) 327748343a2fSGleb Smirnoff bzero(zpcpu_get_cpu(item, i), zone->uz_size); 327848343a2fSGleb Smirnoff } else 327948343a2fSGleb Smirnoff bzero(item, zone->uz_size); 328048343a2fSGleb Smirnoff } 328148343a2fSGleb Smirnoff 32828355f576SJeff Roberson void 32838355f576SJeff Roberson uma_print_stats(void) 32848355f576SJeff Roberson { 32858355f576SJeff Roberson zone_foreach(uma_print_zone); 32868355f576SJeff Roberson } 32878355f576SJeff Roberson 3288504d5de3SJeff Roberson static void 3289504d5de3SJeff Roberson slab_print(uma_slab_t slab) 3290504d5de3SJeff Roberson { 3291ef72505eSJeff Roberson printf("slab: keg %p, data %p, freecount %d\n", 3292ef72505eSJeff Roberson slab->us_keg, slab->us_data, slab->us_freecount); 3293504d5de3SJeff Roberson } 3294504d5de3SJeff Roberson 3295504d5de3SJeff Roberson static void 3296504d5de3SJeff Roberson cache_print(uma_cache_t cache) 3297504d5de3SJeff Roberson { 3298504d5de3SJeff Roberson printf("alloc: %p(%d), free: %p(%d)\n", 3299504d5de3SJeff Roberson cache->uc_allocbucket, 3300504d5de3SJeff Roberson cache->uc_allocbucket?cache->uc_allocbucket->ub_cnt:0, 3301504d5de3SJeff Roberson cache->uc_freebucket, 3302504d5de3SJeff Roberson cache->uc_freebucket?cache->uc_freebucket->ub_cnt:0); 3303504d5de3SJeff Roberson } 3304504d5de3SJeff Roberson 3305e20a199fSJeff Roberson static void 3306e20a199fSJeff Roberson uma_print_keg(uma_keg_t keg) 33078355f576SJeff Roberson { 3308504d5de3SJeff Roberson uma_slab_t slab; 3309504d5de3SJeff Roberson 33100b80c1e4SEitan Adler printf("keg: %s(%p) size %d(%d) flags %#x ipers %d ppera %d " 3311e20a199fSJeff Roberson "out %d free %d limit %d\n", 3312e20a199fSJeff Roberson keg->uk_name, keg, keg->uk_size, keg->uk_rsize, keg->uk_flags, 3313099a0e58SBosko Milekic keg->uk_ipers, keg->uk_ppera, 3314e20a199fSJeff Roberson (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free, 3315e20a199fSJeff Roberson (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers); 3316504d5de3SJeff Roberson printf("Part slabs:\n"); 3317099a0e58SBosko Milekic LIST_FOREACH(slab, &keg->uk_part_slab, us_link) 3318504d5de3SJeff Roberson slab_print(slab); 3319504d5de3SJeff Roberson printf("Free slabs:\n"); 3320099a0e58SBosko Milekic LIST_FOREACH(slab, &keg->uk_free_slab, us_link) 3321504d5de3SJeff Roberson slab_print(slab); 3322504d5de3SJeff Roberson printf("Full slabs:\n"); 3323099a0e58SBosko Milekic LIST_FOREACH(slab, &keg->uk_full_slab, us_link) 3324504d5de3SJeff Roberson slab_print(slab); 3325e20a199fSJeff Roberson } 3326e20a199fSJeff Roberson 3327e20a199fSJeff Roberson void 3328e20a199fSJeff Roberson uma_print_zone(uma_zone_t zone) 3329e20a199fSJeff Roberson { 3330e20a199fSJeff Roberson uma_cache_t cache; 3331e20a199fSJeff Roberson uma_klink_t kl; 3332e20a199fSJeff Roberson int i; 3333e20a199fSJeff Roberson 33340b80c1e4SEitan Adler printf("zone: %s(%p) size %d flags %#x\n", 3335e20a199fSJeff Roberson zone->uz_name, 
zone, zone->uz_size, zone->uz_flags); 3336e20a199fSJeff Roberson LIST_FOREACH(kl, &zone->uz_kegs, kl_link) 3337e20a199fSJeff Roberson uma_print_keg(kl->kl_keg); 33383aa6d94eSJohn Baldwin CPU_FOREACH(i) { 3339504d5de3SJeff Roberson cache = &zone->uz_cpu[i]; 3340504d5de3SJeff Roberson printf("CPU %d Cache:\n", i); 3341504d5de3SJeff Roberson cache_print(cache); 3342504d5de3SJeff Roberson } 33438355f576SJeff Roberson } 33448355f576SJeff Roberson 3345a0d4b0aeSRobert Watson #ifdef DDB 33468355f576SJeff Roberson /* 33477a52a97eSRobert Watson * Generate statistics across both the zone and its per-cpu caches. Return 33487a52a97eSRobert Watson * desired statistics if the pointer is non-NULL for that statistic. 33497a52a97eSRobert Watson * 33507a52a97eSRobert Watson * Note: does not update the zone statistics, as it can't safely clear the 33517a52a97eSRobert Watson * per-CPU cache statistic. 33527a52a97eSRobert Watson * 33537a52a97eSRobert Watson * XXXRW: Following the uc_allocbucket and uc_freebucket pointers here isn't 33547a52a97eSRobert Watson * safe from off-CPU; we should modify the caches to track this information 33557a52a97eSRobert Watson * directly so that we don't have to. 33567a52a97eSRobert Watson */ 33577a52a97eSRobert Watson static void 335885dcf349SGleb Smirnoff uma_zone_sumstat(uma_zone_t z, int *cachefreep, uint64_t *allocsp, 335985dcf349SGleb Smirnoff uint64_t *freesp, uint64_t *sleepsp) 33607a52a97eSRobert Watson { 33617a52a97eSRobert Watson uma_cache_t cache; 336285dcf349SGleb Smirnoff uint64_t allocs, frees, sleeps; 33637a52a97eSRobert Watson int cachefree, cpu; 33647a52a97eSRobert Watson 3365bf965959SSean Bruno allocs = frees = sleeps = 0; 33667a52a97eSRobert Watson cachefree = 0; 33673aa6d94eSJohn Baldwin CPU_FOREACH(cpu) { 33687a52a97eSRobert Watson cache = &z->uz_cpu[cpu]; 33697a52a97eSRobert Watson if (cache->uc_allocbucket != NULL) 33707a52a97eSRobert Watson cachefree += cache->uc_allocbucket->ub_cnt; 33717a52a97eSRobert Watson if (cache->uc_freebucket != NULL) 33727a52a97eSRobert Watson cachefree += cache->uc_freebucket->ub_cnt; 33737a52a97eSRobert Watson allocs += cache->uc_allocs; 33747a52a97eSRobert Watson frees += cache->uc_frees; 33757a52a97eSRobert Watson } 33767a52a97eSRobert Watson allocs += z->uz_allocs; 33777a52a97eSRobert Watson frees += z->uz_frees; 3378bf965959SSean Bruno sleeps += z->uz_sleeps; 33797a52a97eSRobert Watson if (cachefreep != NULL) 33807a52a97eSRobert Watson *cachefreep = cachefree; 33817a52a97eSRobert Watson if (allocsp != NULL) 33827a52a97eSRobert Watson *allocsp = allocs; 33837a52a97eSRobert Watson if (freesp != NULL) 33847a52a97eSRobert Watson *freesp = frees; 3385bf965959SSean Bruno if (sleepsp != NULL) 3386bf965959SSean Bruno *sleepsp = sleeps; 33877a52a97eSRobert Watson } 3388a0d4b0aeSRobert Watson #endif /* DDB */ 33897a52a97eSRobert Watson 33907a52a97eSRobert Watson static int 33917a52a97eSRobert Watson sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS) 33927a52a97eSRobert Watson { 33937a52a97eSRobert Watson uma_keg_t kz; 33947a52a97eSRobert Watson uma_zone_t z; 33957a52a97eSRobert Watson int count; 33967a52a97eSRobert Watson 33977a52a97eSRobert Watson count = 0; 3398111fbcd5SBryan Venteicher rw_rlock(&uma_rwlock); 33997a52a97eSRobert Watson LIST_FOREACH(kz, &uma_kegs, uk_link) { 34007a52a97eSRobert Watson LIST_FOREACH(z, &kz->uk_zones, uz_link) 34017a52a97eSRobert Watson count++; 34027a52a97eSRobert Watson } 3403111fbcd5SBryan Venteicher rw_runlock(&uma_rwlock); 34047a52a97eSRobert Watson return (sysctl_handle_int(oidp, &count, 0, req)); 
34057a52a97eSRobert Watson } 34067a52a97eSRobert Watson 34077a52a97eSRobert Watson static int 34087a52a97eSRobert Watson sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS) 34097a52a97eSRobert Watson { 34107a52a97eSRobert Watson struct uma_stream_header ush; 34117a52a97eSRobert Watson struct uma_type_header uth; 34127a52a97eSRobert Watson struct uma_percpu_stat ups; 34137a52a97eSRobert Watson uma_bucket_t bucket; 34147a52a97eSRobert Watson struct sbuf sbuf; 34157a52a97eSRobert Watson uma_cache_t cache; 3416e20a199fSJeff Roberson uma_klink_t kl; 34177a52a97eSRobert Watson uma_keg_t kz; 34187a52a97eSRobert Watson uma_zone_t z; 3419e20a199fSJeff Roberson uma_keg_t k; 34204e657159SMatthew D Fleming int count, error, i; 34217a52a97eSRobert Watson 342200f0e671SMatthew D Fleming error = sysctl_wire_old_buffer(req, 0); 342300f0e671SMatthew D Fleming if (error != 0) 342400f0e671SMatthew D Fleming return (error); 34254e657159SMatthew D Fleming sbuf_new_for_sysctl(&sbuf, NULL, 128, req); 34261eafc078SIan Lepore sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL); 34274e657159SMatthew D Fleming 3428404a593eSMatthew D Fleming count = 0; 3429111fbcd5SBryan Venteicher rw_rlock(&uma_rwlock); 34307a52a97eSRobert Watson LIST_FOREACH(kz, &uma_kegs, uk_link) { 34317a52a97eSRobert Watson LIST_FOREACH(z, &kz->uk_zones, uz_link) 34327a52a97eSRobert Watson count++; 34337a52a97eSRobert Watson } 34347a52a97eSRobert Watson 34357a52a97eSRobert Watson /* 34367a52a97eSRobert Watson * Insert stream header. 34377a52a97eSRobert Watson */ 34387a52a97eSRobert Watson bzero(&ush, sizeof(ush)); 34397a52a97eSRobert Watson ush.ush_version = UMA_STREAM_VERSION; 3440ab3a57c0SRobert Watson ush.ush_maxcpus = (mp_maxid + 1); 34417a52a97eSRobert Watson ush.ush_count = count; 34424e657159SMatthew D Fleming (void)sbuf_bcat(&sbuf, &ush, sizeof(ush)); 34437a52a97eSRobert Watson 34447a52a97eSRobert Watson LIST_FOREACH(kz, &uma_kegs, uk_link) { 34457a52a97eSRobert Watson LIST_FOREACH(z, &kz->uk_zones, uz_link) { 34467a52a97eSRobert Watson bzero(&uth, sizeof(uth)); 34477a52a97eSRobert Watson ZONE_LOCK(z); 3448cbbb4a00SRobert Watson strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME); 34497a52a97eSRobert Watson uth.uth_align = kz->uk_align; 34507a52a97eSRobert Watson uth.uth_size = kz->uk_size; 34517a52a97eSRobert Watson uth.uth_rsize = kz->uk_rsize; 3452e20a199fSJeff Roberson LIST_FOREACH(kl, &z->uz_kegs, kl_link) { 3453e20a199fSJeff Roberson k = kl->kl_keg; 3454e20a199fSJeff Roberson uth.uth_maxpages += k->uk_maxpages; 3455e20a199fSJeff Roberson uth.uth_pages += k->uk_pages; 3456e20a199fSJeff Roberson uth.uth_keg_free += k->uk_free; 3457e20a199fSJeff Roberson uth.uth_limit = (k->uk_maxpages / k->uk_ppera) 3458e20a199fSJeff Roberson * k->uk_ipers; 3459e20a199fSJeff Roberson } 3460cbbb4a00SRobert Watson 3461cbbb4a00SRobert Watson /* 3462cbbb4a00SRobert Watson * A zone is secondary if it is not the first entry 3463cbbb4a00SRobert Watson * on the keg's zone list. 
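 *
 * (For example, a zone created with uma_zsecond_create() on top of
 * an existing keg is reported with UTH_ZONE_SECONDARY set.)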
34447a52a97eSRobert Watson 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
34457a52a97eSRobert Watson 		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
34467a52a97eSRobert Watson 			bzero(&uth, sizeof(uth));
34477a52a97eSRobert Watson 			ZONE_LOCK(z);
3448cbbb4a00SRobert Watson 			strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
34497a52a97eSRobert Watson 			uth.uth_align = kz->uk_align;
34507a52a97eSRobert Watson 			uth.uth_size = kz->uk_size;
34517a52a97eSRobert Watson 			uth.uth_rsize = kz->uk_rsize;
3452e20a199fSJeff Roberson 			LIST_FOREACH(kl, &z->uz_kegs, kl_link) {
3453e20a199fSJeff Roberson 				k = kl->kl_keg;
3454e20a199fSJeff Roberson 				uth.uth_maxpages += k->uk_maxpages;
3455e20a199fSJeff Roberson 				uth.uth_pages += k->uk_pages;
3456e20a199fSJeff Roberson 				uth.uth_keg_free += k->uk_free;
3457e20a199fSJeff Roberson 				uth.uth_limit = (k->uk_maxpages / k->uk_ppera)
3458e20a199fSJeff Roberson 				    * k->uk_ipers;
3459e20a199fSJeff Roberson 			}
3460cbbb4a00SRobert Watson 
3461cbbb4a00SRobert Watson 			/*
3462cbbb4a00SRobert Watson 			 * A zone is secondary if it is not the first entry
3463cbbb4a00SRobert Watson 			 * on the keg's zone list.
3464cbbb4a00SRobert Watson 			 */
3465e20a199fSJeff Roberson 			if ((z->uz_flags & UMA_ZONE_SECONDARY) &&
3466cbbb4a00SRobert Watson 			    (LIST_FIRST(&kz->uk_zones) != z))
3467cbbb4a00SRobert Watson 				uth.uth_zone_flags = UTH_ZONE_SECONDARY;
3468cbbb4a00SRobert Watson 
3469fc03d22bSJeff Roberson 			LIST_FOREACH(bucket, &z->uz_buckets, ub_link)
34707a52a97eSRobert Watson 				uth.uth_zone_free += bucket->ub_cnt;
34717a52a97eSRobert Watson 			uth.uth_allocs = z->uz_allocs;
34727a52a97eSRobert Watson 			uth.uth_frees = z->uz_frees;
34732019094aSRobert Watson 			uth.uth_fails = z->uz_fails;
3474bf965959SSean Bruno 			uth.uth_sleeps = z->uz_sleeps;
34754e657159SMatthew D Fleming 			(void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
34767a52a97eSRobert Watson 			/*
34772450bbb8SRobert Watson 			 * While it is not normally safe to access the cache
34782450bbb8SRobert Watson 			 * bucket pointers while not on the CPU that owns the
34792450bbb8SRobert Watson 			 * cache, we only allow the pointers to be exchanged
34802450bbb8SRobert Watson 			 * without the zone lock held, not invalidated, so
34812450bbb8SRobert Watson 			 * accept the possible race associated with bucket
34822450bbb8SRobert Watson 			 * exchange during monitoring.
34837a52a97eSRobert Watson 			 */
3484ab3a57c0SRobert Watson 			for (i = 0; i < (mp_maxid + 1); i++) {
34857a52a97eSRobert Watson 				bzero(&ups, sizeof(ups));
34867a52a97eSRobert Watson 				if (kz->uk_flags & UMA_ZFLAG_INTERNAL)
34877a52a97eSRobert Watson 					goto skip;
3488082dc776SRobert Watson 				if (CPU_ABSENT(i))
3489082dc776SRobert Watson 					goto skip;
34907a52a97eSRobert Watson 				cache = &z->uz_cpu[i];
34917a52a97eSRobert Watson 				if (cache->uc_allocbucket != NULL)
34927a52a97eSRobert Watson 					ups.ups_cache_free +=
34937a52a97eSRobert Watson 					    cache->uc_allocbucket->ub_cnt;
34947a52a97eSRobert Watson 				if (cache->uc_freebucket != NULL)
34957a52a97eSRobert Watson 					ups.ups_cache_free +=
34967a52a97eSRobert Watson 					    cache->uc_freebucket->ub_cnt;
34977a52a97eSRobert Watson 				ups.ups_allocs = cache->uc_allocs;
34987a52a97eSRobert Watson 				ups.ups_frees = cache->uc_frees;
34997a52a97eSRobert Watson skip:
35004e657159SMatthew D Fleming 				(void)sbuf_bcat(&sbuf, &ups, sizeof(ups));
35017a52a97eSRobert Watson 			}
35022450bbb8SRobert Watson 			ZONE_UNLOCK(z);
35037a52a97eSRobert Watson 		}
35047a52a97eSRobert Watson 	}
3505111fbcd5SBryan Venteicher 	rw_runlock(&uma_rwlock);
35064e657159SMatthew D Fleming 	error = sbuf_finish(&sbuf);
35074e657159SMatthew D Fleming 	sbuf_delete(&sbuf);
35087a52a97eSRobert Watson 	return (error);
35097a52a97eSRobert Watson }
351048c5777eSRobert Watson 
35110a5a3ccbSGleb Smirnoff int
35120a5a3ccbSGleb Smirnoff sysctl_handle_uma_zone_max(SYSCTL_HANDLER_ARGS)
35130a5a3ccbSGleb Smirnoff {
35140a5a3ccbSGleb Smirnoff 	uma_zone_t zone = *(uma_zone_t *)arg1;
351516be9f54SGleb Smirnoff 	int error, max;
35160a5a3ccbSGleb Smirnoff 
351716be9f54SGleb Smirnoff 	max = uma_zone_get_max(zone);
35180a5a3ccbSGleb Smirnoff 	error = sysctl_handle_int(oidp, &max, 0, req);
35190a5a3ccbSGleb Smirnoff 	if (error || !req->newptr)
35200a5a3ccbSGleb Smirnoff 		return (error);
35210a5a3ccbSGleb Smirnoff 
35220a5a3ccbSGleb Smirnoff 	uma_zone_set_max(zone, max);
35230a5a3ccbSGleb Smirnoff 
35240a5a3ccbSGleb Smirnoff 	return (0);
35250a5a3ccbSGleb Smirnoff }
35260a5a3ccbSGleb Smirnoff 
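/*
 * [Editorial example, not part of the annotated source.]
 * sysctl_handle_uma_zone_max() above and sysctl_handle_uma_zone_cur()
 * below let a subsystem expose a zone's item limit and current usage as
 * sysctls by passing the address of its uma_zone_t variable as arg1.  A
 * sketch with hypothetical names:
 *
 *	static uma_zone_t foo_zone;
 *	SYSCTL_PROC(_kern, OID_AUTO, foo_zone_max,
 *	    CTLTYPE_INT | CTLFLAG_RW, &foo_zone, 0,
 *	    sysctl_handle_uma_zone_max, "I", "Maximum foo items");
 *
 * Note that the handlers dereference arg1, so it must point at the
 * variable holding the zone pointer, not at the zone itself.
 */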
35270a5a3ccbSGleb Smirnoff int
35280a5a3ccbSGleb Smirnoff sysctl_handle_uma_zone_cur(SYSCTL_HANDLER_ARGS)
35290a5a3ccbSGleb Smirnoff {
35300a5a3ccbSGleb Smirnoff 	uma_zone_t zone = *(uma_zone_t *)arg1;
35310a5a3ccbSGleb Smirnoff 	int cur;
35320a5a3ccbSGleb Smirnoff 
35330a5a3ccbSGleb Smirnoff 	cur = uma_zone_get_cur(zone);
35340a5a3ccbSGleb Smirnoff 	return (sysctl_handle_int(oidp, &cur, 0, req));
35350a5a3ccbSGleb Smirnoff }
35360a5a3ccbSGleb Smirnoff 
35379542ea7bSGleb Smirnoff #ifdef INVARIANTS
35389542ea7bSGleb Smirnoff static uma_slab_t
35399542ea7bSGleb Smirnoff uma_dbg_getslab(uma_zone_t zone, void *item)
35409542ea7bSGleb Smirnoff {
35419542ea7bSGleb Smirnoff 	uma_slab_t slab;
35429542ea7bSGleb Smirnoff 	uma_keg_t keg;
35439542ea7bSGleb Smirnoff 	uint8_t *mem;
35449542ea7bSGleb Smirnoff 
35459542ea7bSGleb Smirnoff 	mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
35469542ea7bSGleb Smirnoff 	if (zone->uz_flags & UMA_ZONE_VTOSLAB) {
35479542ea7bSGleb Smirnoff 		slab = vtoslab((vm_offset_t)mem);
35489542ea7bSGleb Smirnoff 	} else {
35499542ea7bSGleb Smirnoff 		/*
35509542ea7bSGleb Smirnoff 		 * It is safe to return the slab here even though the
35519542ea7bSGleb Smirnoff 		 * zone is unlocked because the item's allocation state
35529542ea7bSGleb Smirnoff 		 * essentially holds a reference.
35539542ea7bSGleb Smirnoff 		 */
35549542ea7bSGleb Smirnoff 		ZONE_LOCK(zone);
35559542ea7bSGleb Smirnoff 		keg = LIST_FIRST(&zone->uz_kegs)->kl_keg;
35569542ea7bSGleb Smirnoff 		if (keg->uk_flags & UMA_ZONE_HASH)
35579542ea7bSGleb Smirnoff 			slab = hash_sfind(&keg->uk_hash, mem);
35589542ea7bSGleb Smirnoff 		else
35599542ea7bSGleb Smirnoff 			slab = (uma_slab_t)(mem + keg->uk_pgoff);
35609542ea7bSGleb Smirnoff 		ZONE_UNLOCK(zone);
35619542ea7bSGleb Smirnoff 	}
35629542ea7bSGleb Smirnoff 
35639542ea7bSGleb Smirnoff 	return (slab);
35649542ea7bSGleb Smirnoff }
35659542ea7bSGleb Smirnoff 
35669542ea7bSGleb Smirnoff /*
35679542ea7bSGleb Smirnoff  * Set up the slab's freei data such that uma_dbg_free can function.
35689542ea7bSGleb Smirnoff  *
35699542ea7bSGleb Smirnoff  */
35709542ea7bSGleb Smirnoff static void
35719542ea7bSGleb Smirnoff uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item)
35729542ea7bSGleb Smirnoff {
35739542ea7bSGleb Smirnoff 	uma_keg_t keg;
35749542ea7bSGleb Smirnoff 	int freei;
35759542ea7bSGleb Smirnoff 
35769542ea7bSGleb Smirnoff 	if (zone_first_keg(zone) == NULL)
35779542ea7bSGleb Smirnoff 		return;
35789542ea7bSGleb Smirnoff 	if (slab == NULL) {
35799542ea7bSGleb Smirnoff 		slab = uma_dbg_getslab(zone, item);
35809542ea7bSGleb Smirnoff 		if (slab == NULL)
35819542ea7bSGleb Smirnoff 			panic("uma: item %p did not belong to zone %s\n",
35829542ea7bSGleb Smirnoff 			    item, zone->uz_name);
35839542ea7bSGleb Smirnoff 	}
35849542ea7bSGleb Smirnoff 	keg = slab->us_keg;
35859542ea7bSGleb Smirnoff 	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
35869542ea7bSGleb Smirnoff 
35879542ea7bSGleb Smirnoff 	if (BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree))
35889542ea7bSGleb Smirnoff 		panic("Duplicate alloc of %p from zone %p(%s) slab %p(%d)\n",
35899542ea7bSGleb Smirnoff 		    item, zone, zone->uz_name, slab, freei);
35909542ea7bSGleb Smirnoff 	BIT_SET_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree);
35919542ea7bSGleb Smirnoff 
35929542ea7bSGleb Smirnoff 	return;
35939542ea7bSGleb Smirnoff }
35949542ea7bSGleb Smirnoff 
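/*
 * [Editorial note, not part of the annotated source.]  The free-index
 * computation above maps an item address to its slot in the slab's debug
 * bitmap.  A worked example with assumed numbers: if us_data is
 * 0xfffff80012340000, uk_rsize is 256 and item is 0xfffff80012340500,
 * then freei = 0x500 / 256 = 5, so bit 5 of us_debugfree tracks that
 * item's allocated state.
 */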
35959542ea7bSGleb Smirnoff /*
35969542ea7bSGleb Smirnoff  * Verifies freed addresses.  Checks for alignment, valid slab membership
35979542ea7bSGleb Smirnoff  * and duplicate frees.
35989542ea7bSGleb Smirnoff  *
35999542ea7bSGleb Smirnoff  */
36009542ea7bSGleb Smirnoff static void
36019542ea7bSGleb Smirnoff uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
36029542ea7bSGleb Smirnoff {
36039542ea7bSGleb Smirnoff 	uma_keg_t keg;
36049542ea7bSGleb Smirnoff 	int freei;
36059542ea7bSGleb Smirnoff 
36069542ea7bSGleb Smirnoff 	if (zone_first_keg(zone) == NULL)
36079542ea7bSGleb Smirnoff 		return;
36089542ea7bSGleb Smirnoff 	if (slab == NULL) {
36099542ea7bSGleb Smirnoff 		slab = uma_dbg_getslab(zone, item);
36109542ea7bSGleb Smirnoff 		if (slab == NULL)
36119542ea7bSGleb Smirnoff 			panic("uma: Freed item %p did not belong to zone %s\n",
36129542ea7bSGleb Smirnoff 			    item, zone->uz_name);
36139542ea7bSGleb Smirnoff 	}
36149542ea7bSGleb Smirnoff 	keg = slab->us_keg;
36159542ea7bSGleb Smirnoff 	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
36169542ea7bSGleb Smirnoff 
36179542ea7bSGleb Smirnoff 	if (freei >= keg->uk_ipers)
36189542ea7bSGleb Smirnoff 		panic("Invalid free of %p from zone %p(%s) slab %p(%d)\n",
36199542ea7bSGleb Smirnoff 		    item, zone, zone->uz_name, slab, freei);
36209542ea7bSGleb Smirnoff 
36219542ea7bSGleb Smirnoff 	if (((freei * keg->uk_rsize) + slab->us_data) != item)
36229542ea7bSGleb Smirnoff 		panic("Unaligned free of %p from zone %p(%s) slab %p(%d)\n",
36239542ea7bSGleb Smirnoff 		    item, zone, zone->uz_name, slab, freei);
36249542ea7bSGleb Smirnoff 
36259542ea7bSGleb Smirnoff 	if (!BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree))
36269542ea7bSGleb Smirnoff 		panic("Duplicate free of %p from zone %p(%s) slab %p(%d)\n",
36279542ea7bSGleb Smirnoff 		    item, zone, zone->uz_name, slab, freei);
36289542ea7bSGleb Smirnoff 
36299542ea7bSGleb Smirnoff 	BIT_CLR_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree);
36309542ea7bSGleb Smirnoff }
36319542ea7bSGleb Smirnoff #endif /* INVARIANTS */
36329542ea7bSGleb Smirnoff 
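/*
 * [Editorial example, not part of the annotated source.]  With INVARIANTS
 * enabled these hooks make misuse fail fast.  Assuming a zone named
 * "foo_zone" exists, a double free such as:
 *
 *	p = uma_zalloc(foo_zone, M_WAITOK);
 *	uma_zfree(foo_zone, p);
 *	uma_zfree(foo_zone, p);
 *
 * trips the BIT_ISSET() test in uma_dbg_free() and panics with
 * "Duplicate free of ..." instead of silently corrupting the slab's
 * free state.
 */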
363348c5777eSRobert Watson #ifdef DDB
363448c5777eSRobert Watson DB_SHOW_COMMAND(uma, db_show_uma)
363548c5777eSRobert Watson {
363685dcf349SGleb Smirnoff 	uint64_t allocs, frees, sleeps;
363748c5777eSRobert Watson 	uma_bucket_t bucket;
363848c5777eSRobert Watson 	uma_keg_t kz;
363948c5777eSRobert Watson 	uma_zone_t z;
364048c5777eSRobert Watson 	int cachefree;
364148c5777eSRobert Watson 
364203175483SAlexander Motin 	db_printf("%18s %8s %8s %8s %12s %8s %8s\n", "Zone", "Size", "Used",
364303175483SAlexander Motin 	    "Free", "Requests", "Sleeps", "Bucket");
364448c5777eSRobert Watson 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
364548c5777eSRobert Watson 		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
364648c5777eSRobert Watson 			if (kz->uk_flags & UMA_ZFLAG_INTERNAL) {
364748c5777eSRobert Watson 				allocs = z->uz_allocs;
364848c5777eSRobert Watson 				frees = z->uz_frees;
3649bf965959SSean Bruno 				sleeps = z->uz_sleeps;
365048c5777eSRobert Watson 				cachefree = 0;
365148c5777eSRobert Watson 			} else
365248c5777eSRobert Watson 				uma_zone_sumstat(z, &cachefree, &allocs,
3653bf965959SSean Bruno 				    &frees, &sleeps);
3654e20a199fSJeff Roberson 			if (!((z->uz_flags & UMA_ZONE_SECONDARY) &&
365548c5777eSRobert Watson 			    (LIST_FIRST(&kz->uk_zones) != z)))
365648c5777eSRobert Watson 				cachefree += kz->uk_free;
3657fc03d22bSJeff Roberson 			LIST_FOREACH(bucket, &z->uz_buckets, ub_link)
365848c5777eSRobert Watson 				cachefree += bucket->ub_cnt;
365903175483SAlexander Motin 			db_printf("%18s %8ju %8jd %8d %12ju %8ju %8u\n",
366003175483SAlexander Motin 			    z->uz_name, (uintmax_t)kz->uk_size,
3661ae4e9636SRobert Watson 			    (intmax_t)(allocs - frees), cachefree,
366203175483SAlexander Motin 			    (uintmax_t)allocs, sleeps, z->uz_count);
3663687c94aaSJohn Baldwin 			if (db_pager_quit)
3664687c94aaSJohn Baldwin 				return;
366548c5777eSRobert Watson 		}
366648c5777eSRobert Watson 	}
366748c5777eSRobert Watson }
366803175483SAlexander Motin 
366903175483SAlexander Motin DB_SHOW_COMMAND(umacache, db_show_umacache)
367003175483SAlexander Motin {
367103175483SAlexander Motin 	uint64_t allocs, frees;
367203175483SAlexander Motin 	uma_bucket_t bucket;
367303175483SAlexander Motin 	uma_zone_t z;
367403175483SAlexander Motin 	int cachefree;
367503175483SAlexander Motin 
367603175483SAlexander Motin 	db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free",
367703175483SAlexander Motin 	    "Requests", "Bucket");
367803175483SAlexander Motin 	LIST_FOREACH(z, &uma_cachezones, uz_link) {
367903175483SAlexander Motin 		uma_zone_sumstat(z, &cachefree, &allocs, &frees, NULL);
368003175483SAlexander Motin 		LIST_FOREACH(bucket, &z->uz_buckets, ub_link)
368103175483SAlexander Motin 			cachefree += bucket->ub_cnt;
368203175483SAlexander Motin 		db_printf("%18s %8ju %8jd %8d %12ju %8u\n",
368303175483SAlexander Motin 		    z->uz_name, (uintmax_t)z->uz_size,
368403175483SAlexander Motin 		    (intmax_t)(allocs - frees), cachefree,
368503175483SAlexander Motin 		    (uintmax_t)allocs, z->uz_count);
368603175483SAlexander Motin 		if (db_pager_quit)
368703175483SAlexander Motin 			return;
368803175483SAlexander Motin 	}
368903175483SAlexander Motin }
36909542ea7bSGleb Smirnoff #endif /* DDB */
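/*
 * [Editorial note, not part of the annotated source.]  DB_SHOW_COMMAND()
 * registers these as "show" verbs at the ddb(4) prompt, e.g.:
 *
 *	db> show uma
 *	              Zone     Size     Used     Free     Requests   Sleeps   Bucket
 *	...
 *	db> show umacache
 *
 * "show uma" walks every keg-backed zone, while "show umacache" walks
 * only the cache-only zones on uma_cachezones, which carry no keg or
 * sleep statistics.
 */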