/*-
 * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson <jeff@FreeBSD.org>
 * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
 * Copyright (c) 2004-2006 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uma_core.c  Implementation of the Universal Memory allocator
 *
 * This allocator is intended to replace the multitude of similar object caches
 * in the standard FreeBSD kernel.  The intent is to be flexible as well as
 * efficient.  A primary design goal is to return unused memory to the rest of
 * the system.  This will make the system as a whole more flexible due to the
 * ability to move memory to subsystems which most need it instead of leaving
 * pools of reserved memory unused.
 *
 * The basic ideas stem from similar slab/zone based allocators whose
 * algorithms are well known.
 */

/*
 * TODO:
 *	- Improve memory usage for large allocations
 *	- Investigate cache size adjustments
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_ddb.h"
#include "opt_param.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitset.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

#include <ddb/ddb.h>

#ifdef DEBUG_MEMGUARD
#include <vm/memguard.h>
#endif

/*
 * This is the zone and keg from which all zones are spawned.  The idea is that
 * even the zone & keg heads are allocated from the allocator, so we use the
 * bss section to bootstrap us.
 */
static struct uma_keg masterkeg;
static struct uma_zone masterzone_k;
static struct uma_zone masterzone_z;
static uma_zone_t kegs = &masterzone_k;
static uma_zone_t zones = &masterzone_z;

/* This is the zone from which all uma_slab_t structures are allocated. */
static uma_zone_t slabzone;

/*
 * The initial hash tables come out of this zone so they can be allocated
 * prior to malloc coming up.
 */
static uma_zone_t hashzone;

/* The boot-time adjusted value for cache line alignment. */
int uma_align_cache = 64 - 1;

static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");
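
/*
 * Illustrative example (not part of the original file): consumers pick up
 * this boot-time alignment by passing UMA_ALIGN_CACHE to uma_zcreate(),
 * which resolves to the uma_align_cache mask above.  The zone name and
 * item type here are hypothetical:
 *
 *	zone = uma_zcreate("example", sizeof(struct example_item),
 *	    NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
 */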
/*
 * Are we allowed to allocate buckets?
 */
static int bucketdisable = 1;

/* Linked list of all kegs in the system */
static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);

/* Linked list of all cache-only zones in the system */
static LIST_HEAD(,uma_zone) uma_cachezones =
    LIST_HEAD_INITIALIZER(uma_cachezones);

/* This RW lock protects the keg list */
static struct rwlock_padalign uma_rwlock;

/*
 * Pointer to and count of the pool of pages that is preallocated at
 * startup to bootstrap UMA.  Early zones continue to use the pool
 * until it is depleted, so allocations may happen after boot; thus
 * we need a mutex to protect it.
 */
static char *bootmem;
static int boot_pages;
static struct mtx uma_boot_pages_mtx;

static struct sx uma_drain_lock;

/* Is the VM done starting up? */
static int booted = 0;
#define	UMA_STARTUP	1
#define	UMA_STARTUP2	2

/*
 * This is the handle used to schedule events that need to happen
 * outside of the allocation fast path.
 */
static struct callout uma_callout;
#define	UMA_TIMEOUT	20		/* Seconds for callout interval. */

/*
 * This structure is passed as the zone ctor arg so that I don't have to create
 * a special allocation function just for zones.
 */
struct uma_zctor_args {
	const char *name;
	size_t size;
	uma_ctor ctor;
	uma_dtor dtor;
	uma_init uminit;
	uma_fini fini;
	uma_import import;
	uma_release release;
	void *arg;
	uma_keg_t keg;
	int align;
	uint32_t flags;
};

struct uma_kctor_args {
	uma_zone_t zone;
	size_t size;
	uma_init uminit;
	uma_fini fini;
	int align;
	uint32_t flags;
};

struct uma_bucket_zone {
	uma_zone_t	ubz_zone;
	char		*ubz_name;
	int		ubz_entries;	/* Number of items it can hold. */
	int		ubz_maxsize;	/* Maximum allocation size per-item. */
};

/*
 * Compute the actual number of bucket entries to pack them in power
 * of two sizes for more efficient space utilization.
 */
#define	BUCKET_SIZE(n)						\
    (((sizeof(void *) * (n)) - sizeof(struct uma_bucket)) / sizeof(void *))

#define	BUCKET_MAX	BUCKET_SIZE(256)

struct uma_bucket_zone bucket_zones[] = {
	{ NULL, "4 Bucket", BUCKET_SIZE(4), 4096 },
	{ NULL, "6 Bucket", BUCKET_SIZE(6), 3072 },
	{ NULL, "8 Bucket", BUCKET_SIZE(8), 2048 },
	{ NULL, "12 Bucket", BUCKET_SIZE(12), 1536 },
	{ NULL, "16 Bucket", BUCKET_SIZE(16), 1024 },
	{ NULL, "32 Bucket", BUCKET_SIZE(32), 512 },
	{ NULL, "64 Bucket", BUCKET_SIZE(64), 256 },
	{ NULL, "128 Bucket", BUCKET_SIZE(128), 128 },
	{ NULL, "256 Bucket", BUCKET_SIZE(256), 64 },
	{ NULL, NULL, 0}
};
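
/*
 * Worked example (illustrative, assuming LP64 and a 24-byte struct
 * uma_bucket header): the BUCKET_SIZE() argument is the bucket's total
 * footprint in pointer-sized words, not its entry count, so
 *
 *	BUCKET_SIZE(4) = (4 * 8 - 24) / 8 = 1 entry
 *
 * and bucket_init() below sizes the "4 Bucket" zone's item at
 * 24 + 1 * 8 = 32 bytes, exactly four pointer words.  The zone names
 * above track the argument, while ubz_entries holds the smaller
 * computed capacity.
 */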

/*
 * Flags and enumerations to be passed to internal functions.
 */
enum zfreeskip { SKIP_NONE = 0, SKIP_DTOR, SKIP_FINI };

/* Prototypes.. */

static void *noobj_alloc(uma_zone_t, vm_size_t, uint8_t *, int);
static void *page_alloc(uma_zone_t, vm_size_t, uint8_t *, int);
static void *startup_alloc(uma_zone_t, vm_size_t, uint8_t *, int);
static void page_free(void *, vm_size_t, uint8_t);
static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int);
static void cache_drain(uma_zone_t);
static void bucket_drain(uma_zone_t, uma_bucket_t);
static void bucket_cache_drain(uma_zone_t zone);
static int keg_ctor(void *, int, void *, int);
static void keg_dtor(void *, int, void *);
static int zone_ctor(void *, int, void *, int);
static void zone_dtor(void *, int, void *);
static int zero_init(void *, int, int);
static void keg_small_init(uma_keg_t keg);
static void keg_large_init(uma_keg_t keg);
static void zone_foreach(void (*zfunc)(uma_zone_t));
static void zone_timeout(uma_zone_t zone);
static int hash_alloc(struct uma_hash *);
static int hash_expand(struct uma_hash *, struct uma_hash *);
static void hash_free(struct uma_hash *hash);
static void uma_timeout(void *);
static void uma_startup3(void);
static void *zone_alloc_item(uma_zone_t, void *, int);
static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip);
static void bucket_enable(void);
static void bucket_init(void);
static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int);
static void bucket_free(uma_zone_t zone, uma_bucket_t, void *);
static void bucket_zone_drain(void);
static uma_bucket_t zone_alloc_bucket(uma_zone_t zone, void *, int flags);
static uma_slab_t zone_fetch_slab(uma_zone_t zone, uma_keg_t last, int flags);
static uma_slab_t zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last,
    int flags);
static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
static void slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item);
static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
    uma_fini fini, int align, uint32_t flags);
static int zone_import(uma_zone_t zone, void **bucket, int max, int flags);
static void zone_release(uma_zone_t zone, void **bucket, int cnt);
static void uma_zero_item(void *item, uma_zone_t zone);

void uma_print_zone(uma_zone_t);
void uma_print_stats(void);
static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);

#ifdef INVARIANTS
static void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item);
static void uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item);
#endif

SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);

SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLTYPE_INT,
    0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones");

SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
    0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");

static int zone_warnings = 1;
SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RWTUN, &zone_warnings, 0,
    "Warn when UMA zones become full");

/*
 * This routine checks to see whether or not it's safe to enable buckets.
 */
static void
bucket_enable(void)
{

	bucketdisable = vm_page_count_min();
}

/*
 * Initialize bucket_zones, the array of zones of buckets of various sizes.
 *
 * For each zone, calculate the memory required for each bucket, consisting
 * of the header and an array of pointers.
 */
static void
bucket_init(void)
{
	struct uma_bucket_zone *ubz;
	int size;

	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) {
		size = roundup(sizeof(struct uma_bucket), sizeof(void *));
		size += sizeof(void *) * ubz->ubz_entries;
		ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
		    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
		    UMA_ZONE_MTXCLASS | UMA_ZFLAG_BUCKET);
	}
}

/*
 * Given a desired number of entries for a bucket, return the zone from which
 * to allocate the bucket.
 */
static struct uma_bucket_zone *
bucket_zone_lookup(int entries)
{
	struct uma_bucket_zone *ubz;

	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
		if (ubz->ubz_entries >= entries)
			return (ubz);
	ubz--;
	return (ubz);
}

static int
bucket_select(int size)
{
	struct uma_bucket_zone *ubz;

	ubz = &bucket_zones[0];
	if (size > ubz->ubz_maxsize)
		return MAX((ubz->ubz_maxsize * ubz->ubz_entries) / size, 1);

	for (; ubz->ubz_entries != 0; ubz++)
		if (ubz->ubz_maxsize < size)
			break;
	ubz--;
	return (ubz->ubz_entries);
}
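
/*
 * Illustrative example (not from the original source; sizes assume LP64):
 * bucket_zone_lookup(20) returns the first zone in bucket_zones[] whose
 * ubz_entries is >= 20, i.e. the "32 Bucket" zone; a request beyond
 * BUCKET_MAX falls back to the last (largest) zone.  bucket_select()
 * starts from the item size instead: for a 700-byte item it stops at the
 * first zone with ubz_maxsize < 700 and steps back to the "16 Bucket"
 * zone, returning that zone's entry count.
 */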
static uma_bucket_t
bucket_alloc(uma_zone_t zone, void *udata, int flags)
{
	struct uma_bucket_zone *ubz;
	uma_bucket_t bucket;

	/*
	 * This is to stop us from allocating per cpu buckets while we're
	 * running out of vm.boot_pages.  Otherwise, we would exhaust the
	 * boot pages.  This also prevents us from allocating buckets in
	 * low memory situations.
	 */
	if (bucketdisable)
		return (NULL);
	/*
	 * To limit bucket recursion we store the original zone flags
	 * in a cookie passed via zalloc_arg/zfree_arg.  This allows the
	 * NOVM flag to persist even through deep recursions.  We also
	 * store ZFLAG_BUCKET once we have recursed attempting to allocate
	 * a bucket for a bucket zone so we do not allow infinite bucket
	 * recursion.  This cookie will even persist to frees of unused
	 * buckets via the allocation path or bucket allocations in the
	 * free path.
	 */
	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
		udata = (void *)(uintptr_t)zone->uz_flags;
	else {
		if ((uintptr_t)udata & UMA_ZFLAG_BUCKET)
			return (NULL);
		udata = (void *)((uintptr_t)udata | UMA_ZFLAG_BUCKET);
	}
	if ((uintptr_t)udata & UMA_ZFLAG_CACHEONLY)
		flags |= M_NOVM;
	ubz = bucket_zone_lookup(zone->uz_count);
	if (ubz->ubz_zone == zone && (ubz + 1)->ubz_entries != 0)
		ubz++;
	bucket = uma_zalloc_arg(ubz->ubz_zone, udata, flags);
	if (bucket) {
#ifdef INVARIANTS
		bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
#endif
		bucket->ub_cnt = 0;
		bucket->ub_entries = ubz->ubz_entries;
	}

	return (bucket);
}

static void
bucket_free(uma_zone_t zone, uma_bucket_t bucket, void *udata)
{
	struct uma_bucket_zone *ubz;

	KASSERT(bucket->ub_cnt == 0,
	    ("bucket_free: Freeing a non free bucket."));
	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
		udata = (void *)(uintptr_t)zone->uz_flags;
	ubz = bucket_zone_lookup(bucket->ub_entries);
	uma_zfree_arg(ubz->ubz_zone, bucket, udata);
}

static void
bucket_zone_drain(void)
{
	struct uma_bucket_zone *ubz;

	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
		zone_drain(ubz->ubz_zone);
}

static void
zone_log_warning(uma_zone_t zone)
{
	static const struct timeval warninterval = { 300, 0 };

	if (!zone_warnings || zone->uz_warning == NULL)
		return;

	if (ratecheck(&zone->uz_ratecheck, &warninterval))
		printf("[zone: %s] %s\n", zone->uz_name, zone->uz_warning);
}

static inline void
zone_maxaction(uma_zone_t zone)
{

	if (zone->uz_maxaction.ta_func != NULL)
		taskqueue_enqueue(taskqueue_thread, &zone->uz_maxaction);
}
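
/*
 * Illustrative usage (a sketch, not part of this file): a subsystem arms
 * the rate-limited warning above with uma_zone_set_warning(), e.g.
 *
 *	uma_zone_set_warning(socket_zone,
 *	    "kern.ipc.maxsockets limit reached");
 *
 * after which a full zone prints at most one message per warninterval
 * (300 seconds).  "socket_zone" stands in for any zone created by the
 * caller; uma_zone_set_maxaction() similarly installs the task fired by
 * zone_maxaction().
 */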

static void
zone_foreach_keg(uma_zone_t zone, void (*kegfn)(uma_keg_t))
{
	uma_klink_t klink;

	LIST_FOREACH(klink, &zone->uz_kegs, kl_link)
		kegfn(klink->kl_keg);
}

/*
 * Routine called by timeout which is used to fire off some time interval
 * based calculations.  (stats, hash size, etc.)
 *
 * Arguments:
 *	arg   Unused
 *
 * Returns:
 *	Nothing
 */
static void
uma_timeout(void *unused)
{

	bucket_enable();
	zone_foreach(zone_timeout);

	/* Reschedule this event */
	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
}

/*
 * Routine to perform timeout driven calculations.  This expands the
 * hashes and does per cpu statistics aggregation.
 *
 * Returns nothing.
 */
static void
keg_timeout(uma_keg_t keg)
{

	KEG_LOCK(keg);
	/*
	 * Expand the keg hash table.
	 *
	 * This is done if the number of slabs is larger than the hash size.
	 * The goal here is to eliminate collisions entirely.  This may be
	 * a little aggressive.  Should I allow for two collisions max?
	 */
	if (keg->uk_flags & UMA_ZONE_HASH &&
	    keg->uk_pages / keg->uk_ppera >= keg->uk_hash.uh_hashsize) {
		struct uma_hash newhash;
		struct uma_hash oldhash;
		int ret;

		/*
		 * This is so involved because allocating and freeing
		 * while the keg lock is held will lead to deadlock.
		 * I have to do everything in stages and check for
		 * races.
		 */
		newhash = keg->uk_hash;
		KEG_UNLOCK(keg);
		ret = hash_alloc(&newhash);
		KEG_LOCK(keg);
		if (ret) {
			if (hash_expand(&keg->uk_hash, &newhash)) {
				oldhash = keg->uk_hash;
				keg->uk_hash = newhash;
			} else
				oldhash = newhash;

			KEG_UNLOCK(keg);
			hash_free(&oldhash);
			return;
		}
	}
	KEG_UNLOCK(keg);
}

static void
zone_timeout(uma_zone_t zone)
{

	zone_foreach_keg(zone, &keg_timeout);
}

/*
 * Allocate and zero fill the next sized hash table from the appropriate
 * backing store.
 *
 * Arguments:
 *	hash  A new hash structure with the old hash size in uh_hashsize
 *
 * Returns:
 *	1 on success and 0 on failure.
 */
static int
hash_alloc(struct uma_hash *hash)
{
	int oldsize;
	int alloc;

	oldsize = hash->uh_hashsize;

	/* We're just going to go to a power of two greater */
	if (oldsize)  {
		hash->uh_hashsize = oldsize * 2;
		alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
		hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
		    M_UMAHASH, M_NOWAIT);
	} else {
		alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
		hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
		    M_WAITOK);
		hash->uh_hashsize = UMA_HASH_SIZE_INIT;
	}
	if (hash->uh_slab_hash) {
		bzero(hash->uh_slab_hash, alloc);
		hash->uh_hashmask = hash->uh_hashsize - 1;
		return (1);
	}

	return (0);
}
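
/*
 * Worked example (illustrative): the first call lands in the else branch
 * and returns a UMA_HASH_SIZE_INIT-entry table carved from hashzone;
 * every later expansion doubles it, so the table grows
 * UMA_HASH_SIZE_INIT -> 2x -> 4x -> ...  Keeping uh_hashmask equal to
 * uh_hashsize - 1 (a power of two minus one) is what lets the hash
 * reduce a slab address to a chain index with a single AND.
 */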

/*
 * Expands the hash table for HASH zones.  This is done from zone_timeout
 * to reduce collisions.  This must not be done in the regular allocation
 * path, otherwise, we can recurse on the vm while allocating pages.
 *
 * Arguments:
 *	oldhash  The hash you want to expand
 *	newhash  The hash structure for the new table
 *
 * Returns:
 *	1 if the table was expanded, 0 otherwise.
 */
static int
hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
{
	uma_slab_t slab;
	int hval;
	int i;

	if (!newhash->uh_slab_hash)
		return (0);

	if (oldhash->uh_hashsize >= newhash->uh_hashsize)
		return (0);

	/*
	 * I need to investigate hash algorithms for resizing without a
	 * full rehash.
	 */

	for (i = 0; i < oldhash->uh_hashsize; i++)
		while (!SLIST_EMPTY(&oldhash->uh_slab_hash[i])) {
			slab = SLIST_FIRST(&oldhash->uh_slab_hash[i]);
			SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[i], us_hlink);
			hval = UMA_HASH(newhash, slab->us_data);
			SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
			    slab, us_hlink);
		}

	return (1);
}

/*
 * Free the hash table to the appropriate backing store.
 *
 * Arguments:
 *	hash  The hash table whose uh_slab_hash chains we're freeing
 *
 * Returns:
 *	Nothing
 */
static void
hash_free(struct uma_hash *hash)
{
	if (hash->uh_slab_hash == NULL)
		return;
	if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
		zone_free_item(hashzone, hash->uh_slab_hash, NULL, SKIP_NONE);
	else
		free(hash->uh_slab_hash, M_UMAHASH);
}
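
/*
 * For illustration, the rehash above recomputes each slab's chain with
 * the UMA_HASH() macro from vm/uma_int.h, which is equivalent to
 *
 *	(((uintptr_t)slab->us_data >> UMA_SLAB_SHIFT) & hash->uh_hashmask)
 *
 * Only the mask changes between the old and new tables, so a resize is
 * a straight redistribution of the existing slabs.
 */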

/*
 * Frees all outstanding items in a bucket
 *
 * Arguments:
 *	zone   The zone to free to, must be unlocked.
 *	bucket The free/alloc bucket with items, cpu queue must be locked.
 *
 * Returns:
 *	Nothing
 */
static void
bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
{
	int i;

	if (bucket == NULL)
		return;

	if (zone->uz_fini)
		for (i = 0; i < bucket->ub_cnt; i++)
			zone->uz_fini(bucket->ub_bucket[i], zone->uz_size);
	zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt);
	bucket->ub_cnt = 0;
}

/*
 * Drains the per cpu caches for a zone.
 *
 * NOTE: This may only be called while the zone is being torn down, and not
 * during normal operation.  This is necessary in order that we do not have
 * to migrate CPUs to drain the per-CPU caches.
 *
 * Arguments:
 *	zone     The zone to drain, must be unlocked.
 *
 * Returns:
 *	Nothing
 */
static void
cache_drain(uma_zone_t zone)
{
	uma_cache_t cache;
	int cpu;

	/*
	 * XXX: It is safe to not lock the per-CPU caches, because we're
	 * tearing down the zone anyway.  I.e., there will be no further use
	 * of the caches at this point.
	 *
	 * XXX: It would be good to be able to assert that the zone is being
	 * torn down to prevent improper use of cache_drain().
	 *
	 * XXX: We lock the zone before passing into bucket_cache_drain() as
	 * it is used elsewhere.  Should the tear-down path be made special
	 * there in some form?
	 */
	CPU_FOREACH(cpu) {
		cache = &zone->uz_cpu[cpu];
		bucket_drain(zone, cache->uc_allocbucket);
		bucket_drain(zone, cache->uc_freebucket);
		if (cache->uc_allocbucket != NULL)
			bucket_free(zone, cache->uc_allocbucket, NULL);
		if (cache->uc_freebucket != NULL)
			bucket_free(zone, cache->uc_freebucket, NULL);
		cache->uc_allocbucket = cache->uc_freebucket = NULL;
	}
	ZONE_LOCK(zone);
	bucket_cache_drain(zone);
	ZONE_UNLOCK(zone);
}

static void
cache_shrink(uma_zone_t zone)
{

	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
		return;

	ZONE_LOCK(zone);
	zone->uz_count = (zone->uz_count_min + zone->uz_count) / 2;
	ZONE_UNLOCK(zone);
}

static void
cache_drain_safe_cpu(uma_zone_t zone)
{
	uma_cache_t cache;
	uma_bucket_t b1, b2;

	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
		return;

	b1 = b2 = NULL;
	ZONE_LOCK(zone);
	critical_enter();
	cache = &zone->uz_cpu[curcpu];
	if (cache->uc_allocbucket) {
		if (cache->uc_allocbucket->ub_cnt != 0)
			LIST_INSERT_HEAD(&zone->uz_buckets,
			    cache->uc_allocbucket, ub_link);
		else
			b1 = cache->uc_allocbucket;
		cache->uc_allocbucket = NULL;
	}
	if (cache->uc_freebucket) {
		if (cache->uc_freebucket->ub_cnt != 0)
			LIST_INSERT_HEAD(&zone->uz_buckets,
			    cache->uc_freebucket, ub_link);
		else
			b2 = cache->uc_freebucket;
		cache->uc_freebucket = NULL;
	}
	critical_exit();
	ZONE_UNLOCK(zone);
	if (b1)
		bucket_free(zone, b1, NULL);
	if (b2)
		bucket_free(zone, b2, NULL);
}

/*
 * Safely drain per-CPU caches of a zone(s) to alloc bucket.
 * This is an expensive call because it needs to bind to all CPUs
 * one by one and enter a critical section on each of them in order
 * to safely access their cache buckets.
 * Zone lock must not be held when calling this function.
 */
static void
cache_drain_safe(uma_zone_t zone)
{
	int cpu;

	/*
	 * Politely shrinking the bucket sizes was not enough; shrink
	 * aggressively.
	 */
	if (zone)
		cache_shrink(zone);
	else
		zone_foreach(cache_shrink);

	CPU_FOREACH(cpu) {
		thread_lock(curthread);
		sched_bind(curthread, cpu);
		thread_unlock(curthread);

		if (zone)
			cache_drain_safe_cpu(zone);
		else
			zone_foreach(cache_drain_safe_cpu);
	}
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);
}

/*
 * Drain the cached buckets from a zone.  Expects a locked zone on entry.
 */
static void
bucket_cache_drain(uma_zone_t zone)
{
	uma_bucket_t bucket;

	/*
	 * Drain the bucket queues and free the buckets, we just keep two per
	 * cpu (alloc/free).
	 */
	while ((bucket = LIST_FIRST(&zone->uz_buckets)) != NULL) {
		LIST_REMOVE(bucket, ub_link);
		ZONE_UNLOCK(zone);
		bucket_drain(zone, bucket);
		bucket_free(zone, bucket, NULL);
		ZONE_LOCK(zone);
	}

	/*
	 * Shrink further bucket sizes.  The price of a single zone lock
	 * collision is probably lower than the price of a global cache drain.
	 */
	if (zone->uz_count > zone->uz_count_min)
		zone->uz_count--;
}

static void
keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
{
	uint8_t *mem;
	int i;
	uint8_t flags;

	CTR4(KTR_UMA, "keg_free_slab keg %s(%p) slab %p, returning %d bytes",
	    keg->uk_name, keg, slab, PAGE_SIZE * keg->uk_ppera);

	mem = slab->us_data;
	flags = slab->us_flags;
	i = start;
	if (keg->uk_fini != NULL) {
		for (i--; i > -1; i--)
			keg->uk_fini(slab->us_data + (keg->uk_rsize * i),
			    keg->uk_size);
	}
	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
		zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
	keg->uk_freef(mem, PAGE_SIZE * keg->uk_ppera, flags);
}

/*
 * Frees pages from a keg back to the system.  This is done on demand from
 * the pageout daemon.
 *
 * Returns nothing.
 */
static void
keg_drain(uma_keg_t keg)
{
	struct slabhead freeslabs = { 0 };
	uma_slab_t slab, tmp;

	/*
	 * We don't want to take pages from statically allocated kegs at this
	 * time.
	 */
	if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
		return;

	CTR3(KTR_UMA, "keg_drain %s(%p) free items: %u",
	    keg->uk_name, keg, keg->uk_free);
	KEG_LOCK(keg);
	if (keg->uk_free == 0)
		goto finished;

	LIST_FOREACH_SAFE(slab, &keg->uk_free_slab, us_link, tmp) {
		/* We have nowhere to free these to. */
		if (slab->us_flags & UMA_SLAB_BOOT)
			continue;

		LIST_REMOVE(slab, us_link);
		keg->uk_pages -= keg->uk_ppera;
		keg->uk_free -= keg->uk_ipers;

		if (keg->uk_flags & UMA_ZONE_HASH)
			UMA_HASH_REMOVE(&keg->uk_hash, slab, slab->us_data);

		SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);
	}
finished:
	KEG_UNLOCK(keg);

	while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
		SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
		keg_free_slab(keg, slab, keg->uk_ipers);
	}
}

static void
zone_drain_wait(uma_zone_t zone, int waitok)
{

	/*
	 * Set draining to interlock with zone_dtor() so we can release our
	 * locks as we go.  Only dtor() should do a WAITOK call since it
	 * is the only call that knows the structure will still be available
	 * when it wakes up.
	 */
	ZONE_LOCK(zone);
	while (zone->uz_flags & UMA_ZFLAG_DRAINING) {
		if (waitok == M_NOWAIT)
			goto out;
		msleep(zone, zone->uz_lockptr, PVM, "zonedrain", 1);
	}
	zone->uz_flags |= UMA_ZFLAG_DRAINING;
	bucket_cache_drain(zone);
	ZONE_UNLOCK(zone);
	/*
	 * The DRAINING flag protects us from being freed while
	 * we're running.  Normally the uma_rwlock would protect us but we
	 * must be able to release and acquire the right lock for each keg.
	 */
	zone_foreach_keg(zone, &keg_drain);
	ZONE_LOCK(zone);
	zone->uz_flags &= ~UMA_ZFLAG_DRAINING;
	wakeup(zone);
out:
	ZONE_UNLOCK(zone);
}

void
zone_drain(uma_zone_t zone)
{

	zone_drain_wait(zone, M_NOWAIT);
}
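
/*
 * Illustrative note on the two drain entry points (a sketch of callers
 * elsewhere in this file, not a specification): zone_drain() is the
 * opportunistic M_NOWAIT form used from paths like uma_reclaim(), which
 * may additionally resort to cache_drain_safe() under severe page
 * shortage.  The teardown path is the one legitimate M_WAITOK caller,
 * roughly
 *
 *	zone_drain_wait(zone, M_WAITOK);	(from zone_dtor())
 *
 * since only the destructor knows the zone will still exist when the
 * sleep ends.
 */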

/*
 * Allocate a new slab for a keg.  This does not insert the slab onto a list.
 *
 * Arguments:
 *	wait  Shall we wait?
 *
 * Returns:
 *	The slab that was allocated or NULL if there is no memory and the
 *	caller specified M_NOWAIT.
 */
static uma_slab_t
keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int wait)
{
	uma_alloc allocf;
	uma_slab_t slab;
	uint8_t *mem;
	uint8_t flags;
	int i;

	mtx_assert(&keg->uk_lock, MA_OWNED);
	slab = NULL;
	mem = NULL;

	allocf = keg->uk_allocf;
	KEG_UNLOCK(keg);

	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
		slab = zone_alloc_item(keg->uk_slabzone, NULL, wait);
		if (slab == NULL)
			goto out;
	}

	/*
	 * This reproduces the old vm_zone behavior of zero filling pages the
	 * first time they are added to a zone.
	 *
	 * Malloced items are zeroed in uma_zalloc.
	 */

	if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
		wait |= M_ZERO;
	else
		wait &= ~M_ZERO;

	if (keg->uk_flags & UMA_ZONE_NODUMP)
		wait |= M_NODUMP;

	/* zone is passed for legacy reasons. */
	mem = allocf(zone, keg->uk_ppera * PAGE_SIZE, &flags, wait);
	if (mem == NULL) {
		if (keg->uk_flags & UMA_ZONE_OFFPAGE)
			zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
		slab = NULL;
		goto out;
	}

	/* Point the slab into the allocated memory */
	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
		slab = (uma_slab_t )(mem + keg->uk_pgoff);

	if (keg->uk_flags & UMA_ZONE_VTOSLAB)
		for (i = 0; i < keg->uk_ppera; i++)
			vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);

	slab->us_keg = keg;
	slab->us_data = mem;
	slab->us_freecount = keg->uk_ipers;
	slab->us_flags = flags;
	BIT_FILL(SLAB_SETSIZE, &slab->us_free);
#ifdef INVARIANTS
	BIT_ZERO(SLAB_SETSIZE, &slab->us_debugfree);
#endif

	if (keg->uk_init != NULL) {
		for (i = 0; i < keg->uk_ipers; i++)
			if (keg->uk_init(slab->us_data + (keg->uk_rsize * i),
			    keg->uk_size, wait) != 0)
				break;
		if (i != keg->uk_ipers) {
			keg_free_slab(keg, slab, i);
			slab = NULL;
			goto out;
		}
	}
out:
	KEG_LOCK(keg);

	CTR3(KTR_UMA, "keg_alloc_slab: allocated slab %p for %s(%p)",
	    slab, keg->uk_name, keg);

	if (slab != NULL) {
		if (keg->uk_flags & UMA_ZONE_HASH)
			UMA_HASH_INSERT(&keg->uk_hash, slab, mem);

		keg->uk_pages += keg->uk_ppera;
		keg->uk_free += keg->uk_ipers;
	}

	return (slab);
}
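
/*
 * Illustrative layout sketch for the function above: with an inline
 * header, uk_pgoff is precomputed so the struct uma_slab lands at the
 * tail of the allocation,
 *
 *	mem                            mem + uk_pgoff
 *	|<--------- items --------->|<- struct uma_slab ->|
 *
 * while UMA_ZONE_OFFPAGE kegs devote the whole run to items and allocate
 * the header from uk_slabzone, recovering it later via the hash table
 * (UMA_ZONE_HASH) or the per-page back-pointers set by vsetslab()
 * (UMA_ZONE_VTOSLAB).
 */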

/*
 * This function is intended to be used early on in place of page_alloc() so
 * that we may use the boot time page cache to satisfy allocations before
 * the VM is ready.
 */
static void *
startup_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *pflag, int wait)
{
	uma_keg_t keg;
	void *mem;
	int pages;

	keg = zone_first_keg(zone);
	pages = howmany(bytes, PAGE_SIZE);
	KASSERT(pages > 0, ("startup_alloc can't reserve 0 pages\n"));

	/*
	 * Check our small startup cache to see if it has pages remaining.
	 */
	mtx_lock(&uma_boot_pages_mtx);
	if (pages <= boot_pages) {
		mem = bootmem;
		boot_pages -= pages;
		bootmem += pages * PAGE_SIZE;
		mtx_unlock(&uma_boot_pages_mtx);
		*pflag = UMA_SLAB_BOOT;
		return (mem);
	}
	mtx_unlock(&uma_boot_pages_mtx);
	if (booted < UMA_STARTUP2)
		panic("UMA: Increase vm.boot_pages");
	/*
	 * Now that we've booted reset these users to their real allocator.
	 */
#ifdef UMA_MD_SMALL_ALLOC
	keg->uk_allocf = (keg->uk_ppera > 1) ? page_alloc : uma_small_alloc;
#else
	keg->uk_allocf = page_alloc;
#endif
	return keg->uk_allocf(zone, bytes, pflag, wait);
}
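
/*
 * Illustrative note: boot_pages is seeded from the vm.boot_pages loader
 * tunable, so the panic above is typically addressed from loader.conf,
 * e.g.
 *
 *	vm.boot_pages="64"
 *
 * where 64 is only an example value; the default is normally sufficient.
 */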

/*
 * Allocates a number of pages from the system
 *
 * Arguments:
 *	bytes  The number of bytes requested
 *	wait   Shall we wait?
 *
 * Returns:
 *	A pointer to the alloced memory or possibly
 *	NULL if M_NOWAIT is set.
 */
static void *
page_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *pflag, int wait)
{
	void *p;	/* Returned page */

	*pflag = UMA_SLAB_KMEM;
	p = (void *) kmem_malloc(kmem_arena, bytes, wait);

	return (p);
}
11308355f576SJeff Roberson */ 1131a4915c21SAttilio Rao TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) { 1132087a6132SAlan Cox vm_page_unwire(p, PQ_NONE); 1133b245ac95SAlan Cox vm_page_free(p); 1134b245ac95SAlan Cox } 1135a4915c21SAttilio Rao return (NULL); 1136b245ac95SAlan Cox } 11378355f576SJeff Roberson *flags = UMA_SLAB_PRIV; 1138a4915c21SAttilio Rao zkva = keg->uk_kva + 1139a4915c21SAttilio Rao atomic_fetchadd_long(&keg->uk_offset, round_page(bytes)); 1140a4915c21SAttilio Rao retkva = zkva; 1141a4915c21SAttilio Rao TAILQ_FOREACH(p, &alloctail, listq) { 1142a4915c21SAttilio Rao pmap_qenter(zkva, &p, 1); 1143a4915c21SAttilio Rao zkva += PAGE_SIZE; 1144a4915c21SAttilio Rao } 11458355f576SJeff Roberson 11468355f576SJeff Roberson return ((void *)retkva); 11478355f576SJeff Roberson } 11488355f576SJeff Roberson 11498355f576SJeff Roberson /* 11508355f576SJeff Roberson * Frees a number of pages to the system 11518355f576SJeff Roberson * 11528355f576SJeff Roberson * Arguments: 11538355f576SJeff Roberson * mem A pointer to the memory to be freed 11548355f576SJeff Roberson * size The size of the memory being freed 11558355f576SJeff Roberson * flags The original p->us_flags field 11568355f576SJeff Roberson * 11578355f576SJeff Roberson * Returns: 11588355f576SJeff Roberson * Nothing 11598355f576SJeff Roberson */ 11608355f576SJeff Roberson static void 1161f2c2231eSRyan Stone page_free(void *mem, vm_size_t size, uint8_t flags) 11628355f576SJeff Roberson { 11635df87b21SJeff Roberson struct vmem *vmem; 11643370c5bfSJeff Roberson 11658355f576SJeff Roberson if (flags & UMA_SLAB_KMEM) 11665df87b21SJeff Roberson vmem = kmem_arena; 1167aea6e893SAlan Cox else if (flags & UMA_SLAB_KERNEL) 11685df87b21SJeff Roberson vmem = kernel_arena; 11698355f576SJeff Roberson else 1170b5345ef1SJustin Hibbits panic("UMA: page_free used with invalid flags %x", flags); 11718355f576SJeff Roberson 11725df87b21SJeff Roberson kmem_free(vmem, (vm_offset_t)mem, size); 11738355f576SJeff Roberson } 11748355f576SJeff Roberson 11758355f576SJeff Roberson /* 11768355f576SJeff Roberson * Zero fill initializer 11778355f576SJeff Roberson * 11788355f576SJeff Roberson * Arguments/Returns follow uma_init specifications 11798355f576SJeff Roberson */ 1180b23f72e9SBrian Feldman static int 1181b23f72e9SBrian Feldman zero_init(void *mem, int size, int flags) 11828355f576SJeff Roberson { 11838355f576SJeff Roberson bzero(mem, size); 1184b23f72e9SBrian Feldman return (0); 11858355f576SJeff Roberson } 11868355f576SJeff Roberson 11878355f576SJeff Roberson /* 1188e20a199fSJeff Roberson * Finish creating a small uma keg. This calculates ipers, and the keg size. 11898355f576SJeff Roberson * 11908355f576SJeff Roberson * Arguments 1191e20a199fSJeff Roberson * keg The zone we should initialize 11928355f576SJeff Roberson * 11938355f576SJeff Roberson * Returns 11948355f576SJeff Roberson * Nothing 11958355f576SJeff Roberson */ 11968355f576SJeff Roberson static void 1197e20a199fSJeff Roberson keg_small_init(uma_keg_t keg) 11988355f576SJeff Roberson { 1199244f4554SBosko Milekic u_int rsize; 1200244f4554SBosko Milekic u_int memused; 1201244f4554SBosko Milekic u_int wastedspace; 1202244f4554SBosko Milekic u_int shsize; 1203a55ebb7cSAndriy Gapon u_int slabsize; 12048355f576SJeff Roberson 1205ad97af7eSGleb Smirnoff if (keg->uk_flags & UMA_ZONE_PCPU) { 120696c85efbSNathan Whitehorn u_int ncpus = (mp_maxid + 1) ? 
(mp_maxid + 1) : MAXCPU; 1207e28a647dSGleb Smirnoff 1208a55ebb7cSAndriy Gapon slabsize = sizeof(struct pcpu); 1209e28a647dSGleb Smirnoff keg->uk_ppera = howmany(ncpus * sizeof(struct pcpu), 1210ad97af7eSGleb Smirnoff PAGE_SIZE); 1211ad97af7eSGleb Smirnoff } else { 1212a55ebb7cSAndriy Gapon slabsize = UMA_SLAB_SIZE; 1213ad97af7eSGleb Smirnoff keg->uk_ppera = 1; 1214ad97af7eSGleb Smirnoff } 1215ad97af7eSGleb Smirnoff 1216ef72505eSJeff Roberson /* 1217ef72505eSJeff Roberson * Calculate the size of each allocation (rsize) according to 1218ef72505eSJeff Roberson * alignment. If the requested size is smaller than we have 1219ef72505eSJeff Roberson * allocation bits for we round it up. 1220ef72505eSJeff Roberson */ 1221099a0e58SBosko Milekic rsize = keg->uk_size; 1222a55ebb7cSAndriy Gapon if (rsize < slabsize / SLAB_SETSIZE) 1223a55ebb7cSAndriy Gapon rsize = slabsize / SLAB_SETSIZE; 1224099a0e58SBosko Milekic if (rsize & keg->uk_align) 1225099a0e58SBosko Milekic rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1); 1226099a0e58SBosko Milekic keg->uk_rsize = rsize; 1227ad97af7eSGleb Smirnoff 1228ad97af7eSGleb Smirnoff KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0 || 1229ad97af7eSGleb Smirnoff keg->uk_rsize < sizeof(struct pcpu), 1230ad97af7eSGleb Smirnoff ("%s: size %u too large", __func__, keg->uk_rsize)); 12318355f576SJeff Roberson 1232ef72505eSJeff Roberson if (keg->uk_flags & UMA_ZONE_OFFPAGE) 12332864dbbfSGleb Smirnoff shsize = 0; 1234ef72505eSJeff Roberson else 1235244f4554SBosko Milekic shsize = sizeof(struct uma_slab); 12368355f576SJeff Roberson 1237a55ebb7cSAndriy Gapon keg->uk_ipers = (slabsize - shsize) / rsize; 1238ef72505eSJeff Roberson KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE, 1239ad97af7eSGleb Smirnoff ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers)); 1240ad97af7eSGleb Smirnoff 1241244f4554SBosko Milekic memused = keg->uk_ipers * rsize + shsize; 1242a55ebb7cSAndriy Gapon wastedspace = slabsize - memused; 1243244f4554SBosko Milekic 124420e8e865SBosko Milekic /* 1245244f4554SBosko Milekic * We can't do OFFPAGE if we're internal or if we've been 124620e8e865SBosko Milekic * asked to not go to the VM for buckets. If we do this we 12476fd34d6fSJeff Roberson * may end up going to the VM for slabs which we do not 12486fd34d6fSJeff Roberson * want to do if we're UMA_ZFLAG_CACHEONLY as a result 12496fd34d6fSJeff Roberson * of UMA_ZONE_VM, which clearly forbids it. 125020e8e865SBosko Milekic */ 1251099a0e58SBosko Milekic if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) || 1252099a0e58SBosko Milekic (keg->uk_flags & UMA_ZFLAG_CACHEONLY)) 12538355f576SJeff Roberson return; 1254244f4554SBosko Milekic 1255ef72505eSJeff Roberson /* 1256ef72505eSJeff Roberson * See if using an OFFPAGE slab will limit our waste. Only do 1257ef72505eSJeff Roberson * this if it permits more items per-slab. 1258ef72505eSJeff Roberson * 1259ef72505eSJeff Roberson * XXX We could try growing slabsize to limit max waste as well. 1260ef72505eSJeff Roberson * Historically this was not done because the VM could not 1261ef72505eSJeff Roberson * efficiently handle contiguous allocations. 
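	 *
	 * Worked example with illustrative numbers (the real constants
	 * live in uma_int.h): assume slabsize = 4096, rsize = 512 and
	 * an aligned in-page header of shsize = 88.  Then ipers =
	 * (4096 - 88) / 512 = 7 and wastedspace = 4096 - (7 * 512 +
	 * 88) = 424.  Taking UMA_MAX_WASTE = 10 for the sake of the
	 * example, the threshold is 4096 / 10 = 409, and an offpage
	 * layout fits 4096 / 512 = 8 items, so both tests below pass
	 * and the keg goes OFFPAGE.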
1262ef72505eSJeff Roberson 	 */
1263a55ebb7cSAndriy Gapon 	if ((wastedspace >= slabsize / UMA_MAX_WASTE) &&
1264a55ebb7cSAndriy Gapon 	    (keg->uk_ipers < (slabsize / keg->uk_rsize))) {
1265a55ebb7cSAndriy Gapon 		keg->uk_ipers = slabsize / keg->uk_rsize;
1266ef72505eSJeff Roberson 		KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
1267ad97af7eSGleb Smirnoff 		    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
1268*1431a748SGleb Smirnoff 		CTR6(KTR_UMA, "UMA decided we need offpage slab headers for "
1269*1431a748SGleb Smirnoff 		    "keg: %s(%p), calculated wastedspace = %d, "
1270244f4554SBosko Milekic 		    "maximum wasted space allowed = %d, "
1271244f4554SBosko Milekic 		    "calculated ipers = %d, "
1272*1431a748SGleb Smirnoff 		    "new wasted space = %d\n", keg->uk_name, keg, wastedspace,
1273a55ebb7cSAndriy Gapon 		    slabsize / UMA_MAX_WASTE, keg->uk_ipers,
1274a55ebb7cSAndriy Gapon 		    slabsize - keg->uk_ipers * keg->uk_rsize);
1275099a0e58SBosko Milekic 		keg->uk_flags |= UMA_ZONE_OFFPAGE;
12768355f576SJeff Roberson 	}
1277ad97af7eSGleb Smirnoff 
1278ad97af7eSGleb Smirnoff 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
1279ad97af7eSGleb Smirnoff 	    (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1280ad97af7eSGleb Smirnoff 		keg->uk_flags |= UMA_ZONE_HASH;
12818355f576SJeff Roberson }
12828355f576SJeff Roberson 
12838355f576SJeff Roberson /*
1284e20a199fSJeff Roberson  * Finish creating a large (> UMA_SLAB_SIZE) uma keg.  Just give in and do
12858355f576SJeff Roberson  * OFFPAGE for now.  When I can allow for more dynamic slab sizes this will be
12868355f576SJeff Roberson  * more complicated.
12878355f576SJeff Roberson  *
12888355f576SJeff Roberson  * Arguments
1289e20a199fSJeff Roberson  *	keg  The keg we should initialize
12908355f576SJeff Roberson  *
12918355f576SJeff Roberson  * Returns
12928355f576SJeff Roberson  *	Nothing
12938355f576SJeff Roberson  */
12948355f576SJeff Roberson static void
1295e20a199fSJeff Roberson keg_large_init(uma_keg_t keg)
12968355f576SJeff Roberson {
1297cec48e00SAlexander Motin 	u_int shsize;
12988355f576SJeff Roberson 
1299e20a199fSJeff Roberson 	KASSERT(keg != NULL, ("Keg is null in keg_large_init"));
1300099a0e58SBosko Milekic 	KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0,
1301e20a199fSJeff Roberson 	    ("keg_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY keg"));
1302ad97af7eSGleb Smirnoff 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
1303ad97af7eSGleb Smirnoff 	    ("%s: Cannot large-init a UMA_ZONE_PCPU keg", __func__));
130420e8e865SBosko Milekic 
1305ad97af7eSGleb Smirnoff 	keg->uk_ppera = howmany(keg->uk_size, PAGE_SIZE);
1306099a0e58SBosko Milekic 	keg->uk_ipers = 1;
1307e9a069d8SJohn Baldwin 	keg->uk_rsize = keg->uk_size;
1308e9a069d8SJohn Baldwin 
1309e9a069d8SJohn Baldwin 	/* We can't do OFFPAGE if we're internal, bail out here. */
1310e9a069d8SJohn Baldwin 	if (keg->uk_flags & UMA_ZFLAG_INTERNAL)
1311e9a069d8SJohn Baldwin 		return;
13128355f576SJeff Roberson 
1313cec48e00SAlexander Motin 	/* Check whether we have enough space to not do OFFPAGE.
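	 * E.g. (illustrative numbers): a keg with uk_size = 8000 on
	 * 4 KB pages gets uk_ppera = 2 and uk_rsize = 8000, leaving
	 * 8192 - 8000 = 192 bytes; assuming the aligned slab header
	 * is smaller than that, the keg stays in-page, otherwise it
	 * goes OFFPAGE.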
*/ 1314cec48e00SAlexander Motin if ((keg->uk_flags & UMA_ZONE_OFFPAGE) == 0) { 1315cec48e00SAlexander Motin shsize = sizeof(struct uma_slab); 1316cec48e00SAlexander Motin if (shsize & UMA_ALIGN_PTR) 1317cec48e00SAlexander Motin shsize = (shsize & ~UMA_ALIGN_PTR) + 1318cec48e00SAlexander Motin (UMA_ALIGN_PTR + 1); 1319cec48e00SAlexander Motin 1320cec48e00SAlexander Motin if ((PAGE_SIZE * keg->uk_ppera) - keg->uk_rsize < shsize) 1321099a0e58SBosko Milekic keg->uk_flags |= UMA_ZONE_OFFPAGE; 1322cec48e00SAlexander Motin } 1323cec48e00SAlexander Motin 1324cec48e00SAlexander Motin if ((keg->uk_flags & UMA_ZONE_OFFPAGE) && 1325cec48e00SAlexander Motin (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0) 1326099a0e58SBosko Milekic keg->uk_flags |= UMA_ZONE_HASH; 13278355f576SJeff Roberson } 13288355f576SJeff Roberson 1329e20a199fSJeff Roberson static void 1330e20a199fSJeff Roberson keg_cachespread_init(uma_keg_t keg) 1331e20a199fSJeff Roberson { 1332e20a199fSJeff Roberson int alignsize; 1333e20a199fSJeff Roberson int trailer; 1334e20a199fSJeff Roberson int pages; 1335e20a199fSJeff Roberson int rsize; 1336e20a199fSJeff Roberson 1337ad97af7eSGleb Smirnoff KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0, 1338ad97af7eSGleb Smirnoff ("%s: Cannot cachespread-init a UMA_ZONE_PCPU keg", __func__)); 1339ad97af7eSGleb Smirnoff 1340e20a199fSJeff Roberson alignsize = keg->uk_align + 1; 1341e20a199fSJeff Roberson rsize = keg->uk_size; 1342e20a199fSJeff Roberson /* 1343e20a199fSJeff Roberson * We want one item to start on every align boundary in a page. To 1344e20a199fSJeff Roberson * do this we will span pages. We will also extend the item by the 1345e20a199fSJeff Roberson * size of align if it is an even multiple of align. Otherwise, it 1346e20a199fSJeff Roberson * would fall on the same boundary every time. 1347e20a199fSJeff Roberson */ 1348e20a199fSJeff Roberson if (rsize & keg->uk_align) 1349e20a199fSJeff Roberson rsize = (rsize & ~keg->uk_align) + alignsize; 1350e20a199fSJeff Roberson if ((rsize & alignsize) == 0) 1351e20a199fSJeff Roberson rsize += alignsize; 1352e20a199fSJeff Roberson trailer = rsize - keg->uk_size; 1353e20a199fSJeff Roberson pages = (rsize * (PAGE_SIZE / alignsize)) / PAGE_SIZE; 1354e20a199fSJeff Roberson pages = MIN(pages, (128 * 1024) / PAGE_SIZE); 1355e20a199fSJeff Roberson keg->uk_rsize = rsize; 1356e20a199fSJeff Roberson keg->uk_ppera = pages; 1357e20a199fSJeff Roberson keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize; 1358e20a199fSJeff Roberson keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB; 13592367b4ddSDimitry Andric KASSERT(keg->uk_ipers <= SLAB_SETSIZE, 136042321809SGleb Smirnoff ("%s: keg->uk_ipers too high(%d) increase max_ipers", __func__, 1361e20a199fSJeff Roberson keg->uk_ipers)); 1362e20a199fSJeff Roberson } 1363e20a199fSJeff Roberson 13648355f576SJeff Roberson /* 1365099a0e58SBosko Milekic * Keg header ctor. This initializes all fields, locks, etc. And inserts 1366099a0e58SBosko Milekic * the keg onto the global keg list. 
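 *
 * Rough sketch of the linkage this establishes (not a diagram from
 * the original source):
 *
 *	uma_kegs ---uk_link---> keg ---uk_zones---> zone(s)
 *	zone ---uz_klink.kl_keg---> keg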
13678355f576SJeff Roberson * 13688355f576SJeff Roberson * Arguments/Returns follow uma_ctor specifications 1369099a0e58SBosko Milekic * udata Actually uma_kctor_args 1370099a0e58SBosko Milekic */ 1371b23f72e9SBrian Feldman static int 1372b23f72e9SBrian Feldman keg_ctor(void *mem, int size, void *udata, int flags) 1373099a0e58SBosko Milekic { 1374099a0e58SBosko Milekic struct uma_kctor_args *arg = udata; 1375099a0e58SBosko Milekic uma_keg_t keg = mem; 1376099a0e58SBosko Milekic uma_zone_t zone; 1377099a0e58SBosko Milekic 1378099a0e58SBosko Milekic bzero(keg, size); 1379099a0e58SBosko Milekic keg->uk_size = arg->size; 1380099a0e58SBosko Milekic keg->uk_init = arg->uminit; 1381099a0e58SBosko Milekic keg->uk_fini = arg->fini; 1382099a0e58SBosko Milekic keg->uk_align = arg->align; 1383099a0e58SBosko Milekic keg->uk_free = 0; 13846fd34d6fSJeff Roberson keg->uk_reserve = 0; 1385099a0e58SBosko Milekic keg->uk_pages = 0; 1386099a0e58SBosko Milekic keg->uk_flags = arg->flags; 1387099a0e58SBosko Milekic keg->uk_allocf = page_alloc; 1388099a0e58SBosko Milekic keg->uk_freef = page_free; 1389099a0e58SBosko Milekic keg->uk_slabzone = NULL; 1390099a0e58SBosko Milekic 1391099a0e58SBosko Milekic /* 1392099a0e58SBosko Milekic * The master zone is passed to us at keg-creation time. 1393099a0e58SBosko Milekic */ 1394099a0e58SBosko Milekic zone = arg->zone; 1395e20a199fSJeff Roberson keg->uk_name = zone->uz_name; 1396099a0e58SBosko Milekic 1397099a0e58SBosko Milekic if (arg->flags & UMA_ZONE_VM) 1398099a0e58SBosko Milekic keg->uk_flags |= UMA_ZFLAG_CACHEONLY; 1399099a0e58SBosko Milekic 1400099a0e58SBosko Milekic if (arg->flags & UMA_ZONE_ZINIT) 1401099a0e58SBosko Milekic keg->uk_init = zero_init; 1402099a0e58SBosko Milekic 1403cfcae3f8SGleb Smirnoff if (arg->flags & UMA_ZONE_MALLOC) 1404e20a199fSJeff Roberson keg->uk_flags |= UMA_ZONE_VTOSLAB; 1405e20a199fSJeff Roberson 1406ad97af7eSGleb Smirnoff if (arg->flags & UMA_ZONE_PCPU) 1407ad97af7eSGleb Smirnoff #ifdef SMP 1408ad97af7eSGleb Smirnoff keg->uk_flags |= UMA_ZONE_OFFPAGE; 1409ad97af7eSGleb Smirnoff #else 1410ad97af7eSGleb Smirnoff keg->uk_flags &= ~UMA_ZONE_PCPU; 1411ad97af7eSGleb Smirnoff #endif 1412ad97af7eSGleb Smirnoff 1413ef72505eSJeff Roberson if (keg->uk_flags & UMA_ZONE_CACHESPREAD) { 1414e20a199fSJeff Roberson keg_cachespread_init(keg); 1415244f4554SBosko Milekic } else { 1416ef72505eSJeff Roberson if (keg->uk_size > (UMA_SLAB_SIZE - sizeof(struct uma_slab))) 1417e20a199fSJeff Roberson keg_large_init(keg); 1418244f4554SBosko Milekic else 1419e20a199fSJeff Roberson keg_small_init(keg); 1420244f4554SBosko Milekic } 1421099a0e58SBosko Milekic 1422cfcae3f8SGleb Smirnoff if (keg->uk_flags & UMA_ZONE_OFFPAGE) 1423099a0e58SBosko Milekic keg->uk_slabzone = slabzone; 1424099a0e58SBosko Milekic 1425099a0e58SBosko Milekic /* 1426099a0e58SBosko Milekic * If we haven't booted yet we need allocations to go through the 1427099a0e58SBosko Milekic * startup cache until the vm is ready. 
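	 *
	 * Assumed boot ordering (sketch): uma_startup() leaves
	 * "booted" at UMA_STARTUP while startup_alloc() hands out
	 * pages from the static bootmem area; once uma_startup2()
	 * raises it to UMA_STARTUP2, the checks below (and
	 * startup_alloc() itself, once the boot pages run out) rewire
	 * uk_allocf to uma_small_alloc()/page_alloc().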
1428099a0e58SBosko Milekic */ 1429099a0e58SBosko Milekic if (keg->uk_ppera == 1) { 1430099a0e58SBosko Milekic #ifdef UMA_MD_SMALL_ALLOC 1431099a0e58SBosko Milekic keg->uk_allocf = uma_small_alloc; 1432099a0e58SBosko Milekic keg->uk_freef = uma_small_free; 14338cd02d00SAlan Cox 1434342f1793SAlan Cox if (booted < UMA_STARTUP) 1435099a0e58SBosko Milekic keg->uk_allocf = startup_alloc; 14368cd02d00SAlan Cox #else 14378cd02d00SAlan Cox if (booted < UMA_STARTUP2) 14388cd02d00SAlan Cox keg->uk_allocf = startup_alloc; 14398cd02d00SAlan Cox #endif 1440342f1793SAlan Cox } else if (booted < UMA_STARTUP2 && 1441342f1793SAlan Cox (keg->uk_flags & UMA_ZFLAG_INTERNAL)) 1442e9a069d8SJohn Baldwin keg->uk_allocf = startup_alloc; 1443099a0e58SBosko Milekic 1444099a0e58SBosko Milekic /* 1445af526374SJeff Roberson * Initialize keg's lock 1446099a0e58SBosko Milekic */ 1447af526374SJeff Roberson KEG_LOCK_INIT(keg, (arg->flags & UMA_ZONE_MTXCLASS)); 1448099a0e58SBosko Milekic 1449099a0e58SBosko Milekic /* 1450099a0e58SBosko Milekic * If we're putting the slab header in the actual page we need to 1451099a0e58SBosko Milekic * figure out where in each page it goes. This calculates a right 1452099a0e58SBosko Milekic * justified offset into the memory on an ALIGN_PTR boundary. 1453099a0e58SBosko Milekic */ 1454099a0e58SBosko Milekic if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) { 1455244f4554SBosko Milekic u_int totsize; 1456099a0e58SBosko Milekic 1457099a0e58SBosko Milekic /* Size of the slab struct and free list */ 1458ef72505eSJeff Roberson totsize = sizeof(struct uma_slab); 1459ef72505eSJeff Roberson 1460099a0e58SBosko Milekic if (totsize & UMA_ALIGN_PTR) 1461099a0e58SBosko Milekic totsize = (totsize & ~UMA_ALIGN_PTR) + 1462099a0e58SBosko Milekic (UMA_ALIGN_PTR + 1); 1463ad97af7eSGleb Smirnoff keg->uk_pgoff = (PAGE_SIZE * keg->uk_ppera) - totsize; 1464244f4554SBosko Milekic 1465244f4554SBosko Milekic /* 1466244f4554SBosko Milekic * The only way the following is possible is if with our 1467244f4554SBosko Milekic * UMA_ALIGN_PTR adjustments we are now bigger than 1468244f4554SBosko Milekic * UMA_SLAB_SIZE. I haven't checked whether this is 1469244f4554SBosko Milekic * mathematically possible for all cases, so we make 1470244f4554SBosko Milekic * sure here anyway. 
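		 *
		 * E.g. (illustrative): with PAGE_SIZE = 4096, uk_ppera = 1
		 * and an aligned header of 88 bytes, uk_pgoff = 4096 - 88 =
		 * 4008; items occupy [0, 4008), the header sits
		 * right-justified at offset 4008, and totsize below comes
		 * back as exactly 4096, so the panic is not taken.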
1471244f4554SBosko Milekic */ 1472ef72505eSJeff Roberson totsize = keg->uk_pgoff + sizeof(struct uma_slab); 1473ad97af7eSGleb Smirnoff if (totsize > PAGE_SIZE * keg->uk_ppera) { 1474099a0e58SBosko Milekic printf("zone %s ipers %d rsize %d size %d\n", 1475099a0e58SBosko Milekic zone->uz_name, keg->uk_ipers, keg->uk_rsize, 1476099a0e58SBosko Milekic keg->uk_size); 1477aea6e893SAlan Cox panic("UMA slab won't fit."); 1478099a0e58SBosko Milekic } 1479099a0e58SBosko Milekic } 1480099a0e58SBosko Milekic 1481099a0e58SBosko Milekic if (keg->uk_flags & UMA_ZONE_HASH) 1482099a0e58SBosko Milekic hash_alloc(&keg->uk_hash); 1483099a0e58SBosko Milekic 1484*1431a748SGleb Smirnoff CTR5(KTR_UMA, "keg_ctor %p zone %s(%p) out %d free %d\n", 1485*1431a748SGleb Smirnoff keg, zone->uz_name, zone, 148657223e99SAndriy Gapon (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free, 148757223e99SAndriy Gapon keg->uk_free); 1488099a0e58SBosko Milekic 1489099a0e58SBosko Milekic LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link); 1490099a0e58SBosko Milekic 1491111fbcd5SBryan Venteicher rw_wlock(&uma_rwlock); 1492099a0e58SBosko Milekic LIST_INSERT_HEAD(&uma_kegs, keg, uk_link); 1493111fbcd5SBryan Venteicher rw_wunlock(&uma_rwlock); 1494b23f72e9SBrian Feldman return (0); 1495099a0e58SBosko Milekic } 1496099a0e58SBosko Milekic 1497099a0e58SBosko Milekic /* 1498099a0e58SBosko Milekic * Zone header ctor. This initializes all fields, locks, etc. 1499099a0e58SBosko Milekic * 1500099a0e58SBosko Milekic * Arguments/Returns follow uma_ctor specifications 1501099a0e58SBosko Milekic * udata Actually uma_zctor_args 15028355f576SJeff Roberson */ 1503b23f72e9SBrian Feldman static int 1504b23f72e9SBrian Feldman zone_ctor(void *mem, int size, void *udata, int flags) 15058355f576SJeff Roberson { 15068355f576SJeff Roberson struct uma_zctor_args *arg = udata; 15078355f576SJeff Roberson uma_zone_t zone = mem; 1508099a0e58SBosko Milekic uma_zone_t z; 1509099a0e58SBosko Milekic uma_keg_t keg; 15108355f576SJeff Roberson 15118355f576SJeff Roberson bzero(zone, size); 15128355f576SJeff Roberson zone->uz_name = arg->name; 15138355f576SJeff Roberson zone->uz_ctor = arg->ctor; 15148355f576SJeff Roberson zone->uz_dtor = arg->dtor; 1515e20a199fSJeff Roberson zone->uz_slab = zone_fetch_slab; 1516099a0e58SBosko Milekic zone->uz_init = NULL; 1517099a0e58SBosko Milekic zone->uz_fini = NULL; 1518099a0e58SBosko Milekic zone->uz_allocs = 0; 1519773df9abSRobert Watson zone->uz_frees = 0; 15202019094aSRobert Watson zone->uz_fails = 0; 1521bf965959SSean Bruno zone->uz_sleeps = 0; 1522fc03d22bSJeff Roberson zone->uz_count = 0; 1523ace66b56SAlexander Motin zone->uz_count_min = 0; 1524e20a199fSJeff Roberson zone->uz_flags = 0; 15252f891cd5SPawel Jakub Dawidek zone->uz_warning = NULL; 15262f891cd5SPawel Jakub Dawidek timevalclear(&zone->uz_ratecheck); 1527e20a199fSJeff Roberson keg = arg->keg; 1528099a0e58SBosko Milekic 1529af526374SJeff Roberson ZONE_LOCK_INIT(zone, (arg->flags & UMA_ZONE_MTXCLASS)); 1530af526374SJeff Roberson 15310095a784SJeff Roberson /* 15320095a784SJeff Roberson * This is a pure cache zone, no kegs. 
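	 * Hypothetical usage sketch (the names are invented for
	 * illustration): such zones come from uma_zcache_create(),
	 * with the import/release pair standing in for a keg:
	 *
	 *	z = uma_zcache_create("frobs", sizeof(struct frob),
	 *	    NULL, NULL, NULL, NULL, frob_import, frob_release,
	 *	    frob_pool, 0);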
15330095a784SJeff Roberson */ 15340095a784SJeff Roberson if (arg->import) { 15356fd34d6fSJeff Roberson if (arg->flags & UMA_ZONE_VM) 15366fd34d6fSJeff Roberson arg->flags |= UMA_ZFLAG_CACHEONLY; 15376fd34d6fSJeff Roberson zone->uz_flags = arg->flags; 1538af526374SJeff Roberson zone->uz_size = arg->size; 15390095a784SJeff Roberson zone->uz_import = arg->import; 15400095a784SJeff Roberson zone->uz_release = arg->release; 15410095a784SJeff Roberson zone->uz_arg = arg->arg; 1542af526374SJeff Roberson zone->uz_lockptr = &zone->uz_lock; 1543111fbcd5SBryan Venteicher rw_wlock(&uma_rwlock); 154403175483SAlexander Motin LIST_INSERT_HEAD(&uma_cachezones, zone, uz_link); 1545111fbcd5SBryan Venteicher rw_wunlock(&uma_rwlock); 1546af526374SJeff Roberson goto out; 15470095a784SJeff Roberson } 15480095a784SJeff Roberson 15490095a784SJeff Roberson /* 15500095a784SJeff Roberson * Use the regular zone/keg/slab allocator. 15510095a784SJeff Roberson */ 15520095a784SJeff Roberson zone->uz_import = (uma_import)zone_import; 15530095a784SJeff Roberson zone->uz_release = (uma_release)zone_release; 15540095a784SJeff Roberson zone->uz_arg = zone; 15550095a784SJeff Roberson 1556099a0e58SBosko Milekic if (arg->flags & UMA_ZONE_SECONDARY) { 1557099a0e58SBosko Milekic KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg")); 15588355f576SJeff Roberson zone->uz_init = arg->uminit; 1559e221e841SJeff Roberson zone->uz_fini = arg->fini; 1560af526374SJeff Roberson zone->uz_lockptr = &keg->uk_lock; 1561e20a199fSJeff Roberson zone->uz_flags |= UMA_ZONE_SECONDARY; 1562111fbcd5SBryan Venteicher rw_wlock(&uma_rwlock); 1563099a0e58SBosko Milekic ZONE_LOCK(zone); 1564099a0e58SBosko Milekic LIST_FOREACH(z, &keg->uk_zones, uz_link) { 1565099a0e58SBosko Milekic if (LIST_NEXT(z, uz_link) == NULL) { 1566099a0e58SBosko Milekic LIST_INSERT_AFTER(z, zone, uz_link); 1567099a0e58SBosko Milekic break; 1568099a0e58SBosko Milekic } 1569099a0e58SBosko Milekic } 1570099a0e58SBosko Milekic ZONE_UNLOCK(zone); 1571111fbcd5SBryan Venteicher rw_wunlock(&uma_rwlock); 1572e20a199fSJeff Roberson } else if (keg == NULL) { 1573e20a199fSJeff Roberson if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini, 1574e20a199fSJeff Roberson arg->align, arg->flags)) == NULL) 1575b23f72e9SBrian Feldman return (ENOMEM); 1576099a0e58SBosko Milekic } else { 1577099a0e58SBosko Milekic struct uma_kctor_args karg; 1578b23f72e9SBrian Feldman int error; 1579099a0e58SBosko Milekic 1580099a0e58SBosko Milekic /* We should only be here from uma_startup() */ 1581099a0e58SBosko Milekic karg.size = arg->size; 1582099a0e58SBosko Milekic karg.uminit = arg->uminit; 1583099a0e58SBosko Milekic karg.fini = arg->fini; 1584099a0e58SBosko Milekic karg.align = arg->align; 1585099a0e58SBosko Milekic karg.flags = arg->flags; 1586099a0e58SBosko Milekic karg.zone = zone; 1587b23f72e9SBrian Feldman error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg, 1588b23f72e9SBrian Feldman flags); 1589b23f72e9SBrian Feldman if (error) 1590b23f72e9SBrian Feldman return (error); 1591099a0e58SBosko Milekic } 15920095a784SJeff Roberson 1593e20a199fSJeff Roberson /* 1594e20a199fSJeff Roberson * Link in the first keg. 
1595e20a199fSJeff Roberson */ 1596e20a199fSJeff Roberson zone->uz_klink.kl_keg = keg; 1597e20a199fSJeff Roberson LIST_INSERT_HEAD(&zone->uz_kegs, &zone->uz_klink, kl_link); 1598af526374SJeff Roberson zone->uz_lockptr = &keg->uk_lock; 1599e20a199fSJeff Roberson zone->uz_size = keg->uk_size; 1600e20a199fSJeff Roberson zone->uz_flags |= (keg->uk_flags & 1601e20a199fSJeff Roberson (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT)); 16028355f576SJeff Roberson 16038355f576SJeff Roberson /* 16048355f576SJeff Roberson * Some internal zones don't have room allocated for the per cpu 16058355f576SJeff Roberson * caches. If we're internal, bail out here. 16068355f576SJeff Roberson */ 1607099a0e58SBosko Milekic if (keg->uk_flags & UMA_ZFLAG_INTERNAL) { 1608e20a199fSJeff Roberson KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0, 1609099a0e58SBosko Milekic ("Secondary zone requested UMA_ZFLAG_INTERNAL")); 1610b23f72e9SBrian Feldman return (0); 1611099a0e58SBosko Milekic } 16128355f576SJeff Roberson 1613af526374SJeff Roberson out: 1614af526374SJeff Roberson if ((arg->flags & UMA_ZONE_MAXBUCKET) == 0) 1615af526374SJeff Roberson zone->uz_count = bucket_select(zone->uz_size); 16168355f576SJeff Roberson else 1617cae33c14SJeff Roberson zone->uz_count = BUCKET_MAX; 1618ace66b56SAlexander Motin zone->uz_count_min = zone->uz_count; 1619fc03d22bSJeff Roberson 1620b23f72e9SBrian Feldman return (0); 16218355f576SJeff Roberson } 16228355f576SJeff Roberson 16238355f576SJeff Roberson /* 1624099a0e58SBosko Milekic * Keg header dtor. This frees all data, destroys locks, frees the hash 1625099a0e58SBosko Milekic * table and removes the keg from the global list. 16269c2cd7e5SJeff Roberson * 16279c2cd7e5SJeff Roberson * Arguments/Returns follow uma_dtor specifications 16289c2cd7e5SJeff Roberson * udata unused 16299c2cd7e5SJeff Roberson */ 1630099a0e58SBosko Milekic static void 1631099a0e58SBosko Milekic keg_dtor(void *arg, int size, void *udata) 1632099a0e58SBosko Milekic { 1633099a0e58SBosko Milekic uma_keg_t keg; 16349c2cd7e5SJeff Roberson 1635099a0e58SBosko Milekic keg = (uma_keg_t)arg; 1636e20a199fSJeff Roberson KEG_LOCK(keg); 1637099a0e58SBosko Milekic if (keg->uk_free != 0) { 1638a3845534SCraig Rodrigues printf("Freed UMA keg (%s) was not empty (%d items). " 1639099a0e58SBosko Milekic " Lost %d pages of memory.\n", 1640a3845534SCraig Rodrigues keg->uk_name ? keg->uk_name : "", 1641099a0e58SBosko Milekic keg->uk_free, keg->uk_pages); 1642099a0e58SBosko Milekic } 1643e20a199fSJeff Roberson KEG_UNLOCK(keg); 1644099a0e58SBosko Milekic 1645099a0e58SBosko Milekic hash_free(&keg->uk_hash); 1646099a0e58SBosko Milekic 1647e20a199fSJeff Roberson KEG_LOCK_FINI(keg); 1648099a0e58SBosko Milekic } 1649099a0e58SBosko Milekic 1650099a0e58SBosko Milekic /* 1651099a0e58SBosko Milekic * Zone header dtor. 
1652099a0e58SBosko Milekic  *
1653099a0e58SBosko Milekic  * Arguments/Returns follow uma_dtor specifications
1654099a0e58SBosko Milekic  *	udata  unused
1655099a0e58SBosko Milekic  */
16569c2cd7e5SJeff Roberson static void
16579c2cd7e5SJeff Roberson zone_dtor(void *arg, int size, void *udata)
16589c2cd7e5SJeff Roberson {
1659e20a199fSJeff Roberson 	uma_klink_t klink;
16609c2cd7e5SJeff Roberson 	uma_zone_t zone;
1661099a0e58SBosko Milekic 	uma_keg_t keg;
16629c2cd7e5SJeff Roberson 
16639c2cd7e5SJeff Roberson 	zone = (uma_zone_t)arg;
1664e20a199fSJeff Roberson 	keg = zone_first_keg(zone);
16659643769aSJeff Roberson 
1666e20a199fSJeff Roberson 	if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
16679643769aSJeff Roberson 		cache_drain(zone);
1668099a0e58SBosko Milekic 
1669111fbcd5SBryan Venteicher 	rw_wlock(&uma_rwlock);
1670099a0e58SBosko Milekic 	LIST_REMOVE(zone, uz_link);
1671111fbcd5SBryan Venteicher 	rw_wunlock(&uma_rwlock);
1672099a0e58SBosko Milekic 	/*
1673099a0e58SBosko Milekic 	 * XXX there are some races here: the zone can be drained,
1674099a0e58SBosko Milekic 	 * the zone lock released, and the zone refilled again
1675099a0e58SBosko Milekic 	 * before we finally remove it... we don't care for now.
1676099a0e58SBosko Milekic 	 */
1677099a0e58SBosko Milekic 
1678e20a199fSJeff Roberson 	zone_drain_wait(zone, M_WAITOK);
1679e20a199fSJeff Roberson 	/*
1680e20a199fSJeff Roberson 	 * Unlink all of our kegs.
1681e20a199fSJeff Roberson 	 */
1682e20a199fSJeff Roberson 	while ((klink = LIST_FIRST(&zone->uz_kegs)) != NULL) {
1683e20a199fSJeff Roberson 		klink->kl_keg = NULL;
1684e20a199fSJeff Roberson 		LIST_REMOVE(klink, kl_link);
1685e20a199fSJeff Roberson 		if (klink == &zone->uz_klink)
1686e20a199fSJeff Roberson 			continue;
1687e20a199fSJeff Roberson 		free(klink, M_TEMP);
1688e20a199fSJeff Roberson 	}
1689e20a199fSJeff Roberson 	/*
1690e20a199fSJeff Roberson 	 * We only destroy kegs from non-secondary zones.
1691e20a199fSJeff Roberson 	 */
16920095a784SJeff Roberson 	if (keg != NULL && (zone->uz_flags & UMA_ZONE_SECONDARY) == 0) {
1693111fbcd5SBryan Venteicher 		rw_wlock(&uma_rwlock);
1694099a0e58SBosko Milekic 		LIST_REMOVE(keg, uk_link);
1695111fbcd5SBryan Venteicher 		rw_wunlock(&uma_rwlock);
16960095a784SJeff Roberson 		zone_free_item(kegs, keg, NULL, SKIP_NONE);
16979c2cd7e5SJeff Roberson 	}
1698af526374SJeff Roberson 	ZONE_LOCK_FINI(zone);
1699099a0e58SBosko Milekic }
1700099a0e58SBosko Milekic 
17019c2cd7e5SJeff Roberson /*
17028355f576SJeff Roberson  * Traverses every zone in the system and calls a callback
17038355f576SJeff Roberson  *
17048355f576SJeff Roberson  * Arguments:
17058355f576SJeff Roberson  *	zfunc  A pointer to a function which accepts a zone
17068355f576SJeff Roberson  *		as an argument.
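 *		(e.g., zone_drain; uma_reclaim() is assumed to walk
 *		every zone this way)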
17078355f576SJeff Roberson * 17088355f576SJeff Roberson * Returns: 17098355f576SJeff Roberson * Nothing 17108355f576SJeff Roberson */ 17118355f576SJeff Roberson static void 17128355f576SJeff Roberson zone_foreach(void (*zfunc)(uma_zone_t)) 17138355f576SJeff Roberson { 1714099a0e58SBosko Milekic uma_keg_t keg; 17158355f576SJeff Roberson uma_zone_t zone; 17168355f576SJeff Roberson 1717111fbcd5SBryan Venteicher rw_rlock(&uma_rwlock); 1718099a0e58SBosko Milekic LIST_FOREACH(keg, &uma_kegs, uk_link) { 1719099a0e58SBosko Milekic LIST_FOREACH(zone, &keg->uk_zones, uz_link) 17208355f576SJeff Roberson zfunc(zone); 1721099a0e58SBosko Milekic } 1722111fbcd5SBryan Venteicher rw_runlock(&uma_rwlock); 17238355f576SJeff Roberson } 17248355f576SJeff Roberson 17258355f576SJeff Roberson /* Public functions */ 17268355f576SJeff Roberson /* See uma.h */ 17278355f576SJeff Roberson void 1728ac0a6fd0SGleb Smirnoff uma_startup(void *mem, int npages) 17298355f576SJeff Roberson { 17308355f576SJeff Roberson struct uma_zctor_args args; 17318355f576SJeff Roberson 1732111fbcd5SBryan Venteicher rw_init(&uma_rwlock, "UMA lock"); 1733099a0e58SBosko Milekic 1734099a0e58SBosko Milekic /* "manually" create the initial zone */ 17350095a784SJeff Roberson memset(&args, 0, sizeof(args)); 1736099a0e58SBosko Milekic args.name = "UMA Kegs"; 1737099a0e58SBosko Milekic args.size = sizeof(struct uma_keg); 1738099a0e58SBosko Milekic args.ctor = keg_ctor; 1739099a0e58SBosko Milekic args.dtor = keg_dtor; 17408355f576SJeff Roberson args.uminit = zero_init; 17418355f576SJeff Roberson args.fini = NULL; 1742099a0e58SBosko Milekic args.keg = &masterkeg; 17438355f576SJeff Roberson args.align = 32 - 1; 1744b60f5b79SJeff Roberson args.flags = UMA_ZFLAG_INTERNAL; 17458355f576SJeff Roberson /* The initial zone has no Per cpu queues so it's smaller */ 1746b23f72e9SBrian Feldman zone_ctor(kegs, sizeof(struct uma_zone), &args, M_WAITOK); 17478355f576SJeff Roberson 1748f353d338SAlan Cox mtx_init(&uma_boot_pages_mtx, "UMA boot pages", NULL, MTX_DEF); 1749ac0a6fd0SGleb Smirnoff bootmem = mem; 1750ac0a6fd0SGleb Smirnoff boot_pages = npages; 17518355f576SJeff Roberson 1752099a0e58SBosko Milekic args.name = "UMA Zones"; 1753099a0e58SBosko Milekic args.size = sizeof(struct uma_zone) + 175451cfb0beSDmitry Chagin (sizeof(struct uma_cache) * (mp_maxid + 1)); 1755099a0e58SBosko Milekic args.ctor = zone_ctor; 1756099a0e58SBosko Milekic args.dtor = zone_dtor; 1757099a0e58SBosko Milekic args.uminit = zero_init; 1758099a0e58SBosko Milekic args.fini = NULL; 1759099a0e58SBosko Milekic args.keg = NULL; 1760099a0e58SBosko Milekic args.align = 32 - 1; 1761099a0e58SBosko Milekic args.flags = UMA_ZFLAG_INTERNAL; 1762099a0e58SBosko Milekic /* The initial zone has no Per cpu queues so it's smaller */ 1763b23f72e9SBrian Feldman zone_ctor(zones, sizeof(struct uma_zone), &args, M_WAITOK); 1764099a0e58SBosko Milekic 17658355f576SJeff Roberson /* Now make a zone for slab headers */ 17668355f576SJeff Roberson slabzone = uma_zcreate("UMA Slabs", 1767ef72505eSJeff Roberson sizeof(struct uma_slab), 17688355f576SJeff Roberson NULL, NULL, NULL, NULL, 1769b60f5b79SJeff Roberson UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL); 17708355f576SJeff Roberson 17718355f576SJeff Roberson hashzone = uma_zcreate("UMA Hash", 17728355f576SJeff Roberson sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT, 17738355f576SJeff Roberson NULL, NULL, NULL, NULL, 1774b60f5b79SJeff Roberson UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL); 17758355f576SJeff Roberson 1776cae33c14SJeff Roberson bucket_init(); 17778355f576SJeff Roberson 
1778342f1793SAlan Cox booted = UMA_STARTUP; 17798355f576SJeff Roberson } 17808355f576SJeff Roberson 17818355f576SJeff Roberson /* see uma.h */ 17828355f576SJeff Roberson void 178399571dc3SJeff Roberson uma_startup2(void) 17848355f576SJeff Roberson { 1785342f1793SAlan Cox booted = UMA_STARTUP2; 178686bbae32SJeff Roberson bucket_enable(); 178795c4bf75SKonstantin Belousov sx_init(&uma_drain_lock, "umadrain"); 17888355f576SJeff Roberson } 17898355f576SJeff Roberson 17908355f576SJeff Roberson /* 17918355f576SJeff Roberson * Initialize our callout handle 17928355f576SJeff Roberson * 17938355f576SJeff Roberson */ 17948355f576SJeff Roberson 17958355f576SJeff Roberson static void 17968355f576SJeff Roberson uma_startup3(void) 17978355f576SJeff Roberson { 1798*1431a748SGleb Smirnoff 1799fd90e2edSJung-uk Kim callout_init(&uma_callout, 1); 18009643769aSJeff Roberson callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL); 18018355f576SJeff Roberson } 18028355f576SJeff Roberson 1803e20a199fSJeff Roberson static uma_keg_t 1804099a0e58SBosko Milekic uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini, 180585dcf349SGleb Smirnoff int align, uint32_t flags) 1806099a0e58SBosko Milekic { 1807099a0e58SBosko Milekic struct uma_kctor_args args; 1808099a0e58SBosko Milekic 1809099a0e58SBosko Milekic args.size = size; 1810099a0e58SBosko Milekic args.uminit = uminit; 1811099a0e58SBosko Milekic args.fini = fini; 18121e319f6dSRobert Watson args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align; 1813099a0e58SBosko Milekic args.flags = flags; 1814099a0e58SBosko Milekic args.zone = zone; 1815e20a199fSJeff Roberson return (zone_alloc_item(kegs, &args, M_WAITOK)); 1816099a0e58SBosko Milekic } 1817099a0e58SBosko Milekic 18188355f576SJeff Roberson /* See uma.h */ 18191e319f6dSRobert Watson void 18201e319f6dSRobert Watson uma_set_align(int align) 18211e319f6dSRobert Watson { 18221e319f6dSRobert Watson 18231e319f6dSRobert Watson if (align != UMA_ALIGN_CACHE) 18241e319f6dSRobert Watson uma_align_cache = align; 18251e319f6dSRobert Watson } 18261e319f6dSRobert Watson 18271e319f6dSRobert Watson /* See uma.h */ 18288355f576SJeff Roberson uma_zone_t 1829bb196eb4SMatthew D Fleming uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor, 183085dcf349SGleb Smirnoff uma_init uminit, uma_fini fini, int align, uint32_t flags) 18318355f576SJeff Roberson 18328355f576SJeff Roberson { 18338355f576SJeff Roberson struct uma_zctor_args args; 183495c4bf75SKonstantin Belousov uma_zone_t res; 183595c4bf75SKonstantin Belousov bool locked; 18368355f576SJeff Roberson 1837a5a35578SJohn Baldwin KASSERT(powerof2(align + 1), ("invalid zone alignment %d for \"%s\"", 1838a5a35578SJohn Baldwin align, name)); 1839a5a35578SJohn Baldwin 18408355f576SJeff Roberson /* This stuff is essential for the zone ctor */ 18410095a784SJeff Roberson memset(&args, 0, sizeof(args)); 18428355f576SJeff Roberson args.name = name; 18438355f576SJeff Roberson args.size = size; 18448355f576SJeff Roberson args.ctor = ctor; 18458355f576SJeff Roberson args.dtor = dtor; 18468355f576SJeff Roberson args.uminit = uminit; 18478355f576SJeff Roberson args.fini = fini; 1848afc6dc36SJohn-Mark Gurney #ifdef INVARIANTS 1849afc6dc36SJohn-Mark Gurney /* 1850afc6dc36SJohn-Mark Gurney * If a zone is being created with an empty constructor and 1851afc6dc36SJohn-Mark Gurney * destructor, pass UMA constructor/destructor which checks for 1852afc6dc36SJohn-Mark Gurney * memory use after free. 
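	 * (Sketch of the assumed mechanism: trash_dtor fills a freed
	 * item with a junk pattern and trash_ctor verifies that
	 * pattern on the next allocation, so a stray write to freed
	 * memory is caught at reuse instead of corrupting silently.)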
1853afc6dc36SJohn-Mark Gurney */ 185419c591bfSMateusz Guzik if ((!(flags & (UMA_ZONE_ZINIT | UMA_ZONE_NOFREE))) && 185519c591bfSMateusz Guzik ctor == NULL && dtor == NULL && uminit == NULL && fini == NULL) { 1856afc6dc36SJohn-Mark Gurney args.ctor = trash_ctor; 1857afc6dc36SJohn-Mark Gurney args.dtor = trash_dtor; 1858afc6dc36SJohn-Mark Gurney args.uminit = trash_init; 1859afc6dc36SJohn-Mark Gurney args.fini = trash_fini; 1860afc6dc36SJohn-Mark Gurney } 1861afc6dc36SJohn-Mark Gurney #endif 18628355f576SJeff Roberson args.align = align; 18638355f576SJeff Roberson args.flags = flags; 1864099a0e58SBosko Milekic args.keg = NULL; 1865099a0e58SBosko Milekic 186695c4bf75SKonstantin Belousov if (booted < UMA_STARTUP2) { 186795c4bf75SKonstantin Belousov locked = false; 186895c4bf75SKonstantin Belousov } else { 186995c4bf75SKonstantin Belousov sx_slock(&uma_drain_lock); 187095c4bf75SKonstantin Belousov locked = true; 187195c4bf75SKonstantin Belousov } 187295c4bf75SKonstantin Belousov res = zone_alloc_item(zones, &args, M_WAITOK); 187395c4bf75SKonstantin Belousov if (locked) 187495c4bf75SKonstantin Belousov sx_sunlock(&uma_drain_lock); 187595c4bf75SKonstantin Belousov return (res); 1876099a0e58SBosko Milekic } 1877099a0e58SBosko Milekic 1878099a0e58SBosko Milekic /* See uma.h */ 1879099a0e58SBosko Milekic uma_zone_t 1880099a0e58SBosko Milekic uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor, 1881099a0e58SBosko Milekic uma_init zinit, uma_fini zfini, uma_zone_t master) 1882099a0e58SBosko Milekic { 1883099a0e58SBosko Milekic struct uma_zctor_args args; 1884e20a199fSJeff Roberson uma_keg_t keg; 188595c4bf75SKonstantin Belousov uma_zone_t res; 188695c4bf75SKonstantin Belousov bool locked; 1887099a0e58SBosko Milekic 1888e20a199fSJeff Roberson keg = zone_first_keg(master); 18890095a784SJeff Roberson memset(&args, 0, sizeof(args)); 1890099a0e58SBosko Milekic args.name = name; 1891e20a199fSJeff Roberson args.size = keg->uk_size; 1892099a0e58SBosko Milekic args.ctor = ctor; 1893099a0e58SBosko Milekic args.dtor = dtor; 1894099a0e58SBosko Milekic args.uminit = zinit; 1895099a0e58SBosko Milekic args.fini = zfini; 1896e20a199fSJeff Roberson args.align = keg->uk_align; 1897e20a199fSJeff Roberson args.flags = keg->uk_flags | UMA_ZONE_SECONDARY; 1898e20a199fSJeff Roberson args.keg = keg; 18998355f576SJeff Roberson 190095c4bf75SKonstantin Belousov if (booted < UMA_STARTUP2) { 190195c4bf75SKonstantin Belousov locked = false; 190295c4bf75SKonstantin Belousov } else { 190395c4bf75SKonstantin Belousov sx_slock(&uma_drain_lock); 190495c4bf75SKonstantin Belousov locked = true; 190595c4bf75SKonstantin Belousov } 1906e20a199fSJeff Roberson /* XXX Attaches only one keg of potentially many. 
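	 * Hypothetical example (names invented): a secondary zone
	 * layers its own ctor/dtor over the master's keg, e.g.
	 *
	 *	raw = uma_zcreate("frob", sizeof(struct frob), NULL,
	 *	    NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	 *	cooked = uma_zsecond_create("frob cooked", frob_ctor,
	 *	    frob_dtor, NULL, NULL, raw);
	 *
	 * so both zones carve identical items out of one keg.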
*/ 190795c4bf75SKonstantin Belousov res = zone_alloc_item(zones, &args, M_WAITOK); 190895c4bf75SKonstantin Belousov if (locked) 190995c4bf75SKonstantin Belousov sx_sunlock(&uma_drain_lock); 191095c4bf75SKonstantin Belousov return (res); 19118355f576SJeff Roberson } 19128355f576SJeff Roberson 19130095a784SJeff Roberson /* See uma.h */ 19140095a784SJeff Roberson uma_zone_t 1915af526374SJeff Roberson uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor, 1916af526374SJeff Roberson uma_init zinit, uma_fini zfini, uma_import zimport, 1917af526374SJeff Roberson uma_release zrelease, void *arg, int flags) 19180095a784SJeff Roberson { 19190095a784SJeff Roberson struct uma_zctor_args args; 19200095a784SJeff Roberson 19210095a784SJeff Roberson memset(&args, 0, sizeof(args)); 19220095a784SJeff Roberson args.name = name; 1923af526374SJeff Roberson args.size = size; 19240095a784SJeff Roberson args.ctor = ctor; 19250095a784SJeff Roberson args.dtor = dtor; 19260095a784SJeff Roberson args.uminit = zinit; 19270095a784SJeff Roberson args.fini = zfini; 19280095a784SJeff Roberson args.import = zimport; 19290095a784SJeff Roberson args.release = zrelease; 19300095a784SJeff Roberson args.arg = arg; 19310095a784SJeff Roberson args.align = 0; 19320095a784SJeff Roberson args.flags = flags; 19330095a784SJeff Roberson 19340095a784SJeff Roberson return (zone_alloc_item(zones, &args, M_WAITOK)); 19350095a784SJeff Roberson } 19360095a784SJeff Roberson 1937e20a199fSJeff Roberson static void 1938e20a199fSJeff Roberson zone_lock_pair(uma_zone_t a, uma_zone_t b) 1939e20a199fSJeff Roberson { 1940e20a199fSJeff Roberson if (a < b) { 1941e20a199fSJeff Roberson ZONE_LOCK(a); 1942af526374SJeff Roberson mtx_lock_flags(b->uz_lockptr, MTX_DUPOK); 1943e20a199fSJeff Roberson } else { 1944e20a199fSJeff Roberson ZONE_LOCK(b); 1945af526374SJeff Roberson mtx_lock_flags(a->uz_lockptr, MTX_DUPOK); 1946e20a199fSJeff Roberson } 1947e20a199fSJeff Roberson } 1948e20a199fSJeff Roberson 1949e20a199fSJeff Roberson static void 1950e20a199fSJeff Roberson zone_unlock_pair(uma_zone_t a, uma_zone_t b) 1951e20a199fSJeff Roberson { 1952e20a199fSJeff Roberson 1953e20a199fSJeff Roberson ZONE_UNLOCK(a); 1954e20a199fSJeff Roberson ZONE_UNLOCK(b); 1955e20a199fSJeff Roberson } 1956e20a199fSJeff Roberson 1957e20a199fSJeff Roberson int 1958e20a199fSJeff Roberson uma_zsecond_add(uma_zone_t zone, uma_zone_t master) 1959e20a199fSJeff Roberson { 1960e20a199fSJeff Roberson uma_klink_t klink; 1961e20a199fSJeff Roberson uma_klink_t kl; 1962e20a199fSJeff Roberson int error; 1963e20a199fSJeff Roberson 1964e20a199fSJeff Roberson error = 0; 1965e20a199fSJeff Roberson klink = malloc(sizeof(*klink), M_TEMP, M_WAITOK | M_ZERO); 1966e20a199fSJeff Roberson 1967e20a199fSJeff Roberson zone_lock_pair(zone, master); 1968e20a199fSJeff Roberson /* 1969e20a199fSJeff Roberson * zone must use vtoslab() to resolve objects and must already be 1970e20a199fSJeff Roberson * a secondary. 1971e20a199fSJeff Roberson */ 1972e20a199fSJeff Roberson if ((zone->uz_flags & (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) 1973e20a199fSJeff Roberson != (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) { 1974e20a199fSJeff Roberson error = EINVAL; 1975e20a199fSJeff Roberson goto out; 1976e20a199fSJeff Roberson } 1977e20a199fSJeff Roberson /* 1978e20a199fSJeff Roberson * The new master must also use vtoslab(). 
1979e20a199fSJeff Roberson */ 1980e20a199fSJeff Roberson if ((zone->uz_flags & UMA_ZONE_VTOSLAB) != UMA_ZONE_VTOSLAB) { 1981e20a199fSJeff Roberson error = EINVAL; 1982e20a199fSJeff Roberson goto out; 1983e20a199fSJeff Roberson } 1984cfcae3f8SGleb Smirnoff 1985e20a199fSJeff Roberson /* 1986e20a199fSJeff Roberson * The underlying object must be the same size. rsize 1987e20a199fSJeff Roberson * may be different. 1988e20a199fSJeff Roberson */ 1989e20a199fSJeff Roberson if (master->uz_size != zone->uz_size) { 1990e20a199fSJeff Roberson error = E2BIG; 1991e20a199fSJeff Roberson goto out; 1992e20a199fSJeff Roberson } 1993e20a199fSJeff Roberson /* 1994e20a199fSJeff Roberson * Put it at the end of the list. 1995e20a199fSJeff Roberson */ 1996e20a199fSJeff Roberson klink->kl_keg = zone_first_keg(master); 1997e20a199fSJeff Roberson LIST_FOREACH(kl, &zone->uz_kegs, kl_link) { 1998e20a199fSJeff Roberson if (LIST_NEXT(kl, kl_link) == NULL) { 1999e20a199fSJeff Roberson LIST_INSERT_AFTER(kl, klink, kl_link); 2000e20a199fSJeff Roberson break; 2001e20a199fSJeff Roberson } 2002e20a199fSJeff Roberson } 2003e20a199fSJeff Roberson klink = NULL; 2004e20a199fSJeff Roberson zone->uz_flags |= UMA_ZFLAG_MULTI; 2005e20a199fSJeff Roberson zone->uz_slab = zone_fetch_slab_multi; 2006e20a199fSJeff Roberson 2007e20a199fSJeff Roberson out: 2008e20a199fSJeff Roberson zone_unlock_pair(zone, master); 2009e20a199fSJeff Roberson if (klink != NULL) 2010e20a199fSJeff Roberson free(klink, M_TEMP); 2011e20a199fSJeff Roberson 2012e20a199fSJeff Roberson return (error); 2013e20a199fSJeff Roberson } 2014e20a199fSJeff Roberson 2015e20a199fSJeff Roberson 20168355f576SJeff Roberson /* See uma.h */ 20179c2cd7e5SJeff Roberson void 20189c2cd7e5SJeff Roberson uma_zdestroy(uma_zone_t zone) 20199c2cd7e5SJeff Roberson { 2020f4ff923bSRobert Watson 202195c4bf75SKonstantin Belousov sx_slock(&uma_drain_lock); 20220095a784SJeff Roberson zone_free_item(zones, zone, NULL, SKIP_NONE); 202395c4bf75SKonstantin Belousov sx_sunlock(&uma_drain_lock); 20249c2cd7e5SJeff Roberson } 20259c2cd7e5SJeff Roberson 20269c2cd7e5SJeff Roberson /* See uma.h */ 20278355f576SJeff Roberson void * 20282cc35ff9SJeff Roberson uma_zalloc_arg(uma_zone_t zone, void *udata, int flags) 20298355f576SJeff Roberson { 20308355f576SJeff Roberson void *item; 20318355f576SJeff Roberson uma_cache_t cache; 20328355f576SJeff Roberson uma_bucket_t bucket; 2033fc03d22bSJeff Roberson int lockfail; 20348355f576SJeff Roberson int cpu; 20358355f576SJeff Roberson 2036e866d8f0SMark Murray /* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */ 2037e866d8f0SMark Murray random_harvest_fast_uma(&zone, sizeof(zone), 1, RANDOM_UMA); 203810cb2424SMark Murray 20398355f576SJeff Roberson /* This is the fast path allocation */ 2040*1431a748SGleb Smirnoff CTR4(KTR_UMA, "uma_zalloc_arg thread %x zone %s(%p) flags %d", 2041*1431a748SGleb Smirnoff curthread, zone->uz_name, zone, flags); 2042a553d4b8SJeff Roberson 2043635fd505SRobert Watson if (flags & M_WAITOK) { 2044b23f72e9SBrian Feldman WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 2045635fd505SRobert Watson "uma_zalloc_arg: zone \"%s\"", zone->uz_name); 20464c1cc01cSJohn Baldwin } 2047d9e2e68dSMark Johnston KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(), 20481067a2baSJonathan T. Looney ("uma_zalloc_arg: called with spinlock or critical section held")); 20491067a2baSJonathan T. 
Looney 20508d689e04SGleb Smirnoff #ifdef DEBUG_MEMGUARD 20518d689e04SGleb Smirnoff if (memguard_cmp_zone(zone)) { 20528d689e04SGleb Smirnoff item = memguard_alloc(zone->uz_size, flags); 20538d689e04SGleb Smirnoff if (item != NULL) { 20548d689e04SGleb Smirnoff if (zone->uz_init != NULL && 20558d689e04SGleb Smirnoff zone->uz_init(item, zone->uz_size, flags) != 0) 20568d689e04SGleb Smirnoff return (NULL); 20578d689e04SGleb Smirnoff if (zone->uz_ctor != NULL && 2058fc03d22bSJeff Roberson zone->uz_ctor(item, zone->uz_size, udata, 2059fc03d22bSJeff Roberson flags) != 0) { 20608d689e04SGleb Smirnoff zone->uz_fini(item, zone->uz_size); 20618d689e04SGleb Smirnoff return (NULL); 20628d689e04SGleb Smirnoff } 20638d689e04SGleb Smirnoff return (item); 20648d689e04SGleb Smirnoff } 20658d689e04SGleb Smirnoff /* This is unfortunate but should not be fatal. */ 20668d689e04SGleb Smirnoff } 20678d689e04SGleb Smirnoff #endif 20685d1ae027SRobert Watson /* 20695d1ae027SRobert Watson * If possible, allocate from the per-CPU cache. There are two 20705d1ae027SRobert Watson * requirements for safe access to the per-CPU cache: (1) the thread 20715d1ae027SRobert Watson * accessing the cache must not be preempted or yield during access, 20725d1ae027SRobert Watson * and (2) the thread must not migrate CPUs without switching which 20735d1ae027SRobert Watson * cache it accesses. We rely on a critical section to prevent 20745d1ae027SRobert Watson * preemption and migration. We release the critical section in 20755d1ae027SRobert Watson * order to acquire the zone mutex if we are unable to allocate from 20765d1ae027SRobert Watson * the current cache; when we re-acquire the critical section, we 20775d1ae027SRobert Watson * must detect and handle migration if it has occurred. 20785d1ae027SRobert Watson */ 20795d1ae027SRobert Watson critical_enter(); 20805d1ae027SRobert Watson cpu = curcpu; 20818355f576SJeff Roberson cache = &zone->uz_cpu[cpu]; 20828355f576SJeff Roberson 20838355f576SJeff Roberson zalloc_start: 20848355f576SJeff Roberson bucket = cache->uc_allocbucket; 2085fc03d22bSJeff Roberson if (bucket != NULL && bucket->ub_cnt > 0) { 2086cae33c14SJeff Roberson bucket->ub_cnt--; 2087cae33c14SJeff Roberson item = bucket->ub_bucket[bucket->ub_cnt]; 20888355f576SJeff Roberson #ifdef INVARIANTS 2089cae33c14SJeff Roberson bucket->ub_bucket[bucket->ub_cnt] = NULL; 20908355f576SJeff Roberson #endif 2091fc03d22bSJeff Roberson KASSERT(item != NULL, ("uma_zalloc: Bucket pointer mangled.")); 20928355f576SJeff Roberson cache->uc_allocs++; 20935d1ae027SRobert Watson critical_exit(); 2094fc03d22bSJeff Roberson if (zone->uz_ctor != NULL && 2095fc03d22bSJeff Roberson zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) { 20960095a784SJeff Roberson atomic_add_long(&zone->uz_fails, 1); 2097fc03d22bSJeff Roberson zone_free_item(zone, item, udata, SKIP_DTOR); 2098b23f72e9SBrian Feldman return (NULL); 2099b23f72e9SBrian Feldman } 2100ef72505eSJeff Roberson #ifdef INVARIANTS 2101ef72505eSJeff Roberson uma_dbg_alloc(zone, NULL, item); 2102ef72505eSJeff Roberson #endif 21032cc35ff9SJeff Roberson if (flags & M_ZERO) 210448343a2fSGleb Smirnoff uma_zero_item(item, zone); 21058355f576SJeff Roberson return (item); 2106fc03d22bSJeff Roberson } 2107fc03d22bSJeff Roberson 21088355f576SJeff Roberson /* 21098355f576SJeff Roberson * We have run out of items in our alloc bucket. 21108355f576SJeff Roberson * See if we can switch with our free bucket. 
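	 * (Invariant sketch, not from the original source: between
	 * swaps, uc_allocbucket only drains and uc_freebucket only
	 * fills, so swapping a full freebucket in for an empty
	 * allocbucket lets this CPU recycle its own recent frees
	 * without touching the zone lock.)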
21118355f576SJeff Roberson */ 2112b983089aSJeff Roberson bucket = cache->uc_freebucket; 2113fc03d22bSJeff Roberson if (bucket != NULL && bucket->ub_cnt > 0) { 2114*1431a748SGleb Smirnoff CTR2(KTR_UMA, 2115*1431a748SGleb Smirnoff "uma_zalloc: zone %s(%p) swapping empty with alloc", 2116*1431a748SGleb Smirnoff zone->uz_name, zone); 21178355f576SJeff Roberson cache->uc_freebucket = cache->uc_allocbucket; 2118b983089aSJeff Roberson cache->uc_allocbucket = bucket; 21198355f576SJeff Roberson goto zalloc_start; 21208355f576SJeff Roberson } 2121fc03d22bSJeff Roberson 2122fc03d22bSJeff Roberson /* 2123fc03d22bSJeff Roberson * Discard any empty allocation bucket while we hold no locks. 2124fc03d22bSJeff Roberson */ 2125fc03d22bSJeff Roberson bucket = cache->uc_allocbucket; 2126fc03d22bSJeff Roberson cache->uc_allocbucket = NULL; 2127fc03d22bSJeff Roberson critical_exit(); 2128fc03d22bSJeff Roberson if (bucket != NULL) 21296fd34d6fSJeff Roberson bucket_free(zone, bucket, udata); 2130fc03d22bSJeff Roberson 2131fc03d22bSJeff Roberson /* Short-circuit for zones without buckets and low memory. */ 2132fc03d22bSJeff Roberson if (zone->uz_count == 0 || bucketdisable) 2133fc03d22bSJeff Roberson goto zalloc_item; 2134fc03d22bSJeff Roberson 21355d1ae027SRobert Watson /* 21365d1ae027SRobert Watson * Attempt to retrieve the item from the per-CPU cache has failed, so 21375d1ae027SRobert Watson * we must go back to the zone. This requires the zone lock, so we 21385d1ae027SRobert Watson * must drop the critical section, then re-acquire it when we go back 21395d1ae027SRobert Watson * to the cache. Since the critical section is released, we may be 21405d1ae027SRobert Watson * preempted or migrate. As such, make sure not to maintain any 21415d1ae027SRobert Watson * thread-local state specific to the cache from prior to releasing 21425d1ae027SRobert Watson * the critical section. 21435d1ae027SRobert Watson */ 2144fc03d22bSJeff Roberson lockfail = 0; 2145fc03d22bSJeff Roberson if (ZONE_TRYLOCK(zone) == 0) { 2146fc03d22bSJeff Roberson /* Record contention to size the buckets. */ 2147a553d4b8SJeff Roberson ZONE_LOCK(zone); 2148fc03d22bSJeff Roberson lockfail = 1; 2149fc03d22bSJeff Roberson } 21505d1ae027SRobert Watson critical_enter(); 21515d1ae027SRobert Watson cpu = curcpu; 21525d1ae027SRobert Watson cache = &zone->uz_cpu[cpu]; 21535d1ae027SRobert Watson 2154fc03d22bSJeff Roberson /* 2155fc03d22bSJeff Roberson * Since we have locked the zone we may as well send back our stats. 2156fc03d22bSJeff Roberson */ 21570095a784SJeff Roberson atomic_add_long(&zone->uz_allocs, cache->uc_allocs); 21580095a784SJeff Roberson atomic_add_long(&zone->uz_frees, cache->uc_frees); 2159a553d4b8SJeff Roberson cache->uc_allocs = 0; 2160773df9abSRobert Watson cache->uc_frees = 0; 21618355f576SJeff Roberson 2162fc03d22bSJeff Roberson /* See if we lost the race to fill the cache. */ 2163fc03d22bSJeff Roberson if (cache->uc_allocbucket != NULL) { 2164fc03d22bSJeff Roberson ZONE_UNLOCK(zone); 2165fc03d22bSJeff Roberson goto zalloc_start; 2166a553d4b8SJeff Roberson } 21678355f576SJeff Roberson 2168fc03d22bSJeff Roberson /* 2169fc03d22bSJeff Roberson * Check the zone's cache of buckets. 
2170fc03d22bSJeff Roberson */ 2171fc03d22bSJeff Roberson if ((bucket = LIST_FIRST(&zone->uz_buckets)) != NULL) { 2172cae33c14SJeff Roberson KASSERT(bucket->ub_cnt != 0, 2173a553d4b8SJeff Roberson ("uma_zalloc_arg: Returning an empty bucket.")); 21748355f576SJeff Roberson 2175a553d4b8SJeff Roberson LIST_REMOVE(bucket, ub_link); 2176a553d4b8SJeff Roberson cache->uc_allocbucket = bucket; 2177a553d4b8SJeff Roberson ZONE_UNLOCK(zone); 21788355f576SJeff Roberson goto zalloc_start; 2179a553d4b8SJeff Roberson } 21805d1ae027SRobert Watson /* We are no longer associated with this CPU. */ 21815d1ae027SRobert Watson critical_exit(); 2182bbee39c6SJeff Roberson 2183fc03d22bSJeff Roberson /* 2184fc03d22bSJeff Roberson * We bump the uz count when the cache size is insufficient to 2185fc03d22bSJeff Roberson * handle the working set. 2186fc03d22bSJeff Roberson */ 21876fd34d6fSJeff Roberson if (lockfail && zone->uz_count < BUCKET_MAX) 2188a553d4b8SJeff Roberson zone->uz_count++; 2189fc03d22bSJeff Roberson ZONE_UNLOCK(zone); 2190099a0e58SBosko Milekic 21918355f576SJeff Roberson /* 2192a553d4b8SJeff Roberson * Now lets just fill a bucket and put it on the free list. If that 2193763df3ecSPedro F. Giffuni * works we'll restart the allocation from the beginning and it 2194fc03d22bSJeff Roberson * will use the just filled bucket. 2195bbee39c6SJeff Roberson */ 21966fd34d6fSJeff Roberson bucket = zone_alloc_bucket(zone, udata, flags); 2197*1431a748SGleb Smirnoff CTR3(KTR_UMA, "uma_zalloc: zone %s(%p) bucket zone returned %p", 2198*1431a748SGleb Smirnoff zone->uz_name, zone, bucket); 2199fc03d22bSJeff Roberson if (bucket != NULL) { 2200fc03d22bSJeff Roberson ZONE_LOCK(zone); 2201fc03d22bSJeff Roberson critical_enter(); 2202fc03d22bSJeff Roberson cpu = curcpu; 2203fc03d22bSJeff Roberson cache = &zone->uz_cpu[cpu]; 2204fc03d22bSJeff Roberson /* 2205fc03d22bSJeff Roberson * See if we lost the race or were migrated. Cache the 2206fc03d22bSJeff Roberson * initialized bucket to make this less likely or claim 2207fc03d22bSJeff Roberson * the memory directly. 2208fc03d22bSJeff Roberson */ 2209fc03d22bSJeff Roberson if (cache->uc_allocbucket == NULL) 2210fc03d22bSJeff Roberson cache->uc_allocbucket = bucket; 2211fc03d22bSJeff Roberson else 2212fc03d22bSJeff Roberson LIST_INSERT_HEAD(&zone->uz_buckets, bucket, ub_link); 2213bbee39c6SJeff Roberson ZONE_UNLOCK(zone); 2214fc03d22bSJeff Roberson goto zalloc_start; 2215bbee39c6SJeff Roberson } 2216fc03d22bSJeff Roberson 2217bbee39c6SJeff Roberson /* 2218bbee39c6SJeff Roberson * We may not be able to get a bucket so return an actual item. 2219bbee39c6SJeff Roberson */ 2220fc03d22bSJeff Roberson zalloc_item: 2221e20a199fSJeff Roberson item = zone_alloc_item(zone, udata, flags); 2222fc03d22bSJeff Roberson 2223e20a199fSJeff Roberson return (item); 2224bbee39c6SJeff Roberson } 2225bbee39c6SJeff Roberson 2226bbee39c6SJeff Roberson static uma_slab_t 2227e20a199fSJeff Roberson keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int flags) 2228bbee39c6SJeff Roberson { 2229bbee39c6SJeff Roberson uma_slab_t slab; 22306fd34d6fSJeff Roberson int reserve; 2231099a0e58SBosko Milekic 2232e20a199fSJeff Roberson mtx_assert(&keg->uk_lock, MA_OWNED); 2233bbee39c6SJeff Roberson slab = NULL; 22346fd34d6fSJeff Roberson reserve = 0; 22356fd34d6fSJeff Roberson if ((flags & M_USE_RESERVE) == 0) 22366fd34d6fSJeff Roberson reserve = keg->uk_reserve; 2237bbee39c6SJeff Roberson 2238bbee39c6SJeff Roberson for (;;) { 2239bbee39c6SJeff Roberson /* 2240bbee39c6SJeff Roberson * Find a slab with some space. 
Prefer slabs that are partially 2241bbee39c6SJeff Roberson * used over those that are totally full. This helps to reduce 2242bbee39c6SJeff Roberson * fragmentation. 2243bbee39c6SJeff Roberson */ 22446fd34d6fSJeff Roberson if (keg->uk_free > reserve) { 2245099a0e58SBosko Milekic if (!LIST_EMPTY(&keg->uk_part_slab)) { 2246099a0e58SBosko Milekic slab = LIST_FIRST(&keg->uk_part_slab); 2247bbee39c6SJeff Roberson } else { 2248099a0e58SBosko Milekic slab = LIST_FIRST(&keg->uk_free_slab); 2249bbee39c6SJeff Roberson LIST_REMOVE(slab, us_link); 2250099a0e58SBosko Milekic LIST_INSERT_HEAD(&keg->uk_part_slab, slab, 2251bbee39c6SJeff Roberson us_link); 2252bbee39c6SJeff Roberson } 2253e20a199fSJeff Roberson MPASS(slab->us_keg == keg); 2254bbee39c6SJeff Roberson return (slab); 2255bbee39c6SJeff Roberson } 2256bbee39c6SJeff Roberson 2257bbee39c6SJeff Roberson /* 2258bbee39c6SJeff Roberson * M_NOVM means don't ask at all! 2259bbee39c6SJeff Roberson */ 2260bbee39c6SJeff Roberson if (flags & M_NOVM) 2261bbee39c6SJeff Roberson break; 2262bbee39c6SJeff Roberson 2263e20a199fSJeff Roberson if (keg->uk_maxpages && keg->uk_pages >= keg->uk_maxpages) { 2264099a0e58SBosko Milekic keg->uk_flags |= UMA_ZFLAG_FULL; 2265e20a199fSJeff Roberson /* 2266e20a199fSJeff Roberson * If this is not a multi-zone, set the FULL bit. 2267e20a199fSJeff Roberson * Otherwise slab_multi() takes care of it. 2268e20a199fSJeff Roberson */ 22692f891cd5SPawel Jakub Dawidek if ((zone->uz_flags & UMA_ZFLAG_MULTI) == 0) { 2270e20a199fSJeff Roberson zone->uz_flags |= UMA_ZFLAG_FULL; 22712f891cd5SPawel Jakub Dawidek zone_log_warning(zone); 227254503a13SJonathan T. Looney zone_maxaction(zone); 22732f891cd5SPawel Jakub Dawidek } 2274ebc85edfSJeff Roberson if (flags & M_NOWAIT) 2275bbee39c6SJeff Roberson break; 2276c288b548SEitan Adler zone->uz_sleeps++; 2277e20a199fSJeff Roberson msleep(keg, &keg->uk_lock, PVM, "keglimit", 0); 2278bbee39c6SJeff Roberson continue; 2279bbee39c6SJeff Roberson } 2280e20a199fSJeff Roberson slab = keg_alloc_slab(keg, zone, flags); 2281bbee39c6SJeff Roberson /* 2282bbee39c6SJeff Roberson * If we got a slab here it's safe to mark it partially used 2283bbee39c6SJeff Roberson * and return. We assume that the caller is going to remove 2284bbee39c6SJeff Roberson * at least one item. 2285bbee39c6SJeff Roberson */ 2286bbee39c6SJeff Roberson if (slab) { 2287e20a199fSJeff Roberson MPASS(slab->us_keg == keg); 2288099a0e58SBosko Milekic LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link); 2289bbee39c6SJeff Roberson return (slab); 2290bbee39c6SJeff Roberson } 2291bbee39c6SJeff Roberson /* 2292bbee39c6SJeff Roberson * We might not have been able to get a slab but another cpu 2293bbee39c6SJeff Roberson * could have while we were unlocked. Check again before we 2294bbee39c6SJeff Roberson * fail. 
2295bbee39c6SJeff Roberson */ 2296bbee39c6SJeff Roberson flags |= M_NOVM; 2297bbee39c6SJeff Roberson } 2298bbee39c6SJeff Roberson return (slab); 2299bbee39c6SJeff Roberson } 2300bbee39c6SJeff Roberson 2301e20a199fSJeff Roberson static uma_slab_t 2302e20a199fSJeff Roberson zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int flags) 2303e20a199fSJeff Roberson { 2304e20a199fSJeff Roberson uma_slab_t slab; 2305e20a199fSJeff Roberson 2306af526374SJeff Roberson if (keg == NULL) { 2307e20a199fSJeff Roberson keg = zone_first_keg(zone); 2308af526374SJeff Roberson KEG_LOCK(keg); 2309af526374SJeff Roberson } 2310e20a199fSJeff Roberson 2311e20a199fSJeff Roberson for (;;) { 2312e20a199fSJeff Roberson slab = keg_fetch_slab(keg, zone, flags); 2313e20a199fSJeff Roberson if (slab) 2314e20a199fSJeff Roberson return (slab); 2315e20a199fSJeff Roberson if (flags & (M_NOWAIT | M_NOVM)) 2316e20a199fSJeff Roberson break; 2317e20a199fSJeff Roberson } 2318af526374SJeff Roberson KEG_UNLOCK(keg); 2319e20a199fSJeff Roberson return (NULL); 2320e20a199fSJeff Roberson } 2321e20a199fSJeff Roberson 2322e20a199fSJeff Roberson /* 2323e20a199fSJeff Roberson * zone_fetch_slab_multi: Fetches a slab from one available keg. Returns 2324af526374SJeff Roberson * with the keg locked. On NULL no lock is held. 2325e20a199fSJeff Roberson * 2326e20a199fSJeff Roberson * The last pointer is used to seed the search. It is not required. 2327e20a199fSJeff Roberson */ 2328e20a199fSJeff Roberson static uma_slab_t 2329e20a199fSJeff Roberson zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int rflags) 2330e20a199fSJeff Roberson { 2331e20a199fSJeff Roberson uma_klink_t klink; 2332e20a199fSJeff Roberson uma_slab_t slab; 2333e20a199fSJeff Roberson uma_keg_t keg; 2334e20a199fSJeff Roberson int flags; 2335e20a199fSJeff Roberson int empty; 2336e20a199fSJeff Roberson int full; 2337e20a199fSJeff Roberson 2338e20a199fSJeff Roberson /* 2339e20a199fSJeff Roberson * Don't wait on the first pass. This will skip limit tests 2340e20a199fSJeff Roberson * as well. We don't want to block if we can find a provider 2341e20a199fSJeff Roberson * without blocking. 2342e20a199fSJeff Roberson */ 2343e20a199fSJeff Roberson flags = (rflags & ~M_WAITOK) | M_NOWAIT; 2344e20a199fSJeff Roberson /* 2345e20a199fSJeff Roberson * Use the last slab allocated as a hint for where to start 2346e20a199fSJeff Roberson * the search. 2347e20a199fSJeff Roberson */ 2348af526374SJeff Roberson if (last != NULL) { 2349e20a199fSJeff Roberson slab = keg_fetch_slab(last, zone, flags); 2350e20a199fSJeff Roberson if (slab) 2351e20a199fSJeff Roberson return (slab); 2352af526374SJeff Roberson KEG_UNLOCK(last); 2353e20a199fSJeff Roberson } 2354e20a199fSJeff Roberson /* 2355e20a199fSJeff Roberson * Loop until we have a slab in case of transient failures 2356e20a199fSJeff Roberson * while M_WAITOK is specified. I'm not sure this is 100% 2357e20a199fSJeff Roberson * required but we've done it for so long now. 2358e20a199fSJeff Roberson */ 2359e20a199fSJeff Roberson for (;;) { 2360e20a199fSJeff Roberson empty = 0; 2361e20a199fSJeff Roberson full = 0; 2362e20a199fSJeff Roberson /* 2363e20a199fSJeff Roberson * Search the available kegs for slabs. Be careful to hold the 2364e20a199fSJeff Roberson * correct lock while calling into the keg layer.
2365e20a199fSJeff Roberson */ 2366e20a199fSJeff Roberson LIST_FOREACH(klink, &zone->uz_kegs, kl_link) { 2367e20a199fSJeff Roberson keg = klink->kl_keg; 2368af526374SJeff Roberson KEG_LOCK(keg); 2369e20a199fSJeff Roberson if ((keg->uk_flags & UMA_ZFLAG_FULL) == 0) { 2370e20a199fSJeff Roberson slab = keg_fetch_slab(keg, zone, flags); 2371e20a199fSJeff Roberson if (slab) 2372e20a199fSJeff Roberson return (slab); 2373e20a199fSJeff Roberson } 2374e20a199fSJeff Roberson if (keg->uk_flags & UMA_ZFLAG_FULL) 2375e20a199fSJeff Roberson full++; 2376e20a199fSJeff Roberson else 2377e20a199fSJeff Roberson empty++; 2378af526374SJeff Roberson KEG_UNLOCK(keg); 2379e20a199fSJeff Roberson } 2380e20a199fSJeff Roberson if (rflags & (M_NOWAIT | M_NOVM)) 2381e20a199fSJeff Roberson break; 2382e20a199fSJeff Roberson flags = rflags; 2383e20a199fSJeff Roberson /* 2384e20a199fSJeff Roberson * All kegs are full. XXX We can't atomically check all kegs 2385e20a199fSJeff Roberson * and sleep so just sleep for a short period and retry. 2386e20a199fSJeff Roberson */ 2387e20a199fSJeff Roberson if (full && !empty) { 2388af526374SJeff Roberson ZONE_LOCK(zone); 2389e20a199fSJeff Roberson zone->uz_flags |= UMA_ZFLAG_FULL; 2390bf965959SSean Bruno zone->uz_sleeps++; 23912f891cd5SPawel Jakub Dawidek zone_log_warning(zone); 239254503a13SJonathan T. Looney zone_maxaction(zone); 2393af526374SJeff Roberson msleep(zone, zone->uz_lockptr, PVM, 2394af526374SJeff Roberson "zonelimit", hz/100); 2395e20a199fSJeff Roberson zone->uz_flags &= ~UMA_ZFLAG_FULL; 2396af526374SJeff Roberson ZONE_UNLOCK(zone); 2397e20a199fSJeff Roberson continue; 2398e20a199fSJeff Roberson } 2399e20a199fSJeff Roberson } 2400e20a199fSJeff Roberson return (NULL); 2401e20a199fSJeff Roberson } 2402e20a199fSJeff Roberson 2403d56368d7SBosko Milekic static void * 24040095a784SJeff Roberson slab_alloc_item(uma_keg_t keg, uma_slab_t slab) 2405bbee39c6SJeff Roberson { 2406bbee39c6SJeff Roberson void *item; 240785dcf349SGleb Smirnoff uint8_t freei; 2408bbee39c6SJeff Roberson 24090095a784SJeff Roberson MPASS(keg == slab->us_keg); 2410e20a199fSJeff Roberson mtx_assert(&keg->uk_lock, MA_OWNED); 2411099a0e58SBosko Milekic 2412ef72505eSJeff Roberson freei = BIT_FFS(SLAB_SETSIZE, &slab->us_free) - 1; 2413ef72505eSJeff Roberson BIT_CLR(SLAB_SETSIZE, freei, &slab->us_free); 2414099a0e58SBosko Milekic item = slab->us_data + (keg->uk_rsize * freei); 2415bbee39c6SJeff Roberson slab->us_freecount--; 2416099a0e58SBosko Milekic keg->uk_free--; 2417ef72505eSJeff Roberson 2418bbee39c6SJeff Roberson /* Move this slab to the full list */ 2419bbee39c6SJeff Roberson if (slab->us_freecount == 0) { 2420bbee39c6SJeff Roberson LIST_REMOVE(slab, us_link); 2421099a0e58SBosko Milekic LIST_INSERT_HEAD(&keg->uk_full_slab, slab, us_link); 2422bbee39c6SJeff Roberson } 2423bbee39c6SJeff Roberson 2424bbee39c6SJeff Roberson return (item); 2425bbee39c6SJeff Roberson } 2426bbee39c6SJeff Roberson 2427bbee39c6SJeff Roberson static int 24280095a784SJeff Roberson zone_import(uma_zone_t zone, void **bucket, int max, int flags) 24290095a784SJeff Roberson { 24300095a784SJeff Roberson uma_slab_t slab; 24310095a784SJeff Roberson uma_keg_t keg; 24320095a784SJeff Roberson int i; 24330095a784SJeff Roberson 24340095a784SJeff Roberson slab = NULL; 24350095a784SJeff Roberson keg = NULL; 2436af526374SJeff Roberson /* Try to keep the buckets totally full */ 24370095a784SJeff Roberson for (i = 0; i < max; ) { 24380095a784SJeff Roberson if ((slab = zone->uz_slab(zone, keg, flags)) == NULL) 24390095a784SJeff Roberson break; 
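		/*
		 * [Editor's illustrative sketch, not part of the original
		 * source] The import loop around this point has a simple
		 * contract: fill up to 'max' item pointers from successive
		 * slabs, and after the first slab demote M_WAITOK to
		 * M_NOWAIT so that one bucket refill blocks for at most one
		 * slab allocation.  A minimal userspace model, using
		 * hypothetical demo_* names, might look like:
		 *
		 *	int
		 *	demo_import(void **items, int max, int flags)
		 *	{
		 *		struct demo_slab *slab;
		 *		int i;
		 *
		 *		for (i = 0; i < max; ) {
		 *			slab = demo_fetch_slab(flags);
		 *			if (slab == NULL)
		 *				break;
		 *			while (slab->freecount > 0 && i < max)
		 *				items[i++] = demo_take(slab);
		 *			flags = (flags & ~M_WAITOK) | M_NOWAIT;
		 *		}
		 *		return (i);
		 *	}
		 */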
24400095a784SJeff Roberson keg = slab->us_keg; 24416fd34d6fSJeff Roberson while (slab->us_freecount && i < max) { 24420095a784SJeff Roberson bucket[i++] = slab_alloc_item(keg, slab); 24436fd34d6fSJeff Roberson if (keg->uk_free <= keg->uk_reserve) 24446fd34d6fSJeff Roberson break; 24456fd34d6fSJeff Roberson } 24466fd34d6fSJeff Roberson /* Don't grab more than one slab at a time. */ 24470095a784SJeff Roberson flags &= ~M_WAITOK; 24480095a784SJeff Roberson flags |= M_NOWAIT; 24490095a784SJeff Roberson } 24500095a784SJeff Roberson if (slab != NULL) 24510095a784SJeff Roberson KEG_UNLOCK(keg); 24520095a784SJeff Roberson 24530095a784SJeff Roberson return i; 24540095a784SJeff Roberson } 24550095a784SJeff Roberson 2456fc03d22bSJeff Roberson static uma_bucket_t 24576fd34d6fSJeff Roberson zone_alloc_bucket(uma_zone_t zone, void *udata, int flags) 2458bbee39c6SJeff Roberson { 2459bbee39c6SJeff Roberson uma_bucket_t bucket; 24600095a784SJeff Roberson int max; 2461bbee39c6SJeff Roberson 24626fd34d6fSJeff Roberson /* Don't wait for buckets, preserve caller's NOVM setting. */ 24636fd34d6fSJeff Roberson bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM)); 24640095a784SJeff Roberson if (bucket == NULL) 2465f7104ccdSAlexander Motin return (NULL); 24660095a784SJeff Roberson 2467af526374SJeff Roberson max = MIN(bucket->ub_entries, zone->uz_count); 24680095a784SJeff Roberson bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket, 24690095a784SJeff Roberson max, flags); 24700095a784SJeff Roberson 24710095a784SJeff Roberson /* 24720095a784SJeff Roberson * Initialize the memory if necessary. 24730095a784SJeff Roberson */ 24740095a784SJeff Roberson if (bucket->ub_cnt != 0 && zone->uz_init != NULL) { 2475099a0e58SBosko Milekic int i; 2476bbee39c6SJeff Roberson 24770095a784SJeff Roberson for (i = 0; i < bucket->ub_cnt; i++) 2478e20a199fSJeff Roberson if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size, 24790095a784SJeff Roberson flags) != 0) 2480b23f72e9SBrian Feldman break; 2481b23f72e9SBrian Feldman /* 2482b23f72e9SBrian Feldman * If we couldn't initialize the whole bucket, put the 2483b23f72e9SBrian Feldman * rest back onto the freelist. 2484b23f72e9SBrian Feldman */ 2485b23f72e9SBrian Feldman if (i != bucket->ub_cnt) { 2486af526374SJeff Roberson zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i], 24870095a784SJeff Roberson bucket->ub_cnt - i); 2488a5a262c6SBosko Milekic #ifdef INVARIANTS 24890095a784SJeff Roberson bzero(&bucket->ub_bucket[i], 24900095a784SJeff Roberson sizeof(void *) * (bucket->ub_cnt - i)); 2491a5a262c6SBosko Milekic #endif 2492b23f72e9SBrian Feldman bucket->ub_cnt = i; 2493b23f72e9SBrian Feldman } 2494099a0e58SBosko Milekic } 2495099a0e58SBosko Milekic 2496f7104ccdSAlexander Motin if (bucket->ub_cnt == 0) { 24976fd34d6fSJeff Roberson bucket_free(zone, bucket, udata); 2498fc03d22bSJeff Roberson atomic_add_long(&zone->uz_fails, 1); 2499fc03d22bSJeff Roberson return (NULL); 2500bbee39c6SJeff Roberson } 2501fc03d22bSJeff Roberson 2502fc03d22bSJeff Roberson return (bucket); 2503fc03d22bSJeff Roberson } 2504fc03d22bSJeff Roberson 25058355f576SJeff Roberson /* 25060095a784SJeff Roberson * Allocates a single item from a zone. 25078355f576SJeff Roberson * 25088355f576SJeff Roberson * Arguments 25098355f576SJeff Roberson * zone The zone to alloc for. 25108355f576SJeff Roberson * udata The data to be passed to the constructor. 2511a163d034SWarner Losh * flags M_WAITOK, M_NOWAIT, M_ZERO. 
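 *
 * [Editor's note] A typical external entry into this allocation path
 * is the public allocator, e.g. (my_zone is a hypothetical zone
 * created earlier with uma_zcreate()):
 *
 *	obj = uma_zalloc(my_zone, M_WAITOK | M_ZERO);
 *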
25128355f576SJeff Roberson * 25138355f576SJeff Roberson * Returns 25148355f576SJeff Roberson * NULL if there is no memory and M_NOWAIT is set 2515bbee39c6SJeff Roberson * An item if successful 25168355f576SJeff Roberson */ 25178355f576SJeff Roberson 25188355f576SJeff Roberson static void * 2519e20a199fSJeff Roberson zone_alloc_item(uma_zone_t zone, void *udata, int flags) 25208355f576SJeff Roberson { 25218355f576SJeff Roberson void *item; 25228355f576SJeff Roberson 25238355f576SJeff Roberson item = NULL; 25248355f576SJeff Roberson 25250095a784SJeff Roberson if (zone->uz_import(zone->uz_arg, &item, 1, flags) != 1) 25260095a784SJeff Roberson goto fail; 25270095a784SJeff Roberson atomic_add_long(&zone->uz_allocs, 1); 25288355f576SJeff Roberson 2529099a0e58SBosko Milekic /* 2530099a0e58SBosko Milekic * We have to call both the zone's init (not the keg's init) 2531099a0e58SBosko Milekic * and the zone's ctor. This is because the item is going from 2532099a0e58SBosko Milekic * a keg slab directly to the user, and the user is expecting it 2533099a0e58SBosko Milekic * to be both zone-init'd as well as zone-ctor'd. 2534099a0e58SBosko Milekic */ 2535b23f72e9SBrian Feldman if (zone->uz_init != NULL) { 2536e20a199fSJeff Roberson if (zone->uz_init(item, zone->uz_size, flags) != 0) { 25370095a784SJeff Roberson zone_free_item(zone, item, udata, SKIP_FINI); 25380095a784SJeff Roberson goto fail; 2539b23f72e9SBrian Feldman } 2540b23f72e9SBrian Feldman } 2541b23f72e9SBrian Feldman if (zone->uz_ctor != NULL) { 2542e20a199fSJeff Roberson if (zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) { 25430095a784SJeff Roberson zone_free_item(zone, item, udata, SKIP_DTOR); 25440095a784SJeff Roberson goto fail; 2545b23f72e9SBrian Feldman } 2546b23f72e9SBrian Feldman } 2547ef72505eSJeff Roberson #ifdef INVARIANTS 25480095a784SJeff Roberson uma_dbg_alloc(zone, NULL, item); 2549ef72505eSJeff Roberson #endif 25502cc35ff9SJeff Roberson if (flags & M_ZERO) 255148343a2fSGleb Smirnoff uma_zero_item(item, zone); 25528355f576SJeff Roberson 2553*1431a748SGleb Smirnoff CTR3(KTR_UMA, "zone_alloc_item item %p from %s(%p)", item, 2554*1431a748SGleb Smirnoff zone->uz_name, zone); 2555*1431a748SGleb Smirnoff 25568355f576SJeff Roberson return (item); 25570095a784SJeff Roberson 25580095a784SJeff Roberson fail: 2559*1431a748SGleb Smirnoff CTR2(KTR_UMA, "zone_alloc_item failed from %s(%p)", 2560*1431a748SGleb Smirnoff zone->uz_name, zone); 25610095a784SJeff Roberson atomic_add_long(&zone->uz_fails, 1); 25620095a784SJeff Roberson return (NULL); 25638355f576SJeff Roberson } 25648355f576SJeff Roberson 25658355f576SJeff Roberson /* See uma.h */ 25668355f576SJeff Roberson void 25678355f576SJeff Roberson uma_zfree_arg(uma_zone_t zone, void *item, void *udata) 25688355f576SJeff Roberson { 25698355f576SJeff Roberson uma_cache_t cache; 25708355f576SJeff Roberson uma_bucket_t bucket; 25714d104ba0SAlexander Motin int lockfail; 25728355f576SJeff Roberson int cpu; 25738355f576SJeff Roberson 2574e866d8f0SMark Murray /* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */ 2575e866d8f0SMark Murray random_harvest_fast_uma(&zone, sizeof(zone), 1, RANDOM_UMA); 257610cb2424SMark Murray 25773659f747SRobert Watson CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread, 25783659f747SRobert Watson zone->uz_name); 25793659f747SRobert Watson 2580d9e2e68dSMark Johnston KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(), 25811067a2baSJonathan T. 
Looney ("uma_zfree_arg: called with spinlock or critical section held")); 25821067a2baSJonathan T. Looney 258320ed0cb0SMatthew D Fleming /* uma_zfree(..., NULL) does nothing, to match free(9). */ 258420ed0cb0SMatthew D Fleming if (item == NULL) 258520ed0cb0SMatthew D Fleming return; 25868d689e04SGleb Smirnoff #ifdef DEBUG_MEMGUARD 25878d689e04SGleb Smirnoff if (is_memguard_addr(item)) { 2588bc9d08e1SMark Johnston if (zone->uz_dtor != NULL) 25898d689e04SGleb Smirnoff zone->uz_dtor(item, zone->uz_size, udata); 2590bc9d08e1SMark Johnston if (zone->uz_fini != NULL) 25918d689e04SGleb Smirnoff zone->uz_fini(item, zone->uz_size); 25928d689e04SGleb Smirnoff memguard_free(item); 25938d689e04SGleb Smirnoff return; 25948d689e04SGleb Smirnoff } 25958d689e04SGleb Smirnoff #endif 25965d1ae027SRobert Watson #ifdef INVARIANTS 2597e20a199fSJeff Roberson if (zone->uz_flags & UMA_ZONE_MALLOC) 25985d1ae027SRobert Watson uma_dbg_free(zone, udata, item); 25995d1ae027SRobert Watson else 26005d1ae027SRobert Watson uma_dbg_free(zone, NULL, item); 26015d1ae027SRobert Watson #endif 2602fc03d22bSJeff Roberson if (zone->uz_dtor != NULL) 2603ef72505eSJeff Roberson zone->uz_dtor(item, zone->uz_size, udata); 2604ef72505eSJeff Roberson 2605af7f9b97SJeff Roberson /* 2606af7f9b97SJeff Roberson * The race here is acceptable. If we miss it we'll just have to wait 2607af7f9b97SJeff Roberson * a little longer for the limits to be reset. 2608af7f9b97SJeff Roberson */ 2609e20a199fSJeff Roberson if (zone->uz_flags & UMA_ZFLAG_FULL) 2610fc03d22bSJeff Roberson goto zfree_item; 2611af7f9b97SJeff Roberson 26125d1ae027SRobert Watson /* 26135d1ae027SRobert Watson * If possible, free to the per-CPU cache. There are two 26145d1ae027SRobert Watson * requirements for safe access to the per-CPU cache: (1) the thread 26155d1ae027SRobert Watson * accessing the cache must not be preempted or yield during access, 26165d1ae027SRobert Watson * and (2) the thread must not migrate CPUs without switching which 26175d1ae027SRobert Watson * cache it accesses. We rely on a critical section to prevent 26185d1ae027SRobert Watson * preemption and migration. We release the critical section in 26195d1ae027SRobert Watson * order to acquire the zone mutex if we are unable to free to the 26205d1ae027SRobert Watson * current cache; when we re-acquire the critical section, we must 26215d1ae027SRobert Watson * detect and handle migration if it has occurred. 26225d1ae027SRobert Watson */ 2623a553d4b8SJeff Roberson zfree_restart: 26245d1ae027SRobert Watson critical_enter(); 26255d1ae027SRobert Watson cpu = curcpu; 26268355f576SJeff Roberson cache = &zone->uz_cpu[cpu]; 26278355f576SJeff Roberson 26288355f576SJeff Roberson zfree_start: 2629a553d4b8SJeff Roberson /* 2630fc03d22bSJeff Roberson * Try to free into the allocbucket first to give LIFO ordering 2631fc03d22bSJeff Roberson * for cache-hot datastructures. Spill over into the freebucket 2632fc03d22bSJeff Roberson * if necessary. Alloc will swap them if one runs dry. 
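 *
 * [Editor's illustrative sketch, not part of the original source] The
 * two-bucket scheme can be modelled with hypothetical demo_* types:
 *
 *	struct demo_bucket { int cnt, entries; void *item[16]; };
 *	struct demo_cache { struct demo_bucket *alloc, *free; };
 *
 *	static int
 *	demo_cache_free(struct demo_cache *c, void *item)
 *	{
 *		struct demo_bucket *b;
 *
 *		b = c->alloc;
 *		if (b == NULL || b->cnt >= b->entries)
 *			b = c->free;
 *		if (b == NULL || b->cnt >= b->entries)
 *			return (0);
 *		b->item[b->cnt++] = item;
 *		return (1);
 *	}
 *
 * A zero return corresponds to falling through to the slower,
 * zone-locked path below.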
2633a553d4b8SJeff Roberson */ 2634fc03d22bSJeff Roberson bucket = cache->uc_allocbucket; 2635fc03d22bSJeff Roberson if (bucket == NULL || bucket->ub_cnt >= bucket->ub_entries) 2636fc03d22bSJeff Roberson bucket = cache->uc_freebucket; 2637fc03d22bSJeff Roberson if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) { 2638cae33c14SJeff Roberson KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL, 26398355f576SJeff Roberson ("uma_zfree: Freeing to non free bucket index.")); 2640cae33c14SJeff Roberson bucket->ub_bucket[bucket->ub_cnt] = item; 2641cae33c14SJeff Roberson bucket->ub_cnt++; 2642773df9abSRobert Watson cache->uc_frees++; 26435d1ae027SRobert Watson critical_exit(); 26448355f576SJeff Roberson return; 2645fc03d22bSJeff Roberson } 2646fc03d22bSJeff Roberson 26478355f576SJeff Roberson /* 26485d1ae027SRobert Watson * We must go back to the zone, which requires acquiring the zone lock, 26495d1ae027SRobert Watson * which in turn means we must release and re-acquire the critical 26505d1ae027SRobert Watson * section. Since the critical section is released, we may be 26515d1ae027SRobert Watson * preempted or migrate. As such, make sure not to maintain any 26525d1ae027SRobert Watson * thread-local state specific to the cache from prior to releasing 26535d1ae027SRobert Watson * the critical section. 26548355f576SJeff Roberson */ 26555d1ae027SRobert Watson critical_exit(); 2656fc03d22bSJeff Roberson if (zone->uz_count == 0 || bucketdisable) 2657fc03d22bSJeff Roberson goto zfree_item; 2658fc03d22bSJeff Roberson 26594d104ba0SAlexander Motin lockfail = 0; 26604d104ba0SAlexander Motin if (ZONE_TRYLOCK(zone) == 0) { 26614d104ba0SAlexander Motin /* Record contention to size the buckets. */ 26628355f576SJeff Roberson ZONE_LOCK(zone); 26634d104ba0SAlexander Motin lockfail = 1; 26644d104ba0SAlexander Motin } 26655d1ae027SRobert Watson critical_enter(); 26665d1ae027SRobert Watson cpu = curcpu; 26675d1ae027SRobert Watson cache = &zone->uz_cpu[cpu]; 26688355f576SJeff Roberson 2669fc03d22bSJeff Roberson /* 2670fc03d22bSJeff Roberson * Since we have locked the zone we may as well send back our stats. 2671fc03d22bSJeff Roberson */ 26720095a784SJeff Roberson atomic_add_long(&zone->uz_allocs, cache->uc_allocs); 26730095a784SJeff Roberson atomic_add_long(&zone->uz_frees, cache->uc_frees); 2674f4ff923bSRobert Watson cache->uc_allocs = 0; 2675f4ff923bSRobert Watson cache->uc_frees = 0; 2676f4ff923bSRobert Watson 26778355f576SJeff Roberson bucket = cache->uc_freebucket; 2678fc03d22bSJeff Roberson if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) { 2679fc03d22bSJeff Roberson ZONE_UNLOCK(zone); 2680fc03d22bSJeff Roberson goto zfree_start; 2681fc03d22bSJeff Roberson } 26828355f576SJeff Roberson cache->uc_freebucket = NULL; 2683afa5d703SMark Johnston /* We are no longer associated with this CPU. */ 2684afa5d703SMark Johnston critical_exit(); 26858355f576SJeff Roberson 26868355f576SJeff Roberson /* Can we throw this on the zone full list?
*/ 26878355f576SJeff Roberson if (bucket != NULL) { 2688*1431a748SGleb Smirnoff CTR3(KTR_UMA, 2689*1431a748SGleb Smirnoff "uma_zfree: zone %s(%p) putting bucket %p on free list", 2690*1431a748SGleb Smirnoff zone->uz_name, zone, bucket); 2691cae33c14SJeff Roberson /* ub_cnt is pointing to the last free item */ 2692cae33c14SJeff Roberson KASSERT(bucket->ub_cnt != 0, 26938355f576SJeff Roberson ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n")); 2694fc03d22bSJeff Roberson LIST_INSERT_HEAD(&zone->uz_buckets, bucket, ub_link); 26958355f576SJeff Roberson } 2696fc03d22bSJeff Roberson 26974d104ba0SAlexander Motin /* 26984d104ba0SAlexander Motin * We bump the uz count when the cache size is insufficient to 26994d104ba0SAlexander Motin * handle the working set. 27004d104ba0SAlexander Motin */ 27014d104ba0SAlexander Motin if (lockfail && zone->uz_count < BUCKET_MAX) 27024d104ba0SAlexander Motin zone->uz_count++; 2703a553d4b8SJeff Roberson ZONE_UNLOCK(zone); 2704a553d4b8SJeff Roberson 27056fd34d6fSJeff Roberson bucket = bucket_alloc(zone, udata, M_NOWAIT); 2706*1431a748SGleb Smirnoff CTR3(KTR_UMA, "uma_zfree: zone %s(%p) allocated bucket %p", 2707*1431a748SGleb Smirnoff zone->uz_name, zone, bucket); 27084741dcbfSJeff Roberson if (bucket) { 2709fc03d22bSJeff Roberson critical_enter(); 2710fc03d22bSJeff Roberson cpu = curcpu; 2711fc03d22bSJeff Roberson cache = &zone->uz_cpu[cpu]; 2712fc03d22bSJeff Roberson if (cache->uc_freebucket == NULL) { 2713fc03d22bSJeff Roberson cache->uc_freebucket = bucket; 2714fc03d22bSJeff Roberson goto zfree_start; 2715fc03d22bSJeff Roberson } 2716fc03d22bSJeff Roberson /* 2717fc03d22bSJeff Roberson * We lost the race, start over. We have to drop our 2718fc03d22bSJeff Roberson * critical section to free the bucket. 2719fc03d22bSJeff Roberson */ 2720fc03d22bSJeff Roberson critical_exit(); 27216fd34d6fSJeff Roberson bucket_free(zone, bucket, udata); 2722a553d4b8SJeff Roberson goto zfree_restart; 27238355f576SJeff Roberson } 27248355f576SJeff Roberson 2725a553d4b8SJeff Roberson /* 2726a553d4b8SJeff Roberson * If nothing else caught this, we'll just do an internal free. 2727a553d4b8SJeff Roberson */ 2728fc03d22bSJeff Roberson zfree_item: 27290095a784SJeff Roberson zone_free_item(zone, item, udata, SKIP_DTOR); 27308355f576SJeff Roberson 27318355f576SJeff Roberson return; 27328355f576SJeff Roberson } 27338355f576SJeff Roberson 27348355f576SJeff Roberson static void 27350095a784SJeff Roberson slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item) 27368355f576SJeff Roberson { 273785dcf349SGleb Smirnoff uint8_t freei; 2738099a0e58SBosko Milekic 27390095a784SJeff Roberson mtx_assert(&keg->uk_lock, MA_OWNED); 2740e20a199fSJeff Roberson MPASS(keg == slab->us_keg); 27418355f576SJeff Roberson 27428355f576SJeff Roberson /* Do we need to remove from any lists? */ 2743099a0e58SBosko Milekic if (slab->us_freecount+1 == keg->uk_ipers) { 27448355f576SJeff Roberson LIST_REMOVE(slab, us_link); 2745099a0e58SBosko Milekic LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link); 27468355f576SJeff Roberson } else if (slab->us_freecount == 0) { 27478355f576SJeff Roberson LIST_REMOVE(slab, us_link); 2748099a0e58SBosko Milekic LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link); 27498355f576SJeff Roberson } 27508355f576SJeff Roberson 2751ef72505eSJeff Roberson /* Slab management. 
*/ 2752ef72505eSJeff Roberson freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize; 2753ef72505eSJeff Roberson BIT_SET(SLAB_SETSIZE, freei, &slab->us_free); 27548355f576SJeff Roberson slab->us_freecount++; 27558355f576SJeff Roberson 2756ef72505eSJeff Roberson /* Keg statistics. */ 2757099a0e58SBosko Milekic keg->uk_free++; 27580095a784SJeff Roberson } 27590095a784SJeff Roberson 27600095a784SJeff Roberson static void 27610095a784SJeff Roberson zone_release(uma_zone_t zone, void **bucket, int cnt) 27620095a784SJeff Roberson { 27630095a784SJeff Roberson void *item; 27640095a784SJeff Roberson uma_slab_t slab; 27650095a784SJeff Roberson uma_keg_t keg; 27660095a784SJeff Roberson uint8_t *mem; 27670095a784SJeff Roberson int clearfull; 27680095a784SJeff Roberson int i; 27698355f576SJeff Roberson 2770e20a199fSJeff Roberson clearfull = 0; 27710095a784SJeff Roberson keg = zone_first_keg(zone); 2772af526374SJeff Roberson KEG_LOCK(keg); 27730095a784SJeff Roberson for (i = 0; i < cnt; i++) { 27740095a784SJeff Roberson item = bucket[i]; 27750095a784SJeff Roberson if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) { 27760095a784SJeff Roberson mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK)); 27770095a784SJeff Roberson if (zone->uz_flags & UMA_ZONE_HASH) { 27780095a784SJeff Roberson slab = hash_sfind(&keg->uk_hash, mem); 27790095a784SJeff Roberson } else { 27800095a784SJeff Roberson mem += keg->uk_pgoff; 27810095a784SJeff Roberson slab = (uma_slab_t)mem; 27820095a784SJeff Roberson } 27830095a784SJeff Roberson } else { 27840095a784SJeff Roberson slab = vtoslab((vm_offset_t)item); 27850095a784SJeff Roberson if (slab->us_keg != keg) { 27860095a784SJeff Roberson KEG_UNLOCK(keg); 27870095a784SJeff Roberson keg = slab->us_keg; 27880095a784SJeff Roberson KEG_LOCK(keg); 27890095a784SJeff Roberson } 27900095a784SJeff Roberson } 27910095a784SJeff Roberson slab_free_item(keg, slab, item); 2792099a0e58SBosko Milekic if (keg->uk_flags & UMA_ZFLAG_FULL) { 2793e20a199fSJeff Roberson if (keg->uk_pages < keg->uk_maxpages) { 2794099a0e58SBosko Milekic keg->uk_flags &= ~UMA_ZFLAG_FULL; 2795e20a199fSJeff Roberson clearfull = 1; 2796e20a199fSJeff Roberson } 2797af7f9b97SJeff Roberson 279877380291SMohan Srinivasan /* 2799ef72505eSJeff Roberson * We can handle one more allocation. Since we're 2800ef72505eSJeff Roberson * clearing ZFLAG_FULL, wake up all procs blocked 2801ef72505eSJeff Roberson * on pages. This should be uncommon, so keeping this 2802ef72505eSJeff Roberson * simple for now (rather than adding count of blocked 280377380291SMohan Srinivasan * threads etc). 280477380291SMohan Srinivasan */ 280577380291SMohan Srinivasan wakeup(keg); 2806af7f9b97SJeff Roberson } 28070095a784SJeff Roberson } 2808af526374SJeff Roberson KEG_UNLOCK(keg); 28090095a784SJeff Roberson if (clearfull) { 2810af526374SJeff Roberson ZONE_LOCK(zone); 2811e20a199fSJeff Roberson zone->uz_flags &= ~UMA_ZFLAG_FULL; 2812e20a199fSJeff Roberson wakeup(zone); 2813605cbd6aSJeff Roberson ZONE_UNLOCK(zone); 2814af526374SJeff Roberson } 2815ef72505eSJeff Roberson 28168355f576SJeff Roberson } 28178355f576SJeff Roberson 28180095a784SJeff Roberson /* 28190095a784SJeff Roberson * Frees a single item to any zone. 
28200095a784SJeff Roberson * 28210095a784SJeff Roberson * Arguments: 28220095a784SJeff Roberson * zone The zone to free to 28230095a784SJeff Roberson * item The item we're freeing 28240095a784SJeff Roberson * udata User supplied data for the dtor 28250095a784SJeff Roberson * skip Skip dtors and finis 28260095a784SJeff Roberson */ 28270095a784SJeff Roberson static void 28280095a784SJeff Roberson zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip) 28290095a784SJeff Roberson { 28300095a784SJeff Roberson 28310095a784SJeff Roberson #ifdef INVARIANTS 28320095a784SJeff Roberson if (skip == SKIP_NONE) { 28330095a784SJeff Roberson if (zone->uz_flags & UMA_ZONE_MALLOC) 28340095a784SJeff Roberson uma_dbg_free(zone, udata, item); 28350095a784SJeff Roberson else 28360095a784SJeff Roberson uma_dbg_free(zone, NULL, item); 28370095a784SJeff Roberson } 28380095a784SJeff Roberson #endif 28390095a784SJeff Roberson if (skip < SKIP_DTOR && zone->uz_dtor) 28400095a784SJeff Roberson zone->uz_dtor(item, zone->uz_size, udata); 28410095a784SJeff Roberson 28420095a784SJeff Roberson if (skip < SKIP_FINI && zone->uz_fini) 28430095a784SJeff Roberson zone->uz_fini(item, zone->uz_size); 28440095a784SJeff Roberson 28450095a784SJeff Roberson atomic_add_long(&zone->uz_frees, 1); 28460095a784SJeff Roberson zone->uz_release(zone->uz_arg, &item, 1); 28470095a784SJeff Roberson } 28480095a784SJeff Roberson 28498355f576SJeff Roberson /* See uma.h */ 28501c6cae97SLawrence Stewart int 2851736ee590SJeff Roberson uma_zone_set_max(uma_zone_t zone, int nitems) 2852736ee590SJeff Roberson { 2853099a0e58SBosko Milekic uma_keg_t keg; 2854099a0e58SBosko Milekic 2855e20a199fSJeff Roberson keg = zone_first_keg(zone); 28560095a784SJeff Roberson if (keg == NULL) 28570095a784SJeff Roberson return (0); 2858af526374SJeff Roberson KEG_LOCK(keg); 2859e20a199fSJeff Roberson keg->uk_maxpages = (nitems / keg->uk_ipers) * keg->uk_ppera; 2860099a0e58SBosko Milekic if (keg->uk_maxpages * keg->uk_ipers < nitems) 2861e20a199fSJeff Roberson keg->uk_maxpages += keg->uk_ppera; 286257223e99SAndriy Gapon nitems = (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers; 2863af526374SJeff Roberson KEG_UNLOCK(keg); 28641c6cae97SLawrence Stewart 28651c6cae97SLawrence Stewart return (nitems); 2866736ee590SJeff Roberson } 2867736ee590SJeff Roberson 2868736ee590SJeff Roberson /* See uma.h */ 2869e49471b0SAndre Oppermann int 2870e49471b0SAndre Oppermann uma_zone_get_max(uma_zone_t zone) 2871e49471b0SAndre Oppermann { 2872e49471b0SAndre Oppermann int nitems; 2873e49471b0SAndre Oppermann uma_keg_t keg; 2874e49471b0SAndre Oppermann 2875e49471b0SAndre Oppermann keg = zone_first_keg(zone); 28760095a784SJeff Roberson if (keg == NULL) 28770095a784SJeff Roberson return (0); 2878af526374SJeff Roberson KEG_LOCK(keg); 287957223e99SAndriy Gapon nitems = (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers; 2880af526374SJeff Roberson KEG_UNLOCK(keg); 2881e49471b0SAndre Oppermann 2882e49471b0SAndre Oppermann return (nitems); 2883e49471b0SAndre Oppermann } 2884e49471b0SAndre Oppermann 2885e49471b0SAndre Oppermann /* See uma.h */ 28862f891cd5SPawel Jakub Dawidek void 28872f891cd5SPawel Jakub Dawidek uma_zone_set_warning(uma_zone_t zone, const char *warning) 28882f891cd5SPawel Jakub Dawidek { 28892f891cd5SPawel Jakub Dawidek 28902f891cd5SPawel Jakub Dawidek ZONE_LOCK(zone); 28912f891cd5SPawel Jakub Dawidek zone->uz_warning = warning; 28922f891cd5SPawel Jakub Dawidek ZONE_UNLOCK(zone); 28932f891cd5SPawel Jakub Dawidek } 28942f891cd5SPawel Jakub Dawidek 
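/*
 * [Editor's worked example] uma_zone_set_max() above rounds the
 * requested limit up to a whole number of slabs, which is why callers
 * must use the returned value rather than the value they passed in.
 * With hypothetical keg geometry uk_ipers = 100 (items per slab) and
 * uk_ppera = 1 (pages per slab), a request for 1050 items gives:
 *
 *	uk_maxpages = (1050 / 100) * 1 = 10
 *	10 * 100 = 1000 < 1050, so uk_maxpages becomes 11
 *	effective limit = (11 / 1) * 100 = 1100 items
 */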
28952f891cd5SPawel Jakub Dawidek /* See uma.h */ 289654503a13SJonathan T. Looney void 289754503a13SJonathan T. Looney uma_zone_set_maxaction(uma_zone_t zone, uma_maxaction_t maxaction) 289854503a13SJonathan T. Looney { 289954503a13SJonathan T. Looney 290054503a13SJonathan T. Looney ZONE_LOCK(zone); 2901e60b2fcbSGleb Smirnoff TASK_INIT(&zone->uz_maxaction, 0, (task_fn_t *)maxaction, zone); 290254503a13SJonathan T. Looney ZONE_UNLOCK(zone); 290354503a13SJonathan T. Looney } 290454503a13SJonathan T. Looney 290554503a13SJonathan T. Looney /* See uma.h */ 2906c4ae7908SLawrence Stewart int 2907c4ae7908SLawrence Stewart uma_zone_get_cur(uma_zone_t zone) 2908c4ae7908SLawrence Stewart { 2909c4ae7908SLawrence Stewart int64_t nitems; 2910c4ae7908SLawrence Stewart u_int i; 2911c4ae7908SLawrence Stewart 2912c4ae7908SLawrence Stewart ZONE_LOCK(zone); 2913c4ae7908SLawrence Stewart nitems = zone->uz_allocs - zone->uz_frees; 2914c4ae7908SLawrence Stewart CPU_FOREACH(i) { 2915c4ae7908SLawrence Stewart /* 2916c4ae7908SLawrence Stewart * See the comment in sysctl_vm_zone_stats() regarding the 2917c4ae7908SLawrence Stewart * safety of accessing the per-cpu caches. With the zone lock 2918c4ae7908SLawrence Stewart * held, it is safe, but can potentially result in stale data. 2919c4ae7908SLawrence Stewart */ 2920c4ae7908SLawrence Stewart nitems += zone->uz_cpu[i].uc_allocs - 2921c4ae7908SLawrence Stewart zone->uz_cpu[i].uc_frees; 2922c4ae7908SLawrence Stewart } 2923c4ae7908SLawrence Stewart ZONE_UNLOCK(zone); 2924c4ae7908SLawrence Stewart 2925c4ae7908SLawrence Stewart return (nitems < 0 ? 0 : nitems); 2926c4ae7908SLawrence Stewart } 2927c4ae7908SLawrence Stewart 2928c4ae7908SLawrence Stewart /* See uma.h */ 2929736ee590SJeff Roberson void 2930099a0e58SBosko Milekic uma_zone_set_init(uma_zone_t zone, uma_init uminit) 2931099a0e58SBosko Milekic { 2932e20a199fSJeff Roberson uma_keg_t keg; 2933e20a199fSJeff Roberson 2934e20a199fSJeff Roberson keg = zone_first_keg(zone); 29350095a784SJeff Roberson KASSERT(keg != NULL, ("uma_zone_set_init: Invalid zone type")); 2936af526374SJeff Roberson KEG_LOCK(keg); 2937e20a199fSJeff Roberson KASSERT(keg->uk_pages == 0, 2938099a0e58SBosko Milekic ("uma_zone_set_init on non-empty keg")); 2939e20a199fSJeff Roberson keg->uk_init = uminit; 2940af526374SJeff Roberson KEG_UNLOCK(keg); 2941099a0e58SBosko Milekic } 2942099a0e58SBosko Milekic 2943099a0e58SBosko Milekic /* See uma.h */ 2944099a0e58SBosko Milekic void 2945099a0e58SBosko Milekic uma_zone_set_fini(uma_zone_t zone, uma_fini fini) 2946099a0e58SBosko Milekic { 2947e20a199fSJeff Roberson uma_keg_t keg; 2948e20a199fSJeff Roberson 2949e20a199fSJeff Roberson keg = zone_first_keg(zone); 29501d2c0c46SDmitry Chagin KASSERT(keg != NULL, ("uma_zone_set_fini: Invalid zone type")); 2951af526374SJeff Roberson KEG_LOCK(keg); 2952e20a199fSJeff Roberson KASSERT(keg->uk_pages == 0, 2953099a0e58SBosko Milekic ("uma_zone_set_fini on non-empty keg")); 2954e20a199fSJeff Roberson keg->uk_fini = fini; 2955af526374SJeff Roberson KEG_UNLOCK(keg); 2956099a0e58SBosko Milekic } 2957099a0e58SBosko Milekic 2958099a0e58SBosko Milekic /* See uma.h */ 2959099a0e58SBosko Milekic void 2960099a0e58SBosko Milekic uma_zone_set_zinit(uma_zone_t zone, uma_init zinit) 2961099a0e58SBosko Milekic { 2962af526374SJeff Roberson 2963099a0e58SBosko Milekic ZONE_LOCK(zone); 2964e20a199fSJeff Roberson KASSERT(zone_first_keg(zone)->uk_pages == 0, 2965099a0e58SBosko Milekic ("uma_zone_set_zinit on non-empty keg")); 2966099a0e58SBosko Milekic zone->uz_init = zinit; 
2967099a0e58SBosko Milekic ZONE_UNLOCK(zone); 2968099a0e58SBosko Milekic } 2969099a0e58SBosko Milekic 2970099a0e58SBosko Milekic /* See uma.h */ 2971099a0e58SBosko Milekic void 2972099a0e58SBosko Milekic uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini) 2973099a0e58SBosko Milekic { 2974af526374SJeff Roberson 2975099a0e58SBosko Milekic ZONE_LOCK(zone); 2976e20a199fSJeff Roberson KASSERT(zone_first_keg(zone)->uk_pages == 0, 2977099a0e58SBosko Milekic ("uma_zone_set_zfini on non-empty keg")); 2978099a0e58SBosko Milekic zone->uz_fini = zfini; 2979099a0e58SBosko Milekic ZONE_UNLOCK(zone); 2980099a0e58SBosko Milekic } 2981099a0e58SBosko Milekic 2982099a0e58SBosko Milekic /* See uma.h */ 2983b23f72e9SBrian Feldman /* XXX uk_freef is not actually used with the zone locked */ 2984099a0e58SBosko Milekic void 29858355f576SJeff Roberson uma_zone_set_freef(uma_zone_t zone, uma_free freef) 29868355f576SJeff Roberson { 29870095a784SJeff Roberson uma_keg_t keg; 2988e20a199fSJeff Roberson 29890095a784SJeff Roberson keg = zone_first_keg(zone); 29901d2c0c46SDmitry Chagin KASSERT(keg != NULL, ("uma_zone_set_freef: Invalid zone type")); 2991af526374SJeff Roberson KEG_LOCK(keg); 29920095a784SJeff Roberson keg->uk_freef = freef; 2993af526374SJeff Roberson KEG_UNLOCK(keg); 29948355f576SJeff Roberson } 29958355f576SJeff Roberson 29968355f576SJeff Roberson /* See uma.h */ 2997b23f72e9SBrian Feldman /* XXX uk_allocf is not actually used with the zone locked */ 29988355f576SJeff Roberson void 29998355f576SJeff Roberson uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf) 30008355f576SJeff Roberson { 3001e20a199fSJeff Roberson uma_keg_t keg; 3002e20a199fSJeff Roberson 3003e20a199fSJeff Roberson keg = zone_first_keg(zone); 3004af526374SJeff Roberson KEG_LOCK(keg); 3005e20a199fSJeff Roberson keg->uk_allocf = allocf; 3006af526374SJeff Roberson KEG_UNLOCK(keg); 30078355f576SJeff Roberson } 30088355f576SJeff Roberson 30098355f576SJeff Roberson /* See uma.h */ 30106fd34d6fSJeff Roberson void 30116fd34d6fSJeff Roberson uma_zone_reserve(uma_zone_t zone, int items) 30126fd34d6fSJeff Roberson { 30136fd34d6fSJeff Roberson uma_keg_t keg; 30146fd34d6fSJeff Roberson 30156fd34d6fSJeff Roberson keg = zone_first_keg(zone); 30166fd34d6fSJeff Roberson if (keg == NULL) 30176fd34d6fSJeff Roberson return; 30186fd34d6fSJeff Roberson KEG_LOCK(keg); 30196fd34d6fSJeff Roberson keg->uk_reserve = items; 30206fd34d6fSJeff Roberson KEG_UNLOCK(keg); 30216fd34d6fSJeff Roberson 30226fd34d6fSJeff Roberson return; 30236fd34d6fSJeff Roberson } 30246fd34d6fSJeff Roberson 30256fd34d6fSJeff Roberson /* See uma.h */ 30268355f576SJeff Roberson int 3027a4915c21SAttilio Rao uma_zone_reserve_kva(uma_zone_t zone, int count) 30288355f576SJeff Roberson { 3029099a0e58SBosko Milekic uma_keg_t keg; 30308355f576SJeff Roberson vm_offset_t kva; 30319ba30bcbSZbigniew Bodek u_int pages; 30328355f576SJeff Roberson 3033e20a199fSJeff Roberson keg = zone_first_keg(zone); 30340095a784SJeff Roberson if (keg == NULL) 30350095a784SJeff Roberson return (0); 3036099a0e58SBosko Milekic pages = count / keg->uk_ipers; 30378355f576SJeff Roberson 3038099a0e58SBosko Milekic if (pages * keg->uk_ipers < count) 30398355f576SJeff Roberson pages++; 304057223e99SAndriy Gapon pages *= keg->uk_ppera; 3041a553d4b8SJeff Roberson 3042a4915c21SAttilio Rao #ifdef UMA_MD_SMALL_ALLOC 3043a4915c21SAttilio Rao if (keg->uk_ppera > 1) { 3044a4915c21SAttilio Rao #else 3045a4915c21SAttilio Rao if (1) { 3046a4915c21SAttilio Rao #endif 304757223e99SAndriy Gapon kva = kva_alloc((vm_size_t)pages * 
PAGE_SIZE); 3048d1f42ac2SAlan Cox if (kva == 0) 30498355f576SJeff Roberson return (0); 3050a4915c21SAttilio Rao } else 3051a4915c21SAttilio Rao kva = 0; 3052af526374SJeff Roberson KEG_LOCK(keg); 3053099a0e58SBosko Milekic keg->uk_kva = kva; 3054a4915c21SAttilio Rao keg->uk_offset = 0; 3055099a0e58SBosko Milekic keg->uk_maxpages = pages; 3056a4915c21SAttilio Rao #ifdef UMA_MD_SMALL_ALLOC 3057a4915c21SAttilio Rao keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc; 3058a4915c21SAttilio Rao #else 3059a4915c21SAttilio Rao keg->uk_allocf = noobj_alloc; 3060a4915c21SAttilio Rao #endif 30616fd34d6fSJeff Roberson keg->uk_flags |= UMA_ZONE_NOFREE; 3062af526374SJeff Roberson KEG_UNLOCK(keg); 3063af526374SJeff Roberson 30648355f576SJeff Roberson return (1); 30658355f576SJeff Roberson } 30668355f576SJeff Roberson 30678355f576SJeff Roberson /* See uma.h */ 30688355f576SJeff Roberson void 30698355f576SJeff Roberson uma_prealloc(uma_zone_t zone, int items) 30708355f576SJeff Roberson { 30718355f576SJeff Roberson int slabs; 30728355f576SJeff Roberson uma_slab_t slab; 3073099a0e58SBosko Milekic uma_keg_t keg; 30748355f576SJeff Roberson 3075e20a199fSJeff Roberson keg = zone_first_keg(zone); 30760095a784SJeff Roberson if (keg == NULL) 30770095a784SJeff Roberson return; 3078af526374SJeff Roberson KEG_LOCK(keg); 3079099a0e58SBosko Milekic slabs = items / keg->uk_ipers; 3080099a0e58SBosko Milekic if (slabs * keg->uk_ipers < items) 30818355f576SJeff Roberson slabs++; 30828355f576SJeff Roberson while (slabs > 0) { 3083e20a199fSJeff Roberson slab = keg_alloc_slab(keg, zone, M_WAITOK); 3084e20a199fSJeff Roberson if (slab == NULL) 3085e20a199fSJeff Roberson break; 3086e20a199fSJeff Roberson MPASS(slab->us_keg == keg); 3087099a0e58SBosko Milekic LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link); 30888355f576SJeff Roberson slabs--; 30898355f576SJeff Roberson } 3090af526374SJeff Roberson KEG_UNLOCK(keg); 30918355f576SJeff Roberson } 30928355f576SJeff Roberson 30938355f576SJeff Roberson /* See uma.h */ 309444ec2b63SKonstantin Belousov static void 309544ec2b63SKonstantin Belousov uma_reclaim_locked(bool kmem_danger) 30968355f576SJeff Roberson { 309744ec2b63SKonstantin Belousov 3098*1431a748SGleb Smirnoff CTR0(KTR_UMA, "UMA: vm asked us to release pages!"); 309944ec2b63SKonstantin Belousov sx_assert(&uma_drain_lock, SA_XLOCKED); 310086bbae32SJeff Roberson bucket_enable(); 31018355f576SJeff Roberson zone_foreach(zone_drain); 310244ec2b63SKonstantin Belousov if (vm_page_count_min() || kmem_danger) { 3103a2de44abSAlexander Motin cache_drain_safe(NULL); 3104a2de44abSAlexander Motin zone_foreach(zone_drain); 3105a2de44abSAlexander Motin } 31068355f576SJeff Roberson /* 31078355f576SJeff Roberson * Some slabs may have been freed but this zone will be visited early, 31088355f576SJeff Roberson * so we visit it again so that we can free pages that are empty once other 31098355f576SJeff Roberson * zones are drained. We have to do the same for buckets.
31108355f576SJeff Roberson */ 31119643769aSJeff Roberson zone_drain(slabzone); 3112cae33c14SJeff Roberson bucket_zone_drain(); 311344ec2b63SKonstantin Belousov } 311444ec2b63SKonstantin Belousov 311544ec2b63SKonstantin Belousov void 311644ec2b63SKonstantin Belousov uma_reclaim(void) 311744ec2b63SKonstantin Belousov { 311844ec2b63SKonstantin Belousov 311944ec2b63SKonstantin Belousov sx_xlock(&uma_drain_lock); 312044ec2b63SKonstantin Belousov uma_reclaim_locked(false); 312195c4bf75SKonstantin Belousov sx_xunlock(&uma_drain_lock); 31228355f576SJeff Roberson } 31238355f576SJeff Roberson 312444ec2b63SKonstantin Belousov static int uma_reclaim_needed; 312544ec2b63SKonstantin Belousov 312644ec2b63SKonstantin Belousov void 312744ec2b63SKonstantin Belousov uma_reclaim_wakeup(void) 312844ec2b63SKonstantin Belousov { 312944ec2b63SKonstantin Belousov 313044ec2b63SKonstantin Belousov uma_reclaim_needed = 1; 313144ec2b63SKonstantin Belousov wakeup(&uma_reclaim_needed); 313244ec2b63SKonstantin Belousov } 313344ec2b63SKonstantin Belousov 313444ec2b63SKonstantin Belousov void 313544ec2b63SKonstantin Belousov uma_reclaim_worker(void *arg __unused) 313644ec2b63SKonstantin Belousov { 313744ec2b63SKonstantin Belousov 313844ec2b63SKonstantin Belousov sx_xlock(&uma_drain_lock); 313944ec2b63SKonstantin Belousov for (;;) { 314044ec2b63SKonstantin Belousov sx_sleep(&uma_reclaim_needed, &uma_drain_lock, PVM, 314144ec2b63SKonstantin Belousov "umarcl", 0); 314244ec2b63SKonstantin Belousov if (uma_reclaim_needed) { 314344ec2b63SKonstantin Belousov uma_reclaim_needed = 0; 31449b43bc27SAndriy Gapon sx_xunlock(&uma_drain_lock); 31459b43bc27SAndriy Gapon EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_KMEM); 31469b43bc27SAndriy Gapon sx_xlock(&uma_drain_lock); 314744ec2b63SKonstantin Belousov uma_reclaim_locked(true); 314844ec2b63SKonstantin Belousov } 314944ec2b63SKonstantin Belousov } 315044ec2b63SKonstantin Belousov } 315144ec2b63SKonstantin Belousov 3152663b416fSJohn Baldwin /* See uma.h */ 3153663b416fSJohn Baldwin int 3154663b416fSJohn Baldwin uma_zone_exhausted(uma_zone_t zone) 3155663b416fSJohn Baldwin { 3156663b416fSJohn Baldwin int full; 3157663b416fSJohn Baldwin 3158663b416fSJohn Baldwin ZONE_LOCK(zone); 3159e20a199fSJeff Roberson full = (zone->uz_flags & UMA_ZFLAG_FULL); 3160663b416fSJohn Baldwin ZONE_UNLOCK(zone); 3161663b416fSJohn Baldwin return (full); 3162663b416fSJohn Baldwin } 3163663b416fSJohn Baldwin 31646c125b8dSMohan Srinivasan int 31656c125b8dSMohan Srinivasan uma_zone_exhausted_nolock(uma_zone_t zone) 31666c125b8dSMohan Srinivasan { 3167e20a199fSJeff Roberson return (zone->uz_flags & UMA_ZFLAG_FULL); 31686c125b8dSMohan Srinivasan } 31696c125b8dSMohan Srinivasan 31708355f576SJeff Roberson void * 3171f2c2231eSRyan Stone uma_large_malloc(vm_size_t size, int wait) 31728355f576SJeff Roberson { 31738355f576SJeff Roberson void *mem; 31748355f576SJeff Roberson uma_slab_t slab; 317585dcf349SGleb Smirnoff uint8_t flags; 31768355f576SJeff Roberson 3177e20a199fSJeff Roberson slab = zone_alloc_item(slabzone, NULL, wait); 31788355f576SJeff Roberson if (slab == NULL) 31798355f576SJeff Roberson return (NULL); 31808355f576SJeff Roberson mem = page_alloc(NULL, size, &flags, wait); 31818355f576SJeff Roberson if (mem) { 318299571dc3SJeff Roberson vsetslab((vm_offset_t)mem, slab); 31838355f576SJeff Roberson slab->us_data = mem; 31848355f576SJeff Roberson slab->us_flags = flags | UMA_SLAB_MALLOC; 31858355f576SJeff Roberson slab->us_size = size; 31868355f576SJeff Roberson } else { 31870095a784SJeff Roberson zone_free_item(slabzone, 
slab, NULL, SKIP_NONE); 31888355f576SJeff Roberson } 31898355f576SJeff Roberson 31908355f576SJeff Roberson return (mem); 31918355f576SJeff Roberson } 31928355f576SJeff Roberson 31938355f576SJeff Roberson void 31948355f576SJeff Roberson uma_large_free(uma_slab_t slab) 31958355f576SJeff Roberson { 3196c325e866SKonstantin Belousov 31978355f576SJeff Roberson page_free(slab->us_data, slab->us_size, slab->us_flags); 31980095a784SJeff Roberson zone_free_item(slabzone, slab, NULL, SKIP_NONE); 31998355f576SJeff Roberson } 32008355f576SJeff Roberson 320148343a2fSGleb Smirnoff static void 320248343a2fSGleb Smirnoff uma_zero_item(void *item, uma_zone_t zone) 320348343a2fSGleb Smirnoff { 320496c85efbSNathan Whitehorn int i; 320548343a2fSGleb Smirnoff 320648343a2fSGleb Smirnoff if (zone->uz_flags & UMA_ZONE_PCPU) { 320796c85efbSNathan Whitehorn CPU_FOREACH(i) 320848343a2fSGleb Smirnoff bzero(zpcpu_get_cpu(item, i), zone->uz_size); 320948343a2fSGleb Smirnoff } else 321048343a2fSGleb Smirnoff bzero(item, zone->uz_size); 321148343a2fSGleb Smirnoff } 321248343a2fSGleb Smirnoff 32138355f576SJeff Roberson void 32148355f576SJeff Roberson uma_print_stats(void) 32158355f576SJeff Roberson { 32168355f576SJeff Roberson zone_foreach(uma_print_zone); 32178355f576SJeff Roberson } 32188355f576SJeff Roberson 3219504d5de3SJeff Roberson static void 3220504d5de3SJeff Roberson slab_print(uma_slab_t slab) 3221504d5de3SJeff Roberson { 3222ef72505eSJeff Roberson printf("slab: keg %p, data %p, freecount %d\n", 3223ef72505eSJeff Roberson slab->us_keg, slab->us_data, slab->us_freecount); 3224504d5de3SJeff Roberson } 3225504d5de3SJeff Roberson 3226504d5de3SJeff Roberson static void 3227504d5de3SJeff Roberson cache_print(uma_cache_t cache) 3228504d5de3SJeff Roberson { 3229504d5de3SJeff Roberson printf("alloc: %p(%d), free: %p(%d)\n", 3230504d5de3SJeff Roberson cache->uc_allocbucket, 3231504d5de3SJeff Roberson cache->uc_allocbucket?cache->uc_allocbucket->ub_cnt:0, 3232504d5de3SJeff Roberson cache->uc_freebucket, 3233504d5de3SJeff Roberson cache->uc_freebucket?cache->uc_freebucket->ub_cnt:0); 3234504d5de3SJeff Roberson } 3235504d5de3SJeff Roberson 3236e20a199fSJeff Roberson static void 3237e20a199fSJeff Roberson uma_print_keg(uma_keg_t keg) 32388355f576SJeff Roberson { 3239504d5de3SJeff Roberson uma_slab_t slab; 3240504d5de3SJeff Roberson 32410b80c1e4SEitan Adler printf("keg: %s(%p) size %d(%d) flags %#x ipers %d ppera %d " 3242e20a199fSJeff Roberson "out %d free %d limit %d\n", 3243e20a199fSJeff Roberson keg->uk_name, keg, keg->uk_size, keg->uk_rsize, keg->uk_flags, 3244099a0e58SBosko Milekic keg->uk_ipers, keg->uk_ppera, 324557223e99SAndriy Gapon (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free, 324657223e99SAndriy Gapon keg->uk_free, (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers); 3247504d5de3SJeff Roberson printf("Part slabs:\n"); 3248099a0e58SBosko Milekic LIST_FOREACH(slab, &keg->uk_part_slab, us_link) 3249504d5de3SJeff Roberson slab_print(slab); 3250504d5de3SJeff Roberson printf("Free slabs:\n"); 3251099a0e58SBosko Milekic LIST_FOREACH(slab, &keg->uk_free_slab, us_link) 3252504d5de3SJeff Roberson slab_print(slab); 3253504d5de3SJeff Roberson printf("Full slabs:\n"); 3254099a0e58SBosko Milekic LIST_FOREACH(slab, &keg->uk_full_slab, us_link) 3255504d5de3SJeff Roberson slab_print(slab); 3256e20a199fSJeff Roberson } 3257e20a199fSJeff Roberson 3258e20a199fSJeff Roberson void 3259e20a199fSJeff Roberson uma_print_zone(uma_zone_t zone) 3260e20a199fSJeff Roberson { 3261e20a199fSJeff Roberson uma_cache_t cache; 
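	/*
	 * [Editor's note] For UMA_ZONE_PCPU zones, uma_zero_item() above
	 * zeroes one copy of the item for every CPU; schematically (stride
	 * is a hypothetical name for the per-CPU spacing):
	 *
	 *	CPU_FOREACH(i)
	 *		bzero((char *)item + i * stride, zone->uz_size);
	 */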
3262e20a199fSJeff Roberson uma_klink_t kl; 3263e20a199fSJeff Roberson int i; 3264e20a199fSJeff Roberson 32650b80c1e4SEitan Adler printf("zone: %s(%p) size %d flags %#x\n", 3266e20a199fSJeff Roberson zone->uz_name, zone, zone->uz_size, zone->uz_flags); 3267e20a199fSJeff Roberson LIST_FOREACH(kl, &zone->uz_kegs, kl_link) 3268e20a199fSJeff Roberson uma_print_keg(kl->kl_keg); 32693aa6d94eSJohn Baldwin CPU_FOREACH(i) { 3270504d5de3SJeff Roberson cache = &zone->uz_cpu[i]; 3271504d5de3SJeff Roberson printf("CPU %d Cache:\n", i); 3272504d5de3SJeff Roberson cache_print(cache); 3273504d5de3SJeff Roberson } 32748355f576SJeff Roberson } 32758355f576SJeff Roberson 3276a0d4b0aeSRobert Watson #ifdef DDB 32778355f576SJeff Roberson /* 32787a52a97eSRobert Watson * Generate statistics across both the zone and its per-cpu caches. Return 32797a52a97eSRobert Watson * desired statistics if the pointer is non-NULL for that statistic. 32807a52a97eSRobert Watson * 32817a52a97eSRobert Watson * Note: does not update the zone statistics, as it can't safely clear the 32827a52a97eSRobert Watson * per-CPU cache statistic. 32837a52a97eSRobert Watson * 32847a52a97eSRobert Watson * XXXRW: Following the uc_allocbucket and uc_freebucket pointers here isn't 32857a52a97eSRobert Watson * safe from off-CPU; we should modify the caches to track this information 32867a52a97eSRobert Watson * directly so that we don't have to. 32877a52a97eSRobert Watson */ 32887a52a97eSRobert Watson static void 328985dcf349SGleb Smirnoff uma_zone_sumstat(uma_zone_t z, int *cachefreep, uint64_t *allocsp, 329085dcf349SGleb Smirnoff uint64_t *freesp, uint64_t *sleepsp) 32917a52a97eSRobert Watson { 32927a52a97eSRobert Watson uma_cache_t cache; 329385dcf349SGleb Smirnoff uint64_t allocs, frees, sleeps; 32947a52a97eSRobert Watson int cachefree, cpu; 32957a52a97eSRobert Watson 3296bf965959SSean Bruno allocs = frees = sleeps = 0; 32977a52a97eSRobert Watson cachefree = 0; 32983aa6d94eSJohn Baldwin CPU_FOREACH(cpu) { 32997a52a97eSRobert Watson cache = &z->uz_cpu[cpu]; 33007a52a97eSRobert Watson if (cache->uc_allocbucket != NULL) 33017a52a97eSRobert Watson cachefree += cache->uc_allocbucket->ub_cnt; 33027a52a97eSRobert Watson if (cache->uc_freebucket != NULL) 33037a52a97eSRobert Watson cachefree += cache->uc_freebucket->ub_cnt; 33047a52a97eSRobert Watson allocs += cache->uc_allocs; 33057a52a97eSRobert Watson frees += cache->uc_frees; 33067a52a97eSRobert Watson } 33077a52a97eSRobert Watson allocs += z->uz_allocs; 33087a52a97eSRobert Watson frees += z->uz_frees; 3309bf965959SSean Bruno sleeps += z->uz_sleeps; 33107a52a97eSRobert Watson if (cachefreep != NULL) 33117a52a97eSRobert Watson *cachefreep = cachefree; 33127a52a97eSRobert Watson if (allocsp != NULL) 33137a52a97eSRobert Watson *allocsp = allocs; 33147a52a97eSRobert Watson if (freesp != NULL) 33157a52a97eSRobert Watson *freesp = frees; 3316bf965959SSean Bruno if (sleepsp != NULL) 3317bf965959SSean Bruno *sleepsp = sleeps; 33187a52a97eSRobert Watson } 3319a0d4b0aeSRobert Watson #endif /* DDB */ 33207a52a97eSRobert Watson 33217a52a97eSRobert Watson static int 33227a52a97eSRobert Watson sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS) 33237a52a97eSRobert Watson { 33247a52a97eSRobert Watson uma_keg_t kz; 33257a52a97eSRobert Watson uma_zone_t z; 33267a52a97eSRobert Watson int count; 33277a52a97eSRobert Watson 33287a52a97eSRobert Watson count = 0; 3329111fbcd5SBryan Venteicher rw_rlock(&uma_rwlock); 33307a52a97eSRobert Watson LIST_FOREACH(kz, &uma_kegs, uk_link) { 33317a52a97eSRobert Watson LIST_FOREACH(z,
&kz->uk_zones, uz_link) 33327a52a97eSRobert Watson count++; 33337a52a97eSRobert Watson } 3334111fbcd5SBryan Venteicher rw_runlock(&uma_rwlock); 33357a52a97eSRobert Watson return (sysctl_handle_int(oidp, &count, 0, req)); 33367a52a97eSRobert Watson } 33377a52a97eSRobert Watson 33387a52a97eSRobert Watson static int 33397a52a97eSRobert Watson sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS) 33407a52a97eSRobert Watson { 33417a52a97eSRobert Watson struct uma_stream_header ush; 33427a52a97eSRobert Watson struct uma_type_header uth; 33437a52a97eSRobert Watson struct uma_percpu_stat ups; 33447a52a97eSRobert Watson uma_bucket_t bucket; 33457a52a97eSRobert Watson struct sbuf sbuf; 33467a52a97eSRobert Watson uma_cache_t cache; 3347e20a199fSJeff Roberson uma_klink_t kl; 33487a52a97eSRobert Watson uma_keg_t kz; 33497a52a97eSRobert Watson uma_zone_t z; 3350e20a199fSJeff Roberson uma_keg_t k; 33514e657159SMatthew D Fleming int count, error, i; 33527a52a97eSRobert Watson 335300f0e671SMatthew D Fleming error = sysctl_wire_old_buffer(req, 0); 335400f0e671SMatthew D Fleming if (error != 0) 335500f0e671SMatthew D Fleming return (error); 33564e657159SMatthew D Fleming sbuf_new_for_sysctl(&sbuf, NULL, 128, req); 33571eafc078SIan Lepore sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL); 33584e657159SMatthew D Fleming 3359404a593eSMatthew D Fleming count = 0; 3360111fbcd5SBryan Venteicher rw_rlock(&uma_rwlock); 33617a52a97eSRobert Watson LIST_FOREACH(kz, &uma_kegs, uk_link) { 33627a52a97eSRobert Watson LIST_FOREACH(z, &kz->uk_zones, uz_link) 33637a52a97eSRobert Watson count++; 33647a52a97eSRobert Watson } 33657a52a97eSRobert Watson 33667a52a97eSRobert Watson /* 33677a52a97eSRobert Watson * Insert stream header. 33687a52a97eSRobert Watson */ 33697a52a97eSRobert Watson bzero(&ush, sizeof(ush)); 33707a52a97eSRobert Watson ush.ush_version = UMA_STREAM_VERSION; 3371ab3a57c0SRobert Watson ush.ush_maxcpus = (mp_maxid + 1); 33727a52a97eSRobert Watson ush.ush_count = count; 33734e657159SMatthew D Fleming (void)sbuf_bcat(&sbuf, &ush, sizeof(ush)); 33747a52a97eSRobert Watson 33757a52a97eSRobert Watson LIST_FOREACH(kz, &uma_kegs, uk_link) { 33767a52a97eSRobert Watson LIST_FOREACH(z, &kz->uk_zones, uz_link) { 33777a52a97eSRobert Watson bzero(&uth, sizeof(uth)); 33787a52a97eSRobert Watson ZONE_LOCK(z); 3379cbbb4a00SRobert Watson strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME); 33807a52a97eSRobert Watson uth.uth_align = kz->uk_align; 33817a52a97eSRobert Watson uth.uth_size = kz->uk_size; 33827a52a97eSRobert Watson uth.uth_rsize = kz->uk_rsize; 3383e20a199fSJeff Roberson LIST_FOREACH(kl, &z->uz_kegs, kl_link) { 3384e20a199fSJeff Roberson k = kl->kl_keg; 3385e20a199fSJeff Roberson uth.uth_maxpages += k->uk_maxpages; 3386e20a199fSJeff Roberson uth.uth_pages += k->uk_pages; 3387e20a199fSJeff Roberson uth.uth_keg_free += k->uk_free; 3388e20a199fSJeff Roberson uth.uth_limit = (k->uk_maxpages / k->uk_ppera) 3389e20a199fSJeff Roberson * k->uk_ipers; 3390e20a199fSJeff Roberson } 3391cbbb4a00SRobert Watson 3392cbbb4a00SRobert Watson /* 3393cbbb4a00SRobert Watson * A zone is secondary if it is not the first entry 3394cbbb4a00SRobert Watson * on the keg's zone list.
3395cbbb4a00SRobert Watson */ 3396e20a199fSJeff Roberson if ((z->uz_flags & UMA_ZONE_SECONDARY) && 3397cbbb4a00SRobert Watson (LIST_FIRST(&kz->uk_zones) != z)) 3398cbbb4a00SRobert Watson uth.uth_zone_flags = UTH_ZONE_SECONDARY; 3399cbbb4a00SRobert Watson 3400fc03d22bSJeff Roberson LIST_FOREACH(bucket, &z->uz_buckets, ub_link) 34017a52a97eSRobert Watson uth.uth_zone_free += bucket->ub_cnt; 34027a52a97eSRobert Watson uth.uth_allocs = z->uz_allocs; 34037a52a97eSRobert Watson uth.uth_frees = z->uz_frees; 34042019094aSRobert Watson uth.uth_fails = z->uz_fails; 3405bf965959SSean Bruno uth.uth_sleeps = z->uz_sleeps; 34064e657159SMatthew D Fleming (void)sbuf_bcat(&sbuf, &uth, sizeof(uth)); 34077a52a97eSRobert Watson /* 34082450bbb8SRobert Watson * While it is not normally safe to access the cache 34092450bbb8SRobert Watson * bucket pointers while not on the CPU that owns the 34102450bbb8SRobert Watson * cache, we only allow the pointers to be exchanged 34112450bbb8SRobert Watson * without the zone lock held, not invalidated, so 34122450bbb8SRobert Watson * accept the possible race associated with bucket 34132450bbb8SRobert Watson * exchange during monitoring. 34147a52a97eSRobert Watson */ 3415ab3a57c0SRobert Watson for (i = 0; i < (mp_maxid + 1); i++) { 34167a52a97eSRobert Watson bzero(&ups, sizeof(ups)); 34177a52a97eSRobert Watson if (kz->uk_flags & UMA_ZFLAG_INTERNAL) 34187a52a97eSRobert Watson goto skip; 3419082dc776SRobert Watson if (CPU_ABSENT(i)) 3420082dc776SRobert Watson goto skip; 34217a52a97eSRobert Watson cache = &z->uz_cpu[i]; 34227a52a97eSRobert Watson if (cache->uc_allocbucket != NULL) 34237a52a97eSRobert Watson ups.ups_cache_free += 34247a52a97eSRobert Watson cache->uc_allocbucket->ub_cnt; 34257a52a97eSRobert Watson if (cache->uc_freebucket != NULL) 34267a52a97eSRobert Watson ups.ups_cache_free += 34277a52a97eSRobert Watson cache->uc_freebucket->ub_cnt; 34287a52a97eSRobert Watson ups.ups_allocs = cache->uc_allocs; 34297a52a97eSRobert Watson ups.ups_frees = cache->uc_frees; 34307a52a97eSRobert Watson skip: 34314e657159SMatthew D Fleming (void)sbuf_bcat(&sbuf, &ups, sizeof(ups)); 34327a52a97eSRobert Watson } 34332450bbb8SRobert Watson ZONE_UNLOCK(z); 34347a52a97eSRobert Watson } 34357a52a97eSRobert Watson } 3436111fbcd5SBryan Venteicher rw_runlock(&uma_rwlock); 34374e657159SMatthew D Fleming error = sbuf_finish(&sbuf); 34384e657159SMatthew D Fleming sbuf_delete(&sbuf); 34397a52a97eSRobert Watson return (error); 34407a52a97eSRobert Watson } 344148c5777eSRobert Watson 34420a5a3ccbSGleb Smirnoff int 34430a5a3ccbSGleb Smirnoff sysctl_handle_uma_zone_max(SYSCTL_HANDLER_ARGS) 34440a5a3ccbSGleb Smirnoff { 34450a5a3ccbSGleb Smirnoff uma_zone_t zone = *(uma_zone_t *)arg1; 344616be9f54SGleb Smirnoff int error, max; 34470a5a3ccbSGleb Smirnoff 344816be9f54SGleb Smirnoff max = uma_zone_get_max(zone); 34490a5a3ccbSGleb Smirnoff error = sysctl_handle_int(oidp, &max, 0, req); 34500a5a3ccbSGleb Smirnoff if (error || !req->newptr) 34510a5a3ccbSGleb Smirnoff return (error); 34520a5a3ccbSGleb Smirnoff 34530a5a3ccbSGleb Smirnoff uma_zone_set_max(zone, max); 34540a5a3ccbSGleb Smirnoff 34550a5a3ccbSGleb Smirnoff return (0); 34560a5a3ccbSGleb Smirnoff } 34570a5a3ccbSGleb Smirnoff 34580a5a3ccbSGleb Smirnoff int 34590a5a3ccbSGleb Smirnoff sysctl_handle_uma_zone_cur(SYSCTL_HANDLER_ARGS) 34600a5a3ccbSGleb Smirnoff { 34610a5a3ccbSGleb Smirnoff uma_zone_t zone = *(uma_zone_t *)arg1; 34620a5a3ccbSGleb Smirnoff int cur; 34630a5a3ccbSGleb Smirnoff 34640a5a3ccbSGleb Smirnoff cur = uma_zone_get_cur(zone); 
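	/*
	 * [Editor's note] sysctl_handle_uma_zone_max() above shows the
	 * usual read/write sysctl handler shape, sketched generically here
	 * (read_current and apply_new are hypothetical):
	 *
	 *	val = read_current();
	 *	error = sysctl_handle_int(oidp, &val, 0, req);
	 *	if (error || req->newptr == NULL)
	 *		return (error);
	 *	apply_new(val);
	 *	return (0);
	 *
	 * This handler is read-only, so it simply reports the count
	 * computed above.
	 */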
/*
 * Read-only counterpart of the handler above: report the zone's current
 * item count via uma_zone_get_cur().
 */
int
sysctl_handle_uma_zone_cur(SYSCTL_HANDLER_ARGS)
{
	uma_zone_t zone = *(uma_zone_t *)arg1;
	int cur;

	cur = uma_zone_get_cur(zone);
	return (sysctl_handle_int(oidp, &cur, 0, req));
}

#ifdef INVARIANTS
/*
 * Look up the slab backing the given item, taking the zone lock only
 * when a hash or page-offset lookup is required.
 */
static uma_slab_t
uma_dbg_getslab(uma_zone_t zone, void *item)
{
	uma_slab_t slab;
	uma_keg_t keg;
	uint8_t *mem;

	mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
	if (zone->uz_flags & UMA_ZONE_VTOSLAB) {
		slab = vtoslab((vm_offset_t)mem);
	} else {
		/*
		 * It is safe to return the slab here even though the
		 * zone is unlocked because the item's allocation state
		 * essentially holds a reference.
		 */
		ZONE_LOCK(zone);
		keg = LIST_FIRST(&zone->uz_kegs)->kl_keg;
		if (keg->uk_flags & UMA_ZONE_HASH)
			slab = hash_sfind(&keg->uk_hash, mem);
		else
			slab = (uma_slab_t)(mem + keg->uk_pgoff);
		ZONE_UNLOCK(zone);
	}

	return (slab);
}

/*
 * Set up the slab's freei data such that uma_dbg_free can function.
 */
static void
uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item)
{
	uma_keg_t keg;
	int freei;

	if (zone_first_keg(zone) == NULL)
		return;
	if (slab == NULL) {
		slab = uma_dbg_getslab(zone, item);
		if (slab == NULL)
			panic("uma: item %p did not belong to zone %s\n",
			    item, zone->uz_name);
	}
	keg = slab->us_keg;
	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;

	if (BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree))
		panic("Duplicate alloc of %p from zone %p(%s) slab %p(%d)\n",
		    item, zone, zone->uz_name, slab, freei);
	BIT_SET_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree);
}
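/*
 * Worked example of the index math used above and below (illustrative
 * numbers only): with uk_rsize == 256 and an item starting 0x300 bytes
 * into us_data, freei = 0x300 / 256 = 3.  The reverse check in
 * uma_dbg_free, 3 * 256 + us_data == item, then holds, so the address
 * is accepted as properly aligned.
 */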
/*
 * Verifies freed addresses.  Checks for alignment, valid slab membership
 * and duplicate frees.
 */
static void
uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
{
	uma_keg_t keg;
	int freei;

	if (zone_first_keg(zone) == NULL)
		return;
	if (slab == NULL) {
		slab = uma_dbg_getslab(zone, item);
		if (slab == NULL)
			panic("uma: Freed item %p did not belong to zone %s\n",
			    item, zone->uz_name);
	}
	keg = slab->us_keg;
	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;

	if (freei >= keg->uk_ipers)
		panic("Invalid free of %p from zone %p(%s) slab %p(%d)\n",
		    item, zone, zone->uz_name, slab, freei);

	if (((freei * keg->uk_rsize) + slab->us_data) != item)
		panic("Unaligned free of %p from zone %p(%s) slab %p(%d)\n",
		    item, zone, zone->uz_name, slab, freei);

	if (!BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree))
		panic("Duplicate free of %p from zone %p(%s) slab %p(%d)\n",
		    item, zone, zone->uz_name, slab, freei);

	BIT_CLR_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree);
}
#endif /* INVARIANTS */

#ifdef DDB
DB_SHOW_COMMAND(uma, db_show_uma)
{
	uint64_t allocs, frees, sleeps;
	uma_bucket_t bucket;
	uma_keg_t kz;
	uma_zone_t z;
	int cachefree;

	db_printf("%18s %8s %8s %8s %12s %8s %8s\n", "Zone", "Size", "Used",
	    "Free", "Requests", "Sleeps", "Bucket");
	LIST_FOREACH(kz, &uma_kegs, uk_link) {
		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
			if (kz->uk_flags & UMA_ZFLAG_INTERNAL) {
				allocs = z->uz_allocs;
				frees = z->uz_frees;
				sleeps = z->uz_sleeps;
				cachefree = 0;
			} else
				uma_zone_sumstat(z, &cachefree, &allocs,
				    &frees, &sleeps);
			if (!((z->uz_flags & UMA_ZONE_SECONDARY) &&
			    (LIST_FIRST(&kz->uk_zones) != z)))
				cachefree += kz->uk_free;
			LIST_FOREACH(bucket, &z->uz_buckets, ub_link)
				cachefree += bucket->ub_cnt;
			db_printf("%18s %8ju %8jd %8d %12ju %8ju %8u\n",
			    z->uz_name, (uintmax_t)kz->uk_size,
			    (intmax_t)(allocs - frees), cachefree,
			    (uintmax_t)allocs, sleeps, z->uz_count);
			if (db_pager_quit)
				return;
		}
	}
}
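/*
 * The command above is entered at the in-kernel debugger prompt as
 * "db> show uma".  In its output, "Used" is the difference between the
 * zone's allocation and free counts, while "Free" aggregates items
 * sitting in the keg, in the zone's buckets and, for ordinary zones,
 * in the per-CPU caches summed by uma_zone_sumstat().
 */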
DB_SHOW_COMMAND(umacache, db_show_umacache)
{
	uint64_t allocs, frees;
	uma_bucket_t bucket;
	uma_zone_t z;
	int cachefree;

	db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free",
	    "Requests", "Bucket");
	LIST_FOREACH(z, &uma_cachezones, uz_link) {
		uma_zone_sumstat(z, &cachefree, &allocs, &frees, NULL);
		LIST_FOREACH(bucket, &z->uz_buckets, ub_link)
			cachefree += bucket->ub_cnt;
		db_printf("%18s %8ju %8jd %8d %12ju %8u\n",
		    z->uz_name, (uintmax_t)z->uz_size,
		    (intmax_t)(allocs - frees), cachefree,
		    (uintmax_t)allocs, z->uz_count);
		if (db_pager_quit)
			return;
	}
}
#endif /* DDB */
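/*
 * The cache-zone variant above is invoked the same way from the debugger
 * prompt ("db> show umacache"); it walks uma_cachezones, which collects
 * zones not backed by a keg and therefore not visited by the keg walk in
 * db_show_uma.
 */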