/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson <jeff@FreeBSD.org>
 * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
 * Copyright (c) 2004-2006 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uma_core.c  Implementation of the Universal Memory allocator
 *
 * This allocator is intended to replace the multitude of similar object caches
 * in the standard FreeBSD kernel.  The intent is to be flexible as well as
 * efficient.  A primary design goal is to return unused memory to the rest of
 * the system.  This will make the system as a whole more flexible due to the
 * ability to move memory to subsystems which most need it instead of leaving
 * pools of reserved memory unused.
 *
 * The basic ideas stem from similar slab/zone based allocators whose
 * algorithms are well known.
 */

/*
 * TODO:
 *	- Improve memory usage for large allocations
 *	- Investigate cache size adjustments
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_param.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitset.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/limits.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

#include <ddb/ddb.h>

#ifdef DEBUG_MEMGUARD
#include <vm/memguard.h>
#endif

/*
 * This is the zone and keg from which all zones are spawned.  The idea is that
 * even the zone & keg heads are allocated from the allocator, so we use the
 * bss section to bootstrap us.
 */
static struct uma_keg masterkeg;
static struct uma_zone masterzone_k;
static struct uma_zone masterzone_z;
static uma_zone_t kegs = &masterzone_k;
static uma_zone_t zones = &masterzone_z;

/* This is the zone from which all uma_slab_t's are allocated. */
static uma_zone_t slabzone;

/*
 * The initial hash tables come out of this zone so they can be allocated
 * prior to malloc coming up.
 */
static uma_zone_t hashzone;

/* The boot-time adjusted value for cache line alignment. */
int uma_align_cache = 64 - 1;

static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");

/*
 * Are we allowed to allocate buckets?
 */
static int bucketdisable = 1;

/* Linked list of all kegs in the system */
static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);

/* Linked list of all cache-only zones in the system */
static LIST_HEAD(,uma_zone) uma_cachezones =
    LIST_HEAD_INITIALIZER(uma_cachezones);

/* This RW lock protects the keg list */
static struct rwlock_padalign __exclusive_cache_line uma_rwlock;

/*
 * Pointer to, and count of, the pool of pages preallocated at startup to
 * bootstrap UMA.  Early zones continue to draw from the pool until it is
 * depleted, so allocations may happen after boot; thus we need a mutex to
 * protect it.
 */
static char *bootmem;
static int boot_pages;
static struct mtx uma_boot_pages_mtx;

static struct sx uma_drain_lock;

/* kmem soft limit. */
static unsigned long uma_kmem_limit = LONG_MAX;
static volatile unsigned long uma_kmem_total;

/* Is the VM done starting up? */
static int booted = 0;
#define	UMA_STARTUP	1
#define	UMA_STARTUP2	2

/*
 * This is the handle used to schedule events that need to happen
 * outside of the allocation fast path.
 */
static struct callout uma_callout;
#define	UMA_TIMEOUT	20		/* Seconds for callout interval. */

/*
 * This structure is passed as the zone ctor arg so that I don't have to create
 * a special allocation function just for zones.
 */
struct uma_zctor_args {
	const char *name;
	size_t size;
	uma_ctor ctor;
	uma_dtor dtor;
	uma_init uminit;
	uma_fini fini;
	uma_import import;
	uma_release release;
	void *arg;
	uma_keg_t keg;
	int align;
	uint32_t flags;
};

struct uma_kctor_args {
	uma_zone_t zone;
	size_t size;
	uma_init uminit;
	uma_fini fini;
	int align;
	uint32_t flags;
};

struct uma_bucket_zone {
	uma_zone_t	ubz_zone;
	char		*ubz_name;
	int		ubz_entries;	/* Number of items it can hold. */
	int		ubz_maxsize;	/* Maximum allocation size per-item. */
};

/*
 * Compute the actual number of bucket entries to pack them in power
 * of two sizes for more efficient space utilization.
 */
#define	BUCKET_SIZE(n)						\
    (((sizeof(void *) * (n)) - sizeof(struct uma_bucket)) / sizeof(void *))

#define	BUCKET_MAX	BUCKET_SIZE(256)

struct uma_bucket_zone bucket_zones[] = {
	{ NULL, "4 Bucket", BUCKET_SIZE(4), 4096 },
	{ NULL, "6 Bucket", BUCKET_SIZE(6), 3072 },
	{ NULL, "8 Bucket", BUCKET_SIZE(8), 2048 },
	{ NULL, "12 Bucket", BUCKET_SIZE(12), 1536 },
	{ NULL, "16 Bucket", BUCKET_SIZE(16), 1024 },
	{ NULL, "32 Bucket", BUCKET_SIZE(32), 512 },
	{ NULL, "64 Bucket", BUCKET_SIZE(64), 256 },
	{ NULL, "128 Bucket", BUCKET_SIZE(128), 128 },
	{ NULL, "256 Bucket", BUCKET_SIZE(256), 64 },
	{ NULL, NULL, 0}
};
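
/*
 * Worked example of the packing above (illustrative only; the header size
 * is an assumption here, not taken from uma_int.h): with 8-byte pointers
 * and a 24-byte struct uma_bucket header,
 *
 *	BUCKET_SIZE(32) == ((8 * 32) - 24) / 8 == 29
 *
 * so the "32 Bucket" zone holds 29 items, and each bucket occupies
 * 24 + 29 * 8 == 256 bytes -- exactly 32 pointer-sized slots, a power of
 * two -- once bucket_init() below adds the rounded-up header back in.
 */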

/*
 * Flags and enumerations to be passed to internal functions.
 */
enum zfreeskip { SKIP_NONE = 0, SKIP_DTOR, SKIP_FINI };

/* Prototypes */

static void *noobj_alloc(uma_zone_t, vm_size_t, uint8_t *, int);
static void *page_alloc(uma_zone_t, vm_size_t, uint8_t *, int);
static void *startup_alloc(uma_zone_t, vm_size_t, uint8_t *, int);
static void page_free(void *, vm_size_t, uint8_t);
static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int);
static void cache_drain(uma_zone_t);
static void bucket_drain(uma_zone_t, uma_bucket_t);
static void bucket_cache_drain(uma_zone_t zone);
static int keg_ctor(void *, int, void *, int);
static void keg_dtor(void *, int, void *);
static int zone_ctor(void *, int, void *, int);
static void zone_dtor(void *, int, void *);
static int zero_init(void *, int, int);
static void keg_small_init(uma_keg_t keg);
static void keg_large_init(uma_keg_t keg);
static void zone_foreach(void (*zfunc)(uma_zone_t));
static void zone_timeout(uma_zone_t zone);
static int hash_alloc(struct uma_hash *);
static int hash_expand(struct uma_hash *, struct uma_hash *);
static void hash_free(struct uma_hash *hash);
static void uma_timeout(void *);
static void uma_startup3(void);
static void *zone_alloc_item(uma_zone_t, void *, int);
static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip);
static void bucket_enable(void);
static void bucket_init(void);
static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int);
static void bucket_free(uma_zone_t zone, uma_bucket_t, void *);
static void bucket_zone_drain(void);
static uma_bucket_t zone_alloc_bucket(uma_zone_t zone, void *, int flags);
static uma_slab_t zone_fetch_slab(uma_zone_t zone, uma_keg_t last, int flags);
static uma_slab_t zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int flags);
static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
static void slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item);
static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
    uma_fini fini, int align, uint32_t flags);
static int zone_import(uma_zone_t zone, void **bucket, int max, int flags);
static void zone_release(uma_zone_t zone, void **bucket, int cnt);
static void uma_zero_item(void *item, uma_zone_t zone);

void uma_print_zone(uma_zone_t);
void uma_print_stats(void);
static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);

#ifdef INVARIANTS
static void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item);
static void uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item);
#endif

SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);

SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLTYPE_INT,
    0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones");

SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
    0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");

static int zone_warnings = 1;
SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RWTUN, &zone_warnings, 0,
    "Warn when UMA zones become full");

/* Adjust bytes under management by UMA. */
static inline void
uma_total_dec(unsigned long size)
{

	atomic_subtract_long(&uma_kmem_total, size);
}

static inline void
uma_total_inc(unsigned long size)
{

	if (atomic_fetchadd_long(&uma_kmem_total, size) > uma_kmem_limit)
		uma_reclaim_wakeup();
}
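
/*
 * Illustrative trace of the accounting above (the numbers are made up):
 * atomic_fetchadd_long() returns the total *before* the addition, so with
 * uma_kmem_limit at 100MB and uma_kmem_total at 99MB, a 2MB slab
 * allocation compares 99MB <= 100MB, does not wake the reclaim thread,
 * and leaves the total at 101MB.  The next uma_total_inc() call, whatever
 * its size, sees 101MB > 100MB and calls uma_reclaim_wakeup().  The soft
 * limit therefore trips one allocation after it is first exceeded.
 */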

/*
 * This routine checks to see whether or not it's safe to enable buckets.
 */
static void
bucket_enable(void)
{
	bucketdisable = vm_page_count_min();
}

/*
 * Initialize bucket_zones, the array of zones of buckets of various sizes.
 *
 * For each zone, calculate the memory required for each bucket, consisting
 * of the header and an array of pointers.
 */
static void
bucket_init(void)
{
	struct uma_bucket_zone *ubz;
	int size;

	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) {
		size = roundup(sizeof(struct uma_bucket), sizeof(void *));
		size += sizeof(void *) * ubz->ubz_entries;
		ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
		    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
		    UMA_ZONE_MTXCLASS | UMA_ZFLAG_BUCKET);
	}
}

/*
 * Given a desired number of entries for a bucket, return the zone from which
 * to allocate the bucket.
 */
static struct uma_bucket_zone *
bucket_zone_lookup(int entries)
{
	struct uma_bucket_zone *ubz;

	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
		if (ubz->ubz_entries >= entries)
			return (ubz);
	ubz--;
	return (ubz);
}

static int
bucket_select(int size)
{
	struct uma_bucket_zone *ubz;

	ubz = &bucket_zones[0];
	if (size > ubz->ubz_maxsize)
		return MAX((ubz->ubz_maxsize * ubz->ubz_entries) / size, 1);

	for (; ubz->ubz_entries != 0; ubz++)
		if (ubz->ubz_maxsize < size)
			break;
	ubz--;
	return (ubz->ubz_entries);
}
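
/*
 * Worked trace of bucket_select() (illustrative): for a zone with 512-byte
 * items the scan stops at the "64 Bucket" entry (ubz_maxsize 256 < 512)
 * and steps back to "32 Bucket" (ubz_maxsize 512), whose ubz_entries is
 * returned.  For items larger than every ubz_maxsize, e.g. 8192 bytes,
 * the early return instead computes how many items still amount to
 * roughly one bucket's worth of memory, clamped to at least one entry.
 */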

static uma_bucket_t
bucket_alloc(uma_zone_t zone, void *udata, int flags)
{
	struct uma_bucket_zone *ubz;
	uma_bucket_t bucket;

	/*
	 * This is to stop us from allocating per cpu buckets while we're
	 * running out of vm.boot_pages.  Otherwise, we would exhaust the
	 * boot pages.  This also prevents us from allocating buckets in
	 * low memory situations.
	 */
	if (bucketdisable)
		return (NULL);
	/*
	 * To limit bucket recursion we store the original zone flags
	 * in a cookie passed via zalloc_arg/zfree_arg.  This allows the
	 * NOVM flag to persist even through deep recursions.  We also
	 * store ZFLAG_BUCKET once we have recursed attempting to allocate
	 * a bucket for a bucket zone so we do not allow infinite bucket
	 * recursion.  This cookie will even persist to frees of unused
	 * buckets via the allocation path or bucket allocations in the
	 * free path.
	 */
	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
		udata = (void *)(uintptr_t)zone->uz_flags;
	else {
		if ((uintptr_t)udata & UMA_ZFLAG_BUCKET)
			return (NULL);
		udata = (void *)((uintptr_t)udata | UMA_ZFLAG_BUCKET);
	}
	if ((uintptr_t)udata & UMA_ZFLAG_CACHEONLY)
		flags |= M_NOVM;
	ubz = bucket_zone_lookup(zone->uz_count);
	if (ubz->ubz_zone == zone && (ubz + 1)->ubz_entries != 0)
		ubz++;
	bucket = uma_zalloc_arg(ubz->ubz_zone, udata, flags);
	if (bucket) {
#ifdef INVARIANTS
		bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
#endif
		bucket->ub_cnt = 0;
		bucket->ub_entries = ubz->ubz_entries;
	}

	return (bucket);
}

static void
bucket_free(uma_zone_t zone, uma_bucket_t bucket, void *udata)
{
	struct uma_bucket_zone *ubz;

	KASSERT(bucket->ub_cnt == 0,
	    ("bucket_free: Freeing a non free bucket."));
	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
		udata = (void *)(uintptr_t)zone->uz_flags;
	ubz = bucket_zone_lookup(bucket->ub_entries);
	uma_zfree_arg(ubz->ubz_zone, bucket, udata);
}

static void
bucket_zone_drain(void)
{
	struct uma_bucket_zone *ubz;

	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
		zone_drain(ubz->ubz_zone);
}

static void
zone_log_warning(uma_zone_t zone)
{
	static const struct timeval warninterval = { 300, 0 };

	if (!zone_warnings || zone->uz_warning == NULL)
		return;

	if (ratecheck(&zone->uz_ratecheck, &warninterval))
		printf("[zone: %s] %s\n", zone->uz_name, zone->uz_warning);
}

static inline void
zone_maxaction(uma_zone_t zone)
{

	if (zone->uz_maxaction.ta_func != NULL)
		taskqueue_enqueue(taskqueue_thread, &zone->uz_maxaction);
}

static void
zone_foreach_keg(uma_zone_t zone, void (*kegfn)(uma_keg_t))
{
	uma_klink_t klink;

	LIST_FOREACH(klink, &zone->uz_kegs, kl_link)
		kegfn(klink->kl_keg);
}

/*
 * Routine called by timeout which is used to fire off some time interval
 * based calculations.  (stats, hash size, etc.)
 *
 * Arguments:
 *	arg   Unused
 *
 * Returns:
 *	Nothing
 */
static void
uma_timeout(void *unused)
{
	bucket_enable();
	zone_foreach(zone_timeout);

	/* Reschedule this event */
	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
}

/*
 * Routine to perform timeout driven calculations.  This expands the
 * hashes and does per cpu statistics aggregation.
 *
 * Returns nothing.
 */
static void
keg_timeout(uma_keg_t keg)
{

	KEG_LOCK(keg);
	/*
	 * Expand the keg hash table.
	 *
	 * This is done if the number of slabs is larger than the hash size.
	 * What I'm trying to do here is completely reduce collisions.  This
	 * may be a little aggressive.  Should I allow for two collisions max?
	 */
	if (keg->uk_flags & UMA_ZONE_HASH &&
	    keg->uk_pages / keg->uk_ppera >= keg->uk_hash.uh_hashsize) {
		struct uma_hash newhash;
		struct uma_hash oldhash;
		int ret;

		/*
		 * This is so involved because allocating and freeing
		 * while the keg lock is held will lead to deadlock.
		 * I have to do everything in stages and check for
		 * races.
		 */
		newhash = keg->uk_hash;
		KEG_UNLOCK(keg);
		ret = hash_alloc(&newhash);
		KEG_LOCK(keg);
		if (ret) {
			if (hash_expand(&keg->uk_hash, &newhash)) {
				oldhash = keg->uk_hash;
				keg->uk_hash = newhash;
			} else
				oldhash = newhash;

			KEG_UNLOCK(keg);
			hash_free(&oldhash);
			return;
		}
	}
	KEG_UNLOCK(keg);
}
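
/*
 * Illustrative timeline for the staged expansion above: the keg lock is
 * dropped around hash_alloc() because that allocation may itself recurse
 * into UMA.  While the lock is down the table can change, so the result
 * is re-checked under the lock: if hash_expand() finds the installed
 * table is already at least as large as the one we just built, it returns
 * 0 and the freshly allocated table (oldhash = newhash) is the one that
 * gets freed instead of installed.
 */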

static void
zone_timeout(uma_zone_t zone)
{

	zone_foreach_keg(zone, &keg_timeout);
}

/*
 * Allocate and zero fill the next sized hash table from the appropriate
 * backing store.
 *
 * Arguments:
 *	hash  A new hash structure with the old hash size in uh_hashsize
 *
 * Returns:
 *	1 on success and 0 on failure.
 */
static int
hash_alloc(struct uma_hash *hash)
{
	int oldsize;
	int alloc;

	oldsize = hash->uh_hashsize;

	/* We're just going to go to a power of two greater */
	if (oldsize) {
		hash->uh_hashsize = oldsize * 2;
		alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
		hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
		    M_UMAHASH, M_NOWAIT);
	} else {
		alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
		hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
		    M_WAITOK);
		hash->uh_hashsize = UMA_HASH_SIZE_INIT;
	}
	if (hash->uh_slab_hash) {
		bzero(hash->uh_slab_hash, alloc);
		hash->uh_hashmask = hash->uh_hashsize - 1;
		return (1);
	}

	return (0);
}

/*
 * Expands the hash table for HASH zones.  This is done from zone_timeout
 * to reduce collisions.  This must not be done in the regular allocation
 * path, otherwise, we can recurse on the vm while allocating pages.
 *
 * Arguments:
 *	oldhash  The hash you want to expand
 *	newhash  The hash structure for the new table
 *
 * Returns:
 *	1 on success and 0 on failure.
 */
static int
hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
{
	uma_slab_t slab;
	int hval;
	int i;

	if (!newhash->uh_slab_hash)
		return (0);

	if (oldhash->uh_hashsize >= newhash->uh_hashsize)
		return (0);

	/*
	 * I need to investigate hash algorithms for resizing without a
	 * full rehash.
	 */

	for (i = 0; i < oldhash->uh_hashsize; i++)
		while (!SLIST_EMPTY(&oldhash->uh_slab_hash[i])) {
			slab = SLIST_FIRST(&oldhash->uh_slab_hash[i]);
			SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[i], us_hlink);
			hval = UMA_HASH(newhash, slab->us_data);
			SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
			    slab, us_hlink);
		}

	return (1);
}

/*
 * Free the hash bucket to the appropriate backing store.
 *
 * Arguments:
 *	hash  The hash structure whose bucket array we're freeing
 *
 * Returns:
 *	Nothing
 */
static void
hash_free(struct uma_hash *hash)
{
	if (hash->uh_slab_hash == NULL)
		return;
	if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
		zone_free_item(hashzone, hash->uh_slab_hash, NULL, SKIP_NONE);
	else
		free(hash->uh_slab_hash, M_UMAHASH);
}

/*
 * Frees all outstanding items in a bucket
 *
 * Arguments:
 *	zone   The zone to free to, must be unlocked.
 *	bucket The free/alloc bucket with items, cpu queue must be locked.
 *
 * Returns:
 *	Nothing
 */
static void
bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
{
	int i;

	if (bucket == NULL)
		return;

	if (zone->uz_fini)
		for (i = 0; i < bucket->ub_cnt; i++)
			zone->uz_fini(bucket->ub_bucket[i], zone->uz_size);
	zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt);
	bucket->ub_cnt = 0;
}

/*
 * Drains the per cpu caches for a zone.
 *
 * NOTE: This may only be called while the zone is being torn down, and not
 * during normal operation.  This is necessary in order that we do not have
 * to migrate CPUs to drain the per-CPU caches.
 *
 * Arguments:
 *	zone     The zone to drain, must be unlocked.
 *
 * Returns:
 *	Nothing
 */
static void
cache_drain(uma_zone_t zone)
{
	uma_cache_t cache;
	int cpu;

	/*
	 * XXX: It is safe to not lock the per-CPU caches, because we're
	 * tearing down the zone anyway.  I.e., there will be no further use
	 * of the caches at this point.
	 *
	 * XXX: It would be good to be able to assert that the zone is being
	 * torn down to prevent improper use of cache_drain().
	 *
	 * XXX: We lock the zone before passing into bucket_cache_drain() as
	 * it is used elsewhere.  Should the tear-down path be made special
	 * there in some form?
	 */
	CPU_FOREACH(cpu) {
		cache = &zone->uz_cpu[cpu];
		bucket_drain(zone, cache->uc_allocbucket);
		bucket_drain(zone, cache->uc_freebucket);
		if (cache->uc_allocbucket != NULL)
			bucket_free(zone, cache->uc_allocbucket, NULL);
		if (cache->uc_freebucket != NULL)
			bucket_free(zone, cache->uc_freebucket, NULL);
		cache->uc_allocbucket = cache->uc_freebucket = NULL;
	}
	ZONE_LOCK(zone);
	bucket_cache_drain(zone);
	ZONE_UNLOCK(zone);
}

static void
cache_shrink(uma_zone_t zone)
{

	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
		return;

	ZONE_LOCK(zone);
	zone->uz_count = (zone->uz_count_min + zone->uz_count) / 2;
	ZONE_UNLOCK(zone);
}

static void
cache_drain_safe_cpu(uma_zone_t zone)
{
	uma_cache_t cache;
	uma_bucket_t b1, b2;

	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
		return;

	b1 = b2 = NULL;
	ZONE_LOCK(zone);
	critical_enter();
	cache = &zone->uz_cpu[curcpu];
	if (cache->uc_allocbucket) {
		if (cache->uc_allocbucket->ub_cnt != 0)
			LIST_INSERT_HEAD(&zone->uz_buckets,
			    cache->uc_allocbucket, ub_link);
		else
			b1 = cache->uc_allocbucket;
		cache->uc_allocbucket = NULL;
	}
	if (cache->uc_freebucket) {
		if (cache->uc_freebucket->ub_cnt != 0)
			LIST_INSERT_HEAD(&zone->uz_buckets,
			    cache->uc_freebucket, ub_link);
		else
			b2 = cache->uc_freebucket;
		cache->uc_freebucket = NULL;
	}
	critical_exit();
	ZONE_UNLOCK(zone);
	if (b1)
		bucket_free(zone, b1, NULL);
	if (b2)
		bucket_free(zone, b2, NULL);
}

/*
 * Safely drain the per-CPU caches of a zone, or of every zone when zone is
 * NULL, into the zone bucket lists.  This is an expensive call because it
 * needs to bind to all CPUs one by one and enter a critical section on
 * each of them in order to safely access their cache buckets.
 * The zone lock must not be held when calling this function.
 */
static void
cache_drain_safe(uma_zone_t zone)
{
	int cpu;

	/*
	 * Polite bucket size shrinking was not enough, shrink aggressively.
	 */
	if (zone)
		cache_shrink(zone);
	else
		zone_foreach(cache_shrink);

	CPU_FOREACH(cpu) {
		thread_lock(curthread);
		sched_bind(curthread, cpu);
		thread_unlock(curthread);

		if (zone)
			cache_drain_safe_cpu(zone);
		else
			zone_foreach(cache_drain_safe_cpu);
	}
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);
}

/*
 * Drain the cached buckets from a zone.  Expects a locked zone on entry.
 */
static void
bucket_cache_drain(uma_zone_t zone)
{
	uma_bucket_t bucket;

	/*
	 * Drain the bucket queues and free the buckets, we just keep two per
	 * cpu (alloc/free).
	 */
	while ((bucket = LIST_FIRST(&zone->uz_buckets)) != NULL) {
		LIST_REMOVE(bucket, ub_link);
		ZONE_UNLOCK(zone);
		bucket_drain(zone, bucket);
		bucket_free(zone, bucket, NULL);
		ZONE_LOCK(zone);
	}

	/*
	 * Shrink further bucket sizes.  The price of a single zone lock
	 * collision is probably lower than the price of a global cache drain.
	 */
	if (zone->uz_count > zone->uz_count_min)
		zone->uz_count--;
}
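
/*
 * Sketch of how the two shrink paths interact (values illustrative): a
 * zone whose uz_count has grown to 64, with uz_count_min at 4, is cut to
 * (4 + 64) / 2 == 34 by a single cache_shrink() call under memory
 * pressure, while each pass through bucket_cache_drain() above only steps
 * it down by one, paying one zone-lock round trip instead of the much
 * larger cost of a global cache drain.
 */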

static void
keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
{
	uint8_t *mem;
	int i;
	uint8_t flags;

	CTR4(KTR_UMA, "keg_free_slab keg %s(%p) slab %p, returning %d bytes",
	    keg->uk_name, keg, slab, PAGE_SIZE * keg->uk_ppera);

	mem = slab->us_data;
	flags = slab->us_flags;
	i = start;
	if (keg->uk_fini != NULL) {
		for (i--; i > -1; i--)
			keg->uk_fini(slab->us_data + (keg->uk_rsize * i),
			    keg->uk_size);
	}
	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
		zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
	keg->uk_freef(mem, PAGE_SIZE * keg->uk_ppera, flags);
	uma_total_dec(PAGE_SIZE * keg->uk_ppera);
}

/*
 * Frees pages from a keg back to the system.  This is done on demand from
 * the pageout daemon.
 *
 * Returns nothing.
 */
static void
keg_drain(uma_keg_t keg)
{
	struct slabhead freeslabs = { 0 };
	uma_slab_t slab, tmp;

	/*
	 * We don't want to take pages from statically allocated kegs at this
	 * time
	 */
	if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
		return;

	CTR3(KTR_UMA, "keg_drain %s(%p) free items: %u",
	    keg->uk_name, keg, keg->uk_free);
	KEG_LOCK(keg);
	if (keg->uk_free == 0)
		goto finished;

	LIST_FOREACH_SAFE(slab, &keg->uk_free_slab, us_link, tmp) {
		/* We have nowhere to free these to. */
		if (slab->us_flags & UMA_SLAB_BOOT)
			continue;

		LIST_REMOVE(slab, us_link);
		keg->uk_pages -= keg->uk_ppera;
		keg->uk_free -= keg->uk_ipers;

		if (keg->uk_flags & UMA_ZONE_HASH)
			UMA_HASH_REMOVE(&keg->uk_hash, slab, slab->us_data);

		SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);
	}
finished:
	KEG_UNLOCK(keg);

	while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
		SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
		keg_free_slab(keg, slab, keg->uk_ipers);
	}
}

static void
zone_drain_wait(uma_zone_t zone, int waitok)
{

	/*
	 * Set draining to interlock with zone_dtor() so we can release our
	 * locks as we go.  Only dtor() should do a WAITOK call since it
	 * is the only call that knows the structure will still be available
	 * when it wakes up.
	 */
	ZONE_LOCK(zone);
	while (zone->uz_flags & UMA_ZFLAG_DRAINING) {
		if (waitok == M_NOWAIT)
			goto out;
		msleep(zone, zone->uz_lockptr, PVM, "zonedrain", 1);
	}
	zone->uz_flags |= UMA_ZFLAG_DRAINING;
	bucket_cache_drain(zone);
	ZONE_UNLOCK(zone);
	/*
	 * The DRAINING flag protects us from being freed while
	 * we're running.  Normally the uma_rwlock would protect us but we
	 * must be able to release and acquire the right lock for each keg.
	 */
	zone_foreach_keg(zone, &keg_drain);
	ZONE_LOCK(zone);
	zone->uz_flags &= ~UMA_ZFLAG_DRAINING;
	wakeup(zone);
out:
	ZONE_UNLOCK(zone);
}

void
zone_drain(uma_zone_t zone)
{

	zone_drain_wait(zone, M_NOWAIT);
}

/*
 * Allocate a new slab for a keg.  This does not insert the slab onto a list.
 *
 * Arguments:
 *	wait  Shall we wait?
 *
 * Returns:
 *	The slab that was allocated or NULL if there is no memory and the
 *	caller specified M_NOWAIT.
 */
static uma_slab_t
keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int wait)
{
	uma_alloc allocf;
	uma_slab_t slab;
	unsigned long size;
	uint8_t *mem;
	uint8_t flags;
	int i;

	mtx_assert(&keg->uk_lock, MA_OWNED);
	slab = NULL;
	mem = NULL;

	allocf = keg->uk_allocf;
	KEG_UNLOCK(keg);
	size = keg->uk_ppera * PAGE_SIZE;

	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
		slab = zone_alloc_item(keg->uk_slabzone, NULL, wait);
		if (slab == NULL)
			goto out;
	}

	/*
	 * This reproduces the old vm_zone behavior of zero filling pages the
	 * first time they are added to a zone.
	 *
	 * Malloced items are zeroed in uma_zalloc.
	 */

	if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
		wait |= M_ZERO;
	else
		wait &= ~M_ZERO;

	if (keg->uk_flags & UMA_ZONE_NODUMP)
		wait |= M_NODUMP;

	/* zone is passed for legacy reasons. */
	mem = allocf(zone, size, &flags, wait);
	if (mem == NULL) {
		if (keg->uk_flags & UMA_ZONE_OFFPAGE)
			zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
		slab = NULL;
		goto out;
	}
	uma_total_inc(size);

	/* Point the slab into the allocated memory */
	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
		slab = (uma_slab_t)(mem + keg->uk_pgoff);

	if (keg->uk_flags & UMA_ZONE_VTOSLAB)
		for (i = 0; i < keg->uk_ppera; i++)
			vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);

	slab->us_keg = keg;
	slab->us_data = mem;
	slab->us_freecount = keg->uk_ipers;
	slab->us_flags = flags;
	BIT_FILL(SLAB_SETSIZE, &slab->us_free);
#ifdef INVARIANTS
	BIT_ZERO(SLAB_SETSIZE, &slab->us_debugfree);
#endif

	if (keg->uk_init != NULL) {
		for (i = 0; i < keg->uk_ipers; i++)
			if (keg->uk_init(slab->us_data + (keg->uk_rsize * i),
			    keg->uk_size, wait) != 0)
				break;
		if (i != keg->uk_ipers) {
			keg_free_slab(keg, slab, i);
			slab = NULL;
			goto out;
		}
	}
out:
	KEG_LOCK(keg);

	CTR3(KTR_UMA, "keg_alloc_slab: allocated slab %p for %s(%p)",
	    slab, keg->uk_name, keg);

	if (slab != NULL) {
		if (keg->uk_flags & UMA_ZONE_HASH)
			UMA_HASH_INSERT(&keg->uk_hash, slab, mem);

		keg->uk_pages += keg->uk_ppera;
		keg->uk_free += keg->uk_ipers;
	}

	return (slab);
}
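
/*
 * Illustrative layout produced by keg_alloc_slab() for a small keg (the
 * numbers are assumed, not derived from keg_small_init()): with a 4KB
 * page, uk_rsize == 256 and no UMA_ZONE_OFFPAGE, the page returned by the
 * allocf hook holds the items at mem + 0, mem + 256, ..., while the
 * struct uma_slab itself sits inside the same page at mem + uk_pgoff,
 * with us_data pointing back at mem.  OFFPAGE kegs instead draw the
 * header from uk_slabzone, as in the code above.
 */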

/*
 * This function is intended to be used early on in place of page_alloc() so
 * that we may use the boot time page cache to satisfy allocations before
 * the VM is ready.
 */
static void *
startup_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *pflag, int wait)
{
	uma_keg_t keg;
	void *mem;
	int pages;

	keg = zone_first_keg(zone);
	pages = howmany(bytes, PAGE_SIZE);
	KASSERT(pages > 0, ("startup_alloc can't reserve 0 pages\n"));

	/*
	 * Check our small startup cache to see if it has pages remaining.
	 */
	mtx_lock(&uma_boot_pages_mtx);
	if (pages <= boot_pages) {
		mem = bootmem;
		boot_pages -= pages;
		bootmem += pages * PAGE_SIZE;
		mtx_unlock(&uma_boot_pages_mtx);
		*pflag = UMA_SLAB_BOOT;
		return (mem);
	}
	mtx_unlock(&uma_boot_pages_mtx);
	if (booted < UMA_STARTUP2)
		panic("UMA: Increase vm.boot_pages");
	/*
	 * Now that we've booted, reset these users to their real allocator.
	 */
#ifdef UMA_MD_SMALL_ALLOC
	keg->uk_allocf = (keg->uk_ppera > 1) ? page_alloc : uma_small_alloc;
#else
	keg->uk_allocf = page_alloc;
#endif
	return keg->uk_allocf(zone, bytes, pflag, wait);
}

/*
 * Allocates a number of pages from the system
 *
 * Arguments:
 *	bytes  The number of bytes requested
 *	wait   Shall we wait?
 *
 * Returns:
 *	A pointer to the alloced memory or possibly
 *	NULL if M_NOWAIT is set.
 */
static void *
page_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *pflag, int wait)
{
	void *p;	/* Returned page */

	*pflag = UMA_SLAB_KERNEL;
	p = (void *) kmem_malloc(kernel_arena, bytes, wait);

	return (p);
}
11238355f576SJeff Roberson */ 11248355f576SJeff Roberson static void * 1125f2c2231eSRyan Stone noobj_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *flags, int wait) 11268355f576SJeff Roberson { 1127a4915c21SAttilio Rao TAILQ_HEAD(, vm_page) alloctail; 1128a4915c21SAttilio Rao u_long npages; 1129b245ac95SAlan Cox vm_offset_t retkva, zkva; 1130a4915c21SAttilio Rao vm_page_t p, p_next; 1131e20a199fSJeff Roberson uma_keg_t keg; 11328355f576SJeff Roberson 1133a4915c21SAttilio Rao TAILQ_INIT(&alloctail); 1134e20a199fSJeff Roberson keg = zone_first_keg(zone); 1135a4915c21SAttilio Rao 1136a4915c21SAttilio Rao npages = howmany(bytes, PAGE_SIZE); 1137a4915c21SAttilio Rao while (npages > 0) { 1138a4915c21SAttilio Rao p = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT | 11398d6fbbb8SJeff Roberson VM_ALLOC_WIRED | VM_ALLOC_NOOBJ | 1140772c8b67SKonstantin Belousov ((wait & M_WAITOK) != 0 ? VM_ALLOC_WAITOK : 1141772c8b67SKonstantin Belousov VM_ALLOC_NOWAIT)); 1142a4915c21SAttilio Rao if (p != NULL) { 1143a4915c21SAttilio Rao /* 1144a4915c21SAttilio Rao * Since the page does not belong to an object, its 1145a4915c21SAttilio Rao * listq is unused. 1146a4915c21SAttilio Rao */ 1147a4915c21SAttilio Rao TAILQ_INSERT_TAIL(&alloctail, p, listq); 1148a4915c21SAttilio Rao npages--; 1149a4915c21SAttilio Rao continue; 1150a4915c21SAttilio Rao } 11518355f576SJeff Roberson /* 1152a4915c21SAttilio Rao * Page allocation failed, free intermediate pages and 1153a4915c21SAttilio Rao * exit. 11548355f576SJeff Roberson */ 1155a4915c21SAttilio Rao TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) { 1156087a6132SAlan Cox vm_page_unwire(p, PQ_NONE); 1157b245ac95SAlan Cox vm_page_free(p); 1158b245ac95SAlan Cox } 1159a4915c21SAttilio Rao return (NULL); 1160b245ac95SAlan Cox } 11618355f576SJeff Roberson *flags = UMA_SLAB_PRIV; 1162a4915c21SAttilio Rao zkva = keg->uk_kva + 1163a4915c21SAttilio Rao atomic_fetchadd_long(&keg->uk_offset, round_page(bytes)); 1164a4915c21SAttilio Rao retkva = zkva; 1165a4915c21SAttilio Rao TAILQ_FOREACH(p, &alloctail, listq) { 1166a4915c21SAttilio Rao pmap_qenter(zkva, &p, 1); 1167a4915c21SAttilio Rao zkva += PAGE_SIZE; 1168a4915c21SAttilio Rao } 11698355f576SJeff Roberson 11708355f576SJeff Roberson return ((void *)retkva); 11718355f576SJeff Roberson } 11728355f576SJeff Roberson 11738355f576SJeff Roberson /* 11748355f576SJeff Roberson * Frees a number of pages to the system 11758355f576SJeff Roberson * 11768355f576SJeff Roberson * Arguments: 11778355f576SJeff Roberson * mem A pointer to the memory to be freed 11788355f576SJeff Roberson * size The size of the memory being freed 11798355f576SJeff Roberson * flags The original p->us_flags field 11808355f576SJeff Roberson * 11818355f576SJeff Roberson * Returns: 11828355f576SJeff Roberson * Nothing 11838355f576SJeff Roberson */ 11848355f576SJeff Roberson static void 1185f2c2231eSRyan Stone page_free(void *mem, vm_size_t size, uint8_t flags) 11868355f576SJeff Roberson { 11875df87b21SJeff Roberson struct vmem *vmem; 11883370c5bfSJeff Roberson 11892e47807cSJeff Roberson if (flags & UMA_SLAB_KERNEL) 11905df87b21SJeff Roberson vmem = kernel_arena; 11918355f576SJeff Roberson else 1192b5345ef1SJustin Hibbits panic("UMA: page_free used with invalid flags %x", flags); 11938355f576SJeff Roberson 11945df87b21SJeff Roberson kmem_free(vmem, (vm_offset_t)mem, size); 11958355f576SJeff Roberson } 11968355f576SJeff Roberson 11978355f576SJeff Roberson /* 11988355f576SJeff Roberson * Zero fill initializer 11998355f576SJeff Roberson * 12008355f576SJeff Roberson * Arguments/Returns 
follow uma_init specifications 12018355f576SJeff Roberson */ 1202b23f72e9SBrian Feldman static int 1203b23f72e9SBrian Feldman zero_init(void *mem, int size, int flags) 12048355f576SJeff Roberson { 12058355f576SJeff Roberson bzero(mem, size); 1206b23f72e9SBrian Feldman return (0); 12078355f576SJeff Roberson } 12088355f576SJeff Roberson 12098355f576SJeff Roberson /* 1210e20a199fSJeff Roberson * Finish creating a small uma keg. This calculates ipers, and the keg size. 12118355f576SJeff Roberson * 12128355f576SJeff Roberson * Arguments 1213e20a199fSJeff Roberson * keg The zone we should initialize 12148355f576SJeff Roberson * 12158355f576SJeff Roberson * Returns 12168355f576SJeff Roberson * Nothing 12178355f576SJeff Roberson */ 12188355f576SJeff Roberson static void 1219e20a199fSJeff Roberson keg_small_init(uma_keg_t keg) 12208355f576SJeff Roberson { 1221244f4554SBosko Milekic u_int rsize; 1222244f4554SBosko Milekic u_int memused; 1223244f4554SBosko Milekic u_int wastedspace; 1224244f4554SBosko Milekic u_int shsize; 1225a55ebb7cSAndriy Gapon u_int slabsize; 12268355f576SJeff Roberson 1227ad97af7eSGleb Smirnoff if (keg->uk_flags & UMA_ZONE_PCPU) { 122896c85efbSNathan Whitehorn u_int ncpus = (mp_maxid + 1) ? (mp_maxid + 1) : MAXCPU; 1229e28a647dSGleb Smirnoff 1230a55ebb7cSAndriy Gapon slabsize = sizeof(struct pcpu); 1231e28a647dSGleb Smirnoff keg->uk_ppera = howmany(ncpus * sizeof(struct pcpu), 1232ad97af7eSGleb Smirnoff PAGE_SIZE); 1233ad97af7eSGleb Smirnoff } else { 1234a55ebb7cSAndriy Gapon slabsize = UMA_SLAB_SIZE; 1235ad97af7eSGleb Smirnoff keg->uk_ppera = 1; 1236ad97af7eSGleb Smirnoff } 1237ad97af7eSGleb Smirnoff 1238ef72505eSJeff Roberson /* 1239ef72505eSJeff Roberson * Calculate the size of each allocation (rsize) according to 1240ef72505eSJeff Roberson * alignment. If the requested size is smaller than we have 1241ef72505eSJeff Roberson * allocation bits for we round it up. 1242ef72505eSJeff Roberson */ 1243099a0e58SBosko Milekic rsize = keg->uk_size; 1244a55ebb7cSAndriy Gapon if (rsize < slabsize / SLAB_SETSIZE) 1245a55ebb7cSAndriy Gapon rsize = slabsize / SLAB_SETSIZE; 1246099a0e58SBosko Milekic if (rsize & keg->uk_align) 1247099a0e58SBosko Milekic rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1); 1248099a0e58SBosko Milekic keg->uk_rsize = rsize; 1249ad97af7eSGleb Smirnoff 1250ad97af7eSGleb Smirnoff KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0 || 1251ad97af7eSGleb Smirnoff keg->uk_rsize < sizeof(struct pcpu), 1252ad97af7eSGleb Smirnoff ("%s: size %u too large", __func__, keg->uk_rsize)); 12538355f576SJeff Roberson 1254ef72505eSJeff Roberson if (keg->uk_flags & UMA_ZONE_OFFPAGE) 12552864dbbfSGleb Smirnoff shsize = 0; 1256ef72505eSJeff Roberson else 1257244f4554SBosko Milekic shsize = sizeof(struct uma_slab); 12588355f576SJeff Roberson 1259a55ebb7cSAndriy Gapon keg->uk_ipers = (slabsize - shsize) / rsize; 1260ef72505eSJeff Roberson KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE, 1261ad97af7eSGleb Smirnoff ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers)); 1262ad97af7eSGleb Smirnoff 1263244f4554SBosko Milekic memused = keg->uk_ipers * rsize + shsize; 1264a55ebb7cSAndriy Gapon wastedspace = slabsize - memused; 1265244f4554SBosko Milekic 126620e8e865SBosko Milekic /* 1267244f4554SBosko Milekic * We can't do OFFPAGE if we're internal or if we've been 126820e8e865SBosko Milekic * asked to not go to the VM for buckets. 
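	 *
	 * (A worked sizing example, with numbers assumed purely for
	 * illustration: given 4KB slabs, a 256-byte item and a 96-byte
	 * inline slab header, ipers = (4096 - 96) / 256 = 15, memused =
	 * 15 * 256 + 96 = 3936 and wastedspace = 160.  Taking
	 * UMA_MAX_WASTE to be 10, the OFFPAGE switch below fires only
	 * once wastedspace reaches 4096 / 10 = 409 bytes, so a keg like
	 * this keeps its header inline.)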
	 * Doing OFFPAGE in
12696fd34d6fSJeff Roberson 	 * either case could send us to the VM for slabs, which we do
12706fd34d6fSJeff Roberson 	 * not want when we're UMA_ZFLAG_CACHEONLY as a result of
12716fd34d6fSJeff Roberson 	 * UMA_ZONE_VM, which clearly forbids it.
127220e8e865SBosko Milekic 	 */
1273099a0e58SBosko Milekic 	if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
1274099a0e58SBosko Milekic 	    (keg->uk_flags & UMA_ZFLAG_CACHEONLY))
12758355f576SJeff Roberson 		return;
1276244f4554SBosko Milekic 
1277ef72505eSJeff Roberson 	/*
1278ef72505eSJeff Roberson 	 * See if using an OFFPAGE slab will limit our waste.  Only do
1279ef72505eSJeff Roberson 	 * this if it permits more items per-slab.
1280ef72505eSJeff Roberson 	 *
1281ef72505eSJeff Roberson 	 * XXX We could try growing slabsize to limit max waste as well.
1282ef72505eSJeff Roberson 	 *     Historically this was not done because the VM could not
1283ef72505eSJeff Roberson 	 *     efficiently handle contiguous allocations.
1284ef72505eSJeff Roberson 	 */
1285a55ebb7cSAndriy Gapon 	if ((wastedspace >= slabsize / UMA_MAX_WASTE) &&
1286a55ebb7cSAndriy Gapon 	    (keg->uk_ipers < (slabsize / keg->uk_rsize))) {
1287a55ebb7cSAndriy Gapon 		keg->uk_ipers = slabsize / keg->uk_rsize;
1288ef72505eSJeff Roberson 		KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
1289ad97af7eSGleb Smirnoff 		    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
12901431a748SGleb Smirnoff 		CTR6(KTR_UMA, "UMA decided we need offpage slab headers for "
12911431a748SGleb Smirnoff 		    "keg: %s(%p), calculated wastedspace = %d, "
1292244f4554SBosko Milekic 		    "maximum wasted space allowed = %d, "
1293244f4554SBosko Milekic 		    "calculated ipers = %d, "
12941431a748SGleb Smirnoff 		    "new wasted space = %d\n", keg->uk_name, keg, wastedspace,
1295a55ebb7cSAndriy Gapon 		    slabsize / UMA_MAX_WASTE, keg->uk_ipers,
1296a55ebb7cSAndriy Gapon 		    slabsize - keg->uk_ipers * keg->uk_rsize);
1297099a0e58SBosko Milekic 		keg->uk_flags |= UMA_ZONE_OFFPAGE;
12988355f576SJeff Roberson 	}
1299ad97af7eSGleb Smirnoff 
1300ad97af7eSGleb Smirnoff 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
1301ad97af7eSGleb Smirnoff 	    (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1302ad97af7eSGleb Smirnoff 		keg->uk_flags |= UMA_ZONE_HASH;
13038355f576SJeff Roberson }
13048355f576SJeff Roberson 
13058355f576SJeff Roberson /*
1306e20a199fSJeff Roberson  * Finish creating a large (> UMA_SLAB_SIZE) uma keg.  Just give in and do
13078355f576SJeff Roberson  * OFFPAGE for now.  When I can allow for more dynamic slab sizes this will be
13088355f576SJeff Roberson  * more complicated.
13098355f576SJeff Roberson * 13108355f576SJeff Roberson * Arguments 1311e20a199fSJeff Roberson * keg The keg we should initialize 13128355f576SJeff Roberson * 13138355f576SJeff Roberson * Returns 13148355f576SJeff Roberson * Nothing 13158355f576SJeff Roberson */ 13168355f576SJeff Roberson static void 1317e20a199fSJeff Roberson keg_large_init(uma_keg_t keg) 13188355f576SJeff Roberson { 1319cec48e00SAlexander Motin u_int shsize; 13208355f576SJeff Roberson 1321e20a199fSJeff Roberson KASSERT(keg != NULL, ("Keg is null in keg_large_init")); 1322099a0e58SBosko Milekic KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0, 1323e20a199fSJeff Roberson ("keg_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY keg")); 1324ad97af7eSGleb Smirnoff KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0, 1325ad97af7eSGleb Smirnoff ("%s: Cannot large-init a UMA_ZONE_PCPU keg", __func__)); 132620e8e865SBosko Milekic 1327ad97af7eSGleb Smirnoff keg->uk_ppera = howmany(keg->uk_size, PAGE_SIZE); 1328099a0e58SBosko Milekic keg->uk_ipers = 1; 1329e9a069d8SJohn Baldwin keg->uk_rsize = keg->uk_size; 1330e9a069d8SJohn Baldwin 1331cec48e00SAlexander Motin /* Check whether we have enough space to not do OFFPAGE. */ 1332cec48e00SAlexander Motin if ((keg->uk_flags & UMA_ZONE_OFFPAGE) == 0) { 1333cec48e00SAlexander Motin shsize = sizeof(struct uma_slab); 1334cec48e00SAlexander Motin if (shsize & UMA_ALIGN_PTR) 1335cec48e00SAlexander Motin shsize = (shsize & ~UMA_ALIGN_PTR) + 1336cec48e00SAlexander Motin (UMA_ALIGN_PTR + 1); 1337cec48e00SAlexander Motin 13382934eb8aSMark Johnston if (PAGE_SIZE * keg->uk_ppera - keg->uk_rsize < shsize) { 13392934eb8aSMark Johnston /* 13402934eb8aSMark Johnston * We can't do OFFPAGE if we're internal, in which case 13412934eb8aSMark Johnston * we need an extra page per allocation to contain the 13422934eb8aSMark Johnston * slab header. 13432934eb8aSMark Johnston */ 13442934eb8aSMark Johnston if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) == 0) 1345099a0e58SBosko Milekic keg->uk_flags |= UMA_ZONE_OFFPAGE; 13462934eb8aSMark Johnston else 13472934eb8aSMark Johnston keg->uk_ppera++; 13482934eb8aSMark Johnston } 1349cec48e00SAlexander Motin } 1350cec48e00SAlexander Motin 1351cec48e00SAlexander Motin if ((keg->uk_flags & UMA_ZONE_OFFPAGE) && 1352cec48e00SAlexander Motin (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0) 1353099a0e58SBosko Milekic keg->uk_flags |= UMA_ZONE_HASH; 13548355f576SJeff Roberson } 13558355f576SJeff Roberson 1356e20a199fSJeff Roberson static void 1357e20a199fSJeff Roberson keg_cachespread_init(uma_keg_t keg) 1358e20a199fSJeff Roberson { 1359e20a199fSJeff Roberson int alignsize; 1360e20a199fSJeff Roberson int trailer; 1361e20a199fSJeff Roberson int pages; 1362e20a199fSJeff Roberson int rsize; 1363e20a199fSJeff Roberson 1364ad97af7eSGleb Smirnoff KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0, 1365ad97af7eSGleb Smirnoff ("%s: Cannot cachespread-init a UMA_ZONE_PCPU keg", __func__)); 1366ad97af7eSGleb Smirnoff 1367e20a199fSJeff Roberson alignsize = keg->uk_align + 1; 1368e20a199fSJeff Roberson rsize = keg->uk_size; 1369e20a199fSJeff Roberson /* 1370e20a199fSJeff Roberson * We want one item to start on every align boundary in a page. To 1371e20a199fSJeff Roberson * do this we will span pages. We will also extend the item by the 1372e20a199fSJeff Roberson * size of align if it is an even multiple of align. Otherwise, it 1373e20a199fSJeff Roberson * would fall on the same boundary every time. 
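	 *
	 * Worked example (illustrative numbers only): a 128-byte item
	 * with 64-byte cache-line alignment is padded to rsize = 192,
	 * giving pages = (192 * (4096 / 64)) / 4096 = 3 and ipers =
	 * (3 * 4096 + 64) / 192 = 64; successive items then start at
	 * offsets 0, 192, 384, ..., cycling through every 64-byte
	 * boundary of a page over the course of the three pages.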
1374e20a199fSJeff Roberson */ 1375e20a199fSJeff Roberson if (rsize & keg->uk_align) 1376e20a199fSJeff Roberson rsize = (rsize & ~keg->uk_align) + alignsize; 1377e20a199fSJeff Roberson if ((rsize & alignsize) == 0) 1378e20a199fSJeff Roberson rsize += alignsize; 1379e20a199fSJeff Roberson trailer = rsize - keg->uk_size; 1380e20a199fSJeff Roberson pages = (rsize * (PAGE_SIZE / alignsize)) / PAGE_SIZE; 1381e20a199fSJeff Roberson pages = MIN(pages, (128 * 1024) / PAGE_SIZE); 1382e20a199fSJeff Roberson keg->uk_rsize = rsize; 1383e20a199fSJeff Roberson keg->uk_ppera = pages; 1384e20a199fSJeff Roberson keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize; 1385e20a199fSJeff Roberson keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB; 13862367b4ddSDimitry Andric KASSERT(keg->uk_ipers <= SLAB_SETSIZE, 138742321809SGleb Smirnoff ("%s: keg->uk_ipers too high(%d) increase max_ipers", __func__, 1388e20a199fSJeff Roberson keg->uk_ipers)); 1389e20a199fSJeff Roberson } 1390e20a199fSJeff Roberson 13918355f576SJeff Roberson /* 1392099a0e58SBosko Milekic * Keg header ctor. This initializes all fields, locks, etc. And inserts 1393099a0e58SBosko Milekic * the keg onto the global keg list. 13948355f576SJeff Roberson * 13958355f576SJeff Roberson * Arguments/Returns follow uma_ctor specifications 1396099a0e58SBosko Milekic * udata Actually uma_kctor_args 1397099a0e58SBosko Milekic */ 1398b23f72e9SBrian Feldman static int 1399b23f72e9SBrian Feldman keg_ctor(void *mem, int size, void *udata, int flags) 1400099a0e58SBosko Milekic { 1401099a0e58SBosko Milekic struct uma_kctor_args *arg = udata; 1402099a0e58SBosko Milekic uma_keg_t keg = mem; 1403099a0e58SBosko Milekic uma_zone_t zone; 1404099a0e58SBosko Milekic 1405099a0e58SBosko Milekic bzero(keg, size); 1406099a0e58SBosko Milekic keg->uk_size = arg->size; 1407099a0e58SBosko Milekic keg->uk_init = arg->uminit; 1408099a0e58SBosko Milekic keg->uk_fini = arg->fini; 1409099a0e58SBosko Milekic keg->uk_align = arg->align; 1410099a0e58SBosko Milekic keg->uk_free = 0; 14116fd34d6fSJeff Roberson keg->uk_reserve = 0; 1412099a0e58SBosko Milekic keg->uk_pages = 0; 1413099a0e58SBosko Milekic keg->uk_flags = arg->flags; 1414099a0e58SBosko Milekic keg->uk_slabzone = NULL; 1415099a0e58SBosko Milekic 1416099a0e58SBosko Milekic /* 1417099a0e58SBosko Milekic * The master zone is passed to us at keg-creation time. 
1418099a0e58SBosko Milekic */ 1419099a0e58SBosko Milekic zone = arg->zone; 1420e20a199fSJeff Roberson keg->uk_name = zone->uz_name; 1421099a0e58SBosko Milekic 1422099a0e58SBosko Milekic if (arg->flags & UMA_ZONE_VM) 1423099a0e58SBosko Milekic keg->uk_flags |= UMA_ZFLAG_CACHEONLY; 1424099a0e58SBosko Milekic 1425099a0e58SBosko Milekic if (arg->flags & UMA_ZONE_ZINIT) 1426099a0e58SBosko Milekic keg->uk_init = zero_init; 1427099a0e58SBosko Milekic 1428cfcae3f8SGleb Smirnoff if (arg->flags & UMA_ZONE_MALLOC) 1429e20a199fSJeff Roberson keg->uk_flags |= UMA_ZONE_VTOSLAB; 1430e20a199fSJeff Roberson 1431ad97af7eSGleb Smirnoff if (arg->flags & UMA_ZONE_PCPU) 1432ad97af7eSGleb Smirnoff #ifdef SMP 1433ad97af7eSGleb Smirnoff keg->uk_flags |= UMA_ZONE_OFFPAGE; 1434ad97af7eSGleb Smirnoff #else 1435ad97af7eSGleb Smirnoff keg->uk_flags &= ~UMA_ZONE_PCPU; 1436ad97af7eSGleb Smirnoff #endif 1437ad97af7eSGleb Smirnoff 1438ef72505eSJeff Roberson if (keg->uk_flags & UMA_ZONE_CACHESPREAD) { 1439e20a199fSJeff Roberson keg_cachespread_init(keg); 1440244f4554SBosko Milekic } else { 1441ef72505eSJeff Roberson if (keg->uk_size > (UMA_SLAB_SIZE - sizeof(struct uma_slab))) 1442e20a199fSJeff Roberson keg_large_init(keg); 1443244f4554SBosko Milekic else 1444e20a199fSJeff Roberson keg_small_init(keg); 1445244f4554SBosko Milekic } 1446099a0e58SBosko Milekic 1447cfcae3f8SGleb Smirnoff if (keg->uk_flags & UMA_ZONE_OFFPAGE) 1448099a0e58SBosko Milekic keg->uk_slabzone = slabzone; 1449099a0e58SBosko Milekic 1450099a0e58SBosko Milekic /* 1451099a0e58SBosko Milekic * If we haven't booted yet we need allocations to go through the 1452099a0e58SBosko Milekic * startup cache until the vm is ready. 1453099a0e58SBosko Milekic */ 14548cd02d00SAlan Cox if (booted < UMA_STARTUP2) 14558cd02d00SAlan Cox keg->uk_allocf = startup_alloc; 145677e19437SGleb Smirnoff #ifdef UMA_MD_SMALL_ALLOC 145777e19437SGleb Smirnoff else if (keg->uk_ppera == 1) 145877e19437SGleb Smirnoff keg->uk_allocf = uma_small_alloc; 14598cd02d00SAlan Cox #endif 146077e19437SGleb Smirnoff else 146177e19437SGleb Smirnoff keg->uk_allocf = page_alloc; 146277e19437SGleb Smirnoff #ifdef UMA_MD_SMALL_ALLOC 146377e19437SGleb Smirnoff if (keg->uk_ppera == 1) 146477e19437SGleb Smirnoff keg->uk_freef = uma_small_free; 146577e19437SGleb Smirnoff else 146677e19437SGleb Smirnoff #endif 146777e19437SGleb Smirnoff keg->uk_freef = page_free; 1468099a0e58SBosko Milekic 1469099a0e58SBosko Milekic /* 1470af526374SJeff Roberson * Initialize keg's lock 1471099a0e58SBosko Milekic */ 1472af526374SJeff Roberson KEG_LOCK_INIT(keg, (arg->flags & UMA_ZONE_MTXCLASS)); 1473099a0e58SBosko Milekic 1474099a0e58SBosko Milekic /* 1475099a0e58SBosko Milekic * If we're putting the slab header in the actual page we need to 1476099a0e58SBosko Milekic * figure out where in each page it goes. This calculates a right 1477099a0e58SBosko Milekic * justified offset into the memory on an ALIGN_PTR boundary. 
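	 *
	 * For instance, with sizes assumed purely for illustration: a
	 * 90-byte slab header rounds up to 96 under 8-byte UMA_ALIGN_PTR,
	 * so a single-page slab gets uk_pgoff = 4096 - 96 = 4000 and the
	 * header occupies the last 96 bytes of the page.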
1478099a0e58SBosko Milekic */ 1479099a0e58SBosko Milekic if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) { 1480244f4554SBosko Milekic u_int totsize; 1481099a0e58SBosko Milekic 1482099a0e58SBosko Milekic /* Size of the slab struct and free list */ 1483ef72505eSJeff Roberson totsize = sizeof(struct uma_slab); 1484ef72505eSJeff Roberson 1485099a0e58SBosko Milekic if (totsize & UMA_ALIGN_PTR) 1486099a0e58SBosko Milekic totsize = (totsize & ~UMA_ALIGN_PTR) + 1487099a0e58SBosko Milekic (UMA_ALIGN_PTR + 1); 1488ad97af7eSGleb Smirnoff keg->uk_pgoff = (PAGE_SIZE * keg->uk_ppera) - totsize; 1489244f4554SBosko Milekic 1490244f4554SBosko Milekic /* 1491244f4554SBosko Milekic * The only way the following is possible is if with our 1492244f4554SBosko Milekic * UMA_ALIGN_PTR adjustments we are now bigger than 1493244f4554SBosko Milekic * UMA_SLAB_SIZE. I haven't checked whether this is 1494244f4554SBosko Milekic * mathematically possible for all cases, so we make 1495244f4554SBosko Milekic * sure here anyway. 1496244f4554SBosko Milekic */ 1497ef72505eSJeff Roberson totsize = keg->uk_pgoff + sizeof(struct uma_slab); 1498ad97af7eSGleb Smirnoff if (totsize > PAGE_SIZE * keg->uk_ppera) { 1499099a0e58SBosko Milekic printf("zone %s ipers %d rsize %d size %d\n", 1500099a0e58SBosko Milekic zone->uz_name, keg->uk_ipers, keg->uk_rsize, 1501099a0e58SBosko Milekic keg->uk_size); 1502aea6e893SAlan Cox panic("UMA slab won't fit."); 1503099a0e58SBosko Milekic } 1504099a0e58SBosko Milekic } 1505099a0e58SBosko Milekic 1506099a0e58SBosko Milekic if (keg->uk_flags & UMA_ZONE_HASH) 1507099a0e58SBosko Milekic hash_alloc(&keg->uk_hash); 1508099a0e58SBosko Milekic 15091431a748SGleb Smirnoff CTR5(KTR_UMA, "keg_ctor %p zone %s(%p) out %d free %d\n", 15101431a748SGleb Smirnoff keg, zone->uz_name, zone, 151157223e99SAndriy Gapon (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free, 151257223e99SAndriy Gapon keg->uk_free); 1513099a0e58SBosko Milekic 1514099a0e58SBosko Milekic LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link); 1515099a0e58SBosko Milekic 1516111fbcd5SBryan Venteicher rw_wlock(&uma_rwlock); 1517099a0e58SBosko Milekic LIST_INSERT_HEAD(&uma_kegs, keg, uk_link); 1518111fbcd5SBryan Venteicher rw_wunlock(&uma_rwlock); 1519b23f72e9SBrian Feldman return (0); 1520099a0e58SBosko Milekic } 1521099a0e58SBosko Milekic 1522099a0e58SBosko Milekic /* 1523099a0e58SBosko Milekic * Zone header ctor. This initializes all fields, locks, etc. 
1524099a0e58SBosko Milekic * 1525099a0e58SBosko Milekic * Arguments/Returns follow uma_ctor specifications 1526099a0e58SBosko Milekic * udata Actually uma_zctor_args 15278355f576SJeff Roberson */ 1528b23f72e9SBrian Feldman static int 1529b23f72e9SBrian Feldman zone_ctor(void *mem, int size, void *udata, int flags) 15308355f576SJeff Roberson { 15318355f576SJeff Roberson struct uma_zctor_args *arg = udata; 15328355f576SJeff Roberson uma_zone_t zone = mem; 1533099a0e58SBosko Milekic uma_zone_t z; 1534099a0e58SBosko Milekic uma_keg_t keg; 15358355f576SJeff Roberson 15368355f576SJeff Roberson bzero(zone, size); 15378355f576SJeff Roberson zone->uz_name = arg->name; 15388355f576SJeff Roberson zone->uz_ctor = arg->ctor; 15398355f576SJeff Roberson zone->uz_dtor = arg->dtor; 1540e20a199fSJeff Roberson zone->uz_slab = zone_fetch_slab; 1541099a0e58SBosko Milekic zone->uz_init = NULL; 1542099a0e58SBosko Milekic zone->uz_fini = NULL; 1543099a0e58SBosko Milekic zone->uz_allocs = 0; 1544773df9abSRobert Watson zone->uz_frees = 0; 15452019094aSRobert Watson zone->uz_fails = 0; 1546bf965959SSean Bruno zone->uz_sleeps = 0; 1547fc03d22bSJeff Roberson zone->uz_count = 0; 1548ace66b56SAlexander Motin zone->uz_count_min = 0; 1549e20a199fSJeff Roberson zone->uz_flags = 0; 15502f891cd5SPawel Jakub Dawidek zone->uz_warning = NULL; 15512f891cd5SPawel Jakub Dawidek timevalclear(&zone->uz_ratecheck); 1552e20a199fSJeff Roberson keg = arg->keg; 1553099a0e58SBosko Milekic 1554af526374SJeff Roberson ZONE_LOCK_INIT(zone, (arg->flags & UMA_ZONE_MTXCLASS)); 1555af526374SJeff Roberson 15560095a784SJeff Roberson /* 15570095a784SJeff Roberson * This is a pure cache zone, no kegs. 15580095a784SJeff Roberson */ 15590095a784SJeff Roberson if (arg->import) { 15606fd34d6fSJeff Roberson if (arg->flags & UMA_ZONE_VM) 15616fd34d6fSJeff Roberson arg->flags |= UMA_ZFLAG_CACHEONLY; 15626fd34d6fSJeff Roberson zone->uz_flags = arg->flags; 1563af526374SJeff Roberson zone->uz_size = arg->size; 15640095a784SJeff Roberson zone->uz_import = arg->import; 15650095a784SJeff Roberson zone->uz_release = arg->release; 15660095a784SJeff Roberson zone->uz_arg = arg->arg; 1567af526374SJeff Roberson zone->uz_lockptr = &zone->uz_lock; 1568111fbcd5SBryan Venteicher rw_wlock(&uma_rwlock); 156903175483SAlexander Motin LIST_INSERT_HEAD(&uma_cachezones, zone, uz_link); 1570111fbcd5SBryan Venteicher rw_wunlock(&uma_rwlock); 1571af526374SJeff Roberson goto out; 15720095a784SJeff Roberson } 15730095a784SJeff Roberson 15740095a784SJeff Roberson /* 15750095a784SJeff Roberson * Use the regular zone/keg/slab allocator. 
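	 *
	 * zone_import pulls fresh items out of this zone's keg(s) and
	 * zone_release hands them back; they play the role that the
	 * caller-supplied import/release functions play for the pure
	 * cache zones handled above.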
15760095a784SJeff Roberson */ 15770095a784SJeff Roberson zone->uz_import = (uma_import)zone_import; 15780095a784SJeff Roberson zone->uz_release = (uma_release)zone_release; 15790095a784SJeff Roberson zone->uz_arg = zone; 15800095a784SJeff Roberson 1581099a0e58SBosko Milekic if (arg->flags & UMA_ZONE_SECONDARY) { 1582099a0e58SBosko Milekic KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg")); 15838355f576SJeff Roberson zone->uz_init = arg->uminit; 1584e221e841SJeff Roberson zone->uz_fini = arg->fini; 1585af526374SJeff Roberson zone->uz_lockptr = &keg->uk_lock; 1586e20a199fSJeff Roberson zone->uz_flags |= UMA_ZONE_SECONDARY; 1587111fbcd5SBryan Venteicher rw_wlock(&uma_rwlock); 1588099a0e58SBosko Milekic ZONE_LOCK(zone); 1589099a0e58SBosko Milekic LIST_FOREACH(z, &keg->uk_zones, uz_link) { 1590099a0e58SBosko Milekic if (LIST_NEXT(z, uz_link) == NULL) { 1591099a0e58SBosko Milekic LIST_INSERT_AFTER(z, zone, uz_link); 1592099a0e58SBosko Milekic break; 1593099a0e58SBosko Milekic } 1594099a0e58SBosko Milekic } 1595099a0e58SBosko Milekic ZONE_UNLOCK(zone); 1596111fbcd5SBryan Venteicher rw_wunlock(&uma_rwlock); 1597e20a199fSJeff Roberson } else if (keg == NULL) { 1598e20a199fSJeff Roberson if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini, 1599e20a199fSJeff Roberson arg->align, arg->flags)) == NULL) 1600b23f72e9SBrian Feldman return (ENOMEM); 1601099a0e58SBosko Milekic } else { 1602099a0e58SBosko Milekic struct uma_kctor_args karg; 1603b23f72e9SBrian Feldman int error; 1604099a0e58SBosko Milekic 1605099a0e58SBosko Milekic /* We should only be here from uma_startup() */ 1606099a0e58SBosko Milekic karg.size = arg->size; 1607099a0e58SBosko Milekic karg.uminit = arg->uminit; 1608099a0e58SBosko Milekic karg.fini = arg->fini; 1609099a0e58SBosko Milekic karg.align = arg->align; 1610099a0e58SBosko Milekic karg.flags = arg->flags; 1611099a0e58SBosko Milekic karg.zone = zone; 1612b23f72e9SBrian Feldman error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg, 1613b23f72e9SBrian Feldman flags); 1614b23f72e9SBrian Feldman if (error) 1615b23f72e9SBrian Feldman return (error); 1616099a0e58SBosko Milekic } 16170095a784SJeff Roberson 1618e20a199fSJeff Roberson /* 1619e20a199fSJeff Roberson * Link in the first keg. 1620e20a199fSJeff Roberson */ 1621e20a199fSJeff Roberson zone->uz_klink.kl_keg = keg; 1622e20a199fSJeff Roberson LIST_INSERT_HEAD(&zone->uz_kegs, &zone->uz_klink, kl_link); 1623af526374SJeff Roberson zone->uz_lockptr = &keg->uk_lock; 1624e20a199fSJeff Roberson zone->uz_size = keg->uk_size; 1625e20a199fSJeff Roberson zone->uz_flags |= (keg->uk_flags & 1626e20a199fSJeff Roberson (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT)); 16278355f576SJeff Roberson 16288355f576SJeff Roberson /* 16298355f576SJeff Roberson * Some internal zones don't have room allocated for the per cpu 16308355f576SJeff Roberson * caches. If we're internal, bail out here. 
16318355f576SJeff Roberson 	 */
1632099a0e58SBosko Milekic 	if (keg->uk_flags & UMA_ZFLAG_INTERNAL) {
1633e20a199fSJeff Roberson 		KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0,
1634099a0e58SBosko Milekic 		    ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
1635b23f72e9SBrian Feldman 		return (0);
1636099a0e58SBosko Milekic 	}
16378355f576SJeff Roberson 
1638af526374SJeff Roberson out:
1639af526374SJeff Roberson 	if ((arg->flags & UMA_ZONE_MAXBUCKET) == 0)
1640af526374SJeff Roberson 		zone->uz_count = bucket_select(zone->uz_size);
16418355f576SJeff Roberson 	else
1642cae33c14SJeff Roberson 		zone->uz_count = BUCKET_MAX;
1643ace66b56SAlexander Motin 	zone->uz_count_min = zone->uz_count;
1644fc03d22bSJeff Roberson 
1645b23f72e9SBrian Feldman 	return (0);
16468355f576SJeff Roberson }
16478355f576SJeff Roberson 
16488355f576SJeff Roberson /*
1649099a0e58SBosko Milekic  * Keg header dtor.  This frees all data, destroys locks, frees the hash
1650099a0e58SBosko Milekic  * table and removes the keg from the global list.
16519c2cd7e5SJeff Roberson  *
16529c2cd7e5SJeff Roberson  * Arguments/Returns follow uma_dtor specifications
16539c2cd7e5SJeff Roberson  *	udata  unused
16549c2cd7e5SJeff Roberson  */
1655099a0e58SBosko Milekic static void
1656099a0e58SBosko Milekic keg_dtor(void *arg, int size, void *udata)
1657099a0e58SBosko Milekic {
1658099a0e58SBosko Milekic 	uma_keg_t keg;
16599c2cd7e5SJeff Roberson 
1660099a0e58SBosko Milekic 	keg = (uma_keg_t)arg;
1661e20a199fSJeff Roberson 	KEG_LOCK(keg);
1662099a0e58SBosko Milekic 	if (keg->uk_free != 0) {
1663a3845534SCraig Rodrigues 		printf("Freed UMA keg (%s) was not empty (%d items). "
1664099a0e58SBosko Milekic 		    "Lost %d pages of memory.\n",
1665a3845534SCraig Rodrigues 		    keg->uk_name ? keg->uk_name : "",
1666099a0e58SBosko Milekic 		    keg->uk_free, keg->uk_pages);
1667099a0e58SBosko Milekic 	}
1668e20a199fSJeff Roberson 	KEG_UNLOCK(keg);
1669099a0e58SBosko Milekic 
1670099a0e58SBosko Milekic 	hash_free(&keg->uk_hash);
1671099a0e58SBosko Milekic 
1672e20a199fSJeff Roberson 	KEG_LOCK_FINI(keg);
1673099a0e58SBosko Milekic }
1674099a0e58SBosko Milekic 
1675099a0e58SBosko Milekic /*
1676099a0e58SBosko Milekic  * Zone header dtor.
16779c2cd7e5SJeff Roberson  *
16789c2cd7e5SJeff Roberson  * Arguments/Returns follow uma_dtor specifications
16799c2cd7e5SJeff Roberson  *	udata  unused
16809c2cd7e5SJeff Roberson  */
16819c2cd7e5SJeff Roberson static void
16829c2cd7e5SJeff Roberson zone_dtor(void *arg, int size, void *udata)
16839c2cd7e5SJeff Roberson {
1684e20a199fSJeff Roberson 	uma_klink_t klink;
16859c2cd7e5SJeff Roberson 	uma_zone_t zone;
1686099a0e58SBosko Milekic 	uma_keg_t keg;
16879c2cd7e5SJeff Roberson 
16889c2cd7e5SJeff Roberson 	zone = (uma_zone_t)arg;
1689e20a199fSJeff Roberson 	keg = zone_first_keg(zone);
16909643769aSJeff Roberson 
1691e20a199fSJeff Roberson 	if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
16929643769aSJeff Roberson 		cache_drain(zone);
1693099a0e58SBosko Milekic 
1694111fbcd5SBryan Venteicher 	rw_wlock(&uma_rwlock);
1695099a0e58SBosko Milekic 	LIST_REMOVE(zone, uz_link);
1696111fbcd5SBryan Venteicher 	rw_wunlock(&uma_rwlock);
1697099a0e58SBosko Milekic 	/*
1698099a0e58SBosko Milekic 	 * XXX There are races here: the zone can be
1699099a0e58SBosko Milekic 	 * drained, the zone lock released, and the zone
1700099a0e58SBosko Milekic 	 * refilled before we remove it... we don't care
1701099a0e58SBosko Milekic 	 * for now.
1702099a0e58SBosko Milekic 	 */
1703e20a199fSJeff Roberson 	zone_drain_wait(zone, M_WAITOK);
1704e20a199fSJeff Roberson 	/*
1705e20a199fSJeff Roberson 	 * Unlink all of our kegs.
1706e20a199fSJeff Roberson */ 1707e20a199fSJeff Roberson while ((klink = LIST_FIRST(&zone->uz_kegs)) != NULL) { 1708e20a199fSJeff Roberson klink->kl_keg = NULL; 1709e20a199fSJeff Roberson LIST_REMOVE(klink, kl_link); 1710e20a199fSJeff Roberson if (klink == &zone->uz_klink) 1711e20a199fSJeff Roberson continue; 1712e20a199fSJeff Roberson free(klink, M_TEMP); 1713e20a199fSJeff Roberson } 1714e20a199fSJeff Roberson /* 1715e20a199fSJeff Roberson * We only destroy kegs from non secondary zones. 1716e20a199fSJeff Roberson */ 17170095a784SJeff Roberson if (keg != NULL && (zone->uz_flags & UMA_ZONE_SECONDARY) == 0) { 1718111fbcd5SBryan Venteicher rw_wlock(&uma_rwlock); 1719099a0e58SBosko Milekic LIST_REMOVE(keg, uk_link); 1720111fbcd5SBryan Venteicher rw_wunlock(&uma_rwlock); 17210095a784SJeff Roberson zone_free_item(kegs, keg, NULL, SKIP_NONE); 17229c2cd7e5SJeff Roberson } 1723af526374SJeff Roberson ZONE_LOCK_FINI(zone); 1724099a0e58SBosko Milekic } 1725099a0e58SBosko Milekic 17269c2cd7e5SJeff Roberson /* 17278355f576SJeff Roberson * Traverses every zone in the system and calls a callback 17288355f576SJeff Roberson * 17298355f576SJeff Roberson * Arguments: 17308355f576SJeff Roberson * zfunc A pointer to a function which accepts a zone 17318355f576SJeff Roberson * as an argument. 17328355f576SJeff Roberson * 17338355f576SJeff Roberson * Returns: 17348355f576SJeff Roberson * Nothing 17358355f576SJeff Roberson */ 17368355f576SJeff Roberson static void 17378355f576SJeff Roberson zone_foreach(void (*zfunc)(uma_zone_t)) 17388355f576SJeff Roberson { 1739099a0e58SBosko Milekic uma_keg_t keg; 17408355f576SJeff Roberson uma_zone_t zone; 17418355f576SJeff Roberson 1742111fbcd5SBryan Venteicher rw_rlock(&uma_rwlock); 1743099a0e58SBosko Milekic LIST_FOREACH(keg, &uma_kegs, uk_link) { 1744099a0e58SBosko Milekic LIST_FOREACH(zone, &keg->uk_zones, uz_link) 17458355f576SJeff Roberson zfunc(zone); 1746099a0e58SBosko Milekic } 1747111fbcd5SBryan Venteicher rw_runlock(&uma_rwlock); 17488355f576SJeff Roberson } 17498355f576SJeff Roberson 17508355f576SJeff Roberson /* Public functions */ 17518355f576SJeff Roberson /* See uma.h */ 17528355f576SJeff Roberson void 1753ac0a6fd0SGleb Smirnoff uma_startup(void *mem, int npages) 17548355f576SJeff Roberson { 17558355f576SJeff Roberson struct uma_zctor_args args; 17568355f576SJeff Roberson 1757111fbcd5SBryan Venteicher rw_init(&uma_rwlock, "UMA lock"); 1758099a0e58SBosko Milekic 1759099a0e58SBosko Milekic /* "manually" create the initial zone */ 17600095a784SJeff Roberson memset(&args, 0, sizeof(args)); 1761099a0e58SBosko Milekic args.name = "UMA Kegs"; 1762099a0e58SBosko Milekic args.size = sizeof(struct uma_keg); 1763099a0e58SBosko Milekic args.ctor = keg_ctor; 1764099a0e58SBosko Milekic args.dtor = keg_dtor; 17658355f576SJeff Roberson args.uminit = zero_init; 17668355f576SJeff Roberson args.fini = NULL; 1767099a0e58SBosko Milekic args.keg = &masterkeg; 17688355f576SJeff Roberson args.align = 32 - 1; 1769b60f5b79SJeff Roberson args.flags = UMA_ZFLAG_INTERNAL; 17708355f576SJeff Roberson /* The initial zone has no Per cpu queues so it's smaller */ 1771b23f72e9SBrian Feldman zone_ctor(kegs, sizeof(struct uma_zone), &args, M_WAITOK); 17728355f576SJeff Roberson 1773f353d338SAlan Cox mtx_init(&uma_boot_pages_mtx, "UMA boot pages", NULL, MTX_DEF); 1774ac0a6fd0SGleb Smirnoff bootmem = mem; 1775ac0a6fd0SGleb Smirnoff boot_pages = npages; 17768355f576SJeff Roberson 1777099a0e58SBosko Milekic args.name = "UMA Zones"; 1778099a0e58SBosko Milekic args.size = sizeof(struct uma_zone) + 
177951cfb0beSDmitry Chagin (sizeof(struct uma_cache) * (mp_maxid + 1)); 1780099a0e58SBosko Milekic args.ctor = zone_ctor; 1781099a0e58SBosko Milekic args.dtor = zone_dtor; 1782099a0e58SBosko Milekic args.uminit = zero_init; 1783099a0e58SBosko Milekic args.fini = NULL; 1784099a0e58SBosko Milekic args.keg = NULL; 1785099a0e58SBosko Milekic args.align = 32 - 1; 1786099a0e58SBosko Milekic args.flags = UMA_ZFLAG_INTERNAL; 1787099a0e58SBosko Milekic /* The initial zone has no Per cpu queues so it's smaller */ 1788b23f72e9SBrian Feldman zone_ctor(zones, sizeof(struct uma_zone), &args, M_WAITOK); 1789099a0e58SBosko Milekic 17908355f576SJeff Roberson /* Now make a zone for slab headers */ 17918355f576SJeff Roberson slabzone = uma_zcreate("UMA Slabs", 1792ef72505eSJeff Roberson sizeof(struct uma_slab), 17938355f576SJeff Roberson NULL, NULL, NULL, NULL, 1794b60f5b79SJeff Roberson UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL); 17958355f576SJeff Roberson 17968355f576SJeff Roberson hashzone = uma_zcreate("UMA Hash", 17978355f576SJeff Roberson sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT, 17988355f576SJeff Roberson NULL, NULL, NULL, NULL, 1799b60f5b79SJeff Roberson UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL); 18008355f576SJeff Roberson 1801cae33c14SJeff Roberson bucket_init(); 18028355f576SJeff Roberson 1803342f1793SAlan Cox booted = UMA_STARTUP; 18048355f576SJeff Roberson } 18058355f576SJeff Roberson 18068355f576SJeff Roberson /* see uma.h */ 18078355f576SJeff Roberson void 180899571dc3SJeff Roberson uma_startup2(void) 18098355f576SJeff Roberson { 1810342f1793SAlan Cox booted = UMA_STARTUP2; 181186bbae32SJeff Roberson bucket_enable(); 181295c4bf75SKonstantin Belousov sx_init(&uma_drain_lock, "umadrain"); 18138355f576SJeff Roberson } 18148355f576SJeff Roberson 18158355f576SJeff Roberson /* 18168355f576SJeff Roberson * Initialize our callout handle 18178355f576SJeff Roberson * 18188355f576SJeff Roberson */ 18198355f576SJeff Roberson 18208355f576SJeff Roberson static void 18218355f576SJeff Roberson uma_startup3(void) 18228355f576SJeff Roberson { 18231431a748SGleb Smirnoff 1824fd90e2edSJung-uk Kim callout_init(&uma_callout, 1); 18259643769aSJeff Roberson callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL); 18268355f576SJeff Roberson } 18278355f576SJeff Roberson 1828e20a199fSJeff Roberson static uma_keg_t 1829099a0e58SBosko Milekic uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini, 183085dcf349SGleb Smirnoff int align, uint32_t flags) 1831099a0e58SBosko Milekic { 1832099a0e58SBosko Milekic struct uma_kctor_args args; 1833099a0e58SBosko Milekic 1834099a0e58SBosko Milekic args.size = size; 1835099a0e58SBosko Milekic args.uminit = uminit; 1836099a0e58SBosko Milekic args.fini = fini; 18371e319f6dSRobert Watson args.align = (align == UMA_ALIGN_CACHE) ? 
uma_align_cache : align; 1838099a0e58SBosko Milekic args.flags = flags; 1839099a0e58SBosko Milekic args.zone = zone; 1840e20a199fSJeff Roberson return (zone_alloc_item(kegs, &args, M_WAITOK)); 1841099a0e58SBosko Milekic } 1842099a0e58SBosko Milekic 18438355f576SJeff Roberson /* See uma.h */ 18441e319f6dSRobert Watson void 18451e319f6dSRobert Watson uma_set_align(int align) 18461e319f6dSRobert Watson { 18471e319f6dSRobert Watson 18481e319f6dSRobert Watson if (align != UMA_ALIGN_CACHE) 18491e319f6dSRobert Watson uma_align_cache = align; 18501e319f6dSRobert Watson } 18511e319f6dSRobert Watson 18521e319f6dSRobert Watson /* See uma.h */ 18538355f576SJeff Roberson uma_zone_t 1854bb196eb4SMatthew D Fleming uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor, 185585dcf349SGleb Smirnoff uma_init uminit, uma_fini fini, int align, uint32_t flags) 18568355f576SJeff Roberson 18578355f576SJeff Roberson { 18588355f576SJeff Roberson struct uma_zctor_args args; 185995c4bf75SKonstantin Belousov uma_zone_t res; 186095c4bf75SKonstantin Belousov bool locked; 18618355f576SJeff Roberson 1862a5a35578SJohn Baldwin KASSERT(powerof2(align + 1), ("invalid zone alignment %d for \"%s\"", 1863a5a35578SJohn Baldwin align, name)); 1864a5a35578SJohn Baldwin 18658355f576SJeff Roberson /* This stuff is essential for the zone ctor */ 18660095a784SJeff Roberson memset(&args, 0, sizeof(args)); 18678355f576SJeff Roberson args.name = name; 18688355f576SJeff Roberson args.size = size; 18698355f576SJeff Roberson args.ctor = ctor; 18708355f576SJeff Roberson args.dtor = dtor; 18718355f576SJeff Roberson args.uminit = uminit; 18728355f576SJeff Roberson args.fini = fini; 1873afc6dc36SJohn-Mark Gurney #ifdef INVARIANTS 1874afc6dc36SJohn-Mark Gurney /* 1875afc6dc36SJohn-Mark Gurney * If a zone is being created with an empty constructor and 1876afc6dc36SJohn-Mark Gurney * destructor, pass UMA constructor/destructor which checks for 1877afc6dc36SJohn-Mark Gurney * memory use after free. 
1878afc6dc36SJohn-Mark Gurney */ 187919c591bfSMateusz Guzik if ((!(flags & (UMA_ZONE_ZINIT | UMA_ZONE_NOFREE))) && 188019c591bfSMateusz Guzik ctor == NULL && dtor == NULL && uminit == NULL && fini == NULL) { 1881afc6dc36SJohn-Mark Gurney args.ctor = trash_ctor; 1882afc6dc36SJohn-Mark Gurney args.dtor = trash_dtor; 1883afc6dc36SJohn-Mark Gurney args.uminit = trash_init; 1884afc6dc36SJohn-Mark Gurney args.fini = trash_fini; 1885afc6dc36SJohn-Mark Gurney } 1886afc6dc36SJohn-Mark Gurney #endif 18878355f576SJeff Roberson args.align = align; 18888355f576SJeff Roberson args.flags = flags; 1889099a0e58SBosko Milekic args.keg = NULL; 1890099a0e58SBosko Milekic 189195c4bf75SKonstantin Belousov if (booted < UMA_STARTUP2) { 189295c4bf75SKonstantin Belousov locked = false; 189395c4bf75SKonstantin Belousov } else { 189495c4bf75SKonstantin Belousov sx_slock(&uma_drain_lock); 189595c4bf75SKonstantin Belousov locked = true; 189695c4bf75SKonstantin Belousov } 189795c4bf75SKonstantin Belousov res = zone_alloc_item(zones, &args, M_WAITOK); 189895c4bf75SKonstantin Belousov if (locked) 189995c4bf75SKonstantin Belousov sx_sunlock(&uma_drain_lock); 190095c4bf75SKonstantin Belousov return (res); 1901099a0e58SBosko Milekic } 1902099a0e58SBosko Milekic 1903099a0e58SBosko Milekic /* See uma.h */ 1904099a0e58SBosko Milekic uma_zone_t 1905099a0e58SBosko Milekic uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor, 1906099a0e58SBosko Milekic uma_init zinit, uma_fini zfini, uma_zone_t master) 1907099a0e58SBosko Milekic { 1908099a0e58SBosko Milekic struct uma_zctor_args args; 1909e20a199fSJeff Roberson uma_keg_t keg; 191095c4bf75SKonstantin Belousov uma_zone_t res; 191195c4bf75SKonstantin Belousov bool locked; 1912099a0e58SBosko Milekic 1913e20a199fSJeff Roberson keg = zone_first_keg(master); 19140095a784SJeff Roberson memset(&args, 0, sizeof(args)); 1915099a0e58SBosko Milekic args.name = name; 1916e20a199fSJeff Roberson args.size = keg->uk_size; 1917099a0e58SBosko Milekic args.ctor = ctor; 1918099a0e58SBosko Milekic args.dtor = dtor; 1919099a0e58SBosko Milekic args.uminit = zinit; 1920099a0e58SBosko Milekic args.fini = zfini; 1921e20a199fSJeff Roberson args.align = keg->uk_align; 1922e20a199fSJeff Roberson args.flags = keg->uk_flags | UMA_ZONE_SECONDARY; 1923e20a199fSJeff Roberson args.keg = keg; 19248355f576SJeff Roberson 192595c4bf75SKonstantin Belousov if (booted < UMA_STARTUP2) { 192695c4bf75SKonstantin Belousov locked = false; 192795c4bf75SKonstantin Belousov } else { 192895c4bf75SKonstantin Belousov sx_slock(&uma_drain_lock); 192995c4bf75SKonstantin Belousov locked = true; 193095c4bf75SKonstantin Belousov } 1931e20a199fSJeff Roberson /* XXX Attaches only one keg of potentially many. 
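 * Additional kegs can be attached to the new zone afterwards with
 * uma_zsecond_add().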
*/ 193295c4bf75SKonstantin Belousov res = zone_alloc_item(zones, &args, M_WAITOK); 193395c4bf75SKonstantin Belousov if (locked) 193495c4bf75SKonstantin Belousov sx_sunlock(&uma_drain_lock); 193595c4bf75SKonstantin Belousov return (res); 19368355f576SJeff Roberson } 19378355f576SJeff Roberson 19380095a784SJeff Roberson /* See uma.h */ 19390095a784SJeff Roberson uma_zone_t 1940af526374SJeff Roberson uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor, 1941af526374SJeff Roberson uma_init zinit, uma_fini zfini, uma_import zimport, 1942af526374SJeff Roberson uma_release zrelease, void *arg, int flags) 19430095a784SJeff Roberson { 19440095a784SJeff Roberson struct uma_zctor_args args; 19450095a784SJeff Roberson 19460095a784SJeff Roberson memset(&args, 0, sizeof(args)); 19470095a784SJeff Roberson args.name = name; 1948af526374SJeff Roberson args.size = size; 19490095a784SJeff Roberson args.ctor = ctor; 19500095a784SJeff Roberson args.dtor = dtor; 19510095a784SJeff Roberson args.uminit = zinit; 19520095a784SJeff Roberson args.fini = zfini; 19530095a784SJeff Roberson args.import = zimport; 19540095a784SJeff Roberson args.release = zrelease; 19550095a784SJeff Roberson args.arg = arg; 19560095a784SJeff Roberson args.align = 0; 19570095a784SJeff Roberson args.flags = flags; 19580095a784SJeff Roberson 19590095a784SJeff Roberson return (zone_alloc_item(zones, &args, M_WAITOK)); 19600095a784SJeff Roberson } 19610095a784SJeff Roberson 1962e20a199fSJeff Roberson static void 1963e20a199fSJeff Roberson zone_lock_pair(uma_zone_t a, uma_zone_t b) 1964e20a199fSJeff Roberson { 1965e20a199fSJeff Roberson if (a < b) { 1966e20a199fSJeff Roberson ZONE_LOCK(a); 1967af526374SJeff Roberson mtx_lock_flags(b->uz_lockptr, MTX_DUPOK); 1968e20a199fSJeff Roberson } else { 1969e20a199fSJeff Roberson ZONE_LOCK(b); 1970af526374SJeff Roberson mtx_lock_flags(a->uz_lockptr, MTX_DUPOK); 1971e20a199fSJeff Roberson } 1972e20a199fSJeff Roberson } 1973e20a199fSJeff Roberson 1974e20a199fSJeff Roberson static void 1975e20a199fSJeff Roberson zone_unlock_pair(uma_zone_t a, uma_zone_t b) 1976e20a199fSJeff Roberson { 1977e20a199fSJeff Roberson 1978e20a199fSJeff Roberson ZONE_UNLOCK(a); 1979e20a199fSJeff Roberson ZONE_UNLOCK(b); 1980e20a199fSJeff Roberson } 1981e20a199fSJeff Roberson 1982e20a199fSJeff Roberson int 1983e20a199fSJeff Roberson uma_zsecond_add(uma_zone_t zone, uma_zone_t master) 1984e20a199fSJeff Roberson { 1985e20a199fSJeff Roberson uma_klink_t klink; 1986e20a199fSJeff Roberson uma_klink_t kl; 1987e20a199fSJeff Roberson int error; 1988e20a199fSJeff Roberson 1989e20a199fSJeff Roberson error = 0; 1990e20a199fSJeff Roberson klink = malloc(sizeof(*klink), M_TEMP, M_WAITOK | M_ZERO); 1991e20a199fSJeff Roberson 1992e20a199fSJeff Roberson zone_lock_pair(zone, master); 1993e20a199fSJeff Roberson /* 1994e20a199fSJeff Roberson * zone must use vtoslab() to resolve objects and must already be 1995e20a199fSJeff Roberson * a secondary. 1996e20a199fSJeff Roberson */ 1997e20a199fSJeff Roberson if ((zone->uz_flags & (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) 1998e20a199fSJeff Roberson != (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) { 1999e20a199fSJeff Roberson error = EINVAL; 2000e20a199fSJeff Roberson goto out; 2001e20a199fSJeff Roberson } 2002e20a199fSJeff Roberson /* 2003e20a199fSJeff Roberson * The new master must also use vtoslab(). 
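	 * vtoslab() recovers the slab, and through it the keg, from the
	 * page backing an address; once a zone is fed by more than one
	 * keg, that is the only way to route a freed item back to the
	 * keg it came from.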
2004e20a199fSJeff Roberson */ 2005e20a199fSJeff Roberson if ((zone->uz_flags & UMA_ZONE_VTOSLAB) != UMA_ZONE_VTOSLAB) { 2006e20a199fSJeff Roberson error = EINVAL; 2007e20a199fSJeff Roberson goto out; 2008e20a199fSJeff Roberson } 2009cfcae3f8SGleb Smirnoff 2010e20a199fSJeff Roberson /* 2011e20a199fSJeff Roberson * The underlying object must be the same size. rsize 2012e20a199fSJeff Roberson * may be different. 2013e20a199fSJeff Roberson */ 2014e20a199fSJeff Roberson if (master->uz_size != zone->uz_size) { 2015e20a199fSJeff Roberson error = E2BIG; 2016e20a199fSJeff Roberson goto out; 2017e20a199fSJeff Roberson } 2018e20a199fSJeff Roberson /* 2019e20a199fSJeff Roberson * Put it at the end of the list. 2020e20a199fSJeff Roberson */ 2021e20a199fSJeff Roberson klink->kl_keg = zone_first_keg(master); 2022e20a199fSJeff Roberson LIST_FOREACH(kl, &zone->uz_kegs, kl_link) { 2023e20a199fSJeff Roberson if (LIST_NEXT(kl, kl_link) == NULL) { 2024e20a199fSJeff Roberson LIST_INSERT_AFTER(kl, klink, kl_link); 2025e20a199fSJeff Roberson break; 2026e20a199fSJeff Roberson } 2027e20a199fSJeff Roberson } 2028e20a199fSJeff Roberson klink = NULL; 2029e20a199fSJeff Roberson zone->uz_flags |= UMA_ZFLAG_MULTI; 2030e20a199fSJeff Roberson zone->uz_slab = zone_fetch_slab_multi; 2031e20a199fSJeff Roberson 2032e20a199fSJeff Roberson out: 2033e20a199fSJeff Roberson zone_unlock_pair(zone, master); 2034e20a199fSJeff Roberson if (klink != NULL) 2035e20a199fSJeff Roberson free(klink, M_TEMP); 2036e20a199fSJeff Roberson 2037e20a199fSJeff Roberson return (error); 2038e20a199fSJeff Roberson } 2039e20a199fSJeff Roberson 2040e20a199fSJeff Roberson 20418355f576SJeff Roberson /* See uma.h */ 20429c2cd7e5SJeff Roberson void 20439c2cd7e5SJeff Roberson uma_zdestroy(uma_zone_t zone) 20449c2cd7e5SJeff Roberson { 2045f4ff923bSRobert Watson 204695c4bf75SKonstantin Belousov sx_slock(&uma_drain_lock); 20470095a784SJeff Roberson zone_free_item(zones, zone, NULL, SKIP_NONE); 204895c4bf75SKonstantin Belousov sx_sunlock(&uma_drain_lock); 20499c2cd7e5SJeff Roberson } 20509c2cd7e5SJeff Roberson 20518d6fbbb8SJeff Roberson void 20528d6fbbb8SJeff Roberson uma_zwait(uma_zone_t zone) 20538d6fbbb8SJeff Roberson { 20548d6fbbb8SJeff Roberson void *item; 20558d6fbbb8SJeff Roberson 20568d6fbbb8SJeff Roberson item = uma_zalloc_arg(zone, NULL, M_WAITOK); 20578d6fbbb8SJeff Roberson uma_zfree(zone, item); 20588d6fbbb8SJeff Roberson } 20598d6fbbb8SJeff Roberson 20609c2cd7e5SJeff Roberson /* See uma.h */ 20618355f576SJeff Roberson void * 20622cc35ff9SJeff Roberson uma_zalloc_arg(uma_zone_t zone, void *udata, int flags) 20638355f576SJeff Roberson { 20648355f576SJeff Roberson void *item; 20658355f576SJeff Roberson uma_cache_t cache; 20668355f576SJeff Roberson uma_bucket_t bucket; 2067fc03d22bSJeff Roberson int lockfail; 20688355f576SJeff Roberson int cpu; 20698355f576SJeff Roberson 2070e866d8f0SMark Murray /* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */ 2071e866d8f0SMark Murray random_harvest_fast_uma(&zone, sizeof(zone), 1, RANDOM_UMA); 207210cb2424SMark Murray 20738355f576SJeff Roberson /* This is the fast path allocation */ 20741431a748SGleb Smirnoff CTR4(KTR_UMA, "uma_zalloc_arg thread %x zone %s(%p) flags %d", 20751431a748SGleb Smirnoff curthread, zone->uz_name, zone, flags); 2076a553d4b8SJeff Roberson 2077635fd505SRobert Watson if (flags & M_WAITOK) { 2078b23f72e9SBrian Feldman WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 2079635fd505SRobert Watson "uma_zalloc_arg: zone \"%s\"", zone->uz_name); 20804c1cc01cSJohn Baldwin } 
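
	/*
	 * The search order from here on: the per-CPU alloc bucket, then
	 * the per-CPU free bucket, then a full bucket cached in the zone,
	 * then a freshly filled bucket, and finally a bare item from the
	 * keg layer (zalloc_item below).
	 */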
2081d9e2e68dSMark Johnston KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(), 20821067a2baSJonathan T. Looney ("uma_zalloc_arg: called with spinlock or critical section held")); 20831067a2baSJonathan T. Looney 20848d689e04SGleb Smirnoff #ifdef DEBUG_MEMGUARD 20858d689e04SGleb Smirnoff if (memguard_cmp_zone(zone)) { 20868d689e04SGleb Smirnoff item = memguard_alloc(zone->uz_size, flags); 20878d689e04SGleb Smirnoff if (item != NULL) { 20888d689e04SGleb Smirnoff if (zone->uz_init != NULL && 20898d689e04SGleb Smirnoff zone->uz_init(item, zone->uz_size, flags) != 0) 20908d689e04SGleb Smirnoff return (NULL); 20918d689e04SGleb Smirnoff if (zone->uz_ctor != NULL && 2092fc03d22bSJeff Roberson zone->uz_ctor(item, zone->uz_size, udata, 2093fc03d22bSJeff Roberson flags) != 0) { 20948d689e04SGleb Smirnoff zone->uz_fini(item, zone->uz_size); 20958d689e04SGleb Smirnoff return (NULL); 20968d689e04SGleb Smirnoff } 20978d689e04SGleb Smirnoff return (item); 20988d689e04SGleb Smirnoff } 20998d689e04SGleb Smirnoff /* This is unfortunate but should not be fatal. */ 21008d689e04SGleb Smirnoff } 21018d689e04SGleb Smirnoff #endif 21025d1ae027SRobert Watson /* 21035d1ae027SRobert Watson * If possible, allocate from the per-CPU cache. There are two 21045d1ae027SRobert Watson * requirements for safe access to the per-CPU cache: (1) the thread 21055d1ae027SRobert Watson * accessing the cache must not be preempted or yield during access, 21065d1ae027SRobert Watson * and (2) the thread must not migrate CPUs without switching which 21075d1ae027SRobert Watson * cache it accesses. We rely on a critical section to prevent 21085d1ae027SRobert Watson * preemption and migration. We release the critical section in 21095d1ae027SRobert Watson * order to acquire the zone mutex if we are unable to allocate from 21105d1ae027SRobert Watson * the current cache; when we re-acquire the critical section, we 21115d1ae027SRobert Watson * must detect and handle migration if it has occurred. 
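	 *
	 * In outline, the pattern used below is (simplified sketch):
	 *
	 *	critical_enter();
	 *	cache = &zone->uz_cpu[curcpu];
	 *	... consume from the cache's buckets ...
	 *	critical_exit();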
21125d1ae027SRobert Watson */ 21135d1ae027SRobert Watson critical_enter(); 21145d1ae027SRobert Watson cpu = curcpu; 21158355f576SJeff Roberson cache = &zone->uz_cpu[cpu]; 21168355f576SJeff Roberson 21178355f576SJeff Roberson zalloc_start: 21188355f576SJeff Roberson bucket = cache->uc_allocbucket; 2119fc03d22bSJeff Roberson if (bucket != NULL && bucket->ub_cnt > 0) { 2120cae33c14SJeff Roberson bucket->ub_cnt--; 2121cae33c14SJeff Roberson item = bucket->ub_bucket[bucket->ub_cnt]; 21228355f576SJeff Roberson #ifdef INVARIANTS 2123cae33c14SJeff Roberson bucket->ub_bucket[bucket->ub_cnt] = NULL; 21248355f576SJeff Roberson #endif 2125fc03d22bSJeff Roberson KASSERT(item != NULL, ("uma_zalloc: Bucket pointer mangled.")); 21268355f576SJeff Roberson cache->uc_allocs++; 21275d1ae027SRobert Watson critical_exit(); 2128fc03d22bSJeff Roberson if (zone->uz_ctor != NULL && 2129fc03d22bSJeff Roberson zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) { 21300095a784SJeff Roberson atomic_add_long(&zone->uz_fails, 1); 2131fc03d22bSJeff Roberson zone_free_item(zone, item, udata, SKIP_DTOR); 2132b23f72e9SBrian Feldman return (NULL); 2133b23f72e9SBrian Feldman } 2134ef72505eSJeff Roberson #ifdef INVARIANTS 2135ef72505eSJeff Roberson uma_dbg_alloc(zone, NULL, item); 2136ef72505eSJeff Roberson #endif 21372cc35ff9SJeff Roberson if (flags & M_ZERO) 213848343a2fSGleb Smirnoff uma_zero_item(item, zone); 21398355f576SJeff Roberson return (item); 2140fc03d22bSJeff Roberson } 2141fc03d22bSJeff Roberson 21428355f576SJeff Roberson /* 21438355f576SJeff Roberson * We have run out of items in our alloc bucket. 21448355f576SJeff Roberson * See if we can switch with our free bucket. 21458355f576SJeff Roberson */ 2146b983089aSJeff Roberson bucket = cache->uc_freebucket; 2147fc03d22bSJeff Roberson if (bucket != NULL && bucket->ub_cnt > 0) { 21481431a748SGleb Smirnoff CTR2(KTR_UMA, 21491431a748SGleb Smirnoff "uma_zalloc: zone %s(%p) swapping empty with alloc", 21501431a748SGleb Smirnoff zone->uz_name, zone); 21518355f576SJeff Roberson cache->uc_freebucket = cache->uc_allocbucket; 2152b983089aSJeff Roberson cache->uc_allocbucket = bucket; 21538355f576SJeff Roberson goto zalloc_start; 21548355f576SJeff Roberson } 2155fc03d22bSJeff Roberson 2156fc03d22bSJeff Roberson /* 2157fc03d22bSJeff Roberson * Discard any empty allocation bucket while we hold no locks. 2158fc03d22bSJeff Roberson */ 2159fc03d22bSJeff Roberson bucket = cache->uc_allocbucket; 2160fc03d22bSJeff Roberson cache->uc_allocbucket = NULL; 2161fc03d22bSJeff Roberson critical_exit(); 2162fc03d22bSJeff Roberson if (bucket != NULL) 21636fd34d6fSJeff Roberson bucket_free(zone, bucket, udata); 2164fc03d22bSJeff Roberson 2165fc03d22bSJeff Roberson /* Short-circuit for zones without buckets and low memory. */ 2166fc03d22bSJeff Roberson if (zone->uz_count == 0 || bucketdisable) 2167fc03d22bSJeff Roberson goto zalloc_item; 2168fc03d22bSJeff Roberson 21695d1ae027SRobert Watson /* 21705d1ae027SRobert Watson * Attempt to retrieve the item from the per-CPU cache has failed, so 21715d1ae027SRobert Watson * we must go back to the zone. This requires the zone lock, so we 21725d1ae027SRobert Watson * must drop the critical section, then re-acquire it when we go back 21735d1ae027SRobert Watson * to the cache. Since the critical section is released, we may be 21745d1ae027SRobert Watson * preempted or migrate. 
As such, make sure not to maintain any 21755d1ae027SRobert Watson * thread-local state specific to the cache from prior to releasing 21765d1ae027SRobert Watson * the critical section. 21775d1ae027SRobert Watson */ 2178fc03d22bSJeff Roberson lockfail = 0; 2179fc03d22bSJeff Roberson if (ZONE_TRYLOCK(zone) == 0) { 2180fc03d22bSJeff Roberson /* Record contention to size the buckets. */ 2181a553d4b8SJeff Roberson ZONE_LOCK(zone); 2182fc03d22bSJeff Roberson lockfail = 1; 2183fc03d22bSJeff Roberson } 21845d1ae027SRobert Watson critical_enter(); 21855d1ae027SRobert Watson cpu = curcpu; 21865d1ae027SRobert Watson cache = &zone->uz_cpu[cpu]; 21875d1ae027SRobert Watson 2188fc03d22bSJeff Roberson /* 2189fc03d22bSJeff Roberson * Since we have locked the zone we may as well send back our stats. 2190fc03d22bSJeff Roberson */ 21910095a784SJeff Roberson atomic_add_long(&zone->uz_allocs, cache->uc_allocs); 21920095a784SJeff Roberson atomic_add_long(&zone->uz_frees, cache->uc_frees); 2193a553d4b8SJeff Roberson cache->uc_allocs = 0; 2194773df9abSRobert Watson cache->uc_frees = 0; 21958355f576SJeff Roberson 2196fc03d22bSJeff Roberson /* See if we lost the race to fill the cache. */ 2197fc03d22bSJeff Roberson if (cache->uc_allocbucket != NULL) { 2198fc03d22bSJeff Roberson ZONE_UNLOCK(zone); 2199fc03d22bSJeff Roberson goto zalloc_start; 2200a553d4b8SJeff Roberson } 22018355f576SJeff Roberson 2202fc03d22bSJeff Roberson /* 2203fc03d22bSJeff Roberson * Check the zone's cache of buckets. 2204fc03d22bSJeff Roberson */ 2205fc03d22bSJeff Roberson if ((bucket = LIST_FIRST(&zone->uz_buckets)) != NULL) { 2206cae33c14SJeff Roberson KASSERT(bucket->ub_cnt != 0, 2207a553d4b8SJeff Roberson ("uma_zalloc_arg: Returning an empty bucket.")); 22088355f576SJeff Roberson 2209a553d4b8SJeff Roberson LIST_REMOVE(bucket, ub_link); 2210a553d4b8SJeff Roberson cache->uc_allocbucket = bucket; 2211a553d4b8SJeff Roberson ZONE_UNLOCK(zone); 22128355f576SJeff Roberson goto zalloc_start; 2213a553d4b8SJeff Roberson } 22145d1ae027SRobert Watson /* We are no longer associated with this CPU. */ 22155d1ae027SRobert Watson critical_exit(); 2216bbee39c6SJeff Roberson 2217fc03d22bSJeff Roberson /* 2218fc03d22bSJeff Roberson * We bump the uz count when the cache size is insufficient to 2219fc03d22bSJeff Roberson * handle the working set. 2220fc03d22bSJeff Roberson */ 22216fd34d6fSJeff Roberson if (lockfail && zone->uz_count < BUCKET_MAX) 2222a553d4b8SJeff Roberson zone->uz_count++; 2223fc03d22bSJeff Roberson ZONE_UNLOCK(zone); 2224099a0e58SBosko Milekic 22258355f576SJeff Roberson /* 2226a553d4b8SJeff Roberson * Now lets just fill a bucket and put it on the free list. If that 2227763df3ecSPedro F. Giffuni * works we'll restart the allocation from the beginning and it 2228fc03d22bSJeff Roberson * will use the just filled bucket. 2229bbee39c6SJeff Roberson */ 22306fd34d6fSJeff Roberson bucket = zone_alloc_bucket(zone, udata, flags); 22311431a748SGleb Smirnoff CTR3(KTR_UMA, "uma_zalloc: zone %s(%p) bucket zone returned %p", 22321431a748SGleb Smirnoff zone->uz_name, zone, bucket); 2233fc03d22bSJeff Roberson if (bucket != NULL) { 2234fc03d22bSJeff Roberson ZONE_LOCK(zone); 2235fc03d22bSJeff Roberson critical_enter(); 2236fc03d22bSJeff Roberson cpu = curcpu; 2237fc03d22bSJeff Roberson cache = &zone->uz_cpu[cpu]; 2238fc03d22bSJeff Roberson /* 2239fc03d22bSJeff Roberson * See if we lost the race or were migrated. Cache the 2240fc03d22bSJeff Roberson * initialized bucket to make this less likely or claim 2241fc03d22bSJeff Roberson * the memory directly. 
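	 *
	 * Losing the race is harmless: the surplus bucket is simply hung
	 * on the zone's bucket list, where a later allocation will find
	 * it.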
2242fc03d22bSJeff Roberson */ 2243fc03d22bSJeff Roberson if (cache->uc_allocbucket == NULL) 2244fc03d22bSJeff Roberson cache->uc_allocbucket = bucket; 2245fc03d22bSJeff Roberson else 2246fc03d22bSJeff Roberson LIST_INSERT_HEAD(&zone->uz_buckets, bucket, ub_link); 2247bbee39c6SJeff Roberson ZONE_UNLOCK(zone); 2248fc03d22bSJeff Roberson goto zalloc_start; 2249bbee39c6SJeff Roberson } 2250fc03d22bSJeff Roberson 2251bbee39c6SJeff Roberson /* 2252bbee39c6SJeff Roberson * We may not be able to get a bucket so return an actual item. 2253bbee39c6SJeff Roberson */ 2254fc03d22bSJeff Roberson zalloc_item: 2255e20a199fSJeff Roberson item = zone_alloc_item(zone, udata, flags); 2256fc03d22bSJeff Roberson 2257e20a199fSJeff Roberson return (item); 2258bbee39c6SJeff Roberson } 2259bbee39c6SJeff Roberson 2260bbee39c6SJeff Roberson static uma_slab_t 2261e20a199fSJeff Roberson keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int flags) 2262bbee39c6SJeff Roberson { 2263bbee39c6SJeff Roberson uma_slab_t slab; 22646fd34d6fSJeff Roberson int reserve; 2265099a0e58SBosko Milekic 2266e20a199fSJeff Roberson mtx_assert(&keg->uk_lock, MA_OWNED); 2267bbee39c6SJeff Roberson slab = NULL; 22686fd34d6fSJeff Roberson reserve = 0; 22696fd34d6fSJeff Roberson if ((flags & M_USE_RESERVE) == 0) 22706fd34d6fSJeff Roberson reserve = keg->uk_reserve; 2271bbee39c6SJeff Roberson 2272bbee39c6SJeff Roberson for (;;) { 2273bbee39c6SJeff Roberson /* 2274bbee39c6SJeff Roberson * Find a slab with some space. Prefer slabs that are partially 2275bbee39c6SJeff Roberson * used over those that are totally full. This helps to reduce 2276bbee39c6SJeff Roberson * fragmentation. 2277bbee39c6SJeff Roberson */ 22786fd34d6fSJeff Roberson if (keg->uk_free > reserve) { 2279099a0e58SBosko Milekic if (!LIST_EMPTY(&keg->uk_part_slab)) { 2280099a0e58SBosko Milekic slab = LIST_FIRST(&keg->uk_part_slab); 2281bbee39c6SJeff Roberson } else { 2282099a0e58SBosko Milekic slab = LIST_FIRST(&keg->uk_free_slab); 2283bbee39c6SJeff Roberson LIST_REMOVE(slab, us_link); 2284099a0e58SBosko Milekic LIST_INSERT_HEAD(&keg->uk_part_slab, slab, 2285bbee39c6SJeff Roberson us_link); 2286bbee39c6SJeff Roberson } 2287e20a199fSJeff Roberson MPASS(slab->us_keg == keg); 2288bbee39c6SJeff Roberson return (slab); 2289bbee39c6SJeff Roberson } 2290bbee39c6SJeff Roberson 2291bbee39c6SJeff Roberson /* 2292bbee39c6SJeff Roberson * M_NOVM means don't ask at all! 2293bbee39c6SJeff Roberson */ 2294bbee39c6SJeff Roberson if (flags & M_NOVM) 2295bbee39c6SJeff Roberson break; 2296bbee39c6SJeff Roberson 2297e20a199fSJeff Roberson if (keg->uk_maxpages && keg->uk_pages >= keg->uk_maxpages) { 2298099a0e58SBosko Milekic keg->uk_flags |= UMA_ZFLAG_FULL; 2299e20a199fSJeff Roberson /* 2300e20a199fSJeff Roberson * If this is not a multi-zone, set the FULL bit. 2301e20a199fSJeff Roberson * Otherwise slab_multi() takes care of it. 2302e20a199fSJeff Roberson */ 23032f891cd5SPawel Jakub Dawidek if ((zone->uz_flags & UMA_ZFLAG_MULTI) == 0) { 2304e20a199fSJeff Roberson zone->uz_flags |= UMA_ZFLAG_FULL; 23052f891cd5SPawel Jakub Dawidek zone_log_warning(zone); 230654503a13SJonathan T. 
Looney zone_maxaction(zone); 23072f891cd5SPawel Jakub Dawidek } 2308ebc85edfSJeff Roberson if (flags & M_NOWAIT) 2309bbee39c6SJeff Roberson break; 2310c288b548SEitan Adler zone->uz_sleeps++; 2311e20a199fSJeff Roberson msleep(keg, &keg->uk_lock, PVM, "keglimit", 0); 2312bbee39c6SJeff Roberson continue; 2313bbee39c6SJeff Roberson } 2314e20a199fSJeff Roberson slab = keg_alloc_slab(keg, zone, flags); 2315bbee39c6SJeff Roberson /* 2316bbee39c6SJeff Roberson * If we got a slab here it's safe to mark it partially used 2317bbee39c6SJeff Roberson * and return. We assume that the caller is going to remove 2318bbee39c6SJeff Roberson * at least one item. 2319bbee39c6SJeff Roberson */ 2320bbee39c6SJeff Roberson if (slab) { 2321e20a199fSJeff Roberson MPASS(slab->us_keg == keg); 2322099a0e58SBosko Milekic LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link); 2323bbee39c6SJeff Roberson return (slab); 2324bbee39c6SJeff Roberson } 2325bbee39c6SJeff Roberson /* 2326bbee39c6SJeff Roberson * We might not have been able to get a slab but another cpu 2327bbee39c6SJeff Roberson * could have while we were unlocked. Check again before we 2328bbee39c6SJeff Roberson * fail. 2329bbee39c6SJeff Roberson */ 2330bbee39c6SJeff Roberson flags |= M_NOVM; 2331bbee39c6SJeff Roberson } 2332bbee39c6SJeff Roberson return (slab); 2333bbee39c6SJeff Roberson } 2334bbee39c6SJeff Roberson 2335e20a199fSJeff Roberson static uma_slab_t 2336e20a199fSJeff Roberson zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int flags) 2337e20a199fSJeff Roberson { 2338e20a199fSJeff Roberson uma_slab_t slab; 2339e20a199fSJeff Roberson 2340af526374SJeff Roberson if (keg == NULL) { 2341e20a199fSJeff Roberson keg = zone_first_keg(zone); 2342af526374SJeff Roberson KEG_LOCK(keg); 2343af526374SJeff Roberson } 2344e20a199fSJeff Roberson 2345e20a199fSJeff Roberson for (;;) { 2346e20a199fSJeff Roberson slab = keg_fetch_slab(keg, zone, flags); 2347e20a199fSJeff Roberson if (slab) 2348e20a199fSJeff Roberson return (slab); 2349e20a199fSJeff Roberson if (flags & (M_NOWAIT | M_NOVM)) 2350e20a199fSJeff Roberson break; 2351e20a199fSJeff Roberson } 2352af526374SJeff Roberson KEG_UNLOCK(keg); 2353e20a199fSJeff Roberson return (NULL); 2354e20a199fSJeff Roberson } 2355e20a199fSJeff Roberson 2356e20a199fSJeff Roberson /* 2357e20a199fSJeff Roberson * uma_zone_fetch_slab_multi: Fetches a slab from one available keg. Returns 2358af526374SJeff Roberson * with the keg locked. On NULL no lock is held. 2359e20a199fSJeff Roberson * 2360e20a199fSJeff Roberson * The last pointer is used to seed the search. It is not required. 2361e20a199fSJeff Roberson */ 2362e20a199fSJeff Roberson static uma_slab_t 2363e20a199fSJeff Roberson zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int rflags) 2364e20a199fSJeff Roberson { 2365e20a199fSJeff Roberson uma_klink_t klink; 2366e20a199fSJeff Roberson uma_slab_t slab; 2367e20a199fSJeff Roberson uma_keg_t keg; 2368e20a199fSJeff Roberson int flags; 2369e20a199fSJeff Roberson int empty; 2370e20a199fSJeff Roberson int full; 2371e20a199fSJeff Roberson 2372e20a199fSJeff Roberson /* 2373e20a199fSJeff Roberson * Don't wait on the first pass. This will skip limit tests 2374e20a199fSJeff Roberson * as well. We don't want to block if we can find a provider 2375e20a199fSJeff Roberson * without blocking. 2376e20a199fSJeff Roberson */ 2377e20a199fSJeff Roberson flags = (rflags & ~M_WAITOK) | M_NOWAIT; 2378e20a199fSJeff Roberson /* 2379e20a199fSJeff Roberson * Use the last slab allocated as a hint for where to start 2380e20a199fSJeff Roberson * the search. 
2381e20a199fSJeff Roberson */ 2382af526374SJeff Roberson if (last != NULL) { 2383e20a199fSJeff Roberson slab = keg_fetch_slab(last, zone, flags); 2384e20a199fSJeff Roberson if (slab) 2385e20a199fSJeff Roberson return (slab); 2386af526374SJeff Roberson KEG_UNLOCK(last); 2387e20a199fSJeff Roberson } 2388e20a199fSJeff Roberson /* 2389e20a199fSJeff Roberson * Loop until we have a slab in case of transient failures 2390e20a199fSJeff Roberson * while M_WAITOK is specified. I'm not sure this is 100% 2391e20a199fSJeff Roberson * required but we've done it for so long now. 2392e20a199fSJeff Roberson */ 2393e20a199fSJeff Roberson for (;;) { 2394e20a199fSJeff Roberson empty = 0; 2395e20a199fSJeff Roberson full = 0; 2396e20a199fSJeff Roberson /* 2397e20a199fSJeff Roberson * Search the available kegs for slabs. Be careful to hold the 2398e20a199fSJeff Roberson * correct lock while calling into the keg layer. 2399e20a199fSJeff Roberson */ 2400e20a199fSJeff Roberson LIST_FOREACH(klink, &zone->uz_kegs, kl_link) { 2401e20a199fSJeff Roberson keg = klink->kl_keg; 2402af526374SJeff Roberson KEG_LOCK(keg); 2403e20a199fSJeff Roberson if ((keg->uk_flags & UMA_ZFLAG_FULL) == 0) { 2404e20a199fSJeff Roberson slab = keg_fetch_slab(keg, zone, flags); 2405e20a199fSJeff Roberson if (slab) 2406e20a199fSJeff Roberson return (slab); 2407e20a199fSJeff Roberson } 2408e20a199fSJeff Roberson if (keg->uk_flags & UMA_ZFLAG_FULL) 2409e20a199fSJeff Roberson full++; 2410e20a199fSJeff Roberson else 2411e20a199fSJeff Roberson empty++; 2412af526374SJeff Roberson KEG_UNLOCK(keg); 2413e20a199fSJeff Roberson } 2414e20a199fSJeff Roberson if (rflags & (M_NOWAIT | M_NOVM)) 2415e20a199fSJeff Roberson break; 2416e20a199fSJeff Roberson flags = rflags; 2417e20a199fSJeff Roberson /* 2418e20a199fSJeff Roberson * All kegs are full. XXX We can't atomically check all kegs 2419e20a199fSJeff Roberson * and sleep so just sleep for a short period and retry. 2420e20a199fSJeff Roberson */ 2421e20a199fSJeff Roberson if (full && !empty) { 2422af526374SJeff Roberson ZONE_LOCK(zone); 2423e20a199fSJeff Roberson zone->uz_flags |= UMA_ZFLAG_FULL; 2424bf965959SSean Bruno zone->uz_sleeps++; 24252f891cd5SPawel Jakub Dawidek zone_log_warning(zone); 242654503a13SJonathan T. 
Looney zone_maxaction(zone); 2427af526374SJeff Roberson msleep(zone, zone->uz_lockptr, PVM, 2428af526374SJeff Roberson "zonelimit", hz/100); 2429e20a199fSJeff Roberson zone->uz_flags &= ~UMA_ZFLAG_FULL; 2430af526374SJeff Roberson ZONE_UNLOCK(zone); 2431e20a199fSJeff Roberson continue; 2432e20a199fSJeff Roberson } 2433e20a199fSJeff Roberson } 2434e20a199fSJeff Roberson return (NULL); 2435e20a199fSJeff Roberson } 2436e20a199fSJeff Roberson 2437d56368d7SBosko Milekic static void * 24380095a784SJeff Roberson slab_alloc_item(uma_keg_t keg, uma_slab_t slab) 2439bbee39c6SJeff Roberson { 2440bbee39c6SJeff Roberson void *item; 244185dcf349SGleb Smirnoff uint8_t freei; 2442bbee39c6SJeff Roberson 24430095a784SJeff Roberson MPASS(keg == slab->us_keg); 2444e20a199fSJeff Roberson mtx_assert(&keg->uk_lock, MA_OWNED); 2445099a0e58SBosko Milekic 2446ef72505eSJeff Roberson freei = BIT_FFS(SLAB_SETSIZE, &slab->us_free) - 1; 2447ef72505eSJeff Roberson BIT_CLR(SLAB_SETSIZE, freei, &slab->us_free); 2448099a0e58SBosko Milekic item = slab->us_data + (keg->uk_rsize * freei); 2449bbee39c6SJeff Roberson slab->us_freecount--; 2450099a0e58SBosko Milekic keg->uk_free--; 2451ef72505eSJeff Roberson 2452bbee39c6SJeff Roberson /* Move this slab to the full list */ 2453bbee39c6SJeff Roberson if (slab->us_freecount == 0) { 2454bbee39c6SJeff Roberson LIST_REMOVE(slab, us_link); 2455099a0e58SBosko Milekic LIST_INSERT_HEAD(&keg->uk_full_slab, slab, us_link); 2456bbee39c6SJeff Roberson } 2457bbee39c6SJeff Roberson 2458bbee39c6SJeff Roberson return (item); 2459bbee39c6SJeff Roberson } 2460bbee39c6SJeff Roberson 2461bbee39c6SJeff Roberson static int 24620095a784SJeff Roberson zone_import(uma_zone_t zone, void **bucket, int max, int flags) 24630095a784SJeff Roberson { 24640095a784SJeff Roberson uma_slab_t slab; 24650095a784SJeff Roberson uma_keg_t keg; 24660095a784SJeff Roberson int i; 24670095a784SJeff Roberson 24680095a784SJeff Roberson slab = NULL; 24690095a784SJeff Roberson keg = NULL; 2470af526374SJeff Roberson /* Try to keep the buckets totally full */ 24710095a784SJeff Roberson for (i = 0; i < max; ) { 24720095a784SJeff Roberson if ((slab = zone->uz_slab(zone, keg, flags)) == NULL) 24730095a784SJeff Roberson break; 24740095a784SJeff Roberson keg = slab->us_keg; 24756fd34d6fSJeff Roberson while (slab->us_freecount && i < max) { 24760095a784SJeff Roberson bucket[i++] = slab_alloc_item(keg, slab); 24776fd34d6fSJeff Roberson if (keg->uk_free <= keg->uk_reserve) 24786fd34d6fSJeff Roberson break; 24796fd34d6fSJeff Roberson } 24806fd34d6fSJeff Roberson /* Don't grab more than one slab at a time. */ 24810095a784SJeff Roberson flags &= ~M_WAITOK; 24820095a784SJeff Roberson flags |= M_NOWAIT; 24830095a784SJeff Roberson } 24840095a784SJeff Roberson if (slab != NULL) 24850095a784SJeff Roberson KEG_UNLOCK(keg); 24860095a784SJeff Roberson 24870095a784SJeff Roberson return i; 24880095a784SJeff Roberson } 24890095a784SJeff Roberson 2490fc03d22bSJeff Roberson static uma_bucket_t 24916fd34d6fSJeff Roberson zone_alloc_bucket(uma_zone_t zone, void *udata, int flags) 2492bbee39c6SJeff Roberson { 2493bbee39c6SJeff Roberson uma_bucket_t bucket; 24940095a784SJeff Roberson int max; 2495bbee39c6SJeff Roberson 24966fd34d6fSJeff Roberson /* Don't wait for buckets, preserve caller's NOVM setting. 
*/ 24976fd34d6fSJeff Roberson bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM)); 24980095a784SJeff Roberson if (bucket == NULL) 2499f7104ccdSAlexander Motin return (NULL); 25000095a784SJeff Roberson 2501af526374SJeff Roberson max = MIN(bucket->ub_entries, zone->uz_count); 25020095a784SJeff Roberson bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket, 25030095a784SJeff Roberson max, flags); 25040095a784SJeff Roberson 25050095a784SJeff Roberson /* 25060095a784SJeff Roberson * Initialize the memory if necessary. 25070095a784SJeff Roberson */ 25080095a784SJeff Roberson if (bucket->ub_cnt != 0 && zone->uz_init != NULL) { 2509099a0e58SBosko Milekic int i; 2510bbee39c6SJeff Roberson 25110095a784SJeff Roberson for (i = 0; i < bucket->ub_cnt; i++) 2512e20a199fSJeff Roberson if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size, 25130095a784SJeff Roberson flags) != 0) 2514b23f72e9SBrian Feldman break; 2515b23f72e9SBrian Feldman /* 2516b23f72e9SBrian Feldman * If we couldn't initialize the whole bucket, put the 2517b23f72e9SBrian Feldman * rest back onto the freelist. 2518b23f72e9SBrian Feldman */ 2519b23f72e9SBrian Feldman if (i != bucket->ub_cnt) { 2520af526374SJeff Roberson zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i], 25210095a784SJeff Roberson bucket->ub_cnt - i); 2522a5a262c6SBosko Milekic #ifdef INVARIANTS 25230095a784SJeff Roberson bzero(&bucket->ub_bucket[i], 25240095a784SJeff Roberson sizeof(void *) * (bucket->ub_cnt - i)); 2525a5a262c6SBosko Milekic #endif 2526b23f72e9SBrian Feldman bucket->ub_cnt = i; 2527b23f72e9SBrian Feldman } 2528099a0e58SBosko Milekic } 2529099a0e58SBosko Milekic 2530f7104ccdSAlexander Motin if (bucket->ub_cnt == 0) { 25316fd34d6fSJeff Roberson bucket_free(zone, bucket, udata); 2532fc03d22bSJeff Roberson atomic_add_long(&zone->uz_fails, 1); 2533fc03d22bSJeff Roberson return (NULL); 2534bbee39c6SJeff Roberson } 2535fc03d22bSJeff Roberson 2536fc03d22bSJeff Roberson return (bucket); 2537fc03d22bSJeff Roberson } 2538fc03d22bSJeff Roberson 25398355f576SJeff Roberson /* 25400095a784SJeff Roberson * Allocates a single item from a zone. 25418355f576SJeff Roberson * 25428355f576SJeff Roberson * Arguments 25438355f576SJeff Roberson * zone The zone to alloc for. 25448355f576SJeff Roberson * udata The data to be passed to the constructor. 2545a163d034SWarner Losh * flags M_WAITOK, M_NOWAIT, M_ZERO. 25468355f576SJeff Roberson * 25478355f576SJeff Roberson * Returns 25488355f576SJeff Roberson * NULL if there is no memory and M_NOWAIT is set 2549bbee39c6SJeff Roberson * An item if successful 25508355f576SJeff Roberson */ 25518355f576SJeff Roberson 25528355f576SJeff Roberson static void * 2553e20a199fSJeff Roberson zone_alloc_item(uma_zone_t zone, void *udata, int flags) 25548355f576SJeff Roberson { 25558355f576SJeff Roberson void *item; 25568355f576SJeff Roberson 25578355f576SJeff Roberson item = NULL; 25588355f576SJeff Roberson 25590095a784SJeff Roberson if (zone->uz_import(zone->uz_arg, &item, 1, flags) != 1) 25600095a784SJeff Roberson goto fail; 25610095a784SJeff Roberson atomic_add_long(&zone->uz_allocs, 1); 25628355f576SJeff Roberson 2563099a0e58SBosko Milekic /* 2564099a0e58SBosko Milekic * We have to call both the zone's init (not the keg's init) 2565099a0e58SBosko Milekic * and the zone's ctor. This is because the item is going from 2566099a0e58SBosko Milekic * a keg slab directly to the user, and the user is expecting it 2567099a0e58SBosko Milekic * to be both zone-init'd as well as zone-ctor'd. 
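 * (Editor's aside, not original text: the keg-level uk_init, when one is
 * set via uma_zone_set_init(), already ran for every item when the slab
 * was assembled in keg_alloc_slab(), so only the zone-level layers are
 * applied at this point.)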
2568099a0e58SBosko Milekic */ 2569b23f72e9SBrian Feldman if (zone->uz_init != NULL) { 2570e20a199fSJeff Roberson if (zone->uz_init(item, zone->uz_size, flags) != 0) { 25710095a784SJeff Roberson zone_free_item(zone, item, udata, SKIP_FINI); 25720095a784SJeff Roberson goto fail; 2573b23f72e9SBrian Feldman } 2574b23f72e9SBrian Feldman } 2575b23f72e9SBrian Feldman if (zone->uz_ctor != NULL) { 2576e20a199fSJeff Roberson if (zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) { 25770095a784SJeff Roberson zone_free_item(zone, item, udata, SKIP_DTOR); 25780095a784SJeff Roberson goto fail; 2579b23f72e9SBrian Feldman } 2580b23f72e9SBrian Feldman } 2581ef72505eSJeff Roberson #ifdef INVARIANTS 25820095a784SJeff Roberson uma_dbg_alloc(zone, NULL, item); 2583ef72505eSJeff Roberson #endif 25842cc35ff9SJeff Roberson if (flags & M_ZERO) 258548343a2fSGleb Smirnoff uma_zero_item(item, zone); 25868355f576SJeff Roberson 25871431a748SGleb Smirnoff CTR3(KTR_UMA, "zone_alloc_item item %p from %s(%p)", item, 25881431a748SGleb Smirnoff zone->uz_name, zone); 25891431a748SGleb Smirnoff 25908355f576SJeff Roberson return (item); 25910095a784SJeff Roberson 25920095a784SJeff Roberson fail: 25931431a748SGleb Smirnoff CTR2(KTR_UMA, "zone_alloc_item failed from %s(%p)", 25941431a748SGleb Smirnoff zone->uz_name, zone); 25950095a784SJeff Roberson atomic_add_long(&zone->uz_fails, 1); 25960095a784SJeff Roberson return (NULL); 25978355f576SJeff Roberson } 25988355f576SJeff Roberson 25998355f576SJeff Roberson /* See uma.h */ 26008355f576SJeff Roberson void 26018355f576SJeff Roberson uma_zfree_arg(uma_zone_t zone, void *item, void *udata) 26028355f576SJeff Roberson { 26038355f576SJeff Roberson uma_cache_t cache; 26048355f576SJeff Roberson uma_bucket_t bucket; 26054d104ba0SAlexander Motin int lockfail; 26068355f576SJeff Roberson int cpu; 26078355f576SJeff Roberson 2608e866d8f0SMark Murray /* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */ 2609e866d8f0SMark Murray random_harvest_fast_uma(&zone, sizeof(zone), 1, RANDOM_UMA); 261010cb2424SMark Murray 26113659f747SRobert Watson CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread, 26123659f747SRobert Watson zone->uz_name); 26133659f747SRobert Watson 2614d9e2e68dSMark Johnston KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(), 26151067a2baSJonathan T. Looney ("uma_zfree_arg: called with spinlock or critical section held")); 26161067a2baSJonathan T. Looney 261720ed0cb0SMatthew D Fleming /* uma_zfree(..., NULL) does nothing, to match free(9). 
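 * (Editor's illustrative sketch, not original text; "buf_zone" is a
 * hypothetical zone.)  Error paths may therefore free unconditionally:
 *
 *	buf = uma_zalloc(buf_zone, M_NOWAIT);	may be NULL on failure
 *	...
 *	uma_zfree(buf_zone, buf);		a no-op when buf is NULL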
*/ 261820ed0cb0SMatthew D Fleming if (item == NULL) 261920ed0cb0SMatthew D Fleming return; 26208d689e04SGleb Smirnoff #ifdef DEBUG_MEMGUARD 26218d689e04SGleb Smirnoff if (is_memguard_addr(item)) { 2622bc9d08e1SMark Johnston if (zone->uz_dtor != NULL) 26238d689e04SGleb Smirnoff zone->uz_dtor(item, zone->uz_size, udata); 2624bc9d08e1SMark Johnston if (zone->uz_fini != NULL) 26258d689e04SGleb Smirnoff zone->uz_fini(item, zone->uz_size); 26268d689e04SGleb Smirnoff memguard_free(item); 26278d689e04SGleb Smirnoff return; 26288d689e04SGleb Smirnoff } 26298d689e04SGleb Smirnoff #endif 26305d1ae027SRobert Watson #ifdef INVARIANTS 2631e20a199fSJeff Roberson if (zone->uz_flags & UMA_ZONE_MALLOC) 26325d1ae027SRobert Watson uma_dbg_free(zone, udata, item); 26335d1ae027SRobert Watson else 26345d1ae027SRobert Watson uma_dbg_free(zone, NULL, item); 26355d1ae027SRobert Watson #endif 2636fc03d22bSJeff Roberson if (zone->uz_dtor != NULL) 2637ef72505eSJeff Roberson zone->uz_dtor(item, zone->uz_size, udata); 2638ef72505eSJeff Roberson 2639af7f9b97SJeff Roberson /* 2640af7f9b97SJeff Roberson * The race here is acceptable. If we miss it we'll just have to wait 2641af7f9b97SJeff Roberson * a little longer for the limits to be reset. 2642af7f9b97SJeff Roberson */ 2643e20a199fSJeff Roberson if (zone->uz_flags & UMA_ZFLAG_FULL) 2644fc03d22bSJeff Roberson goto zfree_item; 2645af7f9b97SJeff Roberson 26465d1ae027SRobert Watson /* 26475d1ae027SRobert Watson * If possible, free to the per-CPU cache. There are two 26485d1ae027SRobert Watson * requirements for safe access to the per-CPU cache: (1) the thread 26495d1ae027SRobert Watson * accessing the cache must not be preempted or yield during access, 26505d1ae027SRobert Watson * and (2) the thread must not migrate CPUs without switching which 26515d1ae027SRobert Watson * cache it accesses. We rely on a critical section to prevent 26525d1ae027SRobert Watson * preemption and migration. We release the critical section in 26535d1ae027SRobert Watson * order to acquire the zone mutex if we are unable to free to the 26545d1ae027SRobert Watson * current cache; when we re-acquire the critical section, we must 26555d1ae027SRobert Watson * detect and handle migration if it has occurred. 26565d1ae027SRobert Watson */ 2657a553d4b8SJeff Roberson zfree_restart: 26585d1ae027SRobert Watson critical_enter(); 26595d1ae027SRobert Watson cpu = curcpu; 26608355f576SJeff Roberson cache = &zone->uz_cpu[cpu]; 26618355f576SJeff Roberson 26628355f576SJeff Roberson zfree_start: 2663a553d4b8SJeff Roberson /* 2664fc03d22bSJeff Roberson * Try to free into the allocbucket first to give LIFO ordering 2665fc03d22bSJeff Roberson * for cache-hot data structures. Spill over into the freebucket 2666fc03d22bSJeff Roberson * if necessary. Alloc will swap them if one runs dry. 
2667a553d4b8SJeff Roberson */ 2668fc03d22bSJeff Roberson bucket = cache->uc_allocbucket; 2669fc03d22bSJeff Roberson if (bucket == NULL || bucket->ub_cnt >= bucket->ub_entries) 2670fc03d22bSJeff Roberson bucket = cache->uc_freebucket; 2671fc03d22bSJeff Roberson if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) { 2672cae33c14SJeff Roberson KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL, 26738355f576SJeff Roberson ("uma_zfree: Freeing to non free bucket index.")); 2674cae33c14SJeff Roberson bucket->ub_bucket[bucket->ub_cnt] = item; 2675cae33c14SJeff Roberson bucket->ub_cnt++; 2676773df9abSRobert Watson cache->uc_frees++; 26775d1ae027SRobert Watson critical_exit(); 26788355f576SJeff Roberson return; 2679fc03d22bSJeff Roberson } 2680fc03d22bSJeff Roberson 26818355f576SJeff Roberson /* 26825d1ae027SRobert Watson * We must go back to the zone, which requires acquiring the zone lock, 26835d1ae027SRobert Watson * which in turn means we must release and re-acquire the critical 26845d1ae027SRobert Watson * section. Since the critical section is released, we may be 26855d1ae027SRobert Watson * preempted or migrate. As such, make sure not to maintain any 26865d1ae027SRobert Watson * thread-local state specific to the cache from prior to releasing 26875d1ae027SRobert Watson * the critical section. 26888355f576SJeff Roberson */ 26895d1ae027SRobert Watson critical_exit(); 2690fc03d22bSJeff Roberson if (zone->uz_count == 0 || bucketdisable) 2691fc03d22bSJeff Roberson goto zfree_item; 2692fc03d22bSJeff Roberson 26934d104ba0SAlexander Motin lockfail = 0; 26944d104ba0SAlexander Motin if (ZONE_TRYLOCK(zone) == 0) { 26954d104ba0SAlexander Motin /* Record contention to size the buckets. */ 26968355f576SJeff Roberson ZONE_LOCK(zone); 26974d104ba0SAlexander Motin lockfail = 1; 26984d104ba0SAlexander Motin } 26995d1ae027SRobert Watson critical_enter(); 27005d1ae027SRobert Watson cpu = curcpu; 27015d1ae027SRobert Watson cache = &zone->uz_cpu[cpu]; 27028355f576SJeff Roberson 2703fc03d22bSJeff Roberson /* 2704fc03d22bSJeff Roberson * Since we have locked the zone we may as well send back our stats. 2705fc03d22bSJeff Roberson */ 27060095a784SJeff Roberson atomic_add_long(&zone->uz_allocs, cache->uc_allocs); 27070095a784SJeff Roberson atomic_add_long(&zone->uz_frees, cache->uc_frees); 2708f4ff923bSRobert Watson cache->uc_allocs = 0; 2709f4ff923bSRobert Watson cache->uc_frees = 0; 2710f4ff923bSRobert Watson 27118355f576SJeff Roberson bucket = cache->uc_freebucket; 2712fc03d22bSJeff Roberson if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) { 2713fc03d22bSJeff Roberson ZONE_UNLOCK(zone); 2714fc03d22bSJeff Roberson goto zfree_start; 2715fc03d22bSJeff Roberson } 27168355f576SJeff Roberson cache->uc_freebucket = NULL; 2717afa5d703SMark Johnston /* We are no longer associated with this CPU. */ 2718afa5d703SMark Johnston critical_exit(); 27198355f576SJeff Roberson 27208355f576SJeff Roberson /* Can we throw this on the zone full list? 
*/ 27218355f576SJeff Roberson if (bucket != NULL) { 27221431a748SGleb Smirnoff CTR3(KTR_UMA, 27231431a748SGleb Smirnoff "uma_zfree: zone %s(%p) putting bucket %p on free list", 27241431a748SGleb Smirnoff zone->uz_name, zone, bucket); 2725cae33c14SJeff Roberson /* ub_cnt is pointing to the last free item */ 2726cae33c14SJeff Roberson KASSERT(bucket->ub_cnt != 0, 27278355f576SJeff Roberson ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n")); 2728fc03d22bSJeff Roberson LIST_INSERT_HEAD(&zone->uz_buckets, bucket, ub_link); 27298355f576SJeff Roberson } 2730fc03d22bSJeff Roberson 27314d104ba0SAlexander Motin /* 27324d104ba0SAlexander Motin * We bump the uz count when the cache size is insufficient to 27334d104ba0SAlexander Motin * handle the working set. 27344d104ba0SAlexander Motin */ 27354d104ba0SAlexander Motin if (lockfail && zone->uz_count < BUCKET_MAX) 27364d104ba0SAlexander Motin zone->uz_count++; 2737a553d4b8SJeff Roberson ZONE_UNLOCK(zone); 2738a553d4b8SJeff Roberson 27396fd34d6fSJeff Roberson bucket = bucket_alloc(zone, udata, M_NOWAIT); 27401431a748SGleb Smirnoff CTR3(KTR_UMA, "uma_zfree: zone %s(%p) allocated bucket %p", 27411431a748SGleb Smirnoff zone->uz_name, zone, bucket); 27424741dcbfSJeff Roberson if (bucket) { 2743fc03d22bSJeff Roberson critical_enter(); 2744fc03d22bSJeff Roberson cpu = curcpu; 2745fc03d22bSJeff Roberson cache = &zone->uz_cpu[cpu]; 2746fc03d22bSJeff Roberson if (cache->uc_freebucket == NULL) { 2747fc03d22bSJeff Roberson cache->uc_freebucket = bucket; 2748fc03d22bSJeff Roberson goto zfree_start; 2749fc03d22bSJeff Roberson } 2750fc03d22bSJeff Roberson /* 2751fc03d22bSJeff Roberson * We lost the race, start over. We have to drop our 2752fc03d22bSJeff Roberson * critical section to free the bucket. 2753fc03d22bSJeff Roberson */ 2754fc03d22bSJeff Roberson critical_exit(); 27556fd34d6fSJeff Roberson bucket_free(zone, bucket, udata); 2756a553d4b8SJeff Roberson goto zfree_restart; 27578355f576SJeff Roberson } 27588355f576SJeff Roberson 2759a553d4b8SJeff Roberson /* 2760a553d4b8SJeff Roberson * If nothing else caught this, we'll just do an internal free. 2761a553d4b8SJeff Roberson */ 2762fc03d22bSJeff Roberson zfree_item: 27630095a784SJeff Roberson zone_free_item(zone, item, udata, SKIP_DTOR); 27648355f576SJeff Roberson 27658355f576SJeff Roberson return; 27668355f576SJeff Roberson } 27678355f576SJeff Roberson 27688355f576SJeff Roberson static void 27690095a784SJeff Roberson slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item) 27708355f576SJeff Roberson { 277185dcf349SGleb Smirnoff uint8_t freei; 2772099a0e58SBosko Milekic 27730095a784SJeff Roberson mtx_assert(&keg->uk_lock, MA_OWNED); 2774e20a199fSJeff Roberson MPASS(keg == slab->us_keg); 27758355f576SJeff Roberson 27768355f576SJeff Roberson /* Do we need to remove from any lists? */ 2777099a0e58SBosko Milekic if (slab->us_freecount+1 == keg->uk_ipers) { 27788355f576SJeff Roberson LIST_REMOVE(slab, us_link); 2779099a0e58SBosko Milekic LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link); 27808355f576SJeff Roberson } else if (slab->us_freecount == 0) { 27818355f576SJeff Roberson LIST_REMOVE(slab, us_link); 2782099a0e58SBosko Milekic LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link); 27838355f576SJeff Roberson } 27848355f576SJeff Roberson 2785ef72505eSJeff Roberson /* Slab management. 
*/ 2786ef72505eSJeff Roberson freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize; 2787ef72505eSJeff Roberson BIT_SET(SLAB_SETSIZE, freei, &slab->us_free); 27888355f576SJeff Roberson slab->us_freecount++; 27898355f576SJeff Roberson 2790ef72505eSJeff Roberson /* Keg statistics. */ 2791099a0e58SBosko Milekic keg->uk_free++; 27920095a784SJeff Roberson } 27930095a784SJeff Roberson 27940095a784SJeff Roberson static void 27950095a784SJeff Roberson zone_release(uma_zone_t zone, void **bucket, int cnt) 27960095a784SJeff Roberson { 27970095a784SJeff Roberson void *item; 27980095a784SJeff Roberson uma_slab_t slab; 27990095a784SJeff Roberson uma_keg_t keg; 28000095a784SJeff Roberson uint8_t *mem; 28010095a784SJeff Roberson int clearfull; 28020095a784SJeff Roberson int i; 28038355f576SJeff Roberson 2804e20a199fSJeff Roberson clearfull = 0; 28050095a784SJeff Roberson keg = zone_first_keg(zone); 2806af526374SJeff Roberson KEG_LOCK(keg); 28070095a784SJeff Roberson for (i = 0; i < cnt; i++) { 28080095a784SJeff Roberson item = bucket[i]; 28090095a784SJeff Roberson if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) { 28100095a784SJeff Roberson mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK)); 28110095a784SJeff Roberson if (zone->uz_flags & UMA_ZONE_HASH) { 28120095a784SJeff Roberson slab = hash_sfind(&keg->uk_hash, mem); 28130095a784SJeff Roberson } else { 28140095a784SJeff Roberson mem += keg->uk_pgoff; 28150095a784SJeff Roberson slab = (uma_slab_t)mem; 28160095a784SJeff Roberson } 28170095a784SJeff Roberson } else { 28180095a784SJeff Roberson slab = vtoslab((vm_offset_t)item); 28190095a784SJeff Roberson if (slab->us_keg != keg) { 28200095a784SJeff Roberson KEG_UNLOCK(keg); 28210095a784SJeff Roberson keg = slab->us_keg; 28220095a784SJeff Roberson KEG_LOCK(keg); 28230095a784SJeff Roberson } 28240095a784SJeff Roberson } 28250095a784SJeff Roberson slab_free_item(keg, slab, item); 2826099a0e58SBosko Milekic if (keg->uk_flags & UMA_ZFLAG_FULL) { 2827e20a199fSJeff Roberson if (keg->uk_pages < keg->uk_maxpages) { 2828099a0e58SBosko Milekic keg->uk_flags &= ~UMA_ZFLAG_FULL; 2829e20a199fSJeff Roberson clearfull = 1; 2830e20a199fSJeff Roberson } 2831af7f9b97SJeff Roberson 283277380291SMohan Srinivasan /* 2833ef72505eSJeff Roberson * We can handle one more allocation. Since we're 2834ef72505eSJeff Roberson * clearing ZFLAG_FULL, wake up all procs blocked 2835ef72505eSJeff Roberson * on pages. This should be uncommon, so keeping this 2836ef72505eSJeff Roberson * simple for now (rather than adding count of blocked 283777380291SMohan Srinivasan * threads etc). 283877380291SMohan Srinivasan */ 283977380291SMohan Srinivasan wakeup(keg); 2840af7f9b97SJeff Roberson } 28410095a784SJeff Roberson } 2842af526374SJeff Roberson KEG_UNLOCK(keg); 28430095a784SJeff Roberson if (clearfull) { 2844af526374SJeff Roberson ZONE_LOCK(zone); 2845e20a199fSJeff Roberson zone->uz_flags &= ~UMA_ZFLAG_FULL; 2846e20a199fSJeff Roberson wakeup(zone); 2847605cbd6aSJeff Roberson ZONE_UNLOCK(zone); 2848af526374SJeff Roberson } 2849ef72505eSJeff Roberson 28508355f576SJeff Roberson } 28518355f576SJeff Roberson 28520095a784SJeff Roberson /* 28530095a784SJeff Roberson * Frees a single item to any zone. 
28540095a784SJeff Roberson * 28550095a784SJeff Roberson * Arguments: 28560095a784SJeff Roberson * zone The zone to free to 28570095a784SJeff Roberson * item The item we're freeing 28580095a784SJeff Roberson * udata User supplied data for the dtor 28590095a784SJeff Roberson * skip Skip dtors and finis 28600095a784SJeff Roberson */ 28610095a784SJeff Roberson static void 28620095a784SJeff Roberson zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip) 28630095a784SJeff Roberson { 28640095a784SJeff Roberson 28650095a784SJeff Roberson #ifdef INVARIANTS 28660095a784SJeff Roberson if (skip == SKIP_NONE) { 28670095a784SJeff Roberson if (zone->uz_flags & UMA_ZONE_MALLOC) 28680095a784SJeff Roberson uma_dbg_free(zone, udata, item); 28690095a784SJeff Roberson else 28700095a784SJeff Roberson uma_dbg_free(zone, NULL, item); 28710095a784SJeff Roberson } 28720095a784SJeff Roberson #endif 28730095a784SJeff Roberson if (skip < SKIP_DTOR && zone->uz_dtor) 28740095a784SJeff Roberson zone->uz_dtor(item, zone->uz_size, udata); 28750095a784SJeff Roberson 28760095a784SJeff Roberson if (skip < SKIP_FINI && zone->uz_fini) 28770095a784SJeff Roberson zone->uz_fini(item, zone->uz_size); 28780095a784SJeff Roberson 28790095a784SJeff Roberson atomic_add_long(&zone->uz_frees, 1); 28800095a784SJeff Roberson zone->uz_release(zone->uz_arg, &item, 1); 28810095a784SJeff Roberson } 28820095a784SJeff Roberson 28838355f576SJeff Roberson /* See uma.h */ 28841c6cae97SLawrence Stewart int 2885736ee590SJeff Roberson uma_zone_set_max(uma_zone_t zone, int nitems) 2886736ee590SJeff Roberson { 2887099a0e58SBosko Milekic uma_keg_t keg; 2888099a0e58SBosko Milekic 2889e20a199fSJeff Roberson keg = zone_first_keg(zone); 28900095a784SJeff Roberson if (keg == NULL) 28910095a784SJeff Roberson return (0); 2892af526374SJeff Roberson KEG_LOCK(keg); 2893e20a199fSJeff Roberson keg->uk_maxpages = (nitems / keg->uk_ipers) * keg->uk_ppera; 2894099a0e58SBosko Milekic if (keg->uk_maxpages * keg->uk_ipers < nitems) 2895e20a199fSJeff Roberson keg->uk_maxpages += keg->uk_ppera; 289657223e99SAndriy Gapon nitems = (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers; 2897af526374SJeff Roberson KEG_UNLOCK(keg); 28981c6cae97SLawrence Stewart 28991c6cae97SLawrence Stewart return (nitems); 2900736ee590SJeff Roberson } 2901736ee590SJeff Roberson 2902736ee590SJeff Roberson /* See uma.h */ 2903e49471b0SAndre Oppermann int 2904e49471b0SAndre Oppermann uma_zone_get_max(uma_zone_t zone) 2905e49471b0SAndre Oppermann { 2906e49471b0SAndre Oppermann int nitems; 2907e49471b0SAndre Oppermann uma_keg_t keg; 2908e49471b0SAndre Oppermann 2909e49471b0SAndre Oppermann keg = zone_first_keg(zone); 29100095a784SJeff Roberson if (keg == NULL) 29110095a784SJeff Roberson return (0); 2912af526374SJeff Roberson KEG_LOCK(keg); 291357223e99SAndriy Gapon nitems = (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers; 2914af526374SJeff Roberson KEG_UNLOCK(keg); 2915e49471b0SAndre Oppermann 2916e49471b0SAndre Oppermann return (nitems); 2917e49471b0SAndre Oppermann } 2918e49471b0SAndre Oppermann 2919e49471b0SAndre Oppermann /* See uma.h */ 29202f891cd5SPawel Jakub Dawidek void 29212f891cd5SPawel Jakub Dawidek uma_zone_set_warning(uma_zone_t zone, const char *warning) 29222f891cd5SPawel Jakub Dawidek { 29232f891cd5SPawel Jakub Dawidek 29242f891cd5SPawel Jakub Dawidek ZONE_LOCK(zone); 29252f891cd5SPawel Jakub Dawidek zone->uz_warning = warning; 29262f891cd5SPawel Jakub Dawidek ZONE_UNLOCK(zone); 29272f891cd5SPawel Jakub Dawidek } 29282f891cd5SPawel Jakub Dawidek 
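/*
 * Editor's illustrative sketch, not part of the original file: a typical
 * consumer caps a zone and attaches a warning so that running into the
 * limit is visible.  The zone name, item type and limit are hypothetical.
 *
 *	foo_zone = uma_zcreate("foodata", sizeof(struct foo),
 *	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
 *	uma_zone_set_max(foo_zone, 4096);
 *	uma_zone_set_warning(foo_zone, "out of foodata items");
 *
 * Note that uma_zone_set_max() below rounds the request up to slab
 * granularity and returns the effective limit, which may be larger than
 * the value asked for.
 */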
29292f891cd5SPawel Jakub Dawidek /* See uma.h */ 293054503a13SJonathan T. Looney void 293154503a13SJonathan T. Looney uma_zone_set_maxaction(uma_zone_t zone, uma_maxaction_t maxaction) 293254503a13SJonathan T. Looney { 293354503a13SJonathan T. Looney 293454503a13SJonathan T. Looney ZONE_LOCK(zone); 2935e60b2fcbSGleb Smirnoff TASK_INIT(&zone->uz_maxaction, 0, (task_fn_t *)maxaction, zone); 293654503a13SJonathan T. Looney ZONE_UNLOCK(zone); 293754503a13SJonathan T. Looney } 293854503a13SJonathan T. Looney 293954503a13SJonathan T. Looney /* See uma.h */ 2940c4ae7908SLawrence Stewart int 2941c4ae7908SLawrence Stewart uma_zone_get_cur(uma_zone_t zone) 2942c4ae7908SLawrence Stewart { 2943c4ae7908SLawrence Stewart int64_t nitems; 2944c4ae7908SLawrence Stewart u_int i; 2945c4ae7908SLawrence Stewart 2946c4ae7908SLawrence Stewart ZONE_LOCK(zone); 2947c4ae7908SLawrence Stewart nitems = zone->uz_allocs - zone->uz_frees; 2948c4ae7908SLawrence Stewart CPU_FOREACH(i) { 2949c4ae7908SLawrence Stewart /* 2950c4ae7908SLawrence Stewart * See the comment in sysctl_vm_zone_stats() regarding the 2951c4ae7908SLawrence Stewart * safety of accessing the per-cpu caches. With the zone lock 2952c4ae7908SLawrence Stewart * held, it is safe, but can potentially result in stale data. 2953c4ae7908SLawrence Stewart */ 2954c4ae7908SLawrence Stewart nitems += zone->uz_cpu[i].uc_allocs - 2955c4ae7908SLawrence Stewart zone->uz_cpu[i].uc_frees; 2956c4ae7908SLawrence Stewart } 2957c4ae7908SLawrence Stewart ZONE_UNLOCK(zone); 2958c4ae7908SLawrence Stewart 2959c4ae7908SLawrence Stewart return (nitems < 0 ? 0 : nitems); 2960c4ae7908SLawrence Stewart } 2961c4ae7908SLawrence Stewart 2962c4ae7908SLawrence Stewart /* See uma.h */ 2963736ee590SJeff Roberson void 2964099a0e58SBosko Milekic uma_zone_set_init(uma_zone_t zone, uma_init uminit) 2965099a0e58SBosko Milekic { 2966e20a199fSJeff Roberson uma_keg_t keg; 2967e20a199fSJeff Roberson 2968e20a199fSJeff Roberson keg = zone_first_keg(zone); 29690095a784SJeff Roberson KASSERT(keg != NULL, ("uma_zone_set_init: Invalid zone type")); 2970af526374SJeff Roberson KEG_LOCK(keg); 2971e20a199fSJeff Roberson KASSERT(keg->uk_pages == 0, 2972099a0e58SBosko Milekic ("uma_zone_set_init on non-empty keg")); 2973e20a199fSJeff Roberson keg->uk_init = uminit; 2974af526374SJeff Roberson KEG_UNLOCK(keg); 2975099a0e58SBosko Milekic } 2976099a0e58SBosko Milekic 2977099a0e58SBosko Milekic /* See uma.h */ 2978099a0e58SBosko Milekic void 2979099a0e58SBosko Milekic uma_zone_set_fini(uma_zone_t zone, uma_fini fini) 2980099a0e58SBosko Milekic { 2981e20a199fSJeff Roberson uma_keg_t keg; 2982e20a199fSJeff Roberson 2983e20a199fSJeff Roberson keg = zone_first_keg(zone); 29841d2c0c46SDmitry Chagin KASSERT(keg != NULL, ("uma_zone_set_fini: Invalid zone type")); 2985af526374SJeff Roberson KEG_LOCK(keg); 2986e20a199fSJeff Roberson KASSERT(keg->uk_pages == 0, 2987099a0e58SBosko Milekic ("uma_zone_set_fini on non-empty keg")); 2988e20a199fSJeff Roberson keg->uk_fini = fini; 2989af526374SJeff Roberson KEG_UNLOCK(keg); 2990099a0e58SBosko Milekic } 2991099a0e58SBosko Milekic 2992099a0e58SBosko Milekic /* See uma.h */ 2993099a0e58SBosko Milekic void 2994099a0e58SBosko Milekic uma_zone_set_zinit(uma_zone_t zone, uma_init zinit) 2995099a0e58SBosko Milekic { 2996af526374SJeff Roberson 2997099a0e58SBosko Milekic ZONE_LOCK(zone); 2998e20a199fSJeff Roberson KASSERT(zone_first_keg(zone)->uk_pages == 0, 2999099a0e58SBosko Milekic ("uma_zone_set_zinit on non-empty keg")); 3000099a0e58SBosko Milekic zone->uz_init = zinit; 
3001099a0e58SBosko Milekic ZONE_UNLOCK(zone); 3002099a0e58SBosko Milekic } 3003099a0e58SBosko Milekic 3004099a0e58SBosko Milekic /* See uma.h */ 3005099a0e58SBosko Milekic void 3006099a0e58SBosko Milekic uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini) 3007099a0e58SBosko Milekic { 3008af526374SJeff Roberson 3009099a0e58SBosko Milekic ZONE_LOCK(zone); 3010e20a199fSJeff Roberson KASSERT(zone_first_keg(zone)->uk_pages == 0, 3011099a0e58SBosko Milekic ("uma_zone_set_zfini on non-empty keg")); 3012099a0e58SBosko Milekic zone->uz_fini = zfini; 3013099a0e58SBosko Milekic ZONE_UNLOCK(zone); 3014099a0e58SBosko Milekic } 3015099a0e58SBosko Milekic 3016099a0e58SBosko Milekic /* See uma.h */ 3017b23f72e9SBrian Feldman /* XXX uk_freef is not actually used with the zone locked */ 3018099a0e58SBosko Milekic void 30198355f576SJeff Roberson uma_zone_set_freef(uma_zone_t zone, uma_free freef) 30208355f576SJeff Roberson { 30210095a784SJeff Roberson uma_keg_t keg; 3022e20a199fSJeff Roberson 30230095a784SJeff Roberson keg = zone_first_keg(zone); 30241d2c0c46SDmitry Chagin KASSERT(keg != NULL, ("uma_zone_set_freef: Invalid zone type")); 3025af526374SJeff Roberson KEG_LOCK(keg); 30260095a784SJeff Roberson keg->uk_freef = freef; 3027af526374SJeff Roberson KEG_UNLOCK(keg); 30288355f576SJeff Roberson } 30298355f576SJeff Roberson 30308355f576SJeff Roberson /* See uma.h */ 3031b23f72e9SBrian Feldman /* XXX uk_allocf is not actually used with the zone locked */ 30328355f576SJeff Roberson void 30338355f576SJeff Roberson uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf) 30348355f576SJeff Roberson { 3035e20a199fSJeff Roberson uma_keg_t keg; 3036e20a199fSJeff Roberson 3037e20a199fSJeff Roberson keg = zone_first_keg(zone); 3038af526374SJeff Roberson KEG_LOCK(keg); 3039e20a199fSJeff Roberson keg->uk_allocf = allocf; 3040af526374SJeff Roberson KEG_UNLOCK(keg); 30418355f576SJeff Roberson } 30428355f576SJeff Roberson 30438355f576SJeff Roberson /* See uma.h */ 30446fd34d6fSJeff Roberson void 30456fd34d6fSJeff Roberson uma_zone_reserve(uma_zone_t zone, int items) 30466fd34d6fSJeff Roberson { 30476fd34d6fSJeff Roberson uma_keg_t keg; 30486fd34d6fSJeff Roberson 30496fd34d6fSJeff Roberson keg = zone_first_keg(zone); 30506fd34d6fSJeff Roberson if (keg == NULL) 30516fd34d6fSJeff Roberson return; 30526fd34d6fSJeff Roberson KEG_LOCK(keg); 30536fd34d6fSJeff Roberson keg->uk_reserve = items; 30546fd34d6fSJeff Roberson KEG_UNLOCK(keg); 30556fd34d6fSJeff Roberson 30566fd34d6fSJeff Roberson return; 30576fd34d6fSJeff Roberson } 30586fd34d6fSJeff Roberson 30596fd34d6fSJeff Roberson /* See uma.h */ 30608355f576SJeff Roberson int 3061a4915c21SAttilio Rao uma_zone_reserve_kva(uma_zone_t zone, int count) 30628355f576SJeff Roberson { 3063099a0e58SBosko Milekic uma_keg_t keg; 30648355f576SJeff Roberson vm_offset_t kva; 30659ba30bcbSZbigniew Bodek u_int pages; 30668355f576SJeff Roberson 3067e20a199fSJeff Roberson keg = zone_first_keg(zone); 30680095a784SJeff Roberson if (keg == NULL) 30690095a784SJeff Roberson return (0); 3070099a0e58SBosko Milekic pages = count / keg->uk_ipers; 30718355f576SJeff Roberson 3072099a0e58SBosko Milekic if (pages * keg->uk_ipers < count) 30738355f576SJeff Roberson pages++; 307457223e99SAndriy Gapon pages *= keg->uk_ppera; 3075a553d4b8SJeff Roberson 3076a4915c21SAttilio Rao #ifdef UMA_MD_SMALL_ALLOC 3077a4915c21SAttilio Rao if (keg->uk_ppera > 1) { 3078a4915c21SAttilio Rao #else 3079a4915c21SAttilio Rao if (1) { 3080a4915c21SAttilio Rao #endif 308157223e99SAndriy Gapon kva = kva_alloc((vm_size_t)pages * 
PAGE_SIZE); 3082d1f42ac2SAlan Cox if (kva == 0) 30838355f576SJeff Roberson return (0); 3084a4915c21SAttilio Rao } else 3085a4915c21SAttilio Rao kva = 0; 3086af526374SJeff Roberson KEG_LOCK(keg); 3087099a0e58SBosko Milekic keg->uk_kva = kva; 3088a4915c21SAttilio Rao keg->uk_offset = 0; 3089099a0e58SBosko Milekic keg->uk_maxpages = pages; 3090a4915c21SAttilio Rao #ifdef UMA_MD_SMALL_ALLOC 3091a4915c21SAttilio Rao keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc; 3092a4915c21SAttilio Rao #else 3093a4915c21SAttilio Rao keg->uk_allocf = noobj_alloc; 3094a4915c21SAttilio Rao #endif 30956fd34d6fSJeff Roberson keg->uk_flags |= UMA_ZONE_NOFREE; 3096af526374SJeff Roberson KEG_UNLOCK(keg); 3097af526374SJeff Roberson 30988355f576SJeff Roberson return (1); 30998355f576SJeff Roberson } 31008355f576SJeff Roberson 31018355f576SJeff Roberson /* See uma.h */ 31028355f576SJeff Roberson void 31038355f576SJeff Roberson uma_prealloc(uma_zone_t zone, int items) 31048355f576SJeff Roberson { 31058355f576SJeff Roberson int slabs; 31068355f576SJeff Roberson uma_slab_t slab; 3107099a0e58SBosko Milekic uma_keg_t keg; 31088355f576SJeff Roberson 3109e20a199fSJeff Roberson keg = zone_first_keg(zone); 31100095a784SJeff Roberson if (keg == NULL) 31110095a784SJeff Roberson return; 3112af526374SJeff Roberson KEG_LOCK(keg); 3113099a0e58SBosko Milekic slabs = items / keg->uk_ipers; 3114099a0e58SBosko Milekic if (slabs * keg->uk_ipers < items) 31158355f576SJeff Roberson slabs++; 31168355f576SJeff Roberson while (slabs > 0) { 3117e20a199fSJeff Roberson slab = keg_alloc_slab(keg, zone, M_WAITOK); 3118e20a199fSJeff Roberson if (slab == NULL) 3119e20a199fSJeff Roberson break; 3120e20a199fSJeff Roberson MPASS(slab->us_keg == keg); 3121099a0e58SBosko Milekic LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link); 31228355f576SJeff Roberson slabs--; 31238355f576SJeff Roberson } 3124af526374SJeff Roberson KEG_UNLOCK(keg); 31258355f576SJeff Roberson } 31268355f576SJeff Roberson 31278355f576SJeff Roberson /* See uma.h */ 312844ec2b63SKonstantin Belousov static void 312944ec2b63SKonstantin Belousov uma_reclaim_locked(bool kmem_danger) 31308355f576SJeff Roberson { 313144ec2b63SKonstantin Belousov 31321431a748SGleb Smirnoff CTR0(KTR_UMA, "UMA: vm asked us to release pages!"); 313344ec2b63SKonstantin Belousov sx_assert(&uma_drain_lock, SA_XLOCKED); 313486bbae32SJeff Roberson bucket_enable(); 31358355f576SJeff Roberson zone_foreach(zone_drain); 313644ec2b63SKonstantin Belousov if (vm_page_count_min() || kmem_danger) { 3137a2de44abSAlexander Motin cache_drain_safe(NULL); 3138a2de44abSAlexander Motin zone_foreach(zone_drain); 3139a2de44abSAlexander Motin } 31408355f576SJeff Roberson /* 31418355f576SJeff Roberson * Some slabs may have been freed but this zone will be visited early; 31428355f576SJeff Roberson * we visit it again so that we can free pages that are empty once other 31438355f576SJeff Roberson * zones are drained. We have to do the same for buckets. 
31448355f576SJeff Roberson */ 31459643769aSJeff Roberson zone_drain(slabzone); 3146cae33c14SJeff Roberson bucket_zone_drain(); 314744ec2b63SKonstantin Belousov } 314844ec2b63SKonstantin Belousov 314944ec2b63SKonstantin Belousov void 315044ec2b63SKonstantin Belousov uma_reclaim(void) 315144ec2b63SKonstantin Belousov { 315244ec2b63SKonstantin Belousov 315344ec2b63SKonstantin Belousov sx_xlock(&uma_drain_lock); 315444ec2b63SKonstantin Belousov uma_reclaim_locked(false); 315595c4bf75SKonstantin Belousov sx_xunlock(&uma_drain_lock); 31568355f576SJeff Roberson } 31578355f576SJeff Roberson 31582e47807cSJeff Roberson static volatile int uma_reclaim_needed; 315944ec2b63SKonstantin Belousov 316044ec2b63SKonstantin Belousov void 316144ec2b63SKonstantin Belousov uma_reclaim_wakeup(void) 316244ec2b63SKonstantin Belousov { 316344ec2b63SKonstantin Belousov 31642e47807cSJeff Roberson if (atomic_fetchadd_int(&uma_reclaim_needed, 1) == 0) 31652e47807cSJeff Roberson wakeup(uma_reclaim); 316644ec2b63SKonstantin Belousov } 316744ec2b63SKonstantin Belousov 316844ec2b63SKonstantin Belousov void 316944ec2b63SKonstantin Belousov uma_reclaim_worker(void *arg __unused) 317044ec2b63SKonstantin Belousov { 317144ec2b63SKonstantin Belousov 317244ec2b63SKonstantin Belousov for (;;) { 31732e47807cSJeff Roberson sx_xlock(&uma_drain_lock); 3174200f8117SKonstantin Belousov while (atomic_load_int(&uma_reclaim_needed) == 0) 31752e47807cSJeff Roberson sx_sleep(uma_reclaim, &uma_drain_lock, PVM, "umarcl", 31762e47807cSJeff Roberson hz); 31779b43bc27SAndriy Gapon sx_xunlock(&uma_drain_lock); 31789b43bc27SAndriy Gapon EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_KMEM); 31799b43bc27SAndriy Gapon sx_xlock(&uma_drain_lock); 318044ec2b63SKonstantin Belousov uma_reclaim_locked(true); 3181200f8117SKonstantin Belousov atomic_store_int(&uma_reclaim_needed, 0); 31822e47807cSJeff Roberson sx_xunlock(&uma_drain_lock); 31832e47807cSJeff Roberson /* Don't fire more than once per-second. 
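 * (Editor's note, not original text: pause() for hz ticks is a one-second
 * sleep regardless of the kernel HZ setting.)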
*/ 31842e47807cSJeff Roberson pause("umarclslp", hz); 318544ec2b63SKonstantin Belousov } 318644ec2b63SKonstantin Belousov } 318744ec2b63SKonstantin Belousov 3188663b416fSJohn Baldwin /* See uma.h */ 3189663b416fSJohn Baldwin int 3190663b416fSJohn Baldwin uma_zone_exhausted(uma_zone_t zone) 3191663b416fSJohn Baldwin { 3192663b416fSJohn Baldwin int full; 3193663b416fSJohn Baldwin 3194663b416fSJohn Baldwin ZONE_LOCK(zone); 3195e20a199fSJeff Roberson full = (zone->uz_flags & UMA_ZFLAG_FULL); 3196663b416fSJohn Baldwin ZONE_UNLOCK(zone); 3197663b416fSJohn Baldwin return (full); 3198663b416fSJohn Baldwin } 3199663b416fSJohn Baldwin 32006c125b8dSMohan Srinivasan int 32016c125b8dSMohan Srinivasan uma_zone_exhausted_nolock(uma_zone_t zone) 32026c125b8dSMohan Srinivasan { 3203e20a199fSJeff Roberson return (zone->uz_flags & UMA_ZFLAG_FULL); 32046c125b8dSMohan Srinivasan } 32056c125b8dSMohan Srinivasan 32068355f576SJeff Roberson void * 3207f2c2231eSRyan Stone uma_large_malloc(vm_size_t size, int wait) 32088355f576SJeff Roberson { 32098355f576SJeff Roberson void *mem; 32108355f576SJeff Roberson uma_slab_t slab; 321185dcf349SGleb Smirnoff uint8_t flags; 32128355f576SJeff Roberson 3213e20a199fSJeff Roberson slab = zone_alloc_item(slabzone, NULL, wait); 32148355f576SJeff Roberson if (slab == NULL) 32158355f576SJeff Roberson return (NULL); 32168355f576SJeff Roberson mem = page_alloc(NULL, size, &flags, wait); 32178355f576SJeff Roberson if (mem) { 321899571dc3SJeff Roberson vsetslab((vm_offset_t)mem, slab); 32198355f576SJeff Roberson slab->us_data = mem; 32208355f576SJeff Roberson slab->us_flags = flags | UMA_SLAB_MALLOC; 32218355f576SJeff Roberson slab->us_size = size; 32222e47807cSJeff Roberson uma_total_inc(size); 32238355f576SJeff Roberson } else { 32240095a784SJeff Roberson zone_free_item(slabzone, slab, NULL, SKIP_NONE); 32258355f576SJeff Roberson } 32268355f576SJeff Roberson 32278355f576SJeff Roberson return (mem); 32288355f576SJeff Roberson } 32298355f576SJeff Roberson 32308355f576SJeff Roberson void 32318355f576SJeff Roberson uma_large_free(uma_slab_t slab) 32328355f576SJeff Roberson { 3233c325e866SKonstantin Belousov 32348355f576SJeff Roberson page_free(slab->us_data, slab->us_size, slab->us_flags); 32352e47807cSJeff Roberson uma_total_dec(slab->us_size); 32360095a784SJeff Roberson zone_free_item(slabzone, slab, NULL, SKIP_NONE); 32378355f576SJeff Roberson } 32388355f576SJeff Roberson 323948343a2fSGleb Smirnoff static void 324048343a2fSGleb Smirnoff uma_zero_item(void *item, uma_zone_t zone) 324148343a2fSGleb Smirnoff { 324296c85efbSNathan Whitehorn int i; 324348343a2fSGleb Smirnoff 324448343a2fSGleb Smirnoff if (zone->uz_flags & UMA_ZONE_PCPU) { 324596c85efbSNathan Whitehorn CPU_FOREACH(i) 324648343a2fSGleb Smirnoff bzero(zpcpu_get_cpu(item, i), zone->uz_size); 324748343a2fSGleb Smirnoff } else 324848343a2fSGleb Smirnoff bzero(item, zone->uz_size); 324948343a2fSGleb Smirnoff } 325048343a2fSGleb Smirnoff 32512e47807cSJeff Roberson unsigned long 32522e47807cSJeff Roberson uma_limit(void) 32532e47807cSJeff Roberson { 32542e47807cSJeff Roberson 32552e47807cSJeff Roberson return (uma_kmem_limit); 32562e47807cSJeff Roberson } 32572e47807cSJeff Roberson 32582e47807cSJeff Roberson void 32592e47807cSJeff Roberson uma_set_limit(unsigned long limit) 32602e47807cSJeff Roberson { 32612e47807cSJeff Roberson 32622e47807cSJeff Roberson uma_kmem_limit = limit; 32632e47807cSJeff Roberson } 32642e47807cSJeff Roberson 32652e47807cSJeff Roberson unsigned long 32662e47807cSJeff Roberson uma_size(void) 32672e47807cSJeff 
Roberson { 32682e47807cSJeff Roberson 3269*ad5b0f5bSJeff Roberson return (uma_kmem_total); 3270*ad5b0f5bSJeff Roberson } 3271*ad5b0f5bSJeff Roberson 3272*ad5b0f5bSJeff Roberson long 3273*ad5b0f5bSJeff Roberson uma_avail(void) 3274*ad5b0f5bSJeff Roberson { 3275*ad5b0f5bSJeff Roberson 3276*ad5b0f5bSJeff Roberson return (uma_kmem_limit - uma_kmem_total); 32772e47807cSJeff Roberson } 32782e47807cSJeff Roberson 32798355f576SJeff Roberson void 32808355f576SJeff Roberson uma_print_stats(void) 32818355f576SJeff Roberson { 32828355f576SJeff Roberson zone_foreach(uma_print_zone); 32838355f576SJeff Roberson } 32848355f576SJeff Roberson 3285504d5de3SJeff Roberson static void 3286504d5de3SJeff Roberson slab_print(uma_slab_t slab) 3287504d5de3SJeff Roberson { 3288ef72505eSJeff Roberson printf("slab: keg %p, data %p, freecount %d\n", 3289ef72505eSJeff Roberson slab->us_keg, slab->us_data, slab->us_freecount); 3290504d5de3SJeff Roberson } 3291504d5de3SJeff Roberson 3292504d5de3SJeff Roberson static void 3293504d5de3SJeff Roberson cache_print(uma_cache_t cache) 3294504d5de3SJeff Roberson { 3295504d5de3SJeff Roberson printf("alloc: %p(%d), free: %p(%d)\n", 3296504d5de3SJeff Roberson cache->uc_allocbucket, 3297504d5de3SJeff Roberson cache->uc_allocbucket?cache->uc_allocbucket->ub_cnt:0, 3298504d5de3SJeff Roberson cache->uc_freebucket, 3299504d5de3SJeff Roberson cache->uc_freebucket?cache->uc_freebucket->ub_cnt:0); 3300504d5de3SJeff Roberson } 3301504d5de3SJeff Roberson 3302e20a199fSJeff Roberson static void 3303e20a199fSJeff Roberson uma_print_keg(uma_keg_t keg) 33048355f576SJeff Roberson { 3305504d5de3SJeff Roberson uma_slab_t slab; 3306504d5de3SJeff Roberson 33070b80c1e4SEitan Adler printf("keg: %s(%p) size %d(%d) flags %#x ipers %d ppera %d " 3308e20a199fSJeff Roberson "out %d free %d limit %d\n", 3309e20a199fSJeff Roberson keg->uk_name, keg, keg->uk_size, keg->uk_rsize, keg->uk_flags, 3310099a0e58SBosko Milekic keg->uk_ipers, keg->uk_ppera, 331157223e99SAndriy Gapon (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free, 331257223e99SAndriy Gapon keg->uk_free, (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers); 3313504d5de3SJeff Roberson printf("Part slabs:\n"); 3314099a0e58SBosko Milekic LIST_FOREACH(slab, &keg->uk_part_slab, us_link) 3315504d5de3SJeff Roberson slab_print(slab); 3316504d5de3SJeff Roberson printf("Free slabs:\n"); 3317099a0e58SBosko Milekic LIST_FOREACH(slab, &keg->uk_free_slab, us_link) 3318504d5de3SJeff Roberson slab_print(slab); 3319504d5de3SJeff Roberson printf("Full slabs:\n"); 3320099a0e58SBosko Milekic LIST_FOREACH(slab, &keg->uk_full_slab, us_link) 3321504d5de3SJeff Roberson slab_print(slab); 3322e20a199fSJeff Roberson } 3323e20a199fSJeff Roberson 3324e20a199fSJeff Roberson void 3325e20a199fSJeff Roberson uma_print_zone(uma_zone_t zone) 3326e20a199fSJeff Roberson { 3327e20a199fSJeff Roberson uma_cache_t cache; 3328e20a199fSJeff Roberson uma_klink_t kl; 3329e20a199fSJeff Roberson int i; 3330e20a199fSJeff Roberson 33310b80c1e4SEitan Adler printf("zone: %s(%p) size %d flags %#x\n", 3332e20a199fSJeff Roberson zone->uz_name, zone, zone->uz_size, zone->uz_flags); 3333e20a199fSJeff Roberson LIST_FOREACH(kl, &zone->uz_kegs, kl_link) 3334e20a199fSJeff Roberson uma_print_keg(kl->kl_keg); 33353aa6d94eSJohn Baldwin CPU_FOREACH(i) { 3336504d5de3SJeff Roberson cache = &zone->uz_cpu[i]; 3337504d5de3SJeff Roberson printf("CPU %d Cache:\n", i); 3338504d5de3SJeff Roberson cache_print(cache); 3339504d5de3SJeff Roberson } 33408355f576SJeff Roberson } 33418355f576SJeff Roberson 
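/*
 * Editor's illustrative sketch, not part of the original file: the
 * accounting interfaces above let a caller gauge UMA memory pressure
 * without walking every zone.  The ten-percent threshold is hypothetical.
 *
 *	long headroom;
 *
 *	headroom = uma_avail();
 *	if (headroom < (long)(uma_limit() / 10))
 *		... start shedding caches ...
 *
 * Since uma_avail() is simply uma_limit() minus uma_size() sampled without
 * synchronization, treat the result as a hint, not a reservation.
 */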
3342a0d4b0aeSRobert Watson #ifdef DDB 33438355f576SJeff Roberson /* 33447a52a97eSRobert Watson * Generate statistics across both the zone and its per-cpu caches. Return 33457a52a97eSRobert Watson * desired statistics if the pointer is non-NULL for that statistic. 33467a52a97eSRobert Watson * 33477a52a97eSRobert Watson * Note: does not update the zone statistics, as it can't safely clear the 33487a52a97eSRobert Watson * per-CPU cache statistic. 33497a52a97eSRobert Watson * 33507a52a97eSRobert Watson * XXXRW: Following the uc_allocbucket and uc_freebucket pointers here isn't 33517a52a97eSRobert Watson * safe from off-CPU; we should modify the caches to track this information 33527a52a97eSRobert Watson * directly so that we don't have to. 33537a52a97eSRobert Watson */ 33547a52a97eSRobert Watson static void 335585dcf349SGleb Smirnoff uma_zone_sumstat(uma_zone_t z, int *cachefreep, uint64_t *allocsp, 335685dcf349SGleb Smirnoff uint64_t *freesp, uint64_t *sleepsp) 33577a52a97eSRobert Watson { 33587a52a97eSRobert Watson uma_cache_t cache; 335985dcf349SGleb Smirnoff uint64_t allocs, frees, sleeps; 33607a52a97eSRobert Watson int cachefree, cpu; 33617a52a97eSRobert Watson 3362bf965959SSean Bruno allocs = frees = sleeps = 0; 33637a52a97eSRobert Watson cachefree = 0; 33643aa6d94eSJohn Baldwin CPU_FOREACH(cpu) { 33657a52a97eSRobert Watson cache = &z->uz_cpu[cpu]; 33667a52a97eSRobert Watson if (cache->uc_allocbucket != NULL) 33677a52a97eSRobert Watson cachefree += cache->uc_allocbucket->ub_cnt; 33687a52a97eSRobert Watson if (cache->uc_freebucket != NULL) 33697a52a97eSRobert Watson cachefree += cache->uc_freebucket->ub_cnt; 33707a52a97eSRobert Watson allocs += cache->uc_allocs; 33717a52a97eSRobert Watson frees += cache->uc_frees; 33727a52a97eSRobert Watson } 33737a52a97eSRobert Watson allocs += z->uz_allocs; 33747a52a97eSRobert Watson frees += z->uz_frees; 3375bf965959SSean Bruno sleeps += z->uz_sleeps; 33767a52a97eSRobert Watson if (cachefreep != NULL) 33777a52a97eSRobert Watson *cachefreep = cachefree; 33787a52a97eSRobert Watson if (allocsp != NULL) 33797a52a97eSRobert Watson *allocsp = allocs; 33807a52a97eSRobert Watson if (freesp != NULL) 33817a52a97eSRobert Watson *freesp = frees; 3382bf965959SSean Bruno if (sleepsp != NULL) 3383bf965959SSean Bruno *sleepsp = sleeps; 33847a52a97eSRobert Watson } 3385a0d4b0aeSRobert Watson #endif /* DDB */ 33867a52a97eSRobert Watson 33877a52a97eSRobert Watson static int 33887a52a97eSRobert Watson sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS) 33897a52a97eSRobert Watson { 33907a52a97eSRobert Watson uma_keg_t kz; 33917a52a97eSRobert Watson uma_zone_t z; 33927a52a97eSRobert Watson int count; 33937a52a97eSRobert Watson 33947a52a97eSRobert Watson count = 0; 3395111fbcd5SBryan Venteicher rw_rlock(&uma_rwlock); 33967a52a97eSRobert Watson LIST_FOREACH(kz, &uma_kegs, uk_link) { 33977a52a97eSRobert Watson LIST_FOREACH(z, &kz->uk_zones, uz_link) 33987a52a97eSRobert Watson count++; 33997a52a97eSRobert Watson } 3400111fbcd5SBryan Venteicher rw_runlock(&uma_rwlock); 34017a52a97eSRobert Watson return (sysctl_handle_int(oidp, &count, 0, req)); 34027a52a97eSRobert Watson } 34037a52a97eSRobert Watson 34047a52a97eSRobert Watson static int 34057a52a97eSRobert Watson sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS) 34067a52a97eSRobert Watson { 34077a52a97eSRobert Watson struct uma_stream_header ush; 34087a52a97eSRobert Watson struct uma_type_header uth; 34097a52a97eSRobert Watson struct uma_percpu_stat ups; 34107a52a97eSRobert Watson uma_bucket_t bucket; 34117a52a97eSRobert Watson struct sbuf sbuf; 
34127a52a97eSRobert Watson uma_cache_t cache; 3413e20a199fSJeff Roberson uma_klink_t kl; 34147a52a97eSRobert Watson uma_keg_t kz; 34157a52a97eSRobert Watson uma_zone_t z; 3416e20a199fSJeff Roberson uma_keg_t k; 34174e657159SMatthew D Fleming int count, error, i; 34187a52a97eSRobert Watson 341900f0e671SMatthew D Fleming error = sysctl_wire_old_buffer(req, 0); 342000f0e671SMatthew D Fleming if (error != 0) 342100f0e671SMatthew D Fleming return (error); 34224e657159SMatthew D Fleming sbuf_new_for_sysctl(&sbuf, NULL, 128, req); 34231eafc078SIan Lepore sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL); 34244e657159SMatthew D Fleming 3425404a593eSMatthew D Fleming count = 0; 3426111fbcd5SBryan Venteicher rw_rlock(&uma_rwlock); 34277a52a97eSRobert Watson LIST_FOREACH(kz, &uma_kegs, uk_link) { 34287a52a97eSRobert Watson LIST_FOREACH(z, &kz->uk_zones, uz_link) 34297a52a97eSRobert Watson count++; 34307a52a97eSRobert Watson } 34317a52a97eSRobert Watson 34327a52a97eSRobert Watson /* 34337a52a97eSRobert Watson * Insert stream header. 34347a52a97eSRobert Watson */ 34357a52a97eSRobert Watson bzero(&ush, sizeof(ush)); 34367a52a97eSRobert Watson ush.ush_version = UMA_STREAM_VERSION; 3437ab3a57c0SRobert Watson ush.ush_maxcpus = (mp_maxid + 1); 34387a52a97eSRobert Watson ush.ush_count = count; 34394e657159SMatthew D Fleming (void)sbuf_bcat(&sbuf, &ush, sizeof(ush)); 34407a52a97eSRobert Watson 34417a52a97eSRobert Watson LIST_FOREACH(kz, &uma_kegs, uk_link) { 34427a52a97eSRobert Watson LIST_FOREACH(z, &kz->uk_zones, uz_link) { 34437a52a97eSRobert Watson bzero(&uth, sizeof(uth)); 34447a52a97eSRobert Watson ZONE_LOCK(z); 3445cbbb4a00SRobert Watson strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME); 34467a52a97eSRobert Watson uth.uth_align = kz->uk_align; 34477a52a97eSRobert Watson uth.uth_size = kz->uk_size; 34487a52a97eSRobert Watson uth.uth_rsize = kz->uk_rsize; 3449e20a199fSJeff Roberson LIST_FOREACH(kl, &z->uz_kegs, kl_link) { 3450e20a199fSJeff Roberson k = kl->kl_keg; 3451e20a199fSJeff Roberson uth.uth_maxpages += k->uk_maxpages; 3452e20a199fSJeff Roberson uth.uth_pages += k->uk_pages; 3453e20a199fSJeff Roberson uth.uth_keg_free += k->uk_free; 3454e20a199fSJeff Roberson uth.uth_limit = (k->uk_maxpages / k->uk_ppera) 3455e20a199fSJeff Roberson * k->uk_ipers; 3456e20a199fSJeff Roberson } 3457cbbb4a00SRobert Watson 3458cbbb4a00SRobert Watson /* 3459cbbb4a00SRobert Watson * A zone is secondary if it is not the first entry 3460cbbb4a00SRobert Watson * on the keg's zone list. 
3461cbbb4a00SRobert Watson */ 3462e20a199fSJeff Roberson if ((z->uz_flags & UMA_ZONE_SECONDARY) && 3463cbbb4a00SRobert Watson (LIST_FIRST(&kz->uk_zones) != z)) 3464cbbb4a00SRobert Watson uth.uth_zone_flags = UTH_ZONE_SECONDARY; 3465cbbb4a00SRobert Watson 3466fc03d22bSJeff Roberson LIST_FOREACH(bucket, &z->uz_buckets, ub_link) 34677a52a97eSRobert Watson uth.uth_zone_free += bucket->ub_cnt; 34687a52a97eSRobert Watson uth.uth_allocs = z->uz_allocs; 34697a52a97eSRobert Watson uth.uth_frees = z->uz_frees; 34702019094aSRobert Watson uth.uth_fails = z->uz_fails; 3471bf965959SSean Bruno uth.uth_sleeps = z->uz_sleeps; 34724e657159SMatthew D Fleming (void)sbuf_bcat(&sbuf, &uth, sizeof(uth)); 34737a52a97eSRobert Watson /* 34742450bbb8SRobert Watson * While it is not normally safe to access the cache 34752450bbb8SRobert Watson * bucket pointers while not on the CPU that owns the 34762450bbb8SRobert Watson * cache, we only allow the pointers to be exchanged 34772450bbb8SRobert Watson * without the zone lock held, not invalidated, so 34782450bbb8SRobert Watson * accept the possible race associated with bucket 34792450bbb8SRobert Watson * exchange during monitoring. 34807a52a97eSRobert Watson */ 3481ab3a57c0SRobert Watson for (i = 0; i < (mp_maxid + 1); i++) { 34827a52a97eSRobert Watson bzero(&ups, sizeof(ups)); 34837a52a97eSRobert Watson if (kz->uk_flags & UMA_ZFLAG_INTERNAL) 34847a52a97eSRobert Watson goto skip; 3485082dc776SRobert Watson if (CPU_ABSENT(i)) 3486082dc776SRobert Watson goto skip; 34877a52a97eSRobert Watson cache = &z->uz_cpu[i]; 34887a52a97eSRobert Watson if (cache->uc_allocbucket != NULL) 34897a52a97eSRobert Watson ups.ups_cache_free += 34907a52a97eSRobert Watson cache->uc_allocbucket->ub_cnt; 34917a52a97eSRobert Watson if (cache->uc_freebucket != NULL) 34927a52a97eSRobert Watson ups.ups_cache_free += 34937a52a97eSRobert Watson cache->uc_freebucket->ub_cnt; 34947a52a97eSRobert Watson ups.ups_allocs = cache->uc_allocs; 34957a52a97eSRobert Watson ups.ups_frees = cache->uc_frees; 34967a52a97eSRobert Watson skip: 34974e657159SMatthew D Fleming (void)sbuf_bcat(&sbuf, &ups, sizeof(ups)); 34987a52a97eSRobert Watson } 34992450bbb8SRobert Watson ZONE_UNLOCK(z); 35007a52a97eSRobert Watson } 35017a52a97eSRobert Watson } 3502111fbcd5SBryan Venteicher rw_runlock(&uma_rwlock); 35034e657159SMatthew D Fleming error = sbuf_finish(&sbuf); 35044e657159SMatthew D Fleming sbuf_delete(&sbuf); 35057a52a97eSRobert Watson return (error); 35067a52a97eSRobert Watson } 350748c5777eSRobert Watson 35080a5a3ccbSGleb Smirnoff int 35090a5a3ccbSGleb Smirnoff sysctl_handle_uma_zone_max(SYSCTL_HANDLER_ARGS) 35100a5a3ccbSGleb Smirnoff { 35110a5a3ccbSGleb Smirnoff uma_zone_t zone = *(uma_zone_t *)arg1; 351216be9f54SGleb Smirnoff int error, max; 35130a5a3ccbSGleb Smirnoff 351416be9f54SGleb Smirnoff max = uma_zone_get_max(zone); 35150a5a3ccbSGleb Smirnoff error = sysctl_handle_int(oidp, &max, 0, req); 35160a5a3ccbSGleb Smirnoff if (error || !req->newptr) 35170a5a3ccbSGleb Smirnoff return (error); 35180a5a3ccbSGleb Smirnoff 35190a5a3ccbSGleb Smirnoff uma_zone_set_max(zone, max); 35200a5a3ccbSGleb Smirnoff 35210a5a3ccbSGleb Smirnoff return (0); 35220a5a3ccbSGleb Smirnoff } 35230a5a3ccbSGleb Smirnoff 35240a5a3ccbSGleb Smirnoff int 35250a5a3ccbSGleb Smirnoff sysctl_handle_uma_zone_cur(SYSCTL_HANDLER_ARGS) 35260a5a3ccbSGleb Smirnoff { 35270a5a3ccbSGleb Smirnoff uma_zone_t zone = *(uma_zone_t *)arg1; 35280a5a3ccbSGleb Smirnoff int cur; 35290a5a3ccbSGleb Smirnoff 35300a5a3ccbSGleb Smirnoff cur = uma_zone_get_cur(zone); 
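	/*
	 * Editor's aside, not original text: unlike
	 * sysctl_handle_uma_zone_max() above, this handler is read-only and
	 * needs no new-value leg.  A hypothetical registration, assuming a
	 * caller-owned uma_zone_t variable "my_zone":
	 *
	 *	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "items_cur",
	 *	    CTLTYPE_INT | CTLFLAG_RD, &my_zone, 0,
	 *	    sysctl_handle_uma_zone_cur, "I", "items currently in use");
	 */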

int
sysctl_handle_uma_zone_max(SYSCTL_HANDLER_ARGS)
{
	uma_zone_t zone = *(uma_zone_t *)arg1;
	int error, max;

	max = uma_zone_get_max(zone);
	error = sysctl_handle_int(oidp, &max, 0, req);
	if (error || !req->newptr)
		return (error);

	uma_zone_set_max(zone, max);

	return (0);
}

int
sysctl_handle_uma_zone_cur(SYSCTL_HANDLER_ARGS)
{
	uma_zone_t zone = *(uma_zone_t *)arg1;
	int cur;

	cur = uma_zone_get_cur(zone);
	return (sysctl_handle_int(oidp, &cur, 0, req));
}
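
/*
 * Usage sketch for the handlers above (names here are hypothetical):
 * a subsystem holding a zone pointer can export the zone's limit as a
 * read/write sysctl by pointing arg1 at the zone variable itself,
 * since the handlers dereference *(uma_zone_t *)arg1:
 *
 *	static uma_zone_t foo_zone;
 *
 *	SYSCTL_PROC(_vm, OID_AUTO, foo_zone_max,
 *	    CTLTYPE_INT | CTLFLAG_RW, &foo_zone, 0,
 *	    sysctl_handle_uma_zone_max, "I", "Maximum foo_zone items");
 */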

#ifdef INVARIANTS
static uma_slab_t
uma_dbg_getslab(uma_zone_t zone, void *item)
{
	uma_slab_t slab;
	uma_keg_t keg;
	uint8_t *mem;

	mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
	if (zone->uz_flags & UMA_ZONE_VTOSLAB) {
		slab = vtoslab((vm_offset_t)mem);
	} else {
		/*
		 * It is safe to return the slab here even though the
		 * zone is unlocked because the item's allocation state
		 * essentially holds a reference.
		 */
		ZONE_LOCK(zone);
		keg = LIST_FIRST(&zone->uz_kegs)->kl_keg;
		if (keg->uk_flags & UMA_ZONE_HASH)
			slab = hash_sfind(&keg->uk_hash, mem);
		else
			slab = (uma_slab_t)(mem + keg->uk_pgoff);
		ZONE_UNLOCK(zone);
	}

	return (slab);
}

/*
 * Set up the slab's freei data such that uma_dbg_free can function.
 */
static void
uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item)
{
	uma_keg_t keg;
	int freei;

	if (zone_first_keg(zone) == NULL)
		return;
	if (slab == NULL) {
		slab = uma_dbg_getslab(zone, item);
		if (slab == NULL)
			panic("uma: item %p did not belong to zone %s\n",
			    item, zone->uz_name);
	}
	keg = slab->us_keg;
	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;

	if (BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree))
		panic("Duplicate alloc of %p from zone %p(%s) slab %p(%d)\n",
		    item, zone, zone->uz_name, slab, freei);
	BIT_SET_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree);
}

/*
 * Verifies freed addresses.  Checks for alignment, valid slab membership
 * and duplicate frees.
 */
static void
uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
{
	uma_keg_t keg;
	int freei;

	if (zone_first_keg(zone) == NULL)
		return;
	if (slab == NULL) {
		slab = uma_dbg_getslab(zone, item);
		if (slab == NULL)
			panic("uma: Freed item %p did not belong to zone %s\n",
			    item, zone->uz_name);
	}
	keg = slab->us_keg;
	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;

	if (freei >= keg->uk_ipers)
		panic("Invalid free of %p from zone %p(%s) slab %p(%d)\n",
		    item, zone, zone->uz_name, slab, freei);

	if (((freei * keg->uk_rsize) + slab->us_data) != item)
		panic("Unaligned free of %p from zone %p(%s) slab %p(%d)\n",
		    item, zone, zone->uz_name, slab, freei);

	if (!BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree))
		panic("Duplicate free of %p from zone %p(%s) slab %p(%d)\n",
		    item, zone, zone->uz_name, slab, freei);

	BIT_CLR_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree);
}
#endif /* INVARIANTS */
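
/*
 * Worked example of the checks above, with illustrative numbers: for
 * uk_rsize = 256 and an item 0x300 bytes past us_data, freei is
 * 0x300 / 256 = 3, which must fall below uk_ipers, and 3 * 256 must
 * equal the item's offset exactly or the free is unaligned.  The
 * us_debugfree bit for index 3 is set in uma_dbg_alloc() and must
 * still be set when uma_dbg_free() runs; a second free finds it
 * clear and panics, which is how double frees are caught.
 */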

#ifdef DDB
DB_SHOW_COMMAND(uma, db_show_uma)
{
	uint64_t allocs, frees, sleeps;
	uma_bucket_t bucket;
	uma_keg_t kz;
	uma_zone_t z;
	int cachefree;

	db_printf("%18s %8s %8s %8s %12s %8s %8s\n", "Zone", "Size", "Used",
	    "Free", "Requests", "Sleeps", "Bucket");
	LIST_FOREACH(kz, &uma_kegs, uk_link) {
		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
			if (kz->uk_flags & UMA_ZFLAG_INTERNAL) {
				allocs = z->uz_allocs;
				frees = z->uz_frees;
				sleeps = z->uz_sleeps;
				cachefree = 0;
			} else
				uma_zone_sumstat(z, &cachefree, &allocs,
				    &frees, &sleeps);
			if (!((z->uz_flags & UMA_ZONE_SECONDARY) &&
			    (LIST_FIRST(&kz->uk_zones) != z)))
				cachefree += kz->uk_free;
			LIST_FOREACH(bucket, &z->uz_buckets, ub_link)
				cachefree += bucket->ub_cnt;
			db_printf("%18s %8ju %8jd %8d %12ju %8ju %8u\n",
			    z->uz_name, (uintmax_t)kz->uk_size,
			    (intmax_t)(allocs - frees), cachefree,
			    (uintmax_t)allocs, (uintmax_t)sleeps, z->uz_count);
			if (db_pager_quit)
				return;
		}
	}
}

DB_SHOW_COMMAND(umacache, db_show_umacache)
{
	uint64_t allocs, frees;
	uma_bucket_t bucket;
	uma_zone_t z;
	int cachefree;

	db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used",
	    "Free", "Requests", "Bucket");
	LIST_FOREACH(z, &uma_cachezones, uz_link) {
		uma_zone_sumstat(z, &cachefree, &allocs, &frees, NULL);
		LIST_FOREACH(bucket, &z->uz_buckets, ub_link)
			cachefree += bucket->ub_cnt;
		db_printf("%18s %8ju %8jd %8d %12ju %8u\n",
		    z->uz_name, (uintmax_t)z->uz_size,
		    (intmax_t)(allocs - frees), cachefree,
		    (uintmax_t)allocs, z->uz_count);
		if (db_pager_quit)
			return;
	}
}
#endif /* DDB */
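
/*
 * From the ddb(4) prompt the commands above are invoked as "show uma"
 * and "show umacache"; each prints one row per zone under the column
 * headers passed to db_printf() above.
 */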