/*
 * Copyright (c) 2002, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uma_core.c  Implementation of the Universal Memory allocator
 *
 * This allocator is intended to replace the multitude of similar object caches
 * in the standard FreeBSD kernel.  The intent is to be flexible as well as
 * efficient.  A primary design goal is to return unused memory to the rest of
 * the system.  This will make the system as a whole more flexible due to the
 * ability to move memory to subsystems which most need it instead of leaving
 * pools of reserved memory unused.
 *
 * The basic ideas stem from similar slab/zone based allocators whose algorithms
 * are well known.
 *
 */

/*
 * TODO:
 *	- Improve memory usage for large allocations
 *	- Investigate cache size adjustments
 */
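/*
 * Illustrative sketch (not part of this file): a typical consumer of the
 * public interface declared in <vm/uma.h> creates a zone once and then
 * allocates and frees items from it, roughly as follows.  The "foo" names
 * are placeholders only.
 *
 *	static uma_zone_t foo_zone;
 *
 *	foo_zone = uma_zcreate("foo", sizeof(struct foo),
 *	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
 *	item = uma_zalloc(foo_zone, M_WAITOK);
 *	...
 *	uma_zfree(foo_zone, item);
 */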
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/* I should really use ktr.. */
/*
#define UMA_DEBUG 1
#define UMA_DEBUG_ALLOC 1
#define UMA_DEBUG_ALLOC_1 1
*/

#include "opt_param.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/vmmeter.h>
#include <sys/mbuf.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

#include <machine/vmparam.h>

/*
 * This is the zone from which all zones are spawned.  The idea is that even
 * the zone heads are allocated from the allocator, so we use the bss section
 * to bootstrap us.
 */
static struct uma_zone masterzone;
static uma_zone_t zones = &masterzone;

/* This is the zone from which all of uma_slab_t's are allocated. */
static uma_zone_t slabzone;

/*
 * The initial hash tables come out of this zone so they can be allocated
 * prior to malloc coming up.
 */
static uma_zone_t hashzone;

static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");

/*
 * Are we allowed to allocate buckets?
 */
static int bucketdisable = 1;

/* Linked list of all zones in the system */
static LIST_HEAD(,uma_zone) uma_zones = LIST_HEAD_INITIALIZER(&uma_zones);

/* This mutex protects the zone list */
static struct mtx uma_mtx;

/* These are the pcpu cache locks */
static struct mtx uma_pcpu_mtx[MAXCPU];

/* Linked list of boot time pages */
static LIST_HEAD(,uma_slab) uma_boot_pages =
    LIST_HEAD_INITIALIZER(&uma_boot_pages);

/* Count of free boottime pages */
static int uma_boot_free = 0;

/* Is the VM done starting up? */
static int booted = 0;

/* This is the handle used to schedule our working set calculator */
static struct callout uma_callout;

/* This is mp_maxid + 1, for use while looping over each cpu */
static int maxcpu;

/*
 * This structure is passed as the zone ctor arg so that I don't have to create
 * a special allocation function just for zones.
 */
struct uma_zctor_args {
	char *name;
	size_t size;
	uma_ctor ctor;
	uma_dtor dtor;
	uma_init uminit;
	uma_fini fini;
	int align;
	u_int16_t flags;
};

struct uma_bucket_zone {
	uma_zone_t	ubz_zone;
	char		*ubz_name;
	int		ubz_entries;
};

#define	BUCKET_MAX	128

struct uma_bucket_zone bucket_zones[] = {
	{ NULL, "16 Bucket", 16 },
	{ NULL, "32 Bucket", 32 },
	{ NULL, "64 Bucket", 64 },
	{ NULL, "128 Bucket", 128 },
	{ NULL, NULL, 0}
};

#define	BUCKET_SHIFT	4
#define	BUCKET_ZONES	((BUCKET_MAX >> BUCKET_SHIFT) + 1)

uint8_t bucket_size[BUCKET_ZONES];

/* Prototypes.. */

static void *obj_alloc(uma_zone_t, int, u_int8_t *, int);
static void *page_alloc(uma_zone_t, int, u_int8_t *, int);
static void page_free(void *, int, u_int8_t);
static uma_slab_t slab_zalloc(uma_zone_t, int);
static void cache_drain(uma_zone_t, int);
static void bucket_drain(uma_zone_t, uma_bucket_t);
static void zone_drain_common(uma_zone_t, int);
static void zone_ctor(void *, int, void *);
static void zone_dtor(void *, int, void *);
static void zero_init(void *, int);
static void zone_small_init(uma_zone_t zone);
static void zone_large_init(uma_zone_t zone);
static void zone_foreach(void (*zfunc)(uma_zone_t));
static void zone_timeout(uma_zone_t zone);
static int hash_alloc(struct uma_hash *);
static int hash_expand(struct uma_hash *, struct uma_hash *);
static void hash_free(struct uma_hash *hash);
static void uma_timeout(void *);
static void uma_startup3(void);
static void *uma_zalloc_internal(uma_zone_t, void *, int);
static void uma_zfree_internal(uma_zone_t, void *, void *, int);
static void bucket_enable(void);
static void bucket_init(void);
static uma_bucket_t bucket_alloc(int, int);
static void bucket_free(uma_bucket_t);
static void bucket_zone_drain(void);
static int uma_zalloc_bucket(uma_zone_t zone, int flags);
static uma_slab_t uma_zone_slab(uma_zone_t zone, int flags);
static void *uma_slab_alloc(uma_zone_t zone, uma_slab_t slab);
static __inline void zone_drain(uma_zone_t);

void uma_print_zone(uma_zone_t);
void uma_print_stats(void);
static int sysctl_vm_zone(SYSCTL_HANDLER_ARGS);

SYSCTL_OID(_vm, OID_AUTO, zone, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, sysctl_vm_zone, "A", "Zone Info");
SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);

/*
 * This routine checks to see whether or not it's safe to enable buckets.
 */

static void
bucket_enable(void)
{
	if (cnt.v_free_count < cnt.v_free_min)
		bucketdisable = 1;
	else
		bucketdisable = 0;
}

static void
bucket_init(void)
{
	struct uma_bucket_zone *ubz;
	int i;
	int j;

	for (i = 0, j = 0; bucket_zones[j].ubz_entries != 0; j++) {
		int size;

		ubz = &bucket_zones[j];
		size = roundup(sizeof(struct uma_bucket), sizeof(void *));
		size += sizeof(void *) * ubz->ubz_entries;
		ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
		    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
		for (; i <= ubz->ubz_entries; i += (1 << BUCKET_SHIFT))
			bucket_size[i >> BUCKET_SHIFT] = j;
	}
}

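/*
 * Worked example of the table built above (illustrative only): with
 * BUCKET_SHIFT = 4 the loop fills bucket_size[] so that an index of
 * howmany(entries, 16) selects the smallest bucket zone that can hold
 * "entries" pointers.  A request for 20 entries gives howmany(20, 16) == 2,
 * bucket_size[2] == 1, and therefore the "32 Bucket" zone; a request for
 * 16 entries maps to the "16 Bucket" zone.  bucket_alloc() and
 * bucket_free() below rely on this mapping.
 */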
static uma_bucket_t
bucket_alloc(int entries, int bflags)
{
	struct uma_bucket_zone *ubz;
	uma_bucket_t bucket;
	int idx;

	/*
	 * This is to stop us from allocating per cpu buckets while we're
	 * running out of UMA_BOOT_PAGES.  Otherwise, we would exhaust the
	 * boot pages.  This also prevents us from allocating buckets in
	 * low memory situations.
	 */

	if (bucketdisable)
		return (NULL);
	idx = howmany(entries, 1 << BUCKET_SHIFT);
	ubz = &bucket_zones[bucket_size[idx]];
	bucket = uma_zalloc_internal(ubz->ubz_zone, NULL, bflags);
	if (bucket) {
#ifdef INVARIANTS
		bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
#endif
		bucket->ub_cnt = 0;
		bucket->ub_entries = ubz->ubz_entries;
	}

	return (bucket);
}

static void
bucket_free(uma_bucket_t bucket)
{
	struct uma_bucket_zone *ubz;
	int idx;

	idx = howmany(bucket->ub_entries, 1 << BUCKET_SHIFT);
	ubz = &bucket_zones[bucket_size[idx]];
	uma_zfree_internal(ubz->ubz_zone, bucket, NULL, 0);
}

static void
bucket_zone_drain(void)
{
	struct uma_bucket_zone *ubz;

	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
		zone_drain(ubz->ubz_zone);
}


/*
 * Routine called by timeout which is used to fire off some time interval
 * based calculations. (working set, stats, etc.)
 *
 * Arguments:
 *	arg   Unused
 *
 * Returns:
 *	Nothing
 */
static void
uma_timeout(void *unused)
{
	bucket_enable();
	zone_foreach(zone_timeout);

	/* Reschedule this event */
	callout_reset(&uma_callout, UMA_WORKING_TIME * hz, uma_timeout, NULL);
}

/*
 * Routine to perform timeout driven calculations.  This does the working set
 * as well as hash expanding, and per cpu statistics aggregation.
 *
 * Arguments:
 *	zone  The zone to operate on
 *
 * Returns:
 *	Nothing
 */
static void
zone_timeout(uma_zone_t zone)
{
	uma_cache_t cache;
	u_int64_t alloc;
	int cpu;

	alloc = 0;

	/*
	 * Aggregate per cpu cache statistics back to the zone.
	 *
	 * I may rewrite this to set a flag in the per cpu cache instead of
	 * locking.  If the flag is not cleared on the next round I will have
	 * to lock and do it here instead so that the statistics don't get too
	 * far out of sync.
	 */
	if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL)) {
		for (cpu = 0; cpu < maxcpu; cpu++) {
			if (CPU_ABSENT(cpu))
				continue;
			CPU_LOCK(cpu);
			cache = &zone->uz_cpu[cpu];
			/* Add them up, and reset */
			alloc += cache->uc_allocs;
			cache->uc_allocs = 0;
			CPU_UNLOCK(cpu);
		}
	}

	/* Now push these stats back into the zone.. */
	ZONE_LOCK(zone);
	zone->uz_allocs += alloc;

	/*
	 * Expand the zone hash table.
	 *
	 * This is done if the number of slabs is larger than the hash size.
	 * What I'm trying to do here is completely reduce collisions.  This
	 * may be a little aggressive.  Should I allow for two collisions max?
	 */

	if (zone->uz_flags & UMA_ZONE_HASH &&
	    zone->uz_pages / zone->uz_ppera >= zone->uz_hash.uh_hashsize) {
		struct uma_hash newhash;
		struct uma_hash oldhash;
		int ret;

		/*
		 * This is so involved because allocating and freeing
		 * while the zone lock is held will lead to deadlock.
		 * I have to do everything in stages and check for
		 * races.
		 */
		newhash = zone->uz_hash;
		ZONE_UNLOCK(zone);
		ret = hash_alloc(&newhash);
		ZONE_LOCK(zone);
		if (ret) {
			if (hash_expand(&zone->uz_hash, &newhash)) {
				oldhash = zone->uz_hash;
				zone->uz_hash = newhash;
			} else
				oldhash = newhash;

			ZONE_UNLOCK(zone);
			hash_free(&oldhash);
			ZONE_LOCK(zone);
		}
	}

	/*
	 * Here we compute the working set size as the total number of items
	 * left outstanding since the last time interval.  This is slightly
	 * suboptimal. What we really want is the highest number of outstanding
	 * items during the last time quantum.  This should be close enough.
	 *
	 * The working set size is used to throttle the zone_drain function.
	 * We don't want to return memory that we may need again immediately.
	 */
	alloc = zone->uz_allocs - zone->uz_oallocs;
	zone->uz_oallocs = zone->uz_allocs;
	zone->uz_wssize = alloc;

	ZONE_UNLOCK(zone);
}

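/*
 * Sizing note (illustrative): the expansion above is driven purely by the
 * slab count.  A zone with one page per slab (uz_ppera == 1) that has grown
 * to, say, 64 pages has 64 slabs; once that count reaches the current
 * uh_hashsize, the next timeout doubles the table via hash_alloc() and
 * hash_expand() below, so the table keeps roughly one chain head per slab.
 * The initial table size is UMA_HASH_SIZE_INIT (from uma_int.h).
 */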
/*
 * Allocate and zero fill the next sized hash table from the appropriate
 * backing store.
 *
 * Arguments:
 *	hash  A new hash structure with the old hash size in uh_hashsize
 *
 * Returns:
 *	1 on success and 0 on failure.
 */
static int
hash_alloc(struct uma_hash *hash)
{
	int oldsize;
	int alloc;

	oldsize = hash->uh_hashsize;

	/* We're just going to go to a power of two greater */
	if (oldsize) {
		hash->uh_hashsize = oldsize * 2;
		alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
		hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
		    M_UMAHASH, M_NOWAIT);
	} else {
		alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
		hash->uh_slab_hash = uma_zalloc_internal(hashzone, NULL,
		    M_WAITOK);
		hash->uh_hashsize = UMA_HASH_SIZE_INIT;
	}
	if (hash->uh_slab_hash) {
		bzero(hash->uh_slab_hash, alloc);
		hash->uh_hashmask = hash->uh_hashsize - 1;
		return (1);
	}

	return (0);
}

/*
 * Expands the hash table for OFFPAGE zones.  This is done from zone_timeout
 * to reduce collisions.  This must not be done in the regular allocation path,
 * otherwise, we can recurse on the vm while allocating pages.
 *
 * Arguments:
 *	oldhash  The hash you want to expand
 *	newhash  The hash structure for the new table
 *
 * Returns:
 *	1 if the entries were rehashed into newhash, 0 otherwise.
 *
 * Discussion:
 */
static int
hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
{
	uma_slab_t slab;
	int hval;
	int i;

	if (!newhash->uh_slab_hash)
		return (0);

	if (oldhash->uh_hashsize >= newhash->uh_hashsize)
		return (0);

	/*
	 * I need to investigate hash algorithms for resizing without a
	 * full rehash.
	 */

	for (i = 0; i < oldhash->uh_hashsize; i++)
		while (!SLIST_EMPTY(&oldhash->uh_slab_hash[i])) {
			slab = SLIST_FIRST(&oldhash->uh_slab_hash[i]);
			SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[i], us_hlink);
			hval = UMA_HASH(newhash, slab->us_data);
			SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
			    slab, us_hlink);
		}

	return (1);
}

/*
 * Free the hash bucket to the appropriate backing store.
 *
 * Arguments:
 *	hash  The hash structure whose bucket array we're freeing; its
 *	      uh_hashsize tells us which backing store it came from.
 *
 * Returns:
 *	Nothing
 */
static void
hash_free(struct uma_hash *hash)
{
	if (hash->uh_slab_hash == NULL)
		return;
	if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
		uma_zfree_internal(hashzone,
		    hash->uh_slab_hash, NULL, 0);
	else
		free(hash->uh_slab_hash, M_UMAHASH);
}

/*
 * Frees all outstanding items in a bucket
 *
 * Arguments:
 *	zone   The zone to free to, must be unlocked.
 *	bucket The free/alloc bucket with items, cpu queue must be locked.
 *
 * Returns:
 *	Nothing
 */

static void
bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
{
	uma_slab_t slab;
	int mzone;
	void *item;

	if (bucket == NULL)
		return;

	slab = NULL;
	mzone = 0;

	/* We have to lookup the slab again for malloc.. */
	if (zone->uz_flags & UMA_ZONE_MALLOC)
		mzone = 1;

	while (bucket->ub_cnt > 0) {
		bucket->ub_cnt--;
		item = bucket->ub_bucket[bucket->ub_cnt];
#ifdef INVARIANTS
		bucket->ub_bucket[bucket->ub_cnt] = NULL;
		KASSERT(item != NULL,
		    ("bucket_drain: botched ptr, item is NULL"));
#endif
		/*
		 * This is extremely inefficient.  The slab pointer was passed
		 * to uma_zfree_arg, but we lost it because the buckets don't
		 * hold them.  This will go away when free() gets a size passed
		 * to it.
		 */
		if (mzone)
			slab = vtoslab((vm_offset_t)item & (~UMA_SLAB_MASK));
		uma_zfree_internal(zone, item, slab, 1);
	}
}

/*
 * Drains the per cpu caches for a zone.
 *
 * Arguments:
 *	zone     The zone to drain, must be unlocked.
 *	destroy  Whether or not to destroy the pcpu buckets (from zone_dtor)
 *
 * Returns:
 *	Nothing
 *
 * This function returns with the zone locked so that the per cpu queues can
 * not be filled until zone_drain is finished.
 *
 */
static void
cache_drain(uma_zone_t zone, int destroy)
{
	uma_bucket_t bucket;
	uma_cache_t cache;
	int cpu;

	/*
	 * Flush out the per cpu queues.
	 *
	 * XXX This causes unnecessary thrashing due to immediately having
	 * empty per cpu queues.  I need to improve this.
	 */

	/*
	 * We have to lock each cpu cache before locking the zone
	 */
	ZONE_UNLOCK(zone);

	for (cpu = 0; cpu < maxcpu; cpu++) {
		if (CPU_ABSENT(cpu))
			continue;
		CPU_LOCK(cpu);
		cache = &zone->uz_cpu[cpu];
		bucket_drain(zone, cache->uc_allocbucket);
		bucket_drain(zone, cache->uc_freebucket);
		if (destroy) {
			if (cache->uc_allocbucket != NULL)
				bucket_free(cache->uc_allocbucket);
			if (cache->uc_freebucket != NULL)
				bucket_free(cache->uc_freebucket);
			cache->uc_allocbucket = cache->uc_freebucket = NULL;
		}
	}

	/*
	 * Drain the bucket queues and free the buckets, we just keep two per
	 * cpu (alloc/free).
	 */
	ZONE_LOCK(zone);
	while ((bucket = LIST_FIRST(&zone->uz_full_bucket)) != NULL) {
		LIST_REMOVE(bucket, ub_link);
		ZONE_UNLOCK(zone);
		bucket_drain(zone, bucket);
		bucket_free(bucket);
		ZONE_LOCK(zone);
	}

	/* Now we do the free queue.. */
	while ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
		LIST_REMOVE(bucket, ub_link);
		bucket_free(bucket);
	}

	/* We unlock here, but they will all block until the zone is unlocked */
	for (cpu = 0; cpu < maxcpu; cpu++) {
		if (CPU_ABSENT(cpu))
			continue;
		CPU_UNLOCK(cpu);
	}
}

/*
 * Frees pages from a zone back to the system.  This is done on demand from
 * the pageout daemon.
 *
 * Arguments:
 *	zone     The zone to free pages from
 *	destroy  Whether to destroy the zone and pcpu buckets (from zone_dtor)
 *
 * Returns:
 *	Nothing.
 */
static void
zone_drain_common(uma_zone_t zone, int destroy)
{
	struct slabhead freeslabs = {};
	uma_slab_t slab;
	uma_slab_t n;
	u_int64_t extra;
	u_int8_t flags;
	u_int8_t *mem;
	int i;

	/*
	 * We don't want to take pages from statically allocated zones at this
	 * time
	 */
	if (zone->uz_flags & UMA_ZONE_NOFREE || zone->uz_freef == NULL)
		return;

	ZONE_LOCK(zone);

	if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
		cache_drain(zone, destroy);

	if (destroy)
		zone->uz_wssize = 0;

	if (zone->uz_free < zone->uz_wssize)
		goto finished;
#ifdef UMA_DEBUG
	printf("%s working set size: %llu free items: %u\n",
	    zone->uz_name, (unsigned long long)zone->uz_wssize, zone->uz_free);
#endif
	extra = zone->uz_free - zone->uz_wssize;
	extra /= zone->uz_ipers;

	/* extra is now the number of extra slabs that we can free */

	if (extra == 0)
		goto finished;

	slab = LIST_FIRST(&zone->uz_free_slab);
	while (slab && extra) {
		n = LIST_NEXT(slab, us_link);

		/* We have nowhere to free these to */
		if (slab->us_flags & UMA_SLAB_BOOT) {
			slab = n;
			continue;
		}

		LIST_REMOVE(slab, us_link);
		zone->uz_pages -= zone->uz_ppera;
		zone->uz_free -= zone->uz_ipers;

		if (zone->uz_flags & UMA_ZONE_HASH)
			UMA_HASH_REMOVE(&zone->uz_hash, slab, slab->us_data);

		SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);

		slab = n;
		extra--;
	}
finished:
	ZONE_UNLOCK(zone);

	while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
		SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
		if (zone->uz_fini)
			for (i = 0; i < zone->uz_ipers; i++)
				zone->uz_fini(
				    slab->us_data + (zone->uz_rsize * i),
				    zone->uz_size);
		flags = slab->us_flags;
		mem = slab->us_data;

		if (zone->uz_flags & UMA_ZONE_OFFPAGE)
			uma_zfree_internal(slabzone, slab, NULL, 0);
		if (zone->uz_flags & UMA_ZONE_MALLOC) {
			vm_object_t obj;

			if (flags & UMA_SLAB_KMEM)
				obj = kmem_object;
			else
				obj = NULL;
			for (i = 0; i < zone->uz_ppera; i++)
				vsetobj((vm_offset_t)mem + (i * PAGE_SIZE),
				    obj);
		}
#ifdef UMA_DEBUG
		printf("%s: Returning %d bytes.\n",
		    zone->uz_name, UMA_SLAB_SIZE * zone->uz_ppera);
#endif
		zone->uz_freef(mem, UMA_SLAB_SIZE * zone->uz_ppera, flags);
	}

}

static __inline void
zone_drain(uma_zone_t zone)
{
	zone_drain_common(zone, 0);
}

/*
 * Allocate a new slab for a zone.  This does not insert the slab onto a list.
 *
 * Arguments:
 *	zone  The zone to allocate slabs for
 *	wait  Shall we wait?
 *
 * Returns:
 *	The slab that was allocated or NULL if there is no memory and the
 *	caller specified M_NOWAIT.
 *
 */
static uma_slab_t
slab_zalloc(uma_zone_t zone, int wait)
{
	uma_slab_t slab;	/* Starting slab */
	u_int8_t *mem;
	u_int8_t flags;
	int i;

	slab = NULL;

#ifdef UMA_DEBUG
	printf("slab_zalloc:  Allocating a new slab for %s\n", zone->uz_name);
#endif
	ZONE_UNLOCK(zone);

	if (zone->uz_flags & UMA_ZONE_OFFPAGE) {
		slab = uma_zalloc_internal(slabzone, NULL, wait);
		if (slab == NULL) {
			ZONE_LOCK(zone);
			return NULL;
		}
	}

	/*
	 * This reproduces the old vm_zone behavior of zero filling pages the
	 * first time they are added to a zone.
	 *
	 * Malloced items are zeroed in uma_zalloc.
	 */

	if ((zone->uz_flags & UMA_ZONE_MALLOC) == 0)
		wait |= M_ZERO;
	else
		wait &= ~M_ZERO;

	if (booted || (zone->uz_flags & UMA_ZFLAG_PRIVALLOC)) {
		mem = zone->uz_allocf(zone, zone->uz_ppera * UMA_SLAB_SIZE,
		    &flags, wait);
		if (mem == NULL) {
			ZONE_LOCK(zone);
			return (NULL);
		}
	} else {
		uma_slab_t tmps;

		if (zone->uz_ppera > 1)
			panic("UMA: Attempting to allocate multiple pages before vm has started.\n");
		if (zone->uz_flags & UMA_ZONE_MALLOC)
			panic("Mallocing before uma_startup2 has been called.\n");
		if (uma_boot_free == 0)
			panic("UMA: Ran out of pre init pages, increase UMA_BOOT_PAGES\n");
		tmps = LIST_FIRST(&uma_boot_pages);
		LIST_REMOVE(tmps, us_link);
		uma_boot_free--;
		mem = tmps->us_data;
		flags = tmps->us_flags;
	}

	/* Point the slab into the allocated memory */
	if (!(zone->uz_flags & UMA_ZONE_OFFPAGE))
		slab = (uma_slab_t )(mem + zone->uz_pgoff);

	if (zone->uz_flags & UMA_ZONE_MALLOC)
		for (i = 0; i < zone->uz_ppera; i++)
			vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);

	slab->us_zone = zone;
	slab->us_data = mem;

	/*
	 * This is intended to spread data out across cache lines.
	 *
	 * This code doesn't seem to work properly on x86, and on alpha
	 * it makes absolutely no performance difference. I'm sure it could
	 * use some tuning, but sun makes outrageous claims about its
	 * performance.
	 */
#if 0
	if (zone->uz_cachemax) {
		slab->us_data += zone->uz_cacheoff;
		zone->uz_cacheoff += UMA_CACHE_INC;
		if (zone->uz_cacheoff > zone->uz_cachemax)
			zone->uz_cacheoff = 0;
	}
#endif

	slab->us_freecount = zone->uz_ipers;
	slab->us_firstfree = 0;
	slab->us_flags = flags;
	for (i = 0; i < zone->uz_ipers; i++)
		slab->us_freelist[i] = i+1;

	if (zone->uz_init)
		for (i = 0; i < zone->uz_ipers; i++)
			zone->uz_init(slab->us_data + (zone->uz_rsize * i),
			    zone->uz_size);
	ZONE_LOCK(zone);

	if (zone->uz_flags & UMA_ZONE_HASH)
		UMA_HASH_INSERT(&zone->uz_hash, slab, mem);

	zone->uz_pages += zone->uz_ppera;
	zone->uz_free += zone->uz_ipers;


	return (slab);
}

/*
 * Allocates a number of pages from the system
 *
 * Arguments:
 *	zone   Unused
 *	bytes  The number of bytes requested
 *	wait   Shall we wait?
 *
 * Returns:
 *	A pointer to the alloced memory or possibly
 *	NULL if M_NOWAIT is set.
 */
static void *
page_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
{
	void *p;	/* Returned page */

	*pflag = UMA_SLAB_KMEM;
	p = (void *) kmem_malloc(kmem_map, bytes, wait);

	return (p);
}

/*
 * Allocates a number of pages from within an object
 *
 * Arguments:
 *	zone   Unused
 *	bytes  The number of bytes requested
 *	wait   Shall we wait?
 *
 * Returns:
 *	A pointer to the alloced memory or possibly
 *	NULL if M_NOWAIT is set.
 *
 */
static void *
obj_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{
	vm_object_t object;
	vm_offset_t retkva, zkva;
	vm_page_t p;
	int pages, startpages;

	object = zone->uz_obj;
	retkva = 0;

	/*
	 * This looks a little weird since we're getting one page at a time
	 */
	VM_OBJECT_LOCK(object);
	p = TAILQ_LAST(&object->memq, pglist);
	pages = p != NULL ? p->pindex + 1 : 0;
	startpages = pages;
	zkva = zone->uz_kva + pages * PAGE_SIZE;
	for (; bytes > 0; bytes -= PAGE_SIZE) {
		p = vm_page_alloc(object, pages,
		    VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED);
		if (p == NULL) {
			if (pages != startpages)
				pmap_qremove(retkva, pages - startpages);
			while (pages != startpages) {
				pages--;
				p = TAILQ_LAST(&object->memq, pglist);
				vm_page_lock_queues();
				vm_page_unwire(p, 0);
				vm_page_free(p);
				vm_page_unlock_queues();
			}
			retkva = 0;
			goto done;
		}
		pmap_qenter(zkva, &p, 1);
		if (retkva == 0)
			retkva = zkva;
		zkva += PAGE_SIZE;
		pages += 1;
	}
done:
	VM_OBJECT_UNLOCK(object);

	*flags = UMA_SLAB_PRIV;

	return ((void *)retkva);
}

/*
 * Frees a number of pages to the system
 *
 * Arguments:
 *	mem   A pointer to the memory to be freed
 *	size  The size of the memory being freed
 *	flags The original p->us_flags field
 *
 * Returns:
 *	Nothing
 *
 */
static void
page_free(void *mem, int size, u_int8_t flags)
{
	vm_map_t map;

	if (flags & UMA_SLAB_KMEM)
		map = kmem_map;
	else
		panic("UMA: page_free used with invalid flags %d\n", flags);

	kmem_free(map, (vm_offset_t)mem, size);
}

/*
 * Zero fill initializer
 *
 * Arguments/Returns follow uma_init specifications
 *
 */
static void
zero_init(void *mem, int size)
{
	bzero(mem, size);
}

/*
 * Finish creating a small uma zone.  This calculates ipers, and the zone size.
 *
 * Arguments
 *	zone  The zone we should initialize
 *
 * Returns
 *	Nothing
 */
static void
zone_small_init(uma_zone_t zone)
{
	int rsize;
	int memused;
	int ipers;

	rsize = zone->uz_size;

	if (rsize < UMA_SMALLEST_UNIT)
		rsize = UMA_SMALLEST_UNIT;

	if (rsize & zone->uz_align)
		rsize = (rsize & ~zone->uz_align) + (zone->uz_align + 1);

	zone->uz_rsize = rsize;

	rsize += 1;	/* Account for the byte of linkage */
	zone->uz_ipers = (UMA_SLAB_SIZE - sizeof(struct uma_slab)) / rsize;
	zone->uz_ppera = 1;

	KASSERT(zone->uz_ipers != 0, ("zone_small_init: ipers is 0, uh-oh!"));
	memused = zone->uz_ipers * zone->uz_rsize;

	/* Can we do any better? */
	if ((UMA_SLAB_SIZE - memused) >= UMA_MAX_WASTE) {
		/*
		 * We can't do this if we're internal or if we've been
		 * asked to not go to the VM for buckets.  If we do this we
		 * may end up going to the VM (kmem_map) for slabs which we
		 * do not want to do if we're UMA_ZFLAG_CACHEONLY as a
		 * result of UMA_ZONE_VM, which clearly forbids it.
		 */
		if ((zone->uz_flags & UMA_ZFLAG_INTERNAL) ||
		    (zone->uz_flags & UMA_ZFLAG_CACHEONLY))
			return;
		ipers = UMA_SLAB_SIZE / zone->uz_rsize;
		if (ipers > zone->uz_ipers) {
			zone->uz_flags |= UMA_ZONE_OFFPAGE;
			if ((zone->uz_flags & UMA_ZONE_MALLOC) == 0)
				zone->uz_flags |= UMA_ZONE_HASH;
			zone->uz_ipers = ipers;
		}
	}

}

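/*
 * Worked example (illustrative, assuming UMA_SLAB_SIZE is one 4096 byte page
 * and writing H for sizeof(struct uma_slab)): for a 256 byte item with
 * pointer alignment, uz_rsize stays 256, the linkage byte makes the per-item
 * cost 257, and uz_ipers = (4096 - H) / 257, typically 15.  That leaves
 * roughly 4096 - 15 * 256 - H bytes of the page unused; if that waste
 * reaches UMA_MAX_WASTE, the slab header is pushed off the page instead,
 * giving 4096 / 256 = 16 items per slab.
 */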
/*
 * Finish creating a large (> UMA_SLAB_SIZE) uma zone.  Just give in and do
 * OFFPAGE for now.  When I can allow for more dynamic slab sizes this will be
 * more complicated.
 *
 * Arguments
 *	zone  The zone we should initialize
 *
 * Returns
 *	Nothing
 */
static void
zone_large_init(uma_zone_t zone)
{
	int pages;

	KASSERT((zone->uz_flags & UMA_ZFLAG_CACHEONLY) == 0,
	    ("zone_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY zone"));

	pages = zone->uz_size / UMA_SLAB_SIZE;

	/* Account for remainder */
	if ((pages * UMA_SLAB_SIZE) < zone->uz_size)
		pages++;

	zone->uz_ppera = pages;
	zone->uz_ipers = 1;

	zone->uz_flags |= UMA_ZONE_OFFPAGE;
	if ((zone->uz_flags & UMA_ZONE_MALLOC) == 0)
		zone->uz_flags |= UMA_ZONE_HASH;

	zone->uz_rsize = zone->uz_size;
}

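/*
 * Example (illustrative): a hypothetical 20480 byte (20 KB) item on a system
 * with 4096 byte slabs needs pages = 20480 / 4096 = 5 with no remainder, so
 * the zone gets uz_ppera = 5 with a single item per slab, and the slab
 * header always lives off the page.
 */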
/*
 * Zone header ctor.  This initializes all fields, locks, etc.  And inserts
 * the zone onto the global zone list.
 *
 * Arguments/Returns follow uma_ctor specifications
 *	udata  Actually uma_zctor_args
 *
 */

static void
zone_ctor(void *mem, int size, void *udata)
{
	struct uma_zctor_args *arg = udata;
	uma_zone_t zone = mem;
	int privlc;

	bzero(zone, size);
	zone->uz_name = arg->name;
	zone->uz_size = arg->size;
	zone->uz_ctor = arg->ctor;
	zone->uz_dtor = arg->dtor;
	zone->uz_init = arg->uminit;
	zone->uz_fini = arg->fini;
	zone->uz_align = arg->align;
	zone->uz_free = 0;
	zone->uz_pages = 0;
	zone->uz_flags = arg->flags;
	zone->uz_allocf = page_alloc;
	zone->uz_freef = page_free;

	if (arg->flags & UMA_ZONE_ZINIT)
		zone->uz_init = zero_init;

	if (arg->flags & UMA_ZONE_VM)
		zone->uz_flags |= UMA_ZFLAG_CACHEONLY;

	/*
	 * XXX:
	 * The +1 byte added to uz_size is to account for the byte of
	 * linkage that is added to the size in zone_small_init().  If
	 * we don't account for this here then we may end up in
	 * zone_small_init() with a calculated 'ipers' of 0.
	 */
	if ((zone->uz_size+1) > (UMA_SLAB_SIZE - sizeof(struct uma_slab)))
		zone_large_init(zone);
	else
		zone_small_init(zone);
#ifdef UMA_MD_SMALL_ALLOC
	if (zone->uz_ppera == 1) {
		zone->uz_allocf = uma_small_alloc;
		zone->uz_freef = uma_small_free;
	}
#endif	/* UMA_MD_SMALL_ALLOC */

	if (arg->flags & UMA_ZONE_MTXCLASS)
		privlc = 1;
	else
		privlc = 0;

	/*
	 * If we're putting the slab header in the actual page we need to
	 * figure out where in each page it goes.  This calculates a right
	 * justified offset into the memory on an ALIGN_PTR boundary.
	 */
	if (!(zone->uz_flags & UMA_ZONE_OFFPAGE)) {
		int totsize;
		int waste;

		/* Size of the slab struct and free list */
		totsize = sizeof(struct uma_slab) + zone->uz_ipers;
		if (totsize & UMA_ALIGN_PTR)
			totsize = (totsize & ~UMA_ALIGN_PTR) +
			    (UMA_ALIGN_PTR + 1);
		zone->uz_pgoff = UMA_SLAB_SIZE - totsize;

		waste = zone->uz_pgoff;
		waste -= (zone->uz_ipers * zone->uz_rsize);

		/*
		 * This calculates how much space we have for cache line size
		 * optimizations.  It works by offsetting each slab slightly.
		 * Currently it breaks on x86, and so it is disabled.
11738355f576SJeff Roberson */
11748355f576SJeff Roberson
11758355f576SJeff Roberson if (zone->uz_align < UMA_CACHE_INC && waste > UMA_CACHE_INC) {
11768355f576SJeff Roberson zone->uz_cachemax = waste - UMA_CACHE_INC;
11778355f576SJeff Roberson zone->uz_cacheoff = 0;
11788355f576SJeff Roberson }
11798355f576SJeff Roberson
11808355f576SJeff Roberson totsize = zone->uz_pgoff + sizeof(struct uma_slab)
11818355f576SJeff Roberson + zone->uz_ipers;
11828355f576SJeff Roberson /* I don't think it's possible, but I'll make sure anyway */
11838355f576SJeff Roberson if (totsize > UMA_SLAB_SIZE) {
11848355f576SJeff Roberson printf("zone %s ipers %d rsize %d size %d\n",
11858355f576SJeff Roberson zone->uz_name, zone->uz_ipers, zone->uz_rsize,
11868355f576SJeff Roberson zone->uz_size);
11878355f576SJeff Roberson panic("UMA slab won't fit.\n");
11888355f576SJeff Roberson }
11898355f576SJeff Roberson }
11908355f576SJeff Roberson
1191b60f5b79SJeff Roberson if (zone->uz_flags & UMA_ZONE_HASH)
119299571dc3SJeff Roberson hash_alloc(&zone->uz_hash);
119399571dc3SJeff Roberson
11948355f576SJeff Roberson #ifdef UMA_DEBUG
11958355f576SJeff Roberson printf("%s(%p) size = %d ipers = %d ppera = %d pgoff = %d\n",
11968355f576SJeff Roberson zone->uz_name, zone,
11978355f576SJeff Roberson zone->uz_size, zone->uz_ipers,
11988355f576SJeff Roberson zone->uz_ppera, zone->uz_pgoff);
11998355f576SJeff Roberson #endif
120028bc4419SJeff Roberson ZONE_LOCK_INIT(zone, privlc);
12018355f576SJeff Roberson
12028355f576SJeff Roberson mtx_lock(&uma_mtx);
12038355f576SJeff Roberson LIST_INSERT_HEAD(&uma_zones, zone, uz_link);
12048355f576SJeff Roberson mtx_unlock(&uma_mtx);
12058355f576SJeff Roberson
12068355f576SJeff Roberson /*
12078355f576SJeff Roberson * Some internal zones don't have room allocated for the per cpu
12088355f576SJeff Roberson * caches. If we're internal, bail out here.
12098355f576SJeff Roberson */
12108355f576SJeff Roberson
12118355f576SJeff Roberson if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
12128355f576SJeff Roberson return;
12138355f576SJeff Roberson
1214cae33c14SJeff Roberson if (zone->uz_ipers <= BUCKET_MAX)
1215cae33c14SJeff Roberson zone->uz_count = zone->uz_ipers;
12168355f576SJeff Roberson else
1217cae33c14SJeff Roberson zone->uz_count = BUCKET_MAX;
12188355f576SJeff Roberson }
12198355f576SJeff Roberson
12208355f576SJeff Roberson /*
12219c2cd7e5SJeff Roberson * Zone header dtor. This frees all data, destroys locks, frees the hash table
12229c2cd7e5SJeff Roberson * and removes the zone from the global list.
12239c2cd7e5SJeff Roberson *
12249c2cd7e5SJeff Roberson * Arguments/Returns follow uma_dtor specifications
12259c2cd7e5SJeff Roberson * udata unused
12269c2cd7e5SJeff Roberson */
12279c2cd7e5SJeff Roberson
12289c2cd7e5SJeff Roberson static void
12299c2cd7e5SJeff Roberson zone_dtor(void *arg, int size, void *udata)
12309c2cd7e5SJeff Roberson {
12319c2cd7e5SJeff Roberson uma_zone_t zone;
12329c2cd7e5SJeff Roberson
12339c2cd7e5SJeff Roberson zone = (uma_zone_t)arg;
123417b9cc49SJeff Roberson mtx_lock(&uma_mtx);
123517b9cc49SJeff Roberson LIST_REMOVE(zone, uz_link);
1236d56368d7SBosko Milekic zone_drain_common(zone, 1);
123717b9cc49SJeff Roberson mtx_unlock(&uma_mtx);
123817b9cc49SJeff Roberson
12399c2cd7e5SJeff Roberson ZONE_LOCK(zone);
12409c2cd7e5SJeff Roberson if (zone->uz_free != 0)
1241886eaaacSPoul-Henning Kamp printf("Zone %s was not empty (%d items). Lost %d pages of memory.\n",
1242886eaaacSPoul-Henning Kamp zone->uz_name, zone->uz_free, zone->uz_pages);
12439c2cd7e5SJeff Roberson
12449c2cd7e5SJeff Roberson ZONE_UNLOCK(zone);
1245b60f5b79SJeff Roberson if (zone->uz_flags & UMA_ZONE_HASH)
12460aef6126SJeff Roberson hash_free(&zone->uz_hash);
12470aef6126SJeff Roberson
12489c2cd7e5SJeff Roberson ZONE_LOCK_FINI(zone);
12499c2cd7e5SJeff Roberson }
12509c2cd7e5SJeff Roberson /*
12518355f576SJeff Roberson * Traverses every zone in the system and calls a callback
12528355f576SJeff Roberson *
12538355f576SJeff Roberson * Arguments:
12548355f576SJeff Roberson * zfunc A pointer to a function which accepts a zone
12558355f576SJeff Roberson * as an argument.
12568355f576SJeff Roberson *
12578355f576SJeff Roberson * Returns:
12588355f576SJeff Roberson * Nothing
12598355f576SJeff Roberson */
12608355f576SJeff Roberson static void
12618355f576SJeff Roberson zone_foreach(void (*zfunc)(uma_zone_t))
12628355f576SJeff Roberson {
12638355f576SJeff Roberson uma_zone_t zone;
12648355f576SJeff Roberson
12658355f576SJeff Roberson mtx_lock(&uma_mtx);
12668355f576SJeff Roberson LIST_FOREACH(zone, &uma_zones, uz_link) {
12678355f576SJeff Roberson zfunc(zone);
12688355f576SJeff Roberson }
12698355f576SJeff Roberson mtx_unlock(&uma_mtx);
12708355f576SJeff Roberson }
12718355f576SJeff Roberson
12728355f576SJeff Roberson /* Public functions */
12738355f576SJeff Roberson /* See uma.h */
12748355f576SJeff Roberson void
12758355f576SJeff Roberson uma_startup(void *bootmem)
12768355f576SJeff Roberson {
12778355f576SJeff Roberson struct uma_zctor_args args;
12788355f576SJeff Roberson uma_slab_t slab;
12798355f576SJeff Roberson int slabsize;
12808355f576SJeff Roberson int i;
12818355f576SJeff Roberson
12828355f576SJeff Roberson #ifdef UMA_DEBUG
12838355f576SJeff Roberson printf("Creating uma zone headers zone.\n");
12848355f576SJeff Roberson #endif
12858355f576SJeff Roberson #ifdef SMP
12868355f576SJeff Roberson maxcpu = mp_maxid + 1;
12878355f576SJeff Roberson #else
12888355f576SJeff Roberson maxcpu = 1;
12898355f576SJeff Roberson #endif
12908355f576SJeff Roberson #ifdef UMA_DEBUG
12918355f576SJeff Roberson printf("Max cpu = %d, mp_maxid = %d\n", maxcpu, mp_maxid);
12928355f576SJeff Roberson Debugger("stop");
12938355f576SJeff Roberson #endif
12946008862bSJohn Baldwin mtx_init(&uma_mtx, "UMA lock", NULL, MTX_DEF);
12958355f576SJeff Roberson /* "Manually" create the initial zone */
12968355f576SJeff Roberson args.name = "UMA Zones";
12978355f576SJeff Roberson args.size = sizeof(struct uma_zone) +
12988355f576SJeff Roberson (sizeof(struct uma_cache) * (maxcpu - 1));
12998355f576SJeff Roberson args.ctor = zone_ctor;
13009c2cd7e5SJeff Roberson args.dtor = zone_dtor;
13018355f576SJeff Roberson args.uminit = zero_init;
13028355f576SJeff Roberson args.fini = NULL;
13038355f576SJeff Roberson args.align = 32 - 1;
1304b60f5b79SJeff Roberson args.flags = UMA_ZFLAG_INTERNAL;
13058355f576SJeff Roberson /* The initial zone has no per cpu queues so it's smaller */
13068355f576SJeff Roberson zone_ctor(zones, sizeof(struct uma_zone), &args);
13078355f576SJeff Roberson
1308d88797c2SBosko Milekic /* Initialize the pcpu cache lock set once and for all */
1309d88797c2SBosko Milekic for (i = 0; i < maxcpu; i++)
1310d88797c2SBosko Milekic CPU_LOCK_INIT(i);
1311d88797c2SBosko Milekic
13128355f576SJeff Roberson #ifdef UMA_DEBUG
13138355f576SJeff Roberson printf("Filling boot free list.\n");
13148355f576SJeff Roberson #endif
13158355f576SJeff Roberson for (i = 0; i < UMA_BOOT_PAGES; i++) {
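/*
 * Each boot page doubles as its own slab: the slab header sits at
 * the start of the page, us_data points back at the page itself, and
 * the page is queued on uma_boot_pages for use before the VM is up.
 */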
13168355f576SJeff Roberson slab = (uma_slab_t)((u_int8_t *)bootmem + (i * UMA_SLAB_SIZE));
13178355f576SJeff Roberson slab->us_data = (u_int8_t *)slab;
13188355f576SJeff Roberson slab->us_flags = UMA_SLAB_BOOT;
13198355f576SJeff Roberson LIST_INSERT_HEAD(&uma_boot_pages, slab, us_link);
13208355f576SJeff Roberson uma_boot_free++;
13218355f576SJeff Roberson }
13228355f576SJeff Roberson
13238355f576SJeff Roberson #ifdef UMA_DEBUG
13248355f576SJeff Roberson printf("Creating slab zone.\n");
13258355f576SJeff Roberson #endif
13268355f576SJeff Roberson
13278355f576SJeff Roberson /*
13288355f576SJeff Roberson * This is the max number of free list items we'll have with
13298355f576SJeff Roberson * offpage slabs.
13308355f576SJeff Roberson */
13318355f576SJeff Roberson
13328355f576SJeff Roberson slabsize = UMA_SLAB_SIZE - sizeof(struct uma_slab);
13338355f576SJeff Roberson slabsize /= UMA_MAX_WASTE;
13348355f576SJeff Roberson slabsize++; /* In case the division rounded down */
13358355f576SJeff Roberson slabsize += sizeof(struct uma_slab);
13368355f576SJeff Roberson
13378355f576SJeff Roberson /* Now make a zone for slab headers */
13388355f576SJeff Roberson slabzone = uma_zcreate("UMA Slabs",
13398355f576SJeff Roberson slabsize,
13408355f576SJeff Roberson NULL, NULL, NULL, NULL,
1341b60f5b79SJeff Roberson UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
13428355f576SJeff Roberson
13438355f576SJeff Roberson hashzone = uma_zcreate("UMA Hash",
13448355f576SJeff Roberson sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
13458355f576SJeff Roberson NULL, NULL, NULL, NULL,
1346b60f5b79SJeff Roberson UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
13478355f576SJeff Roberson
1348cae33c14SJeff Roberson bucket_init();
13498355f576SJeff Roberson
135048eea375SJeff Roberson #ifdef UMA_MD_SMALL_ALLOC
135148eea375SJeff Roberson booted = 1;
135248eea375SJeff Roberson #endif
13538355f576SJeff Roberson
13548355f576SJeff Roberson #ifdef UMA_DEBUG
13558355f576SJeff Roberson printf("UMA startup complete.\n");
13568355f576SJeff Roberson #endif
13578355f576SJeff Roberson }
13588355f576SJeff Roberson
13598355f576SJeff Roberson /* see uma.h */
13608355f576SJeff Roberson void
136199571dc3SJeff Roberson uma_startup2(void)
13628355f576SJeff Roberson {
13638355f576SJeff Roberson booted = 1;
136486bbae32SJeff Roberson bucket_enable();
13658355f576SJeff Roberson #ifdef UMA_DEBUG
13668355f576SJeff Roberson printf("UMA startup2 complete.\n");
13678355f576SJeff Roberson #endif
13688355f576SJeff Roberson }
13698355f576SJeff Roberson
13708355f576SJeff Roberson /*
13718355f576SJeff Roberson * Initialize our callout handle
13728355f576SJeff Roberson *
13738355f576SJeff Roberson */
13748355f576SJeff Roberson
13758355f576SJeff Roberson static void
13768355f576SJeff Roberson uma_startup3(void)
13778355f576SJeff Roberson {
13788355f576SJeff Roberson #ifdef UMA_DEBUG
13798355f576SJeff Roberson printf("Starting callout.\n");
13808355f576SJeff Roberson #endif
13818355f576SJeff Roberson callout_init(&uma_callout, 0);
13828355f576SJeff Roberson callout_reset(&uma_callout, UMA_WORKING_TIME * hz, uma_timeout, NULL);
13838355f576SJeff Roberson #ifdef UMA_DEBUG
13848355f576SJeff Roberson printf("UMA startup3 complete.\n");
13858355f576SJeff Roberson #endif
13868355f576SJeff Roberson }
13878355f576SJeff Roberson
13888355f576SJeff Roberson /* See uma.h */
13898355f576SJeff Roberson uma_zone_t
1390c3bdc05fSAndrew R. Reiter uma_zcreate(char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
1391c3bdc05fSAndrew R. Reiter uma_init uminit, uma_fini fini, int align, u_int16_t flags)
13928355f576SJeff Roberson
13938355f576SJeff Roberson {
13948355f576SJeff Roberson struct uma_zctor_args args;
13958355f576SJeff Roberson
13968355f576SJeff Roberson /* This stuff is essential for the zone ctor */
13978355f576SJeff Roberson args.name = name;
13988355f576SJeff Roberson args.size = size;
13998355f576SJeff Roberson args.ctor = ctor;
14008355f576SJeff Roberson args.dtor = dtor;
14018355f576SJeff Roberson args.uminit = uminit;
14028355f576SJeff Roberson args.fini = fini;
14038355f576SJeff Roberson args.align = align;
14048355f576SJeff Roberson args.flags = flags;
14058355f576SJeff Roberson
1406a163d034SWarner Losh return (uma_zalloc_internal(zones, &args, M_WAITOK));
14078355f576SJeff Roberson }
14088355f576SJeff Roberson
14098355f576SJeff Roberson /* See uma.h */
14109c2cd7e5SJeff Roberson void
14119c2cd7e5SJeff Roberson uma_zdestroy(uma_zone_t zone)
14129c2cd7e5SJeff Roberson {
14139c2cd7e5SJeff Roberson uma_zfree_internal(zones, zone, NULL, 0);
14149c2cd7e5SJeff Roberson }
14159c2cd7e5SJeff Roberson
14169c2cd7e5SJeff Roberson /* See uma.h */
14178355f576SJeff Roberson void *
14182cc35ff9SJeff Roberson uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
14198355f576SJeff Roberson {
14208355f576SJeff Roberson void *item;
14218355f576SJeff Roberson uma_cache_t cache;
14228355f576SJeff Roberson uma_bucket_t bucket;
14238355f576SJeff Roberson int cpu;
14248355f576SJeff Roberson
14258355f576SJeff Roberson /* This is the fast path allocation */
14268355f576SJeff Roberson #ifdef UMA_DEBUG_ALLOC_1
14278355f576SJeff Roberson printf("Allocating one item from %s(%p)\n", zone->uz_name, zone);
14288355f576SJeff Roberson #endif
1429a553d4b8SJeff Roberson
14308522511bSHartmut Brandt #ifdef INVARIANTS
14318522511bSHartmut Brandt /*
14328522511bSHartmut Brandt * Make sure that exactly one of M_WAITOK or M_NOWAIT is set, and
14338522511bSHartmut Brandt * check for the API botches that are common.
14348522511bSHartmut Brandt * The uma code implies M_WAITOK if M_NOWAIT is not set, so
14358522511bSHartmut Brandt * we default to waiting if none of the flags is set.
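 * For example, passing both M_WAITOK and M_NOWAIT, or passing neither,
 * trips this check; the complaint is printed at most once and is rate
 * limited.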
14368522511bSHartmut Brandt */ 14378522511bSHartmut Brandt cpu = flags & (M_WAITOK | M_NOWAIT | M_DONTWAIT | M_TRYWAIT); 14388522511bSHartmut Brandt if (cpu != M_NOWAIT && cpu != M_WAITOK) { 14398522511bSHartmut Brandt static struct timeval lasterr; 14408522511bSHartmut Brandt static int curerr, once; 14418522511bSHartmut Brandt if (once == 0 && ppsratecheck(&lasterr, &curerr, 1)) { 14428522511bSHartmut Brandt printf("Bad uma_zalloc flags: %x\n", cpu); 14438522511bSHartmut Brandt backtrace(); 14448522511bSHartmut Brandt once++; 14458522511bSHartmut Brandt } 14468522511bSHartmut Brandt } 14478522511bSHartmut Brandt #endif 14488522511bSHartmut Brandt 14494c1cc01cSJohn Baldwin if (!(flags & M_NOWAIT)) { 14504c1cc01cSJohn Baldwin KASSERT(curthread->td_intr_nesting_level == 0, 1451a163d034SWarner Losh ("malloc(M_WAITOK) in interrupt context")); 145226306795SJohn Baldwin WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 145326306795SJohn Baldwin "malloc() of \"%s\"", zone->uz_name); 14544c1cc01cSJohn Baldwin } 14554c1cc01cSJohn Baldwin 1456a553d4b8SJeff Roberson zalloc_restart: 14578355f576SJeff Roberson cpu = PCPU_GET(cpuid); 1458d88797c2SBosko Milekic CPU_LOCK(cpu); 14598355f576SJeff Roberson cache = &zone->uz_cpu[cpu]; 14608355f576SJeff Roberson 14618355f576SJeff Roberson zalloc_start: 14628355f576SJeff Roberson bucket = cache->uc_allocbucket; 14638355f576SJeff Roberson 14648355f576SJeff Roberson if (bucket) { 1465cae33c14SJeff Roberson if (bucket->ub_cnt > 0) { 1466cae33c14SJeff Roberson bucket->ub_cnt--; 1467cae33c14SJeff Roberson item = bucket->ub_bucket[bucket->ub_cnt]; 14688355f576SJeff Roberson #ifdef INVARIANTS 1469cae33c14SJeff Roberson bucket->ub_bucket[bucket->ub_cnt] = NULL; 14708355f576SJeff Roberson #endif 14718355f576SJeff Roberson KASSERT(item != NULL, 14728355f576SJeff Roberson ("uma_zalloc: Bucket pointer mangled.")); 14738355f576SJeff Roberson cache->uc_allocs++; 1474639c9550SJeff Roberson #ifdef INVARIANTS 147581f71edaSMatt Jacob ZONE_LOCK(zone); 1476639c9550SJeff Roberson uma_dbg_alloc(zone, NULL, item); 147781f71edaSMatt Jacob ZONE_UNLOCK(zone); 1478639c9550SJeff Roberson #endif 1479d88797c2SBosko Milekic CPU_UNLOCK(cpu); 14808355f576SJeff Roberson if (zone->uz_ctor) 14818355f576SJeff Roberson zone->uz_ctor(item, zone->uz_size, udata); 14822cc35ff9SJeff Roberson if (flags & M_ZERO) 14832cc35ff9SJeff Roberson bzero(item, zone->uz_size); 14848355f576SJeff Roberson return (item); 14858355f576SJeff Roberson } else if (cache->uc_freebucket) { 14868355f576SJeff Roberson /* 14878355f576SJeff Roberson * We have run out of items in our allocbucket. 14888355f576SJeff Roberson * See if we can switch with our free bucket. 
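 * (The swap below just exchanges the two bucket pointers; no items
 * are copied.)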
14898355f576SJeff Roberson */
1490cae33c14SJeff Roberson if (cache->uc_freebucket->ub_cnt > 0) {
14918355f576SJeff Roberson #ifdef UMA_DEBUG_ALLOC
14928355f576SJeff Roberson printf("uma_zalloc: Swapping empty with alloc.\n");
14938355f576SJeff Roberson #endif
1494b983089aSJeff Roberson bucket = cache->uc_freebucket;
14958355f576SJeff Roberson cache->uc_freebucket = cache->uc_allocbucket;
1496b983089aSJeff Roberson cache->uc_allocbucket = bucket;
14978355f576SJeff Roberson
14988355f576SJeff Roberson goto zalloc_start;
14998355f576SJeff Roberson }
15008355f576SJeff Roberson }
15018355f576SJeff Roberson }
1502a553d4b8SJeff Roberson ZONE_LOCK(zone);
1503a553d4b8SJeff Roberson /* Since we have locked the zone we may as well send back our stats */
1504a553d4b8SJeff Roberson zone->uz_allocs += cache->uc_allocs;
1505a553d4b8SJeff Roberson cache->uc_allocs = 0;
15068355f576SJeff Roberson
1507a553d4b8SJeff Roberson /* Our old one is now a free bucket */
1508a553d4b8SJeff Roberson if (cache->uc_allocbucket) {
1509cae33c14SJeff Roberson KASSERT(cache->uc_allocbucket->ub_cnt == 0,
1510a553d4b8SJeff Roberson ("uma_zalloc_arg: Freeing a non free bucket."));
1511a553d4b8SJeff Roberson LIST_INSERT_HEAD(&zone->uz_free_bucket,
1512a553d4b8SJeff Roberson cache->uc_allocbucket, ub_link);
1513a553d4b8SJeff Roberson cache->uc_allocbucket = NULL;
1514a553d4b8SJeff Roberson }
15158355f576SJeff Roberson
1516a553d4b8SJeff Roberson /* Check the free list for a new alloc bucket */
1517a553d4b8SJeff Roberson if ((bucket = LIST_FIRST(&zone->uz_full_bucket)) != NULL) {
1518cae33c14SJeff Roberson KASSERT(bucket->ub_cnt != 0,
1519a553d4b8SJeff Roberson ("uma_zalloc_arg: Returning an empty bucket."));
15208355f576SJeff Roberson
1521a553d4b8SJeff Roberson LIST_REMOVE(bucket, ub_link);
1522a553d4b8SJeff Roberson cache->uc_allocbucket = bucket;
1523a553d4b8SJeff Roberson ZONE_UNLOCK(zone);
15248355f576SJeff Roberson goto zalloc_start;
1525a553d4b8SJeff Roberson }
1526bbee39c6SJeff Roberson /* We are no longer associated with this cpu!!! */
1527d88797c2SBosko Milekic CPU_UNLOCK(cpu);
1528bbee39c6SJeff Roberson
1529a553d4b8SJeff Roberson /* Bump up our uz_count so we get here less often */
1530cae33c14SJeff Roberson if (zone->uz_count < BUCKET_MAX)
1531a553d4b8SJeff Roberson zone->uz_count++;
1532a553d4b8SJeff Roberson
15338355f576SJeff Roberson /*
1534a553d4b8SJeff Roberson * Now let's just fill a bucket and put it on the free list. If that
1535a553d4b8SJeff Roberson * works we'll restart the allocation from the beginning.
1536bbee39c6SJeff Roberson */
1537bbee39c6SJeff Roberson
1538bbee39c6SJeff Roberson if (uma_zalloc_bucket(zone, flags)) {
1539bbee39c6SJeff Roberson ZONE_UNLOCK(zone);
1540bbee39c6SJeff Roberson goto zalloc_restart;
1541bbee39c6SJeff Roberson }
1542bbee39c6SJeff Roberson ZONE_UNLOCK(zone);
1543bbee39c6SJeff Roberson /*
1544bbee39c6SJeff Roberson * We may not be able to get a bucket so return an actual item.
1545bbee39c6SJeff Roberson */ 1546bbee39c6SJeff Roberson #ifdef UMA_DEBUG 1547bbee39c6SJeff Roberson printf("uma_zalloc_arg: Bucketzone returned NULL\n"); 1548bbee39c6SJeff Roberson #endif 1549bbee39c6SJeff Roberson 1550bbee39c6SJeff Roberson return (uma_zalloc_internal(zone, udata, flags)); 1551bbee39c6SJeff Roberson } 1552bbee39c6SJeff Roberson 1553bbee39c6SJeff Roberson static uma_slab_t 1554bbee39c6SJeff Roberson uma_zone_slab(uma_zone_t zone, int flags) 1555bbee39c6SJeff Roberson { 1556bbee39c6SJeff Roberson uma_slab_t slab; 1557bbee39c6SJeff Roberson 1558bbee39c6SJeff Roberson /* 1559bbee39c6SJeff Roberson * This is to prevent us from recursively trying to allocate 1560bbee39c6SJeff Roberson * buckets. The problem is that if an allocation forces us to 1561bbee39c6SJeff Roberson * grab a new bucket we will call page_alloc, which will go off 1562bbee39c6SJeff Roberson * and cause the vm to allocate vm_map_entries. If we need new 1563bbee39c6SJeff Roberson * buckets there too we will recurse in kmem_alloc and bad 1564bbee39c6SJeff Roberson * things happen. So instead we return a NULL bucket, and make 1565bbee39c6SJeff Roberson * the code that allocates buckets smart enough to deal with it 1566bbee39c6SJeff Roberson */ 1567cae33c14SJeff Roberson if (zone->uz_flags & UMA_ZFLAG_INTERNAL && zone->uz_recurse != 0) 1568bbee39c6SJeff Roberson return (NULL); 1569bbee39c6SJeff Roberson 1570bbee39c6SJeff Roberson slab = NULL; 1571bbee39c6SJeff Roberson 1572bbee39c6SJeff Roberson for (;;) { 1573bbee39c6SJeff Roberson /* 1574bbee39c6SJeff Roberson * Find a slab with some space. Prefer slabs that are partially 1575bbee39c6SJeff Roberson * used over those that are totally full. This helps to reduce 1576bbee39c6SJeff Roberson * fragmentation. 1577bbee39c6SJeff Roberson */ 1578bbee39c6SJeff Roberson if (zone->uz_free != 0) { 1579bbee39c6SJeff Roberson if (!LIST_EMPTY(&zone->uz_part_slab)) { 1580bbee39c6SJeff Roberson slab = LIST_FIRST(&zone->uz_part_slab); 1581bbee39c6SJeff Roberson } else { 1582bbee39c6SJeff Roberson slab = LIST_FIRST(&zone->uz_free_slab); 1583bbee39c6SJeff Roberson LIST_REMOVE(slab, us_link); 1584bbee39c6SJeff Roberson LIST_INSERT_HEAD(&zone->uz_part_slab, slab, 1585bbee39c6SJeff Roberson us_link); 1586bbee39c6SJeff Roberson } 1587bbee39c6SJeff Roberson return (slab); 1588bbee39c6SJeff Roberson } 1589bbee39c6SJeff Roberson 1590bbee39c6SJeff Roberson /* 1591bbee39c6SJeff Roberson * M_NOVM means don't ask at all! 1592bbee39c6SJeff Roberson */ 1593bbee39c6SJeff Roberson if (flags & M_NOVM) 1594bbee39c6SJeff Roberson break; 1595bbee39c6SJeff Roberson 1596bbee39c6SJeff Roberson if (zone->uz_maxpages && 1597bbee39c6SJeff Roberson zone->uz_pages >= zone->uz_maxpages) { 1598bbee39c6SJeff Roberson zone->uz_flags |= UMA_ZFLAG_FULL; 1599bbee39c6SJeff Roberson 1600ebc85edfSJeff Roberson if (flags & M_NOWAIT) 1601bbee39c6SJeff Roberson break; 1602ebc85edfSJeff Roberson else 1603ebc85edfSJeff Roberson msleep(zone, &zone->uz_lock, PVM, "zonelimit", 0); 1604bbee39c6SJeff Roberson continue; 1605bbee39c6SJeff Roberson } 1606bbee39c6SJeff Roberson zone->uz_recurse++; 1607bbee39c6SJeff Roberson slab = slab_zalloc(zone, flags); 1608bbee39c6SJeff Roberson zone->uz_recurse--; 1609bbee39c6SJeff Roberson /* 1610bbee39c6SJeff Roberson * If we got a slab here it's safe to mark it partially used 1611bbee39c6SJeff Roberson * and return. We assume that the caller is going to remove 1612bbee39c6SJeff Roberson * at least one item. 
1613bbee39c6SJeff Roberson */ 1614bbee39c6SJeff Roberson if (slab) { 1615bbee39c6SJeff Roberson LIST_INSERT_HEAD(&zone->uz_part_slab, slab, us_link); 1616bbee39c6SJeff Roberson return (slab); 1617bbee39c6SJeff Roberson } 1618bbee39c6SJeff Roberson /* 1619bbee39c6SJeff Roberson * We might not have been able to get a slab but another cpu 1620bbee39c6SJeff Roberson * could have while we were unlocked. Check again before we 1621bbee39c6SJeff Roberson * fail. 1622bbee39c6SJeff Roberson */ 1623ebc85edfSJeff Roberson if (flags & M_NOWAIT) 1624bbee39c6SJeff Roberson flags |= M_NOVM; 1625bbee39c6SJeff Roberson } 1626bbee39c6SJeff Roberson return (slab); 1627bbee39c6SJeff Roberson } 1628bbee39c6SJeff Roberson 1629d56368d7SBosko Milekic static void * 1630bbee39c6SJeff Roberson uma_slab_alloc(uma_zone_t zone, uma_slab_t slab) 1631bbee39c6SJeff Roberson { 1632bbee39c6SJeff Roberson void *item; 1633bbee39c6SJeff Roberson u_int8_t freei; 1634bbee39c6SJeff Roberson 1635bbee39c6SJeff Roberson freei = slab->us_firstfree; 1636bbee39c6SJeff Roberson slab->us_firstfree = slab->us_freelist[freei]; 1637bbee39c6SJeff Roberson item = slab->us_data + (zone->uz_rsize * freei); 1638bbee39c6SJeff Roberson 1639bbee39c6SJeff Roberson slab->us_freecount--; 1640bbee39c6SJeff Roberson zone->uz_free--; 1641bbee39c6SJeff Roberson #ifdef INVARIANTS 1642bbee39c6SJeff Roberson uma_dbg_alloc(zone, slab, item); 1643bbee39c6SJeff Roberson #endif 1644bbee39c6SJeff Roberson /* Move this slab to the full list */ 1645bbee39c6SJeff Roberson if (slab->us_freecount == 0) { 1646bbee39c6SJeff Roberson LIST_REMOVE(slab, us_link); 1647bbee39c6SJeff Roberson LIST_INSERT_HEAD(&zone->uz_full_slab, slab, us_link); 1648bbee39c6SJeff Roberson } 1649bbee39c6SJeff Roberson 1650bbee39c6SJeff Roberson return (item); 1651bbee39c6SJeff Roberson } 1652bbee39c6SJeff Roberson 1653bbee39c6SJeff Roberson static int 1654bbee39c6SJeff Roberson uma_zalloc_bucket(uma_zone_t zone, int flags) 1655bbee39c6SJeff Roberson { 1656bbee39c6SJeff Roberson uma_bucket_t bucket; 1657bbee39c6SJeff Roberson uma_slab_t slab; 1658bbee39c6SJeff Roberson 1659bbee39c6SJeff Roberson /* 1660a553d4b8SJeff Roberson * Try this zone's free list first so we don't allocate extra buckets. 16618355f576SJeff Roberson */ 1662a553d4b8SJeff Roberson 1663bbee39c6SJeff Roberson if ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) { 1664cae33c14SJeff Roberson KASSERT(bucket->ub_cnt == 0, 1665bbee39c6SJeff Roberson ("uma_zalloc_bucket: Bucket on free list is not empty.")); 1666a553d4b8SJeff Roberson LIST_REMOVE(bucket, ub_link); 1667bbee39c6SJeff Roberson } else { 166818aa2de5SJeff Roberson int bflags; 166918aa2de5SJeff Roberson 1670cae33c14SJeff Roberson bflags = (flags & ~M_ZERO); 167120e8e865SBosko Milekic if (zone->uz_flags & UMA_ZFLAG_CACHEONLY) 167218aa2de5SJeff Roberson bflags |= M_NOVM; 167318aa2de5SJeff Roberson 1674bbee39c6SJeff Roberson ZONE_UNLOCK(zone); 1675cae33c14SJeff Roberson bucket = bucket_alloc(zone->uz_count, bflags); 1676bbee39c6SJeff Roberson ZONE_LOCK(zone); 1677bbee39c6SJeff Roberson } 1678bbee39c6SJeff Roberson 1679bbee39c6SJeff Roberson if (bucket == NULL) 1680bbee39c6SJeff Roberson return (0); 1681bbee39c6SJeff Roberson 1682bbee39c6SJeff Roberson #ifdef SMP 1683a553d4b8SJeff Roberson /* 1684bbee39c6SJeff Roberson * This code is here to limit the number of simultaneous bucket fills 1685bbee39c6SJeff Roberson * for any given zone to the number of per cpu caches in this zone. 
This 1686bbee39c6SJeff Roberson * is done so that we don't allocate more memory than we really need. 1687a553d4b8SJeff Roberson */ 1688bbee39c6SJeff Roberson if (zone->uz_fills >= mp_ncpus) 1689bbee39c6SJeff Roberson goto done; 1690a553d4b8SJeff Roberson 1691bbee39c6SJeff Roberson #endif 1692bbee39c6SJeff Roberson zone->uz_fills++; 1693bbee39c6SJeff Roberson 1694bbee39c6SJeff Roberson /* Try to keep the buckets totally full */ 1695cae33c14SJeff Roberson while (bucket->ub_cnt < bucket->ub_entries && 1696d11e0ba5SJeff Roberson (slab = uma_zone_slab(zone, flags)) != NULL) { 1697bbee39c6SJeff Roberson while (slab->us_freecount && 1698cae33c14SJeff Roberson bucket->ub_cnt < bucket->ub_entries) { 1699cae33c14SJeff Roberson bucket->ub_bucket[bucket->ub_cnt++] = 1700bbee39c6SJeff Roberson uma_slab_alloc(zone, slab); 1701bbee39c6SJeff Roberson } 1702bbee39c6SJeff Roberson /* Don't block on the next fill */ 1703bbee39c6SJeff Roberson flags |= M_NOWAIT; 17048355f576SJeff Roberson } 17058355f576SJeff Roberson 1706bbee39c6SJeff Roberson zone->uz_fills--; 1707bbee39c6SJeff Roberson 1708cae33c14SJeff Roberson if (bucket->ub_cnt != 0) { 1709bbee39c6SJeff Roberson LIST_INSERT_HEAD(&zone->uz_full_bucket, 1710bbee39c6SJeff Roberson bucket, ub_link); 1711bbee39c6SJeff Roberson return (1); 1712bbee39c6SJeff Roberson } 1713bbee39c6SJeff Roberson #ifdef SMP 1714bbee39c6SJeff Roberson done: 1715bbee39c6SJeff Roberson #endif 1716cae33c14SJeff Roberson bucket_free(bucket); 1717bbee39c6SJeff Roberson 1718bbee39c6SJeff Roberson return (0); 1719bbee39c6SJeff Roberson } 17208355f576SJeff Roberson /* 1721bbee39c6SJeff Roberson * Allocates an item for an internal zone 17228355f576SJeff Roberson * 17238355f576SJeff Roberson * Arguments 17248355f576SJeff Roberson * zone The zone to alloc for. 17258355f576SJeff Roberson * udata The data to be passed to the constructor. 1726a163d034SWarner Losh * flags M_WAITOK, M_NOWAIT, M_ZERO. 
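 * (M_NOVM is also honored here; it forbids allocating fresh slabs.)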
17278355f576SJeff Roberson * 17288355f576SJeff Roberson * Returns 17298355f576SJeff Roberson * NULL if there is no memory and M_NOWAIT is set 1730bbee39c6SJeff Roberson * An item if successful 17318355f576SJeff Roberson */ 17328355f576SJeff Roberson 17338355f576SJeff Roberson static void * 1734bbee39c6SJeff Roberson uma_zalloc_internal(uma_zone_t zone, void *udata, int flags) 17358355f576SJeff Roberson { 17368355f576SJeff Roberson uma_slab_t slab; 17378355f576SJeff Roberson void *item; 17388355f576SJeff Roberson 17398355f576SJeff Roberson item = NULL; 17408355f576SJeff Roberson 17418355f576SJeff Roberson #ifdef UMA_DEBUG_ALLOC 17428355f576SJeff Roberson printf("INTERNAL: Allocating one item from %s(%p)\n", zone->uz_name, zone); 17438355f576SJeff Roberson #endif 17448355f576SJeff Roberson ZONE_LOCK(zone); 17458355f576SJeff Roberson 1746bbee39c6SJeff Roberson slab = uma_zone_slab(zone, flags); 1747bbee39c6SJeff Roberson if (slab == NULL) { 1748bce97791SJeff Roberson ZONE_UNLOCK(zone); 1749a553d4b8SJeff Roberson return (NULL); 1750bce97791SJeff Roberson } 1751a553d4b8SJeff Roberson 1752bbee39c6SJeff Roberson item = uma_slab_alloc(zone, slab); 17538355f576SJeff Roberson 17548355f576SJeff Roberson ZONE_UNLOCK(zone); 17558355f576SJeff Roberson 17563370c5bfSJeff Roberson if (zone->uz_ctor != NULL) 17578355f576SJeff Roberson zone->uz_ctor(item, zone->uz_size, udata); 17582cc35ff9SJeff Roberson if (flags & M_ZERO) 17592cc35ff9SJeff Roberson bzero(item, zone->uz_size); 17608355f576SJeff Roberson 17618355f576SJeff Roberson return (item); 17628355f576SJeff Roberson } 17638355f576SJeff Roberson 17648355f576SJeff Roberson /* See uma.h */ 17658355f576SJeff Roberson void 17668355f576SJeff Roberson uma_zfree_arg(uma_zone_t zone, void *item, void *udata) 17678355f576SJeff Roberson { 17688355f576SJeff Roberson uma_cache_t cache; 17698355f576SJeff Roberson uma_bucket_t bucket; 17704741dcbfSJeff Roberson int bflags; 17718355f576SJeff Roberson int cpu; 17725c133dfaSBosko Milekic int skip; 17738355f576SJeff Roberson 17748355f576SJeff Roberson /* This is the fast path free */ 17755c133dfaSBosko Milekic skip = 0; 17768355f576SJeff Roberson #ifdef UMA_DEBUG_ALLOC_1 17778355f576SJeff Roberson printf("Freeing item %p to %s(%p)\n", item, zone->uz_name, zone); 17788355f576SJeff Roberson #endif 1779af7f9b97SJeff Roberson /* 1780af7f9b97SJeff Roberson * The race here is acceptable. If we miss it we'll just have to wait 1781af7f9b97SJeff Roberson * a little longer for the limits to be reset. 1782af7f9b97SJeff Roberson */ 1783af7f9b97SJeff Roberson 1784af7f9b97SJeff Roberson if (zone->uz_flags & UMA_ZFLAG_FULL) 1785af7f9b97SJeff Roberson goto zfree_internal; 1786af7f9b97SJeff Roberson 17875c133dfaSBosko Milekic if (zone->uz_dtor) { 1788bba739abSJeff Roberson zone->uz_dtor(item, zone->uz_size, udata); 17895c133dfaSBosko Milekic skip = 1; 17905c133dfaSBosko Milekic } 1791bba739abSJeff Roberson 1792a553d4b8SJeff Roberson zfree_restart: 17938355f576SJeff Roberson cpu = PCPU_GET(cpuid); 1794d88797c2SBosko Milekic CPU_LOCK(cpu); 17958355f576SJeff Roberson cache = &zone->uz_cpu[cpu]; 17968355f576SJeff Roberson 17978355f576SJeff Roberson zfree_start: 17988355f576SJeff Roberson bucket = cache->uc_freebucket; 17998355f576SJeff Roberson 18008355f576SJeff Roberson if (bucket) { 1801a553d4b8SJeff Roberson /* 1802a553d4b8SJeff Roberson * Do we have room in our bucket? It is OK for this uz count 1803a553d4b8SJeff Roberson * check to be slightly out of sync. 
1804a553d4b8SJeff Roberson */ 1805a553d4b8SJeff Roberson 1806cae33c14SJeff Roberson if (bucket->ub_cnt < bucket->ub_entries) { 1807cae33c14SJeff Roberson KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL, 18088355f576SJeff Roberson ("uma_zfree: Freeing to non free bucket index.")); 1809cae33c14SJeff Roberson bucket->ub_bucket[bucket->ub_cnt] = item; 1810cae33c14SJeff Roberson bucket->ub_cnt++; 1811b9ba8931SJeff Roberson #ifdef INVARIANTS 181281f71edaSMatt Jacob ZONE_LOCK(zone); 1813b60f5b79SJeff Roberson if (zone->uz_flags & UMA_ZONE_MALLOC) 1814b9ba8931SJeff Roberson uma_dbg_free(zone, udata, item); 1815b9ba8931SJeff Roberson else 1816b9ba8931SJeff Roberson uma_dbg_free(zone, NULL, item); 181781f71edaSMatt Jacob ZONE_UNLOCK(zone); 1818b9ba8931SJeff Roberson #endif 1819d88797c2SBosko Milekic CPU_UNLOCK(cpu); 18208355f576SJeff Roberson return; 18218355f576SJeff Roberson } else if (cache->uc_allocbucket) { 18228355f576SJeff Roberson #ifdef UMA_DEBUG_ALLOC 18238355f576SJeff Roberson printf("uma_zfree: Swapping buckets.\n"); 18248355f576SJeff Roberson #endif 18258355f576SJeff Roberson /* 18268355f576SJeff Roberson * We have run out of space in our freebucket. 18278355f576SJeff Roberson * See if we can switch with our alloc bucket. 18288355f576SJeff Roberson */ 1829cae33c14SJeff Roberson if (cache->uc_allocbucket->ub_cnt < 1830cae33c14SJeff Roberson cache->uc_freebucket->ub_cnt) { 1831b983089aSJeff Roberson bucket = cache->uc_freebucket; 18328355f576SJeff Roberson cache->uc_freebucket = cache->uc_allocbucket; 1833b983089aSJeff Roberson cache->uc_allocbucket = bucket; 18348355f576SJeff Roberson goto zfree_start; 18358355f576SJeff Roberson } 18368355f576SJeff Roberson } 18378355f576SJeff Roberson } 18388355f576SJeff Roberson 18398355f576SJeff Roberson /* 1840a553d4b8SJeff Roberson * We can get here for two reasons: 18418355f576SJeff Roberson * 18428355f576SJeff Roberson * 1) The buckets are NULL 1843a553d4b8SJeff Roberson * 2) The alloc and free buckets are both somewhat full. 18448355f576SJeff Roberson * 18458355f576SJeff Roberson */ 18468355f576SJeff Roberson 18478355f576SJeff Roberson ZONE_LOCK(zone); 18488355f576SJeff Roberson 18498355f576SJeff Roberson bucket = cache->uc_freebucket; 18508355f576SJeff Roberson cache->uc_freebucket = NULL; 18518355f576SJeff Roberson 18528355f576SJeff Roberson /* Can we throw this on the zone full list? */ 18538355f576SJeff Roberson if (bucket != NULL) { 18548355f576SJeff Roberson #ifdef UMA_DEBUG_ALLOC 18558355f576SJeff Roberson printf("uma_zfree: Putting old bucket on the free list.\n"); 18568355f576SJeff Roberson #endif 1857cae33c14SJeff Roberson /* ub_cnt is pointing to the last free item */ 1858cae33c14SJeff Roberson KASSERT(bucket->ub_cnt != 0, 18598355f576SJeff Roberson ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n")); 18608355f576SJeff Roberson LIST_INSERT_HEAD(&zone->uz_full_bucket, 18618355f576SJeff Roberson bucket, ub_link); 18628355f576SJeff Roberson } 1863a553d4b8SJeff Roberson if ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) { 1864a553d4b8SJeff Roberson LIST_REMOVE(bucket, ub_link); 1865a553d4b8SJeff Roberson ZONE_UNLOCK(zone); 1866a553d4b8SJeff Roberson cache->uc_freebucket = bucket; 1867a553d4b8SJeff Roberson goto zfree_start; 1868a553d4b8SJeff Roberson } 1869a553d4b8SJeff Roberson /* We're done with this CPU now */ 1870d88797c2SBosko Milekic CPU_UNLOCK(cpu); 1871a553d4b8SJeff Roberson 1872a553d4b8SJeff Roberson /* And the zone.. 
*/ 1873a553d4b8SJeff Roberson ZONE_UNLOCK(zone); 1874a553d4b8SJeff Roberson 18758355f576SJeff Roberson #ifdef UMA_DEBUG_ALLOC 18768355f576SJeff Roberson printf("uma_zfree: Allocating new free bucket.\n"); 18778355f576SJeff Roberson #endif 18784741dcbfSJeff Roberson bflags = M_NOWAIT; 18794741dcbfSJeff Roberson 188020e8e865SBosko Milekic if (zone->uz_flags & UMA_ZFLAG_CACHEONLY) 18814741dcbfSJeff Roberson bflags |= M_NOVM; 1882cae33c14SJeff Roberson bucket = bucket_alloc(zone->uz_count, bflags); 18834741dcbfSJeff Roberson if (bucket) { 1884a553d4b8SJeff Roberson ZONE_LOCK(zone); 1885a553d4b8SJeff Roberson LIST_INSERT_HEAD(&zone->uz_free_bucket, 1886a553d4b8SJeff Roberson bucket, ub_link); 18878355f576SJeff Roberson ZONE_UNLOCK(zone); 1888a553d4b8SJeff Roberson goto zfree_restart; 18898355f576SJeff Roberson } 18908355f576SJeff Roberson 1891a553d4b8SJeff Roberson /* 1892a553d4b8SJeff Roberson * If nothing else caught this, we'll just do an internal free. 1893a553d4b8SJeff Roberson */ 18948355f576SJeff Roberson 1895af7f9b97SJeff Roberson zfree_internal: 1896af7f9b97SJeff Roberson 189748bf8725SBosko Milekic #ifdef INVARIANTS 189848bf8725SBosko Milekic /* 189948bf8725SBosko Milekic * If we need to skip the dtor and the uma_dbg_free in uma_zfree_internal 190048bf8725SBosko Milekic * because we've already called the dtor above, but we ended up here, then 190148bf8725SBosko Milekic * we need to make sure that we take care of the uma_dbg_free immediately. 190248bf8725SBosko Milekic */ 190348bf8725SBosko Milekic if (skip) { 190448bf8725SBosko Milekic ZONE_LOCK(zone); 1905b60f5b79SJeff Roberson if (zone->uz_flags & UMA_ZONE_MALLOC) 190648bf8725SBosko Milekic uma_dbg_free(zone, udata, item); 190748bf8725SBosko Milekic else 190848bf8725SBosko Milekic uma_dbg_free(zone, NULL, item); 190948bf8725SBosko Milekic ZONE_UNLOCK(zone); 191048bf8725SBosko Milekic } 191148bf8725SBosko Milekic #endif 19125c133dfaSBosko Milekic uma_zfree_internal(zone, item, udata, skip); 19138355f576SJeff Roberson 19148355f576SJeff Roberson return; 19158355f576SJeff Roberson 19168355f576SJeff Roberson } 19178355f576SJeff Roberson 19188355f576SJeff Roberson /* 19198355f576SJeff Roberson * Frees an item to an INTERNAL zone or allocates a free bucket 19208355f576SJeff Roberson * 19218355f576SJeff Roberson * Arguments: 19228355f576SJeff Roberson * zone The zone to free to 19238355f576SJeff Roberson * item The item we're freeing 19248355f576SJeff Roberson * udata User supplied data for the dtor 19258355f576SJeff Roberson * skip Skip the dtor, it was done in uma_zfree_arg 19268355f576SJeff Roberson */ 19278355f576SJeff Roberson 19288355f576SJeff Roberson static void 19298355f576SJeff Roberson uma_zfree_internal(uma_zone_t zone, void *item, void *udata, int skip) 19308355f576SJeff Roberson { 19318355f576SJeff Roberson uma_slab_t slab; 19328355f576SJeff Roberson u_int8_t *mem; 19338355f576SJeff Roberson u_int8_t freei; 19348355f576SJeff Roberson 1935bba739abSJeff Roberson if (!skip && zone->uz_dtor) 1936bba739abSJeff Roberson zone->uz_dtor(item, zone->uz_size, udata); 1937bba739abSJeff Roberson 19388355f576SJeff Roberson ZONE_LOCK(zone); 19398355f576SJeff Roberson 1940b60f5b79SJeff Roberson if (!(zone->uz_flags & UMA_ZONE_MALLOC)) { 19418355f576SJeff Roberson mem = (u_int8_t *)((unsigned long)item & (~UMA_SLAB_MASK)); 1942b60f5b79SJeff Roberson if (zone->uz_flags & UMA_ZONE_HASH) 19438355f576SJeff Roberson slab = hash_sfind(&zone->uz_hash, mem); 19448355f576SJeff Roberson else { 19458355f576SJeff Roberson mem += zone->uz_pgoff; 
19468355f576SJeff Roberson slab = (uma_slab_t)mem; 19478355f576SJeff Roberson } 19488355f576SJeff Roberson } else { 19498355f576SJeff Roberson slab = (uma_slab_t)udata; 19508355f576SJeff Roberson } 19518355f576SJeff Roberson 19528355f576SJeff Roberson /* Do we need to remove from any lists? */ 19538355f576SJeff Roberson if (slab->us_freecount+1 == zone->uz_ipers) { 19548355f576SJeff Roberson LIST_REMOVE(slab, us_link); 19558355f576SJeff Roberson LIST_INSERT_HEAD(&zone->uz_free_slab, slab, us_link); 19568355f576SJeff Roberson } else if (slab->us_freecount == 0) { 19578355f576SJeff Roberson LIST_REMOVE(slab, us_link); 19588355f576SJeff Roberson LIST_INSERT_HEAD(&zone->uz_part_slab, slab, us_link); 19598355f576SJeff Roberson } 19608355f576SJeff Roberson 19618355f576SJeff Roberson /* Slab management stuff */ 19628355f576SJeff Roberson freei = ((unsigned long)item - (unsigned long)slab->us_data) 19638355f576SJeff Roberson / zone->uz_rsize; 19648355f576SJeff Roberson 1965639c9550SJeff Roberson #ifdef INVARIANTS 1966639c9550SJeff Roberson if (!skip) 1967639c9550SJeff Roberson uma_dbg_free(zone, slab, item); 19688355f576SJeff Roberson #endif 1969639c9550SJeff Roberson 19708355f576SJeff Roberson slab->us_freelist[freei] = slab->us_firstfree; 19718355f576SJeff Roberson slab->us_firstfree = freei; 19728355f576SJeff Roberson slab->us_freecount++; 19738355f576SJeff Roberson 19748355f576SJeff Roberson /* Zone statistics */ 19758355f576SJeff Roberson zone->uz_free++; 19768355f576SJeff Roberson 1977af7f9b97SJeff Roberson if (zone->uz_flags & UMA_ZFLAG_FULL) { 1978af7f9b97SJeff Roberson if (zone->uz_pages < zone->uz_maxpages) 1979af7f9b97SJeff Roberson zone->uz_flags &= ~UMA_ZFLAG_FULL; 1980af7f9b97SJeff Roberson 1981af7f9b97SJeff Roberson /* We can handle one more allocation */ 198274c924b5SJeff Roberson wakeup_one(zone); 1983af7f9b97SJeff Roberson } 1984af7f9b97SJeff Roberson 1985605cbd6aSJeff Roberson ZONE_UNLOCK(zone); 19868355f576SJeff Roberson } 19878355f576SJeff Roberson 19888355f576SJeff Roberson /* See uma.h */ 19898355f576SJeff Roberson void 1990736ee590SJeff Roberson uma_zone_set_max(uma_zone_t zone, int nitems) 1991736ee590SJeff Roberson { 1992736ee590SJeff Roberson ZONE_LOCK(zone); 1993736ee590SJeff Roberson if (zone->uz_ppera > 1) 1994af7f9b97SJeff Roberson zone->uz_maxpages = nitems * zone->uz_ppera; 1995736ee590SJeff Roberson else 1996736ee590SJeff Roberson zone->uz_maxpages = nitems / zone->uz_ipers; 199728bc4419SJeff Roberson 1998d4d6aee5SAndrew R. Reiter if (zone->uz_maxpages * zone->uz_ipers < nitems) 1999d4d6aee5SAndrew R. 
Reiter zone->uz_maxpages++; 200028bc4419SJeff Roberson 2001736ee590SJeff Roberson ZONE_UNLOCK(zone); 2002736ee590SJeff Roberson } 2003736ee590SJeff Roberson 2004736ee590SJeff Roberson /* See uma.h */ 2005736ee590SJeff Roberson void 20068355f576SJeff Roberson uma_zone_set_freef(uma_zone_t zone, uma_free freef) 20078355f576SJeff Roberson { 20088355f576SJeff Roberson ZONE_LOCK(zone); 20098355f576SJeff Roberson 20108355f576SJeff Roberson zone->uz_freef = freef; 20118355f576SJeff Roberson 20128355f576SJeff Roberson ZONE_UNLOCK(zone); 20138355f576SJeff Roberson } 20148355f576SJeff Roberson 20158355f576SJeff Roberson /* See uma.h */ 20168355f576SJeff Roberson void 20178355f576SJeff Roberson uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf) 20188355f576SJeff Roberson { 20198355f576SJeff Roberson ZONE_LOCK(zone); 20208355f576SJeff Roberson 20218355f576SJeff Roberson zone->uz_flags |= UMA_ZFLAG_PRIVALLOC; 20228355f576SJeff Roberson zone->uz_allocf = allocf; 20238355f576SJeff Roberson 20248355f576SJeff Roberson ZONE_UNLOCK(zone); 20258355f576SJeff Roberson } 20268355f576SJeff Roberson 20278355f576SJeff Roberson /* See uma.h */ 20288355f576SJeff Roberson int 20298355f576SJeff Roberson uma_zone_set_obj(uma_zone_t zone, struct vm_object *obj, int count) 20308355f576SJeff Roberson { 20318355f576SJeff Roberson int pages; 20328355f576SJeff Roberson vm_offset_t kva; 20338355f576SJeff Roberson 20348355f576SJeff Roberson mtx_lock(&Giant); 20358355f576SJeff Roberson 20368355f576SJeff Roberson pages = count / zone->uz_ipers; 20378355f576SJeff Roberson 20388355f576SJeff Roberson if (pages * zone->uz_ipers < count) 20398355f576SJeff Roberson pages++; 2040a553d4b8SJeff Roberson 20418355f576SJeff Roberson kva = kmem_alloc_pageable(kernel_map, pages * UMA_SLAB_SIZE); 20428355f576SJeff Roberson 2043a553d4b8SJeff Roberson if (kva == 0) { 2044a553d4b8SJeff Roberson mtx_unlock(&Giant); 20458355f576SJeff Roberson return (0); 20468355f576SJeff Roberson } 20478355f576SJeff Roberson 20488355f576SJeff Roberson 2049a553d4b8SJeff Roberson if (obj == NULL) 2050a553d4b8SJeff Roberson obj = vm_object_allocate(OBJT_DEFAULT, 2051c7173f58SJeff Roberson pages); 205282774d80SAlan Cox else { 205382774d80SAlan Cox VM_OBJECT_LOCK_INIT(obj); 20548355f576SJeff Roberson _vm_object_allocate(OBJT_DEFAULT, 2055c7173f58SJeff Roberson pages, obj); 205682774d80SAlan Cox } 2057a553d4b8SJeff Roberson ZONE_LOCK(zone); 2058a553d4b8SJeff Roberson zone->uz_kva = kva; 2059a553d4b8SJeff Roberson zone->uz_obj = obj; 2060a553d4b8SJeff Roberson zone->uz_maxpages = pages; 20618355f576SJeff Roberson 20628355f576SJeff Roberson zone->uz_allocf = obj_alloc; 2063b60f5b79SJeff Roberson zone->uz_flags |= UMA_ZONE_NOFREE | UMA_ZFLAG_PRIVALLOC; 20648355f576SJeff Roberson 20658355f576SJeff Roberson ZONE_UNLOCK(zone); 2066a553d4b8SJeff Roberson mtx_unlock(&Giant); 20678355f576SJeff Roberson 20688355f576SJeff Roberson return (1); 20698355f576SJeff Roberson } 20708355f576SJeff Roberson 20718355f576SJeff Roberson /* See uma.h */ 20728355f576SJeff Roberson void 20738355f576SJeff Roberson uma_prealloc(uma_zone_t zone, int items) 20748355f576SJeff Roberson { 20758355f576SJeff Roberson int slabs; 20768355f576SJeff Roberson uma_slab_t slab; 20778355f576SJeff Roberson 20788355f576SJeff Roberson ZONE_LOCK(zone); 20798355f576SJeff Roberson slabs = items / zone->uz_ipers; 20808355f576SJeff Roberson if (slabs * zone->uz_ipers < items) 20818355f576SJeff Roberson slabs++; 20828355f576SJeff Roberson 20838355f576SJeff Roberson while (slabs > 0) { 2084a163d034SWarner Losh slab = 
slab_zalloc(zone, M_WAITOK);
20858355f576SJeff Roberson LIST_INSERT_HEAD(&zone->uz_free_slab, slab, us_link);
20868355f576SJeff Roberson slabs--;
20878355f576SJeff Roberson }
20888355f576SJeff Roberson ZONE_UNLOCK(zone);
20898355f576SJeff Roberson }
20908355f576SJeff Roberson
20918355f576SJeff Roberson /* See uma.h */
20928355f576SJeff Roberson void
20938355f576SJeff Roberson uma_reclaim(void)
20948355f576SJeff Roberson {
20958355f576SJeff Roberson /*
20968355f576SJeff Roberson * You might think that the delay below would improve performance since
20978355f576SJeff Roberson * the allocator will give away memory that it may ask for immediately.
20988355f576SJeff Roberson * Really, it makes things worse, since cpu cycles are so much cheaper
20998355f576SJeff Roberson * than disk activity.
21008355f576SJeff Roberson */
21018355f576SJeff Roberson #if 0
21028355f576SJeff Roberson static struct timeval tv = {0};
21038355f576SJeff Roberson struct timeval now;
21048355f576SJeff Roberson getmicrouptime(&now);
21058355f576SJeff Roberson if (now.tv_sec > tv.tv_sec + 30)
21068355f576SJeff Roberson tv = now;
21078355f576SJeff Roberson else
21088355f576SJeff Roberson return;
21098355f576SJeff Roberson #endif
21108355f576SJeff Roberson #ifdef UMA_DEBUG
21118355f576SJeff Roberson printf("UMA: vm asked us to release pages!\n");
21128355f576SJeff Roberson #endif
211386bbae32SJeff Roberson bucket_enable();
21148355f576SJeff Roberson zone_foreach(zone_drain);
21158355f576SJeff Roberson
21168355f576SJeff Roberson /*
21178355f576SJeff Roberson * Some slabs may have been freed to the slab zone while the other
21188355f576SJeff Roberson * zones were being drained, since it is visited early; drain it again
21198355f576SJeff Roberson * so pages that have since become empty can be freed. We have to do the same for buckets.
21208355f576SJeff Roberson */ 2121cae33c14SJeff Roberson zone_drain_common(slabzone, 0); 2122cae33c14SJeff Roberson bucket_zone_drain(); 21238355f576SJeff Roberson } 21248355f576SJeff Roberson 21258355f576SJeff Roberson void * 21268355f576SJeff Roberson uma_large_malloc(int size, int wait) 21278355f576SJeff Roberson { 21288355f576SJeff Roberson void *mem; 21298355f576SJeff Roberson uma_slab_t slab; 21308355f576SJeff Roberson u_int8_t flags; 21318355f576SJeff Roberson 2132bbee39c6SJeff Roberson slab = uma_zalloc_internal(slabzone, NULL, wait); 21338355f576SJeff Roberson if (slab == NULL) 21348355f576SJeff Roberson return (NULL); 21358355f576SJeff Roberson 21368355f576SJeff Roberson mem = page_alloc(NULL, size, &flags, wait); 21378355f576SJeff Roberson if (mem) { 213899571dc3SJeff Roberson vsetslab((vm_offset_t)mem, slab); 21398355f576SJeff Roberson slab->us_data = mem; 21408355f576SJeff Roberson slab->us_flags = flags | UMA_SLAB_MALLOC; 21418355f576SJeff Roberson slab->us_size = size; 21428355f576SJeff Roberson } else { 21438355f576SJeff Roberson uma_zfree_internal(slabzone, slab, NULL, 0); 21448355f576SJeff Roberson } 21458355f576SJeff Roberson 21468355f576SJeff Roberson 21478355f576SJeff Roberson return (mem); 21488355f576SJeff Roberson } 21498355f576SJeff Roberson 21508355f576SJeff Roberson void 21518355f576SJeff Roberson uma_large_free(uma_slab_t slab) 21528355f576SJeff Roberson { 215399571dc3SJeff Roberson vsetobj((vm_offset_t)slab->us_data, kmem_object); 2154125ee0d1STor Egge /* 21555103186cSAlan Cox * XXX: We get a lock order reversal if we don't have Giant: 2156125ee0d1STor Egge * vm_map_remove (locks system map) -> vm_map_delete -> 2157125ee0d1STor Egge * vm_map_entry_unwire -> vm_fault_unwire -> mtx_lock(&Giant) 2158125ee0d1STor Egge */ 2159125ee0d1STor Egge if (!mtx_owned(&Giant)) { 2160125ee0d1STor Egge mtx_lock(&Giant); 2161125ee0d1STor Egge page_free(slab->us_data, slab->us_size, slab->us_flags); 2162125ee0d1STor Egge mtx_unlock(&Giant); 2163125ee0d1STor Egge } else 21648355f576SJeff Roberson page_free(slab->us_data, slab->us_size, slab->us_flags); 21658355f576SJeff Roberson uma_zfree_internal(slabzone, slab, NULL, 0); 21668355f576SJeff Roberson } 21678355f576SJeff Roberson 21688355f576SJeff Roberson void 21698355f576SJeff Roberson uma_print_stats(void) 21708355f576SJeff Roberson { 21718355f576SJeff Roberson zone_foreach(uma_print_zone); 21728355f576SJeff Roberson } 21738355f576SJeff Roberson 21748355f576SJeff Roberson void 21758355f576SJeff Roberson uma_print_zone(uma_zone_t zone) 21768355f576SJeff Roberson { 21778355f576SJeff Roberson printf("%s(%p) size %d(%d) flags %d ipers %d ppera %d out %d free %d\n", 21788355f576SJeff Roberson zone->uz_name, zone, zone->uz_size, zone->uz_rsize, zone->uz_flags, 21798355f576SJeff Roberson zone->uz_ipers, zone->uz_ppera, 21808355f576SJeff Roberson (zone->uz_ipers * zone->uz_pages) - zone->uz_free, zone->uz_free); 21818355f576SJeff Roberson } 21828355f576SJeff Roberson 21838355f576SJeff Roberson /* 21848355f576SJeff Roberson * Sysctl handler for vm.zone 21858355f576SJeff Roberson * 21868355f576SJeff Roberson * stolen from vm_zone.c 21878355f576SJeff Roberson */ 21888355f576SJeff Roberson static int 21898355f576SJeff Roberson sysctl_vm_zone(SYSCTL_HANDLER_ARGS) 21908355f576SJeff Roberson { 21918355f576SJeff Roberson int error, len, cnt; 21928355f576SJeff Roberson const int linesize = 128; /* conservative */ 21938355f576SJeff Roberson int totalfree; 21948355f576SJeff Roberson char *tmpbuf, *offset; 21958355f576SJeff Roberson uma_zone_t z; 
21968355f576SJeff Roberson char *p; 2197f828e5beSJeff Roberson int cpu; 2198f828e5beSJeff Roberson int cachefree; 2199f828e5beSJeff Roberson uma_bucket_t bucket; 2200f828e5beSJeff Roberson uma_cache_t cache; 22018355f576SJeff Roberson 22028355f576SJeff Roberson cnt = 0; 22030da47b2fSJeff Roberson mtx_lock(&uma_mtx); 22048355f576SJeff Roberson LIST_FOREACH(z, &uma_zones, uz_link) 22058355f576SJeff Roberson cnt++; 22060da47b2fSJeff Roberson mtx_unlock(&uma_mtx); 22078355f576SJeff Roberson MALLOC(tmpbuf, char *, (cnt == 0 ? 1 : cnt) * linesize, 2208a163d034SWarner Losh M_TEMP, M_WAITOK); 22098355f576SJeff Roberson len = snprintf(tmpbuf, linesize, 22108355f576SJeff Roberson "\nITEM SIZE LIMIT USED FREE REQUESTS\n\n"); 22118355f576SJeff Roberson if (cnt == 0) 22128355f576SJeff Roberson tmpbuf[len - 1] = '\0'; 22138355f576SJeff Roberson error = SYSCTL_OUT(req, tmpbuf, cnt == 0 ? len-1 : len); 22148355f576SJeff Roberson if (error || cnt == 0) 22158355f576SJeff Roberson goto out; 22168355f576SJeff Roberson offset = tmpbuf; 2217f4af24d5SJeff Roberson mtx_lock(&uma_mtx); 22188355f576SJeff Roberson LIST_FOREACH(z, &uma_zones, uz_link) { 22198355f576SJeff Roberson if (cnt == 0) /* list may have changed size */ 22208355f576SJeff Roberson break; 22211c35e213SBosko Milekic if (!(z->uz_flags & UMA_ZFLAG_INTERNAL)) { 2222f828e5beSJeff Roberson for (cpu = 0; cpu < maxcpu; cpu++) { 2223f828e5beSJeff Roberson if (CPU_ABSENT(cpu)) 2224f828e5beSJeff Roberson continue; 2225f828e5beSJeff Roberson CPU_LOCK(cpu); 2226f828e5beSJeff Roberson } 22271c35e213SBosko Milekic } 22288355f576SJeff Roberson ZONE_LOCK(z); 2229f828e5beSJeff Roberson cachefree = 0; 22301c35e213SBosko Milekic if (!(z->uz_flags & UMA_ZFLAG_INTERNAL)) { 2231f828e5beSJeff Roberson for (cpu = 0; cpu < maxcpu; cpu++) { 2232f828e5beSJeff Roberson if (CPU_ABSENT(cpu)) 2233f828e5beSJeff Roberson continue; 2234f828e5beSJeff Roberson cache = &z->uz_cpu[cpu]; 2235f828e5beSJeff Roberson if (cache->uc_allocbucket != NULL) 2236cae33c14SJeff Roberson cachefree += cache->uc_allocbucket->ub_cnt; 2237f828e5beSJeff Roberson if (cache->uc_freebucket != NULL) 2238cae33c14SJeff Roberson cachefree += cache->uc_freebucket->ub_cnt; 2239f828e5beSJeff Roberson CPU_UNLOCK(cpu); 2240f828e5beSJeff Roberson } 22411c35e213SBosko Milekic } 2242f828e5beSJeff Roberson LIST_FOREACH(bucket, &z->uz_full_bucket, ub_link) { 2243cae33c14SJeff Roberson cachefree += bucket->ub_cnt; 2244f828e5beSJeff Roberson } 2245f828e5beSJeff Roberson totalfree = z->uz_free + cachefree; 22468355f576SJeff Roberson len = snprintf(offset, linesize, 22478355f576SJeff Roberson "%-12.12s %6.6u, %8.8u, %6.6u, %6.6u, %8.8llu\n", 22488355f576SJeff Roberson z->uz_name, z->uz_size, 22498355f576SJeff Roberson z->uz_maxpages * z->uz_ipers, 22508355f576SJeff Roberson (z->uz_ipers * (z->uz_pages / z->uz_ppera)) - totalfree, 22518355f576SJeff Roberson totalfree, 22528355f576SJeff Roberson (unsigned long long)z->uz_allocs); 22538355f576SJeff Roberson ZONE_UNLOCK(z); 22548355f576SJeff Roberson for (p = offset + 12; p > offset && *p == ' '; --p) 22558355f576SJeff Roberson /* nothing */ ; 22568355f576SJeff Roberson p[1] = ':'; 22578355f576SJeff Roberson cnt--; 22588355f576SJeff Roberson offset += len; 22598355f576SJeff Roberson } 2260f4af24d5SJeff Roberson mtx_unlock(&uma_mtx); 22618355f576SJeff Roberson *offset++ = '\0'; 22628355f576SJeff Roberson error = SYSCTL_OUT(req, tmpbuf, offset - tmpbuf); 22638355f576SJeff Roberson out: 22648355f576SJeff Roberson FREE(tmpbuf, M_TEMP); 22658355f576SJeff Roberson return (error); 
22668355f576SJeff Roberson }
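/*
 * Example usage of the public API above (a minimal sketch for
 * illustration only; the "foo" zone, structure and variables are
 * hypothetical and not part of this file):
 *
 *	static uma_zone_t foo_zone;
 *	struct foo *p;
 *
 *	foo_zone = uma_zcreate("foo", sizeof(struct foo),
 *	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
 *	uma_zone_set_max(foo_zone, 1024);
 *
 *	p = uma_zalloc(foo_zone, M_WAITOK | M_ZERO);
 *	...
 *	uma_zfree(foo_zone, p);
 *	uma_zdestroy(foo_zone);
 */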