/*
 * Copyright (c) 2002, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

/*
 * uma_core.c  Implementation of the Universal Memory allocator
 *
 * This allocator is intended to replace the multitude of similar object caches
 * in the standard FreeBSD kernel.  The intent is to be flexible as well as
 * efficient.  A primary design goal is to return unused memory to the rest of
 * the system.  This will make the system as a whole more flexible due to the
 * ability to move memory to subsystems which most need it instead of leaving
 * pools of reserved memory unused.
 *
 * The basic ideas stem from similar slab/zone based allocators whose
 * algorithms are well known.
 *
 */
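
/*
 * Consumer-side usage, as a rough sketch only (see uma.h for the
 * authoritative interface; "foo" and its contents are made up here
 * purely for illustration):
 *
 *	uma_zone_t foo_zone;
 *	struct foo *f;
 *
 *	foo_zone = uma_zcreate("foo", sizeof(struct foo),
 *	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
 *	f = uma_zalloc(foo_zone, M_WAITOK);
 *	...
 *	uma_zfree(foo_zone, f);
 */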

/*
 * TODO:
 *	- Improve memory usage for large allocations
 *	- Investigate cache size adjustments
 */

/* I should really use ktr.. */
/*
#define UMA_DEBUG 1
#define UMA_DEBUG_ALLOC 1
#define UMA_DEBUG_ALLOC_1 1
*/


#include "opt_param.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

/*
 * This is the zone from which all zones are spawned.  The idea is that even
 * the zone heads are allocated from the allocator, so we use the bss section
 * to bootstrap us.
 */
static struct uma_zone masterzone;
static uma_zone_t zones = &masterzone;

/* This is the zone from which all of uma_slab_t's are allocated. */
static uma_zone_t slabzone;

/*
 * The initial hash tables come out of this zone so they can be allocated
 * prior to malloc coming up.
 */
static uma_zone_t hashzone;

/*
 * Zone that buckets come from.
 */
static uma_zone_t bucketzone;

/*
 * Are we allowed to allocate buckets?
 */
static int bucketdisable = 1;

/* Linked list of all zones in the system */
static LIST_HEAD(,uma_zone) uma_zones = LIST_HEAD_INITIALIZER(&uma_zones);

/* This mutex protects the zone list */
static struct mtx uma_mtx;

/* Linked list of boot time pages */
static LIST_HEAD(,uma_slab) uma_boot_pages =
    LIST_HEAD_INITIALIZER(&uma_boot_pages);

/* Count of free boot time pages */
static int uma_boot_free = 0;

/* Is the VM done starting up? */
static int booted = 0;

/* This is the handle used to schedule our working set calculator */
static struct callout uma_callout;

/* This is mp_maxid + 1, for use while looping over each cpu */
static int maxcpu;

/*
 * This structure is passed as the zone ctor arg so that I don't have to create
 * a special allocation function just for zones.
 */
struct uma_zctor_args {
	char *name;
	size_t size;
	uma_ctor ctor;
	uma_dtor dtor;
	uma_init uminit;
	uma_fini fini;
	int align;
	u_int16_t flags;
};

/* Prototypes */

static void *obj_alloc(uma_zone_t, int, u_int8_t *, int);
static void *page_alloc(uma_zone_t, int, u_int8_t *, int);
static void page_free(void *, int, u_int8_t);
static uma_slab_t slab_zalloc(uma_zone_t, int);
static void cache_drain(uma_zone_t);
static void bucket_drain(uma_zone_t, uma_bucket_t);
static void zone_drain(uma_zone_t);
static void zone_ctor(void *, int, void *);
static void zone_dtor(void *, int, void *);
static void zero_init(void *, int);
static void zone_small_init(uma_zone_t zone);
static void zone_large_init(uma_zone_t zone);
static void zone_foreach(void (*zfunc)(uma_zone_t));
static void zone_timeout(uma_zone_t zone);
static int hash_alloc(struct uma_hash *);
static int hash_expand(struct uma_hash *, struct uma_hash *);
static void hash_free(struct uma_hash *hash);
static void uma_timeout(void *);
static void uma_startup3(void);
static void *uma_zalloc_internal(uma_zone_t, void *, int, uma_bucket_t);
static void uma_zfree_internal(uma_zone_t, void *, void *, int);
static void bucket_enable(void);
void uma_print_zone(uma_zone_t);
void uma_print_stats(void);
static int sysctl_vm_zone(SYSCTL_HANDLER_ARGS);

SYSCTL_OID(_vm, OID_AUTO, zone, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, sysctl_vm_zone, "A", "Zone Info");
SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);

/*
 * This routine checks whether it is safe to enable buckets.  Bucket
 * allocation is disabled while the free page count is below the minimum
 * reserve.
 */

static void
bucket_enable(void)
{
	if (cnt.v_free_count < cnt.v_free_min)
		bucketdisable = 1;
	else
		bucketdisable = 0;
}
19286bbae32SJeff Roberson 
1938355f576SJeff Roberson 
1948355f576SJeff Roberson /*
1958355f576SJeff Roberson  * Routine called by timeout which is used to fire off some time interval
1968355f576SJeff Roberson  * based calculations.  (working set, stats, etc.)
1978355f576SJeff Roberson  *
1988355f576SJeff Roberson  * Arguments:
1998355f576SJeff Roberson  *	arg   Unused
2008355f576SJeff Roberson  *
2018355f576SJeff Roberson  * Returns:
2028355f576SJeff Roberson  *	Nothing
2038355f576SJeff Roberson  */
2048355f576SJeff Roberson static void
2058355f576SJeff Roberson uma_timeout(void *unused)
2068355f576SJeff Roberson {
20786bbae32SJeff Roberson 	bucket_enable();
2088355f576SJeff Roberson 	zone_foreach(zone_timeout);
2098355f576SJeff Roberson 
2108355f576SJeff Roberson 	/* Reschedule this event */
2118355f576SJeff Roberson 	callout_reset(&uma_callout, UMA_WORKING_TIME * hz, uma_timeout, NULL);
2128355f576SJeff Roberson }
2138355f576SJeff Roberson 
2148355f576SJeff Roberson /*
2158355f576SJeff Roberson  * Routine to perform timeout driven calculations.  This does the working set
2168355f576SJeff Roberson  * as well as hash expanding, and per cpu statistics aggregation.
2178355f576SJeff Roberson  *
2188355f576SJeff Roberson  *  Arguments:
2198355f576SJeff Roberson  *	zone  The zone to operate on
2208355f576SJeff Roberson  *
2218355f576SJeff Roberson  *  Returns:
2228355f576SJeff Roberson  *	Nothing
2238355f576SJeff Roberson  */
2248355f576SJeff Roberson static void
2258355f576SJeff Roberson zone_timeout(uma_zone_t zone)
2268355f576SJeff Roberson {
2278355f576SJeff Roberson 	uma_cache_t cache;
2288355f576SJeff Roberson 	u_int64_t alloc;
2298355f576SJeff Roberson 	int free;
2308355f576SJeff Roberson 	int cpu;
2318355f576SJeff Roberson 
2328355f576SJeff Roberson 	alloc = 0;
2338355f576SJeff Roberson 	free = 0;
2348355f576SJeff Roberson 
2358355f576SJeff Roberson 	/*
2368355f576SJeff Roberson 	 * Aggregate per cpu cache statistics back to the zone.
2378355f576SJeff Roberson 	 *
2388355f576SJeff Roberson 	 * I may rewrite this to set a flag in the per cpu cache instead of
2398355f576SJeff Roberson 	 * locking.  If the flag is not cleared on the next round I will have
2408355f576SJeff Roberson 	 * to lock and do it here instead so that the statistics don't get too
2418355f576SJeff Roberson 	 * far out of sync.
2428355f576SJeff Roberson 	 */
2438355f576SJeff Roberson 	if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL)) {
2448355f576SJeff Roberson 		for (cpu = 0; cpu < maxcpu; cpu++) {
2458355f576SJeff Roberson 			if (CPU_ABSENT(cpu))
2468355f576SJeff Roberson 				continue;
2478355f576SJeff Roberson 			CPU_LOCK(zone, cpu);
2488355f576SJeff Roberson 			cache = &zone->uz_cpu[cpu];
2498355f576SJeff Roberson 			/* Add them up, and reset */
2508355f576SJeff Roberson 			alloc += cache->uc_allocs;
2518355f576SJeff Roberson 			cache->uc_allocs = 0;
2528355f576SJeff Roberson 			if (cache->uc_allocbucket)
2538355f576SJeff Roberson 				free += cache->uc_allocbucket->ub_ptr + 1;
2548355f576SJeff Roberson 			if (cache->uc_freebucket)
2558355f576SJeff Roberson 				free += cache->uc_freebucket->ub_ptr + 1;
2568355f576SJeff Roberson 			CPU_UNLOCK(zone, cpu);
2578355f576SJeff Roberson 		}
2588355f576SJeff Roberson 	}
2598355f576SJeff Roberson 
2608355f576SJeff Roberson 	/* Now push these stats back into the zone.. */
2618355f576SJeff Roberson 	ZONE_LOCK(zone);
2628355f576SJeff Roberson 	zone->uz_allocs += alloc;
2638355f576SJeff Roberson 
2648355f576SJeff Roberson 	/*
2658355f576SJeff Roberson 	 * cachefree is an instantanious snapshot of what is in the per cpu
2668355f576SJeff Roberson 	 * caches, not an accurate counter
2678355f576SJeff Roberson 	 */
2688355f576SJeff Roberson 	zone->uz_cachefree = free;
2698355f576SJeff Roberson 
2708355f576SJeff Roberson 	/*
2718355f576SJeff Roberson 	 * Expand the zone hash table.
2728355f576SJeff Roberson 	 *
2738355f576SJeff Roberson 	 * This is done if the number of slabs is larger than the hash size.
2748355f576SJeff Roberson 	 * What I'm trying to do here is completely reduce collisions.  This
2758355f576SJeff Roberson 	 * may be a little aggressive.  Should I allow for two collisions max?
2768355f576SJeff Roberson 	 */
2778355f576SJeff Roberson 
27899571dc3SJeff Roberson 	if (zone->uz_flags & UMA_ZFLAG_HASH &&
27999571dc3SJeff Roberson 	    zone->uz_pages / zone->uz_ppera >= zone->uz_hash.uh_hashsize) {
2800aef6126SJeff Roberson 		struct uma_hash newhash;
2810aef6126SJeff Roberson 		struct uma_hash oldhash;
2820aef6126SJeff Roberson 		int ret;
2835300d9ddSJeff Roberson 
2840aef6126SJeff Roberson 		/*
2850aef6126SJeff Roberson 		 * This is so involved because allocating and freeing
2860aef6126SJeff Roberson 		 * while the zone lock is held will lead to deadlock.
2870aef6126SJeff Roberson 		 * I have to do everything in stages and check for
2880aef6126SJeff Roberson 		 * races.
2890aef6126SJeff Roberson 		 */
2900aef6126SJeff Roberson 		newhash = zone->uz_hash;
2915300d9ddSJeff Roberson 		ZONE_UNLOCK(zone);
2920aef6126SJeff Roberson 		ret = hash_alloc(&newhash);
2935300d9ddSJeff Roberson 		ZONE_LOCK(zone);
2940aef6126SJeff Roberson 		if (ret) {
2950aef6126SJeff Roberson 			if (hash_expand(&zone->uz_hash, &newhash)) {
2960aef6126SJeff Roberson 				oldhash = zone->uz_hash;
2970aef6126SJeff Roberson 				zone->uz_hash = newhash;
2980aef6126SJeff Roberson 			} else
2990aef6126SJeff Roberson 				oldhash = newhash;
3000aef6126SJeff Roberson 
3010aef6126SJeff Roberson 			ZONE_UNLOCK(zone);
3020aef6126SJeff Roberson 			hash_free(&oldhash);
3030aef6126SJeff Roberson 			ZONE_LOCK(zone);
3040aef6126SJeff Roberson 		}
3055300d9ddSJeff Roberson 	}
3068355f576SJeff Roberson 
3078355f576SJeff Roberson 	/*
3088355f576SJeff Roberson 	 * Here we compute the working set size as the total number of items
3098355f576SJeff Roberson 	 * left outstanding since the last time interval.  This is slightly
3108355f576SJeff Roberson 	 * suboptimal. What we really want is the highest number of outstanding
3118355f576SJeff Roberson 	 * items during the last time quantum.  This should be close enough.
3128355f576SJeff Roberson 	 *
3138355f576SJeff Roberson 	 * The working set size is used to throttle the zone_drain function.
3148355f576SJeff Roberson 	 * We don't want to return memory that we may need again immediately.
3158355f576SJeff Roberson 	 */
3168355f576SJeff Roberson 	alloc = zone->uz_allocs - zone->uz_oallocs;
3178355f576SJeff Roberson 	zone->uz_oallocs = zone->uz_allocs;
3188355f576SJeff Roberson 	zone->uz_wssize = alloc;
3198355f576SJeff Roberson 
3208355f576SJeff Roberson 	ZONE_UNLOCK(zone);
3218355f576SJeff Roberson }

/*
 * Allocate and zero fill the next sized hash table from the appropriate
 * backing store.
 *
 * Arguments:
 *	hash  A new hash structure with the old hash size in uh_hashsize
 *
 * Returns:
 *	1 on success and 0 on failure.
 */
static int
hash_alloc(struct uma_hash *hash)
{
	int oldsize;
	int alloc;

	oldsize = hash->uh_hashsize;

	/* We're just going to go to a power of two greater */
	if (oldsize)  {
		hash->uh_hashsize = oldsize * 2;
		alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
		/* XXX Shouldn't be abusing DEVBUF here */
		hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
		    M_DEVBUF, M_NOWAIT);
	} else {
		alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
		hash->uh_slab_hash = uma_zalloc_internal(hashzone, NULL,
		    M_WAITOK, NULL);
		hash->uh_hashsize = UMA_HASH_SIZE_INIT;
	}
	if (hash->uh_slab_hash) {
		bzero(hash->uh_slab_hash, alloc);
		hash->uh_hashmask = hash->uh_hashsize - 1;
		return (1);
	}

	return (0);
}
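
/*
 * Note that the hash size is always kept a power of two, so the mask
 * computed above lets UMA_HASH index with a cheap bitwise AND instead
 * of a modulo.  As an illustration (not a snippet from uma_int.h), a
 * table of 64 buckets yields uh_hashmask == 0x3f, and a slab lookup
 * reduces to something of the form:
 *
 *	idx = ((vm_offset_t)data >> UMA_SLAB_SHIFT) & hash->uh_hashmask;
 */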

/*
 * Expands the hash table for OFFPAGE zones.  This is done from zone_timeout
 * to reduce collisions.  This must not be done in the regular allocation path,
 * otherwise, we can recurse on the vm while allocating pages.
 *
 * Arguments:
 *	oldhash  The hash you want to expand
 *	newhash  The hash structure for the new table
 *
 * Returns:
 *	1 on success and 0 on failure.
 */
static int
hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
{
	uma_slab_t slab;
	int hval;
	int i;

	if (!newhash->uh_slab_hash)
		return (0);

	if (oldhash->uh_hashsize >= newhash->uh_hashsize)
		return (0);

	/*
	 * I need to investigate hash algorithms for resizing without a
	 * full rehash.
	 */

	for (i = 0; i < oldhash->uh_hashsize; i++)
		while (!SLIST_EMPTY(&oldhash->uh_slab_hash[i])) {
			slab = SLIST_FIRST(&oldhash->uh_slab_hash[i]);
			SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[i], us_hlink);
			hval = UMA_HASH(newhash, slab->us_data);
			SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
			    slab, us_hlink);
		}

	return (1);
}

/*
 * Free the hash bucket array to the appropriate backing store.
 *
 * Arguments:
 *	hash  The hash structure whose bucket array we're freeing
 *
 * Returns:
 *	Nothing
 */
static void
hash_free(struct uma_hash *hash)
{
	if (hash->uh_slab_hash == NULL)
		return;
	if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
		uma_zfree_internal(hashzone,
		    hash->uh_slab_hash, NULL, 0);
	else
		free(hash->uh_slab_hash, M_DEVBUF);
}

/*
 * Frees all outstanding items in a bucket
 *
 * Arguments:
 *	zone   The zone to free to, must be unlocked.
 *	bucket The free/alloc bucket with items, cpu queue must be locked.
 *
 * Returns:
 *	Nothing
 */

static void
bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
{
	uma_slab_t slab;
	int mzone;
	void *item;

	if (bucket == NULL)
		return;

	slab = NULL;
	mzone = 0;

	/* We have to look up the slab again for malloc.. */
	if (zone->uz_flags & UMA_ZFLAG_MALLOC)
		mzone = 1;

	while (bucket->ub_ptr > -1)  {
		item = bucket->ub_bucket[bucket->ub_ptr];
#ifdef INVARIANTS
		bucket->ub_bucket[bucket->ub_ptr] = NULL;
		KASSERT(item != NULL,
		    ("bucket_drain: botched ptr, item is NULL"));
#endif
		bucket->ub_ptr--;
		/*
		 * This is extremely inefficient.  The slab pointer was passed
		 * to uma_zfree_arg, but we lost it because the buckets don't
		 * hold them.  This will go away when free() gets a size passed
		 * to it.
		 */
		if (mzone)
			slab = vtoslab((vm_offset_t)item & (~UMA_SLAB_MASK));
		uma_zfree_internal(zone, item, slab, 1);
	}
}

/*
 * Drains the per cpu caches for a zone.
 *
 * Arguments:
 *	zone  The zone to drain, must be unlocked.
 *
 * Returns:
 *	Nothing
 *
 * This function returns with the zone locked so that the per cpu queues can
 * not be filled until zone_drain is finished.
 *
 */
static void
cache_drain(uma_zone_t zone)
{
	uma_bucket_t bucket;
	uma_cache_t cache;
	int cpu;

	/*
	 * Flush out the per cpu queues.
	 *
	 * XXX This causes unnecessary thrashing due to immediately having
	 * empty per cpu queues.  I need to improve this.
	 */

	/*
	 * We have to lock each cpu cache before locking the zone
	 */
	ZONE_UNLOCK(zone);

	for (cpu = 0; cpu < maxcpu; cpu++) {
		if (CPU_ABSENT(cpu))
			continue;
		CPU_LOCK(zone, cpu);
		cache = &zone->uz_cpu[cpu];
		bucket_drain(zone, cache->uc_allocbucket);
		bucket_drain(zone, cache->uc_freebucket);
	}

	/*
	 * Drain the bucket queues and free the buckets, we just keep two per
	 * cpu (alloc/free).
	 */
	ZONE_LOCK(zone);
	while ((bucket = LIST_FIRST(&zone->uz_full_bucket)) != NULL) {
		LIST_REMOVE(bucket, ub_link);
		ZONE_UNLOCK(zone);
		bucket_drain(zone, bucket);
		uma_zfree_internal(bucketzone, bucket, NULL, 0);
		ZONE_LOCK(zone);
	}

	/* Now we do the free queue.. */
	while ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
		LIST_REMOVE(bucket, ub_link);
		uma_zfree_internal(bucketzone, bucket, NULL, 0);
	}

	/* We unlock here, but they will all block until the zone is unlocked */
	for (cpu = 0; cpu < maxcpu; cpu++) {
		if (CPU_ABSENT(cpu))
			continue;
		CPU_UNLOCK(zone, cpu);
	}

	zone->uz_cachefree = 0;
}

/*
 * Frees pages from a zone back to the system.  This is done on demand from
 * the pageout daemon.
 *
 * Arguments:
 *	zone  The zone to free pages from
 *
 * Returns:
 *	Nothing.
 */
static void
zone_drain(uma_zone_t zone)
{
	struct slabhead freeslabs = {};
	uma_slab_t slab;
	uma_slab_t n;
	u_int64_t extra;
	u_int8_t flags;
	u_int8_t *mem;
	int i;

	/*
	 * We don't want to take pages from statically allocated zones at this
	 * time
	 */
	if (zone->uz_flags & UMA_ZFLAG_NOFREE || zone->uz_freef == NULL)
		return;

	ZONE_LOCK(zone);

	if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
		cache_drain(zone);

	if (zone->uz_free < zone->uz_wssize)
		goto finished;
#ifdef UMA_DEBUG
	printf("%s working set size: %llu free items: %u\n",
	    zone->uz_name, (unsigned long long)zone->uz_wssize, zone->uz_free);
#endif
	extra = zone->uz_free - zone->uz_wssize;
	extra /= zone->uz_ipers;

	/* extra is now the number of extra slabs that we can free */

	if (extra == 0)
		goto finished;

	slab = LIST_FIRST(&zone->uz_free_slab);
	while (slab && extra) {
		n = LIST_NEXT(slab, us_link);

		/* We have nowhere to free these to */
		if (slab->us_flags & UMA_SLAB_BOOT) {
			slab = n;
			continue;
		}

		LIST_REMOVE(slab, us_link);
		zone->uz_pages -= zone->uz_ppera;
		zone->uz_free -= zone->uz_ipers;

		if (zone->uz_flags & UMA_ZFLAG_HASH)
			UMA_HASH_REMOVE(&zone->uz_hash, slab, slab->us_data);

		SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);

		slab = n;
		extra--;
	}
finished:
	ZONE_UNLOCK(zone);

	while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
		SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
		if (zone->uz_fini)
			for (i = 0; i < zone->uz_ipers; i++)
				zone->uz_fini(
				    slab->us_data + (zone->uz_rsize * i),
				    zone->uz_size);
		flags = slab->us_flags;
		mem = slab->us_data;

		if (zone->uz_flags & UMA_ZFLAG_OFFPAGE)
			uma_zfree_internal(slabzone, slab, NULL, 0);
		if (zone->uz_flags & UMA_ZFLAG_MALLOC)
			for (i = 0; i < zone->uz_ppera; i++)
				vsetobj((vm_offset_t)mem + (i * PAGE_SIZE),
				    kmem_object);
#ifdef UMA_DEBUG
		printf("%s: Returning %d bytes.\n",
		    zone->uz_name, UMA_SLAB_SIZE * zone->uz_ppera);
#endif
		zone->uz_freef(mem, UMA_SLAB_SIZE * zone->uz_ppera, flags);
	}

}

/*
 * Allocate a new slab for a zone.  This does not insert the slab onto a list.
 *
 * Arguments:
 *	zone  The zone to allocate slabs for
 *	wait  Shall we wait?
 *
 * Returns:
 *	The slab that was allocated or NULL if there is no memory and the
 *	caller specified M_NOWAIT.
 *
 */
static uma_slab_t
slab_zalloc(uma_zone_t zone, int wait)
{
	uma_slab_t slab;	/* Starting slab */
	u_int8_t *mem;
	u_int8_t flags;
	int i;

	slab = NULL;

#ifdef UMA_DEBUG
	printf("slab_zalloc:  Allocating a new slab for %s\n", zone->uz_name);
#endif
	ZONE_UNLOCK(zone);

	if (zone->uz_flags & UMA_ZFLAG_OFFPAGE) {
		slab = uma_zalloc_internal(slabzone, NULL, wait, NULL);
		if (slab == NULL) {
			ZONE_LOCK(zone);
			return NULL;
		}
	}

	/*
	 * This reproduces the old vm_zone behavior of zero filling pages the
	 * first time they are added to a zone.
	 *
	 * Malloced items are zeroed in uma_zalloc.
	 */

	if ((zone->uz_flags & UMA_ZFLAG_MALLOC) == 0)
		wait |= M_ZERO;
	else
		wait &= ~M_ZERO;

	if (booted || (zone->uz_flags & UMA_ZFLAG_PRIVALLOC)) {
		mtx_lock(&Giant);
		mem = zone->uz_allocf(zone,
		    zone->uz_ppera * UMA_SLAB_SIZE, &flags, wait);
		mtx_unlock(&Giant);
		if (mem == NULL) {
			ZONE_LOCK(zone);
			return (NULL);
		}
	} else {
		uma_slab_t tmps;

		if (zone->uz_ppera > 1)
			panic("UMA: Attempting to allocate multiple pages before vm has started.\n");
		if (zone->uz_flags & UMA_ZFLAG_MALLOC)
			panic("Mallocing before uma_startup2 has been called.\n");
		if (uma_boot_free == 0)
			panic("UMA: Ran out of pre init pages, increase UMA_BOOT_PAGES\n");
		tmps = LIST_FIRST(&uma_boot_pages);
		LIST_REMOVE(tmps, us_link);
		uma_boot_free--;
		mem = tmps->us_data;
	}

	/* Point the slab into the allocated memory */
	if (!(zone->uz_flags & UMA_ZFLAG_OFFPAGE))
		slab = (uma_slab_t )(mem + zone->uz_pgoff);

	if (zone->uz_flags & UMA_ZFLAG_MALLOC)
		for (i = 0; i < zone->uz_ppera; i++)
			vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);

	slab->us_zone = zone;
	slab->us_data = mem;

	/*
	 * This is intended to spread data out across cache lines.
	 *
	 * This code doesn't seem to work properly on x86, and on alpha
	 * it makes absolutely no performance difference.  I'm sure it could
	 * use some tuning, but Sun makes outrageous claims about its
	 * performance.
	 */
#if 0
	if (zone->uz_cachemax) {
		slab->us_data += zone->uz_cacheoff;
		zone->uz_cacheoff += UMA_CACHE_INC;
		if (zone->uz_cacheoff > zone->uz_cachemax)
			zone->uz_cacheoff = 0;
	}
#endif

	slab->us_freecount = zone->uz_ipers;
	slab->us_firstfree = 0;
	slab->us_flags = flags;
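	/*
	 * Build the implicit free list: us_firstfree names the first free
	 * item and each us_freelist[] entry holds the index of the next
	 * free item, so a fresh slab starts out as 0 -> 1 -> ... -> ipers.
	 */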
	for (i = 0; i < zone->uz_ipers; i++)
		slab->us_freelist[i] = i+1;

	if (zone->uz_init)
		for (i = 0; i < zone->uz_ipers; i++)
			zone->uz_init(slab->us_data + (zone->uz_rsize * i),
			    zone->uz_size);
	ZONE_LOCK(zone);

	if (zone->uz_flags & UMA_ZFLAG_HASH)
		UMA_HASH_INSERT(&zone->uz_hash, slab, mem);

	zone->uz_pages += zone->uz_ppera;
	zone->uz_free += zone->uz_ipers;


	return (slab);
}

/*
 * Allocates a number of pages from the system
 *
 * Arguments:
 *	zone   Unused
 *	bytes  The number of bytes requested
 *	pflag  Where to store the slab flags for the allocation
 *	wait   Shall we wait?
 *
 * Returns:
 *	A pointer to the allocated memory or possibly
 *	NULL if M_NOWAIT is set.
 */
static void *
page_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
{
	void *p;	/* Returned page */

	*pflag = UMA_SLAB_KMEM;
	p = (void *) kmem_malloc(kmem_map, bytes, wait);

	return (p);
}

/*
 * Allocates a number of pages from within an object
 *
 * Arguments:
 *	zone   The zone whose object and kva we allocate from
 *	bytes  The number of bytes requested
 *	wait   Shall we wait?
 *
 * Returns:
 *	A pointer to the allocated memory or possibly
 *	NULL if M_NOWAIT is set.
 *
 * TODO: If we fail during a multi-page allocation release the pages that have
 *	 already been allocated.
 */
static void *
obj_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{
	vm_offset_t zkva;
	vm_offset_t retkva;
	vm_page_t p;
	int pages;

	retkva = 0;
	pages = zone->uz_pages;

	/*
	 * This looks a little weird since we're getting one page at a time
	 */
	while (bytes > 0) {
		p = vm_page_alloc(zone->uz_obj, pages,
		    VM_ALLOC_INTERRUPT);
		if (p == NULL)
			return (NULL);

		zkva = zone->uz_kva + pages * PAGE_SIZE;
		if (retkva == 0)
			retkva = zkva;
		pmap_qenter(zkva, &p, 1);
		bytes -= PAGE_SIZE;
		pages += 1;
	}

	*flags = UMA_SLAB_PRIV;

	return ((void *)retkva);
}

/*
 * Frees a number of pages to the system
 *
 * Arguments:
 *	mem   A pointer to the memory to be freed
 *	size  The size of the memory being freed
 *	flags The original slab's us_flags field
 *
 * Returns:
 *	Nothing
 *
 */
static void
page_free(void *mem, int size, u_int8_t flags)
{
	vm_map_t map;

	if (flags & UMA_SLAB_KMEM)
		map = kmem_map;
	else
		panic("UMA: page_free used with invalid flags %d\n", flags);

	kmem_free(map, (vm_offset_t)mem, size);
}

/*
 * Zero fill initializer
 *
 * Arguments/Returns follow uma_init specifications
 *
 */
static void
zero_init(void *mem, int size)
{
	bzero(mem, size);
}

/*
 * Finish creating a small uma zone.  This calculates ipers and the zone size.
 *
 * Arguments
 *	zone  The zone we should initialize
 *
 * Returns
 *	Nothing
 */
static void
zone_small_init(uma_zone_t zone)
{
	int rsize;
	int memused;
	int ipers;

	rsize = zone->uz_size;

	if (rsize < UMA_SMALLEST_UNIT)
		rsize = UMA_SMALLEST_UNIT;

	if (rsize & zone->uz_align)
		rsize = (rsize & ~zone->uz_align) + (zone->uz_align + 1);

	zone->uz_rsize = rsize;

	rsize += 1;	/* Account for the byte of linkage */
	zone->uz_ipers = (UMA_SLAB_SIZE - sizeof(struct uma_slab)) / rsize;
	zone->uz_ppera = 1;

	memused = zone->uz_ipers * zone->uz_rsize;

	/* Can we do any better? */
	if ((UMA_SLAB_SIZE - memused) >= UMA_MAX_WASTE) {
		if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
			return;
		ipers = UMA_SLAB_SIZE / zone->uz_rsize;
		if (ipers > zone->uz_ipers) {
			zone->uz_flags |= UMA_ZFLAG_OFFPAGE;
			if ((zone->uz_flags & UMA_ZFLAG_MALLOC) == 0)
				zone->uz_flags |= UMA_ZFLAG_HASH;
			zone->uz_ipers = ipers;
		}
	}

}
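
/*
 * To illustrate the sizing arithmetic above (with made-up numbers, assuming
 * 4 KB slabs and a 32-byte struct uma_slab): a 256-byte, pointer-aligned
 * item has rsize 256, so with the extra linkage byte each item costs 257
 * bytes and an in-page slab holds (4096 - 32) / 257 = 15 items.  The
 * remaining 256 bytes of slack may exceed UMA_MAX_WASTE, in which case the
 * zone is switched to OFFPAGE, where all 4096 / 256 = 16 items fit.
 */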

/*
 * Finish creating a large (> UMA_SLAB_SIZE) uma zone.  Just give in and do
 * OFFPAGE for now.  When I can allow for more dynamic slab sizes this will be
 * more complicated.
 *
 * Arguments
 *	zone  The zone we should initialize
 *
 * Returns
 *	Nothing
 */
static void
zone_large_init(uma_zone_t zone)
{
	int pages;

	pages = zone->uz_size / UMA_SLAB_SIZE;

	/* Account for remainder */
	if ((pages * UMA_SLAB_SIZE) < zone->uz_size)
		pages++;

	zone->uz_ppera = pages;
	zone->uz_ipers = 1;

	zone->uz_flags |= UMA_ZFLAG_OFFPAGE;
	if ((zone->uz_flags & UMA_ZFLAG_MALLOC) == 0)
		zone->uz_flags |= UMA_ZFLAG_HASH;

	zone->uz_rsize = zone->uz_size;
}

/*
 * Zone header ctor.  This initializes all fields, locks, etc., and inserts
 * the zone onto the global zone list.
 *
 * Arguments/Returns follow uma_ctor specifications
 *	udata  Actually uma_zctor_args
 *
 */

static void
zone_ctor(void *mem, int size, void *udata)
{
	struct uma_zctor_args *arg = udata;
	uma_zone_t zone = mem;
	int privlc;
	int cplen;
	int cpu;

	bzero(zone, size);
	zone->uz_name = arg->name;
	zone->uz_size = arg->size;
	zone->uz_ctor = arg->ctor;
	zone->uz_dtor = arg->dtor;
	zone->uz_init = arg->uminit;
	zone->uz_fini = arg->fini;
	zone->uz_align = arg->align;
	zone->uz_free = 0;
	zone->uz_pages = 0;
	zone->uz_flags = 0;
	zone->uz_allocf = page_alloc;
	zone->uz_freef = page_free;

	if (arg->flags & UMA_ZONE_ZINIT)
		zone->uz_init = zero_init;

	if (arg->flags & UMA_ZONE_INTERNAL)
		zone->uz_flags |= UMA_ZFLAG_INTERNAL;

	if (arg->flags & UMA_ZONE_MALLOC)
		zone->uz_flags |= UMA_ZFLAG_MALLOC;

	if (arg->flags & UMA_ZONE_NOFREE)
		zone->uz_flags |= UMA_ZFLAG_NOFREE;

	if (arg->flags & UMA_ZONE_VM)
		zone->uz_flags |= UMA_ZFLAG_BUCKETCACHE;

	if (zone->uz_size > UMA_SLAB_SIZE)
		zone_large_init(zone);
	else
		zone_small_init(zone);

	if (arg->flags & UMA_ZONE_MTXCLASS)
		privlc = 1;
	else
		privlc = 0;

	/* We do this so that the per cpu lock name is unique for each zone */
	memcpy(zone->uz_lname, "PCPU ", 5);
	cplen = min(strlen(zone->uz_name) + 1, LOCKNAME_LEN - 6);
	memcpy(zone->uz_lname+5, zone->uz_name, cplen);
	zone->uz_lname[LOCKNAME_LEN - 1] = '\0';

	/*
	 * If we're putting the slab header in the actual page we need to
	 * figure out where in each page it goes.  This calculates a right
	 * justified offset into the memory on a UMA_ALIGN_PTR boundary.
	 */
	if (!(zone->uz_flags & UMA_ZFLAG_OFFPAGE)) {
		int totsize;
		int waste;

		/* Size of the slab struct and free list */
		totsize = sizeof(struct uma_slab) + zone->uz_ipers;
		if (totsize & UMA_ALIGN_PTR)
			totsize = (totsize & ~UMA_ALIGN_PTR) +
			    (UMA_ALIGN_PTR + 1);
		zone->uz_pgoff = UMA_SLAB_SIZE - totsize;

		waste = zone->uz_pgoff;
		waste -= (zone->uz_ipers * zone->uz_rsize);

		/*
		 * This calculates how much space we have for cache line size
		 * optimizations.  It works by offsetting each slab slightly.
		 * Currently it breaks on x86, and so it is disabled.
		 */

		if (zone->uz_align < UMA_CACHE_INC && waste > UMA_CACHE_INC) {
			zone->uz_cachemax = waste - UMA_CACHE_INC;
			zone->uz_cacheoff = 0;
		}

		totsize = zone->uz_pgoff + sizeof(struct uma_slab)
		    + zone->uz_ipers;
		/* I don't think it's possible, but I'll make sure anyway */
		if (totsize > UMA_SLAB_SIZE) {
			printf("zone %s ipers %d rsize %d size %d\n",
			    zone->uz_name, zone->uz_ipers, zone->uz_rsize,
			    zone->uz_size);
			panic("UMA slab won't fit.\n");
		}
	}
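
	/*
	 * As an illustration of the right-justified placement above (again
	 * with made-up numbers): given 4 KB slabs, a 32-byte struct
	 * uma_slab, and ipers 15, totsize is 47, which rounds up to the
	 * next pointer boundary (48 on a 32-bit machine), leaving
	 * uz_pgoff = 4096 - 48 = 4048.  Items occupy the front of the page
	 * and the slab header sits at the very end.
	 */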

	if (zone->uz_flags & UMA_ZFLAG_HASH)
		hash_alloc(&zone->uz_hash);

#ifdef UMA_DEBUG
	printf("%s(%p) size = %d ipers = %d ppera = %d pgoff = %d\n",
	    zone->uz_name, zone,
	    zone->uz_size, zone->uz_ipers,
	    zone->uz_ppera, zone->uz_pgoff);
#endif
	ZONE_LOCK_INIT(zone, privlc);

	mtx_lock(&uma_mtx);
	LIST_INSERT_HEAD(&uma_zones, zone, uz_link);
	mtx_unlock(&uma_mtx);

	/*
	 * Some internal zones don't have room allocated for the per cpu
	 * caches.  If we're internal, bail out here.
	 */

	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
		return;

	if (zone->uz_ipers < UMA_BUCKET_SIZE)
		zone->uz_count = zone->uz_ipers - 1;
	else
		zone->uz_count = UMA_BUCKET_SIZE - 1;

	for (cpu = 0; cpu < maxcpu; cpu++)
		CPU_LOCK_INIT(zone, cpu, privlc);
}

/*
 * Zone header dtor.  This frees all data, destroys locks, frees the hash table
 * and removes the zone from the global list.
 *
 * Arguments/Returns follow uma_dtor specifications
 *	udata  unused
 */

static void
zone_dtor(void *arg, int size, void *udata)
{
	uma_zone_t zone;
	int cpu;

	zone = (uma_zone_t)arg;

	ZONE_LOCK(zone);
	zone->uz_wssize = 0;
	ZONE_UNLOCK(zone);

	mtx_lock(&uma_mtx);
	LIST_REMOVE(zone, uz_link);
	zone_drain(zone);
	mtx_unlock(&uma_mtx);

	ZONE_LOCK(zone);
	if (zone->uz_free != 0)
		printf("Zone %s was not empty.  Lost %d pages of memory.\n",
		    zone->uz_name, zone->uz_pages);

	if ((zone->uz_flags & UMA_ZFLAG_INTERNAL) == 0)
		for (cpu = 0; cpu < maxcpu; cpu++)
			CPU_LOCK_FINI(zone, cpu);

	ZONE_UNLOCK(zone);
	if ((zone->uz_flags & UMA_ZFLAG_OFFPAGE) != 0)
		hash_free(&zone->uz_hash);

	ZONE_LOCK_FINI(zone);
}
11299c2cd7e5SJeff Roberson /*
11308355f576SJeff Roberson  * Traverses every zone in the system and calls a callback
11318355f576SJeff Roberson  *
11328355f576SJeff Roberson  * Arguments:
11338355f576SJeff Roberson  *	zfunc  A pointer to a function which accepts a zone
11348355f576SJeff Roberson  *		as an argument.
11358355f576SJeff Roberson  *
11368355f576SJeff Roberson  * Returns:
11378355f576SJeff Roberson  *	Nothing
11388355f576SJeff Roberson  */
11398355f576SJeff Roberson static void
11408355f576SJeff Roberson zone_foreach(void (*zfunc)(uma_zone_t))
11418355f576SJeff Roberson {
11428355f576SJeff Roberson 	uma_zone_t zone;
11438355f576SJeff Roberson 
11448355f576SJeff Roberson 	mtx_lock(&uma_mtx);
11458355f576SJeff Roberson 	LIST_FOREACH(zone, &uma_zones, uz_link) {
11468355f576SJeff Roberson 		zfunc(zone);
11478355f576SJeff Roberson 	}
11488355f576SJeff Roberson 	mtx_unlock(&uma_mtx);
11498355f576SJeff Roberson }
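
/*
 * A minimal usage sketch ("zone_print_name" is a hypothetical callback,
 * not part of this file):
 *
 *	static void
 *	zone_print_name(uma_zone_t zone)
 *	{
 *		printf("%s\n", zone->uz_name);
 *	}
 *
 *	zone_foreach(zone_print_name);
 *
 * Note that zfunc runs with uma_mtx held, so it must not sleep or
 * recursively acquire uma_mtx.
 */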
11508355f576SJeff Roberson 
11518355f576SJeff Roberson /* Public functions */
11528355f576SJeff Roberson /* See uma.h */
11538355f576SJeff Roberson void
11548355f576SJeff Roberson uma_startup(void *bootmem)
11558355f576SJeff Roberson {
11568355f576SJeff Roberson 	struct uma_zctor_args args;
11578355f576SJeff Roberson 	uma_slab_t slab;
11588355f576SJeff Roberson 	int slabsize;
11598355f576SJeff Roberson 	int i;
11608355f576SJeff Roberson 
11618355f576SJeff Roberson #ifdef UMA_DEBUG
11628355f576SJeff Roberson 	printf("Creating uma zone headers zone.\n");
11638355f576SJeff Roberson #endif
11648355f576SJeff Roberson #ifdef SMP
11658355f576SJeff Roberson 	maxcpu = mp_maxid + 1;
11668355f576SJeff Roberson #else
11678355f576SJeff Roberson 	maxcpu = 1;
11688355f576SJeff Roberson #endif
11698355f576SJeff Roberson #ifdef UMA_DEBUG
11708355f576SJeff Roberson 	printf("Max cpu = %d, mp_maxid = %d\n", maxcpu, mp_maxid);
11718355f576SJeff Roberson 	Debugger("stop");
11728355f576SJeff Roberson #endif
11736008862bSJohn Baldwin 	mtx_init(&uma_mtx, "UMA lock", NULL, MTX_DEF);
11748355f576SJeff Roberson 	/* Create the initial zone "manually", without going through uma_zcreate */
11758355f576SJeff Roberson 	args.name = "UMA Zones";
11768355f576SJeff Roberson 	args.size = sizeof(struct uma_zone) +
11778355f576SJeff Roberson 	    (sizeof(struct uma_cache) * (maxcpu - 1));
11788355f576SJeff Roberson 	args.ctor = zone_ctor;
11799c2cd7e5SJeff Roberson 	args.dtor = zone_dtor;
11808355f576SJeff Roberson 	args.uminit = zero_init;
11818355f576SJeff Roberson 	args.fini = NULL;
11828355f576SJeff Roberson 	args.align = 32 - 1;
11838355f576SJeff Roberson 	args.flags = UMA_ZONE_INTERNAL;
11848355f576SJeff Roberson 	/* The initial zone has no per cpu queues, so it's smaller */
11858355f576SJeff Roberson 	zone_ctor(zones, sizeof(struct uma_zone), &args);
11868355f576SJeff Roberson 
11878355f576SJeff Roberson #ifdef UMA_DEBUG
11888355f576SJeff Roberson 	printf("Filling boot free list.\n");
11898355f576SJeff Roberson #endif
11908355f576SJeff Roberson 	for (i = 0; i < UMA_BOOT_PAGES; i++) {
11918355f576SJeff Roberson 		slab = (uma_slab_t)((u_int8_t *)bootmem + (i * UMA_SLAB_SIZE));
11928355f576SJeff Roberson 		slab->us_data = (u_int8_t *)slab;
11938355f576SJeff Roberson 		slab->us_flags = UMA_SLAB_BOOT;
11948355f576SJeff Roberson 		LIST_INSERT_HEAD(&uma_boot_pages, slab, us_link);
11958355f576SJeff Roberson 		uma_boot_free++;
11968355f576SJeff Roberson 	}
11978355f576SJeff Roberson 
11988355f576SJeff Roberson #ifdef UMA_DEBUG
11998355f576SJeff Roberson 	printf("Creating slab zone.\n");
12008355f576SJeff Roberson #endif
12018355f576SJeff Roberson 
12028355f576SJeff Roberson 	/*
12038355f576SJeff Roberson 	 * This is the max number of free list items we'll have with
12048355f576SJeff Roberson 	 * offpage slabs.
12058355f576SJeff Roberson 	 */
12068355f576SJeff Roberson 
12078355f576SJeff Roberson 	slabsize = UMA_SLAB_SIZE - sizeof(struct uma_slab);
12088355f576SJeff Roberson 	slabsize /= UMA_MAX_WASTE;
12098355f576SJeff Roberson 	slabsize++;			/* In case the division rounded down */
12108355f576SJeff Roberson 	slabsize += sizeof(struct uma_slab);
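
	/*
	 * Worked example of the sizing above (illustrative; assuming
	 * UMA_SLAB_SIZE == 4096 and UMA_MAX_WASTE == 10): the free list can
	 * need at most (4096 - sizeof(struct uma_slab)) / 10 + 1, roughly
	 * 400, one-byte entries on top of the struct uma_slab header itself.
	 */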
12118355f576SJeff Roberson 
12128355f576SJeff Roberson 	/* Now make a zone for slab headers */
12138355f576SJeff Roberson 	slabzone = uma_zcreate("UMA Slabs",
12148355f576SJeff Roberson 				slabsize,
12158355f576SJeff Roberson 				NULL, NULL, NULL, NULL,
12168355f576SJeff Roberson 				UMA_ALIGN_PTR, UMA_ZONE_INTERNAL);
12178355f576SJeff Roberson 
12188355f576SJeff Roberson 	hashzone = uma_zcreate("UMA Hash",
12198355f576SJeff Roberson 	    sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
12208355f576SJeff Roberson 	    NULL, NULL, NULL, NULL,
12218355f576SJeff Roberson 	    UMA_ALIGN_PTR, UMA_ZONE_INTERNAL);
12228355f576SJeff Roberson 
12238355f576SJeff Roberson 	bucketzone = uma_zcreate("UMA Buckets", sizeof(struct uma_bucket),
12248355f576SJeff Roberson 	    NULL, NULL, NULL, NULL,
12258355f576SJeff Roberson 	    UMA_ALIGN_PTR, UMA_ZONE_INTERNAL);
12268355f576SJeff Roberson 
12278355f576SJeff Roberson 
12288355f576SJeff Roberson #ifdef UMA_DEBUG
12298355f576SJeff Roberson 	printf("UMA startup complete.\n");
12308355f576SJeff Roberson #endif
12318355f576SJeff Roberson }
12328355f576SJeff Roberson 
12338355f576SJeff Roberson /* see uma.h */
12348355f576SJeff Roberson void
123599571dc3SJeff Roberson uma_startup2(void)
12368355f576SJeff Roberson {
12378355f576SJeff Roberson 	booted = 1;
123886bbae32SJeff Roberson 	bucket_enable();
12398355f576SJeff Roberson #ifdef UMA_DEBUG
12408355f576SJeff Roberson 	printf("UMA startup2 complete.\n");
12418355f576SJeff Roberson #endif
12428355f576SJeff Roberson }
12438355f576SJeff Roberson 
12448355f576SJeff Roberson /*
12458355f576SJeff Roberson  * Initialize our callout handle
12468355f576SJeff Roberson  *
12478355f576SJeff Roberson  */
12488355f576SJeff Roberson 
12498355f576SJeff Roberson static void
12508355f576SJeff Roberson uma_startup3(void)
12518355f576SJeff Roberson {
12528355f576SJeff Roberson #ifdef UMA_DEBUG
12538355f576SJeff Roberson 	printf("Starting callout.\n");
12548355f576SJeff Roberson #endif
12558355f576SJeff Roberson 	callout_init(&uma_callout, 0);
12568355f576SJeff Roberson 	callout_reset(&uma_callout, UMA_WORKING_TIME * hz, uma_timeout, NULL);
12578355f576SJeff Roberson #ifdef UMA_DEBUG
12588355f576SJeff Roberson 	printf("UMA startup3 complete.\n");
12598355f576SJeff Roberson #endif
12608355f576SJeff Roberson }
12618355f576SJeff Roberson 
12628355f576SJeff Roberson /* See uma.h */
12638355f576SJeff Roberson uma_zone_t
1264c3bdc05fSAndrew R. Reiter uma_zcreate(char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
1265c3bdc05fSAndrew R. Reiter 		uma_init uminit, uma_fini fini, int align, u_int16_t flags)
12668355f576SJeff Roberson 
12678355f576SJeff Roberson {
12688355f576SJeff Roberson 	struct uma_zctor_args args;
12698355f576SJeff Roberson 
12708355f576SJeff Roberson 	/* This stuff is essential for the zone ctor */
12718355f576SJeff Roberson 	args.name = name;
12728355f576SJeff Roberson 	args.size = size;
12738355f576SJeff Roberson 	args.ctor = ctor;
12748355f576SJeff Roberson 	args.dtor = dtor;
12758355f576SJeff Roberson 	args.uminit = uminit;
12768355f576SJeff Roberson 	args.fini = fini;
12778355f576SJeff Roberson 	args.align = align;
12788355f576SJeff Roberson 	args.flags = flags;
12798355f576SJeff Roberson 
1280a553d4b8SJeff Roberson 	return (uma_zalloc_internal(zones, &args, M_WAITOK, NULL));
12818355f576SJeff Roberson }
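
/*
 * A minimal usage sketch (hypothetical zone; the "foo" names are
 * illustrative only):
 *
 *	static uma_zone_t foo_zone;
 *
 *	foo_zone = uma_zcreate("foo", sizeof(struct foo),
 *	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
 *
 * The four NULLs decline the optional ctor/dtor/uminit/fini hooks, so
 * the zone hands back uninitialized items unless M_ZERO is requested.
 */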
12828355f576SJeff Roberson 
12838355f576SJeff Roberson /* See uma.h */
12849c2cd7e5SJeff Roberson void
12859c2cd7e5SJeff Roberson uma_zdestroy(uma_zone_t zone)
12869c2cd7e5SJeff Roberson {
12879c2cd7e5SJeff Roberson 	uma_zfree_internal(zones, zone, NULL, 0);
12889c2cd7e5SJeff Roberson }
12899c2cd7e5SJeff Roberson 
12909c2cd7e5SJeff Roberson /* See uma.h */
12918355f576SJeff Roberson void *
12922cc35ff9SJeff Roberson uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
12938355f576SJeff Roberson {
12948355f576SJeff Roberson 	void *item;
12958355f576SJeff Roberson 	uma_cache_t cache;
12968355f576SJeff Roberson 	uma_bucket_t bucket;
12978355f576SJeff Roberson 	int cpu;
12988355f576SJeff Roberson 
12998355f576SJeff Roberson 	/* This is the fast path allocation */
13008355f576SJeff Roberson #ifdef UMA_DEBUG_ALLOC_1
13018355f576SJeff Roberson 	printf("Allocating one item from %s(%p)\n", zone->uz_name, zone);
13028355f576SJeff Roberson #endif
1303a553d4b8SJeff Roberson 
13044c1cc01cSJohn Baldwin 	if (!(flags & M_NOWAIT)) {
13054c1cc01cSJohn Baldwin 		KASSERT(curthread->td_intr_nesting_level == 0,
13064c1cc01cSJohn Baldwin 		   ("malloc(M_WAITOK) in interrupt context"));
13074c1cc01cSJohn Baldwin 		WITNESS_SLEEP(1, NULL);
13084c1cc01cSJohn Baldwin 	}
13094c1cc01cSJohn Baldwin 
1310a553d4b8SJeff Roberson zalloc_restart:
13118355f576SJeff Roberson 	cpu = PCPU_GET(cpuid);
13128355f576SJeff Roberson 	CPU_LOCK(zone, cpu);
13138355f576SJeff Roberson 	cache = &zone->uz_cpu[cpu];
13148355f576SJeff Roberson 
13158355f576SJeff Roberson zalloc_start:
13168355f576SJeff Roberson 	bucket = cache->uc_allocbucket;
13178355f576SJeff Roberson 
13188355f576SJeff Roberson 	if (bucket) {
13198355f576SJeff Roberson 		if (bucket->ub_ptr > -1) {
13208355f576SJeff Roberson 			item = bucket->ub_bucket[bucket->ub_ptr];
13218355f576SJeff Roberson #ifdef INVARIANTS
13228355f576SJeff Roberson 			bucket->ub_bucket[bucket->ub_ptr] = NULL;
13238355f576SJeff Roberson #endif
13248355f576SJeff Roberson 			bucket->ub_ptr--;
13258355f576SJeff Roberson 			KASSERT(item != NULL,
13268355f576SJeff Roberson 			    ("uma_zalloc: Bucket pointer mangled."));
13278355f576SJeff Roberson 			cache->uc_allocs++;
1328639c9550SJeff Roberson #ifdef INVARIANTS
1329639c9550SJeff Roberson 			uma_dbg_alloc(zone, NULL, item);
1330639c9550SJeff Roberson #endif
1331b9ba8931SJeff Roberson 			CPU_UNLOCK(zone, cpu);
13328355f576SJeff Roberson 			if (zone->uz_ctor)
13338355f576SJeff Roberson 				zone->uz_ctor(item, zone->uz_size, udata);
13342cc35ff9SJeff Roberson 			if (flags & M_ZERO)
13352cc35ff9SJeff Roberson 				bzero(item, zone->uz_size);
13368355f576SJeff Roberson 			return (item);
13378355f576SJeff Roberson 		} else if (cache->uc_freebucket) {
13388355f576SJeff Roberson 			/*
13398355f576SJeff Roberson 			 * We have run out of items in our allocbucket.
13408355f576SJeff Roberson 			 * See if we can switch with our free bucket.
13418355f576SJeff Roberson 			 */
13428355f576SJeff Roberson 			if (cache->uc_freebucket->ub_ptr > -1) {
13438355f576SJeff Roberson 				uma_bucket_t swap;
13448355f576SJeff Roberson 
13458355f576SJeff Roberson #ifdef UMA_DEBUG_ALLOC
13468355f576SJeff Roberson 				printf("uma_zalloc: Swapping empty with alloc.\n");
13478355f576SJeff Roberson #endif
13488355f576SJeff Roberson 				swap = cache->uc_freebucket;
13498355f576SJeff Roberson 				cache->uc_freebucket = cache->uc_allocbucket;
13508355f576SJeff Roberson 				cache->uc_allocbucket = swap;
13518355f576SJeff Roberson 
13528355f576SJeff Roberson 				goto zalloc_start;
13538355f576SJeff Roberson 			}
13548355f576SJeff Roberson 		}
13558355f576SJeff Roberson 	}
1356a553d4b8SJeff Roberson 	ZONE_LOCK(zone);
1357a553d4b8SJeff Roberson 	/* Since we have locked the zone we may as well send back our stats */
1358a553d4b8SJeff Roberson 	zone->uz_allocs += cache->uc_allocs;
1359a553d4b8SJeff Roberson 	cache->uc_allocs = 0;
13608355f576SJeff Roberson 
1361a553d4b8SJeff Roberson 	/* Our old one is now a free bucket */
1362a553d4b8SJeff Roberson 	if (cache->uc_allocbucket) {
1363a553d4b8SJeff Roberson 		KASSERT(cache->uc_allocbucket->ub_ptr == -1,
1364a553d4b8SJeff Roberson 		    ("uma_zalloc_arg: Freeing a non free bucket."));
1365a553d4b8SJeff Roberson 		LIST_INSERT_HEAD(&zone->uz_free_bucket,
1366a553d4b8SJeff Roberson 		    cache->uc_allocbucket, ub_link);
1367a553d4b8SJeff Roberson 		cache->uc_allocbucket = NULL;
1368a553d4b8SJeff Roberson 	}
13698355f576SJeff Roberson 
1370a553d4b8SJeff Roberson 	/* Check the free list for a new alloc bucket */
1371a553d4b8SJeff Roberson 	if ((bucket = LIST_FIRST(&zone->uz_full_bucket)) != NULL) {
1372a553d4b8SJeff Roberson 		KASSERT(bucket->ub_ptr != -1,
1373a553d4b8SJeff Roberson 		    ("uma_zalloc_arg: Returning an empty bucket."));
13748355f576SJeff Roberson 
1375a553d4b8SJeff Roberson 		LIST_REMOVE(bucket, ub_link);
1376a553d4b8SJeff Roberson 		cache->uc_allocbucket = bucket;
1377a553d4b8SJeff Roberson 		ZONE_UNLOCK(zone);
13788355f576SJeff Roberson 		goto zalloc_start;
1379a553d4b8SJeff Roberson 	}
1380a553d4b8SJeff Roberson 	/* Bump up our uz_count so we get here less */
1381a553d4b8SJeff Roberson 	if (zone->uz_count < UMA_BUCKET_SIZE - 1)
1382a553d4b8SJeff Roberson 		zone->uz_count++;
1383a553d4b8SJeff Roberson 
1384a553d4b8SJeff Roberson 	/* We are no longer associated with this cpu!!! */
1385a553d4b8SJeff Roberson 	CPU_UNLOCK(zone, cpu);
13868355f576SJeff Roberson 
13878355f576SJeff Roberson 	/*
1388a553d4b8SJeff Roberson 	 * Now let's just fill a bucket and put it on the free list.  If that
1389a553d4b8SJeff Roberson 	 * works we'll restart the allocation from the beginning.
1390a553d4b8SJeff Roberson 	 *
1391a553d4b8SJeff Roberson 	 * Try this zone's free list first so we don't allocate extra buckets.
13928355f576SJeff Roberson 	 */
1393a553d4b8SJeff Roberson 
1394a553d4b8SJeff Roberson 	if ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL)
1395a553d4b8SJeff Roberson 		LIST_REMOVE(bucket, ub_link);
1396a553d4b8SJeff Roberson 
1397a553d4b8SJeff Roberson 	/* Now we no longer need the zone lock. */
1398a553d4b8SJeff Roberson 	ZONE_UNLOCK(zone);
1399a553d4b8SJeff Roberson 
140018aa2de5SJeff Roberson 	if (bucket == NULL) {
140118aa2de5SJeff Roberson 		int bflags;
140218aa2de5SJeff Roberson 
140318aa2de5SJeff Roberson 		bflags = flags;
140418aa2de5SJeff Roberson 		if (zone->uz_flags & UMA_ZFLAG_BUCKETCACHE)
140518aa2de5SJeff Roberson 			bflags |= M_NOVM;
140618aa2de5SJeff Roberson 
1407a553d4b8SJeff Roberson 		bucket = uma_zalloc_internal(bucketzone,
140818aa2de5SJeff Roberson 		    NULL, bflags, NULL);
140918aa2de5SJeff Roberson 	}
1410a553d4b8SJeff Roberson 
1411a553d4b8SJeff Roberson 	if (bucket != NULL) {
1412a553d4b8SJeff Roberson #ifdef INVARIANTS
1413a553d4b8SJeff Roberson 		bzero(bucket, bucketzone->uz_size);
1414a553d4b8SJeff Roberson #endif
1415a553d4b8SJeff Roberson 		bucket->ub_ptr = -1;
1416a553d4b8SJeff Roberson 
14172cc35ff9SJeff Roberson 		if (uma_zalloc_internal(zone, udata, flags, bucket))
1418a553d4b8SJeff Roberson 			goto zalloc_restart;
1419a553d4b8SJeff Roberson 		else
1420a553d4b8SJeff Roberson 			uma_zfree_internal(bucketzone, bucket, NULL, 0);
1421a553d4b8SJeff Roberson 	}
1422a553d4b8SJeff Roberson 	/*
1423a553d4b8SJeff Roberson 	 * We may not get a bucket if we recurse, so
1424a553d4b8SJeff Roberson 	 * return an actual item.
1425a553d4b8SJeff Roberson 	 */
1426a553d4b8SJeff Roberson #ifdef UMA_DEBUG
1427a553d4b8SJeff Roberson 	printf("uma_zalloc_arg: Bucketzone returned NULL\n");
1428a553d4b8SJeff Roberson #endif
1429a553d4b8SJeff Roberson 
14302cc35ff9SJeff Roberson 	return (uma_zalloc_internal(zone, udata, flags, NULL));
14318355f576SJeff Roberson }
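
/*
 * Callers normally reach uma_zalloc_arg() through the wrappers in uma.h.
 * A sketch, continuing the hypothetical zone above:
 *
 *	struct foo *fp;
 *
 *	fp = uma_zalloc(foo_zone, M_NOWAIT | M_ZERO);
 *	if (fp == NULL)
 *		return (ENOMEM);
 *
 * M_WAITOK allocations may sleep, which is what the nesting-level and
 * WITNESS assertions at the top of this function enforce; interrupt
 * context must use M_NOWAIT and cope with NULL.
 */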
14328355f576SJeff Roberson 
14338355f576SJeff Roberson /*
14348355f576SJeff Roberson  * Allocates an item for an internal zone OR fills a bucket
14358355f576SJeff Roberson  *
14368355f576SJeff Roberson  * Arguments
14378355f576SJeff Roberson  *	zone   The zone to alloc for.
14388355f576SJeff Roberson  *	udata  The data to be passed to the constructor.
14392cc35ff9SJeff Roberson  *	flags  M_WAITOK, M_NOWAIT, M_ZERO.
1440a553d4b8SJeff Roberson  *	bucket The bucket to fill or NULL
14418355f576SJeff Roberson  *
14428355f576SJeff Roberson  * Returns
14438355f576SJeff Roberson  *	NULL if there is no memory and M_NOWAIT is set
14448355f576SJeff Roberson  *	An item if called on an internal zone
14458355f576SJeff Roberson  *	Non NULL if called to fill a bucket and it was successful.
14468355f576SJeff Roberson  *
14478355f576SJeff Roberson  * Discussion:
14488355f576SJeff Roberson  *	This was much cleaner before it had to do per cpu caches.  It is
14498355f576SJeff Roberson  *	complicated now because it has to handle the simple internal case, and
1450a553d4b8SJeff Roberson  *	the more involved bucket filling and allocation.
14518355f576SJeff Roberson  */
14528355f576SJeff Roberson 
14538355f576SJeff Roberson static void *
14542cc35ff9SJeff Roberson uma_zalloc_internal(uma_zone_t zone, void *udata, int flags, uma_bucket_t bucket)
14558355f576SJeff Roberson {
14568355f576SJeff Roberson 	uma_slab_t slab;
14578355f576SJeff Roberson 	u_int8_t freei;
14588355f576SJeff Roberson 	void *item;
14598355f576SJeff Roberson 
14608355f576SJeff Roberson 	item = NULL;
14618355f576SJeff Roberson 
14628355f576SJeff Roberson 	/*
1463a553d4b8SJeff Roberson 	 * This is to stop us from allocating per cpu buckets while we're
1464a553d4b8SJeff Roberson 	 * running out of UMA_BOOT_PAGES.  Otherwise, we would exhaust the
1465a553d4b8SJeff Roberson 	 * boot pages.
14668355f576SJeff Roberson 	 */
14678355f576SJeff Roberson 
146886bbae32SJeff Roberson 	if (bucketdisable && zone == bucketzone)
14698355f576SJeff Roberson 		return (NULL);
14708355f576SJeff Roberson 
14718355f576SJeff Roberson #ifdef UMA_DEBUG_ALLOC
14728355f576SJeff Roberson 	printf("INTERNAL: Allocating one item from %s(%p)\n", zone->uz_name, zone);
14738355f576SJeff Roberson #endif
14748355f576SJeff Roberson 	ZONE_LOCK(zone);
14758355f576SJeff Roberson 
14768355f576SJeff Roberson 	/*
1477a553d4b8SJeff Roberson 	 * This code is here to limit the number of simultaneous bucket fills
1478a553d4b8SJeff Roberson 	 * for any given zone to the number of per cpu caches in this zone. This
1479a553d4b8SJeff Roberson 	 * is done so that we don't allocate more memory than we really need.
14808355f576SJeff Roberson 	 */
1481a553d4b8SJeff Roberson 
1482a553d4b8SJeff Roberson 	if (bucket) {
1483a553d4b8SJeff Roberson #ifdef SMP
1484bce97791SJeff Roberson 		if (zone->uz_fills >= mp_ncpus) {
1485a553d4b8SJeff Roberson #else
1486bce97791SJeff Roberson 		if (zone->uz_fills > 1) {
14878355f576SJeff Roberson #endif
1488bce97791SJeff Roberson 			ZONE_UNLOCK(zone);
1489a553d4b8SJeff Roberson 			return (NULL);
1490bce97791SJeff Roberson 		}
1491a553d4b8SJeff Roberson 
1492a553d4b8SJeff Roberson 		zone->uz_fills++;
14938355f576SJeff Roberson 	}
14948355f576SJeff Roberson 
14958355f576SJeff Roberson new_slab:
14968355f576SJeff Roberson 
14978355f576SJeff Roberson 	/* Find a slab with some space */
14988355f576SJeff Roberson 	if (zone->uz_free) {
14998355f576SJeff Roberson 		if (!LIST_EMPTY(&zone->uz_part_slab)) {
15008355f576SJeff Roberson 			slab = LIST_FIRST(&zone->uz_part_slab);
15018355f576SJeff Roberson 		} else {
15028355f576SJeff Roberson 			slab = LIST_FIRST(&zone->uz_free_slab);
15038355f576SJeff Roberson 			LIST_REMOVE(slab, us_link);
15048355f576SJeff Roberson 			LIST_INSERT_HEAD(&zone->uz_part_slab, slab, us_link);
15058355f576SJeff Roberson 		}
15068355f576SJeff Roberson 	} else {
15078355f576SJeff Roberson 		/*
15088355f576SJeff Roberson 		 * This is to prevent us from recursively trying to allocate
15098355f576SJeff Roberson 		 * buckets.  The problem is that if an allocation forces us to
15108355f576SJeff Roberson 		 * grab a new bucket we will call page_alloc, which will go off
15118355f576SJeff Roberson 		 * and cause the vm to allocate vm_map_entries.  If we need new
15128355f576SJeff Roberson 		 * buckets there too we will recurse in kmem_alloc and bad
15138355f576SJeff Roberson 		 * things happen.  So instead we return a NULL bucket, and make
151418aa2de5SJeff Roberson 		 * the code that allocates buckets smart enough to deal with it
151518aa2de5SJeff Roberson 		 * the code that allocates buckets smart enough to deal with it.
15168355f576SJeff Roberson 		if (zone == bucketzone && zone->uz_recurse != 0) {
15178355f576SJeff Roberson 			ZONE_UNLOCK(zone);
15188355f576SJeff Roberson 			return (NULL);
15198355f576SJeff Roberson 		}
1520af7f9b97SJeff Roberson 		while (zone->uz_maxpages &&
1521af7f9b97SJeff Roberson 		    zone->uz_pages >= zone->uz_maxpages) {
1522af7f9b97SJeff Roberson 			zone->uz_flags |= UMA_ZFLAG_FULL;
1523af7f9b97SJeff Roberson 
15242cc35ff9SJeff Roberson 			if (flags & M_WAITOK)
1525af7f9b97SJeff Roberson 				msleep(zone, &zone->uz_lock, PVM, "zonelimit", 0);
1526af7f9b97SJeff Roberson 			else
1527af7f9b97SJeff Roberson 				goto alloc_fail;
1528af7f9b97SJeff Roberson 
1529af7f9b97SJeff Roberson 			goto new_slab;
1530af7f9b97SJeff Roberson 		}
1531af7f9b97SJeff Roberson 
153218aa2de5SJeff Roberson 		if (flags & M_NOVM)
153318aa2de5SJeff Roberson 			goto alloc_fail;
153418aa2de5SJeff Roberson 
15358355f576SJeff Roberson 		zone->uz_recurse++;
15362cc35ff9SJeff Roberson 		slab = slab_zalloc(zone, flags);
15378355f576SJeff Roberson 		zone->uz_recurse--;
15388355f576SJeff Roberson 		/*
1539af7f9b97SJeff Roberson 		 * We might not have been able to get a slab but another cpu
1540af7f9b97SJeff Roberson 		 * could have while we were unlocked.  If we did get a slab, put
1541af7f9b97SJeff Roberson 		 * it on the partially used slab list.  If not, check the free
1542af7f9b97SJeff Roberson 		 * count and restart or fail accordingly.
15438355f576SJeff Roberson 		 */
1544af7f9b97SJeff Roberson 		if (slab)
1545af7f9b97SJeff Roberson 			LIST_INSERT_HEAD(&zone->uz_part_slab, slab, us_link);
1546af7f9b97SJeff Roberson 		else if (zone->uz_free == 0)
1547af7f9b97SJeff Roberson 			goto alloc_fail;
1548a553d4b8SJeff Roberson 		else
1549af7f9b97SJeff Roberson 			goto new_slab;
15508355f576SJeff Roberson 	}
1551a553d4b8SJeff Roberson 	/*
1552a553d4b8SJeff Roberson 	 * If this is our first time through, put this bucket on the list.
1553a553d4b8SJeff Roberson 	 */
1554a553d4b8SJeff Roberson 	if (bucket != NULL && bucket->ub_ptr == -1)
1555a553d4b8SJeff Roberson 		LIST_INSERT_HEAD(&zone->uz_full_bucket,
1556a553d4b8SJeff Roberson 		    bucket, ub_link);
1557a553d4b8SJeff Roberson 
15588355f576SJeff Roberson 
15598355f576SJeff Roberson 	while (slab->us_freecount) {
15608355f576SJeff Roberson 		freei = slab->us_firstfree;
15618355f576SJeff Roberson 		slab->us_firstfree = slab->us_freelist[freei];
1562639c9550SJeff Roberson 
15638355f576SJeff Roberson 		item = slab->us_data + (zone->uz_rsize * freei);
15648355f576SJeff Roberson 
1565639c9550SJeff Roberson 		slab->us_freecount--;
1566639c9550SJeff Roberson 		zone->uz_free--;
1567639c9550SJeff Roberson #ifdef INVARIANTS
1568639c9550SJeff Roberson 		uma_dbg_alloc(zone, slab, item);
1569639c9550SJeff Roberson #endif
1570a553d4b8SJeff Roberson 		if (bucket == NULL) {
15718355f576SJeff Roberson 			zone->uz_allocs++;
15728355f576SJeff Roberson 			break;
15738355f576SJeff Roberson 		}
15748355f576SJeff Roberson 		bucket->ub_bucket[++bucket->ub_ptr] = item;
15758355f576SJeff Roberson 
15768355f576SJeff Roberson 		/* Don't overfill the bucket! */
1577a553d4b8SJeff Roberson 		if (bucket->ub_ptr == zone->uz_count)
15788355f576SJeff Roberson 			break;
15798355f576SJeff Roberson 	}
15808355f576SJeff Roberson 
15818355f576SJeff Roberson 	/* Move this slab to the full list */
15828355f576SJeff Roberson 	if (slab->us_freecount == 0) {
15838355f576SJeff Roberson 		LIST_REMOVE(slab, us_link);
15848355f576SJeff Roberson 		LIST_INSERT_HEAD(&zone->uz_full_slab, slab, us_link);
15858355f576SJeff Roberson 	}
15868355f576SJeff Roberson 
1587a553d4b8SJeff Roberson 	if (bucket != NULL) {
15888355f576SJeff Roberson 		/* Try to keep the buckets totally full, but don't block */
1589a553d4b8SJeff Roberson 		if (bucket->ub_ptr < zone->uz_count) {
15902cc35ff9SJeff Roberson 			flags |= M_NOWAIT;
15912cc35ff9SJeff Roberson 			flags &= ~M_WAITOK;
15928355f576SJeff Roberson 			goto new_slab;
1593a553d4b8SJeff Roberson 		} else
1594a553d4b8SJeff Roberson 			zone->uz_fills--;
15958355f576SJeff Roberson 	}
15968355f576SJeff Roberson 
15978355f576SJeff Roberson 	ZONE_UNLOCK(zone);
15988355f576SJeff Roberson 
15998355f576SJeff Roberson 	/* Only construct at this time if we're not filling a bucket */
16003370c5bfSJeff Roberson 	if (bucket == NULL) {
16013370c5bfSJeff Roberson 		if (zone->uz_ctor != NULL)
16028355f576SJeff Roberson 			zone->uz_ctor(item, zone->uz_size, udata);
16032cc35ff9SJeff Roberson 		if (flags & M_ZERO)
16042cc35ff9SJeff Roberson 			bzero(item, zone->uz_size);
16052cc35ff9SJeff Roberson 	}
16068355f576SJeff Roberson 
16078355f576SJeff Roberson 	return (item);
1608af7f9b97SJeff Roberson 
1609af7f9b97SJeff Roberson alloc_fail:
1610af7f9b97SJeff Roberson 	if (bucket != NULL)
1611af7f9b97SJeff Roberson 		zone->uz_fills--;
1612af7f9b97SJeff Roberson 	ZONE_UNLOCK(zone);
1613af7f9b97SJeff Roberson 
1614af7f9b97SJeff Roberson 	if (bucket != NULL && bucket->ub_ptr != -1)
1615af7f9b97SJeff Roberson 		return (bucket);
1616af7f9b97SJeff Roberson 
1617af7f9b97SJeff Roberson 	return (NULL);
16188355f576SJeff Roberson }
16198355f576SJeff Roberson 
16208355f576SJeff Roberson /* See uma.h */
16218355f576SJeff Roberson void
16228355f576SJeff Roberson uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
16238355f576SJeff Roberson {
16248355f576SJeff Roberson 	uma_cache_t cache;
16258355f576SJeff Roberson 	uma_bucket_t bucket;
16264741dcbfSJeff Roberson 	int bflags;
16278355f576SJeff Roberson 	int cpu;
16288355f576SJeff Roberson 
16298355f576SJeff Roberson 	/* This is the fast path free */
16308355f576SJeff Roberson #ifdef UMA_DEBUG_ALLOC_1
16318355f576SJeff Roberson 	printf("Freeing item %p to %s(%p)\n", item, zone->uz_name, zone);
16328355f576SJeff Roberson #endif
1633af7f9b97SJeff Roberson 	/*
1634af7f9b97SJeff Roberson 	 * The race here is acceptable.  If we miss it we'll just have to wait
1635af7f9b97SJeff Roberson 	 * a little longer for the limits to be reset.
1636af7f9b97SJeff Roberson 	 */
1637af7f9b97SJeff Roberson 
1638af7f9b97SJeff Roberson 	if (zone->uz_flags & UMA_ZFLAG_FULL)
1639af7f9b97SJeff Roberson 		goto zfree_internal;
1640af7f9b97SJeff Roberson 
1641a553d4b8SJeff Roberson zfree_restart:
16428355f576SJeff Roberson 	cpu = PCPU_GET(cpuid);
16438355f576SJeff Roberson 	CPU_LOCK(zone, cpu);
16448355f576SJeff Roberson 	cache = &zone->uz_cpu[cpu];
16458355f576SJeff Roberson 
16468355f576SJeff Roberson zfree_start:
16478355f576SJeff Roberson 	bucket = cache->uc_freebucket;
16488355f576SJeff Roberson 
16498355f576SJeff Roberson 	if (bucket) {
1650a553d4b8SJeff Roberson 		/*
1651a553d4b8SJeff Roberson 		 * Do we have room in our bucket? It is OK for this uz count
1652a553d4b8SJeff Roberson 		 * check to be slightly out of sync.
1653a553d4b8SJeff Roberson 		 */
1654a553d4b8SJeff Roberson 
1655a553d4b8SJeff Roberson 		if (bucket->ub_ptr < zone->uz_count) {
16568355f576SJeff Roberson 			bucket->ub_ptr++;
16578355f576SJeff Roberson 			KASSERT(bucket->ub_bucket[bucket->ub_ptr] == NULL,
16588355f576SJeff Roberson 			    ("uma_zfree: Freeing to non free bucket index."));
16598355f576SJeff Roberson 			bucket->ub_bucket[bucket->ub_ptr] = item;
16608355f576SJeff Roberson 			if (zone->uz_dtor)
16618355f576SJeff Roberson 				zone->uz_dtor(item, zone->uz_size, udata);
1662b9ba8931SJeff Roberson #ifdef INVARIANTS
1663b9ba8931SJeff Roberson 			if (zone->uz_flags & UMA_ZFLAG_MALLOC)
1664b9ba8931SJeff Roberson 				uma_dbg_free(zone, udata, item);
1665b9ba8931SJeff Roberson 			else
1666b9ba8931SJeff Roberson 				uma_dbg_free(zone, NULL, item);
1667b9ba8931SJeff Roberson #endif
1668605cbd6aSJeff Roberson 			CPU_UNLOCK(zone, cpu);
16698355f576SJeff Roberson 			return;
16708355f576SJeff Roberson 		} else if (cache->uc_allocbucket) {
16718355f576SJeff Roberson #ifdef UMA_DEBUG_ALLOC
16728355f576SJeff Roberson 			printf("uma_zfree: Swapping buckets.\n");
16738355f576SJeff Roberson #endif
16748355f576SJeff Roberson 			/*
16758355f576SJeff Roberson 			 * We have run out of space in our freebucket.
16768355f576SJeff Roberson 			 * See if we can switch with our alloc bucket.
16778355f576SJeff Roberson 			 */
16788355f576SJeff Roberson 			if (cache->uc_allocbucket->ub_ptr <
16798355f576SJeff Roberson 			    cache->uc_freebucket->ub_ptr) {
16808355f576SJeff Roberson 				uma_bucket_t swap;
16818355f576SJeff Roberson 
16828355f576SJeff Roberson 				swap = cache->uc_freebucket;
16838355f576SJeff Roberson 				cache->uc_freebucket = cache->uc_allocbucket;
16848355f576SJeff Roberson 				cache->uc_allocbucket = swap;
16858355f576SJeff Roberson 
16868355f576SJeff Roberson 				goto zfree_start;
16878355f576SJeff Roberson 			}
16888355f576SJeff Roberson 		}
16898355f576SJeff Roberson 	}
16908355f576SJeff Roberson 
16918355f576SJeff Roberson 	/*
1692a553d4b8SJeff Roberson 	 * We can get here for two reasons:
16938355f576SJeff Roberson 	 *
16948355f576SJeff Roberson 	 * 1) The buckets are NULL
1695a553d4b8SJeff Roberson 	 * 2) The alloc and free buckets are both somewhat full.
16968355f576SJeff Roberson 	 *
16978355f576SJeff Roberson 	 */
16988355f576SJeff Roberson 
16998355f576SJeff Roberson 	ZONE_LOCK(zone);
17008355f576SJeff Roberson 
17018355f576SJeff Roberson 	bucket = cache->uc_freebucket;
17028355f576SJeff Roberson 	cache->uc_freebucket = NULL;
17038355f576SJeff Roberson 
17048355f576SJeff Roberson 	/* Can we throw this on the zone full list? */
17058355f576SJeff Roberson 	if (bucket != NULL) {
17068355f576SJeff Roberson #ifdef UMA_DEBUG_ALLOC
17078355f576SJeff Roberson 		printf("uma_zfree: Putting old bucket on the free list.\n");
17088355f576SJeff Roberson #endif
17098355f576SJeff Roberson 		/* ub_ptr is pointing to the last free item */
17108355f576SJeff Roberson 		KASSERT(bucket->ub_ptr != -1,
17118355f576SJeff Roberson 		    ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n"));
17128355f576SJeff Roberson 		LIST_INSERT_HEAD(&zone->uz_full_bucket,
17138355f576SJeff Roberson 		    bucket, ub_link);
17148355f576SJeff Roberson 	}
1715a553d4b8SJeff Roberson 	if ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
1716a553d4b8SJeff Roberson 		LIST_REMOVE(bucket, ub_link);
1717a553d4b8SJeff Roberson 		ZONE_UNLOCK(zone);
1718a553d4b8SJeff Roberson 		cache->uc_freebucket = bucket;
1719a553d4b8SJeff Roberson 		goto zfree_start;
1720a553d4b8SJeff Roberson 	}
1721a553d4b8SJeff Roberson 	/* We're done with this CPU now */
1722a553d4b8SJeff Roberson 	CPU_UNLOCK(zone, cpu);
1723a553d4b8SJeff Roberson 
1724a553d4b8SJeff Roberson 	/* And the zone.. */
1725a553d4b8SJeff Roberson 	ZONE_UNLOCK(zone);
1726a553d4b8SJeff Roberson 
17278355f576SJeff Roberson #ifdef UMA_DEBUG_ALLOC
17288355f576SJeff Roberson 	printf("uma_zfree: Allocating new free bucket.\n");
17298355f576SJeff Roberson #endif
17304741dcbfSJeff Roberson 	bflags = M_NOWAIT;
17314741dcbfSJeff Roberson 
17324741dcbfSJeff Roberson 	if (zone->uz_flags & UMA_ZFLAG_BUCKETCACHE)
17334741dcbfSJeff Roberson 		bflags |= M_NOVM;
17348355f576SJeff Roberson #ifdef INVARIANTS
17354741dcbfSJeff Roberson 	bflags |= M_ZERO;
17368355f576SJeff Roberson #endif
17374741dcbfSJeff Roberson 	bucket = uma_zalloc_internal(bucketzone,
17384741dcbfSJeff Roberson 	    NULL, bflags, NULL);
17394741dcbfSJeff Roberson 	if (bucket) {
17408355f576SJeff Roberson 		bucket->ub_ptr = -1;
1741a553d4b8SJeff Roberson 		ZONE_LOCK(zone);
1742a553d4b8SJeff Roberson 		LIST_INSERT_HEAD(&zone->uz_free_bucket,
1743a553d4b8SJeff Roberson 		    bucket, ub_link);
17448355f576SJeff Roberson 		ZONE_UNLOCK(zone);
1745a553d4b8SJeff Roberson 		goto zfree_restart;
17468355f576SJeff Roberson 	}
17478355f576SJeff Roberson 
1748a553d4b8SJeff Roberson 	/*
1749a553d4b8SJeff Roberson 	 * If nothing else caught this, we'll just do an internal free.
1750a553d4b8SJeff Roberson 	 */
17518355f576SJeff Roberson 
1752af7f9b97SJeff Roberson zfree_internal:
1753af7f9b97SJeff Roberson 
17548355f576SJeff Roberson 	uma_zfree_internal(zone, item, udata, 0);
17558355f576SJeff Roberson 
17568355f576SJeff Roberson 	return;
17578355f576SJeff Roberson 
17588355f576SJeff Roberson }
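
/*
 * The matching free, again via the wrappers in uma.h (hypothetical
 * names from the sketch above):
 *
 *	uma_zfree(foo_zone, fp);
 *
 * The udata argument of uma_zfree_arg() is passed straight through to
 * the zone's dtor, so zones without a dtor may simply pass NULL.
 */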
17598355f576SJeff Roberson 
17608355f576SJeff Roberson /*
17618355f576SJeff Roberson  * Frees an item to an INTERNAL zone or allocates a free bucket
17628355f576SJeff Roberson  *
17638355f576SJeff Roberson  * Arguments:
17648355f576SJeff Roberson  *	zone   The zone to free to
17658355f576SJeff Roberson  *	item   The item we're freeing
17668355f576SJeff Roberson  *	udata  User supplied data for the dtor
17678355f576SJeff Roberson  *	skip   Skip the dtor, it was done in uma_zfree_arg
17688355f576SJeff Roberson  */
17698355f576SJeff Roberson 
17708355f576SJeff Roberson static void
17718355f576SJeff Roberson uma_zfree_internal(uma_zone_t zone, void *item, void *udata, int skip)
17728355f576SJeff Roberson {
17738355f576SJeff Roberson 	uma_slab_t slab;
17748355f576SJeff Roberson 	u_int8_t *mem;
17758355f576SJeff Roberson 	u_int8_t freei;
17768355f576SJeff Roberson 
17778355f576SJeff Roberson 	ZONE_LOCK(zone);
17788355f576SJeff Roberson 
17798355f576SJeff Roberson 	if (!(zone->uz_flags & UMA_ZFLAG_MALLOC)) {
17808355f576SJeff Roberson 		mem = (u_int8_t *)((unsigned long)item & (~UMA_SLAB_MASK));
178199571dc3SJeff Roberson 		if (zone->uz_flags & UMA_ZFLAG_HASH)
17828355f576SJeff Roberson 			slab = hash_sfind(&zone->uz_hash, mem);
17838355f576SJeff Roberson 		else {
17848355f576SJeff Roberson 			mem += zone->uz_pgoff;
17858355f576SJeff Roberson 			slab = (uma_slab_t)mem;
17868355f576SJeff Roberson 		}
17878355f576SJeff Roberson 	} else {
17888355f576SJeff Roberson 		slab = (uma_slab_t)udata;
17898355f576SJeff Roberson 	}
17908355f576SJeff Roberson 
17918355f576SJeff Roberson 	/* Do we need to remove from any lists? */
17928355f576SJeff Roberson 	if (slab->us_freecount+1 == zone->uz_ipers) {
17938355f576SJeff Roberson 		LIST_REMOVE(slab, us_link);
17948355f576SJeff Roberson 		LIST_INSERT_HEAD(&zone->uz_free_slab, slab, us_link);
17958355f576SJeff Roberson 	} else if (slab->us_freecount == 0) {
17968355f576SJeff Roberson 		LIST_REMOVE(slab, us_link);
17978355f576SJeff Roberson 		LIST_INSERT_HEAD(&zone->uz_part_slab, slab, us_link);
17988355f576SJeff Roberson 	}
17998355f576SJeff Roberson 
18008355f576SJeff Roberson 	/* Compute this item's index and return it to the slab's free list */
18018355f576SJeff Roberson 	freei = ((unsigned long)item - (unsigned long)slab->us_data)
18028355f576SJeff Roberson 		/ zone->uz_rsize;
18038355f576SJeff Roberson 
1804639c9550SJeff Roberson #ifdef INVARIANTS
1805639c9550SJeff Roberson 	if (!skip)
1806639c9550SJeff Roberson 		uma_dbg_free(zone, slab, item);
18078355f576SJeff Roberson #endif
1808639c9550SJeff Roberson 
18098355f576SJeff Roberson 	slab->us_freelist[freei] = slab->us_firstfree;
18108355f576SJeff Roberson 	slab->us_firstfree = freei;
18118355f576SJeff Roberson 	slab->us_freecount++;
18128355f576SJeff Roberson 
18138355f576SJeff Roberson 	/* Zone statistics */
18148355f576SJeff Roberson 	zone->uz_free++;
18158355f576SJeff Roberson 
18168355f576SJeff Roberson 	if (!skip && zone->uz_dtor)
18178355f576SJeff Roberson 		zone->uz_dtor(item, zone->uz_size, udata);
1818605cbd6aSJeff Roberson 
1819af7f9b97SJeff Roberson 	if (zone->uz_flags & UMA_ZFLAG_FULL) {
1820af7f9b97SJeff Roberson 		if (zone->uz_pages < zone->uz_maxpages)
1821af7f9b97SJeff Roberson 			zone->uz_flags &= ~UMA_ZFLAG_FULL;
1822af7f9b97SJeff Roberson 
1823af7f9b97SJeff Roberson 		/* We can handle one more allocation */
1824af7f9b97SJeff Roberson 		wakeup_one(&zone);
1825af7f9b97SJeff Roberson 	}
1826af7f9b97SJeff Roberson 
1827605cbd6aSJeff Roberson 	ZONE_UNLOCK(zone);
18288355f576SJeff Roberson }
18298355f576SJeff Roberson 
18308355f576SJeff Roberson /* See uma.h */
18318355f576SJeff Roberson void
1832736ee590SJeff Roberson uma_zone_set_max(uma_zone_t zone, int nitems)
1833736ee590SJeff Roberson {
1834736ee590SJeff Roberson 	ZONE_LOCK(zone);
1835736ee590SJeff Roberson 	if (zone->uz_ppera > 1)
1836af7f9b97SJeff Roberson 		zone->uz_maxpages = nitems * zone->uz_ppera;
1837736ee590SJeff Roberson 	else
1838736ee590SJeff Roberson 		zone->uz_maxpages = nitems / zone->uz_ipers;
183928bc4419SJeff Roberson 
1840d4d6aee5SAndrew R. Reiter 	if (zone->uz_maxpages * zone->uz_ipers < nitems)
1841d4d6aee5SAndrew R. Reiter 		zone->uz_maxpages++;
184228bc4419SJeff Roberson 
1843736ee590SJeff Roberson 	ZONE_UNLOCK(zone);
1844736ee590SJeff Roberson }
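
/*
 * A worked example of the rounding in uma_zone_set_max() (illustrative
 * numbers): with uz_ppera == 1, uz_ipers == 100, and nitems == 150, the
 * division yields uz_maxpages == 1; since 1 * 100 < 150 the limit is
 * bumped to 2 pages, so the cap rounds up to a whole slab (200 items)
 * rather than silently down to 100.
 */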
1845736ee590SJeff Roberson 
1846736ee590SJeff Roberson /* See uma.h */
1847736ee590SJeff Roberson void
18488355f576SJeff Roberson uma_zone_set_freef(uma_zone_t zone, uma_free freef)
18498355f576SJeff Roberson {
18508355f576SJeff Roberson 	ZONE_LOCK(zone);
18518355f576SJeff Roberson 
18528355f576SJeff Roberson 	zone->uz_freef = freef;
18538355f576SJeff Roberson 
18548355f576SJeff Roberson 	ZONE_UNLOCK(zone);
18558355f576SJeff Roberson }
18568355f576SJeff Roberson 
18578355f576SJeff Roberson /* See uma.h */
18588355f576SJeff Roberson void
18598355f576SJeff Roberson uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
18608355f576SJeff Roberson {
18618355f576SJeff Roberson 	ZONE_LOCK(zone);
18628355f576SJeff Roberson 
18638355f576SJeff Roberson 	zone->uz_flags |= UMA_ZFLAG_PRIVALLOC;
18648355f576SJeff Roberson 	zone->uz_allocf = allocf;
18658355f576SJeff Roberson 
18668355f576SJeff Roberson 	ZONE_UNLOCK(zone);
18678355f576SJeff Roberson }
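
/*
 * A sketch of a custom back-end allocator (hypothetical; it simply
 * mirrors this file's own page_alloc()):
 *
 *	static void *
 *	foo_page_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
 *	{
 *		*pflag = UMA_SLAB_KMEM;
 *		return ((void *)kmem_malloc(kmem_map, bytes, wait));
 *	}
 *
 *	uma_zone_set_allocf(foo_zone, foo_page_alloc);
 */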
18688355f576SJeff Roberson 
18698355f576SJeff Roberson /* See uma.h */
18708355f576SJeff Roberson int
18718355f576SJeff Roberson uma_zone_set_obj(uma_zone_t zone, struct vm_object *obj, int count)
18728355f576SJeff Roberson {
18738355f576SJeff Roberson 	int pages;
18748355f576SJeff Roberson 	vm_offset_t kva;
18758355f576SJeff Roberson 
18768355f576SJeff Roberson 	mtx_lock(&Giant);
18778355f576SJeff Roberson 
18788355f576SJeff Roberson 	pages = count / zone->uz_ipers;
18798355f576SJeff Roberson 
18808355f576SJeff Roberson 	if (pages * zone->uz_ipers < count)
18818355f576SJeff Roberson 		pages++;
1882a553d4b8SJeff Roberson 
18838355f576SJeff Roberson 	kva = kmem_alloc_pageable(kernel_map, pages * UMA_SLAB_SIZE);
18848355f576SJeff Roberson 
1885a553d4b8SJeff Roberson 	if (kva == 0) {
1886a553d4b8SJeff Roberson 		mtx_unlock(&Giant);
18878355f576SJeff Roberson 		return (0);
18888355f576SJeff Roberson 	}
18898355f576SJeff Roberson 
18908355f576SJeff Roberson 
1891a553d4b8SJeff Roberson 	if (obj == NULL)
1892a553d4b8SJeff Roberson 		obj = vm_object_allocate(OBJT_DEFAULT,
1893c7173f58SJeff Roberson 		    pages);
18948355f576SJeff Roberson 	else
18958355f576SJeff Roberson 		_vm_object_allocate(OBJT_DEFAULT,
1896c7173f58SJeff Roberson 		    pages, obj);
1897a553d4b8SJeff Roberson 
1898a553d4b8SJeff Roberson 	ZONE_LOCK(zone);
1899a553d4b8SJeff Roberson 	zone->uz_kva = kva;
1900a553d4b8SJeff Roberson 	zone->uz_obj = obj;
1901a553d4b8SJeff Roberson 	zone->uz_maxpages = pages;
19028355f576SJeff Roberson 
19038355f576SJeff Roberson 	zone->uz_allocf = obj_alloc;
19048355f576SJeff Roberson 	zone->uz_flags |= UMA_ZFLAG_NOFREE | UMA_ZFLAG_PRIVALLOC;
19058355f576SJeff Roberson 
19068355f576SJeff Roberson 	ZONE_UNLOCK(zone);
1907a553d4b8SJeff Roberson 	mtx_unlock(&Giant);
19088355f576SJeff Roberson 
19098355f576SJeff Roberson 	return (1);
19108355f576SJeff Roberson }
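
/*
 * A usage sketch (hypothetical names; this is roughly how the kernel
 * map entry zones use the interface):
 *
 *	static struct vm_object foo_obj;
 *
 *	if (uma_zone_set_obj(foo_zone, &foo_obj, FOO_MAX) == 0)
 *		panic("foo: cannot reserve kva");
 *
 * On success the zone allocates from the reserved kva via obj_alloc()
 * and never returns pages to the system (UMA_ZFLAG_NOFREE).
 */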
19118355f576SJeff Roberson 
19128355f576SJeff Roberson /* See uma.h */
19138355f576SJeff Roberson void
19148355f576SJeff Roberson uma_prealloc(uma_zone_t zone, int items)
19158355f576SJeff Roberson {
19168355f576SJeff Roberson 	int slabs;
19178355f576SJeff Roberson 	uma_slab_t slab;
19188355f576SJeff Roberson 
19198355f576SJeff Roberson 	ZONE_LOCK(zone);
19208355f576SJeff Roberson 	slabs = items / zone->uz_ipers;
19218355f576SJeff Roberson 	if (slabs * zone->uz_ipers < items)
19228355f576SJeff Roberson 		slabs++;
19238355f576SJeff Roberson 
19248355f576SJeff Roberson 	while (slabs > 0) {
19258355f576SJeff Roberson 		slab = slab_zalloc(zone, M_WAITOK);
19268355f576SJeff Roberson 		LIST_INSERT_HEAD(&zone->uz_free_slab, slab, us_link);
19278355f576SJeff Roberson 		slabs--;
19288355f576SJeff Roberson 	}
19298355f576SJeff Roberson 	ZONE_UNLOCK(zone);
19308355f576SJeff Roberson }
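
/*
 * Usage sketch (illustrative count): warm a zone up front so that the
 * first allocations are served from the preallocated free slabs:
 *
 *	uma_prealloc(foo_zone, 256);
 *
 * This always sleeps (slab_zalloc() is called with M_WAITOK), so it may
 * only be used from a context that is allowed to do so.
 */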
19318355f576SJeff Roberson 
19328355f576SJeff Roberson /* See uma.h */
19338355f576SJeff Roberson void
19348355f576SJeff Roberson uma_reclaim(void)
19358355f576SJeff Roberson {
19368355f576SJeff Roberson 	/*
19378355f576SJeff Roberson 	 * You might think that the delay below would improve performance, since
19388355f576SJeff Roberson 	 * without it the allocator gives away memory that it may ask for again
19398355f576SJeff Roberson 	 * almost immediately.  Really, it makes things worse, since cpu cycles
19408355f576SJeff Roberson 	 * are so much cheaper than disk activity.
19418355f576SJeff Roberson 	 */
19428355f576SJeff Roberson #if 0
19438355f576SJeff Roberson 	static struct timeval tv = {0};
19448355f576SJeff Roberson 	struct timeval now;
19458355f576SJeff Roberson 	getmicrouptime(&now);
19468355f576SJeff Roberson 	if (now.tv_sec > tv.tv_sec + 30)
19478355f576SJeff Roberson 		tv = now;
19488355f576SJeff Roberson 	else
19498355f576SJeff Roberson 		return;
19508355f576SJeff Roberson #endif
19518355f576SJeff Roberson #ifdef UMA_DEBUG
19528355f576SJeff Roberson 	printf("UMA: vm asked us to release pages!\n");
19538355f576SJeff Roberson #endif
195486bbae32SJeff Roberson 	bucket_enable();
19558355f576SJeff Roberson 	zone_foreach(zone_drain);
19568355f576SJeff Roberson 
19578355f576SJeff Roberson 	/*
19588355f576SJeff Roberson 	 * Some slabs may have been freed, but the slab zone is visited early
19598355f576SJeff Roberson 	 * in the loop above; visit it again so we can free pages that became
19608355f576SJeff Roberson 	 * empty once the other zones drained.  We have to do the same for buckets.
19618355f576SJeff Roberson 	 */
19628355f576SJeff Roberson 	zone_drain(slabzone);
19638355f576SJeff Roberson 	zone_drain(bucketzone);
19648355f576SJeff Roberson }
19658355f576SJeff Roberson 
19668355f576SJeff Roberson void *
19678355f576SJeff Roberson uma_large_malloc(int size, int wait)
19688355f576SJeff Roberson {
19698355f576SJeff Roberson 	void *mem;
19708355f576SJeff Roberson 	uma_slab_t slab;
19718355f576SJeff Roberson 	u_int8_t flags;
19728355f576SJeff Roberson 
1973a553d4b8SJeff Roberson 	slab = uma_zalloc_internal(slabzone, NULL, wait, NULL);
19748355f576SJeff Roberson 	if (slab == NULL)
19758355f576SJeff Roberson 		return (NULL);
19768355f576SJeff Roberson 
19778355f576SJeff Roberson 	mem = page_alloc(NULL, size, &flags, wait);
19788355f576SJeff Roberson 	if (mem) {
197999571dc3SJeff Roberson 		vsetslab((vm_offset_t)mem, slab);
19808355f576SJeff Roberson 		slab->us_data = mem;
19818355f576SJeff Roberson 		slab->us_flags = flags | UMA_SLAB_MALLOC;
19828355f576SJeff Roberson 		slab->us_size = size;
19838355f576SJeff Roberson 	} else {
19848355f576SJeff Roberson 		uma_zfree_internal(slabzone, slab, NULL, 0);
19858355f576SJeff Roberson 	}
19868355f576SJeff Roberson 
19878355f576SJeff Roberson 
19888355f576SJeff Roberson 	return (mem);
19898355f576SJeff Roberson }
19908355f576SJeff Roberson 
19918355f576SJeff Roberson void
19928355f576SJeff Roberson uma_large_free(uma_slab_t slab)
19938355f576SJeff Roberson {
199499571dc3SJeff Roberson 	vsetobj((vm_offset_t)slab->us_data, kmem_object);
19958355f576SJeff Roberson 	page_free(slab->us_data, slab->us_size, slab->us_flags);
19968355f576SJeff Roberson 	uma_zfree_internal(slabzone, slab, NULL, 0);
19978355f576SJeff Roberson }
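
/*
 * These two form the back end that malloc(9) uses for requests too
 * large for its fixed-size malloc zones; a hedged sketch of the pairing:
 *
 *	mem = uma_large_malloc(size, M_WAITOK);
 *	...
 *	uma_large_free(slab);
 *
 * where free(9) recovers the slab pointer from the page containing mem.
 * The header lives in slabzone and records us_size, which is how the
 * right number of pages gets handed back to page_free().
 */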
19988355f576SJeff Roberson 
19998355f576SJeff Roberson void
20008355f576SJeff Roberson uma_print_stats(void)
20018355f576SJeff Roberson {
20028355f576SJeff Roberson 	zone_foreach(uma_print_zone);
20038355f576SJeff Roberson }
20048355f576SJeff Roberson 
20058355f576SJeff Roberson void
20068355f576SJeff Roberson uma_print_zone(uma_zone_t zone)
20078355f576SJeff Roberson {
20088355f576SJeff Roberson 	printf("%s(%p) size %d(%d) flags %d ipers %d ppera %d out %d free %d\n",
20098355f576SJeff Roberson 	    zone->uz_name, zone, zone->uz_size, zone->uz_rsize, zone->uz_flags,
20108355f576SJeff Roberson 	    zone->uz_ipers, zone->uz_ppera,
20118355f576SJeff Roberson 	    (zone->uz_ipers * zone->uz_pages) - zone->uz_free, zone->uz_free);
20128355f576SJeff Roberson }
20138355f576SJeff Roberson 
20148355f576SJeff Roberson /*
20158355f576SJeff Roberson  * Sysctl handler for vm.zone
20168355f576SJeff Roberson  *
20178355f576SJeff Roberson  * stolen from vm_zone.c
20188355f576SJeff Roberson  */
20198355f576SJeff Roberson static int
20208355f576SJeff Roberson sysctl_vm_zone(SYSCTL_HANDLER_ARGS)
20218355f576SJeff Roberson {
20228355f576SJeff Roberson 	int error, len, cnt;
20238355f576SJeff Roberson 	const int linesize = 128;	/* conservative */
20248355f576SJeff Roberson 	int totalfree;
20258355f576SJeff Roberson 	char *tmpbuf, *offset;
20268355f576SJeff Roberson 	uma_zone_t z;
20278355f576SJeff Roberson 	char *p;
20288355f576SJeff Roberson 
20298355f576SJeff Roberson 	cnt = 0;
20300da47b2fSJeff Roberson 	mtx_lock(&uma_mtx);
20318355f576SJeff Roberson 	LIST_FOREACH(z, &uma_zones, uz_link)
20328355f576SJeff Roberson 		cnt++;
20330da47b2fSJeff Roberson 	mtx_unlock(&uma_mtx);
20348355f576SJeff Roberson 	MALLOC(tmpbuf, char *, (cnt == 0 ? 1 : cnt) * linesize,
20358355f576SJeff Roberson 			M_TEMP, M_WAITOK);
20368355f576SJeff Roberson 	len = snprintf(tmpbuf, linesize,
20378355f576SJeff Roberson 	    "\nITEM            SIZE     LIMIT     USED    FREE  REQUESTS\n\n");
20388355f576SJeff Roberson 	if (cnt == 0)
20398355f576SJeff Roberson 		tmpbuf[len - 1] = '\0';
20408355f576SJeff Roberson 	error = SYSCTL_OUT(req, tmpbuf, cnt == 0 ? len-1 : len);
20418355f576SJeff Roberson 	if (error || cnt == 0)
20428355f576SJeff Roberson 		goto out;
20438355f576SJeff Roberson 	offset = tmpbuf;
2044f4af24d5SJeff Roberson 	mtx_lock(&uma_mtx);
20458355f576SJeff Roberson 	LIST_FOREACH(z, &uma_zones, uz_link) {
20468355f576SJeff Roberson 		if (cnt == 0)	/* list may have changed size */
20478355f576SJeff Roberson 			break;
20488355f576SJeff Roberson 		ZONE_LOCK(z);
20498355f576SJeff Roberson 		totalfree = z->uz_free + z->uz_cachefree;
20508355f576SJeff Roberson 		len = snprintf(offset, linesize,
20518355f576SJeff Roberson 		    "%-12.12s  %6.6u, %8.8u, %6.6u, %6.6u, %8.8llu\n",
20528355f576SJeff Roberson 		    z->uz_name, z->uz_size,
20538355f576SJeff Roberson 		    z->uz_maxpages * z->uz_ipers,
20548355f576SJeff Roberson 		    (z->uz_ipers * (z->uz_pages / z->uz_ppera)) - totalfree,
20558355f576SJeff Roberson 		    totalfree,
20568355f576SJeff Roberson 		    (unsigned long long)z->uz_allocs);
20578355f576SJeff Roberson 		ZONE_UNLOCK(z);
20588355f576SJeff Roberson 		for (p = offset + 12; p > offset && *p == ' '; --p)
20598355f576SJeff Roberson 			/* nothing */ ;
20608355f576SJeff Roberson 		p[1] = ':';
20618355f576SJeff Roberson 		cnt--;
20628355f576SJeff Roberson 		offset += len;
20638355f576SJeff Roberson 	}
2064f4af24d5SJeff Roberson 	mtx_unlock(&uma_mtx);
20658355f576SJeff Roberson 	*offset++ = '\0';
20668355f576SJeff Roberson 	error = SYSCTL_OUT(req, tmpbuf, offset - tmpbuf);
20678355f576SJeff Roberson out:
20688355f576SJeff Roberson 	FREE(tmpbuf, M_TEMP);
20698355f576SJeff Roberson 	return (error);
20708355f576SJeff Roberson }
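
/*
 * Sample vm.zone output produced by the handler above (values are
 * illustrative; the %6.6u/%8.8u conversions zero-pad each column):
 *
 *	ITEM            SIZE     LIMIT     USED    FREE  REQUESTS
 *
 *	UMA Zones:    000416, 00000000, 000026, 000004, 00000030
 */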
2071