/*
 * Copyright (c) 2002, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uma_core.c  Implementation of the Universal Memory allocator
 *
 * This allocator is intended to replace the multitude of similar object caches
 * in the standard FreeBSD kernel.  The intent is to be flexible as well as
 * efficient.  A primary design goal is to return unused memory to the rest of
 * the system.  This will make the system as a whole more flexible due to the
 * ability to move memory to subsystems which most need it instead of leaving
 * pools of reserved memory unused.
 *
 * The basic ideas stem from similar slab/zone based allocators whose algorithms
 * are well known.
 */

/*
 * TODO:
 *	- Improve memory usage for large allocations
 *	- Investigate cache size adjustments
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/* I should really use ktr.. */
/*
#define UMA_DEBUG 1
#define UMA_DEBUG_ALLOC 1
#define UMA_DEBUG_ALLOC_1 1
*/

#include "opt_param.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/vmmeter.h>
#include <sys/mbuf.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

#include <machine/vmparam.h>

/*
 * This is the zone from which all zones are spawned.  The idea is that even
 * the zone heads are allocated from the allocator, so we use the bss section
 * to bootstrap us.
 */
static struct uma_zone masterzone;
static uma_zone_t zones = &masterzone;

/* This is the zone from which all of uma_slab_t's are allocated. */
static uma_zone_t slabzone;

/*
 * The initial hash tables come out of this zone so they can be allocated
 * prior to malloc coming up.
 */
static uma_zone_t hashzone;

/*
 * Are we allowed to allocate buckets?
 */
static int bucketdisable = 1;

/* Linked list of all zones in the system */
static LIST_HEAD(,uma_zone) uma_zones = LIST_HEAD_INITIALIZER(&uma_zones);

/* This mutex protects the zone list */
static struct mtx uma_mtx;

/* These are the pcpu cache locks */
static struct mtx uma_pcpu_mtx[MAXCPU];

/* Linked list of boot time pages */
static LIST_HEAD(,uma_slab) uma_boot_pages =
    LIST_HEAD_INITIALIZER(&uma_boot_pages);

/* Count of free boottime pages */
static int uma_boot_free = 0;

/* Is the VM done starting up? */
static int booted = 0;

/* This is the handle used to schedule our working set calculator */
static struct callout uma_callout;

/* This is mp_maxid + 1, for use while looping over each cpu */
static int maxcpu;

/*
 * This structure is passed as the zone ctor arg so that I don't have to create
 * a special allocation function just for zones.
 */
struct uma_zctor_args {
	char *name;
	size_t size;
	uma_ctor ctor;
	uma_dtor dtor;
	uma_init uminit;
	uma_fini fini;
	int align;
	u_int16_t flags;
};

struct uma_bucket_zone {
	uma_zone_t	ubz_zone;
	char		*ubz_name;
	int		ubz_entries;
};

#define	BUCKET_MAX	128

struct uma_bucket_zone bucket_zones[] = {
	{ NULL, "16 Bucket", 16 },
	{ NULL, "32 Bucket", 32 },
	{ NULL, "64 Bucket", 64 },
	{ NULL, "128 Bucket", 128 },
	{ NULL, NULL, 0}
};

#define	BUCKET_SHIFT	4
#define	BUCKET_ZONES	((BUCKET_MAX >> BUCKET_SHIFT) + 1)

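/*
 * bucket_size[] maps a requested entry count, in units of (1 << BUCKET_SHIFT),
 * to the index in bucket_zones[] of the smallest bucket zone that can hold it.
 * It is filled in by bucket_init() and consulted by bucket_alloc()/bucket_free().
 */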
uint8_t bucket_size[BUCKET_ZONES];

/* Prototypes.. */

static void *obj_alloc(uma_zone_t, int, u_int8_t *, int);
static void *page_alloc(uma_zone_t, int, u_int8_t *, int);
static void page_free(void *, int, u_int8_t);
static uma_slab_t slab_zalloc(uma_zone_t, int);
static void cache_drain(uma_zone_t, int);
static void bucket_drain(uma_zone_t, uma_bucket_t);
static void zone_drain_common(uma_zone_t, int);
static void zone_ctor(void *, int, void *);
static void zone_dtor(void *, int, void *);
static void zero_init(void *, int);
static void zone_small_init(uma_zone_t zone);
static void zone_large_init(uma_zone_t zone);
static void zone_foreach(void (*zfunc)(uma_zone_t));
static void zone_timeout(uma_zone_t zone);
static int hash_alloc(struct uma_hash *);
static int hash_expand(struct uma_hash *, struct uma_hash *);
static void hash_free(struct uma_hash *hash);
static void uma_timeout(void *);
static void uma_startup3(void);
static void *uma_zalloc_internal(uma_zone_t, void *, int);
static void uma_zfree_internal(uma_zone_t, void *, void *, int);
static void bucket_enable(void);
static void bucket_init(void);
static uma_bucket_t bucket_alloc(int, int);
static void bucket_free(uma_bucket_t);
static void bucket_zone_drain(void);
static int uma_zalloc_bucket(uma_zone_t zone, int flags);
static uma_slab_t uma_zone_slab(uma_zone_t zone, int flags);
static void *uma_slab_alloc(uma_zone_t zone, uma_slab_t slab);
static __inline void zone_drain(uma_zone_t);

void uma_print_zone(uma_zone_t);
void uma_print_stats(void);
static int sysctl_vm_zone(SYSCTL_HANDLER_ARGS);

SYSCTL_OID(_vm, OID_AUTO, zone, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, sysctl_vm_zone, "A", "Zone Info");
SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);

/*
 * This routine checks to see whether or not it's safe to enable buckets.
 */

static void
bucket_enable(void)
{
	if (cnt.v_free_count < cnt.v_free_min)
		bucketdisable = 1;
	else
		bucketdisable = 0;
}

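/*
 * Create the zones that back each bucket size and fill in the bucket_size[]
 * lookup table that maps entry counts to those zones.
 */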
static void
bucket_init(void)
{
	struct uma_bucket_zone *ubz;
	int i;
	int j;

	for (i = 0, j = 0; bucket_zones[j].ubz_entries != 0; j++) {
		int size;

		ubz = &bucket_zones[j];
		size = roundup(sizeof(struct uma_bucket), sizeof(void *));
		size += sizeof(void *) * ubz->ubz_entries;
		ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
		    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_INTERNAL);
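		/* Map each entry count up through this zone's size to it */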
		for (; i <= ubz->ubz_entries; i += (1 << BUCKET_SHIFT))
			bucket_size[i >> BUCKET_SHIFT] = j;
	}
}

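/*
 * Allocate a bucket with room for at least 'entries' items from the smallest
 * suitable bucket zone.  Returns NULL if buckets are disabled or the
 * allocation fails.
 */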
static uma_bucket_t
bucket_alloc(int entries, int bflags)
{
	struct uma_bucket_zone *ubz;
	uma_bucket_t bucket;
	int idx;

	/*
	 * This is to stop us from allocating per cpu buckets while we're
	 * running out of UMA_BOOT_PAGES.  Otherwise, we would exhaust the
	 * boot pages.  This also prevents us from allocating buckets in
	 * low memory situations.
	 */

	if (bucketdisable)
		return (NULL);
	idx = howmany(entries, 1 << BUCKET_SHIFT);
	ubz = &bucket_zones[bucket_size[idx]];
	bucket = uma_zalloc_internal(ubz->ubz_zone, NULL, bflags);
	if (bucket) {
#ifdef INVARIANTS
		bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
#endif
		bucket->ub_cnt = 0;
		bucket->ub_entries = ubz->ubz_entries;
	}

	return (bucket);
}

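/*
 * Return a bucket to the bucket zone it was allocated from.
 */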
static void
bucket_free(uma_bucket_t bucket)
{
	struct uma_bucket_zone *ubz;
	int idx;

	idx = howmany(bucket->ub_entries, 1 << BUCKET_SHIFT);
	ubz = &bucket_zones[bucket_size[idx]];
	uma_zfree_internal(ubz->ubz_zone, bucket, NULL, 0);
}

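/*
 * Drain each of the per-size bucket zones.
 */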
static void
bucket_zone_drain(void)
{
	struct uma_bucket_zone *ubz;

	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
		zone_drain(ubz->ubz_zone);
}

/*
 * Routine called by timeout which is used to fire off some time interval
 * based calculations (working set, stats, etc.).
 *
 * Arguments:
 *	arg   Unused
 *
 * Returns:
 *	Nothing
 */
static void
uma_timeout(void *unused)
{
	bucket_enable();
	zone_foreach(zone_timeout);

	/* Reschedule this event */
	callout_reset(&uma_callout, UMA_WORKING_TIME * hz, uma_timeout, NULL);
}

/*
 * Routine to perform timeout driven calculations.  This does the working set
 * calculation, hash expansion, and per cpu statistics aggregation.
 *
 *  Arguments:
 *	zone  The zone to operate on
 *
 *  Returns:
 *	Nothing
 */
static void
zone_timeout(uma_zone_t zone)
{
	uma_cache_t cache;
	u_int64_t alloc;
	int cpu;

	alloc = 0;

	/*
	 * Aggregate per cpu cache statistics back to the zone.
	 *
	 * I may rewrite this to set a flag in the per cpu cache instead of
	 * locking.  If the flag is not cleared on the next round I will have
	 * to lock and do it here instead so that the statistics don't get too
	 * far out of sync.
	 */
	if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL)) {
		for (cpu = 0; cpu < maxcpu; cpu++) {
			if (CPU_ABSENT(cpu))
				continue;
			CPU_LOCK(cpu);
			cache = &zone->uz_cpu[cpu];
			/* Add them up, and reset */
			alloc += cache->uc_allocs;
			cache->uc_allocs = 0;
			CPU_UNLOCK(cpu);
		}
	}

	/* Now push these stats back into the zone.. */
	ZONE_LOCK(zone);
	zone->uz_allocs += alloc;

	/*
	 * Expand the zone hash table.
	 *
	 * This is done if the number of slabs is larger than the hash size.
	 * What I'm trying to do here is avoid collisions entirely.  This
	 * may be a little aggressive.  Should I allow for two collisions max?
	 */

	if (zone->uz_flags & UMA_ZFLAG_HASH &&
	    zone->uz_pages / zone->uz_ppera >= zone->uz_hash.uh_hashsize) {
		struct uma_hash newhash;
		struct uma_hash oldhash;
		int ret;

		/*
		 * This is so involved because allocating and freeing
		 * while the zone lock is held will lead to deadlock.
		 * I have to do everything in stages and check for
		 * races.
		 */
		newhash = zone->uz_hash;
		ZONE_UNLOCK(zone);
		ret = hash_alloc(&newhash);
		ZONE_LOCK(zone);
		if (ret) {
			if (hash_expand(&zone->uz_hash, &newhash)) {
				oldhash = zone->uz_hash;
				zone->uz_hash = newhash;
			} else
				oldhash = newhash;

			ZONE_UNLOCK(zone);
			hash_free(&oldhash);
			ZONE_LOCK(zone);
		}
	}

	/*
	 * Here we compute the working set size as the total number of items
	 * left outstanding since the last time interval.  This is slightly
	 * suboptimal. What we really want is the highest number of outstanding
	 * items during the last time quantum.  This should be close enough.
	 *
	 * The working set size is used to throttle the zone_drain function.
	 * We don't want to return memory that we may need again immediately.
	 */
	alloc = zone->uz_allocs - zone->uz_oallocs;
	zone->uz_oallocs = zone->uz_allocs;
	zone->uz_wssize = alloc;

	ZONE_UNLOCK(zone);
}

/*
 * Allocate and zero fill the next sized hash table from the appropriate
 * backing store.
 *
 * Arguments:
 *	hash  A new hash structure with the old hash size in uh_hashsize
 *
 * Returns:
 *	1 on success and 0 on failure.
 */
static int
hash_alloc(struct uma_hash *hash)
{
	int oldsize;
	int alloc;

	oldsize = hash->uh_hashsize;

	/* We're just going to go to a power of two greater */
	if (oldsize) {
		hash->uh_hashsize = oldsize * 2;
		alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
		/* XXX Shouldn't be abusing DEVBUF here */
		hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
		    M_DEVBUF, M_NOWAIT);
	} else {
		alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
		hash->uh_slab_hash = uma_zalloc_internal(hashzone, NULL,
		    M_WAITOK);
		hash->uh_hashsize = UMA_HASH_SIZE_INIT;
	}
	if (hash->uh_slab_hash) {
		bzero(hash->uh_slab_hash, alloc);
		hash->uh_hashmask = hash->uh_hashsize - 1;
		return (1);
	}

	return (0);
}

4515300d9ddSJeff Roberson /*
4528355f576SJeff Roberson  * Expands the hash table for OFFPAGE zones.  This is done from zone_timeout
4538355f576SJeff Roberson  * to reduce collisions.  This must not be done in the regular allocation path,
4548355f576SJeff Roberson  * otherwise, we can recurse on the vm while allocating pages.
4558355f576SJeff Roberson  *
4568355f576SJeff Roberson  * Arguments:
4570aef6126SJeff Roberson  *	oldhash  The hash you want to expand
4580aef6126SJeff Roberson  *	newhash  The hash structure for the new table
4598355f576SJeff Roberson  *
4608355f576SJeff Roberson  * Returns:
4618355f576SJeff Roberson  * 	Nothing
4628355f576SJeff Roberson  *
4638355f576SJeff Roberson  * Discussion:
4648355f576SJeff Roberson  */
4650aef6126SJeff Roberson static int
4660aef6126SJeff Roberson hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
4678355f576SJeff Roberson {
4688355f576SJeff Roberson 	uma_slab_t slab;
4698355f576SJeff Roberson 	int hval;
4708355f576SJeff Roberson 	int i;
4718355f576SJeff Roberson 
4720aef6126SJeff Roberson 	if (!newhash->uh_slab_hash)
4730aef6126SJeff Roberson 		return (0);
4748355f576SJeff Roberson 
4750aef6126SJeff Roberson 	if (oldhash->uh_hashsize >= newhash->uh_hashsize)
4760aef6126SJeff Roberson 		return (0);
4778355f576SJeff Roberson 
4788355f576SJeff Roberson 	/*
4798355f576SJeff Roberson 	 * I need to investigate hash algorithms for resizing without a
4808355f576SJeff Roberson 	 * full rehash.
4818355f576SJeff Roberson 	 */
4828355f576SJeff Roberson 
4830aef6126SJeff Roberson 	for (i = 0; i < oldhash->uh_hashsize; i++)
4840aef6126SJeff Roberson 		while (!SLIST_EMPTY(&oldhash->uh_slab_hash[i])) {
4850aef6126SJeff Roberson 			slab = SLIST_FIRST(&oldhash->uh_slab_hash[i]);
4860aef6126SJeff Roberson 			SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[i], us_hlink);
4870aef6126SJeff Roberson 			hval = UMA_HASH(newhash, slab->us_data);
4880aef6126SJeff Roberson 			SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
4890aef6126SJeff Roberson 			    slab, us_hlink);
4908355f576SJeff Roberson 		}
4918355f576SJeff Roberson 
4920aef6126SJeff Roberson 	return (1);
4939c2cd7e5SJeff Roberson }
4949c2cd7e5SJeff Roberson 
/*
 * Free the hash table to the appropriate backing store.
 *
 * Arguments:
 *	hash  The hash table we're freeing
 *
 * Returns:
 *	Nothing
 */
static void
hash_free(struct uma_hash *hash)
{
	if (hash->uh_slab_hash == NULL)
		return;
	if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
		uma_zfree_internal(hashzone,
		    hash->uh_slab_hash, NULL, 0);
	else
		free(hash->uh_slab_hash, M_DEVBUF);
}

/*
 * Frees all outstanding items in a bucket
 *
 * Arguments:
 *	zone   The zone to free to, must be unlocked.
 *	bucket The free/alloc bucket with items, cpu queue must be locked.
 *
 * Returns:
 *	Nothing
 */

static void
bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
{
	uma_slab_t slab;
	int mzone;
	void *item;

	if (bucket == NULL)
		return;

	slab = NULL;
	mzone = 0;

	/* We have to look up the slab again for malloc.. */
	if (zone->uz_flags & UMA_ZFLAG_MALLOC)
		mzone = 1;

	while (bucket->ub_cnt > 0) {
		bucket->ub_cnt--;
		item = bucket->ub_bucket[bucket->ub_cnt];
#ifdef INVARIANTS
		bucket->ub_bucket[bucket->ub_cnt] = NULL;
		KASSERT(item != NULL,
		    ("bucket_drain: botched ptr, item is NULL"));
#endif
		/*
		 * This is extremely inefficient.  The slab pointer was passed
		 * to uma_zfree_arg, but we lost it because the buckets don't
		 * hold them.  This will go away when free() gets a size passed
		 * to it.
		 */
		if (mzone)
			slab = vtoslab((vm_offset_t)item & (~UMA_SLAB_MASK));
		uma_zfree_internal(zone, item, slab, 1);
	}
}

/*
 * Drains the per cpu caches for a zone.
 *
 * Arguments:
 *	zone     The zone to drain, must be unlocked.
 *	destroy  Whether or not to destroy the pcpu buckets (from zone_dtor)
 *
 * Returns:
 *	Nothing
 *
 * This function returns with the zone locked so that the per cpu queues
 * cannot be filled until zone_drain is finished.
 */
static void
cache_drain(uma_zone_t zone, int destroy)
{
	uma_bucket_t bucket;
	uma_cache_t cache;
	int cpu;

	/*
	 * Flush out the per cpu queues.
	 *
	 * XXX This causes unnecessary thrashing due to immediately having
	 * empty per cpu queues.  I need to improve this.
	 */

	/*
	 * We have to lock each cpu cache before locking the zone
	 */
	ZONE_UNLOCK(zone);

	for (cpu = 0; cpu < maxcpu; cpu++) {
		if (CPU_ABSENT(cpu))
			continue;
		CPU_LOCK(cpu);
		cache = &zone->uz_cpu[cpu];
		bucket_drain(zone, cache->uc_allocbucket);
		bucket_drain(zone, cache->uc_freebucket);
		if (destroy) {
			if (cache->uc_allocbucket != NULL)
				bucket_free(cache->uc_allocbucket);
			if (cache->uc_freebucket != NULL)
				bucket_free(cache->uc_freebucket);
			cache->uc_allocbucket = cache->uc_freebucket = NULL;
		}
	}

	/*
	 * Drain the bucket queues and free the buckets; we just keep two per
	 * cpu (alloc/free).
	 */
	ZONE_LOCK(zone);
	while ((bucket = LIST_FIRST(&zone->uz_full_bucket)) != NULL) {
		LIST_REMOVE(bucket, ub_link);
		ZONE_UNLOCK(zone);
		bucket_drain(zone, bucket);
		bucket_free(bucket);
		ZONE_LOCK(zone);
	}

	/* Now we do the free queue.. */
	while ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
		LIST_REMOVE(bucket, ub_link);
		bucket_free(bucket);
	}

	/* We unlock here, but they will all block until the zone is unlocked */
	for (cpu = 0; cpu < maxcpu; cpu++) {
		if (CPU_ABSENT(cpu))
			continue;
		CPU_UNLOCK(cpu);
	}
}

/*
 * Frees pages from a zone back to the system.  This is done on demand from
 * the pageout daemon.
 *
 * Arguments:
 *	zone     The zone to free pages from
 *	destroy  Whether to destroy the zone and pcpu buckets (from zone_dtor)
 *
 * Returns:
 *	Nothing.
 */
static void
zone_drain_common(uma_zone_t zone, int destroy)
{
	struct slabhead freeslabs = {};
	uma_slab_t slab;
	uma_slab_t n;
	u_int64_t extra;
	u_int8_t flags;
	u_int8_t *mem;
	int i;

	/*
	 * We don't want to take pages from statically allocated zones at this
	 * time
	 */
	if (zone->uz_flags & UMA_ZFLAG_NOFREE || zone->uz_freef == NULL)
		return;

	ZONE_LOCK(zone);

	if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
		cache_drain(zone, destroy);

	if (destroy)
		zone->uz_wssize = 0;

	if (zone->uz_free < zone->uz_wssize)
		goto finished;
#ifdef UMA_DEBUG
	printf("%s working set size: %llu free items: %u\n",
	    zone->uz_name, (unsigned long long)zone->uz_wssize, zone->uz_free);
#endif
	extra = zone->uz_free - zone->uz_wssize;
	extra /= zone->uz_ipers;

	/* extra is now the number of extra slabs that we can free */

	if (extra == 0)
		goto finished;

	slab = LIST_FIRST(&zone->uz_free_slab);
	while (slab && extra) {
		n = LIST_NEXT(slab, us_link);

		/* We have nowhere to free these to */
		if (slab->us_flags & UMA_SLAB_BOOT) {
			slab = n;
			continue;
		}

		LIST_REMOVE(slab, us_link);
		zone->uz_pages -= zone->uz_ppera;
		zone->uz_free -= zone->uz_ipers;

		if (zone->uz_flags & UMA_ZFLAG_HASH)
			UMA_HASH_REMOVE(&zone->uz_hash, slab, slab->us_data);

		SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);

		slab = n;
		extra--;
	}
finished:
	ZONE_UNLOCK(zone);

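	/* Free the unlinked slabs without the zone lock held */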
	while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
		SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
		if (zone->uz_fini)
			for (i = 0; i < zone->uz_ipers; i++)
				zone->uz_fini(
				    slab->us_data + (zone->uz_rsize * i),
				    zone->uz_size);
		flags = slab->us_flags;
		mem = slab->us_data;

		if (zone->uz_flags & UMA_ZFLAG_OFFPAGE)
			uma_zfree_internal(slabzone, slab, NULL, 0);
		if (zone->uz_flags & UMA_ZFLAG_MALLOC) {
			vm_object_t obj;

			if (flags & UMA_SLAB_KMEM)
				obj = kmem_object;
			else
				obj = NULL;
			for (i = 0; i < zone->uz_ppera; i++)
				vsetobj((vm_offset_t)mem + (i * PAGE_SIZE),
				    obj);
		}
#ifdef UMA_DEBUG
		printf("%s: Returning %d bytes.\n",
		    zone->uz_name, UMA_SLAB_SIZE * zone->uz_ppera);
#endif
		zone->uz_freef(mem, UMA_SLAB_SIZE * zone->uz_ppera, flags);
	}
}

static __inline void
zone_drain(uma_zone_t zone)
{
	zone_drain_common(zone, 0);
}

/*
 * Allocate a new slab for a zone.  This does not insert the slab onto a list.
 *
 * Arguments:
 *	zone  The zone to allocate slabs for
 *	wait  Shall we wait?
 *
 * Returns:
 *	The slab that was allocated or NULL if there is no memory and the
 *	caller specified M_NOWAIT.
 */
static uma_slab_t
slab_zalloc(uma_zone_t zone, int wait)
{
	uma_slab_t slab;	/* Starting slab */
	u_int8_t *mem;
	u_int8_t flags;
	int i;

	slab = NULL;

#ifdef UMA_DEBUG
	printf("slab_zalloc:  Allocating a new slab for %s\n", zone->uz_name);
#endif
	ZONE_UNLOCK(zone);

	if (zone->uz_flags & UMA_ZFLAG_OFFPAGE) {
		slab = uma_zalloc_internal(slabzone, NULL, wait);
		if (slab == NULL) {
			ZONE_LOCK(zone);
			return (NULL);
		}
	}

	/*
	 * This reproduces the old vm_zone behavior of zero filling pages the
	 * first time they are added to a zone.
	 *
	 * Malloced items are zeroed in uma_zalloc.
	 */

	if ((zone->uz_flags & UMA_ZFLAG_MALLOC) == 0)
		wait |= M_ZERO;
	else
		wait &= ~M_ZERO;

	if (booted || (zone->uz_flags & UMA_ZFLAG_PRIVALLOC)) {
		mem = zone->uz_allocf(zone, zone->uz_ppera * UMA_SLAB_SIZE,
		    &flags, wait);
		if (mem == NULL) {
			ZONE_LOCK(zone);
			return (NULL);
		}
	} else {
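		/*
		 * The VM isn't up yet, so carve the slab out of one of the
		 * pages reserved at boot time (see uma_boot_pages).
		 */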
		uma_slab_t tmps;

		if (zone->uz_ppera > 1)
			panic("UMA: Attempting to allocate multiple pages before vm has started.\n");
		if (zone->uz_flags & UMA_ZFLAG_MALLOC)
			panic("Mallocing before uma_startup2 has been called.\n");
		if (uma_boot_free == 0)
			panic("UMA: Ran out of pre init pages, increase UMA_BOOT_PAGES\n");
		tmps = LIST_FIRST(&uma_boot_pages);
		LIST_REMOVE(tmps, us_link);
		uma_boot_free--;
		mem = tmps->us_data;
		flags = tmps->us_flags;
	}

	/* Point the slab into the allocated memory */
	if (!(zone->uz_flags & UMA_ZFLAG_OFFPAGE))
		slab = (uma_slab_t)(mem + zone->uz_pgoff);

	if (zone->uz_flags & UMA_ZFLAG_MALLOC)
		for (i = 0; i < zone->uz_ppera; i++)
			vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);

	slab->us_zone = zone;
	slab->us_data = mem;

	/*
	 * This is intended to spread data out across cache lines.
	 *
	 * This code doesn't seem to work properly on x86, and on alpha
	 * it makes absolutely no performance difference. I'm sure it could
	 * use some tuning, but Sun makes outrageous claims about its
	 * performance.
	 */
#if 0
	if (zone->uz_cachemax) {
		slab->us_data += zone->uz_cacheoff;
		zone->uz_cacheoff += UMA_CACHE_INC;
		if (zone->uz_cacheoff > zone->uz_cachemax)
			zone->uz_cacheoff = 0;
	}
#endif

	slab->us_freecount = zone->uz_ipers;
	slab->us_firstfree = 0;
	slab->us_flags = flags;
	for (i = 0; i < zone->uz_ipers; i++)
		slab->us_freelist[i] = i+1;

	if (zone->uz_init)
		for (i = 0; i < zone->uz_ipers; i++)
			zone->uz_init(slab->us_data + (zone->uz_rsize * i),
			    zone->uz_size);
	ZONE_LOCK(zone);

	if (zone->uz_flags & UMA_ZFLAG_HASH)
		UMA_HASH_INSERT(&zone->uz_hash, slab, mem);

	zone->uz_pages += zone->uz_ppera;
	zone->uz_free += zone->uz_ipers;

	return (slab);
}

/*
 * Allocates a number of pages from the system
 *
 * Arguments:
 *	zone  Unused
 *	bytes  The number of bytes requested
 *	wait  Shall we wait?
 *
 * Returns:
 *	A pointer to the allocated memory or possibly
 *	NULL if M_NOWAIT is set.
 */
static void *
page_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
{
	void *p;	/* Returned page */

	*pflag = UMA_SLAB_KMEM;
	p = (void *) kmem_malloc(kmem_map, bytes, wait);

	return (p);
}

/*
 * Allocates a number of pages from within an object
 *
 * Arguments:
 *	zone   Unused
 *	bytes  The number of bytes requested
 *	wait   Shall we wait?
 *
 * Returns:
 *	A pointer to the allocated memory or possibly
 *	NULL if M_NOWAIT is set.
 */
static void *
obj_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{
	vm_object_t object;
	vm_offset_t retkva, zkva;
	vm_page_t p;
	int pages, startpages;

	object = zone->uz_obj;
	retkva = 0;

	/*
	 * This looks a little weird since we're getting one page at a time
	 */
	VM_OBJECT_LOCK(object);
	p = TAILQ_LAST(&object->memq, pglist);
	pages = p != NULL ? p->pindex + 1 : 0;
	startpages = pages;
	zkva = zone->uz_kva + pages * PAGE_SIZE;
	for (; bytes > 0; bytes -= PAGE_SIZE) {
		p = vm_page_alloc(object, pages,
		    VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED);
		if (p == NULL) {
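			/* Back out: unmap and free any pages already wired */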
			if (pages != startpages)
				pmap_qremove(retkva, pages - startpages);
			while (pages != startpages) {
				pages--;
				p = TAILQ_LAST(&object->memq, pglist);
				vm_page_lock_queues();
				vm_page_unwire(p, 0);
				vm_page_free(p);
				vm_page_unlock_queues();
			}
			retkva = 0;
			goto done;
		}
		pmap_qenter(zkva, &p, 1);
		if (retkva == 0)
			retkva = zkva;
		zkva += PAGE_SIZE;
		pages += 1;
	}
done:
	VM_OBJECT_UNLOCK(object);

	*flags = UMA_SLAB_PRIV;

	return ((void *)retkva);
}

/*
 * Frees a number of pages to the system
 *
 * Arguments:
 *	mem   A pointer to the memory to be freed
 *	size  The size of the memory being freed
 *	flags The original p->us_flags field
 *
 * Returns:
 *	Nothing
 */
static void
page_free(void *mem, int size, u_int8_t flags)
{
	vm_map_t map;

	if (flags & UMA_SLAB_KMEM)
		map = kmem_map;
	else
		panic("UMA: page_free used with invalid flags %d\n", flags);

	kmem_free(map, (vm_offset_t)mem, size);
}

/*
 * Zero fill initializer
 *
 * Arguments/Returns follow uma_init specifications
 */
static void
zero_init(void *mem, int size)
{
	bzero(mem, size);
}

/*
 * Finish creating a small uma zone.  This calculates ipers, and the zone size.
 *
 * Arguments
 *	zone  The zone we should initialize
 *
 * Returns
 *	Nothing
 */
static void
zone_small_init(uma_zone_t zone)
{
	int rsize;
	int memused;
	int ipers;

	rsize = zone->uz_size;

	if (rsize < UMA_SMALLEST_UNIT)
		rsize = UMA_SMALLEST_UNIT;

	if (rsize & zone->uz_align)
		rsize = (rsize & ~zone->uz_align) + (zone->uz_align + 1);

	zone->uz_rsize = rsize;

	rsize += 1;	/* Account for the byte of linkage */
	zone->uz_ipers = (UMA_SLAB_SIZE - sizeof(struct uma_slab)) / rsize;
	zone->uz_ppera = 1;

	KASSERT(zone->uz_ipers != 0, ("zone_small_init: ipers is 0, uh-oh!"));
	memused = zone->uz_ipers * zone->uz_rsize;

	/* Can we do any better? */
	if ((UMA_SLAB_SIZE - memused) >= UMA_MAX_WASTE) {
		/*
		 * We can't do this if we're internal or if we've been
		 * asked to not go to the VM for buckets.  If we do this we
		 * may end up going to the VM (kmem_map) for slabs which we
		 * do not want to do if we're UMA_ZFLAG_CACHEONLY as a
		 * result of UMA_ZONE_VM, which clearly forbids it.
		 */
		if ((zone->uz_flags & UMA_ZFLAG_INTERNAL) ||
		    (zone->uz_flags & UMA_ZFLAG_CACHEONLY))
			return;
		ipers = UMA_SLAB_SIZE / zone->uz_rsize;
		if (ipers > zone->uz_ipers) {
			zone->uz_flags |= UMA_ZFLAG_OFFPAGE;
			if ((zone->uz_flags & UMA_ZFLAG_MALLOC) == 0)
				zone->uz_flags |= UMA_ZFLAG_HASH;
			zone->uz_ipers = ipers;
		}
	}
}

/*
 * Finish creating a large (> UMA_SLAB_SIZE) uma zone.  Just give in and do
 * OFFPAGE for now.  When I can allow for more dynamic slab sizes this will be
 * more complicated.
 *
 * Arguments
 *	zone  The zone we should initialize
 *
 * Returns
 *	Nothing
 */
static void
zone_large_init(uma_zone_t zone)
{
	int pages;

	KASSERT((zone->uz_flags & UMA_ZFLAG_CACHEONLY) == 0,
	    ("zone_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY zone"));

	pages = zone->uz_size / UMA_SLAB_SIZE;

	/* Account for remainder */
	if ((pages * UMA_SLAB_SIZE) < zone->uz_size)
		pages++;

	zone->uz_ppera = pages;
	zone->uz_ipers = 1;

	zone->uz_flags |= UMA_ZFLAG_OFFPAGE;
	if ((zone->uz_flags & UMA_ZFLAG_MALLOC) == 0)
		zone->uz_flags |= UMA_ZFLAG_HASH;

	zone->uz_rsize = zone->uz_size;
}

10908355f576SJeff Roberson /*
10918355f576SJeff Roberson  * Zone header ctor.  This initializes all fields, locks, etc.  And inserts
10928355f576SJeff Roberson  * the zone onto the global zone list.
10938355f576SJeff Roberson  *
10948355f576SJeff Roberson  * Arguments/Returns follow uma_ctor specifications
10958355f576SJeff Roberson  *	udata  Actually uma_zcreat_args
10968355f576SJeff Roberson  *
10978355f576SJeff Roberson  */
10988355f576SJeff Roberson 
10998355f576SJeff Roberson static void
11008355f576SJeff Roberson zone_ctor(void *mem, int size, void *udata)
11018355f576SJeff Roberson {
11028355f576SJeff Roberson 	struct uma_zctor_args *arg = udata;
11038355f576SJeff Roberson 	uma_zone_t zone = mem;
110428bc4419SJeff Roberson 	int privlc;
11058355f576SJeff Roberson 
11068355f576SJeff Roberson 	bzero(zone, size);
11078355f576SJeff Roberson 	zone->uz_name = arg->name;
11088355f576SJeff Roberson 	zone->uz_size = arg->size;
11098355f576SJeff Roberson 	zone->uz_ctor = arg->ctor;
11108355f576SJeff Roberson 	zone->uz_dtor = arg->dtor;
11118355f576SJeff Roberson 	zone->uz_init = arg->uminit;
1112e221e841SJeff Roberson 	zone->uz_fini = arg->fini;
11138355f576SJeff Roberson 	zone->uz_align = arg->align;
11148355f576SJeff Roberson 	zone->uz_free = 0;
11158355f576SJeff Roberson 	zone->uz_pages = 0;
11168355f576SJeff Roberson 	zone->uz_flags = 0;
11178355f576SJeff Roberson 	zone->uz_allocf = page_alloc;
11188355f576SJeff Roberson 	zone->uz_freef = page_free;
11198355f576SJeff Roberson 
11208355f576SJeff Roberson 	if (arg->flags & UMA_ZONE_ZINIT)
11218355f576SJeff Roberson 		zone->uz_init = zero_init;
11228355f576SJeff Roberson 
11238355f576SJeff Roberson 	if (arg->flags & UMA_ZONE_INTERNAL)
11248355f576SJeff Roberson 		zone->uz_flags |= UMA_ZFLAG_INTERNAL;
11258355f576SJeff Roberson 
11268355f576SJeff Roberson 	if (arg->flags & UMA_ZONE_MALLOC)
11278355f576SJeff Roberson 		zone->uz_flags |= UMA_ZFLAG_MALLOC;
11288355f576SJeff Roberson 
11298355f576SJeff Roberson 	if (arg->flags & UMA_ZONE_NOFREE)
11308355f576SJeff Roberson 		zone->uz_flags |= UMA_ZFLAG_NOFREE;
11318355f576SJeff Roberson 
113218aa2de5SJeff Roberson 	if (arg->flags & UMA_ZONE_VM)
113320e8e865SBosko Milekic 		zone->uz_flags |= UMA_ZFLAG_CACHEONLY;
113418aa2de5SJeff Roberson 
113520e8e865SBosko Milekic 	/*
113620e8e865SBosko Milekic 	 * XXX:
113720e8e865SBosko Milekic 	 * The +1 byte added to uz_size is to account for the byte of
113820e8e865SBosko Milekic 	 * linkage that is added to the size in zone_small_init().  If
113920e8e865SBosko Milekic 	 * we don't account for this here then we may end up in
114020e8e865SBosko Milekic 	 * zone_small_init() with a calculated 'ipers' of 0.
114120e8e865SBosko Milekic 	 */
114220e8e865SBosko Milekic 	if ((zone->uz_size+1) > (UMA_SLAB_SIZE - sizeof(struct uma_slab)))
11438355f576SJeff Roberson 		zone_large_init(zone);
11448355f576SJeff Roberson 	else
11458355f576SJeff Roberson 		zone_small_init(zone);
114648eea375SJeff Roberson #ifdef UMA_MD_SMALL_ALLOC
114748eea375SJeff Roberson 	if (zone->uz_ppera == 1) {
114848eea375SJeff Roberson 		zone->uz_allocf = uma_small_alloc;
114948eea375SJeff Roberson 		zone->uz_freef = uma_small_free;
115048eea375SJeff Roberson 	}
115148eea375SJeff Roberson #endif	/* UMA_MD_SMALL_ALLOC */
11528355f576SJeff Roberson 
115328bc4419SJeff Roberson 	if (arg->flags & UMA_ZONE_MTXCLASS)
115428bc4419SJeff Roberson 		privlc = 1;
115528bc4419SJeff Roberson 	else
115628bc4419SJeff Roberson 		privlc = 0;
115728bc4419SJeff Roberson 
11588355f576SJeff Roberson 	/*
11598355f576SJeff Roberson 	 * If we're putting the slab header in the actual page, we need to
11608355f576SJeff Roberson 	 * figure out where in each page it goes.  This calculates a
11619d5abbddSJens Schweikhardt 	 * right-justified offset into the memory on an ALIGN_PTR boundary.
11628355f576SJeff Roberson 	 */
11638355f576SJeff Roberson 	if (!(zone->uz_flags & UMA_ZFLAG_OFFPAGE)) {
11648355f576SJeff Roberson 		int totsize;
11658355f576SJeff Roberson 		int waste;
11668355f576SJeff Roberson 
11678355f576SJeff Roberson 		/* Size of the slab struct and free list */
11688355f576SJeff Roberson 		totsize = sizeof(struct uma_slab) + zone->uz_ipers;
11698355f576SJeff Roberson 		if (totsize & UMA_ALIGN_PTR)
11708355f576SJeff Roberson 			totsize = (totsize & ~UMA_ALIGN_PTR) +
11718355f576SJeff Roberson 			    (UMA_ALIGN_PTR + 1);
11728355f576SJeff Roberson 		zone->uz_pgoff = UMA_SLAB_SIZE - totsize;
11738355f576SJeff Roberson 
11748355f576SJeff Roberson 		waste = zone->uz_pgoff;
11758355f576SJeff Roberson 		waste -= (zone->uz_ipers * zone->uz_rsize);
11768355f576SJeff Roberson 
11778355f576SJeff Roberson 		/*
11788355f576SJeff Roberson 		 * This calculates how much space we have for cache line size
11798355f576SJeff Roberson 		 * optimizations.  It works by offsetting each slab slightly.
11808355f576SJeff Roberson 		 * Currently it breaks on x86, and so it is disabled.
11818355f576SJeff Roberson 		 */
11828355f576SJeff Roberson 
11838355f576SJeff Roberson 		if (zone->uz_align < UMA_CACHE_INC && waste > UMA_CACHE_INC) {
11848355f576SJeff Roberson 			zone->uz_cachemax = waste - UMA_CACHE_INC;
11858355f576SJeff Roberson 			zone->uz_cacheoff = 0;
11868355f576SJeff Roberson 		}
11878355f576SJeff Roberson 
11888355f576SJeff Roberson 		totsize = zone->uz_pgoff + sizeof(struct uma_slab)
11898355f576SJeff Roberson 		    + zone->uz_ipers;
11908355f576SJeff Roberson 		/* I don't think it's possible, but I'll make sure anyway */
11918355f576SJeff Roberson 		if (totsize > UMA_SLAB_SIZE) {
11928355f576SJeff Roberson 			printf("zone %s ipers %d rsize %d size %d\n",
11938355f576SJeff Roberson 			    zone->uz_name, zone->uz_ipers, zone->uz_rsize,
11948355f576SJeff Roberson 			    zone->uz_size);
11958355f576SJeff Roberson 			panic("UMA slab won't fit");
11968355f576SJeff Roberson 		}
11978355f576SJeff Roberson 	}
11988355f576SJeff Roberson 
119999571dc3SJeff Roberson 	if (zone->uz_flags & UMA_ZFLAG_HASH)
120099571dc3SJeff Roberson 		hash_alloc(&zone->uz_hash);
120199571dc3SJeff Roberson 
12028355f576SJeff Roberson #ifdef UMA_DEBUG
12038355f576SJeff Roberson 	printf("%s(%p) size = %d ipers = %d ppera = %d pgoff = %d\n",
12048355f576SJeff Roberson 	    zone->uz_name, zone,
12058355f576SJeff Roberson 	    zone->uz_size, zone->uz_ipers,
12068355f576SJeff Roberson 	    zone->uz_ppera, zone->uz_pgoff);
12078355f576SJeff Roberson #endif
120828bc4419SJeff Roberson 	ZONE_LOCK_INIT(zone, privlc);
12098355f576SJeff Roberson 
12108355f576SJeff Roberson 	mtx_lock(&uma_mtx);
12118355f576SJeff Roberson 	LIST_INSERT_HEAD(&uma_zones, zone, uz_link);
12128355f576SJeff Roberson 	mtx_unlock(&uma_mtx);
12138355f576SJeff Roberson 
12148355f576SJeff Roberson 	/*
12158355f576SJeff Roberson 	 * Some internal zones don't have room allocated for the per cpu
12168355f576SJeff Roberson 	 * caches.  If we're internal, bail out here.
12178355f576SJeff Roberson 	 */
12188355f576SJeff Roberson 
12198355f576SJeff Roberson 	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
12208355f576SJeff Roberson 		return;
12218355f576SJeff Roberson 
1222cae33c14SJeff Roberson 	if (zone->uz_ipers <= BUCKET_MAX)
1223cae33c14SJeff Roberson 		zone->uz_count = zone->uz_ipers;
12248355f576SJeff Roberson 	else
1225cae33c14SJeff Roberson 		zone->uz_count = BUCKET_MAX;
12268355f576SJeff Roberson }
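/*
 * The non-OFFPAGE case above packs the slab header into the tail of the
 * page.  Below is a minimal, standalone sketch of the rounding and
 * right-justification; SLAB_SIZE and ALIGN_PTR are illustrative
 * stand-ins for the UMA constants.
 */
#if 0
#include <stdio.h>

#define SLAB_SIZE	4096
#define ALIGN_PTR	(sizeof(void *) - 1)	/* a mask, like UMA_ALIGN_PTR */

int
main(void)
{
	int totsize = 35;	/* e.g. a header plus a 3-byte free list */

	/* Round up to the next pointer-aligned size. */
	if (totsize & ALIGN_PTR)
		totsize = (totsize & ~ALIGN_PTR) + (ALIGN_PTR + 1);
	/* The header then starts this many bytes into the page. */
	printf("pgoff = %d\n", SLAB_SIZE - totsize);	/* 4056 on LP64 */
	return (0);
}
#endif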
12278355f576SJeff Roberson 
12288355f576SJeff Roberson /*
12299c2cd7e5SJeff Roberson  * Zone header dtor.  This frees all data, destroys locks, frees the hash table
12309c2cd7e5SJeff Roberson  * and removes the zone from the global list.
12319c2cd7e5SJeff Roberson  *
12329c2cd7e5SJeff Roberson  * Arguments/Returns follow uma_dtor specifications
12339c2cd7e5SJeff Roberson  *	udata  unused
12349c2cd7e5SJeff Roberson  */
12359c2cd7e5SJeff Roberson 
12369c2cd7e5SJeff Roberson static void
12379c2cd7e5SJeff Roberson zone_dtor(void *arg, int size, void *udata)
12389c2cd7e5SJeff Roberson {
12399c2cd7e5SJeff Roberson 	uma_zone_t zone;
12409c2cd7e5SJeff Roberson 
12419c2cd7e5SJeff Roberson 	zone = (uma_zone_t)arg;
124217b9cc49SJeff Roberson 	mtx_lock(&uma_mtx);
124317b9cc49SJeff Roberson 	LIST_REMOVE(zone, uz_link);
1244d56368d7SBosko Milekic 	zone_drain_common(zone, 1);
124517b9cc49SJeff Roberson 	mtx_unlock(&uma_mtx);
124617b9cc49SJeff Roberson 
12479c2cd7e5SJeff Roberson 	ZONE_LOCK(zone);
12489c2cd7e5SJeff Roberson 	if (zone->uz_free != 0)
1249886eaaacSPoul-Henning Kamp 		printf("Zone %s was not empty (%d items).  Lost %d pages of memory.\n",
1250886eaaacSPoul-Henning Kamp 		    zone->uz_name, zone->uz_free, zone->uz_pages);
12519c2cd7e5SJeff Roberson 
12529c2cd7e5SJeff Roberson 	ZONE_UNLOCK(zone);
12530aef6126SJeff Roberson 	if ((zone->uz_flags & UMA_ZFLAG_OFFPAGE) != 0)
12540aef6126SJeff Roberson 		hash_free(&zone->uz_hash);
12550aef6126SJeff Roberson 
12569c2cd7e5SJeff Roberson 	ZONE_LOCK_FINI(zone);
12579c2cd7e5SJeff Roberson }
12589c2cd7e5SJeff Roberson /*
12598355f576SJeff Roberson  * Traverses every zone in the system and calls a callback
12608355f576SJeff Roberson  *
12618355f576SJeff Roberson  * Arguments:
12628355f576SJeff Roberson  *	zfunc  A pointer to a function which accepts a zone
12638355f576SJeff Roberson  *		as an argument.
12648355f576SJeff Roberson  *
12658355f576SJeff Roberson  * Returns:
12668355f576SJeff Roberson  *	Nothing
12678355f576SJeff Roberson  */
12688355f576SJeff Roberson static void
12698355f576SJeff Roberson zone_foreach(void (*zfunc)(uma_zone_t))
12708355f576SJeff Roberson {
12718355f576SJeff Roberson 	uma_zone_t zone;
12728355f576SJeff Roberson 
12738355f576SJeff Roberson 	mtx_lock(&uma_mtx);
12748355f576SJeff Roberson 	LIST_FOREACH(zone, &uma_zones, uz_link) {
12758355f576SJeff Roberson 		zfunc(zone);
12768355f576SJeff Roberson 	}
12778355f576SJeff Roberson 	mtx_unlock(&uma_mtx);
12788355f576SJeff Roberson }
12798355f576SJeff Roberson 
12808355f576SJeff Roberson /* Public functions */
12818355f576SJeff Roberson /* See uma.h */
12828355f576SJeff Roberson void
12838355f576SJeff Roberson uma_startup(void *bootmem)
12848355f576SJeff Roberson {
12858355f576SJeff Roberson 	struct uma_zctor_args args;
12868355f576SJeff Roberson 	uma_slab_t slab;
12878355f576SJeff Roberson 	int slabsize;
12888355f576SJeff Roberson 	int i;
12898355f576SJeff Roberson 
12908355f576SJeff Roberson #ifdef UMA_DEBUG
12918355f576SJeff Roberson 	printf("Creating uma zone headers zone.\n");
12928355f576SJeff Roberson #endif
12938355f576SJeff Roberson #ifdef SMP
12948355f576SJeff Roberson 	maxcpu = mp_maxid + 1;
12958355f576SJeff Roberson #else
12968355f576SJeff Roberson 	maxcpu = 1;
12978355f576SJeff Roberson #endif
12988355f576SJeff Roberson #ifdef UMA_DEBUG
12998355f576SJeff Roberson 	printf("Max cpu = %d, mp_maxid = %d\n", maxcpu, mp_maxid);
13008355f576SJeff Roberson 	Debugger("stop");
13018355f576SJeff Roberson #endif
13026008862bSJohn Baldwin 	mtx_init(&uma_mtx, "UMA lock", NULL, MTX_DEF);
13038355f576SJeff Roberson 	/* "Manually" create the initial zone. */
13048355f576SJeff Roberson 	args.name = "UMA Zones";
13058355f576SJeff Roberson 	args.size = sizeof(struct uma_zone) +
13068355f576SJeff Roberson 	    (sizeof(struct uma_cache) * (maxcpu - 1));
13078355f576SJeff Roberson 	args.ctor = zone_ctor;
13089c2cd7e5SJeff Roberson 	args.dtor = zone_dtor;
13098355f576SJeff Roberson 	args.uminit = zero_init;
13108355f576SJeff Roberson 	args.fini = NULL;
13118355f576SJeff Roberson 	args.align = 32 - 1;
13128355f576SJeff Roberson 	args.flags = UMA_ZONE_INTERNAL;
13138355f576SJeff Roberson 	/* The initial zone has no per-cpu queues so it's smaller. */
13148355f576SJeff Roberson 	zone_ctor(zones, sizeof(struct uma_zone), &args);
13158355f576SJeff Roberson 
1316d88797c2SBosko Milekic 	/* Initialize the pcpu cache lock set once and for all */
1317d88797c2SBosko Milekic 	for (i = 0; i < maxcpu; i++)
1318d88797c2SBosko Milekic 		CPU_LOCK_INIT(i);
1319d88797c2SBosko Milekic 
13208355f576SJeff Roberson #ifdef UMA_DEBUG
13218355f576SJeff Roberson 	printf("Filling boot free list.\n");
13228355f576SJeff Roberson #endif
13238355f576SJeff Roberson 	for (i = 0; i < UMA_BOOT_PAGES; i++) {
13248355f576SJeff Roberson 		slab = (uma_slab_t)((u_int8_t *)bootmem + (i * UMA_SLAB_SIZE));
13258355f576SJeff Roberson 		slab->us_data = (u_int8_t *)slab;
13268355f576SJeff Roberson 		slab->us_flags = UMA_SLAB_BOOT;
13278355f576SJeff Roberson 		LIST_INSERT_HEAD(&uma_boot_pages, slab, us_link);
13288355f576SJeff Roberson 		uma_boot_free++;
13298355f576SJeff Roberson 	}
13308355f576SJeff Roberson 
13318355f576SJeff Roberson #ifdef UMA_DEBUG
13328355f576SJeff Roberson 	printf("Creating slab zone.\n");
13338355f576SJeff Roberson #endif
13348355f576SJeff Roberson 
13358355f576SJeff Roberson 	/*
13368355f576SJeff Roberson 	 * This is the max number of free list items we'll have with
13378355f576SJeff Roberson 	 * offpage slabs.
13388355f576SJeff Roberson 	 */
13398355f576SJeff Roberson 
13408355f576SJeff Roberson 	slabsize = UMA_SLAB_SIZE - sizeof(struct uma_slab);
13418355f576SJeff Roberson 	slabsize /= UMA_MAX_WASTE;
13428355f576SJeff Roberson 	slabsize++;			/* In case the division rounded down */
13438355f576SJeff Roberson 	slabsize += sizeof(struct uma_slab);
13448355f576SJeff Roberson 
13458355f576SJeff Roberson 	/* Now make a zone for slab headers */
13468355f576SJeff Roberson 	slabzone = uma_zcreate("UMA Slabs",
13478355f576SJeff Roberson 				slabsize,
13488355f576SJeff Roberson 				NULL, NULL, NULL, NULL,
13498355f576SJeff Roberson 				UMA_ALIGN_PTR, UMA_ZONE_INTERNAL);
13508355f576SJeff Roberson 
13518355f576SJeff Roberson 	hashzone = uma_zcreate("UMA Hash",
13528355f576SJeff Roberson 	    sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
13538355f576SJeff Roberson 	    NULL, NULL, NULL, NULL,
13548355f576SJeff Roberson 	    UMA_ALIGN_PTR, UMA_ZONE_INTERNAL);
13558355f576SJeff Roberson 
1356cae33c14SJeff Roberson 	bucket_init();
13578355f576SJeff Roberson 
135848eea375SJeff Roberson #ifdef UMA_MD_SMALL_ALLOC
135948eea375SJeff Roberson 	booted = 1;
136048eea375SJeff Roberson #endif
13618355f576SJeff Roberson 
13628355f576SJeff Roberson #ifdef UMA_DEBUG
13638355f576SJeff Roberson 	printf("UMA startup complete.\n");
13648355f576SJeff Roberson #endif
13658355f576SJeff Roberson }
13668355f576SJeff Roberson 
13678355f576SJeff Roberson /* see uma.h */
13688355f576SJeff Roberson void
136999571dc3SJeff Roberson uma_startup2(void)
13708355f576SJeff Roberson {
13718355f576SJeff Roberson 	booted = 1;
137286bbae32SJeff Roberson 	bucket_enable();
13738355f576SJeff Roberson #ifdef UMA_DEBUG
13748355f576SJeff Roberson 	printf("UMA startup2 complete.\n");
13758355f576SJeff Roberson #endif
13768355f576SJeff Roberson }
13778355f576SJeff Roberson 
13788355f576SJeff Roberson /*
13798355f576SJeff Roberson  * Initialize our callout handle
13808355f576SJeff Roberson  *
13818355f576SJeff Roberson  */
13828355f576SJeff Roberson 
13838355f576SJeff Roberson static void
13848355f576SJeff Roberson uma_startup3(void)
13858355f576SJeff Roberson {
13868355f576SJeff Roberson #ifdef UMA_DEBUG
13878355f576SJeff Roberson 	printf("Starting callout.\n");
13888355f576SJeff Roberson #endif
13898355f576SJeff Roberson 	callout_init(&uma_callout, 0);
13908355f576SJeff Roberson 	callout_reset(&uma_callout, UMA_WORKING_TIME * hz, uma_timeout, NULL);
13918355f576SJeff Roberson #ifdef UMA_DEBUG
13928355f576SJeff Roberson 	printf("UMA startup3 complete.\n");
13938355f576SJeff Roberson #endif
13948355f576SJeff Roberson }
13958355f576SJeff Roberson 
13968355f576SJeff Roberson /* See uma.h */
13978355f576SJeff Roberson uma_zone_t
1398c3bdc05fSAndrew R. Reiter uma_zcreate(char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
1399c3bdc05fSAndrew R. Reiter 		uma_init uminit, uma_fini fini, int align, u_int16_t flags)
14008355f576SJeff Roberson 
14018355f576SJeff Roberson {
14028355f576SJeff Roberson 	struct uma_zctor_args args;
14038355f576SJeff Roberson 
14048355f576SJeff Roberson 	/* This stuff is essential for the zone ctor */
14058355f576SJeff Roberson 	args.name = name;
14068355f576SJeff Roberson 	args.size = size;
14078355f576SJeff Roberson 	args.ctor = ctor;
14088355f576SJeff Roberson 	args.dtor = dtor;
14098355f576SJeff Roberson 	args.uminit = uminit;
14108355f576SJeff Roberson 	args.fini = fini;
14118355f576SJeff Roberson 	args.align = align;
14128355f576SJeff Roberson 	args.flags = flags;
14138355f576SJeff Roberson 
1414a163d034SWarner Losh 	return (uma_zalloc_internal(zones, &args, M_WAITOK));
14158355f576SJeff Roberson }
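/*
 * A hypothetical caller of uma_zcreate() might look like the sketch
 * below.  The "foo" type, its zone, and the wrapper functions are
 * invented for illustration, and the fragment only compiles as part of
 * a kernel build.
 */
#if 0
#include <sys/param.h>
#include <sys/malloc.h>
#include <vm/uma.h>

struct foo {
	int	f_refs;
};

static uma_zone_t foo_zone;

static void
foo_zinit(void)
{
	/* No ctor/dtor/init/fini, pointer alignment, default flags. */
	foo_zone = uma_zcreate("foo", sizeof(struct foo),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
}

static struct foo *
foo_alloc(void)
{
	return (uma_zalloc(foo_zone, M_WAITOK | M_ZERO));
}

static void
foo_free(struct foo *fp)
{
	uma_zfree(foo_zone, fp);
}
#endif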
14168355f576SJeff Roberson 
14178355f576SJeff Roberson /* See uma.h */
14189c2cd7e5SJeff Roberson void
14199c2cd7e5SJeff Roberson uma_zdestroy(uma_zone_t zone)
14209c2cd7e5SJeff Roberson {
14219c2cd7e5SJeff Roberson 	uma_zfree_internal(zones, zone, NULL, 0);
14229c2cd7e5SJeff Roberson }
14239c2cd7e5SJeff Roberson 
14249c2cd7e5SJeff Roberson /* See uma.h */
14258355f576SJeff Roberson void *
14262cc35ff9SJeff Roberson uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
14278355f576SJeff Roberson {
14288355f576SJeff Roberson 	void *item;
14298355f576SJeff Roberson 	uma_cache_t cache;
14308355f576SJeff Roberson 	uma_bucket_t bucket;
14318355f576SJeff Roberson 	int cpu;
14328355f576SJeff Roberson 
14338355f576SJeff Roberson 	/* This is the fast path allocation */
14348355f576SJeff Roberson #ifdef UMA_DEBUG_ALLOC_1
14358355f576SJeff Roberson 	printf("Allocating one item from %s(%p)\n", zone->uz_name, zone);
14368355f576SJeff Roberson #endif
1437a553d4b8SJeff Roberson 
14388522511bSHartmut Brandt #ifdef INVARIANTS
14398522511bSHartmut Brandt 	/*
14408522511bSHartmut Brandt 	 * Make sure that exactly one of M_WAITOK and M_NOWAIT is set,
14418522511bSHartmut Brandt 	 * and check for the API botches that are common.
14428522511bSHartmut Brandt 	 * The uma code implies M_WAITOK if M_NOWAIT is not set, so
14438522511bSHartmut Brandt 	 * we default to waiting if none of the flags is set.
14448522511bSHartmut Brandt 	 */
14458522511bSHartmut Brandt 	cpu = flags & (M_WAITOK | M_NOWAIT | M_DONTWAIT | M_TRYWAIT);
14468522511bSHartmut Brandt 	if (cpu != M_NOWAIT && cpu != M_WAITOK) {
14478522511bSHartmut Brandt 		static	struct timeval lasterr;
14488522511bSHartmut Brandt 		static	int curerr, once;
14498522511bSHartmut Brandt 		if (once == 0 && ppsratecheck(&lasterr, &curerr, 1)) {
14508522511bSHartmut Brandt 			printf("Bad uma_zalloc flags: %x\n", cpu);
14518522511bSHartmut Brandt 			backtrace();
14528522511bSHartmut Brandt 			once++;
14538522511bSHartmut Brandt 		}
14548522511bSHartmut Brandt 	}
14558522511bSHartmut Brandt #endif
14568522511bSHartmut Brandt 
14574c1cc01cSJohn Baldwin 	if (!(flags & M_NOWAIT)) {
14584c1cc01cSJohn Baldwin 		KASSERT(curthread->td_intr_nesting_level == 0,
1459a163d034SWarner Losh 		   ("malloc(M_WAITOK) in interrupt context"));
146026306795SJohn Baldwin 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
146126306795SJohn Baldwin 		    "malloc() of \"%s\"", zone->uz_name);
14624c1cc01cSJohn Baldwin 	}
14634c1cc01cSJohn Baldwin 
1464a553d4b8SJeff Roberson zalloc_restart:
14658355f576SJeff Roberson 	cpu = PCPU_GET(cpuid);
1466d88797c2SBosko Milekic 	CPU_LOCK(cpu);
14678355f576SJeff Roberson 	cache = &zone->uz_cpu[cpu];
14688355f576SJeff Roberson 
14698355f576SJeff Roberson zalloc_start:
14708355f576SJeff Roberson 	bucket = cache->uc_allocbucket;
14718355f576SJeff Roberson 
14728355f576SJeff Roberson 	if (bucket) {
1473cae33c14SJeff Roberson 		if (bucket->ub_cnt > 0) {
1474cae33c14SJeff Roberson 			bucket->ub_cnt--;
1475cae33c14SJeff Roberson 			item = bucket->ub_bucket[bucket->ub_cnt];
14768355f576SJeff Roberson #ifdef INVARIANTS
1477cae33c14SJeff Roberson 			bucket->ub_bucket[bucket->ub_cnt] = NULL;
14788355f576SJeff Roberson #endif
14798355f576SJeff Roberson 			KASSERT(item != NULL,
14808355f576SJeff Roberson 			    ("uma_zalloc: Bucket pointer mangled."));
14818355f576SJeff Roberson 			cache->uc_allocs++;
1482639c9550SJeff Roberson #ifdef INVARIANTS
148381f71edaSMatt Jacob 			ZONE_LOCK(zone);
1484639c9550SJeff Roberson 			uma_dbg_alloc(zone, NULL, item);
148581f71edaSMatt Jacob 			ZONE_UNLOCK(zone);
1486639c9550SJeff Roberson #endif
1487d88797c2SBosko Milekic 			CPU_UNLOCK(cpu);
14888355f576SJeff Roberson 			if (zone->uz_ctor)
14898355f576SJeff Roberson 				zone->uz_ctor(item, zone->uz_size, udata);
14902cc35ff9SJeff Roberson 			if (flags & M_ZERO)
14912cc35ff9SJeff Roberson 				bzero(item, zone->uz_size);
14928355f576SJeff Roberson 			return (item);
14938355f576SJeff Roberson 		} else if (cache->uc_freebucket) {
14948355f576SJeff Roberson 			/*
14958355f576SJeff Roberson 			 * We have run out of items in our allocbucket.
14968355f576SJeff Roberson 			 * See if we can switch with our free bucket.
14978355f576SJeff Roberson 			 */
1498cae33c14SJeff Roberson 			if (cache->uc_freebucket->ub_cnt > 0) {
14998355f576SJeff Roberson 				uma_bucket_t swap;
15008355f576SJeff Roberson 
15018355f576SJeff Roberson #ifdef UMA_DEBUG_ALLOC
15028355f576SJeff Roberson 				printf("uma_zalloc: Swapping empty with alloc.\n");
15038355f576SJeff Roberson #endif
15048355f576SJeff Roberson 				swap = cache->uc_freebucket;
15058355f576SJeff Roberson 				cache->uc_freebucket = cache->uc_allocbucket;
15068355f576SJeff Roberson 				cache->uc_allocbucket = swap;
15078355f576SJeff Roberson 
15088355f576SJeff Roberson 				goto zalloc_start;
15098355f576SJeff Roberson 			}
15108355f576SJeff Roberson 		}
15118355f576SJeff Roberson 	}
1512a553d4b8SJeff Roberson 	ZONE_LOCK(zone);
1513a553d4b8SJeff Roberson 	/* Since we have locked the zone we may as well send back our stats */
1514a553d4b8SJeff Roberson 	zone->uz_allocs += cache->uc_allocs;
1515a553d4b8SJeff Roberson 	cache->uc_allocs = 0;
15168355f576SJeff Roberson 
1517a553d4b8SJeff Roberson 	/* Our old one is now a free bucket */
1518a553d4b8SJeff Roberson 	if (cache->uc_allocbucket) {
1519cae33c14SJeff Roberson 		KASSERT(cache->uc_allocbucket->ub_cnt == 0,
1520a553d4b8SJeff Roberson 		    ("uma_zalloc_arg: Freeing a non free bucket."));
1521a553d4b8SJeff Roberson 		LIST_INSERT_HEAD(&zone->uz_free_bucket,
1522a553d4b8SJeff Roberson 		    cache->uc_allocbucket, ub_link);
1523a553d4b8SJeff Roberson 		cache->uc_allocbucket = NULL;
1524a553d4b8SJeff Roberson 	}
15258355f576SJeff Roberson 
1526a553d4b8SJeff Roberson 	/* Check the free list for a new alloc bucket */
1527a553d4b8SJeff Roberson 	if ((bucket = LIST_FIRST(&zone->uz_full_bucket)) != NULL) {
1528cae33c14SJeff Roberson 		KASSERT(bucket->ub_cnt != 0,
1529a553d4b8SJeff Roberson 		    ("uma_zalloc_arg: Returning an empty bucket."));
15308355f576SJeff Roberson 
1531a553d4b8SJeff Roberson 		LIST_REMOVE(bucket, ub_link);
1532a553d4b8SJeff Roberson 		cache->uc_allocbucket = bucket;
1533a553d4b8SJeff Roberson 		ZONE_UNLOCK(zone);
15348355f576SJeff Roberson 		goto zalloc_start;
1535a553d4b8SJeff Roberson 	}
1536bbee39c6SJeff Roberson 	/* We are no longer associated with this cpu!!! */
1537d88797c2SBosko Milekic 	CPU_UNLOCK(cpu);
1538bbee39c6SJeff Roberson 
1539a553d4b8SJeff Roberson 	/* Bump up our uz_count so we get here less */
1540cae33c14SJeff Roberson 	if (zone->uz_count < BUCKET_MAX)
1541a553d4b8SJeff Roberson 		zone->uz_count++;
1542a553d4b8SJeff Roberson 
15438355f576SJeff Roberson 	/*
1544a553d4b8SJeff Roberson 	 * Now let's just fill a bucket and put it on the free list.  If that
1545a553d4b8SJeff Roberson 	 * works we'll restart the allocation from the beginning.
1546bbee39c6SJeff Roberson 	 */
1547bbee39c6SJeff Roberson 
1548bbee39c6SJeff Roberson 	if (uma_zalloc_bucket(zone, flags)) {
1549bbee39c6SJeff Roberson 		ZONE_UNLOCK(zone);
1550bbee39c6SJeff Roberson 		goto zalloc_restart;
1551bbee39c6SJeff Roberson 	}
1552bbee39c6SJeff Roberson 	ZONE_UNLOCK(zone);
1553bbee39c6SJeff Roberson 	/*
1554bbee39c6SJeff Roberson 	 * We may not be able to get a bucket so return an actual item.
1555bbee39c6SJeff Roberson 	 */
1556bbee39c6SJeff Roberson #ifdef UMA_DEBUG
1557bbee39c6SJeff Roberson 	printf("uma_zalloc_arg: Bucketzone returned NULL\n");
1558bbee39c6SJeff Roberson #endif
1559bbee39c6SJeff Roberson 
1560bbee39c6SJeff Roberson 	return (uma_zalloc_internal(zone, udata, flags));
1561bbee39c6SJeff Roberson }
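/*
 * The fast path above is easiest to see in isolation.  Below is a
 * minimal, standalone model of the two-bucket per-cpu cache: one bucket
 * is drained by allocations, the other is filled by frees, and the two
 * are swapped when the alloc side runs dry.  Locking, statistics, and
 * the zone-layer fallback are omitted, and all names are illustrative.
 */
#if 0
#include <stdio.h>

#define NITEMS	4

struct bucket {
	int	cnt;
	void	*item[NITEMS];
};

struct cache {
	struct bucket	*allocb;	/* drained by allocations */
	struct bucket	*freeb;		/* filled by frees */
};

static void *
cache_alloc(struct cache *c)
{
	struct bucket *swap;

	if (c->allocb->cnt > 0)
		return (c->allocb->item[--c->allocb->cnt]);
	/* Alloc bucket is empty; swap with the free bucket if it helps. */
	if (c->freeb->cnt > 0) {
		swap = c->freeb;
		c->freeb = c->allocb;
		c->allocb = swap;
		return (cache_alloc(c));
	}
	return (NULL);		/* The real code falls back to the zone. */
}

static int
cache_free(struct cache *c, void *item)
{
	if (c->freeb->cnt < NITEMS) {
		c->freeb->item[c->freeb->cnt++] = item;
		return (1);
	}
	return (0);		/* The real code hands buckets to the zone. */
}

int
main(void)
{
	struct bucket a = { 0 }, f = { 0 };
	struct cache c = { &a, &f };
	int x;

	cache_free(&c, &x);
	/* The freed item comes straight back out of the cache. */
	printf("%d\n", cache_alloc(&c) == (void *)&x);	/* prints 1 */
	return (0);
}
#endif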
1562bbee39c6SJeff Roberson 
1563bbee39c6SJeff Roberson static uma_slab_t
1564bbee39c6SJeff Roberson uma_zone_slab(uma_zone_t zone, int flags)
1565bbee39c6SJeff Roberson {
1566bbee39c6SJeff Roberson 	uma_slab_t slab;
1567bbee39c6SJeff Roberson 
1568bbee39c6SJeff Roberson 	/*
1569bbee39c6SJeff Roberson 	 * This is to prevent us from recursively trying to allocate
1570bbee39c6SJeff Roberson 	 * buckets.  The problem is that if an allocation forces us to
1571bbee39c6SJeff Roberson 	 * grab a new bucket, we will call page_alloc, which will go off
1572bbee39c6SJeff Roberson 	 * and cause the vm to allocate vm_map_entries.  If we need new
1573bbee39c6SJeff Roberson 	 * buckets there too, we will recurse in kmem_alloc and bad
1574bbee39c6SJeff Roberson 	 * things happen.  So instead we return a NULL bucket, and make
1575bbee39c6SJeff Roberson 	 * the code that allocates buckets smart enough to deal with it.
1576bbee39c6SJeff Roberson 	 */
1577cae33c14SJeff Roberson 	if (zone->uz_flags & UMA_ZFLAG_INTERNAL && zone->uz_recurse != 0)
1578bbee39c6SJeff Roberson 		return (NULL);
1579bbee39c6SJeff Roberson 
1580bbee39c6SJeff Roberson 	slab = NULL;
1581bbee39c6SJeff Roberson 
1582bbee39c6SJeff Roberson 	for (;;) {
1583bbee39c6SJeff Roberson 		/*
1584bbee39c6SJeff Roberson 		 * Find a slab with some space.  Prefer slabs that are partially
1585bbee39c6SJeff Roberson 		 * used over those that are entirely free.  This helps to
1586bbee39c6SJeff Roberson 		 * reduce fragmentation.
1587bbee39c6SJeff Roberson 		 */
1588bbee39c6SJeff Roberson 		if (zone->uz_free != 0) {
1589bbee39c6SJeff Roberson 			if (!LIST_EMPTY(&zone->uz_part_slab)) {
1590bbee39c6SJeff Roberson 				slab = LIST_FIRST(&zone->uz_part_slab);
1591bbee39c6SJeff Roberson 			} else {
1592bbee39c6SJeff Roberson 				slab = LIST_FIRST(&zone->uz_free_slab);
1593bbee39c6SJeff Roberson 				LIST_REMOVE(slab, us_link);
1594bbee39c6SJeff Roberson 				LIST_INSERT_HEAD(&zone->uz_part_slab, slab,
1595bbee39c6SJeff Roberson 				us_link);
1596bbee39c6SJeff Roberson 			}
1597bbee39c6SJeff Roberson 			return (slab);
1598bbee39c6SJeff Roberson 		}
1599bbee39c6SJeff Roberson 
1600bbee39c6SJeff Roberson 		/*
1601bbee39c6SJeff Roberson 		 * M_NOVM means don't ask at all!
1602bbee39c6SJeff Roberson 		 */
1603bbee39c6SJeff Roberson 		if (flags & M_NOVM)
1604bbee39c6SJeff Roberson 			break;
1605bbee39c6SJeff Roberson 
1606bbee39c6SJeff Roberson 		if (zone->uz_maxpages &&
1607bbee39c6SJeff Roberson 		    zone->uz_pages >= zone->uz_maxpages) {
1608bbee39c6SJeff Roberson 			zone->uz_flags |= UMA_ZFLAG_FULL;
1609bbee39c6SJeff Roberson 
1610ebc85edfSJeff Roberson 			if (flags & M_NOWAIT)
1611bbee39c6SJeff Roberson 				break;
1612ebc85edfSJeff Roberson 			else
1613ebc85edfSJeff Roberson 				msleep(zone, &zone->uz_lock, PVM, "zonelimit", 0);
1614bbee39c6SJeff Roberson 			continue;
1615bbee39c6SJeff Roberson 		}
1616bbee39c6SJeff Roberson 		zone->uz_recurse++;
1617bbee39c6SJeff Roberson 		slab = slab_zalloc(zone, flags);
1618bbee39c6SJeff Roberson 		zone->uz_recurse--;
1619bbee39c6SJeff Roberson 		/*
1620bbee39c6SJeff Roberson 		 * If we got a slab here it's safe to mark it partially used
1621bbee39c6SJeff Roberson 		 * and return.  We assume that the caller is going to remove
1622bbee39c6SJeff Roberson 		 * at least one item.
1623bbee39c6SJeff Roberson 		 */
1624bbee39c6SJeff Roberson 		if (slab) {
1625bbee39c6SJeff Roberson 			LIST_INSERT_HEAD(&zone->uz_part_slab, slab, us_link);
1626bbee39c6SJeff Roberson 			return (slab);
1627bbee39c6SJeff Roberson 		}
1628bbee39c6SJeff Roberson 		/*
1629bbee39c6SJeff Roberson 		 * We might not have been able to get a slab but another cpu
1630bbee39c6SJeff Roberson 		 * could have while we were unlocked.  Check again before we
1631bbee39c6SJeff Roberson 		 * fail.
1632bbee39c6SJeff Roberson 		 */
1633ebc85edfSJeff Roberson 		if (flags & M_NOWAIT)
1634bbee39c6SJeff Roberson 			flags |= M_NOVM;
1635bbee39c6SJeff Roberson 	}
1636bbee39c6SJeff Roberson 	return (slab);
1637bbee39c6SJeff Roberson }
1638bbee39c6SJeff Roberson 
1639d56368d7SBosko Milekic static void *
1640bbee39c6SJeff Roberson uma_slab_alloc(uma_zone_t zone, uma_slab_t slab)
1641bbee39c6SJeff Roberson {
1642bbee39c6SJeff Roberson 	void *item;
1643bbee39c6SJeff Roberson 	u_int8_t freei;
1644bbee39c6SJeff Roberson 
1645bbee39c6SJeff Roberson 	freei = slab->us_firstfree;
1646bbee39c6SJeff Roberson 	slab->us_firstfree = slab->us_freelist[freei];
1647bbee39c6SJeff Roberson 	item = slab->us_data + (zone->uz_rsize * freei);
1648bbee39c6SJeff Roberson 
1649bbee39c6SJeff Roberson 	slab->us_freecount--;
1650bbee39c6SJeff Roberson 	zone->uz_free--;
1651bbee39c6SJeff Roberson #ifdef INVARIANTS
1652bbee39c6SJeff Roberson 	uma_dbg_alloc(zone, slab, item);
1653bbee39c6SJeff Roberson #endif
1654bbee39c6SJeff Roberson 	/* Move this slab to the full list */
1655bbee39c6SJeff Roberson 	if (slab->us_freecount == 0) {
1656bbee39c6SJeff Roberson 		LIST_REMOVE(slab, us_link);
1657bbee39c6SJeff Roberson 		LIST_INSERT_HEAD(&zone->uz_full_slab, slab, us_link);
1658bbee39c6SJeff Roberson 	}
1659bbee39c6SJeff Roberson 
1660bbee39c6SJeff Roberson 	return (item);
1661bbee39c6SJeff Roberson }
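/*
 * us_firstfree and us_freelist[] above form a linked list of free item
 * indices embedded in the slab header.  Below is a minimal, standalone
 * model of popping that list; the names and sizes are illustrative.
 */
#if 0
#include <stdio.h>

#define IPERS	3

int
main(void)
{
	unsigned char firstfree = 0;
	unsigned char freelist[IPERS] = { 1, 2, 0 };	/* 0 -> 1 -> 2 */
	unsigned char freei;
	int i;

	for (i = 0; i < IPERS; i++) {
		/* Pop the head index, as uma_slab_alloc() does. */
		freei = firstfree;
		firstfree = freelist[freei];
		printf("allocated item %d\n", freei);	/* 0, then 1, 2 */
	}
	return (0);
}
#endif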
1662bbee39c6SJeff Roberson 
1663bbee39c6SJeff Roberson static int
1664bbee39c6SJeff Roberson uma_zalloc_bucket(uma_zone_t zone, int flags)
1665bbee39c6SJeff Roberson {
1666bbee39c6SJeff Roberson 	uma_bucket_t bucket;
1667bbee39c6SJeff Roberson 	uma_slab_t slab;
1668bbee39c6SJeff Roberson 
1669bbee39c6SJeff Roberson 	/*
1670a553d4b8SJeff Roberson 	 * Try this zone's free list first so we don't allocate extra buckets.
16718355f576SJeff Roberson 	 */
1672a553d4b8SJeff Roberson 
1673bbee39c6SJeff Roberson 	if ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
1674cae33c14SJeff Roberson 		KASSERT(bucket->ub_cnt == 0,
1675bbee39c6SJeff Roberson 		    ("uma_zalloc_bucket: Bucket on free list is not empty."));
1676a553d4b8SJeff Roberson 		LIST_REMOVE(bucket, ub_link);
1677bbee39c6SJeff Roberson 	} else {
167818aa2de5SJeff Roberson 		int bflags;
167918aa2de5SJeff Roberson 
1680cae33c14SJeff Roberson 		bflags = (flags & ~M_ZERO);
168120e8e865SBosko Milekic 		if (zone->uz_flags & UMA_ZFLAG_CACHEONLY)
168218aa2de5SJeff Roberson 			bflags |= M_NOVM;
168318aa2de5SJeff Roberson 
1684bbee39c6SJeff Roberson 		ZONE_UNLOCK(zone);
1685cae33c14SJeff Roberson 		bucket = bucket_alloc(zone->uz_count, bflags);
1686bbee39c6SJeff Roberson 		ZONE_LOCK(zone);
1687bbee39c6SJeff Roberson 	}
1688bbee39c6SJeff Roberson 
1689bbee39c6SJeff Roberson 	if (bucket == NULL)
1690bbee39c6SJeff Roberson 		return (0);
1691bbee39c6SJeff Roberson 
1692bbee39c6SJeff Roberson #ifdef SMP
1693a553d4b8SJeff Roberson 	/*
1694bbee39c6SJeff Roberson 	 * This code is here to limit the number of simultaneous bucket fills
1695bbee39c6SJeff Roberson 	 * for any given zone to the number of per cpu caches in this zone. This
1696bbee39c6SJeff Roberson 	 * is done so that we don't allocate more memory than we really need.
1697a553d4b8SJeff Roberson 	 */
1698bbee39c6SJeff Roberson 	if (zone->uz_fills >= mp_ncpus)
1699bbee39c6SJeff Roberson 		goto done;
1700a553d4b8SJeff Roberson 
1701bbee39c6SJeff Roberson #endif
1702bbee39c6SJeff Roberson 	zone->uz_fills++;
1703bbee39c6SJeff Roberson 
1704bbee39c6SJeff Roberson 	/* Try to keep the buckets totally full */
1705cae33c14SJeff Roberson 	while (bucket->ub_cnt < bucket->ub_entries &&
1706d11e0ba5SJeff Roberson 	    (slab = uma_zone_slab(zone, flags)) != NULL) {
1707bbee39c6SJeff Roberson 		while (slab->us_freecount &&
1708cae33c14SJeff Roberson 		    bucket->ub_cnt < bucket->ub_entries) {
1709cae33c14SJeff Roberson 			bucket->ub_bucket[bucket->ub_cnt++] =
1710bbee39c6SJeff Roberson 			    uma_slab_alloc(zone, slab);
1711bbee39c6SJeff Roberson 		}
1712bbee39c6SJeff Roberson 		/* Don't block on the next fill */
1713bbee39c6SJeff Roberson 		flags |= M_NOWAIT;
17148355f576SJeff Roberson 	}
17158355f576SJeff Roberson 
1716bbee39c6SJeff Roberson 	zone->uz_fills--;
1717bbee39c6SJeff Roberson 
1718cae33c14SJeff Roberson 	if (bucket->ub_cnt != 0) {
1719bbee39c6SJeff Roberson 		LIST_INSERT_HEAD(&zone->uz_full_bucket,
1720bbee39c6SJeff Roberson 		    bucket, ub_link);
1721bbee39c6SJeff Roberson 		return (1);
1722bbee39c6SJeff Roberson 	}
1723bbee39c6SJeff Roberson #ifdef SMP
1724bbee39c6SJeff Roberson done:
1725bbee39c6SJeff Roberson #endif
1726cae33c14SJeff Roberson 	bucket_free(bucket);
1727bbee39c6SJeff Roberson 
1728bbee39c6SJeff Roberson 	return (0);
1729bbee39c6SJeff Roberson }
17308355f576SJeff Roberson /*
1731bbee39c6SJeff Roberson  * Allocates an item for an internal zone
17328355f576SJeff Roberson  *
17338355f576SJeff Roberson  * Arguments
17348355f576SJeff Roberson  *	zone   The zone to alloc for.
17358355f576SJeff Roberson  *	udata  The data to be passed to the constructor.
1736a163d034SWarner Losh  *	flags  M_WAITOK, M_NOWAIT, M_ZERO.
17378355f576SJeff Roberson  *
17388355f576SJeff Roberson  * Returns
17398355f576SJeff Roberson  *	NULL if there is no memory and M_NOWAIT is set
1740bbee39c6SJeff Roberson  *	An item if successful
17418355f576SJeff Roberson  */
17428355f576SJeff Roberson 
17438355f576SJeff Roberson static void *
1744bbee39c6SJeff Roberson uma_zalloc_internal(uma_zone_t zone, void *udata, int flags)
17458355f576SJeff Roberson {
17468355f576SJeff Roberson 	uma_slab_t slab;
17478355f576SJeff Roberson 	void *item;
17488355f576SJeff Roberson 
17498355f576SJeff Roberson 	item = NULL;
17508355f576SJeff Roberson 
17518355f576SJeff Roberson #ifdef UMA_DEBUG_ALLOC
17528355f576SJeff Roberson 	printf("INTERNAL: Allocating one item from %s(%p)\n", zone->uz_name, zone);
17538355f576SJeff Roberson #endif
17548355f576SJeff Roberson 	ZONE_LOCK(zone);
17558355f576SJeff Roberson 
1756bbee39c6SJeff Roberson 	slab = uma_zone_slab(zone, flags);
1757bbee39c6SJeff Roberson 	if (slab == NULL) {
1758bce97791SJeff Roberson 		ZONE_UNLOCK(zone);
1759a553d4b8SJeff Roberson 		return (NULL);
1760bce97791SJeff Roberson 	}
1761a553d4b8SJeff Roberson 
1762bbee39c6SJeff Roberson 	item = uma_slab_alloc(zone, slab);
17638355f576SJeff Roberson 
17648355f576SJeff Roberson 	ZONE_UNLOCK(zone);
17658355f576SJeff Roberson 
17663370c5bfSJeff Roberson 	if (zone->uz_ctor != NULL)
17678355f576SJeff Roberson 		zone->uz_ctor(item, zone->uz_size, udata);
17682cc35ff9SJeff Roberson 	if (flags & M_ZERO)
17692cc35ff9SJeff Roberson 		bzero(item, zone->uz_size);
17708355f576SJeff Roberson 
17718355f576SJeff Roberson 	return (item);
17728355f576SJeff Roberson }
17738355f576SJeff Roberson 
17748355f576SJeff Roberson /* See uma.h */
17758355f576SJeff Roberson void
17768355f576SJeff Roberson uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
17778355f576SJeff Roberson {
17788355f576SJeff Roberson 	uma_cache_t cache;
17798355f576SJeff Roberson 	uma_bucket_t bucket;
17804741dcbfSJeff Roberson 	int bflags;
17818355f576SJeff Roberson 	int cpu;
17825c133dfaSBosko Milekic 	int skip;
17838355f576SJeff Roberson 
17848355f576SJeff Roberson 	/* This is the fast path free */
17855c133dfaSBosko Milekic 	skip = 0;
17868355f576SJeff Roberson #ifdef UMA_DEBUG_ALLOC_1
17878355f576SJeff Roberson 	printf("Freeing item %p to %s(%p)\n", item, zone->uz_name, zone);
17888355f576SJeff Roberson #endif
1789af7f9b97SJeff Roberson 	/*
1790af7f9b97SJeff Roberson 	 * The race here is acceptable.  If we miss it we'll just have to wait
1791af7f9b97SJeff Roberson 	 * a little longer for the limits to be reset.
1792af7f9b97SJeff Roberson 	 */
1793af7f9b97SJeff Roberson 
1794af7f9b97SJeff Roberson 	if (zone->uz_flags & UMA_ZFLAG_FULL)
1795af7f9b97SJeff Roberson 		goto zfree_internal;
1796af7f9b97SJeff Roberson 
17975c133dfaSBosko Milekic 	if (zone->uz_dtor) {
1798bba739abSJeff Roberson 		zone->uz_dtor(item, zone->uz_size, udata);
17995c133dfaSBosko Milekic 		skip = 1;
18005c133dfaSBosko Milekic 	}
1801bba739abSJeff Roberson 
1802a553d4b8SJeff Roberson zfree_restart:
18038355f576SJeff Roberson 	cpu = PCPU_GET(cpuid);
1804d88797c2SBosko Milekic 	CPU_LOCK(cpu);
18058355f576SJeff Roberson 	cache = &zone->uz_cpu[cpu];
18068355f576SJeff Roberson 
18078355f576SJeff Roberson zfree_start:
18088355f576SJeff Roberson 	bucket = cache->uc_freebucket;
18098355f576SJeff Roberson 
18108355f576SJeff Roberson 	if (bucket) {
1811a553d4b8SJeff Roberson 		/*
1812a553d4b8SJeff Roberson 		 * Do we have room in our bucket? It is OK for this uz count
1813a553d4b8SJeff Roberson 		 * check to be slightly out of sync.
1814a553d4b8SJeff Roberson 		 */
1815a553d4b8SJeff Roberson 
1816cae33c14SJeff Roberson 		if (bucket->ub_cnt < bucket->ub_entries) {
1817cae33c14SJeff Roberson 			KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL,
18188355f576SJeff Roberson 			    ("uma_zfree: Freeing to non free bucket index."));
1819cae33c14SJeff Roberson 			bucket->ub_bucket[bucket->ub_cnt] = item;
1820cae33c14SJeff Roberson 			bucket->ub_cnt++;
1821b9ba8931SJeff Roberson #ifdef INVARIANTS
182281f71edaSMatt Jacob 			ZONE_LOCK(zone);
1823b9ba8931SJeff Roberson 			if (zone->uz_flags & UMA_ZFLAG_MALLOC)
1824b9ba8931SJeff Roberson 				uma_dbg_free(zone, udata, item);
1825b9ba8931SJeff Roberson 			else
1826b9ba8931SJeff Roberson 				uma_dbg_free(zone, NULL, item);
182781f71edaSMatt Jacob 			ZONE_UNLOCK(zone);
1828b9ba8931SJeff Roberson #endif
1829d88797c2SBosko Milekic 			CPU_UNLOCK(cpu);
18308355f576SJeff Roberson 			return;
18318355f576SJeff Roberson 		} else if (cache->uc_allocbucket) {
18328355f576SJeff Roberson #ifdef UMA_DEBUG_ALLOC
18338355f576SJeff Roberson 			printf("uma_zfree: Swapping buckets.\n");
18348355f576SJeff Roberson #endif
18358355f576SJeff Roberson 			/*
18368355f576SJeff Roberson 			 * We have run out of space in our freebucket.
18378355f576SJeff Roberson 			 * See if we can switch with our alloc bucket.
18388355f576SJeff Roberson 			 */
1839cae33c14SJeff Roberson 			if (cache->uc_allocbucket->ub_cnt <
1840cae33c14SJeff Roberson 			    cache->uc_freebucket->ub_cnt) {
18418355f576SJeff Roberson 				uma_bucket_t swap;
18428355f576SJeff Roberson 
18438355f576SJeff Roberson 				swap = cache->uc_freebucket;
18448355f576SJeff Roberson 				cache->uc_freebucket = cache->uc_allocbucket;
18458355f576SJeff Roberson 				cache->uc_allocbucket = swap;
18468355f576SJeff Roberson 
18478355f576SJeff Roberson 				goto zfree_start;
18488355f576SJeff Roberson 			}
18498355f576SJeff Roberson 		}
18508355f576SJeff Roberson 	}
18518355f576SJeff Roberson 
18528355f576SJeff Roberson 	/*
1853a553d4b8SJeff Roberson 	 * We can get here for two reasons:
18548355f576SJeff Roberson 	 *
18558355f576SJeff Roberson 	 * 1) The buckets are NULL
1856a553d4b8SJeff Roberson 	 * 2) The alloc and free buckets are both somewhat full.
18578355f576SJeff Roberson 	 *
18588355f576SJeff Roberson 	 */
18598355f576SJeff Roberson 
18608355f576SJeff Roberson 	ZONE_LOCK(zone);
18618355f576SJeff Roberson 
18628355f576SJeff Roberson 	bucket = cache->uc_freebucket;
18638355f576SJeff Roberson 	cache->uc_freebucket = NULL;
18648355f576SJeff Roberson 
18658355f576SJeff Roberson 	/* Can we throw this on the zone full list? */
18668355f576SJeff Roberson 	if (bucket != NULL) {
18678355f576SJeff Roberson #ifdef UMA_DEBUG_ALLOC
18688355f576SJeff Roberson 		printf("uma_zfree: Putting old bucket on the free list.\n");
18698355f576SJeff Roberson #endif
1870cae33c14SJeff Roberson 		/* ub_cnt is pointing to the last free item */
1871cae33c14SJeff Roberson 		KASSERT(bucket->ub_cnt != 0,
18728355f576SJeff Roberson 		    ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n"));
18738355f576SJeff Roberson 		LIST_INSERT_HEAD(&zone->uz_full_bucket,
18748355f576SJeff Roberson 		    bucket, ub_link);
18758355f576SJeff Roberson 	}
1876a553d4b8SJeff Roberson 	if ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
1877a553d4b8SJeff Roberson 		LIST_REMOVE(bucket, ub_link);
1878a553d4b8SJeff Roberson 		ZONE_UNLOCK(zone);
1879a553d4b8SJeff Roberson 		cache->uc_freebucket = bucket;
1880a553d4b8SJeff Roberson 		goto zfree_start;
1881a553d4b8SJeff Roberson 	}
1882a553d4b8SJeff Roberson 	/* We're done with this CPU now */
1883d88797c2SBosko Milekic 	CPU_UNLOCK(cpu);
1884a553d4b8SJeff Roberson 
1885a553d4b8SJeff Roberson 	/* And the zone.. */
1886a553d4b8SJeff Roberson 	ZONE_UNLOCK(zone);
1887a553d4b8SJeff Roberson 
18888355f576SJeff Roberson #ifdef UMA_DEBUG_ALLOC
18898355f576SJeff Roberson 	printf("uma_zfree: Allocating new free bucket.\n");
18908355f576SJeff Roberson #endif
18914741dcbfSJeff Roberson 	bflags = M_NOWAIT;
18924741dcbfSJeff Roberson 
189320e8e865SBosko Milekic 	if (zone->uz_flags & UMA_ZFLAG_CACHEONLY)
18944741dcbfSJeff Roberson 		bflags |= M_NOVM;
1895cae33c14SJeff Roberson 	bucket = bucket_alloc(zone->uz_count, bflags);
18964741dcbfSJeff Roberson 	if (bucket) {
1897a553d4b8SJeff Roberson 		ZONE_LOCK(zone);
1898a553d4b8SJeff Roberson 		LIST_INSERT_HEAD(&zone->uz_free_bucket,
1899a553d4b8SJeff Roberson 		    bucket, ub_link);
19008355f576SJeff Roberson 		ZONE_UNLOCK(zone);
1901a553d4b8SJeff Roberson 		goto zfree_restart;
19028355f576SJeff Roberson 	}
19038355f576SJeff Roberson 
1904a553d4b8SJeff Roberson 	/*
1905a553d4b8SJeff Roberson 	 * If nothing else caught this, we'll just do an internal free.
1906a553d4b8SJeff Roberson 	 */
19078355f576SJeff Roberson 
1908af7f9b97SJeff Roberson zfree_internal:
1909af7f9b97SJeff Roberson 
191048bf8725SBosko Milekic #ifdef INVARIANTS
191148bf8725SBosko Milekic 	/*
191248bf8725SBosko Milekic 	 * If the dtor and the uma_dbg_free in uma_zfree_internal will be
191348bf8725SBosko Milekic 	 * skipped because we already called the dtor above, then we have
191448bf8725SBosko Milekic 	 * to take care of the uma_dbg_free here ourselves.
191548bf8725SBosko Milekic 	 */
191648bf8725SBosko Milekic 	if (skip) {
191748bf8725SBosko Milekic 		ZONE_LOCK(zone);
191848bf8725SBosko Milekic 		if (zone->uz_flags & UMA_ZFLAG_MALLOC)
191948bf8725SBosko Milekic 			uma_dbg_free(zone, udata, item);
192048bf8725SBosko Milekic 		else
192148bf8725SBosko Milekic 			uma_dbg_free(zone, NULL, item);
192248bf8725SBosko Milekic 		ZONE_UNLOCK(zone);
192348bf8725SBosko Milekic 	}
192448bf8725SBosko Milekic #endif
19255c133dfaSBosko Milekic 	uma_zfree_internal(zone, item, udata, skip);
19268355f576SJeff Roberson 
19278355f576SJeff Roberson 	return;
19288355f576SJeff Roberson 
19298355f576SJeff Roberson }
19308355f576SJeff Roberson 
19318355f576SJeff Roberson /*
19328355f576SJeff Roberson  * Frees an item to an INTERNAL zone or allocates a free bucket
19338355f576SJeff Roberson  *
19348355f576SJeff Roberson  * Arguments:
19358355f576SJeff Roberson  *	zone   The zone to free to
19368355f576SJeff Roberson  *	item   The item we're freeing
19378355f576SJeff Roberson  *	udata  User supplied data for the dtor
19388355f576SJeff Roberson  *	skip   Skip the dtor, it was done in uma_zfree_arg
19398355f576SJeff Roberson  */
19408355f576SJeff Roberson 
19418355f576SJeff Roberson static void
19428355f576SJeff Roberson uma_zfree_internal(uma_zone_t zone, void *item, void *udata, int skip)
19438355f576SJeff Roberson {
19448355f576SJeff Roberson 	uma_slab_t slab;
19458355f576SJeff Roberson 	u_int8_t *mem;
19468355f576SJeff Roberson 	u_int8_t freei;
19478355f576SJeff Roberson 
1948bba739abSJeff Roberson 	if (!skip && zone->uz_dtor)
1949bba739abSJeff Roberson 		zone->uz_dtor(item, zone->uz_size, udata);
1950bba739abSJeff Roberson 
19518355f576SJeff Roberson 	ZONE_LOCK(zone);
19528355f576SJeff Roberson 
19538355f576SJeff Roberson 	if (!(zone->uz_flags & UMA_ZFLAG_MALLOC)) {
19548355f576SJeff Roberson 		mem = (u_int8_t *)((unsigned long)item & (~UMA_SLAB_MASK));
195599571dc3SJeff Roberson 		if (zone->uz_flags & UMA_ZFLAG_HASH)
19568355f576SJeff Roberson 			slab = hash_sfind(&zone->uz_hash, mem);
19578355f576SJeff Roberson 		else {
19588355f576SJeff Roberson 			mem += zone->uz_pgoff;
19598355f576SJeff Roberson 			slab = (uma_slab_t)mem;
19608355f576SJeff Roberson 		}
19618355f576SJeff Roberson 	} else {
19628355f576SJeff Roberson 		slab = (uma_slab_t)udata;
19638355f576SJeff Roberson 	}
19648355f576SJeff Roberson 
19658355f576SJeff Roberson 	/* Do we need to remove from any lists? */
19668355f576SJeff Roberson 	if (slab->us_freecount+1 == zone->uz_ipers) {
19678355f576SJeff Roberson 		LIST_REMOVE(slab, us_link);
19688355f576SJeff Roberson 		LIST_INSERT_HEAD(&zone->uz_free_slab, slab, us_link);
19698355f576SJeff Roberson 	} else if (slab->us_freecount == 0) {
19708355f576SJeff Roberson 		LIST_REMOVE(slab, us_link);
19718355f576SJeff Roberson 		LIST_INSERT_HEAD(&zone->uz_part_slab, slab, us_link);
19728355f576SJeff Roberson 	}
19738355f576SJeff Roberson 
19748355f576SJeff Roberson 	/* Slab management stuff */
19758355f576SJeff Roberson 	freei = ((unsigned long)item - (unsigned long)slab->us_data)
19768355f576SJeff Roberson 		/ zone->uz_rsize;
19778355f576SJeff Roberson 
1978639c9550SJeff Roberson #ifdef INVARIANTS
1979639c9550SJeff Roberson 	if (!skip)
1980639c9550SJeff Roberson 		uma_dbg_free(zone, slab, item);
19818355f576SJeff Roberson #endif
1982639c9550SJeff Roberson 
19838355f576SJeff Roberson 	slab->us_freelist[freei] = slab->us_firstfree;
19848355f576SJeff Roberson 	slab->us_firstfree = freei;
19858355f576SJeff Roberson 	slab->us_freecount++;
19868355f576SJeff Roberson 
19878355f576SJeff Roberson 	/* Zone statistics */
19888355f576SJeff Roberson 	zone->uz_free++;
19898355f576SJeff Roberson 
1990af7f9b97SJeff Roberson 	if (zone->uz_flags & UMA_ZFLAG_FULL) {
1991af7f9b97SJeff Roberson 		if (zone->uz_pages < zone->uz_maxpages)
1992af7f9b97SJeff Roberson 			zone->uz_flags &= ~UMA_ZFLAG_FULL;
1993af7f9b97SJeff Roberson 
1994af7f9b97SJeff Roberson 		/* We can handle one more allocation */
199574c924b5SJeff Roberson 		wakeup_one(zone);
1996af7f9b97SJeff Roberson 	}
1997af7f9b97SJeff Roberson 
1998605cbd6aSJeff Roberson 	ZONE_UNLOCK(zone);
19998355f576SJeff Roberson }
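/*
 * The slab lookup above is plain pointer arithmetic: mask the item
 * address down to its page to find the slab, then divide the offset
 * from the first item by the item size to get the free-list index.
 * Below is a minimal, standalone sketch with illustrative stand-ins
 * for the UMA constants.
 */
#if 0
#include <stdio.h>

#define SLAB_SIZE	4096UL
#define SLAB_MASK	(SLAB_SIZE - 1)

int
main(void)
{
	unsigned long data = 0x1000;	/* us_data: the first item */
	unsigned long item = 0x1300;	/* some item in the same slab */
	unsigned long rsize = 0x100;	/* real (aligned) item size */

	printf("page base %#lx, index %lu\n",
	    item & ~SLAB_MASK,			/* 0x1000 */
	    (item - data) / rsize);		/* 3 */
	return (0);
}
#endif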
20008355f576SJeff Roberson 
20018355f576SJeff Roberson /* See uma.h */
20028355f576SJeff Roberson void
2003736ee590SJeff Roberson uma_zone_set_max(uma_zone_t zone, int nitems)
2004736ee590SJeff Roberson {
2005736ee590SJeff Roberson 	ZONE_LOCK(zone);
2006736ee590SJeff Roberson 	if (zone->uz_ppera > 1)
2007af7f9b97SJeff Roberson 		zone->uz_maxpages = nitems * zone->uz_ppera;
2008736ee590SJeff Roberson 	else
2009736ee590SJeff Roberson 		zone->uz_maxpages = nitems / zone->uz_ipers;
201028bc4419SJeff Roberson 
2011d4d6aee5SAndrew R. Reiter 	if (zone->uz_maxpages * zone->uz_ipers < nitems)
2012d4d6aee5SAndrew R. Reiter 		zone->uz_maxpages++;
201328bc4419SJeff Roberson 
2014736ee590SJeff Roberson 	ZONE_UNLOCK(zone);
2015736ee590SJeff Roberson }
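/*
 * A standalone sketch of the limit arithmetic above.  Multi-page zones
 * have a single item per allocation, so their limit is counted in
 * allocations; small-item zones round the page count up.  The helper
 * and its values are illustrative.
 */
#if 0
#include <stdio.h>

static int
maxpages_for(int nitems, int ipers, int ppera)
{
	int maxpages;

	if (ppera > 1)
		return (nitems * ppera);
	maxpages = nitems / ipers;
	if (maxpages * ipers < nitems)
		maxpages++;	/* round up, as uma_zone_set_max() does */
	return (maxpages);
}

int
main(void)
{
	/* 100 items at 50 per slab -> 2 pages; 101 -> 3; 10 big -> 40. */
	printf("%d %d %d\n", maxpages_for(100, 50, 1),
	    maxpages_for(101, 50, 1), maxpages_for(10, 1, 4));
	return (0);
}
#endif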
2016736ee590SJeff Roberson 
2017736ee590SJeff Roberson /* See uma.h */
2018736ee590SJeff Roberson void
20198355f576SJeff Roberson uma_zone_set_freef(uma_zone_t zone, uma_free freef)
20208355f576SJeff Roberson {
20218355f576SJeff Roberson 	ZONE_LOCK(zone);
20228355f576SJeff Roberson 
20238355f576SJeff Roberson 	zone->uz_freef = freef;
20248355f576SJeff Roberson 
20258355f576SJeff Roberson 	ZONE_UNLOCK(zone);
20268355f576SJeff Roberson }
20278355f576SJeff Roberson 
20288355f576SJeff Roberson /* See uma.h */
20298355f576SJeff Roberson void
20308355f576SJeff Roberson uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
20318355f576SJeff Roberson {
20328355f576SJeff Roberson 	ZONE_LOCK(zone);
20338355f576SJeff Roberson 
20348355f576SJeff Roberson 	zone->uz_flags |= UMA_ZFLAG_PRIVALLOC;
20358355f576SJeff Roberson 	zone->uz_allocf = allocf;
20368355f576SJeff Roberson 
20378355f576SJeff Roberson 	ZONE_UNLOCK(zone);
20388355f576SJeff Roberson }
20398355f576SJeff Roberson 
20408355f576SJeff Roberson /* See uma.h */
20418355f576SJeff Roberson int
20428355f576SJeff Roberson uma_zone_set_obj(uma_zone_t zone, struct vm_object *obj, int count)
20438355f576SJeff Roberson {
20448355f576SJeff Roberson 	int pages;
20458355f576SJeff Roberson 	vm_offset_t kva;
20468355f576SJeff Roberson 
20478355f576SJeff Roberson 	mtx_lock(&Giant);
20488355f576SJeff Roberson 
20498355f576SJeff Roberson 	pages = count / zone->uz_ipers;
20508355f576SJeff Roberson 
20518355f576SJeff Roberson 	if (pages * zone->uz_ipers < count)
20528355f576SJeff Roberson 		pages++;
2053a553d4b8SJeff Roberson 
20548355f576SJeff Roberson 	kva = kmem_alloc_pageable(kernel_map, pages * UMA_SLAB_SIZE);
20558355f576SJeff Roberson 
2056a553d4b8SJeff Roberson 	if (kva == 0) {
2057a553d4b8SJeff Roberson 		mtx_unlock(&Giant);
20588355f576SJeff Roberson 		return (0);
20598355f576SJeff Roberson 	}
20608355f576SJeff Roberson 
20618355f576SJeff Roberson 
2062a553d4b8SJeff Roberson 	if (obj == NULL)
2063a553d4b8SJeff Roberson 		obj = vm_object_allocate(OBJT_DEFAULT,
2064c7173f58SJeff Roberson 		    pages);
206582774d80SAlan Cox 	else {
206682774d80SAlan Cox 		VM_OBJECT_LOCK_INIT(obj);
20678355f576SJeff Roberson 		_vm_object_allocate(OBJT_DEFAULT,
2068c7173f58SJeff Roberson 		    pages, obj);
206982774d80SAlan Cox 	}
2070a553d4b8SJeff Roberson 	ZONE_LOCK(zone);
2071a553d4b8SJeff Roberson 	zone->uz_kva = kva;
2072a553d4b8SJeff Roberson 	zone->uz_obj = obj;
2073a553d4b8SJeff Roberson 	zone->uz_maxpages = pages;
20748355f576SJeff Roberson 
20758355f576SJeff Roberson 	zone->uz_allocf = obj_alloc;
20768355f576SJeff Roberson 	zone->uz_flags |= UMA_ZFLAG_NOFREE | UMA_ZFLAG_PRIVALLOC;
20778355f576SJeff Roberson 
20788355f576SJeff Roberson 	ZONE_UNLOCK(zone);
2079a553d4b8SJeff Roberson 	mtx_unlock(&Giant);
20808355f576SJeff Roberson 
20818355f576SJeff Roberson 	return (1);
20828355f576SJeff Roberson }
20838355f576SJeff Roberson 
20848355f576SJeff Roberson /* See uma.h */
20858355f576SJeff Roberson void
20868355f576SJeff Roberson uma_prealloc(uma_zone_t zone, int items)
20878355f576SJeff Roberson {
20888355f576SJeff Roberson 	int slabs;
20898355f576SJeff Roberson 	uma_slab_t slab;
20908355f576SJeff Roberson 
20918355f576SJeff Roberson 	ZONE_LOCK(zone);
20928355f576SJeff Roberson 	slabs = items / zone->uz_ipers;
20938355f576SJeff Roberson 	if (slabs * zone->uz_ipers < items)
20948355f576SJeff Roberson 		slabs++;
20958355f576SJeff Roberson 
20968355f576SJeff Roberson 	while (slabs > 0) {
2097a163d034SWarner Losh 		slab = slab_zalloc(zone, M_WAITOK);
20988355f576SJeff Roberson 		LIST_INSERT_HEAD(&zone->uz_free_slab, slab, us_link);
20998355f576SJeff Roberson 		slabs--;
21008355f576SJeff Roberson 	}
21018355f576SJeff Roberson 	ZONE_UNLOCK(zone);
21028355f576SJeff Roberson }
21038355f576SJeff Roberson 
21048355f576SJeff Roberson /* See uma.h */
21058355f576SJeff Roberson void
21068355f576SJeff Roberson uma_reclaim(void)
21078355f576SJeff Roberson {
21088355f576SJeff Roberson 	/*
21098355f576SJeff Roberson 	 * You might think that the delay below would improve performance since
21108355f576SJeff Roberson 	 * the allocator will give away memory that it may ask for immediately.
21118355f576SJeff Roberson 	 * Really, it makes things worse, since cpu cycles are so much cheaper
21128355f576SJeff Roberson 	 * than disk activity.
21138355f576SJeff Roberson 	 */
21148355f576SJeff Roberson #if 0
21158355f576SJeff Roberson 	static struct timeval tv = {0};
21168355f576SJeff Roberson 	struct timeval now;
21178355f576SJeff Roberson 	getmicrouptime(&now);
21188355f576SJeff Roberson 	if (now.tv_sec > tv.tv_sec + 30)
21198355f576SJeff Roberson 		tv = now;
21208355f576SJeff Roberson 	else
21218355f576SJeff Roberson 		return;
21228355f576SJeff Roberson #endif
21238355f576SJeff Roberson #ifdef UMA_DEBUG
21248355f576SJeff Roberson 	printf("UMA: vm asked us to release pages!\n");
21258355f576SJeff Roberson #endif
212686bbae32SJeff Roberson 	bucket_enable();
21278355f576SJeff Roberson 	zone_foreach(zone_drain);
21288355f576SJeff Roberson 
21298355f576SJeff Roberson 	/*
21308355f576SJeff Roberson 	 * Some slabs may have been freed, but this zone was visited early
21318355f576SJeff Roberson 	 * above, so drain it again to free pages that only become empty once
21328355f576SJeff Roberson 	 * the other zones are drained.  We have to do the same for buckets.
21338355f576SJeff Roberson 	 */
2134cae33c14SJeff Roberson 	zone_drain_common(slabzone, 0);
2135cae33c14SJeff Roberson 	bucket_zone_drain();
21368355f576SJeff Roberson }
21378355f576SJeff Roberson 
21388355f576SJeff Roberson void *
21398355f576SJeff Roberson uma_large_malloc(int size, int wait)
21408355f576SJeff Roberson {
21418355f576SJeff Roberson 	void *mem;
21428355f576SJeff Roberson 	uma_slab_t slab;
21438355f576SJeff Roberson 	u_int8_t flags;
21448355f576SJeff Roberson 
2145bbee39c6SJeff Roberson 	slab = uma_zalloc_internal(slabzone, NULL, wait);
21468355f576SJeff Roberson 	if (slab == NULL)
21478355f576SJeff Roberson 		return (NULL);
21488355f576SJeff Roberson 
21498355f576SJeff Roberson 	mem = page_alloc(NULL, size, &flags, wait);
21508355f576SJeff Roberson 	if (mem) {
215199571dc3SJeff Roberson 		vsetslab((vm_offset_t)mem, slab);
21528355f576SJeff Roberson 		slab->us_data = mem;
21538355f576SJeff Roberson 		slab->us_flags = flags | UMA_SLAB_MALLOC;
21548355f576SJeff Roberson 		slab->us_size = size;
21558355f576SJeff Roberson 	} else {
21568355f576SJeff Roberson 		uma_zfree_internal(slabzone, slab, NULL, 0);
21578355f576SJeff Roberson 	}
21588355f576SJeff Roberson 
21598355f576SJeff Roberson 
21608355f576SJeff Roberson 	return (mem);
21618355f576SJeff Roberson }
21628355f576SJeff Roberson 
21638355f576SJeff Roberson void
21648355f576SJeff Roberson uma_large_free(uma_slab_t slab)
21658355f576SJeff Roberson {
216699571dc3SJeff Roberson 	vsetobj((vm_offset_t)slab->us_data, kmem_object);
2167125ee0d1STor Egge 	/*
21685103186cSAlan Cox 	 * XXX: We get a lock order reversal if we don't have Giant:
2169125ee0d1STor Egge 	 * vm_map_remove (locks system map) -> vm_map_delete ->
2170125ee0d1STor Egge 	 *    vm_map_entry_unwire -> vm_fault_unwire -> mtx_lock(&Giant)
2171125ee0d1STor Egge 	 */
2172125ee0d1STor Egge 	if (!mtx_owned(&Giant)) {
2173125ee0d1STor Egge 		mtx_lock(&Giant);
2174125ee0d1STor Egge 		page_free(slab->us_data, slab->us_size, slab->us_flags);
2175125ee0d1STor Egge 		mtx_unlock(&Giant);
2176125ee0d1STor Egge 	} else
21778355f576SJeff Roberson 		page_free(slab->us_data, slab->us_size, slab->us_flags);
21788355f576SJeff Roberson 	uma_zfree_internal(slabzone, slab, NULL, 0);
21798355f576SJeff Roberson }
21808355f576SJeff Roberson 
21818355f576SJeff Roberson void
21828355f576SJeff Roberson uma_print_stats(void)
21838355f576SJeff Roberson {
21848355f576SJeff Roberson 	zone_foreach(uma_print_zone);
21858355f576SJeff Roberson }
21868355f576SJeff Roberson 
21878355f576SJeff Roberson void
21888355f576SJeff Roberson uma_print_zone(uma_zone_t zone)
21898355f576SJeff Roberson {
21908355f576SJeff Roberson 	printf("%s(%p) size %d(%d) flags %d ipers %d ppera %d out %d free %d\n",
21918355f576SJeff Roberson 	    zone->uz_name, zone, zone->uz_size, zone->uz_rsize, zone->uz_flags,
21928355f576SJeff Roberson 	    zone->uz_ipers, zone->uz_ppera,
21938355f576SJeff Roberson 	    (zone->uz_ipers * (zone->uz_pages / zone->uz_ppera)) - zone->uz_free, zone->uz_free);
21948355f576SJeff Roberson }
21958355f576SJeff Roberson 
21968355f576SJeff Roberson /*
21978355f576SJeff Roberson  * Sysctl handler for vm.zone
21988355f576SJeff Roberson  *
21998355f576SJeff Roberson  * stolen from vm_zone.c
22008355f576SJeff Roberson  */
22018355f576SJeff Roberson static int
22028355f576SJeff Roberson sysctl_vm_zone(SYSCTL_HANDLER_ARGS)
22038355f576SJeff Roberson {
22048355f576SJeff Roberson 	int error, len, cnt;
22058355f576SJeff Roberson 	const int linesize = 128;	/* conservative */
22068355f576SJeff Roberson 	int totalfree;
22078355f576SJeff Roberson 	char *tmpbuf, *offset;
22088355f576SJeff Roberson 	uma_zone_t z;
22098355f576SJeff Roberson 	char *p;
2210f828e5beSJeff Roberson 	int cpu;
2211f828e5beSJeff Roberson 	int cachefree;
2212f828e5beSJeff Roberson 	uma_bucket_t bucket;
2213f828e5beSJeff Roberson 	uma_cache_t cache;
22148355f576SJeff Roberson 
22158355f576SJeff Roberson 	cnt = 0;
22160da47b2fSJeff Roberson 	mtx_lock(&uma_mtx);
22178355f576SJeff Roberson 	LIST_FOREACH(z, &uma_zones, uz_link)
22188355f576SJeff Roberson 		cnt++;
22190da47b2fSJeff Roberson 	mtx_unlock(&uma_mtx);
22208355f576SJeff Roberson 	MALLOC(tmpbuf, char *, (cnt == 0 ? 1 : cnt) * linesize,
2221a163d034SWarner Losh 			M_TEMP, M_WAITOK);
22228355f576SJeff Roberson 	len = snprintf(tmpbuf, linesize,
22238355f576SJeff Roberson 	    "\nITEM            SIZE     LIMIT     USED    FREE  REQUESTS\n\n");
22248355f576SJeff Roberson 	if (cnt == 0)
22258355f576SJeff Roberson 		tmpbuf[len - 1] = '\0';
22268355f576SJeff Roberson 	error = SYSCTL_OUT(req, tmpbuf, cnt == 0 ? len-1 : len);
22278355f576SJeff Roberson 	if (error || cnt == 0)
22288355f576SJeff Roberson 		goto out;
22298355f576SJeff Roberson 	offset = tmpbuf;
2230f4af24d5SJeff Roberson 	mtx_lock(&uma_mtx);
22318355f576SJeff Roberson 	LIST_FOREACH(z, &uma_zones, uz_link) {
22328355f576SJeff Roberson 		if (cnt == 0)	/* list may have changed size */
22338355f576SJeff Roberson 			break;
22341c35e213SBosko Milekic 		if (!(z->uz_flags & UMA_ZFLAG_INTERNAL)) {
2235f828e5beSJeff Roberson 			for (cpu = 0; cpu < maxcpu; cpu++) {
2236f828e5beSJeff Roberson 				if (CPU_ABSENT(cpu))
2237f828e5beSJeff Roberson 					continue;
2238f828e5beSJeff Roberson 				CPU_LOCK(cpu);
2239f828e5beSJeff Roberson 			}
22401c35e213SBosko Milekic 		}
22418355f576SJeff Roberson 		ZONE_LOCK(z);
2242f828e5beSJeff Roberson 		cachefree = 0;
22431c35e213SBosko Milekic 		if (!(z->uz_flags & UMA_ZFLAG_INTERNAL)) {
2244f828e5beSJeff Roberson 			for (cpu = 0; cpu < maxcpu; cpu++) {
2245f828e5beSJeff Roberson 				if (CPU_ABSENT(cpu))
2246f828e5beSJeff Roberson 					continue;
2247f828e5beSJeff Roberson 				cache = &z->uz_cpu[cpu];
2248f828e5beSJeff Roberson 				if (cache->uc_allocbucket != NULL)
2249cae33c14SJeff Roberson 					cachefree += cache->uc_allocbucket->ub_cnt;
2250f828e5beSJeff Roberson 				if (cache->uc_freebucket != NULL)
2251cae33c14SJeff Roberson 					cachefree += cache->uc_freebucket->ub_cnt;
2252f828e5beSJeff Roberson 				CPU_UNLOCK(cpu);
2253f828e5beSJeff Roberson 			}
22541c35e213SBosko Milekic 		}
2255f828e5beSJeff Roberson 		LIST_FOREACH(bucket, &z->uz_full_bucket, ub_link) {
2256cae33c14SJeff Roberson 			cachefree += bucket->ub_cnt;
2257f828e5beSJeff Roberson 		}
2258f828e5beSJeff Roberson 		totalfree = z->uz_free + cachefree;
22598355f576SJeff Roberson 		len = snprintf(offset, linesize,
22608355f576SJeff Roberson 		    "%-12.12s  %6.6u, %8.8u, %6.6u, %6.6u, %8.8llu\n",
22618355f576SJeff Roberson 		    z->uz_name, z->uz_size,
22628355f576SJeff Roberson 		    z->uz_maxpages * z->uz_ipers,
22638355f576SJeff Roberson 		    (z->uz_ipers * (z->uz_pages / z->uz_ppera)) - totalfree,
22648355f576SJeff Roberson 		    totalfree,
22658355f576SJeff Roberson 		    (unsigned long long)z->uz_allocs);
22668355f576SJeff Roberson 		ZONE_UNLOCK(z);
22678355f576SJeff Roberson 		for (p = offset + 12; p > offset && *p == ' '; --p)
22688355f576SJeff Roberson 			/* nothing */ ;
22698355f576SJeff Roberson 		p[1] = ':';
22708355f576SJeff Roberson 		cnt--;
22718355f576SJeff Roberson 		offset += len;
22728355f576SJeff Roberson 	}
2273f4af24d5SJeff Roberson 	mtx_unlock(&uma_mtx);
22748355f576SJeff Roberson 	*offset++ = '\0';
22758355f576SJeff Roberson 	error = SYSCTL_OUT(req, tmpbuf, offset - tmpbuf);
22768355f576SJeff Roberson out:
22778355f576SJeff Roberson 	FREE(tmpbuf, M_TEMP);
22788355f576SJeff Roberson 	return (error);
22798355f576SJeff Roberson }
2280