xref: /freebsd/sys/vm/uma_core.c (revision fe267a559009cbf34f9341666fe4d88a92c02d5e)
160727d8bSWarner Losh /*-
2*fe267a55SPedro F. Giffuni  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3*fe267a55SPedro F. Giffuni  *
4ef72505eSJeff Roberson  * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson <jeff@FreeBSD.org>
508ecce74SRobert Watson  * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
6ae4e9636SRobert Watson  * Copyright (c) 2004-2006 Robert N. M. Watson
708ecce74SRobert Watson  * All rights reserved.
88355f576SJeff Roberson  *
98355f576SJeff Roberson  * Redistribution and use in source and binary forms, with or without
108355f576SJeff Roberson  * modification, are permitted provided that the following conditions
118355f576SJeff Roberson  * are met:
128355f576SJeff Roberson  * 1. Redistributions of source code must retain the above copyright
138355f576SJeff Roberson  *    notice unmodified, this list of conditions, and the following
148355f576SJeff Roberson  *    disclaimer.
158355f576SJeff Roberson  * 2. Redistributions in binary form must reproduce the above copyright
168355f576SJeff Roberson  *    notice, this list of conditions and the following disclaimer in the
178355f576SJeff Roberson  *    documentation and/or other materials provided with the distribution.
188355f576SJeff Roberson  *
198355f576SJeff Roberson  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
208355f576SJeff Roberson  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
218355f576SJeff Roberson  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
228355f576SJeff Roberson  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
238355f576SJeff Roberson  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
248355f576SJeff Roberson  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
258355f576SJeff Roberson  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
268355f576SJeff Roberson  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
278355f576SJeff Roberson  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
288355f576SJeff Roberson  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
298355f576SJeff Roberson  */
308355f576SJeff Roberson 
318355f576SJeff Roberson /*
328355f576SJeff Roberson  * uma_core.c  Implementation of the Universal Memory allocator
338355f576SJeff Roberson  *
348355f576SJeff Roberson  * This allocator is intended to replace the multitude of similar object caches
358355f576SJeff Roberson  * in the standard FreeBSD kernel.  The intent is to be flexible as well as
36763df3ecSPedro F. Giffuni  * efficient.  A primary design goal is to return unused memory to the rest of
378355f576SJeff Roberson  * the system.  This will make the system as a whole more flexible due to the
388355f576SJeff Roberson  * ability to move memory to subsystems which most need it instead of leaving
398355f576SJeff Roberson  * pools of reserved memory unused.
408355f576SJeff Roberson  *
418355f576SJeff Roberson  * The basic ideas stem from similar slab/zone based allocators whose algorithms
428355f576SJeff Roberson  * are well known.
438355f576SJeff Roberson  *
448355f576SJeff Roberson  */
458355f576SJeff Roberson 
468355f576SJeff Roberson /*
478355f576SJeff Roberson  * TODO:
488355f576SJeff Roberson  *	- Improve memory usage for large allocations
498355f576SJeff Roberson  *	- Investigate cache size adjustments
508355f576SJeff Roberson  */
518355f576SJeff Roberson 
52874651b1SDavid E. O'Brien #include <sys/cdefs.h>
53874651b1SDavid E. O'Brien __FBSDID("$FreeBSD$");
54874651b1SDavid E. O'Brien 
5548c5777eSRobert Watson #include "opt_ddb.h"
568355f576SJeff Roberson #include "opt_param.h"
578d689e04SGleb Smirnoff #include "opt_vm.h"
5848c5777eSRobert Watson 
598355f576SJeff Roberson #include <sys/param.h>
608355f576SJeff Roberson #include <sys/systm.h>
61ef72505eSJeff Roberson #include <sys/bitset.h>
629b43bc27SAndriy Gapon #include <sys/eventhandler.h>
638355f576SJeff Roberson #include <sys/kernel.h>
648355f576SJeff Roberson #include <sys/types.h>
658355f576SJeff Roberson #include <sys/queue.h>
668355f576SJeff Roberson #include <sys/malloc.h>
673659f747SRobert Watson #include <sys/ktr.h>
688355f576SJeff Roberson #include <sys/lock.h>
698355f576SJeff Roberson #include <sys/sysctl.h>
708355f576SJeff Roberson #include <sys/mutex.h>
714c1cc01cSJohn Baldwin #include <sys/proc.h>
7210cb2424SMark Murray #include <sys/random.h>
7389f6b863SAttilio Rao #include <sys/rwlock.h>
747a52a97eSRobert Watson #include <sys/sbuf.h>
75a2de44abSAlexander Motin #include <sys/sched.h>
768355f576SJeff Roberson #include <sys/smp.h>
77e60b2fcbSGleb Smirnoff #include <sys/taskqueue.h>
7886bbae32SJeff Roberson #include <sys/vmmeter.h>
7986bbae32SJeff Roberson 
808355f576SJeff Roberson #include <vm/vm.h>
818355f576SJeff Roberson #include <vm/vm_object.h>
828355f576SJeff Roberson #include <vm/vm_page.h>
83a4915c21SAttilio Rao #include <vm/vm_pageout.h>
848355f576SJeff Roberson #include <vm/vm_param.h>
858355f576SJeff Roberson #include <vm/vm_map.h>
868355f576SJeff Roberson #include <vm/vm_kern.h>
878355f576SJeff Roberson #include <vm/vm_extern.h>
888355f576SJeff Roberson #include <vm/uma.h>
898355f576SJeff Roberson #include <vm/uma_int.h>
90639c9550SJeff Roberson #include <vm/uma_dbg.h>
918355f576SJeff Roberson 
9248c5777eSRobert Watson #include <ddb/ddb.h>
9348c5777eSRobert Watson 
948d689e04SGleb Smirnoff #ifdef DEBUG_MEMGUARD
958d689e04SGleb Smirnoff #include <vm/memguard.h>
968d689e04SGleb Smirnoff #endif
978d689e04SGleb Smirnoff 
988355f576SJeff Roberson /*
99099a0e58SBosko Milekic  * This is the zone and keg from which all zones are spawned.  The idea is that
100099a0e58SBosko Milekic  * even the zone & keg heads are allocated from the allocator, so we use the
101099a0e58SBosko Milekic  * bss section to bootstrap us.
1028355f576SJeff Roberson  */
103099a0e58SBosko Milekic static struct uma_keg masterkeg;
104099a0e58SBosko Milekic static struct uma_zone masterzone_k;
105099a0e58SBosko Milekic static struct uma_zone masterzone_z;
106099a0e58SBosko Milekic static uma_zone_t kegs = &masterzone_k;
107099a0e58SBosko Milekic static uma_zone_t zones = &masterzone_z;
1088355f576SJeff Roberson 
1098355f576SJeff Roberson /* This is the zone from which all of uma_slab_t's are allocated. */
1108355f576SJeff Roberson static uma_zone_t slabzone;
1118355f576SJeff Roberson 
1128355f576SJeff Roberson /*
1138355f576SJeff Roberson  * The initial hash tables come out of this zone so they can be allocated
1148355f576SJeff Roberson  * prior to malloc coming up.
1158355f576SJeff Roberson  */
1168355f576SJeff Roberson static uma_zone_t hashzone;
1178355f576SJeff Roberson 
1181e319f6dSRobert Watson /* The boot-time adjusted value for cache line alignment. */
119e4cd31ddSJeff Roberson int uma_align_cache = 64 - 1;
1201e319f6dSRobert Watson 
121961647dfSJeff Roberson static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");
122961647dfSJeff Roberson 
1238355f576SJeff Roberson /*
12486bbae32SJeff Roberson  * Are we allowed to allocate buckets?
12586bbae32SJeff Roberson  */
12686bbae32SJeff Roberson static int bucketdisable = 1;
12786bbae32SJeff Roberson 
128099a0e58SBosko Milekic /* Linked list of all kegs in the system */
12913e403fdSAntoine Brodin static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);
1308355f576SJeff Roberson 
13103175483SAlexander Motin /* Linked list of all cache-only zones in the system */
13203175483SAlexander Motin static LIST_HEAD(,uma_zone) uma_cachezones =
13303175483SAlexander Motin     LIST_HEAD_INITIALIZER(uma_cachezones);
13403175483SAlexander Motin 
135111fbcd5SBryan Venteicher /* This RW lock protects the keg list */
136fe933c1dSMateusz Guzik static struct rwlock_padalign __exclusive_cache_line uma_rwlock;
1378355f576SJeff Roberson 
138ac0a6fd0SGleb Smirnoff /*
139ac0a6fd0SGleb Smirnoff  * Pointer and counter to pool of pages, that is preallocated at
140ac0a6fd0SGleb Smirnoff  * startup to bootstrap UMA.  Early zones continue to use the pool
141ac0a6fd0SGleb Smirnoff  * until it is depleted, so allocations may happen after boot, thus
142ac0a6fd0SGleb Smirnoff  * we need a mutex to protect it.
143ac0a6fd0SGleb Smirnoff  */
144ac0a6fd0SGleb Smirnoff static char *bootmem;
145ac0a6fd0SGleb Smirnoff static int boot_pages;
146ac0a6fd0SGleb Smirnoff static struct mtx uma_boot_pages_mtx;
1478355f576SJeff Roberson 
14895c4bf75SKonstantin Belousov static struct sx uma_drain_lock;
14995c4bf75SKonstantin Belousov 
1508355f576SJeff Roberson /* Is the VM done starting up? */
1518355f576SJeff Roberson static int booted = 0;
152342f1793SAlan Cox #define	UMA_STARTUP	1
153342f1793SAlan Cox #define	UMA_STARTUP2	2
1548355f576SJeff Roberson 
155ef72505eSJeff Roberson /*
1569643769aSJeff Roberson  * This is the handle used to schedule events that need to happen
1579643769aSJeff Roberson  * outside of the allocation fast path.
1589643769aSJeff Roberson  */
1598355f576SJeff Roberson static struct callout uma_callout;
1609643769aSJeff Roberson #define	UMA_TIMEOUT	20		/* Seconds for callout interval. */
1618355f576SJeff Roberson 
1628355f576SJeff Roberson /*
1638355f576SJeff Roberson  * This structure is passed as the zone ctor arg so that I don't have to create
1648355f576SJeff Roberson  * a special allocation function just for zones.
1658355f576SJeff Roberson  */
1668355f576SJeff Roberson struct uma_zctor_args {
167bb196eb4SMatthew D Fleming 	const char *name;
168c3bdc05fSAndrew R. Reiter 	size_t size;
1698355f576SJeff Roberson 	uma_ctor ctor;
1708355f576SJeff Roberson 	uma_dtor dtor;
1718355f576SJeff Roberson 	uma_init uminit;
1728355f576SJeff Roberson 	uma_fini fini;
1730095a784SJeff Roberson 	uma_import import;
1740095a784SJeff Roberson 	uma_release release;
1750095a784SJeff Roberson 	void *arg;
176099a0e58SBosko Milekic 	uma_keg_t keg;
177099a0e58SBosko Milekic 	int align;
17885dcf349SGleb Smirnoff 	uint32_t flags;
179099a0e58SBosko Milekic };
180099a0e58SBosko Milekic 
181099a0e58SBosko Milekic struct uma_kctor_args {
182099a0e58SBosko Milekic 	uma_zone_t zone;
183099a0e58SBosko Milekic 	size_t size;
184099a0e58SBosko Milekic 	uma_init uminit;
185099a0e58SBosko Milekic 	uma_fini fini;
1868355f576SJeff Roberson 	int align;
18785dcf349SGleb Smirnoff 	uint32_t flags;
1888355f576SJeff Roberson };
1898355f576SJeff Roberson 
190cae33c14SJeff Roberson struct uma_bucket_zone {
191cae33c14SJeff Roberson 	uma_zone_t	ubz_zone;
192cae33c14SJeff Roberson 	char		*ubz_name;
193fc03d22bSJeff Roberson 	int		ubz_entries;	/* Number of items it can hold. */
194fc03d22bSJeff Roberson 	int		ubz_maxsize;	/* Maximum allocation size per-item. */
195cae33c14SJeff Roberson };
196cae33c14SJeff Roberson 
197f9d27e75SRobert Watson /*
198fc03d22bSJeff Roberson  * Compute the actual number of bucket entries to pack them in power
199fc03d22bSJeff Roberson  * of two sizes for more efficient space utilization.
200f9d27e75SRobert Watson  */
201fc03d22bSJeff Roberson #define	BUCKET_SIZE(n)						\
202fc03d22bSJeff Roberson     (((sizeof(void *) * (n)) - sizeof(struct uma_bucket)) / sizeof(void *))
203fc03d22bSJeff Roberson 
2041aa6c758SAlexander Motin #define	BUCKET_MAX	BUCKET_SIZE(256)
205fc03d22bSJeff Roberson 
206fc03d22bSJeff Roberson struct uma_bucket_zone bucket_zones[] = {
2076fd34d6fSJeff Roberson 	{ NULL, "4 Bucket", BUCKET_SIZE(4), 4096 },
208f3932e90SAlexander Motin 	{ NULL, "6 Bucket", BUCKET_SIZE(6), 3072 },
2096fd34d6fSJeff Roberson 	{ NULL, "8 Bucket", BUCKET_SIZE(8), 2048 },
210f3932e90SAlexander Motin 	{ NULL, "12 Bucket", BUCKET_SIZE(12), 1536 },
2116fd34d6fSJeff Roberson 	{ NULL, "16 Bucket", BUCKET_SIZE(16), 1024 },
212fc03d22bSJeff Roberson 	{ NULL, "32 Bucket", BUCKET_SIZE(32), 512 },
213fc03d22bSJeff Roberson 	{ NULL, "64 Bucket", BUCKET_SIZE(64), 256 },
214fc03d22bSJeff Roberson 	{ NULL, "128 Bucket", BUCKET_SIZE(128), 128 },
2151aa6c758SAlexander Motin 	{ NULL, "256 Bucket", BUCKET_SIZE(256), 64 },
216fc03d22bSJeff Roberson 	{ NULL, NULL, 0}
217fc03d22bSJeff Roberson };
218cae33c14SJeff Roberson 
2192019094aSRobert Watson /*
2202019094aSRobert Watson  * Flags and enumerations to be passed to internal functions.
2212019094aSRobert Watson  */
222ef72505eSJeff Roberson enum zfreeskip { SKIP_NONE = 0, SKIP_DTOR, SKIP_FINI };
223b23f72e9SBrian Feldman 
2248355f576SJeff Roberson /* Prototypes.. */
2258355f576SJeff Roberson 
226f2c2231eSRyan Stone static void *noobj_alloc(uma_zone_t, vm_size_t, uint8_t *, int);
227f2c2231eSRyan Stone static void *page_alloc(uma_zone_t, vm_size_t, uint8_t *, int);
228f2c2231eSRyan Stone static void *startup_alloc(uma_zone_t, vm_size_t, uint8_t *, int);
229f2c2231eSRyan Stone static void page_free(void *, vm_size_t, uint8_t);
230e20a199fSJeff Roberson static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int);
2319643769aSJeff Roberson static void cache_drain(uma_zone_t);
2328355f576SJeff Roberson static void bucket_drain(uma_zone_t, uma_bucket_t);
233aaa8bb16SJeff Roberson static void bucket_cache_drain(uma_zone_t zone);
234b23f72e9SBrian Feldman static int keg_ctor(void *, int, void *, int);
235099a0e58SBosko Milekic static void keg_dtor(void *, int, void *);
236b23f72e9SBrian Feldman static int zone_ctor(void *, int, void *, int);
2379c2cd7e5SJeff Roberson static void zone_dtor(void *, int, void *);
238b23f72e9SBrian Feldman static int zero_init(void *, int, int);
239e20a199fSJeff Roberson static void keg_small_init(uma_keg_t keg);
240e20a199fSJeff Roberson static void keg_large_init(uma_keg_t keg);
2418355f576SJeff Roberson static void zone_foreach(void (*zfunc)(uma_zone_t));
2428355f576SJeff Roberson static void zone_timeout(uma_zone_t zone);
2430aef6126SJeff Roberson static int hash_alloc(struct uma_hash *);
2440aef6126SJeff Roberson static int hash_expand(struct uma_hash *, struct uma_hash *);
2450aef6126SJeff Roberson static void hash_free(struct uma_hash *hash);
2468355f576SJeff Roberson static void uma_timeout(void *);
2478355f576SJeff Roberson static void uma_startup3(void);
248e20a199fSJeff Roberson static void *zone_alloc_item(uma_zone_t, void *, int);
2490095a784SJeff Roberson static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip);
25086bbae32SJeff Roberson static void bucket_enable(void);
251cae33c14SJeff Roberson static void bucket_init(void);
2526fd34d6fSJeff Roberson static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int);
2536fd34d6fSJeff Roberson static void bucket_free(uma_zone_t zone, uma_bucket_t, void *);
254cae33c14SJeff Roberson static void bucket_zone_drain(void);
2556fd34d6fSJeff Roberson static uma_bucket_t zone_alloc_bucket(uma_zone_t zone, void *, int flags);
256e20a199fSJeff Roberson static uma_slab_t zone_fetch_slab(uma_zone_t zone, uma_keg_t last, int flags);
257e20a199fSJeff Roberson static uma_slab_t zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int flags);
2580095a784SJeff Roberson static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
2590095a784SJeff Roberson static void slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item);
260e20a199fSJeff Roberson static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
26185dcf349SGleb Smirnoff     uma_fini fini, int align, uint32_t flags);
2620095a784SJeff Roberson static int zone_import(uma_zone_t zone, void **bucket, int max, int flags);
2630095a784SJeff Roberson static void zone_release(uma_zone_t zone, void **bucket, int cnt);
26448343a2fSGleb Smirnoff static void uma_zero_item(void *item, uma_zone_t zone);
265bbee39c6SJeff Roberson 
2668355f576SJeff Roberson void uma_print_zone(uma_zone_t);
2678355f576SJeff Roberson void uma_print_stats(void);
2687a52a97eSRobert Watson static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
2697a52a97eSRobert Watson static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);
2708355f576SJeff Roberson 
2719542ea7bSGleb Smirnoff #ifdef INVARIANTS
2729542ea7bSGleb Smirnoff static void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item);
2739542ea7bSGleb Smirnoff static void uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item);
2749542ea7bSGleb Smirnoff #endif
2759542ea7bSGleb Smirnoff 
2768355f576SJeff Roberson SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);
2778355f576SJeff Roberson 
2787a52a97eSRobert Watson SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLTYPE_INT,
2797a52a97eSRobert Watson     0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones");
2807a52a97eSRobert Watson 
2817a52a97eSRobert Watson SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
2827a52a97eSRobert Watson     0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");
2837a52a97eSRobert Watson 
2842f891cd5SPawel Jakub Dawidek static int zone_warnings = 1;
285af3b2549SHans Petter Selasky SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RWTUN, &zone_warnings, 0,
2862f891cd5SPawel Jakub Dawidek     "Warn when UMA zones becomes full");
2872f891cd5SPawel Jakub Dawidek 
28886bbae32SJeff Roberson /*
28986bbae32SJeff Roberson  * This routine checks to see whether or not it's safe to enable buckets.
29086bbae32SJeff Roberson  */
29186bbae32SJeff Roberson static void
29286bbae32SJeff Roberson bucket_enable(void)
29386bbae32SJeff Roberson {
294251386b4SMaksim Yevmenkin 	bucketdisable = vm_page_count_min();
29586bbae32SJeff Roberson }
29686bbae32SJeff Roberson 
297dc2c7965SRobert Watson /*
298dc2c7965SRobert Watson  * Initialize bucket_zones, the array of zones of buckets of various sizes.
299dc2c7965SRobert Watson  *
300dc2c7965SRobert Watson  * For each zone, calculate the memory required for each bucket, consisting
301fc03d22bSJeff Roberson  * of the header and an array of pointers.
302dc2c7965SRobert Watson  */
303cae33c14SJeff Roberson static void
304cae33c14SJeff Roberson bucket_init(void)
305cae33c14SJeff Roberson {
306cae33c14SJeff Roberson 	struct uma_bucket_zone *ubz;
307cae33c14SJeff Roberson 	int size;
308cae33c14SJeff Roberson 
309d74e6a1dSAlan Cox 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) {
310cae33c14SJeff Roberson 		size = roundup(sizeof(struct uma_bucket), sizeof(void *));
311cae33c14SJeff Roberson 		size += sizeof(void *) * ubz->ubz_entries;
312cae33c14SJeff Roberson 		ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
313e20a199fSJeff Roberson 		    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
3146fd34d6fSJeff Roberson 		    UMA_ZONE_MTXCLASS | UMA_ZFLAG_BUCKET);
315cae33c14SJeff Roberson 	}
316cae33c14SJeff Roberson }
317cae33c14SJeff Roberson 
318dc2c7965SRobert Watson /*
319dc2c7965SRobert Watson  * Given a desired number of entries for a bucket, return the zone from which
320dc2c7965SRobert Watson  * to allocate the bucket.
321dc2c7965SRobert Watson  */
322dc2c7965SRobert Watson static struct uma_bucket_zone *
323dc2c7965SRobert Watson bucket_zone_lookup(int entries)
324dc2c7965SRobert Watson {
325fc03d22bSJeff Roberson 	struct uma_bucket_zone *ubz;
326dc2c7965SRobert Watson 
327fc03d22bSJeff Roberson 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
328fc03d22bSJeff Roberson 		if (ubz->ubz_entries >= entries)
329fc03d22bSJeff Roberson 			return (ubz);
330fc03d22bSJeff Roberson 	ubz--;
331fc03d22bSJeff Roberson 	return (ubz);
332fc03d22bSJeff Roberson }
333fc03d22bSJeff Roberson 
334fc03d22bSJeff Roberson static int
335fc03d22bSJeff Roberson bucket_select(int size)
336fc03d22bSJeff Roberson {
337fc03d22bSJeff Roberson 	struct uma_bucket_zone *ubz;
338fc03d22bSJeff Roberson 
339fc03d22bSJeff Roberson 	ubz = &bucket_zones[0];
340fc03d22bSJeff Roberson 	if (size > ubz->ubz_maxsize)
341fc03d22bSJeff Roberson 		return MAX((ubz->ubz_maxsize * ubz->ubz_entries) / size, 1);
342fc03d22bSJeff Roberson 
343fc03d22bSJeff Roberson 	for (; ubz->ubz_entries != 0; ubz++)
344fc03d22bSJeff Roberson 		if (ubz->ubz_maxsize < size)
345fc03d22bSJeff Roberson 			break;
346fc03d22bSJeff Roberson 	ubz--;
347fc03d22bSJeff Roberson 	return (ubz->ubz_entries);
348dc2c7965SRobert Watson }
349dc2c7965SRobert Watson 
350cae33c14SJeff Roberson static uma_bucket_t
3516fd34d6fSJeff Roberson bucket_alloc(uma_zone_t zone, void *udata, int flags)
352cae33c14SJeff Roberson {
353cae33c14SJeff Roberson 	struct uma_bucket_zone *ubz;
354cae33c14SJeff Roberson 	uma_bucket_t bucket;
355cae33c14SJeff Roberson 
356cae33c14SJeff Roberson 	/*
357cae33c14SJeff Roberson 	 * This is to stop us from allocating per cpu buckets while we're
3583803b26bSDag-Erling Smørgrav 	 * running out of vm.boot_pages.  Otherwise, we would exhaust the
359cae33c14SJeff Roberson 	 * boot pages.  This also prevents us from allocating buckets in
360cae33c14SJeff Roberson 	 * low memory situations.
361cae33c14SJeff Roberson 	 */
362cae33c14SJeff Roberson 	if (bucketdisable)
363cae33c14SJeff Roberson 		return (NULL);
3646fd34d6fSJeff Roberson 	/*
3656fd34d6fSJeff Roberson 	 * To limit bucket recursion we store the original zone flags
3666fd34d6fSJeff Roberson 	 * in a cookie passed via zalloc_arg/zfree_arg.  This allows the
3676fd34d6fSJeff Roberson 	 * NOVM flag to persist even through deep recursions.  We also
3686fd34d6fSJeff Roberson 	 * store ZFLAG_BUCKET once we have recursed attempting to allocate
3696fd34d6fSJeff Roberson 	 * a bucket for a bucket zone so we do not allow infinite bucket
3706fd34d6fSJeff Roberson 	 * recursion.  This cookie will even persist to frees of unused
3716fd34d6fSJeff Roberson 	 * buckets via the allocation path or bucket allocations in the
3726fd34d6fSJeff Roberson 	 * free path.
3736fd34d6fSJeff Roberson 	 */
3746fd34d6fSJeff Roberson 	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
3756fd34d6fSJeff Roberson 		udata = (void *)(uintptr_t)zone->uz_flags;
376e8a720feSAlexander Motin 	else {
377e8a720feSAlexander Motin 		if ((uintptr_t)udata & UMA_ZFLAG_BUCKET)
378e8a720feSAlexander Motin 			return (NULL);
3796fd34d6fSJeff Roberson 		udata = (void *)((uintptr_t)udata | UMA_ZFLAG_BUCKET);
380e8a720feSAlexander Motin 	}
3816fd34d6fSJeff Roberson 	if ((uintptr_t)udata & UMA_ZFLAG_CACHEONLY)
382af526374SJeff Roberson 		flags |= M_NOVM;
383af526374SJeff Roberson 	ubz = bucket_zone_lookup(zone->uz_count);
38420d3ab87SAlexander Motin 	if (ubz->ubz_zone == zone && (ubz + 1)->ubz_entries != 0)
38520d3ab87SAlexander Motin 		ubz++;
3866fd34d6fSJeff Roberson 	bucket = uma_zalloc_arg(ubz->ubz_zone, udata, flags);
387cae33c14SJeff Roberson 	if (bucket) {
388cae33c14SJeff Roberson #ifdef INVARIANTS
389cae33c14SJeff Roberson 		bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
390cae33c14SJeff Roberson #endif
391cae33c14SJeff Roberson 		bucket->ub_cnt = 0;
392cae33c14SJeff Roberson 		bucket->ub_entries = ubz->ubz_entries;
393cae33c14SJeff Roberson 	}
394cae33c14SJeff Roberson 
395cae33c14SJeff Roberson 	return (bucket);
396cae33c14SJeff Roberson }
397cae33c14SJeff Roberson 
398cae33c14SJeff Roberson static void
3996fd34d6fSJeff Roberson bucket_free(uma_zone_t zone, uma_bucket_t bucket, void *udata)
400cae33c14SJeff Roberson {
401cae33c14SJeff Roberson 	struct uma_bucket_zone *ubz;
402cae33c14SJeff Roberson 
403fc03d22bSJeff Roberson 	KASSERT(bucket->ub_cnt == 0,
404fc03d22bSJeff Roberson 	    ("bucket_free: Freeing a non free bucket."));
4056fd34d6fSJeff Roberson 	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
4066fd34d6fSJeff Roberson 		udata = (void *)(uintptr_t)zone->uz_flags;
407dc2c7965SRobert Watson 	ubz = bucket_zone_lookup(bucket->ub_entries);
4086fd34d6fSJeff Roberson 	uma_zfree_arg(ubz->ubz_zone, bucket, udata);
409cae33c14SJeff Roberson }
410cae33c14SJeff Roberson 
411cae33c14SJeff Roberson static void
412cae33c14SJeff Roberson bucket_zone_drain(void)
413cae33c14SJeff Roberson {
414cae33c14SJeff Roberson 	struct uma_bucket_zone *ubz;
415cae33c14SJeff Roberson 
416cae33c14SJeff Roberson 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
417cae33c14SJeff Roberson 		zone_drain(ubz->ubz_zone);
418cae33c14SJeff Roberson }
419cae33c14SJeff Roberson 
4202f891cd5SPawel Jakub Dawidek static void
4212f891cd5SPawel Jakub Dawidek zone_log_warning(uma_zone_t zone)
4222f891cd5SPawel Jakub Dawidek {
4232f891cd5SPawel Jakub Dawidek 	static const struct timeval warninterval = { 300, 0 };
4242f891cd5SPawel Jakub Dawidek 
4252f891cd5SPawel Jakub Dawidek 	if (!zone_warnings || zone->uz_warning == NULL)
4262f891cd5SPawel Jakub Dawidek 		return;
4272f891cd5SPawel Jakub Dawidek 
4282f891cd5SPawel Jakub Dawidek 	if (ratecheck(&zone->uz_ratecheck, &warninterval))
4292f891cd5SPawel Jakub Dawidek 		printf("[zone: %s] %s\n", zone->uz_name, zone->uz_warning);
4302f891cd5SPawel Jakub Dawidek }
4312f891cd5SPawel Jakub Dawidek 
43254503a13SJonathan T. Looney static inline void
43354503a13SJonathan T. Looney zone_maxaction(uma_zone_t zone)
43454503a13SJonathan T. Looney {
435e60b2fcbSGleb Smirnoff 
436e60b2fcbSGleb Smirnoff 	if (zone->uz_maxaction.ta_func != NULL)
437e60b2fcbSGleb Smirnoff 		taskqueue_enqueue(taskqueue_thread, &zone->uz_maxaction);
43854503a13SJonathan T. Looney }
43954503a13SJonathan T. Looney 
440e20a199fSJeff Roberson static void
441e20a199fSJeff Roberson zone_foreach_keg(uma_zone_t zone, void (*kegfn)(uma_keg_t))
442e20a199fSJeff Roberson {
443e20a199fSJeff Roberson 	uma_klink_t klink;
444e20a199fSJeff Roberson 
445e20a199fSJeff Roberson 	LIST_FOREACH(klink, &zone->uz_kegs, kl_link)
446e20a199fSJeff Roberson 		kegfn(klink->kl_keg);
447e20a199fSJeff Roberson }
4488355f576SJeff Roberson 
4498355f576SJeff Roberson /*
4508355f576SJeff Roberson  * Routine called by timeout which is used to fire off some time interval
4519643769aSJeff Roberson  * based calculations.  (stats, hash size, etc.)
4528355f576SJeff Roberson  *
4538355f576SJeff Roberson  * Arguments:
4548355f576SJeff Roberson  *	arg   Unused
4558355f576SJeff Roberson  *
4568355f576SJeff Roberson  * Returns:
4578355f576SJeff Roberson  *	Nothing
4588355f576SJeff Roberson  */
4598355f576SJeff Roberson static void
4608355f576SJeff Roberson uma_timeout(void *unused)
4618355f576SJeff Roberson {
46286bbae32SJeff Roberson 	bucket_enable();
4638355f576SJeff Roberson 	zone_foreach(zone_timeout);
4648355f576SJeff Roberson 
4658355f576SJeff Roberson 	/* Reschedule this event */
4669643769aSJeff Roberson 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
4678355f576SJeff Roberson }
4688355f576SJeff Roberson 
4698355f576SJeff Roberson /*
4709643769aSJeff Roberson  * Routine to perform timeout driven calculations.  This expands the
4719643769aSJeff Roberson  * hashes and does per cpu statistics aggregation.
4728355f576SJeff Roberson  *
473e20a199fSJeff Roberson  *  Returns nothing.
4748355f576SJeff Roberson  */
4758355f576SJeff Roberson static void
476e20a199fSJeff Roberson keg_timeout(uma_keg_t keg)
4778355f576SJeff Roberson {
4788355f576SJeff Roberson 
479e20a199fSJeff Roberson 	KEG_LOCK(keg);
4808355f576SJeff Roberson 	/*
481e20a199fSJeff Roberson 	 * Expand the keg hash table.
4828355f576SJeff Roberson 	 *
4838355f576SJeff Roberson 	 * This is done if the number of slabs is larger than the hash size.
4848355f576SJeff Roberson 	 * What I'm trying to do here is completely reduce collisions.  This
4858355f576SJeff Roberson 	 * may be a little aggressive.  Should I allow for two collisions max?
4868355f576SJeff Roberson 	 */
487099a0e58SBosko Milekic 	if (keg->uk_flags & UMA_ZONE_HASH &&
488099a0e58SBosko Milekic 	    keg->uk_pages / keg->uk_ppera >= keg->uk_hash.uh_hashsize) {
4890aef6126SJeff Roberson 		struct uma_hash newhash;
4900aef6126SJeff Roberson 		struct uma_hash oldhash;
4910aef6126SJeff Roberson 		int ret;
4925300d9ddSJeff Roberson 
4930aef6126SJeff Roberson 		/*
4940aef6126SJeff Roberson 		 * This is so involved because allocating and freeing
495e20a199fSJeff Roberson 		 * while the keg lock is held will lead to deadlock.
4960aef6126SJeff Roberson 		 * I have to do everything in stages and check for
4970aef6126SJeff Roberson 		 * races.
4980aef6126SJeff Roberson 		 */
499099a0e58SBosko Milekic 		newhash = keg->uk_hash;
500e20a199fSJeff Roberson 		KEG_UNLOCK(keg);
5010aef6126SJeff Roberson 		ret = hash_alloc(&newhash);
502e20a199fSJeff Roberson 		KEG_LOCK(keg);
5030aef6126SJeff Roberson 		if (ret) {
504099a0e58SBosko Milekic 			if (hash_expand(&keg->uk_hash, &newhash)) {
505099a0e58SBosko Milekic 				oldhash = keg->uk_hash;
506099a0e58SBosko Milekic 				keg->uk_hash = newhash;
5070aef6126SJeff Roberson 			} else
5080aef6126SJeff Roberson 				oldhash = newhash;
5090aef6126SJeff Roberson 
510e20a199fSJeff Roberson 			KEG_UNLOCK(keg);
5110aef6126SJeff Roberson 			hash_free(&oldhash);
512a1dff920SDavide Italiano 			return;
5130aef6126SJeff Roberson 		}
5145300d9ddSJeff Roberson 	}
515e20a199fSJeff Roberson 	KEG_UNLOCK(keg);
516e20a199fSJeff Roberson }
517e20a199fSJeff Roberson 
518e20a199fSJeff Roberson static void
519e20a199fSJeff Roberson zone_timeout(uma_zone_t zone)
520e20a199fSJeff Roberson {
521e20a199fSJeff Roberson 
522e20a199fSJeff Roberson 	zone_foreach_keg(zone, &keg_timeout);
5238355f576SJeff Roberson }
5248355f576SJeff Roberson 
5258355f576SJeff Roberson /*
5265300d9ddSJeff Roberson  * Allocate and zero fill the next sized hash table from the appropriate
5275300d9ddSJeff Roberson  * backing store.
5285300d9ddSJeff Roberson  *
5295300d9ddSJeff Roberson  * Arguments:
5300aef6126SJeff Roberson  *	hash  A new hash structure with the old hash size in uh_hashsize
5315300d9ddSJeff Roberson  *
5325300d9ddSJeff Roberson  * Returns:
533763df3ecSPedro F. Giffuni  *	1 on success and 0 on failure.
5345300d9ddSJeff Roberson  */
53537c84183SPoul-Henning Kamp static int
5360aef6126SJeff Roberson hash_alloc(struct uma_hash *hash)
5375300d9ddSJeff Roberson {
5380aef6126SJeff Roberson 	int oldsize;
5395300d9ddSJeff Roberson 	int alloc;
5405300d9ddSJeff Roberson 
5410aef6126SJeff Roberson 	oldsize = hash->uh_hashsize;
5420aef6126SJeff Roberson 
5435300d9ddSJeff Roberson 	/* We're just going to go to a power of two greater */
5440aef6126SJeff Roberson 	if (oldsize)  {
5450aef6126SJeff Roberson 		hash->uh_hashsize = oldsize * 2;
5460aef6126SJeff Roberson 		alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
5470aef6126SJeff Roberson 		hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
548961647dfSJeff Roberson 		    M_UMAHASH, M_NOWAIT);
5495300d9ddSJeff Roberson 	} else {
5500aef6126SJeff Roberson 		alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
551e20a199fSJeff Roberson 		hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
552a163d034SWarner Losh 		    M_WAITOK);
5530aef6126SJeff Roberson 		hash->uh_hashsize = UMA_HASH_SIZE_INIT;
5545300d9ddSJeff Roberson 	}
5550aef6126SJeff Roberson 	if (hash->uh_slab_hash) {
5560aef6126SJeff Roberson 		bzero(hash->uh_slab_hash, alloc);
5570aef6126SJeff Roberson 		hash->uh_hashmask = hash->uh_hashsize - 1;
5580aef6126SJeff Roberson 		return (1);
5590aef6126SJeff Roberson 	}
5605300d9ddSJeff Roberson 
5610aef6126SJeff Roberson 	return (0);
5625300d9ddSJeff Roberson }
5635300d9ddSJeff Roberson 
5645300d9ddSJeff Roberson /*
56564f051e9SJeff Roberson  * Expands the hash table for HASH zones.  This is done from zone_timeout
56664f051e9SJeff Roberson  * to reduce collisions.  This must not be done in the regular allocation
56764f051e9SJeff Roberson  * path, otherwise, we can recurse on the vm while allocating pages.
5688355f576SJeff Roberson  *
5698355f576SJeff Roberson  * Arguments:
5700aef6126SJeff Roberson  *	oldhash  The hash you want to expand
5710aef6126SJeff Roberson  *	newhash  The hash structure for the new table
5728355f576SJeff Roberson  *
5738355f576SJeff Roberson  * Returns:
5748355f576SJeff Roberson  *	Nothing
5758355f576SJeff Roberson  *
5768355f576SJeff Roberson  * Discussion:
5778355f576SJeff Roberson  */
5780aef6126SJeff Roberson static int
5790aef6126SJeff Roberson hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
5808355f576SJeff Roberson {
5818355f576SJeff Roberson 	uma_slab_t slab;
5828355f576SJeff Roberson 	int hval;
5838355f576SJeff Roberson 	int i;
5848355f576SJeff Roberson 
5850aef6126SJeff Roberson 	if (!newhash->uh_slab_hash)
5860aef6126SJeff Roberson 		return (0);
5878355f576SJeff Roberson 
5880aef6126SJeff Roberson 	if (oldhash->uh_hashsize >= newhash->uh_hashsize)
5890aef6126SJeff Roberson 		return (0);
5908355f576SJeff Roberson 
5918355f576SJeff Roberson 	/*
5928355f576SJeff Roberson 	 * I need to investigate hash algorithms for resizing without a
5938355f576SJeff Roberson 	 * full rehash.
5948355f576SJeff Roberson 	 */
5958355f576SJeff Roberson 
5960aef6126SJeff Roberson 	for (i = 0; i < oldhash->uh_hashsize; i++)
5970aef6126SJeff Roberson 		while (!SLIST_EMPTY(&oldhash->uh_slab_hash[i])) {
5980aef6126SJeff Roberson 			slab = SLIST_FIRST(&oldhash->uh_slab_hash[i]);
5990aef6126SJeff Roberson 			SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[i], us_hlink);
6000aef6126SJeff Roberson 			hval = UMA_HASH(newhash, slab->us_data);
6010aef6126SJeff Roberson 			SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
6020aef6126SJeff Roberson 			    slab, us_hlink);
6038355f576SJeff Roberson 		}
6048355f576SJeff Roberson 
6050aef6126SJeff Roberson 	return (1);
6069c2cd7e5SJeff Roberson }
6079c2cd7e5SJeff Roberson 
6085300d9ddSJeff Roberson /*
6095300d9ddSJeff Roberson  * Free the hash bucket to the appropriate backing store.
6105300d9ddSJeff Roberson  *
6115300d9ddSJeff Roberson  * Arguments:
6125300d9ddSJeff Roberson  *	slab_hash  The hash bucket we're freeing
6135300d9ddSJeff Roberson  *	hashsize   The number of entries in that hash bucket
6145300d9ddSJeff Roberson  *
6155300d9ddSJeff Roberson  * Returns:
6165300d9ddSJeff Roberson  *	Nothing
6175300d9ddSJeff Roberson  */
6189c2cd7e5SJeff Roberson static void
6190aef6126SJeff Roberson hash_free(struct uma_hash *hash)
6209c2cd7e5SJeff Roberson {
6210aef6126SJeff Roberson 	if (hash->uh_slab_hash == NULL)
6220aef6126SJeff Roberson 		return;
6230aef6126SJeff Roberson 	if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
6240095a784SJeff Roberson 		zone_free_item(hashzone, hash->uh_slab_hash, NULL, SKIP_NONE);
6258355f576SJeff Roberson 	else
626961647dfSJeff Roberson 		free(hash->uh_slab_hash, M_UMAHASH);
6278355f576SJeff Roberson }
6288355f576SJeff Roberson 
6298355f576SJeff Roberson /*
6308355f576SJeff Roberson  * Frees all outstanding items in a bucket
6318355f576SJeff Roberson  *
6328355f576SJeff Roberson  * Arguments:
6338355f576SJeff Roberson  *	zone   The zone to free to, must be unlocked.
6348355f576SJeff Roberson  *	bucket The free/alloc bucket with items, cpu queue must be locked.
6358355f576SJeff Roberson  *
6368355f576SJeff Roberson  * Returns:
6378355f576SJeff Roberson  *	Nothing
6388355f576SJeff Roberson  */
6398355f576SJeff Roberson 
6408355f576SJeff Roberson static void
6418355f576SJeff Roberson bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
6428355f576SJeff Roberson {
6430095a784SJeff Roberson 	int i;
6448355f576SJeff Roberson 
6458355f576SJeff Roberson 	if (bucket == NULL)
6468355f576SJeff Roberson 		return;
6478355f576SJeff Roberson 
6480095a784SJeff Roberson 	if (zone->uz_fini)
6490095a784SJeff Roberson 		for (i = 0; i < bucket->ub_cnt; i++)
6500095a784SJeff Roberson 			zone->uz_fini(bucket->ub_bucket[i], zone->uz_size);
6510095a784SJeff Roberson 	zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt);
6520095a784SJeff Roberson 	bucket->ub_cnt = 0;
6538355f576SJeff Roberson }
6548355f576SJeff Roberson 
6558355f576SJeff Roberson /*
6568355f576SJeff Roberson  * Drains the per cpu caches for a zone.
6578355f576SJeff Roberson  *
6585d1ae027SRobert Watson  * NOTE: This may only be called while the zone is being turn down, and not
6595d1ae027SRobert Watson  * during normal operation.  This is necessary in order that we do not have
6605d1ae027SRobert Watson  * to migrate CPUs to drain the per-CPU caches.
6615d1ae027SRobert Watson  *
6628355f576SJeff Roberson  * Arguments:
6638355f576SJeff Roberson  *	zone     The zone to drain, must be unlocked.
6648355f576SJeff Roberson  *
6658355f576SJeff Roberson  * Returns:
6668355f576SJeff Roberson  *	Nothing
6678355f576SJeff Roberson  */
6688355f576SJeff Roberson static void
6699643769aSJeff Roberson cache_drain(uma_zone_t zone)
6708355f576SJeff Roberson {
6718355f576SJeff Roberson 	uma_cache_t cache;
6728355f576SJeff Roberson 	int cpu;
6738355f576SJeff Roberson 
6748355f576SJeff Roberson 	/*
6755d1ae027SRobert Watson 	 * XXX: It is safe to not lock the per-CPU caches, because we're
6765d1ae027SRobert Watson 	 * tearing down the zone anyway.  I.e., there will be no further use
6775d1ae027SRobert Watson 	 * of the caches at this point.
6785d1ae027SRobert Watson 	 *
6795d1ae027SRobert Watson 	 * XXX: It would good to be able to assert that the zone is being
6805d1ae027SRobert Watson 	 * torn down to prevent improper use of cache_drain().
6815d1ae027SRobert Watson 	 *
6825d1ae027SRobert Watson 	 * XXX: We lock the zone before passing into bucket_cache_drain() as
6835d1ae027SRobert Watson 	 * it is used elsewhere.  Should the tear-down path be made special
6845d1ae027SRobert Watson 	 * there in some form?
6858355f576SJeff Roberson 	 */
6863aa6d94eSJohn Baldwin 	CPU_FOREACH(cpu) {
6878355f576SJeff Roberson 		cache = &zone->uz_cpu[cpu];
6888355f576SJeff Roberson 		bucket_drain(zone, cache->uc_allocbucket);
6898355f576SJeff Roberson 		bucket_drain(zone, cache->uc_freebucket);
690174ab450SBosko Milekic 		if (cache->uc_allocbucket != NULL)
6916fd34d6fSJeff Roberson 			bucket_free(zone, cache->uc_allocbucket, NULL);
692174ab450SBosko Milekic 		if (cache->uc_freebucket != NULL)
6936fd34d6fSJeff Roberson 			bucket_free(zone, cache->uc_freebucket, NULL);
694d56368d7SBosko Milekic 		cache->uc_allocbucket = cache->uc_freebucket = NULL;
695d56368d7SBosko Milekic 	}
696aaa8bb16SJeff Roberson 	ZONE_LOCK(zone);
697aaa8bb16SJeff Roberson 	bucket_cache_drain(zone);
698aaa8bb16SJeff Roberson 	ZONE_UNLOCK(zone);
699aaa8bb16SJeff Roberson }
700aaa8bb16SJeff Roberson 
701a2de44abSAlexander Motin static void
702a2de44abSAlexander Motin cache_shrink(uma_zone_t zone)
703a2de44abSAlexander Motin {
704a2de44abSAlexander Motin 
705a2de44abSAlexander Motin 	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
706a2de44abSAlexander Motin 		return;
707a2de44abSAlexander Motin 
708a2de44abSAlexander Motin 	ZONE_LOCK(zone);
709a2de44abSAlexander Motin 	zone->uz_count = (zone->uz_count_min + zone->uz_count) / 2;
710a2de44abSAlexander Motin 	ZONE_UNLOCK(zone);
711a2de44abSAlexander Motin }
712a2de44abSAlexander Motin 
713a2de44abSAlexander Motin static void
714a2de44abSAlexander Motin cache_drain_safe_cpu(uma_zone_t zone)
715a2de44abSAlexander Motin {
716a2de44abSAlexander Motin 	uma_cache_t cache;
7178a8d9d14SAlexander Motin 	uma_bucket_t b1, b2;
718a2de44abSAlexander Motin 
719a2de44abSAlexander Motin 	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
720a2de44abSAlexander Motin 		return;
721a2de44abSAlexander Motin 
7228a8d9d14SAlexander Motin 	b1 = b2 = NULL;
723a2de44abSAlexander Motin 	ZONE_LOCK(zone);
724a2de44abSAlexander Motin 	critical_enter();
725a2de44abSAlexander Motin 	cache = &zone->uz_cpu[curcpu];
726a2de44abSAlexander Motin 	if (cache->uc_allocbucket) {
7278a8d9d14SAlexander Motin 		if (cache->uc_allocbucket->ub_cnt != 0)
7288a8d9d14SAlexander Motin 			LIST_INSERT_HEAD(&zone->uz_buckets,
7298a8d9d14SAlexander Motin 			    cache->uc_allocbucket, ub_link);
7308a8d9d14SAlexander Motin 		else
7318a8d9d14SAlexander Motin 			b1 = cache->uc_allocbucket;
732a2de44abSAlexander Motin 		cache->uc_allocbucket = NULL;
733a2de44abSAlexander Motin 	}
734a2de44abSAlexander Motin 	if (cache->uc_freebucket) {
7358a8d9d14SAlexander Motin 		if (cache->uc_freebucket->ub_cnt != 0)
7368a8d9d14SAlexander Motin 			LIST_INSERT_HEAD(&zone->uz_buckets,
7378a8d9d14SAlexander Motin 			    cache->uc_freebucket, ub_link);
7388a8d9d14SAlexander Motin 		else
7398a8d9d14SAlexander Motin 			b2 = cache->uc_freebucket;
740a2de44abSAlexander Motin 		cache->uc_freebucket = NULL;
741a2de44abSAlexander Motin 	}
742a2de44abSAlexander Motin 	critical_exit();
743a2de44abSAlexander Motin 	ZONE_UNLOCK(zone);
7448a8d9d14SAlexander Motin 	if (b1)
7458a8d9d14SAlexander Motin 		bucket_free(zone, b1, NULL);
7468a8d9d14SAlexander Motin 	if (b2)
7478a8d9d14SAlexander Motin 		bucket_free(zone, b2, NULL);
748a2de44abSAlexander Motin }
749a2de44abSAlexander Motin 
750a2de44abSAlexander Motin /*
751a2de44abSAlexander Motin  * Safely drain per-CPU caches of a zone(s) to alloc bucket.
752a2de44abSAlexander Motin  * This is an expensive call because it needs to bind to all CPUs
753a2de44abSAlexander Motin  * one by one and enter a critical section on each of them in order
754a2de44abSAlexander Motin  * to safely access their cache buckets.
755a2de44abSAlexander Motin  * Zone lock must not be held on call this function.
756a2de44abSAlexander Motin  */
757a2de44abSAlexander Motin static void
758a2de44abSAlexander Motin cache_drain_safe(uma_zone_t zone)
759a2de44abSAlexander Motin {
760a2de44abSAlexander Motin 	int cpu;
761a2de44abSAlexander Motin 
762a2de44abSAlexander Motin 	/*
763a2de44abSAlexander Motin 	 * Polite bucket sizes shrinking was not enouth, shrink aggressively.
764a2de44abSAlexander Motin 	 */
765a2de44abSAlexander Motin 	if (zone)
766a2de44abSAlexander Motin 		cache_shrink(zone);
767a2de44abSAlexander Motin 	else
768a2de44abSAlexander Motin 		zone_foreach(cache_shrink);
769a2de44abSAlexander Motin 
770a2de44abSAlexander Motin 	CPU_FOREACH(cpu) {
771a2de44abSAlexander Motin 		thread_lock(curthread);
772a2de44abSAlexander Motin 		sched_bind(curthread, cpu);
773a2de44abSAlexander Motin 		thread_unlock(curthread);
774a2de44abSAlexander Motin 
775a2de44abSAlexander Motin 		if (zone)
776a2de44abSAlexander Motin 			cache_drain_safe_cpu(zone);
777a2de44abSAlexander Motin 		else
778a2de44abSAlexander Motin 			zone_foreach(cache_drain_safe_cpu);
779a2de44abSAlexander Motin 	}
780a2de44abSAlexander Motin 	thread_lock(curthread);
781a2de44abSAlexander Motin 	sched_unbind(curthread);
782a2de44abSAlexander Motin 	thread_unlock(curthread);
783a2de44abSAlexander Motin }
784a2de44abSAlexander Motin 
785aaa8bb16SJeff Roberson /*
786aaa8bb16SJeff Roberson  * Drain the cached buckets from a zone.  Expects a locked zone on entry.
787aaa8bb16SJeff Roberson  */
788aaa8bb16SJeff Roberson static void
789aaa8bb16SJeff Roberson bucket_cache_drain(uma_zone_t zone)
790aaa8bb16SJeff Roberson {
791aaa8bb16SJeff Roberson 	uma_bucket_t bucket;
7928355f576SJeff Roberson 
7938355f576SJeff Roberson 	/*
7948355f576SJeff Roberson 	 * Drain the bucket queues and free the buckets, we just keep two per
7958355f576SJeff Roberson 	 * cpu (alloc/free).
7968355f576SJeff Roberson 	 */
797fc03d22bSJeff Roberson 	while ((bucket = LIST_FIRST(&zone->uz_buckets)) != NULL) {
7988355f576SJeff Roberson 		LIST_REMOVE(bucket, ub_link);
7998355f576SJeff Roberson 		ZONE_UNLOCK(zone);
8008355f576SJeff Roberson 		bucket_drain(zone, bucket);
8016fd34d6fSJeff Roberson 		bucket_free(zone, bucket, NULL);
8028355f576SJeff Roberson 		ZONE_LOCK(zone);
8038355f576SJeff Roberson 	}
804ace66b56SAlexander Motin 
805ace66b56SAlexander Motin 	/*
806ace66b56SAlexander Motin 	 * Shrink further bucket sizes.  Price of single zone lock collision
807ace66b56SAlexander Motin 	 * is probably lower then price of global cache drain.
808ace66b56SAlexander Motin 	 */
809ace66b56SAlexander Motin 	if (zone->uz_count > zone->uz_count_min)
810ace66b56SAlexander Motin 		zone->uz_count--;
8118355f576SJeff Roberson }
812fc03d22bSJeff Roberson 
813fc03d22bSJeff Roberson static void
814fc03d22bSJeff Roberson keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
815fc03d22bSJeff Roberson {
816fc03d22bSJeff Roberson 	uint8_t *mem;
817fc03d22bSJeff Roberson 	int i;
818fc03d22bSJeff Roberson 	uint8_t flags;
819fc03d22bSJeff Roberson 
8201431a748SGleb Smirnoff 	CTR4(KTR_UMA, "keg_free_slab keg %s(%p) slab %p, returning %d bytes",
8211431a748SGleb Smirnoff 	    keg->uk_name, keg, slab, PAGE_SIZE * keg->uk_ppera);
8221431a748SGleb Smirnoff 
823fc03d22bSJeff Roberson 	mem = slab->us_data;
824fc03d22bSJeff Roberson 	flags = slab->us_flags;
825fc03d22bSJeff Roberson 	i = start;
826fc03d22bSJeff Roberson 	if (keg->uk_fini != NULL) {
827fc03d22bSJeff Roberson 		for (i--; i > -1; i--)
828fc03d22bSJeff Roberson 			keg->uk_fini(slab->us_data + (keg->uk_rsize * i),
829fc03d22bSJeff Roberson 			    keg->uk_size);
830fc03d22bSJeff Roberson 	}
831fc03d22bSJeff Roberson 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
832fc03d22bSJeff Roberson 		zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
833fc03d22bSJeff Roberson 	keg->uk_freef(mem, PAGE_SIZE * keg->uk_ppera, flags);
8348355f576SJeff Roberson }
8358355f576SJeff Roberson 
8368355f576SJeff Roberson /*
837e20a199fSJeff Roberson  * Frees pages from a keg back to the system.  This is done on demand from
8388355f576SJeff Roberson  * the pageout daemon.
8398355f576SJeff Roberson  *
840e20a199fSJeff Roberson  * Returns nothing.
8418355f576SJeff Roberson  */
842e20a199fSJeff Roberson static void
843e20a199fSJeff Roberson keg_drain(uma_keg_t keg)
8448355f576SJeff Roberson {
8451e183df2SStefan Farfeleder 	struct slabhead freeslabs = { 0 };
846829be516SMark Johnston 	uma_slab_t slab, tmp;
8478355f576SJeff Roberson 
8488355f576SJeff Roberson 	/*
849e20a199fSJeff Roberson 	 * We don't want to take pages from statically allocated kegs at this
8508355f576SJeff Roberson 	 * time
8518355f576SJeff Roberson 	 */
852099a0e58SBosko Milekic 	if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
8538355f576SJeff Roberson 		return;
8548355f576SJeff Roberson 
8551431a748SGleb Smirnoff 	CTR3(KTR_UMA, "keg_drain %s(%p) free items: %u",
8561431a748SGleb Smirnoff 	    keg->uk_name, keg, keg->uk_free);
857e20a199fSJeff Roberson 	KEG_LOCK(keg);
858099a0e58SBosko Milekic 	if (keg->uk_free == 0)
8598355f576SJeff Roberson 		goto finished;
8608355f576SJeff Roberson 
861829be516SMark Johnston 	LIST_FOREACH_SAFE(slab, &keg->uk_free_slab, us_link, tmp) {
862829be516SMark Johnston 		/* We have nowhere to free these to. */
863829be516SMark Johnston 		if (slab->us_flags & UMA_SLAB_BOOT)
8648355f576SJeff Roberson 			continue;
8658355f576SJeff Roberson 
8668355f576SJeff Roberson 		LIST_REMOVE(slab, us_link);
867099a0e58SBosko Milekic 		keg->uk_pages -= keg->uk_ppera;
868099a0e58SBosko Milekic 		keg->uk_free -= keg->uk_ipers;
869713deb36SJeff Roberson 
870099a0e58SBosko Milekic 		if (keg->uk_flags & UMA_ZONE_HASH)
871099a0e58SBosko Milekic 			UMA_HASH_REMOVE(&keg->uk_hash, slab, slab->us_data);
872713deb36SJeff Roberson 
873713deb36SJeff Roberson 		SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);
874713deb36SJeff Roberson 	}
875713deb36SJeff Roberson finished:
876e20a199fSJeff Roberson 	KEG_UNLOCK(keg);
877713deb36SJeff Roberson 
878713deb36SJeff Roberson 	while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
879713deb36SJeff Roberson 		SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
8801645995bSKirk McKusick 		keg_free_slab(keg, slab, keg->uk_ipers);
8818355f576SJeff Roberson 	}
8828355f576SJeff Roberson }
8838355f576SJeff Roberson 
884e20a199fSJeff Roberson static void
885e20a199fSJeff Roberson zone_drain_wait(uma_zone_t zone, int waitok)
886e20a199fSJeff Roberson {
887e20a199fSJeff Roberson 
8888355f576SJeff Roberson 	/*
889e20a199fSJeff Roberson 	 * Set draining to interlock with zone_dtor() so we can release our
890e20a199fSJeff Roberson 	 * locks as we go.  Only dtor() should do a WAITOK call since it
891e20a199fSJeff Roberson 	 * is the only call that knows the structure will still be available
892e20a199fSJeff Roberson 	 * when it wakes up.
893e20a199fSJeff Roberson 	 */
894e20a199fSJeff Roberson 	ZONE_LOCK(zone);
895e20a199fSJeff Roberson 	while (zone->uz_flags & UMA_ZFLAG_DRAINING) {
896e20a199fSJeff Roberson 		if (waitok == M_NOWAIT)
897e20a199fSJeff Roberson 			goto out;
898af526374SJeff Roberson 		msleep(zone, zone->uz_lockptr, PVM, "zonedrain", 1);
899e20a199fSJeff Roberson 	}
900e20a199fSJeff Roberson 	zone->uz_flags |= UMA_ZFLAG_DRAINING;
901e20a199fSJeff Roberson 	bucket_cache_drain(zone);
902e20a199fSJeff Roberson 	ZONE_UNLOCK(zone);
903e20a199fSJeff Roberson 	/*
904e20a199fSJeff Roberson 	 * The DRAINING flag protects us from being freed while
905111fbcd5SBryan Venteicher 	 * we're running.  Normally the uma_rwlock would protect us but we
906e20a199fSJeff Roberson 	 * must be able to release and acquire the right lock for each keg.
907e20a199fSJeff Roberson 	 */
908e20a199fSJeff Roberson 	zone_foreach_keg(zone, &keg_drain);
909e20a199fSJeff Roberson 	ZONE_LOCK(zone);
910e20a199fSJeff Roberson 	zone->uz_flags &= ~UMA_ZFLAG_DRAINING;
911e20a199fSJeff Roberson 	wakeup(zone);
912e20a199fSJeff Roberson out:
913e20a199fSJeff Roberson 	ZONE_UNLOCK(zone);
914e20a199fSJeff Roberson }
915e20a199fSJeff Roberson 
916e20a199fSJeff Roberson void
917e20a199fSJeff Roberson zone_drain(uma_zone_t zone)
918e20a199fSJeff Roberson {
919e20a199fSJeff Roberson 
920e20a199fSJeff Roberson 	zone_drain_wait(zone, M_NOWAIT);
921e20a199fSJeff Roberson }
922e20a199fSJeff Roberson 
923e20a199fSJeff Roberson /*
924e20a199fSJeff Roberson  * Allocate a new slab for a keg.  This does not insert the slab onto a list.
9258355f576SJeff Roberson  *
9268355f576SJeff Roberson  * Arguments:
9278355f576SJeff Roberson  *	wait  Shall we wait?
9288355f576SJeff Roberson  *
9298355f576SJeff Roberson  * Returns:
9308355f576SJeff Roberson  *	The slab that was allocated or NULL if there is no memory and the
9318355f576SJeff Roberson  *	caller specified M_NOWAIT.
9328355f576SJeff Roberson  */
9338355f576SJeff Roberson static uma_slab_t
934e20a199fSJeff Roberson keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int wait)
9358355f576SJeff Roberson {
936e20a199fSJeff Roberson 	uma_alloc allocf;
937099a0e58SBosko Milekic 	uma_slab_t slab;
93885dcf349SGleb Smirnoff 	uint8_t *mem;
93985dcf349SGleb Smirnoff 	uint8_t flags;
9408355f576SJeff Roberson 	int i;
9418355f576SJeff Roberson 
942e20a199fSJeff Roberson 	mtx_assert(&keg->uk_lock, MA_OWNED);
943a553d4b8SJeff Roberson 	slab = NULL;
944fc03d22bSJeff Roberson 	mem = NULL;
945a553d4b8SJeff Roberson 
946e20a199fSJeff Roberson 	allocf = keg->uk_allocf;
947e20a199fSJeff Roberson 	KEG_UNLOCK(keg);
948a553d4b8SJeff Roberson 
949099a0e58SBosko Milekic 	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
950e20a199fSJeff Roberson 		slab = zone_alloc_item(keg->uk_slabzone, NULL, wait);
951fc03d22bSJeff Roberson 		if (slab == NULL)
952fc03d22bSJeff Roberson 			goto out;
953a553d4b8SJeff Roberson 	}
954a553d4b8SJeff Roberson 
9553370c5bfSJeff Roberson 	/*
9563370c5bfSJeff Roberson 	 * This reproduces the old vm_zone behavior of zero filling pages the
9573370c5bfSJeff Roberson 	 * first time they are added to a zone.
9583370c5bfSJeff Roberson 	 *
9593370c5bfSJeff Roberson 	 * Malloced items are zeroed in uma_zalloc.
9603370c5bfSJeff Roberson 	 */
9613370c5bfSJeff Roberson 
962099a0e58SBosko Milekic 	if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
9633370c5bfSJeff Roberson 		wait |= M_ZERO;
9643370c5bfSJeff Roberson 	else
9653370c5bfSJeff Roberson 		wait &= ~M_ZERO;
9663370c5bfSJeff Roberson 
967263811f7SKip Macy 	if (keg->uk_flags & UMA_ZONE_NODUMP)
968263811f7SKip Macy 		wait |= M_NODUMP;
969263811f7SKip Macy 
970e20a199fSJeff Roberson 	/* zone is passed for legacy reasons. */
971ad97af7eSGleb Smirnoff 	mem = allocf(zone, keg->uk_ppera * PAGE_SIZE, &flags, wait);
972a553d4b8SJeff Roberson 	if (mem == NULL) {
973b23f72e9SBrian Feldman 		if (keg->uk_flags & UMA_ZONE_OFFPAGE)
9740095a784SJeff Roberson 			zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
975fc03d22bSJeff Roberson 		slab = NULL;
976fc03d22bSJeff Roberson 		goto out;
977a553d4b8SJeff Roberson 	}
9788355f576SJeff Roberson 
9795c0e403bSJeff Roberson 	/* Point the slab into the allocated memory */
980099a0e58SBosko Milekic 	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
981099a0e58SBosko Milekic 		slab = (uma_slab_t )(mem + keg->uk_pgoff);
9825c0e403bSJeff Roberson 
983e20a199fSJeff Roberson 	if (keg->uk_flags & UMA_ZONE_VTOSLAB)
984099a0e58SBosko Milekic 		for (i = 0; i < keg->uk_ppera; i++)
98599571dc3SJeff Roberson 			vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);
9868355f576SJeff Roberson 
987099a0e58SBosko Milekic 	slab->us_keg = keg;
9888355f576SJeff Roberson 	slab->us_data = mem;
989099a0e58SBosko Milekic 	slab->us_freecount = keg->uk_ipers;
9908355f576SJeff Roberson 	slab->us_flags = flags;
991ef72505eSJeff Roberson 	BIT_FILL(SLAB_SETSIZE, &slab->us_free);
992ef72505eSJeff Roberson #ifdef INVARIANTS
993ef72505eSJeff Roberson 	BIT_ZERO(SLAB_SETSIZE, &slab->us_debugfree);
994ef72505eSJeff Roberson #endif
995099a0e58SBosko Milekic 
996b23f72e9SBrian Feldman 	if (keg->uk_init != NULL) {
997099a0e58SBosko Milekic 		for (i = 0; i < keg->uk_ipers; i++)
998b23f72e9SBrian Feldman 			if (keg->uk_init(slab->us_data + (keg->uk_rsize * i),
999b23f72e9SBrian Feldman 			    keg->uk_size, wait) != 0)
1000b23f72e9SBrian Feldman 				break;
1001b23f72e9SBrian Feldman 		if (i != keg->uk_ipers) {
1002fc03d22bSJeff Roberson 			keg_free_slab(keg, slab, i);
1003fc03d22bSJeff Roberson 			slab = NULL;
1004fc03d22bSJeff Roberson 			goto out;
1005b23f72e9SBrian Feldman 		}
1006b23f72e9SBrian Feldman 	}
1007fc03d22bSJeff Roberson out:
1008e20a199fSJeff Roberson 	KEG_LOCK(keg);
10095c0e403bSJeff Roberson 
10101431a748SGleb Smirnoff 	CTR3(KTR_UMA, "keg_alloc_slab: allocated slab %p for %s(%p)",
10111431a748SGleb Smirnoff 	    slab, keg->uk_name, keg);
10121431a748SGleb Smirnoff 
1013fc03d22bSJeff Roberson 	if (slab != NULL) {
1014099a0e58SBosko Milekic 		if (keg->uk_flags & UMA_ZONE_HASH)
1015099a0e58SBosko Milekic 			UMA_HASH_INSERT(&keg->uk_hash, slab, mem);
10168355f576SJeff Roberson 
1017099a0e58SBosko Milekic 		keg->uk_pages += keg->uk_ppera;
1018099a0e58SBosko Milekic 		keg->uk_free += keg->uk_ipers;
1019fc03d22bSJeff Roberson 	}
10208355f576SJeff Roberson 
10218355f576SJeff Roberson 	return (slab);
10228355f576SJeff Roberson }
10238355f576SJeff Roberson 
10248355f576SJeff Roberson /*
1025009b6fcbSJeff Roberson  * This function is intended to be used early on in place of page_alloc() so
1026009b6fcbSJeff Roberson  * that we may use the boot time page cache to satisfy allocations before
1027009b6fcbSJeff Roberson  * the VM is ready.
1028009b6fcbSJeff Roberson  */
1029009b6fcbSJeff Roberson static void *
1030f2c2231eSRyan Stone startup_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *pflag, int wait)
1031009b6fcbSJeff Roberson {
1032099a0e58SBosko Milekic 	uma_keg_t keg;
1033ac0a6fd0SGleb Smirnoff 	void *mem;
1034ac0a6fd0SGleb Smirnoff 	int pages;
1035099a0e58SBosko Milekic 
1036e20a199fSJeff Roberson 	keg = zone_first_keg(zone);
1037e9a069d8SJohn Baldwin 	pages = howmany(bytes, PAGE_SIZE);
1038e9a069d8SJohn Baldwin 	KASSERT(pages > 0, ("startup_alloc can't reserve 0 pages\n"));
1039099a0e58SBosko Milekic 
1040009b6fcbSJeff Roberson 	/*
1041009b6fcbSJeff Roberson 	 * Check our small startup cache to see if it has pages remaining.
1042009b6fcbSJeff Roberson 	 */
1043f353d338SAlan Cox 	mtx_lock(&uma_boot_pages_mtx);
1044ac0a6fd0SGleb Smirnoff 	if (pages <= boot_pages) {
1045ac0a6fd0SGleb Smirnoff 		mem = bootmem;
1046ac0a6fd0SGleb Smirnoff 		boot_pages -= pages;
1047ac0a6fd0SGleb Smirnoff 		bootmem += pages * PAGE_SIZE;
1048f353d338SAlan Cox 		mtx_unlock(&uma_boot_pages_mtx);
1049ac0a6fd0SGleb Smirnoff 		*pflag = UMA_SLAB_BOOT;
1050ac0a6fd0SGleb Smirnoff 		return (mem);
1051009b6fcbSJeff Roberson 	}
1052f353d338SAlan Cox 	mtx_unlock(&uma_boot_pages_mtx);
1053342f1793SAlan Cox 	if (booted < UMA_STARTUP2)
10543803b26bSDag-Erling Smørgrav 		panic("UMA: Increase vm.boot_pages");
1055009b6fcbSJeff Roberson 	/*
1056009b6fcbSJeff Roberson 	 * Now that we've booted reset these users to their real allocator.
1057009b6fcbSJeff Roberson 	 */
1058009b6fcbSJeff Roberson #ifdef UMA_MD_SMALL_ALLOC
1059e9a069d8SJohn Baldwin 	keg->uk_allocf = (keg->uk_ppera > 1) ? page_alloc : uma_small_alloc;
1060009b6fcbSJeff Roberson #else
1061099a0e58SBosko Milekic 	keg->uk_allocf = page_alloc;
1062009b6fcbSJeff Roberson #endif
1063099a0e58SBosko Milekic 	return keg->uk_allocf(zone, bytes, pflag, wait);
1064009b6fcbSJeff Roberson }
1065009b6fcbSJeff Roberson 
1066009b6fcbSJeff Roberson /*
10678355f576SJeff Roberson  * Allocates a number of pages from the system
10688355f576SJeff Roberson  *
10698355f576SJeff Roberson  * Arguments:
10708355f576SJeff Roberson  *	bytes  The number of bytes requested
10718355f576SJeff Roberson  *	wait  Shall we wait?
10728355f576SJeff Roberson  *
10738355f576SJeff Roberson  * Returns:
10748355f576SJeff Roberson  *	A pointer to the alloced memory or possibly
10758355f576SJeff Roberson  *	NULL if M_NOWAIT is set.
10768355f576SJeff Roberson  */
10778355f576SJeff Roberson static void *
1078f2c2231eSRyan Stone page_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *pflag, int wait)
10798355f576SJeff Roberson {
10808355f576SJeff Roberson 	void *p;	/* Returned page */
10818355f576SJeff Roberson 
10828355f576SJeff Roberson 	*pflag = UMA_SLAB_KMEM;
10835df87b21SJeff Roberson 	p = (void *) kmem_malloc(kmem_arena, bytes, wait);
10848355f576SJeff Roberson 
10858355f576SJeff Roberson 	return (p);
10868355f576SJeff Roberson }
10878355f576SJeff Roberson 
10888355f576SJeff Roberson /*
10898355f576SJeff Roberson  * Allocates a number of pages from within an object
10908355f576SJeff Roberson  *
10918355f576SJeff Roberson  * Arguments:
10928355f576SJeff Roberson  *	bytes  The number of bytes requested
10938355f576SJeff Roberson  *	wait   Shall we wait?
10948355f576SJeff Roberson  *
10958355f576SJeff Roberson  * Returns:
10968355f576SJeff Roberson  *	A pointer to the alloced memory or possibly
10978355f576SJeff Roberson  *	NULL if M_NOWAIT is set.
10988355f576SJeff Roberson  */
10998355f576SJeff Roberson static void *
1100f2c2231eSRyan Stone noobj_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *flags, int wait)
11018355f576SJeff Roberson {
1102a4915c21SAttilio Rao 	TAILQ_HEAD(, vm_page) alloctail;
1103a4915c21SAttilio Rao 	u_long npages;
1104b245ac95SAlan Cox 	vm_offset_t retkva, zkva;
1105a4915c21SAttilio Rao 	vm_page_t p, p_next;
1106e20a199fSJeff Roberson 	uma_keg_t keg;
11078355f576SJeff Roberson 
1108a4915c21SAttilio Rao 	TAILQ_INIT(&alloctail);
1109e20a199fSJeff Roberson 	keg = zone_first_keg(zone);
1110a4915c21SAttilio Rao 
1111a4915c21SAttilio Rao 	npages = howmany(bytes, PAGE_SIZE);
1112a4915c21SAttilio Rao 	while (npages > 0) {
1113a4915c21SAttilio Rao 		p = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT |
11148d6fbbb8SJeff Roberson 		    VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
1115772c8b67SKonstantin Belousov 		    ((wait & M_WAITOK) != 0 ? VM_ALLOC_WAITOK :
1116772c8b67SKonstantin Belousov 		    VM_ALLOC_NOWAIT));
1117a4915c21SAttilio Rao 		if (p != NULL) {
1118a4915c21SAttilio Rao 			/*
1119a4915c21SAttilio Rao 			 * Since the page does not belong to an object, its
1120a4915c21SAttilio Rao 			 * listq is unused.
1121a4915c21SAttilio Rao 			 */
1122a4915c21SAttilio Rao 			TAILQ_INSERT_TAIL(&alloctail, p, listq);
1123a4915c21SAttilio Rao 			npages--;
1124a4915c21SAttilio Rao 			continue;
1125a4915c21SAttilio Rao 		}
11268355f576SJeff Roberson 		/*
1127a4915c21SAttilio Rao 		 * Page allocation failed, free intermediate pages and
1128a4915c21SAttilio Rao 		 * exit.
11298355f576SJeff Roberson 		 */
1130a4915c21SAttilio Rao 		TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
1131087a6132SAlan Cox 			vm_page_unwire(p, PQ_NONE);
1132b245ac95SAlan Cox 			vm_page_free(p);
1133b245ac95SAlan Cox 		}
1134a4915c21SAttilio Rao 		return (NULL);
1135b245ac95SAlan Cox 	}
11368355f576SJeff Roberson 	*flags = UMA_SLAB_PRIV;
1137a4915c21SAttilio Rao 	zkva = keg->uk_kva +
1138a4915c21SAttilio Rao 	    atomic_fetchadd_long(&keg->uk_offset, round_page(bytes));
1139a4915c21SAttilio Rao 	retkva = zkva;
1140a4915c21SAttilio Rao 	TAILQ_FOREACH(p, &alloctail, listq) {
1141a4915c21SAttilio Rao 		pmap_qenter(zkva, &p, 1);
1142a4915c21SAttilio Rao 		zkva += PAGE_SIZE;
1143a4915c21SAttilio Rao 	}
11448355f576SJeff Roberson 
11458355f576SJeff Roberson 	return ((void *)retkva);
11468355f576SJeff Roberson }
11478355f576SJeff Roberson 
11488355f576SJeff Roberson /*
11498355f576SJeff Roberson  * Frees a number of pages to the system
11508355f576SJeff Roberson  *
11518355f576SJeff Roberson  * Arguments:
11528355f576SJeff Roberson  *	mem   A pointer to the memory to be freed
11538355f576SJeff Roberson  *	size  The size of the memory being freed
11548355f576SJeff Roberson  *	flags The original p->us_flags field
11558355f576SJeff Roberson  *
11568355f576SJeff Roberson  * Returns:
11578355f576SJeff Roberson  *	Nothing
11588355f576SJeff Roberson  */
11598355f576SJeff Roberson static void
1160f2c2231eSRyan Stone page_free(void *mem, vm_size_t size, uint8_t flags)
11618355f576SJeff Roberson {
11625df87b21SJeff Roberson 	struct vmem *vmem;
11633370c5bfSJeff Roberson 
11648355f576SJeff Roberson 	if (flags & UMA_SLAB_KMEM)
11655df87b21SJeff Roberson 		vmem = kmem_arena;
1166aea6e893SAlan Cox 	else if (flags & UMA_SLAB_KERNEL)
11675df87b21SJeff Roberson 		vmem = kernel_arena;
11688355f576SJeff Roberson 	else
1169b5345ef1SJustin Hibbits 		panic("UMA: page_free used with invalid flags %x", flags);
11708355f576SJeff Roberson 
11715df87b21SJeff Roberson 	kmem_free(vmem, (vm_offset_t)mem, size);
11728355f576SJeff Roberson }
11738355f576SJeff Roberson 
11748355f576SJeff Roberson /*
11758355f576SJeff Roberson  * Zero fill initializer
11768355f576SJeff Roberson  *
11778355f576SJeff Roberson  * Arguments/Returns follow uma_init specifications
11788355f576SJeff Roberson  */
1179b23f72e9SBrian Feldman static int
1180b23f72e9SBrian Feldman zero_init(void *mem, int size, int flags)
11818355f576SJeff Roberson {
11828355f576SJeff Roberson 	bzero(mem, size);
1183b23f72e9SBrian Feldman 	return (0);
11848355f576SJeff Roberson }
11858355f576SJeff Roberson 
11868355f576SJeff Roberson /*
1187e20a199fSJeff Roberson  * Finish creating a small uma keg.  This calculates ipers, and the keg size.
11888355f576SJeff Roberson  *
11898355f576SJeff Roberson  * Arguments
1190e20a199fSJeff Roberson  *	keg  The zone we should initialize
11918355f576SJeff Roberson  *
11928355f576SJeff Roberson  * Returns
11938355f576SJeff Roberson  *	Nothing
11948355f576SJeff Roberson  */
11958355f576SJeff Roberson static void
1196e20a199fSJeff Roberson keg_small_init(uma_keg_t keg)
11978355f576SJeff Roberson {
1198244f4554SBosko Milekic 	u_int rsize;
1199244f4554SBosko Milekic 	u_int memused;
1200244f4554SBosko Milekic 	u_int wastedspace;
1201244f4554SBosko Milekic 	u_int shsize;
1202a55ebb7cSAndriy Gapon 	u_int slabsize;
12038355f576SJeff Roberson 
1204ad97af7eSGleb Smirnoff 	if (keg->uk_flags & UMA_ZONE_PCPU) {
120596c85efbSNathan Whitehorn 		u_int ncpus = (mp_maxid + 1) ? (mp_maxid + 1) : MAXCPU;
1206e28a647dSGleb Smirnoff 
1207a55ebb7cSAndriy Gapon 		slabsize = sizeof(struct pcpu);
1208e28a647dSGleb Smirnoff 		keg->uk_ppera = howmany(ncpus * sizeof(struct pcpu),
1209ad97af7eSGleb Smirnoff 		    PAGE_SIZE);
1210ad97af7eSGleb Smirnoff 	} else {
1211a55ebb7cSAndriy Gapon 		slabsize = UMA_SLAB_SIZE;
1212ad97af7eSGleb Smirnoff 		keg->uk_ppera = 1;
1213ad97af7eSGleb Smirnoff 	}
1214ad97af7eSGleb Smirnoff 
1215ef72505eSJeff Roberson 	/*
1216ef72505eSJeff Roberson 	 * Calculate the size of each allocation (rsize) according to
1217ef72505eSJeff Roberson 	 * alignment.  If the requested size is smaller than we have
1218ef72505eSJeff Roberson 	 * allocation bits for we round it up.
1219ef72505eSJeff Roberson 	 */
1220099a0e58SBosko Milekic 	rsize = keg->uk_size;
1221a55ebb7cSAndriy Gapon 	if (rsize < slabsize / SLAB_SETSIZE)
1222a55ebb7cSAndriy Gapon 		rsize = slabsize / SLAB_SETSIZE;
1223099a0e58SBosko Milekic 	if (rsize & keg->uk_align)
1224099a0e58SBosko Milekic 		rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1);
1225099a0e58SBosko Milekic 	keg->uk_rsize = rsize;
1226ad97af7eSGleb Smirnoff 
1227ad97af7eSGleb Smirnoff 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0 ||
1228ad97af7eSGleb Smirnoff 	    keg->uk_rsize < sizeof(struct pcpu),
1229ad97af7eSGleb Smirnoff 	    ("%s: size %u too large", __func__, keg->uk_rsize));
12308355f576SJeff Roberson 
1231ef72505eSJeff Roberson 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
12322864dbbfSGleb Smirnoff 		shsize = 0;
1233ef72505eSJeff Roberson 	else
1234244f4554SBosko Milekic 		shsize = sizeof(struct uma_slab);
12358355f576SJeff Roberson 
1236a55ebb7cSAndriy Gapon 	keg->uk_ipers = (slabsize - shsize) / rsize;
1237ef72505eSJeff Roberson 	KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
1238ad97af7eSGleb Smirnoff 	    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
1239ad97af7eSGleb Smirnoff 
1240244f4554SBosko Milekic 	memused = keg->uk_ipers * rsize + shsize;
1241a55ebb7cSAndriy Gapon 	wastedspace = slabsize - memused;
1242244f4554SBosko Milekic 
124320e8e865SBosko Milekic 	/*
1244244f4554SBosko Milekic 	 * We can't do OFFPAGE if we're internal or if we've been
124520e8e865SBosko Milekic 	 * asked to not go to the VM for buckets.  If we do this we
12466fd34d6fSJeff Roberson 	 * may end up going to the VM  for slabs which we do not
12476fd34d6fSJeff Roberson 	 * want to do if we're UMA_ZFLAG_CACHEONLY as a result
12486fd34d6fSJeff Roberson 	 * of UMA_ZONE_VM, which clearly forbids it.
124920e8e865SBosko Milekic 	 */
1250099a0e58SBosko Milekic 	if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
1251099a0e58SBosko Milekic 	    (keg->uk_flags & UMA_ZFLAG_CACHEONLY))
12528355f576SJeff Roberson 		return;
1253244f4554SBosko Milekic 
1254ef72505eSJeff Roberson 	/*
1255ef72505eSJeff Roberson 	 * See if using an OFFPAGE slab will limit our waste.  Only do
1256ef72505eSJeff Roberson 	 * this if it permits more items per-slab.
1257ef72505eSJeff Roberson 	 *
1258ef72505eSJeff Roberson 	 * XXX We could try growing slabsize to limit max waste as well.
1259ef72505eSJeff Roberson 	 * Historically this was not done because the VM could not
1260ef72505eSJeff Roberson 	 * efficiently handle contiguous allocations.
1261ef72505eSJeff Roberson 	 */
1262a55ebb7cSAndriy Gapon 	if ((wastedspace >= slabsize / UMA_MAX_WASTE) &&
1263a55ebb7cSAndriy Gapon 	    (keg->uk_ipers < (slabsize / keg->uk_rsize))) {
1264a55ebb7cSAndriy Gapon 		keg->uk_ipers = slabsize / keg->uk_rsize;
1265ef72505eSJeff Roberson 		KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
1266ad97af7eSGleb Smirnoff 		    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
12671431a748SGleb Smirnoff 		CTR6(KTR_UMA, "UMA decided we need offpage slab headers for "
12681431a748SGleb Smirnoff 		    "keg: %s(%p), calculated wastedspace = %d, "
1269244f4554SBosko Milekic 		    "maximum wasted space allowed = %d, "
1270244f4554SBosko Milekic 		    "calculated ipers = %d, "
12711431a748SGleb Smirnoff 		    "new wasted space = %d\n", keg->uk_name, keg, wastedspace,
1272a55ebb7cSAndriy Gapon 		    slabsize / UMA_MAX_WASTE, keg->uk_ipers,
1273a55ebb7cSAndriy Gapon 		    slabsize - keg->uk_ipers * keg->uk_rsize);
1274099a0e58SBosko Milekic 		keg->uk_flags |= UMA_ZONE_OFFPAGE;
12758355f576SJeff Roberson 	}
1276ad97af7eSGleb Smirnoff 
1277ad97af7eSGleb Smirnoff 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
1278ad97af7eSGleb Smirnoff 	    (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1279ad97af7eSGleb Smirnoff 		keg->uk_flags |= UMA_ZONE_HASH;
12808355f576SJeff Roberson }
12818355f576SJeff Roberson 
12828355f576SJeff Roberson /*
1283e20a199fSJeff Roberson  * Finish creating a large (> UMA_SLAB_SIZE) uma kegs.  Just give in and do
12848355f576SJeff Roberson  * OFFPAGE for now.  When I can allow for more dynamic slab sizes this will be
12858355f576SJeff Roberson  * more complicated.
12868355f576SJeff Roberson  *
12878355f576SJeff Roberson  * Arguments
1288e20a199fSJeff Roberson  *	keg  The keg we should initialize
12898355f576SJeff Roberson  *
12908355f576SJeff Roberson  * Returns
12918355f576SJeff Roberson  *	Nothing
12928355f576SJeff Roberson  */
12938355f576SJeff Roberson static void
1294e20a199fSJeff Roberson keg_large_init(uma_keg_t keg)
12958355f576SJeff Roberson {
1296cec48e00SAlexander Motin 	u_int shsize;
12978355f576SJeff Roberson 
1298e20a199fSJeff Roberson 	KASSERT(keg != NULL, ("Keg is null in keg_large_init"));
1299099a0e58SBosko Milekic 	KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0,
1300e20a199fSJeff Roberson 	    ("keg_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY keg"));
1301ad97af7eSGleb Smirnoff 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
1302ad97af7eSGleb Smirnoff 	    ("%s: Cannot large-init a UMA_ZONE_PCPU keg", __func__));
130320e8e865SBosko Milekic 
1304ad97af7eSGleb Smirnoff 	keg->uk_ppera = howmany(keg->uk_size, PAGE_SIZE);
1305099a0e58SBosko Milekic 	keg->uk_ipers = 1;
1306e9a069d8SJohn Baldwin 	keg->uk_rsize = keg->uk_size;
1307e9a069d8SJohn Baldwin 
1308cec48e00SAlexander Motin 	/* Check whether we have enough space to not do OFFPAGE. */
1309cec48e00SAlexander Motin 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) == 0) {
1310cec48e00SAlexander Motin 		shsize = sizeof(struct uma_slab);
1311cec48e00SAlexander Motin 		if (shsize & UMA_ALIGN_PTR)
1312cec48e00SAlexander Motin 			shsize = (shsize & ~UMA_ALIGN_PTR) +
1313cec48e00SAlexander Motin 			    (UMA_ALIGN_PTR + 1);
1314cec48e00SAlexander Motin 
13152934eb8aSMark Johnston 		if (PAGE_SIZE * keg->uk_ppera - keg->uk_rsize < shsize) {
13162934eb8aSMark Johnston 			/*
13172934eb8aSMark Johnston 			 * We can't do OFFPAGE if we're internal, in which case
13182934eb8aSMark Johnston 			 * we need an extra page per allocation to contain the
13192934eb8aSMark Johnston 			 * slab header.
13202934eb8aSMark Johnston 			 */
13212934eb8aSMark Johnston 			if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) == 0)
1322099a0e58SBosko Milekic 				keg->uk_flags |= UMA_ZONE_OFFPAGE;
13232934eb8aSMark Johnston 			else
13242934eb8aSMark Johnston 				keg->uk_ppera++;
13252934eb8aSMark Johnston 		}
1326cec48e00SAlexander Motin 	}
1327cec48e00SAlexander Motin 
1328cec48e00SAlexander Motin 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
1329cec48e00SAlexander Motin 	    (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1330099a0e58SBosko Milekic 		keg->uk_flags |= UMA_ZONE_HASH;
13318355f576SJeff Roberson }
13328355f576SJeff Roberson 
1333e20a199fSJeff Roberson static void
1334e20a199fSJeff Roberson keg_cachespread_init(uma_keg_t keg)
1335e20a199fSJeff Roberson {
1336e20a199fSJeff Roberson 	int alignsize;
1337e20a199fSJeff Roberson 	int trailer;
1338e20a199fSJeff Roberson 	int pages;
1339e20a199fSJeff Roberson 	int rsize;
1340e20a199fSJeff Roberson 
1341ad97af7eSGleb Smirnoff 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
1342ad97af7eSGleb Smirnoff 	    ("%s: Cannot cachespread-init a UMA_ZONE_PCPU keg", __func__));
1343ad97af7eSGleb Smirnoff 
1344e20a199fSJeff Roberson 	alignsize = keg->uk_align + 1;
1345e20a199fSJeff Roberson 	rsize = keg->uk_size;
1346e20a199fSJeff Roberson 	/*
1347e20a199fSJeff Roberson 	 * We want one item to start on every align boundary in a page.  To
1348e20a199fSJeff Roberson 	 * do this we will span pages.  We will also extend the item by the
1349e20a199fSJeff Roberson 	 * size of align if it is an even multiple of align.  Otherwise, it
1350e20a199fSJeff Roberson 	 * would fall on the same boundary every time.
1351e20a199fSJeff Roberson 	 */
1352e20a199fSJeff Roberson 	if (rsize & keg->uk_align)
1353e20a199fSJeff Roberson 		rsize = (rsize & ~keg->uk_align) + alignsize;
1354e20a199fSJeff Roberson 	if ((rsize & alignsize) == 0)
1355e20a199fSJeff Roberson 		rsize += alignsize;
1356e20a199fSJeff Roberson 	trailer = rsize - keg->uk_size;
1357e20a199fSJeff Roberson 	pages = (rsize * (PAGE_SIZE / alignsize)) / PAGE_SIZE;
1358e20a199fSJeff Roberson 	pages = MIN(pages, (128 * 1024) / PAGE_SIZE);
1359e20a199fSJeff Roberson 	keg->uk_rsize = rsize;
1360e20a199fSJeff Roberson 	keg->uk_ppera = pages;
1361e20a199fSJeff Roberson 	keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize;
1362e20a199fSJeff Roberson 	keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
13632367b4ddSDimitry Andric 	KASSERT(keg->uk_ipers <= SLAB_SETSIZE,
136442321809SGleb Smirnoff 	    ("%s: keg->uk_ipers too high(%d) increase max_ipers", __func__,
1365e20a199fSJeff Roberson 	    keg->uk_ipers));
1366e20a199fSJeff Roberson }
1367e20a199fSJeff Roberson 
13688355f576SJeff Roberson /*
1369099a0e58SBosko Milekic  * Keg header ctor.  This initializes all fields, locks, etc.  And inserts
1370099a0e58SBosko Milekic  * the keg onto the global keg list.
13718355f576SJeff Roberson  *
13728355f576SJeff Roberson  * Arguments/Returns follow uma_ctor specifications
1373099a0e58SBosko Milekic  *	udata  Actually uma_kctor_args
1374099a0e58SBosko Milekic  */
1375b23f72e9SBrian Feldman static int
1376b23f72e9SBrian Feldman keg_ctor(void *mem, int size, void *udata, int flags)
1377099a0e58SBosko Milekic {
1378099a0e58SBosko Milekic 	struct uma_kctor_args *arg = udata;
1379099a0e58SBosko Milekic 	uma_keg_t keg = mem;
1380099a0e58SBosko Milekic 	uma_zone_t zone;
1381099a0e58SBosko Milekic 
1382099a0e58SBosko Milekic 	bzero(keg, size);
1383099a0e58SBosko Milekic 	keg->uk_size = arg->size;
1384099a0e58SBosko Milekic 	keg->uk_init = arg->uminit;
1385099a0e58SBosko Milekic 	keg->uk_fini = arg->fini;
1386099a0e58SBosko Milekic 	keg->uk_align = arg->align;
1387099a0e58SBosko Milekic 	keg->uk_free = 0;
13886fd34d6fSJeff Roberson 	keg->uk_reserve = 0;
1389099a0e58SBosko Milekic 	keg->uk_pages = 0;
1390099a0e58SBosko Milekic 	keg->uk_flags = arg->flags;
1391099a0e58SBosko Milekic 	keg->uk_slabzone = NULL;
1392099a0e58SBosko Milekic 
1393099a0e58SBosko Milekic 	/*
1394099a0e58SBosko Milekic 	 * The master zone is passed to us at keg-creation time.
1395099a0e58SBosko Milekic 	 */
1396099a0e58SBosko Milekic 	zone = arg->zone;
1397e20a199fSJeff Roberson 	keg->uk_name = zone->uz_name;
1398099a0e58SBosko Milekic 
1399099a0e58SBosko Milekic 	if (arg->flags & UMA_ZONE_VM)
1400099a0e58SBosko Milekic 		keg->uk_flags |= UMA_ZFLAG_CACHEONLY;
1401099a0e58SBosko Milekic 
1402099a0e58SBosko Milekic 	if (arg->flags & UMA_ZONE_ZINIT)
1403099a0e58SBosko Milekic 		keg->uk_init = zero_init;
1404099a0e58SBosko Milekic 
1405cfcae3f8SGleb Smirnoff 	if (arg->flags & UMA_ZONE_MALLOC)
1406e20a199fSJeff Roberson 		keg->uk_flags |= UMA_ZONE_VTOSLAB;
1407e20a199fSJeff Roberson 
1408ad97af7eSGleb Smirnoff 	if (arg->flags & UMA_ZONE_PCPU)
1409ad97af7eSGleb Smirnoff #ifdef SMP
1410ad97af7eSGleb Smirnoff 		keg->uk_flags |= UMA_ZONE_OFFPAGE;
1411ad97af7eSGleb Smirnoff #else
1412ad97af7eSGleb Smirnoff 		keg->uk_flags &= ~UMA_ZONE_PCPU;
1413ad97af7eSGleb Smirnoff #endif
1414ad97af7eSGleb Smirnoff 
1415ef72505eSJeff Roberson 	if (keg->uk_flags & UMA_ZONE_CACHESPREAD) {
1416e20a199fSJeff Roberson 		keg_cachespread_init(keg);
1417244f4554SBosko Milekic 	} else {
1418ef72505eSJeff Roberson 		if (keg->uk_size > (UMA_SLAB_SIZE - sizeof(struct uma_slab)))
1419e20a199fSJeff Roberson 			keg_large_init(keg);
1420244f4554SBosko Milekic 		else
1421e20a199fSJeff Roberson 			keg_small_init(keg);
1422244f4554SBosko Milekic 	}
1423099a0e58SBosko Milekic 
1424cfcae3f8SGleb Smirnoff 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
1425099a0e58SBosko Milekic 		keg->uk_slabzone = slabzone;
1426099a0e58SBosko Milekic 
1427099a0e58SBosko Milekic 	/*
1428099a0e58SBosko Milekic 	 * If we haven't booted yet we need allocations to go through the
1429099a0e58SBosko Milekic 	 * startup cache until the vm is ready.
1430099a0e58SBosko Milekic 	 */
14318cd02d00SAlan Cox 	if (booted < UMA_STARTUP2)
14328cd02d00SAlan Cox 		keg->uk_allocf = startup_alloc;
143377e19437SGleb Smirnoff #ifdef UMA_MD_SMALL_ALLOC
143477e19437SGleb Smirnoff 	else if (keg->uk_ppera == 1)
143577e19437SGleb Smirnoff 		keg->uk_allocf = uma_small_alloc;
14368cd02d00SAlan Cox #endif
143777e19437SGleb Smirnoff 	else
143877e19437SGleb Smirnoff 		keg->uk_allocf = page_alloc;
143977e19437SGleb Smirnoff #ifdef UMA_MD_SMALL_ALLOC
144077e19437SGleb Smirnoff 	if (keg->uk_ppera == 1)
144177e19437SGleb Smirnoff 		keg->uk_freef = uma_small_free;
144277e19437SGleb Smirnoff 	else
144377e19437SGleb Smirnoff #endif
144477e19437SGleb Smirnoff 		keg->uk_freef = page_free;
1445099a0e58SBosko Milekic 
1446099a0e58SBosko Milekic 	/*
1447af526374SJeff Roberson 	 * Initialize keg's lock
1448099a0e58SBosko Milekic 	 */
1449af526374SJeff Roberson 	KEG_LOCK_INIT(keg, (arg->flags & UMA_ZONE_MTXCLASS));
1450099a0e58SBosko Milekic 
1451099a0e58SBosko Milekic 	/*
1452099a0e58SBosko Milekic 	 * If we're putting the slab header in the actual page we need to
1453099a0e58SBosko Milekic 	 * figure out where in each page it goes.  This calculates a right
1454099a0e58SBosko Milekic 	 * justified offset into the memory on an ALIGN_PTR boundary.
1455099a0e58SBosko Milekic 	 */
1456099a0e58SBosko Milekic 	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) {
1457244f4554SBosko Milekic 		u_int totsize;
1458099a0e58SBosko Milekic 
1459099a0e58SBosko Milekic 		/* Size of the slab struct and free list */
1460ef72505eSJeff Roberson 		totsize = sizeof(struct uma_slab);
1461ef72505eSJeff Roberson 
1462099a0e58SBosko Milekic 		if (totsize & UMA_ALIGN_PTR)
1463099a0e58SBosko Milekic 			totsize = (totsize & ~UMA_ALIGN_PTR) +
1464099a0e58SBosko Milekic 			    (UMA_ALIGN_PTR + 1);
1465ad97af7eSGleb Smirnoff 		keg->uk_pgoff = (PAGE_SIZE * keg->uk_ppera) - totsize;
1466244f4554SBosko Milekic 
1467244f4554SBosko Milekic 		/*
1468244f4554SBosko Milekic 		 * The only way the following is possible is if with our
1469244f4554SBosko Milekic 		 * UMA_ALIGN_PTR adjustments we are now bigger than
1470244f4554SBosko Milekic 		 * UMA_SLAB_SIZE.  I haven't checked whether this is
1471244f4554SBosko Milekic 		 * mathematically possible for all cases, so we make
1472244f4554SBosko Milekic 		 * sure here anyway.
1473244f4554SBosko Milekic 		 */
1474ef72505eSJeff Roberson 		totsize = keg->uk_pgoff + sizeof(struct uma_slab);
1475ad97af7eSGleb Smirnoff 		if (totsize > PAGE_SIZE * keg->uk_ppera) {
1476099a0e58SBosko Milekic 			printf("zone %s ipers %d rsize %d size %d\n",
1477099a0e58SBosko Milekic 			    zone->uz_name, keg->uk_ipers, keg->uk_rsize,
1478099a0e58SBosko Milekic 			    keg->uk_size);
1479aea6e893SAlan Cox 			panic("UMA slab won't fit.");
1480099a0e58SBosko Milekic 		}
1481099a0e58SBosko Milekic 	}
1482099a0e58SBosko Milekic 
1483099a0e58SBosko Milekic 	if (keg->uk_flags & UMA_ZONE_HASH)
1484099a0e58SBosko Milekic 		hash_alloc(&keg->uk_hash);
1485099a0e58SBosko Milekic 
14861431a748SGleb Smirnoff 	CTR5(KTR_UMA, "keg_ctor %p zone %s(%p) out %d free %d\n",
14871431a748SGleb Smirnoff 	    keg, zone->uz_name, zone,
148857223e99SAndriy Gapon 	    (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free,
148957223e99SAndriy Gapon 	    keg->uk_free);
1490099a0e58SBosko Milekic 
1491099a0e58SBosko Milekic 	LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);
1492099a0e58SBosko Milekic 
1493111fbcd5SBryan Venteicher 	rw_wlock(&uma_rwlock);
1494099a0e58SBosko Milekic 	LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
1495111fbcd5SBryan Venteicher 	rw_wunlock(&uma_rwlock);
1496b23f72e9SBrian Feldman 	return (0);
1497099a0e58SBosko Milekic }
1498099a0e58SBosko Milekic 
1499099a0e58SBosko Milekic /*
1500099a0e58SBosko Milekic  * Zone header ctor.  This initializes all fields, locks, etc.
1501099a0e58SBosko Milekic  *
1502099a0e58SBosko Milekic  * Arguments/Returns follow uma_ctor specifications
1503099a0e58SBosko Milekic  *	udata  Actually uma_zctor_args
15048355f576SJeff Roberson  */
1505b23f72e9SBrian Feldman static int
1506b23f72e9SBrian Feldman zone_ctor(void *mem, int size, void *udata, int flags)
15078355f576SJeff Roberson {
15088355f576SJeff Roberson 	struct uma_zctor_args *arg = udata;
15098355f576SJeff Roberson 	uma_zone_t zone = mem;
1510099a0e58SBosko Milekic 	uma_zone_t z;
1511099a0e58SBosko Milekic 	uma_keg_t keg;
15128355f576SJeff Roberson 
15138355f576SJeff Roberson 	bzero(zone, size);
15148355f576SJeff Roberson 	zone->uz_name = arg->name;
15158355f576SJeff Roberson 	zone->uz_ctor = arg->ctor;
15168355f576SJeff Roberson 	zone->uz_dtor = arg->dtor;
1517e20a199fSJeff Roberson 	zone->uz_slab = zone_fetch_slab;
1518099a0e58SBosko Milekic 	zone->uz_init = NULL;
1519099a0e58SBosko Milekic 	zone->uz_fini = NULL;
1520099a0e58SBosko Milekic 	zone->uz_allocs = 0;
1521773df9abSRobert Watson 	zone->uz_frees = 0;
15222019094aSRobert Watson 	zone->uz_fails = 0;
1523bf965959SSean Bruno 	zone->uz_sleeps = 0;
1524fc03d22bSJeff Roberson 	zone->uz_count = 0;
1525ace66b56SAlexander Motin 	zone->uz_count_min = 0;
1526e20a199fSJeff Roberson 	zone->uz_flags = 0;
15272f891cd5SPawel Jakub Dawidek 	zone->uz_warning = NULL;
15282f891cd5SPawel Jakub Dawidek 	timevalclear(&zone->uz_ratecheck);
1529e20a199fSJeff Roberson 	keg = arg->keg;
1530099a0e58SBosko Milekic 
1531af526374SJeff Roberson 	ZONE_LOCK_INIT(zone, (arg->flags & UMA_ZONE_MTXCLASS));
1532af526374SJeff Roberson 
15330095a784SJeff Roberson 	/*
15340095a784SJeff Roberson 	 * This is a pure cache zone, no kegs.
15350095a784SJeff Roberson 	 */
15360095a784SJeff Roberson 	if (arg->import) {
15376fd34d6fSJeff Roberson 		if (arg->flags & UMA_ZONE_VM)
15386fd34d6fSJeff Roberson 			arg->flags |= UMA_ZFLAG_CACHEONLY;
15396fd34d6fSJeff Roberson 		zone->uz_flags = arg->flags;
1540af526374SJeff Roberson 		zone->uz_size = arg->size;
15410095a784SJeff Roberson 		zone->uz_import = arg->import;
15420095a784SJeff Roberson 		zone->uz_release = arg->release;
15430095a784SJeff Roberson 		zone->uz_arg = arg->arg;
1544af526374SJeff Roberson 		zone->uz_lockptr = &zone->uz_lock;
1545111fbcd5SBryan Venteicher 		rw_wlock(&uma_rwlock);
154603175483SAlexander Motin 		LIST_INSERT_HEAD(&uma_cachezones, zone, uz_link);
1547111fbcd5SBryan Venteicher 		rw_wunlock(&uma_rwlock);
1548af526374SJeff Roberson 		goto out;
15490095a784SJeff Roberson 	}
15500095a784SJeff Roberson 
15510095a784SJeff Roberson 	/*
15520095a784SJeff Roberson 	 * Use the regular zone/keg/slab allocator.
15530095a784SJeff Roberson 	 */
15540095a784SJeff Roberson 	zone->uz_import = (uma_import)zone_import;
15550095a784SJeff Roberson 	zone->uz_release = (uma_release)zone_release;
15560095a784SJeff Roberson 	zone->uz_arg = zone;
15570095a784SJeff Roberson 
1558099a0e58SBosko Milekic 	if (arg->flags & UMA_ZONE_SECONDARY) {
1559099a0e58SBosko Milekic 		KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
15608355f576SJeff Roberson 		zone->uz_init = arg->uminit;
1561e221e841SJeff Roberson 		zone->uz_fini = arg->fini;
1562af526374SJeff Roberson 		zone->uz_lockptr = &keg->uk_lock;
1563e20a199fSJeff Roberson 		zone->uz_flags |= UMA_ZONE_SECONDARY;
1564111fbcd5SBryan Venteicher 		rw_wlock(&uma_rwlock);
1565099a0e58SBosko Milekic 		ZONE_LOCK(zone);
1566099a0e58SBosko Milekic 		LIST_FOREACH(z, &keg->uk_zones, uz_link) {
1567099a0e58SBosko Milekic 			if (LIST_NEXT(z, uz_link) == NULL) {
1568099a0e58SBosko Milekic 				LIST_INSERT_AFTER(z, zone, uz_link);
1569099a0e58SBosko Milekic 				break;
1570099a0e58SBosko Milekic 			}
1571099a0e58SBosko Milekic 		}
1572099a0e58SBosko Milekic 		ZONE_UNLOCK(zone);
1573111fbcd5SBryan Venteicher 		rw_wunlock(&uma_rwlock);
1574e20a199fSJeff Roberson 	} else if (keg == NULL) {
1575e20a199fSJeff Roberson 		if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
1576e20a199fSJeff Roberson 		    arg->align, arg->flags)) == NULL)
1577b23f72e9SBrian Feldman 			return (ENOMEM);
1578099a0e58SBosko Milekic 	} else {
1579099a0e58SBosko Milekic 		struct uma_kctor_args karg;
1580b23f72e9SBrian Feldman 		int error;
1581099a0e58SBosko Milekic 
1582099a0e58SBosko Milekic 		/* We should only be here from uma_startup() */
1583099a0e58SBosko Milekic 		karg.size = arg->size;
1584099a0e58SBosko Milekic 		karg.uminit = arg->uminit;
1585099a0e58SBosko Milekic 		karg.fini = arg->fini;
1586099a0e58SBosko Milekic 		karg.align = arg->align;
1587099a0e58SBosko Milekic 		karg.flags = arg->flags;
1588099a0e58SBosko Milekic 		karg.zone = zone;
1589b23f72e9SBrian Feldman 		error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
1590b23f72e9SBrian Feldman 		    flags);
1591b23f72e9SBrian Feldman 		if (error)
1592b23f72e9SBrian Feldman 			return (error);
1593099a0e58SBosko Milekic 	}
15940095a784SJeff Roberson 
1595e20a199fSJeff Roberson 	/*
1596e20a199fSJeff Roberson 	 * Link in the first keg.
1597e20a199fSJeff Roberson 	 */
1598e20a199fSJeff Roberson 	zone->uz_klink.kl_keg = keg;
1599e20a199fSJeff Roberson 	LIST_INSERT_HEAD(&zone->uz_kegs, &zone->uz_klink, kl_link);
1600af526374SJeff Roberson 	zone->uz_lockptr = &keg->uk_lock;
1601e20a199fSJeff Roberson 	zone->uz_size = keg->uk_size;
1602e20a199fSJeff Roberson 	zone->uz_flags |= (keg->uk_flags &
1603e20a199fSJeff Roberson 	    (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT));
16048355f576SJeff Roberson 
16058355f576SJeff Roberson 	/*
16068355f576SJeff Roberson 	 * Some internal zones don't have room allocated for the per cpu
16078355f576SJeff Roberson 	 * caches.  If we're internal, bail out here.
16088355f576SJeff Roberson 	 */
1609099a0e58SBosko Milekic 	if (keg->uk_flags & UMA_ZFLAG_INTERNAL) {
1610e20a199fSJeff Roberson 		KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0,
1611099a0e58SBosko Milekic 		    ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
1612b23f72e9SBrian Feldman 		return (0);
1613099a0e58SBosko Milekic 	}
16148355f576SJeff Roberson 
1615af526374SJeff Roberson out:
1616af526374SJeff Roberson 	if ((arg->flags & UMA_ZONE_MAXBUCKET) == 0)
1617af526374SJeff Roberson 		zone->uz_count = bucket_select(zone->uz_size);
16188355f576SJeff Roberson 	else
1619cae33c14SJeff Roberson 		zone->uz_count = BUCKET_MAX;
1620ace66b56SAlexander Motin 	zone->uz_count_min = zone->uz_count;
1621fc03d22bSJeff Roberson 
1622b23f72e9SBrian Feldman 	return (0);
16238355f576SJeff Roberson }
16248355f576SJeff Roberson 
16258355f576SJeff Roberson /*
1626099a0e58SBosko Milekic  * Keg header dtor.  This frees all data, destroys locks, frees the hash
1627099a0e58SBosko Milekic  * table and removes the keg from the global list.
16289c2cd7e5SJeff Roberson  *
16299c2cd7e5SJeff Roberson  * Arguments/Returns follow uma_dtor specifications
16309c2cd7e5SJeff Roberson  *	udata  unused
16319c2cd7e5SJeff Roberson  */
1632099a0e58SBosko Milekic static void
1633099a0e58SBosko Milekic keg_dtor(void *arg, int size, void *udata)
1634099a0e58SBosko Milekic {
1635099a0e58SBosko Milekic 	uma_keg_t keg;
16369c2cd7e5SJeff Roberson 
1637099a0e58SBosko Milekic 	keg = (uma_keg_t)arg;
1638e20a199fSJeff Roberson 	KEG_LOCK(keg);
1639099a0e58SBosko Milekic 	if (keg->uk_free != 0) {
1640a3845534SCraig Rodrigues 		printf("Freed UMA keg (%s) was not empty (%d items). "
1641099a0e58SBosko Milekic 		    " Lost %d pages of memory.\n",
1642a3845534SCraig Rodrigues 		    keg->uk_name ? keg->uk_name : "",
1643099a0e58SBosko Milekic 		    keg->uk_free, keg->uk_pages);
1644099a0e58SBosko Milekic 	}
1645e20a199fSJeff Roberson 	KEG_UNLOCK(keg);
1646099a0e58SBosko Milekic 
1647099a0e58SBosko Milekic 	hash_free(&keg->uk_hash);
1648099a0e58SBosko Milekic 
1649e20a199fSJeff Roberson 	KEG_LOCK_FINI(keg);
1650099a0e58SBosko Milekic }
1651099a0e58SBosko Milekic 
1652099a0e58SBosko Milekic /*
1653099a0e58SBosko Milekic  * Zone header dtor.
1654099a0e58SBosko Milekic  *
1655099a0e58SBosko Milekic  * Arguments/Returns follow uma_dtor specifications
1656099a0e58SBosko Milekic  *	udata  unused
1657099a0e58SBosko Milekic  */
16589c2cd7e5SJeff Roberson static void
16599c2cd7e5SJeff Roberson zone_dtor(void *arg, int size, void *udata)
16609c2cd7e5SJeff Roberson {
1661e20a199fSJeff Roberson 	uma_klink_t klink;
16629c2cd7e5SJeff Roberson 	uma_zone_t zone;
1663099a0e58SBosko Milekic 	uma_keg_t keg;
16649c2cd7e5SJeff Roberson 
16659c2cd7e5SJeff Roberson 	zone = (uma_zone_t)arg;
1666e20a199fSJeff Roberson 	keg = zone_first_keg(zone);
16679643769aSJeff Roberson 
1668e20a199fSJeff Roberson 	if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
16699643769aSJeff Roberson 		cache_drain(zone);
1670099a0e58SBosko Milekic 
1671111fbcd5SBryan Venteicher 	rw_wlock(&uma_rwlock);
1672099a0e58SBosko Milekic 	LIST_REMOVE(zone, uz_link);
1673111fbcd5SBryan Venteicher 	rw_wunlock(&uma_rwlock);
1674099a0e58SBosko Milekic 	/*
1675099a0e58SBosko Milekic 	 * XXX there are some races here where
1676099a0e58SBosko Milekic 	 * the zone can be drained but zone lock
1677099a0e58SBosko Milekic 	 * released and then refilled before we
1678099a0e58SBosko Milekic 	 * remove it... we dont care for now
1679099a0e58SBosko Milekic 	 */
1680e20a199fSJeff Roberson 	zone_drain_wait(zone, M_WAITOK);
1681e20a199fSJeff Roberson 	/*
1682e20a199fSJeff Roberson 	 * Unlink all of our kegs.
1683e20a199fSJeff Roberson 	 */
1684e20a199fSJeff Roberson 	while ((klink = LIST_FIRST(&zone->uz_kegs)) != NULL) {
1685e20a199fSJeff Roberson 		klink->kl_keg = NULL;
1686e20a199fSJeff Roberson 		LIST_REMOVE(klink, kl_link);
1687e20a199fSJeff Roberson 		if (klink == &zone->uz_klink)
1688e20a199fSJeff Roberson 			continue;
1689e20a199fSJeff Roberson 		free(klink, M_TEMP);
1690e20a199fSJeff Roberson 	}
1691e20a199fSJeff Roberson 	/*
1692e20a199fSJeff Roberson 	 * We only destroy kegs from non secondary zones.
1693e20a199fSJeff Roberson 	 */
16940095a784SJeff Roberson 	if (keg != NULL && (zone->uz_flags & UMA_ZONE_SECONDARY) == 0)  {
1695111fbcd5SBryan Venteicher 		rw_wlock(&uma_rwlock);
1696099a0e58SBosko Milekic 		LIST_REMOVE(keg, uk_link);
1697111fbcd5SBryan Venteicher 		rw_wunlock(&uma_rwlock);
16980095a784SJeff Roberson 		zone_free_item(kegs, keg, NULL, SKIP_NONE);
16999c2cd7e5SJeff Roberson 	}
1700af526374SJeff Roberson 	ZONE_LOCK_FINI(zone);
1701099a0e58SBosko Milekic }
1702099a0e58SBosko Milekic 
17039c2cd7e5SJeff Roberson /*
17048355f576SJeff Roberson  * Traverses every zone in the system and calls a callback
17058355f576SJeff Roberson  *
17068355f576SJeff Roberson  * Arguments:
17078355f576SJeff Roberson  *	zfunc  A pointer to a function which accepts a zone
17088355f576SJeff Roberson  *		as an argument.
17098355f576SJeff Roberson  *
17108355f576SJeff Roberson  * Returns:
17118355f576SJeff Roberson  *	Nothing
17128355f576SJeff Roberson  */
17138355f576SJeff Roberson static void
17148355f576SJeff Roberson zone_foreach(void (*zfunc)(uma_zone_t))
17158355f576SJeff Roberson {
1716099a0e58SBosko Milekic 	uma_keg_t keg;
17178355f576SJeff Roberson 	uma_zone_t zone;
17188355f576SJeff Roberson 
1719111fbcd5SBryan Venteicher 	rw_rlock(&uma_rwlock);
1720099a0e58SBosko Milekic 	LIST_FOREACH(keg, &uma_kegs, uk_link) {
1721099a0e58SBosko Milekic 		LIST_FOREACH(zone, &keg->uk_zones, uz_link)
17228355f576SJeff Roberson 			zfunc(zone);
1723099a0e58SBosko Milekic 	}
1724111fbcd5SBryan Venteicher 	rw_runlock(&uma_rwlock);
17258355f576SJeff Roberson }
17268355f576SJeff Roberson 
17278355f576SJeff Roberson /* Public functions */
17288355f576SJeff Roberson /* See uma.h */
17298355f576SJeff Roberson void
1730ac0a6fd0SGleb Smirnoff uma_startup(void *mem, int npages)
17318355f576SJeff Roberson {
17328355f576SJeff Roberson 	struct uma_zctor_args args;
17338355f576SJeff Roberson 
1734111fbcd5SBryan Venteicher 	rw_init(&uma_rwlock, "UMA lock");
1735099a0e58SBosko Milekic 
1736099a0e58SBosko Milekic 	/* "manually" create the initial zone */
17370095a784SJeff Roberson 	memset(&args, 0, sizeof(args));
1738099a0e58SBosko Milekic 	args.name = "UMA Kegs";
1739099a0e58SBosko Milekic 	args.size = sizeof(struct uma_keg);
1740099a0e58SBosko Milekic 	args.ctor = keg_ctor;
1741099a0e58SBosko Milekic 	args.dtor = keg_dtor;
17428355f576SJeff Roberson 	args.uminit = zero_init;
17438355f576SJeff Roberson 	args.fini = NULL;
1744099a0e58SBosko Milekic 	args.keg = &masterkeg;
17458355f576SJeff Roberson 	args.align = 32 - 1;
1746b60f5b79SJeff Roberson 	args.flags = UMA_ZFLAG_INTERNAL;
17478355f576SJeff Roberson 	/* The initial zone has no Per cpu queues so it's smaller */
1748b23f72e9SBrian Feldman 	zone_ctor(kegs, sizeof(struct uma_zone), &args, M_WAITOK);
17498355f576SJeff Roberson 
1750f353d338SAlan Cox 	mtx_init(&uma_boot_pages_mtx, "UMA boot pages", NULL, MTX_DEF);
1751ac0a6fd0SGleb Smirnoff 	bootmem = mem;
1752ac0a6fd0SGleb Smirnoff 	boot_pages = npages;
17538355f576SJeff Roberson 
1754099a0e58SBosko Milekic 	args.name = "UMA Zones";
1755099a0e58SBosko Milekic 	args.size = sizeof(struct uma_zone) +
175651cfb0beSDmitry Chagin 	    (sizeof(struct uma_cache) * (mp_maxid + 1));
1757099a0e58SBosko Milekic 	args.ctor = zone_ctor;
1758099a0e58SBosko Milekic 	args.dtor = zone_dtor;
1759099a0e58SBosko Milekic 	args.uminit = zero_init;
1760099a0e58SBosko Milekic 	args.fini = NULL;
1761099a0e58SBosko Milekic 	args.keg = NULL;
1762099a0e58SBosko Milekic 	args.align = 32 - 1;
1763099a0e58SBosko Milekic 	args.flags = UMA_ZFLAG_INTERNAL;
1764099a0e58SBosko Milekic 	/* The initial zone has no Per cpu queues so it's smaller */
1765b23f72e9SBrian Feldman 	zone_ctor(zones, sizeof(struct uma_zone), &args, M_WAITOK);
1766099a0e58SBosko Milekic 
17678355f576SJeff Roberson 	/* Now make a zone for slab headers */
17688355f576SJeff Roberson 	slabzone = uma_zcreate("UMA Slabs",
1769ef72505eSJeff Roberson 				sizeof(struct uma_slab),
17708355f576SJeff Roberson 				NULL, NULL, NULL, NULL,
1771b60f5b79SJeff Roberson 				UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
17728355f576SJeff Roberson 
17738355f576SJeff Roberson 	hashzone = uma_zcreate("UMA Hash",
17748355f576SJeff Roberson 	    sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
17758355f576SJeff Roberson 	    NULL, NULL, NULL, NULL,
1776b60f5b79SJeff Roberson 	    UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
17778355f576SJeff Roberson 
1778cae33c14SJeff Roberson 	bucket_init();
17798355f576SJeff Roberson 
1780342f1793SAlan Cox 	booted = UMA_STARTUP;
17818355f576SJeff Roberson }
17828355f576SJeff Roberson 
17838355f576SJeff Roberson /* see uma.h */
17848355f576SJeff Roberson void
178599571dc3SJeff Roberson uma_startup2(void)
17868355f576SJeff Roberson {
1787342f1793SAlan Cox 	booted = UMA_STARTUP2;
178886bbae32SJeff Roberson 	bucket_enable();
178995c4bf75SKonstantin Belousov 	sx_init(&uma_drain_lock, "umadrain");
17908355f576SJeff Roberson }
17918355f576SJeff Roberson 
17928355f576SJeff Roberson /*
17938355f576SJeff Roberson  * Initialize our callout handle
17948355f576SJeff Roberson  *
17958355f576SJeff Roberson  */
17968355f576SJeff Roberson 
17978355f576SJeff Roberson static void
17988355f576SJeff Roberson uma_startup3(void)
17998355f576SJeff Roberson {
18001431a748SGleb Smirnoff 
1801fd90e2edSJung-uk Kim 	callout_init(&uma_callout, 1);
18029643769aSJeff Roberson 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
18038355f576SJeff Roberson }
18048355f576SJeff Roberson 
1805e20a199fSJeff Roberson static uma_keg_t
1806099a0e58SBosko Milekic uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
180785dcf349SGleb Smirnoff 		int align, uint32_t flags)
1808099a0e58SBosko Milekic {
1809099a0e58SBosko Milekic 	struct uma_kctor_args args;
1810099a0e58SBosko Milekic 
1811099a0e58SBosko Milekic 	args.size = size;
1812099a0e58SBosko Milekic 	args.uminit = uminit;
1813099a0e58SBosko Milekic 	args.fini = fini;
18141e319f6dSRobert Watson 	args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align;
1815099a0e58SBosko Milekic 	args.flags = flags;
1816099a0e58SBosko Milekic 	args.zone = zone;
1817e20a199fSJeff Roberson 	return (zone_alloc_item(kegs, &args, M_WAITOK));
1818099a0e58SBosko Milekic }
1819099a0e58SBosko Milekic 
18208355f576SJeff Roberson /* See uma.h */
18211e319f6dSRobert Watson void
18221e319f6dSRobert Watson uma_set_align(int align)
18231e319f6dSRobert Watson {
18241e319f6dSRobert Watson 
18251e319f6dSRobert Watson 	if (align != UMA_ALIGN_CACHE)
18261e319f6dSRobert Watson 		uma_align_cache = align;
18271e319f6dSRobert Watson }
18281e319f6dSRobert Watson 
18291e319f6dSRobert Watson /* See uma.h */
18308355f576SJeff Roberson uma_zone_t
1831bb196eb4SMatthew D Fleming uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
183285dcf349SGleb Smirnoff 		uma_init uminit, uma_fini fini, int align, uint32_t flags)
18338355f576SJeff Roberson 
18348355f576SJeff Roberson {
18358355f576SJeff Roberson 	struct uma_zctor_args args;
183695c4bf75SKonstantin Belousov 	uma_zone_t res;
183795c4bf75SKonstantin Belousov 	bool locked;
18388355f576SJeff Roberson 
1839a5a35578SJohn Baldwin 	KASSERT(powerof2(align + 1), ("invalid zone alignment %d for \"%s\"",
1840a5a35578SJohn Baldwin 	    align, name));
1841a5a35578SJohn Baldwin 
18428355f576SJeff Roberson 	/* This stuff is essential for the zone ctor */
18430095a784SJeff Roberson 	memset(&args, 0, sizeof(args));
18448355f576SJeff Roberson 	args.name = name;
18458355f576SJeff Roberson 	args.size = size;
18468355f576SJeff Roberson 	args.ctor = ctor;
18478355f576SJeff Roberson 	args.dtor = dtor;
18488355f576SJeff Roberson 	args.uminit = uminit;
18498355f576SJeff Roberson 	args.fini = fini;
1850afc6dc36SJohn-Mark Gurney #ifdef  INVARIANTS
1851afc6dc36SJohn-Mark Gurney 	/*
1852afc6dc36SJohn-Mark Gurney 	 * If a zone is being created with an empty constructor and
1853afc6dc36SJohn-Mark Gurney 	 * destructor, pass UMA constructor/destructor which checks for
1854afc6dc36SJohn-Mark Gurney 	 * memory use after free.
1855afc6dc36SJohn-Mark Gurney 	 */
185619c591bfSMateusz Guzik 	if ((!(flags & (UMA_ZONE_ZINIT | UMA_ZONE_NOFREE))) &&
185719c591bfSMateusz Guzik 	    ctor == NULL && dtor == NULL && uminit == NULL && fini == NULL) {
1858afc6dc36SJohn-Mark Gurney 		args.ctor = trash_ctor;
1859afc6dc36SJohn-Mark Gurney 		args.dtor = trash_dtor;
1860afc6dc36SJohn-Mark Gurney 		args.uminit = trash_init;
1861afc6dc36SJohn-Mark Gurney 		args.fini = trash_fini;
1862afc6dc36SJohn-Mark Gurney 	}
1863afc6dc36SJohn-Mark Gurney #endif
18648355f576SJeff Roberson 	args.align = align;
18658355f576SJeff Roberson 	args.flags = flags;
1866099a0e58SBosko Milekic 	args.keg = NULL;
1867099a0e58SBosko Milekic 
186895c4bf75SKonstantin Belousov 	if (booted < UMA_STARTUP2) {
186995c4bf75SKonstantin Belousov 		locked = false;
187095c4bf75SKonstantin Belousov 	} else {
187195c4bf75SKonstantin Belousov 		sx_slock(&uma_drain_lock);
187295c4bf75SKonstantin Belousov 		locked = true;
187395c4bf75SKonstantin Belousov 	}
187495c4bf75SKonstantin Belousov 	res = zone_alloc_item(zones, &args, M_WAITOK);
187595c4bf75SKonstantin Belousov 	if (locked)
187695c4bf75SKonstantin Belousov 		sx_sunlock(&uma_drain_lock);
187795c4bf75SKonstantin Belousov 	return (res);
1878099a0e58SBosko Milekic }
1879099a0e58SBosko Milekic 
1880099a0e58SBosko Milekic /* See uma.h */
1881099a0e58SBosko Milekic uma_zone_t
1882099a0e58SBosko Milekic uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
1883099a0e58SBosko Milekic 		    uma_init zinit, uma_fini zfini, uma_zone_t master)
1884099a0e58SBosko Milekic {
1885099a0e58SBosko Milekic 	struct uma_zctor_args args;
1886e20a199fSJeff Roberson 	uma_keg_t keg;
188795c4bf75SKonstantin Belousov 	uma_zone_t res;
188895c4bf75SKonstantin Belousov 	bool locked;
1889099a0e58SBosko Milekic 
1890e20a199fSJeff Roberson 	keg = zone_first_keg(master);
18910095a784SJeff Roberson 	memset(&args, 0, sizeof(args));
1892099a0e58SBosko Milekic 	args.name = name;
1893e20a199fSJeff Roberson 	args.size = keg->uk_size;
1894099a0e58SBosko Milekic 	args.ctor = ctor;
1895099a0e58SBosko Milekic 	args.dtor = dtor;
1896099a0e58SBosko Milekic 	args.uminit = zinit;
1897099a0e58SBosko Milekic 	args.fini = zfini;
1898e20a199fSJeff Roberson 	args.align = keg->uk_align;
1899e20a199fSJeff Roberson 	args.flags = keg->uk_flags | UMA_ZONE_SECONDARY;
1900e20a199fSJeff Roberson 	args.keg = keg;
19018355f576SJeff Roberson 
190295c4bf75SKonstantin Belousov 	if (booted < UMA_STARTUP2) {
190395c4bf75SKonstantin Belousov 		locked = false;
190495c4bf75SKonstantin Belousov 	} else {
190595c4bf75SKonstantin Belousov 		sx_slock(&uma_drain_lock);
190695c4bf75SKonstantin Belousov 		locked = true;
190795c4bf75SKonstantin Belousov 	}
1908e20a199fSJeff Roberson 	/* XXX Attaches only one keg of potentially many. */
190995c4bf75SKonstantin Belousov 	res = zone_alloc_item(zones, &args, M_WAITOK);
191095c4bf75SKonstantin Belousov 	if (locked)
191195c4bf75SKonstantin Belousov 		sx_sunlock(&uma_drain_lock);
191295c4bf75SKonstantin Belousov 	return (res);
19138355f576SJeff Roberson }
19148355f576SJeff Roberson 
19150095a784SJeff Roberson /* See uma.h */
19160095a784SJeff Roberson uma_zone_t
1917af526374SJeff Roberson uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor,
1918af526374SJeff Roberson 		    uma_init zinit, uma_fini zfini, uma_import zimport,
1919af526374SJeff Roberson 		    uma_release zrelease, void *arg, int flags)
19200095a784SJeff Roberson {
19210095a784SJeff Roberson 	struct uma_zctor_args args;
19220095a784SJeff Roberson 
19230095a784SJeff Roberson 	memset(&args, 0, sizeof(args));
19240095a784SJeff Roberson 	args.name = name;
1925af526374SJeff Roberson 	args.size = size;
19260095a784SJeff Roberson 	args.ctor = ctor;
19270095a784SJeff Roberson 	args.dtor = dtor;
19280095a784SJeff Roberson 	args.uminit = zinit;
19290095a784SJeff Roberson 	args.fini = zfini;
19300095a784SJeff Roberson 	args.import = zimport;
19310095a784SJeff Roberson 	args.release = zrelease;
19320095a784SJeff Roberson 	args.arg = arg;
19330095a784SJeff Roberson 	args.align = 0;
19340095a784SJeff Roberson 	args.flags = flags;
19350095a784SJeff Roberson 
19360095a784SJeff Roberson 	return (zone_alloc_item(zones, &args, M_WAITOK));
19370095a784SJeff Roberson }
19380095a784SJeff Roberson 
1939e20a199fSJeff Roberson static void
1940e20a199fSJeff Roberson zone_lock_pair(uma_zone_t a, uma_zone_t b)
1941e20a199fSJeff Roberson {
1942e20a199fSJeff Roberson 	if (a < b) {
1943e20a199fSJeff Roberson 		ZONE_LOCK(a);
1944af526374SJeff Roberson 		mtx_lock_flags(b->uz_lockptr, MTX_DUPOK);
1945e20a199fSJeff Roberson 	} else {
1946e20a199fSJeff Roberson 		ZONE_LOCK(b);
1947af526374SJeff Roberson 		mtx_lock_flags(a->uz_lockptr, MTX_DUPOK);
1948e20a199fSJeff Roberson 	}
1949e20a199fSJeff Roberson }
1950e20a199fSJeff Roberson 
1951e20a199fSJeff Roberson static void
1952e20a199fSJeff Roberson zone_unlock_pair(uma_zone_t a, uma_zone_t b)
1953e20a199fSJeff Roberson {
1954e20a199fSJeff Roberson 
1955e20a199fSJeff Roberson 	ZONE_UNLOCK(a);
1956e20a199fSJeff Roberson 	ZONE_UNLOCK(b);
1957e20a199fSJeff Roberson }
1958e20a199fSJeff Roberson 
1959e20a199fSJeff Roberson int
1960e20a199fSJeff Roberson uma_zsecond_add(uma_zone_t zone, uma_zone_t master)
1961e20a199fSJeff Roberson {
1962e20a199fSJeff Roberson 	uma_klink_t klink;
1963e20a199fSJeff Roberson 	uma_klink_t kl;
1964e20a199fSJeff Roberson 	int error;
1965e20a199fSJeff Roberson 
1966e20a199fSJeff Roberson 	error = 0;
1967e20a199fSJeff Roberson 	klink = malloc(sizeof(*klink), M_TEMP, M_WAITOK | M_ZERO);
1968e20a199fSJeff Roberson 
1969e20a199fSJeff Roberson 	zone_lock_pair(zone, master);
1970e20a199fSJeff Roberson 	/*
1971e20a199fSJeff Roberson 	 * zone must use vtoslab() to resolve objects and must already be
1972e20a199fSJeff Roberson 	 * a secondary.
1973e20a199fSJeff Roberson 	 */
1974e20a199fSJeff Roberson 	if ((zone->uz_flags & (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY))
1975e20a199fSJeff Roberson 	    != (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) {
1976e20a199fSJeff Roberson 		error = EINVAL;
1977e20a199fSJeff Roberson 		goto out;
1978e20a199fSJeff Roberson 	}
1979e20a199fSJeff Roberson 	/*
1980e20a199fSJeff Roberson 	 * The new master must also use vtoslab().
1981e20a199fSJeff Roberson 	 */
1982e20a199fSJeff Roberson 	if ((zone->uz_flags & UMA_ZONE_VTOSLAB) != UMA_ZONE_VTOSLAB) {
1983e20a199fSJeff Roberson 		error = EINVAL;
1984e20a199fSJeff Roberson 		goto out;
1985e20a199fSJeff Roberson 	}
1986cfcae3f8SGleb Smirnoff 
1987e20a199fSJeff Roberson 	/*
1988e20a199fSJeff Roberson 	 * The underlying object must be the same size.  rsize
1989e20a199fSJeff Roberson 	 * may be different.
1990e20a199fSJeff Roberson 	 */
1991e20a199fSJeff Roberson 	if (master->uz_size != zone->uz_size) {
1992e20a199fSJeff Roberson 		error = E2BIG;
1993e20a199fSJeff Roberson 		goto out;
1994e20a199fSJeff Roberson 	}
1995e20a199fSJeff Roberson 	/*
1996e20a199fSJeff Roberson 	 * Put it at the end of the list.
1997e20a199fSJeff Roberson 	 */
1998e20a199fSJeff Roberson 	klink->kl_keg = zone_first_keg(master);
1999e20a199fSJeff Roberson 	LIST_FOREACH(kl, &zone->uz_kegs, kl_link) {
2000e20a199fSJeff Roberson 		if (LIST_NEXT(kl, kl_link) == NULL) {
2001e20a199fSJeff Roberson 			LIST_INSERT_AFTER(kl, klink, kl_link);
2002e20a199fSJeff Roberson 			break;
2003e20a199fSJeff Roberson 		}
2004e20a199fSJeff Roberson 	}
2005e20a199fSJeff Roberson 	klink = NULL;
2006e20a199fSJeff Roberson 	zone->uz_flags |= UMA_ZFLAG_MULTI;
2007e20a199fSJeff Roberson 	zone->uz_slab = zone_fetch_slab_multi;
2008e20a199fSJeff Roberson 
2009e20a199fSJeff Roberson out:
2010e20a199fSJeff Roberson 	zone_unlock_pair(zone, master);
2011e20a199fSJeff Roberson 	if (klink != NULL)
2012e20a199fSJeff Roberson 		free(klink, M_TEMP);
2013e20a199fSJeff Roberson 
2014e20a199fSJeff Roberson 	return (error);
2015e20a199fSJeff Roberson }
2016e20a199fSJeff Roberson 
2017e20a199fSJeff Roberson 
20188355f576SJeff Roberson /* See uma.h */
20199c2cd7e5SJeff Roberson void
20209c2cd7e5SJeff Roberson uma_zdestroy(uma_zone_t zone)
20219c2cd7e5SJeff Roberson {
2022f4ff923bSRobert Watson 
202395c4bf75SKonstantin Belousov 	sx_slock(&uma_drain_lock);
20240095a784SJeff Roberson 	zone_free_item(zones, zone, NULL, SKIP_NONE);
202595c4bf75SKonstantin Belousov 	sx_sunlock(&uma_drain_lock);
20269c2cd7e5SJeff Roberson }
20279c2cd7e5SJeff Roberson 
20288d6fbbb8SJeff Roberson void
20298d6fbbb8SJeff Roberson uma_zwait(uma_zone_t zone)
20308d6fbbb8SJeff Roberson {
20318d6fbbb8SJeff Roberson 	void *item;
20328d6fbbb8SJeff Roberson 
20338d6fbbb8SJeff Roberson 	item = uma_zalloc_arg(zone, NULL, M_WAITOK);
20348d6fbbb8SJeff Roberson 	uma_zfree(zone, item);
20358d6fbbb8SJeff Roberson }
20368d6fbbb8SJeff Roberson 
20379c2cd7e5SJeff Roberson /* See uma.h */
20388355f576SJeff Roberson void *
20392cc35ff9SJeff Roberson uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
20408355f576SJeff Roberson {
20418355f576SJeff Roberson 	void *item;
20428355f576SJeff Roberson 	uma_cache_t cache;
20438355f576SJeff Roberson 	uma_bucket_t bucket;
2044fc03d22bSJeff Roberson 	int lockfail;
20458355f576SJeff Roberson 	int cpu;
20468355f576SJeff Roberson 
2047e866d8f0SMark Murray 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
2048e866d8f0SMark Murray 	random_harvest_fast_uma(&zone, sizeof(zone), 1, RANDOM_UMA);
204910cb2424SMark Murray 
20508355f576SJeff Roberson 	/* This is the fast path allocation */
20511431a748SGleb Smirnoff 	CTR4(KTR_UMA, "uma_zalloc_arg thread %x zone %s(%p) flags %d",
20521431a748SGleb Smirnoff 	    curthread, zone->uz_name, zone, flags);
2053a553d4b8SJeff Roberson 
2054635fd505SRobert Watson 	if (flags & M_WAITOK) {
2055b23f72e9SBrian Feldman 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2056635fd505SRobert Watson 		    "uma_zalloc_arg: zone \"%s\"", zone->uz_name);
20574c1cc01cSJohn Baldwin 	}
2058d9e2e68dSMark Johnston 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
20591067a2baSJonathan T. Looney 	    ("uma_zalloc_arg: called with spinlock or critical section held"));
20601067a2baSJonathan T. Looney 
20618d689e04SGleb Smirnoff #ifdef DEBUG_MEMGUARD
20628d689e04SGleb Smirnoff 	if (memguard_cmp_zone(zone)) {
20638d689e04SGleb Smirnoff 		item = memguard_alloc(zone->uz_size, flags);
20648d689e04SGleb Smirnoff 		if (item != NULL) {
20658d689e04SGleb Smirnoff 			if (zone->uz_init != NULL &&
20668d689e04SGleb Smirnoff 			    zone->uz_init(item, zone->uz_size, flags) != 0)
20678d689e04SGleb Smirnoff 				return (NULL);
20688d689e04SGleb Smirnoff 			if (zone->uz_ctor != NULL &&
2069fc03d22bSJeff Roberson 			    zone->uz_ctor(item, zone->uz_size, udata,
2070fc03d22bSJeff Roberson 			    flags) != 0) {
20718d689e04SGleb Smirnoff 			    	zone->uz_fini(item, zone->uz_size);
20728d689e04SGleb Smirnoff 				return (NULL);
20738d689e04SGleb Smirnoff 			}
20748d689e04SGleb Smirnoff 			return (item);
20758d689e04SGleb Smirnoff 		}
20768d689e04SGleb Smirnoff 		/* This is unfortunate but should not be fatal. */
20778d689e04SGleb Smirnoff 	}
20788d689e04SGleb Smirnoff #endif
20795d1ae027SRobert Watson 	/*
20805d1ae027SRobert Watson 	 * If possible, allocate from the per-CPU cache.  There are two
20815d1ae027SRobert Watson 	 * requirements for safe access to the per-CPU cache: (1) the thread
20825d1ae027SRobert Watson 	 * accessing the cache must not be preempted or yield during access,
20835d1ae027SRobert Watson 	 * and (2) the thread must not migrate CPUs without switching which
20845d1ae027SRobert Watson 	 * cache it accesses.  We rely on a critical section to prevent
20855d1ae027SRobert Watson 	 * preemption and migration.  We release the critical section in
20865d1ae027SRobert Watson 	 * order to acquire the zone mutex if we are unable to allocate from
20875d1ae027SRobert Watson 	 * the current cache; when we re-acquire the critical section, we
20885d1ae027SRobert Watson 	 * must detect and handle migration if it has occurred.
20895d1ae027SRobert Watson 	 */
20905d1ae027SRobert Watson 	critical_enter();
20915d1ae027SRobert Watson 	cpu = curcpu;
20928355f576SJeff Roberson 	cache = &zone->uz_cpu[cpu];
20938355f576SJeff Roberson 
20948355f576SJeff Roberson zalloc_start:
20958355f576SJeff Roberson 	bucket = cache->uc_allocbucket;
2096fc03d22bSJeff Roberson 	if (bucket != NULL && bucket->ub_cnt > 0) {
2097cae33c14SJeff Roberson 		bucket->ub_cnt--;
2098cae33c14SJeff Roberson 		item = bucket->ub_bucket[bucket->ub_cnt];
20998355f576SJeff Roberson #ifdef INVARIANTS
2100cae33c14SJeff Roberson 		bucket->ub_bucket[bucket->ub_cnt] = NULL;
21018355f576SJeff Roberson #endif
2102fc03d22bSJeff Roberson 		KASSERT(item != NULL, ("uma_zalloc: Bucket pointer mangled."));
21038355f576SJeff Roberson 		cache->uc_allocs++;
21045d1ae027SRobert Watson 		critical_exit();
2105fc03d22bSJeff Roberson 		if (zone->uz_ctor != NULL &&
2106fc03d22bSJeff Roberson 		    zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
21070095a784SJeff Roberson 			atomic_add_long(&zone->uz_fails, 1);
2108fc03d22bSJeff Roberson 			zone_free_item(zone, item, udata, SKIP_DTOR);
2109b23f72e9SBrian Feldman 			return (NULL);
2110b23f72e9SBrian Feldman 		}
2111ef72505eSJeff Roberson #ifdef INVARIANTS
2112ef72505eSJeff Roberson 		uma_dbg_alloc(zone, NULL, item);
2113ef72505eSJeff Roberson #endif
21142cc35ff9SJeff Roberson 		if (flags & M_ZERO)
211548343a2fSGleb Smirnoff 			uma_zero_item(item, zone);
21168355f576SJeff Roberson 		return (item);
2117fc03d22bSJeff Roberson 	}
2118fc03d22bSJeff Roberson 
21198355f576SJeff Roberson 	/*
21208355f576SJeff Roberson 	 * We have run out of items in our alloc bucket.
21218355f576SJeff Roberson 	 * See if we can switch with our free bucket.
21228355f576SJeff Roberson 	 */
2123b983089aSJeff Roberson 	bucket = cache->uc_freebucket;
2124fc03d22bSJeff Roberson 	if (bucket != NULL && bucket->ub_cnt > 0) {
21251431a748SGleb Smirnoff 		CTR2(KTR_UMA,
21261431a748SGleb Smirnoff 		    "uma_zalloc: zone %s(%p) swapping empty with alloc",
21271431a748SGleb Smirnoff 		    zone->uz_name, zone);
21288355f576SJeff Roberson 		cache->uc_freebucket = cache->uc_allocbucket;
2129b983089aSJeff Roberson 		cache->uc_allocbucket = bucket;
21308355f576SJeff Roberson 		goto zalloc_start;
21318355f576SJeff Roberson 	}
2132fc03d22bSJeff Roberson 
2133fc03d22bSJeff Roberson 	/*
2134fc03d22bSJeff Roberson 	 * Discard any empty allocation bucket while we hold no locks.
2135fc03d22bSJeff Roberson 	 */
2136fc03d22bSJeff Roberson 	bucket = cache->uc_allocbucket;
2137fc03d22bSJeff Roberson 	cache->uc_allocbucket = NULL;
2138fc03d22bSJeff Roberson 	critical_exit();
2139fc03d22bSJeff Roberson 	if (bucket != NULL)
21406fd34d6fSJeff Roberson 		bucket_free(zone, bucket, udata);
2141fc03d22bSJeff Roberson 
2142fc03d22bSJeff Roberson 	/* Short-circuit for zones without buckets and low memory. */
2143fc03d22bSJeff Roberson 	if (zone->uz_count == 0 || bucketdisable)
2144fc03d22bSJeff Roberson 		goto zalloc_item;
2145fc03d22bSJeff Roberson 
21465d1ae027SRobert Watson 	/*
21475d1ae027SRobert Watson 	 * Attempt to retrieve the item from the per-CPU cache has failed, so
21485d1ae027SRobert Watson 	 * we must go back to the zone.  This requires the zone lock, so we
21495d1ae027SRobert Watson 	 * must drop the critical section, then re-acquire it when we go back
21505d1ae027SRobert Watson 	 * to the cache.  Since the critical section is released, we may be
21515d1ae027SRobert Watson 	 * preempted or migrate.  As such, make sure not to maintain any
21525d1ae027SRobert Watson 	 * thread-local state specific to the cache from prior to releasing
21535d1ae027SRobert Watson 	 * the critical section.
21545d1ae027SRobert Watson 	 */
2155fc03d22bSJeff Roberson 	lockfail = 0;
2156fc03d22bSJeff Roberson 	if (ZONE_TRYLOCK(zone) == 0) {
2157fc03d22bSJeff Roberson 		/* Record contention to size the buckets. */
2158a553d4b8SJeff Roberson 		ZONE_LOCK(zone);
2159fc03d22bSJeff Roberson 		lockfail = 1;
2160fc03d22bSJeff Roberson 	}
21615d1ae027SRobert Watson 	critical_enter();
21625d1ae027SRobert Watson 	cpu = curcpu;
21635d1ae027SRobert Watson 	cache = &zone->uz_cpu[cpu];
21645d1ae027SRobert Watson 
2165fc03d22bSJeff Roberson 	/*
2166fc03d22bSJeff Roberson 	 * Since we have locked the zone we may as well send back our stats.
2167fc03d22bSJeff Roberson 	 */
21680095a784SJeff Roberson 	atomic_add_long(&zone->uz_allocs, cache->uc_allocs);
21690095a784SJeff Roberson 	atomic_add_long(&zone->uz_frees, cache->uc_frees);
2170a553d4b8SJeff Roberson 	cache->uc_allocs = 0;
2171773df9abSRobert Watson 	cache->uc_frees = 0;
21728355f576SJeff Roberson 
2173fc03d22bSJeff Roberson 	/* See if we lost the race to fill the cache. */
2174fc03d22bSJeff Roberson 	if (cache->uc_allocbucket != NULL) {
2175fc03d22bSJeff Roberson 		ZONE_UNLOCK(zone);
2176fc03d22bSJeff Roberson 		goto zalloc_start;
2177a553d4b8SJeff Roberson 	}
21788355f576SJeff Roberson 
2179fc03d22bSJeff Roberson 	/*
2180fc03d22bSJeff Roberson 	 * Check the zone's cache of buckets.
2181fc03d22bSJeff Roberson 	 */
2182fc03d22bSJeff Roberson 	if ((bucket = LIST_FIRST(&zone->uz_buckets)) != NULL) {
2183cae33c14SJeff Roberson 		KASSERT(bucket->ub_cnt != 0,
2184a553d4b8SJeff Roberson 		    ("uma_zalloc_arg: Returning an empty bucket."));
21858355f576SJeff Roberson 
2186a553d4b8SJeff Roberson 		LIST_REMOVE(bucket, ub_link);
2187a553d4b8SJeff Roberson 		cache->uc_allocbucket = bucket;
2188a553d4b8SJeff Roberson 		ZONE_UNLOCK(zone);
21898355f576SJeff Roberson 		goto zalloc_start;
2190a553d4b8SJeff Roberson 	}
21915d1ae027SRobert Watson 	/* We are no longer associated with this CPU. */
21925d1ae027SRobert Watson 	critical_exit();
2193bbee39c6SJeff Roberson 
2194fc03d22bSJeff Roberson 	/*
2195fc03d22bSJeff Roberson 	 * We bump the uz count when the cache size is insufficient to
2196fc03d22bSJeff Roberson 	 * handle the working set.
2197fc03d22bSJeff Roberson 	 */
21986fd34d6fSJeff Roberson 	if (lockfail && zone->uz_count < BUCKET_MAX)
2199a553d4b8SJeff Roberson 		zone->uz_count++;
2200fc03d22bSJeff Roberson 	ZONE_UNLOCK(zone);
2201099a0e58SBosko Milekic 
22028355f576SJeff Roberson 	/*
2203a553d4b8SJeff Roberson 	 * Now lets just fill a bucket and put it on the free list.  If that
2204763df3ecSPedro F. Giffuni 	 * works we'll restart the allocation from the beginning and it
2205fc03d22bSJeff Roberson 	 * will use the just filled bucket.
2206bbee39c6SJeff Roberson 	 */
22076fd34d6fSJeff Roberson 	bucket = zone_alloc_bucket(zone, udata, flags);
22081431a748SGleb Smirnoff 	CTR3(KTR_UMA, "uma_zalloc: zone %s(%p) bucket zone returned %p",
22091431a748SGleb Smirnoff 	    zone->uz_name, zone, bucket);
2210fc03d22bSJeff Roberson 	if (bucket != NULL) {
2211fc03d22bSJeff Roberson 		ZONE_LOCK(zone);
2212fc03d22bSJeff Roberson 		critical_enter();
2213fc03d22bSJeff Roberson 		cpu = curcpu;
2214fc03d22bSJeff Roberson 		cache = &zone->uz_cpu[cpu];
2215fc03d22bSJeff Roberson 		/*
2216fc03d22bSJeff Roberson 		 * See if we lost the race or were migrated.  Cache the
2217fc03d22bSJeff Roberson 		 * initialized bucket to make this less likely or claim
2218fc03d22bSJeff Roberson 		 * the memory directly.
2219fc03d22bSJeff Roberson 		 */
2220fc03d22bSJeff Roberson 		if (cache->uc_allocbucket == NULL)
2221fc03d22bSJeff Roberson 			cache->uc_allocbucket = bucket;
2222fc03d22bSJeff Roberson 		else
2223fc03d22bSJeff Roberson 			LIST_INSERT_HEAD(&zone->uz_buckets, bucket, ub_link);
2224bbee39c6SJeff Roberson 		ZONE_UNLOCK(zone);
2225fc03d22bSJeff Roberson 		goto zalloc_start;
2226bbee39c6SJeff Roberson 	}
2227fc03d22bSJeff Roberson 
2228bbee39c6SJeff Roberson 	/*
2229bbee39c6SJeff Roberson 	 * We may not be able to get a bucket so return an actual item.
2230bbee39c6SJeff Roberson 	 */
2231fc03d22bSJeff Roberson zalloc_item:
2232e20a199fSJeff Roberson 	item = zone_alloc_item(zone, udata, flags);
2233fc03d22bSJeff Roberson 
2234e20a199fSJeff Roberson 	return (item);
2235bbee39c6SJeff Roberson }
2236bbee39c6SJeff Roberson 
2237bbee39c6SJeff Roberson static uma_slab_t
2238e20a199fSJeff Roberson keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int flags)
2239bbee39c6SJeff Roberson {
2240bbee39c6SJeff Roberson 	uma_slab_t slab;
22416fd34d6fSJeff Roberson 	int reserve;
2242099a0e58SBosko Milekic 
2243e20a199fSJeff Roberson 	mtx_assert(&keg->uk_lock, MA_OWNED);
2244bbee39c6SJeff Roberson 	slab = NULL;
22456fd34d6fSJeff Roberson 	reserve = 0;
22466fd34d6fSJeff Roberson 	if ((flags & M_USE_RESERVE) == 0)
22476fd34d6fSJeff Roberson 		reserve = keg->uk_reserve;
2248bbee39c6SJeff Roberson 
2249bbee39c6SJeff Roberson 	for (;;) {
2250bbee39c6SJeff Roberson 		/*
2251bbee39c6SJeff Roberson 		 * Find a slab with some space.  Prefer slabs that are partially
2252bbee39c6SJeff Roberson 		 * used over those that are totally full.  This helps to reduce
2253bbee39c6SJeff Roberson 		 * fragmentation.
2254bbee39c6SJeff Roberson 		 */
22556fd34d6fSJeff Roberson 		if (keg->uk_free > reserve) {
2256099a0e58SBosko Milekic 			if (!LIST_EMPTY(&keg->uk_part_slab)) {
2257099a0e58SBosko Milekic 				slab = LIST_FIRST(&keg->uk_part_slab);
2258bbee39c6SJeff Roberson 			} else {
2259099a0e58SBosko Milekic 				slab = LIST_FIRST(&keg->uk_free_slab);
2260bbee39c6SJeff Roberson 				LIST_REMOVE(slab, us_link);
2261099a0e58SBosko Milekic 				LIST_INSERT_HEAD(&keg->uk_part_slab, slab,
2262bbee39c6SJeff Roberson 				    us_link);
2263bbee39c6SJeff Roberson 			}
2264e20a199fSJeff Roberson 			MPASS(slab->us_keg == keg);
2265bbee39c6SJeff Roberson 			return (slab);
2266bbee39c6SJeff Roberson 		}
2267bbee39c6SJeff Roberson 
2268bbee39c6SJeff Roberson 		/*
2269bbee39c6SJeff Roberson 		 * M_NOVM means don't ask at all!
2270bbee39c6SJeff Roberson 		 */
2271bbee39c6SJeff Roberson 		if (flags & M_NOVM)
2272bbee39c6SJeff Roberson 			break;
2273bbee39c6SJeff Roberson 
2274e20a199fSJeff Roberson 		if (keg->uk_maxpages && keg->uk_pages >= keg->uk_maxpages) {
2275099a0e58SBosko Milekic 			keg->uk_flags |= UMA_ZFLAG_FULL;
2276e20a199fSJeff Roberson 			/*
2277e20a199fSJeff Roberson 			 * If this is not a multi-zone, set the FULL bit.
2278e20a199fSJeff Roberson 			 * Otherwise slab_multi() takes care of it.
2279e20a199fSJeff Roberson 			 */
22802f891cd5SPawel Jakub Dawidek 			if ((zone->uz_flags & UMA_ZFLAG_MULTI) == 0) {
2281e20a199fSJeff Roberson 				zone->uz_flags |= UMA_ZFLAG_FULL;
22822f891cd5SPawel Jakub Dawidek 				zone_log_warning(zone);
228354503a13SJonathan T. Looney 				zone_maxaction(zone);
22842f891cd5SPawel Jakub Dawidek 			}
2285ebc85edfSJeff Roberson 			if (flags & M_NOWAIT)
2286bbee39c6SJeff Roberson 				break;
2287c288b548SEitan Adler 			zone->uz_sleeps++;
2288e20a199fSJeff Roberson 			msleep(keg, &keg->uk_lock, PVM, "keglimit", 0);
2289bbee39c6SJeff Roberson 			continue;
2290bbee39c6SJeff Roberson 		}
2291e20a199fSJeff Roberson 		slab = keg_alloc_slab(keg, zone, flags);
2292bbee39c6SJeff Roberson 		/*
2293bbee39c6SJeff Roberson 		 * If we got a slab here it's safe to mark it partially used
2294bbee39c6SJeff Roberson 		 * and return.  We assume that the caller is going to remove
2295bbee39c6SJeff Roberson 		 * at least one item.
2296bbee39c6SJeff Roberson 		 */
2297bbee39c6SJeff Roberson 		if (slab) {
2298e20a199fSJeff Roberson 			MPASS(slab->us_keg == keg);
2299099a0e58SBosko Milekic 			LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
2300bbee39c6SJeff Roberson 			return (slab);
2301bbee39c6SJeff Roberson 		}
2302bbee39c6SJeff Roberson 		/*
2303bbee39c6SJeff Roberson 		 * We might not have been able to get a slab but another cpu
2304bbee39c6SJeff Roberson 		 * could have while we were unlocked.  Check again before we
2305bbee39c6SJeff Roberson 		 * fail.
2306bbee39c6SJeff Roberson 		 */
2307bbee39c6SJeff Roberson 		flags |= M_NOVM;
2308bbee39c6SJeff Roberson 	}
2309bbee39c6SJeff Roberson 	return (slab);
2310bbee39c6SJeff Roberson }
2311bbee39c6SJeff Roberson 
2312e20a199fSJeff Roberson static uma_slab_t
2313e20a199fSJeff Roberson zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int flags)
2314e20a199fSJeff Roberson {
2315e20a199fSJeff Roberson 	uma_slab_t slab;
2316e20a199fSJeff Roberson 
2317af526374SJeff Roberson 	if (keg == NULL) {
2318e20a199fSJeff Roberson 		keg = zone_first_keg(zone);
2319af526374SJeff Roberson 		KEG_LOCK(keg);
2320af526374SJeff Roberson 	}
2321e20a199fSJeff Roberson 
2322e20a199fSJeff Roberson 	for (;;) {
2323e20a199fSJeff Roberson 		slab = keg_fetch_slab(keg, zone, flags);
2324e20a199fSJeff Roberson 		if (slab)
2325e20a199fSJeff Roberson 			return (slab);
2326e20a199fSJeff Roberson 		if (flags & (M_NOWAIT | M_NOVM))
2327e20a199fSJeff Roberson 			break;
2328e20a199fSJeff Roberson 	}
2329af526374SJeff Roberson 	KEG_UNLOCK(keg);
2330e20a199fSJeff Roberson 	return (NULL);
2331e20a199fSJeff Roberson }
2332e20a199fSJeff Roberson 
2333e20a199fSJeff Roberson /*
2334e20a199fSJeff Roberson  * uma_zone_fetch_slab_multi:  Fetches a slab from one available keg.  Returns
2335af526374SJeff Roberson  * with the keg locked.  On NULL no lock is held.
2336e20a199fSJeff Roberson  *
2337e20a199fSJeff Roberson  * The last pointer is used to seed the search.  It is not required.
2338e20a199fSJeff Roberson  */
2339e20a199fSJeff Roberson static uma_slab_t
2340e20a199fSJeff Roberson zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int rflags)
2341e20a199fSJeff Roberson {
2342e20a199fSJeff Roberson 	uma_klink_t klink;
2343e20a199fSJeff Roberson 	uma_slab_t slab;
2344e20a199fSJeff Roberson 	uma_keg_t keg;
2345e20a199fSJeff Roberson 	int flags;
2346e20a199fSJeff Roberson 	int empty;
2347e20a199fSJeff Roberson 	int full;
2348e20a199fSJeff Roberson 
2349e20a199fSJeff Roberson 	/*
2350e20a199fSJeff Roberson 	 * Don't wait on the first pass.  This will skip limit tests
2351e20a199fSJeff Roberson 	 * as well.  We don't want to block if we can find a provider
2352e20a199fSJeff Roberson 	 * without blocking.
2353e20a199fSJeff Roberson 	 */
2354e20a199fSJeff Roberson 	flags = (rflags & ~M_WAITOK) | M_NOWAIT;
2355e20a199fSJeff Roberson 	/*
2356e20a199fSJeff Roberson 	 * Use the last slab allocated as a hint for where to start
2357e20a199fSJeff Roberson 	 * the search.
2358e20a199fSJeff Roberson 	 */
2359af526374SJeff Roberson 	if (last != NULL) {
2360e20a199fSJeff Roberson 		slab = keg_fetch_slab(last, zone, flags);
2361e20a199fSJeff Roberson 		if (slab)
2362e20a199fSJeff Roberson 			return (slab);
2363af526374SJeff Roberson 		KEG_UNLOCK(last);
2364e20a199fSJeff Roberson 	}
2365e20a199fSJeff Roberson 	/*
2366e20a199fSJeff Roberson 	 * Loop until we have a slab incase of transient failures
2367e20a199fSJeff Roberson 	 * while M_WAITOK is specified.  I'm not sure this is 100%
2368e20a199fSJeff Roberson 	 * required but we've done it for so long now.
2369e20a199fSJeff Roberson 	 */
2370e20a199fSJeff Roberson 	for (;;) {
2371e20a199fSJeff Roberson 		empty = 0;
2372e20a199fSJeff Roberson 		full = 0;
2373e20a199fSJeff Roberson 		/*
2374e20a199fSJeff Roberson 		 * Search the available kegs for slabs.  Be careful to hold the
2375e20a199fSJeff Roberson 		 * correct lock while calling into the keg layer.
2376e20a199fSJeff Roberson 		 */
2377e20a199fSJeff Roberson 		LIST_FOREACH(klink, &zone->uz_kegs, kl_link) {
2378e20a199fSJeff Roberson 			keg = klink->kl_keg;
2379af526374SJeff Roberson 			KEG_LOCK(keg);
2380e20a199fSJeff Roberson 			if ((keg->uk_flags & UMA_ZFLAG_FULL) == 0) {
2381e20a199fSJeff Roberson 				slab = keg_fetch_slab(keg, zone, flags);
2382e20a199fSJeff Roberson 				if (slab)
2383e20a199fSJeff Roberson 					return (slab);
2384e20a199fSJeff Roberson 			}
2385e20a199fSJeff Roberson 			if (keg->uk_flags & UMA_ZFLAG_FULL)
2386e20a199fSJeff Roberson 				full++;
2387e20a199fSJeff Roberson 			else
2388e20a199fSJeff Roberson 				empty++;
2389af526374SJeff Roberson 			KEG_UNLOCK(keg);
2390e20a199fSJeff Roberson 		}
2391e20a199fSJeff Roberson 		if (rflags & (M_NOWAIT | M_NOVM))
2392e20a199fSJeff Roberson 			break;
2393e20a199fSJeff Roberson 		flags = rflags;
2394e20a199fSJeff Roberson 		/*
2395e20a199fSJeff Roberson 		 * All kegs are full.  XXX We can't atomically check all kegs
2396e20a199fSJeff Roberson 		 * and sleep so just sleep for a short period and retry.
2397e20a199fSJeff Roberson 		 */
2398e20a199fSJeff Roberson 		if (full && !empty) {
2399af526374SJeff Roberson 			ZONE_LOCK(zone);
2400e20a199fSJeff Roberson 			zone->uz_flags |= UMA_ZFLAG_FULL;
2401bf965959SSean Bruno 			zone->uz_sleeps++;
24022f891cd5SPawel Jakub Dawidek 			zone_log_warning(zone);
240354503a13SJonathan T. Looney 			zone_maxaction(zone);
2404af526374SJeff Roberson 			msleep(zone, zone->uz_lockptr, PVM,
2405af526374SJeff Roberson 			    "zonelimit", hz/100);
2406e20a199fSJeff Roberson 			zone->uz_flags &= ~UMA_ZFLAG_FULL;
2407af526374SJeff Roberson 			ZONE_UNLOCK(zone);
2408e20a199fSJeff Roberson 			continue;
2409e20a199fSJeff Roberson 		}
2410e20a199fSJeff Roberson 	}
2411e20a199fSJeff Roberson 	return (NULL);
2412e20a199fSJeff Roberson }
2413e20a199fSJeff Roberson 
2414d56368d7SBosko Milekic static void *
24150095a784SJeff Roberson slab_alloc_item(uma_keg_t keg, uma_slab_t slab)
2416bbee39c6SJeff Roberson {
2417bbee39c6SJeff Roberson 	void *item;
241885dcf349SGleb Smirnoff 	uint8_t freei;
2419bbee39c6SJeff Roberson 
24200095a784SJeff Roberson 	MPASS(keg == slab->us_keg);
2421e20a199fSJeff Roberson 	mtx_assert(&keg->uk_lock, MA_OWNED);
2422099a0e58SBosko Milekic 
2423ef72505eSJeff Roberson 	freei = BIT_FFS(SLAB_SETSIZE, &slab->us_free) - 1;
2424ef72505eSJeff Roberson 	BIT_CLR(SLAB_SETSIZE, freei, &slab->us_free);
2425099a0e58SBosko Milekic 	item = slab->us_data + (keg->uk_rsize * freei);
2426bbee39c6SJeff Roberson 	slab->us_freecount--;
2427099a0e58SBosko Milekic 	keg->uk_free--;
2428ef72505eSJeff Roberson 
2429bbee39c6SJeff Roberson 	/* Move this slab to the full list */
2430bbee39c6SJeff Roberson 	if (slab->us_freecount == 0) {
2431bbee39c6SJeff Roberson 		LIST_REMOVE(slab, us_link);
2432099a0e58SBosko Milekic 		LIST_INSERT_HEAD(&keg->uk_full_slab, slab, us_link);
2433bbee39c6SJeff Roberson 	}
2434bbee39c6SJeff Roberson 
2435bbee39c6SJeff Roberson 	return (item);
2436bbee39c6SJeff Roberson }
2437bbee39c6SJeff Roberson 
2438bbee39c6SJeff Roberson static int
24390095a784SJeff Roberson zone_import(uma_zone_t zone, void **bucket, int max, int flags)
24400095a784SJeff Roberson {
24410095a784SJeff Roberson 	uma_slab_t slab;
24420095a784SJeff Roberson 	uma_keg_t keg;
24430095a784SJeff Roberson 	int i;
24440095a784SJeff Roberson 
24450095a784SJeff Roberson 	slab = NULL;
24460095a784SJeff Roberson 	keg = NULL;
2447af526374SJeff Roberson 	/* Try to keep the buckets totally full */
24480095a784SJeff Roberson 	for (i = 0; i < max; ) {
24490095a784SJeff Roberson 		if ((slab = zone->uz_slab(zone, keg, flags)) == NULL)
24500095a784SJeff Roberson 			break;
24510095a784SJeff Roberson 		keg = slab->us_keg;
24526fd34d6fSJeff Roberson 		while (slab->us_freecount && i < max) {
24530095a784SJeff Roberson 			bucket[i++] = slab_alloc_item(keg, slab);
24546fd34d6fSJeff Roberson 			if (keg->uk_free <= keg->uk_reserve)
24556fd34d6fSJeff Roberson 				break;
24566fd34d6fSJeff Roberson 		}
24576fd34d6fSJeff Roberson 		/* Don't grab more than one slab at a time. */
24580095a784SJeff Roberson 		flags &= ~M_WAITOK;
24590095a784SJeff Roberson 		flags |= M_NOWAIT;
24600095a784SJeff Roberson 	}
24610095a784SJeff Roberson 	if (slab != NULL)
24620095a784SJeff Roberson 		KEG_UNLOCK(keg);
24630095a784SJeff Roberson 
24640095a784SJeff Roberson 	return i;
24650095a784SJeff Roberson }
24660095a784SJeff Roberson 
2467fc03d22bSJeff Roberson static uma_bucket_t
24686fd34d6fSJeff Roberson zone_alloc_bucket(uma_zone_t zone, void *udata, int flags)
2469bbee39c6SJeff Roberson {
2470bbee39c6SJeff Roberson 	uma_bucket_t bucket;
24710095a784SJeff Roberson 	int max;
2472bbee39c6SJeff Roberson 
24736fd34d6fSJeff Roberson 	/* Don't wait for buckets, preserve caller's NOVM setting. */
24746fd34d6fSJeff Roberson 	bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM));
24750095a784SJeff Roberson 	if (bucket == NULL)
2476f7104ccdSAlexander Motin 		return (NULL);
24770095a784SJeff Roberson 
2478af526374SJeff Roberson 	max = MIN(bucket->ub_entries, zone->uz_count);
24790095a784SJeff Roberson 	bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket,
24800095a784SJeff Roberson 	    max, flags);
24810095a784SJeff Roberson 
24820095a784SJeff Roberson 	/*
24830095a784SJeff Roberson 	 * Initialize the memory if necessary.
24840095a784SJeff Roberson 	 */
24850095a784SJeff Roberson 	if (bucket->ub_cnt != 0 && zone->uz_init != NULL) {
2486099a0e58SBosko Milekic 		int i;
2487bbee39c6SJeff Roberson 
24880095a784SJeff Roberson 		for (i = 0; i < bucket->ub_cnt; i++)
2489e20a199fSJeff Roberson 			if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size,
24900095a784SJeff Roberson 			    flags) != 0)
2491b23f72e9SBrian Feldman 				break;
2492b23f72e9SBrian Feldman 		/*
2493b23f72e9SBrian Feldman 		 * If we couldn't initialize the whole bucket, put the
2494b23f72e9SBrian Feldman 		 * rest back onto the freelist.
2495b23f72e9SBrian Feldman 		 */
2496b23f72e9SBrian Feldman 		if (i != bucket->ub_cnt) {
2497af526374SJeff Roberson 			zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i],
24980095a784SJeff Roberson 			    bucket->ub_cnt - i);
2499a5a262c6SBosko Milekic #ifdef INVARIANTS
25000095a784SJeff Roberson 			bzero(&bucket->ub_bucket[i],
25010095a784SJeff Roberson 			    sizeof(void *) * (bucket->ub_cnt - i));
2502a5a262c6SBosko Milekic #endif
2503b23f72e9SBrian Feldman 			bucket->ub_cnt = i;
2504b23f72e9SBrian Feldman 		}
2505099a0e58SBosko Milekic 	}
2506099a0e58SBosko Milekic 
2507f7104ccdSAlexander Motin 	if (bucket->ub_cnt == 0) {
25086fd34d6fSJeff Roberson 		bucket_free(zone, bucket, udata);
2509fc03d22bSJeff Roberson 		atomic_add_long(&zone->uz_fails, 1);
2510fc03d22bSJeff Roberson 		return (NULL);
2511bbee39c6SJeff Roberson 	}
2512fc03d22bSJeff Roberson 
2513fc03d22bSJeff Roberson 	return (bucket);
2514fc03d22bSJeff Roberson }
2515fc03d22bSJeff Roberson 
25168355f576SJeff Roberson /*
25170095a784SJeff Roberson  * Allocates a single item from a zone.
25188355f576SJeff Roberson  *
25198355f576SJeff Roberson  * Arguments
25208355f576SJeff Roberson  *	zone   The zone to alloc for.
25218355f576SJeff Roberson  *	udata  The data to be passed to the constructor.
2522a163d034SWarner Losh  *	flags  M_WAITOK, M_NOWAIT, M_ZERO.
25238355f576SJeff Roberson  *
25248355f576SJeff Roberson  * Returns
25258355f576SJeff Roberson  *	NULL if there is no memory and M_NOWAIT is set
2526bbee39c6SJeff Roberson  *	An item if successful
25278355f576SJeff Roberson  */
25288355f576SJeff Roberson 
25298355f576SJeff Roberson static void *
2530e20a199fSJeff Roberson zone_alloc_item(uma_zone_t zone, void *udata, int flags)
25318355f576SJeff Roberson {
25328355f576SJeff Roberson 	void *item;
25338355f576SJeff Roberson 
25348355f576SJeff Roberson 	item = NULL;
25358355f576SJeff Roberson 
25360095a784SJeff Roberson 	if (zone->uz_import(zone->uz_arg, &item, 1, flags) != 1)
25370095a784SJeff Roberson 		goto fail;
25380095a784SJeff Roberson 	atomic_add_long(&zone->uz_allocs, 1);
25398355f576SJeff Roberson 
2540099a0e58SBosko Milekic 	/*
2541099a0e58SBosko Milekic 	 * We have to call both the zone's init (not the keg's init)
2542099a0e58SBosko Milekic 	 * and the zone's ctor.  This is because the item is going from
2543099a0e58SBosko Milekic 	 * a keg slab directly to the user, and the user is expecting it
2544099a0e58SBosko Milekic 	 * to be both zone-init'd as well as zone-ctor'd.
2545099a0e58SBosko Milekic 	 */
2546b23f72e9SBrian Feldman 	if (zone->uz_init != NULL) {
2547e20a199fSJeff Roberson 		if (zone->uz_init(item, zone->uz_size, flags) != 0) {
25480095a784SJeff Roberson 			zone_free_item(zone, item, udata, SKIP_FINI);
25490095a784SJeff Roberson 			goto fail;
2550b23f72e9SBrian Feldman 		}
2551b23f72e9SBrian Feldman 	}
2552b23f72e9SBrian Feldman 	if (zone->uz_ctor != NULL) {
2553e20a199fSJeff Roberson 		if (zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
25540095a784SJeff Roberson 			zone_free_item(zone, item, udata, SKIP_DTOR);
25550095a784SJeff Roberson 			goto fail;
2556b23f72e9SBrian Feldman 		}
2557b23f72e9SBrian Feldman 	}
2558ef72505eSJeff Roberson #ifdef INVARIANTS
25590095a784SJeff Roberson 	uma_dbg_alloc(zone, NULL, item);
2560ef72505eSJeff Roberson #endif
25612cc35ff9SJeff Roberson 	if (flags & M_ZERO)
256248343a2fSGleb Smirnoff 		uma_zero_item(item, zone);
25638355f576SJeff Roberson 
25641431a748SGleb Smirnoff 	CTR3(KTR_UMA, "zone_alloc_item item %p from %s(%p)", item,
25651431a748SGleb Smirnoff 	    zone->uz_name, zone);
25661431a748SGleb Smirnoff 
25678355f576SJeff Roberson 	return (item);
25680095a784SJeff Roberson 
25690095a784SJeff Roberson fail:
25701431a748SGleb Smirnoff 	CTR2(KTR_UMA, "zone_alloc_item failed from %s(%p)",
25711431a748SGleb Smirnoff 	    zone->uz_name, zone);
25720095a784SJeff Roberson 	atomic_add_long(&zone->uz_fails, 1);
25730095a784SJeff Roberson 	return (NULL);
25748355f576SJeff Roberson }
25758355f576SJeff Roberson 
25768355f576SJeff Roberson /* See uma.h */
25778355f576SJeff Roberson void
25788355f576SJeff Roberson uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
25798355f576SJeff Roberson {
25808355f576SJeff Roberson 	uma_cache_t cache;
25818355f576SJeff Roberson 	uma_bucket_t bucket;
25824d104ba0SAlexander Motin 	int lockfail;
25838355f576SJeff Roberson 	int cpu;
25848355f576SJeff Roberson 
2585e866d8f0SMark Murray 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
2586e866d8f0SMark Murray 	random_harvest_fast_uma(&zone, sizeof(zone), 1, RANDOM_UMA);
258710cb2424SMark Murray 
25883659f747SRobert Watson 	CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread,
25893659f747SRobert Watson 	    zone->uz_name);
25903659f747SRobert Watson 
2591d9e2e68dSMark Johnston 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
25921067a2baSJonathan T. Looney 	    ("uma_zfree_arg: called with spinlock or critical section held"));
25931067a2baSJonathan T. Looney 
259420ed0cb0SMatthew D Fleming         /* uma_zfree(..., NULL) does nothing, to match free(9). */
259520ed0cb0SMatthew D Fleming         if (item == NULL)
259620ed0cb0SMatthew D Fleming                 return;
25978d689e04SGleb Smirnoff #ifdef DEBUG_MEMGUARD
25988d689e04SGleb Smirnoff 	if (is_memguard_addr(item)) {
2599bc9d08e1SMark Johnston 		if (zone->uz_dtor != NULL)
26008d689e04SGleb Smirnoff 			zone->uz_dtor(item, zone->uz_size, udata);
2601bc9d08e1SMark Johnston 		if (zone->uz_fini != NULL)
26028d689e04SGleb Smirnoff 			zone->uz_fini(item, zone->uz_size);
26038d689e04SGleb Smirnoff 		memguard_free(item);
26048d689e04SGleb Smirnoff 		return;
26058d689e04SGleb Smirnoff 	}
26068d689e04SGleb Smirnoff #endif
26075d1ae027SRobert Watson #ifdef INVARIANTS
2608e20a199fSJeff Roberson 	if (zone->uz_flags & UMA_ZONE_MALLOC)
26095d1ae027SRobert Watson 		uma_dbg_free(zone, udata, item);
26105d1ae027SRobert Watson 	else
26115d1ae027SRobert Watson 		uma_dbg_free(zone, NULL, item);
26125d1ae027SRobert Watson #endif
2613fc03d22bSJeff Roberson 	if (zone->uz_dtor != NULL)
2614ef72505eSJeff Roberson 		zone->uz_dtor(item, zone->uz_size, udata);
2615ef72505eSJeff Roberson 
2616af7f9b97SJeff Roberson 	/*
2617af7f9b97SJeff Roberson 	 * The race here is acceptable.  If we miss it we'll just have to wait
2618af7f9b97SJeff Roberson 	 * a little longer for the limits to be reset.
2619af7f9b97SJeff Roberson 	 */
2620e20a199fSJeff Roberson 	if (zone->uz_flags & UMA_ZFLAG_FULL)
2621fc03d22bSJeff Roberson 		goto zfree_item;
2622af7f9b97SJeff Roberson 
26235d1ae027SRobert Watson 	/*
26245d1ae027SRobert Watson 	 * If possible, free to the per-CPU cache.  There are two
26255d1ae027SRobert Watson 	 * requirements for safe access to the per-CPU cache: (1) the thread
26265d1ae027SRobert Watson 	 * accessing the cache must not be preempted or yield during access,
26275d1ae027SRobert Watson 	 * and (2) the thread must not migrate CPUs without switching which
26285d1ae027SRobert Watson 	 * cache it accesses.  We rely on a critical section to prevent
26295d1ae027SRobert Watson 	 * preemption and migration.  We release the critical section in
26305d1ae027SRobert Watson 	 * order to acquire the zone mutex if we are unable to free to the
26315d1ae027SRobert Watson 	 * current cache; when we re-acquire the critical section, we must
26325d1ae027SRobert Watson 	 * detect and handle migration if it has occurred.
26335d1ae027SRobert Watson 	 */
2634a553d4b8SJeff Roberson zfree_restart:
26355d1ae027SRobert Watson 	critical_enter();
26365d1ae027SRobert Watson 	cpu = curcpu;
26378355f576SJeff Roberson 	cache = &zone->uz_cpu[cpu];
26388355f576SJeff Roberson 
26398355f576SJeff Roberson zfree_start:
2640a553d4b8SJeff Roberson 	/*
2641fc03d22bSJeff Roberson 	 * Try to free into the allocbucket first to give LIFO ordering
2642fc03d22bSJeff Roberson 	 * for cache-hot datastructures.  Spill over into the freebucket
2643fc03d22bSJeff Roberson 	 * if necessary.  Alloc will swap them if one runs dry.
2644a553d4b8SJeff Roberson 	 */
2645fc03d22bSJeff Roberson 	bucket = cache->uc_allocbucket;
2646fc03d22bSJeff Roberson 	if (bucket == NULL || bucket->ub_cnt >= bucket->ub_entries)
2647fc03d22bSJeff Roberson 		bucket = cache->uc_freebucket;
2648fc03d22bSJeff Roberson 	if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
2649cae33c14SJeff Roberson 		KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL,
26508355f576SJeff Roberson 		    ("uma_zfree: Freeing to non free bucket index."));
2651cae33c14SJeff Roberson 		bucket->ub_bucket[bucket->ub_cnt] = item;
2652cae33c14SJeff Roberson 		bucket->ub_cnt++;
2653773df9abSRobert Watson 		cache->uc_frees++;
26545d1ae027SRobert Watson 		critical_exit();
26558355f576SJeff Roberson 		return;
2656fc03d22bSJeff Roberson 	}
2657fc03d22bSJeff Roberson 
26588355f576SJeff Roberson 	/*
26595d1ae027SRobert Watson 	 * We must go back the zone, which requires acquiring the zone lock,
26605d1ae027SRobert Watson 	 * which in turn means we must release and re-acquire the critical
26615d1ae027SRobert Watson 	 * section.  Since the critical section is released, we may be
26625d1ae027SRobert Watson 	 * preempted or migrate.  As such, make sure not to maintain any
26635d1ae027SRobert Watson 	 * thread-local state specific to the cache from prior to releasing
26645d1ae027SRobert Watson 	 * the critical section.
26658355f576SJeff Roberson 	 */
26665d1ae027SRobert Watson 	critical_exit();
2667fc03d22bSJeff Roberson 	if (zone->uz_count == 0 || bucketdisable)
2668fc03d22bSJeff Roberson 		goto zfree_item;
2669fc03d22bSJeff Roberson 
26704d104ba0SAlexander Motin 	lockfail = 0;
26714d104ba0SAlexander Motin 	if (ZONE_TRYLOCK(zone) == 0) {
26724d104ba0SAlexander Motin 		/* Record contention to size the buckets. */
26738355f576SJeff Roberson 		ZONE_LOCK(zone);
26744d104ba0SAlexander Motin 		lockfail = 1;
26754d104ba0SAlexander Motin 	}
26765d1ae027SRobert Watson 	critical_enter();
26775d1ae027SRobert Watson 	cpu = curcpu;
26785d1ae027SRobert Watson 	cache = &zone->uz_cpu[cpu];
26798355f576SJeff Roberson 
2680fc03d22bSJeff Roberson 	/*
2681fc03d22bSJeff Roberson 	 * Since we have locked the zone we may as well send back our stats.
2682fc03d22bSJeff Roberson 	 */
26830095a784SJeff Roberson 	atomic_add_long(&zone->uz_allocs, cache->uc_allocs);
26840095a784SJeff Roberson 	atomic_add_long(&zone->uz_frees, cache->uc_frees);
2685f4ff923bSRobert Watson 	cache->uc_allocs = 0;
2686f4ff923bSRobert Watson 	cache->uc_frees = 0;
2687f4ff923bSRobert Watson 
26888355f576SJeff Roberson 	bucket = cache->uc_freebucket;
2689fc03d22bSJeff Roberson 	if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
2690fc03d22bSJeff Roberson 		ZONE_UNLOCK(zone);
2691fc03d22bSJeff Roberson 		goto zfree_start;
2692fc03d22bSJeff Roberson 	}
26938355f576SJeff Roberson 	cache->uc_freebucket = NULL;
2694afa5d703SMark Johnston 	/* We are no longer associated with this CPU. */
2695afa5d703SMark Johnston 	critical_exit();
26968355f576SJeff Roberson 
26978355f576SJeff Roberson 	/* Can we throw this on the zone full list? */
26988355f576SJeff Roberson 	if (bucket != NULL) {
26991431a748SGleb Smirnoff 		CTR3(KTR_UMA,
27001431a748SGleb Smirnoff 		    "uma_zfree: zone %s(%p) putting bucket %p on free list",
27011431a748SGleb Smirnoff 		    zone->uz_name, zone, bucket);
2702cae33c14SJeff Roberson 		/* ub_cnt is pointing to the last free item */
2703cae33c14SJeff Roberson 		KASSERT(bucket->ub_cnt != 0,
27048355f576SJeff Roberson 		    ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n"));
2705fc03d22bSJeff Roberson 		LIST_INSERT_HEAD(&zone->uz_buckets, bucket, ub_link);
27068355f576SJeff Roberson 	}
2707fc03d22bSJeff Roberson 
27084d104ba0SAlexander Motin 	/*
27094d104ba0SAlexander Motin 	 * We bump the uz count when the cache size is insufficient to
27104d104ba0SAlexander Motin 	 * handle the working set.
27114d104ba0SAlexander Motin 	 */
27124d104ba0SAlexander Motin 	if (lockfail && zone->uz_count < BUCKET_MAX)
27134d104ba0SAlexander Motin 		zone->uz_count++;
2714a553d4b8SJeff Roberson 	ZONE_UNLOCK(zone);
2715a553d4b8SJeff Roberson 
27166fd34d6fSJeff Roberson 	bucket = bucket_alloc(zone, udata, M_NOWAIT);
27171431a748SGleb Smirnoff 	CTR3(KTR_UMA, "uma_zfree: zone %s(%p) allocated bucket %p",
27181431a748SGleb Smirnoff 	    zone->uz_name, zone, bucket);
27194741dcbfSJeff Roberson 	if (bucket) {
2720fc03d22bSJeff Roberson 		critical_enter();
2721fc03d22bSJeff Roberson 		cpu = curcpu;
2722fc03d22bSJeff Roberson 		cache = &zone->uz_cpu[cpu];
2723fc03d22bSJeff Roberson 		if (cache->uc_freebucket == NULL) {
2724fc03d22bSJeff Roberson 			cache->uc_freebucket = bucket;
2725fc03d22bSJeff Roberson 			goto zfree_start;
2726fc03d22bSJeff Roberson 		}
2727fc03d22bSJeff Roberson 		/*
2728fc03d22bSJeff Roberson 		 * We lost the race, start over.  We have to drop our
2729fc03d22bSJeff Roberson 		 * critical section to free the bucket.
2730fc03d22bSJeff Roberson 		 */
2731fc03d22bSJeff Roberson 		critical_exit();
27326fd34d6fSJeff Roberson 		bucket_free(zone, bucket, udata);
2733a553d4b8SJeff Roberson 		goto zfree_restart;
27348355f576SJeff Roberson 	}
27358355f576SJeff Roberson 
2736a553d4b8SJeff Roberson 	/*
2737a553d4b8SJeff Roberson 	 * If nothing else caught this, we'll just do an internal free.
2738a553d4b8SJeff Roberson 	 */
2739fc03d22bSJeff Roberson zfree_item:
27400095a784SJeff Roberson 	zone_free_item(zone, item, udata, SKIP_DTOR);
27418355f576SJeff Roberson 
27428355f576SJeff Roberson 	return;
27438355f576SJeff Roberson }
27448355f576SJeff Roberson 
27458355f576SJeff Roberson static void
27460095a784SJeff Roberson slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item)
27478355f576SJeff Roberson {
274885dcf349SGleb Smirnoff 	uint8_t freei;
2749099a0e58SBosko Milekic 
27500095a784SJeff Roberson 	mtx_assert(&keg->uk_lock, MA_OWNED);
2751e20a199fSJeff Roberson 	MPASS(keg == slab->us_keg);
27528355f576SJeff Roberson 
27538355f576SJeff Roberson 	/* Do we need to remove from any lists? */
2754099a0e58SBosko Milekic 	if (slab->us_freecount+1 == keg->uk_ipers) {
27558355f576SJeff Roberson 		LIST_REMOVE(slab, us_link);
2756099a0e58SBosko Milekic 		LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
27578355f576SJeff Roberson 	} else if (slab->us_freecount == 0) {
27588355f576SJeff Roberson 		LIST_REMOVE(slab, us_link);
2759099a0e58SBosko Milekic 		LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
27608355f576SJeff Roberson 	}
27618355f576SJeff Roberson 
2762ef72505eSJeff Roberson 	/* Slab management. */
2763ef72505eSJeff Roberson 	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
2764ef72505eSJeff Roberson 	BIT_SET(SLAB_SETSIZE, freei, &slab->us_free);
27658355f576SJeff Roberson 	slab->us_freecount++;
27668355f576SJeff Roberson 
2767ef72505eSJeff Roberson 	/* Keg statistics. */
2768099a0e58SBosko Milekic 	keg->uk_free++;
27690095a784SJeff Roberson }
27700095a784SJeff Roberson 
27710095a784SJeff Roberson static void
27720095a784SJeff Roberson zone_release(uma_zone_t zone, void **bucket, int cnt)
27730095a784SJeff Roberson {
27740095a784SJeff Roberson 	void *item;
27750095a784SJeff Roberson 	uma_slab_t slab;
27760095a784SJeff Roberson 	uma_keg_t keg;
27770095a784SJeff Roberson 	uint8_t *mem;
27780095a784SJeff Roberson 	int clearfull;
27790095a784SJeff Roberson 	int i;
27808355f576SJeff Roberson 
2781e20a199fSJeff Roberson 	clearfull = 0;
27820095a784SJeff Roberson 	keg = zone_first_keg(zone);
2783af526374SJeff Roberson 	KEG_LOCK(keg);
27840095a784SJeff Roberson 	for (i = 0; i < cnt; i++) {
27850095a784SJeff Roberson 		item = bucket[i];
27860095a784SJeff Roberson 		if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) {
27870095a784SJeff Roberson 			mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
27880095a784SJeff Roberson 			if (zone->uz_flags & UMA_ZONE_HASH) {
27890095a784SJeff Roberson 				slab = hash_sfind(&keg->uk_hash, mem);
27900095a784SJeff Roberson 			} else {
27910095a784SJeff Roberson 				mem += keg->uk_pgoff;
27920095a784SJeff Roberson 				slab = (uma_slab_t)mem;
27930095a784SJeff Roberson 			}
27940095a784SJeff Roberson 		} else {
27950095a784SJeff Roberson 			slab = vtoslab((vm_offset_t)item);
27960095a784SJeff Roberson 			if (slab->us_keg != keg) {
27970095a784SJeff Roberson 				KEG_UNLOCK(keg);
27980095a784SJeff Roberson 				keg = slab->us_keg;
27990095a784SJeff Roberson 				KEG_LOCK(keg);
28000095a784SJeff Roberson 			}
28010095a784SJeff Roberson 		}
28020095a784SJeff Roberson 		slab_free_item(keg, slab, item);
2803099a0e58SBosko Milekic 		if (keg->uk_flags & UMA_ZFLAG_FULL) {
2804e20a199fSJeff Roberson 			if (keg->uk_pages < keg->uk_maxpages) {
2805099a0e58SBosko Milekic 				keg->uk_flags &= ~UMA_ZFLAG_FULL;
2806e20a199fSJeff Roberson 				clearfull = 1;
2807e20a199fSJeff Roberson 			}
2808af7f9b97SJeff Roberson 
280977380291SMohan Srinivasan 			/*
2810ef72505eSJeff Roberson 			 * We can handle one more allocation. Since we're
2811ef72505eSJeff Roberson 			 * clearing ZFLAG_FULL, wake up all procs blocked
2812ef72505eSJeff Roberson 			 * on pages. This should be uncommon, so keeping this
2813ef72505eSJeff Roberson 			 * simple for now (rather than adding count of blocked
281477380291SMohan Srinivasan 			 * threads etc).
281577380291SMohan Srinivasan 			 */
281677380291SMohan Srinivasan 			wakeup(keg);
2817af7f9b97SJeff Roberson 		}
28180095a784SJeff Roberson 	}
2819af526374SJeff Roberson 	KEG_UNLOCK(keg);
28200095a784SJeff Roberson 	if (clearfull) {
2821af526374SJeff Roberson 		ZONE_LOCK(zone);
2822e20a199fSJeff Roberson 		zone->uz_flags &= ~UMA_ZFLAG_FULL;
2823e20a199fSJeff Roberson 		wakeup(zone);
2824605cbd6aSJeff Roberson 		ZONE_UNLOCK(zone);
2825af526374SJeff Roberson 	}
2826ef72505eSJeff Roberson 
28278355f576SJeff Roberson }
28288355f576SJeff Roberson 
28290095a784SJeff Roberson /*
28300095a784SJeff Roberson  * Frees a single item to any zone.
28310095a784SJeff Roberson  *
28320095a784SJeff Roberson  * Arguments:
28330095a784SJeff Roberson  *	zone   The zone to free to
28340095a784SJeff Roberson  *	item   The item we're freeing
28350095a784SJeff Roberson  *	udata  User supplied data for the dtor
28360095a784SJeff Roberson  *	skip   Skip dtors and finis
28370095a784SJeff Roberson  */
28380095a784SJeff Roberson static void
28390095a784SJeff Roberson zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip)
28400095a784SJeff Roberson {
28410095a784SJeff Roberson 
28420095a784SJeff Roberson #ifdef INVARIANTS
28430095a784SJeff Roberson 	if (skip == SKIP_NONE) {
28440095a784SJeff Roberson 		if (zone->uz_flags & UMA_ZONE_MALLOC)
28450095a784SJeff Roberson 			uma_dbg_free(zone, udata, item);
28460095a784SJeff Roberson 		else
28470095a784SJeff Roberson 			uma_dbg_free(zone, NULL, item);
28480095a784SJeff Roberson 	}
28490095a784SJeff Roberson #endif
28500095a784SJeff Roberson 	if (skip < SKIP_DTOR && zone->uz_dtor)
28510095a784SJeff Roberson 		zone->uz_dtor(item, zone->uz_size, udata);
28520095a784SJeff Roberson 
28530095a784SJeff Roberson 	if (skip < SKIP_FINI && zone->uz_fini)
28540095a784SJeff Roberson 		zone->uz_fini(item, zone->uz_size);
28550095a784SJeff Roberson 
28560095a784SJeff Roberson 	atomic_add_long(&zone->uz_frees, 1);
28570095a784SJeff Roberson 	zone->uz_release(zone->uz_arg, &item, 1);
28580095a784SJeff Roberson }
28590095a784SJeff Roberson 
28608355f576SJeff Roberson /* See uma.h */
28611c6cae97SLawrence Stewart int
2862736ee590SJeff Roberson uma_zone_set_max(uma_zone_t zone, int nitems)
2863736ee590SJeff Roberson {
2864099a0e58SBosko Milekic 	uma_keg_t keg;
2865099a0e58SBosko Milekic 
2866e20a199fSJeff Roberson 	keg = zone_first_keg(zone);
28670095a784SJeff Roberson 	if (keg == NULL)
28680095a784SJeff Roberson 		return (0);
2869af526374SJeff Roberson 	KEG_LOCK(keg);
2870e20a199fSJeff Roberson 	keg->uk_maxpages = (nitems / keg->uk_ipers) * keg->uk_ppera;
2871099a0e58SBosko Milekic 	if (keg->uk_maxpages * keg->uk_ipers < nitems)
2872e20a199fSJeff Roberson 		keg->uk_maxpages += keg->uk_ppera;
287357223e99SAndriy Gapon 	nitems = (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers;
2874af526374SJeff Roberson 	KEG_UNLOCK(keg);
28751c6cae97SLawrence Stewart 
28761c6cae97SLawrence Stewart 	return (nitems);
2877736ee590SJeff Roberson }
2878736ee590SJeff Roberson 
2879736ee590SJeff Roberson /* See uma.h */
2880e49471b0SAndre Oppermann int
2881e49471b0SAndre Oppermann uma_zone_get_max(uma_zone_t zone)
2882e49471b0SAndre Oppermann {
2883e49471b0SAndre Oppermann 	int nitems;
2884e49471b0SAndre Oppermann 	uma_keg_t keg;
2885e49471b0SAndre Oppermann 
2886e49471b0SAndre Oppermann 	keg = zone_first_keg(zone);
28870095a784SJeff Roberson 	if (keg == NULL)
28880095a784SJeff Roberson 		return (0);
2889af526374SJeff Roberson 	KEG_LOCK(keg);
289057223e99SAndriy Gapon 	nitems = (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers;
2891af526374SJeff Roberson 	KEG_UNLOCK(keg);
2892e49471b0SAndre Oppermann 
2893e49471b0SAndre Oppermann 	return (nitems);
2894e49471b0SAndre Oppermann }
2895e49471b0SAndre Oppermann 
2896e49471b0SAndre Oppermann /* See uma.h */
28972f891cd5SPawel Jakub Dawidek void
28982f891cd5SPawel Jakub Dawidek uma_zone_set_warning(uma_zone_t zone, const char *warning)
28992f891cd5SPawel Jakub Dawidek {
29002f891cd5SPawel Jakub Dawidek 
29012f891cd5SPawel Jakub Dawidek 	ZONE_LOCK(zone);
29022f891cd5SPawel Jakub Dawidek 	zone->uz_warning = warning;
29032f891cd5SPawel Jakub Dawidek 	ZONE_UNLOCK(zone);
29042f891cd5SPawel Jakub Dawidek }
29052f891cd5SPawel Jakub Dawidek 
29062f891cd5SPawel Jakub Dawidek /* See uma.h */
290754503a13SJonathan T. Looney void
290854503a13SJonathan T. Looney uma_zone_set_maxaction(uma_zone_t zone, uma_maxaction_t maxaction)
290954503a13SJonathan T. Looney {
291054503a13SJonathan T. Looney 
291154503a13SJonathan T. Looney 	ZONE_LOCK(zone);
2912e60b2fcbSGleb Smirnoff 	TASK_INIT(&zone->uz_maxaction, 0, (task_fn_t *)maxaction, zone);
291354503a13SJonathan T. Looney 	ZONE_UNLOCK(zone);
291454503a13SJonathan T. Looney }
291554503a13SJonathan T. Looney 
291654503a13SJonathan T. Looney /* See uma.h */
2917c4ae7908SLawrence Stewart int
2918c4ae7908SLawrence Stewart uma_zone_get_cur(uma_zone_t zone)
2919c4ae7908SLawrence Stewart {
2920c4ae7908SLawrence Stewart 	int64_t nitems;
2921c4ae7908SLawrence Stewart 	u_int i;
2922c4ae7908SLawrence Stewart 
2923c4ae7908SLawrence Stewart 	ZONE_LOCK(zone);
2924c4ae7908SLawrence Stewart 	nitems = zone->uz_allocs - zone->uz_frees;
2925c4ae7908SLawrence Stewart 	CPU_FOREACH(i) {
2926c4ae7908SLawrence Stewart 		/*
2927c4ae7908SLawrence Stewart 		 * See the comment in sysctl_vm_zone_stats() regarding the
2928c4ae7908SLawrence Stewart 		 * safety of accessing the per-cpu caches. With the zone lock
2929c4ae7908SLawrence Stewart 		 * held, it is safe, but can potentially result in stale data.
2930c4ae7908SLawrence Stewart 		 */
2931c4ae7908SLawrence Stewart 		nitems += zone->uz_cpu[i].uc_allocs -
2932c4ae7908SLawrence Stewart 		    zone->uz_cpu[i].uc_frees;
2933c4ae7908SLawrence Stewart 	}
2934c4ae7908SLawrence Stewart 	ZONE_UNLOCK(zone);
2935c4ae7908SLawrence Stewart 
2936c4ae7908SLawrence Stewart 	return (nitems < 0 ? 0 : nitems);
2937c4ae7908SLawrence Stewart }
2938c4ae7908SLawrence Stewart 
2939c4ae7908SLawrence Stewart /* See uma.h */
2940736ee590SJeff Roberson void
2941099a0e58SBosko Milekic uma_zone_set_init(uma_zone_t zone, uma_init uminit)
2942099a0e58SBosko Milekic {
2943e20a199fSJeff Roberson 	uma_keg_t keg;
2944e20a199fSJeff Roberson 
2945e20a199fSJeff Roberson 	keg = zone_first_keg(zone);
29460095a784SJeff Roberson 	KASSERT(keg != NULL, ("uma_zone_set_init: Invalid zone type"));
2947af526374SJeff Roberson 	KEG_LOCK(keg);
2948e20a199fSJeff Roberson 	KASSERT(keg->uk_pages == 0,
2949099a0e58SBosko Milekic 	    ("uma_zone_set_init on non-empty keg"));
2950e20a199fSJeff Roberson 	keg->uk_init = uminit;
2951af526374SJeff Roberson 	KEG_UNLOCK(keg);
2952099a0e58SBosko Milekic }
2953099a0e58SBosko Milekic 
2954099a0e58SBosko Milekic /* See uma.h */
2955099a0e58SBosko Milekic void
2956099a0e58SBosko Milekic uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
2957099a0e58SBosko Milekic {
2958e20a199fSJeff Roberson 	uma_keg_t keg;
2959e20a199fSJeff Roberson 
2960e20a199fSJeff Roberson 	keg = zone_first_keg(zone);
29611d2c0c46SDmitry Chagin 	KASSERT(keg != NULL, ("uma_zone_set_fini: Invalid zone type"));
2962af526374SJeff Roberson 	KEG_LOCK(keg);
2963e20a199fSJeff Roberson 	KASSERT(keg->uk_pages == 0,
2964099a0e58SBosko Milekic 	    ("uma_zone_set_fini on non-empty keg"));
2965e20a199fSJeff Roberson 	keg->uk_fini = fini;
2966af526374SJeff Roberson 	KEG_UNLOCK(keg);
2967099a0e58SBosko Milekic }
2968099a0e58SBosko Milekic 
2969099a0e58SBosko Milekic /* See uma.h */
2970099a0e58SBosko Milekic void
2971099a0e58SBosko Milekic uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
2972099a0e58SBosko Milekic {
2973af526374SJeff Roberson 
2974099a0e58SBosko Milekic 	ZONE_LOCK(zone);
2975e20a199fSJeff Roberson 	KASSERT(zone_first_keg(zone)->uk_pages == 0,
2976099a0e58SBosko Milekic 	    ("uma_zone_set_zinit on non-empty keg"));
2977099a0e58SBosko Milekic 	zone->uz_init = zinit;
2978099a0e58SBosko Milekic 	ZONE_UNLOCK(zone);
2979099a0e58SBosko Milekic }
2980099a0e58SBosko Milekic 
2981099a0e58SBosko Milekic /* See uma.h */
2982099a0e58SBosko Milekic void
2983099a0e58SBosko Milekic uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
2984099a0e58SBosko Milekic {
2985af526374SJeff Roberson 
2986099a0e58SBosko Milekic 	ZONE_LOCK(zone);
2987e20a199fSJeff Roberson 	KASSERT(zone_first_keg(zone)->uk_pages == 0,
2988099a0e58SBosko Milekic 	    ("uma_zone_set_zfini on non-empty keg"));
2989099a0e58SBosko Milekic 	zone->uz_fini = zfini;
2990099a0e58SBosko Milekic 	ZONE_UNLOCK(zone);
2991099a0e58SBosko Milekic }
2992099a0e58SBosko Milekic 
2993099a0e58SBosko Milekic /* See uma.h */
2994b23f72e9SBrian Feldman /* XXX uk_freef is not actually used with the zone locked */
2995099a0e58SBosko Milekic void
29968355f576SJeff Roberson uma_zone_set_freef(uma_zone_t zone, uma_free freef)
29978355f576SJeff Roberson {
29980095a784SJeff Roberson 	uma_keg_t keg;
2999e20a199fSJeff Roberson 
30000095a784SJeff Roberson 	keg = zone_first_keg(zone);
30011d2c0c46SDmitry Chagin 	KASSERT(keg != NULL, ("uma_zone_set_freef: Invalid zone type"));
3002af526374SJeff Roberson 	KEG_LOCK(keg);
30030095a784SJeff Roberson 	keg->uk_freef = freef;
3004af526374SJeff Roberson 	KEG_UNLOCK(keg);
30058355f576SJeff Roberson }
30068355f576SJeff Roberson 
30078355f576SJeff Roberson /* See uma.h */
3008b23f72e9SBrian Feldman /* XXX uk_allocf is not actually used with the zone locked */
30098355f576SJeff Roberson void
30108355f576SJeff Roberson uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
30118355f576SJeff Roberson {
3012e20a199fSJeff Roberson 	uma_keg_t keg;
3013e20a199fSJeff Roberson 
3014e20a199fSJeff Roberson 	keg = zone_first_keg(zone);
3015af526374SJeff Roberson 	KEG_LOCK(keg);
3016e20a199fSJeff Roberson 	keg->uk_allocf = allocf;
3017af526374SJeff Roberson 	KEG_UNLOCK(keg);
30188355f576SJeff Roberson }
30198355f576SJeff Roberson 
30208355f576SJeff Roberson /* See uma.h */
30216fd34d6fSJeff Roberson void
30226fd34d6fSJeff Roberson uma_zone_reserve(uma_zone_t zone, int items)
30236fd34d6fSJeff Roberson {
30246fd34d6fSJeff Roberson 	uma_keg_t keg;
30256fd34d6fSJeff Roberson 
30266fd34d6fSJeff Roberson 	keg = zone_first_keg(zone);
30276fd34d6fSJeff Roberson 	if (keg == NULL)
30286fd34d6fSJeff Roberson 		return;
30296fd34d6fSJeff Roberson 	KEG_LOCK(keg);
30306fd34d6fSJeff Roberson 	keg->uk_reserve = items;
30316fd34d6fSJeff Roberson 	KEG_UNLOCK(keg);
30326fd34d6fSJeff Roberson 
30336fd34d6fSJeff Roberson 	return;
30346fd34d6fSJeff Roberson }
30356fd34d6fSJeff Roberson 
30366fd34d6fSJeff Roberson /* See uma.h */
30378355f576SJeff Roberson int
3038a4915c21SAttilio Rao uma_zone_reserve_kva(uma_zone_t zone, int count)
30398355f576SJeff Roberson {
3040099a0e58SBosko Milekic 	uma_keg_t keg;
30418355f576SJeff Roberson 	vm_offset_t kva;
30429ba30bcbSZbigniew Bodek 	u_int pages;
30438355f576SJeff Roberson 
3044e20a199fSJeff Roberson 	keg = zone_first_keg(zone);
30450095a784SJeff Roberson 	if (keg == NULL)
30460095a784SJeff Roberson 		return (0);
3047099a0e58SBosko Milekic 	pages = count / keg->uk_ipers;
30488355f576SJeff Roberson 
3049099a0e58SBosko Milekic 	if (pages * keg->uk_ipers < count)
30508355f576SJeff Roberson 		pages++;
305157223e99SAndriy Gapon 	pages *= keg->uk_ppera;
3052a553d4b8SJeff Roberson 
3053a4915c21SAttilio Rao #ifdef UMA_MD_SMALL_ALLOC
3054a4915c21SAttilio Rao 	if (keg->uk_ppera > 1) {
3055a4915c21SAttilio Rao #else
3056a4915c21SAttilio Rao 	if (1) {
3057a4915c21SAttilio Rao #endif
305857223e99SAndriy Gapon 		kva = kva_alloc((vm_size_t)pages * PAGE_SIZE);
3059d1f42ac2SAlan Cox 		if (kva == 0)
30608355f576SJeff Roberson 			return (0);
3061a4915c21SAttilio Rao 	} else
3062a4915c21SAttilio Rao 		kva = 0;
3063af526374SJeff Roberson 	KEG_LOCK(keg);
3064099a0e58SBosko Milekic 	keg->uk_kva = kva;
3065a4915c21SAttilio Rao 	keg->uk_offset = 0;
3066099a0e58SBosko Milekic 	keg->uk_maxpages = pages;
3067a4915c21SAttilio Rao #ifdef UMA_MD_SMALL_ALLOC
3068a4915c21SAttilio Rao 	keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc;
3069a4915c21SAttilio Rao #else
3070a4915c21SAttilio Rao 	keg->uk_allocf = noobj_alloc;
3071a4915c21SAttilio Rao #endif
30726fd34d6fSJeff Roberson 	keg->uk_flags |= UMA_ZONE_NOFREE;
3073af526374SJeff Roberson 	KEG_UNLOCK(keg);
3074af526374SJeff Roberson 
30758355f576SJeff Roberson 	return (1);
30768355f576SJeff Roberson }
30778355f576SJeff Roberson 
30788355f576SJeff Roberson /* See uma.h */
30798355f576SJeff Roberson void
30808355f576SJeff Roberson uma_prealloc(uma_zone_t zone, int items)
30818355f576SJeff Roberson {
30828355f576SJeff Roberson 	int slabs;
30838355f576SJeff Roberson 	uma_slab_t slab;
3084099a0e58SBosko Milekic 	uma_keg_t keg;
30858355f576SJeff Roberson 
3086e20a199fSJeff Roberson 	keg = zone_first_keg(zone);
30870095a784SJeff Roberson 	if (keg == NULL)
30880095a784SJeff Roberson 		return;
3089af526374SJeff Roberson 	KEG_LOCK(keg);
3090099a0e58SBosko Milekic 	slabs = items / keg->uk_ipers;
3091099a0e58SBosko Milekic 	if (slabs * keg->uk_ipers < items)
30928355f576SJeff Roberson 		slabs++;
30938355f576SJeff Roberson 	while (slabs > 0) {
3094e20a199fSJeff Roberson 		slab = keg_alloc_slab(keg, zone, M_WAITOK);
3095e20a199fSJeff Roberson 		if (slab == NULL)
3096e20a199fSJeff Roberson 			break;
3097e20a199fSJeff Roberson 		MPASS(slab->us_keg == keg);
3098099a0e58SBosko Milekic 		LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
30998355f576SJeff Roberson 		slabs--;
31008355f576SJeff Roberson 	}
3101af526374SJeff Roberson 	KEG_UNLOCK(keg);
31028355f576SJeff Roberson }
31038355f576SJeff Roberson 
31048355f576SJeff Roberson /* See uma.h */
310544ec2b63SKonstantin Belousov static void
310644ec2b63SKonstantin Belousov uma_reclaim_locked(bool kmem_danger)
31078355f576SJeff Roberson {
310844ec2b63SKonstantin Belousov 
31091431a748SGleb Smirnoff 	CTR0(KTR_UMA, "UMA: vm asked us to release pages!");
311044ec2b63SKonstantin Belousov 	sx_assert(&uma_drain_lock, SA_XLOCKED);
311186bbae32SJeff Roberson 	bucket_enable();
31128355f576SJeff Roberson 	zone_foreach(zone_drain);
311344ec2b63SKonstantin Belousov 	if (vm_page_count_min() || kmem_danger) {
3114a2de44abSAlexander Motin 		cache_drain_safe(NULL);
3115a2de44abSAlexander Motin 		zone_foreach(zone_drain);
3116a2de44abSAlexander Motin 	}
31178355f576SJeff Roberson 	/*
31188355f576SJeff Roberson 	 * Some slabs may have been freed but this zone will be visited early
31198355f576SJeff Roberson 	 * we visit again so that we can free pages that are empty once other
31208355f576SJeff Roberson 	 * zones are drained.  We have to do the same for buckets.
31218355f576SJeff Roberson 	 */
31229643769aSJeff Roberson 	zone_drain(slabzone);
3123cae33c14SJeff Roberson 	bucket_zone_drain();
312444ec2b63SKonstantin Belousov }
312544ec2b63SKonstantin Belousov 
312644ec2b63SKonstantin Belousov void
312744ec2b63SKonstantin Belousov uma_reclaim(void)
312844ec2b63SKonstantin Belousov {
312944ec2b63SKonstantin Belousov 
313044ec2b63SKonstantin Belousov 	sx_xlock(&uma_drain_lock);
313144ec2b63SKonstantin Belousov 	uma_reclaim_locked(false);
313295c4bf75SKonstantin Belousov 	sx_xunlock(&uma_drain_lock);
31338355f576SJeff Roberson }
31348355f576SJeff Roberson 
313544ec2b63SKonstantin Belousov static int uma_reclaim_needed;
313644ec2b63SKonstantin Belousov 
313744ec2b63SKonstantin Belousov void
313844ec2b63SKonstantin Belousov uma_reclaim_wakeup(void)
313944ec2b63SKonstantin Belousov {
314044ec2b63SKonstantin Belousov 
314144ec2b63SKonstantin Belousov 	uma_reclaim_needed = 1;
314244ec2b63SKonstantin Belousov 	wakeup(&uma_reclaim_needed);
314344ec2b63SKonstantin Belousov }
314444ec2b63SKonstantin Belousov 
314544ec2b63SKonstantin Belousov void
314644ec2b63SKonstantin Belousov uma_reclaim_worker(void *arg __unused)
314744ec2b63SKonstantin Belousov {
314844ec2b63SKonstantin Belousov 
314944ec2b63SKonstantin Belousov 	sx_xlock(&uma_drain_lock);
315044ec2b63SKonstantin Belousov 	for (;;) {
315144ec2b63SKonstantin Belousov 		sx_sleep(&uma_reclaim_needed, &uma_drain_lock, PVM,
315244ec2b63SKonstantin Belousov 		    "umarcl", 0);
315344ec2b63SKonstantin Belousov 		if (uma_reclaim_needed) {
315444ec2b63SKonstantin Belousov 			uma_reclaim_needed = 0;
31559b43bc27SAndriy Gapon 			sx_xunlock(&uma_drain_lock);
31569b43bc27SAndriy Gapon 			EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_KMEM);
31579b43bc27SAndriy Gapon 			sx_xlock(&uma_drain_lock);
315844ec2b63SKonstantin Belousov 			uma_reclaim_locked(true);
315944ec2b63SKonstantin Belousov 		}
316044ec2b63SKonstantin Belousov 	}
316144ec2b63SKonstantin Belousov }
316244ec2b63SKonstantin Belousov 
3163663b416fSJohn Baldwin /* See uma.h */
3164663b416fSJohn Baldwin int
3165663b416fSJohn Baldwin uma_zone_exhausted(uma_zone_t zone)
3166663b416fSJohn Baldwin {
3167663b416fSJohn Baldwin 	int full;
3168663b416fSJohn Baldwin 
3169663b416fSJohn Baldwin 	ZONE_LOCK(zone);
3170e20a199fSJeff Roberson 	full = (zone->uz_flags & UMA_ZFLAG_FULL);
3171663b416fSJohn Baldwin 	ZONE_UNLOCK(zone);
3172663b416fSJohn Baldwin 	return (full);
3173663b416fSJohn Baldwin }
3174663b416fSJohn Baldwin 
31756c125b8dSMohan Srinivasan int
31766c125b8dSMohan Srinivasan uma_zone_exhausted_nolock(uma_zone_t zone)
31776c125b8dSMohan Srinivasan {
3178e20a199fSJeff Roberson 	return (zone->uz_flags & UMA_ZFLAG_FULL);
31796c125b8dSMohan Srinivasan }
31806c125b8dSMohan Srinivasan 
31818355f576SJeff Roberson void *
3182f2c2231eSRyan Stone uma_large_malloc(vm_size_t size, int wait)
31838355f576SJeff Roberson {
31848355f576SJeff Roberson 	void *mem;
31858355f576SJeff Roberson 	uma_slab_t slab;
318685dcf349SGleb Smirnoff 	uint8_t flags;
31878355f576SJeff Roberson 
3188e20a199fSJeff Roberson 	slab = zone_alloc_item(slabzone, NULL, wait);
31898355f576SJeff Roberson 	if (slab == NULL)
31908355f576SJeff Roberson 		return (NULL);
31918355f576SJeff Roberson 	mem = page_alloc(NULL, size, &flags, wait);
31928355f576SJeff Roberson 	if (mem) {
319399571dc3SJeff Roberson 		vsetslab((vm_offset_t)mem, slab);
31948355f576SJeff Roberson 		slab->us_data = mem;
31958355f576SJeff Roberson 		slab->us_flags = flags | UMA_SLAB_MALLOC;
31968355f576SJeff Roberson 		slab->us_size = size;
31978355f576SJeff Roberson 	} else {
31980095a784SJeff Roberson 		zone_free_item(slabzone, slab, NULL, SKIP_NONE);
31998355f576SJeff Roberson 	}
32008355f576SJeff Roberson 
32018355f576SJeff Roberson 	return (mem);
32028355f576SJeff Roberson }
32038355f576SJeff Roberson 
32048355f576SJeff Roberson void
32058355f576SJeff Roberson uma_large_free(uma_slab_t slab)
32068355f576SJeff Roberson {
3207c325e866SKonstantin Belousov 
32088355f576SJeff Roberson 	page_free(slab->us_data, slab->us_size, slab->us_flags);
32090095a784SJeff Roberson 	zone_free_item(slabzone, slab, NULL, SKIP_NONE);
32108355f576SJeff Roberson }
32118355f576SJeff Roberson 
321248343a2fSGleb Smirnoff static void
321348343a2fSGleb Smirnoff uma_zero_item(void *item, uma_zone_t zone)
321448343a2fSGleb Smirnoff {
321596c85efbSNathan Whitehorn 	int i;
321648343a2fSGleb Smirnoff 
321748343a2fSGleb Smirnoff 	if (zone->uz_flags & UMA_ZONE_PCPU) {
321896c85efbSNathan Whitehorn 		CPU_FOREACH(i)
321948343a2fSGleb Smirnoff 			bzero(zpcpu_get_cpu(item, i), zone->uz_size);
322048343a2fSGleb Smirnoff 	} else
322148343a2fSGleb Smirnoff 		bzero(item, zone->uz_size);
322248343a2fSGleb Smirnoff }
322348343a2fSGleb Smirnoff 
32248355f576SJeff Roberson void
32258355f576SJeff Roberson uma_print_stats(void)
32268355f576SJeff Roberson {
32278355f576SJeff Roberson 	zone_foreach(uma_print_zone);
32288355f576SJeff Roberson }
32298355f576SJeff Roberson 
3230504d5de3SJeff Roberson static void
3231504d5de3SJeff Roberson slab_print(uma_slab_t slab)
3232504d5de3SJeff Roberson {
3233ef72505eSJeff Roberson 	printf("slab: keg %p, data %p, freecount %d\n",
3234ef72505eSJeff Roberson 		slab->us_keg, slab->us_data, slab->us_freecount);
3235504d5de3SJeff Roberson }
3236504d5de3SJeff Roberson 
3237504d5de3SJeff Roberson static void
3238504d5de3SJeff Roberson cache_print(uma_cache_t cache)
3239504d5de3SJeff Roberson {
3240504d5de3SJeff Roberson 	printf("alloc: %p(%d), free: %p(%d)\n",
3241504d5de3SJeff Roberson 		cache->uc_allocbucket,
3242504d5de3SJeff Roberson 		cache->uc_allocbucket?cache->uc_allocbucket->ub_cnt:0,
3243504d5de3SJeff Roberson 		cache->uc_freebucket,
3244504d5de3SJeff Roberson 		cache->uc_freebucket?cache->uc_freebucket->ub_cnt:0);
3245504d5de3SJeff Roberson }
3246504d5de3SJeff Roberson 
3247e20a199fSJeff Roberson static void
3248e20a199fSJeff Roberson uma_print_keg(uma_keg_t keg)
32498355f576SJeff Roberson {
3250504d5de3SJeff Roberson 	uma_slab_t slab;
3251504d5de3SJeff Roberson 
32520b80c1e4SEitan Adler 	printf("keg: %s(%p) size %d(%d) flags %#x ipers %d ppera %d "
3253e20a199fSJeff Roberson 	    "out %d free %d limit %d\n",
3254e20a199fSJeff Roberson 	    keg->uk_name, keg, keg->uk_size, keg->uk_rsize, keg->uk_flags,
3255099a0e58SBosko Milekic 	    keg->uk_ipers, keg->uk_ppera,
325657223e99SAndriy Gapon 	    (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free,
325757223e99SAndriy Gapon 	    keg->uk_free, (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers);
3258504d5de3SJeff Roberson 	printf("Part slabs:\n");
3259099a0e58SBosko Milekic 	LIST_FOREACH(slab, &keg->uk_part_slab, us_link)
3260504d5de3SJeff Roberson 		slab_print(slab);
3261504d5de3SJeff Roberson 	printf("Free slabs:\n");
3262099a0e58SBosko Milekic 	LIST_FOREACH(slab, &keg->uk_free_slab, us_link)
3263504d5de3SJeff Roberson 		slab_print(slab);
3264504d5de3SJeff Roberson 	printf("Full slabs:\n");
3265099a0e58SBosko Milekic 	LIST_FOREACH(slab, &keg->uk_full_slab, us_link)
3266504d5de3SJeff Roberson 		slab_print(slab);
3267e20a199fSJeff Roberson }
3268e20a199fSJeff Roberson 
3269e20a199fSJeff Roberson void
3270e20a199fSJeff Roberson uma_print_zone(uma_zone_t zone)
3271e20a199fSJeff Roberson {
3272e20a199fSJeff Roberson 	uma_cache_t cache;
3273e20a199fSJeff Roberson 	uma_klink_t kl;
3274e20a199fSJeff Roberson 	int i;
3275e20a199fSJeff Roberson 
32760b80c1e4SEitan Adler 	printf("zone: %s(%p) size %d flags %#x\n",
3277e20a199fSJeff Roberson 	    zone->uz_name, zone, zone->uz_size, zone->uz_flags);
3278e20a199fSJeff Roberson 	LIST_FOREACH(kl, &zone->uz_kegs, kl_link)
3279e20a199fSJeff Roberson 		uma_print_keg(kl->kl_keg);
32803aa6d94eSJohn Baldwin 	CPU_FOREACH(i) {
3281504d5de3SJeff Roberson 		cache = &zone->uz_cpu[i];
3282504d5de3SJeff Roberson 		printf("CPU %d Cache:\n", i);
3283504d5de3SJeff Roberson 		cache_print(cache);
3284504d5de3SJeff Roberson 	}
32858355f576SJeff Roberson }
32868355f576SJeff Roberson 
3287a0d4b0aeSRobert Watson #ifdef DDB
32888355f576SJeff Roberson /*
32897a52a97eSRobert Watson  * Generate statistics across both the zone and its per-cpu cache's.  Return
32907a52a97eSRobert Watson  * desired statistics if the pointer is non-NULL for that statistic.
32917a52a97eSRobert Watson  *
32927a52a97eSRobert Watson  * Note: does not update the zone statistics, as it can't safely clear the
32937a52a97eSRobert Watson  * per-CPU cache statistic.
32947a52a97eSRobert Watson  *
32957a52a97eSRobert Watson  * XXXRW: Following the uc_allocbucket and uc_freebucket pointers here isn't
32967a52a97eSRobert Watson  * safe from off-CPU; we should modify the caches to track this information
32977a52a97eSRobert Watson  * directly so that we don't have to.
32987a52a97eSRobert Watson  */
32997a52a97eSRobert Watson static void
330085dcf349SGleb Smirnoff uma_zone_sumstat(uma_zone_t z, int *cachefreep, uint64_t *allocsp,
330185dcf349SGleb Smirnoff     uint64_t *freesp, uint64_t *sleepsp)
33027a52a97eSRobert Watson {
33037a52a97eSRobert Watson 	uma_cache_t cache;
330485dcf349SGleb Smirnoff 	uint64_t allocs, frees, sleeps;
33057a52a97eSRobert Watson 	int cachefree, cpu;
33067a52a97eSRobert Watson 
3307bf965959SSean Bruno 	allocs = frees = sleeps = 0;
33087a52a97eSRobert Watson 	cachefree = 0;
33093aa6d94eSJohn Baldwin 	CPU_FOREACH(cpu) {
33107a52a97eSRobert Watson 		cache = &z->uz_cpu[cpu];
33117a52a97eSRobert Watson 		if (cache->uc_allocbucket != NULL)
33127a52a97eSRobert Watson 			cachefree += cache->uc_allocbucket->ub_cnt;
33137a52a97eSRobert Watson 		if (cache->uc_freebucket != NULL)
33147a52a97eSRobert Watson 			cachefree += cache->uc_freebucket->ub_cnt;
33157a52a97eSRobert Watson 		allocs += cache->uc_allocs;
33167a52a97eSRobert Watson 		frees += cache->uc_frees;
33177a52a97eSRobert Watson 	}
33187a52a97eSRobert Watson 	allocs += z->uz_allocs;
33197a52a97eSRobert Watson 	frees += z->uz_frees;
3320bf965959SSean Bruno 	sleeps += z->uz_sleeps;
33217a52a97eSRobert Watson 	if (cachefreep != NULL)
33227a52a97eSRobert Watson 		*cachefreep = cachefree;
33237a52a97eSRobert Watson 	if (allocsp != NULL)
33247a52a97eSRobert Watson 		*allocsp = allocs;
33257a52a97eSRobert Watson 	if (freesp != NULL)
33267a52a97eSRobert Watson 		*freesp = frees;
3327bf965959SSean Bruno 	if (sleepsp != NULL)
3328bf965959SSean Bruno 		*sleepsp = sleeps;
33297a52a97eSRobert Watson }
3330a0d4b0aeSRobert Watson #endif /* DDB */
33317a52a97eSRobert Watson 
33327a52a97eSRobert Watson static int
33337a52a97eSRobert Watson sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS)
33347a52a97eSRobert Watson {
33357a52a97eSRobert Watson 	uma_keg_t kz;
33367a52a97eSRobert Watson 	uma_zone_t z;
33377a52a97eSRobert Watson 	int count;
33387a52a97eSRobert Watson 
33397a52a97eSRobert Watson 	count = 0;
3340111fbcd5SBryan Venteicher 	rw_rlock(&uma_rwlock);
33417a52a97eSRobert Watson 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
33427a52a97eSRobert Watson 		LIST_FOREACH(z, &kz->uk_zones, uz_link)
33437a52a97eSRobert Watson 			count++;
33447a52a97eSRobert Watson 	}
3345111fbcd5SBryan Venteicher 	rw_runlock(&uma_rwlock);
33467a52a97eSRobert Watson 	return (sysctl_handle_int(oidp, &count, 0, req));
33477a52a97eSRobert Watson }
33487a52a97eSRobert Watson 
33497a52a97eSRobert Watson static int
33507a52a97eSRobert Watson sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
33517a52a97eSRobert Watson {
33527a52a97eSRobert Watson 	struct uma_stream_header ush;
33537a52a97eSRobert Watson 	struct uma_type_header uth;
33547a52a97eSRobert Watson 	struct uma_percpu_stat ups;
33557a52a97eSRobert Watson 	uma_bucket_t bucket;
33567a52a97eSRobert Watson 	struct sbuf sbuf;
33577a52a97eSRobert Watson 	uma_cache_t cache;
3358e20a199fSJeff Roberson 	uma_klink_t kl;
33597a52a97eSRobert Watson 	uma_keg_t kz;
33607a52a97eSRobert Watson 	uma_zone_t z;
3361e20a199fSJeff Roberson 	uma_keg_t k;
33624e657159SMatthew D Fleming 	int count, error, i;
33637a52a97eSRobert Watson 
336400f0e671SMatthew D Fleming 	error = sysctl_wire_old_buffer(req, 0);
336500f0e671SMatthew D Fleming 	if (error != 0)
336600f0e671SMatthew D Fleming 		return (error);
33674e657159SMatthew D Fleming 	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
33681eafc078SIan Lepore 	sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL);
33694e657159SMatthew D Fleming 
3370404a593eSMatthew D Fleming 	count = 0;
3371111fbcd5SBryan Venteicher 	rw_rlock(&uma_rwlock);
33727a52a97eSRobert Watson 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
33737a52a97eSRobert Watson 		LIST_FOREACH(z, &kz->uk_zones, uz_link)
33747a52a97eSRobert Watson 			count++;
33757a52a97eSRobert Watson 	}
33767a52a97eSRobert Watson 
33777a52a97eSRobert Watson 	/*
33787a52a97eSRobert Watson 	 * Insert stream header.
33797a52a97eSRobert Watson 	 */
33807a52a97eSRobert Watson 	bzero(&ush, sizeof(ush));
33817a52a97eSRobert Watson 	ush.ush_version = UMA_STREAM_VERSION;
3382ab3a57c0SRobert Watson 	ush.ush_maxcpus = (mp_maxid + 1);
33837a52a97eSRobert Watson 	ush.ush_count = count;
33844e657159SMatthew D Fleming 	(void)sbuf_bcat(&sbuf, &ush, sizeof(ush));
33857a52a97eSRobert Watson 
33867a52a97eSRobert Watson 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
33877a52a97eSRobert Watson 		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
33887a52a97eSRobert Watson 			bzero(&uth, sizeof(uth));
33897a52a97eSRobert Watson 			ZONE_LOCK(z);
3390cbbb4a00SRobert Watson 			strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
33917a52a97eSRobert Watson 			uth.uth_align = kz->uk_align;
33927a52a97eSRobert Watson 			uth.uth_size = kz->uk_size;
33937a52a97eSRobert Watson 			uth.uth_rsize = kz->uk_rsize;
3394e20a199fSJeff Roberson 			LIST_FOREACH(kl, &z->uz_kegs, kl_link) {
3395e20a199fSJeff Roberson 				k = kl->kl_keg;
3396e20a199fSJeff Roberson 				uth.uth_maxpages += k->uk_maxpages;
3397e20a199fSJeff Roberson 				uth.uth_pages += k->uk_pages;
3398e20a199fSJeff Roberson 				uth.uth_keg_free += k->uk_free;
3399e20a199fSJeff Roberson 				uth.uth_limit = (k->uk_maxpages / k->uk_ppera)
3400e20a199fSJeff Roberson 				    * k->uk_ipers;
3401e20a199fSJeff Roberson 			}
3402cbbb4a00SRobert Watson 
3403cbbb4a00SRobert Watson 			/*
3404cbbb4a00SRobert Watson 			 * A zone is secondary is it is not the first entry
3405cbbb4a00SRobert Watson 			 * on the keg's zone list.
3406cbbb4a00SRobert Watson 			 */
3407e20a199fSJeff Roberson 			if ((z->uz_flags & UMA_ZONE_SECONDARY) &&
3408cbbb4a00SRobert Watson 			    (LIST_FIRST(&kz->uk_zones) != z))
3409cbbb4a00SRobert Watson 				uth.uth_zone_flags = UTH_ZONE_SECONDARY;
3410cbbb4a00SRobert Watson 
3411fc03d22bSJeff Roberson 			LIST_FOREACH(bucket, &z->uz_buckets, ub_link)
34127a52a97eSRobert Watson 				uth.uth_zone_free += bucket->ub_cnt;
34137a52a97eSRobert Watson 			uth.uth_allocs = z->uz_allocs;
34147a52a97eSRobert Watson 			uth.uth_frees = z->uz_frees;
34152019094aSRobert Watson 			uth.uth_fails = z->uz_fails;
3416bf965959SSean Bruno 			uth.uth_sleeps = z->uz_sleeps;
34174e657159SMatthew D Fleming 			(void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
34187a52a97eSRobert Watson 			/*
34192450bbb8SRobert Watson 			 * While it is not normally safe to access the cache
34202450bbb8SRobert Watson 			 * bucket pointers while not on the CPU that owns the
34212450bbb8SRobert Watson 			 * cache, we only allow the pointers to be exchanged
34222450bbb8SRobert Watson 			 * without the zone lock held, not invalidated, so
34232450bbb8SRobert Watson 			 * accept the possible race associated with bucket
34242450bbb8SRobert Watson 			 * exchange during monitoring.
34257a52a97eSRobert Watson 			 */
3426ab3a57c0SRobert Watson 			for (i = 0; i < (mp_maxid + 1); i++) {
34277a52a97eSRobert Watson 				bzero(&ups, sizeof(ups));
34287a52a97eSRobert Watson 				if (kz->uk_flags & UMA_ZFLAG_INTERNAL)
34297a52a97eSRobert Watson 					goto skip;
3430082dc776SRobert Watson 				if (CPU_ABSENT(i))
3431082dc776SRobert Watson 					goto skip;
34327a52a97eSRobert Watson 				cache = &z->uz_cpu[i];
34337a52a97eSRobert Watson 				if (cache->uc_allocbucket != NULL)
34347a52a97eSRobert Watson 					ups.ups_cache_free +=
34357a52a97eSRobert Watson 					    cache->uc_allocbucket->ub_cnt;
34367a52a97eSRobert Watson 				if (cache->uc_freebucket != NULL)
34377a52a97eSRobert Watson 					ups.ups_cache_free +=
34387a52a97eSRobert Watson 					    cache->uc_freebucket->ub_cnt;
34397a52a97eSRobert Watson 				ups.ups_allocs = cache->uc_allocs;
34407a52a97eSRobert Watson 				ups.ups_frees = cache->uc_frees;
34417a52a97eSRobert Watson skip:
34424e657159SMatthew D Fleming 				(void)sbuf_bcat(&sbuf, &ups, sizeof(ups));
34437a52a97eSRobert Watson 			}
34442450bbb8SRobert Watson 			ZONE_UNLOCK(z);
34457a52a97eSRobert Watson 		}
34467a52a97eSRobert Watson 	}
3447111fbcd5SBryan Venteicher 	rw_runlock(&uma_rwlock);
34484e657159SMatthew D Fleming 	error = sbuf_finish(&sbuf);
34494e657159SMatthew D Fleming 	sbuf_delete(&sbuf);
34507a52a97eSRobert Watson 	return (error);
34517a52a97eSRobert Watson }
345248c5777eSRobert Watson 
34530a5a3ccbSGleb Smirnoff int
34540a5a3ccbSGleb Smirnoff sysctl_handle_uma_zone_max(SYSCTL_HANDLER_ARGS)
34550a5a3ccbSGleb Smirnoff {
34560a5a3ccbSGleb Smirnoff 	uma_zone_t zone = *(uma_zone_t *)arg1;
345716be9f54SGleb Smirnoff 	int error, max;
34580a5a3ccbSGleb Smirnoff 
345916be9f54SGleb Smirnoff 	max = uma_zone_get_max(zone);
34600a5a3ccbSGleb Smirnoff 	error = sysctl_handle_int(oidp, &max, 0, req);
34610a5a3ccbSGleb Smirnoff 	if (error || !req->newptr)
34620a5a3ccbSGleb Smirnoff 		return (error);
34630a5a3ccbSGleb Smirnoff 
34640a5a3ccbSGleb Smirnoff 	uma_zone_set_max(zone, max);
34650a5a3ccbSGleb Smirnoff 
34660a5a3ccbSGleb Smirnoff 	return (0);
34670a5a3ccbSGleb Smirnoff }
34680a5a3ccbSGleb Smirnoff 
34690a5a3ccbSGleb Smirnoff int
34700a5a3ccbSGleb Smirnoff sysctl_handle_uma_zone_cur(SYSCTL_HANDLER_ARGS)
34710a5a3ccbSGleb Smirnoff {
34720a5a3ccbSGleb Smirnoff 	uma_zone_t zone = *(uma_zone_t *)arg1;
34730a5a3ccbSGleb Smirnoff 	int cur;
34740a5a3ccbSGleb Smirnoff 
34750a5a3ccbSGleb Smirnoff 	cur = uma_zone_get_cur(zone);
34760a5a3ccbSGleb Smirnoff 	return (sysctl_handle_int(oidp, &cur, 0, req));
34770a5a3ccbSGleb Smirnoff }
34780a5a3ccbSGleb Smirnoff 
34799542ea7bSGleb Smirnoff #ifdef INVARIANTS
34809542ea7bSGleb Smirnoff static uma_slab_t
34819542ea7bSGleb Smirnoff uma_dbg_getslab(uma_zone_t zone, void *item)
34829542ea7bSGleb Smirnoff {
34839542ea7bSGleb Smirnoff 	uma_slab_t slab;
34849542ea7bSGleb Smirnoff 	uma_keg_t keg;
34859542ea7bSGleb Smirnoff 	uint8_t *mem;
34869542ea7bSGleb Smirnoff 
34879542ea7bSGleb Smirnoff 	mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
34889542ea7bSGleb Smirnoff 	if (zone->uz_flags & UMA_ZONE_VTOSLAB) {
34899542ea7bSGleb Smirnoff 		slab = vtoslab((vm_offset_t)mem);
34909542ea7bSGleb Smirnoff 	} else {
34919542ea7bSGleb Smirnoff 		/*
34929542ea7bSGleb Smirnoff 		 * It is safe to return the slab here even though the
34939542ea7bSGleb Smirnoff 		 * zone is unlocked because the item's allocation state
34949542ea7bSGleb Smirnoff 		 * essentially holds a reference.
34959542ea7bSGleb Smirnoff 		 */
34969542ea7bSGleb Smirnoff 		ZONE_LOCK(zone);
34979542ea7bSGleb Smirnoff 		keg = LIST_FIRST(&zone->uz_kegs)->kl_keg;
34989542ea7bSGleb Smirnoff 		if (keg->uk_flags & UMA_ZONE_HASH)
34999542ea7bSGleb Smirnoff 			slab = hash_sfind(&keg->uk_hash, mem);
35009542ea7bSGleb Smirnoff 		else
35019542ea7bSGleb Smirnoff 			slab = (uma_slab_t)(mem + keg->uk_pgoff);
35029542ea7bSGleb Smirnoff 		ZONE_UNLOCK(zone);
35039542ea7bSGleb Smirnoff 	}
35049542ea7bSGleb Smirnoff 
35059542ea7bSGleb Smirnoff 	return (slab);
35069542ea7bSGleb Smirnoff }
35079542ea7bSGleb Smirnoff 
35089542ea7bSGleb Smirnoff /*
35099542ea7bSGleb Smirnoff  * Set up the slab's freei data such that uma_dbg_free can function.
35109542ea7bSGleb Smirnoff  *
35119542ea7bSGleb Smirnoff  */
35129542ea7bSGleb Smirnoff static void
35139542ea7bSGleb Smirnoff uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item)
35149542ea7bSGleb Smirnoff {
35159542ea7bSGleb Smirnoff 	uma_keg_t keg;
35169542ea7bSGleb Smirnoff 	int freei;
35179542ea7bSGleb Smirnoff 
35189542ea7bSGleb Smirnoff 	if (zone_first_keg(zone) == NULL)
35199542ea7bSGleb Smirnoff 		return;
35209542ea7bSGleb Smirnoff 	if (slab == NULL) {
35219542ea7bSGleb Smirnoff 		slab = uma_dbg_getslab(zone, item);
35229542ea7bSGleb Smirnoff 		if (slab == NULL)
35239542ea7bSGleb Smirnoff 			panic("uma: item %p did not belong to zone %s\n",
35249542ea7bSGleb Smirnoff 			    item, zone->uz_name);
35259542ea7bSGleb Smirnoff 	}
35269542ea7bSGleb Smirnoff 	keg = slab->us_keg;
35279542ea7bSGleb Smirnoff 	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
35289542ea7bSGleb Smirnoff 
35299542ea7bSGleb Smirnoff 	if (BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree))
35309542ea7bSGleb Smirnoff 		panic("Duplicate alloc of %p from zone %p(%s) slab %p(%d)\n",
35319542ea7bSGleb Smirnoff 		    item, zone, zone->uz_name, slab, freei);
35329542ea7bSGleb Smirnoff 	BIT_SET_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree);
35339542ea7bSGleb Smirnoff 
35349542ea7bSGleb Smirnoff 	return;
35359542ea7bSGleb Smirnoff }
35369542ea7bSGleb Smirnoff 
35379542ea7bSGleb Smirnoff /*
35389542ea7bSGleb Smirnoff  * Verifies freed addresses.  Checks for alignment, valid slab membership
35399542ea7bSGleb Smirnoff  * and duplicate frees.
35409542ea7bSGleb Smirnoff  *
35419542ea7bSGleb Smirnoff  */
35429542ea7bSGleb Smirnoff static void
35439542ea7bSGleb Smirnoff uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
35449542ea7bSGleb Smirnoff {
35459542ea7bSGleb Smirnoff 	uma_keg_t keg;
35469542ea7bSGleb Smirnoff 	int freei;
35479542ea7bSGleb Smirnoff 
35489542ea7bSGleb Smirnoff 	if (zone_first_keg(zone) == NULL)
35499542ea7bSGleb Smirnoff 		return;
35509542ea7bSGleb Smirnoff 	if (slab == NULL) {
35519542ea7bSGleb Smirnoff 		slab = uma_dbg_getslab(zone, item);
35529542ea7bSGleb Smirnoff 		if (slab == NULL)
35539542ea7bSGleb Smirnoff 			panic("uma: Freed item %p did not belong to zone %s\n",
35549542ea7bSGleb Smirnoff 			    item, zone->uz_name);
35559542ea7bSGleb Smirnoff 	}
35569542ea7bSGleb Smirnoff 	keg = slab->us_keg;
35579542ea7bSGleb Smirnoff 	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
35589542ea7bSGleb Smirnoff 
35599542ea7bSGleb Smirnoff 	if (freei >= keg->uk_ipers)
35609542ea7bSGleb Smirnoff 		panic("Invalid free of %p from zone %p(%s) slab %p(%d)\n",
35619542ea7bSGleb Smirnoff 		    item, zone, zone->uz_name, slab, freei);
35629542ea7bSGleb Smirnoff 
35639542ea7bSGleb Smirnoff 	if (((freei * keg->uk_rsize) + slab->us_data) != item)
35649542ea7bSGleb Smirnoff 		panic("Unaligned free of %p from zone %p(%s) slab %p(%d)\n",
35659542ea7bSGleb Smirnoff 		    item, zone, zone->uz_name, slab, freei);
35669542ea7bSGleb Smirnoff 
35679542ea7bSGleb Smirnoff 	if (!BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree))
35689542ea7bSGleb Smirnoff 		panic("Duplicate free of %p from zone %p(%s) slab %p(%d)\n",
35699542ea7bSGleb Smirnoff 		    item, zone, zone->uz_name, slab, freei);
35709542ea7bSGleb Smirnoff 
35719542ea7bSGleb Smirnoff 	BIT_CLR_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree);
35729542ea7bSGleb Smirnoff }
35739542ea7bSGleb Smirnoff #endif /* INVARIANTS */
35749542ea7bSGleb Smirnoff 
357548c5777eSRobert Watson #ifdef DDB
357648c5777eSRobert Watson DB_SHOW_COMMAND(uma, db_show_uma)
357748c5777eSRobert Watson {
357885dcf349SGleb Smirnoff 	uint64_t allocs, frees, sleeps;
357948c5777eSRobert Watson 	uma_bucket_t bucket;
358048c5777eSRobert Watson 	uma_keg_t kz;
358148c5777eSRobert Watson 	uma_zone_t z;
358248c5777eSRobert Watson 	int cachefree;
358348c5777eSRobert Watson 
358403175483SAlexander Motin 	db_printf("%18s %8s %8s %8s %12s %8s %8s\n", "Zone", "Size", "Used",
358503175483SAlexander Motin 	    "Free", "Requests", "Sleeps", "Bucket");
358648c5777eSRobert Watson 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
358748c5777eSRobert Watson 		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
358848c5777eSRobert Watson 			if (kz->uk_flags & UMA_ZFLAG_INTERNAL) {
358948c5777eSRobert Watson 				allocs = z->uz_allocs;
359048c5777eSRobert Watson 				frees = z->uz_frees;
3591bf965959SSean Bruno 				sleeps = z->uz_sleeps;
359248c5777eSRobert Watson 				cachefree = 0;
359348c5777eSRobert Watson 			} else
359448c5777eSRobert Watson 				uma_zone_sumstat(z, &cachefree, &allocs,
3595bf965959SSean Bruno 				    &frees, &sleeps);
3596e20a199fSJeff Roberson 			if (!((z->uz_flags & UMA_ZONE_SECONDARY) &&
359748c5777eSRobert Watson 			    (LIST_FIRST(&kz->uk_zones) != z)))
359848c5777eSRobert Watson 				cachefree += kz->uk_free;
3599fc03d22bSJeff Roberson 			LIST_FOREACH(bucket, &z->uz_buckets, ub_link)
360048c5777eSRobert Watson 				cachefree += bucket->ub_cnt;
360103175483SAlexander Motin 			db_printf("%18s %8ju %8jd %8d %12ju %8ju %8u\n",
360203175483SAlexander Motin 			    z->uz_name, (uintmax_t)kz->uk_size,
3603ae4e9636SRobert Watson 			    (intmax_t)(allocs - frees), cachefree,
360403175483SAlexander Motin 			    (uintmax_t)allocs, sleeps, z->uz_count);
3605687c94aaSJohn Baldwin 			if (db_pager_quit)
3606687c94aaSJohn Baldwin 				return;
360748c5777eSRobert Watson 		}
360848c5777eSRobert Watson 	}
360948c5777eSRobert Watson }
361003175483SAlexander Motin 
361103175483SAlexander Motin DB_SHOW_COMMAND(umacache, db_show_umacache)
361203175483SAlexander Motin {
361303175483SAlexander Motin 	uint64_t allocs, frees;
361403175483SAlexander Motin 	uma_bucket_t bucket;
361503175483SAlexander Motin 	uma_zone_t z;
361603175483SAlexander Motin 	int cachefree;
361703175483SAlexander Motin 
361803175483SAlexander Motin 	db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free",
361903175483SAlexander Motin 	    "Requests", "Bucket");
362003175483SAlexander Motin 	LIST_FOREACH(z, &uma_cachezones, uz_link) {
362103175483SAlexander Motin 		uma_zone_sumstat(z, &cachefree, &allocs, &frees, NULL);
362203175483SAlexander Motin 		LIST_FOREACH(bucket, &z->uz_buckets, ub_link)
362303175483SAlexander Motin 			cachefree += bucket->ub_cnt;
362403175483SAlexander Motin 		db_printf("%18s %8ju %8jd %8d %12ju %8u\n",
362503175483SAlexander Motin 		    z->uz_name, (uintmax_t)z->uz_size,
362603175483SAlexander Motin 		    (intmax_t)(allocs - frees), cachefree,
362703175483SAlexander Motin 		    (uintmax_t)allocs, z->uz_count);
362803175483SAlexander Motin 		if (db_pager_quit)
362903175483SAlexander Motin 			return;
363003175483SAlexander Motin 	}
363103175483SAlexander Motin }
36329542ea7bSGleb Smirnoff #endif	/* DDB */
3633