xref: /freebsd/sys/vm/uma_core.c (revision 3d5e3df73f5321725313a2d917407e894ef72a5e)
160727d8bSWarner Losh /*-
2fe267a55SPedro F. Giffuni  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3fe267a55SPedro F. Giffuni  *
4ef72505eSJeff Roberson  * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson <jeff@FreeBSD.org>
508ecce74SRobert Watson  * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
6ae4e9636SRobert Watson  * Copyright (c) 2004-2006 Robert N. M. Watson
708ecce74SRobert Watson  * All rights reserved.
88355f576SJeff Roberson  *
98355f576SJeff Roberson  * Redistribution and use in source and binary forms, with or without
108355f576SJeff Roberson  * modification, are permitted provided that the following conditions
118355f576SJeff Roberson  * are met:
128355f576SJeff Roberson  * 1. Redistributions of source code must retain the above copyright
138355f576SJeff Roberson  *    notice unmodified, this list of conditions, and the following
148355f576SJeff Roberson  *    disclaimer.
158355f576SJeff Roberson  * 2. Redistributions in binary form must reproduce the above copyright
168355f576SJeff Roberson  *    notice, this list of conditions and the following disclaimer in the
178355f576SJeff Roberson  *    documentation and/or other materials provided with the distribution.
188355f576SJeff Roberson  *
198355f576SJeff Roberson  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
208355f576SJeff Roberson  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
218355f576SJeff Roberson  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
228355f576SJeff Roberson  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
238355f576SJeff Roberson  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
248355f576SJeff Roberson  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
258355f576SJeff Roberson  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
268355f576SJeff Roberson  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
278355f576SJeff Roberson  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
288355f576SJeff Roberson  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
298355f576SJeff Roberson  */
308355f576SJeff Roberson 
318355f576SJeff Roberson /*
328355f576SJeff Roberson  * uma_core.c  Implementation of the Universal Memory Allocator
338355f576SJeff Roberson  *
348355f576SJeff Roberson  * This allocator is intended to replace the multitude of similar object caches
358355f576SJeff Roberson  * in the standard FreeBSD kernel.  The intent is to be flexible as well as
36763df3ecSPedro F. Giffuni  * efficient.  A primary design goal is to return unused memory to the rest of
378355f576SJeff Roberson  * the system.  This will make the system as a whole more flexible due to the
388355f576SJeff Roberson  * ability to move memory to subsystems which most need it instead of leaving
398355f576SJeff Roberson  * pools of reserved memory unused.
408355f576SJeff Roberson  *
418355f576SJeff Roberson  * The basic ideas stem from similar slab/zone based allocators whose algorithms
428355f576SJeff Roberson  * are well known.
438355f576SJeff Roberson  *
448355f576SJeff Roberson  */
458355f576SJeff Roberson 
468355f576SJeff Roberson /*
478355f576SJeff Roberson  * TODO:
488355f576SJeff Roberson  *	- Improve memory usage for large allocations
498355f576SJeff Roberson  *	- Investigate cache size adjustments
508355f576SJeff Roberson  */
518355f576SJeff Roberson 
52874651b1SDavid E. O'Brien #include <sys/cdefs.h>
53874651b1SDavid E. O'Brien __FBSDID("$FreeBSD$");
54874651b1SDavid E. O'Brien 
5548c5777eSRobert Watson #include "opt_ddb.h"
568355f576SJeff Roberson #include "opt_param.h"
578d689e04SGleb Smirnoff #include "opt_vm.h"
5848c5777eSRobert Watson 
598355f576SJeff Roberson #include <sys/param.h>
608355f576SJeff Roberson #include <sys/systm.h>
61ef72505eSJeff Roberson #include <sys/bitset.h>
62194a979eSMark Johnston #include <sys/domainset.h>
639b43bc27SAndriy Gapon #include <sys/eventhandler.h>
648355f576SJeff Roberson #include <sys/kernel.h>
658355f576SJeff Roberson #include <sys/types.h>
66ad5b0f5bSJeff Roberson #include <sys/limits.h>
678355f576SJeff Roberson #include <sys/queue.h>
688355f576SJeff Roberson #include <sys/malloc.h>
693659f747SRobert Watson #include <sys/ktr.h>
708355f576SJeff Roberson #include <sys/lock.h>
718355f576SJeff Roberson #include <sys/sysctl.h>
728355f576SJeff Roberson #include <sys/mutex.h>
734c1cc01cSJohn Baldwin #include <sys/proc.h>
7410cb2424SMark Murray #include <sys/random.h>
7589f6b863SAttilio Rao #include <sys/rwlock.h>
767a52a97eSRobert Watson #include <sys/sbuf.h>
77a2de44abSAlexander Motin #include <sys/sched.h>
788355f576SJeff Roberson #include <sys/smp.h>
79e60b2fcbSGleb Smirnoff #include <sys/taskqueue.h>
8086bbae32SJeff Roberson #include <sys/vmmeter.h>
8186bbae32SJeff Roberson 
828355f576SJeff Roberson #include <vm/vm.h>
83194a979eSMark Johnston #include <vm/vm_domainset.h>
848355f576SJeff Roberson #include <vm/vm_object.h>
858355f576SJeff Roberson #include <vm/vm_page.h>
86a4915c21SAttilio Rao #include <vm/vm_pageout.h>
878355f576SJeff Roberson #include <vm/vm_param.h>
88ab3185d1SJeff Roberson #include <vm/vm_phys.h>
8930c5525bSAndrew Gallatin #include <vm/vm_pagequeue.h>
908355f576SJeff Roberson #include <vm/vm_map.h>
918355f576SJeff Roberson #include <vm/vm_kern.h>
928355f576SJeff Roberson #include <vm/vm_extern.h>
938355f576SJeff Roberson #include <vm/uma.h>
948355f576SJeff Roberson #include <vm/uma_int.h>
95639c9550SJeff Roberson #include <vm/uma_dbg.h>
968355f576SJeff Roberson 
9748c5777eSRobert Watson #include <ddb/ddb.h>
9848c5777eSRobert Watson 
998d689e04SGleb Smirnoff #ifdef DEBUG_MEMGUARD
1008d689e04SGleb Smirnoff #include <vm/memguard.h>
1018d689e04SGleb Smirnoff #endif
1028d689e04SGleb Smirnoff 
1038355f576SJeff Roberson /*
104ab3185d1SJeff Roberson  * These are the zones from which all other kegs and zones are spawned.
1058355f576SJeff Roberson  */
106ab3185d1SJeff Roberson static uma_zone_t kegs;
107ab3185d1SJeff Roberson static uma_zone_t zones;
1088355f576SJeff Roberson 
109ab3185d1SJeff Roberson /* This is the zone from which all offpage uma_slab_ts are allocated. */
1108355f576SJeff Roberson static uma_zone_t slabzone;
1118355f576SJeff Roberson 
1128355f576SJeff Roberson /*
1138355f576SJeff Roberson  * The initial hash tables come out of this zone so they can be allocated
1148355f576SJeff Roberson  * prior to malloc coming up.
1158355f576SJeff Roberson  */
1168355f576SJeff Roberson static uma_zone_t hashzone;
1178355f576SJeff Roberson 
1181e319f6dSRobert Watson /* The boot-time adjusted value for cache line alignment. */
119e4cd31ddSJeff Roberson int uma_align_cache = 64 - 1;
1201e319f6dSRobert Watson 
121961647dfSJeff Roberson static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");
122961647dfSJeff Roberson 
1238355f576SJeff Roberson /*
12486bbae32SJeff Roberson  * Are we allowed to allocate buckets?
12586bbae32SJeff Roberson  */
12686bbae32SJeff Roberson static int bucketdisable = 1;
12786bbae32SJeff Roberson 
128099a0e58SBosko Milekic /* Linked list of all kegs in the system */
12913e403fdSAntoine Brodin static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);
1308355f576SJeff Roberson 
13103175483SAlexander Motin /* Linked list of all cache-only zones in the system */
13203175483SAlexander Motin static LIST_HEAD(,uma_zone) uma_cachezones =
13303175483SAlexander Motin     LIST_HEAD_INITIALIZER(uma_cachezones);
13403175483SAlexander Motin 
135111fbcd5SBryan Venteicher /* This RW lock protects the keg list */
136fe933c1dSMateusz Guzik static struct rwlock_padalign __exclusive_cache_line uma_rwlock;
1378355f576SJeff Roberson 
138ac0a6fd0SGleb Smirnoff /*
139ac0a6fd0SGleb Smirnoff  * Pointer to, and count of, the pool of pages that is preallocated at
140f7d35785SGleb Smirnoff  * startup to bootstrap UMA.
141ac0a6fd0SGleb Smirnoff  */
142ac0a6fd0SGleb Smirnoff static char *bootmem;
143ac0a6fd0SGleb Smirnoff static int boot_pages;
1448355f576SJeff Roberson 
14595c4bf75SKonstantin Belousov static struct sx uma_drain_lock;
14695c4bf75SKonstantin Belousov 
1472e47807cSJeff Roberson /* kmem soft limit. */
148ad5b0f5bSJeff Roberson static unsigned long uma_kmem_limit = LONG_MAX;
1492e47807cSJeff Roberson static volatile unsigned long uma_kmem_total;
1502e47807cSJeff Roberson 
1518355f576SJeff Roberson /* Is the VM done starting up? */
152f4bef67cSGleb Smirnoff static enum { BOOT_COLD = 0, BOOT_STRAPPED, BOOT_PAGEALLOC, BOOT_BUCKETS,
153f4bef67cSGleb Smirnoff     BOOT_RUNNING } booted = BOOT_COLD;
1548355f576SJeff Roberson 
155ef72505eSJeff Roberson /*
1569643769aSJeff Roberson  * This is the handle used to schedule events that need to happen
1579643769aSJeff Roberson  * outside of the allocation fast path.
1589643769aSJeff Roberson  */
1598355f576SJeff Roberson static struct callout uma_callout;
1609643769aSJeff Roberson #define	UMA_TIMEOUT	20		/* Seconds for callout interval. */
1618355f576SJeff Roberson 
1628355f576SJeff Roberson /*
1638355f576SJeff Roberson  * This structure is passed as the zone ctor arg so that I don't have to create
1648355f576SJeff Roberson  * a special allocation function just for zones.
1658355f576SJeff Roberson  */
1668355f576SJeff Roberson struct uma_zctor_args {
167bb196eb4SMatthew D Fleming 	const char *name;
168c3bdc05fSAndrew R. Reiter 	size_t size;
1698355f576SJeff Roberson 	uma_ctor ctor;
1708355f576SJeff Roberson 	uma_dtor dtor;
1718355f576SJeff Roberson 	uma_init uminit;
1728355f576SJeff Roberson 	uma_fini fini;
1730095a784SJeff Roberson 	uma_import import;
1740095a784SJeff Roberson 	uma_release release;
1750095a784SJeff Roberson 	void *arg;
176099a0e58SBosko Milekic 	uma_keg_t keg;
177099a0e58SBosko Milekic 	int align;
17885dcf349SGleb Smirnoff 	uint32_t flags;
179099a0e58SBosko Milekic };
180099a0e58SBosko Milekic 
181099a0e58SBosko Milekic struct uma_kctor_args {
182099a0e58SBosko Milekic 	uma_zone_t zone;
183099a0e58SBosko Milekic 	size_t size;
184099a0e58SBosko Milekic 	uma_init uminit;
185099a0e58SBosko Milekic 	uma_fini fini;
1868355f576SJeff Roberson 	int align;
18785dcf349SGleb Smirnoff 	uint32_t flags;
1888355f576SJeff Roberson };
1898355f576SJeff Roberson 
190cae33c14SJeff Roberson struct uma_bucket_zone {
191cae33c14SJeff Roberson 	uma_zone_t	ubz_zone;
192cae33c14SJeff Roberson 	char		*ubz_name;
193fc03d22bSJeff Roberson 	int		ubz_entries;	/* Number of items it can hold. */
194fc03d22bSJeff Roberson 	int		ubz_maxsize;	/* Maximum allocation size per-item. */
195cae33c14SJeff Roberson };
196cae33c14SJeff Roberson 
197f9d27e75SRobert Watson /*
198fc03d22bSJeff Roberson  * Compute the actual number of bucket entries so that buckets pack into
199fc03d22bSJeff Roberson  * power-of-two allocation sizes for more efficient space utilization.
200f9d27e75SRobert Watson  */
201fc03d22bSJeff Roberson #define	BUCKET_SIZE(n)						\
202fc03d22bSJeff Roberson     (((sizeof(void *) * (n)) - sizeof(struct uma_bucket)) / sizeof(void *))
203fc03d22bSJeff Roberson 
2041aa6c758SAlexander Motin #define	BUCKET_MAX	BUCKET_SIZE(256)
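
/*
 * Worked example (illustrative; assumes an LP64 platform where the
 * struct uma_bucket header occupies 24 bytes): BUCKET_SIZE(128) yields
 * ((8 * 128) - 24) / 8 = 125 entries, and bucket_init() below then sizes
 * the items of the "128 Bucket" zone at roundup(24, 8) + 8 * 125 = 1024
 * bytes, so each bucket fills a power-of-two allocation exactly.
 */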
205fc03d22bSJeff Roberson 
206fc03d22bSJeff Roberson struct uma_bucket_zone bucket_zones[] = {
2076fd34d6fSJeff Roberson 	{ NULL, "4 Bucket", BUCKET_SIZE(4), 4096 },
208f3932e90SAlexander Motin 	{ NULL, "6 Bucket", BUCKET_SIZE(6), 3072 },
2096fd34d6fSJeff Roberson 	{ NULL, "8 Bucket", BUCKET_SIZE(8), 2048 },
210f3932e90SAlexander Motin 	{ NULL, "12 Bucket", BUCKET_SIZE(12), 1536 },
2116fd34d6fSJeff Roberson 	{ NULL, "16 Bucket", BUCKET_SIZE(16), 1024 },
212fc03d22bSJeff Roberson 	{ NULL, "32 Bucket", BUCKET_SIZE(32), 512 },
213fc03d22bSJeff Roberson 	{ NULL, "64 Bucket", BUCKET_SIZE(64), 256 },
214fc03d22bSJeff Roberson 	{ NULL, "128 Bucket", BUCKET_SIZE(128), 128 },
2151aa6c758SAlexander Motin 	{ NULL, "256 Bucket", BUCKET_SIZE(256), 64 },
216fc03d22bSJeff Roberson 	{ NULL, NULL, 0}
217fc03d22bSJeff Roberson };
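
/*
 * The table above must stay sorted by ascending ubz_entries and
 * descending ubz_maxsize; bucket_zone_lookup() and bucket_select() below
 * both rely on that ordering when scanning it.
 */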
218cae33c14SJeff Roberson 
2192019094aSRobert Watson /*
2202019094aSRobert Watson  * Flags and enumerations to be passed to internal functions.
2212019094aSRobert Watson  */
222ef72505eSJeff Roberson enum zfreeskip { SKIP_NONE = 0, SKIP_DTOR, SKIP_FINI };
223b23f72e9SBrian Feldman 
224ab3185d1SJeff Roberson #define	UMA_ANYDOMAIN	-1	/* Special value for domain search. */
225ab3185d1SJeff Roberson 
2268355f576SJeff Roberson /* Prototypes. */
2278355f576SJeff Roberson 
228f4bef67cSGleb Smirnoff int	uma_startup_count(int);
229f4bef67cSGleb Smirnoff void	uma_startup(void *, int);
230f4bef67cSGleb Smirnoff void	uma_startup1(void);
231f4bef67cSGleb Smirnoff void	uma_startup2(void);
232f4bef67cSGleb Smirnoff 
233ab3185d1SJeff Roberson static void *noobj_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
234ab3185d1SJeff Roberson static void *page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
235ab3059a8SMatt Macy static void *pcpu_page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
236ab3185d1SJeff Roberson static void *startup_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
237f2c2231eSRyan Stone static void page_free(void *, vm_size_t, uint8_t);
238ab3059a8SMatt Macy static void pcpu_page_free(void *, vm_size_t, uint8_t);
239ab3185d1SJeff Roberson static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int, int);
2409643769aSJeff Roberson static void cache_drain(uma_zone_t);
2418355f576SJeff Roberson static void bucket_drain(uma_zone_t, uma_bucket_t);
242aaa8bb16SJeff Roberson static void bucket_cache_drain(uma_zone_t zone);
243b23f72e9SBrian Feldman static int keg_ctor(void *, int, void *, int);
244099a0e58SBosko Milekic static void keg_dtor(void *, int, void *);
245b23f72e9SBrian Feldman static int zone_ctor(void *, int, void *, int);
2469c2cd7e5SJeff Roberson static void zone_dtor(void *, int, void *);
247b23f72e9SBrian Feldman static int zero_init(void *, int, int);
248e20a199fSJeff Roberson static void keg_small_init(uma_keg_t keg);
249e20a199fSJeff Roberson static void keg_large_init(uma_keg_t keg);
2508355f576SJeff Roberson static void zone_foreach(void (*zfunc)(uma_zone_t));
2518355f576SJeff Roberson static void zone_timeout(uma_zone_t zone);
2520aef6126SJeff Roberson static int hash_alloc(struct uma_hash *);
2530aef6126SJeff Roberson static int hash_expand(struct uma_hash *, struct uma_hash *);
2540aef6126SJeff Roberson static void hash_free(struct uma_hash *hash);
2558355f576SJeff Roberson static void uma_timeout(void *);
2568355f576SJeff Roberson static void uma_startup3(void);
257ab3185d1SJeff Roberson static void *zone_alloc_item(uma_zone_t, void *, int, int);
2580095a784SJeff Roberson static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip);
25986bbae32SJeff Roberson static void bucket_enable(void);
260cae33c14SJeff Roberson static void bucket_init(void);
2616fd34d6fSJeff Roberson static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int);
2626fd34d6fSJeff Roberson static void bucket_free(uma_zone_t zone, uma_bucket_t, void *);
263cae33c14SJeff Roberson static void bucket_zone_drain(void);
264ab3185d1SJeff Roberson static uma_bucket_t zone_alloc_bucket(uma_zone_t, void *, int, int);
265ab3185d1SJeff Roberson static uma_slab_t zone_fetch_slab(uma_zone_t, uma_keg_t, int, int);
266ab3185d1SJeff Roberson static uma_slab_t zone_fetch_slab_multi(uma_zone_t, uma_keg_t, int, int);
2670095a784SJeff Roberson static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
2680095a784SJeff Roberson static void slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item);
269e20a199fSJeff Roberson static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
27085dcf349SGleb Smirnoff     uma_fini fini, int align, uint32_t flags);
271ab3185d1SJeff Roberson static int zone_import(uma_zone_t, void **, int, int, int);
272ab3185d1SJeff Roberson static void zone_release(uma_zone_t, void **, int);
273ab3185d1SJeff Roberson static void uma_zero_item(void *, uma_zone_t);
274bbee39c6SJeff Roberson 
2758355f576SJeff Roberson void uma_print_zone(uma_zone_t);
2768355f576SJeff Roberson void uma_print_stats(void);
2777a52a97eSRobert Watson static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
2787a52a97eSRobert Watson static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);
2798355f576SJeff Roberson 
2809542ea7bSGleb Smirnoff #ifdef INVARIANTS
281c5deaf04SGleb Smirnoff static bool uma_dbg_kskip(uma_keg_t keg, void *mem);
282c5deaf04SGleb Smirnoff static bool uma_dbg_zskip(uma_zone_t zone, void *mem);
2839542ea7bSGleb Smirnoff static void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item);
2849542ea7bSGleb Smirnoff static void uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item);
285c5deaf04SGleb Smirnoff 
286c5deaf04SGleb Smirnoff static SYSCTL_NODE(_vm, OID_AUTO, debug, CTLFLAG_RD, 0,
287c5deaf04SGleb Smirnoff     "Memory allocation debugging");
288c5deaf04SGleb Smirnoff 
289c5deaf04SGleb Smirnoff static u_int dbg_divisor = 1;
290c5deaf04SGleb Smirnoff SYSCTL_UINT(_vm_debug, OID_AUTO, divisor,
291c5deaf04SGleb Smirnoff     CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &dbg_divisor, 0,
292c5deaf04SGleb Smirnoff     "Debug & thrash every nth item in the memory allocator");
293c5deaf04SGleb Smirnoff 
294c5deaf04SGleb Smirnoff static counter_u64_t uma_dbg_cnt = EARLY_COUNTER;
295c5deaf04SGleb Smirnoff static counter_u64_t uma_skip_cnt = EARLY_COUNTER;
296c5deaf04SGleb Smirnoff SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, trashed, CTLFLAG_RD,
297c5deaf04SGleb Smirnoff     &uma_dbg_cnt, "memory items debugged");
298c5deaf04SGleb Smirnoff SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, skipped, CTLFLAG_RD,
299c5deaf04SGleb Smirnoff     &uma_skip_cnt, "memory items skipped, not debugged");
3009542ea7bSGleb Smirnoff #endif
3019542ea7bSGleb Smirnoff 
3028355f576SJeff Roberson SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);
3038355f576SJeff Roberson 
3047a52a97eSRobert Watson SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLTYPE_INT,
3057a52a97eSRobert Watson     0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones");
3067a52a97eSRobert Watson 
3077a52a97eSRobert Watson SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
3087a52a97eSRobert Watson     0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");
3097a52a97eSRobert Watson 
3102f891cd5SPawel Jakub Dawidek static int zone_warnings = 1;
311af3b2549SHans Petter Selasky SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RWTUN, &zone_warnings, 0,
3122f891cd5SPawel Jakub Dawidek     "Warn when UMA zones become full");
3132f891cd5SPawel Jakub Dawidek 
3142e47807cSJeff Roberson /* Adjust bytes under management by UMA. */
3152e47807cSJeff Roberson static inline void
3162e47807cSJeff Roberson uma_total_dec(unsigned long size)
3172e47807cSJeff Roberson {
3182e47807cSJeff Roberson 
3192e47807cSJeff Roberson 	atomic_subtract_long(&uma_kmem_total, size);
3202e47807cSJeff Roberson }
3212e47807cSJeff Roberson 
3222e47807cSJeff Roberson static inline void
3232e47807cSJeff Roberson uma_total_inc(unsigned long size)
3242e47807cSJeff Roberson {
3252e47807cSJeff Roberson 
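	/*
	 * atomic_fetchadd_long() returns the pre-addition value, so the
	 * wakeup below fires once earlier allocations have already pushed
	 * the total past the soft limit.  That one-allocation lag is
	 * harmless for a soft limit and avoids re-reading the counter.
	 */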
3262e47807cSJeff Roberson 	if (atomic_fetchadd_long(&uma_kmem_total, size) > uma_kmem_limit)
3272e47807cSJeff Roberson 		uma_reclaim_wakeup();
3282e47807cSJeff Roberson }
3292e47807cSJeff Roberson 
33086bbae32SJeff Roberson /*
33186bbae32SJeff Roberson  * This routine checks to see whether or not it's safe to enable buckets.
33286bbae32SJeff Roberson  */
33386bbae32SJeff Roberson static void
33486bbae32SJeff Roberson bucket_enable(void)
33586bbae32SJeff Roberson {
336251386b4SMaksim Yevmenkin 	bucketdisable = vm_page_count_min();
33786bbae32SJeff Roberson }
33886bbae32SJeff Roberson 
339dc2c7965SRobert Watson /*
340dc2c7965SRobert Watson  * Initialize bucket_zones, the array of zones of buckets of various sizes.
341dc2c7965SRobert Watson  *
342dc2c7965SRobert Watson  * For each zone, calculate the memory required for each bucket, consisting
343fc03d22bSJeff Roberson  * of the header and an array of pointers.
344dc2c7965SRobert Watson  */
345cae33c14SJeff Roberson static void
346cae33c14SJeff Roberson bucket_init(void)
347cae33c14SJeff Roberson {
348cae33c14SJeff Roberson 	struct uma_bucket_zone *ubz;
349cae33c14SJeff Roberson 	int size;
350cae33c14SJeff Roberson 
351d74e6a1dSAlan Cox 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) {
352cae33c14SJeff Roberson 		size = roundup(sizeof(struct uma_bucket), sizeof(void *));
353cae33c14SJeff Roberson 		size += sizeof(void *) * ubz->ubz_entries;
354cae33c14SJeff Roberson 		ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
355e20a199fSJeff Roberson 		    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
356ab3185d1SJeff Roberson 		    UMA_ZONE_MTXCLASS | UMA_ZFLAG_BUCKET | UMA_ZONE_NUMA);
357cae33c14SJeff Roberson 	}
358cae33c14SJeff Roberson }
359cae33c14SJeff Roberson 
360dc2c7965SRobert Watson /*
361dc2c7965SRobert Watson  * Given a desired number of entries for a bucket, return the zone from which
362dc2c7965SRobert Watson  * to allocate the bucket.
363dc2c7965SRobert Watson  */
364dc2c7965SRobert Watson static struct uma_bucket_zone *
365dc2c7965SRobert Watson bucket_zone_lookup(int entries)
366dc2c7965SRobert Watson {
367fc03d22bSJeff Roberson 	struct uma_bucket_zone *ubz;
368dc2c7965SRobert Watson 
369fc03d22bSJeff Roberson 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
370fc03d22bSJeff Roberson 		if (ubz->ubz_entries >= entries)
371fc03d22bSJeff Roberson 			return (ubz);
372fc03d22bSJeff Roberson 	ubz--;
373fc03d22bSJeff Roberson 	return (ubz);
374fc03d22bSJeff Roberson }
375fc03d22bSJeff Roberson 
376fc03d22bSJeff Roberson static int
377fc03d22bSJeff Roberson bucket_select(int size)
378fc03d22bSJeff Roberson {
379fc03d22bSJeff Roberson 	struct uma_bucket_zone *ubz;
380fc03d22bSJeff Roberson 
381fc03d22bSJeff Roberson 	ubz = &bucket_zones[0];
382fc03d22bSJeff Roberson 	if (size > ubz->ubz_maxsize)
383fc03d22bSJeff Roberson 		return MAX((ubz->ubz_maxsize * ubz->ubz_entries) / size, 1);
384fc03d22bSJeff Roberson 
385fc03d22bSJeff Roberson 	for (; ubz->ubz_entries != 0; ubz++)
386fc03d22bSJeff Roberson 		if (ubz->ubz_maxsize < size)
387fc03d22bSJeff Roberson 			break;
388fc03d22bSJeff Roberson 	ubz--;
389fc03d22bSJeff Roberson 	return (ubz->ubz_entries);
390dc2c7965SRobert Watson }
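
/*
 * Worked example for the two lookups above (illustrative; assumes the
 * same 24-byte LP64 bucket header as the BUCKET_SIZE() example):
 * bucket_select(1024) walks forward to "32 Bucket", the first zone with
 * ubz_maxsize < 1024, steps back one and returns the "16 Bucket" entry
 * count of BUCKET_SIZE(16) = 13.  Conversely, bucket_zone_lookup(13)
 * returns the "16 Bucket" zone itself, the first zone whose ubz_entries
 * covers the request.
 */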
391dc2c7965SRobert Watson 
392cae33c14SJeff Roberson static uma_bucket_t
3936fd34d6fSJeff Roberson bucket_alloc(uma_zone_t zone, void *udata, int flags)
394cae33c14SJeff Roberson {
395cae33c14SJeff Roberson 	struct uma_bucket_zone *ubz;
396cae33c14SJeff Roberson 	uma_bucket_t bucket;
397cae33c14SJeff Roberson 
398cae33c14SJeff Roberson 	/*
399cae33c14SJeff Roberson 	 * This is to stop us from allocating per cpu buckets while we're
4003803b26bSDag-Erling Smørgrav 	 * running out of vm.boot_pages.  Otherwise, we would exhaust the
401cae33c14SJeff Roberson 	 * boot pages.  This also prevents us from allocating buckets in
402cae33c14SJeff Roberson 	 * low memory situations.
403cae33c14SJeff Roberson 	 */
404cae33c14SJeff Roberson 	if (bucketdisable)
405cae33c14SJeff Roberson 		return (NULL);
4066fd34d6fSJeff Roberson 	/*
4076fd34d6fSJeff Roberson 	 * To limit bucket recursion we store the original zone flags
4086fd34d6fSJeff Roberson 	 * in a cookie passed via zalloc_arg/zfree_arg.  This allows the
4096fd34d6fSJeff Roberson 	 * NOVM flag to persist even through deep recursions.  We also
4106fd34d6fSJeff Roberson 	 * store ZFLAG_BUCKET once we have recursed attempting to allocate
4116fd34d6fSJeff Roberson 	 * a bucket for a bucket zone so we do not allow infinite bucket
4126fd34d6fSJeff Roberson 	 * recursion.  This cookie will even persist to frees of unused
4136fd34d6fSJeff Roberson 	 * buckets via the allocation path or bucket allocations in the
4146fd34d6fSJeff Roberson 	 * free path.
4156fd34d6fSJeff Roberson 	 */
4166fd34d6fSJeff Roberson 	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
4176fd34d6fSJeff Roberson 		udata = (void *)(uintptr_t)zone->uz_flags;
418e8a720feSAlexander Motin 	else {
419e8a720feSAlexander Motin 		if ((uintptr_t)udata & UMA_ZFLAG_BUCKET)
420e8a720feSAlexander Motin 			return (NULL);
4216fd34d6fSJeff Roberson 		udata = (void *)((uintptr_t)udata | UMA_ZFLAG_BUCKET);
422e8a720feSAlexander Motin 	}
4236fd34d6fSJeff Roberson 	if ((uintptr_t)udata & UMA_ZFLAG_CACHEONLY)
424af526374SJeff Roberson 		flags |= M_NOVM;
425af526374SJeff Roberson 	ubz = bucket_zone_lookup(zone->uz_count);
42620d3ab87SAlexander Motin 	if (ubz->ubz_zone == zone && (ubz + 1)->ubz_entries != 0)
42720d3ab87SAlexander Motin 		ubz++;
4286fd34d6fSJeff Roberson 	bucket = uma_zalloc_arg(ubz->ubz_zone, udata, flags);
429cae33c14SJeff Roberson 	if (bucket) {
430cae33c14SJeff Roberson #ifdef INVARIANTS
431cae33c14SJeff Roberson 		bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
432cae33c14SJeff Roberson #endif
433cae33c14SJeff Roberson 		bucket->ub_cnt = 0;
434cae33c14SJeff Roberson 		bucket->ub_entries = ubz->ubz_entries;
435cae33c14SJeff Roberson 	}
436cae33c14SJeff Roberson 
437cae33c14SJeff Roberson 	return (bucket);
438cae33c14SJeff Roberson }
439cae33c14SJeff Roberson 
440cae33c14SJeff Roberson static void
4416fd34d6fSJeff Roberson bucket_free(uma_zone_t zone, uma_bucket_t bucket, void *udata)
442cae33c14SJeff Roberson {
443cae33c14SJeff Roberson 	struct uma_bucket_zone *ubz;
444cae33c14SJeff Roberson 
445fc03d22bSJeff Roberson 	KASSERT(bucket->ub_cnt == 0,
446fc03d22bSJeff Roberson 	    ("bucket_free: Freeing a non free bucket."));
4476fd34d6fSJeff Roberson 	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
4486fd34d6fSJeff Roberson 		udata = (void *)(uintptr_t)zone->uz_flags;
449dc2c7965SRobert Watson 	ubz = bucket_zone_lookup(bucket->ub_entries);
4506fd34d6fSJeff Roberson 	uma_zfree_arg(ubz->ubz_zone, bucket, udata);
451cae33c14SJeff Roberson }
452cae33c14SJeff Roberson 
453cae33c14SJeff Roberson static void
454cae33c14SJeff Roberson bucket_zone_drain(void)
455cae33c14SJeff Roberson {
456cae33c14SJeff Roberson 	struct uma_bucket_zone *ubz;
457cae33c14SJeff Roberson 
458cae33c14SJeff Roberson 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
459cae33c14SJeff Roberson 		zone_drain(ubz->ubz_zone);
460cae33c14SJeff Roberson }
461cae33c14SJeff Roberson 
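/*
 * Fetch the first bucket, if any, from a per-domain bucket queue.  "ws"
 * selects whether the dequeue also lowers the working-set minimum
 * watermark (uzd_imin), which zone_domain_update_wss() samples later.
 */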
4620f9b7bf3SMark Johnston static uma_bucket_t
4630f9b7bf3SMark Johnston zone_try_fetch_bucket(uma_zone_t zone, uma_zone_domain_t zdom, const bool ws)
4640f9b7bf3SMark Johnston {
4650f9b7bf3SMark Johnston 	uma_bucket_t bucket;
4660f9b7bf3SMark Johnston 
4670f9b7bf3SMark Johnston 	ZONE_LOCK_ASSERT(zone);
4680f9b7bf3SMark Johnston 
4690f9b7bf3SMark Johnston 	if ((bucket = LIST_FIRST(&zdom->uzd_buckets)) != NULL) {
4700f9b7bf3SMark Johnston 		MPASS(zdom->uzd_nitems >= bucket->ub_cnt);
4710f9b7bf3SMark Johnston 		LIST_REMOVE(bucket, ub_link);
4720f9b7bf3SMark Johnston 		zdom->uzd_nitems -= bucket->ub_cnt;
4730f9b7bf3SMark Johnston 		if (ws && zdom->uzd_imin > zdom->uzd_nitems)
4740f9b7bf3SMark Johnston 			zdom->uzd_imin = zdom->uzd_nitems;
4750f9b7bf3SMark Johnston 	}
4760f9b7bf3SMark Johnston 	return (bucket);
4770f9b7bf3SMark Johnston }
4780f9b7bf3SMark Johnston 
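/*
 * Queue a bucket onto a per-domain bucket queue, raising the working-set
 * maximum watermark (uzd_imax) when "ws" requests watermark tracking.
 */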
4790f9b7bf3SMark Johnston static void
4800f9b7bf3SMark Johnston zone_put_bucket(uma_zone_t zone, uma_zone_domain_t zdom, uma_bucket_t bucket,
4810f9b7bf3SMark Johnston     const bool ws)
4820f9b7bf3SMark Johnston {
4830f9b7bf3SMark Johnston 
4840f9b7bf3SMark Johnston 	ZONE_LOCK_ASSERT(zone);
4850f9b7bf3SMark Johnston 
4860f9b7bf3SMark Johnston 	LIST_INSERT_HEAD(&zdom->uzd_buckets, bucket, ub_link);
4870f9b7bf3SMark Johnston 	zdom->uzd_nitems += bucket->ub_cnt;
4880f9b7bf3SMark Johnston 	if (ws && zdom->uzd_imax < zdom->uzd_nitems)
4890f9b7bf3SMark Johnston 		zdom->uzd_imax = zdom->uzd_nitems;
4900f9b7bf3SMark Johnston }
4910f9b7bf3SMark Johnston 
4922f891cd5SPawel Jakub Dawidek static void
4932f891cd5SPawel Jakub Dawidek zone_log_warning(uma_zone_t zone)
4942f891cd5SPawel Jakub Dawidek {
4952f891cd5SPawel Jakub Dawidek 	static const struct timeval warninterval = { 300, 0 };
4962f891cd5SPawel Jakub Dawidek 
4972f891cd5SPawel Jakub Dawidek 	if (!zone_warnings || zone->uz_warning == NULL)
4982f891cd5SPawel Jakub Dawidek 		return;
4992f891cd5SPawel Jakub Dawidek 
5002f891cd5SPawel Jakub Dawidek 	if (ratecheck(&zone->uz_ratecheck, &warninterval))
5012f891cd5SPawel Jakub Dawidek 		printf("[zone: %s] %s\n", zone->uz_name, zone->uz_warning);
5022f891cd5SPawel Jakub Dawidek }
5032f891cd5SPawel Jakub Dawidek 
50454503a13SJonathan T. Looney static inline void
50554503a13SJonathan T. Looney zone_maxaction(uma_zone_t zone)
50654503a13SJonathan T. Looney {
507e60b2fcbSGleb Smirnoff 
508e60b2fcbSGleb Smirnoff 	if (zone->uz_maxaction.ta_func != NULL)
509e60b2fcbSGleb Smirnoff 		taskqueue_enqueue(taskqueue_thread, &zone->uz_maxaction);
51054503a13SJonathan T. Looney }
51154503a13SJonathan T. Looney 
512e20a199fSJeff Roberson static void
513e20a199fSJeff Roberson zone_foreach_keg(uma_zone_t zone, void (*kegfn)(uma_keg_t))
514e20a199fSJeff Roberson {
515e20a199fSJeff Roberson 	uma_klink_t klink;
516e20a199fSJeff Roberson 
517e20a199fSJeff Roberson 	LIST_FOREACH(klink, &zone->uz_kegs, kl_link)
518e20a199fSJeff Roberson 		kegfn(klink->kl_keg);
519e20a199fSJeff Roberson }
5208355f576SJeff Roberson 
5218355f576SJeff Roberson /*
5228355f576SJeff Roberson  * Routine called by the timeout mechanism which is used to fire off some
5239643769aSJeff Roberson  * time-interval-based calculations (stats, hash size, etc.).
5248355f576SJeff Roberson  *
5258355f576SJeff Roberson  * Arguments:
5268355f576SJeff Roberson  *	arg   Unused
5278355f576SJeff Roberson  *
5288355f576SJeff Roberson  * Returns:
5298355f576SJeff Roberson  *	Nothing
5308355f576SJeff Roberson  */
5318355f576SJeff Roberson static void
5328355f576SJeff Roberson uma_timeout(void *unused)
5338355f576SJeff Roberson {
53486bbae32SJeff Roberson 	bucket_enable();
5358355f576SJeff Roberson 	zone_foreach(zone_timeout);
5368355f576SJeff Roberson 
5378355f576SJeff Roberson 	/* Reschedule this event */
5389643769aSJeff Roberson 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
5398355f576SJeff Roberson }
5408355f576SJeff Roberson 
5418355f576SJeff Roberson /*
5420f9b7bf3SMark Johnston  * Update the working set size estimate for the zone's bucket cache.
5430f9b7bf3SMark Johnston  * The constants chosen here are somewhat arbitrary.  With an update period of
5440f9b7bf3SMark Johnston  * 20s (UMA_TIMEOUT), this estimate is dominated by zone activity over the
5450f9b7bf3SMark Johnston  * last 100s.
5460f9b7bf3SMark Johnston  */
5470f9b7bf3SMark Johnston static void
5480f9b7bf3SMark Johnston zone_domain_update_wss(uma_zone_domain_t zdom)
5490f9b7bf3SMark Johnston {
5500f9b7bf3SMark Johnston 	long wss;
5510f9b7bf3SMark Johnston 
5520f9b7bf3SMark Johnston 	MPASS(zdom->uzd_imax >= zdom->uzd_imin);
5530f9b7bf3SMark Johnston 	wss = zdom->uzd_imax - zdom->uzd_imin;
5540f9b7bf3SMark Johnston 	zdom->uzd_imax = zdom->uzd_imin = zdom->uzd_nitems;
5550f9b7bf3SMark Johnston 	zdom->uzd_wss = (3 * wss + 2 * zdom->uzd_wss) / 5;
5560f9b7bf3SMark Johnston }
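
/*
 * To see the 100s horizon quoted above: each update keeps 2/5 of the
 * previous estimate, so a peak observed n periods ago contributes
 * (2/5)^n of its original weight.  After five 20-second periods that is
 * (2/5)^5 ~= 1%, which is why the estimate is dominated by the last 100s.
 */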
5570f9b7bf3SMark Johnston 
5580f9b7bf3SMark Johnston /*
5599643769aSJeff Roberson  * Routine to perform timeout driven calculations.  This expands the
5609643769aSJeff Roberson  * hashes and does per cpu statistics aggregation.
5618355f576SJeff Roberson  *
562e20a199fSJeff Roberson  *  Returns nothing.
5638355f576SJeff Roberson  */
5648355f576SJeff Roberson static void
565e20a199fSJeff Roberson keg_timeout(uma_keg_t keg)
5668355f576SJeff Roberson {
5678355f576SJeff Roberson 
568e20a199fSJeff Roberson 	KEG_LOCK(keg);
5698355f576SJeff Roberson 	/*
570e20a199fSJeff Roberson 	 * Expand the keg hash table.
5718355f576SJeff Roberson 	 *
5728355f576SJeff Roberson 	 * This is done if the number of slabs is larger than the hash size.
5738355f576SJeff Roberson 	 * What I'm trying to do here is eliminate collisions entirely.  This
5748355f576SJeff Roberson 	 * may be a little aggressive.  Should I allow for two collisions max?
5758355f576SJeff Roberson 	 */
576099a0e58SBosko Milekic 	if (keg->uk_flags & UMA_ZONE_HASH &&
577099a0e58SBosko Milekic 	    keg->uk_pages / keg->uk_ppera >= keg->uk_hash.uh_hashsize) {
5780aef6126SJeff Roberson 		struct uma_hash newhash;
5790aef6126SJeff Roberson 		struct uma_hash oldhash;
5800aef6126SJeff Roberson 		int ret;
5815300d9ddSJeff Roberson 
5820aef6126SJeff Roberson 		/*
5830aef6126SJeff Roberson 		 * This is so involved because allocating and freeing
584e20a199fSJeff Roberson 		 * while the keg lock is held will lead to deadlock.
5850aef6126SJeff Roberson 		 * I have to do everything in stages and check for
5860aef6126SJeff Roberson 		 * races.
5870aef6126SJeff Roberson 		 */
588099a0e58SBosko Milekic 		newhash = keg->uk_hash;
589e20a199fSJeff Roberson 		KEG_UNLOCK(keg);
5900aef6126SJeff Roberson 		ret = hash_alloc(&newhash);
591e20a199fSJeff Roberson 		KEG_LOCK(keg);
5920aef6126SJeff Roberson 		if (ret) {
593099a0e58SBosko Milekic 			if (hash_expand(&keg->uk_hash, &newhash)) {
594099a0e58SBosko Milekic 				oldhash = keg->uk_hash;
595099a0e58SBosko Milekic 				keg->uk_hash = newhash;
5960aef6126SJeff Roberson 			} else
5970aef6126SJeff Roberson 				oldhash = newhash;
5980aef6126SJeff Roberson 
599e20a199fSJeff Roberson 			KEG_UNLOCK(keg);
6000aef6126SJeff Roberson 			hash_free(&oldhash);
601a1dff920SDavide Italiano 			return;
6020aef6126SJeff Roberson 		}
6035300d9ddSJeff Roberson 	}
604e20a199fSJeff Roberson 	KEG_UNLOCK(keg);
605e20a199fSJeff Roberson }
606e20a199fSJeff Roberson 
607e20a199fSJeff Roberson static void
608e20a199fSJeff Roberson zone_timeout(uma_zone_t zone)
609e20a199fSJeff Roberson {
6100f9b7bf3SMark Johnston 	int i;
611e20a199fSJeff Roberson 
612e20a199fSJeff Roberson 	zone_foreach_keg(zone, &keg_timeout);
6130f9b7bf3SMark Johnston 
6140f9b7bf3SMark Johnston 	ZONE_LOCK(zone);
6150f9b7bf3SMark Johnston 	for (i = 0; i < vm_ndomains; i++)
6160f9b7bf3SMark Johnston 		zone_domain_update_wss(&zone->uz_domain[i]);
6170f9b7bf3SMark Johnston 	ZONE_UNLOCK(zone);
6188355f576SJeff Roberson }
6198355f576SJeff Roberson 
6208355f576SJeff Roberson /*
6215300d9ddSJeff Roberson  * Allocate and zero fill the next sized hash table from the appropriate
6225300d9ddSJeff Roberson  * backing store.
6235300d9ddSJeff Roberson  *
6245300d9ddSJeff Roberson  * Arguments:
6250aef6126SJeff Roberson  *	hash  A new hash structure with the old hash size in uh_hashsize
6265300d9ddSJeff Roberson  *
6275300d9ddSJeff Roberson  * Returns:
628763df3ecSPedro F. Giffuni  *	1 on success and 0 on failure.
6295300d9ddSJeff Roberson  */
63037c84183SPoul-Henning Kamp static int
6310aef6126SJeff Roberson hash_alloc(struct uma_hash *hash)
6325300d9ddSJeff Roberson {
6330aef6126SJeff Roberson 	int oldsize;
6345300d9ddSJeff Roberson 	int alloc;
6355300d9ddSJeff Roberson 
6360aef6126SJeff Roberson 	oldsize = hash->uh_hashsize;
6370aef6126SJeff Roberson 
6385300d9ddSJeff Roberson 	/* We're just going to go to a power of two greater */
6390aef6126SJeff Roberson 	if (oldsize)  {
6400aef6126SJeff Roberson 		hash->uh_hashsize = oldsize * 2;
6410aef6126SJeff Roberson 		alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
6420aef6126SJeff Roberson 		hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
643961647dfSJeff Roberson 		    M_UMAHASH, M_NOWAIT);
6445300d9ddSJeff Roberson 	} else {
6450aef6126SJeff Roberson 		alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
646e20a199fSJeff Roberson 		hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
647ab3185d1SJeff Roberson 		    UMA_ANYDOMAIN, M_WAITOK);
6480aef6126SJeff Roberson 		hash->uh_hashsize = UMA_HASH_SIZE_INIT;
6495300d9ddSJeff Roberson 	}
6500aef6126SJeff Roberson 	if (hash->uh_slab_hash) {
6510aef6126SJeff Roberson 		bzero(hash->uh_slab_hash, alloc);
6520aef6126SJeff Roberson 		hash->uh_hashmask = hash->uh_hashsize - 1;
6530aef6126SJeff Roberson 		return (1);
6540aef6126SJeff Roberson 	}
6555300d9ddSJeff Roberson 
6560aef6126SJeff Roberson 	return (0);
6575300d9ddSJeff Roberson }
6585300d9ddSJeff Roberson 
6595300d9ddSJeff Roberson /*
66064f051e9SJeff Roberson  * Expands the hash table for HASH zones.  This is done from zone_timeout
66164f051e9SJeff Roberson  * to reduce collisions.  This must not be done in the regular allocation
66264f051e9SJeff Roberson  * path; otherwise, we can recurse on the VM while allocating pages.
6638355f576SJeff Roberson  *
6648355f576SJeff Roberson  * Arguments:
6650aef6126SJeff Roberson  *	oldhash  The hash you want to expand
6660aef6126SJeff Roberson  *	newhash  The hash structure for the new table
6678355f576SJeff Roberson  *
6688355f576SJeff Roberson  * Returns:
6698355f576SJeff Roberson  *	1 on success and 0 on failure.
6708355f576SJeff Roberson  *
6718355f576SJeff Roberson  * Discussion:
 *	The caller is responsible for freeing whichever table ends up unused
 *	after a successful expansion (see keg_timeout()).
6728355f576SJeff Roberson  */
6730aef6126SJeff Roberson static int
6740aef6126SJeff Roberson hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
6758355f576SJeff Roberson {
6768355f576SJeff Roberson 	uma_slab_t slab;
6778355f576SJeff Roberson 	int hval;
6788355f576SJeff Roberson 	int i;
6798355f576SJeff Roberson 
6800aef6126SJeff Roberson 	if (!newhash->uh_slab_hash)
6810aef6126SJeff Roberson 		return (0);
6828355f576SJeff Roberson 
6830aef6126SJeff Roberson 	if (oldhash->uh_hashsize >= newhash->uh_hashsize)
6840aef6126SJeff Roberson 		return (0);
6858355f576SJeff Roberson 
6868355f576SJeff Roberson 	/*
6878355f576SJeff Roberson 	 * I need to investigate hash algorithms for resizing without a
6888355f576SJeff Roberson 	 * full rehash.
6898355f576SJeff Roberson 	 */
6908355f576SJeff Roberson 
6910aef6126SJeff Roberson 	for (i = 0; i < oldhash->uh_hashsize; i++)
6920aef6126SJeff Roberson 		while (!SLIST_EMPTY(&oldhash->uh_slab_hash[i])) {
6930aef6126SJeff Roberson 			slab = SLIST_FIRST(&oldhash->uh_slab_hash[i]);
6940aef6126SJeff Roberson 			SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[i], us_hlink);
6950aef6126SJeff Roberson 			hval = UMA_HASH(newhash, slab->us_data);
6960aef6126SJeff Roberson 			SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
6970aef6126SJeff Roberson 			    slab, us_hlink);
6988355f576SJeff Roberson 		}
6998355f576SJeff Roberson 
7000aef6126SJeff Roberson 	return (1);
7019c2cd7e5SJeff Roberson }
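
/*
 * Because hash sizes are powers of two and uh_hashmask is uh_hashsize - 1,
 * UMA_HASH() effectively masks bits of the slab's data address.  Doubling
 * the table widens that mask by one bit, and the new bit changes the
 * bucket assignment of roughly half of the entries, so the full rehash
 * above is the simple, correct answer (cf. the note about resizing
 * without a full rehash).
 */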
7029c2cd7e5SJeff Roberson 
7035300d9ddSJeff Roberson /*
7045300d9ddSJeff Roberson  * Free the hash table to the appropriate backing store.
7055300d9ddSJeff Roberson  *
7065300d9ddSJeff Roberson  * Arguments:
7075300d9ddSJeff Roberson  *	hash  The hash structure whose table we're freeing; tables of the
7085300d9ddSJeff Roberson  *	      initial size return to hashzone, larger ones to malloc(9)
7095300d9ddSJeff Roberson  *
7105300d9ddSJeff Roberson  * Returns:
7115300d9ddSJeff Roberson  *	Nothing
7125300d9ddSJeff Roberson  */
7139c2cd7e5SJeff Roberson static void
7140aef6126SJeff Roberson hash_free(struct uma_hash *hash)
7159c2cd7e5SJeff Roberson {
7160aef6126SJeff Roberson 	if (hash->uh_slab_hash == NULL)
7170aef6126SJeff Roberson 		return;
7180aef6126SJeff Roberson 	if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
7190095a784SJeff Roberson 		zone_free_item(hashzone, hash->uh_slab_hash, NULL, SKIP_NONE);
7208355f576SJeff Roberson 	else
721961647dfSJeff Roberson 		free(hash->uh_slab_hash, M_UMAHASH);
7228355f576SJeff Roberson }
7238355f576SJeff Roberson 
7248355f576SJeff Roberson /*
7258355f576SJeff Roberson  * Frees all outstanding items in a bucket
7268355f576SJeff Roberson  *
7278355f576SJeff Roberson  * Arguments:
7288355f576SJeff Roberson  *	zone   The zone to free to, must be unlocked.
7298355f576SJeff Roberson  *	bucket The free/alloc bucket with items, cpu queue must be locked.
7308355f576SJeff Roberson  *
7318355f576SJeff Roberson  * Returns:
7328355f576SJeff Roberson  *	Nothing
7338355f576SJeff Roberson  */
7348355f576SJeff Roberson 
7358355f576SJeff Roberson static void
7368355f576SJeff Roberson bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
7378355f576SJeff Roberson {
7380095a784SJeff Roberson 	int i;
7398355f576SJeff Roberson 
7408355f576SJeff Roberson 	if (bucket == NULL)
7418355f576SJeff Roberson 		return;
7428355f576SJeff Roberson 
7430095a784SJeff Roberson 	if (zone->uz_fini)
7440095a784SJeff Roberson 		for (i = 0; i < bucket->ub_cnt; i++)
7450095a784SJeff Roberson 			zone->uz_fini(bucket->ub_bucket[i], zone->uz_size);
7460095a784SJeff Roberson 	zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt);
7470095a784SJeff Roberson 	bucket->ub_cnt = 0;
7488355f576SJeff Roberson }
7498355f576SJeff Roberson 
7508355f576SJeff Roberson /*
7518355f576SJeff Roberson  * Drains the per cpu caches for a zone.
7528355f576SJeff Roberson  *
7535d1ae027SRobert Watson  * NOTE: This may only be called while the zone is being torn down, and not
7545d1ae027SRobert Watson  * during normal operation.  This is necessary in order that we do not have
7555d1ae027SRobert Watson  * to migrate CPUs to drain the per-CPU caches.
7565d1ae027SRobert Watson  *
7578355f576SJeff Roberson  * Arguments:
7588355f576SJeff Roberson  *	zone     The zone to drain, must be unlocked.
7598355f576SJeff Roberson  *
7608355f576SJeff Roberson  * Returns:
7618355f576SJeff Roberson  *	Nothing
7628355f576SJeff Roberson  */
7638355f576SJeff Roberson static void
7649643769aSJeff Roberson cache_drain(uma_zone_t zone)
7658355f576SJeff Roberson {
7668355f576SJeff Roberson 	uma_cache_t cache;
7678355f576SJeff Roberson 	int cpu;
7688355f576SJeff Roberson 
7698355f576SJeff Roberson 	/*
7705d1ae027SRobert Watson 	 * XXX: It is safe to not lock the per-CPU caches, because we're
7715d1ae027SRobert Watson 	 * tearing down the zone anyway.  I.e., there will be no further use
7725d1ae027SRobert Watson 	 * of the caches at this point.
7735d1ae027SRobert Watson 	 *
7745d1ae027SRobert Watson 	 * XXX: It would be good to be able to assert that the zone is being
7755d1ae027SRobert Watson 	 * torn down to prevent improper use of cache_drain().
7765d1ae027SRobert Watson 	 *
7775d1ae027SRobert Watson 	 * XXX: We lock the zone before passing into bucket_cache_drain() as
7785d1ae027SRobert Watson 	 * it is used elsewhere.  Should the tear-down path be made special
7795d1ae027SRobert Watson 	 * there in some form?
7808355f576SJeff Roberson 	 */
7813aa6d94eSJohn Baldwin 	CPU_FOREACH(cpu) {
7828355f576SJeff Roberson 		cache = &zone->uz_cpu[cpu];
7838355f576SJeff Roberson 		bucket_drain(zone, cache->uc_allocbucket);
7848355f576SJeff Roberson 		bucket_drain(zone, cache->uc_freebucket);
785174ab450SBosko Milekic 		if (cache->uc_allocbucket != NULL)
7866fd34d6fSJeff Roberson 			bucket_free(zone, cache->uc_allocbucket, NULL);
787174ab450SBosko Milekic 		if (cache->uc_freebucket != NULL)
7886fd34d6fSJeff Roberson 			bucket_free(zone, cache->uc_freebucket, NULL);
789d56368d7SBosko Milekic 		cache->uc_allocbucket = cache->uc_freebucket = NULL;
790d56368d7SBosko Milekic 	}
791aaa8bb16SJeff Roberson 	ZONE_LOCK(zone);
792aaa8bb16SJeff Roberson 	bucket_cache_drain(zone);
793aaa8bb16SJeff Roberson 	ZONE_UNLOCK(zone);
794aaa8bb16SJeff Roberson }
795aaa8bb16SJeff Roberson 
796a2de44abSAlexander Motin static void
797a2de44abSAlexander Motin cache_shrink(uma_zone_t zone)
798a2de44abSAlexander Motin {
799a2de44abSAlexander Motin 
800a2de44abSAlexander Motin 	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
801a2de44abSAlexander Motin 		return;
802a2de44abSAlexander Motin 
803a2de44abSAlexander Motin 	ZONE_LOCK(zone);
804a2de44abSAlexander Motin 	zone->uz_count = (zone->uz_count_min + zone->uz_count) / 2;
805a2de44abSAlexander Motin 	ZONE_UNLOCK(zone);
806a2de44abSAlexander Motin }
807a2de44abSAlexander Motin 
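/*
 * Drain the calling CPU's cache for a zone back into the zone's
 * per-domain bucket queues.  Buckets still holding items are requeued;
 * empty ones are stashed and freed only after the critical section and
 * zone lock are dropped.  Callers bind to the target CPU beforehand (see
 * cache_drain_safe() below).
 */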
808a2de44abSAlexander Motin static void
809a2de44abSAlexander Motin cache_drain_safe_cpu(uma_zone_t zone)
810a2de44abSAlexander Motin {
811a2de44abSAlexander Motin 	uma_cache_t cache;
8128a8d9d14SAlexander Motin 	uma_bucket_t b1, b2;
813ab3185d1SJeff Roberson 	int domain;
814a2de44abSAlexander Motin 
815a2de44abSAlexander Motin 	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
816a2de44abSAlexander Motin 		return;
817a2de44abSAlexander Motin 
8188a8d9d14SAlexander Motin 	b1 = b2 = NULL;
819a2de44abSAlexander Motin 	ZONE_LOCK(zone);
820a2de44abSAlexander Motin 	critical_enter();
821ab3185d1SJeff Roberson 	if (zone->uz_flags & UMA_ZONE_NUMA)
822ab3185d1SJeff Roberson 		domain = PCPU_GET(domain);
823ab3185d1SJeff Roberson 	else
824ab3185d1SJeff Roberson 		domain = 0;
825a2de44abSAlexander Motin 	cache = &zone->uz_cpu[curcpu];
826a2de44abSAlexander Motin 	if (cache->uc_allocbucket) {
8278a8d9d14SAlexander Motin 		if (cache->uc_allocbucket->ub_cnt != 0)
8280f9b7bf3SMark Johnston 			zone_put_bucket(zone, &zone->uz_domain[domain],
8290f9b7bf3SMark Johnston 			    cache->uc_allocbucket, false);
8308a8d9d14SAlexander Motin 		else
8318a8d9d14SAlexander Motin 			b1 = cache->uc_allocbucket;
832a2de44abSAlexander Motin 		cache->uc_allocbucket = NULL;
833a2de44abSAlexander Motin 	}
834a2de44abSAlexander Motin 	if (cache->uc_freebucket) {
8358a8d9d14SAlexander Motin 		if (cache->uc_freebucket->ub_cnt != 0)
8360f9b7bf3SMark Johnston 			zone_put_bucket(zone, &zone->uz_domain[domain],
8370f9b7bf3SMark Johnston 			    cache->uc_freebucket, false);
8388a8d9d14SAlexander Motin 		else
8398a8d9d14SAlexander Motin 			b2 = cache->uc_freebucket;
840a2de44abSAlexander Motin 		cache->uc_freebucket = NULL;
841a2de44abSAlexander Motin 	}
842a2de44abSAlexander Motin 	critical_exit();
843a2de44abSAlexander Motin 	ZONE_UNLOCK(zone);
8448a8d9d14SAlexander Motin 	if (b1)
8458a8d9d14SAlexander Motin 		bucket_free(zone, b1, NULL);
8468a8d9d14SAlexander Motin 	if (b2)
8478a8d9d14SAlexander Motin 		bucket_free(zone, b2, NULL);
848a2de44abSAlexander Motin }
849a2de44abSAlexander Motin 
850a2de44abSAlexander Motin /*
851a2de44abSAlexander Motin  * Safely drain the per-CPU caches of a zone, or of all zones, into the
851a2de44abSAlexander Motin  * per-domain bucket caches.
852a2de44abSAlexander Motin  * This is an expensive call because it needs to bind to all CPUs
853a2de44abSAlexander Motin  * one by one and enter a critical section on each of them in order
854a2de44abSAlexander Motin  * to safely access their cache buckets.
855a2de44abSAlexander Motin  * The zone lock must not be held when calling this function.
856a2de44abSAlexander Motin  */
857a2de44abSAlexander Motin static void
858a2de44abSAlexander Motin cache_drain_safe(uma_zone_t zone)
859a2de44abSAlexander Motin {
860a2de44abSAlexander Motin 	int cpu;
861a2de44abSAlexander Motin 
862a2de44abSAlexander Motin 	/*
863a2de44abSAlexander Motin 	 * Polite bucket size shrinking was not enough; shrink aggressively.
864a2de44abSAlexander Motin 	 */
865a2de44abSAlexander Motin 	if (zone)
866a2de44abSAlexander Motin 		cache_shrink(zone);
867a2de44abSAlexander Motin 	else
868a2de44abSAlexander Motin 		zone_foreach(cache_shrink);
869a2de44abSAlexander Motin 
870a2de44abSAlexander Motin 	CPU_FOREACH(cpu) {
871a2de44abSAlexander Motin 		thread_lock(curthread);
872a2de44abSAlexander Motin 		sched_bind(curthread, cpu);
873a2de44abSAlexander Motin 		thread_unlock(curthread);
874a2de44abSAlexander Motin 
875a2de44abSAlexander Motin 		if (zone)
876a2de44abSAlexander Motin 			cache_drain_safe_cpu(zone);
877a2de44abSAlexander Motin 		else
878a2de44abSAlexander Motin 			zone_foreach(cache_drain_safe_cpu);
879a2de44abSAlexander Motin 	}
880a2de44abSAlexander Motin 	thread_lock(curthread);
881a2de44abSAlexander Motin 	sched_unbind(curthread);
882a2de44abSAlexander Motin 	thread_unlock(curthread);
883a2de44abSAlexander Motin }
884a2de44abSAlexander Motin 
885aaa8bb16SJeff Roberson /*
886aaa8bb16SJeff Roberson  * Drain the cached buckets from a zone.  Expects a locked zone on entry.
887aaa8bb16SJeff Roberson  */
888aaa8bb16SJeff Roberson static void
889aaa8bb16SJeff Roberson bucket_cache_drain(uma_zone_t zone)
890aaa8bb16SJeff Roberson {
891ab3185d1SJeff Roberson 	uma_zone_domain_t zdom;
892aaa8bb16SJeff Roberson 	uma_bucket_t bucket;
893ab3185d1SJeff Roberson 	int i;
8948355f576SJeff Roberson 
8958355f576SJeff Roberson 	/*
896ab3185d1SJeff Roberson 	 * Drain the bucket queues and free the buckets.
8978355f576SJeff Roberson 	 */
898ab3185d1SJeff Roberson 	for (i = 0; i < vm_ndomains; i++) {
899ab3185d1SJeff Roberson 		zdom = &zone->uz_domain[i];
9000f9b7bf3SMark Johnston 		while ((bucket = zone_try_fetch_bucket(zone, zdom, false)) !=
9010f9b7bf3SMark Johnston 		    NULL) {
9028355f576SJeff Roberson 			ZONE_UNLOCK(zone);
9038355f576SJeff Roberson 			bucket_drain(zone, bucket);
9046fd34d6fSJeff Roberson 			bucket_free(zone, bucket, NULL);
9058355f576SJeff Roberson 			ZONE_LOCK(zone);
9068355f576SJeff Roberson 		}
907ab3185d1SJeff Roberson 	}
908ace66b56SAlexander Motin 
909ace66b56SAlexander Motin 	/*
910ace66b56SAlexander Motin 	 * Shrink further bucket sizes.  Price of single zone lock collision
911ace66b56SAlexander Motin 	 * is probably lower than the price of a global cache drain.
912ace66b56SAlexander Motin 	 */
913ace66b56SAlexander Motin 	if (zone->uz_count > zone->uz_count_min)
914ace66b56SAlexander Motin 		zone->uz_count--;
9158355f576SJeff Roberson }
916fc03d22bSJeff Roberson 
917fc03d22bSJeff Roberson static void
918fc03d22bSJeff Roberson keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
919fc03d22bSJeff Roberson {
920fc03d22bSJeff Roberson 	uint8_t *mem;
921fc03d22bSJeff Roberson 	int i;
922fc03d22bSJeff Roberson 	uint8_t flags;
923fc03d22bSJeff Roberson 
9241431a748SGleb Smirnoff 	CTR4(KTR_UMA, "keg_free_slab keg %s(%p) slab %p, returning %d bytes",
9251431a748SGleb Smirnoff 	    keg->uk_name, keg, slab, PAGE_SIZE * keg->uk_ppera);
9261431a748SGleb Smirnoff 
927fc03d22bSJeff Roberson 	mem = slab->us_data;
928fc03d22bSJeff Roberson 	flags = slab->us_flags;
929fc03d22bSJeff Roberson 	i = start;
930fc03d22bSJeff Roberson 	if (keg->uk_fini != NULL) {
931fc03d22bSJeff Roberson 		for (i--; i > -1; i--)
932c5deaf04SGleb Smirnoff #ifdef INVARIANTS
933c5deaf04SGleb Smirnoff 		/*
934c5deaf04SGleb Smirnoff 		 * trash_fini implies that dtor was trash_dtor. trash_fini
935c5deaf04SGleb Smirnoff 		 * would check that memory hasn't been modified since free,
936c5deaf04SGleb Smirnoff 		 * which executed trash_dtor.
937c5deaf04SGleb Smirnoff 		 * That's why we need to run the uma_dbg_kskip() check here,
938c5deaf04SGleb Smirnoff 		 * although we don't make the skip check for other init/fini
939c5deaf04SGleb Smirnoff 		 * invocations.
940c5deaf04SGleb Smirnoff 		 */
941c5deaf04SGleb Smirnoff 		if (!uma_dbg_kskip(keg, slab->us_data + (keg->uk_rsize * i)) ||
942c5deaf04SGleb Smirnoff 		    keg->uk_fini != trash_fini)
943c5deaf04SGleb Smirnoff #endif
944fc03d22bSJeff Roberson 			keg->uk_fini(slab->us_data + (keg->uk_rsize * i),
945fc03d22bSJeff Roberson 			    keg->uk_size);
946fc03d22bSJeff Roberson 	}
947fc03d22bSJeff Roberson 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
948fc03d22bSJeff Roberson 		zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
949fc03d22bSJeff Roberson 	keg->uk_freef(mem, PAGE_SIZE * keg->uk_ppera, flags);
9502e47807cSJeff Roberson 	uma_total_dec(PAGE_SIZE * keg->uk_ppera);
9518355f576SJeff Roberson }
9528355f576SJeff Roberson 
9538355f576SJeff Roberson /*
954e20a199fSJeff Roberson  * Frees pages from a keg back to the system.  This is done on demand from
9558355f576SJeff Roberson  * the pageout daemon.
9568355f576SJeff Roberson  *
957e20a199fSJeff Roberson  * Returns nothing.
9588355f576SJeff Roberson  */
959e20a199fSJeff Roberson static void
960e20a199fSJeff Roberson keg_drain(uma_keg_t keg)
9618355f576SJeff Roberson {
9621e183df2SStefan Farfeleder 	struct slabhead freeslabs = { 0 };
963ab3185d1SJeff Roberson 	uma_domain_t dom;
964829be516SMark Johnston 	uma_slab_t slab, tmp;
965ab3185d1SJeff Roberson 	int i;
9668355f576SJeff Roberson 
9678355f576SJeff Roberson 	/*
968e20a199fSJeff Roberson 	 * We don't want to take pages from statically allocated kegs at this
9698355f576SJeff Roberson 	 * time.
9708355f576SJeff Roberson 	 */
971099a0e58SBosko Milekic 	if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
9728355f576SJeff Roberson 		return;
9738355f576SJeff Roberson 
9741431a748SGleb Smirnoff 	CTR3(KTR_UMA, "keg_drain %s(%p) free items: %u",
9751431a748SGleb Smirnoff 	    keg->uk_name, keg, keg->uk_free);
976e20a199fSJeff Roberson 	KEG_LOCK(keg);
977099a0e58SBosko Milekic 	if (keg->uk_free == 0)
9788355f576SJeff Roberson 		goto finished;
9798355f576SJeff Roberson 
980ab3185d1SJeff Roberson 	for (i = 0; i < vm_ndomains; i++) {
981ab3185d1SJeff Roberson 		dom = &keg->uk_domain[i];
982ab3185d1SJeff Roberson 		LIST_FOREACH_SAFE(slab, &dom->ud_free_slab, us_link, tmp) {
983829be516SMark Johnston 			/* We have nowhere to free these to. */
984829be516SMark Johnston 			if (slab->us_flags & UMA_SLAB_BOOT)
9858355f576SJeff Roberson 				continue;
9868355f576SJeff Roberson 
9878355f576SJeff Roberson 			LIST_REMOVE(slab, us_link);
988099a0e58SBosko Milekic 			keg->uk_pages -= keg->uk_ppera;
989099a0e58SBosko Milekic 			keg->uk_free -= keg->uk_ipers;
990713deb36SJeff Roberson 
991099a0e58SBosko Milekic 			if (keg->uk_flags & UMA_ZONE_HASH)
992ab3185d1SJeff Roberson 				UMA_HASH_REMOVE(&keg->uk_hash, slab,
993ab3185d1SJeff Roberson 				    slab->us_data);
994713deb36SJeff Roberson 
995713deb36SJeff Roberson 			SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);
996713deb36SJeff Roberson 		}
997ab3185d1SJeff Roberson 	}
998ab3185d1SJeff Roberson 
999713deb36SJeff Roberson finished:
1000e20a199fSJeff Roberson 	KEG_UNLOCK(keg);
1001713deb36SJeff Roberson 
1002713deb36SJeff Roberson 	while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
1003713deb36SJeff Roberson 		SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
10041645995bSKirk McKusick 		keg_free_slab(keg, slab, keg->uk_ipers);
10058355f576SJeff Roberson 	}
10068355f576SJeff Roberson }
10078355f576SJeff Roberson 
1008e20a199fSJeff Roberson static void
1009e20a199fSJeff Roberson zone_drain_wait(uma_zone_t zone, int waitok)
1010e20a199fSJeff Roberson {
1011e20a199fSJeff Roberson 
10128355f576SJeff Roberson 	/*
1013e20a199fSJeff Roberson 	 * Set draining to interlock with zone_dtor() so we can release our
1014e20a199fSJeff Roberson 	 * locks as we go.  Only dtor() should do a WAITOK call since it
1015e20a199fSJeff Roberson 	 * is the only call that knows the structure will still be available
1016e20a199fSJeff Roberson 	 * when it wakes up.
1017e20a199fSJeff Roberson 	 */
1018e20a199fSJeff Roberson 	ZONE_LOCK(zone);
1019e20a199fSJeff Roberson 	while (zone->uz_flags & UMA_ZFLAG_DRAINING) {
1020e20a199fSJeff Roberson 		if (waitok == M_NOWAIT)
1021e20a199fSJeff Roberson 			goto out;
1022af526374SJeff Roberson 		msleep(zone, zone->uz_lockptr, PVM, "zonedrain", 1);
1023e20a199fSJeff Roberson 	}
1024e20a199fSJeff Roberson 	zone->uz_flags |= UMA_ZFLAG_DRAINING;
1025e20a199fSJeff Roberson 	bucket_cache_drain(zone);
1026e20a199fSJeff Roberson 	ZONE_UNLOCK(zone);
1027e20a199fSJeff Roberson 	/*
1028e20a199fSJeff Roberson 	 * The DRAINING flag protects us from being freed while
1029111fbcd5SBryan Venteicher 	 * we're running.  Normally the uma_rwlock would protect us but we
1030e20a199fSJeff Roberson 	 * must be able to release and acquire the right lock for each keg.
1031e20a199fSJeff Roberson 	 */
1032e20a199fSJeff Roberson 	zone_foreach_keg(zone, &keg_drain);
1033e20a199fSJeff Roberson 	ZONE_LOCK(zone);
1034e20a199fSJeff Roberson 	zone->uz_flags &= ~UMA_ZFLAG_DRAINING;
1035e20a199fSJeff Roberson 	wakeup(zone);
1036e20a199fSJeff Roberson out:
1037e20a199fSJeff Roberson 	ZONE_UNLOCK(zone);
1038e20a199fSJeff Roberson }
1039e20a199fSJeff Roberson 
1040e20a199fSJeff Roberson void
1041e20a199fSJeff Roberson zone_drain(uma_zone_t zone)
1042e20a199fSJeff Roberson {
1043e20a199fSJeff Roberson 
1044e20a199fSJeff Roberson 	zone_drain_wait(zone, M_NOWAIT);
1045e20a199fSJeff Roberson }
1046e20a199fSJeff Roberson 
1047e20a199fSJeff Roberson /*
1048e20a199fSJeff Roberson  * Allocate a new slab for a keg.  This does not insert the slab onto a list.
1049194a979eSMark Johnston  * If the allocation was successful, the keg lock will be held upon return,
1050194a979eSMark Johnston  * otherwise the keg will be left unlocked.
10518355f576SJeff Roberson  *
10528355f576SJeff Roberson  * Arguments:
10538355f576SJeff Roberson  *	keg     The keg the slab is allocated for
 *	zone    The zone, passed to the allocator for legacy reasons
 *	domain  The NUMA domain to allocate the slab's memory from
 *	wait    Shall we wait?
10548355f576SJeff Roberson  *
10558355f576SJeff Roberson  * Returns:
10568355f576SJeff Roberson  *	The slab that was allocated or NULL if there is no memory and the
10578355f576SJeff Roberson  *	caller specified M_NOWAIT.
10588355f576SJeff Roberson  */
10598355f576SJeff Roberson static uma_slab_t
1060ab3185d1SJeff Roberson keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int domain, int wait)
10618355f576SJeff Roberson {
1062e20a199fSJeff Roberson 	uma_alloc allocf;
1063099a0e58SBosko Milekic 	uma_slab_t slab;
10642e47807cSJeff Roberson 	unsigned long size;
106585dcf349SGleb Smirnoff 	uint8_t *mem;
106685dcf349SGleb Smirnoff 	uint8_t flags;
10678355f576SJeff Roberson 	int i;
10688355f576SJeff Roberson 
1069ab3185d1SJeff Roberson 	KASSERT(domain >= 0 && domain < vm_ndomains,
1070ab3185d1SJeff Roberson 	    ("keg_alloc_slab: domain %d out of range", domain));
1071e20a199fSJeff Roberson 	mtx_assert(&keg->uk_lock, MA_OWNED);
1072a553d4b8SJeff Roberson 
1073e20a199fSJeff Roberson 	allocf = keg->uk_allocf;
1074e20a199fSJeff Roberson 	KEG_UNLOCK(keg);
1075a553d4b8SJeff Roberson 
1076194a979eSMark Johnston 	slab = NULL;
1077194a979eSMark Johnston 	mem = NULL;
1078099a0e58SBosko Milekic 	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
1079ab3185d1SJeff Roberson 		slab = zone_alloc_item(keg->uk_slabzone, NULL, domain, wait);
1080fc03d22bSJeff Roberson 		if (slab == NULL)
1081fc03d22bSJeff Roberson 			goto out;
1082a553d4b8SJeff Roberson 	}
1083a553d4b8SJeff Roberson 
10843370c5bfSJeff Roberson 	/*
10853370c5bfSJeff Roberson 	 * This reproduces the old vm_zone behavior of zero filling pages the
10863370c5bfSJeff Roberson 	 * first time they are added to a zone.
10873370c5bfSJeff Roberson 	 *
10883370c5bfSJeff Roberson 	 * Malloced items are zeroed in uma_zalloc.
10893370c5bfSJeff Roberson 	 */
10903370c5bfSJeff Roberson 
1091099a0e58SBosko Milekic 	if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
10923370c5bfSJeff Roberson 		wait |= M_ZERO;
10933370c5bfSJeff Roberson 	else
10943370c5bfSJeff Roberson 		wait &= ~M_ZERO;
10953370c5bfSJeff Roberson 
1096263811f7SKip Macy 	if (keg->uk_flags & UMA_ZONE_NODUMP)
1097263811f7SKip Macy 		wait |= M_NODUMP;
1098263811f7SKip Macy 
1099e20a199fSJeff Roberson 	/* zone is passed for legacy reasons. */
1100194a979eSMark Johnston 	size = keg->uk_ppera * PAGE_SIZE;
1101ab3185d1SJeff Roberson 	mem = allocf(zone, size, domain, &flags, wait);
1102a553d4b8SJeff Roberson 	if (mem == NULL) {
1103b23f72e9SBrian Feldman 		if (keg->uk_flags & UMA_ZONE_OFFPAGE)
11040095a784SJeff Roberson 			zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
1105fc03d22bSJeff Roberson 		slab = NULL;
1106fc03d22bSJeff Roberson 		goto out;
1107a553d4b8SJeff Roberson 	}
11082e47807cSJeff Roberson 	uma_total_inc(size);
11098355f576SJeff Roberson 
11105c0e403bSJeff Roberson 	/* Point the slab into the allocated memory */
1111099a0e58SBosko Milekic 	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
1112099a0e58SBosko Milekic 		slab = (uma_slab_t )(mem + keg->uk_pgoff);
11135c0e403bSJeff Roberson 
1114e20a199fSJeff Roberson 	if (keg->uk_flags & UMA_ZONE_VTOSLAB)
1115099a0e58SBosko Milekic 		for (i = 0; i < keg->uk_ppera; i++)
111699571dc3SJeff Roberson 			vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);
11178355f576SJeff Roberson 
1118099a0e58SBosko Milekic 	slab->us_keg = keg;
11198355f576SJeff Roberson 	slab->us_data = mem;
1120099a0e58SBosko Milekic 	slab->us_freecount = keg->uk_ipers;
11218355f576SJeff Roberson 	slab->us_flags = flags;
1122ab3185d1SJeff Roberson 	slab->us_domain = domain;
1123ef72505eSJeff Roberson 	BIT_FILL(SLAB_SETSIZE, &slab->us_free);
1124ef72505eSJeff Roberson #ifdef INVARIANTS
1125ef72505eSJeff Roberson 	BIT_ZERO(SLAB_SETSIZE, &slab->us_debugfree);
1126ef72505eSJeff Roberson #endif
1127099a0e58SBosko Milekic 
1128b23f72e9SBrian Feldman 	if (keg->uk_init != NULL) {
1129099a0e58SBosko Milekic 		for (i = 0; i < keg->uk_ipers; i++)
1130b23f72e9SBrian Feldman 			if (keg->uk_init(slab->us_data + (keg->uk_rsize * i),
1131b23f72e9SBrian Feldman 			    keg->uk_size, wait) != 0)
1132b23f72e9SBrian Feldman 				break;
1133b23f72e9SBrian Feldman 		if (i != keg->uk_ipers) {
1134fc03d22bSJeff Roberson 			keg_free_slab(keg, slab, i);
1135fc03d22bSJeff Roberson 			slab = NULL;
1136fc03d22bSJeff Roberson 			goto out;
1137b23f72e9SBrian Feldman 		}
1138b23f72e9SBrian Feldman 	}
1139e20a199fSJeff Roberson 	KEG_LOCK(keg);
11405c0e403bSJeff Roberson 
11411431a748SGleb Smirnoff 	CTR3(KTR_UMA, "keg_alloc_slab: allocated slab %p for %s(%p)",
11421431a748SGleb Smirnoff 	    slab, keg->uk_name, keg);
11431431a748SGleb Smirnoff 
1144099a0e58SBosko Milekic 	if (keg->uk_flags & UMA_ZONE_HASH)
1145099a0e58SBosko Milekic 		UMA_HASH_INSERT(&keg->uk_hash, slab, mem);
11468355f576SJeff Roberson 
1147099a0e58SBosko Milekic 	keg->uk_pages += keg->uk_ppera;
1148099a0e58SBosko Milekic 	keg->uk_free += keg->uk_ipers;
11498355f576SJeff Roberson 
1150194a979eSMark Johnston out:
11518355f576SJeff Roberson 	return (slab);
11528355f576SJeff Roberson }
11538355f576SJeff Roberson 
11548355f576SJeff Roberson /*
1155009b6fcbSJeff Roberson  * This function is intended to be used early on in place of page_alloc() so
1156009b6fcbSJeff Roberson  * that we may use the boot time page cache to satisfy allocations before
1157009b6fcbSJeff Roberson  * the VM is ready.
1158009b6fcbSJeff Roberson  */
1159009b6fcbSJeff Roberson static void *
1160ab3185d1SJeff Roberson startup_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
1161ab3185d1SJeff Roberson     int wait)
1162009b6fcbSJeff Roberson {
1163099a0e58SBosko Milekic 	uma_keg_t keg;
1164ac0a6fd0SGleb Smirnoff 	void *mem;
1165ac0a6fd0SGleb Smirnoff 	int pages;
1166099a0e58SBosko Milekic 
1167e20a199fSJeff Roberson 	keg = zone_first_keg(zone);
1168099a0e58SBosko Milekic 
1169009b6fcbSJeff Roberson 	/*
1170f7d35785SGleb Smirnoff 	 * If we are in BOOT_BUCKETS or higher, then switch to the real
1171f7d35785SGleb Smirnoff 	 * allocator.  Zones with page-sized slabs switch at BOOT_PAGEALLOC.
1172009b6fcbSJeff Roberson 	 */
1173f7d35785SGleb Smirnoff 	switch (booted) {
1174f7d35785SGleb Smirnoff 		case BOOT_COLD:
1175f7d35785SGleb Smirnoff 		case BOOT_STRAPPED:
1176f7d35785SGleb Smirnoff 			break;
1177f7d35785SGleb Smirnoff 		case BOOT_PAGEALLOC:
1178f7d35785SGleb Smirnoff 			if (keg->uk_ppera > 1)
1179f7d35785SGleb Smirnoff 				break;
1180f7d35785SGleb Smirnoff 		case BOOT_BUCKETS:
1181f7d35785SGleb Smirnoff 		case BOOT_RUNNING:
1182009b6fcbSJeff Roberson #ifdef UMA_MD_SMALL_ALLOC
1183f7d35785SGleb Smirnoff 			keg->uk_allocf = (keg->uk_ppera > 1) ?
1184f7d35785SGleb Smirnoff 			    page_alloc : uma_small_alloc;
1185009b6fcbSJeff Roberson #else
1186099a0e58SBosko Milekic 			keg->uk_allocf = page_alloc;
1187009b6fcbSJeff Roberson #endif
1188ab3185d1SJeff Roberson 			return keg->uk_allocf(zone, bytes, domain, pflag, wait);
1189009b6fcbSJeff Roberson 	}
1190009b6fcbSJeff Roberson 
1191009b6fcbSJeff Roberson 	/*
1192f7d35785SGleb Smirnoff 	 * Check our small startup cache to see if it has pages remaining.
1193f7d35785SGleb Smirnoff 	 */
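	/*
	 * Illustrative arithmetic (hypothetical numbers): a 16 KB request
	 * on a 4 KB-page system consumes howmany(16384, 4096) == 4 boot
	 * pages; if fewer than 4 pages remain we panic below, asking for
	 * a larger vm.boot_pages.
	 */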
1194f7d35785SGleb Smirnoff 	pages = howmany(bytes, PAGE_SIZE);
1195f7d35785SGleb Smirnoff 	KASSERT(pages > 0, ("%s can't reserve 0 pages", __func__));
1196f7d35785SGleb Smirnoff 	if (pages > boot_pages)
1197f7d35785SGleb Smirnoff 		panic("UMA zone \"%s\": Increase vm.boot_pages", zone->uz_name);
1198f7d35785SGleb Smirnoff #ifdef DIAGNOSTIC
1199f7d35785SGleb Smirnoff 	printf("%s from \"%s\", %d boot pages left\n", __func__, zone->uz_name,
1200f7d35785SGleb Smirnoff 	    boot_pages);
1201f7d35785SGleb Smirnoff #endif
1202f7d35785SGleb Smirnoff 	mem = bootmem;
1203f7d35785SGleb Smirnoff 	boot_pages -= pages;
1204f7d35785SGleb Smirnoff 	bootmem += pages * PAGE_SIZE;
1205f7d35785SGleb Smirnoff 	*pflag = UMA_SLAB_BOOT;
1206f7d35785SGleb Smirnoff 
1207f7d35785SGleb Smirnoff 	return (mem);
1208f7d35785SGleb Smirnoff }
1209f7d35785SGleb Smirnoff 
1210f7d35785SGleb Smirnoff /*
12118355f576SJeff Roberson  * Allocates a number of pages from the system
12128355f576SJeff Roberson  *
12138355f576SJeff Roberson  * Arguments:
12148355f576SJeff Roberson  *	bytes  The number of bytes requested
12158355f576SJeff Roberson  *	wait  Shall we wait?
12168355f576SJeff Roberson  *
12178355f576SJeff Roberson  * Returns:
12188355f576SJeff Roberson  *	A pointer to the alloced memory or possibly
12198355f576SJeff Roberson  *	NULL if M_NOWAIT is set.
12208355f576SJeff Roberson  */
12218355f576SJeff Roberson static void *
1222ab3185d1SJeff Roberson page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
1223ab3185d1SJeff Roberson     int wait)
12248355f576SJeff Roberson {
12258355f576SJeff Roberson 	void *p;	/* Returned page */
12268355f576SJeff Roberson 
12272e47807cSJeff Roberson 	*pflag = UMA_SLAB_KERNEL;
12289978bd99SMark Johnston 	p = (void *)kmem_malloc_domainset(DOMAINSET_FIXED(domain), bytes, wait);
12298355f576SJeff Roberson 
12308355f576SJeff Roberson 	return (p);
12318355f576SJeff Roberson }
12328355f576SJeff Roberson 
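/*
 * Allocates one wired page per CPU slot (mp_maxid + 1 pages in total) for
 * a per-CPU zone slab, preferring each present CPU's NUMA domain where
 * available, and maps the pages into a single contiguous KVA range.  On
 * failure the partially allocated pages are unwired and freed.
 */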
1233ab3059a8SMatt Macy static void *
1234ab3059a8SMatt Macy pcpu_page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
1235ab3059a8SMatt Macy     int wait)
1236ab3059a8SMatt Macy {
1237ab3059a8SMatt Macy 	struct pglist alloctail;
1238ab3059a8SMatt Macy 	vm_offset_t addr, zkva;
1239ab3059a8SMatt Macy 	int cpu, flags;
1240ab3059a8SMatt Macy 	vm_page_t p, p_next;
1241ab3059a8SMatt Macy #ifdef NUMA
1242ab3059a8SMatt Macy 	struct pcpu *pc;
1243ab3059a8SMatt Macy #endif
1244ab3059a8SMatt Macy 
1245ab3059a8SMatt Macy 	MPASS(bytes == (mp_maxid + 1) * PAGE_SIZE);
1246ab3059a8SMatt Macy 
1247013072f0SMark Johnston 	TAILQ_INIT(&alloctail);
1248ab3059a8SMatt Macy 	flags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
1249013072f0SMark Johnston 	    malloc2vm_flags(wait);
1250013072f0SMark Johnston 	*pflag = UMA_SLAB_KERNEL;
1251ab3059a8SMatt Macy 	for (cpu = 0; cpu <= mp_maxid; cpu++) {
1252ab3059a8SMatt Macy 		if (CPU_ABSENT(cpu)) {
1253ab3059a8SMatt Macy 			p = vm_page_alloc(NULL, 0, flags);
1254ab3059a8SMatt Macy 		} else {
1255ab3059a8SMatt Macy #ifndef NUMA
1256ab3059a8SMatt Macy 			p = vm_page_alloc(NULL, 0, flags);
1257ab3059a8SMatt Macy #else
1258ab3059a8SMatt Macy 			pc = pcpu_find(cpu);
1259ab3059a8SMatt Macy 			p = vm_page_alloc_domain(NULL, 0, pc->pc_domain, flags);
1260ab3059a8SMatt Macy 			if (__predict_false(p == NULL))
1261ab3059a8SMatt Macy 				p = vm_page_alloc(NULL, 0, flags);
1262ab3059a8SMatt Macy #endif
1263ab3059a8SMatt Macy 		}
1264ab3059a8SMatt Macy 		if (__predict_false(p == NULL))
1265ab3059a8SMatt Macy 			goto fail;
1266ab3059a8SMatt Macy 		TAILQ_INSERT_TAIL(&alloctail, p, listq);
1267ab3059a8SMatt Macy 	}
1268ab3059a8SMatt Macy 	if ((addr = kva_alloc(bytes)) == 0)
1269ab3059a8SMatt Macy 		goto fail;
1270ab3059a8SMatt Macy 	zkva = addr;
1271ab3059a8SMatt Macy 	TAILQ_FOREACH(p, &alloctail, listq) {
1272ab3059a8SMatt Macy 		pmap_qenter(zkva, &p, 1);
1273ab3059a8SMatt Macy 		zkva += PAGE_SIZE;
1274ab3059a8SMatt Macy 	}
1275ab3059a8SMatt Macy 	return ((void*)addr);
1276ab3059a8SMatt Macy  fail:
1277ab3059a8SMatt Macy 	TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
1278ab3059a8SMatt Macy 		vm_page_unwire(p, PQ_NONE);
1279ab3059a8SMatt Macy 		vm_page_free(p);
1280ab3059a8SMatt Macy 	}
1281ab3059a8SMatt Macy 	return (NULL);
1282ab3059a8SMatt Macy }
1283ab3059a8SMatt Macy 
12848355f576SJeff Roberson /*
12858355f576SJeff Roberson  * Allocates a number of pages not belonging to a VM object
12868355f576SJeff Roberson  *
12878355f576SJeff Roberson  * Arguments:
12888355f576SJeff Roberson  *	bytes  The number of bytes requested
12898355f576SJeff Roberson  *	wait   Shall we wait?
12908355f576SJeff Roberson  *
12918355f576SJeff Roberson  * Returns:
12928355f576SJeff Roberson  *	A pointer to the alloced memory or possibly
12938355f576SJeff Roberson  *	NULL if M_NOWAIT is set.
12948355f576SJeff Roberson  */
12958355f576SJeff Roberson static void *
1296ab3185d1SJeff Roberson noobj_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags,
1297ab3185d1SJeff Roberson     int wait)
12988355f576SJeff Roberson {
1299a4915c21SAttilio Rao 	TAILQ_HEAD(, vm_page) alloctail;
1300a4915c21SAttilio Rao 	u_long npages;
1301b245ac95SAlan Cox 	vm_offset_t retkva, zkva;
1302a4915c21SAttilio Rao 	vm_page_t p, p_next;
1303e20a199fSJeff Roberson 	uma_keg_t keg;
13048355f576SJeff Roberson 
1305a4915c21SAttilio Rao 	TAILQ_INIT(&alloctail);
1306e20a199fSJeff Roberson 	keg = zone_first_keg(zone);
1307a4915c21SAttilio Rao 
1308a4915c21SAttilio Rao 	npages = howmany(bytes, PAGE_SIZE);
1309a4915c21SAttilio Rao 	while (npages > 0) {
1310ab3185d1SJeff Roberson 		p = vm_page_alloc_domain(NULL, 0, domain, VM_ALLOC_INTERRUPT |
13118d6fbbb8SJeff Roberson 		    VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
1312772c8b67SKonstantin Belousov 		    ((wait & M_WAITOK) != 0 ? VM_ALLOC_WAITOK :
1313772c8b67SKonstantin Belousov 		    VM_ALLOC_NOWAIT));
1314a4915c21SAttilio Rao 		if (p != NULL) {
1315a4915c21SAttilio Rao 			/*
1316a4915c21SAttilio Rao 			 * Since the page does not belong to an object, its
1317a4915c21SAttilio Rao 			 * listq is unused.
1318a4915c21SAttilio Rao 			 */
1319a4915c21SAttilio Rao 			TAILQ_INSERT_TAIL(&alloctail, p, listq);
1320a4915c21SAttilio Rao 			npages--;
1321a4915c21SAttilio Rao 			continue;
1322a4915c21SAttilio Rao 		}
13238355f576SJeff Roberson 		/*
1324a4915c21SAttilio Rao 		 * Page allocation failed, free intermediate pages and
1325a4915c21SAttilio Rao 		 * exit.
13268355f576SJeff Roberson 		 */
1327a4915c21SAttilio Rao 		TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
1328087a6132SAlan Cox 			vm_page_unwire(p, PQ_NONE);
1329b245ac95SAlan Cox 			vm_page_free(p);
1330b245ac95SAlan Cox 		}
1331a4915c21SAttilio Rao 		return (NULL);
1332b245ac95SAlan Cox 	}
13338355f576SJeff Roberson 	*flags = UMA_SLAB_PRIV;
1334a4915c21SAttilio Rao 	zkva = keg->uk_kva +
1335a4915c21SAttilio Rao 	    atomic_fetchadd_long(&keg->uk_offset, round_page(bytes));
1336a4915c21SAttilio Rao 	retkva = zkva;
1337a4915c21SAttilio Rao 	TAILQ_FOREACH(p, &alloctail, listq) {
1338a4915c21SAttilio Rao 		pmap_qenter(zkva, &p, 1);
1339a4915c21SAttilio Rao 		zkva += PAGE_SIZE;
1340a4915c21SAttilio Rao 	}
13418355f576SJeff Roberson 
13428355f576SJeff Roberson 	return ((void *)retkva);
13438355f576SJeff Roberson }
13448355f576SJeff Roberson 
13458355f576SJeff Roberson /*
13468355f576SJeff Roberson  * Frees a number of pages to the system
13478355f576SJeff Roberson  *
13488355f576SJeff Roberson  * Arguments:
13498355f576SJeff Roberson  *	mem   A pointer to the memory to be freed
13508355f576SJeff Roberson  *	size  The size of the memory being freed
13518355f576SJeff Roberson  *	flags The original p->us_flags field
13528355f576SJeff Roberson  *
13538355f576SJeff Roberson  * Returns:
13548355f576SJeff Roberson  *	Nothing
13558355f576SJeff Roberson  */
13568355f576SJeff Roberson static void
1357f2c2231eSRyan Stone page_free(void *mem, vm_size_t size, uint8_t flags)
13588355f576SJeff Roberson {
13593370c5bfSJeff Roberson 
136049bfa624SAlan Cox 	if ((flags & UMA_SLAB_KERNEL) == 0)
1361b5345ef1SJustin Hibbits 		panic("UMA: page_free used with invalid flags %x", flags);
13628355f576SJeff Roberson 
136349bfa624SAlan Cox 	kmem_free((vm_offset_t)mem, size);
13648355f576SJeff Roberson }
13658355f576SJeff Roberson 
13668355f576SJeff Roberson /*
1367ab3059a8SMatt Macy  * Frees pcpu zone allocations
1368ab3059a8SMatt Macy  *
1369ab3059a8SMatt Macy  * Arguments:
1370ab3059a8SMatt Macy  *	mem   A pointer to the memory to be freed
1371ab3059a8SMatt Macy  *	size  The size of the memory being freed
1372ab3059a8SMatt Macy  *	flags The original p->us_flags field
1373ab3059a8SMatt Macy  *
1374ab3059a8SMatt Macy  * Returns:
1375ab3059a8SMatt Macy  *	Nothing
1376ab3059a8SMatt Macy  */
1377ab3059a8SMatt Macy static void
1378ab3059a8SMatt Macy pcpu_page_free(void *mem, vm_size_t size, uint8_t flags)
1379ab3059a8SMatt Macy {
1380ab3059a8SMatt Macy 	vm_offset_t sva, curva;
1381ab3059a8SMatt Macy 	vm_paddr_t paddr;
1382ab3059a8SMatt Macy 	vm_page_t m;
1383ab3059a8SMatt Macy 
1384ab3059a8SMatt Macy 	MPASS(size == (mp_maxid + 1) * PAGE_SIZE);
1385ab3059a8SMatt Macy 	sva = (vm_offset_t)mem;
1386ab3059a8SMatt Macy 	for (curva = sva; curva < sva + size; curva += PAGE_SIZE) {
1387ab3059a8SMatt Macy 		paddr = pmap_kextract(curva);
1388ab3059a8SMatt Macy 		m = PHYS_TO_VM_PAGE(paddr);
1389ab3059a8SMatt Macy 		vm_page_unwire(m, PQ_NONE);
1390ab3059a8SMatt Macy 		vm_page_free(m);
1391ab3059a8SMatt Macy 	}
1392ab3059a8SMatt Macy 	pmap_qremove(sva, size >> PAGE_SHIFT);
1393ab3059a8SMatt Macy 	kva_free(sva, size);
1394ab3059a8SMatt Macy }
1395ab3059a8SMatt Macy 
1397ab3059a8SMatt Macy /*
13988355f576SJeff Roberson  * Zero fill initializer
13998355f576SJeff Roberson  *
14008355f576SJeff Roberson  * Arguments/Returns follow uma_init specifications
14018355f576SJeff Roberson  */
1402b23f72e9SBrian Feldman static int
1403b23f72e9SBrian Feldman zero_init(void *mem, int size, int flags)
14048355f576SJeff Roberson {
14058355f576SJeff Roberson 	bzero(mem, size);
1406b23f72e9SBrian Feldman 	return (0);
14078355f576SJeff Roberson }
14088355f576SJeff Roberson 
14098355f576SJeff Roberson /*
1410e20a199fSJeff Roberson  * Finish creating a small uma keg.  This calculates ipers and the keg size.
14118355f576SJeff Roberson  *
14128355f576SJeff Roberson  * Arguments
1413e20a199fSJeff Roberson  *	keg  The keg we should initialize
14148355f576SJeff Roberson  *
14158355f576SJeff Roberson  * Returns
14168355f576SJeff Roberson  *	Nothing
14178355f576SJeff Roberson  */
14188355f576SJeff Roberson static void
1419e20a199fSJeff Roberson keg_small_init(uma_keg_t keg)
14208355f576SJeff Roberson {
1421244f4554SBosko Milekic 	u_int rsize;
1422244f4554SBosko Milekic 	u_int memused;
1423244f4554SBosko Milekic 	u_int wastedspace;
1424244f4554SBosko Milekic 	u_int shsize;
1425a55ebb7cSAndriy Gapon 	u_int slabsize;
14268355f576SJeff Roberson 
1427ad97af7eSGleb Smirnoff 	if (keg->uk_flags & UMA_ZONE_PCPU) {
142896c85efbSNathan Whitehorn 		u_int ncpus = (mp_maxid + 1) ? (mp_maxid + 1) : MAXCPU;
1429e28a647dSGleb Smirnoff 
1430ab3059a8SMatt Macy 		slabsize = UMA_PCPU_ALLOC_SIZE;
1431ab3059a8SMatt Macy 		keg->uk_ppera = ncpus;
1432ad97af7eSGleb Smirnoff 	} else {
1433a55ebb7cSAndriy Gapon 		slabsize = UMA_SLAB_SIZE;
1434ad97af7eSGleb Smirnoff 		keg->uk_ppera = 1;
1435ad97af7eSGleb Smirnoff 	}
1436ad97af7eSGleb Smirnoff 
1437ef72505eSJeff Roberson 	/*
1438ef72505eSJeff Roberson 	 * Calculate the size of each allocation (rsize) according to
1439ef72505eSJeff Roberson 	 * alignment.  If the requested size is smaller than we have
1440ef72505eSJeff Roberson 	 * allocation bits for we round it up.
1441ef72505eSJeff Roberson 	 */
1442099a0e58SBosko Milekic 	rsize = keg->uk_size;
1443a55ebb7cSAndriy Gapon 	if (rsize < slabsize / SLAB_SETSIZE)
1444a55ebb7cSAndriy Gapon 		rsize = slabsize / SLAB_SETSIZE;
1445099a0e58SBosko Milekic 	if (rsize & keg->uk_align)
1446099a0e58SBosko Milekic 		rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1);
1447099a0e58SBosko Milekic 	keg->uk_rsize = rsize;
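	/*
	 * Worked example (hypothetical sizes): with UMA_ALIGN_PTR on a
	 * 64-bit machine uk_align is 7, so a 12-byte item becomes
	 * rsize = (12 & ~7) + (7 + 1) == 16, and every item starts on a
	 * pointer-aligned boundary.
	 */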
1448ad97af7eSGleb Smirnoff 
1449ad97af7eSGleb Smirnoff 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0 ||
1450ab3059a8SMatt Macy 	    keg->uk_rsize < UMA_PCPU_ALLOC_SIZE,
1451ad97af7eSGleb Smirnoff 	    ("%s: size %u too large", __func__, keg->uk_rsize));
14528355f576SJeff Roberson 
1453ef72505eSJeff Roberson 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
14542864dbbfSGleb Smirnoff 		shsize = 0;
1455ef72505eSJeff Roberson 	else
1456*3d5e3df7SGleb Smirnoff 		shsize = SIZEOF_UMA_SLAB;
14578355f576SJeff Roberson 
14581ca6ed45SGleb Smirnoff 	if (rsize <= slabsize - shsize)
1459a55ebb7cSAndriy Gapon 		keg->uk_ipers = (slabsize - shsize) / rsize;
14601ca6ed45SGleb Smirnoff 	else {
14611ca6ed45SGleb Smirnoff 		/* Handle the special case when we have 1 item per slab, so
14621ca6ed45SGleb Smirnoff 		 * that the alignment requirement can be relaxed. */
14631ca6ed45SGleb Smirnoff 		KASSERT(keg->uk_size <= slabsize - shsize,
14641ca6ed45SGleb Smirnoff 		    ("%s: size %u greater than slab", __func__, keg->uk_size));
14651ca6ed45SGleb Smirnoff 		keg->uk_ipers = 1;
14661ca6ed45SGleb Smirnoff 	}
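	/*
	 * For instance (hypothetical numbers, 4 KB slab with an in-page
	 * header): rsize == 256 and shsize == 80 give uk_ipers ==
	 * (4096 - 80) / 256 == 15 items per slab.
	 */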
1467ef72505eSJeff Roberson 	KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
1468ad97af7eSGleb Smirnoff 	    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
1469ad97af7eSGleb Smirnoff 
1470244f4554SBosko Milekic 	memused = keg->uk_ipers * rsize + shsize;
1471a55ebb7cSAndriy Gapon 	wastedspace = slabsize - memused;
1472244f4554SBosko Milekic 
147320e8e865SBosko Milekic 	/*
1474244f4554SBosko Milekic 	 * We can't do OFFPAGE if we're internal or if we've been
147520e8e865SBosko Milekic 	 * asked not to go to the VM for buckets.  If we do this we
14766fd34d6fSJeff Roberson 	 * may end up going to the VM for slabs, which we do not
14776fd34d6fSJeff Roberson 	 * want to do if we're UMA_ZFLAG_CACHEONLY as a result
14786fd34d6fSJeff Roberson 	 * of UMA_ZONE_VM, which clearly forbids it.
147920e8e865SBosko Milekic 	 */
1480099a0e58SBosko Milekic 	if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
1481099a0e58SBosko Milekic 	    (keg->uk_flags & UMA_ZFLAG_CACHEONLY))
14828355f576SJeff Roberson 		return;
1483244f4554SBosko Milekic 
1484ef72505eSJeff Roberson 	/*
1485ef72505eSJeff Roberson 	 * See if using an OFFPAGE slab will limit our waste.  Only do
1486ef72505eSJeff Roberson 	 * this if it permits more items per-slab.
1487ef72505eSJeff Roberson 	 *
1488ef72505eSJeff Roberson 	 * XXX We could try growing slabsize to limit max waste as well.
1489ef72505eSJeff Roberson 	 * Historically this was not done because the VM could not
1490ef72505eSJeff Roberson 	 * efficiently handle contiguous allocations.
1491ef72505eSJeff Roberson 	 */
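	/*
	 * Example with hypothetical numbers (4 KB slab, 80-byte in-page
	 * header, UMA_MAX_WASTE == 10): rsize == 1024 gives 3 in-page
	 * items and 944 wasted bytes, above the 409-byte threshold; an
	 * offpage header fits 4096 / 1024 == 4 items, so OFFPAGE wins.
	 */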
1492a55ebb7cSAndriy Gapon 	if ((wastedspace >= slabsize / UMA_MAX_WASTE) &&
1493a55ebb7cSAndriy Gapon 	    (keg->uk_ipers < (slabsize / keg->uk_rsize))) {
1494a55ebb7cSAndriy Gapon 		keg->uk_ipers = slabsize / keg->uk_rsize;
1495ef72505eSJeff Roberson 		KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
1496ad97af7eSGleb Smirnoff 		    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
14971431a748SGleb Smirnoff 		CTR6(KTR_UMA, "UMA decided we need offpage slab headers for "
14981431a748SGleb Smirnoff 		    "keg: %s(%p), calculated wastedspace = %d, "
1499244f4554SBosko Milekic 		    "maximum wasted space allowed = %d, "
1500244f4554SBosko Milekic 		    "calculated ipers = %d, "
15011431a748SGleb Smirnoff 		    "new wasted space = %d\n", keg->uk_name, keg, wastedspace,
1502a55ebb7cSAndriy Gapon 		    slabsize / UMA_MAX_WASTE, keg->uk_ipers,
1503a55ebb7cSAndriy Gapon 		    slabsize - keg->uk_ipers * keg->uk_rsize);
1504099a0e58SBosko Milekic 		keg->uk_flags |= UMA_ZONE_OFFPAGE;
15058355f576SJeff Roberson 	}
1506ad97af7eSGleb Smirnoff 
1507ad97af7eSGleb Smirnoff 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
1508ad97af7eSGleb Smirnoff 	    (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1509ad97af7eSGleb Smirnoff 		keg->uk_flags |= UMA_ZONE_HASH;
15108355f576SJeff Roberson }
15118355f576SJeff Roberson 
15128355f576SJeff Roberson /*
1513e20a199fSJeff Roberson  * Finish creating a large (> UMA_SLAB_SIZE) uma keg.  Just give in and do
15148355f576SJeff Roberson  * OFFPAGE for now.  When I can allow for more dynamic slab sizes this will be
15158355f576SJeff Roberson  * more complicated.
15168355f576SJeff Roberson  *
15178355f576SJeff Roberson  * Arguments
1518e20a199fSJeff Roberson  *	keg  The keg we should initialize
15198355f576SJeff Roberson  *
15208355f576SJeff Roberson  * Returns
15218355f576SJeff Roberson  *	Nothing
15228355f576SJeff Roberson  */
15238355f576SJeff Roberson static void
1524e20a199fSJeff Roberson keg_large_init(uma_keg_t keg)
15258355f576SJeff Roberson {
15268355f576SJeff Roberson 
1527e20a199fSJeff Roberson 	KASSERT(keg != NULL, ("Keg is null in keg_large_init"));
1528099a0e58SBosko Milekic 	KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0,
1529e20a199fSJeff Roberson 	    ("keg_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY keg"));
1530ad97af7eSGleb Smirnoff 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
1531ad97af7eSGleb Smirnoff 	    ("%s: Cannot large-init a UMA_ZONE_PCPU keg", __func__));
153220e8e865SBosko Milekic 
1533ad97af7eSGleb Smirnoff 	keg->uk_ppera = howmany(keg->uk_size, PAGE_SIZE);
1534099a0e58SBosko Milekic 	keg->uk_ipers = 1;
1535e9a069d8SJohn Baldwin 	keg->uk_rsize = keg->uk_size;
1536e9a069d8SJohn Baldwin 
1537cec48e00SAlexander Motin 	/* Check whether we have enough space to not do OFFPAGE. */
1538*3d5e3df7SGleb Smirnoff 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) == 0 &&
1539*3d5e3df7SGleb Smirnoff 	    PAGE_SIZE * keg->uk_ppera - keg->uk_rsize < SIZEOF_UMA_SLAB) {
15402934eb8aSMark Johnston 		/*
15412934eb8aSMark Johnston 		 * We can't do OFFPAGE if we're internal, in which case
15422934eb8aSMark Johnston 		 * we need an extra page per allocation to contain the
15432934eb8aSMark Johnston 		 * slab header.
15442934eb8aSMark Johnston 		 */
15452934eb8aSMark Johnston 		if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) == 0)
1546099a0e58SBosko Milekic 			keg->uk_flags |= UMA_ZONE_OFFPAGE;
15472934eb8aSMark Johnston 		else
15482934eb8aSMark Johnston 			keg->uk_ppera++;
15492934eb8aSMark Johnston 	}
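	/*
	 * E.g. (hypothetical): a 4000-byte item on 4 KB pages leaves only
	 * 96 bytes in its single page, so unless that covers
	 * SIZEOF_UMA_SLAB the header either goes offpage or, for internal
	 * kegs, an extra page is added to hold it.
	 */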
1550cec48e00SAlexander Motin 
1551cec48e00SAlexander Motin 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
1552cec48e00SAlexander Motin 	    (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1553099a0e58SBosko Milekic 		keg->uk_flags |= UMA_ZONE_HASH;
15548355f576SJeff Roberson }
15558355f576SJeff Roberson 
1556e20a199fSJeff Roberson static void
1557e20a199fSJeff Roberson keg_cachespread_init(uma_keg_t keg)
1558e20a199fSJeff Roberson {
1559e20a199fSJeff Roberson 	int alignsize;
1560e20a199fSJeff Roberson 	int trailer;
1561e20a199fSJeff Roberson 	int pages;
1562e20a199fSJeff Roberson 	int rsize;
1563e20a199fSJeff Roberson 
1564ad97af7eSGleb Smirnoff 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
1565ad97af7eSGleb Smirnoff 	    ("%s: Cannot cachespread-init a UMA_ZONE_PCPU keg", __func__));
1566ad97af7eSGleb Smirnoff 
1567e20a199fSJeff Roberson 	alignsize = keg->uk_align + 1;
1568e20a199fSJeff Roberson 	rsize = keg->uk_size;
1569e20a199fSJeff Roberson 	/*
1570e20a199fSJeff Roberson 	 * We want one item to start on every align boundary in a page.  To
1571e20a199fSJeff Roberson 	 * do this we will span pages.  We will also extend the item by the
1572e20a199fSJeff Roberson 	 * size of align if it is an even multiple of align.  Otherwise, it
1573e20a199fSJeff Roberson 	 * would fall on the same boundary every time.
1574e20a199fSJeff Roberson 	 */
1575e20a199fSJeff Roberson 	if (rsize & keg->uk_align)
1576e20a199fSJeff Roberson 		rsize = (rsize & ~keg->uk_align) + alignsize;
1577e20a199fSJeff Roberson 	if ((rsize & alignsize) == 0)
1578e20a199fSJeff Roberson 		rsize += alignsize;
1579e20a199fSJeff Roberson 	trailer = rsize - keg->uk_size;
1580e20a199fSJeff Roberson 	pages = (rsize * (PAGE_SIZE / alignsize)) / PAGE_SIZE;
1581e20a199fSJeff Roberson 	pages = MIN(pages, (128 * 1024) / PAGE_SIZE);
1582e20a199fSJeff Roberson 	keg->uk_rsize = rsize;
1583e20a199fSJeff Roberson 	keg->uk_ppera = pages;
1584e20a199fSJeff Roberson 	keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize;
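	/*
	 * Worked example (hypothetical numbers): a 192-byte item with
	 * 64-byte cache-line alignment keeps rsize == 192 (an odd
	 * multiple of 64), so pages == (192 * 64) / 4096 == 3 on 4 KB
	 * pages and ipers == 12288 / 192 == 64; successive items then
	 * start on each of the 64 line offsets within a page.
	 */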
1585e20a199fSJeff Roberson 	keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
15862367b4ddSDimitry Andric 	KASSERT(keg->uk_ipers <= SLAB_SETSIZE,
158742321809SGleb Smirnoff 	    ("%s: keg->uk_ipers too high(%d) increase max_ipers", __func__,
1588e20a199fSJeff Roberson 	    keg->uk_ipers));
1589e20a199fSJeff Roberson }
1590e20a199fSJeff Roberson 
15918355f576SJeff Roberson /*
1592099a0e58SBosko Milekic  * Keg header ctor.  This initializes all fields, locks, etc., and inserts
1593099a0e58SBosko Milekic  * the keg onto the global keg list.
15948355f576SJeff Roberson  *
15958355f576SJeff Roberson  * Arguments/Returns follow uma_ctor specifications
1596099a0e58SBosko Milekic  *	udata  Actually uma_kctor_args
1597099a0e58SBosko Milekic  */
1598b23f72e9SBrian Feldman static int
1599b23f72e9SBrian Feldman keg_ctor(void *mem, int size, void *udata, int flags)
1600099a0e58SBosko Milekic {
1601099a0e58SBosko Milekic 	struct uma_kctor_args *arg = udata;
1602099a0e58SBosko Milekic 	uma_keg_t keg = mem;
1603099a0e58SBosko Milekic 	uma_zone_t zone;
1604099a0e58SBosko Milekic 
1605099a0e58SBosko Milekic 	bzero(keg, size);
1606099a0e58SBosko Milekic 	keg->uk_size = arg->size;
1607099a0e58SBosko Milekic 	keg->uk_init = arg->uminit;
1608099a0e58SBosko Milekic 	keg->uk_fini = arg->fini;
1609099a0e58SBosko Milekic 	keg->uk_align = arg->align;
1610099a0e58SBosko Milekic 	keg->uk_free = 0;
16116fd34d6fSJeff Roberson 	keg->uk_reserve = 0;
1612099a0e58SBosko Milekic 	keg->uk_pages = 0;
1613099a0e58SBosko Milekic 	keg->uk_flags = arg->flags;
1614099a0e58SBosko Milekic 	keg->uk_slabzone = NULL;
1615099a0e58SBosko Milekic 
1616099a0e58SBosko Milekic 	/*
1617194a979eSMark Johnston 	 * We use a global round-robin policy by default.  Zones with
1618194a979eSMark Johnston 	 * UMA_ZONE_NUMA set will use first-touch instead, in which case the
1619194a979eSMark Johnston 	 * iterator is never run.
1620194a979eSMark Johnston 	 */
1621194a979eSMark Johnston 	keg->uk_dr.dr_policy = DOMAINSET_RR();
1622194a979eSMark Johnston 	keg->uk_dr.dr_iter = 0;
1623194a979eSMark Johnston 
1624194a979eSMark Johnston 	/*
1625099a0e58SBosko Milekic 	 * The master zone is passed to us at keg-creation time.
1626099a0e58SBosko Milekic 	 */
1627099a0e58SBosko Milekic 	zone = arg->zone;
1628e20a199fSJeff Roberson 	keg->uk_name = zone->uz_name;
1629099a0e58SBosko Milekic 
1630099a0e58SBosko Milekic 	if (arg->flags & UMA_ZONE_VM)
1631099a0e58SBosko Milekic 		keg->uk_flags |= UMA_ZFLAG_CACHEONLY;
1632099a0e58SBosko Milekic 
1633099a0e58SBosko Milekic 	if (arg->flags & UMA_ZONE_ZINIT)
1634099a0e58SBosko Milekic 		keg->uk_init = zero_init;
1635099a0e58SBosko Milekic 
1636cfcae3f8SGleb Smirnoff 	if (arg->flags & UMA_ZONE_MALLOC)
1637e20a199fSJeff Roberson 		keg->uk_flags |= UMA_ZONE_VTOSLAB;
1638e20a199fSJeff Roberson 
1639ad97af7eSGleb Smirnoff 	if (arg->flags & UMA_ZONE_PCPU)
1640ad97af7eSGleb Smirnoff #ifdef SMP
1641ad97af7eSGleb Smirnoff 		keg->uk_flags |= UMA_ZONE_OFFPAGE;
1642ad97af7eSGleb Smirnoff #else
1643ad97af7eSGleb Smirnoff 		keg->uk_flags &= ~UMA_ZONE_PCPU;
1644ad97af7eSGleb Smirnoff #endif
1645ad97af7eSGleb Smirnoff 
1646ef72505eSJeff Roberson 	if (keg->uk_flags & UMA_ZONE_CACHESPREAD) {
1647e20a199fSJeff Roberson 		keg_cachespread_init(keg);
1648244f4554SBosko Milekic 	} else {
1649b92b26adSGleb Smirnoff 		if (keg->uk_size > UMA_SLAB_SPACE)
1650e20a199fSJeff Roberson 			keg_large_init(keg);
1651244f4554SBosko Milekic 		else
1652e20a199fSJeff Roberson 			keg_small_init(keg);
1653244f4554SBosko Milekic 	}
1654099a0e58SBosko Milekic 
1655cfcae3f8SGleb Smirnoff 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
1656099a0e58SBosko Milekic 		keg->uk_slabzone = slabzone;
1657099a0e58SBosko Milekic 
1658099a0e58SBosko Milekic 	/*
1659099a0e58SBosko Milekic 	 * If we haven't booted yet we need allocations to go through the
1660099a0e58SBosko Milekic 	 * startup cache until the vm is ready.
1661099a0e58SBosko Milekic 	 */
1662f4bef67cSGleb Smirnoff 	if (booted < BOOT_PAGEALLOC)
16638cd02d00SAlan Cox 		keg->uk_allocf = startup_alloc;
166477e19437SGleb Smirnoff #ifdef UMA_MD_SMALL_ALLOC
166577e19437SGleb Smirnoff 	else if (keg->uk_ppera == 1)
166677e19437SGleb Smirnoff 		keg->uk_allocf = uma_small_alloc;
16678cd02d00SAlan Cox #endif
1668ab3059a8SMatt Macy 	else if (keg->uk_flags & UMA_ZONE_PCPU)
1669ab3059a8SMatt Macy 		keg->uk_allocf = pcpu_page_alloc;
167077e19437SGleb Smirnoff 	else
167177e19437SGleb Smirnoff 		keg->uk_allocf = page_alloc;
167277e19437SGleb Smirnoff #ifdef UMA_MD_SMALL_ALLOC
167377e19437SGleb Smirnoff 	if (keg->uk_ppera == 1)
167477e19437SGleb Smirnoff 		keg->uk_freef = uma_small_free;
167577e19437SGleb Smirnoff 	else
167677e19437SGleb Smirnoff #endif
1677ab3059a8SMatt Macy 	if (keg->uk_flags & UMA_ZONE_PCPU)
1678ab3059a8SMatt Macy 		keg->uk_freef = pcpu_page_free;
1679ab3059a8SMatt Macy 	else
168077e19437SGleb Smirnoff 		keg->uk_freef = page_free;
1681099a0e58SBosko Milekic 
1682099a0e58SBosko Milekic 	/*
1683af526374SJeff Roberson 	 * Initialize keg's lock
1684099a0e58SBosko Milekic 	 */
1685af526374SJeff Roberson 	KEG_LOCK_INIT(keg, (arg->flags & UMA_ZONE_MTXCLASS));
1686099a0e58SBosko Milekic 
1687099a0e58SBosko Milekic 	/*
1688099a0e58SBosko Milekic 	 * If we're putting the slab header in the actual page we need to
1689*3d5e3df7SGleb Smirnoff 	 * figure out where in each page it goes.  See SIZEOF_UMA_SLAB
1690*3d5e3df7SGleb Smirnoff 	 * macro definition.
1691099a0e58SBosko Milekic 	 */
1692099a0e58SBosko Milekic 	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) {
1693*3d5e3df7SGleb Smirnoff 		keg->uk_pgoff = (PAGE_SIZE * keg->uk_ppera) - SIZEOF_UMA_SLAB;
1694244f4554SBosko Milekic 		/*
1695244f4554SBosko Milekic 		 * The only way the following is possible is if with our
1696244f4554SBosko Milekic 		 * UMA_ALIGN_PTR adjustments we are now bigger than
1697244f4554SBosko Milekic 		 * UMA_SLAB_SIZE.  I haven't checked whether this is
1698244f4554SBosko Milekic 		 * mathematically possible for all cases, so we make
1699244f4554SBosko Milekic 		 * sure here anyway.
1700244f4554SBosko Milekic 		 */
1701*3d5e3df7SGleb Smirnoff 		KASSERT(keg->uk_pgoff + sizeof(struct uma_slab) <=
1702*3d5e3df7SGleb Smirnoff 		    PAGE_SIZE * keg->uk_ppera,
1703*3d5e3df7SGleb Smirnoff 		    ("zone %s ipers %d rsize %d size %d slab won't fit",
1704*3d5e3df7SGleb Smirnoff 		    zone->uz_name, keg->uk_ipers, keg->uk_rsize, keg->uk_size));
1705099a0e58SBosko Milekic 	}
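	/*
	 * For example (hypothetical numbers): with 4 KB pages, uk_ppera
	 * == 1 and SIZEOF_UMA_SLAB == 80, the slab header lands at
	 * uk_pgoff == 4096 - 80 == 4016 bytes into the page.
	 */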
1706099a0e58SBosko Milekic 
1707099a0e58SBosko Milekic 	if (keg->uk_flags & UMA_ZONE_HASH)
1708099a0e58SBosko Milekic 		hash_alloc(&keg->uk_hash);
1709099a0e58SBosko Milekic 
17101431a748SGleb Smirnoff 	CTR5(KTR_UMA, "keg_ctor %p zone %s(%p) out %d free %d\n",
17111431a748SGleb Smirnoff 	    keg, zone->uz_name, zone,
171257223e99SAndriy Gapon 	    (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free,
171357223e99SAndriy Gapon 	    keg->uk_free);
1714099a0e58SBosko Milekic 
1715099a0e58SBosko Milekic 	LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);
1716099a0e58SBosko Milekic 
1717111fbcd5SBryan Venteicher 	rw_wlock(&uma_rwlock);
1718099a0e58SBosko Milekic 	LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
1719111fbcd5SBryan Venteicher 	rw_wunlock(&uma_rwlock);
1720b23f72e9SBrian Feldman 	return (0);
1721099a0e58SBosko Milekic }
1722099a0e58SBosko Milekic 
1723099a0e58SBosko Milekic /*
1724099a0e58SBosko Milekic  * Zone header ctor.  This initializes all fields, locks, etc.
1725099a0e58SBosko Milekic  *
1726099a0e58SBosko Milekic  * Arguments/Returns follow uma_ctor specifications
1727099a0e58SBosko Milekic  *	udata  Actually uma_zctor_args
17288355f576SJeff Roberson  */
1729b23f72e9SBrian Feldman static int
1730b23f72e9SBrian Feldman zone_ctor(void *mem, int size, void *udata, int flags)
17318355f576SJeff Roberson {
17328355f576SJeff Roberson 	struct uma_zctor_args *arg = udata;
17338355f576SJeff Roberson 	uma_zone_t zone = mem;
1734099a0e58SBosko Milekic 	uma_zone_t z;
1735099a0e58SBosko Milekic 	uma_keg_t keg;
17368355f576SJeff Roberson 
17378355f576SJeff Roberson 	bzero(zone, size);
17388355f576SJeff Roberson 	zone->uz_name = arg->name;
17398355f576SJeff Roberson 	zone->uz_ctor = arg->ctor;
17408355f576SJeff Roberson 	zone->uz_dtor = arg->dtor;
1741e20a199fSJeff Roberson 	zone->uz_slab = zone_fetch_slab;
1742099a0e58SBosko Milekic 	zone->uz_init = NULL;
1743099a0e58SBosko Milekic 	zone->uz_fini = NULL;
1744099a0e58SBosko Milekic 	zone->uz_allocs = 0;
1745773df9abSRobert Watson 	zone->uz_frees = 0;
17462019094aSRobert Watson 	zone->uz_fails = 0;
1747bf965959SSean Bruno 	zone->uz_sleeps = 0;
1748fc03d22bSJeff Roberson 	zone->uz_count = 0;
1749ace66b56SAlexander Motin 	zone->uz_count_min = 0;
1750e20a199fSJeff Roberson 	zone->uz_flags = 0;
17512f891cd5SPawel Jakub Dawidek 	zone->uz_warning = NULL;
1752ab3185d1SJeff Roberson 	/* The domain structures follow the cpu structures. */
1753ab3185d1SJeff Roberson 	zone->uz_domain = (struct uma_zone_domain *)&zone->uz_cpu[mp_ncpus];
17542f891cd5SPawel Jakub Dawidek 	timevalclear(&zone->uz_ratecheck);
1755e20a199fSJeff Roberson 	keg = arg->keg;
1756099a0e58SBosko Milekic 
1757af526374SJeff Roberson 	ZONE_LOCK_INIT(zone, (arg->flags & UMA_ZONE_MTXCLASS));
1758af526374SJeff Roberson 
17590095a784SJeff Roberson 	/*
17600095a784SJeff Roberson 	 * This is a pure cache zone, no kegs.
17610095a784SJeff Roberson 	 */
17620095a784SJeff Roberson 	if (arg->import) {
17636fd34d6fSJeff Roberson 		if (arg->flags & UMA_ZONE_VM)
17646fd34d6fSJeff Roberson 			arg->flags |= UMA_ZFLAG_CACHEONLY;
17656fd34d6fSJeff Roberson 		zone->uz_flags = arg->flags;
1766af526374SJeff Roberson 		zone->uz_size = arg->size;
17670095a784SJeff Roberson 		zone->uz_import = arg->import;
17680095a784SJeff Roberson 		zone->uz_release = arg->release;
17690095a784SJeff Roberson 		zone->uz_arg = arg->arg;
1770af526374SJeff Roberson 		zone->uz_lockptr = &zone->uz_lock;
1771111fbcd5SBryan Venteicher 		rw_wlock(&uma_rwlock);
177203175483SAlexander Motin 		LIST_INSERT_HEAD(&uma_cachezones, zone, uz_link);
1773111fbcd5SBryan Venteicher 		rw_wunlock(&uma_rwlock);
1774af526374SJeff Roberson 		goto out;
17750095a784SJeff Roberson 	}
17760095a784SJeff Roberson 
17770095a784SJeff Roberson 	/*
17780095a784SJeff Roberson 	 * Use the regular zone/keg/slab allocator.
17790095a784SJeff Roberson 	 */
17800095a784SJeff Roberson 	zone->uz_import = (uma_import)zone_import;
17810095a784SJeff Roberson 	zone->uz_release = (uma_release)zone_release;
17820095a784SJeff Roberson 	zone->uz_arg = zone;
17830095a784SJeff Roberson 
1784099a0e58SBosko Milekic 	if (arg->flags & UMA_ZONE_SECONDARY) {
1785099a0e58SBosko Milekic 		KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
17868355f576SJeff Roberson 		zone->uz_init = arg->uminit;
1787e221e841SJeff Roberson 		zone->uz_fini = arg->fini;
1788af526374SJeff Roberson 		zone->uz_lockptr = &keg->uk_lock;
1789e20a199fSJeff Roberson 		zone->uz_flags |= UMA_ZONE_SECONDARY;
1790111fbcd5SBryan Venteicher 		rw_wlock(&uma_rwlock);
1791099a0e58SBosko Milekic 		ZONE_LOCK(zone);
1792099a0e58SBosko Milekic 		LIST_FOREACH(z, &keg->uk_zones, uz_link) {
1793099a0e58SBosko Milekic 			if (LIST_NEXT(z, uz_link) == NULL) {
1794099a0e58SBosko Milekic 				LIST_INSERT_AFTER(z, zone, uz_link);
1795099a0e58SBosko Milekic 				break;
1796099a0e58SBosko Milekic 			}
1797099a0e58SBosko Milekic 		}
1798099a0e58SBosko Milekic 		ZONE_UNLOCK(zone);
1799111fbcd5SBryan Venteicher 		rw_wunlock(&uma_rwlock);
1800e20a199fSJeff Roberson 	} else if (keg == NULL) {
1801e20a199fSJeff Roberson 		if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
1802e20a199fSJeff Roberson 		    arg->align, arg->flags)) == NULL)
1803b23f72e9SBrian Feldman 			return (ENOMEM);
1804099a0e58SBosko Milekic 	} else {
1805099a0e58SBosko Milekic 		struct uma_kctor_args karg;
1806b23f72e9SBrian Feldman 		int error;
1807099a0e58SBosko Milekic 
1808099a0e58SBosko Milekic 		/* We should only be here from uma_startup() */
1809099a0e58SBosko Milekic 		karg.size = arg->size;
1810099a0e58SBosko Milekic 		karg.uminit = arg->uminit;
1811099a0e58SBosko Milekic 		karg.fini = arg->fini;
1812099a0e58SBosko Milekic 		karg.align = arg->align;
1813099a0e58SBosko Milekic 		karg.flags = arg->flags;
1814099a0e58SBosko Milekic 		karg.zone = zone;
1815b23f72e9SBrian Feldman 		error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
1816b23f72e9SBrian Feldman 		    flags);
1817b23f72e9SBrian Feldman 		if (error)
1818b23f72e9SBrian Feldman 			return (error);
1819099a0e58SBosko Milekic 	}
18200095a784SJeff Roberson 
1821e20a199fSJeff Roberson 	/*
1822e20a199fSJeff Roberson 	 * Link in the first keg.
1823e20a199fSJeff Roberson 	 */
1824e20a199fSJeff Roberson 	zone->uz_klink.kl_keg = keg;
1825e20a199fSJeff Roberson 	LIST_INSERT_HEAD(&zone->uz_kegs, &zone->uz_klink, kl_link);
1826af526374SJeff Roberson 	zone->uz_lockptr = &keg->uk_lock;
1827e20a199fSJeff Roberson 	zone->uz_size = keg->uk_size;
1828e20a199fSJeff Roberson 	zone->uz_flags |= (keg->uk_flags &
1829e20a199fSJeff Roberson 	    (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT));
18308355f576SJeff Roberson 
18318355f576SJeff Roberson 	/*
18328355f576SJeff Roberson 	 * Some internal zones don't have room allocated for the per cpu
18338355f576SJeff Roberson 	 * caches.  If we're internal, bail out here.
18348355f576SJeff Roberson 	 */
1835099a0e58SBosko Milekic 	if (keg->uk_flags & UMA_ZFLAG_INTERNAL) {
1836e20a199fSJeff Roberson 		KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0,
1837099a0e58SBosko Milekic 		    ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
1838b23f72e9SBrian Feldman 		return (0);
1839099a0e58SBosko Milekic 	}
18408355f576SJeff Roberson 
1841af526374SJeff Roberson out:
18427e28037aSMark Johnston 	KASSERT((arg->flags & (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET)) !=
18437e28037aSMark Johnston 	    (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET),
18447e28037aSMark Johnston 	    ("Invalid zone flag combination"));
18457e28037aSMark Johnston 	if ((arg->flags & UMA_ZONE_MAXBUCKET) != 0)
1846cae33c14SJeff Roberson 		zone->uz_count = BUCKET_MAX;
18477e28037aSMark Johnston 	else if ((arg->flags & UMA_ZONE_NOBUCKET) != 0)
18487e28037aSMark Johnston 		zone->uz_count = 0;
18497e28037aSMark Johnston 	else
18507e28037aSMark Johnston 		zone->uz_count = bucket_select(zone->uz_size);
1851ace66b56SAlexander Motin 	zone->uz_count_min = zone->uz_count;
1852fc03d22bSJeff Roberson 
1853b23f72e9SBrian Feldman 	return (0);
18548355f576SJeff Roberson }
18558355f576SJeff Roberson 
18568355f576SJeff Roberson /*
1857099a0e58SBosko Milekic  * Keg header dtor.  This frees all data, destroys locks, frees the hash
1858099a0e58SBosko Milekic  * table and removes the keg from the global list.
18599c2cd7e5SJeff Roberson  *
18609c2cd7e5SJeff Roberson  * Arguments/Returns follow uma_dtor specifications
18619c2cd7e5SJeff Roberson  *	udata  unused
18629c2cd7e5SJeff Roberson  */
1863099a0e58SBosko Milekic static void
1864099a0e58SBosko Milekic keg_dtor(void *arg, int size, void *udata)
1865099a0e58SBosko Milekic {
1866099a0e58SBosko Milekic 	uma_keg_t keg;
18679c2cd7e5SJeff Roberson 
1868099a0e58SBosko Milekic 	keg = (uma_keg_t)arg;
1869e20a199fSJeff Roberson 	KEG_LOCK(keg);
1870099a0e58SBosko Milekic 	if (keg->uk_free != 0) {
1871a3845534SCraig Rodrigues 		printf("Freed UMA keg (%s) was not empty (%d items). "
1872099a0e58SBosko Milekic 		    "Lost %d pages of memory.\n",
1873a3845534SCraig Rodrigues 		    keg->uk_name ? keg->uk_name : "",
1874099a0e58SBosko Milekic 		    keg->uk_free, keg->uk_pages);
1875099a0e58SBosko Milekic 	}
1876e20a199fSJeff Roberson 	KEG_UNLOCK(keg);
1877099a0e58SBosko Milekic 
1878099a0e58SBosko Milekic 	hash_free(&keg->uk_hash);
1879099a0e58SBosko Milekic 
1880e20a199fSJeff Roberson 	KEG_LOCK_FINI(keg);
1881099a0e58SBosko Milekic }
1882099a0e58SBosko Milekic 
1883099a0e58SBosko Milekic /*
1884099a0e58SBosko Milekic  * Zone header dtor.
1885099a0e58SBosko Milekic  *
1886099a0e58SBosko Milekic  * Arguments/Returns follow uma_dtor specifications
1887099a0e58SBosko Milekic  *	udata  unused
1888099a0e58SBosko Milekic  */
18899c2cd7e5SJeff Roberson static void
18909c2cd7e5SJeff Roberson zone_dtor(void *arg, int size, void *udata)
18919c2cd7e5SJeff Roberson {
1892e20a199fSJeff Roberson 	uma_klink_t klink;
18939c2cd7e5SJeff Roberson 	uma_zone_t zone;
1894099a0e58SBosko Milekic 	uma_keg_t keg;
18959c2cd7e5SJeff Roberson 
18969c2cd7e5SJeff Roberson 	zone = (uma_zone_t)arg;
1897e20a199fSJeff Roberson 	keg = zone_first_keg(zone);
18989643769aSJeff Roberson 
1899e20a199fSJeff Roberson 	if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
19009643769aSJeff Roberson 		cache_drain(zone);
1901099a0e58SBosko Milekic 
1902111fbcd5SBryan Venteicher 	rw_wlock(&uma_rwlock);
1903099a0e58SBosko Milekic 	LIST_REMOVE(zone, uz_link);
1904111fbcd5SBryan Venteicher 	rw_wunlock(&uma_rwlock);
1905099a0e58SBosko Milekic 	/*
1906099a0e58SBosko Milekic 	 * XXX there are some races here where
1907099a0e58SBosko Milekic 	 * the zone can be drained but the zone lock
1908099a0e58SBosko Milekic 	 * released and then the zone refilled before we
1909099a0e58SBosko Milekic 	 * remove it... we don't care for now
1910099a0e58SBosko Milekic 	 */
1911e20a199fSJeff Roberson 	zone_drain_wait(zone, M_WAITOK);
1912e20a199fSJeff Roberson 	/*
1913e20a199fSJeff Roberson 	 * Unlink all of our kegs.
1914e20a199fSJeff Roberson 	 */
1915e20a199fSJeff Roberson 	while ((klink = LIST_FIRST(&zone->uz_kegs)) != NULL) {
1916e20a199fSJeff Roberson 		klink->kl_keg = NULL;
1917e20a199fSJeff Roberson 		LIST_REMOVE(klink, kl_link);
1918e20a199fSJeff Roberson 		if (klink == &zone->uz_klink)
1919e20a199fSJeff Roberson 			continue;
1920e20a199fSJeff Roberson 		free(klink, M_TEMP);
1921e20a199fSJeff Roberson 	}
1922e20a199fSJeff Roberson 	/*
1923e20a199fSJeff Roberson 	 * We only destroy kegs from non secondary zones.
1924e20a199fSJeff Roberson 	 */
19250095a784SJeff Roberson 	if (keg != NULL && (zone->uz_flags & UMA_ZONE_SECONDARY) == 0)  {
1926111fbcd5SBryan Venteicher 		rw_wlock(&uma_rwlock);
1927099a0e58SBosko Milekic 		LIST_REMOVE(keg, uk_link);
1928111fbcd5SBryan Venteicher 		rw_wunlock(&uma_rwlock);
19290095a784SJeff Roberson 		zone_free_item(kegs, keg, NULL, SKIP_NONE);
19309c2cd7e5SJeff Roberson 	}
1931af526374SJeff Roberson 	ZONE_LOCK_FINI(zone);
1932099a0e58SBosko Milekic }
1933099a0e58SBosko Milekic 
19349c2cd7e5SJeff Roberson /*
19358355f576SJeff Roberson  * Traverses every zone in the system and calls a callback
19368355f576SJeff Roberson  *
19378355f576SJeff Roberson  * Arguments:
19388355f576SJeff Roberson  *	zfunc  A pointer to a function which accepts a zone
19398355f576SJeff Roberson  *		as an argument.
19408355f576SJeff Roberson  *
19418355f576SJeff Roberson  * Returns:
19428355f576SJeff Roberson  *	Nothing
19438355f576SJeff Roberson  */
19448355f576SJeff Roberson static void
19458355f576SJeff Roberson zone_foreach(void (*zfunc)(uma_zone_t))
19468355f576SJeff Roberson {
1947099a0e58SBosko Milekic 	uma_keg_t keg;
19488355f576SJeff Roberson 	uma_zone_t zone;
19498355f576SJeff Roberson 
1950111fbcd5SBryan Venteicher 	rw_rlock(&uma_rwlock);
1951099a0e58SBosko Milekic 	LIST_FOREACH(keg, &uma_kegs, uk_link) {
1952099a0e58SBosko Milekic 		LIST_FOREACH(zone, &keg->uk_zones, uz_link)
19538355f576SJeff Roberson 			zfunc(zone);
1954099a0e58SBosko Milekic 	}
1955111fbcd5SBryan Venteicher 	rw_runlock(&uma_rwlock);
19568355f576SJeff Roberson }
19578355f576SJeff Roberson 
1958f4bef67cSGleb Smirnoff /*
1959f4bef67cSGleb Smirnoff  * Count how many pages we need to bootstrap.  The VM supplies
1960f4bef67cSGleb Smirnoff  * its need for early zones in the argument; we add our own zones,
1961f4bef67cSGleb Smirnoff  * which consist of: UMA Slabs, UMA Hash and 9 bucket zones.  The
1962f4bef67cSGleb Smirnoff  * zone of zones and zone of kegs are accounted for separately.
1963f4bef67cSGleb Smirnoff  */
1964f4bef67cSGleb Smirnoff #define	UMA_BOOT_ZONES	11
19655073a083SGleb Smirnoff /* Zone of zones and zone of kegs have arbitrary alignment. */
19665073a083SGleb Smirnoff #define	UMA_BOOT_ALIGN	32
1967f4bef67cSGleb Smirnoff static int zsize, ksize;
1968f4bef67cSGleb Smirnoff int
1969f7d35785SGleb Smirnoff uma_startup_count(int vm_zones)
1970f4bef67cSGleb Smirnoff {
1971f7d35785SGleb Smirnoff 	int zones, pages;
1972f4bef67cSGleb Smirnoff 
1973f4bef67cSGleb Smirnoff 	ksize = sizeof(struct uma_keg) +
1974f4bef67cSGleb Smirnoff 	    (sizeof(struct uma_domain) * vm_ndomains);
1975f4bef67cSGleb Smirnoff 	zsize = sizeof(struct uma_zone) +
1976f4bef67cSGleb Smirnoff 	    (sizeof(struct uma_cache) * (mp_maxid + 1)) +
1977f4bef67cSGleb Smirnoff 	    (sizeof(struct uma_zone_domain) * vm_ndomains);
1978f4bef67cSGleb Smirnoff 
19795073a083SGleb Smirnoff 	/*
19805073a083SGleb Smirnoff 	 * Memory for the zone of kegs and its keg,
19815073a083SGleb Smirnoff 	 * and for zone of zones.
19825073a083SGleb Smirnoff 	 */
1983f4bef67cSGleb Smirnoff 	pages = howmany(roundup(zsize, CACHE_LINE_SIZE) * 2 +
1984f4bef67cSGleb Smirnoff 	    roundup(ksize, CACHE_LINE_SIZE), PAGE_SIZE);
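	/*
	 * E.g. (hypothetical sizes): zsize rounded to 1152 bytes and
	 * ksize rounded to 640 give howmany(2 * 1152 + 640, 4096) ==
	 * howmany(2944, 4096) == 1 page.
	 */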
1985f4bef67cSGleb Smirnoff 
1986f7d35785SGleb Smirnoff #ifdef	UMA_MD_SMALL_ALLOC
1987f7d35785SGleb Smirnoff 	zones = UMA_BOOT_ZONES;
1988f7d35785SGleb Smirnoff #else
1989f7d35785SGleb Smirnoff 	zones = UMA_BOOT_ZONES + vm_zones;
1990f7d35785SGleb Smirnoff 	vm_zones = 0;
1991f7d35785SGleb Smirnoff #endif
1992f4bef67cSGleb Smirnoff 
19935073a083SGleb Smirnoff 	/* Memory for the rest of the startup zones, UMA and VM, ... */
199496a10340SGleb Smirnoff 	if (zsize > UMA_SLAB_SPACE)
1995f7d35785SGleb Smirnoff 		pages += (zones + vm_zones) *
1996f7d35785SGleb Smirnoff 		    howmany(roundup2(zsize, UMA_BOOT_ALIGN), UMA_SLAB_SIZE);
199796a10340SGleb Smirnoff 	else if (roundup2(zsize, UMA_BOOT_ALIGN) > UMA_SLAB_SPACE)
199896a10340SGleb Smirnoff 		pages += zones;
1999f4bef67cSGleb Smirnoff 	else
20005073a083SGleb Smirnoff 		pages += howmany(zones,
20015073a083SGleb Smirnoff 		    UMA_SLAB_SPACE / roundup2(zsize, UMA_BOOT_ALIGN));
2002f4bef67cSGleb Smirnoff 
20035073a083SGleb Smirnoff 	/* ... and their kegs. Note that zone of zones allocates a keg! */
20045073a083SGleb Smirnoff 	pages += howmany(zones + 1,
20055073a083SGleb Smirnoff 	    UMA_SLAB_SPACE / roundup2(ksize, UMA_BOOT_ALIGN));
2006f4bef67cSGleb Smirnoff 
2007f4bef67cSGleb Smirnoff 	/*
20085073a083SGleb Smirnoff 	 * Most of the startup zones are not going to be offpage; that's
20095073a083SGleb Smirnoff 	 * why we use UMA_SLAB_SPACE instead of UMA_SLAB_SIZE in all
20105073a083SGleb Smirnoff 	 * calculations.  Some large bucket zones will be offpage, and
20115073a083SGleb Smirnoff 	 * thus will allocate hashes.  We take a conservative approach
20125073a083SGleb Smirnoff 	 * and assume that all zones may allocate a hash.  This may give
20135073a083SGleb Smirnoff 	 * us some positive inaccuracy, usually an extra single page.
2014f4bef67cSGleb Smirnoff 	 */
20155073a083SGleb Smirnoff 	pages += howmany(zones, UMA_SLAB_SPACE /
2016d2be4a1eSGleb Smirnoff 	    (sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT));
2017f4bef67cSGleb Smirnoff 
2018f4bef67cSGleb Smirnoff 	return (pages);
2019f4bef67cSGleb Smirnoff }
2020f4bef67cSGleb Smirnoff 
20218355f576SJeff Roberson void
2022ac0a6fd0SGleb Smirnoff uma_startup(void *mem, int npages)
20238355f576SJeff Roberson {
20248355f576SJeff Roberson 	struct uma_zctor_args args;
2025ab3185d1SJeff Roberson 	uma_keg_t masterkeg;
2026ab3185d1SJeff Roberson 	uintptr_t m;
2027f4bef67cSGleb Smirnoff 
2028f4bef67cSGleb Smirnoff #ifdef DIAGNOSTIC
2029f4bef67cSGleb Smirnoff 	printf("Entering %s with %d boot pages configured\n", __func__, npages);
2030f4bef67cSGleb Smirnoff #endif
20318355f576SJeff Roberson 
2032111fbcd5SBryan Venteicher 	rw_init(&uma_rwlock, "UMA lock");
2033099a0e58SBosko Milekic 
2034ab3185d1SJeff Roberson 	/* Use bootpages memory for the zone of zones and zone of kegs. */
2035ab3185d1SJeff Roberson 	m = (uintptr_t)mem;
2036ab3185d1SJeff Roberson 	zones = (uma_zone_t)m;
2037ab3185d1SJeff Roberson 	m += roundup(zsize, CACHE_LINE_SIZE);
2038ab3185d1SJeff Roberson 	kegs = (uma_zone_t)m;
2039ab3185d1SJeff Roberson 	m += roundup(zsize, CACHE_LINE_SIZE);
2040ab3185d1SJeff Roberson 	masterkeg = (uma_keg_t)m;
2041ab3185d1SJeff Roberson 	m += roundup(ksize, CACHE_LINE_SIZE);
2042ab3185d1SJeff Roberson 	m = roundup(m, PAGE_SIZE);
2043ab3185d1SJeff Roberson 	npages -= (m - (uintptr_t)mem) / PAGE_SIZE;
2044ab3185d1SJeff Roberson 	mem = (void *)m;
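	/*
	 * The boot region now looks like: zones, kegs and masterkeg each
	 * in their own cache-line-rounded chunk, followed by the
	 * remaining npages of page-aligned memory left for
	 * startup_alloc().
	 */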
2045ab3185d1SJeff Roberson 
2046099a0e58SBosko Milekic 	/* "manually" create the initial zone */
20470095a784SJeff Roberson 	memset(&args, 0, sizeof(args));
2048099a0e58SBosko Milekic 	args.name = "UMA Kegs";
2049ab3185d1SJeff Roberson 	args.size = ksize;
2050099a0e58SBosko Milekic 	args.ctor = keg_ctor;
2051099a0e58SBosko Milekic 	args.dtor = keg_dtor;
20528355f576SJeff Roberson 	args.uminit = zero_init;
20538355f576SJeff Roberson 	args.fini = NULL;
2054ab3185d1SJeff Roberson 	args.keg = masterkeg;
20555073a083SGleb Smirnoff 	args.align = UMA_BOOT_ALIGN - 1;
2056b60f5b79SJeff Roberson 	args.flags = UMA_ZFLAG_INTERNAL;
2057ab3185d1SJeff Roberson 	zone_ctor(kegs, zsize, &args, M_WAITOK);
20588355f576SJeff Roberson 
2059ac0a6fd0SGleb Smirnoff 	bootmem = mem;
2060ac0a6fd0SGleb Smirnoff 	boot_pages = npages;
20618355f576SJeff Roberson 
2062099a0e58SBosko Milekic 	args.name = "UMA Zones";
2063f4bef67cSGleb Smirnoff 	args.size = zsize;
2064099a0e58SBosko Milekic 	args.ctor = zone_ctor;
2065099a0e58SBosko Milekic 	args.dtor = zone_dtor;
2066099a0e58SBosko Milekic 	args.uminit = zero_init;
2067099a0e58SBosko Milekic 	args.fini = NULL;
2068099a0e58SBosko Milekic 	args.keg = NULL;
20695073a083SGleb Smirnoff 	args.align = UMA_BOOT_ALIGN - 1;
2070099a0e58SBosko Milekic 	args.flags = UMA_ZFLAG_INTERNAL;
2071ab3185d1SJeff Roberson 	zone_ctor(zones, zsize, &args, M_WAITOK);
2072099a0e58SBosko Milekic 
20738355f576SJeff Roberson 	/* Now make a zone for slab headers */
20748355f576SJeff Roberson 	slabzone = uma_zcreate("UMA Slabs",
2075ef72505eSJeff Roberson 				sizeof(struct uma_slab),
20768355f576SJeff Roberson 				NULL, NULL, NULL, NULL,
2077b60f5b79SJeff Roberson 				UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
20788355f576SJeff Roberson 
20798355f576SJeff Roberson 	hashzone = uma_zcreate("UMA Hash",
20808355f576SJeff Roberson 	    sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
20818355f576SJeff Roberson 	    NULL, NULL, NULL, NULL,
2082b60f5b79SJeff Roberson 	    UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
20838355f576SJeff Roberson 
2084cae33c14SJeff Roberson 	bucket_init();
20858355f576SJeff Roberson 
2086f4bef67cSGleb Smirnoff 	booted = BOOT_STRAPPED;
20878355f576SJeff Roberson }
20888355f576SJeff Roberson 
2089f4bef67cSGleb Smirnoff void
2090f4bef67cSGleb Smirnoff uma_startup1(void)
2091f4bef67cSGleb Smirnoff {
2092f4bef67cSGleb Smirnoff 
2093f4bef67cSGleb Smirnoff #ifdef DIAGNOSTIC
2094f4bef67cSGleb Smirnoff 	printf("Entering %s with %d boot pages left\n", __func__, boot_pages);
2095f4bef67cSGleb Smirnoff #endif
2096f4bef67cSGleb Smirnoff 	booted = BOOT_PAGEALLOC;
2097f4bef67cSGleb Smirnoff }
2098f4bef67cSGleb Smirnoff 
20998355f576SJeff Roberson void
210099571dc3SJeff Roberson uma_startup2(void)
21018355f576SJeff Roberson {
2102f4bef67cSGleb Smirnoff 
2103f7d35785SGleb Smirnoff #ifdef DIAGNOSTIC
2104f7d35785SGleb Smirnoff 	printf("Entering %s with %d boot pages left\n", __func__, boot_pages);
2105f7d35785SGleb Smirnoff #endif
2106f4bef67cSGleb Smirnoff 	booted = BOOT_BUCKETS;
210795c4bf75SKonstantin Belousov 	sx_init(&uma_drain_lock, "umadrain");
2108f4bef67cSGleb Smirnoff 	bucket_enable();
21098355f576SJeff Roberson }
21108355f576SJeff Roberson 
21118355f576SJeff Roberson /*
21128355f576SJeff Roberson  * Initialize our callout handle
21138355f576SJeff Roberson  *
21148355f576SJeff Roberson  */
21158355f576SJeff Roberson static void
21168355f576SJeff Roberson uma_startup3(void)
21178355f576SJeff Roberson {
21181431a748SGleb Smirnoff 
2119c5deaf04SGleb Smirnoff #ifdef INVARIANTS
2120c5deaf04SGleb Smirnoff 	TUNABLE_INT_FETCH("vm.debug.divisor", &dbg_divisor);
2121c5deaf04SGleb Smirnoff 	uma_dbg_cnt = counter_u64_alloc(M_WAITOK);
2122c5deaf04SGleb Smirnoff 	uma_skip_cnt = counter_u64_alloc(M_WAITOK);
2123c5deaf04SGleb Smirnoff #endif
2124fd90e2edSJung-uk Kim 	callout_init(&uma_callout, 1);
21259643769aSJeff Roberson 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
2126c5deaf04SGleb Smirnoff 	booted = BOOT_RUNNING;
21278355f576SJeff Roberson }
21288355f576SJeff Roberson 
2129e20a199fSJeff Roberson static uma_keg_t
2130099a0e58SBosko Milekic uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
213185dcf349SGleb Smirnoff 		int align, uint32_t flags)
2132099a0e58SBosko Milekic {
2133099a0e58SBosko Milekic 	struct uma_kctor_args args;
2134099a0e58SBosko Milekic 
2135099a0e58SBosko Milekic 	args.size = size;
2136099a0e58SBosko Milekic 	args.uminit = uminit;
2137099a0e58SBosko Milekic 	args.fini = fini;
21381e319f6dSRobert Watson 	args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align;
2139099a0e58SBosko Milekic 	args.flags = flags;
2140099a0e58SBosko Milekic 	args.zone = zone;
2141ab3185d1SJeff Roberson 	return (zone_alloc_item(kegs, &args, UMA_ANYDOMAIN, M_WAITOK));
2142099a0e58SBosko Milekic }
2143099a0e58SBosko Milekic 
2144f4bef67cSGleb Smirnoff /* Public functions */
21458355f576SJeff Roberson /* See uma.h */
21461e319f6dSRobert Watson void
21471e319f6dSRobert Watson uma_set_align(int align)
21481e319f6dSRobert Watson {
21491e319f6dSRobert Watson 
21501e319f6dSRobert Watson 	if (align != UMA_ALIGN_CACHE)
21511e319f6dSRobert Watson 		uma_align_cache = align;
21521e319f6dSRobert Watson }
21531e319f6dSRobert Watson 
21541e319f6dSRobert Watson /* See uma.h */
21558355f576SJeff Roberson uma_zone_t
2156bb196eb4SMatthew D Fleming uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
215785dcf349SGleb Smirnoff 		uma_init uminit, uma_fini fini, int align, uint32_t flags)
21588355f576SJeff Roberson 
21598355f576SJeff Roberson {
21608355f576SJeff Roberson 	struct uma_zctor_args args;
216195c4bf75SKonstantin Belousov 	uma_zone_t res;
216295c4bf75SKonstantin Belousov 	bool locked;
21638355f576SJeff Roberson 
2164a5a35578SJohn Baldwin 	KASSERT(powerof2(align + 1), ("invalid zone alignment %d for \"%s\"",
2165a5a35578SJohn Baldwin 	    align, name));
2166a5a35578SJohn Baldwin 
21678355f576SJeff Roberson 	/* This stuff is essential for the zone ctor */
21680095a784SJeff Roberson 	memset(&args, 0, sizeof(args));
21698355f576SJeff Roberson 	args.name = name;
21708355f576SJeff Roberson 	args.size = size;
21718355f576SJeff Roberson 	args.ctor = ctor;
21728355f576SJeff Roberson 	args.dtor = dtor;
21738355f576SJeff Roberson 	args.uminit = uminit;
21748355f576SJeff Roberson 	args.fini = fini;
2175afc6dc36SJohn-Mark Gurney #ifdef  INVARIANTS
2176afc6dc36SJohn-Mark Gurney 	/*
2177afc6dc36SJohn-Mark Gurney 	 * If a zone is being created with no constructor or destructor
2178afc6dc36SJohn-Mark Gurney 	 * supplied, install the UMA trash constructor/destructor, which
2179afc6dc36SJohn-Mark Gurney 	 * checks for memory use after free.
2180afc6dc36SJohn-Mark Gurney 	 */
218119c591bfSMateusz Guzik 	if ((!(flags & (UMA_ZONE_ZINIT | UMA_ZONE_NOFREE))) &&
218219c591bfSMateusz Guzik 	    ctor == NULL && dtor == NULL && uminit == NULL && fini == NULL) {
2183afc6dc36SJohn-Mark Gurney 		args.ctor = trash_ctor;
2184afc6dc36SJohn-Mark Gurney 		args.dtor = trash_dtor;
2185afc6dc36SJohn-Mark Gurney 		args.uminit = trash_init;
2186afc6dc36SJohn-Mark Gurney 		args.fini = trash_fini;
2187afc6dc36SJohn-Mark Gurney 	}
2188afc6dc36SJohn-Mark Gurney #endif
21898355f576SJeff Roberson 	args.align = align;
21908355f576SJeff Roberson 	args.flags = flags;
2191099a0e58SBosko Milekic 	args.keg = NULL;
2192099a0e58SBosko Milekic 
2193f4bef67cSGleb Smirnoff 	if (booted < BOOT_BUCKETS) {
219495c4bf75SKonstantin Belousov 		locked = false;
219595c4bf75SKonstantin Belousov 	} else {
219695c4bf75SKonstantin Belousov 		sx_slock(&uma_drain_lock);
219795c4bf75SKonstantin Belousov 		locked = true;
219895c4bf75SKonstantin Belousov 	}
2199ab3185d1SJeff Roberson 	res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK);
220095c4bf75SKonstantin Belousov 	if (locked)
220195c4bf75SKonstantin Belousov 		sx_sunlock(&uma_drain_lock);
220295c4bf75SKonstantin Belousov 	return (res);
2203099a0e58SBosko Milekic }
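
/*
 * Example of typical zone usage; "foo" is a hypothetical consumer and
 * not part of this file:
 *
 *	zone = uma_zcreate("foo", sizeof(struct foo), NULL, NULL, NULL,
 *	    NULL, UMA_ALIGN_PTR, 0);
 *	item = uma_zalloc(zone, M_WAITOK | M_ZERO);
 *	...
 *	uma_zfree(zone, item);
 *	uma_zdestroy(zone);
 */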
2204099a0e58SBosko Milekic 
2205099a0e58SBosko Milekic /* See uma.h */
2206099a0e58SBosko Milekic uma_zone_t
2207099a0e58SBosko Milekic uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
2208099a0e58SBosko Milekic 		    uma_init zinit, uma_fini zfini, uma_zone_t master)
2209099a0e58SBosko Milekic {
2210099a0e58SBosko Milekic 	struct uma_zctor_args args;
2211e20a199fSJeff Roberson 	uma_keg_t keg;
221295c4bf75SKonstantin Belousov 	uma_zone_t res;
221395c4bf75SKonstantin Belousov 	bool locked;
2214099a0e58SBosko Milekic 
2215e20a199fSJeff Roberson 	keg = zone_first_keg(master);
22160095a784SJeff Roberson 	memset(&args, 0, sizeof(args));
2217099a0e58SBosko Milekic 	args.name = name;
2218e20a199fSJeff Roberson 	args.size = keg->uk_size;
2219099a0e58SBosko Milekic 	args.ctor = ctor;
2220099a0e58SBosko Milekic 	args.dtor = dtor;
2221099a0e58SBosko Milekic 	args.uminit = zinit;
2222099a0e58SBosko Milekic 	args.fini = zfini;
2223e20a199fSJeff Roberson 	args.align = keg->uk_align;
2224e20a199fSJeff Roberson 	args.flags = keg->uk_flags | UMA_ZONE_SECONDARY;
2225e20a199fSJeff Roberson 	args.keg = keg;
22268355f576SJeff Roberson 
2227f4bef67cSGleb Smirnoff 	if (booted < BOOT_BUCKETS) {
222895c4bf75SKonstantin Belousov 		locked = false;
222995c4bf75SKonstantin Belousov 	} else {
223095c4bf75SKonstantin Belousov 		sx_slock(&uma_drain_lock);
223195c4bf75SKonstantin Belousov 		locked = true;
223295c4bf75SKonstantin Belousov 	}
2233e20a199fSJeff Roberson 	/* XXX Attaches only one keg of potentially many. */
2234ab3185d1SJeff Roberson 	res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK);
223595c4bf75SKonstantin Belousov 	if (locked)
223695c4bf75SKonstantin Belousov 		sx_sunlock(&uma_drain_lock);
223795c4bf75SKonstantin Belousov 	return (res);
22388355f576SJeff Roberson }
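
/*
 * Example: layering a secondary zone over a master zone so that both
 * share one keg of backing slabs; the "bar" names are hypothetical:
 *
 *	bar_zone = uma_zsecond_create("bar", bar_ctor, bar_dtor,
 *	    NULL, NULL, bar_master_zone);
 *
 * Items from bar_zone are the same size and come from the same slabs
 * as items from bar_master_zone, but run bar_zone's own ctor/dtor.
 */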
22398355f576SJeff Roberson 
22400095a784SJeff Roberson /* See uma.h */
22410095a784SJeff Roberson uma_zone_t
2242af526374SJeff Roberson uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor,
2243af526374SJeff Roberson 		    uma_init zinit, uma_fini zfini, uma_import zimport,
2244af526374SJeff Roberson 		    uma_release zrelease, void *arg, int flags)
22450095a784SJeff Roberson {
22460095a784SJeff Roberson 	struct uma_zctor_args args;
22470095a784SJeff Roberson 
22480095a784SJeff Roberson 	memset(&args, 0, sizeof(args));
22490095a784SJeff Roberson 	args.name = name;
2250af526374SJeff Roberson 	args.size = size;
22510095a784SJeff Roberson 	args.ctor = ctor;
22520095a784SJeff Roberson 	args.dtor = dtor;
22530095a784SJeff Roberson 	args.uminit = zinit;
22540095a784SJeff Roberson 	args.fini = zfini;
22550095a784SJeff Roberson 	args.import = zimport;
22560095a784SJeff Roberson 	args.release = zrelease;
22570095a784SJeff Roberson 	args.arg = arg;
22580095a784SJeff Roberson 	args.align = 0;
22590095a784SJeff Roberson 	args.flags = flags;
22600095a784SJeff Roberson 
2261ab3185d1SJeff Roberson 	return (zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK));
22620095a784SJeff Roberson }
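
/*
 * Sketch of a cache-only zone backed by a custom allocator instead of
 * a keg; the "baz" back end and its functions are hypothetical:
 *
 *	static int
 *	baz_import(void *arg, void **store, int cnt, int domain, int flags)
 *	{
 *		int i;
 *
 *		for (i = 0; i < cnt; i++) {
 *			store[i] = baz_backend_get(arg, flags);
 *			if (store[i] == NULL)
 *				break;
 *		}
 *		return (i);
 *	}
 *
 *	static void
 *	baz_release(void *arg, void **store, int cnt)
 *	{
 *		int i;
 *
 *		for (i = 0; i < cnt; i++)
 *			baz_backend_put(arg, store[i]);
 *	}
 *
 *	zone = uma_zcache_create("baz", size, NULL, NULL, NULL, NULL,
 *	    baz_import, baz_release, arg, 0);
 */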
22630095a784SJeff Roberson 
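/*
 * Lock two zones in a fixed order, by ascending zone address, so that
 * concurrent lockers of the same pair cannot deadlock.  MTX_DUPOK is
 * required because both locks belong to the same lock class.
 */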
2264e20a199fSJeff Roberson static void
2265e20a199fSJeff Roberson zone_lock_pair(uma_zone_t a, uma_zone_t b)
2266e20a199fSJeff Roberson {
2267e20a199fSJeff Roberson 	if (a < b) {
2268e20a199fSJeff Roberson 		ZONE_LOCK(a);
2269af526374SJeff Roberson 		mtx_lock_flags(b->uz_lockptr, MTX_DUPOK);
2270e20a199fSJeff Roberson 	} else {
2271e20a199fSJeff Roberson 		ZONE_LOCK(b);
2272af526374SJeff Roberson 		mtx_lock_flags(a->uz_lockptr, MTX_DUPOK);
2273e20a199fSJeff Roberson 	}
2274e20a199fSJeff Roberson }
2275e20a199fSJeff Roberson 
2276e20a199fSJeff Roberson static void
2277e20a199fSJeff Roberson zone_unlock_pair(uma_zone_t a, uma_zone_t b)
2278e20a199fSJeff Roberson {
2279e20a199fSJeff Roberson 
2280e20a199fSJeff Roberson 	ZONE_UNLOCK(a);
2281e20a199fSJeff Roberson 	ZONE_UNLOCK(b);
2282e20a199fSJeff Roberson }
2283e20a199fSJeff Roberson 
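/*
 * Attach the first keg of 'master' to a secondary zone, allowing the
 * zone to satisfy allocations from multiple kegs.  Both zones must use
 * vtoslab() and their items must be of equal size.  Returns 0 on
 * success, or EINVAL/E2BIG if a constraint is violated.
 */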
2284e20a199fSJeff Roberson int
2285e20a199fSJeff Roberson uma_zsecond_add(uma_zone_t zone, uma_zone_t master)
2286e20a199fSJeff Roberson {
2287e20a199fSJeff Roberson 	uma_klink_t klink;
2288e20a199fSJeff Roberson 	uma_klink_t kl;
2289e20a199fSJeff Roberson 	int error;
2290e20a199fSJeff Roberson 
2291e20a199fSJeff Roberson 	error = 0;
2292e20a199fSJeff Roberson 	klink = malloc(sizeof(*klink), M_TEMP, M_WAITOK | M_ZERO);
2293e20a199fSJeff Roberson 
2294e20a199fSJeff Roberson 	zone_lock_pair(zone, master);
2295e20a199fSJeff Roberson 	/*
2296e20a199fSJeff Roberson 	 * zone must use vtoslab() to resolve objects and must already be
2297e20a199fSJeff Roberson 	 * a secondary.
2298e20a199fSJeff Roberson 	 */
2299e20a199fSJeff Roberson 	if ((zone->uz_flags & (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY))
2300e20a199fSJeff Roberson 	    != (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) {
2301e20a199fSJeff Roberson 		error = EINVAL;
2302e20a199fSJeff Roberson 		goto out;
2303e20a199fSJeff Roberson 	}
2304e20a199fSJeff Roberson 	/*
2305e20a199fSJeff Roberson 	 * The new master must also use vtoslab().
2306e20a199fSJeff Roberson 	 */
2307e20a199fSJeff Roberson 	if ((master->uz_flags & UMA_ZONE_VTOSLAB) != UMA_ZONE_VTOSLAB) {
2308e20a199fSJeff Roberson 		error = EINVAL;
2309e20a199fSJeff Roberson 		goto out;
2310e20a199fSJeff Roberson 	}
2311cfcae3f8SGleb Smirnoff 
2312e20a199fSJeff Roberson 	/*
2313e20a199fSJeff Roberson 	 * The underlying object must be the same size.  rsize
2314e20a199fSJeff Roberson 	 * may be different.
2315e20a199fSJeff Roberson 	 */
2316e20a199fSJeff Roberson 	if (master->uz_size != zone->uz_size) {
2317e20a199fSJeff Roberson 		error = E2BIG;
2318e20a199fSJeff Roberson 		goto out;
2319e20a199fSJeff Roberson 	}
2320e20a199fSJeff Roberson 	/*
2321e20a199fSJeff Roberson 	 * Put it at the end of the list.
2322e20a199fSJeff Roberson 	 */
2323e20a199fSJeff Roberson 	klink->kl_keg = zone_first_keg(master);
2324e20a199fSJeff Roberson 	LIST_FOREACH(kl, &zone->uz_kegs, kl_link) {
2325e20a199fSJeff Roberson 		if (LIST_NEXT(kl, kl_link) == NULL) {
2326e20a199fSJeff Roberson 			LIST_INSERT_AFTER(kl, klink, kl_link);
2327e20a199fSJeff Roberson 			break;
2328e20a199fSJeff Roberson 		}
2329e20a199fSJeff Roberson 	}
2330e20a199fSJeff Roberson 	klink = NULL;
2331e20a199fSJeff Roberson 	zone->uz_flags |= UMA_ZFLAG_MULTI;
2332e20a199fSJeff Roberson 	zone->uz_slab = zone_fetch_slab_multi;
2333e20a199fSJeff Roberson 
2334e20a199fSJeff Roberson out:
2335e20a199fSJeff Roberson 	zone_unlock_pair(zone, master);
2336e20a199fSJeff Roberson 	if (klink != NULL)
2337e20a199fSJeff Roberson 		free(klink, M_TEMP);
2338e20a199fSJeff Roberson 
2339e20a199fSJeff Roberson 	return (error);
2340e20a199fSJeff Roberson }
2341e20a199fSJeff Roberson 
23438355f576SJeff Roberson /* See uma.h */
23449c2cd7e5SJeff Roberson void
23459c2cd7e5SJeff Roberson uma_zdestroy(uma_zone_t zone)
23469c2cd7e5SJeff Roberson {
2347f4ff923bSRobert Watson 
234895c4bf75SKonstantin Belousov 	sx_slock(&uma_drain_lock);
23490095a784SJeff Roberson 	zone_free_item(zones, zone, NULL, SKIP_NONE);
235095c4bf75SKonstantin Belousov 	sx_sunlock(&uma_drain_lock);
23519c2cd7e5SJeff Roberson }
23529c2cd7e5SJeff Roberson 
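/*
 * Block until the zone can satisfy an allocation, by performing one
 * M_WAITOK allocation and immediately freeing the item.
 */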
23538d6fbbb8SJeff Roberson void
23548d6fbbb8SJeff Roberson uma_zwait(uma_zone_t zone)
23558d6fbbb8SJeff Roberson {
23568d6fbbb8SJeff Roberson 	void *item;
23578d6fbbb8SJeff Roberson 
23588d6fbbb8SJeff Roberson 	item = uma_zalloc_arg(zone, NULL, M_WAITOK);
23598d6fbbb8SJeff Roberson 	uma_zfree(zone, item);
23608d6fbbb8SJeff Roberson }
23618d6fbbb8SJeff Roberson 
23624e180881SMateusz Guzik void *
23634e180881SMateusz Guzik uma_zalloc_pcpu_arg(uma_zone_t zone, void *udata, int flags)
23644e180881SMateusz Guzik {
23654e180881SMateusz Guzik 	void *item;
2366b4799947SRuslan Bukin #ifdef SMP
23674e180881SMateusz Guzik 	int i;
23684e180881SMateusz Guzik 
23694e180881SMateusz Guzik 	MPASS(zone->uz_flags & UMA_ZONE_PCPU);
2370b4799947SRuslan Bukin #endif
23714e180881SMateusz Guzik 	item = uma_zalloc_arg(zone, udata, flags & ~M_ZERO);
23724e180881SMateusz Guzik 	if (item != NULL && (flags & M_ZERO)) {
2373b4799947SRuslan Bukin #ifdef SMP
2374013072f0SMark Johnston 		for (i = 0; i <= mp_maxid; i++)
23754e180881SMateusz Guzik 			bzero(zpcpu_get_cpu(item, i), zone->uz_size);
2376b4799947SRuslan Bukin #else
2377b4799947SRuslan Bukin 		bzero(item, zone->uz_size);
2378b4799947SRuslan Bukin #endif
23794e180881SMateusz Guzik 	}
23804e180881SMateusz Guzik 	return (item);
23814e180881SMateusz Guzik }
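
/*
 * Example of a per-CPU zone; the "foo" names are hypothetical.  Each
 * allocated item is really one copy per CPU, reached with
 * zpcpu_get_cpu():
 *
 *	zone = uma_zcreate("foo_pcpu", sizeof(uint64_t), NULL, NULL,
 *	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_PCPU);
 *	counters = uma_zalloc_pcpu_arg(zone, NULL, M_WAITOK | M_ZERO);
 *	...
 *	count = *(uint64_t *)zpcpu_get_cpu(counters, cpu);
 *	...
 *	uma_zfree_pcpu_arg(zone, counters, NULL);
 */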
23824e180881SMateusz Guzik 
23834e180881SMateusz Guzik /*
23844e180881SMateusz Guzik  * A stub while both regular and pcpu cases are identical.
23854e180881SMateusz Guzik  */
23864e180881SMateusz Guzik void
23874e180881SMateusz Guzik uma_zfree_pcpu_arg(uma_zone_t zone, void *item, void *udata)
23884e180881SMateusz Guzik {
23894e180881SMateusz Guzik 
2390c5b7751fSIan Lepore #ifdef SMP
23914e180881SMateusz Guzik 	MPASS(zone->uz_flags & UMA_ZONE_PCPU);
2392c5b7751fSIan Lepore #endif
23934e180881SMateusz Guzik 	uma_zfree_arg(zone, item, udata);
23944e180881SMateusz Guzik }
23954e180881SMateusz Guzik 
23969c2cd7e5SJeff Roberson /* See uma.h */
23978355f576SJeff Roberson void *
23982cc35ff9SJeff Roberson uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
23998355f576SJeff Roberson {
2400ab3185d1SJeff Roberson 	uma_zone_domain_t zdom;
24018355f576SJeff Roberson 	uma_bucket_t bucket;
2402ab3185d1SJeff Roberson 	uma_cache_t cache;
2403ab3185d1SJeff Roberson 	void *item;
2404ab3185d1SJeff Roberson 	int cpu, domain, lockfail;
2405c5deaf04SGleb Smirnoff #ifdef INVARIANTS
2406c5deaf04SGleb Smirnoff 	bool skipdbg;
2407c5deaf04SGleb Smirnoff #endif
24088355f576SJeff Roberson 
2409e866d8f0SMark Murray 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
241019fa89e9SMark Murray 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
241110cb2424SMark Murray 
24128355f576SJeff Roberson 	/* This is the fast path allocation */
24131431a748SGleb Smirnoff 	CTR4(KTR_UMA, "uma_zalloc_arg thread %x zone %s(%p) flags %d",
24141431a748SGleb Smirnoff 	    curthread, zone->uz_name, zone, flags);
2415a553d4b8SJeff Roberson 
2416635fd505SRobert Watson 	if (flags & M_WAITOK) {
2417b23f72e9SBrian Feldman 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2418635fd505SRobert Watson 		    "uma_zalloc_arg: zone \"%s\"", zone->uz_name);
24194c1cc01cSJohn Baldwin 	}
24200766f278SJonathan T. Looney 	KASSERT((flags & M_EXEC) == 0, ("uma_zalloc_arg: called with M_EXEC"));
2421d9e2e68dSMark Johnston 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
24221067a2baSJonathan T. Looney 	    ("uma_zalloc_arg: called with spinlock or critical section held"));
2423ea99223eSMateusz Guzik 	if (zone->uz_flags & UMA_ZONE_PCPU)
2424b8af2820SMateusz Guzik 		KASSERT((flags & M_ZERO) == 0, ("allocating from a pcpu zone "
2425b8af2820SMateusz Guzik 		    "with M_ZERO passed"));
24261067a2baSJonathan T. Looney 
24278d689e04SGleb Smirnoff #ifdef DEBUG_MEMGUARD
24288d689e04SGleb Smirnoff 	if (memguard_cmp_zone(zone)) {
24298d689e04SGleb Smirnoff 		item = memguard_alloc(zone->uz_size, flags);
24308d689e04SGleb Smirnoff 		if (item != NULL) {
24318d689e04SGleb Smirnoff 			if (zone->uz_init != NULL &&
24328d689e04SGleb Smirnoff 			    zone->uz_init(item, zone->uz_size, flags) != 0)
24338d689e04SGleb Smirnoff 				return (NULL);
24348d689e04SGleb Smirnoff 			if (zone->uz_ctor != NULL &&
2435fc03d22bSJeff Roberson 			    zone->uz_ctor(item, zone->uz_size, udata,
2436fc03d22bSJeff Roberson 			    flags) != 0) {
24378d689e04SGleb Smirnoff 			    	zone->uz_fini(item, zone->uz_size);
24388d689e04SGleb Smirnoff 				return (NULL);
24398d689e04SGleb Smirnoff 			}
24408d689e04SGleb Smirnoff 			return (item);
24418d689e04SGleb Smirnoff 		}
24428d689e04SGleb Smirnoff 		/* This is unfortunate but should not be fatal. */
24438d689e04SGleb Smirnoff 	}
24448d689e04SGleb Smirnoff #endif
24455d1ae027SRobert Watson 	/*
24465d1ae027SRobert Watson 	 * If possible, allocate from the per-CPU cache.  There are two
24475d1ae027SRobert Watson 	 * requirements for safe access to the per-CPU cache: (1) the thread
24485d1ae027SRobert Watson 	 * accessing the cache must not be preempted or yield during access,
24495d1ae027SRobert Watson 	 * and (2) the thread must not migrate CPUs without switching which
24505d1ae027SRobert Watson 	 * cache it accesses.  We rely on a critical section to prevent
24515d1ae027SRobert Watson 	 * preemption and migration.  We release the critical section in
24525d1ae027SRobert Watson 	 * order to acquire the zone mutex if we are unable to allocate from
24535d1ae027SRobert Watson 	 * the current cache; when we re-acquire the critical section, we
24545d1ae027SRobert Watson 	 * must detect and handle migration if it has occurred.
24555d1ae027SRobert Watson 	 */
245681c0d72cSGleb Smirnoff zalloc_restart:
24575d1ae027SRobert Watson 	critical_enter();
24585d1ae027SRobert Watson 	cpu = curcpu;
24598355f576SJeff Roberson 	cache = &zone->uz_cpu[cpu];
24608355f576SJeff Roberson 
24618355f576SJeff Roberson zalloc_start:
24628355f576SJeff Roberson 	bucket = cache->uc_allocbucket;
2463fc03d22bSJeff Roberson 	if (bucket != NULL && bucket->ub_cnt > 0) {
2464cae33c14SJeff Roberson 		bucket->ub_cnt--;
2465cae33c14SJeff Roberson 		item = bucket->ub_bucket[bucket->ub_cnt];
24668355f576SJeff Roberson #ifdef INVARIANTS
2467cae33c14SJeff Roberson 		bucket->ub_bucket[bucket->ub_cnt] = NULL;
24688355f576SJeff Roberson #endif
2469fc03d22bSJeff Roberson 		KASSERT(item != NULL, ("uma_zalloc: Bucket pointer mangled."));
24708355f576SJeff Roberson 		cache->uc_allocs++;
24715d1ae027SRobert Watson 		critical_exit();
2472c5deaf04SGleb Smirnoff #ifdef INVARIANTS
2473c5deaf04SGleb Smirnoff 		skipdbg = uma_dbg_zskip(zone, item);
2474c5deaf04SGleb Smirnoff #endif
2475fc03d22bSJeff Roberson 		if (zone->uz_ctor != NULL &&
2476c5deaf04SGleb Smirnoff #ifdef INVARIANTS
2477c5deaf04SGleb Smirnoff 		    (!skipdbg || zone->uz_ctor != trash_ctor ||
2478c5deaf04SGleb Smirnoff 		    zone->uz_dtor != trash_dtor) &&
2479c5deaf04SGleb Smirnoff #endif
2480fc03d22bSJeff Roberson 		    zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
24810095a784SJeff Roberson 			atomic_add_long(&zone->uz_fails, 1);
2482fc03d22bSJeff Roberson 			zone_free_item(zone, item, udata, SKIP_DTOR);
2483b23f72e9SBrian Feldman 			return (NULL);
2484b23f72e9SBrian Feldman 		}
2485ef72505eSJeff Roberson #ifdef INVARIANTS
2486c5deaf04SGleb Smirnoff 		if (!skipdbg)
2487ef72505eSJeff Roberson 			uma_dbg_alloc(zone, NULL, item);
2488ef72505eSJeff Roberson #endif
24892cc35ff9SJeff Roberson 		if (flags & M_ZERO)
249048343a2fSGleb Smirnoff 			uma_zero_item(item, zone);
24918355f576SJeff Roberson 		return (item);
2492fc03d22bSJeff Roberson 	}
2493fc03d22bSJeff Roberson 
24948355f576SJeff Roberson 	/*
24958355f576SJeff Roberson 	 * We have run out of items in our alloc bucket.
24968355f576SJeff Roberson 	 * See if we can switch with our free bucket.
24978355f576SJeff Roberson 	 */
2498b983089aSJeff Roberson 	bucket = cache->uc_freebucket;
2499fc03d22bSJeff Roberson 	if (bucket != NULL && bucket->ub_cnt > 0) {
25001431a748SGleb Smirnoff 		CTR2(KTR_UMA,
25011431a748SGleb Smirnoff 		    "uma_zalloc: zone %s(%p) swapping empty with alloc",
25021431a748SGleb Smirnoff 		    zone->uz_name, zone);
25038355f576SJeff Roberson 		cache->uc_freebucket = cache->uc_allocbucket;
2504b983089aSJeff Roberson 		cache->uc_allocbucket = bucket;
25058355f576SJeff Roberson 		goto zalloc_start;
25068355f576SJeff Roberson 	}
2507fc03d22bSJeff Roberson 
2508fc03d22bSJeff Roberson 	/*
2509fc03d22bSJeff Roberson 	 * Discard any empty allocation bucket while we hold no locks.
2510fc03d22bSJeff Roberson 	 */
2511fc03d22bSJeff Roberson 	bucket = cache->uc_allocbucket;
2512fc03d22bSJeff Roberson 	cache->uc_allocbucket = NULL;
2513fc03d22bSJeff Roberson 	critical_exit();
2514fc03d22bSJeff Roberson 	if (bucket != NULL)
25156fd34d6fSJeff Roberson 		bucket_free(zone, bucket, udata);
2516fc03d22bSJeff Roberson 
251730c5525bSAndrew Gallatin 	if (zone->uz_flags & UMA_ZONE_NUMA) {
2518ab3185d1SJeff Roberson 		domain = PCPU_GET(domain);
251930c5525bSAndrew Gallatin 		if (VM_DOMAIN_EMPTY(domain))
252030c5525bSAndrew Gallatin 			domain = UMA_ANYDOMAIN;
252130c5525bSAndrew Gallatin 	} else
2522ab3185d1SJeff Roberson 		domain = UMA_ANYDOMAIN;
2523ab3185d1SJeff Roberson 
2524fc03d22bSJeff Roberson 	/* Short-circuit for zones without buckets and low memory. */
2525fc03d22bSJeff Roberson 	if (zone->uz_count == 0 || bucketdisable)
2526fc03d22bSJeff Roberson 		goto zalloc_item;
2527fc03d22bSJeff Roberson 
25285d1ae027SRobert Watson 	/*
25295d1ae027SRobert Watson 	 * The attempt to retrieve the item from the per-CPU cache failed, so
25305d1ae027SRobert Watson 	 * we must go back to the zone.  This requires the zone lock, so we
25315d1ae027SRobert Watson 	 * must drop the critical section, then re-acquire it when we go back
25325d1ae027SRobert Watson 	 * to the cache.  Since the critical section is released, we may be
25335d1ae027SRobert Watson 	 * preempted or migrate.  As such, make sure not to maintain any
25345d1ae027SRobert Watson 	 * thread-local state specific to the cache from prior to releasing
25355d1ae027SRobert Watson 	 * the critical section.
25365d1ae027SRobert Watson 	 */
2537fc03d22bSJeff Roberson 	lockfail = 0;
2538fc03d22bSJeff Roberson 	if (ZONE_TRYLOCK(zone) == 0) {
2539fc03d22bSJeff Roberson 		/* Record contention to size the buckets. */
2540a553d4b8SJeff Roberson 		ZONE_LOCK(zone);
2541fc03d22bSJeff Roberson 		lockfail = 1;
2542fc03d22bSJeff Roberson 	}
25435d1ae027SRobert Watson 	critical_enter();
25445d1ae027SRobert Watson 	cpu = curcpu;
25455d1ae027SRobert Watson 	cache = &zone->uz_cpu[cpu];
25465d1ae027SRobert Watson 
2547fc03d22bSJeff Roberson 	/* See if we lost the race to fill the cache. */
2548fc03d22bSJeff Roberson 	if (cache->uc_allocbucket != NULL) {
2549fc03d22bSJeff Roberson 		ZONE_UNLOCK(zone);
2550fc03d22bSJeff Roberson 		goto zalloc_start;
2551a553d4b8SJeff Roberson 	}
25528355f576SJeff Roberson 
2553fc03d22bSJeff Roberson 	/*
2554fc03d22bSJeff Roberson 	 * Check the zone's cache of buckets.
2555fc03d22bSJeff Roberson 	 */
2556ab3185d1SJeff Roberson 	if (domain == UMA_ANYDOMAIN)
2557ab3185d1SJeff Roberson 		zdom = &zone->uz_domain[0];
2558ab3185d1SJeff Roberson 	else
2559ab3185d1SJeff Roberson 		zdom = &zone->uz_domain[domain];
25600f9b7bf3SMark Johnston 	if ((bucket = zone_try_fetch_bucket(zone, zdom, true)) != NULL) {
2561cae33c14SJeff Roberson 		KASSERT(bucket->ub_cnt != 0,
2562a553d4b8SJeff Roberson 		    ("uma_zalloc_arg: Returning an empty bucket."));
2563a553d4b8SJeff Roberson 		cache->uc_allocbucket = bucket;
2564a553d4b8SJeff Roberson 		ZONE_UNLOCK(zone);
25658355f576SJeff Roberson 		goto zalloc_start;
2566a553d4b8SJeff Roberson 	}
25675d1ae027SRobert Watson 	/* We are no longer associated with this CPU. */
25685d1ae027SRobert Watson 	critical_exit();
2569bbee39c6SJeff Roberson 
2570fc03d22bSJeff Roberson 	/*
2571fc03d22bSJeff Roberson 	 * We bump the uz count when the cache size is insufficient to
2572fc03d22bSJeff Roberson 	 * handle the working set.
2573fc03d22bSJeff Roberson 	 */
25746fd34d6fSJeff Roberson 	if (lockfail && zone->uz_count < BUCKET_MAX)
2575a553d4b8SJeff Roberson 		zone->uz_count++;
2576fc03d22bSJeff Roberson 	ZONE_UNLOCK(zone);
2577099a0e58SBosko Milekic 
25788355f576SJeff Roberson 	/*
2579a553d4b8SJeff Roberson 	 * Now let's just fill a bucket and put it on the free list.  If that
2580763df3ecSPedro F. Giffuni 	 * works we'll restart the allocation from the beginning and it
2581fc03d22bSJeff Roberson 	 * will use the just filled bucket.
2582bbee39c6SJeff Roberson 	 */
2583ab3185d1SJeff Roberson 	bucket = zone_alloc_bucket(zone, udata, domain, flags);
25841431a748SGleb Smirnoff 	CTR3(KTR_UMA, "uma_zalloc: zone %s(%p) bucket zone returned %p",
25851431a748SGleb Smirnoff 	    zone->uz_name, zone, bucket);
2586fc03d22bSJeff Roberson 	if (bucket != NULL) {
2587fc03d22bSJeff Roberson 		ZONE_LOCK(zone);
2588fc03d22bSJeff Roberson 		critical_enter();
2589fc03d22bSJeff Roberson 		cpu = curcpu;
2590fc03d22bSJeff Roberson 		cache = &zone->uz_cpu[cpu];
25910f9b7bf3SMark Johnston 
2592fc03d22bSJeff Roberson 		/*
2593fc03d22bSJeff Roberson 		 * See if we lost the race or were migrated.  Cache the
2594fc03d22bSJeff Roberson 		 * initialized bucket to make this less likely or claim
2595fc03d22bSJeff Roberson 		 * the memory directly.
2596fc03d22bSJeff Roberson 		 */
259781c0d72cSGleb Smirnoff 		if (cache->uc_allocbucket == NULL &&
259881c0d72cSGleb Smirnoff 		    ((zone->uz_flags & UMA_ZONE_NUMA) == 0 ||
259981c0d72cSGleb Smirnoff 		    domain == PCPU_GET(domain))) {
2600ab3185d1SJeff Roberson 			cache->uc_allocbucket = bucket;
26010f9b7bf3SMark Johnston 			zdom->uzd_imax += bucket->ub_cnt;
260281c0d72cSGleb Smirnoff 		} else if ((zone->uz_flags & UMA_ZONE_NOBUCKETCACHE) != 0) {
260381c0d72cSGleb Smirnoff 			critical_exit();
260481c0d72cSGleb Smirnoff 			ZONE_UNLOCK(zone);
260581c0d72cSGleb Smirnoff 			bucket_drain(zone, bucket);
260681c0d72cSGleb Smirnoff 			bucket_free(zone, bucket, udata);
260781c0d72cSGleb Smirnoff 			goto zalloc_restart;
260881c0d72cSGleb Smirnoff 		} else
26090f9b7bf3SMark Johnston 			zone_put_bucket(zone, zdom, bucket, false);
2610bbee39c6SJeff Roberson 		ZONE_UNLOCK(zone);
2611fc03d22bSJeff Roberson 		goto zalloc_start;
2612bbee39c6SJeff Roberson 	}
2613fc03d22bSJeff Roberson 
2614bbee39c6SJeff Roberson 	/*
2615bbee39c6SJeff Roberson 	 * We may not be able to get a bucket so return an actual item.
2616bbee39c6SJeff Roberson 	 */
2617fc03d22bSJeff Roberson zalloc_item:
2618ab3185d1SJeff Roberson 	item = zone_alloc_item(zone, udata, domain, flags);
2619fc03d22bSJeff Roberson 
2620e20a199fSJeff Roberson 	return (item);
2621bbee39c6SJeff Roberson }
2622bbee39c6SJeff Roberson 
2623ab3185d1SJeff Roberson void *
2624ab3185d1SJeff Roberson uma_zalloc_domain(uma_zone_t zone, void *udata, int domain, int flags)
2625bbee39c6SJeff Roberson {
2626ab3185d1SJeff Roberson 
2627ab3185d1SJeff Roberson 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
262819fa89e9SMark Murray 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
2629ab3185d1SJeff Roberson 
2630ab3185d1SJeff Roberson 	/* This is the fast path allocation */
2631ab3185d1SJeff Roberson 	CTR5(KTR_UMA,
2632ab3185d1SJeff Roberson 	    "uma_zalloc_domain thread %x zone %s(%p) domain %d flags %d",
2633ab3185d1SJeff Roberson 	    curthread, zone->uz_name, zone, domain, flags);
2634ab3185d1SJeff Roberson 
2635ab3185d1SJeff Roberson 	if (flags & M_WAITOK) {
2636ab3185d1SJeff Roberson 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2637ab3185d1SJeff Roberson 		    "uma_zalloc_domain: zone \"%s\"", zone->uz_name);
2638ab3185d1SJeff Roberson 	}
2639ab3185d1SJeff Roberson 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
2640ab3185d1SJeff Roberson 	    ("uma_zalloc_domain: called with spinlock or critical section held"));
2641ab3185d1SJeff Roberson 
2642ab3185d1SJeff Roberson 	return (zone_alloc_item(zone, udata, domain, flags));
2643ab3185d1SJeff Roberson }
2644ab3185d1SJeff Roberson 
2645ab3185d1SJeff Roberson /*
2646ab3185d1SJeff Roberson  * Find a slab with some space.  Prefer slabs that are partially used over
2647ab3185d1SJeff Roberson  * those that are entirely free.  This helps to reduce fragmentation.
2648ab3185d1SJeff Roberson  *
2649ab3185d1SJeff Roberson  * If 'rr' is 1, search all domains starting from 'domain'.  Otherwise check
2650ab3185d1SJeff Roberson  * only 'domain'.
2651ab3185d1SJeff Roberson  */
2652ab3185d1SJeff Roberson static uma_slab_t
2653194a979eSMark Johnston keg_first_slab(uma_keg_t keg, int domain, bool rr)
2654ab3185d1SJeff Roberson {
2655ab3185d1SJeff Roberson 	uma_domain_t dom;
2656bbee39c6SJeff Roberson 	uma_slab_t slab;
2657ab3185d1SJeff Roberson 	int start;
2658ab3185d1SJeff Roberson 
2659ab3185d1SJeff Roberson 	KASSERT(domain >= 0 && domain < vm_ndomains,
2660ab3185d1SJeff Roberson 	    ("keg_first_slab: domain %d out of range", domain));
2661ab3185d1SJeff Roberson 
2662ab3185d1SJeff Roberson 	slab = NULL;
2663ab3185d1SJeff Roberson 	start = domain;
2664ab3185d1SJeff Roberson 	do {
2665ab3185d1SJeff Roberson 		dom = &keg->uk_domain[domain];
2666ab3185d1SJeff Roberson 		if (!LIST_EMPTY(&dom->ud_part_slab))
2667ab3185d1SJeff Roberson 			return (LIST_FIRST(&dom->ud_part_slab));
2668ab3185d1SJeff Roberson 		if (!LIST_EMPTY(&dom->ud_free_slab)) {
2669ab3185d1SJeff Roberson 			slab = LIST_FIRST(&dom->ud_free_slab);
2670ab3185d1SJeff Roberson 			LIST_REMOVE(slab, us_link);
2671ab3185d1SJeff Roberson 			LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
2672ab3185d1SJeff Roberson 			return (slab);
2673ab3185d1SJeff Roberson 		}
2674ab3185d1SJeff Roberson 		if (rr)
2675ab3185d1SJeff Roberson 			domain = (domain + 1) % vm_ndomains;
2676ab3185d1SJeff Roberson 	} while (domain != start);
2677ab3185d1SJeff Roberson 
2678ab3185d1SJeff Roberson 	return (NULL);
2679ab3185d1SJeff Roberson }
2680ab3185d1SJeff Roberson 
2681ab3185d1SJeff Roberson static uma_slab_t
2682194a979eSMark Johnston keg_fetch_free_slab(uma_keg_t keg, int domain, bool rr, int flags)
2683ab3185d1SJeff Roberson {
2684194a979eSMark Johnston 	uint32_t reserve;
2685099a0e58SBosko Milekic 
2686e20a199fSJeff Roberson 	mtx_assert(&keg->uk_lock, MA_OWNED);
2687194a979eSMark Johnston 
2688194a979eSMark Johnston 	reserve = (flags & M_USE_RESERVE) != 0 ? 0 : keg->uk_reserve;
2689194a979eSMark Johnston 	if (keg->uk_free <= reserve)
2690194a979eSMark Johnston 		return (NULL);
2691194a979eSMark Johnston 	return (keg_first_slab(keg, domain, rr));
2692194a979eSMark Johnston }
2693194a979eSMark Johnston 
2694194a979eSMark Johnston static uma_slab_t
2695194a979eSMark Johnston keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int rdomain, const int flags)
2696194a979eSMark Johnston {
2697194a979eSMark Johnston 	struct vm_domainset_iter di;
2698194a979eSMark Johnston 	uma_domain_t dom;
2699194a979eSMark Johnston 	uma_slab_t slab;
2700194a979eSMark Johnston 	int aflags, domain;
2701194a979eSMark Johnston 	bool rr;
2702194a979eSMark Johnston 
2703194a979eSMark Johnston restart:
2704194a979eSMark Johnston 	mtx_assert(&keg->uk_lock, MA_OWNED);
2705bbee39c6SJeff Roberson 
2706bbee39c6SJeff Roberson 	/*
2707194a979eSMark Johnston 	 * Use the keg's policy if upper layers haven't already specified a
2708194a979eSMark Johnston 	 * domain (as happens with first-touch zones).
2709194a979eSMark Johnston 	 *
2710194a979eSMark Johnston 	 * To avoid races we run the iterator with the keg lock held, but that
2711194a979eSMark Johnston 	 * means that we cannot allow the vm_domainset layer to sleep.  Thus,
2712194a979eSMark Johnston 	 * clear M_WAITOK and handle low memory conditions locally.
2713bbee39c6SJeff Roberson 	 */
2714ab3185d1SJeff Roberson 	rr = rdomain == UMA_ANYDOMAIN;
2715ab3185d1SJeff Roberson 	if (rr) {
2716194a979eSMark Johnston 		aflags = (flags & ~M_WAITOK) | M_NOWAIT;
2717194a979eSMark Johnston 		vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain,
2718194a979eSMark Johnston 		    &aflags);
2719194a979eSMark Johnston 	} else {
2720194a979eSMark Johnston 		aflags = flags;
2721194a979eSMark Johnston 		domain = rdomain;
2722194a979eSMark Johnston 	}
2723ab3185d1SJeff Roberson 
2724194a979eSMark Johnston 	for (;;) {
2725194a979eSMark Johnston 		slab = keg_fetch_free_slab(keg, domain, rr, flags);
2726194a979eSMark Johnston 		if (slab != NULL) {
2727e20a199fSJeff Roberson 			MPASS(slab->us_keg == keg);
2728bbee39c6SJeff Roberson 			return (slab);
2729bbee39c6SJeff Roberson 		}
2730bbee39c6SJeff Roberson 
2731bbee39c6SJeff Roberson 		/*
2732bbee39c6SJeff Roberson 		 * M_NOVM means don't ask at all!
2733bbee39c6SJeff Roberson 		 */
2734bbee39c6SJeff Roberson 		if (flags & M_NOVM)
2735bbee39c6SJeff Roberson 			break;
2736bbee39c6SJeff Roberson 
2737e20a199fSJeff Roberson 		if (keg->uk_maxpages && keg->uk_pages >= keg->uk_maxpages) {
2738099a0e58SBosko Milekic 			keg->uk_flags |= UMA_ZFLAG_FULL;
2739e20a199fSJeff Roberson 			/*
2740e20a199fSJeff Roberson 			 * If this is not a multi-zone, set the FULL bit.
2741e20a199fSJeff Roberson 			 * Otherwise slab_multi() takes care of it.
2742e20a199fSJeff Roberson 			 */
27432f891cd5SPawel Jakub Dawidek 			if ((zone->uz_flags & UMA_ZFLAG_MULTI) == 0) {
2744e20a199fSJeff Roberson 				zone->uz_flags |= UMA_ZFLAG_FULL;
27452f891cd5SPawel Jakub Dawidek 				zone_log_warning(zone);
274654503a13SJonathan T. Looney 				zone_maxaction(zone);
27472f891cd5SPawel Jakub Dawidek 			}
2748ebc85edfSJeff Roberson 			if (flags & M_NOWAIT)
2749ab3185d1SJeff Roberson 				return (NULL);
2750c288b548SEitan Adler 			zone->uz_sleeps++;
2751e20a199fSJeff Roberson 			msleep(keg, &keg->uk_lock, PVM, "keglimit", 0);
2752bbee39c6SJeff Roberson 			continue;
2753bbee39c6SJeff Roberson 		}
2754194a979eSMark Johnston 		slab = keg_alloc_slab(keg, zone, domain, aflags);
2755bbee39c6SJeff Roberson 		/*
2756bbee39c6SJeff Roberson 		 * If we got a slab here it's safe to mark it partially used
2757bbee39c6SJeff Roberson 		 * and return.  We assume that the caller is going to remove
2758bbee39c6SJeff Roberson 		 * at least one item.
2759bbee39c6SJeff Roberson 		 */
2760bbee39c6SJeff Roberson 		if (slab) {
2761e20a199fSJeff Roberson 			MPASS(slab->us_keg == keg);
2762ab3185d1SJeff Roberson 			dom = &keg->uk_domain[slab->us_domain];
2763ab3185d1SJeff Roberson 			LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
2764bbee39c6SJeff Roberson 			return (slab);
2765bbee39c6SJeff Roberson 		}
2766194a979eSMark Johnston 		KEG_LOCK(keg);
2767194a979eSMark Johnston 		if (rr && vm_domainset_iter_policy(&di, &domain) != 0) {
2768194a979eSMark Johnston 			if ((flags & M_WAITOK) != 0) {
2769194a979eSMark Johnston 				KEG_UNLOCK(keg);
2770194a979eSMark Johnston 				vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask);
2771194a979eSMark Johnston 				KEG_LOCK(keg);
2772194a979eSMark Johnston 				goto restart;
277330c5525bSAndrew Gallatin 			}
2774194a979eSMark Johnston 			break;
2775194a979eSMark Johnston 		}
2776ab3185d1SJeff Roberson 	}
2777ab3185d1SJeff Roberson 
2778bbee39c6SJeff Roberson 	/*
2779bbee39c6SJeff Roberson 	 * We might not have been able to get a slab but another cpu
2780bbee39c6SJeff Roberson 	 * could have while we were unlocked.  Check again before we
2781bbee39c6SJeff Roberson 	 * fail.
2782bbee39c6SJeff Roberson 	 */
2783194a979eSMark Johnston 	if ((slab = keg_fetch_free_slab(keg, domain, rr, flags)) != NULL) {
2784ab3185d1SJeff Roberson 		MPASS(slab->us_keg == keg);
2785bbee39c6SJeff Roberson 		return (slab);
2786bbee39c6SJeff Roberson 	}
2787ab3185d1SJeff Roberson 	return (NULL);
2788ab3185d1SJeff Roberson }
2789bbee39c6SJeff Roberson 
2790e20a199fSJeff Roberson static uma_slab_t
2791ab3185d1SJeff Roberson zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int domain, int flags)
2792e20a199fSJeff Roberson {
2793e20a199fSJeff Roberson 	uma_slab_t slab;
2794e20a199fSJeff Roberson 
2795af526374SJeff Roberson 	if (keg == NULL) {
2796e20a199fSJeff Roberson 		keg = zone_first_keg(zone);
2797af526374SJeff Roberson 		KEG_LOCK(keg);
2798af526374SJeff Roberson 	}
2799e20a199fSJeff Roberson 
2800e20a199fSJeff Roberson 	for (;;) {
2801ab3185d1SJeff Roberson 		slab = keg_fetch_slab(keg, zone, domain, flags);
2802e20a199fSJeff Roberson 		if (slab)
2803e20a199fSJeff Roberson 			return (slab);
2804e20a199fSJeff Roberson 		if (flags & (M_NOWAIT | M_NOVM))
2805e20a199fSJeff Roberson 			break;
2806e20a199fSJeff Roberson 	}
2807af526374SJeff Roberson 	KEG_UNLOCK(keg);
2808e20a199fSJeff Roberson 	return (NULL);
2809e20a199fSJeff Roberson }
2810e20a199fSJeff Roberson 
2811e20a199fSJeff Roberson /*
2812e20a199fSJeff Roberson  * zone_fetch_slab_multi:  Fetches a slab from one available keg.  Returns
2813af526374SJeff Roberson  * with the keg locked.  If NULL is returned, no lock is held.
2814e20a199fSJeff Roberson  *
2815e20a199fSJeff Roberson  * The last pointer is used to seed the search.  It is not required.
2816e20a199fSJeff Roberson  */
2817e20a199fSJeff Roberson static uma_slab_t
2818ab3185d1SJeff Roberson zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int domain, int rflags)
2819e20a199fSJeff Roberson {
2820e20a199fSJeff Roberson 	uma_klink_t klink;
2821e20a199fSJeff Roberson 	uma_slab_t slab;
2822e20a199fSJeff Roberson 	uma_keg_t keg;
2823e20a199fSJeff Roberson 	int flags;
2824e20a199fSJeff Roberson 	int empty;
2825e20a199fSJeff Roberson 	int full;
2826e20a199fSJeff Roberson 
2827e20a199fSJeff Roberson 	/*
2828e20a199fSJeff Roberson 	 * Don't wait on the first pass.  This will skip limit tests
2829e20a199fSJeff Roberson 	 * as well.  We don't want to block if we can find a provider
2830e20a199fSJeff Roberson 	 * without blocking.
2831e20a199fSJeff Roberson 	 */
2832e20a199fSJeff Roberson 	flags = (rflags & ~M_WAITOK) | M_NOWAIT;
2833e20a199fSJeff Roberson 	/*
2834e20a199fSJeff Roberson 	 * Use the last slab allocated as a hint for where to start
2835e20a199fSJeff Roberson 	 * the search.
2836e20a199fSJeff Roberson 	 */
2837af526374SJeff Roberson 	if (last != NULL) {
2838ab3185d1SJeff Roberson 		slab = keg_fetch_slab(last, zone, domain, flags);
2839e20a199fSJeff Roberson 		if (slab)
2840e20a199fSJeff Roberson 			return (slab);
2841af526374SJeff Roberson 		KEG_UNLOCK(last);
2842e20a199fSJeff Roberson 	}
2843e20a199fSJeff Roberson 	/*
2844e20a199fSJeff Roberson 	 * Loop until we have a slab in case of transient failures
2845e20a199fSJeff Roberson 	 * while M_WAITOK is specified.  I'm not sure this is 100%
2846e20a199fSJeff Roberson 	 * required but we've done it for so long now.
2847e20a199fSJeff Roberson 	 */
2848e20a199fSJeff Roberson 	for (;;) {
2849e20a199fSJeff Roberson 		empty = 0;
2850e20a199fSJeff Roberson 		full = 0;
2851e20a199fSJeff Roberson 		/*
2852e20a199fSJeff Roberson 		 * Search the available kegs for slabs.  Be careful to hold the
2853e20a199fSJeff Roberson 		 * correct lock while calling into the keg layer.
2854e20a199fSJeff Roberson 		 */
2855e20a199fSJeff Roberson 		LIST_FOREACH(klink, &zone->uz_kegs, kl_link) {
2856e20a199fSJeff Roberson 			keg = klink->kl_keg;
2857af526374SJeff Roberson 			KEG_LOCK(keg);
2858e20a199fSJeff Roberson 			if ((keg->uk_flags & UMA_ZFLAG_FULL) == 0) {
2859ab3185d1SJeff Roberson 				slab = keg_fetch_slab(keg, zone, domain, flags);
2860e20a199fSJeff Roberson 				if (slab)
2861e20a199fSJeff Roberson 					return (slab);
2862e20a199fSJeff Roberson 			}
2863e20a199fSJeff Roberson 			if (keg->uk_flags & UMA_ZFLAG_FULL)
2864e20a199fSJeff Roberson 				full++;
2865e20a199fSJeff Roberson 			else
2866e20a199fSJeff Roberson 				empty++;
2867af526374SJeff Roberson 			KEG_UNLOCK(keg);
2868e20a199fSJeff Roberson 		}
2869e20a199fSJeff Roberson 		if (rflags & (M_NOWAIT | M_NOVM))
2870e20a199fSJeff Roberson 			break;
2871e20a199fSJeff Roberson 		flags = rflags;
2872e20a199fSJeff Roberson 		/*
2873e20a199fSJeff Roberson 		 * All kegs are full.  XXX We can't atomically check all kegs
2874e20a199fSJeff Roberson 		 * and sleep so just sleep for a short period and retry.
2875e20a199fSJeff Roberson 		 */
2876e20a199fSJeff Roberson 		if (full && !empty) {
2877af526374SJeff Roberson 			ZONE_LOCK(zone);
2878e20a199fSJeff Roberson 			zone->uz_flags |= UMA_ZFLAG_FULL;
2879bf965959SSean Bruno 			zone->uz_sleeps++;
28802f891cd5SPawel Jakub Dawidek 			zone_log_warning(zone);
288154503a13SJonathan T. Looney 			zone_maxaction(zone);
2882af526374SJeff Roberson 			msleep(zone, zone->uz_lockptr, PVM,
2883af526374SJeff Roberson 			    "zonelimit", hz/100);
2884e20a199fSJeff Roberson 			zone->uz_flags &= ~UMA_ZFLAG_FULL;
2885af526374SJeff Roberson 			ZONE_UNLOCK(zone);
2886e20a199fSJeff Roberson 			continue;
2887e20a199fSJeff Roberson 		}
2888e20a199fSJeff Roberson 	}
2889e20a199fSJeff Roberson 	return (NULL);
2890e20a199fSJeff Roberson }
2891e20a199fSJeff Roberson 
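/*
 * Remove one free item from 'slab' and return its address.  The keg
 * lock must be held.  The slab moves to the keg's full list when its
 * last free item is taken.
 */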
2892d56368d7SBosko Milekic static void *
28930095a784SJeff Roberson slab_alloc_item(uma_keg_t keg, uma_slab_t slab)
2894bbee39c6SJeff Roberson {
2895ab3185d1SJeff Roberson 	uma_domain_t dom;
2896bbee39c6SJeff Roberson 	void *item;
289785dcf349SGleb Smirnoff 	uint8_t freei;
2898bbee39c6SJeff Roberson 
28990095a784SJeff Roberson 	MPASS(keg == slab->us_keg);
2900e20a199fSJeff Roberson 	mtx_assert(&keg->uk_lock, MA_OWNED);
2901099a0e58SBosko Milekic 
2902ef72505eSJeff Roberson 	freei = BIT_FFS(SLAB_SETSIZE, &slab->us_free) - 1;
2903ef72505eSJeff Roberson 	BIT_CLR(SLAB_SETSIZE, freei, &slab->us_free);
2904099a0e58SBosko Milekic 	item = slab->us_data + (keg->uk_rsize * freei);
2905bbee39c6SJeff Roberson 	slab->us_freecount--;
2906099a0e58SBosko Milekic 	keg->uk_free--;
2907ef72505eSJeff Roberson 
2908bbee39c6SJeff Roberson 	/* Move this slab to the full list */
2909bbee39c6SJeff Roberson 	if (slab->us_freecount == 0) {
2910bbee39c6SJeff Roberson 		LIST_REMOVE(slab, us_link);
2911ab3185d1SJeff Roberson 		dom = &keg->uk_domain[slab->us_domain];
2912ab3185d1SJeff Roberson 		LIST_INSERT_HEAD(&dom->ud_full_slab, slab, us_link);
2913bbee39c6SJeff Roberson 	}
2914bbee39c6SJeff Roberson 
2915bbee39c6SJeff Roberson 	return (item);
2916bbee39c6SJeff Roberson }
2917bbee39c6SJeff Roberson 
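/*
 * Import up to 'max' items from the keg layer into the 'bucket' array
 * on behalf of a keg-backed zone.  Returns the number of items
 * obtained, which may be fewer than requested if slabs are exhausted
 * or the keg's reserve is reached.
 */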
2918bbee39c6SJeff Roberson static int
2919ab3185d1SJeff Roberson zone_import(uma_zone_t zone, void **bucket, int max, int domain, int flags)
29200095a784SJeff Roberson {
29210095a784SJeff Roberson 	uma_slab_t slab;
29220095a784SJeff Roberson 	uma_keg_t keg;
2923a03af342SSean Bruno #ifdef NUMA
2924ab3185d1SJeff Roberson 	int stripe;
2925a03af342SSean Bruno #endif
29260095a784SJeff Roberson 	int i;
29270095a784SJeff Roberson 
29280095a784SJeff Roberson 	slab = NULL;
29290095a784SJeff Roberson 	keg = NULL;
2930af526374SJeff Roberson 	/* Try to keep the buckets totally full */
29310095a784SJeff Roberson 	for (i = 0; i < max; ) {
2932ab3185d1SJeff Roberson 		if ((slab = zone->uz_slab(zone, keg, domain, flags)) == NULL)
29330095a784SJeff Roberson 			break;
29340095a784SJeff Roberson 		keg = slab->us_keg;
2935a03af342SSean Bruno #ifdef NUMA
2936ab3185d1SJeff Roberson 		stripe = howmany(max, vm_ndomains);
2937a03af342SSean Bruno #endif
29386fd34d6fSJeff Roberson 		while (slab->us_freecount && i < max) {
29390095a784SJeff Roberson 			bucket[i++] = slab_alloc_item(keg, slab);
29406fd34d6fSJeff Roberson 			if (keg->uk_free <= keg->uk_reserve)
29416fd34d6fSJeff Roberson 				break;
2942b6715dabSJeff Roberson #ifdef NUMA
2943ab3185d1SJeff Roberson 			/*
2944ab3185d1SJeff Roberson 			 * If the zone is striped we pick a new slab for every
2945ab3185d1SJeff Roberson 			 * N allocations.  Eliminating this conditional will
2946ab3185d1SJeff Roberson 			 * instead pick a new domain for each bucket rather
2947ab3185d1SJeff Roberson 			 * than stripe within each bucket.  The current option
2948ab3185d1SJeff Roberson 			 * produces more fragmentation and requires more cpu
2949ab3185d1SJeff Roberson 			 * time but yields better distribution.
2950ab3185d1SJeff Roberson 			 */
2951ab3185d1SJeff Roberson 			if ((zone->uz_flags & UMA_ZONE_NUMA) == 0 &&
2952ab3185d1SJeff Roberson 			    vm_ndomains > 1 && --stripe == 0)
2953ab3185d1SJeff Roberson 				break;
2954ab3185d1SJeff Roberson #endif
29556fd34d6fSJeff Roberson 		}
2956ab3185d1SJeff Roberson 		/* Don't block if we allocated any successfully. */
29570095a784SJeff Roberson 		flags &= ~M_WAITOK;
29580095a784SJeff Roberson 		flags |= M_NOWAIT;
29590095a784SJeff Roberson 	}
29600095a784SJeff Roberson 	if (slab != NULL)
29610095a784SJeff Roberson 		KEG_UNLOCK(keg);
29620095a784SJeff Roberson 
29630095a784SJeff Roberson 	return (i);
29640095a784SJeff Roberson }
29650095a784SJeff Roberson 
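/*
 * Allocate a new bucket and fill it via the zone's import function.
 * Returns NULL, after recording an allocation failure, if no items
 * could be imported.
 */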
2966fc03d22bSJeff Roberson static uma_bucket_t
2967ab3185d1SJeff Roberson zone_alloc_bucket(uma_zone_t zone, void *udata, int domain, int flags)
2968bbee39c6SJeff Roberson {
2969bbee39c6SJeff Roberson 	uma_bucket_t bucket;
29700095a784SJeff Roberson 	int max;
2971bbee39c6SJeff Roberson 
297230c5525bSAndrew Gallatin 	CTR1(KTR_UMA, "zone_alloc:_bucket domain %d)", domain);
297330c5525bSAndrew Gallatin 
29746fd34d6fSJeff Roberson 	/* Don't wait for buckets, preserve caller's NOVM setting. */
29756fd34d6fSJeff Roberson 	bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM));
29760095a784SJeff Roberson 	if (bucket == NULL)
2977f7104ccdSAlexander Motin 		return (NULL);
29780095a784SJeff Roberson 
2979af526374SJeff Roberson 	max = MIN(bucket->ub_entries, zone->uz_count);
29800095a784SJeff Roberson 	bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket,
2981ab3185d1SJeff Roberson 	    max, domain, flags);
29820095a784SJeff Roberson 
29830095a784SJeff Roberson 	/*
29840095a784SJeff Roberson 	 * Initialize the memory if necessary.
29850095a784SJeff Roberson 	 */
29860095a784SJeff Roberson 	if (bucket->ub_cnt != 0 && zone->uz_init != NULL) {
2987099a0e58SBosko Milekic 		int i;
2988bbee39c6SJeff Roberson 
29890095a784SJeff Roberson 		for (i = 0; i < bucket->ub_cnt; i++)
2990e20a199fSJeff Roberson 			if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size,
29910095a784SJeff Roberson 			    flags) != 0)
2992b23f72e9SBrian Feldman 				break;
2993b23f72e9SBrian Feldman 		/*
2994b23f72e9SBrian Feldman 		 * If we couldn't initialize the whole bucket, put the
2995b23f72e9SBrian Feldman 		 * rest back onto the freelist.
2996b23f72e9SBrian Feldman 		 */
2997b23f72e9SBrian Feldman 		if (i != bucket->ub_cnt) {
2998af526374SJeff Roberson 			zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i],
29990095a784SJeff Roberson 			    bucket->ub_cnt - i);
3000a5a262c6SBosko Milekic #ifdef INVARIANTS
30010095a784SJeff Roberson 			bzero(&bucket->ub_bucket[i],
30020095a784SJeff Roberson 			    sizeof(void *) * (bucket->ub_cnt - i));
3003a5a262c6SBosko Milekic #endif
3004b23f72e9SBrian Feldman 			bucket->ub_cnt = i;
3005b23f72e9SBrian Feldman 		}
3006099a0e58SBosko Milekic 	}
3007099a0e58SBosko Milekic 
3008f7104ccdSAlexander Motin 	if (bucket->ub_cnt == 0) {
30096fd34d6fSJeff Roberson 		bucket_free(zone, bucket, udata);
3010fc03d22bSJeff Roberson 		atomic_add_long(&zone->uz_fails, 1);
3011fc03d22bSJeff Roberson 		return (NULL);
3012bbee39c6SJeff Roberson 	}
3013fc03d22bSJeff Roberson 
3014fc03d22bSJeff Roberson 	return (bucket);
3015fc03d22bSJeff Roberson }
3016fc03d22bSJeff Roberson 
30178355f576SJeff Roberson /*
30180095a784SJeff Roberson  * Allocates a single item from a zone.
30198355f576SJeff Roberson  *
30208355f576SJeff Roberson  * Arguments
30218355f576SJeff Roberson  *	zone   The zone to alloc for.
30228355f576SJeff Roberson  *	udata  The data to be passed to the constructor.
3023ab3185d1SJeff Roberson  *	domain The domain to allocate from or UMA_ANYDOMAIN.
3024a163d034SWarner Losh  *	flags  M_WAITOK, M_NOWAIT, M_ZERO.
30258355f576SJeff Roberson  *
30268355f576SJeff Roberson  * Returns
30278355f576SJeff Roberson  *	NULL if there is no memory and M_NOWAIT is set
3028bbee39c6SJeff Roberson  *	An item if successful
30298355f576SJeff Roberson  */
30308355f576SJeff Roberson 
30318355f576SJeff Roberson static void *
3032ab3185d1SJeff Roberson zone_alloc_item(uma_zone_t zone, void *udata, int domain, int flags)
30338355f576SJeff Roberson {
30348355f576SJeff Roberson 	void *item;
3035c5deaf04SGleb Smirnoff #ifdef INVARIANTS
3036c5deaf04SGleb Smirnoff 	bool skipdbg;
3037c5deaf04SGleb Smirnoff #endif
30388355f576SJeff Roberson 
30398355f576SJeff Roberson 	item = NULL;
30408355f576SJeff Roberson 
304130c5525bSAndrew Gallatin 	if (domain != UMA_ANYDOMAIN) {
304230c5525bSAndrew Gallatin 		/* avoid allocs targeting empty domains */
304330c5525bSAndrew Gallatin 		if (VM_DOMAIN_EMPTY(domain))
304430c5525bSAndrew Gallatin 			domain = UMA_ANYDOMAIN;
304530c5525bSAndrew Gallatin 	}
3046ab3185d1SJeff Roberson 	if (zone->uz_import(zone->uz_arg, &item, 1, domain, flags) != 1)
30470095a784SJeff Roberson 		goto fail;
30480095a784SJeff Roberson 	atomic_add_long(&zone->uz_allocs, 1);
30498355f576SJeff Roberson 
3050c5deaf04SGleb Smirnoff #ifdef INVARIANTS
3051c5deaf04SGleb Smirnoff 	skipdbg = uma_dbg_zskip(zone, item);
3052c5deaf04SGleb Smirnoff #endif
3053099a0e58SBosko Milekic 	/*
3054099a0e58SBosko Milekic 	 * We have to call both the zone's init (not the keg's init)
3055099a0e58SBosko Milekic 	 * and the zone's ctor.  This is because the item is going from
3056099a0e58SBosko Milekic 	 * a keg slab directly to the user, and the user is expecting it
3057099a0e58SBosko Milekic 	 * to be both zone-init'd as well as zone-ctor'd.
3058099a0e58SBosko Milekic 	 */
3059b23f72e9SBrian Feldman 	if (zone->uz_init != NULL) {
3060e20a199fSJeff Roberson 		if (zone->uz_init(item, zone->uz_size, flags) != 0) {
30610095a784SJeff Roberson 			zone_free_item(zone, item, udata, SKIP_FINI);
30620095a784SJeff Roberson 			goto fail;
3063b23f72e9SBrian Feldman 		}
3064b23f72e9SBrian Feldman 	}
3065c5deaf04SGleb Smirnoff 	if (zone->uz_ctor != NULL &&
3066c5deaf04SGleb Smirnoff #ifdef INVARIANTS
3067c5deaf04SGleb Smirnoff 	    (!skipdbg || zone->uz_ctor != trash_ctor ||
3068c5deaf04SGleb Smirnoff 	    zone->uz_dtor != trash_dtor) &&
3069c5deaf04SGleb Smirnoff #endif
3070c5deaf04SGleb Smirnoff 	    zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
30710095a784SJeff Roberson 		zone_free_item(zone, item, udata, SKIP_DTOR);
30720095a784SJeff Roberson 		goto fail;
3073b23f72e9SBrian Feldman 	}
3074ef72505eSJeff Roberson #ifdef INVARIANTS
3075c5deaf04SGleb Smirnoff 	if (!skipdbg)
30760095a784SJeff Roberson 		uma_dbg_alloc(zone, NULL, item);
3077ef72505eSJeff Roberson #endif
30782cc35ff9SJeff Roberson 	if (flags & M_ZERO)
307948343a2fSGleb Smirnoff 		uma_zero_item(item, zone);
30808355f576SJeff Roberson 
30811431a748SGleb Smirnoff 	CTR3(KTR_UMA, "zone_alloc_item item %p from %s(%p)", item,
30821431a748SGleb Smirnoff 	    zone->uz_name, zone);
30831431a748SGleb Smirnoff 
30848355f576SJeff Roberson 	return (item);
30850095a784SJeff Roberson 
30860095a784SJeff Roberson fail:
30871431a748SGleb Smirnoff 	CTR2(KTR_UMA, "zone_alloc_item failed from %s(%p)",
30881431a748SGleb Smirnoff 	    zone->uz_name, zone);
30890095a784SJeff Roberson 	atomic_add_long(&zone->uz_fails, 1);
30900095a784SJeff Roberson 	return (NULL);
30918355f576SJeff Roberson }
30928355f576SJeff Roberson 
30938355f576SJeff Roberson /* See uma.h */
30948355f576SJeff Roberson void
30958355f576SJeff Roberson uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
30968355f576SJeff Roberson {
30978355f576SJeff Roberson 	uma_cache_t cache;
30988355f576SJeff Roberson 	uma_bucket_t bucket;
3099ab3185d1SJeff Roberson 	uma_zone_domain_t zdom;
3100ab3185d1SJeff Roberson 	int cpu, domain, lockfail;
3101c5deaf04SGleb Smirnoff #ifdef INVARIANTS
3102c5deaf04SGleb Smirnoff 	bool skipdbg;
3103c5deaf04SGleb Smirnoff #endif
31048355f576SJeff Roberson 
3105e866d8f0SMark Murray 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
310619fa89e9SMark Murray 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
310710cb2424SMark Murray 
31083659f747SRobert Watson 	CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread,
31093659f747SRobert Watson 	    zone->uz_name);
31103659f747SRobert Watson 
3111d9e2e68dSMark Johnston 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
31121067a2baSJonathan T. Looney 	    ("uma_zfree_arg: called with spinlock or critical section held"));
31131067a2baSJonathan T. Looney 
311520ed0cb0SMatthew D Fleming 	/* uma_zfree(..., NULL) does nothing, to match free(9). */
311620ed0cb0SMatthew D Fleming 	if (item == NULL)
311720ed0cb0SMatthew D Fleming 		return;
31178d689e04SGleb Smirnoff #ifdef DEBUG_MEMGUARD
31188d689e04SGleb Smirnoff 	if (is_memguard_addr(item)) {
3119bc9d08e1SMark Johnston 		if (zone->uz_dtor != NULL)
31208d689e04SGleb Smirnoff 			zone->uz_dtor(item, zone->uz_size, udata);
3121bc9d08e1SMark Johnston 		if (zone->uz_fini != NULL)
31228d689e04SGleb Smirnoff 			zone->uz_fini(item, zone->uz_size);
31238d689e04SGleb Smirnoff 		memguard_free(item);
31248d689e04SGleb Smirnoff 		return;
31258d689e04SGleb Smirnoff 	}
31268d689e04SGleb Smirnoff #endif
31275d1ae027SRobert Watson #ifdef INVARIANTS
3128c5deaf04SGleb Smirnoff 	skipdbg = uma_dbg_zskip(zone, item);
3129c5deaf04SGleb Smirnoff 	if (skipdbg == false) {
3130e20a199fSJeff Roberson 		if (zone->uz_flags & UMA_ZONE_MALLOC)
31315d1ae027SRobert Watson 			uma_dbg_free(zone, udata, item);
31325d1ae027SRobert Watson 		else
31335d1ae027SRobert Watson 			uma_dbg_free(zone, NULL, item);
3134c5deaf04SGleb Smirnoff 	}
3135c5deaf04SGleb Smirnoff 	if (zone->uz_dtor != NULL && (!skipdbg ||
3136c5deaf04SGleb Smirnoff 	    zone->uz_dtor != trash_dtor || zone->uz_ctor != trash_ctor))
3137c5deaf04SGleb Smirnoff #else
3138fc03d22bSJeff Roberson 	if (zone->uz_dtor != NULL)
3139c5deaf04SGleb Smirnoff #endif
3140ef72505eSJeff Roberson 		zone->uz_dtor(item, zone->uz_size, udata);
3141ef72505eSJeff Roberson 
3142af7f9b97SJeff Roberson 	/*
3143af7f9b97SJeff Roberson 	 * The race here is acceptable.  If we miss it we'll just have to wait
3144af7f9b97SJeff Roberson 	 * a little longer for the limits to be reset.
3145af7f9b97SJeff Roberson 	 */
3146e20a199fSJeff Roberson 	if (zone->uz_flags & UMA_ZFLAG_FULL)
3147fc03d22bSJeff Roberson 		goto zfree_item;
3148af7f9b97SJeff Roberson 
31495d1ae027SRobert Watson 	/*
31505d1ae027SRobert Watson 	 * If possible, free to the per-CPU cache.  There are two
31515d1ae027SRobert Watson 	 * requirements for safe access to the per-CPU cache: (1) the thread
31525d1ae027SRobert Watson 	 * accessing the cache must not be preempted or yield during access,
31535d1ae027SRobert Watson 	 * and (2) the thread must not migrate CPUs without switching which
31545d1ae027SRobert Watson 	 * cache it accesses.  We rely on a critical section to prevent
31555d1ae027SRobert Watson 	 * preemption and migration.  We release the critical section in
31565d1ae027SRobert Watson 	 * order to acquire the zone mutex if we are unable to free to the
31575d1ae027SRobert Watson 	 * current cache; when we re-acquire the critical section, we must
31585d1ae027SRobert Watson 	 * detect and handle migration if it has occurred.
31595d1ae027SRobert Watson 	 */
3160a553d4b8SJeff Roberson zfree_restart:
31615d1ae027SRobert Watson 	critical_enter();
31625d1ae027SRobert Watson 	cpu = curcpu;
31638355f576SJeff Roberson 	cache = &zone->uz_cpu[cpu];
31648355f576SJeff Roberson 
31658355f576SJeff Roberson zfree_start:
3166a553d4b8SJeff Roberson 	/*
3167fc03d22bSJeff Roberson 	 * Try to free into the allocbucket first to give LIFO ordering
3168fc03d22bSJeff Roberson 	 * for cache-hot data structures.  Spill over into the freebucket
3169fc03d22bSJeff Roberson 	 * if necessary.  Alloc will swap them if one runs dry.
3170a553d4b8SJeff Roberson 	 */
3171fc03d22bSJeff Roberson 	bucket = cache->uc_allocbucket;
3172fc03d22bSJeff Roberson 	if (bucket == NULL || bucket->ub_cnt >= bucket->ub_entries)
3173fc03d22bSJeff Roberson 		bucket = cache->uc_freebucket;
3174fc03d22bSJeff Roberson 	if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
3175cae33c14SJeff Roberson 		KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL,
31768355f576SJeff Roberson 		    ("uma_zfree: Freeing to non free bucket index."));
3177cae33c14SJeff Roberson 		bucket->ub_bucket[bucket->ub_cnt] = item;
3178cae33c14SJeff Roberson 		bucket->ub_cnt++;
3179773df9abSRobert Watson 		cache->uc_frees++;
31805d1ae027SRobert Watson 		critical_exit();
31818355f576SJeff Roberson 		return;
3182fc03d22bSJeff Roberson 	}
3183fc03d22bSJeff Roberson 
31848355f576SJeff Roberson 	/*
31855d1ae027SRobert Watson 	 * We must go back to the zone, which requires acquiring the zone lock,
31865d1ae027SRobert Watson 	 * which in turn means we must release and re-acquire the critical
31875d1ae027SRobert Watson 	 * section.  Since the critical section is released, we may be
31885d1ae027SRobert Watson 	 * preempted or migrate.  As such, make sure not to maintain any
31895d1ae027SRobert Watson 	 * thread-local state specific to the cache from prior to releasing
31905d1ae027SRobert Watson 	 * the critical section.
31918355f576SJeff Roberson 	 */
31925d1ae027SRobert Watson 	critical_exit();
3193fc03d22bSJeff Roberson 	if (zone->uz_count == 0 || bucketdisable)
3194fc03d22bSJeff Roberson 		goto zfree_item;
3195fc03d22bSJeff Roberson 
31964d104ba0SAlexander Motin 	lockfail = 0;
31974d104ba0SAlexander Motin 	if (ZONE_TRYLOCK(zone) == 0) {
31984d104ba0SAlexander Motin 		/* Record contention to size the buckets. */
31998355f576SJeff Roberson 		ZONE_LOCK(zone);
32004d104ba0SAlexander Motin 		lockfail = 1;
32014d104ba0SAlexander Motin 	}
32025d1ae027SRobert Watson 	critical_enter();
32035d1ae027SRobert Watson 	cpu = curcpu;
32045d1ae027SRobert Watson 	cache = &zone->uz_cpu[cpu];
32058355f576SJeff Roberson 
32068355f576SJeff Roberson 	bucket = cache->uc_freebucket;
3207fc03d22bSJeff Roberson 	if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
3208fc03d22bSJeff Roberson 		ZONE_UNLOCK(zone);
3209fc03d22bSJeff Roberson 		goto zfree_start;
3210fc03d22bSJeff Roberson 	}
32118355f576SJeff Roberson 	cache->uc_freebucket = NULL;
3212afa5d703SMark Johnston 	/* We are no longer associated with this CPU. */
3213afa5d703SMark Johnston 	critical_exit();
32148355f576SJeff Roberson 
321530c5525bSAndrew Gallatin 	if ((zone->uz_flags & UMA_ZONE_NUMA) != 0) {
3216ab3185d1SJeff Roberson 		domain = PCPU_GET(domain);
321730c5525bSAndrew Gallatin 		if (VM_DOMAIN_EMPTY(domain))
321830c5525bSAndrew Gallatin 			domain = UMA_ANYDOMAIN;
321930c5525bSAndrew Gallatin 	} else
3220ab3185d1SJeff Roberson 		domain = 0;
3221ab3185d1SJeff Roberson 	zdom = &zone->uz_domain[0];
3222ab3185d1SJeff Roberson 
32238355f576SJeff Roberson 	/* Can we throw this on the zone full list? */
32248355f576SJeff Roberson 	if (bucket != NULL) {
32251431a748SGleb Smirnoff 		CTR3(KTR_UMA,
32261431a748SGleb Smirnoff 		    "uma_zfree: zone %s(%p) putting bucket %p on free list",
32271431a748SGleb Smirnoff 		    zone->uz_name, zone, bucket);
3228cae33c14SJeff Roberson 		/* ub_cnt is pointing to the last free item */
3229cae33c14SJeff Roberson 		KASSERT(bucket->ub_cnt != 0,
32308355f576SJeff Roberson 		    ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n"));
3231e8bb2dc7SJeff Roberson 		if ((zone->uz_flags & UMA_ZONE_NOBUCKETCACHE) != 0) {
3232e8bb2dc7SJeff Roberson 			ZONE_UNLOCK(zone);
3233e8bb2dc7SJeff Roberson 			bucket_drain(zone, bucket);
3234e8bb2dc7SJeff Roberson 			bucket_free(zone, bucket, udata);
3235e8bb2dc7SJeff Roberson 			goto zfree_restart;
3236e8bb2dc7SJeff Roberson 		} else
32370f9b7bf3SMark Johnston 			zone_put_bucket(zone, zdom, bucket, true);
32388355f576SJeff Roberson 	}
3239fc03d22bSJeff Roberson 
32404d104ba0SAlexander Motin 	/*
32414d104ba0SAlexander Motin 	 * We bump the uz count when the cache size is insufficient to
32424d104ba0SAlexander Motin 	 * handle the working set.
32434d104ba0SAlexander Motin 	 */
32444d104ba0SAlexander Motin 	if (lockfail && zone->uz_count < BUCKET_MAX)
32454d104ba0SAlexander Motin 		zone->uz_count++;
3246a553d4b8SJeff Roberson 	ZONE_UNLOCK(zone);
3247a553d4b8SJeff Roberson 
32486fd34d6fSJeff Roberson 	bucket = bucket_alloc(zone, udata, M_NOWAIT);
32491431a748SGleb Smirnoff 	CTR3(KTR_UMA, "uma_zfree: zone %s(%p) allocated bucket %p",
32501431a748SGleb Smirnoff 	    zone->uz_name, zone, bucket);
32514741dcbfSJeff Roberson 	if (bucket) {
3252fc03d22bSJeff Roberson 		critical_enter();
3253fc03d22bSJeff Roberson 		cpu = curcpu;
3254fc03d22bSJeff Roberson 		cache = &zone->uz_cpu[cpu];
3255ab3185d1SJeff Roberson 		if (cache->uc_freebucket == NULL &&
3256ab3185d1SJeff Roberson 		    ((zone->uz_flags & UMA_ZONE_NUMA) == 0 ||
3257ab3185d1SJeff Roberson 		    domain == PCPU_GET(domain))) {
3258fc03d22bSJeff Roberson 			cache->uc_freebucket = bucket;
3259fc03d22bSJeff Roberson 			goto zfree_start;
3260fc03d22bSJeff Roberson 		}
3261fc03d22bSJeff Roberson 		/*
3262fc03d22bSJeff Roberson 		 * We lost the race, start over.  We have to drop our
3263fc03d22bSJeff Roberson 		 * critical section to free the bucket.
3264fc03d22bSJeff Roberson 		 */
3265fc03d22bSJeff Roberson 		critical_exit();
32666fd34d6fSJeff Roberson 		bucket_free(zone, bucket, udata);
3267a553d4b8SJeff Roberson 		goto zfree_restart;
32688355f576SJeff Roberson 	}
32698355f576SJeff Roberson 
3270a553d4b8SJeff Roberson 	/*
3271a553d4b8SJeff Roberson 	 * If nothing else caught this, we'll just do an internal free.
3272a553d4b8SJeff Roberson 	 */
3273fc03d22bSJeff Roberson zfree_item:
32740095a784SJeff Roberson 	zone_free_item(zone, item, udata, SKIP_DTOR);
32758355f576SJeff Roberson 
32768355f576SJeff Roberson 	return;
32778355f576SJeff Roberson }
32788355f576SJeff Roberson 
3279ab3185d1SJeff Roberson void
3280ab3185d1SJeff Roberson uma_zfree_domain(uma_zone_t zone, void *item, void *udata)
3281ab3185d1SJeff Roberson {
3282ab3185d1SJeff Roberson 
3283ab3185d1SJeff Roberson 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
328419fa89e9SMark Murray 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
3285ab3185d1SJeff Roberson 
3286ab3185d1SJeff Roberson 	CTR2(KTR_UMA, "uma_zfree_domain thread %x zone %s", curthread,
3287ab3185d1SJeff Roberson 	    zone->uz_name);
3288ab3185d1SJeff Roberson 
3289ab3185d1SJeff Roberson 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
3290ab3185d1SJeff Roberson 	    ("uma_zfree_domain: called with spinlock or critical section held"));
3291ab3185d1SJeff Roberson 
3292ab3185d1SJeff Roberson 	/* uma_zfree(..., NULL) does nothing, to match free(9). */
3293ab3185d1SJeff Roberson 	if (item == NULL)
3294ab3185d1SJeff Roberson 		return;
3295ab3185d1SJeff Roberson 	zone_free_item(zone, item, udata, SKIP_NONE);
3296ab3185d1SJeff Roberson }
3297ab3185d1SJeff Roberson 
32988355f576SJeff Roberson static void
32990095a784SJeff Roberson slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item)
33008355f576SJeff Roberson {
3301ab3185d1SJeff Roberson 	uma_domain_t dom;
330285dcf349SGleb Smirnoff 	uint8_t freei;
3303099a0e58SBosko Milekic 
33040095a784SJeff Roberson 	mtx_assert(&keg->uk_lock, MA_OWNED);
3305e20a199fSJeff Roberson 	MPASS(keg == slab->us_keg);
33068355f576SJeff Roberson 
3307ab3185d1SJeff Roberson 	dom = &keg->uk_domain[slab->us_domain];
3308ab3185d1SJeff Roberson 
33098355f576SJeff Roberson 	/* Do we need to remove from any lists? */
3310099a0e58SBosko Milekic 	if (slab->us_freecount+1 == keg->uk_ipers) {
33118355f576SJeff Roberson 		LIST_REMOVE(slab, us_link);
3312ab3185d1SJeff Roberson 		LIST_INSERT_HEAD(&dom->ud_free_slab, slab, us_link);
33138355f576SJeff Roberson 	} else if (slab->us_freecount == 0) {
33148355f576SJeff Roberson 		LIST_REMOVE(slab, us_link);
3315ab3185d1SJeff Roberson 		LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
33168355f576SJeff Roberson 	}
33178355f576SJeff Roberson 
3318ef72505eSJeff Roberson 	/* Slab management. */
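	/*
	 * E.g., with uk_rsize == 256, an item 768 bytes past us_data maps
	 * to freei == 3, so bit 3 of us_free is set below to mark that
	 * slot free again.
	 */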
3319ef72505eSJeff Roberson 	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
3320ef72505eSJeff Roberson 	BIT_SET(SLAB_SETSIZE, freei, &slab->us_free);
33218355f576SJeff Roberson 	slab->us_freecount++;
33228355f576SJeff Roberson 
3323ef72505eSJeff Roberson 	/* Keg statistics. */
3324099a0e58SBosko Milekic 	keg->uk_free++;
33250095a784SJeff Roberson }
33260095a784SJeff Roberson 
33270095a784SJeff Roberson static void
33280095a784SJeff Roberson zone_release(uma_zone_t zone, void **bucket, int cnt)
33290095a784SJeff Roberson {
33300095a784SJeff Roberson 	void *item;
33310095a784SJeff Roberson 	uma_slab_t slab;
33320095a784SJeff Roberson 	uma_keg_t keg;
33330095a784SJeff Roberson 	uint8_t *mem;
33340095a784SJeff Roberson 	int clearfull;
33350095a784SJeff Roberson 	int i;
33368355f576SJeff Roberson 
3337e20a199fSJeff Roberson 	clearfull = 0;
33380095a784SJeff Roberson 	keg = zone_first_keg(zone);
3339af526374SJeff Roberson 	KEG_LOCK(keg);
33400095a784SJeff Roberson 	for (i = 0; i < cnt; i++) {
33410095a784SJeff Roberson 		item = bucket[i];
33420095a784SJeff Roberson 		if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) {
33430095a784SJeff Roberson 			mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
33440095a784SJeff Roberson 			if (zone->uz_flags & UMA_ZONE_HASH) {
33450095a784SJeff Roberson 				slab = hash_sfind(&keg->uk_hash, mem);
33460095a784SJeff Roberson 			} else {
33470095a784SJeff Roberson 				mem += keg->uk_pgoff;
33480095a784SJeff Roberson 				slab = (uma_slab_t)mem;
33490095a784SJeff Roberson 			}
33500095a784SJeff Roberson 		} else {
33510095a784SJeff Roberson 			slab = vtoslab((vm_offset_t)item);
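			/*
			 * A multi-keg zone may hand us items backed by
			 * different kegs; if this slab belongs to another
			 * keg, drop the current keg lock and take the
			 * right one before freeing the item.
			 */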
33520095a784SJeff Roberson 			if (slab->us_keg != keg) {
33530095a784SJeff Roberson 				KEG_UNLOCK(keg);
33540095a784SJeff Roberson 				keg = slab->us_keg;
33550095a784SJeff Roberson 				KEG_LOCK(keg);
33560095a784SJeff Roberson 			}
33570095a784SJeff Roberson 		}
33580095a784SJeff Roberson 		slab_free_item(keg, slab, item);
3359099a0e58SBosko Milekic 		if (keg->uk_flags & UMA_ZFLAG_FULL) {
3360e20a199fSJeff Roberson 			if (keg->uk_pages < keg->uk_maxpages) {
3361099a0e58SBosko Milekic 				keg->uk_flags &= ~UMA_ZFLAG_FULL;
3362e20a199fSJeff Roberson 				clearfull = 1;
3363e20a199fSJeff Roberson 			}
3364af7f9b97SJeff Roberson 
336577380291SMohan Srinivasan 			/*
3366ef72505eSJeff Roberson 			 * We can handle one more allocation. Since we're
3367ef72505eSJeff Roberson 			 * clearing ZFLAG_FULL, wake up all procs blocked
3368ef72505eSJeff Roberson 			 * on pages. This should be uncommon, so keeping this
3369ef72505eSJeff Roberson 			 * simple for now (rather than adding count of blocked
337077380291SMohan Srinivasan 			 * threads etc).
337177380291SMohan Srinivasan 			 */
337277380291SMohan Srinivasan 			wakeup(keg);
3373af7f9b97SJeff Roberson 		}
33740095a784SJeff Roberson 	}
3375af526374SJeff Roberson 	KEG_UNLOCK(keg);
33760095a784SJeff Roberson 	if (clearfull) {
3377af526374SJeff Roberson 		ZONE_LOCK(zone);
3378e20a199fSJeff Roberson 		zone->uz_flags &= ~UMA_ZFLAG_FULL;
3379e20a199fSJeff Roberson 		wakeup(zone);
3380605cbd6aSJeff Roberson 		ZONE_UNLOCK(zone);
3381af526374SJeff Roberson 	}
33838355f576SJeff Roberson }
33848355f576SJeff Roberson 
33850095a784SJeff Roberson /*
33860095a784SJeff Roberson  * Frees a single item to any zone.
33870095a784SJeff Roberson  *
33880095a784SJeff Roberson  * Arguments:
33890095a784SJeff Roberson  *	zone   The zone to free to
33900095a784SJeff Roberson  *	item   The item we're freeing
33910095a784SJeff Roberson  *	udata  User supplied data for the dtor
33920095a784SJeff Roberson  *	skip   Skip dtors and finis
33930095a784SJeff Roberson  */
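/*
 * The skip levels are ordered, as the "<" comparisons below rely on:
 * SKIP_NONE runs both the dtor and fini, SKIP_DTOR suppresses only the
 * dtor, and SKIP_FINI suppresses both.
 */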
33940095a784SJeff Roberson static void
33950095a784SJeff Roberson zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip)
33960095a784SJeff Roberson {
33970095a784SJeff Roberson #ifdef INVARIANTS
3398c5deaf04SGleb Smirnoff 	bool skipdbg;
3399c5deaf04SGleb Smirnoff 
3400c5deaf04SGleb Smirnoff 	skipdbg = uma_dbg_zskip(zone, item);
3401c5deaf04SGleb Smirnoff 	if (skip == SKIP_NONE && !skipdbg) {
34020095a784SJeff Roberson 		if (zone->uz_flags & UMA_ZONE_MALLOC)
34030095a784SJeff Roberson 			uma_dbg_free(zone, udata, item);
34040095a784SJeff Roberson 		else
34050095a784SJeff Roberson 			uma_dbg_free(zone, NULL, item);
34060095a784SJeff Roberson 	}
3407c5deaf04SGleb Smirnoff 
3408c5deaf04SGleb Smirnoff 	if (skip < SKIP_DTOR && zone->uz_dtor != NULL &&
3409c5deaf04SGleb Smirnoff 	    (!skipdbg || zone->uz_dtor != trash_dtor ||
3410c5deaf04SGleb Smirnoff 	    zone->uz_ctor != trash_ctor))
3411c5deaf04SGleb Smirnoff #else
3412c5deaf04SGleb Smirnoff 	if (skip < SKIP_DTOR && zone->uz_dtor != NULL)
34130095a784SJeff Roberson #endif
34140095a784SJeff Roberson 		zone->uz_dtor(item, zone->uz_size, udata);
34150095a784SJeff Roberson 
34160095a784SJeff Roberson 	if (skip < SKIP_FINI && zone->uz_fini)
34170095a784SJeff Roberson 		zone->uz_fini(item, zone->uz_size);
34180095a784SJeff Roberson 
34190095a784SJeff Roberson 	atomic_add_long(&zone->uz_frees, 1);
34200095a784SJeff Roberson 	zone->uz_release(zone->uz_arg, &item, 1);
34210095a784SJeff Roberson }
34220095a784SJeff Roberson 
34238355f576SJeff Roberson /* See uma.h */
34241c6cae97SLawrence Stewart int
3425736ee590SJeff Roberson uma_zone_set_max(uma_zone_t zone, int nitems)
3426736ee590SJeff Roberson {
3427099a0e58SBosko Milekic 	uma_keg_t keg;
3428099a0e58SBosko Milekic 
3429e20a199fSJeff Roberson 	keg = zone_first_keg(zone);
34300095a784SJeff Roberson 	if (keg == NULL)
34310095a784SJeff Roberson 		return (0);
3432af526374SJeff Roberson 	KEG_LOCK(keg);
3433e20a199fSJeff Roberson 	keg->uk_maxpages = (nitems / keg->uk_ipers) * keg->uk_ppera;
3434099a0e58SBosko Milekic 	if (keg->uk_maxpages * keg->uk_ipers < nitems)
3435e20a199fSJeff Roberson 		keg->uk_maxpages += keg->uk_ppera;
343657223e99SAndriy Gapon 	nitems = (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers;
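	/*
	 * The limit is rounded up to a whole number of slabs.  E.g., a
	 * request of 1005 items with uk_ipers == 10 and uk_ppera == 1
	 * yields uk_maxpages == 101, and 1010 is returned to the caller.
	 */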
3437af526374SJeff Roberson 	KEG_UNLOCK(keg);
34381c6cae97SLawrence Stewart 
34391c6cae97SLawrence Stewart 	return (nitems);
3440736ee590SJeff Roberson }
3441736ee590SJeff Roberson 
3442736ee590SJeff Roberson /* See uma.h */
3443e49471b0SAndre Oppermann int
3444e49471b0SAndre Oppermann uma_zone_get_max(uma_zone_t zone)
3445e49471b0SAndre Oppermann {
3446e49471b0SAndre Oppermann 	int nitems;
3447e49471b0SAndre Oppermann 	uma_keg_t keg;
3448e49471b0SAndre Oppermann 
3449e49471b0SAndre Oppermann 	keg = zone_first_keg(zone);
34500095a784SJeff Roberson 	if (keg == NULL)
34510095a784SJeff Roberson 		return (0);
3452af526374SJeff Roberson 	KEG_LOCK(keg);
345357223e99SAndriy Gapon 	nitems = (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers;
3454af526374SJeff Roberson 	KEG_UNLOCK(keg);
3455e49471b0SAndre Oppermann 
3456e49471b0SAndre Oppermann 	return (nitems);
3457e49471b0SAndre Oppermann }
3458e49471b0SAndre Oppermann 
3459e49471b0SAndre Oppermann /* See uma.h */
34602f891cd5SPawel Jakub Dawidek void
34612f891cd5SPawel Jakub Dawidek uma_zone_set_warning(uma_zone_t zone, const char *warning)
34622f891cd5SPawel Jakub Dawidek {
34632f891cd5SPawel Jakub Dawidek 
34642f891cd5SPawel Jakub Dawidek 	ZONE_LOCK(zone);
34652f891cd5SPawel Jakub Dawidek 	zone->uz_warning = warning;
34662f891cd5SPawel Jakub Dawidek 	ZONE_UNLOCK(zone);
34672f891cd5SPawel Jakub Dawidek }
34682f891cd5SPawel Jakub Dawidek 
34692f891cd5SPawel Jakub Dawidek /* See uma.h */
347054503a13SJonathan T. Looney void
347154503a13SJonathan T. Looney uma_zone_set_maxaction(uma_zone_t zone, uma_maxaction_t maxaction)
347254503a13SJonathan T. Looney {
347354503a13SJonathan T. Looney 
347454503a13SJonathan T. Looney 	ZONE_LOCK(zone);
3475e60b2fcbSGleb Smirnoff 	TASK_INIT(&zone->uz_maxaction, 0, (task_fn_t *)maxaction, zone);
347654503a13SJonathan T. Looney 	ZONE_UNLOCK(zone);
347754503a13SJonathan T. Looney }
347854503a13SJonathan T. Looney 
347954503a13SJonathan T. Looney /* See uma.h */
3480c4ae7908SLawrence Stewart int
3481c4ae7908SLawrence Stewart uma_zone_get_cur(uma_zone_t zone)
3482c4ae7908SLawrence Stewart {
3483c4ae7908SLawrence Stewart 	int64_t nitems;
3484c4ae7908SLawrence Stewart 	u_int i;
3485c4ae7908SLawrence Stewart 
3486c4ae7908SLawrence Stewart 	ZONE_LOCK(zone);
3487c4ae7908SLawrence Stewart 	nitems = zone->uz_allocs - zone->uz_frees;
3488c4ae7908SLawrence Stewart 	CPU_FOREACH(i) {
3489c4ae7908SLawrence Stewart 		/*
3490c4ae7908SLawrence Stewart 		 * See the comment in sysctl_vm_zone_stats() regarding the
3491c4ae7908SLawrence Stewart 		 * safety of accessing the per-cpu caches. With the zone lock
3492c4ae7908SLawrence Stewart 		 * held, it is safe, but can potentially result in stale data.
3493c4ae7908SLawrence Stewart 		 */
3494c4ae7908SLawrence Stewart 		nitems += zone->uz_cpu[i].uc_allocs -
3495c4ae7908SLawrence Stewart 		    zone->uz_cpu[i].uc_frees;
3496c4ae7908SLawrence Stewart 	}
3497c4ae7908SLawrence Stewart 	ZONE_UNLOCK(zone);
3498c4ae7908SLawrence Stewart 
3499c4ae7908SLawrence Stewart 	return (nitems < 0 ? 0 : nitems);
3500c4ae7908SLawrence Stewart }
3501c4ae7908SLawrence Stewart 
3502c4ae7908SLawrence Stewart /* See uma.h */
3503736ee590SJeff Roberson void
3504099a0e58SBosko Milekic uma_zone_set_init(uma_zone_t zone, uma_init uminit)
3505099a0e58SBosko Milekic {
3506e20a199fSJeff Roberson 	uma_keg_t keg;
3507e20a199fSJeff Roberson 
3508e20a199fSJeff Roberson 	keg = zone_first_keg(zone);
35090095a784SJeff Roberson 	KASSERT(keg != NULL, ("uma_zone_set_init: Invalid zone type"));
3510af526374SJeff Roberson 	KEG_LOCK(keg);
3511e20a199fSJeff Roberson 	KASSERT(keg->uk_pages == 0,
3512099a0e58SBosko Milekic 	    ("uma_zone_set_init on non-empty keg"));
3513e20a199fSJeff Roberson 	keg->uk_init = uminit;
3514af526374SJeff Roberson 	KEG_UNLOCK(keg);
3515099a0e58SBosko Milekic }
3516099a0e58SBosko Milekic 
3517099a0e58SBosko Milekic /* See uma.h */
3518099a0e58SBosko Milekic void
3519099a0e58SBosko Milekic uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
3520099a0e58SBosko Milekic {
3521e20a199fSJeff Roberson 	uma_keg_t keg;
3522e20a199fSJeff Roberson 
3523e20a199fSJeff Roberson 	keg = zone_first_keg(zone);
35241d2c0c46SDmitry Chagin 	KASSERT(keg != NULL, ("uma_zone_set_fini: Invalid zone type"));
3525af526374SJeff Roberson 	KEG_LOCK(keg);
3526e20a199fSJeff Roberson 	KASSERT(keg->uk_pages == 0,
3527099a0e58SBosko Milekic 	    ("uma_zone_set_fini on non-empty keg"));
3528e20a199fSJeff Roberson 	keg->uk_fini = fini;
3529af526374SJeff Roberson 	KEG_UNLOCK(keg);
3530099a0e58SBosko Milekic }
3531099a0e58SBosko Milekic 
3532099a0e58SBosko Milekic /* See uma.h */
3533099a0e58SBosko Milekic void
3534099a0e58SBosko Milekic uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
3535099a0e58SBosko Milekic {
3536af526374SJeff Roberson 
3537099a0e58SBosko Milekic 	ZONE_LOCK(zone);
3538e20a199fSJeff Roberson 	KASSERT(zone_first_keg(zone)->uk_pages == 0,
3539099a0e58SBosko Milekic 	    ("uma_zone_set_zinit on non-empty keg"));
3540099a0e58SBosko Milekic 	zone->uz_init = zinit;
3541099a0e58SBosko Milekic 	ZONE_UNLOCK(zone);
3542099a0e58SBosko Milekic }
3543099a0e58SBosko Milekic 
3544099a0e58SBosko Milekic /* See uma.h */
3545099a0e58SBosko Milekic void
3546099a0e58SBosko Milekic uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
3547099a0e58SBosko Milekic {
3548af526374SJeff Roberson 
3549099a0e58SBosko Milekic 	ZONE_LOCK(zone);
3550e20a199fSJeff Roberson 	KASSERT(zone_first_keg(zone)->uk_pages == 0,
3551099a0e58SBosko Milekic 	    ("uma_zone_set_zfini on non-empty keg"));
3552099a0e58SBosko Milekic 	zone->uz_fini = zfini;
3553099a0e58SBosko Milekic 	ZONE_UNLOCK(zone);
3554099a0e58SBosko Milekic }
3555099a0e58SBosko Milekic 
3556099a0e58SBosko Milekic /* See uma.h */
3557b23f72e9SBrian Feldman /* XXX uk_freef is not actually used with the zone locked */
3558099a0e58SBosko Milekic void
35598355f576SJeff Roberson uma_zone_set_freef(uma_zone_t zone, uma_free freef)
35608355f576SJeff Roberson {
35610095a784SJeff Roberson 	uma_keg_t keg;
3562e20a199fSJeff Roberson 
35630095a784SJeff Roberson 	keg = zone_first_keg(zone);
35641d2c0c46SDmitry Chagin 	KASSERT(keg != NULL, ("uma_zone_set_freef: Invalid zone type"));
3565af526374SJeff Roberson 	KEG_LOCK(keg);
35660095a784SJeff Roberson 	keg->uk_freef = freef;
3567af526374SJeff Roberson 	KEG_UNLOCK(keg);
35688355f576SJeff Roberson }
35698355f576SJeff Roberson 
35708355f576SJeff Roberson /* See uma.h */
3571b23f72e9SBrian Feldman /* XXX uk_allocf is not actually used with the zone locked */
35728355f576SJeff Roberson void
35738355f576SJeff Roberson uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
35748355f576SJeff Roberson {
3575e20a199fSJeff Roberson 	uma_keg_t keg;
3576e20a199fSJeff Roberson 
3577e20a199fSJeff Roberson 	keg = zone_first_keg(zone);
3578af526374SJeff Roberson 	KEG_LOCK(keg);
3579e20a199fSJeff Roberson 	keg->uk_allocf = allocf;
3580af526374SJeff Roberson 	KEG_UNLOCK(keg);
35818355f576SJeff Roberson }
35828355f576SJeff Roberson 
35838355f576SJeff Roberson /* See uma.h */
35846fd34d6fSJeff Roberson void
35856fd34d6fSJeff Roberson uma_zone_reserve(uma_zone_t zone, int items)
35866fd34d6fSJeff Roberson {
35876fd34d6fSJeff Roberson 	uma_keg_t keg;
35886fd34d6fSJeff Roberson 
35896fd34d6fSJeff Roberson 	keg = zone_first_keg(zone);
35906fd34d6fSJeff Roberson 	if (keg == NULL)
35916fd34d6fSJeff Roberson 		return;
35926fd34d6fSJeff Roberson 	KEG_LOCK(keg);
35936fd34d6fSJeff Roberson 	keg->uk_reserve = items;
35946fd34d6fSJeff Roberson 	KEG_UNLOCK(keg);
35976fd34d6fSJeff Roberson }
35986fd34d6fSJeff Roberson 
35996fd34d6fSJeff Roberson /* See uma.h */
36008355f576SJeff Roberson int
3601a4915c21SAttilio Rao uma_zone_reserve_kva(uma_zone_t zone, int count)
36028355f576SJeff Roberson {
3603099a0e58SBosko Milekic 	uma_keg_t keg;
36048355f576SJeff Roberson 	vm_offset_t kva;
36059ba30bcbSZbigniew Bodek 	u_int pages;
36068355f576SJeff Roberson 
3607e20a199fSJeff Roberson 	keg = zone_first_keg(zone);
36080095a784SJeff Roberson 	if (keg == NULL)
36090095a784SJeff Roberson 		return (0);
3610099a0e58SBosko Milekic 	pages = count / keg->uk_ipers;
36118355f576SJeff Roberson 
3612099a0e58SBosko Milekic 	if (pages * keg->uk_ipers < count)
36138355f576SJeff Roberson 		pages++;
361457223e99SAndriy Gapon 	pages *= keg->uk_ppera;
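	/*
	 * E.g., count == 1000 with uk_ipers == 50 and uk_ppera == 2 needs
	 * 20 slabs, hence 40 pages of KVA are reserved.
	 */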
3615a553d4b8SJeff Roberson 
3616a4915c21SAttilio Rao #ifdef UMA_MD_SMALL_ALLOC
3617a4915c21SAttilio Rao 	if (keg->uk_ppera > 1) {
3618a4915c21SAttilio Rao #else
3619a4915c21SAttilio Rao 	if (1) {
3620a4915c21SAttilio Rao #endif
362157223e99SAndriy Gapon 		kva = kva_alloc((vm_size_t)pages * PAGE_SIZE);
3622d1f42ac2SAlan Cox 		if (kva == 0)
36238355f576SJeff Roberson 			return (0);
3624a4915c21SAttilio Rao 	} else
3625a4915c21SAttilio Rao 		kva = 0;
3626af526374SJeff Roberson 	KEG_LOCK(keg);
3627099a0e58SBosko Milekic 	keg->uk_kva = kva;
3628a4915c21SAttilio Rao 	keg->uk_offset = 0;
3629099a0e58SBosko Milekic 	keg->uk_maxpages = pages;
3630a4915c21SAttilio Rao #ifdef UMA_MD_SMALL_ALLOC
3631a4915c21SAttilio Rao 	keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc;
3632a4915c21SAttilio Rao #else
3633a4915c21SAttilio Rao 	keg->uk_allocf = noobj_alloc;
3634a4915c21SAttilio Rao #endif
36356fd34d6fSJeff Roberson 	keg->uk_flags |= UMA_ZONE_NOFREE;
3636af526374SJeff Roberson 	KEG_UNLOCK(keg);
3637af526374SJeff Roberson 
36388355f576SJeff Roberson 	return (1);
36398355f576SJeff Roberson }
36408355f576SJeff Roberson 
36418355f576SJeff Roberson /* See uma.h */
36428355f576SJeff Roberson void
36438355f576SJeff Roberson uma_prealloc(uma_zone_t zone, int items)
36448355f576SJeff Roberson {
3645920239efSMark Johnston 	struct vm_domainset_iter di;
3646ab3185d1SJeff Roberson 	uma_domain_t dom;
36478355f576SJeff Roberson 	uma_slab_t slab;
3648099a0e58SBosko Milekic 	uma_keg_t keg;
3649920239efSMark Johnston 	int domain, flags, slabs;
36508355f576SJeff Roberson 
3651e20a199fSJeff Roberson 	keg = zone_first_keg(zone);
36520095a784SJeff Roberson 	if (keg == NULL)
36530095a784SJeff Roberson 		return;
3654af526374SJeff Roberson 	KEG_LOCK(keg);
3655099a0e58SBosko Milekic 	slabs = items / keg->uk_ipers;
3656099a0e58SBosko Milekic 	if (slabs * keg->uk_ipers < items)
36578355f576SJeff Roberson 		slabs++;
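	/*
	 * E.g., items == 130 with uk_ipers == 64 rounds up to 3 slabs,
	 * guaranteeing at least 192 preallocated items.
	 */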
3658920239efSMark Johnston 	flags = M_WAITOK;
3659920239efSMark Johnston 	vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain, &flags);
3660194a979eSMark Johnston 	while (slabs-- > 0) {
3661920239efSMark Johnston 		slab = keg_alloc_slab(keg, zone, domain, flags);
3662e20a199fSJeff Roberson 		if (slab == NULL)
3663194a979eSMark Johnston 			return;
3664e20a199fSJeff Roberson 		MPASS(slab->us_keg == keg);
3665ab3185d1SJeff Roberson 		dom = &keg->uk_domain[slab->us_domain];
3666ab3185d1SJeff Roberson 		LIST_INSERT_HEAD(&dom->ud_free_slab, slab, us_link);
3667920239efSMark Johnston 		if (vm_domainset_iter_policy(&di, &domain) != 0)
3668920239efSMark Johnston 			break;
36698355f576SJeff Roberson 	}
3670af526374SJeff Roberson 	KEG_UNLOCK(keg);
36718355f576SJeff Roberson }
36728355f576SJeff Roberson 
36738355f576SJeff Roberson /* See uma.h */
367444ec2b63SKonstantin Belousov static void
367544ec2b63SKonstantin Belousov uma_reclaim_locked(bool kmem_danger)
36768355f576SJeff Roberson {
367744ec2b63SKonstantin Belousov 
36781431a748SGleb Smirnoff 	CTR0(KTR_UMA, "UMA: vm asked us to release pages!");
367944ec2b63SKonstantin Belousov 	sx_assert(&uma_drain_lock, SA_XLOCKED);
368086bbae32SJeff Roberson 	bucket_enable();
36818355f576SJeff Roberson 	zone_foreach(zone_drain);
368244ec2b63SKonstantin Belousov 	if (vm_page_count_min() || kmem_danger) {
3683a2de44abSAlexander Motin 		cache_drain_safe(NULL);
3684a2de44abSAlexander Motin 		zone_foreach(zone_drain);
3685a2de44abSAlexander Motin 	}
36860f9b7bf3SMark Johnston 
36878355f576SJeff Roberson 	/*
36888355f576SJeff Roberson 	 * Some slabs may have been freed but this zone will be visited early,
36898355f576SJeff Roberson 	 * so visit it again to free pages that became empty once the other
36908355f576SJeff Roberson 	 * zones were drained.  We have to do the same for buckets.
36918355f576SJeff Roberson 	 */
36929643769aSJeff Roberson 	zone_drain(slabzone);
3693cae33c14SJeff Roberson 	bucket_zone_drain();
369444ec2b63SKonstantin Belousov }
369544ec2b63SKonstantin Belousov 
369644ec2b63SKonstantin Belousov void
369744ec2b63SKonstantin Belousov uma_reclaim(void)
369844ec2b63SKonstantin Belousov {
369944ec2b63SKonstantin Belousov 
370044ec2b63SKonstantin Belousov 	sx_xlock(&uma_drain_lock);
370144ec2b63SKonstantin Belousov 	uma_reclaim_locked(false);
370295c4bf75SKonstantin Belousov 	sx_xunlock(&uma_drain_lock);
37038355f576SJeff Roberson }
37048355f576SJeff Roberson 
37052e47807cSJeff Roberson static volatile int uma_reclaim_needed;
370644ec2b63SKonstantin Belousov 
370744ec2b63SKonstantin Belousov void
370844ec2b63SKonstantin Belousov uma_reclaim_wakeup(void)
370944ec2b63SKonstantin Belousov {
371044ec2b63SKonstantin Belousov 
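	/*
	 * Only the 0 -> 1 transition issues a wakeup; concurrent callers
	 * merely bump the counter, which the worker resets to zero after
	 * completing a reclaim pass.
	 */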
37112e47807cSJeff Roberson 	if (atomic_fetchadd_int(&uma_reclaim_needed, 1) == 0)
37122e47807cSJeff Roberson 		wakeup(uma_reclaim);
371344ec2b63SKonstantin Belousov }
371444ec2b63SKonstantin Belousov 
371544ec2b63SKonstantin Belousov void
371644ec2b63SKonstantin Belousov uma_reclaim_worker(void *arg __unused)
371744ec2b63SKonstantin Belousov {
371844ec2b63SKonstantin Belousov 
371944ec2b63SKonstantin Belousov 	for (;;) {
37202e47807cSJeff Roberson 		sx_xlock(&uma_drain_lock);
3721200f8117SKonstantin Belousov 		while (atomic_load_int(&uma_reclaim_needed) == 0)
37222e47807cSJeff Roberson 			sx_sleep(uma_reclaim, &uma_drain_lock, PVM, "umarcl",
37232e47807cSJeff Roberson 			    hz);
37249b43bc27SAndriy Gapon 		sx_xunlock(&uma_drain_lock);
37259b43bc27SAndriy Gapon 		EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_KMEM);
37269b43bc27SAndriy Gapon 		sx_xlock(&uma_drain_lock);
372744ec2b63SKonstantin Belousov 		uma_reclaim_locked(true);
3728200f8117SKonstantin Belousov 		atomic_store_int(&uma_reclaim_needed, 0);
37292e47807cSJeff Roberson 		sx_xunlock(&uma_drain_lock);
37302e47807cSJeff Roberson 		/* Don't fire more than once per-second. */
37312e47807cSJeff Roberson 		pause("umarclslp", hz);
373244ec2b63SKonstantin Belousov 	}
373344ec2b63SKonstantin Belousov }
373444ec2b63SKonstantin Belousov 
3735663b416fSJohn Baldwin /* See uma.h */
3736663b416fSJohn Baldwin int
3737663b416fSJohn Baldwin uma_zone_exhausted(uma_zone_t zone)
3738663b416fSJohn Baldwin {
3739663b416fSJohn Baldwin 	int full;
3740663b416fSJohn Baldwin 
3741663b416fSJohn Baldwin 	ZONE_LOCK(zone);
3742e20a199fSJeff Roberson 	full = (zone->uz_flags & UMA_ZFLAG_FULL);
3743663b416fSJohn Baldwin 	ZONE_UNLOCK(zone);
3744663b416fSJohn Baldwin 	return (full);
3745663b416fSJohn Baldwin }
3746663b416fSJohn Baldwin 
37476c125b8dSMohan Srinivasan int
37486c125b8dSMohan Srinivasan uma_zone_exhausted_nolock(uma_zone_t zone)
37496c125b8dSMohan Srinivasan {
3750e20a199fSJeff Roberson 	return (zone->uz_flags & UMA_ZFLAG_FULL);
37516c125b8dSMohan Srinivasan }
37526c125b8dSMohan Srinivasan 
37538355f576SJeff Roberson void *
3754ab3185d1SJeff Roberson uma_large_malloc_domain(vm_size_t size, int domain, int wait)
37558355f576SJeff Roberson {
37569978bd99SMark Johnston 	struct domainset *policy;
3757ab3185d1SJeff Roberson 	vm_offset_t addr;
37588355f576SJeff Roberson 	uma_slab_t slab;
37598355f576SJeff Roberson 
376030c5525bSAndrew Gallatin 	if (domain != UMA_ANYDOMAIN) {
376130c5525bSAndrew Gallatin 		/* avoid allocs targeting empty domains */
376230c5525bSAndrew Gallatin 		if (VM_DOMAIN_EMPTY(domain))
376330c5525bSAndrew Gallatin 			domain = UMA_ANYDOMAIN;
376430c5525bSAndrew Gallatin 	}
3765ab3185d1SJeff Roberson 	slab = zone_alloc_item(slabzone, NULL, domain, wait);
37668355f576SJeff Roberson 	if (slab == NULL)
37678355f576SJeff Roberson 		return (NULL);
37689978bd99SMark Johnston 	policy = (domain == UMA_ANYDOMAIN) ? DOMAINSET_RR() :
37699978bd99SMark Johnston 	    DOMAINSET_FIXED(domain);
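	/*
	 * UMA_ANYDOMAIN falls back to a round-robin domainset; otherwise
	 * the backing pages must come from the requested domain.
	 */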
37709978bd99SMark Johnston 	addr = kmem_malloc_domainset(policy, size, wait);
3771ab3185d1SJeff Roberson 	if (addr != 0) {
3772ab3185d1SJeff Roberson 		vsetslab(addr, slab);
3773ab3185d1SJeff Roberson 		slab->us_data = (void *)addr;
3774ab3185d1SJeff Roberson 		slab->us_flags = UMA_SLAB_KERNEL | UMA_SLAB_MALLOC;
37758355f576SJeff Roberson 		slab->us_size = size;
3776e2068d0bSJeff Roberson 		slab->us_domain = vm_phys_domain(PHYS_TO_VM_PAGE(
3777ab3185d1SJeff Roberson 		    pmap_kextract(addr)));
37782e47807cSJeff Roberson 		uma_total_inc(size);
37798355f576SJeff Roberson 	} else {
37800095a784SJeff Roberson 		zone_free_item(slabzone, slab, NULL, SKIP_NONE);
37818355f576SJeff Roberson 	}
37828355f576SJeff Roberson 
3783ab3185d1SJeff Roberson 	return ((void *)addr);
3784ab3185d1SJeff Roberson }
3785ab3185d1SJeff Roberson 
3786ab3185d1SJeff Roberson void *
3787ab3185d1SJeff Roberson uma_large_malloc(vm_size_t size, int wait)
3788ab3185d1SJeff Roberson {
3789ab3185d1SJeff Roberson 
3790ab3185d1SJeff Roberson 	return (uma_large_malloc_domain(size, UMA_ANYDOMAIN, wait));
37918355f576SJeff Roberson }
37928355f576SJeff Roberson 
37938355f576SJeff Roberson void
37948355f576SJeff Roberson uma_large_free(uma_slab_t slab)
37958355f576SJeff Roberson {
3796c325e866SKonstantin Belousov 
3797ab3185d1SJeff Roberson 	KASSERT((slab->us_flags & UMA_SLAB_KERNEL) != 0,
3798ab3185d1SJeff Roberson 	    ("uma_large_free:  Memory not allocated with uma_large_malloc."));
379949bfa624SAlan Cox 	kmem_free((vm_offset_t)slab->us_data, slab->us_size);
38002e47807cSJeff Roberson 	uma_total_dec(slab->us_size);
38010095a784SJeff Roberson 	zone_free_item(slabzone, slab, NULL, SKIP_NONE);
38028355f576SJeff Roberson }
38038355f576SJeff Roberson 
380448343a2fSGleb Smirnoff static void
380548343a2fSGleb Smirnoff uma_zero_item(void *item, uma_zone_t zone)
380648343a2fSGleb Smirnoff {
380748343a2fSGleb Smirnoff 
380848343a2fSGleb Smirnoff 	bzero(item, zone->uz_size);
380948343a2fSGleb Smirnoff }
381048343a2fSGleb Smirnoff 
38112e47807cSJeff Roberson unsigned long
38122e47807cSJeff Roberson uma_limit(void)
38132e47807cSJeff Roberson {
38142e47807cSJeff Roberson 
38152e47807cSJeff Roberson 	return (uma_kmem_limit);
38162e47807cSJeff Roberson }
38172e47807cSJeff Roberson 
38182e47807cSJeff Roberson void
38192e47807cSJeff Roberson uma_set_limit(unsigned long limit)
38202e47807cSJeff Roberson {
38212e47807cSJeff Roberson 
38222e47807cSJeff Roberson 	uma_kmem_limit = limit;
38232e47807cSJeff Roberson }
38242e47807cSJeff Roberson 
38252e47807cSJeff Roberson unsigned long
38262e47807cSJeff Roberson uma_size(void)
38272e47807cSJeff Roberson {
38282e47807cSJeff Roberson 
3829ad5b0f5bSJeff Roberson 	return (uma_kmem_total);
3830ad5b0f5bSJeff Roberson }
3831ad5b0f5bSJeff Roberson 
3832ad5b0f5bSJeff Roberson long
3833ad5b0f5bSJeff Roberson uma_avail(void)
3834ad5b0f5bSJeff Roberson {
3835ad5b0f5bSJeff Roberson 
3836ad5b0f5bSJeff Roberson 	return (uma_kmem_limit - uma_kmem_total);
38372e47807cSJeff Roberson }
38382e47807cSJeff Roberson 
38398355f576SJeff Roberson void
38408355f576SJeff Roberson uma_print_stats(void)
38418355f576SJeff Roberson {
38428355f576SJeff Roberson 	zone_foreach(uma_print_zone);
38438355f576SJeff Roberson }
38448355f576SJeff Roberson 
3845504d5de3SJeff Roberson static void
3846504d5de3SJeff Roberson slab_print(uma_slab_t slab)
3847504d5de3SJeff Roberson {
3848ef72505eSJeff Roberson 	printf("slab: keg %p, data %p, freecount %d\n",
3849ef72505eSJeff Roberson 		slab->us_keg, slab->us_data, slab->us_freecount);
3850504d5de3SJeff Roberson }
3851504d5de3SJeff Roberson 
3852504d5de3SJeff Roberson static void
3853504d5de3SJeff Roberson cache_print(uma_cache_t cache)
3854504d5de3SJeff Roberson {
3855504d5de3SJeff Roberson 	printf("alloc: %p(%d), free: %p(%d)\n",
3856504d5de3SJeff Roberson 		cache->uc_allocbucket,
3857504d5de3SJeff Roberson 		cache->uc_allocbucket ? cache->uc_allocbucket->ub_cnt : 0,
3858504d5de3SJeff Roberson 		cache->uc_freebucket,
3859504d5de3SJeff Roberson 		cache->uc_freebucket ? cache->uc_freebucket->ub_cnt : 0);
3860504d5de3SJeff Roberson }
3861504d5de3SJeff Roberson 
3862e20a199fSJeff Roberson static void
3863e20a199fSJeff Roberson uma_print_keg(uma_keg_t keg)
38648355f576SJeff Roberson {
3865ab3185d1SJeff Roberson 	uma_domain_t dom;
3866504d5de3SJeff Roberson 	uma_slab_t slab;
3867ab3185d1SJeff Roberson 	int i;
3868504d5de3SJeff Roberson 
38690b80c1e4SEitan Adler 	printf("keg: %s(%p) size %d(%d) flags %#x ipers %d ppera %d "
3870e20a199fSJeff Roberson 	    "out %d free %d limit %d\n",
3871e20a199fSJeff Roberson 	    keg->uk_name, keg, keg->uk_size, keg->uk_rsize, keg->uk_flags,
3872099a0e58SBosko Milekic 	    keg->uk_ipers, keg->uk_ppera,
387357223e99SAndriy Gapon 	    (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free,
387457223e99SAndriy Gapon 	    keg->uk_free, (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers);
3875ab3185d1SJeff Roberson 	for (i = 0; i < vm_ndomains; i++) {
3876ab3185d1SJeff Roberson 		dom = &keg->uk_domain[i];
3877504d5de3SJeff Roberson 		printf("Part slabs:\n");
3878ab3185d1SJeff Roberson 		LIST_FOREACH(slab, &dom->ud_part_slab, us_link)
3879504d5de3SJeff Roberson 			slab_print(slab);
3880504d5de3SJeff Roberson 		printf("Free slabs:\n");
3881ab3185d1SJeff Roberson 		LIST_FOREACH(slab, &dom->ud_free_slab, us_link)
3882504d5de3SJeff Roberson 			slab_print(slab);
3883504d5de3SJeff Roberson 		printf("Full slabs:\n");
3884ab3185d1SJeff Roberson 		LIST_FOREACH(slab, &dom->ud_full_slab, us_link)
3885504d5de3SJeff Roberson 			slab_print(slab);
3886e20a199fSJeff Roberson 	}
3887ab3185d1SJeff Roberson }
3888e20a199fSJeff Roberson 
3889e20a199fSJeff Roberson void
3890e20a199fSJeff Roberson uma_print_zone(uma_zone_t zone)
3891e20a199fSJeff Roberson {
3892e20a199fSJeff Roberson 	uma_cache_t cache;
3893e20a199fSJeff Roberson 	uma_klink_t kl;
3894e20a199fSJeff Roberson 	int i;
3895e20a199fSJeff Roberson 
38960b80c1e4SEitan Adler 	printf("zone: %s(%p) size %d flags %#x\n",
3897e20a199fSJeff Roberson 	    zone->uz_name, zone, zone->uz_size, zone->uz_flags);
3898e20a199fSJeff Roberson 	LIST_FOREACH(kl, &zone->uz_kegs, kl_link)
3899e20a199fSJeff Roberson 		uma_print_keg(kl->kl_keg);
39003aa6d94eSJohn Baldwin 	CPU_FOREACH(i) {
3901504d5de3SJeff Roberson 		cache = &zone->uz_cpu[i];
3902504d5de3SJeff Roberson 		printf("CPU %d Cache:\n", i);
3903504d5de3SJeff Roberson 		cache_print(cache);
3904504d5de3SJeff Roberson 	}
39058355f576SJeff Roberson }
39068355f576SJeff Roberson 
3907a0d4b0aeSRobert Watson #ifdef DDB
39088355f576SJeff Roberson /*
39097a52a97eSRobert Watson  * Generate statistics across both the zone and its per-CPU caches.  Return
39107a52a97eSRobert Watson  * desired statistics if the pointer is non-NULL for that statistic.
39117a52a97eSRobert Watson  *
39127a52a97eSRobert Watson  * Note: does not update the zone statistics, as it can't safely clear the
39137a52a97eSRobert Watson  * per-CPU cache statistic.
39147a52a97eSRobert Watson  *
39157a52a97eSRobert Watson  * XXXRW: Following the uc_allocbucket and uc_freebucket pointers here isn't
39167a52a97eSRobert Watson  * safe from off-CPU; we should modify the caches to track this information
39177a52a97eSRobert Watson  * directly so that we don't have to.
39187a52a97eSRobert Watson  */
39197a52a97eSRobert Watson static void
39200f9b7bf3SMark Johnston uma_zone_sumstat(uma_zone_t z, long *cachefreep, uint64_t *allocsp,
392185dcf349SGleb Smirnoff     uint64_t *freesp, uint64_t *sleepsp)
39227a52a97eSRobert Watson {
39237a52a97eSRobert Watson 	uma_cache_t cache;
392485dcf349SGleb Smirnoff 	uint64_t allocs, frees, sleeps;
39257a52a97eSRobert Watson 	int cachefree, cpu;
39267a52a97eSRobert Watson 
3927bf965959SSean Bruno 	allocs = frees = sleeps = 0;
39287a52a97eSRobert Watson 	cachefree = 0;
39293aa6d94eSJohn Baldwin 	CPU_FOREACH(cpu) {
39307a52a97eSRobert Watson 		cache = &z->uz_cpu[cpu];
39317a52a97eSRobert Watson 		if (cache->uc_allocbucket != NULL)
39327a52a97eSRobert Watson 			cachefree += cache->uc_allocbucket->ub_cnt;
39337a52a97eSRobert Watson 		if (cache->uc_freebucket != NULL)
39347a52a97eSRobert Watson 			cachefree += cache->uc_freebucket->ub_cnt;
39357a52a97eSRobert Watson 		allocs += cache->uc_allocs;
39367a52a97eSRobert Watson 		frees += cache->uc_frees;
39377a52a97eSRobert Watson 	}
39387a52a97eSRobert Watson 	allocs += z->uz_allocs;
39397a52a97eSRobert Watson 	frees += z->uz_frees;
3940bf965959SSean Bruno 	sleeps += z->uz_sleeps;
39417a52a97eSRobert Watson 	if (cachefreep != NULL)
39427a52a97eSRobert Watson 		*cachefreep = cachefree;
39437a52a97eSRobert Watson 	if (allocsp != NULL)
39447a52a97eSRobert Watson 		*allocsp = allocs;
39457a52a97eSRobert Watson 	if (freesp != NULL)
39467a52a97eSRobert Watson 		*freesp = frees;
3947bf965959SSean Bruno 	if (sleepsp != NULL)
3948bf965959SSean Bruno 		*sleepsp = sleeps;
39497a52a97eSRobert Watson }
3950a0d4b0aeSRobert Watson #endif /* DDB */
39517a52a97eSRobert Watson 
39527a52a97eSRobert Watson static int
39537a52a97eSRobert Watson sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS)
39547a52a97eSRobert Watson {
39557a52a97eSRobert Watson 	uma_keg_t kz;
39567a52a97eSRobert Watson 	uma_zone_t z;
39577a52a97eSRobert Watson 	int count;
39587a52a97eSRobert Watson 
39597a52a97eSRobert Watson 	count = 0;
3960111fbcd5SBryan Venteicher 	rw_rlock(&uma_rwlock);
39617a52a97eSRobert Watson 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
39627a52a97eSRobert Watson 		LIST_FOREACH(z, &kz->uk_zones, uz_link)
39637a52a97eSRobert Watson 			count++;
39647a52a97eSRobert Watson 	}
3965111fbcd5SBryan Venteicher 	rw_runlock(&uma_rwlock);
39667a52a97eSRobert Watson 	return (sysctl_handle_int(oidp, &count, 0, req));
39677a52a97eSRobert Watson }
39687a52a97eSRobert Watson 
39697a52a97eSRobert Watson static int
39707a52a97eSRobert Watson sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
39717a52a97eSRobert Watson {
39727a52a97eSRobert Watson 	struct uma_stream_header ush;
39737a52a97eSRobert Watson 	struct uma_type_header uth;
397463b5d112SKonstantin Belousov 	struct uma_percpu_stat *ups;
3975ab3185d1SJeff Roberson 	uma_zone_domain_t zdom;
39767a52a97eSRobert Watson 	struct sbuf sbuf;
39777a52a97eSRobert Watson 	uma_cache_t cache;
3978e20a199fSJeff Roberson 	uma_klink_t kl;
39797a52a97eSRobert Watson 	uma_keg_t kz;
39807a52a97eSRobert Watson 	uma_zone_t z;
3981e20a199fSJeff Roberson 	uma_keg_t k;
39824e657159SMatthew D Fleming 	int count, error, i;
39837a52a97eSRobert Watson 
398400f0e671SMatthew D Fleming 	error = sysctl_wire_old_buffer(req, 0);
398500f0e671SMatthew D Fleming 	if (error != 0)
398600f0e671SMatthew D Fleming 		return (error);
39874e657159SMatthew D Fleming 	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
39881eafc078SIan Lepore 	sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL);
398963b5d112SKonstantin Belousov 	ups = malloc((mp_maxid + 1) * sizeof(*ups), M_TEMP, M_WAITOK);
39904e657159SMatthew D Fleming 
3991404a593eSMatthew D Fleming 	count = 0;
3992111fbcd5SBryan Venteicher 	rw_rlock(&uma_rwlock);
39937a52a97eSRobert Watson 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
39947a52a97eSRobert Watson 		LIST_FOREACH(z, &kz->uk_zones, uz_link)
39957a52a97eSRobert Watson 			count++;
39967a52a97eSRobert Watson 	}
39977a52a97eSRobert Watson 
39987a52a97eSRobert Watson 	/*
39997a52a97eSRobert Watson 	 * Insert stream header.
40007a52a97eSRobert Watson 	 */
40017a52a97eSRobert Watson 	bzero(&ush, sizeof(ush));
40027a52a97eSRobert Watson 	ush.ush_version = UMA_STREAM_VERSION;
4003ab3a57c0SRobert Watson 	ush.ush_maxcpus = (mp_maxid + 1);
40047a52a97eSRobert Watson 	ush.ush_count = count;
40054e657159SMatthew D Fleming 	(void)sbuf_bcat(&sbuf, &ush, sizeof(ush));
40067a52a97eSRobert Watson 
40077a52a97eSRobert Watson 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
40087a52a97eSRobert Watson 		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
40097a52a97eSRobert Watson 			bzero(&uth, sizeof(uth));
40107a52a97eSRobert Watson 			ZONE_LOCK(z);
4011cbbb4a00SRobert Watson 			strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
40127a52a97eSRobert Watson 			uth.uth_align = kz->uk_align;
40137a52a97eSRobert Watson 			uth.uth_size = kz->uk_size;
40147a52a97eSRobert Watson 			uth.uth_rsize = kz->uk_rsize;
4015e20a199fSJeff Roberson 			LIST_FOREACH(kl, &z->uz_kegs, kl_link) {
4016e20a199fSJeff Roberson 				k = kl->kl_keg;
4017e20a199fSJeff Roberson 				uth.uth_maxpages += k->uk_maxpages;
4018e20a199fSJeff Roberson 				uth.uth_pages += k->uk_pages;
4019e20a199fSJeff Roberson 				uth.uth_keg_free += k->uk_free;
4020e20a199fSJeff Roberson 				uth.uth_limit = (k->uk_maxpages / k->uk_ppera)
4021e20a199fSJeff Roberson 				    * k->uk_ipers;
4022e20a199fSJeff Roberson 			}
4023cbbb4a00SRobert Watson 
4024cbbb4a00SRobert Watson 			/*
4025cbbb4a00SRobert Watson 			 * A zone is secondary if it is not the first entry
4026cbbb4a00SRobert Watson 			 * on the keg's zone list.
4027cbbb4a00SRobert Watson 			 */
4028e20a199fSJeff Roberson 			if ((z->uz_flags & UMA_ZONE_SECONDARY) &&
4029cbbb4a00SRobert Watson 			    (LIST_FIRST(&kz->uk_zones) != z))
4030cbbb4a00SRobert Watson 				uth.uth_zone_flags = UTH_ZONE_SECONDARY;
4031cbbb4a00SRobert Watson 
4032ab3185d1SJeff Roberson 			for (i = 0; i < vm_ndomains; i++) {
4033ab3185d1SJeff Roberson 				zdom = &z->uz_domain[i];
40340f9b7bf3SMark Johnston 				uth.uth_zone_free += zdom->uzd_nitems;
4035ab3185d1SJeff Roberson 			}
40367a52a97eSRobert Watson 			uth.uth_allocs = z->uz_allocs;
40377a52a97eSRobert Watson 			uth.uth_frees = z->uz_frees;
40382019094aSRobert Watson 			uth.uth_fails = z->uz_fails;
4039bf965959SSean Bruno 			uth.uth_sleeps = z->uz_sleeps;
40407a52a97eSRobert Watson 			/*
40412450bbb8SRobert Watson 			 * While it is not normally safe to access the cache
40422450bbb8SRobert Watson 			 * bucket pointers while not on the CPU that owns the
40432450bbb8SRobert Watson 			 * cache, we only allow the pointers to be exchanged
40442450bbb8SRobert Watson 			 * without the zone lock held, not invalidated, so
40452450bbb8SRobert Watson 			 * accept the possible race associated with bucket
40462450bbb8SRobert Watson 			 * exchange during monitoring.
40477a52a97eSRobert Watson 			 */
404863b5d112SKonstantin Belousov 			for (i = 0; i < mp_maxid + 1; i++) {
404963b5d112SKonstantin Belousov 				bzero(&ups[i], sizeof(*ups));
405063b5d112SKonstantin Belousov 				if (kz->uk_flags & UMA_ZFLAG_INTERNAL ||
405163b5d112SKonstantin Belousov 				    CPU_ABSENT(i))
405263b5d112SKonstantin Belousov 					continue;
40537a52a97eSRobert Watson 				cache = &z->uz_cpu[i];
40547a52a97eSRobert Watson 				if (cache->uc_allocbucket != NULL)
405563b5d112SKonstantin Belousov 					ups[i].ups_cache_free +=
40567a52a97eSRobert Watson 					    cache->uc_allocbucket->ub_cnt;
40577a52a97eSRobert Watson 				if (cache->uc_freebucket != NULL)
405863b5d112SKonstantin Belousov 					ups[i].ups_cache_free +=
40597a52a97eSRobert Watson 					    cache->uc_freebucket->ub_cnt;
406063b5d112SKonstantin Belousov 				ups[i].ups_allocs = cache->uc_allocs;
406163b5d112SKonstantin Belousov 				ups[i].ups_frees = cache->uc_frees;
40627a52a97eSRobert Watson 			}
40632450bbb8SRobert Watson 			ZONE_UNLOCK(z);
406463b5d112SKonstantin Belousov 			(void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
406563b5d112SKonstantin Belousov 			for (i = 0; i < mp_maxid + 1; i++)
406663b5d112SKonstantin Belousov 				(void)sbuf_bcat(&sbuf, &ups[i], sizeof(ups[i]));
40677a52a97eSRobert Watson 		}
40687a52a97eSRobert Watson 	}
4069111fbcd5SBryan Venteicher 	rw_runlock(&uma_rwlock);
40704e657159SMatthew D Fleming 	error = sbuf_finish(&sbuf);
40714e657159SMatthew D Fleming 	sbuf_delete(&sbuf);
407263b5d112SKonstantin Belousov 	free(ups, M_TEMP);
40737a52a97eSRobert Watson 	return (error);
40747a52a97eSRobert Watson }
407548c5777eSRobert Watson 
40760a5a3ccbSGleb Smirnoff int
40770a5a3ccbSGleb Smirnoff sysctl_handle_uma_zone_max(SYSCTL_HANDLER_ARGS)
40780a5a3ccbSGleb Smirnoff {
40790a5a3ccbSGleb Smirnoff 	uma_zone_t zone = *(uma_zone_t *)arg1;
408016be9f54SGleb Smirnoff 	int error, max;
40810a5a3ccbSGleb Smirnoff 
408216be9f54SGleb Smirnoff 	max = uma_zone_get_max(zone);
40830a5a3ccbSGleb Smirnoff 	error = sysctl_handle_int(oidp, &max, 0, req);
40840a5a3ccbSGleb Smirnoff 	if (error || !req->newptr)
40850a5a3ccbSGleb Smirnoff 		return (error);
40860a5a3ccbSGleb Smirnoff 
40870a5a3ccbSGleb Smirnoff 	uma_zone_set_max(zone, max);
40880a5a3ccbSGleb Smirnoff 
40890a5a3ccbSGleb Smirnoff 	return (0);
40900a5a3ccbSGleb Smirnoff }
40910a5a3ccbSGleb Smirnoff 
40920a5a3ccbSGleb Smirnoff int
40930a5a3ccbSGleb Smirnoff sysctl_handle_uma_zone_cur(SYSCTL_HANDLER_ARGS)
40940a5a3ccbSGleb Smirnoff {
40950a5a3ccbSGleb Smirnoff 	uma_zone_t zone = *(uma_zone_t *)arg1;
40960a5a3ccbSGleb Smirnoff 	int cur;
40970a5a3ccbSGleb Smirnoff 
40980a5a3ccbSGleb Smirnoff 	cur = uma_zone_get_cur(zone);
40990a5a3ccbSGleb Smirnoff 	return (sysctl_handle_int(oidp, &cur, 0, req));
41000a5a3ccbSGleb Smirnoff }
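/*
 * A hypothetical registration for the handlers above would pass the
 * address of a uma_zone_t variable as arg1, e.g.:
 *
 *	SYSCTL_PROC(_kern_foo, OID_AUTO, max,
 *	    CTLTYPE_INT | CTLFLAG_RW, &foo_zone, 0,
 *	    sysctl_handle_uma_zone_max, "I", "Maximum foo items");
 *
 * where the "foo" names are illustrative only.
 */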
41010a5a3ccbSGleb Smirnoff 
41029542ea7bSGleb Smirnoff #ifdef INVARIANTS
41039542ea7bSGleb Smirnoff static uma_slab_t
41049542ea7bSGleb Smirnoff uma_dbg_getslab(uma_zone_t zone, void *item)
41059542ea7bSGleb Smirnoff {
41069542ea7bSGleb Smirnoff 	uma_slab_t slab;
41079542ea7bSGleb Smirnoff 	uma_keg_t keg;
41089542ea7bSGleb Smirnoff 	uint8_t *mem;
41099542ea7bSGleb Smirnoff 
41109542ea7bSGleb Smirnoff 	mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
41119542ea7bSGleb Smirnoff 	if (zone->uz_flags & UMA_ZONE_VTOSLAB) {
41129542ea7bSGleb Smirnoff 		slab = vtoslab((vm_offset_t)mem);
41139542ea7bSGleb Smirnoff 	} else {
41149542ea7bSGleb Smirnoff 		/*
41159542ea7bSGleb Smirnoff 		 * It is safe to return the slab here even though the
41169542ea7bSGleb Smirnoff 		 * zone is unlocked because the item's allocation state
41179542ea7bSGleb Smirnoff 		 * essentially holds a reference.
41189542ea7bSGleb Smirnoff 		 */
41199542ea7bSGleb Smirnoff 		ZONE_LOCK(zone);
41209542ea7bSGleb Smirnoff 		keg = LIST_FIRST(&zone->uz_kegs)->kl_keg;
41219542ea7bSGleb Smirnoff 		if (keg->uk_flags & UMA_ZONE_HASH)
41229542ea7bSGleb Smirnoff 			slab = hash_sfind(&keg->uk_hash, mem);
41239542ea7bSGleb Smirnoff 		else
41249542ea7bSGleb Smirnoff 			slab = (uma_slab_t)(mem + keg->uk_pgoff);
41259542ea7bSGleb Smirnoff 		ZONE_UNLOCK(zone);
41269542ea7bSGleb Smirnoff 	}
41279542ea7bSGleb Smirnoff 
41289542ea7bSGleb Smirnoff 	return (slab);
41299542ea7bSGleb Smirnoff }
41309542ea7bSGleb Smirnoff 
4131c5deaf04SGleb Smirnoff static bool
4132c5deaf04SGleb Smirnoff uma_dbg_zskip(uma_zone_t zone, void *mem)
4133c5deaf04SGleb Smirnoff {
4134c5deaf04SGleb Smirnoff 	uma_keg_t keg;
4135c5deaf04SGleb Smirnoff 
4136c5deaf04SGleb Smirnoff 	if ((keg = zone_first_keg(zone)) == NULL)
4137c5deaf04SGleb Smirnoff 		return (true);
4138c5deaf04SGleb Smirnoff 
4139c5deaf04SGleb Smirnoff 	return (uma_dbg_kskip(keg, mem));
4140c5deaf04SGleb Smirnoff }
4141c5deaf04SGleb Smirnoff 
4142c5deaf04SGleb Smirnoff static bool
4143c5deaf04SGleb Smirnoff uma_dbg_kskip(uma_keg_t keg, void *mem)
4144c5deaf04SGleb Smirnoff {
4145c5deaf04SGleb Smirnoff 	uintptr_t idx;
4146c5deaf04SGleb Smirnoff 
4147c5deaf04SGleb Smirnoff 	if (dbg_divisor == 0)
4148c5deaf04SGleb Smirnoff 		return (true);
4149c5deaf04SGleb Smirnoff 
4150c5deaf04SGleb Smirnoff 	if (dbg_divisor == 1)
4151c5deaf04SGleb Smirnoff 		return (false);
4152c5deaf04SGleb Smirnoff 
4153c5deaf04SGleb Smirnoff 	idx = (uintptr_t)mem >> PAGE_SHIFT;
4154c5deaf04SGleb Smirnoff 	if (keg->uk_ipers > 1) {
4155c5deaf04SGleb Smirnoff 		idx *= keg->uk_ipers;
4156c5deaf04SGleb Smirnoff 		idx += ((uintptr_t)mem & PAGE_MASK) / keg->uk_rsize;
4157c5deaf04SGleb Smirnoff 	}
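	/*
	 * Only items whose index is a multiple of dbg_divisor get the
	 * full debugging treatment; e.g., dbg_divisor == 3 checks every
	 * third item and skips the rest.
	 */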
4158c5deaf04SGleb Smirnoff 
4159c5deaf04SGleb Smirnoff 	if ((idx / dbg_divisor) * dbg_divisor != idx) {
4160c5deaf04SGleb Smirnoff 		counter_u64_add(uma_skip_cnt, 1);
4161c5deaf04SGleb Smirnoff 		return (true);
4162c5deaf04SGleb Smirnoff 	}
4163c5deaf04SGleb Smirnoff 	counter_u64_add(uma_dbg_cnt, 1);
4164c5deaf04SGleb Smirnoff 
4165c5deaf04SGleb Smirnoff 	return (false);
4166c5deaf04SGleb Smirnoff }
4167c5deaf04SGleb Smirnoff 
41689542ea7bSGleb Smirnoff /*
41699542ea7bSGleb Smirnoff  * Set up the slab's freei data such that uma_dbg_free can function.
41709542ea7bSGleb Smirnoff  *
41719542ea7bSGleb Smirnoff  */
41729542ea7bSGleb Smirnoff static void
41739542ea7bSGleb Smirnoff uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item)
41749542ea7bSGleb Smirnoff {
41759542ea7bSGleb Smirnoff 	uma_keg_t keg;
41769542ea7bSGleb Smirnoff 	int freei;
41779542ea7bSGleb Smirnoff 
41789542ea7bSGleb Smirnoff 	if (slab == NULL) {
41799542ea7bSGleb Smirnoff 		slab = uma_dbg_getslab(zone, item);
41809542ea7bSGleb Smirnoff 		if (slab == NULL)
41819542ea7bSGleb Smirnoff 			panic("uma: item %p did not belong to zone %s\n",
41829542ea7bSGleb Smirnoff 			    item, zone->uz_name);
41839542ea7bSGleb Smirnoff 	}
41849542ea7bSGleb Smirnoff 	keg = slab->us_keg;
41859542ea7bSGleb Smirnoff 	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
41869542ea7bSGleb Smirnoff 
41879542ea7bSGleb Smirnoff 	if (BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree))
41889542ea7bSGleb Smirnoff 		panic("Duplicate alloc of %p from zone %p(%s) slab %p(%d)\n",
41899542ea7bSGleb Smirnoff 		    item, zone, zone->uz_name, slab, freei);
41909542ea7bSGleb Smirnoff 	BIT_SET_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree);
41939542ea7bSGleb Smirnoff }
41949542ea7bSGleb Smirnoff 
41959542ea7bSGleb Smirnoff /*
41969542ea7bSGleb Smirnoff  * Verifies freed addresses.  Checks for alignment, valid slab membership
41979542ea7bSGleb Smirnoff  * and duplicate frees.
41989542ea7bSGleb Smirnoff  *
41999542ea7bSGleb Smirnoff  */
42009542ea7bSGleb Smirnoff static void
42019542ea7bSGleb Smirnoff uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
42029542ea7bSGleb Smirnoff {
42039542ea7bSGleb Smirnoff 	uma_keg_t keg;
42049542ea7bSGleb Smirnoff 	int freei;
42059542ea7bSGleb Smirnoff 
42069542ea7bSGleb Smirnoff 	if (slab == NULL) {
42079542ea7bSGleb Smirnoff 		slab = uma_dbg_getslab(zone, item);
42089542ea7bSGleb Smirnoff 		if (slab == NULL)
42099542ea7bSGleb Smirnoff 			panic("uma: Freed item %p did not belong to zone %s\n",
42109542ea7bSGleb Smirnoff 			    item, zone->uz_name);
42119542ea7bSGleb Smirnoff 	}
42129542ea7bSGleb Smirnoff 	keg = slab->us_keg;
42139542ea7bSGleb Smirnoff 	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
42149542ea7bSGleb Smirnoff 
42159542ea7bSGleb Smirnoff 	if (freei >= keg->uk_ipers)
42169542ea7bSGleb Smirnoff 		panic("Invalid free of %p from zone %p(%s) slab %p(%d)\n",
42179542ea7bSGleb Smirnoff 		    item, zone, zone->uz_name, slab, freei);
42189542ea7bSGleb Smirnoff 
42199542ea7bSGleb Smirnoff 	if (((freei * keg->uk_rsize) + slab->us_data) != item)
42209542ea7bSGleb Smirnoff 		panic("Unaligned free of %p from zone %p(%s) slab %p(%d)\n",
42219542ea7bSGleb Smirnoff 		    item, zone, zone->uz_name, slab, freei);
42229542ea7bSGleb Smirnoff 
42239542ea7bSGleb Smirnoff 	if (!BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree))
42249542ea7bSGleb Smirnoff 		panic("Duplicate free of %p from zone %p(%s) slab %p(%d)\n",
42259542ea7bSGleb Smirnoff 		    item, zone, zone->uz_name, slab, freei);
42269542ea7bSGleb Smirnoff 
42279542ea7bSGleb Smirnoff 	BIT_CLR_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree);
42289542ea7bSGleb Smirnoff }
42299542ea7bSGleb Smirnoff #endif /* INVARIANTS */
42309542ea7bSGleb Smirnoff 
423148c5777eSRobert Watson #ifdef DDB
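/* Invoked from ddb(4) as "show uma"; walks every keg/zone pair. */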
423248c5777eSRobert Watson DB_SHOW_COMMAND(uma, db_show_uma)
423348c5777eSRobert Watson {
423448c5777eSRobert Watson 	uma_keg_t kz;
423548c5777eSRobert Watson 	uma_zone_t z;
4236ab3185d1SJeff Roberson 	uint64_t allocs, frees, sleeps;
42370f9b7bf3SMark Johnston 	long cachefree;
42380f9b7bf3SMark Johnston 	int i;
423948c5777eSRobert Watson 
424003175483SAlexander Motin 	db_printf("%18s %8s %8s %8s %12s %8s %8s\n", "Zone", "Size", "Used",
424103175483SAlexander Motin 	    "Free", "Requests", "Sleeps", "Bucket");
424248c5777eSRobert Watson 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
424348c5777eSRobert Watson 		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
424448c5777eSRobert Watson 			if (kz->uk_flags & UMA_ZFLAG_INTERNAL) {
424548c5777eSRobert Watson 				allocs = z->uz_allocs;
424648c5777eSRobert Watson 				frees = z->uz_frees;
4247bf965959SSean Bruno 				sleeps = z->uz_sleeps;
424848c5777eSRobert Watson 				cachefree = 0;
424948c5777eSRobert Watson 			} else
425048c5777eSRobert Watson 				uma_zone_sumstat(z, &cachefree, &allocs,
4251bf965959SSean Bruno 				    &frees, &sleeps);
4252e20a199fSJeff Roberson 			if (!((z->uz_flags & UMA_ZONE_SECONDARY) &&
425348c5777eSRobert Watson 			    (LIST_FIRST(&kz->uk_zones) != z)))
425448c5777eSRobert Watson 				cachefree += kz->uk_free;
42550f9b7bf3SMark Johnston 			for (i = 0; i < vm_ndomains; i++)
42560f9b7bf3SMark Johnston 				cachefree += z->uz_domain[i].uzd_nitems;
42570f9b7bf3SMark Johnston 
42580f9b7bf3SMark Johnston 			db_printf("%18s %8ju %8jd %8ld %12ju %8ju %8u\n",
425903175483SAlexander Motin 			    z->uz_name, (uintmax_t)kz->uk_size,
4260ae4e9636SRobert Watson 			    (intmax_t)(allocs - frees), cachefree,
426103175483SAlexander Motin 			    (uintmax_t)allocs, sleeps, z->uz_count);
4262687c94aaSJohn Baldwin 			if (db_pager_quit)
4263687c94aaSJohn Baldwin 				return;
426448c5777eSRobert Watson 		}
426548c5777eSRobert Watson 	}
426648c5777eSRobert Watson }
426703175483SAlexander Motin 
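/* Invoked from ddb(4) as "show umacache"; covers cache-only zones. */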
426803175483SAlexander Motin DB_SHOW_COMMAND(umacache, db_show_umacache)
426903175483SAlexander Motin {
427003175483SAlexander Motin 	uma_zone_t z;
4271ab3185d1SJeff Roberson 	uint64_t allocs, frees;
42720f9b7bf3SMark Johnston 	long cachefree;
42730f9b7bf3SMark Johnston 	int i;
427403175483SAlexander Motin 
427503175483SAlexander Motin 	db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free",
427603175483SAlexander Motin 	    "Requests", "Bucket");
427703175483SAlexander Motin 	LIST_FOREACH(z, &uma_cachezones, uz_link) {
427803175483SAlexander Motin 		uma_zone_sumstat(z, &cachefree, &allocs, &frees, NULL);
42790f9b7bf3SMark Johnston 		for (i = 0; i < vm_ndomains; i++)
42800f9b7bf3SMark Johnston 			cachefree += z->uz_domain[i].uzd_nitems;
42810f9b7bf3SMark Johnston 		db_printf("%18s %8ju %8jd %8ld %12ju %8u\n",
428203175483SAlexander Motin 		    z->uz_name, (uintmax_t)z->uz_size,
428303175483SAlexander Motin 		    (intmax_t)(allocs - frees), cachefree,
428403175483SAlexander Motin 		    (uintmax_t)allocs, z->uz_count);
428503175483SAlexander Motin 		if (db_pager_quit)
428603175483SAlexander Motin 			return;
428703175483SAlexander Motin 	}
428803175483SAlexander Motin }
42899542ea7bSGleb Smirnoff #endif	/* DDB */
4290