xref: /freebsd/sys/vm/uma_core.c (revision b6715dab8fff7e4869777a68db83c26c2bf841fd)
160727d8bSWarner Losh /*-
2fe267a55SPedro F. Giffuni  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3fe267a55SPedro F. Giffuni  *
4ef72505eSJeff Roberson  * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson <jeff@FreeBSD.org>
508ecce74SRobert Watson  * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
6ae4e9636SRobert Watson  * Copyright (c) 2004-2006 Robert N. M. Watson
708ecce74SRobert Watson  * All rights reserved.
88355f576SJeff Roberson  *
98355f576SJeff Roberson  * Redistribution and use in source and binary forms, with or without
108355f576SJeff Roberson  * modification, are permitted provided that the following conditions
118355f576SJeff Roberson  * are met:
128355f576SJeff Roberson  * 1. Redistributions of source code must retain the above copyright
138355f576SJeff Roberson  *    notice unmodified, this list of conditions, and the following
148355f576SJeff Roberson  *    disclaimer.
158355f576SJeff Roberson  * 2. Redistributions in binary form must reproduce the above copyright
168355f576SJeff Roberson  *    notice, this list of conditions and the following disclaimer in the
178355f576SJeff Roberson  *    documentation and/or other materials provided with the distribution.
188355f576SJeff Roberson  *
198355f576SJeff Roberson  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
208355f576SJeff Roberson  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
218355f576SJeff Roberson  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
228355f576SJeff Roberson  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
238355f576SJeff Roberson  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
248355f576SJeff Roberson  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
258355f576SJeff Roberson  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
268355f576SJeff Roberson  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
278355f576SJeff Roberson  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
288355f576SJeff Roberson  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
298355f576SJeff Roberson  */
308355f576SJeff Roberson 
318355f576SJeff Roberson /*
328355f576SJeff Roberson  * uma_core.c  Implementation of the Universal Memory allocator
338355f576SJeff Roberson  *
348355f576SJeff Roberson  * This allocator is intended to replace the multitude of similar object caches
358355f576SJeff Roberson  * in the standard FreeBSD kernel.  The intent is to be flexible as well as
36763df3ecSPedro F. Giffuni  * efficient.  A primary design goal is to return unused memory to the rest of
378355f576SJeff Roberson  * the system.  This will make the system as a whole more flexible due to the
388355f576SJeff Roberson  * ability to move memory to subsystems which most need it instead of leaving
398355f576SJeff Roberson  * pools of reserved memory unused.
408355f576SJeff Roberson  *
418355f576SJeff Roberson  * The basic ideas stem from similar slab/zone based allocators whose algorithms
428355f576SJeff Roberson  * are well known.
438355f576SJeff Roberson  *
448355f576SJeff Roberson  */
458355f576SJeff Roberson 
468355f576SJeff Roberson /*
478355f576SJeff Roberson  * TODO:
488355f576SJeff Roberson  *	- Improve memory usage for large allocations
498355f576SJeff Roberson  *	- Investigate cache size adjustments
508355f576SJeff Roberson  */
518355f576SJeff Roberson 
52874651b1SDavid E. O'Brien #include <sys/cdefs.h>
53874651b1SDavid E. O'Brien __FBSDID("$FreeBSD$");
54874651b1SDavid E. O'Brien 
5548c5777eSRobert Watson #include "opt_ddb.h"
568355f576SJeff Roberson #include "opt_param.h"
578d689e04SGleb Smirnoff #include "opt_vm.h"
5848c5777eSRobert Watson 
598355f576SJeff Roberson #include <sys/param.h>
608355f576SJeff Roberson #include <sys/systm.h>
61ef72505eSJeff Roberson #include <sys/bitset.h>
629b43bc27SAndriy Gapon #include <sys/eventhandler.h>
638355f576SJeff Roberson #include <sys/kernel.h>
648355f576SJeff Roberson #include <sys/types.h>
65ad5b0f5bSJeff Roberson #include <sys/limits.h>
668355f576SJeff Roberson #include <sys/queue.h>
678355f576SJeff Roberson #include <sys/malloc.h>
683659f747SRobert Watson #include <sys/ktr.h>
698355f576SJeff Roberson #include <sys/lock.h>
708355f576SJeff Roberson #include <sys/sysctl.h>
718355f576SJeff Roberson #include <sys/mutex.h>
724c1cc01cSJohn Baldwin #include <sys/proc.h>
7310cb2424SMark Murray #include <sys/random.h>
7489f6b863SAttilio Rao #include <sys/rwlock.h>
757a52a97eSRobert Watson #include <sys/sbuf.h>
76a2de44abSAlexander Motin #include <sys/sched.h>
778355f576SJeff Roberson #include <sys/smp.h>
78e60b2fcbSGleb Smirnoff #include <sys/taskqueue.h>
7986bbae32SJeff Roberson #include <sys/vmmeter.h>
8086bbae32SJeff Roberson 
818355f576SJeff Roberson #include <vm/vm.h>
828355f576SJeff Roberson #include <vm/vm_object.h>
838355f576SJeff Roberson #include <vm/vm_page.h>
84a4915c21SAttilio Rao #include <vm/vm_pageout.h>
858355f576SJeff Roberson #include <vm/vm_param.h>
86ab3185d1SJeff Roberson #include <vm/vm_phys.h>
878355f576SJeff Roberson #include <vm/vm_map.h>
888355f576SJeff Roberson #include <vm/vm_kern.h>
898355f576SJeff Roberson #include <vm/vm_extern.h>
908355f576SJeff Roberson #include <vm/uma.h>
918355f576SJeff Roberson #include <vm/uma_int.h>
92639c9550SJeff Roberson #include <vm/uma_dbg.h>
938355f576SJeff Roberson 
9448c5777eSRobert Watson #include <ddb/ddb.h>
9548c5777eSRobert Watson 
968d689e04SGleb Smirnoff #ifdef DEBUG_MEMGUARD
978d689e04SGleb Smirnoff #include <vm/memguard.h>
988d689e04SGleb Smirnoff #endif
998d689e04SGleb Smirnoff 
1008355f576SJeff Roberson /*
101ab3185d1SJeff Roberson  * This is the zone and keg from which all zones are spawned.
1028355f576SJeff Roberson  */
103ab3185d1SJeff Roberson static uma_zone_t kegs;
104ab3185d1SJeff Roberson static uma_zone_t zones;
1058355f576SJeff Roberson 
106ab3185d1SJeff Roberson /* This is the zone from which all offpage uma_slab_ts are allocated. */
1078355f576SJeff Roberson static uma_zone_t slabzone;
1088355f576SJeff Roberson 
1098355f576SJeff Roberson /*
1108355f576SJeff Roberson  * The initial hash tables come out of this zone so they can be allocated
1118355f576SJeff Roberson  * prior to malloc coming up.
1128355f576SJeff Roberson  */
1138355f576SJeff Roberson static uma_zone_t hashzone;
1148355f576SJeff Roberson 
1151e319f6dSRobert Watson /* The boot-time adjusted value for cache line alignment. */
116e4cd31ddSJeff Roberson int uma_align_cache = 64 - 1;
1171e319f6dSRobert Watson 
118961647dfSJeff Roberson static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");
119961647dfSJeff Roberson 
1208355f576SJeff Roberson /*
12186bbae32SJeff Roberson  * Are we allowed to allocate buckets?
12286bbae32SJeff Roberson  */
12386bbae32SJeff Roberson static int bucketdisable = 1;
12486bbae32SJeff Roberson 
125099a0e58SBosko Milekic /* Linked list of all kegs in the system */
12613e403fdSAntoine Brodin static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);
1278355f576SJeff Roberson 
12803175483SAlexander Motin /* Linked list of all cache-only zones in the system */
12903175483SAlexander Motin static LIST_HEAD(,uma_zone) uma_cachezones =
13003175483SAlexander Motin     LIST_HEAD_INITIALIZER(uma_cachezones);
13103175483SAlexander Motin 
132111fbcd5SBryan Venteicher /* This RW lock protects the keg list */
133fe933c1dSMateusz Guzik static struct rwlock_padalign __exclusive_cache_line uma_rwlock;
1348355f576SJeff Roberson 
135ac0a6fd0SGleb Smirnoff /*
136ac0a6fd0SGleb Smirnoff  * Pointer and counter to pool of pages, that is preallocated at
137ac0a6fd0SGleb Smirnoff  * startup to bootstrap UMA.  Early zones continue to use the pool
138ac0a6fd0SGleb Smirnoff  * until it is depleted, so allocations may happen after boot, thus
139ac0a6fd0SGleb Smirnoff  * we need a mutex to protect it.
140ac0a6fd0SGleb Smirnoff  */
141ac0a6fd0SGleb Smirnoff static char *bootmem;
142ac0a6fd0SGleb Smirnoff static int boot_pages;
143ac0a6fd0SGleb Smirnoff static struct mtx uma_boot_pages_mtx;
1448355f576SJeff Roberson 
14595c4bf75SKonstantin Belousov static struct sx uma_drain_lock;
14695c4bf75SKonstantin Belousov 
1472e47807cSJeff Roberson /* kmem soft limit. */
148ad5b0f5bSJeff Roberson static unsigned long uma_kmem_limit = LONG_MAX;
1492e47807cSJeff Roberson static volatile unsigned long uma_kmem_total;
1502e47807cSJeff Roberson 
1518355f576SJeff Roberson /* Is the VM done starting up? */
1528355f576SJeff Roberson static int booted = 0;
153342f1793SAlan Cox #define	UMA_STARTUP	1
154342f1793SAlan Cox #define	UMA_STARTUP2	2
1558355f576SJeff Roberson 
156ef72505eSJeff Roberson /*
1579643769aSJeff Roberson  * This is the handle used to schedule events that need to happen
1589643769aSJeff Roberson  * outside of the allocation fast path.
1599643769aSJeff Roberson  */
1608355f576SJeff Roberson static struct callout uma_callout;
1619643769aSJeff Roberson #define	UMA_TIMEOUT	20		/* Seconds for callout interval. */
1628355f576SJeff Roberson 
1638355f576SJeff Roberson /*
1648355f576SJeff Roberson  * This structure is passed as the zone ctor arg so that I don't have to create
1658355f576SJeff Roberson  * a special allocation function just for zones.
1668355f576SJeff Roberson  */
1678355f576SJeff Roberson struct uma_zctor_args {
168bb196eb4SMatthew D Fleming 	const char *name;
169c3bdc05fSAndrew R. Reiter 	size_t size;
1708355f576SJeff Roberson 	uma_ctor ctor;
1718355f576SJeff Roberson 	uma_dtor dtor;
1728355f576SJeff Roberson 	uma_init uminit;
1738355f576SJeff Roberson 	uma_fini fini;
1740095a784SJeff Roberson 	uma_import import;
1750095a784SJeff Roberson 	uma_release release;
1760095a784SJeff Roberson 	void *arg;
177099a0e58SBosko Milekic 	uma_keg_t keg;
178099a0e58SBosko Milekic 	int align;
17985dcf349SGleb Smirnoff 	uint32_t flags;
180099a0e58SBosko Milekic };
181099a0e58SBosko Milekic 
182099a0e58SBosko Milekic struct uma_kctor_args {
183099a0e58SBosko Milekic 	uma_zone_t zone;
184099a0e58SBosko Milekic 	size_t size;
185099a0e58SBosko Milekic 	uma_init uminit;
186099a0e58SBosko Milekic 	uma_fini fini;
1878355f576SJeff Roberson 	int align;
18885dcf349SGleb Smirnoff 	uint32_t flags;
1898355f576SJeff Roberson };
1908355f576SJeff Roberson 
191cae33c14SJeff Roberson struct uma_bucket_zone {
192cae33c14SJeff Roberson 	uma_zone_t	ubz_zone;
193cae33c14SJeff Roberson 	char		*ubz_name;
194fc03d22bSJeff Roberson 	int		ubz_entries;	/* Number of items it can hold. */
195fc03d22bSJeff Roberson 	int		ubz_maxsize;	/* Maximum allocation size per-item. */
196cae33c14SJeff Roberson };
197cae33c14SJeff Roberson 
198f9d27e75SRobert Watson /*
199fc03d22bSJeff Roberson  * Compute the actual number of bucket entries to pack them in power
200fc03d22bSJeff Roberson  * of two sizes for more efficient space utilization.
201f9d27e75SRobert Watson  */
202fc03d22bSJeff Roberson #define	BUCKET_SIZE(n)						\
203fc03d22bSJeff Roberson     (((sizeof(void *) * (n)) - sizeof(struct uma_bucket)) / sizeof(void *))
204fc03d22bSJeff Roberson 
2051aa6c758SAlexander Motin #define	BUCKET_MAX	BUCKET_SIZE(256)
206fc03d22bSJeff Roberson 
207fc03d22bSJeff Roberson struct uma_bucket_zone bucket_zones[] = {
2086fd34d6fSJeff Roberson 	{ NULL, "4 Bucket", BUCKET_SIZE(4), 4096 },
209f3932e90SAlexander Motin 	{ NULL, "6 Bucket", BUCKET_SIZE(6), 3072 },
2106fd34d6fSJeff Roberson 	{ NULL, "8 Bucket", BUCKET_SIZE(8), 2048 },
211f3932e90SAlexander Motin 	{ NULL, "12 Bucket", BUCKET_SIZE(12), 1536 },
2126fd34d6fSJeff Roberson 	{ NULL, "16 Bucket", BUCKET_SIZE(16), 1024 },
213fc03d22bSJeff Roberson 	{ NULL, "32 Bucket", BUCKET_SIZE(32), 512 },
214fc03d22bSJeff Roberson 	{ NULL, "64 Bucket", BUCKET_SIZE(64), 256 },
215fc03d22bSJeff Roberson 	{ NULL, "128 Bucket", BUCKET_SIZE(128), 128 },
2161aa6c758SAlexander Motin 	{ NULL, "256 Bucket", BUCKET_SIZE(256), 64 },
217fc03d22bSJeff Roberson 	{ NULL, NULL, 0}
218fc03d22bSJeff Roberson };
219cae33c14SJeff Roberson 
2202019094aSRobert Watson /*
2212019094aSRobert Watson  * Flags and enumerations to be passed to internal functions.
2222019094aSRobert Watson  */
223ef72505eSJeff Roberson enum zfreeskip { SKIP_NONE = 0, SKIP_DTOR, SKIP_FINI };
224b23f72e9SBrian Feldman 
225ab3185d1SJeff Roberson #define	UMA_ANYDOMAIN	-1	/* Special value for domain search. */
226ab3185d1SJeff Roberson 
2278355f576SJeff Roberson /* Prototypes.. */
2288355f576SJeff Roberson 
229ab3185d1SJeff Roberson static void *noobj_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
230ab3185d1SJeff Roberson static void *page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
231ab3185d1SJeff Roberson static void *startup_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
232f2c2231eSRyan Stone static void page_free(void *, vm_size_t, uint8_t);
233ab3185d1SJeff Roberson static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int, int);
2349643769aSJeff Roberson static void cache_drain(uma_zone_t);
2358355f576SJeff Roberson static void bucket_drain(uma_zone_t, uma_bucket_t);
236aaa8bb16SJeff Roberson static void bucket_cache_drain(uma_zone_t zone);
237b23f72e9SBrian Feldman static int keg_ctor(void *, int, void *, int);
238099a0e58SBosko Milekic static void keg_dtor(void *, int, void *);
239b23f72e9SBrian Feldman static int zone_ctor(void *, int, void *, int);
2409c2cd7e5SJeff Roberson static void zone_dtor(void *, int, void *);
241b23f72e9SBrian Feldman static int zero_init(void *, int, int);
242e20a199fSJeff Roberson static void keg_small_init(uma_keg_t keg);
243e20a199fSJeff Roberson static void keg_large_init(uma_keg_t keg);
2448355f576SJeff Roberson static void zone_foreach(void (*zfunc)(uma_zone_t));
2458355f576SJeff Roberson static void zone_timeout(uma_zone_t zone);
2460aef6126SJeff Roberson static int hash_alloc(struct uma_hash *);
2470aef6126SJeff Roberson static int hash_expand(struct uma_hash *, struct uma_hash *);
2480aef6126SJeff Roberson static void hash_free(struct uma_hash *hash);
2498355f576SJeff Roberson static void uma_timeout(void *);
2508355f576SJeff Roberson static void uma_startup3(void);
251ab3185d1SJeff Roberson static void *zone_alloc_item(uma_zone_t, void *, int, int);
2520095a784SJeff Roberson static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip);
25386bbae32SJeff Roberson static void bucket_enable(void);
254cae33c14SJeff Roberson static void bucket_init(void);
2556fd34d6fSJeff Roberson static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int);
2566fd34d6fSJeff Roberson static void bucket_free(uma_zone_t zone, uma_bucket_t, void *);
257cae33c14SJeff Roberson static void bucket_zone_drain(void);
258ab3185d1SJeff Roberson static uma_bucket_t zone_alloc_bucket(uma_zone_t, void *, int, int);
259ab3185d1SJeff Roberson static uma_slab_t zone_fetch_slab(uma_zone_t, uma_keg_t, int, int);
260ab3185d1SJeff Roberson static uma_slab_t zone_fetch_slab_multi(uma_zone_t, uma_keg_t, int, int);
2610095a784SJeff Roberson static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
2620095a784SJeff Roberson static void slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item);
263e20a199fSJeff Roberson static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
26485dcf349SGleb Smirnoff     uma_fini fini, int align, uint32_t flags);
265ab3185d1SJeff Roberson static int zone_import(uma_zone_t, void **, int, int, int);
266ab3185d1SJeff Roberson static void zone_release(uma_zone_t, void **, int);
267ab3185d1SJeff Roberson static void uma_zero_item(void *, uma_zone_t);
268bbee39c6SJeff Roberson 
2698355f576SJeff Roberson void uma_print_zone(uma_zone_t);
2708355f576SJeff Roberson void uma_print_stats(void);
2717a52a97eSRobert Watson static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
2727a52a97eSRobert Watson static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);
2738355f576SJeff Roberson 
2749542ea7bSGleb Smirnoff #ifdef INVARIANTS
2759542ea7bSGleb Smirnoff static void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item);
2769542ea7bSGleb Smirnoff static void uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item);
2779542ea7bSGleb Smirnoff #endif
2789542ea7bSGleb Smirnoff 
2798355f576SJeff Roberson SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);
2808355f576SJeff Roberson 
2817a52a97eSRobert Watson SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLTYPE_INT,
2827a52a97eSRobert Watson     0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones");
2837a52a97eSRobert Watson 
2847a52a97eSRobert Watson SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
2857a52a97eSRobert Watson     0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");
2867a52a97eSRobert Watson 
2872f891cd5SPawel Jakub Dawidek static int zone_warnings = 1;
288af3b2549SHans Petter Selasky SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RWTUN, &zone_warnings, 0,
2892f891cd5SPawel Jakub Dawidek     "Warn when UMA zones becomes full");
2902f891cd5SPawel Jakub Dawidek 
2912e47807cSJeff Roberson /* Adjust bytes under management by UMA. */
2922e47807cSJeff Roberson static inline void
2932e47807cSJeff Roberson uma_total_dec(unsigned long size)
2942e47807cSJeff Roberson {
2952e47807cSJeff Roberson 
2962e47807cSJeff Roberson 	atomic_subtract_long(&uma_kmem_total, size);
2972e47807cSJeff Roberson }
2982e47807cSJeff Roberson 
2992e47807cSJeff Roberson static inline void
3002e47807cSJeff Roberson uma_total_inc(unsigned long size)
3012e47807cSJeff Roberson {
3022e47807cSJeff Roberson 
3032e47807cSJeff Roberson 	if (atomic_fetchadd_long(&uma_kmem_total, size) > uma_kmem_limit)
3042e47807cSJeff Roberson 		uma_reclaim_wakeup();
3052e47807cSJeff Roberson }
3062e47807cSJeff Roberson 
30786bbae32SJeff Roberson /*
30886bbae32SJeff Roberson  * This routine checks to see whether or not it's safe to enable buckets.
30986bbae32SJeff Roberson  */
31086bbae32SJeff Roberson static void
31186bbae32SJeff Roberson bucket_enable(void)
31286bbae32SJeff Roberson {
313251386b4SMaksim Yevmenkin 	bucketdisable = vm_page_count_min();
31486bbae32SJeff Roberson }
31586bbae32SJeff Roberson 
316dc2c7965SRobert Watson /*
317dc2c7965SRobert Watson  * Initialize bucket_zones, the array of zones of buckets of various sizes.
318dc2c7965SRobert Watson  *
319dc2c7965SRobert Watson  * For each zone, calculate the memory required for each bucket, consisting
320fc03d22bSJeff Roberson  * of the header and an array of pointers.
321dc2c7965SRobert Watson  */
322cae33c14SJeff Roberson static void
323cae33c14SJeff Roberson bucket_init(void)
324cae33c14SJeff Roberson {
325cae33c14SJeff Roberson 	struct uma_bucket_zone *ubz;
326cae33c14SJeff Roberson 	int size;
327cae33c14SJeff Roberson 
328d74e6a1dSAlan Cox 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) {
329cae33c14SJeff Roberson 		size = roundup(sizeof(struct uma_bucket), sizeof(void *));
330cae33c14SJeff Roberson 		size += sizeof(void *) * ubz->ubz_entries;
331cae33c14SJeff Roberson 		ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
332e20a199fSJeff Roberson 		    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
333ab3185d1SJeff Roberson 		    UMA_ZONE_MTXCLASS | UMA_ZFLAG_BUCKET | UMA_ZONE_NUMA);
334cae33c14SJeff Roberson 	}
335cae33c14SJeff Roberson }
336cae33c14SJeff Roberson 
337dc2c7965SRobert Watson /*
338dc2c7965SRobert Watson  * Given a desired number of entries for a bucket, return the zone from which
339dc2c7965SRobert Watson  * to allocate the bucket.
340dc2c7965SRobert Watson  */
341dc2c7965SRobert Watson static struct uma_bucket_zone *
342dc2c7965SRobert Watson bucket_zone_lookup(int entries)
343dc2c7965SRobert Watson {
344fc03d22bSJeff Roberson 	struct uma_bucket_zone *ubz;
345dc2c7965SRobert Watson 
346fc03d22bSJeff Roberson 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
347fc03d22bSJeff Roberson 		if (ubz->ubz_entries >= entries)
348fc03d22bSJeff Roberson 			return (ubz);
349fc03d22bSJeff Roberson 	ubz--;
350fc03d22bSJeff Roberson 	return (ubz);
351fc03d22bSJeff Roberson }
352fc03d22bSJeff Roberson 
353fc03d22bSJeff Roberson static int
354fc03d22bSJeff Roberson bucket_select(int size)
355fc03d22bSJeff Roberson {
356fc03d22bSJeff Roberson 	struct uma_bucket_zone *ubz;
357fc03d22bSJeff Roberson 
358fc03d22bSJeff Roberson 	ubz = &bucket_zones[0];
359fc03d22bSJeff Roberson 	if (size > ubz->ubz_maxsize)
360fc03d22bSJeff Roberson 		return MAX((ubz->ubz_maxsize * ubz->ubz_entries) / size, 1);
361fc03d22bSJeff Roberson 
362fc03d22bSJeff Roberson 	for (; ubz->ubz_entries != 0; ubz++)
363fc03d22bSJeff Roberson 		if (ubz->ubz_maxsize < size)
364fc03d22bSJeff Roberson 			break;
365fc03d22bSJeff Roberson 	ubz--;
366fc03d22bSJeff Roberson 	return (ubz->ubz_entries);
367dc2c7965SRobert Watson }
368dc2c7965SRobert Watson 
369cae33c14SJeff Roberson static uma_bucket_t
3706fd34d6fSJeff Roberson bucket_alloc(uma_zone_t zone, void *udata, int flags)
371cae33c14SJeff Roberson {
372cae33c14SJeff Roberson 	struct uma_bucket_zone *ubz;
373cae33c14SJeff Roberson 	uma_bucket_t bucket;
374cae33c14SJeff Roberson 
375cae33c14SJeff Roberson 	/*
376cae33c14SJeff Roberson 	 * This is to stop us from allocating per cpu buckets while we're
3773803b26bSDag-Erling Smørgrav 	 * running out of vm.boot_pages.  Otherwise, we would exhaust the
378cae33c14SJeff Roberson 	 * boot pages.  This also prevents us from allocating buckets in
379cae33c14SJeff Roberson 	 * low memory situations.
380cae33c14SJeff Roberson 	 */
381cae33c14SJeff Roberson 	if (bucketdisable)
382cae33c14SJeff Roberson 		return (NULL);
3836fd34d6fSJeff Roberson 	/*
3846fd34d6fSJeff Roberson 	 * To limit bucket recursion we store the original zone flags
3856fd34d6fSJeff Roberson 	 * in a cookie passed via zalloc_arg/zfree_arg.  This allows the
3866fd34d6fSJeff Roberson 	 * NOVM flag to persist even through deep recursions.  We also
3876fd34d6fSJeff Roberson 	 * store ZFLAG_BUCKET once we have recursed attempting to allocate
3886fd34d6fSJeff Roberson 	 * a bucket for a bucket zone so we do not allow infinite bucket
3896fd34d6fSJeff Roberson 	 * recursion.  This cookie will even persist to frees of unused
3906fd34d6fSJeff Roberson 	 * buckets via the allocation path or bucket allocations in the
3916fd34d6fSJeff Roberson 	 * free path.
3926fd34d6fSJeff Roberson 	 */
3936fd34d6fSJeff Roberson 	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
3946fd34d6fSJeff Roberson 		udata = (void *)(uintptr_t)zone->uz_flags;
395e8a720feSAlexander Motin 	else {
396e8a720feSAlexander Motin 		if ((uintptr_t)udata & UMA_ZFLAG_BUCKET)
397e8a720feSAlexander Motin 			return (NULL);
3986fd34d6fSJeff Roberson 		udata = (void *)((uintptr_t)udata | UMA_ZFLAG_BUCKET);
399e8a720feSAlexander Motin 	}
4006fd34d6fSJeff Roberson 	if ((uintptr_t)udata & UMA_ZFLAG_CACHEONLY)
401af526374SJeff Roberson 		flags |= M_NOVM;
402af526374SJeff Roberson 	ubz = bucket_zone_lookup(zone->uz_count);
40320d3ab87SAlexander Motin 	if (ubz->ubz_zone == zone && (ubz + 1)->ubz_entries != 0)
40420d3ab87SAlexander Motin 		ubz++;
4056fd34d6fSJeff Roberson 	bucket = uma_zalloc_arg(ubz->ubz_zone, udata, flags);
406cae33c14SJeff Roberson 	if (bucket) {
407cae33c14SJeff Roberson #ifdef INVARIANTS
408cae33c14SJeff Roberson 		bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
409cae33c14SJeff Roberson #endif
410cae33c14SJeff Roberson 		bucket->ub_cnt = 0;
411cae33c14SJeff Roberson 		bucket->ub_entries = ubz->ubz_entries;
412cae33c14SJeff Roberson 	}
413cae33c14SJeff Roberson 
414cae33c14SJeff Roberson 	return (bucket);
415cae33c14SJeff Roberson }
416cae33c14SJeff Roberson 
417cae33c14SJeff Roberson static void
4186fd34d6fSJeff Roberson bucket_free(uma_zone_t zone, uma_bucket_t bucket, void *udata)
419cae33c14SJeff Roberson {
420cae33c14SJeff Roberson 	struct uma_bucket_zone *ubz;
421cae33c14SJeff Roberson 
422fc03d22bSJeff Roberson 	KASSERT(bucket->ub_cnt == 0,
423fc03d22bSJeff Roberson 	    ("bucket_free: Freeing a non free bucket."));
4246fd34d6fSJeff Roberson 	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
4256fd34d6fSJeff Roberson 		udata = (void *)(uintptr_t)zone->uz_flags;
426dc2c7965SRobert Watson 	ubz = bucket_zone_lookup(bucket->ub_entries);
4276fd34d6fSJeff Roberson 	uma_zfree_arg(ubz->ubz_zone, bucket, udata);
428cae33c14SJeff Roberson }
429cae33c14SJeff Roberson 
430cae33c14SJeff Roberson static void
431cae33c14SJeff Roberson bucket_zone_drain(void)
432cae33c14SJeff Roberson {
433cae33c14SJeff Roberson 	struct uma_bucket_zone *ubz;
434cae33c14SJeff Roberson 
435cae33c14SJeff Roberson 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
436cae33c14SJeff Roberson 		zone_drain(ubz->ubz_zone);
437cae33c14SJeff Roberson }
438cae33c14SJeff Roberson 
4392f891cd5SPawel Jakub Dawidek static void
4402f891cd5SPawel Jakub Dawidek zone_log_warning(uma_zone_t zone)
4412f891cd5SPawel Jakub Dawidek {
4422f891cd5SPawel Jakub Dawidek 	static const struct timeval warninterval = { 300, 0 };
4432f891cd5SPawel Jakub Dawidek 
4442f891cd5SPawel Jakub Dawidek 	if (!zone_warnings || zone->uz_warning == NULL)
4452f891cd5SPawel Jakub Dawidek 		return;
4462f891cd5SPawel Jakub Dawidek 
4472f891cd5SPawel Jakub Dawidek 	if (ratecheck(&zone->uz_ratecheck, &warninterval))
4482f891cd5SPawel Jakub Dawidek 		printf("[zone: %s] %s\n", zone->uz_name, zone->uz_warning);
4492f891cd5SPawel Jakub Dawidek }
4502f891cd5SPawel Jakub Dawidek 
45154503a13SJonathan T. Looney static inline void
45254503a13SJonathan T. Looney zone_maxaction(uma_zone_t zone)
45354503a13SJonathan T. Looney {
454e60b2fcbSGleb Smirnoff 
455e60b2fcbSGleb Smirnoff 	if (zone->uz_maxaction.ta_func != NULL)
456e60b2fcbSGleb Smirnoff 		taskqueue_enqueue(taskqueue_thread, &zone->uz_maxaction);
45754503a13SJonathan T. Looney }
45854503a13SJonathan T. Looney 
459e20a199fSJeff Roberson static void
460e20a199fSJeff Roberson zone_foreach_keg(uma_zone_t zone, void (*kegfn)(uma_keg_t))
461e20a199fSJeff Roberson {
462e20a199fSJeff Roberson 	uma_klink_t klink;
463e20a199fSJeff Roberson 
464e20a199fSJeff Roberson 	LIST_FOREACH(klink, &zone->uz_kegs, kl_link)
465e20a199fSJeff Roberson 		kegfn(klink->kl_keg);
466e20a199fSJeff Roberson }
4678355f576SJeff Roberson 
4688355f576SJeff Roberson /*
4698355f576SJeff Roberson  * Routine called by timeout which is used to fire off some time interval
4709643769aSJeff Roberson  * based calculations.  (stats, hash size, etc.)
4718355f576SJeff Roberson  *
4728355f576SJeff Roberson  * Arguments:
4738355f576SJeff Roberson  *	arg   Unused
4748355f576SJeff Roberson  *
4758355f576SJeff Roberson  * Returns:
4768355f576SJeff Roberson  *	Nothing
4778355f576SJeff Roberson  */
4788355f576SJeff Roberson static void
4798355f576SJeff Roberson uma_timeout(void *unused)
4808355f576SJeff Roberson {
48186bbae32SJeff Roberson 	bucket_enable();
4828355f576SJeff Roberson 	zone_foreach(zone_timeout);
4838355f576SJeff Roberson 
4848355f576SJeff Roberson 	/* Reschedule this event */
4859643769aSJeff Roberson 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
4868355f576SJeff Roberson }
4878355f576SJeff Roberson 
4888355f576SJeff Roberson /*
4899643769aSJeff Roberson  * Routine to perform timeout driven calculations.  This expands the
4909643769aSJeff Roberson  * hashes and does per cpu statistics aggregation.
4918355f576SJeff Roberson  *
492e20a199fSJeff Roberson  *  Returns nothing.
4938355f576SJeff Roberson  */
4948355f576SJeff Roberson static void
495e20a199fSJeff Roberson keg_timeout(uma_keg_t keg)
4968355f576SJeff Roberson {
4978355f576SJeff Roberson 
498e20a199fSJeff Roberson 	KEG_LOCK(keg);
4998355f576SJeff Roberson 	/*
500e20a199fSJeff Roberson 	 * Expand the keg hash table.
5018355f576SJeff Roberson 	 *
5028355f576SJeff Roberson 	 * This is done if the number of slabs is larger than the hash size.
5038355f576SJeff Roberson 	 * What I'm trying to do here is completely reduce collisions.  This
5048355f576SJeff Roberson 	 * may be a little aggressive.  Should I allow for two collisions max?
5058355f576SJeff Roberson 	 */
506099a0e58SBosko Milekic 	if (keg->uk_flags & UMA_ZONE_HASH &&
507099a0e58SBosko Milekic 	    keg->uk_pages / keg->uk_ppera >= keg->uk_hash.uh_hashsize) {
5080aef6126SJeff Roberson 		struct uma_hash newhash;
5090aef6126SJeff Roberson 		struct uma_hash oldhash;
5100aef6126SJeff Roberson 		int ret;
5115300d9ddSJeff Roberson 
5120aef6126SJeff Roberson 		/*
5130aef6126SJeff Roberson 		 * This is so involved because allocating and freeing
514e20a199fSJeff Roberson 		 * while the keg lock is held will lead to deadlock.
5150aef6126SJeff Roberson 		 * I have to do everything in stages and check for
5160aef6126SJeff Roberson 		 * races.
5170aef6126SJeff Roberson 		 */
518099a0e58SBosko Milekic 		newhash = keg->uk_hash;
519e20a199fSJeff Roberson 		KEG_UNLOCK(keg);
5200aef6126SJeff Roberson 		ret = hash_alloc(&newhash);
521e20a199fSJeff Roberson 		KEG_LOCK(keg);
5220aef6126SJeff Roberson 		if (ret) {
523099a0e58SBosko Milekic 			if (hash_expand(&keg->uk_hash, &newhash)) {
524099a0e58SBosko Milekic 				oldhash = keg->uk_hash;
525099a0e58SBosko Milekic 				keg->uk_hash = newhash;
5260aef6126SJeff Roberson 			} else
5270aef6126SJeff Roberson 				oldhash = newhash;
5280aef6126SJeff Roberson 
529e20a199fSJeff Roberson 			KEG_UNLOCK(keg);
5300aef6126SJeff Roberson 			hash_free(&oldhash);
531a1dff920SDavide Italiano 			return;
5320aef6126SJeff Roberson 		}
5335300d9ddSJeff Roberson 	}
534e20a199fSJeff Roberson 	KEG_UNLOCK(keg);
535e20a199fSJeff Roberson }
536e20a199fSJeff Roberson 
537e20a199fSJeff Roberson static void
538e20a199fSJeff Roberson zone_timeout(uma_zone_t zone)
539e20a199fSJeff Roberson {
540e20a199fSJeff Roberson 
541e20a199fSJeff Roberson 	zone_foreach_keg(zone, &keg_timeout);
5428355f576SJeff Roberson }
5438355f576SJeff Roberson 
5448355f576SJeff Roberson /*
5455300d9ddSJeff Roberson  * Allocate and zero fill the next sized hash table from the appropriate
5465300d9ddSJeff Roberson  * backing store.
5475300d9ddSJeff Roberson  *
5485300d9ddSJeff Roberson  * Arguments:
5490aef6126SJeff Roberson  *	hash  A new hash structure with the old hash size in uh_hashsize
5505300d9ddSJeff Roberson  *
5515300d9ddSJeff Roberson  * Returns:
552763df3ecSPedro F. Giffuni  *	1 on success and 0 on failure.
5535300d9ddSJeff Roberson  */
55437c84183SPoul-Henning Kamp static int
5550aef6126SJeff Roberson hash_alloc(struct uma_hash *hash)
5565300d9ddSJeff Roberson {
5570aef6126SJeff Roberson 	int oldsize;
5585300d9ddSJeff Roberson 	int alloc;
5595300d9ddSJeff Roberson 
5600aef6126SJeff Roberson 	oldsize = hash->uh_hashsize;
5610aef6126SJeff Roberson 
5625300d9ddSJeff Roberson 	/* We're just going to go to a power of two greater */
5630aef6126SJeff Roberson 	if (oldsize)  {
5640aef6126SJeff Roberson 		hash->uh_hashsize = oldsize * 2;
5650aef6126SJeff Roberson 		alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
5660aef6126SJeff Roberson 		hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
567961647dfSJeff Roberson 		    M_UMAHASH, M_NOWAIT);
5685300d9ddSJeff Roberson 	} else {
5690aef6126SJeff Roberson 		alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
570e20a199fSJeff Roberson 		hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
571ab3185d1SJeff Roberson 		    UMA_ANYDOMAIN, M_WAITOK);
5720aef6126SJeff Roberson 		hash->uh_hashsize = UMA_HASH_SIZE_INIT;
5735300d9ddSJeff Roberson 	}
5740aef6126SJeff Roberson 	if (hash->uh_slab_hash) {
5750aef6126SJeff Roberson 		bzero(hash->uh_slab_hash, alloc);
5760aef6126SJeff Roberson 		hash->uh_hashmask = hash->uh_hashsize - 1;
5770aef6126SJeff Roberson 		return (1);
5780aef6126SJeff Roberson 	}
5795300d9ddSJeff Roberson 
5800aef6126SJeff Roberson 	return (0);
5815300d9ddSJeff Roberson }
5825300d9ddSJeff Roberson 
5835300d9ddSJeff Roberson /*
58464f051e9SJeff Roberson  * Expands the hash table for HASH zones.  This is done from zone_timeout
58564f051e9SJeff Roberson  * to reduce collisions.  This must not be done in the regular allocation
58664f051e9SJeff Roberson  * path, otherwise, we can recurse on the vm while allocating pages.
5878355f576SJeff Roberson  *
5888355f576SJeff Roberson  * Arguments:
5890aef6126SJeff Roberson  *	oldhash  The hash you want to expand
5900aef6126SJeff Roberson  *	newhash  The hash structure for the new table
5918355f576SJeff Roberson  *
5928355f576SJeff Roberson  * Returns:
5938355f576SJeff Roberson  *	Nothing
5948355f576SJeff Roberson  *
5958355f576SJeff Roberson  * Discussion:
5968355f576SJeff Roberson  */
5970aef6126SJeff Roberson static int
5980aef6126SJeff Roberson hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
5998355f576SJeff Roberson {
6008355f576SJeff Roberson 	uma_slab_t slab;
6018355f576SJeff Roberson 	int hval;
6028355f576SJeff Roberson 	int i;
6038355f576SJeff Roberson 
6040aef6126SJeff Roberson 	if (!newhash->uh_slab_hash)
6050aef6126SJeff Roberson 		return (0);
6068355f576SJeff Roberson 
6070aef6126SJeff Roberson 	if (oldhash->uh_hashsize >= newhash->uh_hashsize)
6080aef6126SJeff Roberson 		return (0);
6098355f576SJeff Roberson 
6108355f576SJeff Roberson 	/*
6118355f576SJeff Roberson 	 * I need to investigate hash algorithms for resizing without a
6128355f576SJeff Roberson 	 * full rehash.
6138355f576SJeff Roberson 	 */
6148355f576SJeff Roberson 
6150aef6126SJeff Roberson 	for (i = 0; i < oldhash->uh_hashsize; i++)
6160aef6126SJeff Roberson 		while (!SLIST_EMPTY(&oldhash->uh_slab_hash[i])) {
6170aef6126SJeff Roberson 			slab = SLIST_FIRST(&oldhash->uh_slab_hash[i]);
6180aef6126SJeff Roberson 			SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[i], us_hlink);
6190aef6126SJeff Roberson 			hval = UMA_HASH(newhash, slab->us_data);
6200aef6126SJeff Roberson 			SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
6210aef6126SJeff Roberson 			    slab, us_hlink);
6228355f576SJeff Roberson 		}
6238355f576SJeff Roberson 
6240aef6126SJeff Roberson 	return (1);
6259c2cd7e5SJeff Roberson }
6269c2cd7e5SJeff Roberson 
6275300d9ddSJeff Roberson /*
6285300d9ddSJeff Roberson  * Free the hash bucket to the appropriate backing store.
6295300d9ddSJeff Roberson  *
6305300d9ddSJeff Roberson  * Arguments:
6315300d9ddSJeff Roberson  *	slab_hash  The hash bucket we're freeing
6325300d9ddSJeff Roberson  *	hashsize   The number of entries in that hash bucket
6335300d9ddSJeff Roberson  *
6345300d9ddSJeff Roberson  * Returns:
6355300d9ddSJeff Roberson  *	Nothing
6365300d9ddSJeff Roberson  */
6379c2cd7e5SJeff Roberson static void
6380aef6126SJeff Roberson hash_free(struct uma_hash *hash)
6399c2cd7e5SJeff Roberson {
6400aef6126SJeff Roberson 	if (hash->uh_slab_hash == NULL)
6410aef6126SJeff Roberson 		return;
6420aef6126SJeff Roberson 	if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
6430095a784SJeff Roberson 		zone_free_item(hashzone, hash->uh_slab_hash, NULL, SKIP_NONE);
6448355f576SJeff Roberson 	else
645961647dfSJeff Roberson 		free(hash->uh_slab_hash, M_UMAHASH);
6468355f576SJeff Roberson }
6478355f576SJeff Roberson 
6488355f576SJeff Roberson /*
6498355f576SJeff Roberson  * Frees all outstanding items in a bucket
6508355f576SJeff Roberson  *
6518355f576SJeff Roberson  * Arguments:
6528355f576SJeff Roberson  *	zone   The zone to free to, must be unlocked.
6538355f576SJeff Roberson  *	bucket The free/alloc bucket with items, cpu queue must be locked.
6548355f576SJeff Roberson  *
6558355f576SJeff Roberson  * Returns:
6568355f576SJeff Roberson  *	Nothing
6578355f576SJeff Roberson  */
6588355f576SJeff Roberson 
6598355f576SJeff Roberson static void
6608355f576SJeff Roberson bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
6618355f576SJeff Roberson {
6620095a784SJeff Roberson 	int i;
6638355f576SJeff Roberson 
6648355f576SJeff Roberson 	if (bucket == NULL)
6658355f576SJeff Roberson 		return;
6668355f576SJeff Roberson 
6670095a784SJeff Roberson 	if (zone->uz_fini)
6680095a784SJeff Roberson 		for (i = 0; i < bucket->ub_cnt; i++)
6690095a784SJeff Roberson 			zone->uz_fini(bucket->ub_bucket[i], zone->uz_size);
6700095a784SJeff Roberson 	zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt);
6710095a784SJeff Roberson 	bucket->ub_cnt = 0;
6728355f576SJeff Roberson }
6738355f576SJeff Roberson 
6748355f576SJeff Roberson /*
6758355f576SJeff Roberson  * Drains the per cpu caches for a zone.
6768355f576SJeff Roberson  *
6775d1ae027SRobert Watson  * NOTE: This may only be called while the zone is being turn down, and not
6785d1ae027SRobert Watson  * during normal operation.  This is necessary in order that we do not have
6795d1ae027SRobert Watson  * to migrate CPUs to drain the per-CPU caches.
6805d1ae027SRobert Watson  *
6818355f576SJeff Roberson  * Arguments:
6828355f576SJeff Roberson  *	zone     The zone to drain, must be unlocked.
6838355f576SJeff Roberson  *
6848355f576SJeff Roberson  * Returns:
6858355f576SJeff Roberson  *	Nothing
6868355f576SJeff Roberson  */
6878355f576SJeff Roberson static void
6889643769aSJeff Roberson cache_drain(uma_zone_t zone)
6898355f576SJeff Roberson {
6908355f576SJeff Roberson 	uma_cache_t cache;
6918355f576SJeff Roberson 	int cpu;
6928355f576SJeff Roberson 
6938355f576SJeff Roberson 	/*
6945d1ae027SRobert Watson 	 * XXX: It is safe to not lock the per-CPU caches, because we're
6955d1ae027SRobert Watson 	 * tearing down the zone anyway.  I.e., there will be no further use
6965d1ae027SRobert Watson 	 * of the caches at this point.
6975d1ae027SRobert Watson 	 *
6985d1ae027SRobert Watson 	 * XXX: It would good to be able to assert that the zone is being
6995d1ae027SRobert Watson 	 * torn down to prevent improper use of cache_drain().
7005d1ae027SRobert Watson 	 *
7015d1ae027SRobert Watson 	 * XXX: We lock the zone before passing into bucket_cache_drain() as
7025d1ae027SRobert Watson 	 * it is used elsewhere.  Should the tear-down path be made special
7035d1ae027SRobert Watson 	 * there in some form?
7048355f576SJeff Roberson 	 */
7053aa6d94eSJohn Baldwin 	CPU_FOREACH(cpu) {
7068355f576SJeff Roberson 		cache = &zone->uz_cpu[cpu];
7078355f576SJeff Roberson 		bucket_drain(zone, cache->uc_allocbucket);
7088355f576SJeff Roberson 		bucket_drain(zone, cache->uc_freebucket);
709174ab450SBosko Milekic 		if (cache->uc_allocbucket != NULL)
7106fd34d6fSJeff Roberson 			bucket_free(zone, cache->uc_allocbucket, NULL);
711174ab450SBosko Milekic 		if (cache->uc_freebucket != NULL)
7126fd34d6fSJeff Roberson 			bucket_free(zone, cache->uc_freebucket, NULL);
713d56368d7SBosko Milekic 		cache->uc_allocbucket = cache->uc_freebucket = NULL;
714d56368d7SBosko Milekic 	}
715aaa8bb16SJeff Roberson 	ZONE_LOCK(zone);
716aaa8bb16SJeff Roberson 	bucket_cache_drain(zone);
717aaa8bb16SJeff Roberson 	ZONE_UNLOCK(zone);
718aaa8bb16SJeff Roberson }
719aaa8bb16SJeff Roberson 
720a2de44abSAlexander Motin static void
721a2de44abSAlexander Motin cache_shrink(uma_zone_t zone)
722a2de44abSAlexander Motin {
723a2de44abSAlexander Motin 
724a2de44abSAlexander Motin 	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
725a2de44abSAlexander Motin 		return;
726a2de44abSAlexander Motin 
727a2de44abSAlexander Motin 	ZONE_LOCK(zone);
728a2de44abSAlexander Motin 	zone->uz_count = (zone->uz_count_min + zone->uz_count) / 2;
729a2de44abSAlexander Motin 	ZONE_UNLOCK(zone);
730a2de44abSAlexander Motin }
731a2de44abSAlexander Motin 
732a2de44abSAlexander Motin static void
733a2de44abSAlexander Motin cache_drain_safe_cpu(uma_zone_t zone)
734a2de44abSAlexander Motin {
735a2de44abSAlexander Motin 	uma_cache_t cache;
7368a8d9d14SAlexander Motin 	uma_bucket_t b1, b2;
737ab3185d1SJeff Roberson 	int domain;
738a2de44abSAlexander Motin 
739a2de44abSAlexander Motin 	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
740a2de44abSAlexander Motin 		return;
741a2de44abSAlexander Motin 
7428a8d9d14SAlexander Motin 	b1 = b2 = NULL;
743a2de44abSAlexander Motin 	ZONE_LOCK(zone);
744a2de44abSAlexander Motin 	critical_enter();
745ab3185d1SJeff Roberson 	if (zone->uz_flags & UMA_ZONE_NUMA)
746ab3185d1SJeff Roberson 		domain = PCPU_GET(domain);
747ab3185d1SJeff Roberson 	else
748ab3185d1SJeff Roberson 		domain = 0;
749a2de44abSAlexander Motin 	cache = &zone->uz_cpu[curcpu];
750a2de44abSAlexander Motin 	if (cache->uc_allocbucket) {
7518a8d9d14SAlexander Motin 		if (cache->uc_allocbucket->ub_cnt != 0)
752ab3185d1SJeff Roberson 			LIST_INSERT_HEAD(&zone->uz_domain[domain].uzd_buckets,
7538a8d9d14SAlexander Motin 			    cache->uc_allocbucket, ub_link);
7548a8d9d14SAlexander Motin 		else
7558a8d9d14SAlexander Motin 			b1 = cache->uc_allocbucket;
756a2de44abSAlexander Motin 		cache->uc_allocbucket = NULL;
757a2de44abSAlexander Motin 	}
758a2de44abSAlexander Motin 	if (cache->uc_freebucket) {
7598a8d9d14SAlexander Motin 		if (cache->uc_freebucket->ub_cnt != 0)
760ab3185d1SJeff Roberson 			LIST_INSERT_HEAD(&zone->uz_domain[domain].uzd_buckets,
7618a8d9d14SAlexander Motin 			    cache->uc_freebucket, ub_link);
7628a8d9d14SAlexander Motin 		else
7638a8d9d14SAlexander Motin 			b2 = cache->uc_freebucket;
764a2de44abSAlexander Motin 		cache->uc_freebucket = NULL;
765a2de44abSAlexander Motin 	}
766a2de44abSAlexander Motin 	critical_exit();
767a2de44abSAlexander Motin 	ZONE_UNLOCK(zone);
7688a8d9d14SAlexander Motin 	if (b1)
7698a8d9d14SAlexander Motin 		bucket_free(zone, b1, NULL);
7708a8d9d14SAlexander Motin 	if (b2)
7718a8d9d14SAlexander Motin 		bucket_free(zone, b2, NULL);
772a2de44abSAlexander Motin }
773a2de44abSAlexander Motin 
774a2de44abSAlexander Motin /*
775a2de44abSAlexander Motin  * Safely drain per-CPU caches of a zone(s) to alloc bucket.
776a2de44abSAlexander Motin  * This is an expensive call because it needs to bind to all CPUs
777a2de44abSAlexander Motin  * one by one and enter a critical section on each of them in order
778a2de44abSAlexander Motin  * to safely access their cache buckets.
779a2de44abSAlexander Motin  * Zone lock must not be held on call this function.
780a2de44abSAlexander Motin  */
781a2de44abSAlexander Motin static void
782a2de44abSAlexander Motin cache_drain_safe(uma_zone_t zone)
783a2de44abSAlexander Motin {
784a2de44abSAlexander Motin 	int cpu;
785a2de44abSAlexander Motin 
786a2de44abSAlexander Motin 	/*
787a2de44abSAlexander Motin 	 * Polite bucket sizes shrinking was not enouth, shrink aggressively.
788a2de44abSAlexander Motin 	 */
789a2de44abSAlexander Motin 	if (zone)
790a2de44abSAlexander Motin 		cache_shrink(zone);
791a2de44abSAlexander Motin 	else
792a2de44abSAlexander Motin 		zone_foreach(cache_shrink);
793a2de44abSAlexander Motin 
794a2de44abSAlexander Motin 	CPU_FOREACH(cpu) {
795a2de44abSAlexander Motin 		thread_lock(curthread);
796a2de44abSAlexander Motin 		sched_bind(curthread, cpu);
797a2de44abSAlexander Motin 		thread_unlock(curthread);
798a2de44abSAlexander Motin 
799a2de44abSAlexander Motin 		if (zone)
800a2de44abSAlexander Motin 			cache_drain_safe_cpu(zone);
801a2de44abSAlexander Motin 		else
802a2de44abSAlexander Motin 			zone_foreach(cache_drain_safe_cpu);
803a2de44abSAlexander Motin 	}
804a2de44abSAlexander Motin 	thread_lock(curthread);
805a2de44abSAlexander Motin 	sched_unbind(curthread);
806a2de44abSAlexander Motin 	thread_unlock(curthread);
807a2de44abSAlexander Motin }
808a2de44abSAlexander Motin 
809aaa8bb16SJeff Roberson /*
810aaa8bb16SJeff Roberson  * Drain the cached buckets from a zone.  Expects a locked zone on entry.
811aaa8bb16SJeff Roberson  */
812aaa8bb16SJeff Roberson static void
813aaa8bb16SJeff Roberson bucket_cache_drain(uma_zone_t zone)
814aaa8bb16SJeff Roberson {
815ab3185d1SJeff Roberson 	uma_zone_domain_t zdom;
816aaa8bb16SJeff Roberson 	uma_bucket_t bucket;
817ab3185d1SJeff Roberson 	int i;
8188355f576SJeff Roberson 
8198355f576SJeff Roberson 	/*
820ab3185d1SJeff Roberson 	 * Drain the bucket queues and free the buckets.
8218355f576SJeff Roberson 	 */
822ab3185d1SJeff Roberson 	for (i = 0; i < vm_ndomains; i++) {
823ab3185d1SJeff Roberson 		zdom = &zone->uz_domain[i];
824ab3185d1SJeff Roberson 		while ((bucket = LIST_FIRST(&zdom->uzd_buckets)) != NULL) {
8258355f576SJeff Roberson 			LIST_REMOVE(bucket, ub_link);
8268355f576SJeff Roberson 			ZONE_UNLOCK(zone);
8278355f576SJeff Roberson 			bucket_drain(zone, bucket);
8286fd34d6fSJeff Roberson 			bucket_free(zone, bucket, NULL);
8298355f576SJeff Roberson 			ZONE_LOCK(zone);
8308355f576SJeff Roberson 		}
831ab3185d1SJeff Roberson 	}
832ace66b56SAlexander Motin 
833ace66b56SAlexander Motin 	/*
834ace66b56SAlexander Motin 	 * Shrink further bucket sizes.  Price of single zone lock collision
835ace66b56SAlexander Motin 	 * is probably lower then price of global cache drain.
836ace66b56SAlexander Motin 	 */
837ace66b56SAlexander Motin 	if (zone->uz_count > zone->uz_count_min)
838ace66b56SAlexander Motin 		zone->uz_count--;
8398355f576SJeff Roberson }
840fc03d22bSJeff Roberson 
841fc03d22bSJeff Roberson static void
842fc03d22bSJeff Roberson keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
843fc03d22bSJeff Roberson {
844fc03d22bSJeff Roberson 	uint8_t *mem;
845fc03d22bSJeff Roberson 	int i;
846fc03d22bSJeff Roberson 	uint8_t flags;
847fc03d22bSJeff Roberson 
8481431a748SGleb Smirnoff 	CTR4(KTR_UMA, "keg_free_slab keg %s(%p) slab %p, returning %d bytes",
8491431a748SGleb Smirnoff 	    keg->uk_name, keg, slab, PAGE_SIZE * keg->uk_ppera);
8501431a748SGleb Smirnoff 
851fc03d22bSJeff Roberson 	mem = slab->us_data;
852fc03d22bSJeff Roberson 	flags = slab->us_flags;
853fc03d22bSJeff Roberson 	i = start;
854fc03d22bSJeff Roberson 	if (keg->uk_fini != NULL) {
855fc03d22bSJeff Roberson 		for (i--; i > -1; i--)
856fc03d22bSJeff Roberson 			keg->uk_fini(slab->us_data + (keg->uk_rsize * i),
857fc03d22bSJeff Roberson 			    keg->uk_size);
858fc03d22bSJeff Roberson 	}
859fc03d22bSJeff Roberson 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
860fc03d22bSJeff Roberson 		zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
861fc03d22bSJeff Roberson 	keg->uk_freef(mem, PAGE_SIZE * keg->uk_ppera, flags);
8622e47807cSJeff Roberson 	uma_total_dec(PAGE_SIZE * keg->uk_ppera);
8638355f576SJeff Roberson }
8648355f576SJeff Roberson 
8658355f576SJeff Roberson /*
866e20a199fSJeff Roberson  * Frees pages from a keg back to the system.  This is done on demand from
8678355f576SJeff Roberson  * the pageout daemon.
8688355f576SJeff Roberson  *
869e20a199fSJeff Roberson  * Returns nothing.
8708355f576SJeff Roberson  */
871e20a199fSJeff Roberson static void
872e20a199fSJeff Roberson keg_drain(uma_keg_t keg)
8738355f576SJeff Roberson {
8741e183df2SStefan Farfeleder 	struct slabhead freeslabs = { 0 };
875ab3185d1SJeff Roberson 	uma_domain_t dom;
876829be516SMark Johnston 	uma_slab_t slab, tmp;
877ab3185d1SJeff Roberson 	int i;
8788355f576SJeff Roberson 
8798355f576SJeff Roberson 	/*
880e20a199fSJeff Roberson 	 * We don't want to take pages from statically allocated kegs at this
8818355f576SJeff Roberson 	 * time
8828355f576SJeff Roberson 	 */
883099a0e58SBosko Milekic 	if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
8848355f576SJeff Roberson 		return;
8858355f576SJeff Roberson 
8861431a748SGleb Smirnoff 	CTR3(KTR_UMA, "keg_drain %s(%p) free items: %u",
8871431a748SGleb Smirnoff 	    keg->uk_name, keg, keg->uk_free);
888e20a199fSJeff Roberson 	KEG_LOCK(keg);
889099a0e58SBosko Milekic 	if (keg->uk_free == 0)
8908355f576SJeff Roberson 		goto finished;
8918355f576SJeff Roberson 
892ab3185d1SJeff Roberson 	for (i = 0; i < vm_ndomains; i++) {
893ab3185d1SJeff Roberson 		dom = &keg->uk_domain[i];
894ab3185d1SJeff Roberson 		LIST_FOREACH_SAFE(slab, &dom->ud_free_slab, us_link, tmp) {
895829be516SMark Johnston 			/* We have nowhere to free these to. */
896829be516SMark Johnston 			if (slab->us_flags & UMA_SLAB_BOOT)
8978355f576SJeff Roberson 				continue;
8988355f576SJeff Roberson 
8998355f576SJeff Roberson 			LIST_REMOVE(slab, us_link);
900099a0e58SBosko Milekic 			keg->uk_pages -= keg->uk_ppera;
901099a0e58SBosko Milekic 			keg->uk_free -= keg->uk_ipers;
902713deb36SJeff Roberson 
903099a0e58SBosko Milekic 			if (keg->uk_flags & UMA_ZONE_HASH)
904ab3185d1SJeff Roberson 				UMA_HASH_REMOVE(&keg->uk_hash, slab,
905ab3185d1SJeff Roberson 				    slab->us_data);
906713deb36SJeff Roberson 
907713deb36SJeff Roberson 			SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);
908713deb36SJeff Roberson 		}
909ab3185d1SJeff Roberson 	}
910ab3185d1SJeff Roberson 
911713deb36SJeff Roberson finished:
912e20a199fSJeff Roberson 	KEG_UNLOCK(keg);
913713deb36SJeff Roberson 
914713deb36SJeff Roberson 	while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
915713deb36SJeff Roberson 		SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
9161645995bSKirk McKusick 		keg_free_slab(keg, slab, keg->uk_ipers);
9178355f576SJeff Roberson 	}
9188355f576SJeff Roberson }
9198355f576SJeff Roberson 
920e20a199fSJeff Roberson static void
921e20a199fSJeff Roberson zone_drain_wait(uma_zone_t zone, int waitok)
922e20a199fSJeff Roberson {
923e20a199fSJeff Roberson 
9248355f576SJeff Roberson 	/*
925e20a199fSJeff Roberson 	 * Set draining to interlock with zone_dtor() so we can release our
926e20a199fSJeff Roberson 	 * locks as we go.  Only dtor() should do a WAITOK call since it
927e20a199fSJeff Roberson 	 * is the only call that knows the structure will still be available
928e20a199fSJeff Roberson 	 * when it wakes up.
929e20a199fSJeff Roberson 	 */
930e20a199fSJeff Roberson 	ZONE_LOCK(zone);
931e20a199fSJeff Roberson 	while (zone->uz_flags & UMA_ZFLAG_DRAINING) {
932e20a199fSJeff Roberson 		if (waitok == M_NOWAIT)
933e20a199fSJeff Roberson 			goto out;
934af526374SJeff Roberson 		msleep(zone, zone->uz_lockptr, PVM, "zonedrain", 1);
935e20a199fSJeff Roberson 	}
936e20a199fSJeff Roberson 	zone->uz_flags |= UMA_ZFLAG_DRAINING;
937e20a199fSJeff Roberson 	bucket_cache_drain(zone);
938e20a199fSJeff Roberson 	ZONE_UNLOCK(zone);
939e20a199fSJeff Roberson 	/*
940e20a199fSJeff Roberson 	 * The DRAINING flag protects us from being freed while
941111fbcd5SBryan Venteicher 	 * we're running.  Normally the uma_rwlock would protect us but we
942e20a199fSJeff Roberson 	 * must be able to release and acquire the right lock for each keg.
943e20a199fSJeff Roberson 	 */
944e20a199fSJeff Roberson 	zone_foreach_keg(zone, &keg_drain);
945e20a199fSJeff Roberson 	ZONE_LOCK(zone);
946e20a199fSJeff Roberson 	zone->uz_flags &= ~UMA_ZFLAG_DRAINING;
947e20a199fSJeff Roberson 	wakeup(zone);
948e20a199fSJeff Roberson out:
949e20a199fSJeff Roberson 	ZONE_UNLOCK(zone);
950e20a199fSJeff Roberson }
951e20a199fSJeff Roberson 
952e20a199fSJeff Roberson void
953e20a199fSJeff Roberson zone_drain(uma_zone_t zone)
954e20a199fSJeff Roberson {
955e20a199fSJeff Roberson 
956e20a199fSJeff Roberson 	zone_drain_wait(zone, M_NOWAIT);
957e20a199fSJeff Roberson }
958e20a199fSJeff Roberson 
959e20a199fSJeff Roberson /*
960e20a199fSJeff Roberson  * Allocate a new slab for a keg.  This does not insert the slab onto a list.
9618355f576SJeff Roberson  *
9628355f576SJeff Roberson  * Arguments:
9638355f576SJeff Roberson  *	wait  Shall we wait?
9648355f576SJeff Roberson  *
9658355f576SJeff Roberson  * Returns:
9668355f576SJeff Roberson  *	The slab that was allocated or NULL if there is no memory and the
9678355f576SJeff Roberson  *	caller specified M_NOWAIT.
9688355f576SJeff Roberson  */
9698355f576SJeff Roberson static uma_slab_t
970ab3185d1SJeff Roberson keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int domain, int wait)
9718355f576SJeff Roberson {
972e20a199fSJeff Roberson 	uma_alloc allocf;
973099a0e58SBosko Milekic 	uma_slab_t slab;
9742e47807cSJeff Roberson 	unsigned long size;
97585dcf349SGleb Smirnoff 	uint8_t *mem;
97685dcf349SGleb Smirnoff 	uint8_t flags;
9778355f576SJeff Roberson 	int i;
9788355f576SJeff Roberson 
979ab3185d1SJeff Roberson 	KASSERT(domain >= 0 && domain < vm_ndomains,
980ab3185d1SJeff Roberson 	    ("keg_alloc_slab: domain %d out of range", domain));
981e20a199fSJeff Roberson 	mtx_assert(&keg->uk_lock, MA_OWNED);
982a553d4b8SJeff Roberson 	slab = NULL;
983fc03d22bSJeff Roberson 	mem = NULL;
984a553d4b8SJeff Roberson 
985e20a199fSJeff Roberson 	allocf = keg->uk_allocf;
986e20a199fSJeff Roberson 	KEG_UNLOCK(keg);
9872e47807cSJeff Roberson 	size = keg->uk_ppera * PAGE_SIZE;
988a553d4b8SJeff Roberson 
989099a0e58SBosko Milekic 	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
990ab3185d1SJeff Roberson 		slab = zone_alloc_item(keg->uk_slabzone, NULL, domain, wait);
991fc03d22bSJeff Roberson 		if (slab == NULL)
992fc03d22bSJeff Roberson 			goto out;
993a553d4b8SJeff Roberson 	}
994a553d4b8SJeff Roberson 
9953370c5bfSJeff Roberson 	/*
9963370c5bfSJeff Roberson 	 * This reproduces the old vm_zone behavior of zero filling pages the
9973370c5bfSJeff Roberson 	 * first time they are added to a zone.
9983370c5bfSJeff Roberson 	 *
9993370c5bfSJeff Roberson 	 * Malloced items are zeroed in uma_zalloc.
10003370c5bfSJeff Roberson 	 */
10013370c5bfSJeff Roberson 
1002099a0e58SBosko Milekic 	if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
10033370c5bfSJeff Roberson 		wait |= M_ZERO;
10043370c5bfSJeff Roberson 	else
10053370c5bfSJeff Roberson 		wait &= ~M_ZERO;
10063370c5bfSJeff Roberson 
1007263811f7SKip Macy 	if (keg->uk_flags & UMA_ZONE_NODUMP)
1008263811f7SKip Macy 		wait |= M_NODUMP;
1009263811f7SKip Macy 
1010e20a199fSJeff Roberson 	/* zone is passed for legacy reasons. */
1011ab3185d1SJeff Roberson 	mem = allocf(zone, size, domain, &flags, wait);
1012a553d4b8SJeff Roberson 	if (mem == NULL) {
1013b23f72e9SBrian Feldman 		if (keg->uk_flags & UMA_ZONE_OFFPAGE)
10140095a784SJeff Roberson 			zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
1015fc03d22bSJeff Roberson 		slab = NULL;
1016fc03d22bSJeff Roberson 		goto out;
1017a553d4b8SJeff Roberson 	}
10182e47807cSJeff Roberson 	uma_total_inc(size);
10198355f576SJeff Roberson 
10205c0e403bSJeff Roberson 	/* Point the slab into the allocated memory */
1021099a0e58SBosko Milekic 	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
1022099a0e58SBosko Milekic 		slab = (uma_slab_t )(mem + keg->uk_pgoff);
10235c0e403bSJeff Roberson 
1024e20a199fSJeff Roberson 	if (keg->uk_flags & UMA_ZONE_VTOSLAB)
1025099a0e58SBosko Milekic 		for (i = 0; i < keg->uk_ppera; i++)
102699571dc3SJeff Roberson 			vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);
10278355f576SJeff Roberson 
1028099a0e58SBosko Milekic 	slab->us_keg = keg;
10298355f576SJeff Roberson 	slab->us_data = mem;
1030099a0e58SBosko Milekic 	slab->us_freecount = keg->uk_ipers;
10318355f576SJeff Roberson 	slab->us_flags = flags;
1032ab3185d1SJeff Roberson 	slab->us_domain = domain;
1033ef72505eSJeff Roberson 	BIT_FILL(SLAB_SETSIZE, &slab->us_free);
1034ef72505eSJeff Roberson #ifdef INVARIANTS
1035ef72505eSJeff Roberson 	BIT_ZERO(SLAB_SETSIZE, &slab->us_debugfree);
1036ef72505eSJeff Roberson #endif
1037099a0e58SBosko Milekic 
1038b23f72e9SBrian Feldman 	if (keg->uk_init != NULL) {
1039099a0e58SBosko Milekic 		for (i = 0; i < keg->uk_ipers; i++)
1040b23f72e9SBrian Feldman 			if (keg->uk_init(slab->us_data + (keg->uk_rsize * i),
1041b23f72e9SBrian Feldman 			    keg->uk_size, wait) != 0)
1042b23f72e9SBrian Feldman 				break;
1043b23f72e9SBrian Feldman 		if (i != keg->uk_ipers) {
1044fc03d22bSJeff Roberson 			keg_free_slab(keg, slab, i);
1045fc03d22bSJeff Roberson 			slab = NULL;
1046fc03d22bSJeff Roberson 			goto out;
1047b23f72e9SBrian Feldman 		}
1048b23f72e9SBrian Feldman 	}
1049fc03d22bSJeff Roberson out:
1050e20a199fSJeff Roberson 	KEG_LOCK(keg);
10515c0e403bSJeff Roberson 
10521431a748SGleb Smirnoff 	CTR3(KTR_UMA, "keg_alloc_slab: allocated slab %p for %s(%p)",
10531431a748SGleb Smirnoff 	    slab, keg->uk_name, keg);
10541431a748SGleb Smirnoff 
1055fc03d22bSJeff Roberson 	if (slab != NULL) {
1056099a0e58SBosko Milekic 		if (keg->uk_flags & UMA_ZONE_HASH)
1057099a0e58SBosko Milekic 			UMA_HASH_INSERT(&keg->uk_hash, slab, mem);
10588355f576SJeff Roberson 
1059099a0e58SBosko Milekic 		keg->uk_pages += keg->uk_ppera;
1060099a0e58SBosko Milekic 		keg->uk_free += keg->uk_ipers;
1061fc03d22bSJeff Roberson 	}
10628355f576SJeff Roberson 
10638355f576SJeff Roberson 	return (slab);
10648355f576SJeff Roberson }
10658355f576SJeff Roberson 
10668355f576SJeff Roberson /*
1067009b6fcbSJeff Roberson  * This function is intended to be used early on in place of page_alloc() so
1068009b6fcbSJeff Roberson  * that we may use the boot time page cache to satisfy allocations before
1069009b6fcbSJeff Roberson  * the VM is ready.
1070009b6fcbSJeff Roberson  */
1071009b6fcbSJeff Roberson static void *
1072ab3185d1SJeff Roberson startup_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
1073ab3185d1SJeff Roberson     int wait)
1074009b6fcbSJeff Roberson {
1075099a0e58SBosko Milekic 	uma_keg_t keg;
1076ac0a6fd0SGleb Smirnoff 	void *mem;
1077ac0a6fd0SGleb Smirnoff 	int pages;
1078099a0e58SBosko Milekic 
1079e20a199fSJeff Roberson 	keg = zone_first_keg(zone);
1080e9a069d8SJohn Baldwin 	pages = howmany(bytes, PAGE_SIZE);
1081e9a069d8SJohn Baldwin 	KASSERT(pages > 0, ("startup_alloc can't reserve 0 pages\n"));
1082099a0e58SBosko Milekic 
1083009b6fcbSJeff Roberson 	/*
1084009b6fcbSJeff Roberson 	 * Check our small startup cache to see if it has pages remaining.
1085009b6fcbSJeff Roberson 	 */
1086f353d338SAlan Cox 	mtx_lock(&uma_boot_pages_mtx);
1087ac0a6fd0SGleb Smirnoff 	if (pages <= boot_pages) {
1088ac0a6fd0SGleb Smirnoff 		mem = bootmem;
1089ac0a6fd0SGleb Smirnoff 		boot_pages -= pages;
1090ac0a6fd0SGleb Smirnoff 		bootmem += pages * PAGE_SIZE;
1091f353d338SAlan Cox 		mtx_unlock(&uma_boot_pages_mtx);
1092ac0a6fd0SGleb Smirnoff 		*pflag = UMA_SLAB_BOOT;
1093ac0a6fd0SGleb Smirnoff 		return (mem);
1094009b6fcbSJeff Roberson 	}
1095f353d338SAlan Cox 	mtx_unlock(&uma_boot_pages_mtx);
1096342f1793SAlan Cox 	if (booted < UMA_STARTUP2)
10973803b26bSDag-Erling Smørgrav 		panic("UMA: Increase vm.boot_pages");
1098009b6fcbSJeff Roberson 	/*
1099009b6fcbSJeff Roberson 	 * Now that we've booted reset these users to their real allocator.
1100009b6fcbSJeff Roberson 	 */
1101009b6fcbSJeff Roberson #ifdef UMA_MD_SMALL_ALLOC
1102e9a069d8SJohn Baldwin 	keg->uk_allocf = (keg->uk_ppera > 1) ? page_alloc : uma_small_alloc;
1103009b6fcbSJeff Roberson #else
1104099a0e58SBosko Milekic 	keg->uk_allocf = page_alloc;
1105009b6fcbSJeff Roberson #endif
1106ab3185d1SJeff Roberson 	return keg->uk_allocf(zone, bytes, domain, pflag, wait);
1107009b6fcbSJeff Roberson }
1108009b6fcbSJeff Roberson 
1109009b6fcbSJeff Roberson /*
11108355f576SJeff Roberson  * Allocates a number of pages from the system
11118355f576SJeff Roberson  *
11128355f576SJeff Roberson  * Arguments:
11138355f576SJeff Roberson  *	bytes  The number of bytes requested
11148355f576SJeff Roberson  *	wait  Shall we wait?
11158355f576SJeff Roberson  *
11168355f576SJeff Roberson  * Returns:
11178355f576SJeff Roberson  *	A pointer to the alloced memory or possibly
11188355f576SJeff Roberson  *	NULL if M_NOWAIT is set.
11198355f576SJeff Roberson  */
11208355f576SJeff Roberson static void *
1121ab3185d1SJeff Roberson page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
1122ab3185d1SJeff Roberson     int wait)
11238355f576SJeff Roberson {
11248355f576SJeff Roberson 	void *p;	/* Returned page */
11258355f576SJeff Roberson 
11262e47807cSJeff Roberson 	*pflag = UMA_SLAB_KERNEL;
1127ab3185d1SJeff Roberson 	p = (void *) kmem_malloc_domain(domain, bytes, wait);
11288355f576SJeff Roberson 
11298355f576SJeff Roberson 	return (p);
11308355f576SJeff Roberson }
11318355f576SJeff Roberson 
11328355f576SJeff Roberson /*
11338355f576SJeff Roberson  * Allocates a number of pages from within an object
11348355f576SJeff Roberson  *
11358355f576SJeff Roberson  * Arguments:
11368355f576SJeff Roberson  *	bytes  The number of bytes requested
11378355f576SJeff Roberson  *	wait   Shall we wait?
11388355f576SJeff Roberson  *
11398355f576SJeff Roberson  * Returns:
11408355f576SJeff Roberson  *	A pointer to the alloced memory or possibly
11418355f576SJeff Roberson  *	NULL if M_NOWAIT is set.
11428355f576SJeff Roberson  */
11438355f576SJeff Roberson static void *
1144ab3185d1SJeff Roberson noobj_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags,
1145ab3185d1SJeff Roberson     int wait)
11468355f576SJeff Roberson {
1147a4915c21SAttilio Rao 	TAILQ_HEAD(, vm_page) alloctail;
1148a4915c21SAttilio Rao 	u_long npages;
1149b245ac95SAlan Cox 	vm_offset_t retkva, zkva;
1150a4915c21SAttilio Rao 	vm_page_t p, p_next;
1151e20a199fSJeff Roberson 	uma_keg_t keg;
11528355f576SJeff Roberson 
1153a4915c21SAttilio Rao 	TAILQ_INIT(&alloctail);
1154e20a199fSJeff Roberson 	keg = zone_first_keg(zone);
1155a4915c21SAttilio Rao 
1156a4915c21SAttilio Rao 	npages = howmany(bytes, PAGE_SIZE);
1157a4915c21SAttilio Rao 	while (npages > 0) {
1158ab3185d1SJeff Roberson 		p = vm_page_alloc_domain(NULL, 0, domain, VM_ALLOC_INTERRUPT |
11598d6fbbb8SJeff Roberson 		    VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
1160772c8b67SKonstantin Belousov 		    ((wait & M_WAITOK) != 0 ? VM_ALLOC_WAITOK :
1161772c8b67SKonstantin Belousov 		    VM_ALLOC_NOWAIT));
1162a4915c21SAttilio Rao 		if (p != NULL) {
1163a4915c21SAttilio Rao 			/*
1164a4915c21SAttilio Rao 			 * Since the page does not belong to an object, its
1165a4915c21SAttilio Rao 			 * listq is unused.
1166a4915c21SAttilio Rao 			 */
1167a4915c21SAttilio Rao 			TAILQ_INSERT_TAIL(&alloctail, p, listq);
1168a4915c21SAttilio Rao 			npages--;
1169a4915c21SAttilio Rao 			continue;
1170a4915c21SAttilio Rao 		}
11718355f576SJeff Roberson 		/*
1172a4915c21SAttilio Rao 		 * Page allocation failed, free intermediate pages and
1173a4915c21SAttilio Rao 		 * exit.
11748355f576SJeff Roberson 		 */
1175a4915c21SAttilio Rao 		TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
1176087a6132SAlan Cox 			vm_page_unwire(p, PQ_NONE);
1177b245ac95SAlan Cox 			vm_page_free(p);
1178b245ac95SAlan Cox 		}
1179a4915c21SAttilio Rao 		return (NULL);
1180b245ac95SAlan Cox 	}
11818355f576SJeff Roberson 	*flags = UMA_SLAB_PRIV;
1182a4915c21SAttilio Rao 	zkva = keg->uk_kva +
1183a4915c21SAttilio Rao 	    atomic_fetchadd_long(&keg->uk_offset, round_page(bytes));
1184a4915c21SAttilio Rao 	retkva = zkva;
1185a4915c21SAttilio Rao 	TAILQ_FOREACH(p, &alloctail, listq) {
1186a4915c21SAttilio Rao 		pmap_qenter(zkva, &p, 1);
1187a4915c21SAttilio Rao 		zkva += PAGE_SIZE;
1188a4915c21SAttilio Rao 	}
11898355f576SJeff Roberson 
11908355f576SJeff Roberson 	return ((void *)retkva);
11918355f576SJeff Roberson }
11928355f576SJeff Roberson 
11938355f576SJeff Roberson /*
11948355f576SJeff Roberson  * Frees a number of pages to the system
11958355f576SJeff Roberson  *
11968355f576SJeff Roberson  * Arguments:
11978355f576SJeff Roberson  *	mem   A pointer to the memory to be freed
11988355f576SJeff Roberson  *	size  The size of the memory being freed
11998355f576SJeff Roberson  *	flags The original p->us_flags field
12008355f576SJeff Roberson  *
12018355f576SJeff Roberson  * Returns:
12028355f576SJeff Roberson  *	Nothing
12038355f576SJeff Roberson  */
12048355f576SJeff Roberson static void
1205f2c2231eSRyan Stone page_free(void *mem, vm_size_t size, uint8_t flags)
12068355f576SJeff Roberson {
12075df87b21SJeff Roberson 	struct vmem *vmem;
12083370c5bfSJeff Roberson 
12092e47807cSJeff Roberson 	if (flags & UMA_SLAB_KERNEL)
12105df87b21SJeff Roberson 		vmem = kernel_arena;
12118355f576SJeff Roberson 	else
1212b5345ef1SJustin Hibbits 		panic("UMA: page_free used with invalid flags %x", flags);
12138355f576SJeff Roberson 
12145df87b21SJeff Roberson 	kmem_free(vmem, (vm_offset_t)mem, size);
12158355f576SJeff Roberson }
12168355f576SJeff Roberson 
12178355f576SJeff Roberson /*
12188355f576SJeff Roberson  * Zero fill initializer
12198355f576SJeff Roberson  *
12208355f576SJeff Roberson  * Arguments/Returns follow uma_init specifications
12218355f576SJeff Roberson  */
1222b23f72e9SBrian Feldman static int
1223b23f72e9SBrian Feldman zero_init(void *mem, int size, int flags)
12248355f576SJeff Roberson {
12258355f576SJeff Roberson 	bzero(mem, size);
1226b23f72e9SBrian Feldman 	return (0);
12278355f576SJeff Roberson }
12288355f576SJeff Roberson 
12298355f576SJeff Roberson /*
1230e20a199fSJeff Roberson  * Finish creating a small uma keg.  This calculates ipers, and the keg size.
12318355f576SJeff Roberson  *
12328355f576SJeff Roberson  * Arguments
1233e20a199fSJeff Roberson  *	keg  The zone we should initialize
12348355f576SJeff Roberson  *
12358355f576SJeff Roberson  * Returns
12368355f576SJeff Roberson  *	Nothing
12378355f576SJeff Roberson  */
12388355f576SJeff Roberson static void
1239e20a199fSJeff Roberson keg_small_init(uma_keg_t keg)
12408355f576SJeff Roberson {
1241244f4554SBosko Milekic 	u_int rsize;
1242244f4554SBosko Milekic 	u_int memused;
1243244f4554SBosko Milekic 	u_int wastedspace;
1244244f4554SBosko Milekic 	u_int shsize;
1245a55ebb7cSAndriy Gapon 	u_int slabsize;
12468355f576SJeff Roberson 
1247ad97af7eSGleb Smirnoff 	if (keg->uk_flags & UMA_ZONE_PCPU) {
124896c85efbSNathan Whitehorn 		u_int ncpus = (mp_maxid + 1) ? (mp_maxid + 1) : MAXCPU;
1249e28a647dSGleb Smirnoff 
1250a55ebb7cSAndriy Gapon 		slabsize = sizeof(struct pcpu);
1251e28a647dSGleb Smirnoff 		keg->uk_ppera = howmany(ncpus * sizeof(struct pcpu),
1252ad97af7eSGleb Smirnoff 		    PAGE_SIZE);
1253ad97af7eSGleb Smirnoff 	} else {
1254a55ebb7cSAndriy Gapon 		slabsize = UMA_SLAB_SIZE;
1255ad97af7eSGleb Smirnoff 		keg->uk_ppera = 1;
1256ad97af7eSGleb Smirnoff 	}
1257ad97af7eSGleb Smirnoff 
1258ef72505eSJeff Roberson 	/*
1259ef72505eSJeff Roberson 	 * Calculate the size of each allocation (rsize) according to
1260ef72505eSJeff Roberson 	 * alignment.  If the requested size is smaller than we have
1261ef72505eSJeff Roberson 	 * allocation bits for we round it up.
1262ef72505eSJeff Roberson 	 */
1263099a0e58SBosko Milekic 	rsize = keg->uk_size;
1264a55ebb7cSAndriy Gapon 	if (rsize < slabsize / SLAB_SETSIZE)
1265a55ebb7cSAndriy Gapon 		rsize = slabsize / SLAB_SETSIZE;
1266099a0e58SBosko Milekic 	if (rsize & keg->uk_align)
1267099a0e58SBosko Milekic 		rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1);
1268099a0e58SBosko Milekic 	keg->uk_rsize = rsize;
1269ad97af7eSGleb Smirnoff 
1270ad97af7eSGleb Smirnoff 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0 ||
1271ad97af7eSGleb Smirnoff 	    keg->uk_rsize < sizeof(struct pcpu),
1272ad97af7eSGleb Smirnoff 	    ("%s: size %u too large", __func__, keg->uk_rsize));
12738355f576SJeff Roberson 
1274ef72505eSJeff Roberson 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
12752864dbbfSGleb Smirnoff 		shsize = 0;
1276ef72505eSJeff Roberson 	else
1277244f4554SBosko Milekic 		shsize = sizeof(struct uma_slab);
12788355f576SJeff Roberson 
1279a55ebb7cSAndriy Gapon 	keg->uk_ipers = (slabsize - shsize) / rsize;
1280ef72505eSJeff Roberson 	KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
1281ad97af7eSGleb Smirnoff 	    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
1282ad97af7eSGleb Smirnoff 
1283244f4554SBosko Milekic 	memused = keg->uk_ipers * rsize + shsize;
1284a55ebb7cSAndriy Gapon 	wastedspace = slabsize - memused;
1285244f4554SBosko Milekic 
128620e8e865SBosko Milekic 	/*
1287244f4554SBosko Milekic 	 * We can't do OFFPAGE if we're internal or if we've been
128820e8e865SBosko Milekic 	 * asked to not go to the VM for buckets.  If we do this we
12896fd34d6fSJeff Roberson 	 * may end up going to the VM  for slabs which we do not
12906fd34d6fSJeff Roberson 	 * want to do if we're UMA_ZFLAG_CACHEONLY as a result
12916fd34d6fSJeff Roberson 	 * of UMA_ZONE_VM, which clearly forbids it.
129220e8e865SBosko Milekic 	 */
1293099a0e58SBosko Milekic 	if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
1294099a0e58SBosko Milekic 	    (keg->uk_flags & UMA_ZFLAG_CACHEONLY))
12958355f576SJeff Roberson 		return;
1296244f4554SBosko Milekic 
1297ef72505eSJeff Roberson 	/*
1298ef72505eSJeff Roberson 	 * See if using an OFFPAGE slab will limit our waste.  Only do
1299ef72505eSJeff Roberson 	 * this if it permits more items per-slab.
1300ef72505eSJeff Roberson 	 *
1301ef72505eSJeff Roberson 	 * XXX We could try growing slabsize to limit max waste as well.
1302ef72505eSJeff Roberson 	 * Historically this was not done because the VM could not
1303ef72505eSJeff Roberson 	 * efficiently handle contiguous allocations.
1304ef72505eSJeff Roberson 	 */
1305a55ebb7cSAndriy Gapon 	if ((wastedspace >= slabsize / UMA_MAX_WASTE) &&
1306a55ebb7cSAndriy Gapon 	    (keg->uk_ipers < (slabsize / keg->uk_rsize))) {
1307a55ebb7cSAndriy Gapon 		keg->uk_ipers = slabsize / keg->uk_rsize;
1308ef72505eSJeff Roberson 		KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
1309ad97af7eSGleb Smirnoff 		    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
13101431a748SGleb Smirnoff 		CTR6(KTR_UMA, "UMA decided we need offpage slab headers for "
13111431a748SGleb Smirnoff 		    "keg: %s(%p), calculated wastedspace = %d, "
1312244f4554SBosko Milekic 		    "maximum wasted space allowed = %d, "
1313244f4554SBosko Milekic 		    "calculated ipers = %d, "
13141431a748SGleb Smirnoff 		    "new wasted space = %d\n", keg->uk_name, keg, wastedspace,
1315a55ebb7cSAndriy Gapon 		    slabsize / UMA_MAX_WASTE, keg->uk_ipers,
1316a55ebb7cSAndriy Gapon 		    slabsize - keg->uk_ipers * keg->uk_rsize);
1317099a0e58SBosko Milekic 		keg->uk_flags |= UMA_ZONE_OFFPAGE;
13188355f576SJeff Roberson 	}
1319ad97af7eSGleb Smirnoff 
1320ad97af7eSGleb Smirnoff 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
1321ad97af7eSGleb Smirnoff 	    (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1322ad97af7eSGleb Smirnoff 		keg->uk_flags |= UMA_ZONE_HASH;
13238355f576SJeff Roberson }
13248355f576SJeff Roberson 
13258355f576SJeff Roberson /*
1326e20a199fSJeff Roberson  * Finish creating a large (> UMA_SLAB_SIZE) uma kegs.  Just give in and do
13278355f576SJeff Roberson  * OFFPAGE for now.  When I can allow for more dynamic slab sizes this will be
13288355f576SJeff Roberson  * more complicated.
13298355f576SJeff Roberson  *
13308355f576SJeff Roberson  * Arguments
1331e20a199fSJeff Roberson  *	keg  The keg we should initialize
13328355f576SJeff Roberson  *
13338355f576SJeff Roberson  * Returns
13348355f576SJeff Roberson  *	Nothing
13358355f576SJeff Roberson  */
13368355f576SJeff Roberson static void
1337e20a199fSJeff Roberson keg_large_init(uma_keg_t keg)
13388355f576SJeff Roberson {
1339cec48e00SAlexander Motin 	u_int shsize;
13408355f576SJeff Roberson 
1341e20a199fSJeff Roberson 	KASSERT(keg != NULL, ("Keg is null in keg_large_init"));
1342099a0e58SBosko Milekic 	KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0,
1343e20a199fSJeff Roberson 	    ("keg_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY keg"));
1344ad97af7eSGleb Smirnoff 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
1345ad97af7eSGleb Smirnoff 	    ("%s: Cannot large-init a UMA_ZONE_PCPU keg", __func__));
134620e8e865SBosko Milekic 
1347ad97af7eSGleb Smirnoff 	keg->uk_ppera = howmany(keg->uk_size, PAGE_SIZE);
1348099a0e58SBosko Milekic 	keg->uk_ipers = 1;
1349e9a069d8SJohn Baldwin 	keg->uk_rsize = keg->uk_size;
1350e9a069d8SJohn Baldwin 
1351cec48e00SAlexander Motin 	/* Check whether we have enough space to not do OFFPAGE. */
1352cec48e00SAlexander Motin 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) == 0) {
1353cec48e00SAlexander Motin 		shsize = sizeof(struct uma_slab);
1354cec48e00SAlexander Motin 		if (shsize & UMA_ALIGN_PTR)
1355cec48e00SAlexander Motin 			shsize = (shsize & ~UMA_ALIGN_PTR) +
1356cec48e00SAlexander Motin 			    (UMA_ALIGN_PTR + 1);
1357cec48e00SAlexander Motin 
13582934eb8aSMark Johnston 		if (PAGE_SIZE * keg->uk_ppera - keg->uk_rsize < shsize) {
13592934eb8aSMark Johnston 			/*
13602934eb8aSMark Johnston 			 * We can't do OFFPAGE if we're internal, in which case
13612934eb8aSMark Johnston 			 * we need an extra page per allocation to contain the
13622934eb8aSMark Johnston 			 * slab header.
13632934eb8aSMark Johnston 			 */
13642934eb8aSMark Johnston 			if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) == 0)
1365099a0e58SBosko Milekic 				keg->uk_flags |= UMA_ZONE_OFFPAGE;
13662934eb8aSMark Johnston 			else
13672934eb8aSMark Johnston 				keg->uk_ppera++;
13682934eb8aSMark Johnston 		}
1369cec48e00SAlexander Motin 	}
1370cec48e00SAlexander Motin 
1371cec48e00SAlexander Motin 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
1372cec48e00SAlexander Motin 	    (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1373099a0e58SBosko Milekic 		keg->uk_flags |= UMA_ZONE_HASH;
13748355f576SJeff Roberson }
13758355f576SJeff Roberson 
1376e20a199fSJeff Roberson static void
1377e20a199fSJeff Roberson keg_cachespread_init(uma_keg_t keg)
1378e20a199fSJeff Roberson {
1379e20a199fSJeff Roberson 	int alignsize;
1380e20a199fSJeff Roberson 	int trailer;
1381e20a199fSJeff Roberson 	int pages;
1382e20a199fSJeff Roberson 	int rsize;
1383e20a199fSJeff Roberson 
1384ad97af7eSGleb Smirnoff 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
1385ad97af7eSGleb Smirnoff 	    ("%s: Cannot cachespread-init a UMA_ZONE_PCPU keg", __func__));
1386ad97af7eSGleb Smirnoff 
1387e20a199fSJeff Roberson 	alignsize = keg->uk_align + 1;
1388e20a199fSJeff Roberson 	rsize = keg->uk_size;
1389e20a199fSJeff Roberson 	/*
1390e20a199fSJeff Roberson 	 * We want one item to start on every align boundary in a page.  To
1391e20a199fSJeff Roberson 	 * do this we will span pages.  We will also extend the item by the
1392e20a199fSJeff Roberson 	 * size of align if it is an even multiple of align.  Otherwise, it
1393e20a199fSJeff Roberson 	 * would fall on the same boundary every time.
1394e20a199fSJeff Roberson 	 */
1395e20a199fSJeff Roberson 	if (rsize & keg->uk_align)
1396e20a199fSJeff Roberson 		rsize = (rsize & ~keg->uk_align) + alignsize;
1397e20a199fSJeff Roberson 	if ((rsize & alignsize) == 0)
1398e20a199fSJeff Roberson 		rsize += alignsize;
1399e20a199fSJeff Roberson 	trailer = rsize - keg->uk_size;
1400e20a199fSJeff Roberson 	pages = (rsize * (PAGE_SIZE / alignsize)) / PAGE_SIZE;
1401e20a199fSJeff Roberson 	pages = MIN(pages, (128 * 1024) / PAGE_SIZE);
1402e20a199fSJeff Roberson 	keg->uk_rsize = rsize;
1403e20a199fSJeff Roberson 	keg->uk_ppera = pages;
1404e20a199fSJeff Roberson 	keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize;
1405e20a199fSJeff Roberson 	keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
14062367b4ddSDimitry Andric 	KASSERT(keg->uk_ipers <= SLAB_SETSIZE,
140742321809SGleb Smirnoff 	    ("%s: keg->uk_ipers too high(%d) increase max_ipers", __func__,
1408e20a199fSJeff Roberson 	    keg->uk_ipers));
1409e20a199fSJeff Roberson }
1410e20a199fSJeff Roberson 
14118355f576SJeff Roberson /*
1412099a0e58SBosko Milekic  * Keg header ctor.  This initializes all fields, locks, etc.  And inserts
1413099a0e58SBosko Milekic  * the keg onto the global keg list.
14148355f576SJeff Roberson  *
14158355f576SJeff Roberson  * Arguments/Returns follow uma_ctor specifications
1416099a0e58SBosko Milekic  *	udata  Actually uma_kctor_args
1417099a0e58SBosko Milekic  */
1418b23f72e9SBrian Feldman static int
1419b23f72e9SBrian Feldman keg_ctor(void *mem, int size, void *udata, int flags)
1420099a0e58SBosko Milekic {
1421099a0e58SBosko Milekic 	struct uma_kctor_args *arg = udata;
1422099a0e58SBosko Milekic 	uma_keg_t keg = mem;
1423099a0e58SBosko Milekic 	uma_zone_t zone;
1424099a0e58SBosko Milekic 
1425099a0e58SBosko Milekic 	bzero(keg, size);
1426099a0e58SBosko Milekic 	keg->uk_size = arg->size;
1427099a0e58SBosko Milekic 	keg->uk_init = arg->uminit;
1428099a0e58SBosko Milekic 	keg->uk_fini = arg->fini;
1429099a0e58SBosko Milekic 	keg->uk_align = arg->align;
1430ab3185d1SJeff Roberson 	keg->uk_cursor = 0;
1431099a0e58SBosko Milekic 	keg->uk_free = 0;
14326fd34d6fSJeff Roberson 	keg->uk_reserve = 0;
1433099a0e58SBosko Milekic 	keg->uk_pages = 0;
1434099a0e58SBosko Milekic 	keg->uk_flags = arg->flags;
1435099a0e58SBosko Milekic 	keg->uk_slabzone = NULL;
1436099a0e58SBosko Milekic 
1437099a0e58SBosko Milekic 	/*
1438099a0e58SBosko Milekic 	 * The master zone is passed to us at keg-creation time.
1439099a0e58SBosko Milekic 	 */
1440099a0e58SBosko Milekic 	zone = arg->zone;
1441e20a199fSJeff Roberson 	keg->uk_name = zone->uz_name;
1442099a0e58SBosko Milekic 
1443099a0e58SBosko Milekic 	if (arg->flags & UMA_ZONE_VM)
1444099a0e58SBosko Milekic 		keg->uk_flags |= UMA_ZFLAG_CACHEONLY;
1445099a0e58SBosko Milekic 
1446099a0e58SBosko Milekic 	if (arg->flags & UMA_ZONE_ZINIT)
1447099a0e58SBosko Milekic 		keg->uk_init = zero_init;
1448099a0e58SBosko Milekic 
1449cfcae3f8SGleb Smirnoff 	if (arg->flags & UMA_ZONE_MALLOC)
1450e20a199fSJeff Roberson 		keg->uk_flags |= UMA_ZONE_VTOSLAB;
1451e20a199fSJeff Roberson 
1452ad97af7eSGleb Smirnoff 	if (arg->flags & UMA_ZONE_PCPU)
1453ad97af7eSGleb Smirnoff #ifdef SMP
1454ad97af7eSGleb Smirnoff 		keg->uk_flags |= UMA_ZONE_OFFPAGE;
1455ad97af7eSGleb Smirnoff #else
1456ad97af7eSGleb Smirnoff 		keg->uk_flags &= ~UMA_ZONE_PCPU;
1457ad97af7eSGleb Smirnoff #endif
1458ad97af7eSGleb Smirnoff 
1459ef72505eSJeff Roberson 	if (keg->uk_flags & UMA_ZONE_CACHESPREAD) {
1460e20a199fSJeff Roberson 		keg_cachespread_init(keg);
1461244f4554SBosko Milekic 	} else {
1462ef72505eSJeff Roberson 		if (keg->uk_size > (UMA_SLAB_SIZE - sizeof(struct uma_slab)))
1463e20a199fSJeff Roberson 			keg_large_init(keg);
1464244f4554SBosko Milekic 		else
1465e20a199fSJeff Roberson 			keg_small_init(keg);
1466244f4554SBosko Milekic 	}
1467099a0e58SBosko Milekic 
1468cfcae3f8SGleb Smirnoff 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
1469099a0e58SBosko Milekic 		keg->uk_slabzone = slabzone;
1470099a0e58SBosko Milekic 
1471099a0e58SBosko Milekic 	/*
1472099a0e58SBosko Milekic 	 * If we haven't booted yet we need allocations to go through the
1473099a0e58SBosko Milekic 	 * startup cache until the vm is ready.
1474099a0e58SBosko Milekic 	 */
14758cd02d00SAlan Cox 	if (booted < UMA_STARTUP2)
14768cd02d00SAlan Cox 		keg->uk_allocf = startup_alloc;
147777e19437SGleb Smirnoff #ifdef UMA_MD_SMALL_ALLOC
147877e19437SGleb Smirnoff 	else if (keg->uk_ppera == 1)
147977e19437SGleb Smirnoff 		keg->uk_allocf = uma_small_alloc;
14808cd02d00SAlan Cox #endif
148177e19437SGleb Smirnoff 	else
148277e19437SGleb Smirnoff 		keg->uk_allocf = page_alloc;
148377e19437SGleb Smirnoff #ifdef UMA_MD_SMALL_ALLOC
148477e19437SGleb Smirnoff 	if (keg->uk_ppera == 1)
148577e19437SGleb Smirnoff 		keg->uk_freef = uma_small_free;
148677e19437SGleb Smirnoff 	else
148777e19437SGleb Smirnoff #endif
148877e19437SGleb Smirnoff 		keg->uk_freef = page_free;
1489099a0e58SBosko Milekic 
1490099a0e58SBosko Milekic 	/*
1491af526374SJeff Roberson 	 * Initialize keg's lock
1492099a0e58SBosko Milekic 	 */
1493af526374SJeff Roberson 	KEG_LOCK_INIT(keg, (arg->flags & UMA_ZONE_MTXCLASS));
1494099a0e58SBosko Milekic 
1495099a0e58SBosko Milekic 	/*
1496099a0e58SBosko Milekic 	 * If we're putting the slab header in the actual page we need to
1497099a0e58SBosko Milekic 	 * figure out where in each page it goes.  This calculates a right
1498099a0e58SBosko Milekic 	 * justified offset into the memory on an ALIGN_PTR boundary.
1499099a0e58SBosko Milekic 	 */
1500099a0e58SBosko Milekic 	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) {
1501244f4554SBosko Milekic 		u_int totsize;
1502099a0e58SBosko Milekic 
1503099a0e58SBosko Milekic 		/* Size of the slab struct and free list */
1504ef72505eSJeff Roberson 		totsize = sizeof(struct uma_slab);
1505ef72505eSJeff Roberson 
1506099a0e58SBosko Milekic 		if (totsize & UMA_ALIGN_PTR)
1507099a0e58SBosko Milekic 			totsize = (totsize & ~UMA_ALIGN_PTR) +
1508099a0e58SBosko Milekic 			    (UMA_ALIGN_PTR + 1);
1509ad97af7eSGleb Smirnoff 		keg->uk_pgoff = (PAGE_SIZE * keg->uk_ppera) - totsize;
1510244f4554SBosko Milekic 
1511244f4554SBosko Milekic 		/*
1512244f4554SBosko Milekic 		 * The only way the following is possible is if with our
1513244f4554SBosko Milekic 		 * UMA_ALIGN_PTR adjustments we are now bigger than
1514244f4554SBosko Milekic 		 * UMA_SLAB_SIZE.  I haven't checked whether this is
1515244f4554SBosko Milekic 		 * mathematically possible for all cases, so we make
1516244f4554SBosko Milekic 		 * sure here anyway.
1517244f4554SBosko Milekic 		 */
1518ef72505eSJeff Roberson 		totsize = keg->uk_pgoff + sizeof(struct uma_slab);
1519ad97af7eSGleb Smirnoff 		if (totsize > PAGE_SIZE * keg->uk_ppera) {
1520099a0e58SBosko Milekic 			printf("zone %s ipers %d rsize %d size %d\n",
1521099a0e58SBosko Milekic 			    zone->uz_name, keg->uk_ipers, keg->uk_rsize,
1522099a0e58SBosko Milekic 			    keg->uk_size);
1523aea6e893SAlan Cox 			panic("UMA slab won't fit.");
1524099a0e58SBosko Milekic 		}
1525099a0e58SBosko Milekic 	}
1526099a0e58SBosko Milekic 
1527099a0e58SBosko Milekic 	if (keg->uk_flags & UMA_ZONE_HASH)
1528099a0e58SBosko Milekic 		hash_alloc(&keg->uk_hash);
1529099a0e58SBosko Milekic 
15301431a748SGleb Smirnoff 	CTR5(KTR_UMA, "keg_ctor %p zone %s(%p) out %d free %d\n",
15311431a748SGleb Smirnoff 	    keg, zone->uz_name, zone,
153257223e99SAndriy Gapon 	    (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free,
153357223e99SAndriy Gapon 	    keg->uk_free);
1534099a0e58SBosko Milekic 
1535099a0e58SBosko Milekic 	LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);
1536099a0e58SBosko Milekic 
1537111fbcd5SBryan Venteicher 	rw_wlock(&uma_rwlock);
1538099a0e58SBosko Milekic 	LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
1539111fbcd5SBryan Venteicher 	rw_wunlock(&uma_rwlock);
1540b23f72e9SBrian Feldman 	return (0);
1541099a0e58SBosko Milekic }
1542099a0e58SBosko Milekic 
1543099a0e58SBosko Milekic /*
1544099a0e58SBosko Milekic  * Zone header ctor.  This initializes all fields, locks, etc.
1545099a0e58SBosko Milekic  *
1546099a0e58SBosko Milekic  * Arguments/Returns follow uma_ctor specifications
1547099a0e58SBosko Milekic  *	udata  Actually uma_zctor_args
15488355f576SJeff Roberson  */
1549b23f72e9SBrian Feldman static int
1550b23f72e9SBrian Feldman zone_ctor(void *mem, int size, void *udata, int flags)
15518355f576SJeff Roberson {
15528355f576SJeff Roberson 	struct uma_zctor_args *arg = udata;
15538355f576SJeff Roberson 	uma_zone_t zone = mem;
1554099a0e58SBosko Milekic 	uma_zone_t z;
1555099a0e58SBosko Milekic 	uma_keg_t keg;
15568355f576SJeff Roberson 
15578355f576SJeff Roberson 	bzero(zone, size);
15588355f576SJeff Roberson 	zone->uz_name = arg->name;
15598355f576SJeff Roberson 	zone->uz_ctor = arg->ctor;
15608355f576SJeff Roberson 	zone->uz_dtor = arg->dtor;
1561e20a199fSJeff Roberson 	zone->uz_slab = zone_fetch_slab;
1562099a0e58SBosko Milekic 	zone->uz_init = NULL;
1563099a0e58SBosko Milekic 	zone->uz_fini = NULL;
1564099a0e58SBosko Milekic 	zone->uz_allocs = 0;
1565773df9abSRobert Watson 	zone->uz_frees = 0;
15662019094aSRobert Watson 	zone->uz_fails = 0;
1567bf965959SSean Bruno 	zone->uz_sleeps = 0;
1568fc03d22bSJeff Roberson 	zone->uz_count = 0;
1569ace66b56SAlexander Motin 	zone->uz_count_min = 0;
1570e20a199fSJeff Roberson 	zone->uz_flags = 0;
15712f891cd5SPawel Jakub Dawidek 	zone->uz_warning = NULL;
1572ab3185d1SJeff Roberson 	/* The domain structures follow the cpu structures. */
1573ab3185d1SJeff Roberson 	zone->uz_domain = (struct uma_zone_domain *)&zone->uz_cpu[mp_ncpus];
15742f891cd5SPawel Jakub Dawidek 	timevalclear(&zone->uz_ratecheck);
1575e20a199fSJeff Roberson 	keg = arg->keg;
1576099a0e58SBosko Milekic 
1577af526374SJeff Roberson 	ZONE_LOCK_INIT(zone, (arg->flags & UMA_ZONE_MTXCLASS));
1578af526374SJeff Roberson 
15790095a784SJeff Roberson 	/*
15800095a784SJeff Roberson 	 * This is a pure cache zone, no kegs.
15810095a784SJeff Roberson 	 */
15820095a784SJeff Roberson 	if (arg->import) {
15836fd34d6fSJeff Roberson 		if (arg->flags & UMA_ZONE_VM)
15846fd34d6fSJeff Roberson 			arg->flags |= UMA_ZFLAG_CACHEONLY;
15856fd34d6fSJeff Roberson 		zone->uz_flags = arg->flags;
1586af526374SJeff Roberson 		zone->uz_size = arg->size;
15870095a784SJeff Roberson 		zone->uz_import = arg->import;
15880095a784SJeff Roberson 		zone->uz_release = arg->release;
15890095a784SJeff Roberson 		zone->uz_arg = arg->arg;
1590af526374SJeff Roberson 		zone->uz_lockptr = &zone->uz_lock;
1591111fbcd5SBryan Venteicher 		rw_wlock(&uma_rwlock);
159203175483SAlexander Motin 		LIST_INSERT_HEAD(&uma_cachezones, zone, uz_link);
1593111fbcd5SBryan Venteicher 		rw_wunlock(&uma_rwlock);
1594af526374SJeff Roberson 		goto out;
15950095a784SJeff Roberson 	}
15960095a784SJeff Roberson 
15970095a784SJeff Roberson 	/*
15980095a784SJeff Roberson 	 * Use the regular zone/keg/slab allocator.
15990095a784SJeff Roberson 	 */
16000095a784SJeff Roberson 	zone->uz_import = (uma_import)zone_import;
16010095a784SJeff Roberson 	zone->uz_release = (uma_release)zone_release;
16020095a784SJeff Roberson 	zone->uz_arg = zone;
16030095a784SJeff Roberson 
1604099a0e58SBosko Milekic 	if (arg->flags & UMA_ZONE_SECONDARY) {
1605099a0e58SBosko Milekic 		KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
16068355f576SJeff Roberson 		zone->uz_init = arg->uminit;
1607e221e841SJeff Roberson 		zone->uz_fini = arg->fini;
1608af526374SJeff Roberson 		zone->uz_lockptr = &keg->uk_lock;
1609e20a199fSJeff Roberson 		zone->uz_flags |= UMA_ZONE_SECONDARY;
1610111fbcd5SBryan Venteicher 		rw_wlock(&uma_rwlock);
1611099a0e58SBosko Milekic 		ZONE_LOCK(zone);
1612099a0e58SBosko Milekic 		LIST_FOREACH(z, &keg->uk_zones, uz_link) {
1613099a0e58SBosko Milekic 			if (LIST_NEXT(z, uz_link) == NULL) {
1614099a0e58SBosko Milekic 				LIST_INSERT_AFTER(z, zone, uz_link);
1615099a0e58SBosko Milekic 				break;
1616099a0e58SBosko Milekic 			}
1617099a0e58SBosko Milekic 		}
1618099a0e58SBosko Milekic 		ZONE_UNLOCK(zone);
1619111fbcd5SBryan Venteicher 		rw_wunlock(&uma_rwlock);
1620e20a199fSJeff Roberson 	} else if (keg == NULL) {
1621e20a199fSJeff Roberson 		if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
1622e20a199fSJeff Roberson 		    arg->align, arg->flags)) == NULL)
1623b23f72e9SBrian Feldman 			return (ENOMEM);
1624099a0e58SBosko Milekic 	} else {
1625099a0e58SBosko Milekic 		struct uma_kctor_args karg;
1626b23f72e9SBrian Feldman 		int error;
1627099a0e58SBosko Milekic 
1628099a0e58SBosko Milekic 		/* We should only be here from uma_startup() */
1629099a0e58SBosko Milekic 		karg.size = arg->size;
1630099a0e58SBosko Milekic 		karg.uminit = arg->uminit;
1631099a0e58SBosko Milekic 		karg.fini = arg->fini;
1632099a0e58SBosko Milekic 		karg.align = arg->align;
1633099a0e58SBosko Milekic 		karg.flags = arg->flags;
1634099a0e58SBosko Milekic 		karg.zone = zone;
1635b23f72e9SBrian Feldman 		error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
1636b23f72e9SBrian Feldman 		    flags);
1637b23f72e9SBrian Feldman 		if (error)
1638b23f72e9SBrian Feldman 			return (error);
1639099a0e58SBosko Milekic 	}
16400095a784SJeff Roberson 
1641e20a199fSJeff Roberson 	/*
1642e20a199fSJeff Roberson 	 * Link in the first keg.
1643e20a199fSJeff Roberson 	 */
1644e20a199fSJeff Roberson 	zone->uz_klink.kl_keg = keg;
1645e20a199fSJeff Roberson 	LIST_INSERT_HEAD(&zone->uz_kegs, &zone->uz_klink, kl_link);
1646af526374SJeff Roberson 	zone->uz_lockptr = &keg->uk_lock;
1647e20a199fSJeff Roberson 	zone->uz_size = keg->uk_size;
1648e20a199fSJeff Roberson 	zone->uz_flags |= (keg->uk_flags &
1649e20a199fSJeff Roberson 	    (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT));
16508355f576SJeff Roberson 
16518355f576SJeff Roberson 	/*
16528355f576SJeff Roberson 	 * Some internal zones don't have room allocated for the per cpu
16538355f576SJeff Roberson 	 * caches.  If we're internal, bail out here.
16548355f576SJeff Roberson 	 */
1655099a0e58SBosko Milekic 	if (keg->uk_flags & UMA_ZFLAG_INTERNAL) {
1656e20a199fSJeff Roberson 		KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0,
1657099a0e58SBosko Milekic 		    ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
1658b23f72e9SBrian Feldman 		return (0);
1659099a0e58SBosko Milekic 	}
16608355f576SJeff Roberson 
1661af526374SJeff Roberson out:
1662af526374SJeff Roberson 	if ((arg->flags & UMA_ZONE_MAXBUCKET) == 0)
1663af526374SJeff Roberson 		zone->uz_count = bucket_select(zone->uz_size);
16648355f576SJeff Roberson 	else
1665cae33c14SJeff Roberson 		zone->uz_count = BUCKET_MAX;
1666ace66b56SAlexander Motin 	zone->uz_count_min = zone->uz_count;
1667fc03d22bSJeff Roberson 
1668b23f72e9SBrian Feldman 	return (0);
16698355f576SJeff Roberson }
16708355f576SJeff Roberson 
16718355f576SJeff Roberson /*
1672099a0e58SBosko Milekic  * Keg header dtor.  This frees all data, destroys locks, frees the hash
1673099a0e58SBosko Milekic  * table and removes the keg from the global list.
16749c2cd7e5SJeff Roberson  *
16759c2cd7e5SJeff Roberson  * Arguments/Returns follow uma_dtor specifications
16769c2cd7e5SJeff Roberson  *	udata  unused
16779c2cd7e5SJeff Roberson  */
1678099a0e58SBosko Milekic static void
1679099a0e58SBosko Milekic keg_dtor(void *arg, int size, void *udata)
1680099a0e58SBosko Milekic {
1681099a0e58SBosko Milekic 	uma_keg_t keg;
16829c2cd7e5SJeff Roberson 
1683099a0e58SBosko Milekic 	keg = (uma_keg_t)arg;
1684e20a199fSJeff Roberson 	KEG_LOCK(keg);
1685099a0e58SBosko Milekic 	if (keg->uk_free != 0) {
1686a3845534SCraig Rodrigues 		printf("Freed UMA keg (%s) was not empty (%d items). "
1687099a0e58SBosko Milekic 		    " Lost %d pages of memory.\n",
1688a3845534SCraig Rodrigues 		    keg->uk_name ? keg->uk_name : "",
1689099a0e58SBosko Milekic 		    keg->uk_free, keg->uk_pages);
1690099a0e58SBosko Milekic 	}
1691e20a199fSJeff Roberson 	KEG_UNLOCK(keg);
1692099a0e58SBosko Milekic 
1693099a0e58SBosko Milekic 	hash_free(&keg->uk_hash);
1694099a0e58SBosko Milekic 
1695e20a199fSJeff Roberson 	KEG_LOCK_FINI(keg);
1696099a0e58SBosko Milekic }
1697099a0e58SBosko Milekic 
1698099a0e58SBosko Milekic /*
1699099a0e58SBosko Milekic  * Zone header dtor.
1700099a0e58SBosko Milekic  *
1701099a0e58SBosko Milekic  * Arguments/Returns follow uma_dtor specifications
1702099a0e58SBosko Milekic  *	udata  unused
1703099a0e58SBosko Milekic  */
17049c2cd7e5SJeff Roberson static void
17059c2cd7e5SJeff Roberson zone_dtor(void *arg, int size, void *udata)
17069c2cd7e5SJeff Roberson {
1707e20a199fSJeff Roberson 	uma_klink_t klink;
17089c2cd7e5SJeff Roberson 	uma_zone_t zone;
1709099a0e58SBosko Milekic 	uma_keg_t keg;
17109c2cd7e5SJeff Roberson 
17119c2cd7e5SJeff Roberson 	zone = (uma_zone_t)arg;
1712e20a199fSJeff Roberson 	keg = zone_first_keg(zone);
17139643769aSJeff Roberson 
1714e20a199fSJeff Roberson 	if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
17159643769aSJeff Roberson 		cache_drain(zone);
1716099a0e58SBosko Milekic 
1717111fbcd5SBryan Venteicher 	rw_wlock(&uma_rwlock);
1718099a0e58SBosko Milekic 	LIST_REMOVE(zone, uz_link);
1719111fbcd5SBryan Venteicher 	rw_wunlock(&uma_rwlock);
1720099a0e58SBosko Milekic 	/*
1721099a0e58SBosko Milekic 	 * XXX there are some races here where
1722099a0e58SBosko Milekic 	 * the zone can be drained but zone lock
1723099a0e58SBosko Milekic 	 * released and then refilled before we
1724099a0e58SBosko Milekic 	 * remove it... we dont care for now
1725099a0e58SBosko Milekic 	 */
1726e20a199fSJeff Roberson 	zone_drain_wait(zone, M_WAITOK);
1727e20a199fSJeff Roberson 	/*
1728e20a199fSJeff Roberson 	 * Unlink all of our kegs.
1729e20a199fSJeff Roberson 	 */
1730e20a199fSJeff Roberson 	while ((klink = LIST_FIRST(&zone->uz_kegs)) != NULL) {
1731e20a199fSJeff Roberson 		klink->kl_keg = NULL;
1732e20a199fSJeff Roberson 		LIST_REMOVE(klink, kl_link);
1733e20a199fSJeff Roberson 		if (klink == &zone->uz_klink)
1734e20a199fSJeff Roberson 			continue;
1735e20a199fSJeff Roberson 		free(klink, M_TEMP);
1736e20a199fSJeff Roberson 	}
1737e20a199fSJeff Roberson 	/*
1738e20a199fSJeff Roberson 	 * We only destroy kegs from non secondary zones.
1739e20a199fSJeff Roberson 	 */
17400095a784SJeff Roberson 	if (keg != NULL && (zone->uz_flags & UMA_ZONE_SECONDARY) == 0)  {
1741111fbcd5SBryan Venteicher 		rw_wlock(&uma_rwlock);
1742099a0e58SBosko Milekic 		LIST_REMOVE(keg, uk_link);
1743111fbcd5SBryan Venteicher 		rw_wunlock(&uma_rwlock);
17440095a784SJeff Roberson 		zone_free_item(kegs, keg, NULL, SKIP_NONE);
17459c2cd7e5SJeff Roberson 	}
1746af526374SJeff Roberson 	ZONE_LOCK_FINI(zone);
1747099a0e58SBosko Milekic }
1748099a0e58SBosko Milekic 
17499c2cd7e5SJeff Roberson /*
17508355f576SJeff Roberson  * Traverses every zone in the system and calls a callback
17518355f576SJeff Roberson  *
17528355f576SJeff Roberson  * Arguments:
17538355f576SJeff Roberson  *	zfunc  A pointer to a function which accepts a zone
17548355f576SJeff Roberson  *		as an argument.
17558355f576SJeff Roberson  *
17568355f576SJeff Roberson  * Returns:
17578355f576SJeff Roberson  *	Nothing
17588355f576SJeff Roberson  */
17598355f576SJeff Roberson static void
17608355f576SJeff Roberson zone_foreach(void (*zfunc)(uma_zone_t))
17618355f576SJeff Roberson {
1762099a0e58SBosko Milekic 	uma_keg_t keg;
17638355f576SJeff Roberson 	uma_zone_t zone;
17648355f576SJeff Roberson 
1765111fbcd5SBryan Venteicher 	rw_rlock(&uma_rwlock);
1766099a0e58SBosko Milekic 	LIST_FOREACH(keg, &uma_kegs, uk_link) {
1767099a0e58SBosko Milekic 		LIST_FOREACH(zone, &keg->uk_zones, uz_link)
17688355f576SJeff Roberson 			zfunc(zone);
1769099a0e58SBosko Milekic 	}
1770111fbcd5SBryan Venteicher 	rw_runlock(&uma_rwlock);
17718355f576SJeff Roberson }
17728355f576SJeff Roberson 
17738355f576SJeff Roberson /* Public functions */
17748355f576SJeff Roberson /* See uma.h */
17758355f576SJeff Roberson void
1776ac0a6fd0SGleb Smirnoff uma_startup(void *mem, int npages)
17778355f576SJeff Roberson {
17788355f576SJeff Roberson 	struct uma_zctor_args args;
1779ab3185d1SJeff Roberson 	uma_keg_t masterkeg;
1780ab3185d1SJeff Roberson 	uintptr_t m;
1781ab3185d1SJeff Roberson 	int zsize;
1782ab3185d1SJeff Roberson 	int ksize;
17838355f576SJeff Roberson 
1784111fbcd5SBryan Venteicher 	rw_init(&uma_rwlock, "UMA lock");
1785099a0e58SBosko Milekic 
1786ab3185d1SJeff Roberson 	ksize = sizeof(struct uma_keg) +
1787ab3185d1SJeff Roberson 	    (sizeof(struct uma_domain) * vm_ndomains);
1788ab3185d1SJeff Roberson 	zsize = sizeof(struct uma_zone) +
1789ab3185d1SJeff Roberson 	    (sizeof(struct uma_cache) * mp_ncpus) +
1790ab3185d1SJeff Roberson 	    (sizeof(struct uma_zone_domain) * vm_ndomains);
1791ab3185d1SJeff Roberson 
1792ab3185d1SJeff Roberson 	/* Use bootpages memory for the zone of zones and zone of kegs. */
1793ab3185d1SJeff Roberson 	m = (uintptr_t)mem;
1794ab3185d1SJeff Roberson 	zones = (uma_zone_t)m;
1795ab3185d1SJeff Roberson 	m += roundup(zsize, CACHE_LINE_SIZE);
1796ab3185d1SJeff Roberson 	kegs = (uma_zone_t)m;
1797ab3185d1SJeff Roberson 	m += roundup(zsize, CACHE_LINE_SIZE);
1798ab3185d1SJeff Roberson 	masterkeg = (uma_keg_t)m;
1799ab3185d1SJeff Roberson 	m += roundup(ksize, CACHE_LINE_SIZE);
1800ab3185d1SJeff Roberson 	m = roundup(m, PAGE_SIZE);
1801ab3185d1SJeff Roberson 	npages -= (m - (uintptr_t)mem) / PAGE_SIZE;
1802ab3185d1SJeff Roberson 	mem = (void *)m;
1803ab3185d1SJeff Roberson 
1804099a0e58SBosko Milekic 	/* "manually" create the initial zone */
18050095a784SJeff Roberson 	memset(&args, 0, sizeof(args));
1806099a0e58SBosko Milekic 	args.name = "UMA Kegs";
1807ab3185d1SJeff Roberson 	args.size = ksize;
1808099a0e58SBosko Milekic 	args.ctor = keg_ctor;
1809099a0e58SBosko Milekic 	args.dtor = keg_dtor;
18108355f576SJeff Roberson 	args.uminit = zero_init;
18118355f576SJeff Roberson 	args.fini = NULL;
1812ab3185d1SJeff Roberson 	args.keg = masterkeg;
18138355f576SJeff Roberson 	args.align = 32 - 1;
1814b60f5b79SJeff Roberson 	args.flags = UMA_ZFLAG_INTERNAL;
1815ab3185d1SJeff Roberson 	zone_ctor(kegs, zsize, &args, M_WAITOK);
18168355f576SJeff Roberson 
1817f353d338SAlan Cox 	mtx_init(&uma_boot_pages_mtx, "UMA boot pages", NULL, MTX_DEF);
1818ac0a6fd0SGleb Smirnoff 	bootmem = mem;
1819ac0a6fd0SGleb Smirnoff 	boot_pages = npages;
18208355f576SJeff Roberson 
1821099a0e58SBosko Milekic 	args.name = "UMA Zones";
1822099a0e58SBosko Milekic 	args.size = sizeof(struct uma_zone) +
1823ab3185d1SJeff Roberson 	    (sizeof(struct uma_cache) * (mp_maxid + 1)) +
1824ab3185d1SJeff Roberson 	    (sizeof(struct uma_zone_domain) * vm_ndomains);
1825099a0e58SBosko Milekic 	args.ctor = zone_ctor;
1826099a0e58SBosko Milekic 	args.dtor = zone_dtor;
1827099a0e58SBosko Milekic 	args.uminit = zero_init;
1828099a0e58SBosko Milekic 	args.fini = NULL;
1829099a0e58SBosko Milekic 	args.keg = NULL;
1830099a0e58SBosko Milekic 	args.align = 32 - 1;
1831099a0e58SBosko Milekic 	args.flags = UMA_ZFLAG_INTERNAL;
1832ab3185d1SJeff Roberson 	zone_ctor(zones, zsize, &args, M_WAITOK);
1833099a0e58SBosko Milekic 
18348355f576SJeff Roberson 	/* Now make a zone for slab headers */
18358355f576SJeff Roberson 	slabzone = uma_zcreate("UMA Slabs",
1836ef72505eSJeff Roberson 				sizeof(struct uma_slab),
18378355f576SJeff Roberson 				NULL, NULL, NULL, NULL,
1838b60f5b79SJeff Roberson 				UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
18398355f576SJeff Roberson 
18408355f576SJeff Roberson 	hashzone = uma_zcreate("UMA Hash",
18418355f576SJeff Roberson 	    sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
18428355f576SJeff Roberson 	    NULL, NULL, NULL, NULL,
1843b60f5b79SJeff Roberson 	    UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
18448355f576SJeff Roberson 
1845cae33c14SJeff Roberson 	bucket_init();
18468355f576SJeff Roberson 
1847342f1793SAlan Cox 	booted = UMA_STARTUP;
18488355f576SJeff Roberson }
18498355f576SJeff Roberson 
18508355f576SJeff Roberson /* see uma.h */
18518355f576SJeff Roberson void
185299571dc3SJeff Roberson uma_startup2(void)
18538355f576SJeff Roberson {
1854342f1793SAlan Cox 	booted = UMA_STARTUP2;
185586bbae32SJeff Roberson 	bucket_enable();
185695c4bf75SKonstantin Belousov 	sx_init(&uma_drain_lock, "umadrain");
18578355f576SJeff Roberson }
18588355f576SJeff Roberson 
18598355f576SJeff Roberson /*
18608355f576SJeff Roberson  * Initialize our callout handle
18618355f576SJeff Roberson  *
18628355f576SJeff Roberson  */
18638355f576SJeff Roberson 
18648355f576SJeff Roberson static void
18658355f576SJeff Roberson uma_startup3(void)
18668355f576SJeff Roberson {
18671431a748SGleb Smirnoff 
1868fd90e2edSJung-uk Kim 	callout_init(&uma_callout, 1);
18699643769aSJeff Roberson 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
18708355f576SJeff Roberson }
18718355f576SJeff Roberson 
1872e20a199fSJeff Roberson static uma_keg_t
1873099a0e58SBosko Milekic uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
187485dcf349SGleb Smirnoff 		int align, uint32_t flags)
1875099a0e58SBosko Milekic {
1876099a0e58SBosko Milekic 	struct uma_kctor_args args;
1877099a0e58SBosko Milekic 
1878099a0e58SBosko Milekic 	args.size = size;
1879099a0e58SBosko Milekic 	args.uminit = uminit;
1880099a0e58SBosko Milekic 	args.fini = fini;
18811e319f6dSRobert Watson 	args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align;
1882099a0e58SBosko Milekic 	args.flags = flags;
1883099a0e58SBosko Milekic 	args.zone = zone;
1884ab3185d1SJeff Roberson 	return (zone_alloc_item(kegs, &args, UMA_ANYDOMAIN, M_WAITOK));
1885099a0e58SBosko Milekic }
1886099a0e58SBosko Milekic 
18878355f576SJeff Roberson /* See uma.h */
18881e319f6dSRobert Watson void
18891e319f6dSRobert Watson uma_set_align(int align)
18901e319f6dSRobert Watson {
18911e319f6dSRobert Watson 
18921e319f6dSRobert Watson 	if (align != UMA_ALIGN_CACHE)
18931e319f6dSRobert Watson 		uma_align_cache = align;
18941e319f6dSRobert Watson }
18951e319f6dSRobert Watson 
18961e319f6dSRobert Watson /* See uma.h */
18978355f576SJeff Roberson uma_zone_t
1898bb196eb4SMatthew D Fleming uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
189985dcf349SGleb Smirnoff 		uma_init uminit, uma_fini fini, int align, uint32_t flags)
19008355f576SJeff Roberson 
19018355f576SJeff Roberson {
19028355f576SJeff Roberson 	struct uma_zctor_args args;
190395c4bf75SKonstantin Belousov 	uma_zone_t res;
190495c4bf75SKonstantin Belousov 	bool locked;
19058355f576SJeff Roberson 
1906a5a35578SJohn Baldwin 	KASSERT(powerof2(align + 1), ("invalid zone alignment %d for \"%s\"",
1907a5a35578SJohn Baldwin 	    align, name));
1908a5a35578SJohn Baldwin 
19098355f576SJeff Roberson 	/* This stuff is essential for the zone ctor */
19100095a784SJeff Roberson 	memset(&args, 0, sizeof(args));
19118355f576SJeff Roberson 	args.name = name;
19128355f576SJeff Roberson 	args.size = size;
19138355f576SJeff Roberson 	args.ctor = ctor;
19148355f576SJeff Roberson 	args.dtor = dtor;
19158355f576SJeff Roberson 	args.uminit = uminit;
19168355f576SJeff Roberson 	args.fini = fini;
1917afc6dc36SJohn-Mark Gurney #ifdef  INVARIANTS
1918afc6dc36SJohn-Mark Gurney 	/*
1919afc6dc36SJohn-Mark Gurney 	 * If a zone is being created with an empty constructor and
1920afc6dc36SJohn-Mark Gurney 	 * destructor, pass UMA constructor/destructor which checks for
1921afc6dc36SJohn-Mark Gurney 	 * memory use after free.
1922afc6dc36SJohn-Mark Gurney 	 */
192319c591bfSMateusz Guzik 	if ((!(flags & (UMA_ZONE_ZINIT | UMA_ZONE_NOFREE))) &&
192419c591bfSMateusz Guzik 	    ctor == NULL && dtor == NULL && uminit == NULL && fini == NULL) {
1925afc6dc36SJohn-Mark Gurney 		args.ctor = trash_ctor;
1926afc6dc36SJohn-Mark Gurney 		args.dtor = trash_dtor;
1927afc6dc36SJohn-Mark Gurney 		args.uminit = trash_init;
1928afc6dc36SJohn-Mark Gurney 		args.fini = trash_fini;
1929afc6dc36SJohn-Mark Gurney 	}
1930afc6dc36SJohn-Mark Gurney #endif
19318355f576SJeff Roberson 	args.align = align;
19328355f576SJeff Roberson 	args.flags = flags;
1933099a0e58SBosko Milekic 	args.keg = NULL;
1934099a0e58SBosko Milekic 
193595c4bf75SKonstantin Belousov 	if (booted < UMA_STARTUP2) {
193695c4bf75SKonstantin Belousov 		locked = false;
193795c4bf75SKonstantin Belousov 	} else {
193895c4bf75SKonstantin Belousov 		sx_slock(&uma_drain_lock);
193995c4bf75SKonstantin Belousov 		locked = true;
194095c4bf75SKonstantin Belousov 	}
1941ab3185d1SJeff Roberson 	res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK);
194295c4bf75SKonstantin Belousov 	if (locked)
194395c4bf75SKonstantin Belousov 		sx_sunlock(&uma_drain_lock);
194495c4bf75SKonstantin Belousov 	return (res);
1945099a0e58SBosko Milekic }
1946099a0e58SBosko Milekic 
1947099a0e58SBosko Milekic /* See uma.h */
1948099a0e58SBosko Milekic uma_zone_t
1949099a0e58SBosko Milekic uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
1950099a0e58SBosko Milekic 		    uma_init zinit, uma_fini zfini, uma_zone_t master)
1951099a0e58SBosko Milekic {
1952099a0e58SBosko Milekic 	struct uma_zctor_args args;
1953e20a199fSJeff Roberson 	uma_keg_t keg;
195495c4bf75SKonstantin Belousov 	uma_zone_t res;
195595c4bf75SKonstantin Belousov 	bool locked;
1956099a0e58SBosko Milekic 
1957e20a199fSJeff Roberson 	keg = zone_first_keg(master);
19580095a784SJeff Roberson 	memset(&args, 0, sizeof(args));
1959099a0e58SBosko Milekic 	args.name = name;
1960e20a199fSJeff Roberson 	args.size = keg->uk_size;
1961099a0e58SBosko Milekic 	args.ctor = ctor;
1962099a0e58SBosko Milekic 	args.dtor = dtor;
1963099a0e58SBosko Milekic 	args.uminit = zinit;
1964099a0e58SBosko Milekic 	args.fini = zfini;
1965e20a199fSJeff Roberson 	args.align = keg->uk_align;
1966e20a199fSJeff Roberson 	args.flags = keg->uk_flags | UMA_ZONE_SECONDARY;
1967e20a199fSJeff Roberson 	args.keg = keg;
19688355f576SJeff Roberson 
196995c4bf75SKonstantin Belousov 	if (booted < UMA_STARTUP2) {
197095c4bf75SKonstantin Belousov 		locked = false;
197195c4bf75SKonstantin Belousov 	} else {
197295c4bf75SKonstantin Belousov 		sx_slock(&uma_drain_lock);
197395c4bf75SKonstantin Belousov 		locked = true;
197495c4bf75SKonstantin Belousov 	}
1975e20a199fSJeff Roberson 	/* XXX Attaches only one keg of potentially many. */
1976ab3185d1SJeff Roberson 	res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK);
197795c4bf75SKonstantin Belousov 	if (locked)
197895c4bf75SKonstantin Belousov 		sx_sunlock(&uma_drain_lock);
197995c4bf75SKonstantin Belousov 	return (res);
19808355f576SJeff Roberson }
19818355f576SJeff Roberson 
19820095a784SJeff Roberson /* See uma.h */
19830095a784SJeff Roberson uma_zone_t
1984af526374SJeff Roberson uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor,
1985af526374SJeff Roberson 		    uma_init zinit, uma_fini zfini, uma_import zimport,
1986af526374SJeff Roberson 		    uma_release zrelease, void *arg, int flags)
19870095a784SJeff Roberson {
19880095a784SJeff Roberson 	struct uma_zctor_args args;
19890095a784SJeff Roberson 
19900095a784SJeff Roberson 	memset(&args, 0, sizeof(args));
19910095a784SJeff Roberson 	args.name = name;
1992af526374SJeff Roberson 	args.size = size;
19930095a784SJeff Roberson 	args.ctor = ctor;
19940095a784SJeff Roberson 	args.dtor = dtor;
19950095a784SJeff Roberson 	args.uminit = zinit;
19960095a784SJeff Roberson 	args.fini = zfini;
19970095a784SJeff Roberson 	args.import = zimport;
19980095a784SJeff Roberson 	args.release = zrelease;
19990095a784SJeff Roberson 	args.arg = arg;
20000095a784SJeff Roberson 	args.align = 0;
20010095a784SJeff Roberson 	args.flags = flags;
20020095a784SJeff Roberson 
2003ab3185d1SJeff Roberson 	return (zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK));
20040095a784SJeff Roberson }
20050095a784SJeff Roberson 
2006e20a199fSJeff Roberson static void
2007e20a199fSJeff Roberson zone_lock_pair(uma_zone_t a, uma_zone_t b)
2008e20a199fSJeff Roberson {
2009e20a199fSJeff Roberson 	if (a < b) {
2010e20a199fSJeff Roberson 		ZONE_LOCK(a);
2011af526374SJeff Roberson 		mtx_lock_flags(b->uz_lockptr, MTX_DUPOK);
2012e20a199fSJeff Roberson 	} else {
2013e20a199fSJeff Roberson 		ZONE_LOCK(b);
2014af526374SJeff Roberson 		mtx_lock_flags(a->uz_lockptr, MTX_DUPOK);
2015e20a199fSJeff Roberson 	}
2016e20a199fSJeff Roberson }
2017e20a199fSJeff Roberson 
2018e20a199fSJeff Roberson static void
2019e20a199fSJeff Roberson zone_unlock_pair(uma_zone_t a, uma_zone_t b)
2020e20a199fSJeff Roberson {
2021e20a199fSJeff Roberson 
2022e20a199fSJeff Roberson 	ZONE_UNLOCK(a);
2023e20a199fSJeff Roberson 	ZONE_UNLOCK(b);
2024e20a199fSJeff Roberson }
2025e20a199fSJeff Roberson 
2026e20a199fSJeff Roberson int
2027e20a199fSJeff Roberson uma_zsecond_add(uma_zone_t zone, uma_zone_t master)
2028e20a199fSJeff Roberson {
2029e20a199fSJeff Roberson 	uma_klink_t klink;
2030e20a199fSJeff Roberson 	uma_klink_t kl;
2031e20a199fSJeff Roberson 	int error;
2032e20a199fSJeff Roberson 
2033e20a199fSJeff Roberson 	error = 0;
2034e20a199fSJeff Roberson 	klink = malloc(sizeof(*klink), M_TEMP, M_WAITOK | M_ZERO);
2035e20a199fSJeff Roberson 
2036e20a199fSJeff Roberson 	zone_lock_pair(zone, master);
2037e20a199fSJeff Roberson 	/*
2038e20a199fSJeff Roberson 	 * zone must use vtoslab() to resolve objects and must already be
2039e20a199fSJeff Roberson 	 * a secondary.
2040e20a199fSJeff Roberson 	 */
2041e20a199fSJeff Roberson 	if ((zone->uz_flags & (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY))
2042e20a199fSJeff Roberson 	    != (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) {
2043e20a199fSJeff Roberson 		error = EINVAL;
2044e20a199fSJeff Roberson 		goto out;
2045e20a199fSJeff Roberson 	}
2046e20a199fSJeff Roberson 	/*
2047e20a199fSJeff Roberson 	 * The new master must also use vtoslab().
2048e20a199fSJeff Roberson 	 */
2049e20a199fSJeff Roberson 	if ((zone->uz_flags & UMA_ZONE_VTOSLAB) != UMA_ZONE_VTOSLAB) {
2050e20a199fSJeff Roberson 		error = EINVAL;
2051e20a199fSJeff Roberson 		goto out;
2052e20a199fSJeff Roberson 	}
2053cfcae3f8SGleb Smirnoff 
2054e20a199fSJeff Roberson 	/*
2055e20a199fSJeff Roberson 	 * The underlying object must be the same size.  rsize
2056e20a199fSJeff Roberson 	 * may be different.
2057e20a199fSJeff Roberson 	 */
2058e20a199fSJeff Roberson 	if (master->uz_size != zone->uz_size) {
2059e20a199fSJeff Roberson 		error = E2BIG;
2060e20a199fSJeff Roberson 		goto out;
2061e20a199fSJeff Roberson 	}
2062e20a199fSJeff Roberson 	/*
2063e20a199fSJeff Roberson 	 * Put it at the end of the list.
2064e20a199fSJeff Roberson 	 */
2065e20a199fSJeff Roberson 	klink->kl_keg = zone_first_keg(master);
2066e20a199fSJeff Roberson 	LIST_FOREACH(kl, &zone->uz_kegs, kl_link) {
2067e20a199fSJeff Roberson 		if (LIST_NEXT(kl, kl_link) == NULL) {
2068e20a199fSJeff Roberson 			LIST_INSERT_AFTER(kl, klink, kl_link);
2069e20a199fSJeff Roberson 			break;
2070e20a199fSJeff Roberson 		}
2071e20a199fSJeff Roberson 	}
2072e20a199fSJeff Roberson 	klink = NULL;
2073e20a199fSJeff Roberson 	zone->uz_flags |= UMA_ZFLAG_MULTI;
2074e20a199fSJeff Roberson 	zone->uz_slab = zone_fetch_slab_multi;
2075e20a199fSJeff Roberson 
2076e20a199fSJeff Roberson out:
2077e20a199fSJeff Roberson 	zone_unlock_pair(zone, master);
2078e20a199fSJeff Roberson 	if (klink != NULL)
2079e20a199fSJeff Roberson 		free(klink, M_TEMP);
2080e20a199fSJeff Roberson 
2081e20a199fSJeff Roberson 	return (error);
2082e20a199fSJeff Roberson }
2083e20a199fSJeff Roberson 
2084e20a199fSJeff Roberson 
20858355f576SJeff Roberson /* See uma.h */
20869c2cd7e5SJeff Roberson void
20879c2cd7e5SJeff Roberson uma_zdestroy(uma_zone_t zone)
20889c2cd7e5SJeff Roberson {
2089f4ff923bSRobert Watson 
209095c4bf75SKonstantin Belousov 	sx_slock(&uma_drain_lock);
20910095a784SJeff Roberson 	zone_free_item(zones, zone, NULL, SKIP_NONE);
209295c4bf75SKonstantin Belousov 	sx_sunlock(&uma_drain_lock);
20939c2cd7e5SJeff Roberson }
20949c2cd7e5SJeff Roberson 
20958d6fbbb8SJeff Roberson void
20968d6fbbb8SJeff Roberson uma_zwait(uma_zone_t zone)
20978d6fbbb8SJeff Roberson {
20988d6fbbb8SJeff Roberson 	void *item;
20998d6fbbb8SJeff Roberson 
21008d6fbbb8SJeff Roberson 	item = uma_zalloc_arg(zone, NULL, M_WAITOK);
21018d6fbbb8SJeff Roberson 	uma_zfree(zone, item);
21028d6fbbb8SJeff Roberson }
21038d6fbbb8SJeff Roberson 
21049c2cd7e5SJeff Roberson /* See uma.h */
21058355f576SJeff Roberson void *
21062cc35ff9SJeff Roberson uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
21078355f576SJeff Roberson {
2108ab3185d1SJeff Roberson 	uma_zone_domain_t zdom;
21098355f576SJeff Roberson 	uma_bucket_t bucket;
2110ab3185d1SJeff Roberson 	uma_cache_t cache;
2111ab3185d1SJeff Roberson 	void *item;
2112ab3185d1SJeff Roberson 	int cpu, domain, lockfail;
21138355f576SJeff Roberson 
2114e866d8f0SMark Murray 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
2115e866d8f0SMark Murray 	random_harvest_fast_uma(&zone, sizeof(zone), 1, RANDOM_UMA);
211610cb2424SMark Murray 
21178355f576SJeff Roberson 	/* This is the fast path allocation */
21181431a748SGleb Smirnoff 	CTR4(KTR_UMA, "uma_zalloc_arg thread %x zone %s(%p) flags %d",
21191431a748SGleb Smirnoff 	    curthread, zone->uz_name, zone, flags);
2120a553d4b8SJeff Roberson 
2121635fd505SRobert Watson 	if (flags & M_WAITOK) {
2122b23f72e9SBrian Feldman 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2123635fd505SRobert Watson 		    "uma_zalloc_arg: zone \"%s\"", zone->uz_name);
21244c1cc01cSJohn Baldwin 	}
2125d9e2e68dSMark Johnston 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
21261067a2baSJonathan T. Looney 	    ("uma_zalloc_arg: called with spinlock or critical section held"));
21271067a2baSJonathan T. Looney 
21288d689e04SGleb Smirnoff #ifdef DEBUG_MEMGUARD
21298d689e04SGleb Smirnoff 	if (memguard_cmp_zone(zone)) {
21308d689e04SGleb Smirnoff 		item = memguard_alloc(zone->uz_size, flags);
21318d689e04SGleb Smirnoff 		if (item != NULL) {
21328d689e04SGleb Smirnoff 			if (zone->uz_init != NULL &&
21338d689e04SGleb Smirnoff 			    zone->uz_init(item, zone->uz_size, flags) != 0)
21348d689e04SGleb Smirnoff 				return (NULL);
21358d689e04SGleb Smirnoff 			if (zone->uz_ctor != NULL &&
2136fc03d22bSJeff Roberson 			    zone->uz_ctor(item, zone->uz_size, udata,
2137fc03d22bSJeff Roberson 			    flags) != 0) {
21388d689e04SGleb Smirnoff 			    	zone->uz_fini(item, zone->uz_size);
21398d689e04SGleb Smirnoff 				return (NULL);
21408d689e04SGleb Smirnoff 			}
21418d689e04SGleb Smirnoff 			return (item);
21428d689e04SGleb Smirnoff 		}
21438d689e04SGleb Smirnoff 		/* This is unfortunate but should not be fatal. */
21448d689e04SGleb Smirnoff 	}
21458d689e04SGleb Smirnoff #endif
21465d1ae027SRobert Watson 	/*
21475d1ae027SRobert Watson 	 * If possible, allocate from the per-CPU cache.  There are two
21485d1ae027SRobert Watson 	 * requirements for safe access to the per-CPU cache: (1) the thread
21495d1ae027SRobert Watson 	 * accessing the cache must not be preempted or yield during access,
21505d1ae027SRobert Watson 	 * and (2) the thread must not migrate CPUs without switching which
21515d1ae027SRobert Watson 	 * cache it accesses.  We rely on a critical section to prevent
21525d1ae027SRobert Watson 	 * preemption and migration.  We release the critical section in
21535d1ae027SRobert Watson 	 * order to acquire the zone mutex if we are unable to allocate from
21545d1ae027SRobert Watson 	 * the current cache; when we re-acquire the critical section, we
21555d1ae027SRobert Watson 	 * must detect and handle migration if it has occurred.
21565d1ae027SRobert Watson 	 */
21575d1ae027SRobert Watson 	critical_enter();
21585d1ae027SRobert Watson 	cpu = curcpu;
21598355f576SJeff Roberson 	cache = &zone->uz_cpu[cpu];
21608355f576SJeff Roberson 
21618355f576SJeff Roberson zalloc_start:
21628355f576SJeff Roberson 	bucket = cache->uc_allocbucket;
2163fc03d22bSJeff Roberson 	if (bucket != NULL && bucket->ub_cnt > 0) {
2164cae33c14SJeff Roberson 		bucket->ub_cnt--;
2165cae33c14SJeff Roberson 		item = bucket->ub_bucket[bucket->ub_cnt];
21668355f576SJeff Roberson #ifdef INVARIANTS
2167cae33c14SJeff Roberson 		bucket->ub_bucket[bucket->ub_cnt] = NULL;
21688355f576SJeff Roberson #endif
2169fc03d22bSJeff Roberson 		KASSERT(item != NULL, ("uma_zalloc: Bucket pointer mangled."));
21708355f576SJeff Roberson 		cache->uc_allocs++;
21715d1ae027SRobert Watson 		critical_exit();
2172fc03d22bSJeff Roberson 		if (zone->uz_ctor != NULL &&
2173fc03d22bSJeff Roberson 		    zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
21740095a784SJeff Roberson 			atomic_add_long(&zone->uz_fails, 1);
2175fc03d22bSJeff Roberson 			zone_free_item(zone, item, udata, SKIP_DTOR);
2176b23f72e9SBrian Feldman 			return (NULL);
2177b23f72e9SBrian Feldman 		}
2178ef72505eSJeff Roberson #ifdef INVARIANTS
2179ef72505eSJeff Roberson 		uma_dbg_alloc(zone, NULL, item);
2180ef72505eSJeff Roberson #endif
21812cc35ff9SJeff Roberson 		if (flags & M_ZERO)
218248343a2fSGleb Smirnoff 			uma_zero_item(item, zone);
21838355f576SJeff Roberson 		return (item);
2184fc03d22bSJeff Roberson 	}
2185fc03d22bSJeff Roberson 
21868355f576SJeff Roberson 	/*
21878355f576SJeff Roberson 	 * We have run out of items in our alloc bucket.
21888355f576SJeff Roberson 	 * See if we can switch with our free bucket.
21898355f576SJeff Roberson 	 */
2190b983089aSJeff Roberson 	bucket = cache->uc_freebucket;
2191fc03d22bSJeff Roberson 	if (bucket != NULL && bucket->ub_cnt > 0) {
21921431a748SGleb Smirnoff 		CTR2(KTR_UMA,
21931431a748SGleb Smirnoff 		    "uma_zalloc: zone %s(%p) swapping empty with alloc",
21941431a748SGleb Smirnoff 		    zone->uz_name, zone);
21958355f576SJeff Roberson 		cache->uc_freebucket = cache->uc_allocbucket;
2196b983089aSJeff Roberson 		cache->uc_allocbucket = bucket;
21978355f576SJeff Roberson 		goto zalloc_start;
21988355f576SJeff Roberson 	}
2199fc03d22bSJeff Roberson 
2200fc03d22bSJeff Roberson 	/*
2201fc03d22bSJeff Roberson 	 * Discard any empty allocation bucket while we hold no locks.
2202fc03d22bSJeff Roberson 	 */
2203fc03d22bSJeff Roberson 	bucket = cache->uc_allocbucket;
2204fc03d22bSJeff Roberson 	cache->uc_allocbucket = NULL;
2205fc03d22bSJeff Roberson 	critical_exit();
2206fc03d22bSJeff Roberson 	if (bucket != NULL)
22076fd34d6fSJeff Roberson 		bucket_free(zone, bucket, udata);
2208fc03d22bSJeff Roberson 
2209ab3185d1SJeff Roberson 	if (zone->uz_flags & UMA_ZONE_NUMA)
2210ab3185d1SJeff Roberson 		domain = PCPU_GET(domain);
2211ab3185d1SJeff Roberson 	else
2212ab3185d1SJeff Roberson 		domain = UMA_ANYDOMAIN;
2213ab3185d1SJeff Roberson 
2214fc03d22bSJeff Roberson 	/* Short-circuit for zones without buckets and low memory. */
2215fc03d22bSJeff Roberson 	if (zone->uz_count == 0 || bucketdisable)
2216fc03d22bSJeff Roberson 		goto zalloc_item;
2217fc03d22bSJeff Roberson 
22185d1ae027SRobert Watson 	/*
22195d1ae027SRobert Watson 	 * Attempt to retrieve the item from the per-CPU cache has failed, so
22205d1ae027SRobert Watson 	 * we must go back to the zone.  This requires the zone lock, so we
22215d1ae027SRobert Watson 	 * must drop the critical section, then re-acquire it when we go back
22225d1ae027SRobert Watson 	 * to the cache.  Since the critical section is released, we may be
22235d1ae027SRobert Watson 	 * preempted or migrate.  As such, make sure not to maintain any
22245d1ae027SRobert Watson 	 * thread-local state specific to the cache from prior to releasing
22255d1ae027SRobert Watson 	 * the critical section.
22265d1ae027SRobert Watson 	 */
2227fc03d22bSJeff Roberson 	lockfail = 0;
2228fc03d22bSJeff Roberson 	if (ZONE_TRYLOCK(zone) == 0) {
2229fc03d22bSJeff Roberson 		/* Record contention to size the buckets. */
2230a553d4b8SJeff Roberson 		ZONE_LOCK(zone);
2231fc03d22bSJeff Roberson 		lockfail = 1;
2232fc03d22bSJeff Roberson 	}
22335d1ae027SRobert Watson 	critical_enter();
22345d1ae027SRobert Watson 	cpu = curcpu;
22355d1ae027SRobert Watson 	cache = &zone->uz_cpu[cpu];
22365d1ae027SRobert Watson 
2237fc03d22bSJeff Roberson 	/*
2238fc03d22bSJeff Roberson 	 * Since we have locked the zone we may as well send back our stats.
2239fc03d22bSJeff Roberson 	 */
22400095a784SJeff Roberson 	atomic_add_long(&zone->uz_allocs, cache->uc_allocs);
22410095a784SJeff Roberson 	atomic_add_long(&zone->uz_frees, cache->uc_frees);
2242a553d4b8SJeff Roberson 	cache->uc_allocs = 0;
2243773df9abSRobert Watson 	cache->uc_frees = 0;
22448355f576SJeff Roberson 
2245fc03d22bSJeff Roberson 	/* See if we lost the race to fill the cache. */
2246fc03d22bSJeff Roberson 	if (cache->uc_allocbucket != NULL) {
2247fc03d22bSJeff Roberson 		ZONE_UNLOCK(zone);
2248fc03d22bSJeff Roberson 		goto zalloc_start;
2249a553d4b8SJeff Roberson 	}
22508355f576SJeff Roberson 
2251fc03d22bSJeff Roberson 	/*
2252fc03d22bSJeff Roberson 	 * Check the zone's cache of buckets.
2253fc03d22bSJeff Roberson 	 */
2254ab3185d1SJeff Roberson 	if (domain == UMA_ANYDOMAIN)
2255ab3185d1SJeff Roberson 		zdom = &zone->uz_domain[0];
2256ab3185d1SJeff Roberson 	else
2257ab3185d1SJeff Roberson 		zdom = &zone->uz_domain[domain];
2258ab3185d1SJeff Roberson 	if ((bucket = LIST_FIRST(&zdom->uzd_buckets)) != NULL) {
2259cae33c14SJeff Roberson 		KASSERT(bucket->ub_cnt != 0,
2260a553d4b8SJeff Roberson 		    ("uma_zalloc_arg: Returning an empty bucket."));
22618355f576SJeff Roberson 
2262a553d4b8SJeff Roberson 		LIST_REMOVE(bucket, ub_link);
2263a553d4b8SJeff Roberson 		cache->uc_allocbucket = bucket;
2264a553d4b8SJeff Roberson 		ZONE_UNLOCK(zone);
22658355f576SJeff Roberson 		goto zalloc_start;
2266a553d4b8SJeff Roberson 	}
22675d1ae027SRobert Watson 	/* We are no longer associated with this CPU. */
22685d1ae027SRobert Watson 	critical_exit();
2269bbee39c6SJeff Roberson 
2270fc03d22bSJeff Roberson 	/*
2271fc03d22bSJeff Roberson 	 * We bump the uz count when the cache size is insufficient to
2272fc03d22bSJeff Roberson 	 * handle the working set.
2273fc03d22bSJeff Roberson 	 */
22746fd34d6fSJeff Roberson 	if (lockfail && zone->uz_count < BUCKET_MAX)
2275a553d4b8SJeff Roberson 		zone->uz_count++;
2276fc03d22bSJeff Roberson 	ZONE_UNLOCK(zone);
2277099a0e58SBosko Milekic 
22788355f576SJeff Roberson 	/*
2279a553d4b8SJeff Roberson 	 * Now lets just fill a bucket and put it on the free list.  If that
2280763df3ecSPedro F. Giffuni 	 * works we'll restart the allocation from the beginning and it
2281fc03d22bSJeff Roberson 	 * will use the just filled bucket.
2282bbee39c6SJeff Roberson 	 */
2283ab3185d1SJeff Roberson 	bucket = zone_alloc_bucket(zone, udata, domain, flags);
22841431a748SGleb Smirnoff 	CTR3(KTR_UMA, "uma_zalloc: zone %s(%p) bucket zone returned %p",
22851431a748SGleb Smirnoff 	    zone->uz_name, zone, bucket);
2286fc03d22bSJeff Roberson 	if (bucket != NULL) {
2287fc03d22bSJeff Roberson 		ZONE_LOCK(zone);
2288fc03d22bSJeff Roberson 		critical_enter();
2289fc03d22bSJeff Roberson 		cpu = curcpu;
2290fc03d22bSJeff Roberson 		cache = &zone->uz_cpu[cpu];
2291fc03d22bSJeff Roberson 		/*
2292fc03d22bSJeff Roberson 		 * See if we lost the race or were migrated.  Cache the
2293fc03d22bSJeff Roberson 		 * initialized bucket to make this less likely or claim
2294fc03d22bSJeff Roberson 		 * the memory directly.
2295fc03d22bSJeff Roberson 		 */
2296ab3185d1SJeff Roberson 		if (cache->uc_allocbucket != NULL ||
2297ab3185d1SJeff Roberson 		    (zone->uz_flags & UMA_ZONE_NUMA &&
2298ab3185d1SJeff Roberson 		    domain != PCPU_GET(domain)))
2299ab3185d1SJeff Roberson 			LIST_INSERT_HEAD(&zdom->uzd_buckets, bucket, ub_link);
2300fc03d22bSJeff Roberson 		else
2301ab3185d1SJeff Roberson 			cache->uc_allocbucket = bucket;
2302bbee39c6SJeff Roberson 		ZONE_UNLOCK(zone);
2303fc03d22bSJeff Roberson 		goto zalloc_start;
2304bbee39c6SJeff Roberson 	}
2305fc03d22bSJeff Roberson 
2306bbee39c6SJeff Roberson 	/*
2307bbee39c6SJeff Roberson 	 * We may not be able to get a bucket so return an actual item.
2308bbee39c6SJeff Roberson 	 */
2309fc03d22bSJeff Roberson zalloc_item:
2310ab3185d1SJeff Roberson 	item = zone_alloc_item(zone, udata, domain, flags);
2311fc03d22bSJeff Roberson 
2312e20a199fSJeff Roberson 	return (item);
2313bbee39c6SJeff Roberson }
2314bbee39c6SJeff Roberson 
2315ab3185d1SJeff Roberson void *
2316ab3185d1SJeff Roberson uma_zalloc_domain(uma_zone_t zone, void *udata, int domain, int flags)
2317bbee39c6SJeff Roberson {
2318ab3185d1SJeff Roberson 
2319ab3185d1SJeff Roberson 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
2320ab3185d1SJeff Roberson 	random_harvest_fast_uma(&zone, sizeof(zone), 1, RANDOM_UMA);
2321ab3185d1SJeff Roberson 
2322ab3185d1SJeff Roberson 	/* This is the fast path allocation */
2323ab3185d1SJeff Roberson 	CTR5(KTR_UMA,
2324ab3185d1SJeff Roberson 	    "uma_zalloc_domain thread %x zone %s(%p) domain %d flags %d",
2325ab3185d1SJeff Roberson 	    curthread, zone->uz_name, zone, domain, flags);
2326ab3185d1SJeff Roberson 
2327ab3185d1SJeff Roberson 	if (flags & M_WAITOK) {
2328ab3185d1SJeff Roberson 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2329ab3185d1SJeff Roberson 		    "uma_zalloc_domain: zone \"%s\"", zone->uz_name);
2330ab3185d1SJeff Roberson 	}
2331ab3185d1SJeff Roberson 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
2332ab3185d1SJeff Roberson 	    ("uma_zalloc_domain: called with spinlock or critical section held"));
2333ab3185d1SJeff Roberson 
2334ab3185d1SJeff Roberson 	return (zone_alloc_item(zone, udata, domain, flags));
2335ab3185d1SJeff Roberson }
2336ab3185d1SJeff Roberson 
2337ab3185d1SJeff Roberson /*
2338ab3185d1SJeff Roberson  * Find a slab with some space.  Prefer slabs that are partially used over those
2339ab3185d1SJeff Roberson  * that are totally full.  This helps to reduce fragmentation.
2340ab3185d1SJeff Roberson  *
2341ab3185d1SJeff Roberson  * If 'rr' is 1, search all domains starting from 'domain'.  Otherwise check
2342ab3185d1SJeff Roberson  * only 'domain'.
2343ab3185d1SJeff Roberson  */
2344ab3185d1SJeff Roberson static uma_slab_t
2345ab3185d1SJeff Roberson keg_first_slab(uma_keg_t keg, int domain, int rr)
2346ab3185d1SJeff Roberson {
2347ab3185d1SJeff Roberson 	uma_domain_t dom;
2348bbee39c6SJeff Roberson 	uma_slab_t slab;
2349ab3185d1SJeff Roberson 	int start;
2350ab3185d1SJeff Roberson 
2351ab3185d1SJeff Roberson 	KASSERT(domain >= 0 && domain < vm_ndomains,
2352ab3185d1SJeff Roberson 	    ("keg_first_slab: domain %d out of range", domain));
2353ab3185d1SJeff Roberson 
2354ab3185d1SJeff Roberson 	slab = NULL;
2355ab3185d1SJeff Roberson 	start = domain;
2356ab3185d1SJeff Roberson 	do {
2357ab3185d1SJeff Roberson 		dom = &keg->uk_domain[domain];
2358ab3185d1SJeff Roberson 		if (!LIST_EMPTY(&dom->ud_part_slab))
2359ab3185d1SJeff Roberson 			return (LIST_FIRST(&dom->ud_part_slab));
2360ab3185d1SJeff Roberson 		if (!LIST_EMPTY(&dom->ud_free_slab)) {
2361ab3185d1SJeff Roberson 			slab = LIST_FIRST(&dom->ud_free_slab);
2362ab3185d1SJeff Roberson 			LIST_REMOVE(slab, us_link);
2363ab3185d1SJeff Roberson 			LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
2364ab3185d1SJeff Roberson 			return (slab);
2365ab3185d1SJeff Roberson 		}
2366ab3185d1SJeff Roberson 		if (rr)
2367ab3185d1SJeff Roberson 			domain = (domain + 1) % vm_ndomains;
2368ab3185d1SJeff Roberson 	} while (domain != start);
2369ab3185d1SJeff Roberson 
2370ab3185d1SJeff Roberson 	return (NULL);
2371ab3185d1SJeff Roberson }
2372ab3185d1SJeff Roberson 
2373ab3185d1SJeff Roberson static uma_slab_t
2374ab3185d1SJeff Roberson keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int rdomain, int flags)
2375ab3185d1SJeff Roberson {
2376ab3185d1SJeff Roberson 	uma_domain_t dom;
2377ab3185d1SJeff Roberson 	uma_slab_t slab;
2378ab3185d1SJeff Roberson 	int allocflags, domain, reserve, rr, start;
2379099a0e58SBosko Milekic 
2380e20a199fSJeff Roberson 	mtx_assert(&keg->uk_lock, MA_OWNED);
2381bbee39c6SJeff Roberson 	slab = NULL;
23826fd34d6fSJeff Roberson 	reserve = 0;
2383ab3185d1SJeff Roberson 	allocflags = flags;
23846fd34d6fSJeff Roberson 	if ((flags & M_USE_RESERVE) == 0)
23856fd34d6fSJeff Roberson 		reserve = keg->uk_reserve;
2386bbee39c6SJeff Roberson 
2387bbee39c6SJeff Roberson 	/*
2388ab3185d1SJeff Roberson 	 * Round-robin for non first-touch zones when there is more than one
2389ab3185d1SJeff Roberson 	 * domain.
2390bbee39c6SJeff Roberson 	 */
2391ab3185d1SJeff Roberson 	if (vm_ndomains == 1)
2392ab3185d1SJeff Roberson 		rdomain = 0;
2393ab3185d1SJeff Roberson 	rr = rdomain == UMA_ANYDOMAIN;
2394ab3185d1SJeff Roberson 	if (rr) {
2395ab3185d1SJeff Roberson 		keg->uk_cursor = (keg->uk_cursor + 1) % vm_ndomains;
2396ab3185d1SJeff Roberson 		domain = start = keg->uk_cursor;
2397ab3185d1SJeff Roberson 		/* Only block on the second pass. */
2398ab3185d1SJeff Roberson 		if ((flags & (M_WAITOK | M_NOVM)) == M_WAITOK)
2399ab3185d1SJeff Roberson 			allocflags = (allocflags & ~M_WAITOK) | M_NOWAIT;
2400ab3185d1SJeff Roberson 	} else
2401ab3185d1SJeff Roberson 		domain = start = rdomain;
2402ab3185d1SJeff Roberson 
2403ab3185d1SJeff Roberson again:
2404ab3185d1SJeff Roberson 	do {
2405ab3185d1SJeff Roberson 		if (keg->uk_free > reserve &&
2406ab3185d1SJeff Roberson 		    (slab = keg_first_slab(keg, domain, rr)) != NULL) {
2407e20a199fSJeff Roberson 			MPASS(slab->us_keg == keg);
2408bbee39c6SJeff Roberson 			return (slab);
2409bbee39c6SJeff Roberson 		}
2410bbee39c6SJeff Roberson 
2411bbee39c6SJeff Roberson 		/*
2412bbee39c6SJeff Roberson 		 * M_NOVM means don't ask at all!
2413bbee39c6SJeff Roberson 		 */
2414bbee39c6SJeff Roberson 		if (flags & M_NOVM)
2415bbee39c6SJeff Roberson 			break;
2416bbee39c6SJeff Roberson 
2417e20a199fSJeff Roberson 		if (keg->uk_maxpages && keg->uk_pages >= keg->uk_maxpages) {
2418099a0e58SBosko Milekic 			keg->uk_flags |= UMA_ZFLAG_FULL;
2419e20a199fSJeff Roberson 			/*
2420e20a199fSJeff Roberson 			 * If this is not a multi-zone, set the FULL bit.
2421e20a199fSJeff Roberson 			 * Otherwise slab_multi() takes care of it.
2422e20a199fSJeff Roberson 			 */
24232f891cd5SPawel Jakub Dawidek 			if ((zone->uz_flags & UMA_ZFLAG_MULTI) == 0) {
2424e20a199fSJeff Roberson 				zone->uz_flags |= UMA_ZFLAG_FULL;
24252f891cd5SPawel Jakub Dawidek 				zone_log_warning(zone);
242654503a13SJonathan T. Looney 				zone_maxaction(zone);
24272f891cd5SPawel Jakub Dawidek 			}
2428ebc85edfSJeff Roberson 			if (flags & M_NOWAIT)
2429ab3185d1SJeff Roberson 				return (NULL);
2430c288b548SEitan Adler 			zone->uz_sleeps++;
2431e20a199fSJeff Roberson 			msleep(keg, &keg->uk_lock, PVM, "keglimit", 0);
2432bbee39c6SJeff Roberson 			continue;
2433bbee39c6SJeff Roberson 		}
2434ab3185d1SJeff Roberson 		slab = keg_alloc_slab(keg, zone, domain, allocflags);
2435bbee39c6SJeff Roberson 		/*
2436bbee39c6SJeff Roberson 		 * If we got a slab here it's safe to mark it partially used
2437bbee39c6SJeff Roberson 		 * and return.  We assume that the caller is going to remove
2438bbee39c6SJeff Roberson 		 * at least one item.
2439bbee39c6SJeff Roberson 		 */
2440bbee39c6SJeff Roberson 		if (slab) {
2441e20a199fSJeff Roberson 			MPASS(slab->us_keg == keg);
2442ab3185d1SJeff Roberson 			dom = &keg->uk_domain[slab->us_domain];
2443ab3185d1SJeff Roberson 			LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
2444bbee39c6SJeff Roberson 			return (slab);
2445bbee39c6SJeff Roberson 		}
2446ab3185d1SJeff Roberson 		if (rr) {
2447ab3185d1SJeff Roberson 			keg->uk_cursor = (keg->uk_cursor + 1) % vm_ndomains;
2448ab3185d1SJeff Roberson 			domain = keg->uk_cursor;
2449ab3185d1SJeff Roberson 		}
2450ab3185d1SJeff Roberson 	} while (domain != start);
2451ab3185d1SJeff Roberson 
2452ab3185d1SJeff Roberson 	/* Retry domain scan with blocking. */
2453ab3185d1SJeff Roberson 	if (allocflags != flags) {
2454ab3185d1SJeff Roberson 		allocflags = flags;
2455ab3185d1SJeff Roberson 		goto again;
2456ab3185d1SJeff Roberson 	}
2457ab3185d1SJeff Roberson 
2458bbee39c6SJeff Roberson 	/*
2459bbee39c6SJeff Roberson 	 * We might not have been able to get a slab but another cpu
2460bbee39c6SJeff Roberson 	 * could have while we were unlocked.  Check again before we
2461bbee39c6SJeff Roberson 	 * fail.
2462bbee39c6SJeff Roberson 	 */
2463ab3185d1SJeff Roberson 	if (keg->uk_free > reserve &&
2464ab3185d1SJeff Roberson 	    (slab = keg_first_slab(keg, domain, rr)) != NULL) {
2465ab3185d1SJeff Roberson 		MPASS(slab->us_keg == keg);
2466bbee39c6SJeff Roberson 		return (slab);
2467bbee39c6SJeff Roberson 	}
2468ab3185d1SJeff Roberson 	return (NULL);
2469ab3185d1SJeff Roberson }
2470bbee39c6SJeff Roberson 
2471e20a199fSJeff Roberson static uma_slab_t
2472ab3185d1SJeff Roberson zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int domain, int flags)
2473e20a199fSJeff Roberson {
2474e20a199fSJeff Roberson 	uma_slab_t slab;
2475e20a199fSJeff Roberson 
2476af526374SJeff Roberson 	if (keg == NULL) {
2477e20a199fSJeff Roberson 		keg = zone_first_keg(zone);
2478af526374SJeff Roberson 		KEG_LOCK(keg);
2479af526374SJeff Roberson 	}
2480e20a199fSJeff Roberson 
2481e20a199fSJeff Roberson 	for (;;) {
2482ab3185d1SJeff Roberson 		slab = keg_fetch_slab(keg, zone, domain, flags);
2483e20a199fSJeff Roberson 		if (slab)
2484e20a199fSJeff Roberson 			return (slab);
2485e20a199fSJeff Roberson 		if (flags & (M_NOWAIT | M_NOVM))
2486e20a199fSJeff Roberson 			break;
2487e20a199fSJeff Roberson 	}
2488af526374SJeff Roberson 	KEG_UNLOCK(keg);
2489e20a199fSJeff Roberson 	return (NULL);
2490e20a199fSJeff Roberson }
2491e20a199fSJeff Roberson 
2492e20a199fSJeff Roberson /*
2493e20a199fSJeff Roberson  * uma_zone_fetch_slab_multi:  Fetches a slab from one available keg.  Returns
2494af526374SJeff Roberson  * with the keg locked.  On NULL no lock is held.
2495e20a199fSJeff Roberson  *
2496e20a199fSJeff Roberson  * The last pointer is used to seed the search.  It is not required.
2497e20a199fSJeff Roberson  */
2498e20a199fSJeff Roberson static uma_slab_t
2499ab3185d1SJeff Roberson zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int domain, int rflags)
2500e20a199fSJeff Roberson {
2501e20a199fSJeff Roberson 	uma_klink_t klink;
2502e20a199fSJeff Roberson 	uma_slab_t slab;
2503e20a199fSJeff Roberson 	uma_keg_t keg;
2504e20a199fSJeff Roberson 	int flags;
2505e20a199fSJeff Roberson 	int empty;
2506e20a199fSJeff Roberson 	int full;
2507e20a199fSJeff Roberson 
2508e20a199fSJeff Roberson 	/*
2509e20a199fSJeff Roberson 	 * Don't wait on the first pass.  This will skip limit tests
2510e20a199fSJeff Roberson 	 * as well.  We don't want to block if we can find a provider
2511e20a199fSJeff Roberson 	 * without blocking.
2512e20a199fSJeff Roberson 	 */
2513e20a199fSJeff Roberson 	flags = (rflags & ~M_WAITOK) | M_NOWAIT;
2514e20a199fSJeff Roberson 	/*
2515e20a199fSJeff Roberson 	 * Use the last slab allocated as a hint for where to start
2516e20a199fSJeff Roberson 	 * the search.
2517e20a199fSJeff Roberson 	 */
2518af526374SJeff Roberson 	if (last != NULL) {
2519ab3185d1SJeff Roberson 		slab = keg_fetch_slab(last, zone, domain, flags);
2520e20a199fSJeff Roberson 		if (slab)
2521e20a199fSJeff Roberson 			return (slab);
2522af526374SJeff Roberson 		KEG_UNLOCK(last);
2523e20a199fSJeff Roberson 	}
2524e20a199fSJeff Roberson 	/*
2525e20a199fSJeff Roberson 	 * Loop until we have a slab incase of transient failures
2526e20a199fSJeff Roberson 	 * while M_WAITOK is specified.  I'm not sure this is 100%
2527e20a199fSJeff Roberson 	 * required but we've done it for so long now.
2528e20a199fSJeff Roberson 	 */
2529e20a199fSJeff Roberson 	for (;;) {
2530e20a199fSJeff Roberson 		empty = 0;
2531e20a199fSJeff Roberson 		full = 0;
2532e20a199fSJeff Roberson 		/*
2533e20a199fSJeff Roberson 		 * Search the available kegs for slabs.  Be careful to hold the
2534e20a199fSJeff Roberson 		 * correct lock while calling into the keg layer.
2535e20a199fSJeff Roberson 		 */
2536e20a199fSJeff Roberson 		LIST_FOREACH(klink, &zone->uz_kegs, kl_link) {
2537e20a199fSJeff Roberson 			keg = klink->kl_keg;
2538af526374SJeff Roberson 			KEG_LOCK(keg);
2539e20a199fSJeff Roberson 			if ((keg->uk_flags & UMA_ZFLAG_FULL) == 0) {
2540ab3185d1SJeff Roberson 				slab = keg_fetch_slab(keg, zone, domain, flags);
2541e20a199fSJeff Roberson 				if (slab)
2542e20a199fSJeff Roberson 					return (slab);
2543e20a199fSJeff Roberson 			}
2544e20a199fSJeff Roberson 			if (keg->uk_flags & UMA_ZFLAG_FULL)
2545e20a199fSJeff Roberson 				full++;
2546e20a199fSJeff Roberson 			else
2547e20a199fSJeff Roberson 				empty++;
2548af526374SJeff Roberson 			KEG_UNLOCK(keg);
2549e20a199fSJeff Roberson 		}
2550e20a199fSJeff Roberson 		if (rflags & (M_NOWAIT | M_NOVM))
2551e20a199fSJeff Roberson 			break;
2552e20a199fSJeff Roberson 		flags = rflags;
2553e20a199fSJeff Roberson 		/*
2554e20a199fSJeff Roberson 		 * All kegs are full.  XXX We can't atomically check all kegs
2555e20a199fSJeff Roberson 		 * and sleep so just sleep for a short period and retry.
2556e20a199fSJeff Roberson 		 */
2557e20a199fSJeff Roberson 		if (full && !empty) {
2558af526374SJeff Roberson 			ZONE_LOCK(zone);
2559e20a199fSJeff Roberson 			zone->uz_flags |= UMA_ZFLAG_FULL;
2560bf965959SSean Bruno 			zone->uz_sleeps++;
25612f891cd5SPawel Jakub Dawidek 			zone_log_warning(zone);
256254503a13SJonathan T. Looney 			zone_maxaction(zone);
2563af526374SJeff Roberson 			msleep(zone, zone->uz_lockptr, PVM,
2564af526374SJeff Roberson 			    "zonelimit", hz/100);
2565e20a199fSJeff Roberson 			zone->uz_flags &= ~UMA_ZFLAG_FULL;
2566af526374SJeff Roberson 			ZONE_UNLOCK(zone);
2567e20a199fSJeff Roberson 			continue;
2568e20a199fSJeff Roberson 		}
2569e20a199fSJeff Roberson 	}
2570e20a199fSJeff Roberson 	return (NULL);
2571e20a199fSJeff Roberson }
2572e20a199fSJeff Roberson 
2573d56368d7SBosko Milekic static void *
25740095a784SJeff Roberson slab_alloc_item(uma_keg_t keg, uma_slab_t slab)
2575bbee39c6SJeff Roberson {
2576ab3185d1SJeff Roberson 	uma_domain_t dom;
2577bbee39c6SJeff Roberson 	void *item;
257885dcf349SGleb Smirnoff 	uint8_t freei;
2579bbee39c6SJeff Roberson 
25800095a784SJeff Roberson 	MPASS(keg == slab->us_keg);
2581e20a199fSJeff Roberson 	mtx_assert(&keg->uk_lock, MA_OWNED);
2582099a0e58SBosko Milekic 
2583ef72505eSJeff Roberson 	freei = BIT_FFS(SLAB_SETSIZE, &slab->us_free) - 1;
2584ef72505eSJeff Roberson 	BIT_CLR(SLAB_SETSIZE, freei, &slab->us_free);
2585099a0e58SBosko Milekic 	item = slab->us_data + (keg->uk_rsize * freei);
2586bbee39c6SJeff Roberson 	slab->us_freecount--;
2587099a0e58SBosko Milekic 	keg->uk_free--;
2588ef72505eSJeff Roberson 
2589bbee39c6SJeff Roberson 	/* Move this slab to the full list */
2590bbee39c6SJeff Roberson 	if (slab->us_freecount == 0) {
2591bbee39c6SJeff Roberson 		LIST_REMOVE(slab, us_link);
2592ab3185d1SJeff Roberson 		dom = &keg->uk_domain[slab->us_domain];
2593ab3185d1SJeff Roberson 		LIST_INSERT_HEAD(&dom->ud_full_slab, slab, us_link);
2594bbee39c6SJeff Roberson 	}
2595bbee39c6SJeff Roberson 
2596bbee39c6SJeff Roberson 	return (item);
2597bbee39c6SJeff Roberson }
2598bbee39c6SJeff Roberson 
2599bbee39c6SJeff Roberson static int
2600ab3185d1SJeff Roberson zone_import(uma_zone_t zone, void **bucket, int max, int domain, int flags)
26010095a784SJeff Roberson {
26020095a784SJeff Roberson 	uma_slab_t slab;
26030095a784SJeff Roberson 	uma_keg_t keg;
2604ab3185d1SJeff Roberson 	int stripe;
26050095a784SJeff Roberson 	int i;
26060095a784SJeff Roberson 
26070095a784SJeff Roberson 	slab = NULL;
26080095a784SJeff Roberson 	keg = NULL;
2609af526374SJeff Roberson 	/* Try to keep the buckets totally full */
26100095a784SJeff Roberson 	for (i = 0; i < max; ) {
2611ab3185d1SJeff Roberson 		if ((slab = zone->uz_slab(zone, keg, domain, flags)) == NULL)
26120095a784SJeff Roberson 			break;
26130095a784SJeff Roberson 		keg = slab->us_keg;
2614ab3185d1SJeff Roberson 		stripe = howmany(max, vm_ndomains);
26156fd34d6fSJeff Roberson 		while (slab->us_freecount && i < max) {
26160095a784SJeff Roberson 			bucket[i++] = slab_alloc_item(keg, slab);
26176fd34d6fSJeff Roberson 			if (keg->uk_free <= keg->uk_reserve)
26186fd34d6fSJeff Roberson 				break;
2619*b6715dabSJeff Roberson #ifdef NUMA
2620ab3185d1SJeff Roberson 			/*
2621ab3185d1SJeff Roberson 			 * If the zone is striped we pick a new slab for every
2622ab3185d1SJeff Roberson 			 * N allocations.  Eliminating this conditional will
2623ab3185d1SJeff Roberson 			 * instead pick a new domain for each bucket rather
2624ab3185d1SJeff Roberson 			 * than stripe within each bucket.  The current option
2625ab3185d1SJeff Roberson 			 * produces more fragmentation and requires more cpu
2626ab3185d1SJeff Roberson 			 * time but yields better distribution.
2627ab3185d1SJeff Roberson 			 */
2628ab3185d1SJeff Roberson 			if ((zone->uz_flags & UMA_ZONE_NUMA) == 0 &&
2629ab3185d1SJeff Roberson 			    vm_ndomains > 1 && --stripe == 0)
2630ab3185d1SJeff Roberson 				break;
2631ab3185d1SJeff Roberson #endif
26326fd34d6fSJeff Roberson 		}
2633ab3185d1SJeff Roberson 		/* Don't block if we allocated any successfully. */
26340095a784SJeff Roberson 		flags &= ~M_WAITOK;
26350095a784SJeff Roberson 		flags |= M_NOWAIT;
26360095a784SJeff Roberson 	}
26370095a784SJeff Roberson 	if (slab != NULL)
26380095a784SJeff Roberson 		KEG_UNLOCK(keg);
26390095a784SJeff Roberson 
26400095a784SJeff Roberson 	return i;
26410095a784SJeff Roberson }
26420095a784SJeff Roberson 
2643fc03d22bSJeff Roberson static uma_bucket_t
2644ab3185d1SJeff Roberson zone_alloc_bucket(uma_zone_t zone, void *udata, int domain, int flags)
2645bbee39c6SJeff Roberson {
2646bbee39c6SJeff Roberson 	uma_bucket_t bucket;
26470095a784SJeff Roberson 	int max;
2648bbee39c6SJeff Roberson 
26496fd34d6fSJeff Roberson 	/* Don't wait for buckets, preserve caller's NOVM setting. */
26506fd34d6fSJeff Roberson 	bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM));
26510095a784SJeff Roberson 	if (bucket == NULL)
2652f7104ccdSAlexander Motin 		return (NULL);
26530095a784SJeff Roberson 
2654af526374SJeff Roberson 	max = MIN(bucket->ub_entries, zone->uz_count);
26550095a784SJeff Roberson 	bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket,
2656ab3185d1SJeff Roberson 	    max, domain, flags);
26570095a784SJeff Roberson 
26580095a784SJeff Roberson 	/*
26590095a784SJeff Roberson 	 * Initialize the memory if necessary.
26600095a784SJeff Roberson 	 */
26610095a784SJeff Roberson 	if (bucket->ub_cnt != 0 && zone->uz_init != NULL) {
2662099a0e58SBosko Milekic 		int i;
2663bbee39c6SJeff Roberson 
26640095a784SJeff Roberson 		for (i = 0; i < bucket->ub_cnt; i++)
2665e20a199fSJeff Roberson 			if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size,
26660095a784SJeff Roberson 			    flags) != 0)
2667b23f72e9SBrian Feldman 				break;
2668b23f72e9SBrian Feldman 		/*
2669b23f72e9SBrian Feldman 		 * If we couldn't initialize the whole bucket, put the
2670b23f72e9SBrian Feldman 		 * rest back onto the freelist.
2671b23f72e9SBrian Feldman 		 */
2672b23f72e9SBrian Feldman 		if (i != bucket->ub_cnt) {
2673af526374SJeff Roberson 			zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i],
26740095a784SJeff Roberson 			    bucket->ub_cnt - i);
2675a5a262c6SBosko Milekic #ifdef INVARIANTS
26760095a784SJeff Roberson 			bzero(&bucket->ub_bucket[i],
26770095a784SJeff Roberson 			    sizeof(void *) * (bucket->ub_cnt - i));
2678a5a262c6SBosko Milekic #endif
2679b23f72e9SBrian Feldman 			bucket->ub_cnt = i;
2680b23f72e9SBrian Feldman 		}
2681099a0e58SBosko Milekic 	}
2682099a0e58SBosko Milekic 
2683f7104ccdSAlexander Motin 	if (bucket->ub_cnt == 0) {
26846fd34d6fSJeff Roberson 		bucket_free(zone, bucket, udata);
2685fc03d22bSJeff Roberson 		atomic_add_long(&zone->uz_fails, 1);
2686fc03d22bSJeff Roberson 		return (NULL);
2687bbee39c6SJeff Roberson 	}
2688fc03d22bSJeff Roberson 
2689fc03d22bSJeff Roberson 	return (bucket);
2690fc03d22bSJeff Roberson }
2691fc03d22bSJeff Roberson 
26928355f576SJeff Roberson /*
26930095a784SJeff Roberson  * Allocates a single item from a zone.
26948355f576SJeff Roberson  *
26958355f576SJeff Roberson  * Arguments
26968355f576SJeff Roberson  *	zone   The zone to alloc for.
26978355f576SJeff Roberson  *	udata  The data to be passed to the constructor.
2698ab3185d1SJeff Roberson  *	domain The domain to allocate from or UMA_ANYDOMAIN.
2699a163d034SWarner Losh  *	flags  M_WAITOK, M_NOWAIT, M_ZERO.
27008355f576SJeff Roberson  *
27018355f576SJeff Roberson  * Returns
27028355f576SJeff Roberson  *	NULL if there is no memory and M_NOWAIT is set
2703bbee39c6SJeff Roberson  *	An item if successful
27048355f576SJeff Roberson  */
27058355f576SJeff Roberson 
27068355f576SJeff Roberson static void *
2707ab3185d1SJeff Roberson zone_alloc_item(uma_zone_t zone, void *udata, int domain, int flags)
27088355f576SJeff Roberson {
27098355f576SJeff Roberson 	void *item;
27108355f576SJeff Roberson 
27118355f576SJeff Roberson 	item = NULL;
27128355f576SJeff Roberson 
2713ab3185d1SJeff Roberson 	if (zone->uz_import(zone->uz_arg, &item, 1, domain, flags) != 1)
27140095a784SJeff Roberson 		goto fail;
27150095a784SJeff Roberson 	atomic_add_long(&zone->uz_allocs, 1);
27168355f576SJeff Roberson 
2717099a0e58SBosko Milekic 	/*
2718099a0e58SBosko Milekic 	 * We have to call both the zone's init (not the keg's init)
2719099a0e58SBosko Milekic 	 * and the zone's ctor.  This is because the item is going from
2720099a0e58SBosko Milekic 	 * a keg slab directly to the user, and the user is expecting it
2721099a0e58SBosko Milekic 	 * to be both zone-init'd as well as zone-ctor'd.
2722099a0e58SBosko Milekic 	 */
2723b23f72e9SBrian Feldman 	if (zone->uz_init != NULL) {
2724e20a199fSJeff Roberson 		if (zone->uz_init(item, zone->uz_size, flags) != 0) {
27250095a784SJeff Roberson 			zone_free_item(zone, item, udata, SKIP_FINI);
27260095a784SJeff Roberson 			goto fail;
2727b23f72e9SBrian Feldman 		}
2728b23f72e9SBrian Feldman 	}
2729b23f72e9SBrian Feldman 	if (zone->uz_ctor != NULL) {
2730e20a199fSJeff Roberson 		if (zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
27310095a784SJeff Roberson 			zone_free_item(zone, item, udata, SKIP_DTOR);
27320095a784SJeff Roberson 			goto fail;
2733b23f72e9SBrian Feldman 		}
2734b23f72e9SBrian Feldman 	}
2735ef72505eSJeff Roberson #ifdef INVARIANTS
27360095a784SJeff Roberson 	uma_dbg_alloc(zone, NULL, item);
2737ef72505eSJeff Roberson #endif
27382cc35ff9SJeff Roberson 	if (flags & M_ZERO)
273948343a2fSGleb Smirnoff 		uma_zero_item(item, zone);
27408355f576SJeff Roberson 
27411431a748SGleb Smirnoff 	CTR3(KTR_UMA, "zone_alloc_item item %p from %s(%p)", item,
27421431a748SGleb Smirnoff 	    zone->uz_name, zone);
27431431a748SGleb Smirnoff 
27448355f576SJeff Roberson 	return (item);
27450095a784SJeff Roberson 
27460095a784SJeff Roberson fail:
27471431a748SGleb Smirnoff 	CTR2(KTR_UMA, "zone_alloc_item failed from %s(%p)",
27481431a748SGleb Smirnoff 	    zone->uz_name, zone);
27490095a784SJeff Roberson 	atomic_add_long(&zone->uz_fails, 1);
27500095a784SJeff Roberson 	return (NULL);
27518355f576SJeff Roberson }
27528355f576SJeff Roberson 
27538355f576SJeff Roberson /* See uma.h */
27548355f576SJeff Roberson void
27558355f576SJeff Roberson uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
27568355f576SJeff Roberson {
27578355f576SJeff Roberson 	uma_cache_t cache;
27588355f576SJeff Roberson 	uma_bucket_t bucket;
2759ab3185d1SJeff Roberson 	uma_zone_domain_t zdom;
2760ab3185d1SJeff Roberson 	int cpu, domain, lockfail;
27618355f576SJeff Roberson 
2762e866d8f0SMark Murray 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
2763e866d8f0SMark Murray 	random_harvest_fast_uma(&zone, sizeof(zone), 1, RANDOM_UMA);
276410cb2424SMark Murray 
27653659f747SRobert Watson 	CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread,
27663659f747SRobert Watson 	    zone->uz_name);
27673659f747SRobert Watson 
2768d9e2e68dSMark Johnston 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
27691067a2baSJonathan T. Looney 	    ("uma_zfree_arg: called with spinlock or critical section held"));
27701067a2baSJonathan T. Looney 
277120ed0cb0SMatthew D Fleming         /* uma_zfree(..., NULL) does nothing, to match free(9). */
277220ed0cb0SMatthew D Fleming         if (item == NULL)
277320ed0cb0SMatthew D Fleming                 return;
27748d689e04SGleb Smirnoff #ifdef DEBUG_MEMGUARD
27758d689e04SGleb Smirnoff 	if (is_memguard_addr(item)) {
2776bc9d08e1SMark Johnston 		if (zone->uz_dtor != NULL)
27778d689e04SGleb Smirnoff 			zone->uz_dtor(item, zone->uz_size, udata);
2778bc9d08e1SMark Johnston 		if (zone->uz_fini != NULL)
27798d689e04SGleb Smirnoff 			zone->uz_fini(item, zone->uz_size);
27808d689e04SGleb Smirnoff 		memguard_free(item);
27818d689e04SGleb Smirnoff 		return;
27828d689e04SGleb Smirnoff 	}
27838d689e04SGleb Smirnoff #endif
27845d1ae027SRobert Watson #ifdef INVARIANTS
2785e20a199fSJeff Roberson 	if (zone->uz_flags & UMA_ZONE_MALLOC)
27865d1ae027SRobert Watson 		uma_dbg_free(zone, udata, item);
27875d1ae027SRobert Watson 	else
27885d1ae027SRobert Watson 		uma_dbg_free(zone, NULL, item);
27895d1ae027SRobert Watson #endif
2790fc03d22bSJeff Roberson 	if (zone->uz_dtor != NULL)
2791ef72505eSJeff Roberson 		zone->uz_dtor(item, zone->uz_size, udata);
2792ef72505eSJeff Roberson 
2793af7f9b97SJeff Roberson 	/*
2794af7f9b97SJeff Roberson 	 * The race here is acceptable.  If we miss it we'll just have to wait
2795af7f9b97SJeff Roberson 	 * a little longer for the limits to be reset.
2796af7f9b97SJeff Roberson 	 */
2797e20a199fSJeff Roberson 	if (zone->uz_flags & UMA_ZFLAG_FULL)
2798fc03d22bSJeff Roberson 		goto zfree_item;
2799af7f9b97SJeff Roberson 
28005d1ae027SRobert Watson 	/*
28015d1ae027SRobert Watson 	 * If possible, free to the per-CPU cache.  There are two
28025d1ae027SRobert Watson 	 * requirements for safe access to the per-CPU cache: (1) the thread
28035d1ae027SRobert Watson 	 * accessing the cache must not be preempted or yield during access,
28045d1ae027SRobert Watson 	 * and (2) the thread must not migrate CPUs without switching which
28055d1ae027SRobert Watson 	 * cache it accesses.  We rely on a critical section to prevent
28065d1ae027SRobert Watson 	 * preemption and migration.  We release the critical section in
28075d1ae027SRobert Watson 	 * order to acquire the zone mutex if we are unable to free to the
28085d1ae027SRobert Watson 	 * current cache; when we re-acquire the critical section, we must
28095d1ae027SRobert Watson 	 * detect and handle migration if it has occurred.
28105d1ae027SRobert Watson 	 */
2811a553d4b8SJeff Roberson zfree_restart:
28125d1ae027SRobert Watson 	critical_enter();
28135d1ae027SRobert Watson 	cpu = curcpu;
28148355f576SJeff Roberson 	cache = &zone->uz_cpu[cpu];
28158355f576SJeff Roberson 
28168355f576SJeff Roberson zfree_start:
2817a553d4b8SJeff Roberson 	/*
2818fc03d22bSJeff Roberson 	 * Try to free into the allocbucket first to give LIFO ordering
2819fc03d22bSJeff Roberson 	 * for cache-hot datastructures.  Spill over into the freebucket
2820fc03d22bSJeff Roberson 	 * if necessary.  Alloc will swap them if one runs dry.
2821a553d4b8SJeff Roberson 	 */
2822fc03d22bSJeff Roberson 	bucket = cache->uc_allocbucket;
2823fc03d22bSJeff Roberson 	if (bucket == NULL || bucket->ub_cnt >= bucket->ub_entries)
2824fc03d22bSJeff Roberson 		bucket = cache->uc_freebucket;
2825fc03d22bSJeff Roberson 	if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
2826cae33c14SJeff Roberson 		KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL,
28278355f576SJeff Roberson 		    ("uma_zfree: Freeing to non free bucket index."));
2828cae33c14SJeff Roberson 		bucket->ub_bucket[bucket->ub_cnt] = item;
2829cae33c14SJeff Roberson 		bucket->ub_cnt++;
2830773df9abSRobert Watson 		cache->uc_frees++;
28315d1ae027SRobert Watson 		critical_exit();
28328355f576SJeff Roberson 		return;
2833fc03d22bSJeff Roberson 	}
2834fc03d22bSJeff Roberson 
28358355f576SJeff Roberson 	/*
28365d1ae027SRobert Watson 	 * We must go back the zone, which requires acquiring the zone lock,
28375d1ae027SRobert Watson 	 * which in turn means we must release and re-acquire the critical
28385d1ae027SRobert Watson 	 * section.  Since the critical section is released, we may be
28395d1ae027SRobert Watson 	 * preempted or migrate.  As such, make sure not to maintain any
28405d1ae027SRobert Watson 	 * thread-local state specific to the cache from prior to releasing
28415d1ae027SRobert Watson 	 * the critical section.
28428355f576SJeff Roberson 	 */
28435d1ae027SRobert Watson 	critical_exit();
2844fc03d22bSJeff Roberson 	if (zone->uz_count == 0 || bucketdisable)
2845fc03d22bSJeff Roberson 		goto zfree_item;
2846fc03d22bSJeff Roberson 
28474d104ba0SAlexander Motin 	lockfail = 0;
28484d104ba0SAlexander Motin 	if (ZONE_TRYLOCK(zone) == 0) {
28494d104ba0SAlexander Motin 		/* Record contention to size the buckets. */
28508355f576SJeff Roberson 		ZONE_LOCK(zone);
28514d104ba0SAlexander Motin 		lockfail = 1;
28524d104ba0SAlexander Motin 	}
28535d1ae027SRobert Watson 	critical_enter();
28545d1ae027SRobert Watson 	cpu = curcpu;
28555d1ae027SRobert Watson 	cache = &zone->uz_cpu[cpu];
28568355f576SJeff Roberson 
2857fc03d22bSJeff Roberson 	/*
2858fc03d22bSJeff Roberson 	 * Since we have locked the zone we may as well send back our stats.
2859fc03d22bSJeff Roberson 	 */
28600095a784SJeff Roberson 	atomic_add_long(&zone->uz_allocs, cache->uc_allocs);
28610095a784SJeff Roberson 	atomic_add_long(&zone->uz_frees, cache->uc_frees);
2862f4ff923bSRobert Watson 	cache->uc_allocs = 0;
2863f4ff923bSRobert Watson 	cache->uc_frees = 0;
2864f4ff923bSRobert Watson 
28658355f576SJeff Roberson 	bucket = cache->uc_freebucket;
2866fc03d22bSJeff Roberson 	if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
2867fc03d22bSJeff Roberson 		ZONE_UNLOCK(zone);
2868fc03d22bSJeff Roberson 		goto zfree_start;
2869fc03d22bSJeff Roberson 	}
28708355f576SJeff Roberson 	cache->uc_freebucket = NULL;
2871afa5d703SMark Johnston 	/* We are no longer associated with this CPU. */
2872afa5d703SMark Johnston 	critical_exit();
28738355f576SJeff Roberson 
2874ab3185d1SJeff Roberson 	if ((zone->uz_flags & UMA_ZONE_NUMA) != 0)
2875ab3185d1SJeff Roberson 		domain = PCPU_GET(domain);
2876ab3185d1SJeff Roberson 	else
2877ab3185d1SJeff Roberson 		domain = 0;
2878ab3185d1SJeff Roberson 	zdom = &zone->uz_domain[0];
2879ab3185d1SJeff Roberson 
28808355f576SJeff Roberson 	/* Can we throw this on the zone full list? */
28818355f576SJeff Roberson 	if (bucket != NULL) {
28821431a748SGleb Smirnoff 		CTR3(KTR_UMA,
28831431a748SGleb Smirnoff 		    "uma_zfree: zone %s(%p) putting bucket %p on free list",
28841431a748SGleb Smirnoff 		    zone->uz_name, zone, bucket);
2885cae33c14SJeff Roberson 		/* ub_cnt is pointing to the last free item */
2886cae33c14SJeff Roberson 		KASSERT(bucket->ub_cnt != 0,
28878355f576SJeff Roberson 		    ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n"));
2888ab3185d1SJeff Roberson 		LIST_INSERT_HEAD(&zdom->uzd_buckets, bucket, ub_link);
28898355f576SJeff Roberson 	}
2890fc03d22bSJeff Roberson 
28914d104ba0SAlexander Motin 	/*
28924d104ba0SAlexander Motin 	 * We bump the uz count when the cache size is insufficient to
28934d104ba0SAlexander Motin 	 * handle the working set.
28944d104ba0SAlexander Motin 	 */
28954d104ba0SAlexander Motin 	if (lockfail && zone->uz_count < BUCKET_MAX)
28964d104ba0SAlexander Motin 		zone->uz_count++;
2897a553d4b8SJeff Roberson 	ZONE_UNLOCK(zone);
2898a553d4b8SJeff Roberson 
28996fd34d6fSJeff Roberson 	bucket = bucket_alloc(zone, udata, M_NOWAIT);
29001431a748SGleb Smirnoff 	CTR3(KTR_UMA, "uma_zfree: zone %s(%p) allocated bucket %p",
29011431a748SGleb Smirnoff 	    zone->uz_name, zone, bucket);
29024741dcbfSJeff Roberson 	if (bucket) {
2903fc03d22bSJeff Roberson 		critical_enter();
2904fc03d22bSJeff Roberson 		cpu = curcpu;
2905fc03d22bSJeff Roberson 		cache = &zone->uz_cpu[cpu];
2906ab3185d1SJeff Roberson 		if (cache->uc_freebucket == NULL &&
2907ab3185d1SJeff Roberson 		    ((zone->uz_flags & UMA_ZONE_NUMA) == 0 ||
2908ab3185d1SJeff Roberson 		    domain == PCPU_GET(domain))) {
2909fc03d22bSJeff Roberson 			cache->uc_freebucket = bucket;
2910fc03d22bSJeff Roberson 			goto zfree_start;
2911fc03d22bSJeff Roberson 		}
2912fc03d22bSJeff Roberson 		/*
2913fc03d22bSJeff Roberson 		 * We lost the race, start over.  We have to drop our
2914fc03d22bSJeff Roberson 		 * critical section to free the bucket.
2915fc03d22bSJeff Roberson 		 */
2916fc03d22bSJeff Roberson 		critical_exit();
29176fd34d6fSJeff Roberson 		bucket_free(zone, bucket, udata);
2918a553d4b8SJeff Roberson 		goto zfree_restart;
29198355f576SJeff Roberson 	}
29208355f576SJeff Roberson 
2921a553d4b8SJeff Roberson 	/*
2922a553d4b8SJeff Roberson 	 * If nothing else caught this, we'll just do an internal free.
2923a553d4b8SJeff Roberson 	 */
2924fc03d22bSJeff Roberson zfree_item:
29250095a784SJeff Roberson 	zone_free_item(zone, item, udata, SKIP_DTOR);
29268355f576SJeff Roberson 
29278355f576SJeff Roberson 	return;
29288355f576SJeff Roberson }
29298355f576SJeff Roberson 
2930ab3185d1SJeff Roberson void
2931ab3185d1SJeff Roberson uma_zfree_domain(uma_zone_t zone, void *item, void *udata)
2932ab3185d1SJeff Roberson {
2933ab3185d1SJeff Roberson 
2934ab3185d1SJeff Roberson 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
2935ab3185d1SJeff Roberson 	random_harvest_fast_uma(&zone, sizeof(zone), 1, RANDOM_UMA);
2936ab3185d1SJeff Roberson 
2937ab3185d1SJeff Roberson 	CTR2(KTR_UMA, "uma_zfree_domain thread %x zone %s", curthread,
2938ab3185d1SJeff Roberson 	    zone->uz_name);
2939ab3185d1SJeff Roberson 
2940ab3185d1SJeff Roberson 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
2941ab3185d1SJeff Roberson 	    ("uma_zfree_domain: called with spinlock or critical section held"));
2942ab3185d1SJeff Roberson 
2943ab3185d1SJeff Roberson         /* uma_zfree(..., NULL) does nothing, to match free(9). */
2944ab3185d1SJeff Roberson         if (item == NULL)
2945ab3185d1SJeff Roberson                 return;
2946ab3185d1SJeff Roberson 	zone_free_item(zone, item, udata, SKIP_NONE);
2947ab3185d1SJeff Roberson }
2948ab3185d1SJeff Roberson 
29498355f576SJeff Roberson static void
29500095a784SJeff Roberson slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item)
29518355f576SJeff Roberson {
2952ab3185d1SJeff Roberson 	uma_domain_t dom;
295385dcf349SGleb Smirnoff 	uint8_t freei;
2954099a0e58SBosko Milekic 
29550095a784SJeff Roberson 	mtx_assert(&keg->uk_lock, MA_OWNED);
2956e20a199fSJeff Roberson 	MPASS(keg == slab->us_keg);
29578355f576SJeff Roberson 
2958ab3185d1SJeff Roberson 	dom = &keg->uk_domain[slab->us_domain];
2959ab3185d1SJeff Roberson 
29608355f576SJeff Roberson 	/* Do we need to remove from any lists? */
2961099a0e58SBosko Milekic 	if (slab->us_freecount+1 == keg->uk_ipers) {
29628355f576SJeff Roberson 		LIST_REMOVE(slab, us_link);
2963ab3185d1SJeff Roberson 		LIST_INSERT_HEAD(&dom->ud_free_slab, slab, us_link);
29648355f576SJeff Roberson 	} else if (slab->us_freecount == 0) {
29658355f576SJeff Roberson 		LIST_REMOVE(slab, us_link);
2966ab3185d1SJeff Roberson 		LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
29678355f576SJeff Roberson 	}
29688355f576SJeff Roberson 
2969ef72505eSJeff Roberson 	/* Slab management. */
2970ef72505eSJeff Roberson 	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
2971ef72505eSJeff Roberson 	BIT_SET(SLAB_SETSIZE, freei, &slab->us_free);
29728355f576SJeff Roberson 	slab->us_freecount++;
29738355f576SJeff Roberson 
2974ef72505eSJeff Roberson 	/* Keg statistics. */
2975099a0e58SBosko Milekic 	keg->uk_free++;
29760095a784SJeff Roberson }
29770095a784SJeff Roberson 
29780095a784SJeff Roberson static void
29790095a784SJeff Roberson zone_release(uma_zone_t zone, void **bucket, int cnt)
29800095a784SJeff Roberson {
29810095a784SJeff Roberson 	void *item;
29820095a784SJeff Roberson 	uma_slab_t slab;
29830095a784SJeff Roberson 	uma_keg_t keg;
29840095a784SJeff Roberson 	uint8_t *mem;
29850095a784SJeff Roberson 	int clearfull;
29860095a784SJeff Roberson 	int i;
29878355f576SJeff Roberson 
2988e20a199fSJeff Roberson 	clearfull = 0;
29890095a784SJeff Roberson 	keg = zone_first_keg(zone);
2990af526374SJeff Roberson 	KEG_LOCK(keg);
29910095a784SJeff Roberson 	for (i = 0; i < cnt; i++) {
29920095a784SJeff Roberson 		item = bucket[i];
29930095a784SJeff Roberson 		if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) {
29940095a784SJeff Roberson 			mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
29950095a784SJeff Roberson 			if (zone->uz_flags & UMA_ZONE_HASH) {
29960095a784SJeff Roberson 				slab = hash_sfind(&keg->uk_hash, mem);
29970095a784SJeff Roberson 			} else {
29980095a784SJeff Roberson 				mem += keg->uk_pgoff;
29990095a784SJeff Roberson 				slab = (uma_slab_t)mem;
30000095a784SJeff Roberson 			}
30010095a784SJeff Roberson 		} else {
30020095a784SJeff Roberson 			slab = vtoslab((vm_offset_t)item);
30030095a784SJeff Roberson 			if (slab->us_keg != keg) {
30040095a784SJeff Roberson 				KEG_UNLOCK(keg);
30050095a784SJeff Roberson 				keg = slab->us_keg;
30060095a784SJeff Roberson 				KEG_LOCK(keg);
30070095a784SJeff Roberson 			}
30080095a784SJeff Roberson 		}
30090095a784SJeff Roberson 		slab_free_item(keg, slab, item);
3010099a0e58SBosko Milekic 		if (keg->uk_flags & UMA_ZFLAG_FULL) {
3011e20a199fSJeff Roberson 			if (keg->uk_pages < keg->uk_maxpages) {
3012099a0e58SBosko Milekic 				keg->uk_flags &= ~UMA_ZFLAG_FULL;
3013e20a199fSJeff Roberson 				clearfull = 1;
3014e20a199fSJeff Roberson 			}
3015af7f9b97SJeff Roberson 
301677380291SMohan Srinivasan 			/*
3017ef72505eSJeff Roberson 			 * We can handle one more allocation. Since we're
3018ef72505eSJeff Roberson 			 * clearing ZFLAG_FULL, wake up all procs blocked
3019ef72505eSJeff Roberson 			 * on pages. This should be uncommon, so keeping this
3020ef72505eSJeff Roberson 			 * simple for now (rather than adding count of blocked
302177380291SMohan Srinivasan 			 * threads etc).
302277380291SMohan Srinivasan 			 */
302377380291SMohan Srinivasan 			wakeup(keg);
3024af7f9b97SJeff Roberson 		}
30250095a784SJeff Roberson 	}
3026af526374SJeff Roberson 	KEG_UNLOCK(keg);
30270095a784SJeff Roberson 	if (clearfull) {
3028af526374SJeff Roberson 		ZONE_LOCK(zone);
3029e20a199fSJeff Roberson 		zone->uz_flags &= ~UMA_ZFLAG_FULL;
3030e20a199fSJeff Roberson 		wakeup(zone);
3031605cbd6aSJeff Roberson 		ZONE_UNLOCK(zone);
3032af526374SJeff Roberson 	}
3033ef72505eSJeff Roberson 
30348355f576SJeff Roberson }
30358355f576SJeff Roberson 
30360095a784SJeff Roberson /*
30370095a784SJeff Roberson  * Frees a single item to any zone.
30380095a784SJeff Roberson  *
30390095a784SJeff Roberson  * Arguments:
30400095a784SJeff Roberson  *	zone   The zone to free to
30410095a784SJeff Roberson  *	item   The item we're freeing
30420095a784SJeff Roberson  *	udata  User supplied data for the dtor
30430095a784SJeff Roberson  *	skip   Skip dtors and finis
30440095a784SJeff Roberson  */
30450095a784SJeff Roberson static void
30460095a784SJeff Roberson zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip)
30470095a784SJeff Roberson {
30480095a784SJeff Roberson 
30490095a784SJeff Roberson #ifdef INVARIANTS
30500095a784SJeff Roberson 	if (skip == SKIP_NONE) {
30510095a784SJeff Roberson 		if (zone->uz_flags & UMA_ZONE_MALLOC)
30520095a784SJeff Roberson 			uma_dbg_free(zone, udata, item);
30530095a784SJeff Roberson 		else
30540095a784SJeff Roberson 			uma_dbg_free(zone, NULL, item);
30550095a784SJeff Roberson 	}
30560095a784SJeff Roberson #endif
30570095a784SJeff Roberson 	if (skip < SKIP_DTOR && zone->uz_dtor)
30580095a784SJeff Roberson 		zone->uz_dtor(item, zone->uz_size, udata);
30590095a784SJeff Roberson 
30600095a784SJeff Roberson 	if (skip < SKIP_FINI && zone->uz_fini)
30610095a784SJeff Roberson 		zone->uz_fini(item, zone->uz_size);
30620095a784SJeff Roberson 
30630095a784SJeff Roberson 	atomic_add_long(&zone->uz_frees, 1);
30640095a784SJeff Roberson 	zone->uz_release(zone->uz_arg, &item, 1);
30650095a784SJeff Roberson }
30660095a784SJeff Roberson 
30678355f576SJeff Roberson /* See uma.h */
30681c6cae97SLawrence Stewart int
3069736ee590SJeff Roberson uma_zone_set_max(uma_zone_t zone, int nitems)
3070736ee590SJeff Roberson {
3071099a0e58SBosko Milekic 	uma_keg_t keg;
3072099a0e58SBosko Milekic 
3073e20a199fSJeff Roberson 	keg = zone_first_keg(zone);
30740095a784SJeff Roberson 	if (keg == NULL)
30750095a784SJeff Roberson 		return (0);
3076af526374SJeff Roberson 	KEG_LOCK(keg);
3077e20a199fSJeff Roberson 	keg->uk_maxpages = (nitems / keg->uk_ipers) * keg->uk_ppera;
3078099a0e58SBosko Milekic 	if (keg->uk_maxpages * keg->uk_ipers < nitems)
3079e20a199fSJeff Roberson 		keg->uk_maxpages += keg->uk_ppera;
308057223e99SAndriy Gapon 	nitems = (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers;
3081af526374SJeff Roberson 	KEG_UNLOCK(keg);
30821c6cae97SLawrence Stewart 
30831c6cae97SLawrence Stewart 	return (nitems);
3084736ee590SJeff Roberson }
3085736ee590SJeff Roberson 
3086736ee590SJeff Roberson /* See uma.h */
3087e49471b0SAndre Oppermann int
3088e49471b0SAndre Oppermann uma_zone_get_max(uma_zone_t zone)
3089e49471b0SAndre Oppermann {
3090e49471b0SAndre Oppermann 	int nitems;
3091e49471b0SAndre Oppermann 	uma_keg_t keg;
3092e49471b0SAndre Oppermann 
3093e49471b0SAndre Oppermann 	keg = zone_first_keg(zone);
30940095a784SJeff Roberson 	if (keg == NULL)
30950095a784SJeff Roberson 		return (0);
3096af526374SJeff Roberson 	KEG_LOCK(keg);
309757223e99SAndriy Gapon 	nitems = (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers;
3098af526374SJeff Roberson 	KEG_UNLOCK(keg);
3099e49471b0SAndre Oppermann 
3100e49471b0SAndre Oppermann 	return (nitems);
3101e49471b0SAndre Oppermann }
3102e49471b0SAndre Oppermann 
3103e49471b0SAndre Oppermann /* See uma.h */
31042f891cd5SPawel Jakub Dawidek void
31052f891cd5SPawel Jakub Dawidek uma_zone_set_warning(uma_zone_t zone, const char *warning)
31062f891cd5SPawel Jakub Dawidek {
31072f891cd5SPawel Jakub Dawidek 
31082f891cd5SPawel Jakub Dawidek 	ZONE_LOCK(zone);
31092f891cd5SPawel Jakub Dawidek 	zone->uz_warning = warning;
31102f891cd5SPawel Jakub Dawidek 	ZONE_UNLOCK(zone);
31112f891cd5SPawel Jakub Dawidek }
31122f891cd5SPawel Jakub Dawidek 
31132f891cd5SPawel Jakub Dawidek /* See uma.h */
311454503a13SJonathan T. Looney void
311554503a13SJonathan T. Looney uma_zone_set_maxaction(uma_zone_t zone, uma_maxaction_t maxaction)
311654503a13SJonathan T. Looney {
311754503a13SJonathan T. Looney 
311854503a13SJonathan T. Looney 	ZONE_LOCK(zone);
3119e60b2fcbSGleb Smirnoff 	TASK_INIT(&zone->uz_maxaction, 0, (task_fn_t *)maxaction, zone);
312054503a13SJonathan T. Looney 	ZONE_UNLOCK(zone);
312154503a13SJonathan T. Looney }
312254503a13SJonathan T. Looney 
312354503a13SJonathan T. Looney /* See uma.h */
3124c4ae7908SLawrence Stewart int
3125c4ae7908SLawrence Stewart uma_zone_get_cur(uma_zone_t zone)
3126c4ae7908SLawrence Stewart {
3127c4ae7908SLawrence Stewart 	int64_t nitems;
3128c4ae7908SLawrence Stewart 	u_int i;
3129c4ae7908SLawrence Stewart 
3130c4ae7908SLawrence Stewart 	ZONE_LOCK(zone);
3131c4ae7908SLawrence Stewart 	nitems = zone->uz_allocs - zone->uz_frees;
3132c4ae7908SLawrence Stewart 	CPU_FOREACH(i) {
3133c4ae7908SLawrence Stewart 		/*
3134c4ae7908SLawrence Stewart 		 * See the comment in sysctl_vm_zone_stats() regarding the
3135c4ae7908SLawrence Stewart 		 * safety of accessing the per-cpu caches. With the zone lock
3136c4ae7908SLawrence Stewart 		 * held, it is safe, but can potentially result in stale data.
3137c4ae7908SLawrence Stewart 		 */
3138c4ae7908SLawrence Stewart 		nitems += zone->uz_cpu[i].uc_allocs -
3139c4ae7908SLawrence Stewart 		    zone->uz_cpu[i].uc_frees;
3140c4ae7908SLawrence Stewart 	}
3141c4ae7908SLawrence Stewart 	ZONE_UNLOCK(zone);
3142c4ae7908SLawrence Stewart 
3143c4ae7908SLawrence Stewart 	return (nitems < 0 ? 0 : nitems);
3144c4ae7908SLawrence Stewart }
3145c4ae7908SLawrence Stewart 
3146c4ae7908SLawrence Stewart /* See uma.h */
3147736ee590SJeff Roberson void
3148099a0e58SBosko Milekic uma_zone_set_init(uma_zone_t zone, uma_init uminit)
3149099a0e58SBosko Milekic {
3150e20a199fSJeff Roberson 	uma_keg_t keg;
3151e20a199fSJeff Roberson 
3152e20a199fSJeff Roberson 	keg = zone_first_keg(zone);
31530095a784SJeff Roberson 	KASSERT(keg != NULL, ("uma_zone_set_init: Invalid zone type"));
3154af526374SJeff Roberson 	KEG_LOCK(keg);
3155e20a199fSJeff Roberson 	KASSERT(keg->uk_pages == 0,
3156099a0e58SBosko Milekic 	    ("uma_zone_set_init on non-empty keg"));
3157e20a199fSJeff Roberson 	keg->uk_init = uminit;
3158af526374SJeff Roberson 	KEG_UNLOCK(keg);
3159099a0e58SBosko Milekic }
3160099a0e58SBosko Milekic 
3161099a0e58SBosko Milekic /* See uma.h */
3162099a0e58SBosko Milekic void
3163099a0e58SBosko Milekic uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
3164099a0e58SBosko Milekic {
3165e20a199fSJeff Roberson 	uma_keg_t keg;
3166e20a199fSJeff Roberson 
3167e20a199fSJeff Roberson 	keg = zone_first_keg(zone);
31681d2c0c46SDmitry Chagin 	KASSERT(keg != NULL, ("uma_zone_set_fini: Invalid zone type"));
3169af526374SJeff Roberson 	KEG_LOCK(keg);
3170e20a199fSJeff Roberson 	KASSERT(keg->uk_pages == 0,
3171099a0e58SBosko Milekic 	    ("uma_zone_set_fini on non-empty keg"));
3172e20a199fSJeff Roberson 	keg->uk_fini = fini;
3173af526374SJeff Roberson 	KEG_UNLOCK(keg);
3174099a0e58SBosko Milekic }
3175099a0e58SBosko Milekic 
3176099a0e58SBosko Milekic /* See uma.h */
3177099a0e58SBosko Milekic void
3178099a0e58SBosko Milekic uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
3179099a0e58SBosko Milekic {
3180af526374SJeff Roberson 
3181099a0e58SBosko Milekic 	ZONE_LOCK(zone);
3182e20a199fSJeff Roberson 	KASSERT(zone_first_keg(zone)->uk_pages == 0,
3183099a0e58SBosko Milekic 	    ("uma_zone_set_zinit on non-empty keg"));
3184099a0e58SBosko Milekic 	zone->uz_init = zinit;
3185099a0e58SBosko Milekic 	ZONE_UNLOCK(zone);
3186099a0e58SBosko Milekic }
3187099a0e58SBosko Milekic 
3188099a0e58SBosko Milekic /* See uma.h */
3189099a0e58SBosko Milekic void
3190099a0e58SBosko Milekic uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
3191099a0e58SBosko Milekic {
3192af526374SJeff Roberson 
3193099a0e58SBosko Milekic 	ZONE_LOCK(zone);
3194e20a199fSJeff Roberson 	KASSERT(zone_first_keg(zone)->uk_pages == 0,
3195099a0e58SBosko Milekic 	    ("uma_zone_set_zfini on non-empty keg"));
3196099a0e58SBosko Milekic 	zone->uz_fini = zfini;
3197099a0e58SBosko Milekic 	ZONE_UNLOCK(zone);
3198099a0e58SBosko Milekic }
3199099a0e58SBosko Milekic 
3200099a0e58SBosko Milekic /* See uma.h */
3201b23f72e9SBrian Feldman /* XXX uk_freef is not actually used with the zone locked */
3202099a0e58SBosko Milekic void
32038355f576SJeff Roberson uma_zone_set_freef(uma_zone_t zone, uma_free freef)
32048355f576SJeff Roberson {
32050095a784SJeff Roberson 	uma_keg_t keg;
3206e20a199fSJeff Roberson 
32070095a784SJeff Roberson 	keg = zone_first_keg(zone);
32081d2c0c46SDmitry Chagin 	KASSERT(keg != NULL, ("uma_zone_set_freef: Invalid zone type"));
3209af526374SJeff Roberson 	KEG_LOCK(keg);
32100095a784SJeff Roberson 	keg->uk_freef = freef;
3211af526374SJeff Roberson 	KEG_UNLOCK(keg);
32128355f576SJeff Roberson }
32138355f576SJeff Roberson 
32148355f576SJeff Roberson /* See uma.h */
3215b23f72e9SBrian Feldman /* XXX uk_allocf is not actually used with the zone locked */
32168355f576SJeff Roberson void
32178355f576SJeff Roberson uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
32188355f576SJeff Roberson {
3219e20a199fSJeff Roberson 	uma_keg_t keg;
3220e20a199fSJeff Roberson 
3221e20a199fSJeff Roberson 	keg = zone_first_keg(zone);
3222af526374SJeff Roberson 	KEG_LOCK(keg);
3223e20a199fSJeff Roberson 	keg->uk_allocf = allocf;
3224af526374SJeff Roberson 	KEG_UNLOCK(keg);
32258355f576SJeff Roberson }
32268355f576SJeff Roberson 
32278355f576SJeff Roberson /* See uma.h */
32286fd34d6fSJeff Roberson void
32296fd34d6fSJeff Roberson uma_zone_reserve(uma_zone_t zone, int items)
32306fd34d6fSJeff Roberson {
32316fd34d6fSJeff Roberson 	uma_keg_t keg;
32326fd34d6fSJeff Roberson 
32336fd34d6fSJeff Roberson 	keg = zone_first_keg(zone);
32346fd34d6fSJeff Roberson 	if (keg == NULL)
32356fd34d6fSJeff Roberson 		return;
32366fd34d6fSJeff Roberson 	KEG_LOCK(keg);
32376fd34d6fSJeff Roberson 	keg->uk_reserve = items;
32386fd34d6fSJeff Roberson 	KEG_UNLOCK(keg);
32396fd34d6fSJeff Roberson 
32406fd34d6fSJeff Roberson 	return;
32416fd34d6fSJeff Roberson }
32426fd34d6fSJeff Roberson 
32436fd34d6fSJeff Roberson /* See uma.h */
32448355f576SJeff Roberson int
3245a4915c21SAttilio Rao uma_zone_reserve_kva(uma_zone_t zone, int count)
32468355f576SJeff Roberson {
3247099a0e58SBosko Milekic 	uma_keg_t keg;
32488355f576SJeff Roberson 	vm_offset_t kva;
32499ba30bcbSZbigniew Bodek 	u_int pages;
32508355f576SJeff Roberson 
3251e20a199fSJeff Roberson 	keg = zone_first_keg(zone);
32520095a784SJeff Roberson 	if (keg == NULL)
32530095a784SJeff Roberson 		return (0);
3254099a0e58SBosko Milekic 	pages = count / keg->uk_ipers;
32558355f576SJeff Roberson 
3256099a0e58SBosko Milekic 	if (pages * keg->uk_ipers < count)
32578355f576SJeff Roberson 		pages++;
325857223e99SAndriy Gapon 	pages *= keg->uk_ppera;
3259a553d4b8SJeff Roberson 
3260a4915c21SAttilio Rao #ifdef UMA_MD_SMALL_ALLOC
3261a4915c21SAttilio Rao 	if (keg->uk_ppera > 1) {
3262a4915c21SAttilio Rao #else
3263a4915c21SAttilio Rao 	if (1) {
3264a4915c21SAttilio Rao #endif
326557223e99SAndriy Gapon 		kva = kva_alloc((vm_size_t)pages * PAGE_SIZE);
3266d1f42ac2SAlan Cox 		if (kva == 0)
32678355f576SJeff Roberson 			return (0);
3268a4915c21SAttilio Rao 	} else
3269a4915c21SAttilio Rao 		kva = 0;
3270af526374SJeff Roberson 	KEG_LOCK(keg);
3271099a0e58SBosko Milekic 	keg->uk_kva = kva;
3272a4915c21SAttilio Rao 	keg->uk_offset = 0;
3273099a0e58SBosko Milekic 	keg->uk_maxpages = pages;
3274a4915c21SAttilio Rao #ifdef UMA_MD_SMALL_ALLOC
3275a4915c21SAttilio Rao 	keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc;
3276a4915c21SAttilio Rao #else
3277a4915c21SAttilio Rao 	keg->uk_allocf = noobj_alloc;
3278a4915c21SAttilio Rao #endif
32796fd34d6fSJeff Roberson 	keg->uk_flags |= UMA_ZONE_NOFREE;
3280af526374SJeff Roberson 	KEG_UNLOCK(keg);
3281af526374SJeff Roberson 
32828355f576SJeff Roberson 	return (1);
32838355f576SJeff Roberson }
32848355f576SJeff Roberson 
32858355f576SJeff Roberson /* See uma.h */
32868355f576SJeff Roberson void
32878355f576SJeff Roberson uma_prealloc(uma_zone_t zone, int items)
32888355f576SJeff Roberson {
3289ab3185d1SJeff Roberson 	uma_domain_t dom;
32908355f576SJeff Roberson 	uma_slab_t slab;
3291099a0e58SBosko Milekic 	uma_keg_t keg;
3292ab3185d1SJeff Roberson 	int domain, slabs;
32938355f576SJeff Roberson 
3294e20a199fSJeff Roberson 	keg = zone_first_keg(zone);
32950095a784SJeff Roberson 	if (keg == NULL)
32960095a784SJeff Roberson 		return;
3297af526374SJeff Roberson 	KEG_LOCK(keg);
3298099a0e58SBosko Milekic 	slabs = items / keg->uk_ipers;
3299ab3185d1SJeff Roberson 	domain = 0;
3300099a0e58SBosko Milekic 	if (slabs * keg->uk_ipers < items)
33018355f576SJeff Roberson 		slabs++;
33028355f576SJeff Roberson 	while (slabs > 0) {
3303ab3185d1SJeff Roberson 		slab = keg_alloc_slab(keg, zone, domain, M_WAITOK);
3304e20a199fSJeff Roberson 		if (slab == NULL)
3305e20a199fSJeff Roberson 			break;
3306e20a199fSJeff Roberson 		MPASS(slab->us_keg == keg);
3307ab3185d1SJeff Roberson 		dom = &keg->uk_domain[slab->us_domain];
3308ab3185d1SJeff Roberson 		LIST_INSERT_HEAD(&dom->ud_free_slab, slab, us_link);
33098355f576SJeff Roberson 		slabs--;
3310ab3185d1SJeff Roberson 		domain = (domain + 1) % vm_ndomains;
33118355f576SJeff Roberson 	}
3312af526374SJeff Roberson 	KEG_UNLOCK(keg);
33138355f576SJeff Roberson }
33148355f576SJeff Roberson 
33158355f576SJeff Roberson /* See uma.h */
331644ec2b63SKonstantin Belousov static void
331744ec2b63SKonstantin Belousov uma_reclaim_locked(bool kmem_danger)
33188355f576SJeff Roberson {
331944ec2b63SKonstantin Belousov 
33201431a748SGleb Smirnoff 	CTR0(KTR_UMA, "UMA: vm asked us to release pages!");
332144ec2b63SKonstantin Belousov 	sx_assert(&uma_drain_lock, SA_XLOCKED);
332286bbae32SJeff Roberson 	bucket_enable();
33238355f576SJeff Roberson 	zone_foreach(zone_drain);
332444ec2b63SKonstantin Belousov 	if (vm_page_count_min() || kmem_danger) {
3325a2de44abSAlexander Motin 		cache_drain_safe(NULL);
3326a2de44abSAlexander Motin 		zone_foreach(zone_drain);
3327a2de44abSAlexander Motin 	}
33288355f576SJeff Roberson 	/*
33298355f576SJeff Roberson 	 * Some slabs may have been freed but this zone will be visited early
33308355f576SJeff Roberson 	 * we visit again so that we can free pages that are empty once other
33318355f576SJeff Roberson 	 * zones are drained.  We have to do the same for buckets.
33328355f576SJeff Roberson 	 */
33339643769aSJeff Roberson 	zone_drain(slabzone);
3334cae33c14SJeff Roberson 	bucket_zone_drain();
333544ec2b63SKonstantin Belousov }
333644ec2b63SKonstantin Belousov 
333744ec2b63SKonstantin Belousov void
333844ec2b63SKonstantin Belousov uma_reclaim(void)
333944ec2b63SKonstantin Belousov {
334044ec2b63SKonstantin Belousov 
334144ec2b63SKonstantin Belousov 	sx_xlock(&uma_drain_lock);
334244ec2b63SKonstantin Belousov 	uma_reclaim_locked(false);
334395c4bf75SKonstantin Belousov 	sx_xunlock(&uma_drain_lock);
33448355f576SJeff Roberson }
33458355f576SJeff Roberson 
33462e47807cSJeff Roberson static volatile int uma_reclaim_needed;
334744ec2b63SKonstantin Belousov 
334844ec2b63SKonstantin Belousov void
334944ec2b63SKonstantin Belousov uma_reclaim_wakeup(void)
335044ec2b63SKonstantin Belousov {
335144ec2b63SKonstantin Belousov 
33522e47807cSJeff Roberson 	if (atomic_fetchadd_int(&uma_reclaim_needed, 1) == 0)
33532e47807cSJeff Roberson 		wakeup(uma_reclaim);
335444ec2b63SKonstantin Belousov }
335544ec2b63SKonstantin Belousov 
335644ec2b63SKonstantin Belousov void
335744ec2b63SKonstantin Belousov uma_reclaim_worker(void *arg __unused)
335844ec2b63SKonstantin Belousov {
335944ec2b63SKonstantin Belousov 
336044ec2b63SKonstantin Belousov 	for (;;) {
33612e47807cSJeff Roberson 		sx_xlock(&uma_drain_lock);
3362200f8117SKonstantin Belousov 		while (atomic_load_int(&uma_reclaim_needed) == 0)
33632e47807cSJeff Roberson 			sx_sleep(uma_reclaim, &uma_drain_lock, PVM, "umarcl",
33642e47807cSJeff Roberson 			    hz);
33659b43bc27SAndriy Gapon 		sx_xunlock(&uma_drain_lock);
33669b43bc27SAndriy Gapon 		EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_KMEM);
33679b43bc27SAndriy Gapon 		sx_xlock(&uma_drain_lock);
336844ec2b63SKonstantin Belousov 		uma_reclaim_locked(true);
3369200f8117SKonstantin Belousov 		atomic_store_int(&uma_reclaim_needed, 0);
33702e47807cSJeff Roberson 		sx_xunlock(&uma_drain_lock);
33712e47807cSJeff Roberson 		/* Don't fire more than once per-second. */
33722e47807cSJeff Roberson 		pause("umarclslp", hz);
337344ec2b63SKonstantin Belousov 	}
337444ec2b63SKonstantin Belousov }
337544ec2b63SKonstantin Belousov 
3376663b416fSJohn Baldwin /* See uma.h */
3377663b416fSJohn Baldwin int
3378663b416fSJohn Baldwin uma_zone_exhausted(uma_zone_t zone)
3379663b416fSJohn Baldwin {
3380663b416fSJohn Baldwin 	int full;
3381663b416fSJohn Baldwin 
3382663b416fSJohn Baldwin 	ZONE_LOCK(zone);
3383e20a199fSJeff Roberson 	full = (zone->uz_flags & UMA_ZFLAG_FULL);
3384663b416fSJohn Baldwin 	ZONE_UNLOCK(zone);
3385663b416fSJohn Baldwin 	return (full);
3386663b416fSJohn Baldwin }
3387663b416fSJohn Baldwin 
33886c125b8dSMohan Srinivasan int
33896c125b8dSMohan Srinivasan uma_zone_exhausted_nolock(uma_zone_t zone)
33906c125b8dSMohan Srinivasan {
3391e20a199fSJeff Roberson 	return (zone->uz_flags & UMA_ZFLAG_FULL);
33926c125b8dSMohan Srinivasan }
33936c125b8dSMohan Srinivasan 
33948355f576SJeff Roberson void *
3395ab3185d1SJeff Roberson uma_large_malloc_domain(vm_size_t size, int domain, int wait)
33968355f576SJeff Roberson {
3397ab3185d1SJeff Roberson 	vm_offset_t addr;
33988355f576SJeff Roberson 	uma_slab_t slab;
33998355f576SJeff Roberson 
3400ab3185d1SJeff Roberson 	slab = zone_alloc_item(slabzone, NULL, domain, wait);
34018355f576SJeff Roberson 	if (slab == NULL)
34028355f576SJeff Roberson 		return (NULL);
3403ab3185d1SJeff Roberson 	if (domain == UMA_ANYDOMAIN)
3404ab3185d1SJeff Roberson 		addr = kmem_malloc(kernel_arena, size, wait);
3405ab3185d1SJeff Roberson 	else
3406ab3185d1SJeff Roberson 		addr = kmem_malloc_domain(domain, size, wait);
3407ab3185d1SJeff Roberson 	if (addr != 0) {
3408ab3185d1SJeff Roberson 		vsetslab(addr, slab);
3409ab3185d1SJeff Roberson 		slab->us_data = (void *)addr;
3410ab3185d1SJeff Roberson 		slab->us_flags = UMA_SLAB_KERNEL | UMA_SLAB_MALLOC;
34118355f576SJeff Roberson 		slab->us_size = size;
3412ab3185d1SJeff Roberson 		slab->us_domain = vm_phys_domidx(PHYS_TO_VM_PAGE(
3413ab3185d1SJeff Roberson 		    pmap_kextract(addr)));
34142e47807cSJeff Roberson 		uma_total_inc(size);
34158355f576SJeff Roberson 	} else {
34160095a784SJeff Roberson 		zone_free_item(slabzone, slab, NULL, SKIP_NONE);
34178355f576SJeff Roberson 	}
34188355f576SJeff Roberson 
3419ab3185d1SJeff Roberson 	return ((void *)addr);
3420ab3185d1SJeff Roberson }
3421ab3185d1SJeff Roberson 
3422ab3185d1SJeff Roberson void *
3423ab3185d1SJeff Roberson uma_large_malloc(vm_size_t size, int wait)
3424ab3185d1SJeff Roberson {
3425ab3185d1SJeff Roberson 
3426ab3185d1SJeff Roberson 	return uma_large_malloc_domain(size, UMA_ANYDOMAIN, wait);
34278355f576SJeff Roberson }
34288355f576SJeff Roberson 
34298355f576SJeff Roberson void
34308355f576SJeff Roberson uma_large_free(uma_slab_t slab)
34318355f576SJeff Roberson {
3432c325e866SKonstantin Belousov 
3433ab3185d1SJeff Roberson 	KASSERT((slab->us_flags & UMA_SLAB_KERNEL) != 0,
3434ab3185d1SJeff Roberson 	    ("uma_large_free:  Memory not allocated with uma_large_malloc."));
3435ab3185d1SJeff Roberson 	kmem_free(kernel_arena, (vm_offset_t)slab->us_data, slab->us_size);
34362e47807cSJeff Roberson 	uma_total_dec(slab->us_size);
34370095a784SJeff Roberson 	zone_free_item(slabzone, slab, NULL, SKIP_NONE);
34388355f576SJeff Roberson }
34398355f576SJeff Roberson 
344048343a2fSGleb Smirnoff static void
344148343a2fSGleb Smirnoff uma_zero_item(void *item, uma_zone_t zone)
344248343a2fSGleb Smirnoff {
344396c85efbSNathan Whitehorn 	int i;
344448343a2fSGleb Smirnoff 
344548343a2fSGleb Smirnoff 	if (zone->uz_flags & UMA_ZONE_PCPU) {
344696c85efbSNathan Whitehorn 		CPU_FOREACH(i)
344748343a2fSGleb Smirnoff 			bzero(zpcpu_get_cpu(item, i), zone->uz_size);
344848343a2fSGleb Smirnoff 	} else
344948343a2fSGleb Smirnoff 		bzero(item, zone->uz_size);
345048343a2fSGleb Smirnoff }
345148343a2fSGleb Smirnoff 
34522e47807cSJeff Roberson unsigned long
34532e47807cSJeff Roberson uma_limit(void)
34542e47807cSJeff Roberson {
34552e47807cSJeff Roberson 
34562e47807cSJeff Roberson 	return (uma_kmem_limit);
34572e47807cSJeff Roberson }
34582e47807cSJeff Roberson 
34592e47807cSJeff Roberson void
34602e47807cSJeff Roberson uma_set_limit(unsigned long limit)
34612e47807cSJeff Roberson {
34622e47807cSJeff Roberson 
34632e47807cSJeff Roberson 	uma_kmem_limit = limit;
34642e47807cSJeff Roberson }
34652e47807cSJeff Roberson 
34662e47807cSJeff Roberson unsigned long
34672e47807cSJeff Roberson uma_size(void)
34682e47807cSJeff Roberson {
34692e47807cSJeff Roberson 
3470ad5b0f5bSJeff Roberson 	return (uma_kmem_total);
3471ad5b0f5bSJeff Roberson }
3472ad5b0f5bSJeff Roberson 
3473ad5b0f5bSJeff Roberson long
3474ad5b0f5bSJeff Roberson uma_avail(void)
3475ad5b0f5bSJeff Roberson {
3476ad5b0f5bSJeff Roberson 
3477ad5b0f5bSJeff Roberson 	return (uma_kmem_limit - uma_kmem_total);
34782e47807cSJeff Roberson }
34792e47807cSJeff Roberson 
34808355f576SJeff Roberson void
34818355f576SJeff Roberson uma_print_stats(void)
34828355f576SJeff Roberson {
34838355f576SJeff Roberson 	zone_foreach(uma_print_zone);
34848355f576SJeff Roberson }
34858355f576SJeff Roberson 
3486504d5de3SJeff Roberson static void
3487504d5de3SJeff Roberson slab_print(uma_slab_t slab)
3488504d5de3SJeff Roberson {
3489ef72505eSJeff Roberson 	printf("slab: keg %p, data %p, freecount %d\n",
3490ef72505eSJeff Roberson 		slab->us_keg, slab->us_data, slab->us_freecount);
3491504d5de3SJeff Roberson }
3492504d5de3SJeff Roberson 
3493504d5de3SJeff Roberson static void
3494504d5de3SJeff Roberson cache_print(uma_cache_t cache)
3495504d5de3SJeff Roberson {
3496504d5de3SJeff Roberson 	printf("alloc: %p(%d), free: %p(%d)\n",
3497504d5de3SJeff Roberson 		cache->uc_allocbucket,
3498504d5de3SJeff Roberson 		cache->uc_allocbucket?cache->uc_allocbucket->ub_cnt:0,
3499504d5de3SJeff Roberson 		cache->uc_freebucket,
3500504d5de3SJeff Roberson 		cache->uc_freebucket?cache->uc_freebucket->ub_cnt:0);
3501504d5de3SJeff Roberson }
3502504d5de3SJeff Roberson 
3503e20a199fSJeff Roberson static void
3504e20a199fSJeff Roberson uma_print_keg(uma_keg_t keg)
35058355f576SJeff Roberson {
3506ab3185d1SJeff Roberson 	uma_domain_t dom;
3507504d5de3SJeff Roberson 	uma_slab_t slab;
3508ab3185d1SJeff Roberson 	int i;
3509504d5de3SJeff Roberson 
35100b80c1e4SEitan Adler 	printf("keg: %s(%p) size %d(%d) flags %#x ipers %d ppera %d "
3511e20a199fSJeff Roberson 	    "out %d free %d limit %d\n",
3512e20a199fSJeff Roberson 	    keg->uk_name, keg, keg->uk_size, keg->uk_rsize, keg->uk_flags,
3513099a0e58SBosko Milekic 	    keg->uk_ipers, keg->uk_ppera,
351457223e99SAndriy Gapon 	    (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free,
351557223e99SAndriy Gapon 	    keg->uk_free, (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers);
3516ab3185d1SJeff Roberson 	for (i = 0; i < vm_ndomains; i++) {
3517ab3185d1SJeff Roberson 		dom = &keg->uk_domain[i];
3518504d5de3SJeff Roberson 		printf("Part slabs:\n");
3519ab3185d1SJeff Roberson 		LIST_FOREACH(slab, &dom->ud_part_slab, us_link)
3520504d5de3SJeff Roberson 			slab_print(slab);
3521504d5de3SJeff Roberson 		printf("Free slabs:\n");
3522ab3185d1SJeff Roberson 		LIST_FOREACH(slab, &dom->ud_free_slab, us_link)
3523504d5de3SJeff Roberson 			slab_print(slab);
3524504d5de3SJeff Roberson 		printf("Full slabs:\n");
3525ab3185d1SJeff Roberson 		LIST_FOREACH(slab, &dom->ud_full_slab, us_link)
3526504d5de3SJeff Roberson 			slab_print(slab);
3527e20a199fSJeff Roberson 	}
3528ab3185d1SJeff Roberson }
3529e20a199fSJeff Roberson 
3530e20a199fSJeff Roberson void
3531e20a199fSJeff Roberson uma_print_zone(uma_zone_t zone)
3532e20a199fSJeff Roberson {
3533e20a199fSJeff Roberson 	uma_cache_t cache;
3534e20a199fSJeff Roberson 	uma_klink_t kl;
3535e20a199fSJeff Roberson 	int i;
3536e20a199fSJeff Roberson 
35370b80c1e4SEitan Adler 	printf("zone: %s(%p) size %d flags %#x\n",
3538e20a199fSJeff Roberson 	    zone->uz_name, zone, zone->uz_size, zone->uz_flags);
3539e20a199fSJeff Roberson 	LIST_FOREACH(kl, &zone->uz_kegs, kl_link)
3540e20a199fSJeff Roberson 		uma_print_keg(kl->kl_keg);
35413aa6d94eSJohn Baldwin 	CPU_FOREACH(i) {
3542504d5de3SJeff Roberson 		cache = &zone->uz_cpu[i];
3543504d5de3SJeff Roberson 		printf("CPU %d Cache:\n", i);
3544504d5de3SJeff Roberson 		cache_print(cache);
3545504d5de3SJeff Roberson 	}
35468355f576SJeff Roberson }
35478355f576SJeff Roberson 
3548a0d4b0aeSRobert Watson #ifdef DDB
35498355f576SJeff Roberson /*
35507a52a97eSRobert Watson  * Generate statistics across both the zone and its per-cpu cache's.  Return
35517a52a97eSRobert Watson  * desired statistics if the pointer is non-NULL for that statistic.
35527a52a97eSRobert Watson  *
35537a52a97eSRobert Watson  * Note: does not update the zone statistics, as it can't safely clear the
35547a52a97eSRobert Watson  * per-CPU cache statistic.
35557a52a97eSRobert Watson  *
35567a52a97eSRobert Watson  * XXXRW: Following the uc_allocbucket and uc_freebucket pointers here isn't
35577a52a97eSRobert Watson  * safe from off-CPU; we should modify the caches to track this information
35587a52a97eSRobert Watson  * directly so that we don't have to.
35597a52a97eSRobert Watson  */
35607a52a97eSRobert Watson static void
356185dcf349SGleb Smirnoff uma_zone_sumstat(uma_zone_t z, int *cachefreep, uint64_t *allocsp,
356285dcf349SGleb Smirnoff     uint64_t *freesp, uint64_t *sleepsp)
35637a52a97eSRobert Watson {
35647a52a97eSRobert Watson 	uma_cache_t cache;
356585dcf349SGleb Smirnoff 	uint64_t allocs, frees, sleeps;
35667a52a97eSRobert Watson 	int cachefree, cpu;
35677a52a97eSRobert Watson 
3568bf965959SSean Bruno 	allocs = frees = sleeps = 0;
35697a52a97eSRobert Watson 	cachefree = 0;
35703aa6d94eSJohn Baldwin 	CPU_FOREACH(cpu) {
35717a52a97eSRobert Watson 		cache = &z->uz_cpu[cpu];
35727a52a97eSRobert Watson 		if (cache->uc_allocbucket != NULL)
35737a52a97eSRobert Watson 			cachefree += cache->uc_allocbucket->ub_cnt;
35747a52a97eSRobert Watson 		if (cache->uc_freebucket != NULL)
35757a52a97eSRobert Watson 			cachefree += cache->uc_freebucket->ub_cnt;
35767a52a97eSRobert Watson 		allocs += cache->uc_allocs;
35777a52a97eSRobert Watson 		frees += cache->uc_frees;
35787a52a97eSRobert Watson 	}
35797a52a97eSRobert Watson 	allocs += z->uz_allocs;
35807a52a97eSRobert Watson 	frees += z->uz_frees;
3581bf965959SSean Bruno 	sleeps += z->uz_sleeps;
35827a52a97eSRobert Watson 	if (cachefreep != NULL)
35837a52a97eSRobert Watson 		*cachefreep = cachefree;
35847a52a97eSRobert Watson 	if (allocsp != NULL)
35857a52a97eSRobert Watson 		*allocsp = allocs;
35867a52a97eSRobert Watson 	if (freesp != NULL)
35877a52a97eSRobert Watson 		*freesp = frees;
3588bf965959SSean Bruno 	if (sleepsp != NULL)
3589bf965959SSean Bruno 		*sleepsp = sleeps;
35907a52a97eSRobert Watson }
3591a0d4b0aeSRobert Watson #endif /* DDB */
35927a52a97eSRobert Watson 
35937a52a97eSRobert Watson static int
35947a52a97eSRobert Watson sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS)
35957a52a97eSRobert Watson {
35967a52a97eSRobert Watson 	uma_keg_t kz;
35977a52a97eSRobert Watson 	uma_zone_t z;
35987a52a97eSRobert Watson 	int count;
35997a52a97eSRobert Watson 
36007a52a97eSRobert Watson 	count = 0;
3601111fbcd5SBryan Venteicher 	rw_rlock(&uma_rwlock);
36027a52a97eSRobert Watson 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
36037a52a97eSRobert Watson 		LIST_FOREACH(z, &kz->uk_zones, uz_link)
36047a52a97eSRobert Watson 			count++;
36057a52a97eSRobert Watson 	}
3606111fbcd5SBryan Venteicher 	rw_runlock(&uma_rwlock);
36077a52a97eSRobert Watson 	return (sysctl_handle_int(oidp, &count, 0, req));
36087a52a97eSRobert Watson }
36097a52a97eSRobert Watson 
36107a52a97eSRobert Watson static int
36117a52a97eSRobert Watson sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
36127a52a97eSRobert Watson {
36137a52a97eSRobert Watson 	struct uma_stream_header ush;
36147a52a97eSRobert Watson 	struct uma_type_header uth;
36157a52a97eSRobert Watson 	struct uma_percpu_stat ups;
36167a52a97eSRobert Watson 	uma_bucket_t bucket;
3617ab3185d1SJeff Roberson 	uma_zone_domain_t zdom;
36187a52a97eSRobert Watson 	struct sbuf sbuf;
36197a52a97eSRobert Watson 	uma_cache_t cache;
3620e20a199fSJeff Roberson 	uma_klink_t kl;
36217a52a97eSRobert Watson 	uma_keg_t kz;
36227a52a97eSRobert Watson 	uma_zone_t z;
3623e20a199fSJeff Roberson 	uma_keg_t k;
36244e657159SMatthew D Fleming 	int count, error, i;
36257a52a97eSRobert Watson 
362600f0e671SMatthew D Fleming 	error = sysctl_wire_old_buffer(req, 0);
362700f0e671SMatthew D Fleming 	if (error != 0)
362800f0e671SMatthew D Fleming 		return (error);
36294e657159SMatthew D Fleming 	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
36301eafc078SIan Lepore 	sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL);
36314e657159SMatthew D Fleming 
3632404a593eSMatthew D Fleming 	count = 0;
3633111fbcd5SBryan Venteicher 	rw_rlock(&uma_rwlock);
36347a52a97eSRobert Watson 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
36357a52a97eSRobert Watson 		LIST_FOREACH(z, &kz->uk_zones, uz_link)
36367a52a97eSRobert Watson 			count++;
36377a52a97eSRobert Watson 	}
36387a52a97eSRobert Watson 
36397a52a97eSRobert Watson 	/*
36407a52a97eSRobert Watson 	 * Insert stream header.
36417a52a97eSRobert Watson 	 */
36427a52a97eSRobert Watson 	bzero(&ush, sizeof(ush));
36437a52a97eSRobert Watson 	ush.ush_version = UMA_STREAM_VERSION;
3644ab3a57c0SRobert Watson 	ush.ush_maxcpus = (mp_maxid + 1);
36457a52a97eSRobert Watson 	ush.ush_count = count;
36464e657159SMatthew D Fleming 	(void)sbuf_bcat(&sbuf, &ush, sizeof(ush));
36477a52a97eSRobert Watson 
36487a52a97eSRobert Watson 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
36497a52a97eSRobert Watson 		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
36507a52a97eSRobert Watson 			bzero(&uth, sizeof(uth));
36517a52a97eSRobert Watson 			ZONE_LOCK(z);
3652cbbb4a00SRobert Watson 			strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
36537a52a97eSRobert Watson 			uth.uth_align = kz->uk_align;
36547a52a97eSRobert Watson 			uth.uth_size = kz->uk_size;
36557a52a97eSRobert Watson 			uth.uth_rsize = kz->uk_rsize;
3656e20a199fSJeff Roberson 			LIST_FOREACH(kl, &z->uz_kegs, kl_link) {
3657e20a199fSJeff Roberson 				k = kl->kl_keg;
3658e20a199fSJeff Roberson 				uth.uth_maxpages += k->uk_maxpages;
3659e20a199fSJeff Roberson 				uth.uth_pages += k->uk_pages;
3660e20a199fSJeff Roberson 				uth.uth_keg_free += k->uk_free;
3661e20a199fSJeff Roberson 				uth.uth_limit = (k->uk_maxpages / k->uk_ppera)
3662e20a199fSJeff Roberson 				    * k->uk_ipers;
3663e20a199fSJeff Roberson 			}
3664cbbb4a00SRobert Watson 
3665cbbb4a00SRobert Watson 			/*
3666cbbb4a00SRobert Watson 			 * A zone is secondary is it is not the first entry
3667cbbb4a00SRobert Watson 			 * on the keg's zone list.
3668cbbb4a00SRobert Watson 			 */
3669e20a199fSJeff Roberson 			if ((z->uz_flags & UMA_ZONE_SECONDARY) &&
3670cbbb4a00SRobert Watson 			    (LIST_FIRST(&kz->uk_zones) != z))
3671cbbb4a00SRobert Watson 				uth.uth_zone_flags = UTH_ZONE_SECONDARY;
3672cbbb4a00SRobert Watson 
3673ab3185d1SJeff Roberson 			for (i = 0; i < vm_ndomains; i++) {
3674ab3185d1SJeff Roberson 				zdom = &z->uz_domain[i];
3675ab3185d1SJeff Roberson 				LIST_FOREACH(bucket, &zdom->uzd_buckets,
3676ab3185d1SJeff Roberson 				    ub_link)
36777a52a97eSRobert Watson 					uth.uth_zone_free += bucket->ub_cnt;
3678ab3185d1SJeff Roberson 			}
36797a52a97eSRobert Watson 			uth.uth_allocs = z->uz_allocs;
36807a52a97eSRobert Watson 			uth.uth_frees = z->uz_frees;
36812019094aSRobert Watson 			uth.uth_fails = z->uz_fails;
3682bf965959SSean Bruno 			uth.uth_sleeps = z->uz_sleeps;
36834e657159SMatthew D Fleming 			(void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
36847a52a97eSRobert Watson 			/*
36852450bbb8SRobert Watson 			 * While it is not normally safe to access the cache
36862450bbb8SRobert Watson 			 * bucket pointers while not on the CPU that owns the
36872450bbb8SRobert Watson 			 * cache, we only allow the pointers to be exchanged
36882450bbb8SRobert Watson 			 * without the zone lock held, not invalidated, so
36892450bbb8SRobert Watson 			 * accept the possible race associated with bucket
36902450bbb8SRobert Watson 			 * exchange during monitoring.
36917a52a97eSRobert Watson 			 */
3692ab3a57c0SRobert Watson 			for (i = 0; i < (mp_maxid + 1); i++) {
36937a52a97eSRobert Watson 				bzero(&ups, sizeof(ups));
36947a52a97eSRobert Watson 				if (kz->uk_flags & UMA_ZFLAG_INTERNAL)
36957a52a97eSRobert Watson 					goto skip;
3696082dc776SRobert Watson 				if (CPU_ABSENT(i))
3697082dc776SRobert Watson 					goto skip;
36987a52a97eSRobert Watson 				cache = &z->uz_cpu[i];
36997a52a97eSRobert Watson 				if (cache->uc_allocbucket != NULL)
37007a52a97eSRobert Watson 					ups.ups_cache_free +=
37017a52a97eSRobert Watson 					    cache->uc_allocbucket->ub_cnt;
37027a52a97eSRobert Watson 				if (cache->uc_freebucket != NULL)
37037a52a97eSRobert Watson 					ups.ups_cache_free +=
37047a52a97eSRobert Watson 					    cache->uc_freebucket->ub_cnt;
37057a52a97eSRobert Watson 				ups.ups_allocs = cache->uc_allocs;
37067a52a97eSRobert Watson 				ups.ups_frees = cache->uc_frees;
37077a52a97eSRobert Watson skip:
37084e657159SMatthew D Fleming 				(void)sbuf_bcat(&sbuf, &ups, sizeof(ups));
37097a52a97eSRobert Watson 			}
37102450bbb8SRobert Watson 			ZONE_UNLOCK(z);
37117a52a97eSRobert Watson 		}
37127a52a97eSRobert Watson 	}
3713111fbcd5SBryan Venteicher 	rw_runlock(&uma_rwlock);
37144e657159SMatthew D Fleming 	error = sbuf_finish(&sbuf);
37154e657159SMatthew D Fleming 	sbuf_delete(&sbuf);
37167a52a97eSRobert Watson 	return (error);
37177a52a97eSRobert Watson }
371848c5777eSRobert Watson 
37190a5a3ccbSGleb Smirnoff int
37200a5a3ccbSGleb Smirnoff sysctl_handle_uma_zone_max(SYSCTL_HANDLER_ARGS)
37210a5a3ccbSGleb Smirnoff {
37220a5a3ccbSGleb Smirnoff 	uma_zone_t zone = *(uma_zone_t *)arg1;
372316be9f54SGleb Smirnoff 	int error, max;
37240a5a3ccbSGleb Smirnoff 
372516be9f54SGleb Smirnoff 	max = uma_zone_get_max(zone);
37260a5a3ccbSGleb Smirnoff 	error = sysctl_handle_int(oidp, &max, 0, req);
37270a5a3ccbSGleb Smirnoff 	if (error || !req->newptr)
37280a5a3ccbSGleb Smirnoff 		return (error);
37290a5a3ccbSGleb Smirnoff 
37300a5a3ccbSGleb Smirnoff 	uma_zone_set_max(zone, max);
37310a5a3ccbSGleb Smirnoff 
37320a5a3ccbSGleb Smirnoff 	return (0);
37330a5a3ccbSGleb Smirnoff }
37340a5a3ccbSGleb Smirnoff 
37350a5a3ccbSGleb Smirnoff int
37360a5a3ccbSGleb Smirnoff sysctl_handle_uma_zone_cur(SYSCTL_HANDLER_ARGS)
37370a5a3ccbSGleb Smirnoff {
37380a5a3ccbSGleb Smirnoff 	uma_zone_t zone = *(uma_zone_t *)arg1;
37390a5a3ccbSGleb Smirnoff 	int cur;
37400a5a3ccbSGleb Smirnoff 
37410a5a3ccbSGleb Smirnoff 	cur = uma_zone_get_cur(zone);
37420a5a3ccbSGleb Smirnoff 	return (sysctl_handle_int(oidp, &cur, 0, req));
37430a5a3ccbSGleb Smirnoff }
37440a5a3ccbSGleb Smirnoff 
37459542ea7bSGleb Smirnoff #ifdef INVARIANTS
37469542ea7bSGleb Smirnoff static uma_slab_t
37479542ea7bSGleb Smirnoff uma_dbg_getslab(uma_zone_t zone, void *item)
37489542ea7bSGleb Smirnoff {
37499542ea7bSGleb Smirnoff 	uma_slab_t slab;
37509542ea7bSGleb Smirnoff 	uma_keg_t keg;
37519542ea7bSGleb Smirnoff 	uint8_t *mem;
37529542ea7bSGleb Smirnoff 
37539542ea7bSGleb Smirnoff 	mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
37549542ea7bSGleb Smirnoff 	if (zone->uz_flags & UMA_ZONE_VTOSLAB) {
37559542ea7bSGleb Smirnoff 		slab = vtoslab((vm_offset_t)mem);
37569542ea7bSGleb Smirnoff 	} else {
37579542ea7bSGleb Smirnoff 		/*
37589542ea7bSGleb Smirnoff 		 * It is safe to return the slab here even though the
37599542ea7bSGleb Smirnoff 		 * zone is unlocked because the item's allocation state
37609542ea7bSGleb Smirnoff 		 * essentially holds a reference.
37619542ea7bSGleb Smirnoff 		 */
37629542ea7bSGleb Smirnoff 		ZONE_LOCK(zone);
37639542ea7bSGleb Smirnoff 		keg = LIST_FIRST(&zone->uz_kegs)->kl_keg;
37649542ea7bSGleb Smirnoff 		if (keg->uk_flags & UMA_ZONE_HASH)
37659542ea7bSGleb Smirnoff 			slab = hash_sfind(&keg->uk_hash, mem);
37669542ea7bSGleb Smirnoff 		else
37679542ea7bSGleb Smirnoff 			slab = (uma_slab_t)(mem + keg->uk_pgoff);
37689542ea7bSGleb Smirnoff 		ZONE_UNLOCK(zone);
37699542ea7bSGleb Smirnoff 	}
37709542ea7bSGleb Smirnoff 
37719542ea7bSGleb Smirnoff 	return (slab);
37729542ea7bSGleb Smirnoff }
37739542ea7bSGleb Smirnoff 
37749542ea7bSGleb Smirnoff /*
37759542ea7bSGleb Smirnoff  * Set up the slab's freei data such that uma_dbg_free can function.
37769542ea7bSGleb Smirnoff  *
37779542ea7bSGleb Smirnoff  */
37789542ea7bSGleb Smirnoff static void
37799542ea7bSGleb Smirnoff uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item)
37809542ea7bSGleb Smirnoff {
37819542ea7bSGleb Smirnoff 	uma_keg_t keg;
37829542ea7bSGleb Smirnoff 	int freei;
37839542ea7bSGleb Smirnoff 
37849542ea7bSGleb Smirnoff 	if (zone_first_keg(zone) == NULL)
37859542ea7bSGleb Smirnoff 		return;
37869542ea7bSGleb Smirnoff 	if (slab == NULL) {
37879542ea7bSGleb Smirnoff 		slab = uma_dbg_getslab(zone, item);
37889542ea7bSGleb Smirnoff 		if (slab == NULL)
37899542ea7bSGleb Smirnoff 			panic("uma: item %p did not belong to zone %s\n",
37909542ea7bSGleb Smirnoff 			    item, zone->uz_name);
37919542ea7bSGleb Smirnoff 	}
37929542ea7bSGleb Smirnoff 	keg = slab->us_keg;
37939542ea7bSGleb Smirnoff 	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
37949542ea7bSGleb Smirnoff 
37959542ea7bSGleb Smirnoff 	if (BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree))
37969542ea7bSGleb Smirnoff 		panic("Duplicate alloc of %p from zone %p(%s) slab %p(%d)\n",
37979542ea7bSGleb Smirnoff 		    item, zone, zone->uz_name, slab, freei);
37989542ea7bSGleb Smirnoff 	BIT_SET_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree);
37999542ea7bSGleb Smirnoff 
38009542ea7bSGleb Smirnoff 	return;
38019542ea7bSGleb Smirnoff }
38029542ea7bSGleb Smirnoff 
38039542ea7bSGleb Smirnoff /*
38049542ea7bSGleb Smirnoff  * Verifies freed addresses.  Checks for alignment, valid slab membership
38059542ea7bSGleb Smirnoff  * and duplicate frees.
38069542ea7bSGleb Smirnoff  *
38079542ea7bSGleb Smirnoff  */
38089542ea7bSGleb Smirnoff static void
38099542ea7bSGleb Smirnoff uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
38109542ea7bSGleb Smirnoff {
38119542ea7bSGleb Smirnoff 	uma_keg_t keg;
38129542ea7bSGleb Smirnoff 	int freei;
38139542ea7bSGleb Smirnoff 
38149542ea7bSGleb Smirnoff 	if (zone_first_keg(zone) == NULL)
38159542ea7bSGleb Smirnoff 		return;
38169542ea7bSGleb Smirnoff 	if (slab == NULL) {
38179542ea7bSGleb Smirnoff 		slab = uma_dbg_getslab(zone, item);
38189542ea7bSGleb Smirnoff 		if (slab == NULL)
38199542ea7bSGleb Smirnoff 			panic("uma: Freed item %p did not belong to zone %s\n",
38209542ea7bSGleb Smirnoff 			    item, zone->uz_name);
38219542ea7bSGleb Smirnoff 	}
38229542ea7bSGleb Smirnoff 	keg = slab->us_keg;
38239542ea7bSGleb Smirnoff 	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
38249542ea7bSGleb Smirnoff 
38259542ea7bSGleb Smirnoff 	if (freei >= keg->uk_ipers)
38269542ea7bSGleb Smirnoff 		panic("Invalid free of %p from zone %p(%s) slab %p(%d)\n",
38279542ea7bSGleb Smirnoff 		    item, zone, zone->uz_name, slab, freei);
38289542ea7bSGleb Smirnoff 
38299542ea7bSGleb Smirnoff 	if (((freei * keg->uk_rsize) + slab->us_data) != item)
38309542ea7bSGleb Smirnoff 		panic("Unaligned free of %p from zone %p(%s) slab %p(%d)\n",
38319542ea7bSGleb Smirnoff 		    item, zone, zone->uz_name, slab, freei);
38329542ea7bSGleb Smirnoff 
38339542ea7bSGleb Smirnoff 	if (!BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree))
38349542ea7bSGleb Smirnoff 		panic("Duplicate free of %p from zone %p(%s) slab %p(%d)\n",
38359542ea7bSGleb Smirnoff 		    item, zone, zone->uz_name, slab, freei);
38369542ea7bSGleb Smirnoff 
38379542ea7bSGleb Smirnoff 	BIT_CLR_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree);
38389542ea7bSGleb Smirnoff }
38399542ea7bSGleb Smirnoff #endif /* INVARIANTS */
38409542ea7bSGleb Smirnoff 
384148c5777eSRobert Watson #ifdef DDB
384248c5777eSRobert Watson DB_SHOW_COMMAND(uma, db_show_uma)
384348c5777eSRobert Watson {
384448c5777eSRobert Watson 	uma_bucket_t bucket;
384548c5777eSRobert Watson 	uma_keg_t kz;
384648c5777eSRobert Watson 	uma_zone_t z;
3847ab3185d1SJeff Roberson 	uma_zone_domain_t zdom;
3848ab3185d1SJeff Roberson 	uint64_t allocs, frees, sleeps;
3849ab3185d1SJeff Roberson 	int cachefree, i;
385048c5777eSRobert Watson 
385103175483SAlexander Motin 	db_printf("%18s %8s %8s %8s %12s %8s %8s\n", "Zone", "Size", "Used",
385203175483SAlexander Motin 	    "Free", "Requests", "Sleeps", "Bucket");
385348c5777eSRobert Watson 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
385448c5777eSRobert Watson 		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
385548c5777eSRobert Watson 			if (kz->uk_flags & UMA_ZFLAG_INTERNAL) {
385648c5777eSRobert Watson 				allocs = z->uz_allocs;
385748c5777eSRobert Watson 				frees = z->uz_frees;
3858bf965959SSean Bruno 				sleeps = z->uz_sleeps;
385948c5777eSRobert Watson 				cachefree = 0;
386048c5777eSRobert Watson 			} else
386148c5777eSRobert Watson 				uma_zone_sumstat(z, &cachefree, &allocs,
3862bf965959SSean Bruno 				    &frees, &sleeps);
3863e20a199fSJeff Roberson 			if (!((z->uz_flags & UMA_ZONE_SECONDARY) &&
386448c5777eSRobert Watson 			    (LIST_FIRST(&kz->uk_zones) != z)))
386548c5777eSRobert Watson 				cachefree += kz->uk_free;
3866ab3185d1SJeff Roberson 			for (i = 0; i < vm_ndomains; i++) {
3867ab3185d1SJeff Roberson 				zdom = &z->uz_domain[i];
3868ab3185d1SJeff Roberson 				LIST_FOREACH(bucket, &zdom->uzd_buckets,
3869ab3185d1SJeff Roberson 				    ub_link)
387048c5777eSRobert Watson 					cachefree += bucket->ub_cnt;
3871ab3185d1SJeff Roberson 			}
387203175483SAlexander Motin 			db_printf("%18s %8ju %8jd %8d %12ju %8ju %8u\n",
387303175483SAlexander Motin 			    z->uz_name, (uintmax_t)kz->uk_size,
3874ae4e9636SRobert Watson 			    (intmax_t)(allocs - frees), cachefree,
387503175483SAlexander Motin 			    (uintmax_t)allocs, sleeps, z->uz_count);
3876687c94aaSJohn Baldwin 			if (db_pager_quit)
3877687c94aaSJohn Baldwin 				return;
387848c5777eSRobert Watson 		}
387948c5777eSRobert Watson 	}
388048c5777eSRobert Watson }
388103175483SAlexander Motin 
388203175483SAlexander Motin DB_SHOW_COMMAND(umacache, db_show_umacache)
388303175483SAlexander Motin {
388403175483SAlexander Motin 	uma_bucket_t bucket;
388503175483SAlexander Motin 	uma_zone_t z;
3886ab3185d1SJeff Roberson 	uma_zone_domain_t zdom;
3887ab3185d1SJeff Roberson 	uint64_t allocs, frees;
3888ab3185d1SJeff Roberson 	int cachefree, i;
388903175483SAlexander Motin 
389003175483SAlexander Motin 	db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free",
389103175483SAlexander Motin 	    "Requests", "Bucket");
389203175483SAlexander Motin 	LIST_FOREACH(z, &uma_cachezones, uz_link) {
389303175483SAlexander Motin 		uma_zone_sumstat(z, &cachefree, &allocs, &frees, NULL);
3894ab3185d1SJeff Roberson 		for (i = 0; i < vm_ndomains; i++) {
3895ab3185d1SJeff Roberson 			zdom = &z->uz_domain[i];
3896ab3185d1SJeff Roberson 			LIST_FOREACH(bucket, &zdom->uzd_buckets, ub_link)
389703175483SAlexander Motin 				cachefree += bucket->ub_cnt;
3898ab3185d1SJeff Roberson 		}
389903175483SAlexander Motin 		db_printf("%18s %8ju %8jd %8d %12ju %8u\n",
390003175483SAlexander Motin 		    z->uz_name, (uintmax_t)z->uz_size,
390103175483SAlexander Motin 		    (intmax_t)(allocs - frees), cachefree,
390203175483SAlexander Motin 		    (uintmax_t)allocs, z->uz_count);
390303175483SAlexander Motin 		if (db_pager_quit)
390403175483SAlexander Motin 			return;
390503175483SAlexander Motin 	}
390603175483SAlexander Motin }
39079542ea7bSGleb Smirnoff #endif	/* DDB */
3908