xref: /freebsd/sys/vm/uma_core.c (revision 200f8117ba72b9c4e93dd3491f322f0012634309)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson <jeff@FreeBSD.org>
 * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
 * Copyright (c) 2004-2006 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uma_core.c  Implementation of the Universal Memory allocator
 *
 * This allocator is intended to replace the multitude of similar object caches
 * in the standard FreeBSD kernel.  The intent is to be flexible as well as
 * efficient.  A primary design goal is to return unused memory to the rest of
 * the system.  This will make the system as a whole more flexible due to the
 * ability to move memory to subsystems which most need it instead of leaving
 * pools of reserved memory unused.
 *
 * The basic ideas stem from similar slab/zone based allocators whose algorithms
 * are well known.
 *
 */

/*
 * TODO:
 *	- Improve memory usage for large allocations
 *	- Investigate cache size adjustments
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_param.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitset.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

#include <ddb/ddb.h>

#ifdef DEBUG_MEMGUARD
#include <vm/memguard.h>
#endif

/*
 * This is the zone and keg from which all zones are spawned.  The idea is that
 * even the zone & keg heads are allocated from the allocator, so we use the
 * bss section to bootstrap us.
 */
static struct uma_keg masterkeg;
static struct uma_zone masterzone_k;
static struct uma_zone masterzone_z;
static uma_zone_t kegs = &masterzone_k;
static uma_zone_t zones = &masterzone_z;

/* This is the zone from which all uma_slab_t's are allocated. */
static uma_zone_t slabzone;

/*
 * The initial hash tables come out of this zone so they can be allocated
 * prior to malloc coming up.
 */
static uma_zone_t hashzone;

/* The boot-time adjusted value for cache line alignment. */
int uma_align_cache = 64 - 1;

static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");

/*
 * Are we allowed to allocate buckets?
 */
static int bucketdisable = 1;

/* Linked list of all kegs in the system */
static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);

/* Linked list of all cache-only zones in the system */
static LIST_HEAD(,uma_zone) uma_cachezones =
    LIST_HEAD_INITIALIZER(uma_cachezones);

/* This RW lock protects the keg list */
static struct rwlock_padalign __exclusive_cache_line uma_rwlock;

/*
 * Pointer to and counter of the pool of pages that is preallocated at
 * startup to bootstrap UMA.  Early zones continue to use the pool until
 * it is depleted, so allocations may happen after boot; thus we need a
 * mutex to protect it.
 */
static char *bootmem;
static int boot_pages;
static struct mtx uma_boot_pages_mtx;

static struct sx uma_drain_lock;

/* kmem soft limit. */
static unsigned long uma_kmem_limit;
static volatile unsigned long uma_kmem_total;

/* Is the VM done starting up? */
static int booted = 0;
#define	UMA_STARTUP	1
#define	UMA_STARTUP2	2

/*
 * This is the handle used to schedule events that need to happen
 * outside of the allocation fast path.
 */
static struct callout uma_callout;
#define	UMA_TIMEOUT	20		/* Seconds for callout interval. */

/*
 * This structure is passed as the zone ctor arg so that I don't have to create
 * a special allocation function just for zones.
 */
struct uma_zctor_args {
	const char *name;
	size_t size;
	uma_ctor ctor;
	uma_dtor dtor;
	uma_init uminit;
	uma_fini fini;
	uma_import import;
	uma_release release;
	void *arg;
	uma_keg_t keg;
	int align;
	uint32_t flags;
};

struct uma_kctor_args {
	uma_zone_t zone;
	size_t size;
	uma_init uminit;
	uma_fini fini;
	int align;
	uint32_t flags;
};

struct uma_bucket_zone {
	uma_zone_t	ubz_zone;
	char		*ubz_name;
	int		ubz_entries;	/* Number of items it can hold. */
	int		ubz_maxsize;	/* Maximum allocation size per-item. */
};

/*
 * Compute the actual number of bucket entries to pack them in power
 * of two sizes for more efficient space utilization.
 */
#define	BUCKET_SIZE(n)						\
    (((sizeof(void *) * (n)) - sizeof(struct uma_bucket)) / sizeof(void *))

#define	BUCKET_MAX	BUCKET_SIZE(256)
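
/*
 * Worked example of the packing (the concrete numbers assume LP64, i.e.
 * sizeof(void *) == 8, and a 24-byte struct uma_bucket; both are
 * assumptions about uma_int.h rather than something this file guarantees):
 *
 *	BUCKET_SIZE(128) = ((8 * 128) - 24) / 8 = 125 entries
 *
 * bucket_init() below then sizes the "128 Bucket" zone's items as
 * roundup(24, 8) + 8 * 125 = 1024 bytes, i.e. exactly 128 pointer-sized
 * words, which is the power-of-two packing referred to above.
 */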

struct uma_bucket_zone bucket_zones[] = {
	{ NULL, "4 Bucket", BUCKET_SIZE(4), 4096 },
	{ NULL, "6 Bucket", BUCKET_SIZE(6), 3072 },
	{ NULL, "8 Bucket", BUCKET_SIZE(8), 2048 },
	{ NULL, "12 Bucket", BUCKET_SIZE(12), 1536 },
	{ NULL, "16 Bucket", BUCKET_SIZE(16), 1024 },
	{ NULL, "32 Bucket", BUCKET_SIZE(32), 512 },
	{ NULL, "64 Bucket", BUCKET_SIZE(64), 256 },
	{ NULL, "128 Bucket", BUCKET_SIZE(128), 128 },
	{ NULL, "256 Bucket", BUCKET_SIZE(256), 64 },
	{ NULL, NULL, 0}
};

/*
 * Flags and enumerations to be passed to internal functions.
 */
enum zfreeskip { SKIP_NONE = 0, SKIP_DTOR, SKIP_FINI };

/* Prototypes.. */

static void *noobj_alloc(uma_zone_t, vm_size_t, uint8_t *, int);
static void *page_alloc(uma_zone_t, vm_size_t, uint8_t *, int);
static void *startup_alloc(uma_zone_t, vm_size_t, uint8_t *, int);
static void page_free(void *, vm_size_t, uint8_t);
static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int);
static void cache_drain(uma_zone_t);
static void bucket_drain(uma_zone_t, uma_bucket_t);
static void bucket_cache_drain(uma_zone_t zone);
static int keg_ctor(void *, int, void *, int);
static void keg_dtor(void *, int, void *);
static int zone_ctor(void *, int, void *, int);
static void zone_dtor(void *, int, void *);
static int zero_init(void *, int, int);
static void keg_small_init(uma_keg_t keg);
static void keg_large_init(uma_keg_t keg);
static void zone_foreach(void (*zfunc)(uma_zone_t));
static void zone_timeout(uma_zone_t zone);
static int hash_alloc(struct uma_hash *);
static int hash_expand(struct uma_hash *, struct uma_hash *);
static void hash_free(struct uma_hash *hash);
static void uma_timeout(void *);
static void uma_startup3(void);
static void *zone_alloc_item(uma_zone_t, void *, int);
static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip);
static void bucket_enable(void);
static void bucket_init(void);
static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int);
static void bucket_free(uma_zone_t zone, uma_bucket_t, void *);
static void bucket_zone_drain(void);
static uma_bucket_t zone_alloc_bucket(uma_zone_t zone, void *, int flags);
static uma_slab_t zone_fetch_slab(uma_zone_t zone, uma_keg_t last, int flags);
static uma_slab_t zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int flags);
static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
static void slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item);
static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
    uma_fini fini, int align, uint32_t flags);
static int zone_import(uma_zone_t zone, void **bucket, int max, int flags);
static void zone_release(uma_zone_t zone, void **bucket, int cnt);
static void uma_zero_item(void *item, uma_zone_t zone);

void uma_print_zone(uma_zone_t);
void uma_print_stats(void);
static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);

#ifdef INVARIANTS
static void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item);
static void uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item);
#endif

SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);

SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLTYPE_INT,
    0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones");

SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
    0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");

static int zone_warnings = 1;
SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RWTUN, &zone_warnings, 0,
    "Warn when UMA zones become full");

/* Adjust bytes under management by UMA. */
static inline void
uma_total_dec(unsigned long size)
{

	atomic_subtract_long(&uma_kmem_total, size);
}

static inline void
uma_total_inc(unsigned long size)
{

	if (atomic_fetchadd_long(&uma_kmem_total, size) > uma_kmem_limit)
		uma_reclaim_wakeup();
}
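
/*
 * Note that uma_kmem_limit is a soft cap: uma_total_inc() never fails the
 * allocation.  Crossing the limit only wakes the reclaim code through
 * uma_reclaim_wakeup(), so uma_kmem_total may transiently exceed the limit.
 */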
3072e47807cSJeff Roberson 
30886bbae32SJeff Roberson /*
30986bbae32SJeff Roberson  * This routine checks to see whether or not it's safe to enable buckets.
31086bbae32SJeff Roberson  */
31186bbae32SJeff Roberson static void
31286bbae32SJeff Roberson bucket_enable(void)
31386bbae32SJeff Roberson {
314251386b4SMaksim Yevmenkin 	bucketdisable = vm_page_count_min();
31586bbae32SJeff Roberson }
31686bbae32SJeff Roberson 
317dc2c7965SRobert Watson /*
318dc2c7965SRobert Watson  * Initialize bucket_zones, the array of zones of buckets of various sizes.
319dc2c7965SRobert Watson  *
320dc2c7965SRobert Watson  * For each zone, calculate the memory required for each bucket, consisting
321fc03d22bSJeff Roberson  * of the header and an array of pointers.
322dc2c7965SRobert Watson  */
323cae33c14SJeff Roberson static void
324cae33c14SJeff Roberson bucket_init(void)
325cae33c14SJeff Roberson {
326cae33c14SJeff Roberson 	struct uma_bucket_zone *ubz;
327cae33c14SJeff Roberson 	int size;
328cae33c14SJeff Roberson 
329d74e6a1dSAlan Cox 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) {
330cae33c14SJeff Roberson 		size = roundup(sizeof(struct uma_bucket), sizeof(void *));
331cae33c14SJeff Roberson 		size += sizeof(void *) * ubz->ubz_entries;
332cae33c14SJeff Roberson 		ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
333e20a199fSJeff Roberson 		    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
3346fd34d6fSJeff Roberson 		    UMA_ZONE_MTXCLASS | UMA_ZFLAG_BUCKET);
335cae33c14SJeff Roberson 	}
336cae33c14SJeff Roberson }
337cae33c14SJeff Roberson 
338dc2c7965SRobert Watson /*
339dc2c7965SRobert Watson  * Given a desired number of entries for a bucket, return the zone from which
340dc2c7965SRobert Watson  * to allocate the bucket.
341dc2c7965SRobert Watson  */
342dc2c7965SRobert Watson static struct uma_bucket_zone *
343dc2c7965SRobert Watson bucket_zone_lookup(int entries)
344dc2c7965SRobert Watson {
345fc03d22bSJeff Roberson 	struct uma_bucket_zone *ubz;
346dc2c7965SRobert Watson 
347fc03d22bSJeff Roberson 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
348fc03d22bSJeff Roberson 		if (ubz->ubz_entries >= entries)
349fc03d22bSJeff Roberson 			return (ubz);
350fc03d22bSJeff Roberson 	ubz--;
351fc03d22bSJeff Roberson 	return (ubz);
352fc03d22bSJeff Roberson }
353fc03d22bSJeff Roberson 
354fc03d22bSJeff Roberson static int
355fc03d22bSJeff Roberson bucket_select(int size)
356fc03d22bSJeff Roberson {
357fc03d22bSJeff Roberson 	struct uma_bucket_zone *ubz;
358fc03d22bSJeff Roberson 
359fc03d22bSJeff Roberson 	ubz = &bucket_zones[0];
360fc03d22bSJeff Roberson 	if (size > ubz->ubz_maxsize)
361fc03d22bSJeff Roberson 		return MAX((ubz->ubz_maxsize * ubz->ubz_entries) / size, 1);
362fc03d22bSJeff Roberson 
363fc03d22bSJeff Roberson 	for (; ubz->ubz_entries != 0; ubz++)
364fc03d22bSJeff Roberson 		if (ubz->ubz_maxsize < size)
365fc03d22bSJeff Roberson 			break;
366fc03d22bSJeff Roberson 	ubz--;
367fc03d22bSJeff Roberson 	return (ubz->ubz_entries);
368dc2c7965SRobert Watson }
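
/*
 * Worked example of the selection above: for an item size of 1024 bytes the
 * loop stops at the "32 Bucket" entry (ubz_maxsize 512 < 1024), backs up
 * one slot, and returns the "16 Bucket" entry count, BUCKET_SIZE(16).  For
 * item sizes above the first entry's 4096-byte ubz_maxsize, the count is
 * instead scaled down proportionally, with a floor of one item per bucket.
 */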

static uma_bucket_t
bucket_alloc(uma_zone_t zone, void *udata, int flags)
{
	struct uma_bucket_zone *ubz;
	uma_bucket_t bucket;

	/*
	 * This is to stop us from allocating per cpu buckets while we're
	 * running out of vm.boot_pages.  Otherwise, we would exhaust the
	 * boot pages.  This also prevents us from allocating buckets in
	 * low memory situations.
	 */
	if (bucketdisable)
		return (NULL);
	/*
	 * To limit bucket recursion we store the original zone flags
	 * in a cookie passed via zalloc_arg/zfree_arg.  This allows the
	 * NOVM flag to persist even through deep recursions.  We also
	 * store ZFLAG_BUCKET once we have recursed attempting to allocate
	 * a bucket for a bucket zone so we do not allow infinite bucket
	 * recursion.  This cookie will even persist to frees of unused
	 * buckets via the allocation path or bucket allocations in the
	 * free path.
	 */
	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
		udata = (void *)(uintptr_t)zone->uz_flags;
	else {
		if ((uintptr_t)udata & UMA_ZFLAG_BUCKET)
			return (NULL);
		udata = (void *)((uintptr_t)udata | UMA_ZFLAG_BUCKET);
	}
	if ((uintptr_t)udata & UMA_ZFLAG_CACHEONLY)
		flags |= M_NOVM;
	ubz = bucket_zone_lookup(zone->uz_count);
	if (ubz->ubz_zone == zone && (ubz + 1)->ubz_entries != 0)
		ubz++;
	bucket = uma_zalloc_arg(ubz->ubz_zone, udata, flags);
	if (bucket) {
#ifdef INVARIANTS
		bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
#endif
		bucket->ub_cnt = 0;
		bucket->ub_entries = ubz->ubz_entries;
	}

	return (bucket);
}

static void
bucket_free(uma_zone_t zone, uma_bucket_t bucket, void *udata)
{
	struct uma_bucket_zone *ubz;

	KASSERT(bucket->ub_cnt == 0,
	    ("bucket_free: Freeing a non free bucket."));
	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
		udata = (void *)(uintptr_t)zone->uz_flags;
	ubz = bucket_zone_lookup(bucket->ub_entries);
	uma_zfree_arg(ubz->ubz_zone, bucket, udata);
}

static void
bucket_zone_drain(void)
{
	struct uma_bucket_zone *ubz;

	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
		zone_drain(ubz->ubz_zone);
}

static void
zone_log_warning(uma_zone_t zone)
{
	static const struct timeval warninterval = { 300, 0 };

	if (!zone_warnings || zone->uz_warning == NULL)
		return;

	if (ratecheck(&zone->uz_ratecheck, &warninterval))
		printf("[zone: %s] %s\n", zone->uz_name, zone->uz_warning);
}
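
/*
 * Usage sketch (assuming the uma_zone_set_warning() API from uma.h; the
 * string is illustrative): a consumer opts into these messages by attaching
 * a warning to its zone, e.g.
 *
 *	uma_zone_set_warning(zone, "out of objects, consider tuning limits");
 *
 * after which zone_log_warning() prints it at most once per warninterval
 * (300 seconds) thanks to the ratecheck() above.
 */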

static inline void
zone_maxaction(uma_zone_t zone)
{

	if (zone->uz_maxaction.ta_func != NULL)
		taskqueue_enqueue(taskqueue_thread, &zone->uz_maxaction);
}

static void
zone_foreach_keg(uma_zone_t zone, void (*kegfn)(uma_keg_t))
{
	uma_klink_t klink;

	LIST_FOREACH(klink, &zone->uz_kegs, kl_link)
		kegfn(klink->kl_keg);
}

/*
 * Routine called by timeout which is used to fire off some time interval
 * based calculations.  (stats, hash size, etc.)
 *
 * Arguments:
 *	arg   Unused
 *
 * Returns:
 *	Nothing
 */
static void
uma_timeout(void *unused)
{
	bucket_enable();
	zone_foreach(zone_timeout);

	/* Reschedule this event */
	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
}

/*
 * Routine to perform timeout driven calculations.  This expands the
 * hashes and does per cpu statistics aggregation.
 *
 *  Returns nothing.
 */
static void
keg_timeout(uma_keg_t keg)
{

	KEG_LOCK(keg);
	/*
	 * Expand the keg hash table.
	 *
	 * This is done if the number of slabs is larger than the hash size.
	 * What I'm trying to do here is completely reduce collisions.  This
	 * may be a little aggressive.  Should I allow for two collisions max?
	 */
	if (keg->uk_flags & UMA_ZONE_HASH &&
	    keg->uk_pages / keg->uk_ppera >= keg->uk_hash.uh_hashsize) {
		struct uma_hash newhash;
		struct uma_hash oldhash;
		int ret;

		/*
		 * This is so involved because allocating and freeing
		 * while the keg lock is held will lead to deadlock.
		 * I have to do everything in stages and check for
		 * races.
		 */
		newhash = keg->uk_hash;
		KEG_UNLOCK(keg);
		ret = hash_alloc(&newhash);
		KEG_LOCK(keg);
		if (ret) {
			if (hash_expand(&keg->uk_hash, &newhash)) {
				oldhash = keg->uk_hash;
				keg->uk_hash = newhash;
			} else
				oldhash = newhash;

			KEG_UNLOCK(keg);
			hash_free(&oldhash);
			return;
		}
	}
	KEG_UNLOCK(keg);
}

static void
zone_timeout(uma_zone_t zone)
{

	zone_foreach_keg(zone, &keg_timeout);
}

/*
 * Allocate and zero fill the next sized hash table from the appropriate
 * backing store.
 *
 * Arguments:
 *	hash  A new hash structure with the old hash size in uh_hashsize
 *
 * Returns:
 *	1 on success and 0 on failure.
 */
static int
hash_alloc(struct uma_hash *hash)
{
	int oldsize;
	int alloc;

	oldsize = hash->uh_hashsize;

	/* We're just going to go to a power of two greater */
	if (oldsize) {
		hash->uh_hashsize = oldsize * 2;
		alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
		hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
		    M_UMAHASH, M_NOWAIT);
	} else {
		alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
		hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
		    M_WAITOK);
		hash->uh_hashsize = UMA_HASH_SIZE_INIT;
	}
	if (hash->uh_slab_hash) {
		bzero(hash->uh_slab_hash, alloc);
		hash->uh_hashmask = hash->uh_hashsize - 1;
		return (1);
	}

	return (0);
}
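
/*
 * The two backing stores above mirror hash_free() below: the initial
 * UMA_HASH_SIZE_INIT table comes from hashzone so that it can be allocated
 * before malloc(9) is up, while every later, larger table comes from
 * malloc(9); hash_free() tells the two cases apart by checking the table
 * size against UMA_HASH_SIZE_INIT.
 */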

/*
 * Expands the hash table for HASH zones.  This is done from zone_timeout
 * to reduce collisions.  This must not be done in the regular allocation
 * path, otherwise, we can recurse on the vm while allocating pages.
 *
 * Arguments:
 *	oldhash  The hash you want to expand
 *	newhash  The hash structure for the new table
 *
 * Returns:
 *	1 on success and 0 on failure.
 */
static int
hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
{
	uma_slab_t slab;
	int hval;
	int i;

	if (!newhash->uh_slab_hash)
		return (0);

	if (oldhash->uh_hashsize >= newhash->uh_hashsize)
		return (0);

	/*
	 * I need to investigate hash algorithms for resizing without a
	 * full rehash.
	 */

	for (i = 0; i < oldhash->uh_hashsize; i++)
		while (!SLIST_EMPTY(&oldhash->uh_slab_hash[i])) {
			slab = SLIST_FIRST(&oldhash->uh_slab_hash[i]);
			SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[i], us_hlink);
			hval = UMA_HASH(newhash, slab->us_data);
			SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
			    slab, us_hlink);
		}

	return (1);
}
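
/*
 * For context (an assumption about uma_int.h, not guaranteed by this file):
 * UMA_HASH() keys on the slab's data address, along the lines of
 *
 *	((uintptr_t)data >> UMA_SLAB_SHIFT) & (hash)->uh_hashmask
 *
 * Since the table size is always a power of two, doubling it exposes one
 * more address bit, and every slab must be rehashed as done above.
 */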

/*
 * Free the hash bucket to the appropriate backing store.
 *
 * Arguments:
 *	hash  The hash structure whose table we're freeing; its size
 *	      determines which backing store it came from.
 *
 * Returns:
 *	Nothing
 */
static void
hash_free(struct uma_hash *hash)
{
	if (hash->uh_slab_hash == NULL)
		return;
	if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
		zone_free_item(hashzone, hash->uh_slab_hash, NULL, SKIP_NONE);
	else
		free(hash->uh_slab_hash, M_UMAHASH);
}

/*
 * Frees all outstanding items in a bucket
 *
 * Arguments:
 *	zone   The zone to free to, must be unlocked.
 *	bucket The free/alloc bucket with items, cpu queue must be locked.
 *
 * Returns:
 *	Nothing
 */

static void
bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
{
	int i;

	if (bucket == NULL)
		return;

	if (zone->uz_fini)
		for (i = 0; i < bucket->ub_cnt; i++)
			zone->uz_fini(bucket->ub_bucket[i], zone->uz_size);
	zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt);
	bucket->ub_cnt = 0;
}

/*
 * Drains the per cpu caches for a zone.
 *
 * NOTE: This may only be called while the zone is being torn down, and not
 * during normal operation.  This is necessary in order that we do not have
 * to migrate CPUs to drain the per-CPU caches.
 *
 * Arguments:
 *	zone     The zone to drain, must be unlocked.
 *
 * Returns:
 *	Nothing
 */
static void
cache_drain(uma_zone_t zone)
{
	uma_cache_t cache;
	int cpu;

	/*
	 * XXX: It is safe to not lock the per-CPU caches, because we're
	 * tearing down the zone anyway.  I.e., there will be no further use
	 * of the caches at this point.
	 *
	 * XXX: It would be good to be able to assert that the zone is being
	 * torn down to prevent improper use of cache_drain().
	 *
	 * XXX: We lock the zone before passing into bucket_cache_drain() as
	 * it is used elsewhere.  Should the tear-down path be made special
	 * there in some form?
	 */
	CPU_FOREACH(cpu) {
		cache = &zone->uz_cpu[cpu];
		bucket_drain(zone, cache->uc_allocbucket);
		bucket_drain(zone, cache->uc_freebucket);
		if (cache->uc_allocbucket != NULL)
			bucket_free(zone, cache->uc_allocbucket, NULL);
		if (cache->uc_freebucket != NULL)
			bucket_free(zone, cache->uc_freebucket, NULL);
		cache->uc_allocbucket = cache->uc_freebucket = NULL;
	}
	ZONE_LOCK(zone);
	bucket_cache_drain(zone);
	ZONE_UNLOCK(zone);
}

static void
cache_shrink(uma_zone_t zone)
{

	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
		return;

	ZONE_LOCK(zone);
	zone->uz_count = (zone->uz_count_min + zone->uz_count) / 2;
	ZONE_UNLOCK(zone);
}

static void
cache_drain_safe_cpu(uma_zone_t zone)
{
	uma_cache_t cache;
	uma_bucket_t b1, b2;

	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
		return;

	b1 = b2 = NULL;
	ZONE_LOCK(zone);
	critical_enter();
	cache = &zone->uz_cpu[curcpu];
	if (cache->uc_allocbucket) {
		if (cache->uc_allocbucket->ub_cnt != 0)
			LIST_INSERT_HEAD(&zone->uz_buckets,
			    cache->uc_allocbucket, ub_link);
		else
			b1 = cache->uc_allocbucket;
		cache->uc_allocbucket = NULL;
	}
	if (cache->uc_freebucket) {
		if (cache->uc_freebucket->ub_cnt != 0)
			LIST_INSERT_HEAD(&zone->uz_buckets,
			    cache->uc_freebucket, ub_link);
		else
			b2 = cache->uc_freebucket;
		cache->uc_freebucket = NULL;
	}
	critical_exit();
	ZONE_UNLOCK(zone);
	if (b1)
		bucket_free(zone, b1, NULL);
	if (b2)
		bucket_free(zone, b2, NULL);
}

/*
 * Safely drain the per-CPU caches of a zone (or of all zones) into the
 * zone's bucket list.  This is an expensive call because it has to bind
 * to each CPU in turn and enter a critical section there in order to
 * safely access its cache buckets.
 * The zone lock must not be held when calling this function.
 */
static void
cache_drain_safe(uma_zone_t zone)
{
	int cpu;

	/*
	 * Polite bucket size shrinking was not enough; shrink aggressively.
	 */
	if (zone)
		cache_shrink(zone);
	else
		zone_foreach(cache_shrink);

	CPU_FOREACH(cpu) {
		thread_lock(curthread);
		sched_bind(curthread, cpu);
		thread_unlock(curthread);

		if (zone)
			cache_drain_safe_cpu(zone);
		else
			zone_foreach(cache_drain_safe_cpu);
	}
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);
}
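
/*
 * The bind-and-drain dance above is the price of lockless per-CPU caches:
 * only code running on a given CPU, inside a critical section, may touch
 * that CPU's uc_allocbucket/uc_freebucket, so the draining thread must
 * visit every CPU in turn rather than reach across from a single one.
 */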

/*
 * Drain the cached buckets from a zone.  Expects a locked zone on entry.
 */
static void
bucket_cache_drain(uma_zone_t zone)
{
	uma_bucket_t bucket;

	/*
	 * Drain the bucket queues and free the buckets, we just keep two per
	 * cpu (alloc/free).
	 */
	while ((bucket = LIST_FIRST(&zone->uz_buckets)) != NULL) {
		LIST_REMOVE(bucket, ub_link);
		ZONE_UNLOCK(zone);
		bucket_drain(zone, bucket);
		bucket_free(zone, bucket, NULL);
		ZONE_LOCK(zone);
	}

	/*
	 * Shrink further bucket sizes.  The price of a single zone lock
	 * collision is probably lower than the price of a global cache drain.
	 */
	if (zone->uz_count > zone->uz_count_min)
		zone->uz_count--;
}

static void
keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
{
	uint8_t *mem;
	int i;
	uint8_t flags;

	CTR4(KTR_UMA, "keg_free_slab keg %s(%p) slab %p, returning %d bytes",
	    keg->uk_name, keg, slab, PAGE_SIZE * keg->uk_ppera);

	mem = slab->us_data;
	flags = slab->us_flags;
	i = start;
	if (keg->uk_fini != NULL) {
		for (i--; i > -1; i--)
			keg->uk_fini(slab->us_data + (keg->uk_rsize * i),
			    keg->uk_size);
	}
	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
		zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
	keg->uk_freef(mem, PAGE_SIZE * keg->uk_ppera, flags);
	uma_total_dec(PAGE_SIZE * keg->uk_ppera);
}

/*
 * Frees pages from a keg back to the system.  This is done on demand from
 * the pageout daemon.
 *
 * Returns nothing.
 */
static void
keg_drain(uma_keg_t keg)
{
	struct slabhead freeslabs = { 0 };
	uma_slab_t slab, tmp;

	/*
	 * We don't want to take pages from statically allocated kegs at this
	 * time
	 */
	if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
		return;

	CTR3(KTR_UMA, "keg_drain %s(%p) free items: %u",
	    keg->uk_name, keg, keg->uk_free);
	KEG_LOCK(keg);
	if (keg->uk_free == 0)
		goto finished;

	LIST_FOREACH_SAFE(slab, &keg->uk_free_slab, us_link, tmp) {
		/* We have nowhere to free these to. */
		if (slab->us_flags & UMA_SLAB_BOOT)
			continue;

		LIST_REMOVE(slab, us_link);
		keg->uk_pages -= keg->uk_ppera;
		keg->uk_free -= keg->uk_ipers;

		if (keg->uk_flags & UMA_ZONE_HASH)
			UMA_HASH_REMOVE(&keg->uk_hash, slab, slab->us_data);

		SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);
	}
finished:
	KEG_UNLOCK(keg);

	while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
		SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
		keg_free_slab(keg, slab, keg->uk_ipers);
	}
}

static void
zone_drain_wait(uma_zone_t zone, int waitok)
{

	/*
	 * Set draining to interlock with zone_dtor() so we can release our
	 * locks as we go.  Only dtor() should do a WAITOK call since it
	 * is the only call that knows the structure will still be available
	 * when it wakes up.
	 */
	ZONE_LOCK(zone);
	while (zone->uz_flags & UMA_ZFLAG_DRAINING) {
		if (waitok == M_NOWAIT)
			goto out;
		msleep(zone, zone->uz_lockptr, PVM, "zonedrain", 1);
	}
	zone->uz_flags |= UMA_ZFLAG_DRAINING;
	bucket_cache_drain(zone);
	ZONE_UNLOCK(zone);
	/*
	 * The DRAINING flag protects us from being freed while
	 * we're running.  Normally the uma_rwlock would protect us but we
	 * must be able to release and acquire the right lock for each keg.
	 */
	zone_foreach_keg(zone, &keg_drain);
	ZONE_LOCK(zone);
	zone->uz_flags &= ~UMA_ZFLAG_DRAINING;
	wakeup(zone);
out:
	ZONE_UNLOCK(zone);
}

void
zone_drain(uma_zone_t zone)
{

	zone_drain_wait(zone, M_NOWAIT);
}
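
/*
 * zone_drain() is the non-sleeping variant: if another thread is already
 * draining the zone, zone_drain_wait(..., M_NOWAIT) above returns
 * immediately instead of waiting for UMA_ZFLAG_DRAINING to clear.
 */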

/*
 * Allocate a new slab for a keg.  This does not insert the slab onto a list.
 *
 * Arguments:
 *	wait  Shall we wait?
 *
 * Returns:
 *	The slab that was allocated or NULL if there is no memory and the
 *	caller specified M_NOWAIT.
 */
static uma_slab_t
keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int wait)
{
	uma_alloc allocf;
	uma_slab_t slab;
	unsigned long size;
	uint8_t *mem;
	uint8_t flags;
	int i;

	mtx_assert(&keg->uk_lock, MA_OWNED);
	slab = NULL;
	mem = NULL;

	allocf = keg->uk_allocf;
	KEG_UNLOCK(keg);
	size = keg->uk_ppera * PAGE_SIZE;

	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
		slab = zone_alloc_item(keg->uk_slabzone, NULL, wait);
		if (slab == NULL)
			goto out;
	}

	/*
	 * This reproduces the old vm_zone behavior of zero filling pages the
	 * first time they are added to a zone.
	 *
	 * Malloced items are zeroed in uma_zalloc.
	 */

	if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
		wait |= M_ZERO;
	else
		wait &= ~M_ZERO;

	if (keg->uk_flags & UMA_ZONE_NODUMP)
		wait |= M_NODUMP;

	/* zone is passed for legacy reasons. */
	mem = allocf(zone, size, &flags, wait);
	if (mem == NULL) {
		if (keg->uk_flags & UMA_ZONE_OFFPAGE)
			zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
		slab = NULL;
		goto out;
	}
	uma_total_inc(size);

	/* Point the slab into the allocated memory */
	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
		slab = (uma_slab_t)(mem + keg->uk_pgoff);

	if (keg->uk_flags & UMA_ZONE_VTOSLAB)
		for (i = 0; i < keg->uk_ppera; i++)
			vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);

	slab->us_keg = keg;
	slab->us_data = mem;
	slab->us_freecount = keg->uk_ipers;
	slab->us_flags = flags;
	BIT_FILL(SLAB_SETSIZE, &slab->us_free);
#ifdef INVARIANTS
	BIT_ZERO(SLAB_SETSIZE, &slab->us_debugfree);
#endif

	if (keg->uk_init != NULL) {
		for (i = 0; i < keg->uk_ipers; i++)
			if (keg->uk_init(slab->us_data + (keg->uk_rsize * i),
			    keg->uk_size, wait) != 0)
				break;
		if (i != keg->uk_ipers) {
			keg_free_slab(keg, slab, i);
			slab = NULL;
			goto out;
		}
	}
out:
	KEG_LOCK(keg);

	CTR3(KTR_UMA, "keg_alloc_slab: allocated slab %p for %s(%p)",
	    slab, keg->uk_name, keg);

	if (slab != NULL) {
		if (keg->uk_flags & UMA_ZONE_HASH)
			UMA_HASH_INSERT(&keg->uk_hash, slab, mem);

		keg->uk_pages += keg->uk_ppera;
		keg->uk_free += keg->uk_ipers;
	}

	return (slab);
}
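
/*
 * Layout recap for keg_alloc_slab(): OFFPAGE kegs get their slab header
 * from uk_slabzone and the pages hold only items; otherwise the header
 * lives inside the allocation itself at mem + uk_pgoff.  VTOSLAB kegs
 * additionally record the slab pointer in each backing page (via
 * vsetslab()) so a later free can find the slab from an item's address.
 */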

/*
 * This function is intended to be used early on in place of page_alloc() so
 * that we may use the boot time page cache to satisfy allocations before
 * the VM is ready.
 */
static void *
startup_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *pflag, int wait)
{
	uma_keg_t keg;
	void *mem;
	int pages;

	keg = zone_first_keg(zone);
	pages = howmany(bytes, PAGE_SIZE);
	KASSERT(pages > 0, ("startup_alloc can't reserve 0 pages\n"));

	/*
	 * Check our small startup cache to see if it has pages remaining.
	 */
	mtx_lock(&uma_boot_pages_mtx);
	if (pages <= boot_pages) {
		mem = bootmem;
		boot_pages -= pages;
		bootmem += pages * PAGE_SIZE;
		mtx_unlock(&uma_boot_pages_mtx);
		*pflag = UMA_SLAB_BOOT;
		return (mem);
	}
	mtx_unlock(&uma_boot_pages_mtx);
	if (booted < UMA_STARTUP2)
		panic("UMA: Increase vm.boot_pages");
	/*
	 * Now that we've booted, reset these users to their real allocator.
1081009b6fcbSJeff Roberson 	 */
1082009b6fcbSJeff Roberson #ifdef UMA_MD_SMALL_ALLOC
1083e9a069d8SJohn Baldwin 	keg->uk_allocf = (keg->uk_ppera > 1) ? page_alloc : uma_small_alloc;
1084009b6fcbSJeff Roberson #else
1085099a0e58SBosko Milekic 	keg->uk_allocf = page_alloc;
1086009b6fcbSJeff Roberson #endif
1087099a0e58SBosko Milekic 	return (keg->uk_allocf(zone, bytes, pflag, wait));
1088009b6fcbSJeff Roberson }
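
/*
 * A worked example of the carving above (a sketch assuming 4KB pages):
 * a request for bytes = 3 * PAGE_SIZE + 1 computes pages = 4, and if the
 * boot cache can cover it:
 *
 *	mem = bootmem;			current front of the boot cache
 *	boot_pages -= 4;		four fewer pages remain
 *	bootmem += 4 * PAGE_SIZE;	advance past the carved range
 *
 * Once the cache is exhausted and booted >= UMA_STARTUP2, the keg is
 * switched to its real allocator and never returns here.
 */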
1089009b6fcbSJeff Roberson 
1090009b6fcbSJeff Roberson /*
10918355f576SJeff Roberson  * Allocates a number of pages from the system
10928355f576SJeff Roberson  *
10938355f576SJeff Roberson  * Arguments:
10948355f576SJeff Roberson  *	bytes  The number of bytes requested
10958355f576SJeff Roberson  *	wait  Shall we wait?
10968355f576SJeff Roberson  *
10978355f576SJeff Roberson  * Returns:
10988355f576SJeff Roberson  *	A pointer to the allocated memory or possibly
10998355f576SJeff Roberson  *	NULL if M_NOWAIT is set.
11008355f576SJeff Roberson  */
11018355f576SJeff Roberson static void *
1102f2c2231eSRyan Stone page_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *pflag, int wait)
11038355f576SJeff Roberson {
11048355f576SJeff Roberson 	void *p;	/* Returned page */
11058355f576SJeff Roberson 
11062e47807cSJeff Roberson 	*pflag = UMA_SLAB_KERNEL;
11072e47807cSJeff Roberson 	p = (void *) kmem_malloc(kernel_arena, bytes, wait);
11088355f576SJeff Roberson 
11098355f576SJeff Roberson 	return (p);
11108355f576SJeff Roberson }
11118355f576SJeff Roberson 
11128355f576SJeff Roberson /*
11138355f576SJeff Roberson  * Allocates a number of pages not belonging to a VM object, mapping them
11138355f576SJeff Roberson  * into the keg's preallocated KVA region
11148355f576SJeff Roberson  *
11158355f576SJeff Roberson  * Arguments:
11168355f576SJeff Roberson  *	bytes  The number of bytes requested
11178355f576SJeff Roberson  *	wait   Shall we wait?
11188355f576SJeff Roberson  *
11198355f576SJeff Roberson  * Returns:
11208355f576SJeff Roberson  *	A pointer to the allocated memory or possibly
11218355f576SJeff Roberson  *	NULL if M_NOWAIT is set.
11228355f576SJeff Roberson  */
11238355f576SJeff Roberson static void *
1124f2c2231eSRyan Stone noobj_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *flags, int wait)
11258355f576SJeff Roberson {
1126a4915c21SAttilio Rao 	TAILQ_HEAD(, vm_page) alloctail;
1127a4915c21SAttilio Rao 	u_long npages;
1128b245ac95SAlan Cox 	vm_offset_t retkva, zkva;
1129a4915c21SAttilio Rao 	vm_page_t p, p_next;
1130e20a199fSJeff Roberson 	uma_keg_t keg;
11318355f576SJeff Roberson 
1132a4915c21SAttilio Rao 	TAILQ_INIT(&alloctail);
1133e20a199fSJeff Roberson 	keg = zone_first_keg(zone);
1134a4915c21SAttilio Rao 
1135a4915c21SAttilio Rao 	npages = howmany(bytes, PAGE_SIZE);
1136a4915c21SAttilio Rao 	while (npages > 0) {
1137a4915c21SAttilio Rao 		p = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT |
11388d6fbbb8SJeff Roberson 		    VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
1139772c8b67SKonstantin Belousov 		    ((wait & M_WAITOK) != 0 ? VM_ALLOC_WAITOK :
1140772c8b67SKonstantin Belousov 		    VM_ALLOC_NOWAIT));
1141a4915c21SAttilio Rao 		if (p != NULL) {
1142a4915c21SAttilio Rao 			/*
1143a4915c21SAttilio Rao 			 * Since the page does not belong to an object, its
1144a4915c21SAttilio Rao 			 * listq is unused.
1145a4915c21SAttilio Rao 			 */
1146a4915c21SAttilio Rao 			TAILQ_INSERT_TAIL(&alloctail, p, listq);
1147a4915c21SAttilio Rao 			npages--;
1148a4915c21SAttilio Rao 			continue;
1149a4915c21SAttilio Rao 		}
11508355f576SJeff Roberson 		/*
1151a4915c21SAttilio Rao 		 * Page allocation failed; free any pages already allocated
1152a4915c21SAttilio Rao 		 * and exit.
11538355f576SJeff Roberson 		 */
1154a4915c21SAttilio Rao 		TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
1155087a6132SAlan Cox 			vm_page_unwire(p, PQ_NONE);
1156b245ac95SAlan Cox 			vm_page_free(p);
1157b245ac95SAlan Cox 		}
1158a4915c21SAttilio Rao 		return (NULL);
1159b245ac95SAlan Cox 	}
11608355f576SJeff Roberson 	*flags = UMA_SLAB_PRIV;
1161a4915c21SAttilio Rao 	zkva = keg->uk_kva +
1162a4915c21SAttilio Rao 	    atomic_fetchadd_long(&keg->uk_offset, round_page(bytes));
1163a4915c21SAttilio Rao 	retkva = zkva;
1164a4915c21SAttilio Rao 	TAILQ_FOREACH(p, &alloctail, listq) {
1165a4915c21SAttilio Rao 		pmap_qenter(zkva, &p, 1);
1166a4915c21SAttilio Rao 		zkva += PAGE_SIZE;
1167a4915c21SAttilio Rao 	}
11688355f576SJeff Roberson 
11698355f576SJeff Roberson 	return ((void *)retkva);
11708355f576SJeff Roberson }
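
/*
 * The KVA arithmetic in noobj_alloc() relies on atomic_fetchadd_long()
 * returning the old value of uk_offset, so concurrent callers carve
 * disjoint ranges out of the region reserved at uk_kva.  A sketch with
 * two racing one-page allocations (offsets are illustrative):
 *
 *	caller A: zkva = uk_kva + fetchadd(&uk_offset, PAGE_SIZE) = uk_kva
 *	caller B: zkva = uk_kva + fetchadd(&uk_offset, PAGE_SIZE)
 *		       = uk_kva + PAGE_SIZE
 *
 * Each caller then pmap_qenter()s its pages into its own range only.
 */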
11718355f576SJeff Roberson 
11728355f576SJeff Roberson /*
11738355f576SJeff Roberson  * Frees a number of pages to the system
11748355f576SJeff Roberson  *
11758355f576SJeff Roberson  * Arguments:
11768355f576SJeff Roberson  *	mem   A pointer to the memory to be freed
11778355f576SJeff Roberson  *	size  The size of the memory being freed
11788355f576SJeff Roberson  *	flags The original slab us_flags field
11798355f576SJeff Roberson  *
11808355f576SJeff Roberson  * Returns:
11818355f576SJeff Roberson  *	Nothing
11828355f576SJeff Roberson  */
11838355f576SJeff Roberson static void
1184f2c2231eSRyan Stone page_free(void *mem, vm_size_t size, uint8_t flags)
11858355f576SJeff Roberson {
11865df87b21SJeff Roberson 	struct vmem *vmem;
11873370c5bfSJeff Roberson 
11882e47807cSJeff Roberson 	if (flags & UMA_SLAB_KERNEL)
11895df87b21SJeff Roberson 		vmem = kernel_arena;
11908355f576SJeff Roberson 	else
1191b5345ef1SJustin Hibbits 		panic("UMA: page_free used with invalid flags %x", flags);
11928355f576SJeff Roberson 
11935df87b21SJeff Roberson 	kmem_free(vmem, (vm_offset_t)mem, size);
11948355f576SJeff Roberson }
11958355f576SJeff Roberson 
11968355f576SJeff Roberson /*
11978355f576SJeff Roberson  * Zero fill initializer
11988355f576SJeff Roberson  *
11998355f576SJeff Roberson  * Arguments/Returns follow uma_init specifications
12008355f576SJeff Roberson  */
1201b23f72e9SBrian Feldman static int
1202b23f72e9SBrian Feldman zero_init(void *mem, int size, int flags)
12038355f576SJeff Roberson {
12048355f576SJeff Roberson 	bzero(mem, size);
1205b23f72e9SBrian Feldman 	return (0);
12068355f576SJeff Roberson }
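
/*
 * zero_init is installed automatically for UMA_ZONE_ZINIT zones (see
 * keg_ctor() below).  A minimal usage sketch, with a hypothetical "foo"
 * zone:
 *
 *	foo_zone = uma_zcreate("foo", sizeof(struct foo), NULL, NULL,
 *	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
 *
 * Items are then zeroed once, when their slab is created, rather than
 * on every allocation as M_ZERO does.
 */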
12078355f576SJeff Roberson 
12088355f576SJeff Roberson /*
1209e20a199fSJeff Roberson  * Finish creating a small uma keg.  This calculates ipers and the keg size.
12108355f576SJeff Roberson  *
12118355f576SJeff Roberson  * Arguments
1212e20a199fSJeff Roberson  *	keg  The keg we should initialize
12138355f576SJeff Roberson  *
12148355f576SJeff Roberson  * Returns
12158355f576SJeff Roberson  *	Nothing
12168355f576SJeff Roberson  */
12178355f576SJeff Roberson static void
1218e20a199fSJeff Roberson keg_small_init(uma_keg_t keg)
12198355f576SJeff Roberson {
1220244f4554SBosko Milekic 	u_int rsize;
1221244f4554SBosko Milekic 	u_int memused;
1222244f4554SBosko Milekic 	u_int wastedspace;
1223244f4554SBosko Milekic 	u_int shsize;
1224a55ebb7cSAndriy Gapon 	u_int slabsize;
12258355f576SJeff Roberson 
1226ad97af7eSGleb Smirnoff 	if (keg->uk_flags & UMA_ZONE_PCPU) {
122796c85efbSNathan Whitehorn 		u_int ncpus = (mp_maxid + 1) ? (mp_maxid + 1) : MAXCPU;
1228e28a647dSGleb Smirnoff 
1229a55ebb7cSAndriy Gapon 		slabsize = sizeof(struct pcpu);
1230e28a647dSGleb Smirnoff 		keg->uk_ppera = howmany(ncpus * sizeof(struct pcpu),
1231ad97af7eSGleb Smirnoff 		    PAGE_SIZE);
1232ad97af7eSGleb Smirnoff 	} else {
1233a55ebb7cSAndriy Gapon 		slabsize = UMA_SLAB_SIZE;
1234ad97af7eSGleb Smirnoff 		keg->uk_ppera = 1;
1235ad97af7eSGleb Smirnoff 	}
1236ad97af7eSGleb Smirnoff 
1237ef72505eSJeff Roberson 	/*
1238ef72505eSJeff Roberson 	 * Calculate the size of each allocation (rsize) according to
1239ef72505eSJeff Roberson 	 * alignment.  If the requested size is smaller than the smallest
1240ef72505eSJeff Roberson 	 * unit we have allocation bits for, round it up.
1241ef72505eSJeff Roberson 	 */
1242099a0e58SBosko Milekic 	rsize = keg->uk_size;
1243a55ebb7cSAndriy Gapon 	if (rsize < slabsize / SLAB_SETSIZE)
1244a55ebb7cSAndriy Gapon 		rsize = slabsize / SLAB_SETSIZE;
1245099a0e58SBosko Milekic 	if (rsize & keg->uk_align)
1246099a0e58SBosko Milekic 		rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1);
1247099a0e58SBosko Milekic 	keg->uk_rsize = rsize;
1248ad97af7eSGleb Smirnoff 
1249ad97af7eSGleb Smirnoff 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0 ||
1250ad97af7eSGleb Smirnoff 	    keg->uk_rsize < sizeof(struct pcpu),
1251ad97af7eSGleb Smirnoff 	    ("%s: size %u too large", __func__, keg->uk_rsize));
12528355f576SJeff Roberson 
1253ef72505eSJeff Roberson 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
12542864dbbfSGleb Smirnoff 		shsize = 0;
1255ef72505eSJeff Roberson 	else
1256244f4554SBosko Milekic 		shsize = sizeof(struct uma_slab);
12578355f576SJeff Roberson 
1258a55ebb7cSAndriy Gapon 	keg->uk_ipers = (slabsize - shsize) / rsize;
1259ef72505eSJeff Roberson 	KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
1260ad97af7eSGleb Smirnoff 	    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
1261ad97af7eSGleb Smirnoff 
1262244f4554SBosko Milekic 	memused = keg->uk_ipers * rsize + shsize;
1263a55ebb7cSAndriy Gapon 	wastedspace = slabsize - memused;
1264244f4554SBosko Milekic 
126520e8e865SBosko Milekic 	/*
1266244f4554SBosko Milekic 	 * We can't do OFFPAGE if we're internal or if we've been
126720e8e865SBosko Milekic 	 * asked not to go to the VM for buckets.  If we did, we could
12686fd34d6fSJeff Roberson 	 * end up going to the VM for slab headers, which we must not
12696fd34d6fSJeff Roberson 	 * do if we're UMA_ZFLAG_CACHEONLY as a result of UMA_ZONE_VM,
12706fd34d6fSJeff Roberson 	 * which clearly forbids it.
127120e8e865SBosko Milekic 	 */
1272099a0e58SBosko Milekic 	if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
1273099a0e58SBosko Milekic 	    (keg->uk_flags & UMA_ZFLAG_CACHEONLY))
12748355f576SJeff Roberson 		return;
1275244f4554SBosko Milekic 
1276ef72505eSJeff Roberson 	/*
1277ef72505eSJeff Roberson 	 * See if using an OFFPAGE slab will limit our waste.  Only do
1278ef72505eSJeff Roberson 	 * this if it permits more items per-slab.
1279ef72505eSJeff Roberson 	 *
1280ef72505eSJeff Roberson 	 * XXX We could try growing slabsize to limit max waste as well.
1281ef72505eSJeff Roberson 	 * Historically this was not done because the VM could not
1282ef72505eSJeff Roberson 	 * efficiently handle contiguous allocations.
1283ef72505eSJeff Roberson 	 */
1284a55ebb7cSAndriy Gapon 	if ((wastedspace >= slabsize / UMA_MAX_WASTE) &&
1285a55ebb7cSAndriy Gapon 	    (keg->uk_ipers < (slabsize / keg->uk_rsize))) {
1286a55ebb7cSAndriy Gapon 		keg->uk_ipers = slabsize / keg->uk_rsize;
1287ef72505eSJeff Roberson 		KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
1288ad97af7eSGleb Smirnoff 		    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
12891431a748SGleb Smirnoff 		CTR6(KTR_UMA, "UMA decided we need offpage slab headers for "
12901431a748SGleb Smirnoff 		    "keg: %s(%p), calculated wastedspace = %d, "
1291244f4554SBosko Milekic 		    "maximum wasted space allowed = %d, "
1292244f4554SBosko Milekic 		    "calculated ipers = %d, "
12931431a748SGleb Smirnoff 		    "new wasted space = %d", keg->uk_name, keg, wastedspace,
1294a55ebb7cSAndriy Gapon 		    slabsize / UMA_MAX_WASTE, keg->uk_ipers,
1295a55ebb7cSAndriy Gapon 		    slabsize - keg->uk_ipers * keg->uk_rsize);
1296099a0e58SBosko Milekic 		keg->uk_flags |= UMA_ZONE_OFFPAGE;
12978355f576SJeff Roberson 	}
1298ad97af7eSGleb Smirnoff 
1299ad97af7eSGleb Smirnoff 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
1300ad97af7eSGleb Smirnoff 	    (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1301ad97af7eSGleb Smirnoff 		keg->uk_flags |= UMA_ZONE_HASH;
13028355f576SJeff Roberson }
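
/*
 * A worked example of the sizing above (a sketch assuming 4KB slabs
 * and, for illustration only, an inline slab header of 96 bytes): for
 * a keg with uk_size = 200 and 8-byte alignment,
 *
 *	rsize = 200			already a multiple of 8
 *	ipers = (4096 - 96) / 200 = 20
 *	memused = 20 * 200 + 96 = 4096
 *	wastedspace = 0
 *
 * Zero waste is well under slabsize / UMA_MAX_WASTE, so the header
 * stays inline and OFFPAGE is not chosen.
 */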
13038355f576SJeff Roberson 
13048355f576SJeff Roberson /*
1305e20a199fSJeff Roberson  * Finish creating a large (> UMA_SLAB_SIZE) uma keg.  Just give in and do
13068355f576SJeff Roberson  * OFFPAGE for now.  When I can allow for more dynamic slab sizes this will be
13078355f576SJeff Roberson  * more complicated.
13088355f576SJeff Roberson  *
13098355f576SJeff Roberson  * Arguments
1310e20a199fSJeff Roberson  *	keg  The keg we should initialize
13118355f576SJeff Roberson  *
13128355f576SJeff Roberson  * Returns
13138355f576SJeff Roberson  *	Nothing
13148355f576SJeff Roberson  */
13158355f576SJeff Roberson static void
1316e20a199fSJeff Roberson keg_large_init(uma_keg_t keg)
13178355f576SJeff Roberson {
1318cec48e00SAlexander Motin 	u_int shsize;
13198355f576SJeff Roberson 
1320e20a199fSJeff Roberson 	KASSERT(keg != NULL, ("Keg is null in keg_large_init"));
1321099a0e58SBosko Milekic 	KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0,
1322e20a199fSJeff Roberson 	    ("keg_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY keg"));
1323ad97af7eSGleb Smirnoff 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
1324ad97af7eSGleb Smirnoff 	    ("%s: Cannot large-init a UMA_ZONE_PCPU keg", __func__));
132520e8e865SBosko Milekic 
1326ad97af7eSGleb Smirnoff 	keg->uk_ppera = howmany(keg->uk_size, PAGE_SIZE);
1327099a0e58SBosko Milekic 	keg->uk_ipers = 1;
1328e9a069d8SJohn Baldwin 	keg->uk_rsize = keg->uk_size;
1329e9a069d8SJohn Baldwin 
1330cec48e00SAlexander Motin 	/* Check whether we have enough space to not do OFFPAGE. */
1331cec48e00SAlexander Motin 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) == 0) {
1332cec48e00SAlexander Motin 		shsize = sizeof(struct uma_slab);
1333cec48e00SAlexander Motin 		if (shsize & UMA_ALIGN_PTR)
1334cec48e00SAlexander Motin 			shsize = (shsize & ~UMA_ALIGN_PTR) +
1335cec48e00SAlexander Motin 			    (UMA_ALIGN_PTR + 1);
1336cec48e00SAlexander Motin 
13372934eb8aSMark Johnston 		if (PAGE_SIZE * keg->uk_ppera - keg->uk_rsize < shsize) {
13382934eb8aSMark Johnston 			/*
13392934eb8aSMark Johnston 			 * We can't do OFFPAGE if we're internal, in which case
13402934eb8aSMark Johnston 			 * we need an extra page per allocation to contain the
13412934eb8aSMark Johnston 			 * slab header.
13422934eb8aSMark Johnston 			 */
13432934eb8aSMark Johnston 			if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) == 0)
1344099a0e58SBosko Milekic 				keg->uk_flags |= UMA_ZONE_OFFPAGE;
13452934eb8aSMark Johnston 			else
13462934eb8aSMark Johnston 				keg->uk_ppera++;
13472934eb8aSMark Johnston 		}
1348cec48e00SAlexander Motin 	}
1349cec48e00SAlexander Motin 
1350cec48e00SAlexander Motin 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
1351cec48e00SAlexander Motin 	    (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1352099a0e58SBosko Milekic 		keg->uk_flags |= UMA_ZONE_HASH;
13538355f576SJeff Roberson }
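
/*
 * For example (a sketch assuming 4KB pages): a keg with uk_size = 5000
 * gets uk_ppera = howmany(5000, 4096) = 2 and a single item per slab,
 * and the 8192 - 5000 = 3192 leftover bytes easily hold the aligned
 * slab header, so the OFFPAGE flag is not needed.
 */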
13548355f576SJeff Roberson 
1355e20a199fSJeff Roberson static void
1356e20a199fSJeff Roberson keg_cachespread_init(uma_keg_t keg)
1357e20a199fSJeff Roberson {
1358e20a199fSJeff Roberson 	int alignsize;
1359e20a199fSJeff Roberson 	int trailer;
1360e20a199fSJeff Roberson 	int pages;
1361e20a199fSJeff Roberson 	int rsize;
1362e20a199fSJeff Roberson 
1363ad97af7eSGleb Smirnoff 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
1364ad97af7eSGleb Smirnoff 	    ("%s: Cannot cachespread-init a UMA_ZONE_PCPU keg", __func__));
1365ad97af7eSGleb Smirnoff 
1366e20a199fSJeff Roberson 	alignsize = keg->uk_align + 1;
1367e20a199fSJeff Roberson 	rsize = keg->uk_size;
1368e20a199fSJeff Roberson 	/*
1369e20a199fSJeff Roberson 	 * We want one item to start on every align boundary in a page.  To
1370e20a199fSJeff Roberson 	 * do this we will span pages.  We will also extend the item by the
1371e20a199fSJeff Roberson 	 * size of align if it is an even multiple of align; otherwise, the
1372e20a199fSJeff Roberson 	 * items would keep falling on the same boundaries every time.
1373e20a199fSJeff Roberson 	 */
1374e20a199fSJeff Roberson 	if (rsize & keg->uk_align)
1375e20a199fSJeff Roberson 		rsize = (rsize & ~keg->uk_align) + alignsize;
1376e20a199fSJeff Roberson 	if ((rsize & alignsize) == 0)
1377e20a199fSJeff Roberson 		rsize += alignsize;
1378e20a199fSJeff Roberson 	trailer = rsize - keg->uk_size;
1379e20a199fSJeff Roberson 	pages = (rsize * (PAGE_SIZE / alignsize)) / PAGE_SIZE;
1380e20a199fSJeff Roberson 	pages = MIN(pages, (128 * 1024) / PAGE_SIZE);
1381e20a199fSJeff Roberson 	keg->uk_rsize = rsize;
1382e20a199fSJeff Roberson 	keg->uk_ppera = pages;
1383e20a199fSJeff Roberson 	keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize;
1384e20a199fSJeff Roberson 	keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
13852367b4ddSDimitry Andric 	KASSERT(keg->uk_ipers <= SLAB_SETSIZE,
138642321809SGleb Smirnoff 	    ("%s: keg->uk_ipers too high(%d) increase max_ipers", __func__,
1387e20a199fSJeff Roberson 	    keg->uk_ipers));
1388e20a199fSJeff Roberson }
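
/*
 * A worked example (a sketch assuming 4KB pages and 64-byte alignment,
 * i.e. uk_align = 63): for uk_size = 128,
 *
 *	rsize = 128 + 64 = 192		128 is an even multiple of 64
 *	pages = (192 * 64) / 4096 = 3
 *	ipers = (3 * 4096 + 64) / 192 = 64
 *
 * Successive items start three alignment units apart, and since 3 is
 * coprime with the 64 boundaries per page, the items cycle through
 * every boundary instead of piling up on a few of them.
 */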
1389e20a199fSJeff Roberson 
13908355f576SJeff Roberson /*
1391099a0e58SBosko Milekic  * Keg header ctor.  This initializes all fields, locks, etc., and inserts
1392099a0e58SBosko Milekic  * the keg onto the global keg list.
13938355f576SJeff Roberson  *
13948355f576SJeff Roberson  * Arguments/Returns follow uma_ctor specifications
1395099a0e58SBosko Milekic  *	udata  Actually uma_kctor_args
1396099a0e58SBosko Milekic  */
1397b23f72e9SBrian Feldman static int
1398b23f72e9SBrian Feldman keg_ctor(void *mem, int size, void *udata, int flags)
1399099a0e58SBosko Milekic {
1400099a0e58SBosko Milekic 	struct uma_kctor_args *arg = udata;
1401099a0e58SBosko Milekic 	uma_keg_t keg = mem;
1402099a0e58SBosko Milekic 	uma_zone_t zone;
1403099a0e58SBosko Milekic 
1404099a0e58SBosko Milekic 	bzero(keg, size);
1405099a0e58SBosko Milekic 	keg->uk_size = arg->size;
1406099a0e58SBosko Milekic 	keg->uk_init = arg->uminit;
1407099a0e58SBosko Milekic 	keg->uk_fini = arg->fini;
1408099a0e58SBosko Milekic 	keg->uk_align = arg->align;
1409099a0e58SBosko Milekic 	keg->uk_free = 0;
14106fd34d6fSJeff Roberson 	keg->uk_reserve = 0;
1411099a0e58SBosko Milekic 	keg->uk_pages = 0;
1412099a0e58SBosko Milekic 	keg->uk_flags = arg->flags;
1413099a0e58SBosko Milekic 	keg->uk_slabzone = NULL;
1414099a0e58SBosko Milekic 
1415099a0e58SBosko Milekic 	/*
1416099a0e58SBosko Milekic 	 * The master zone is passed to us at keg-creation time.
1417099a0e58SBosko Milekic 	 */
1418099a0e58SBosko Milekic 	zone = arg->zone;
1419e20a199fSJeff Roberson 	keg->uk_name = zone->uz_name;
1420099a0e58SBosko Milekic 
1421099a0e58SBosko Milekic 	if (arg->flags & UMA_ZONE_VM)
1422099a0e58SBosko Milekic 		keg->uk_flags |= UMA_ZFLAG_CACHEONLY;
1423099a0e58SBosko Milekic 
1424099a0e58SBosko Milekic 	if (arg->flags & UMA_ZONE_ZINIT)
1425099a0e58SBosko Milekic 		keg->uk_init = zero_init;
1426099a0e58SBosko Milekic 
1427cfcae3f8SGleb Smirnoff 	if (arg->flags & UMA_ZONE_MALLOC)
1428e20a199fSJeff Roberson 		keg->uk_flags |= UMA_ZONE_VTOSLAB;
1429e20a199fSJeff Roberson 
1430ad97af7eSGleb Smirnoff 	if (arg->flags & UMA_ZONE_PCPU)
1431ad97af7eSGleb Smirnoff #ifdef SMP
1432ad97af7eSGleb Smirnoff 		keg->uk_flags |= UMA_ZONE_OFFPAGE;
1433ad97af7eSGleb Smirnoff #else
1434ad97af7eSGleb Smirnoff 		keg->uk_flags &= ~UMA_ZONE_PCPU;
1435ad97af7eSGleb Smirnoff #endif
1436ad97af7eSGleb Smirnoff 
1437ef72505eSJeff Roberson 	if (keg->uk_flags & UMA_ZONE_CACHESPREAD) {
1438e20a199fSJeff Roberson 		keg_cachespread_init(keg);
1439244f4554SBosko Milekic 	} else {
1440ef72505eSJeff Roberson 		if (keg->uk_size > (UMA_SLAB_SIZE - sizeof(struct uma_slab)))
1441e20a199fSJeff Roberson 			keg_large_init(keg);
1442244f4554SBosko Milekic 		else
1443e20a199fSJeff Roberson 			keg_small_init(keg);
1444244f4554SBosko Milekic 	}
1445099a0e58SBosko Milekic 
1446cfcae3f8SGleb Smirnoff 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
1447099a0e58SBosko Milekic 		keg->uk_slabzone = slabzone;
1448099a0e58SBosko Milekic 
1449099a0e58SBosko Milekic 	/*
1450099a0e58SBosko Milekic 	 * If we haven't booted yet, we need allocations to go through the
1451099a0e58SBosko Milekic 	 * startup cache until the VM is ready.
1452099a0e58SBosko Milekic 	 */
14538cd02d00SAlan Cox 	if (booted < UMA_STARTUP2)
14548cd02d00SAlan Cox 		keg->uk_allocf = startup_alloc;
145577e19437SGleb Smirnoff #ifdef UMA_MD_SMALL_ALLOC
145677e19437SGleb Smirnoff 	else if (keg->uk_ppera == 1)
145777e19437SGleb Smirnoff 		keg->uk_allocf = uma_small_alloc;
14588cd02d00SAlan Cox #endif
145977e19437SGleb Smirnoff 	else
146077e19437SGleb Smirnoff 		keg->uk_allocf = page_alloc;
146177e19437SGleb Smirnoff #ifdef UMA_MD_SMALL_ALLOC
146277e19437SGleb Smirnoff 	if (keg->uk_ppera == 1)
146377e19437SGleb Smirnoff 		keg->uk_freef = uma_small_free;
146477e19437SGleb Smirnoff 	else
146577e19437SGleb Smirnoff #endif
146677e19437SGleb Smirnoff 		keg->uk_freef = page_free;
1467099a0e58SBosko Milekic 
1468099a0e58SBosko Milekic 	/*
1469af526374SJeff Roberson 	 * Initialize the keg's lock.
1470099a0e58SBosko Milekic 	 */
1471af526374SJeff Roberson 	KEG_LOCK_INIT(keg, (arg->flags & UMA_ZONE_MTXCLASS));
1472099a0e58SBosko Milekic 
1473099a0e58SBosko Milekic 	/*
1474099a0e58SBosko Milekic 	 * If we're putting the slab header in the actual page, we need to
1475099a0e58SBosko Milekic 	 * figure out where in each page it goes.  This calculates a
1476099a0e58SBosko Milekic 	 * right-justified offset into the memory on an ALIGN_PTR boundary.
1477099a0e58SBosko Milekic 	 */
1478099a0e58SBosko Milekic 	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) {
1479244f4554SBosko Milekic 		u_int totsize;
1480099a0e58SBosko Milekic 
1481099a0e58SBosko Milekic 		/* Size of the slab struct and free list */
1482ef72505eSJeff Roberson 		totsize = sizeof(struct uma_slab);
1483ef72505eSJeff Roberson 
1484099a0e58SBosko Milekic 		if (totsize & UMA_ALIGN_PTR)
1485099a0e58SBosko Milekic 			totsize = (totsize & ~UMA_ALIGN_PTR) +
1486099a0e58SBosko Milekic 			    (UMA_ALIGN_PTR + 1);
1487ad97af7eSGleb Smirnoff 		keg->uk_pgoff = (PAGE_SIZE * keg->uk_ppera) - totsize;
1488244f4554SBosko Milekic 
1489244f4554SBosko Milekic 		/*
1490244f4554SBosko Milekic 		 * The only way the following is possible is if, with our
1491244f4554SBosko Milekic 		 * UMA_ALIGN_PTR adjustments, we are now bigger than
1492244f4554SBosko Milekic 		 * UMA_SLAB_SIZE.  I haven't checked whether this is
1493244f4554SBosko Milekic 		 * mathematically possible for all cases, so we make
1494244f4554SBosko Milekic 		 * sure here anyway.
1495244f4554SBosko Milekic 		 */
1496ef72505eSJeff Roberson 		totsize = keg->uk_pgoff + sizeof(struct uma_slab);
1497ad97af7eSGleb Smirnoff 		if (totsize > PAGE_SIZE * keg->uk_ppera) {
1498099a0e58SBosko Milekic 			printf("zone %s ipers %d rsize %d size %d\n",
1499099a0e58SBosko Milekic 			    zone->uz_name, keg->uk_ipers, keg->uk_rsize,
1500099a0e58SBosko Milekic 			    keg->uk_size);
1501aea6e893SAlan Cox 			panic("UMA slab won't fit.");
1502099a0e58SBosko Milekic 		}
1503099a0e58SBosko Milekic 	}
1504099a0e58SBosko Milekic 
1505099a0e58SBosko Milekic 	if (keg->uk_flags & UMA_ZONE_HASH)
1506099a0e58SBosko Milekic 		hash_alloc(&keg->uk_hash);
1507099a0e58SBosko Milekic 
15081431a748SGleb Smirnoff 	CTR5(KTR_UMA, "keg_ctor %p zone %s(%p) out %d free %d",
15091431a748SGleb Smirnoff 	    keg, zone->uz_name, zone,
151057223e99SAndriy Gapon 	    (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free,
151157223e99SAndriy Gapon 	    keg->uk_free);
1512099a0e58SBosko Milekic 
1513099a0e58SBosko Milekic 	LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);
1514099a0e58SBosko Milekic 
1515111fbcd5SBryan Venteicher 	rw_wlock(&uma_rwlock);
1516099a0e58SBosko Milekic 	LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
1517111fbcd5SBryan Venteicher 	rw_wunlock(&uma_rwlock);
1518b23f72e9SBrian Feldman 	return (0);
1519099a0e58SBosko Milekic }
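
/*
 * To illustrate the right-justified uk_pgoff computed above (a sketch
 * assuming 4KB pages and, for illustration only, a slab header that
 * rounds up to 96 bytes): a one-page keg gets
 *
 *	uk_pgoff = 4096 - 96 = 4000
 *
 * so the header occupies the last 96 bytes of the slab and the items
 * are packed from the front.
 */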
1520099a0e58SBosko Milekic 
1521099a0e58SBosko Milekic /*
1522099a0e58SBosko Milekic  * Zone header ctor.  This initializes all fields, locks, etc.
1523099a0e58SBosko Milekic  *
1524099a0e58SBosko Milekic  * Arguments/Returns follow uma_ctor specifications
1525099a0e58SBosko Milekic  *	udata  Actually uma_zctor_args
15268355f576SJeff Roberson  */
1527b23f72e9SBrian Feldman static int
1528b23f72e9SBrian Feldman zone_ctor(void *mem, int size, void *udata, int flags)
15298355f576SJeff Roberson {
15308355f576SJeff Roberson 	struct uma_zctor_args *arg = udata;
15318355f576SJeff Roberson 	uma_zone_t zone = mem;
1532099a0e58SBosko Milekic 	uma_zone_t z;
1533099a0e58SBosko Milekic 	uma_keg_t keg;
15348355f576SJeff Roberson 
15358355f576SJeff Roberson 	bzero(zone, size);
15368355f576SJeff Roberson 	zone->uz_name = arg->name;
15378355f576SJeff Roberson 	zone->uz_ctor = arg->ctor;
15388355f576SJeff Roberson 	zone->uz_dtor = arg->dtor;
1539e20a199fSJeff Roberson 	zone->uz_slab = zone_fetch_slab;
1540099a0e58SBosko Milekic 	zone->uz_init = NULL;
1541099a0e58SBosko Milekic 	zone->uz_fini = NULL;
1542099a0e58SBosko Milekic 	zone->uz_allocs = 0;
1543773df9abSRobert Watson 	zone->uz_frees = 0;
15442019094aSRobert Watson 	zone->uz_fails = 0;
1545bf965959SSean Bruno 	zone->uz_sleeps = 0;
1546fc03d22bSJeff Roberson 	zone->uz_count = 0;
1547ace66b56SAlexander Motin 	zone->uz_count_min = 0;
1548e20a199fSJeff Roberson 	zone->uz_flags = 0;
15492f891cd5SPawel Jakub Dawidek 	zone->uz_warning = NULL;
15502f891cd5SPawel Jakub Dawidek 	timevalclear(&zone->uz_ratecheck);
1551e20a199fSJeff Roberson 	keg = arg->keg;
1552099a0e58SBosko Milekic 
1553af526374SJeff Roberson 	ZONE_LOCK_INIT(zone, (arg->flags & UMA_ZONE_MTXCLASS));
1554af526374SJeff Roberson 
15550095a784SJeff Roberson 	/*
15560095a784SJeff Roberson 	 * This is a pure cache zone, no kegs.
15570095a784SJeff Roberson 	 */
15580095a784SJeff Roberson 	if (arg->import) {
15596fd34d6fSJeff Roberson 		if (arg->flags & UMA_ZONE_VM)
15606fd34d6fSJeff Roberson 			arg->flags |= UMA_ZFLAG_CACHEONLY;
15616fd34d6fSJeff Roberson 		zone->uz_flags = arg->flags;
1562af526374SJeff Roberson 		zone->uz_size = arg->size;
15630095a784SJeff Roberson 		zone->uz_import = arg->import;
15640095a784SJeff Roberson 		zone->uz_release = arg->release;
15650095a784SJeff Roberson 		zone->uz_arg = arg->arg;
1566af526374SJeff Roberson 		zone->uz_lockptr = &zone->uz_lock;
1567111fbcd5SBryan Venteicher 		rw_wlock(&uma_rwlock);
156803175483SAlexander Motin 		LIST_INSERT_HEAD(&uma_cachezones, zone, uz_link);
1569111fbcd5SBryan Venteicher 		rw_wunlock(&uma_rwlock);
1570af526374SJeff Roberson 		goto out;
15710095a784SJeff Roberson 	}
15720095a784SJeff Roberson 
15730095a784SJeff Roberson 	/*
15740095a784SJeff Roberson 	 * Use the regular zone/keg/slab allocator.
15750095a784SJeff Roberson 	 */
15760095a784SJeff Roberson 	zone->uz_import = (uma_import)zone_import;
15770095a784SJeff Roberson 	zone->uz_release = (uma_release)zone_release;
15780095a784SJeff Roberson 	zone->uz_arg = zone;
15790095a784SJeff Roberson 
1580099a0e58SBosko Milekic 	if (arg->flags & UMA_ZONE_SECONDARY) {
1581099a0e58SBosko Milekic 		KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
15828355f576SJeff Roberson 		zone->uz_init = arg->uminit;
1583e221e841SJeff Roberson 		zone->uz_fini = arg->fini;
1584af526374SJeff Roberson 		zone->uz_lockptr = &keg->uk_lock;
1585e20a199fSJeff Roberson 		zone->uz_flags |= UMA_ZONE_SECONDARY;
1586111fbcd5SBryan Venteicher 		rw_wlock(&uma_rwlock);
1587099a0e58SBosko Milekic 		ZONE_LOCK(zone);
1588099a0e58SBosko Milekic 		LIST_FOREACH(z, &keg->uk_zones, uz_link) {
1589099a0e58SBosko Milekic 			if (LIST_NEXT(z, uz_link) == NULL) {
1590099a0e58SBosko Milekic 				LIST_INSERT_AFTER(z, zone, uz_link);
1591099a0e58SBosko Milekic 				break;
1592099a0e58SBosko Milekic 			}
1593099a0e58SBosko Milekic 		}
1594099a0e58SBosko Milekic 		ZONE_UNLOCK(zone);
1595111fbcd5SBryan Venteicher 		rw_wunlock(&uma_rwlock);
1596e20a199fSJeff Roberson 	} else if (keg == NULL) {
1597e20a199fSJeff Roberson 		if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
1598e20a199fSJeff Roberson 		    arg->align, arg->flags)) == NULL)
1599b23f72e9SBrian Feldman 			return (ENOMEM);
1600099a0e58SBosko Milekic 	} else {
1601099a0e58SBosko Milekic 		struct uma_kctor_args karg;
1602b23f72e9SBrian Feldman 		int error;
1603099a0e58SBosko Milekic 
1604099a0e58SBosko Milekic 		/* We should only be here from uma_startup() */
1605099a0e58SBosko Milekic 		karg.size = arg->size;
1606099a0e58SBosko Milekic 		karg.uminit = arg->uminit;
1607099a0e58SBosko Milekic 		karg.fini = arg->fini;
1608099a0e58SBosko Milekic 		karg.align = arg->align;
1609099a0e58SBosko Milekic 		karg.flags = arg->flags;
1610099a0e58SBosko Milekic 		karg.zone = zone;
1611b23f72e9SBrian Feldman 		error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
1612b23f72e9SBrian Feldman 		    flags);
1613b23f72e9SBrian Feldman 		if (error)
1614b23f72e9SBrian Feldman 			return (error);
1615099a0e58SBosko Milekic 	}
16160095a784SJeff Roberson 
1617e20a199fSJeff Roberson 	/*
1618e20a199fSJeff Roberson 	 * Link in the first keg.
1619e20a199fSJeff Roberson 	 */
1620e20a199fSJeff Roberson 	zone->uz_klink.kl_keg = keg;
1621e20a199fSJeff Roberson 	LIST_INSERT_HEAD(&zone->uz_kegs, &zone->uz_klink, kl_link);
1622af526374SJeff Roberson 	zone->uz_lockptr = &keg->uk_lock;
1623e20a199fSJeff Roberson 	zone->uz_size = keg->uk_size;
1624e20a199fSJeff Roberson 	zone->uz_flags |= (keg->uk_flags &
1625e20a199fSJeff Roberson 	    (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT));
16268355f576SJeff Roberson 
16278355f576SJeff Roberson 	/*
16288355f576SJeff Roberson 	 * Some internal zones don't have room allocated for the per-CPU
16298355f576SJeff Roberson 	 * caches.  If we're internal, bail out here.
16308355f576SJeff Roberson 	 */
1631099a0e58SBosko Milekic 	if (keg->uk_flags & UMA_ZFLAG_INTERNAL) {
1632e20a199fSJeff Roberson 		KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0,
1633099a0e58SBosko Milekic 		    ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
1634b23f72e9SBrian Feldman 		return (0);
1635099a0e58SBosko Milekic 	}
16368355f576SJeff Roberson 
1637af526374SJeff Roberson out:
1638af526374SJeff Roberson 	if ((arg->flags & UMA_ZONE_MAXBUCKET) == 0)
1639af526374SJeff Roberson 		zone->uz_count = bucket_select(zone->uz_size);
16408355f576SJeff Roberson 	else
1641cae33c14SJeff Roberson 		zone->uz_count = BUCKET_MAX;
1642ace66b56SAlexander Motin 	zone->uz_count_min = zone->uz_count;
1643fc03d22bSJeff Roberson 
1644b23f72e9SBrian Feldman 	return (0);
16458355f576SJeff Roberson }
16468355f576SJeff Roberson 
16478355f576SJeff Roberson /*
1648099a0e58SBosko Milekic  * Keg header dtor.  This frees all data, destroys locks, frees the hash
1649099a0e58SBosko Milekic  * table and removes the keg from the global list.
16509c2cd7e5SJeff Roberson  *
16519c2cd7e5SJeff Roberson  * Arguments/Returns follow uma_dtor specifications
16529c2cd7e5SJeff Roberson  *	udata  unused
16539c2cd7e5SJeff Roberson  */
1654099a0e58SBosko Milekic static void
1655099a0e58SBosko Milekic keg_dtor(void *arg, int size, void *udata)
1656099a0e58SBosko Milekic {
1657099a0e58SBosko Milekic 	uma_keg_t keg;
16589c2cd7e5SJeff Roberson 
1659099a0e58SBosko Milekic 	keg = (uma_keg_t)arg;
1660e20a199fSJeff Roberson 	KEG_LOCK(keg);
1661099a0e58SBosko Milekic 	if (keg->uk_free != 0) {
1662a3845534SCraig Rodrigues 		printf("Freed UMA keg (%s) was not empty (%d items). "
1663099a0e58SBosko Milekic 		    " Lost %d pages of memory.\n",
1664a3845534SCraig Rodrigues 		    keg->uk_name ? keg->uk_name : "",
1665099a0e58SBosko Milekic 		    keg->uk_free, keg->uk_pages);
1666099a0e58SBosko Milekic 	}
1667e20a199fSJeff Roberson 	KEG_UNLOCK(keg);
1668099a0e58SBosko Milekic 
1669099a0e58SBosko Milekic 	hash_free(&keg->uk_hash);
1670099a0e58SBosko Milekic 
1671e20a199fSJeff Roberson 	KEG_LOCK_FINI(keg);
1672099a0e58SBosko Milekic }
1673099a0e58SBosko Milekic 
1674099a0e58SBosko Milekic /*
1675099a0e58SBosko Milekic  * Zone header dtor.
1676099a0e58SBosko Milekic  *
1677099a0e58SBosko Milekic  * Arguments/Returns follow uma_dtor specifications
1678099a0e58SBosko Milekic  *	udata  unused
1679099a0e58SBosko Milekic  */
16809c2cd7e5SJeff Roberson static void
16819c2cd7e5SJeff Roberson zone_dtor(void *arg, int size, void *udata)
16829c2cd7e5SJeff Roberson {
1683e20a199fSJeff Roberson 	uma_klink_t klink;
16849c2cd7e5SJeff Roberson 	uma_zone_t zone;
1685099a0e58SBosko Milekic 	uma_keg_t keg;
16869c2cd7e5SJeff Roberson 
16879c2cd7e5SJeff Roberson 	zone = (uma_zone_t)arg;
1688e20a199fSJeff Roberson 	keg = zone_first_keg(zone);
16899643769aSJeff Roberson 
1690e20a199fSJeff Roberson 	if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
16919643769aSJeff Roberson 		cache_drain(zone);
1692099a0e58SBosko Milekic 
1693111fbcd5SBryan Venteicher 	rw_wlock(&uma_rwlock);
1694099a0e58SBosko Milekic 	LIST_REMOVE(zone, uz_link);
1695111fbcd5SBryan Venteicher 	rw_wunlock(&uma_rwlock);
1696099a0e58SBosko Milekic 	/*
1697099a0e58SBosko Milekic 	 * XXX there are some races here where the zone can be
1698099a0e58SBosko Milekic 	 * drained but the zone lock released and then refilled
1699099a0e58SBosko Milekic 	 * before we remove it... we don't care for now.
1701099a0e58SBosko Milekic 	 */
1702e20a199fSJeff Roberson 	zone_drain_wait(zone, M_WAITOK);
1703e20a199fSJeff Roberson 	/*
1704e20a199fSJeff Roberson 	 * Unlink all of our kegs.
1705e20a199fSJeff Roberson 	 */
1706e20a199fSJeff Roberson 	while ((klink = LIST_FIRST(&zone->uz_kegs)) != NULL) {
1707e20a199fSJeff Roberson 		klink->kl_keg = NULL;
1708e20a199fSJeff Roberson 		LIST_REMOVE(klink, kl_link);
1709e20a199fSJeff Roberson 		if (klink == &zone->uz_klink)
1710e20a199fSJeff Roberson 			continue;
1711e20a199fSJeff Roberson 		free(klink, M_TEMP);
1712e20a199fSJeff Roberson 	}
1713e20a199fSJeff Roberson 	/*
1714e20a199fSJeff Roberson 	 * We only destroy kegs from non secondary zones.
1715e20a199fSJeff Roberson 	 */
17160095a784SJeff Roberson 	if (keg != NULL && (zone->uz_flags & UMA_ZONE_SECONDARY) == 0)  {
1717111fbcd5SBryan Venteicher 		rw_wlock(&uma_rwlock);
1718099a0e58SBosko Milekic 		LIST_REMOVE(keg, uk_link);
1719111fbcd5SBryan Venteicher 		rw_wunlock(&uma_rwlock);
17200095a784SJeff Roberson 		zone_free_item(kegs, keg, NULL, SKIP_NONE);
17219c2cd7e5SJeff Roberson 	}
1722af526374SJeff Roberson 	ZONE_LOCK_FINI(zone);
1723099a0e58SBosko Milekic }
1724099a0e58SBosko Milekic 
17259c2cd7e5SJeff Roberson /*
17268355f576SJeff Roberson  * Traverses every zone in the system and calls a callback
17278355f576SJeff Roberson  *
17288355f576SJeff Roberson  * Arguments:
17298355f576SJeff Roberson  *	zfunc  A pointer to a function which accepts a zone
17308355f576SJeff Roberson  *		as an argument.
17318355f576SJeff Roberson  *
17328355f576SJeff Roberson  * Returns:
17338355f576SJeff Roberson  *	Nothing
17348355f576SJeff Roberson  */
17358355f576SJeff Roberson static void
17368355f576SJeff Roberson zone_foreach(void (*zfunc)(uma_zone_t))
17378355f576SJeff Roberson {
1738099a0e58SBosko Milekic 	uma_keg_t keg;
17398355f576SJeff Roberson 	uma_zone_t zone;
17408355f576SJeff Roberson 
1741111fbcd5SBryan Venteicher 	rw_rlock(&uma_rwlock);
1742099a0e58SBosko Milekic 	LIST_FOREACH(keg, &uma_kegs, uk_link) {
1743099a0e58SBosko Milekic 		LIST_FOREACH(zone, &keg->uk_zones, uz_link)
17448355f576SJeff Roberson 			zfunc(zone);
1745099a0e58SBosko Milekic 	}
1746111fbcd5SBryan Venteicher 	rw_runlock(&uma_rwlock);
17478355f576SJeff Roberson }
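
/*
 * A typical use (sketch): the reclaim path can drain every zone with
 *
 *	zone_foreach(zone_drain);
 *
 * The callback runs with uma_rwlock read-locked, so it must not try to
 * create or destroy zones.
 */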
17488355f576SJeff Roberson 
17498355f576SJeff Roberson /* Public functions */
17508355f576SJeff Roberson /* See uma.h */
17518355f576SJeff Roberson void
1752ac0a6fd0SGleb Smirnoff uma_startup(void *mem, int npages)
17538355f576SJeff Roberson {
17548355f576SJeff Roberson 	struct uma_zctor_args args;
17558355f576SJeff Roberson 
1756111fbcd5SBryan Venteicher 	rw_init(&uma_rwlock, "UMA lock");
1757099a0e58SBosko Milekic 
1758099a0e58SBosko Milekic 	/* "manually" create the initial zone */
17590095a784SJeff Roberson 	memset(&args, 0, sizeof(args));
1760099a0e58SBosko Milekic 	args.name = "UMA Kegs";
1761099a0e58SBosko Milekic 	args.size = sizeof(struct uma_keg);
1762099a0e58SBosko Milekic 	args.ctor = keg_ctor;
1763099a0e58SBosko Milekic 	args.dtor = keg_dtor;
17648355f576SJeff Roberson 	args.uminit = zero_init;
17658355f576SJeff Roberson 	args.fini = NULL;
1766099a0e58SBosko Milekic 	args.keg = &masterkeg;
17678355f576SJeff Roberson 	args.align = 32 - 1;
1768b60f5b79SJeff Roberson 	args.flags = UMA_ZFLAG_INTERNAL;
17698355f576SJeff Roberson 	/* The initial zone has no per-CPU queues, so it's smaller. */
1770b23f72e9SBrian Feldman 	zone_ctor(kegs, sizeof(struct uma_zone), &args, M_WAITOK);
17718355f576SJeff Roberson 
1772f353d338SAlan Cox 	mtx_init(&uma_boot_pages_mtx, "UMA boot pages", NULL, MTX_DEF);
1773ac0a6fd0SGleb Smirnoff 	bootmem = mem;
1774ac0a6fd0SGleb Smirnoff 	boot_pages = npages;
17758355f576SJeff Roberson 
1776099a0e58SBosko Milekic 	args.name = "UMA Zones";
1777099a0e58SBosko Milekic 	args.size = sizeof(struct uma_zone) +
177851cfb0beSDmitry Chagin 	    (sizeof(struct uma_cache) * (mp_maxid + 1));
1779099a0e58SBosko Milekic 	args.ctor = zone_ctor;
1780099a0e58SBosko Milekic 	args.dtor = zone_dtor;
1781099a0e58SBosko Milekic 	args.uminit = zero_init;
1782099a0e58SBosko Milekic 	args.fini = NULL;
1783099a0e58SBosko Milekic 	args.keg = NULL;
1784099a0e58SBosko Milekic 	args.align = 32 - 1;
1785099a0e58SBosko Milekic 	args.flags = UMA_ZFLAG_INTERNAL;
1786099a0e58SBosko Milekic 	/* The initial zone has no per-CPU queues, so it's smaller. */
1787b23f72e9SBrian Feldman 	zone_ctor(zones, sizeof(struct uma_zone), &args, M_WAITOK);
1788099a0e58SBosko Milekic 
17898355f576SJeff Roberson 	/* Now make a zone for slab headers */
17908355f576SJeff Roberson 	slabzone = uma_zcreate("UMA Slabs",
1791ef72505eSJeff Roberson 				sizeof(struct uma_slab),
17928355f576SJeff Roberson 				NULL, NULL, NULL, NULL,
1793b60f5b79SJeff Roberson 				UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
17948355f576SJeff Roberson 
17958355f576SJeff Roberson 	hashzone = uma_zcreate("UMA Hash",
17968355f576SJeff Roberson 	    sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
17978355f576SJeff Roberson 	    NULL, NULL, NULL, NULL,
1798b60f5b79SJeff Roberson 	    UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
17998355f576SJeff Roberson 
1800cae33c14SJeff Roberson 	bucket_init();
18018355f576SJeff Roberson 
1802342f1793SAlan Cox 	booted = UMA_STARTUP;
18038355f576SJeff Roberson }
18048355f576SJeff Roberson 
18058355f576SJeff Roberson /* see uma.h */
18068355f576SJeff Roberson void
180799571dc3SJeff Roberson uma_startup2(void)
18088355f576SJeff Roberson {
1809342f1793SAlan Cox 	booted = UMA_STARTUP2;
181086bbae32SJeff Roberson 	bucket_enable();
181195c4bf75SKonstantin Belousov 	sx_init(&uma_drain_lock, "umadrain");
18128355f576SJeff Roberson }
18138355f576SJeff Roberson 
18148355f576SJeff Roberson /*
18158355f576SJeff Roberson  * Initialize our callout handle.
18178355f576SJeff Roberson  */
18188355f576SJeff Roberson 
18198355f576SJeff Roberson static void
18208355f576SJeff Roberson uma_startup3(void)
18218355f576SJeff Roberson {
18221431a748SGleb Smirnoff 
1823fd90e2edSJung-uk Kim 	callout_init(&uma_callout, 1);
18249643769aSJeff Roberson 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
18258355f576SJeff Roberson }
18268355f576SJeff Roberson 
1827e20a199fSJeff Roberson static uma_keg_t
1828099a0e58SBosko Milekic uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
182985dcf349SGleb Smirnoff 		int align, uint32_t flags)
1830099a0e58SBosko Milekic {
1831099a0e58SBosko Milekic 	struct uma_kctor_args args;
1832099a0e58SBosko Milekic 
1833099a0e58SBosko Milekic 	args.size = size;
1834099a0e58SBosko Milekic 	args.uminit = uminit;
1835099a0e58SBosko Milekic 	args.fini = fini;
18361e319f6dSRobert Watson 	args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align;
1837099a0e58SBosko Milekic 	args.flags = flags;
1838099a0e58SBosko Milekic 	args.zone = zone;
1839e20a199fSJeff Roberson 	return (zone_alloc_item(kegs, &args, M_WAITOK));
1840099a0e58SBosko Milekic }
1841099a0e58SBosko Milekic 
18428355f576SJeff Roberson /* See uma.h */
18431e319f6dSRobert Watson void
18441e319f6dSRobert Watson uma_set_align(int align)
18451e319f6dSRobert Watson {
18461e319f6dSRobert Watson 
18471e319f6dSRobert Watson 	if (align != UMA_ALIGN_CACHE)
18481e319f6dSRobert Watson 		uma_align_cache = align;
18491e319f6dSRobert Watson }
18501e319f6dSRobert Watson 
18511e319f6dSRobert Watson /* See uma.h */
18528355f576SJeff Roberson uma_zone_t
1853bb196eb4SMatthew D Fleming uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
185485dcf349SGleb Smirnoff 		uma_init uminit, uma_fini fini, int align, uint32_t flags)
18558355f576SJeff Roberson 
18568355f576SJeff Roberson {
18578355f576SJeff Roberson 	struct uma_zctor_args args;
185895c4bf75SKonstantin Belousov 	uma_zone_t res;
185995c4bf75SKonstantin Belousov 	bool locked;
18608355f576SJeff Roberson 
1861a5a35578SJohn Baldwin 	KASSERT(powerof2(align + 1), ("invalid zone alignment %d for \"%s\"",
1862a5a35578SJohn Baldwin 	    align, name));
1863a5a35578SJohn Baldwin 
18648355f576SJeff Roberson 	/* This stuff is essential for the zone ctor */
18650095a784SJeff Roberson 	memset(&args, 0, sizeof(args));
18668355f576SJeff Roberson 	args.name = name;
18678355f576SJeff Roberson 	args.size = size;
18688355f576SJeff Roberson 	args.ctor = ctor;
18698355f576SJeff Roberson 	args.dtor = dtor;
18708355f576SJeff Roberson 	args.uminit = uminit;
18718355f576SJeff Roberson 	args.fini = fini;
1872afc6dc36SJohn-Mark Gurney #ifdef  INVARIANTS
1873afc6dc36SJohn-Mark Gurney 	/*
1874afc6dc36SJohn-Mark Gurney 	 * If a zone is being created with no constructor or destructor,
1875afc6dc36SJohn-Mark Gurney 	 * substitute UMA's trash constructor/destructor, which checks
1876afc6dc36SJohn-Mark Gurney 	 * for use after free.
1877afc6dc36SJohn-Mark Gurney 	 */
187819c591bfSMateusz Guzik 	if ((!(flags & (UMA_ZONE_ZINIT | UMA_ZONE_NOFREE))) &&
187919c591bfSMateusz Guzik 	    ctor == NULL && dtor == NULL && uminit == NULL && fini == NULL) {
1880afc6dc36SJohn-Mark Gurney 		args.ctor = trash_ctor;
1881afc6dc36SJohn-Mark Gurney 		args.dtor = trash_dtor;
1882afc6dc36SJohn-Mark Gurney 		args.uminit = trash_init;
1883afc6dc36SJohn-Mark Gurney 		args.fini = trash_fini;
1884afc6dc36SJohn-Mark Gurney 	}
1885afc6dc36SJohn-Mark Gurney #endif
18868355f576SJeff Roberson 	args.align = align;
18878355f576SJeff Roberson 	args.flags = flags;
1888099a0e58SBosko Milekic 	args.keg = NULL;
1889099a0e58SBosko Milekic 
189095c4bf75SKonstantin Belousov 	if (booted < UMA_STARTUP2) {
189195c4bf75SKonstantin Belousov 		locked = false;
189295c4bf75SKonstantin Belousov 	} else {
189395c4bf75SKonstantin Belousov 		sx_slock(&uma_drain_lock);
189495c4bf75SKonstantin Belousov 		locked = true;
189595c4bf75SKonstantin Belousov 	}
189695c4bf75SKonstantin Belousov 	res = zone_alloc_item(zones, &args, M_WAITOK);
189795c4bf75SKonstantin Belousov 	if (locked)
189895c4bf75SKonstantin Belousov 		sx_sunlock(&uma_drain_lock);
189995c4bf75SKonstantin Belousov 	return (res);
1900099a0e58SBosko Milekic }
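
/*
 * A minimal usage sketch (the "foo" zone and its ctor are
 * hypothetical):
 *
 *	foo_zone = uma_zcreate("foo", sizeof(struct foo),
 *	    foo_ctor, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
 *	p = uma_zalloc(foo_zone, M_WAITOK);
 *	...
 *	uma_zfree(foo_zone, p);
 *	uma_zdestroy(foo_zone);
 */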
1901099a0e58SBosko Milekic 
1902099a0e58SBosko Milekic /* See uma.h */
1903099a0e58SBosko Milekic uma_zone_t
1904099a0e58SBosko Milekic uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
1905099a0e58SBosko Milekic 		    uma_init zinit, uma_fini zfini, uma_zone_t master)
1906099a0e58SBosko Milekic {
1907099a0e58SBosko Milekic 	struct uma_zctor_args args;
1908e20a199fSJeff Roberson 	uma_keg_t keg;
190995c4bf75SKonstantin Belousov 	uma_zone_t res;
191095c4bf75SKonstantin Belousov 	bool locked;
1911099a0e58SBosko Milekic 
1912e20a199fSJeff Roberson 	keg = zone_first_keg(master);
19130095a784SJeff Roberson 	memset(&args, 0, sizeof(args));
1914099a0e58SBosko Milekic 	args.name = name;
1915e20a199fSJeff Roberson 	args.size = keg->uk_size;
1916099a0e58SBosko Milekic 	args.ctor = ctor;
1917099a0e58SBosko Milekic 	args.dtor = dtor;
1918099a0e58SBosko Milekic 	args.uminit = zinit;
1919099a0e58SBosko Milekic 	args.fini = zfini;
1920e20a199fSJeff Roberson 	args.align = keg->uk_align;
1921e20a199fSJeff Roberson 	args.flags = keg->uk_flags | UMA_ZONE_SECONDARY;
1922e20a199fSJeff Roberson 	args.keg = keg;
19238355f576SJeff Roberson 
192495c4bf75SKonstantin Belousov 	if (booted < UMA_STARTUP2) {
192595c4bf75SKonstantin Belousov 		locked = false;
192695c4bf75SKonstantin Belousov 	} else {
192795c4bf75SKonstantin Belousov 		sx_slock(&uma_drain_lock);
192895c4bf75SKonstantin Belousov 		locked = true;
192995c4bf75SKonstantin Belousov 	}
1930e20a199fSJeff Roberson 	/* XXX Attaches only one keg of potentially many. */
193195c4bf75SKonstantin Belousov 	res = zone_alloc_item(zones, &args, M_WAITOK);
193295c4bf75SKonstantin Belousov 	if (locked)
193395c4bf75SKonstantin Belousov 		sx_sunlock(&uma_drain_lock);
193495c4bf75SKonstantin Belousov 	return (res);
19358355f576SJeff Roberson }
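
/*
 * A usage sketch modeled on the mbuf packet zone (names illustrative):
 * a secondary zone layers its own ctor/dtor pair over the master's
 * keg, so both zones carve items out of the same slabs:
 *
 *	zone_pack = uma_zsecond_create("mbuf_packet", mb_ctor_pack,
 *	    mb_dtor_pack, NULL, NULL, zone_mbuf);
 */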
19368355f576SJeff Roberson 
19370095a784SJeff Roberson /* See uma.h */
19380095a784SJeff Roberson uma_zone_t
1939af526374SJeff Roberson uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor,
1940af526374SJeff Roberson 		    uma_init zinit, uma_fini zfini, uma_import zimport,
1941af526374SJeff Roberson 		    uma_release zrelease, void *arg, int flags)
19420095a784SJeff Roberson {
19430095a784SJeff Roberson 	struct uma_zctor_args args;
19440095a784SJeff Roberson 
19450095a784SJeff Roberson 	memset(&args, 0, sizeof(args));
19460095a784SJeff Roberson 	args.name = name;
1947af526374SJeff Roberson 	args.size = size;
19480095a784SJeff Roberson 	args.ctor = ctor;
19490095a784SJeff Roberson 	args.dtor = dtor;
19500095a784SJeff Roberson 	args.uminit = zinit;
19510095a784SJeff Roberson 	args.fini = zfini;
19520095a784SJeff Roberson 	args.import = zimport;
19530095a784SJeff Roberson 	args.release = zrelease;
19540095a784SJeff Roberson 	args.arg = arg;
19550095a784SJeff Roberson 	args.align = 0;
19560095a784SJeff Roberson 	args.flags = flags;
19570095a784SJeff Roberson 
19580095a784SJeff Roberson 	return (zone_alloc_item(zones, &args, M_WAITOK));
19590095a784SJeff Roberson }
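
/*
 * A cache-only zone supplies its own item backend in place of a keg.
 * A sketch with hypothetical callbacks matching the uma_import and
 * uma_release typedefs:
 *
 *	static int
 *	foo_import(void *arg, void **store, int cnt, int flags)
 *	{
 *		int i;
 *
 *		for (i = 0; i < cnt; i++)
 *			if ((store[i] = foo_backend_get(flags)) == NULL)
 *				break;
 *		return (i);
 *	}
 *
 *	static void
 *	foo_release(void *arg, void **store, int cnt)
 *	{
 *		int i;
 *
 *		for (i = 0; i < cnt; i++)
 *			foo_backend_put(store[i]);
 *	}
 *
 *	zone = uma_zcache_create("foo", size, NULL, NULL, NULL, NULL,
 *	    foo_import, foo_release, NULL, 0);
 */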
19600095a784SJeff Roberson 
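/*
 * Lock a pair of zones in a consistent order, by ascending zone
 * address, so that two threads locking the same pair cannot deadlock.
 */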
1961e20a199fSJeff Roberson static void
1962e20a199fSJeff Roberson zone_lock_pair(uma_zone_t a, uma_zone_t b)
1963e20a199fSJeff Roberson {
1964e20a199fSJeff Roberson 	if (a < b) {
1965e20a199fSJeff Roberson 		ZONE_LOCK(a);
1966af526374SJeff Roberson 		mtx_lock_flags(b->uz_lockptr, MTX_DUPOK);
1967e20a199fSJeff Roberson 	} else {
1968e20a199fSJeff Roberson 		ZONE_LOCK(b);
1969af526374SJeff Roberson 		mtx_lock_flags(a->uz_lockptr, MTX_DUPOK);
1970e20a199fSJeff Roberson 	}
1971e20a199fSJeff Roberson }
1972e20a199fSJeff Roberson 
1973e20a199fSJeff Roberson static void
1974e20a199fSJeff Roberson zone_unlock_pair(uma_zone_t a, uma_zone_t b)
1975e20a199fSJeff Roberson {
1976e20a199fSJeff Roberson 
1977e20a199fSJeff Roberson 	ZONE_UNLOCK(a);
1978e20a199fSJeff Roberson 	ZONE_UNLOCK(b);
1979e20a199fSJeff Roberson }
1980e20a199fSJeff Roberson 
1981e20a199fSJeff Roberson int
1982e20a199fSJeff Roberson uma_zsecond_add(uma_zone_t zone, uma_zone_t master)
1983e20a199fSJeff Roberson {
1984e20a199fSJeff Roberson 	uma_klink_t klink;
1985e20a199fSJeff Roberson 	uma_klink_t kl;
1986e20a199fSJeff Roberson 	int error;
1987e20a199fSJeff Roberson 
1988e20a199fSJeff Roberson 	error = 0;
1989e20a199fSJeff Roberson 	klink = malloc(sizeof(*klink), M_TEMP, M_WAITOK | M_ZERO);
1990e20a199fSJeff Roberson 
1991e20a199fSJeff Roberson 	zone_lock_pair(zone, master);
1992e20a199fSJeff Roberson 	/*
1993e20a199fSJeff Roberson 	 * zone must use vtoslab() to resolve objects and must already be
1994e20a199fSJeff Roberson 	 * a secondary.
1995e20a199fSJeff Roberson 	 */
1996e20a199fSJeff Roberson 	if ((zone->uz_flags & (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY))
1997e20a199fSJeff Roberson 	    != (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) {
1998e20a199fSJeff Roberson 		error = EINVAL;
1999e20a199fSJeff Roberson 		goto out;
2000e20a199fSJeff Roberson 	}
2001e20a199fSJeff Roberson 	/*
2002e20a199fSJeff Roberson 	 * The new master must also use vtoslab().
2003e20a199fSJeff Roberson 	 */
2004e20a199fSJeff Roberson 	if ((zone->uz_flags & UMA_ZONE_VTOSLAB) != UMA_ZONE_VTOSLAB) {
2005e20a199fSJeff Roberson 		error = EINVAL;
2006e20a199fSJeff Roberson 		goto out;
2007e20a199fSJeff Roberson 	}
2008cfcae3f8SGleb Smirnoff 
2009e20a199fSJeff Roberson 	/*
2010e20a199fSJeff Roberson 	 * The underlying object must be the same size.  rsize
2011e20a199fSJeff Roberson 	 * may be different.
2012e20a199fSJeff Roberson 	 */
2013e20a199fSJeff Roberson 	if (master->uz_size != zone->uz_size) {
2014e20a199fSJeff Roberson 		error = E2BIG;
2015e20a199fSJeff Roberson 		goto out;
2016e20a199fSJeff Roberson 	}
2017e20a199fSJeff Roberson 	/*
2018e20a199fSJeff Roberson 	 * Put it at the end of the list.
2019e20a199fSJeff Roberson 	 */
2020e20a199fSJeff Roberson 	klink->kl_keg = zone_first_keg(master);
2021e20a199fSJeff Roberson 	LIST_FOREACH(kl, &zone->uz_kegs, kl_link) {
2022e20a199fSJeff Roberson 		if (LIST_NEXT(kl, kl_link) == NULL) {
2023e20a199fSJeff Roberson 			LIST_INSERT_AFTER(kl, klink, kl_link);
2024e20a199fSJeff Roberson 			break;
2025e20a199fSJeff Roberson 		}
2026e20a199fSJeff Roberson 	}
2027e20a199fSJeff Roberson 	klink = NULL;
2028e20a199fSJeff Roberson 	zone->uz_flags |= UMA_ZFLAG_MULTI;
2029e20a199fSJeff Roberson 	zone->uz_slab = zone_fetch_slab_multi;
2030e20a199fSJeff Roberson 
2031e20a199fSJeff Roberson out:
2032e20a199fSJeff Roberson 	zone_unlock_pair(zone, master);
2033e20a199fSJeff Roberson 	if (klink != NULL)
2034e20a199fSJeff Roberson 		free(klink, M_TEMP);
2035e20a199fSJeff Roberson 
2036e20a199fSJeff Roberson 	return (error);
2037e20a199fSJeff Roberson }
2038e20a199fSJeff Roberson 
2039e20a199fSJeff Roberson 
20408355f576SJeff Roberson /* See uma.h */
20419c2cd7e5SJeff Roberson void
20429c2cd7e5SJeff Roberson uma_zdestroy(uma_zone_t zone)
20439c2cd7e5SJeff Roberson {
2044f4ff923bSRobert Watson 
204595c4bf75SKonstantin Belousov 	sx_slock(&uma_drain_lock);
20460095a784SJeff Roberson 	zone_free_item(zones, zone, NULL, SKIP_NONE);
204795c4bf75SKonstantin Belousov 	sx_sunlock(&uma_drain_lock);
20489c2cd7e5SJeff Roberson }
20499c2cd7e5SJeff Roberson 
20508d6fbbb8SJeff Roberson void
20518d6fbbb8SJeff Roberson uma_zwait(uma_zone_t zone)
20528d6fbbb8SJeff Roberson {
20538d6fbbb8SJeff Roberson 	void *item;
20548d6fbbb8SJeff Roberson 
20558d6fbbb8SJeff Roberson 	item = uma_zalloc_arg(zone, NULL, M_WAITOK);
20568d6fbbb8SJeff Roberson 	uma_zfree(zone, item);
20578d6fbbb8SJeff Roberson }
20588d6fbbb8SJeff Roberson 
20599c2cd7e5SJeff Roberson /* See uma.h */
20608355f576SJeff Roberson void *
20612cc35ff9SJeff Roberson uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
20628355f576SJeff Roberson {
20638355f576SJeff Roberson 	void *item;
20648355f576SJeff Roberson 	uma_cache_t cache;
20658355f576SJeff Roberson 	uma_bucket_t bucket;
2066fc03d22bSJeff Roberson 	int lockfail;
20678355f576SJeff Roberson 	int cpu;
20688355f576SJeff Roberson 
2069e866d8f0SMark Murray 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
2070e866d8f0SMark Murray 	random_harvest_fast_uma(&zone, sizeof(zone), 1, RANDOM_UMA);
207110cb2424SMark Murray 
20728355f576SJeff Roberson 	/* This is the fast path allocation */
20731431a748SGleb Smirnoff 	CTR4(KTR_UMA, "uma_zalloc_arg thread %x zone %s(%p) flags %d",
20741431a748SGleb Smirnoff 	    curthread, zone->uz_name, zone, flags);
2075a553d4b8SJeff Roberson 
2076635fd505SRobert Watson 	if (flags & M_WAITOK) {
2077b23f72e9SBrian Feldman 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2078635fd505SRobert Watson 		    "uma_zalloc_arg: zone \"%s\"", zone->uz_name);
20794c1cc01cSJohn Baldwin 	}
2080d9e2e68dSMark Johnston 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
20811067a2baSJonathan T. Looney 	    ("uma_zalloc_arg: called with spinlock or critical section held"));
20821067a2baSJonathan T. Looney 
20838d689e04SGleb Smirnoff #ifdef DEBUG_MEMGUARD
20848d689e04SGleb Smirnoff 	if (memguard_cmp_zone(zone)) {
20858d689e04SGleb Smirnoff 		item = memguard_alloc(zone->uz_size, flags);
20868d689e04SGleb Smirnoff 		if (item != NULL) {
20878d689e04SGleb Smirnoff 			if (zone->uz_init != NULL &&
20888d689e04SGleb Smirnoff 			    zone->uz_init(item, zone->uz_size, flags) != 0)
20898d689e04SGleb Smirnoff 				return (NULL);
20908d689e04SGleb Smirnoff 			if (zone->uz_ctor != NULL &&
2091fc03d22bSJeff Roberson 			    zone->uz_ctor(item, zone->uz_size, udata,
2092fc03d22bSJeff Roberson 			    flags) != 0) {
20938d689e04SGleb Smirnoff 			    	zone->uz_fini(item, zone->uz_size);
20948d689e04SGleb Smirnoff 				return (NULL);
20958d689e04SGleb Smirnoff 			}
20968d689e04SGleb Smirnoff 			return (item);
20978d689e04SGleb Smirnoff 		}
20988d689e04SGleb Smirnoff 		/* This is unfortunate but should not be fatal. */
20998d689e04SGleb Smirnoff 	}
21008d689e04SGleb Smirnoff #endif
21015d1ae027SRobert Watson 	/*
21025d1ae027SRobert Watson 	 * If possible, allocate from the per-CPU cache.  There are two
21035d1ae027SRobert Watson 	 * requirements for safe access to the per-CPU cache: (1) the thread
21045d1ae027SRobert Watson 	 * accessing the cache must not be preempted or yield during access,
21055d1ae027SRobert Watson 	 * and (2) the thread must not migrate CPUs without switching which
21065d1ae027SRobert Watson 	 * cache it accesses.  We rely on a critical section to prevent
21075d1ae027SRobert Watson 	 * preemption and migration.  We release the critical section in
21085d1ae027SRobert Watson 	 * order to acquire the zone mutex if we are unable to allocate from
21095d1ae027SRobert Watson 	 * the current cache; when we re-acquire the critical section, we
21105d1ae027SRobert Watson 	 * must detect and handle migration if it has occurred.
21115d1ae027SRobert Watson 	 */
21125d1ae027SRobert Watson 	critical_enter();
21135d1ae027SRobert Watson 	cpu = curcpu;
21148355f576SJeff Roberson 	cache = &zone->uz_cpu[cpu];
21158355f576SJeff Roberson 
21168355f576SJeff Roberson zalloc_start:
21178355f576SJeff Roberson 	bucket = cache->uc_allocbucket;
2118fc03d22bSJeff Roberson 	if (bucket != NULL && bucket->ub_cnt > 0) {
2119cae33c14SJeff Roberson 		bucket->ub_cnt--;
2120cae33c14SJeff Roberson 		item = bucket->ub_bucket[bucket->ub_cnt];
21218355f576SJeff Roberson #ifdef INVARIANTS
2122cae33c14SJeff Roberson 		bucket->ub_bucket[bucket->ub_cnt] = NULL;
21238355f576SJeff Roberson #endif
2124fc03d22bSJeff Roberson 		KASSERT(item != NULL, ("uma_zalloc: Bucket pointer mangled."));
21258355f576SJeff Roberson 		cache->uc_allocs++;
21265d1ae027SRobert Watson 		critical_exit();
2127fc03d22bSJeff Roberson 		if (zone->uz_ctor != NULL &&
2128fc03d22bSJeff Roberson 		    zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
21290095a784SJeff Roberson 			atomic_add_long(&zone->uz_fails, 1);
2130fc03d22bSJeff Roberson 			zone_free_item(zone, item, udata, SKIP_DTOR);
2131b23f72e9SBrian Feldman 			return (NULL);
2132b23f72e9SBrian Feldman 		}
2133ef72505eSJeff Roberson #ifdef INVARIANTS
2134ef72505eSJeff Roberson 		uma_dbg_alloc(zone, NULL, item);
2135ef72505eSJeff Roberson #endif
21362cc35ff9SJeff Roberson 		if (flags & M_ZERO)
213748343a2fSGleb Smirnoff 			uma_zero_item(item, zone);
21388355f576SJeff Roberson 		return (item);
2139fc03d22bSJeff Roberson 	}
2140fc03d22bSJeff Roberson 
21418355f576SJeff Roberson 	/*
21428355f576SJeff Roberson 	 * We have run out of items in our alloc bucket.
21438355f576SJeff Roberson 	 * See if we can switch with our free bucket.
21448355f576SJeff Roberson 	 */
2145b983089aSJeff Roberson 	bucket = cache->uc_freebucket;
2146fc03d22bSJeff Roberson 	if (bucket != NULL && bucket->ub_cnt > 0) {
21471431a748SGleb Smirnoff 		CTR2(KTR_UMA,
21481431a748SGleb Smirnoff 		    "uma_zalloc: zone %s(%p) swapping empty with alloc",
21491431a748SGleb Smirnoff 		    zone->uz_name, zone);
21508355f576SJeff Roberson 		cache->uc_freebucket = cache->uc_allocbucket;
2151b983089aSJeff Roberson 		cache->uc_allocbucket = bucket;
21528355f576SJeff Roberson 		goto zalloc_start;
21538355f576SJeff Roberson 	}
2154fc03d22bSJeff Roberson 
2155fc03d22bSJeff Roberson 	/*
2156fc03d22bSJeff Roberson 	 * Discard any empty allocation bucket while we hold no locks.
2157fc03d22bSJeff Roberson 	 */
2158fc03d22bSJeff Roberson 	bucket = cache->uc_allocbucket;
2159fc03d22bSJeff Roberson 	cache->uc_allocbucket = NULL;
2160fc03d22bSJeff Roberson 	critical_exit();
2161fc03d22bSJeff Roberson 	if (bucket != NULL)
21626fd34d6fSJeff Roberson 		bucket_free(zone, bucket, udata);
2163fc03d22bSJeff Roberson 
2164fc03d22bSJeff Roberson 	/* Short-circuit for zones without buckets and low memory. */
2165fc03d22bSJeff Roberson 	if (zone->uz_count == 0 || bucketdisable)
2166fc03d22bSJeff Roberson 		goto zalloc_item;
2167fc03d22bSJeff Roberson 
21685d1ae027SRobert Watson 	/*
21695d1ae027SRobert Watson 	 * The attempt to retrieve the item from the per-CPU cache has failed, so
21705d1ae027SRobert Watson 	 * we must go back to the zone.  This requires the zone lock, so we
21715d1ae027SRobert Watson 	 * must drop the critical section, then re-acquire it when we go back
21725d1ae027SRobert Watson 	 * to the cache.  Since the critical section is released, we may be
21735d1ae027SRobert Watson 	 * preempted or migrate.  As such, make sure not to maintain any
21745d1ae027SRobert Watson 	 * thread-local state specific to the cache from prior to releasing
21755d1ae027SRobert Watson 	 * the critical section.
21765d1ae027SRobert Watson 	 */
2177fc03d22bSJeff Roberson 	lockfail = 0;
2178fc03d22bSJeff Roberson 	if (ZONE_TRYLOCK(zone) == 0) {
2179fc03d22bSJeff Roberson 		/* Record contention to size the buckets. */
2180a553d4b8SJeff Roberson 		ZONE_LOCK(zone);
2181fc03d22bSJeff Roberson 		lockfail = 1;
2182fc03d22bSJeff Roberson 	}
21835d1ae027SRobert Watson 	critical_enter();
21845d1ae027SRobert Watson 	cpu = curcpu;
21855d1ae027SRobert Watson 	cache = &zone->uz_cpu[cpu];
21865d1ae027SRobert Watson 
2187fc03d22bSJeff Roberson 	/*
2188fc03d22bSJeff Roberson 	 * Since we have locked the zone we may as well send back our stats.
2189fc03d22bSJeff Roberson 	 */
21900095a784SJeff Roberson 	atomic_add_long(&zone->uz_allocs, cache->uc_allocs);
21910095a784SJeff Roberson 	atomic_add_long(&zone->uz_frees, cache->uc_frees);
2192a553d4b8SJeff Roberson 	cache->uc_allocs = 0;
2193773df9abSRobert Watson 	cache->uc_frees = 0;
21948355f576SJeff Roberson 
2195fc03d22bSJeff Roberson 	/* See if we lost the race to fill the cache. */
2196fc03d22bSJeff Roberson 	if (cache->uc_allocbucket != NULL) {
2197fc03d22bSJeff Roberson 		ZONE_UNLOCK(zone);
2198fc03d22bSJeff Roberson 		goto zalloc_start;
2199a553d4b8SJeff Roberson 	}
22008355f576SJeff Roberson 
2201fc03d22bSJeff Roberson 	/*
2202fc03d22bSJeff Roberson 	 * Check the zone's cache of buckets.
2203fc03d22bSJeff Roberson 	 */
2204fc03d22bSJeff Roberson 	if ((bucket = LIST_FIRST(&zone->uz_buckets)) != NULL) {
2205cae33c14SJeff Roberson 		KASSERT(bucket->ub_cnt != 0,
2206a553d4b8SJeff Roberson 		    ("uma_zalloc_arg: Returning an empty bucket."));
22078355f576SJeff Roberson 
2208a553d4b8SJeff Roberson 		LIST_REMOVE(bucket, ub_link);
2209a553d4b8SJeff Roberson 		cache->uc_allocbucket = bucket;
2210a553d4b8SJeff Roberson 		ZONE_UNLOCK(zone);
22118355f576SJeff Roberson 		goto zalloc_start;
2212a553d4b8SJeff Roberson 	}
22135d1ae027SRobert Watson 	/* We are no longer associated with this CPU. */
22145d1ae027SRobert Watson 	critical_exit();
2215bbee39c6SJeff Roberson 
2216fc03d22bSJeff Roberson 	/*
2217fc03d22bSJeff Roberson 	 * We bump the uz count when the cache size is insufficient to
2218fc03d22bSJeff Roberson 	 * handle the working set.
2219fc03d22bSJeff Roberson 	 */
22206fd34d6fSJeff Roberson 	if (lockfail && zone->uz_count < BUCKET_MAX)
2221a553d4b8SJeff Roberson 		zone->uz_count++;
2222fc03d22bSJeff Roberson 	ZONE_UNLOCK(zone);
2223099a0e58SBosko Milekic 
22248355f576SJeff Roberson 	/*
2225a553d4b8SJeff Roberson 	 * Now let's just fill a bucket and put it on the free list.  If that
2226763df3ecSPedro F. Giffuni 	 * works we'll restart the allocation from the beginning and it
2227fc03d22bSJeff Roberson 	 * will use the just-filled bucket.
2228bbee39c6SJeff Roberson 	 */
22296fd34d6fSJeff Roberson 	bucket = zone_alloc_bucket(zone, udata, flags);
22301431a748SGleb Smirnoff 	CTR3(KTR_UMA, "uma_zalloc: zone %s(%p) bucket zone returned %p",
22311431a748SGleb Smirnoff 	    zone->uz_name, zone, bucket);
2232fc03d22bSJeff Roberson 	if (bucket != NULL) {
2233fc03d22bSJeff Roberson 		ZONE_LOCK(zone);
2234fc03d22bSJeff Roberson 		critical_enter();
2235fc03d22bSJeff Roberson 		cpu = curcpu;
2236fc03d22bSJeff Roberson 		cache = &zone->uz_cpu[cpu];
2237fc03d22bSJeff Roberson 		/*
2238fc03d22bSJeff Roberson 		 * See if we lost the race or were migrated.  Cache the
2239fc03d22bSJeff Roberson 		 * initialized bucket to make this less likely or claim
2240fc03d22bSJeff Roberson 		 * the memory directly.
2241fc03d22bSJeff Roberson 		 */
2242fc03d22bSJeff Roberson 		if (cache->uc_allocbucket == NULL)
2243fc03d22bSJeff Roberson 			cache->uc_allocbucket = bucket;
2244fc03d22bSJeff Roberson 		else
2245fc03d22bSJeff Roberson 			LIST_INSERT_HEAD(&zone->uz_buckets, bucket, ub_link);
2246bbee39c6SJeff Roberson 		ZONE_UNLOCK(zone);
2247fc03d22bSJeff Roberson 		goto zalloc_start;
2248bbee39c6SJeff Roberson 	}
2249fc03d22bSJeff Roberson 
2250bbee39c6SJeff Roberson 	/*
2251bbee39c6SJeff Roberson 	 * We may not be able to get a bucket so return an actual item.
2252bbee39c6SJeff Roberson 	 */
2253fc03d22bSJeff Roberson zalloc_item:
2254e20a199fSJeff Roberson 	item = zone_alloc_item(zone, udata, flags);
2255fc03d22bSJeff Roberson 
2256e20a199fSJeff Roberson 	return (item);
2257bbee39c6SJeff Roberson }
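
/*
 * Illustrative sketch of the calling convention ("foo_zone" and
 * "struct foo" are placeholder names): consumers normally reach
 * uma_zalloc_arg() through the uma_zalloc() wrapper and pair it with
 * uma_zfree():
 *
 *	struct foo *p;
 *
 *	p = uma_zalloc(foo_zone, M_NOWAIT | M_ZERO);
 *	if (p == NULL)
 *		return (ENOMEM);
 *	...
 *	uma_zfree(foo_zone, p);
 */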
2258bbee39c6SJeff Roberson 
2259bbee39c6SJeff Roberson static uma_slab_t
2260e20a199fSJeff Roberson keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int flags)
2261bbee39c6SJeff Roberson {
2262bbee39c6SJeff Roberson 	uma_slab_t slab;
22636fd34d6fSJeff Roberson 	int reserve;
2264099a0e58SBosko Milekic 
2265e20a199fSJeff Roberson 	mtx_assert(&keg->uk_lock, MA_OWNED);
2266bbee39c6SJeff Roberson 	slab = NULL;
22676fd34d6fSJeff Roberson 	reserve = 0;
22686fd34d6fSJeff Roberson 	if ((flags & M_USE_RESERVE) == 0)
22696fd34d6fSJeff Roberson 		reserve = keg->uk_reserve;
2270bbee39c6SJeff Roberson 
2271bbee39c6SJeff Roberson 	for (;;) {
2272bbee39c6SJeff Roberson 		/*
2273bbee39c6SJeff Roberson 		 * Find a slab with some space.  Prefer slabs that are partially
2274bbee39c6SJeff Roberson 		 * used over those that are totally free.  This helps to reduce
2275bbee39c6SJeff Roberson 		 * fragmentation.
2276bbee39c6SJeff Roberson 		 */
22776fd34d6fSJeff Roberson 		if (keg->uk_free > reserve) {
2278099a0e58SBosko Milekic 			if (!LIST_EMPTY(&keg->uk_part_slab)) {
2279099a0e58SBosko Milekic 				slab = LIST_FIRST(&keg->uk_part_slab);
2280bbee39c6SJeff Roberson 			} else {
2281099a0e58SBosko Milekic 				slab = LIST_FIRST(&keg->uk_free_slab);
2282bbee39c6SJeff Roberson 				LIST_REMOVE(slab, us_link);
2283099a0e58SBosko Milekic 				LIST_INSERT_HEAD(&keg->uk_part_slab, slab,
2284bbee39c6SJeff Roberson 				    us_link);
2285bbee39c6SJeff Roberson 			}
2286e20a199fSJeff Roberson 			MPASS(slab->us_keg == keg);
2287bbee39c6SJeff Roberson 			return (slab);
2288bbee39c6SJeff Roberson 		}
2289bbee39c6SJeff Roberson 
2290bbee39c6SJeff Roberson 		/*
2291bbee39c6SJeff Roberson 		 * M_NOVM means don't ask at all!
2292bbee39c6SJeff Roberson 		 */
2293bbee39c6SJeff Roberson 		if (flags & M_NOVM)
2294bbee39c6SJeff Roberson 			break;
2295bbee39c6SJeff Roberson 
2296e20a199fSJeff Roberson 		if (keg->uk_maxpages && keg->uk_pages >= keg->uk_maxpages) {
2297099a0e58SBosko Milekic 			keg->uk_flags |= UMA_ZFLAG_FULL;
2298e20a199fSJeff Roberson 			/*
2299e20a199fSJeff Roberson 			 * If this is not a multi-zone, set the FULL bit.
2300e20a199fSJeff Roberson 			 * Otherwise zone_fetch_slab_multi() takes care of it.
2301e20a199fSJeff Roberson 			 */
23022f891cd5SPawel Jakub Dawidek 			if ((zone->uz_flags & UMA_ZFLAG_MULTI) == 0) {
2303e20a199fSJeff Roberson 				zone->uz_flags |= UMA_ZFLAG_FULL;
23042f891cd5SPawel Jakub Dawidek 				zone_log_warning(zone);
230554503a13SJonathan T. Looney 				zone_maxaction(zone);
23062f891cd5SPawel Jakub Dawidek 			}
2307ebc85edfSJeff Roberson 			if (flags & M_NOWAIT)
2308bbee39c6SJeff Roberson 				break;
2309c288b548SEitan Adler 			zone->uz_sleeps++;
2310e20a199fSJeff Roberson 			msleep(keg, &keg->uk_lock, PVM, "keglimit", 0);
2311bbee39c6SJeff Roberson 			continue;
2312bbee39c6SJeff Roberson 		}
2313e20a199fSJeff Roberson 		slab = keg_alloc_slab(keg, zone, flags);
2314bbee39c6SJeff Roberson 		/*
2315bbee39c6SJeff Roberson 		 * If we got a slab here it's safe to mark it partially used
2316bbee39c6SJeff Roberson 		 * and return.  We assume that the caller is going to remove
2317bbee39c6SJeff Roberson 		 * at least one item.
2318bbee39c6SJeff Roberson 		 */
2319bbee39c6SJeff Roberson 		if (slab) {
2320e20a199fSJeff Roberson 			MPASS(slab->us_keg == keg);
2321099a0e58SBosko Milekic 			LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
2322bbee39c6SJeff Roberson 			return (slab);
2323bbee39c6SJeff Roberson 		}
2324bbee39c6SJeff Roberson 		/*
2325bbee39c6SJeff Roberson 		 * We might not have been able to get a slab but another CPU
2326bbee39c6SJeff Roberson 		 * could have while we were unlocked.  Check again before we
2327bbee39c6SJeff Roberson 		 * fail.
2328bbee39c6SJeff Roberson 		 */
2329bbee39c6SJeff Roberson 		flags |= M_NOVM;
2330bbee39c6SJeff Roberson 	}
2331bbee39c6SJeff Roberson 	return (slab);
2332bbee39c6SJeff Roberson }
2333bbee39c6SJeff Roberson 
2334e20a199fSJeff Roberson static uma_slab_t
2335e20a199fSJeff Roberson zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int flags)
2336e20a199fSJeff Roberson {
2337e20a199fSJeff Roberson 	uma_slab_t slab;
2338e20a199fSJeff Roberson 
2339af526374SJeff Roberson 	if (keg == NULL) {
2340e20a199fSJeff Roberson 		keg = zone_first_keg(zone);
2341af526374SJeff Roberson 		KEG_LOCK(keg);
2342af526374SJeff Roberson 	}
2343e20a199fSJeff Roberson 
2344e20a199fSJeff Roberson 	for (;;) {
2345e20a199fSJeff Roberson 		slab = keg_fetch_slab(keg, zone, flags);
2346e20a199fSJeff Roberson 		if (slab)
2347e20a199fSJeff Roberson 			return (slab);
2348e20a199fSJeff Roberson 		if (flags & (M_NOWAIT | M_NOVM))
2349e20a199fSJeff Roberson 			break;
2350e20a199fSJeff Roberson 	}
2351af526374SJeff Roberson 	KEG_UNLOCK(keg);
2352e20a199fSJeff Roberson 	return (NULL);
2353e20a199fSJeff Roberson }
2354e20a199fSJeff Roberson 
2355e20a199fSJeff Roberson /*
2356e20a199fSJeff Roberson  * zone_fetch_slab_multi:  Fetches a slab from one available keg.  Returns
2357af526374SJeff Roberson  * with the keg locked.  If NULL is returned, no lock is held.
2358e20a199fSJeff Roberson  *
2359e20a199fSJeff Roberson  * The last pointer is used to seed the search.  It is not required.
2360e20a199fSJeff Roberson  */
2361e20a199fSJeff Roberson static uma_slab_t
2362e20a199fSJeff Roberson zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int rflags)
2363e20a199fSJeff Roberson {
2364e20a199fSJeff Roberson 	uma_klink_t klink;
2365e20a199fSJeff Roberson 	uma_slab_t slab;
2366e20a199fSJeff Roberson 	uma_keg_t keg;
2367e20a199fSJeff Roberson 	int flags;
2368e20a199fSJeff Roberson 	int empty;
2369e20a199fSJeff Roberson 	int full;
2370e20a199fSJeff Roberson 
2371e20a199fSJeff Roberson 	/*
2372e20a199fSJeff Roberson 	 * Don't wait on the first pass.  This will skip limit tests
2373e20a199fSJeff Roberson 	 * as well.  We don't want to block if we can find a provider
2374e20a199fSJeff Roberson 	 * without blocking.
2375e20a199fSJeff Roberson 	 */
2376e20a199fSJeff Roberson 	flags = (rflags & ~M_WAITOK) | M_NOWAIT;
2377e20a199fSJeff Roberson 	/*
2378e20a199fSJeff Roberson 	 * Use the last slab allocated as a hint for where to start
2379e20a199fSJeff Roberson 	 * the search.
2380e20a199fSJeff Roberson 	 */
2381af526374SJeff Roberson 	if (last != NULL) {
2382e20a199fSJeff Roberson 		slab = keg_fetch_slab(last, zone, flags);
2383e20a199fSJeff Roberson 		if (slab)
2384e20a199fSJeff Roberson 			return (slab);
2385af526374SJeff Roberson 		KEG_UNLOCK(last);
2386e20a199fSJeff Roberson 	}
2387e20a199fSJeff Roberson 	/*
2388e20a199fSJeff Roberson 	 * Loop until we have a slab in case of transient failures
2389e20a199fSJeff Roberson 	 * while M_WAITOK is specified.  I'm not sure this is 100%
2390e20a199fSJeff Roberson 	 * required but we've done it for so long now.
2391e20a199fSJeff Roberson 	 */
2392e20a199fSJeff Roberson 	for (;;) {
2393e20a199fSJeff Roberson 		empty = 0;
2394e20a199fSJeff Roberson 		full = 0;
2395e20a199fSJeff Roberson 		/*
2396e20a199fSJeff Roberson 		 * Search the available kegs for slabs.  Be careful to hold the
2397e20a199fSJeff Roberson 		 * correct lock while calling into the keg layer.
2398e20a199fSJeff Roberson 		 */
2399e20a199fSJeff Roberson 		LIST_FOREACH(klink, &zone->uz_kegs, kl_link) {
2400e20a199fSJeff Roberson 			keg = klink->kl_keg;
2401af526374SJeff Roberson 			KEG_LOCK(keg);
2402e20a199fSJeff Roberson 			if ((keg->uk_flags & UMA_ZFLAG_FULL) == 0) {
2403e20a199fSJeff Roberson 				slab = keg_fetch_slab(keg, zone, flags);
2404e20a199fSJeff Roberson 				if (slab)
2405e20a199fSJeff Roberson 					return (slab);
2406e20a199fSJeff Roberson 			}
2407e20a199fSJeff Roberson 			if (keg->uk_flags & UMA_ZFLAG_FULL)
2408e20a199fSJeff Roberson 				full++;
2409e20a199fSJeff Roberson 			else
2410e20a199fSJeff Roberson 				empty++;
2411af526374SJeff Roberson 			KEG_UNLOCK(keg);
2412e20a199fSJeff Roberson 		}
2413e20a199fSJeff Roberson 		if (rflags & (M_NOWAIT | M_NOVM))
2414e20a199fSJeff Roberson 			break;
2415e20a199fSJeff Roberson 		flags = rflags;
2416e20a199fSJeff Roberson 		/*
2417e20a199fSJeff Roberson 		 * All kegs are full.  XXX We can't atomically check all kegs
2418e20a199fSJeff Roberson 		 * and sleep so just sleep for a short period and retry.
2419e20a199fSJeff Roberson 		 * and sleep, so just sleep for a short period and retry.
2420e20a199fSJeff Roberson 		if (full && !empty) {
2421af526374SJeff Roberson 			ZONE_LOCK(zone);
2422e20a199fSJeff Roberson 			zone->uz_flags |= UMA_ZFLAG_FULL;
2423bf965959SSean Bruno 			zone->uz_sleeps++;
24242f891cd5SPawel Jakub Dawidek 			zone_log_warning(zone);
242554503a13SJonathan T. Looney 			zone_maxaction(zone);
2426af526374SJeff Roberson 			msleep(zone, zone->uz_lockptr, PVM,
2427af526374SJeff Roberson 			    "zonelimit", hz/100);
2428e20a199fSJeff Roberson 			zone->uz_flags &= ~UMA_ZFLAG_FULL;
2429af526374SJeff Roberson 			ZONE_UNLOCK(zone);
2430e20a199fSJeff Roberson 			continue;
2431e20a199fSJeff Roberson 		}
2432e20a199fSJeff Roberson 	}
2433e20a199fSJeff Roberson 	return (NULL);
2434e20a199fSJeff Roberson }
2435e20a199fSJeff Roberson 
2436d56368d7SBosko Milekic static void *
24370095a784SJeff Roberson slab_alloc_item(uma_keg_t keg, uma_slab_t slab)
2438bbee39c6SJeff Roberson {
2439bbee39c6SJeff Roberson 	void *item;
244085dcf349SGleb Smirnoff 	uint8_t freei;
2441bbee39c6SJeff Roberson 
24420095a784SJeff Roberson 	MPASS(keg == slab->us_keg);
2443e20a199fSJeff Roberson 	mtx_assert(&keg->uk_lock, MA_OWNED);
2444099a0e58SBosko Milekic 
2445ef72505eSJeff Roberson 	freei = BIT_FFS(SLAB_SETSIZE, &slab->us_free) - 1;
2446ef72505eSJeff Roberson 	BIT_CLR(SLAB_SETSIZE, freei, &slab->us_free);
2447099a0e58SBosko Milekic 	item = slab->us_data + (keg->uk_rsize * freei);
2448bbee39c6SJeff Roberson 	slab->us_freecount--;
2449099a0e58SBosko Milekic 	keg->uk_free--;
2450ef72505eSJeff Roberson 
2451bbee39c6SJeff Roberson 	/* Move this slab to the full list */
2452bbee39c6SJeff Roberson 	if (slab->us_freecount == 0) {
2453bbee39c6SJeff Roberson 		LIST_REMOVE(slab, us_link);
2454099a0e58SBosko Milekic 		LIST_INSERT_HEAD(&keg->uk_full_slab, slab, us_link);
2455bbee39c6SJeff Roberson 	}
2456bbee39c6SJeff Roberson 
2457bbee39c6SJeff Roberson 	return (item);
2458bbee39c6SJeff Roberson }
2459bbee39c6SJeff Roberson 
2460bbee39c6SJeff Roberson static int
24610095a784SJeff Roberson zone_import(uma_zone_t zone, void **bucket, int max, int flags)
24620095a784SJeff Roberson {
24630095a784SJeff Roberson 	uma_slab_t slab;
24640095a784SJeff Roberson 	uma_keg_t keg;
24650095a784SJeff Roberson 	int i;
24660095a784SJeff Roberson 
24670095a784SJeff Roberson 	slab = NULL;
24680095a784SJeff Roberson 	keg = NULL;
2469af526374SJeff Roberson 	/* Try to keep the buckets totally full */
24700095a784SJeff Roberson 	for (i = 0; i < max; ) {
24710095a784SJeff Roberson 		if ((slab = zone->uz_slab(zone, keg, flags)) == NULL)
24720095a784SJeff Roberson 			break;
24730095a784SJeff Roberson 		keg = slab->us_keg;
24746fd34d6fSJeff Roberson 		while (slab->us_freecount && i < max) {
24750095a784SJeff Roberson 			bucket[i++] = slab_alloc_item(keg, slab);
24766fd34d6fSJeff Roberson 			if (keg->uk_free <= keg->uk_reserve)
24776fd34d6fSJeff Roberson 				break;
24786fd34d6fSJeff Roberson 		}
24796fd34d6fSJeff Roberson 		/* Don't grab more than one slab at a time. */
24800095a784SJeff Roberson 		flags &= ~M_WAITOK;
24810095a784SJeff Roberson 		flags |= M_NOWAIT;
24820095a784SJeff Roberson 	}
24830095a784SJeff Roberson 	if (slab != NULL)
24840095a784SJeff Roberson 		KEG_UNLOCK(keg);
24850095a784SJeff Roberson 
24860095a784SJeff Roberson 	return (i);
24870095a784SJeff Roberson }
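
/*
 * A minimal sketch of the importer contract assumed by the code above:
 * uz_import fills up to "cnt" slots of "store" and returns the number of
 * items produced.  "foo_import", "FOO_SIZE" and "M_FOO" are placeholder
 * names; a trivial importer over malloc(9) might look like:
 *
 *	static int
 *	foo_import(void *arg, void **store, int cnt, int flags)
 *	{
 *		int i;
 *
 *		for (i = 0; i < cnt; i++) {
 *			store[i] = malloc(FOO_SIZE, M_FOO, flags);
 *			if (store[i] == NULL)
 *				break;
 *		}
 *		return (i);
 *	}
 */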
24880095a784SJeff Roberson 
2489fc03d22bSJeff Roberson static uma_bucket_t
24906fd34d6fSJeff Roberson zone_alloc_bucket(uma_zone_t zone, void *udata, int flags)
2491bbee39c6SJeff Roberson {
2492bbee39c6SJeff Roberson 	uma_bucket_t bucket;
24930095a784SJeff Roberson 	int max;
2494bbee39c6SJeff Roberson 
24956fd34d6fSJeff Roberson 	/* Don't wait for buckets, preserve caller's NOVM setting. */
24966fd34d6fSJeff Roberson 	bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM));
24970095a784SJeff Roberson 	if (bucket == NULL)
2498f7104ccdSAlexander Motin 		return (NULL);
24990095a784SJeff Roberson 
2500af526374SJeff Roberson 	max = MIN(bucket->ub_entries, zone->uz_count);
25010095a784SJeff Roberson 	bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket,
25020095a784SJeff Roberson 	    max, flags);
25030095a784SJeff Roberson 
25040095a784SJeff Roberson 	/*
25050095a784SJeff Roberson 	 * Initialize the memory if necessary.
25060095a784SJeff Roberson 	 */
25070095a784SJeff Roberson 	if (bucket->ub_cnt != 0 && zone->uz_init != NULL) {
2508099a0e58SBosko Milekic 		int i;
2509bbee39c6SJeff Roberson 
25100095a784SJeff Roberson 		for (i = 0; i < bucket->ub_cnt; i++)
2511e20a199fSJeff Roberson 			if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size,
25120095a784SJeff Roberson 			    flags) != 0)
2513b23f72e9SBrian Feldman 				break;
2514b23f72e9SBrian Feldman 		/*
2515b23f72e9SBrian Feldman 		 * If we couldn't initialize the whole bucket, put the
2516b23f72e9SBrian Feldman 		 * rest back onto the freelist.
2517b23f72e9SBrian Feldman 		 */
2518b23f72e9SBrian Feldman 		if (i != bucket->ub_cnt) {
2519af526374SJeff Roberson 			zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i],
25200095a784SJeff Roberson 			    bucket->ub_cnt - i);
2521a5a262c6SBosko Milekic #ifdef INVARIANTS
25220095a784SJeff Roberson 			bzero(&bucket->ub_bucket[i],
25230095a784SJeff Roberson 			    sizeof(void *) * (bucket->ub_cnt - i));
2524a5a262c6SBosko Milekic #endif
2525b23f72e9SBrian Feldman 			bucket->ub_cnt = i;
2526b23f72e9SBrian Feldman 		}
2527099a0e58SBosko Milekic 	}
2528099a0e58SBosko Milekic 
2529f7104ccdSAlexander Motin 	if (bucket->ub_cnt == 0) {
25306fd34d6fSJeff Roberson 		bucket_free(zone, bucket, udata);
2531fc03d22bSJeff Roberson 		atomic_add_long(&zone->uz_fails, 1);
2532fc03d22bSJeff Roberson 		return (NULL);
2533bbee39c6SJeff Roberson 	}
2534fc03d22bSJeff Roberson 
2535fc03d22bSJeff Roberson 	return (bucket);
2536fc03d22bSJeff Roberson }
2537fc03d22bSJeff Roberson 
25388355f576SJeff Roberson /*
25390095a784SJeff Roberson  * Allocates a single item from a zone.
25408355f576SJeff Roberson  *
25418355f576SJeff Roberson  * Arguments
25428355f576SJeff Roberson  *	zone   The zone to alloc for.
25438355f576SJeff Roberson  *	udata  The data to be passed to the constructor.
2544a163d034SWarner Losh  *	flags  M_WAITOK, M_NOWAIT, M_ZERO.
25458355f576SJeff Roberson  *
25468355f576SJeff Roberson  * Returns
25478355f576SJeff Roberson  *	NULL if there is no memory and M_NOWAIT is set
2548bbee39c6SJeff Roberson  *	An item if successful
25498355f576SJeff Roberson  */
25508355f576SJeff Roberson 
25518355f576SJeff Roberson static void *
2552e20a199fSJeff Roberson zone_alloc_item(uma_zone_t zone, void *udata, int flags)
25538355f576SJeff Roberson {
25548355f576SJeff Roberson 	void *item;
25558355f576SJeff Roberson 
25568355f576SJeff Roberson 	item = NULL;
25578355f576SJeff Roberson 
25580095a784SJeff Roberson 	if (zone->uz_import(zone->uz_arg, &item, 1, flags) != 1)
25590095a784SJeff Roberson 		goto fail;
25600095a784SJeff Roberson 	atomic_add_long(&zone->uz_allocs, 1);
25618355f576SJeff Roberson 
2562099a0e58SBosko Milekic 	/*
2563099a0e58SBosko Milekic 	 * We have to call both the zone's init (not the keg's init)
2564099a0e58SBosko Milekic 	 * and the zone's ctor.  This is because the item is going from
2565099a0e58SBosko Milekic 	 * a keg slab directly to the user, and the user is expecting it
2566099a0e58SBosko Milekic 	 * to be both zone-init'd as well as zone-ctor'd.
2567099a0e58SBosko Milekic 	 */
2568b23f72e9SBrian Feldman 	if (zone->uz_init != NULL) {
2569e20a199fSJeff Roberson 		if (zone->uz_init(item, zone->uz_size, flags) != 0) {
25700095a784SJeff Roberson 			zone_free_item(zone, item, udata, SKIP_FINI);
25710095a784SJeff Roberson 			goto fail;
2572b23f72e9SBrian Feldman 		}
2573b23f72e9SBrian Feldman 	}
2574b23f72e9SBrian Feldman 	if (zone->uz_ctor != NULL) {
2575e20a199fSJeff Roberson 		if (zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
25760095a784SJeff Roberson 			zone_free_item(zone, item, udata, SKIP_DTOR);
25770095a784SJeff Roberson 			goto fail;
2578b23f72e9SBrian Feldman 		}
2579b23f72e9SBrian Feldman 	}
2580ef72505eSJeff Roberson #ifdef INVARIANTS
25810095a784SJeff Roberson 	uma_dbg_alloc(zone, NULL, item);
2582ef72505eSJeff Roberson #endif
25832cc35ff9SJeff Roberson 	if (flags & M_ZERO)
258448343a2fSGleb Smirnoff 		uma_zero_item(item, zone);
25858355f576SJeff Roberson 
25861431a748SGleb Smirnoff 	CTR3(KTR_UMA, "zone_alloc_item item %p from %s(%p)", item,
25871431a748SGleb Smirnoff 	    zone->uz_name, zone);
25881431a748SGleb Smirnoff 
25898355f576SJeff Roberson 	return (item);
25900095a784SJeff Roberson 
25910095a784SJeff Roberson fail:
25921431a748SGleb Smirnoff 	CTR2(KTR_UMA, "zone_alloc_item failed from %s(%p)",
25931431a748SGleb Smirnoff 	    zone->uz_name, zone);
25940095a784SJeff Roberson 	atomic_add_long(&zone->uz_fails, 1);
25950095a784SJeff Roberson 	return (NULL);
25968355f576SJeff Roberson }
25978355f576SJeff Roberson 
25988355f576SJeff Roberson /* See uma.h */
25998355f576SJeff Roberson void
26008355f576SJeff Roberson uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
26018355f576SJeff Roberson {
26028355f576SJeff Roberson 	uma_cache_t cache;
26038355f576SJeff Roberson 	uma_bucket_t bucket;
26044d104ba0SAlexander Motin 	int lockfail;
26058355f576SJeff Roberson 	int cpu;
26068355f576SJeff Roberson 
2607e866d8f0SMark Murray 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
2608e866d8f0SMark Murray 	random_harvest_fast_uma(&zone, sizeof(zone), 1, RANDOM_UMA);
260910cb2424SMark Murray 
26103659f747SRobert Watson 	CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread,
26113659f747SRobert Watson 	    zone->uz_name);
26123659f747SRobert Watson 
2613d9e2e68dSMark Johnston 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
26141067a2baSJonathan T. Looney 	    ("uma_zfree_arg: called with spinlock or critical section held"));
26151067a2baSJonathan T. Looney 
261620ed0cb0SMatthew D Fleming 	/* uma_zfree(..., NULL) does nothing, to match free(9). */
261720ed0cb0SMatthew D Fleming 	if (item == NULL)
261820ed0cb0SMatthew D Fleming 		return;
26198d689e04SGleb Smirnoff #ifdef DEBUG_MEMGUARD
26208d689e04SGleb Smirnoff 	if (is_memguard_addr(item)) {
2621bc9d08e1SMark Johnston 		if (zone->uz_dtor != NULL)
26228d689e04SGleb Smirnoff 			zone->uz_dtor(item, zone->uz_size, udata);
2623bc9d08e1SMark Johnston 		if (zone->uz_fini != NULL)
26248d689e04SGleb Smirnoff 			zone->uz_fini(item, zone->uz_size);
26258d689e04SGleb Smirnoff 		memguard_free(item);
26268d689e04SGleb Smirnoff 		return;
26278d689e04SGleb Smirnoff 	}
26288d689e04SGleb Smirnoff #endif
26295d1ae027SRobert Watson #ifdef INVARIANTS
2630e20a199fSJeff Roberson 	if (zone->uz_flags & UMA_ZONE_MALLOC)
26315d1ae027SRobert Watson 		uma_dbg_free(zone, udata, item);
26325d1ae027SRobert Watson 	else
26335d1ae027SRobert Watson 		uma_dbg_free(zone, NULL, item);
26345d1ae027SRobert Watson #endif
2635fc03d22bSJeff Roberson 	if (zone->uz_dtor != NULL)
2636ef72505eSJeff Roberson 		zone->uz_dtor(item, zone->uz_size, udata);
2637ef72505eSJeff Roberson 
2638af7f9b97SJeff Roberson 	/*
2639af7f9b97SJeff Roberson 	 * The race here is acceptable.  If we miss it we'll just have to wait
2640af7f9b97SJeff Roberson 	 * a little longer for the limits to be reset.
2641af7f9b97SJeff Roberson 	 */
2642e20a199fSJeff Roberson 	if (zone->uz_flags & UMA_ZFLAG_FULL)
2643fc03d22bSJeff Roberson 		goto zfree_item;
2644af7f9b97SJeff Roberson 
26455d1ae027SRobert Watson 	/*
26465d1ae027SRobert Watson 	 * If possible, free to the per-CPU cache.  There are two
26475d1ae027SRobert Watson 	 * requirements for safe access to the per-CPU cache: (1) the thread
26485d1ae027SRobert Watson 	 * accessing the cache must not be preempted or yield during access,
26495d1ae027SRobert Watson 	 * and (2) the thread must not migrate CPUs without switching which
26505d1ae027SRobert Watson 	 * cache it accesses.  We rely on a critical section to prevent
26515d1ae027SRobert Watson 	 * preemption and migration.  We release the critical section in
26525d1ae027SRobert Watson 	 * order to acquire the zone mutex if we are unable to free to the
26535d1ae027SRobert Watson 	 * current cache; when we re-acquire the critical section, we must
26545d1ae027SRobert Watson 	 * detect and handle migration if it has occurred.
26555d1ae027SRobert Watson 	 */
2656a553d4b8SJeff Roberson zfree_restart:
26575d1ae027SRobert Watson 	critical_enter();
26585d1ae027SRobert Watson 	cpu = curcpu;
26598355f576SJeff Roberson 	cache = &zone->uz_cpu[cpu];
26608355f576SJeff Roberson 
26618355f576SJeff Roberson zfree_start:
2662a553d4b8SJeff Roberson 	/*
2663fc03d22bSJeff Roberson 	 * Try to free into the allocbucket first to give LIFO ordering
2664fc03d22bSJeff Roberson 	 * for cache-hot data structures.  Spill over into the freebucket
2665fc03d22bSJeff Roberson 	 * if necessary.  Alloc will swap them if one runs dry.
2666a553d4b8SJeff Roberson 	 */
2667fc03d22bSJeff Roberson 	bucket = cache->uc_allocbucket;
2668fc03d22bSJeff Roberson 	if (bucket == NULL || bucket->ub_cnt >= bucket->ub_entries)
2669fc03d22bSJeff Roberson 		bucket = cache->uc_freebucket;
2670fc03d22bSJeff Roberson 	if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
2671cae33c14SJeff Roberson 		KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL,
26728355f576SJeff Roberson 		    ("uma_zfree: Freeing to non-free bucket index."));
2673cae33c14SJeff Roberson 		bucket->ub_bucket[bucket->ub_cnt] = item;
2674cae33c14SJeff Roberson 		bucket->ub_cnt++;
2675773df9abSRobert Watson 		cache->uc_frees++;
26765d1ae027SRobert Watson 		critical_exit();
26778355f576SJeff Roberson 		return;
2678fc03d22bSJeff Roberson 	}
2679fc03d22bSJeff Roberson 
26808355f576SJeff Roberson 	/*
26815d1ae027SRobert Watson 	 * We must go back to the zone, which requires acquiring the zone lock,
26825d1ae027SRobert Watson 	 * which in turn means we must release and re-acquire the critical
26835d1ae027SRobert Watson 	 * section.  Since the critical section is released, we may be
26845d1ae027SRobert Watson 	 * preempted or migrate.  As such, make sure not to maintain any
26855d1ae027SRobert Watson 	 * thread-local state specific to the cache from prior to releasing
26865d1ae027SRobert Watson 	 * the critical section.
26878355f576SJeff Roberson 	 */
26885d1ae027SRobert Watson 	critical_exit();
2689fc03d22bSJeff Roberson 	if (zone->uz_count == 0 || bucketdisable)
2690fc03d22bSJeff Roberson 		goto zfree_item;
2691fc03d22bSJeff Roberson 
26924d104ba0SAlexander Motin 	lockfail = 0;
26934d104ba0SAlexander Motin 	if (ZONE_TRYLOCK(zone) == 0) {
26944d104ba0SAlexander Motin 		/* Record contention to size the buckets. */
26958355f576SJeff Roberson 		ZONE_LOCK(zone);
26964d104ba0SAlexander Motin 		lockfail = 1;
26974d104ba0SAlexander Motin 	}
26985d1ae027SRobert Watson 	critical_enter();
26995d1ae027SRobert Watson 	cpu = curcpu;
27005d1ae027SRobert Watson 	cache = &zone->uz_cpu[cpu];
27018355f576SJeff Roberson 
2702fc03d22bSJeff Roberson 	/*
2703fc03d22bSJeff Roberson 	 * Since we have locked the zone we may as well send back our stats.
2704fc03d22bSJeff Roberson 	 */
27050095a784SJeff Roberson 	atomic_add_long(&zone->uz_allocs, cache->uc_allocs);
27060095a784SJeff Roberson 	atomic_add_long(&zone->uz_frees, cache->uc_frees);
2707f4ff923bSRobert Watson 	cache->uc_allocs = 0;
2708f4ff923bSRobert Watson 	cache->uc_frees = 0;
2709f4ff923bSRobert Watson 
27108355f576SJeff Roberson 	bucket = cache->uc_freebucket;
2711fc03d22bSJeff Roberson 	if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
2712fc03d22bSJeff Roberson 		ZONE_UNLOCK(zone);
2713fc03d22bSJeff Roberson 		goto zfree_start;
2714fc03d22bSJeff Roberson 	}
27158355f576SJeff Roberson 	cache->uc_freebucket = NULL;
2716afa5d703SMark Johnston 	/* We are no longer associated with this CPU. */
2717afa5d703SMark Johnston 	critical_exit();
27188355f576SJeff Roberson 
27198355f576SJeff Roberson 	/* Can we throw this on the zone full list? */
27208355f576SJeff Roberson 	if (bucket != NULL) {
27211431a748SGleb Smirnoff 		CTR3(KTR_UMA,
27221431a748SGleb Smirnoff 		    "uma_zfree: zone %s(%p) putting bucket %p on free list",
27231431a748SGleb Smirnoff 		    zone->uz_name, zone, bucket);
2724cae33c14SJeff Roberson 		/* ub_cnt is pointing to the last free item */
2725cae33c14SJeff Roberson 		KASSERT(bucket->ub_cnt != 0,
27268355f576SJeff Roberson 		    ("uma_zfree: Attempting to insert an empty bucket onto the full list."));
2727fc03d22bSJeff Roberson 		LIST_INSERT_HEAD(&zone->uz_buckets, bucket, ub_link);
27288355f576SJeff Roberson 	}
2729fc03d22bSJeff Roberson 
27304d104ba0SAlexander Motin 	/*
27314d104ba0SAlexander Motin 	 * We bump the uz count when the cache size is insufficient to
27324d104ba0SAlexander Motin 	 * handle the working set.
27334d104ba0SAlexander Motin 	 */
27344d104ba0SAlexander Motin 	if (lockfail && zone->uz_count < BUCKET_MAX)
27354d104ba0SAlexander Motin 		zone->uz_count++;
2736a553d4b8SJeff Roberson 	ZONE_UNLOCK(zone);
2737a553d4b8SJeff Roberson 
27386fd34d6fSJeff Roberson 	bucket = bucket_alloc(zone, udata, M_NOWAIT);
27391431a748SGleb Smirnoff 	CTR3(KTR_UMA, "uma_zfree: zone %s(%p) allocated bucket %p",
27401431a748SGleb Smirnoff 	    zone->uz_name, zone, bucket);
27414741dcbfSJeff Roberson 	if (bucket) {
2742fc03d22bSJeff Roberson 		critical_enter();
2743fc03d22bSJeff Roberson 		cpu = curcpu;
2744fc03d22bSJeff Roberson 		cache = &zone->uz_cpu[cpu];
2745fc03d22bSJeff Roberson 		if (cache->uc_freebucket == NULL) {
2746fc03d22bSJeff Roberson 			cache->uc_freebucket = bucket;
2747fc03d22bSJeff Roberson 			goto zfree_start;
2748fc03d22bSJeff Roberson 		}
2749fc03d22bSJeff Roberson 		/*
2750fc03d22bSJeff Roberson 		 * We lost the race, start over.  We have to drop our
2751fc03d22bSJeff Roberson 		 * critical section to free the bucket.
2752fc03d22bSJeff Roberson 		 */
2753fc03d22bSJeff Roberson 		critical_exit();
27546fd34d6fSJeff Roberson 		bucket_free(zone, bucket, udata);
2755a553d4b8SJeff Roberson 		goto zfree_restart;
27568355f576SJeff Roberson 	}
27578355f576SJeff Roberson 
2758a553d4b8SJeff Roberson 	/*
2759a553d4b8SJeff Roberson 	 * If nothing else caught this, we'll just do an internal free.
2760a553d4b8SJeff Roberson 	 */
2761fc03d22bSJeff Roberson zfree_item:
27620095a784SJeff Roberson 	zone_free_item(zone, item, udata, SKIP_DTOR);
27638355f576SJeff Roberson 
27648355f576SJeff Roberson 	return;
27658355f576SJeff Roberson }
27668355f576SJeff Roberson 
27678355f576SJeff Roberson static void
27680095a784SJeff Roberson slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item)
27698355f576SJeff Roberson {
277085dcf349SGleb Smirnoff 	uint8_t freei;
2771099a0e58SBosko Milekic 
27720095a784SJeff Roberson 	mtx_assert(&keg->uk_lock, MA_OWNED);
2773e20a199fSJeff Roberson 	MPASS(keg == slab->us_keg);
27748355f576SJeff Roberson 
27758355f576SJeff Roberson 	/* Do we need to remove from any lists? */
2776099a0e58SBosko Milekic 	if (slab->us_freecount + 1 == keg->uk_ipers) {
27778355f576SJeff Roberson 		LIST_REMOVE(slab, us_link);
2778099a0e58SBosko Milekic 		LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
27798355f576SJeff Roberson 	} else if (slab->us_freecount == 0) {
27808355f576SJeff Roberson 		LIST_REMOVE(slab, us_link);
2781099a0e58SBosko Milekic 		LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
27828355f576SJeff Roberson 	}
27838355f576SJeff Roberson 
2784ef72505eSJeff Roberson 	/* Slab management. */
2785ef72505eSJeff Roberson 	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
2786ef72505eSJeff Roberson 	BIT_SET(SLAB_SETSIZE, freei, &slab->us_free);
27878355f576SJeff Roberson 	slab->us_freecount++;
27888355f576SJeff Roberson 
2789ef72505eSJeff Roberson 	/* Keg statistics. */
2790099a0e58SBosko Milekic 	keg->uk_free++;
27910095a784SJeff Roberson }
27920095a784SJeff Roberson 
27930095a784SJeff Roberson static void
27940095a784SJeff Roberson zone_release(uma_zone_t zone, void **bucket, int cnt)
27950095a784SJeff Roberson {
27960095a784SJeff Roberson 	void *item;
27970095a784SJeff Roberson 	uma_slab_t slab;
27980095a784SJeff Roberson 	uma_keg_t keg;
27990095a784SJeff Roberson 	uint8_t *mem;
28000095a784SJeff Roberson 	int clearfull;
28010095a784SJeff Roberson 	int i;
28028355f576SJeff Roberson 
2803e20a199fSJeff Roberson 	clearfull = 0;
28040095a784SJeff Roberson 	keg = zone_first_keg(zone);
2805af526374SJeff Roberson 	KEG_LOCK(keg);
28060095a784SJeff Roberson 	for (i = 0; i < cnt; i++) {
28070095a784SJeff Roberson 		item = bucket[i];
28080095a784SJeff Roberson 		if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) {
28090095a784SJeff Roberson 			mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
28100095a784SJeff Roberson 			if (zone->uz_flags & UMA_ZONE_HASH) {
28110095a784SJeff Roberson 				slab = hash_sfind(&keg->uk_hash, mem);
28120095a784SJeff Roberson 			} else {
28130095a784SJeff Roberson 				mem += keg->uk_pgoff;
28140095a784SJeff Roberson 				slab = (uma_slab_t)mem;
28150095a784SJeff Roberson 			}
28160095a784SJeff Roberson 		} else {
28170095a784SJeff Roberson 			slab = vtoslab((vm_offset_t)item);
28180095a784SJeff Roberson 			if (slab->us_keg != keg) {
28190095a784SJeff Roberson 				KEG_UNLOCK(keg);
28200095a784SJeff Roberson 				keg = slab->us_keg;
28210095a784SJeff Roberson 				KEG_LOCK(keg);
28220095a784SJeff Roberson 			}
28230095a784SJeff Roberson 		}
28240095a784SJeff Roberson 		slab_free_item(keg, slab, item);
2825099a0e58SBosko Milekic 		if (keg->uk_flags & UMA_ZFLAG_FULL) {
2826e20a199fSJeff Roberson 			if (keg->uk_pages < keg->uk_maxpages) {
2827099a0e58SBosko Milekic 				keg->uk_flags &= ~UMA_ZFLAG_FULL;
2828e20a199fSJeff Roberson 				clearfull = 1;
2829e20a199fSJeff Roberson 			}
2830af7f9b97SJeff Roberson 
283177380291SMohan Srinivasan 			/*
2832ef72505eSJeff Roberson 			 * We can handle one more allocation. Since we're
2833ef72505eSJeff Roberson 			 * clearing ZFLAG_FULL, wake up all procs blocked
2834ef72505eSJeff Roberson 			 * on pages. This should be uncommon, so keeping this
2835ef72505eSJeff Roberson 			 * simple for now (rather than adding a count of blocked
283677380291SMohan Srinivasan 			 * threads, etc.).
283777380291SMohan Srinivasan 			 */
283877380291SMohan Srinivasan 			wakeup(keg);
2839af7f9b97SJeff Roberson 		}
28400095a784SJeff Roberson 	}
2841af526374SJeff Roberson 	KEG_UNLOCK(keg);
28420095a784SJeff Roberson 	if (clearfull) {
2843af526374SJeff Roberson 		ZONE_LOCK(zone);
2844e20a199fSJeff Roberson 		zone->uz_flags &= ~UMA_ZFLAG_FULL;
2845e20a199fSJeff Roberson 		wakeup(zone);
2846605cbd6aSJeff Roberson 		ZONE_UNLOCK(zone);
2847af526374SJeff Roberson 	}
2848ef72505eSJeff Roberson 
28498355f576SJeff Roberson }
28508355f576SJeff Roberson 
28510095a784SJeff Roberson /*
28520095a784SJeff Roberson  * Frees a single item to any zone.
28530095a784SJeff Roberson  *
28540095a784SJeff Roberson  * Arguments:
28550095a784SJeff Roberson  *	zone   The zone to free to
28560095a784SJeff Roberson  *	item   The item we're freeing
28570095a784SJeff Roberson  *	udata  User supplied data for the dtor
28580095a784SJeff Roberson  *	skip   Skip dtors and finis
28590095a784SJeff Roberson  */
28600095a784SJeff Roberson static void
28610095a784SJeff Roberson zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip)
28620095a784SJeff Roberson {
28630095a784SJeff Roberson 
28640095a784SJeff Roberson #ifdef INVARIANTS
28650095a784SJeff Roberson 	if (skip == SKIP_NONE) {
28660095a784SJeff Roberson 		if (zone->uz_flags & UMA_ZONE_MALLOC)
28670095a784SJeff Roberson 			uma_dbg_free(zone, udata, item);
28680095a784SJeff Roberson 		else
28690095a784SJeff Roberson 			uma_dbg_free(zone, NULL, item);
28700095a784SJeff Roberson 	}
28710095a784SJeff Roberson #endif
28720095a784SJeff Roberson 	if (skip < SKIP_DTOR && zone->uz_dtor)
28730095a784SJeff Roberson 		zone->uz_dtor(item, zone->uz_size, udata);
28740095a784SJeff Roberson 
28750095a784SJeff Roberson 	if (skip < SKIP_FINI && zone->uz_fini)
28760095a784SJeff Roberson 		zone->uz_fini(item, zone->uz_size);
28770095a784SJeff Roberson 
28780095a784SJeff Roberson 	atomic_add_long(&zone->uz_frees, 1);
28790095a784SJeff Roberson 	zone->uz_release(zone->uz_arg, &item, 1);
28800095a784SJeff Roberson }
28810095a784SJeff Roberson 
28828355f576SJeff Roberson /* See uma.h */
28831c6cae97SLawrence Stewart int
2884736ee590SJeff Roberson uma_zone_set_max(uma_zone_t zone, int nitems)
2885736ee590SJeff Roberson {
2886099a0e58SBosko Milekic 	uma_keg_t keg;
2887099a0e58SBosko Milekic 
2888e20a199fSJeff Roberson 	keg = zone_first_keg(zone);
28890095a784SJeff Roberson 	if (keg == NULL)
28900095a784SJeff Roberson 		return (0);
2891af526374SJeff Roberson 	KEG_LOCK(keg);
2892e20a199fSJeff Roberson 	keg->uk_maxpages = (nitems / keg->uk_ipers) * keg->uk_ppera;
2893099a0e58SBosko Milekic 	if (keg->uk_maxpages * keg->uk_ipers < nitems)
2894e20a199fSJeff Roberson 		keg->uk_maxpages += keg->uk_ppera;
289557223e99SAndriy Gapon 	nitems = (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers;
2896af526374SJeff Roberson 	KEG_UNLOCK(keg);
28971c6cae97SLawrence Stewart 
28981c6cae97SLawrence Stewart 	return (nitems);
2899736ee590SJeff Roberson }
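
/*
 * Worked example of the rounding above (figures are illustrative): with
 * uk_ipers = 10 items per slab and uk_ppera = 1 page per slab,
 * uma_zone_set_max(zone, 25) computes uk_maxpages = 2, bumps it to 3
 * because 2 * 10 < 25, and returns the effective limit of 30 items.
 */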
2900736ee590SJeff Roberson 
2901736ee590SJeff Roberson /* See uma.h */
2902e49471b0SAndre Oppermann int
2903e49471b0SAndre Oppermann uma_zone_get_max(uma_zone_t zone)
2904e49471b0SAndre Oppermann {
2905e49471b0SAndre Oppermann 	int nitems;
2906e49471b0SAndre Oppermann 	uma_keg_t keg;
2907e49471b0SAndre Oppermann 
2908e49471b0SAndre Oppermann 	keg = zone_first_keg(zone);
29090095a784SJeff Roberson 	if (keg == NULL)
29100095a784SJeff Roberson 		return (0);
2911af526374SJeff Roberson 	KEG_LOCK(keg);
291257223e99SAndriy Gapon 	nitems = (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers;
2913af526374SJeff Roberson 	KEG_UNLOCK(keg);
2914e49471b0SAndre Oppermann 
2915e49471b0SAndre Oppermann 	return (nitems);
2916e49471b0SAndre Oppermann }
2917e49471b0SAndre Oppermann 
2918e49471b0SAndre Oppermann /* See uma.h */
29192f891cd5SPawel Jakub Dawidek void
29202f891cd5SPawel Jakub Dawidek uma_zone_set_warning(uma_zone_t zone, const char *warning)
29212f891cd5SPawel Jakub Dawidek {
29222f891cd5SPawel Jakub Dawidek 
29232f891cd5SPawel Jakub Dawidek 	ZONE_LOCK(zone);
29242f891cd5SPawel Jakub Dawidek 	zone->uz_warning = warning;
29252f891cd5SPawel Jakub Dawidek 	ZONE_UNLOCK(zone);
29262f891cd5SPawel Jakub Dawidek }
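
/*
 * Example use (the message is illustrative): a zone owner can arrange
 * for an administrator-visible hint to be logged when the zone limit
 * is hit:
 *
 *	uma_zone_set_warning(zone, "consider tuning a related limit");
 */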
29272f891cd5SPawel Jakub Dawidek 
29282f891cd5SPawel Jakub Dawidek /* See uma.h */
292954503a13SJonathan T. Looney void
293054503a13SJonathan T. Looney uma_zone_set_maxaction(uma_zone_t zone, uma_maxaction_t maxaction)
293154503a13SJonathan T. Looney {
293254503a13SJonathan T. Looney 
293354503a13SJonathan T. Looney 	ZONE_LOCK(zone);
2934e60b2fcbSGleb Smirnoff 	TASK_INIT(&zone->uz_maxaction, 0, (task_fn_t *)maxaction, zone);
293554503a13SJonathan T. Looney 	ZONE_UNLOCK(zone);
293654503a13SJonathan T. Looney }
293754503a13SJonathan T. Looney 
293854503a13SJonathan T. Looney /* See uma.h */
2939c4ae7908SLawrence Stewart int
2940c4ae7908SLawrence Stewart uma_zone_get_cur(uma_zone_t zone)
2941c4ae7908SLawrence Stewart {
2942c4ae7908SLawrence Stewart 	int64_t nitems;
2943c4ae7908SLawrence Stewart 	u_int i;
2944c4ae7908SLawrence Stewart 
2945c4ae7908SLawrence Stewart 	ZONE_LOCK(zone);
2946c4ae7908SLawrence Stewart 	nitems = zone->uz_allocs - zone->uz_frees;
2947c4ae7908SLawrence Stewart 	CPU_FOREACH(i) {
2948c4ae7908SLawrence Stewart 		/*
2949c4ae7908SLawrence Stewart 		 * See the comment in sysctl_vm_zone_stats() regarding the
2950c4ae7908SLawrence Stewart 		 * safety of accessing the per-cpu caches. With the zone lock
2951c4ae7908SLawrence Stewart 		 * held, it is safe, but can potentially result in stale data.
2952c4ae7908SLawrence Stewart 		 */
2953c4ae7908SLawrence Stewart 		nitems += zone->uz_cpu[i].uc_allocs -
2954c4ae7908SLawrence Stewart 		    zone->uz_cpu[i].uc_frees;
2955c4ae7908SLawrence Stewart 	}
2956c4ae7908SLawrence Stewart 	ZONE_UNLOCK(zone);
2957c4ae7908SLawrence Stewart 
2958c4ae7908SLawrence Stewart 	return (nitems < 0 ? 0 : nitems);
2959c4ae7908SLawrence Stewart }
2960c4ae7908SLawrence Stewart 
2961c4ae7908SLawrence Stewart /* See uma.h */
2962736ee590SJeff Roberson void
2963099a0e58SBosko Milekic uma_zone_set_init(uma_zone_t zone, uma_init uminit)
2964099a0e58SBosko Milekic {
2965e20a199fSJeff Roberson 	uma_keg_t keg;
2966e20a199fSJeff Roberson 
2967e20a199fSJeff Roberson 	keg = zone_first_keg(zone);
29680095a784SJeff Roberson 	KASSERT(keg != NULL, ("uma_zone_set_init: Invalid zone type"));
2969af526374SJeff Roberson 	KEG_LOCK(keg);
2970e20a199fSJeff Roberson 	KASSERT(keg->uk_pages == 0,
2971099a0e58SBosko Milekic 	    ("uma_zone_set_init on non-empty keg"));
2972e20a199fSJeff Roberson 	keg->uk_init = uminit;
2973af526374SJeff Roberson 	KEG_UNLOCK(keg);
2974099a0e58SBosko Milekic }
2975099a0e58SBosko Milekic 
2976099a0e58SBosko Milekic /* See uma.h */
2977099a0e58SBosko Milekic void
2978099a0e58SBosko Milekic uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
2979099a0e58SBosko Milekic {
2980e20a199fSJeff Roberson 	uma_keg_t keg;
2981e20a199fSJeff Roberson 
2982e20a199fSJeff Roberson 	keg = zone_first_keg(zone);
29831d2c0c46SDmitry Chagin 	KASSERT(keg != NULL, ("uma_zone_set_fini: Invalid zone type"));
2984af526374SJeff Roberson 	KEG_LOCK(keg);
2985e20a199fSJeff Roberson 	KASSERT(keg->uk_pages == 0,
2986099a0e58SBosko Milekic 	    ("uma_zone_set_fini on non-empty keg"));
2987e20a199fSJeff Roberson 	keg->uk_fini = fini;
2988af526374SJeff Roberson 	KEG_UNLOCK(keg);
2989099a0e58SBosko Milekic }
2990099a0e58SBosko Milekic 
2991099a0e58SBosko Milekic /* See uma.h */
2992099a0e58SBosko Milekic void
2993099a0e58SBosko Milekic uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
2994099a0e58SBosko Milekic {
2995af526374SJeff Roberson 
2996099a0e58SBosko Milekic 	ZONE_LOCK(zone);
2997e20a199fSJeff Roberson 	KASSERT(zone_first_keg(zone)->uk_pages == 0,
2998099a0e58SBosko Milekic 	    ("uma_zone_set_zinit on non-empty keg"));
2999099a0e58SBosko Milekic 	zone->uz_init = zinit;
3000099a0e58SBosko Milekic 	ZONE_UNLOCK(zone);
3001099a0e58SBosko Milekic }
3002099a0e58SBosko Milekic 
3003099a0e58SBosko Milekic /* See uma.h */
3004099a0e58SBosko Milekic void
3005099a0e58SBosko Milekic uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
3006099a0e58SBosko Milekic {
3007af526374SJeff Roberson 
3008099a0e58SBosko Milekic 	ZONE_LOCK(zone);
3009e20a199fSJeff Roberson 	KASSERT(zone_first_keg(zone)->uk_pages == 0,
3010099a0e58SBosko Milekic 	    ("uma_zone_set_zfini on non-empty keg"));
3011099a0e58SBosko Milekic 	zone->uz_fini = zfini;
3012099a0e58SBosko Milekic 	ZONE_UNLOCK(zone);
3013099a0e58SBosko Milekic }
3014099a0e58SBosko Milekic 
3015099a0e58SBosko Milekic /* See uma.h */
3016b23f72e9SBrian Feldman /* XXX uk_freef is not actually used with the zone locked */
3017099a0e58SBosko Milekic void
30188355f576SJeff Roberson uma_zone_set_freef(uma_zone_t zone, uma_free freef)
30198355f576SJeff Roberson {
30200095a784SJeff Roberson 	uma_keg_t keg;
3021e20a199fSJeff Roberson 
30220095a784SJeff Roberson 	keg = zone_first_keg(zone);
30231d2c0c46SDmitry Chagin 	KASSERT(keg != NULL, ("uma_zone_set_freef: Invalid zone type"));
3024af526374SJeff Roberson 	KEG_LOCK(keg);
30250095a784SJeff Roberson 	keg->uk_freef = freef;
3026af526374SJeff Roberson 	KEG_UNLOCK(keg);
30278355f576SJeff Roberson }
30288355f576SJeff Roberson 
30298355f576SJeff Roberson /* See uma.h */
3030b23f72e9SBrian Feldman /* XXX uk_allocf is not actually used with the zone locked */
30318355f576SJeff Roberson void
30328355f576SJeff Roberson uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
30338355f576SJeff Roberson {
3034e20a199fSJeff Roberson 	uma_keg_t keg;
3035e20a199fSJeff Roberson 
3036e20a199fSJeff Roberson 	keg = zone_first_keg(zone);
3037af526374SJeff Roberson 	KEG_LOCK(keg);
3038e20a199fSJeff Roberson 	keg->uk_allocf = allocf;
3039af526374SJeff Roberson 	KEG_UNLOCK(keg);
30408355f576SJeff Roberson }
30418355f576SJeff Roberson 
30428355f576SJeff Roberson /* See uma.h */
30436fd34d6fSJeff Roberson void
30446fd34d6fSJeff Roberson uma_zone_reserve(uma_zone_t zone, int items)
30456fd34d6fSJeff Roberson {
30466fd34d6fSJeff Roberson 	uma_keg_t keg;
30476fd34d6fSJeff Roberson 
30486fd34d6fSJeff Roberson 	keg = zone_first_keg(zone);
30496fd34d6fSJeff Roberson 	if (keg == NULL)
30506fd34d6fSJeff Roberson 		return;
30516fd34d6fSJeff Roberson 	KEG_LOCK(keg);
30526fd34d6fSJeff Roberson 	keg->uk_reserve = items;
30536fd34d6fSJeff Roberson 	KEG_UNLOCK(keg);
30546fd34d6fSJeff Roberson 
30556fd34d6fSJeff Roberson 	return;
30566fd34d6fSJeff Roberson }
30576fd34d6fSJeff Roberson 
30586fd34d6fSJeff Roberson /* See uma.h */
30598355f576SJeff Roberson int
3060a4915c21SAttilio Rao uma_zone_reserve_kva(uma_zone_t zone, int count)
30618355f576SJeff Roberson {
3062099a0e58SBosko Milekic 	uma_keg_t keg;
30638355f576SJeff Roberson 	vm_offset_t kva;
30649ba30bcbSZbigniew Bodek 	u_int pages;
30658355f576SJeff Roberson 
3066e20a199fSJeff Roberson 	keg = zone_first_keg(zone);
30670095a784SJeff Roberson 	if (keg == NULL)
30680095a784SJeff Roberson 		return (0);
3069099a0e58SBosko Milekic 	pages = count / keg->uk_ipers;
30708355f576SJeff Roberson 
3071099a0e58SBosko Milekic 	if (pages * keg->uk_ipers < count)
30728355f576SJeff Roberson 		pages++;
307357223e99SAndriy Gapon 	pages *= keg->uk_ppera;
3074a553d4b8SJeff Roberson 
3075a4915c21SAttilio Rao #ifdef UMA_MD_SMALL_ALLOC
3076a4915c21SAttilio Rao 	if (keg->uk_ppera > 1) {
3077a4915c21SAttilio Rao #else
3078a4915c21SAttilio Rao 	if (1) {
3079a4915c21SAttilio Rao #endif
308057223e99SAndriy Gapon 		kva = kva_alloc((vm_size_t)pages * PAGE_SIZE);
3081d1f42ac2SAlan Cox 		if (kva == 0)
30828355f576SJeff Roberson 			return (0);
3083a4915c21SAttilio Rao 	} else
3084a4915c21SAttilio Rao 		kva = 0;
3085af526374SJeff Roberson 	KEG_LOCK(keg);
3086099a0e58SBosko Milekic 	keg->uk_kva = kva;
3087a4915c21SAttilio Rao 	keg->uk_offset = 0;
3088099a0e58SBosko Milekic 	keg->uk_maxpages = pages;
3089a4915c21SAttilio Rao #ifdef UMA_MD_SMALL_ALLOC
3090a4915c21SAttilio Rao 	keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc;
3091a4915c21SAttilio Rao #else
3092a4915c21SAttilio Rao 	keg->uk_allocf = noobj_alloc;
3093a4915c21SAttilio Rao #endif
30946fd34d6fSJeff Roberson 	keg->uk_flags |= UMA_ZONE_NOFREE;
3095af526374SJeff Roberson 	KEG_UNLOCK(keg);
3096af526374SJeff Roberson 
30978355f576SJeff Roberson 	return (1);
30988355f576SJeff Roberson }
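
/*
 * Example of the page computation above (illustrative figures): with
 * uk_ipers = 50 and uk_ppera = 1, uma_zone_reserve_kva(zone, 120)
 * computes 120 / 50 = 2 slabs, rounds up to 3 because 2 * 50 < 120,
 * and so reserves 3 pages of KVA.
 */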
30998355f576SJeff Roberson 
31008355f576SJeff Roberson /* See uma.h */
31018355f576SJeff Roberson void
31028355f576SJeff Roberson uma_prealloc(uma_zone_t zone, int items)
31038355f576SJeff Roberson {
31048355f576SJeff Roberson 	int slabs;
31058355f576SJeff Roberson 	uma_slab_t slab;
3106099a0e58SBosko Milekic 	uma_keg_t keg;
31078355f576SJeff Roberson 
3108e20a199fSJeff Roberson 	keg = zone_first_keg(zone);
31090095a784SJeff Roberson 	if (keg == NULL)
31100095a784SJeff Roberson 		return;
3111af526374SJeff Roberson 	KEG_LOCK(keg);
3112099a0e58SBosko Milekic 	slabs = items / keg->uk_ipers;
3113099a0e58SBosko Milekic 	if (slabs * keg->uk_ipers < items)
31148355f576SJeff Roberson 		slabs++;
31158355f576SJeff Roberson 	while (slabs > 0) {
3116e20a199fSJeff Roberson 		slab = keg_alloc_slab(keg, zone, M_WAITOK);
3117e20a199fSJeff Roberson 		if (slab == NULL)
3118e20a199fSJeff Roberson 			break;
3119e20a199fSJeff Roberson 		MPASS(slab->us_keg == keg);
3120099a0e58SBosko Milekic 		LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
31218355f576SJeff Roberson 		slabs--;
31228355f576SJeff Roberson 	}
3123af526374SJeff Roberson 	KEG_UNLOCK(keg);
31248355f576SJeff Roberson }
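
/*
 * Worked example (illustrative figures): uma_prealloc(zone, 100) on a
 * keg with uk_ipers = 30 computes 100 / 30 = 3 slabs, rounds up to 4
 * because 3 * 30 < 100, and so allocates 4 slabs up front.
 */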
31258355f576SJeff Roberson 
31268355f576SJeff Roberson /* See uma.h */
312744ec2b63SKonstantin Belousov static void
312844ec2b63SKonstantin Belousov uma_reclaim_locked(bool kmem_danger)
31298355f576SJeff Roberson {
313044ec2b63SKonstantin Belousov 
31311431a748SGleb Smirnoff 	CTR0(KTR_UMA, "UMA: vm asked us to release pages!");
313244ec2b63SKonstantin Belousov 	sx_assert(&uma_drain_lock, SA_XLOCKED);
313386bbae32SJeff Roberson 	bucket_enable();
31348355f576SJeff Roberson 	zone_foreach(zone_drain);
313544ec2b63SKonstantin Belousov 	if (vm_page_count_min() || kmem_danger) {
3136a2de44abSAlexander Motin 		cache_drain_safe(NULL);
3137a2de44abSAlexander Motin 		zone_foreach(zone_drain);
3138a2de44abSAlexander Motin 	}
31398355f576SJeff Roberson 	/*
31408355f576SJeff Roberson 	 * Some slabs may have been freed but this zone was visited early, so
31418355f576SJeff Roberson 	 * visit it again to free pages that became empty once the other
31428355f576SJeff Roberson 	 * zones were drained.  We have to do the same for buckets.
31438355f576SJeff Roberson 	 */
31449643769aSJeff Roberson 	zone_drain(slabzone);
3145cae33c14SJeff Roberson 	bucket_zone_drain();
314644ec2b63SKonstantin Belousov }
314744ec2b63SKonstantin Belousov 
314844ec2b63SKonstantin Belousov void
314944ec2b63SKonstantin Belousov uma_reclaim(void)
315044ec2b63SKonstantin Belousov {
315144ec2b63SKonstantin Belousov 
315244ec2b63SKonstantin Belousov 	sx_xlock(&uma_drain_lock);
315344ec2b63SKonstantin Belousov 	uma_reclaim_locked(false);
315495c4bf75SKonstantin Belousov 	sx_xunlock(&uma_drain_lock);
31558355f576SJeff Roberson }
31568355f576SJeff Roberson 
31572e47807cSJeff Roberson static volatile int uma_reclaim_needed;
315844ec2b63SKonstantin Belousov 
315944ec2b63SKonstantin Belousov void
316044ec2b63SKonstantin Belousov uma_reclaim_wakeup(void)
316144ec2b63SKonstantin Belousov {
316244ec2b63SKonstantin Belousov 
31632e47807cSJeff Roberson 	if (atomic_fetchadd_int(&uma_reclaim_needed, 1) == 0)
31642e47807cSJeff Roberson 		wakeup(uma_reclaim);
316544ec2b63SKonstantin Belousov }
316644ec2b63SKonstantin Belousov 
316744ec2b63SKonstantin Belousov void
316844ec2b63SKonstantin Belousov uma_reclaim_worker(void *arg __unused)
316944ec2b63SKonstantin Belousov {
317044ec2b63SKonstantin Belousov 
317144ec2b63SKonstantin Belousov 	for (;;) {
31722e47807cSJeff Roberson 		sx_xlock(&uma_drain_lock);
3173*200f8117SKonstantin Belousov 		while (atomic_load_int(&uma_reclaim_needed) == 0)
31742e47807cSJeff Roberson 			sx_sleep(uma_reclaim, &uma_drain_lock, PVM, "umarcl",
31752e47807cSJeff Roberson 			    hz);
31769b43bc27SAndriy Gapon 		sx_xunlock(&uma_drain_lock);
31779b43bc27SAndriy Gapon 		EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_KMEM);
31789b43bc27SAndriy Gapon 		sx_xlock(&uma_drain_lock);
317944ec2b63SKonstantin Belousov 		uma_reclaim_locked(true);
3180*200f8117SKonstantin Belousov 		atomic_store_int(&uma_reclaim_needed, 0);
31812e47807cSJeff Roberson 		sx_xunlock(&uma_drain_lock);
31822e47807cSJeff Roberson 		/* Don't fire more than once per-second. */
31832e47807cSJeff Roberson 		pause("umarclslp", hz);
318444ec2b63SKonstantin Belousov 	}
318544ec2b63SKonstantin Belousov }
318644ec2b63SKonstantin Belousov 
3187663b416fSJohn Baldwin /* See uma.h */
3188663b416fSJohn Baldwin int
3189663b416fSJohn Baldwin uma_zone_exhausted(uma_zone_t zone)
3190663b416fSJohn Baldwin {
3191663b416fSJohn Baldwin 	int full;
3192663b416fSJohn Baldwin 
3193663b416fSJohn Baldwin 	ZONE_LOCK(zone);
3194e20a199fSJeff Roberson 	full = (zone->uz_flags & UMA_ZFLAG_FULL);
3195663b416fSJohn Baldwin 	ZONE_UNLOCK(zone);
3196663b416fSJohn Baldwin 	return (full);
3197663b416fSJohn Baldwin }
3198663b416fSJohn Baldwin 
31996c125b8dSMohan Srinivasan int
32006c125b8dSMohan Srinivasan uma_zone_exhausted_nolock(uma_zone_t zone)
32016c125b8dSMohan Srinivasan {
3202e20a199fSJeff Roberson 	return (zone->uz_flags & UMA_ZFLAG_FULL);
32036c125b8dSMohan Srinivasan }
32046c125b8dSMohan Srinivasan 
32058355f576SJeff Roberson void *
3206f2c2231eSRyan Stone uma_large_malloc(vm_size_t size, int wait)
32078355f576SJeff Roberson {
32088355f576SJeff Roberson 	void *mem;
32098355f576SJeff Roberson 	uma_slab_t slab;
321085dcf349SGleb Smirnoff 	uint8_t flags;
32118355f576SJeff Roberson 
3212e20a199fSJeff Roberson 	slab = zone_alloc_item(slabzone, NULL, wait);
32138355f576SJeff Roberson 	if (slab == NULL)
32148355f576SJeff Roberson 		return (NULL);
32158355f576SJeff Roberson 	mem = page_alloc(NULL, size, &flags, wait);
32168355f576SJeff Roberson 	if (mem) {
321799571dc3SJeff Roberson 		vsetslab((vm_offset_t)mem, slab);
32188355f576SJeff Roberson 		slab->us_data = mem;
32198355f576SJeff Roberson 		slab->us_flags = flags | UMA_SLAB_MALLOC;
32208355f576SJeff Roberson 		slab->us_size = size;
32212e47807cSJeff Roberson 		uma_total_inc(size);
32228355f576SJeff Roberson 	} else {
32230095a784SJeff Roberson 		zone_free_item(slabzone, slab, NULL, SKIP_NONE);
32248355f576SJeff Roberson 	}
32258355f576SJeff Roberson 
32268355f576SJeff Roberson 	return (mem);
32278355f576SJeff Roberson }
32288355f576SJeff Roberson 
32298355f576SJeff Roberson void
32308355f576SJeff Roberson uma_large_free(uma_slab_t slab)
32318355f576SJeff Roberson {
3232c325e866SKonstantin Belousov 
32338355f576SJeff Roberson 	page_free(slab->us_data, slab->us_size, slab->us_flags);
32342e47807cSJeff Roberson 	uma_total_dec(slab->us_size);
32350095a784SJeff Roberson 	zone_free_item(slabzone, slab, NULL, SKIP_NONE);
32368355f576SJeff Roberson }
32378355f576SJeff Roberson 
323848343a2fSGleb Smirnoff static void
323948343a2fSGleb Smirnoff uma_zero_item(void *item, uma_zone_t zone)
324048343a2fSGleb Smirnoff {
324196c85efbSNathan Whitehorn 	int i;
324248343a2fSGleb Smirnoff 
324348343a2fSGleb Smirnoff 	if (zone->uz_flags & UMA_ZONE_PCPU) {
324496c85efbSNathan Whitehorn 		CPU_FOREACH(i)
324548343a2fSGleb Smirnoff 			bzero(zpcpu_get_cpu(item, i), zone->uz_size);
324648343a2fSGleb Smirnoff 	} else
324748343a2fSGleb Smirnoff 		bzero(item, zone->uz_size);
324848343a2fSGleb Smirnoff }
324948343a2fSGleb Smirnoff 
32502e47807cSJeff Roberson unsigned long
32512e47807cSJeff Roberson uma_limit(void)
32522e47807cSJeff Roberson {
32532e47807cSJeff Roberson 
32542e47807cSJeff Roberson 	return (uma_kmem_limit);
32552e47807cSJeff Roberson }
32562e47807cSJeff Roberson 
32572e47807cSJeff Roberson void
32582e47807cSJeff Roberson uma_set_limit(unsigned long limit)
32592e47807cSJeff Roberson {
32602e47807cSJeff Roberson 
32612e47807cSJeff Roberson 	uma_kmem_limit = limit;
32622e47807cSJeff Roberson }
32632e47807cSJeff Roberson 
32642e47807cSJeff Roberson unsigned long
32652e47807cSJeff Roberson uma_size(void)
32662e47807cSJeff Roberson {
32672e47807cSJeff Roberson 
32682e47807cSJeff Roberson 	return (uma_kmem_total);
32692e47807cSJeff Roberson }
32702e47807cSJeff Roberson 
32718355f576SJeff Roberson void
32728355f576SJeff Roberson uma_print_stats(void)
32738355f576SJeff Roberson {
32748355f576SJeff Roberson 	zone_foreach(uma_print_zone);
32758355f576SJeff Roberson }
32768355f576SJeff Roberson 
3277504d5de3SJeff Roberson static void
3278504d5de3SJeff Roberson slab_print(uma_slab_t slab)
3279504d5de3SJeff Roberson {
3280ef72505eSJeff Roberson 	printf("slab: keg %p, data %p, freecount %d\n",
3281ef72505eSJeff Roberson 		slab->us_keg, slab->us_data, slab->us_freecount);
3282504d5de3SJeff Roberson }
3283504d5de3SJeff Roberson 
3284504d5de3SJeff Roberson static void
3285504d5de3SJeff Roberson cache_print(uma_cache_t cache)
3286504d5de3SJeff Roberson {
3287504d5de3SJeff Roberson 	printf("alloc: %p(%d), free: %p(%d)\n",
3288504d5de3SJeff Roberson 		cache->uc_allocbucket,
3289504d5de3SJeff Roberson 		cache->uc_allocbucket ? cache->uc_allocbucket->ub_cnt : 0,
3290504d5de3SJeff Roberson 		cache->uc_freebucket,
3291504d5de3SJeff Roberson 		cache->uc_freebucket ? cache->uc_freebucket->ub_cnt : 0);
3292504d5de3SJeff Roberson }
3293504d5de3SJeff Roberson 
3294e20a199fSJeff Roberson static void
3295e20a199fSJeff Roberson uma_print_keg(uma_keg_t keg)
32968355f576SJeff Roberson {
3297504d5de3SJeff Roberson 	uma_slab_t slab;
3298504d5de3SJeff Roberson 
32990b80c1e4SEitan Adler 	printf("keg: %s(%p) size %d(%d) flags %#x ipers %d ppera %d "
3300e20a199fSJeff Roberson 	    "out %d free %d limit %d\n",
3301e20a199fSJeff Roberson 	    keg->uk_name, keg, keg->uk_size, keg->uk_rsize, keg->uk_flags,
3302099a0e58SBosko Milekic 	    keg->uk_ipers, keg->uk_ppera,
330357223e99SAndriy Gapon 	    (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free,
330457223e99SAndriy Gapon 	    keg->uk_free, (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers);
3305504d5de3SJeff Roberson 	printf("Part slabs:\n");
3306099a0e58SBosko Milekic 	LIST_FOREACH(slab, &keg->uk_part_slab, us_link)
3307504d5de3SJeff Roberson 		slab_print(slab);
3308504d5de3SJeff Roberson 	printf("Free slabs:\n");
3309099a0e58SBosko Milekic 	LIST_FOREACH(slab, &keg->uk_free_slab, us_link)
3310504d5de3SJeff Roberson 		slab_print(slab);
3311504d5de3SJeff Roberson 	printf("Full slabs:\n");
3312099a0e58SBosko Milekic 	LIST_FOREACH(slab, &keg->uk_full_slab, us_link)
3313504d5de3SJeff Roberson 		slab_print(slab);
3314e20a199fSJeff Roberson }
3315e20a199fSJeff Roberson 
3316e20a199fSJeff Roberson void
3317e20a199fSJeff Roberson uma_print_zone(uma_zone_t zone)
3318e20a199fSJeff Roberson {
3319e20a199fSJeff Roberson 	uma_cache_t cache;
3320e20a199fSJeff Roberson 	uma_klink_t kl;
3321e20a199fSJeff Roberson 	int i;
3322e20a199fSJeff Roberson 
33230b80c1e4SEitan Adler 	printf("zone: %s(%p) size %d flags %#x\n",
3324e20a199fSJeff Roberson 	    zone->uz_name, zone, zone->uz_size, zone->uz_flags);
3325e20a199fSJeff Roberson 	LIST_FOREACH(kl, &zone->uz_kegs, kl_link)
3326e20a199fSJeff Roberson 		uma_print_keg(kl->kl_keg);
33273aa6d94eSJohn Baldwin 	CPU_FOREACH(i) {
3328504d5de3SJeff Roberson 		cache = &zone->uz_cpu[i];
3329504d5de3SJeff Roberson 		printf("CPU %d Cache:\n", i);
3330504d5de3SJeff Roberson 		cache_print(cache);
3331504d5de3SJeff Roberson 	}
33328355f576SJeff Roberson }
33338355f576SJeff Roberson 
3334a0d4b0aeSRobert Watson #ifdef DDB
33358355f576SJeff Roberson /*
33367a52a97eSRobert Watson  * Generate statistics across both the zone and its per-CPU caches.  Return
33377a52a97eSRobert Watson  * each statistic for which the corresponding output pointer is non-NULL.
33387a52a97eSRobert Watson  *
33397a52a97eSRobert Watson  * Note: does not update the zone statistics, as it can't safely clear the
33407a52a97eSRobert Watson  * per-CPU cache statistic.
33417a52a97eSRobert Watson  *
33427a52a97eSRobert Watson  * XXXRW: Following the uc_allocbucket and uc_freebucket pointers here isn't
33437a52a97eSRobert Watson  * safe from off-CPU; we should modify the caches to track this information
33447a52a97eSRobert Watson  * directly so that we don't have to.
33457a52a97eSRobert Watson  */
33467a52a97eSRobert Watson static void
334785dcf349SGleb Smirnoff uma_zone_sumstat(uma_zone_t z, int *cachefreep, uint64_t *allocsp,
334885dcf349SGleb Smirnoff     uint64_t *freesp, uint64_t *sleepsp)
33497a52a97eSRobert Watson {
33507a52a97eSRobert Watson 	uma_cache_t cache;
335185dcf349SGleb Smirnoff 	uint64_t allocs, frees, sleeps;
33527a52a97eSRobert Watson 	int cachefree, cpu;
33537a52a97eSRobert Watson 
3354bf965959SSean Bruno 	allocs = frees = sleeps = 0;
33557a52a97eSRobert Watson 	cachefree = 0;
33563aa6d94eSJohn Baldwin 	CPU_FOREACH(cpu) {
33577a52a97eSRobert Watson 		cache = &z->uz_cpu[cpu];
33587a52a97eSRobert Watson 		if (cache->uc_allocbucket != NULL)
33597a52a97eSRobert Watson 			cachefree += cache->uc_allocbucket->ub_cnt;
33607a52a97eSRobert Watson 		if (cache->uc_freebucket != NULL)
33617a52a97eSRobert Watson 			cachefree += cache->uc_freebucket->ub_cnt;
33627a52a97eSRobert Watson 		allocs += cache->uc_allocs;
33637a52a97eSRobert Watson 		frees += cache->uc_frees;
33647a52a97eSRobert Watson 	}
33657a52a97eSRobert Watson 	allocs += z->uz_allocs;
33667a52a97eSRobert Watson 	frees += z->uz_frees;
3367bf965959SSean Bruno 	sleeps += z->uz_sleeps;
33687a52a97eSRobert Watson 	if (cachefreep != NULL)
33697a52a97eSRobert Watson 		*cachefreep = cachefree;
33707a52a97eSRobert Watson 	if (allocsp != NULL)
33717a52a97eSRobert Watson 		*allocsp = allocs;
33727a52a97eSRobert Watson 	if (freesp != NULL)
33737a52a97eSRobert Watson 		*freesp = frees;
3374bf965959SSean Bruno 	if (sleepsp != NULL)
3375bf965959SSean Bruno 		*sleepsp = sleeps;
33767a52a97eSRobert Watson }
3377a0d4b0aeSRobert Watson #endif /* DDB */
33787a52a97eSRobert Watson 
33797a52a97eSRobert Watson static int
33807a52a97eSRobert Watson sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS)
33817a52a97eSRobert Watson {
33827a52a97eSRobert Watson 	uma_keg_t kz;
33837a52a97eSRobert Watson 	uma_zone_t z;
33847a52a97eSRobert Watson 	int count;
33857a52a97eSRobert Watson 
33867a52a97eSRobert Watson 	count = 0;
3387111fbcd5SBryan Venteicher 	rw_rlock(&uma_rwlock);
33887a52a97eSRobert Watson 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
33897a52a97eSRobert Watson 		LIST_FOREACH(z, &kz->uk_zones, uz_link)
33907a52a97eSRobert Watson 			count++;
33917a52a97eSRobert Watson 	}
3392111fbcd5SBryan Venteicher 	rw_runlock(&uma_rwlock);
33937a52a97eSRobert Watson 	return (sysctl_handle_int(oidp, &count, 0, req));
33947a52a97eSRobert Watson }
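
/*
 * Userland sketch: this handler is registered elsewhere as a read-only
 * integer sysctl; the "vm.zone_count" name is an assumption based on
 * the handler name and is not confirmed in this section.
 *
 *	int count;
 *	size_t len = sizeof(count);
 *
 *	if (sysctlbyname("vm.zone_count", &count, &len, NULL, 0) == 0)
 *		printf("%d UMA zones\n", count);
 */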
33957a52a97eSRobert Watson 
33967a52a97eSRobert Watson static int
33977a52a97eSRobert Watson sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
33987a52a97eSRobert Watson {
33997a52a97eSRobert Watson 	struct uma_stream_header ush;
34007a52a97eSRobert Watson 	struct uma_type_header uth;
34017a52a97eSRobert Watson 	struct uma_percpu_stat ups;
34027a52a97eSRobert Watson 	uma_bucket_t bucket;
34037a52a97eSRobert Watson 	struct sbuf sbuf;
34047a52a97eSRobert Watson 	uma_cache_t cache;
3405e20a199fSJeff Roberson 	uma_klink_t kl;
34067a52a97eSRobert Watson 	uma_keg_t kz;
34077a52a97eSRobert Watson 	uma_zone_t z;
3408e20a199fSJeff Roberson 	uma_keg_t k;
34094e657159SMatthew D Fleming 	int count, error, i;
34107a52a97eSRobert Watson 
341100f0e671SMatthew D Fleming 	error = sysctl_wire_old_buffer(req, 0);
341200f0e671SMatthew D Fleming 	if (error != 0)
341300f0e671SMatthew D Fleming 		return (error);
34144e657159SMatthew D Fleming 	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
34151eafc078SIan Lepore 	sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL);
34164e657159SMatthew D Fleming 
3417404a593eSMatthew D Fleming 	count = 0;
3418111fbcd5SBryan Venteicher 	rw_rlock(&uma_rwlock);
34197a52a97eSRobert Watson 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
34207a52a97eSRobert Watson 		LIST_FOREACH(z, &kz->uk_zones, uz_link)
34217a52a97eSRobert Watson 			count++;
34227a52a97eSRobert Watson 	}
34237a52a97eSRobert Watson 
34247a52a97eSRobert Watson 	/*
34257a52a97eSRobert Watson 	 * Insert stream header.
34267a52a97eSRobert Watson 	 */
34277a52a97eSRobert Watson 	bzero(&ush, sizeof(ush));
34287a52a97eSRobert Watson 	ush.ush_version = UMA_STREAM_VERSION;
3429ab3a57c0SRobert Watson 	ush.ush_maxcpus = (mp_maxid + 1);
34307a52a97eSRobert Watson 	ush.ush_count = count;
34314e657159SMatthew D Fleming 	(void)sbuf_bcat(&sbuf, &ush, sizeof(ush));
34327a52a97eSRobert Watson 
34337a52a97eSRobert Watson 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
34347a52a97eSRobert Watson 		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
34357a52a97eSRobert Watson 			bzero(&uth, sizeof(uth));
34367a52a97eSRobert Watson 			ZONE_LOCK(z);
3437cbbb4a00SRobert Watson 			strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
34387a52a97eSRobert Watson 			uth.uth_align = kz->uk_align;
34397a52a97eSRobert Watson 			uth.uth_size = kz->uk_size;
34407a52a97eSRobert Watson 			uth.uth_rsize = kz->uk_rsize;
3441e20a199fSJeff Roberson 			LIST_FOREACH(kl, &z->uz_kegs, kl_link) {
3442e20a199fSJeff Roberson 				k = kl->kl_keg;
3443e20a199fSJeff Roberson 				uth.uth_maxpages += k->uk_maxpages;
3444e20a199fSJeff Roberson 				uth.uth_pages += k->uk_pages;
3445e20a199fSJeff Roberson 				uth.uth_keg_free += k->uk_free;
3446e20a199fSJeff Roberson 				uth.uth_limit = (k->uk_maxpages / k->uk_ppera)
3447e20a199fSJeff Roberson 				    * k->uk_ipers;
3448e20a199fSJeff Roberson 			}
3449cbbb4a00SRobert Watson 
3450cbbb4a00SRobert Watson 			/*
3451cbbb4a00SRobert Watson 			 * A zone is secondary if it is not the first entry
3452cbbb4a00SRobert Watson 			 * on the keg's zone list.
3453cbbb4a00SRobert Watson 			 */
3454e20a199fSJeff Roberson 			if ((z->uz_flags & UMA_ZONE_SECONDARY) &&
3455cbbb4a00SRobert Watson 			    (LIST_FIRST(&kz->uk_zones) != z))
3456cbbb4a00SRobert Watson 				uth.uth_zone_flags = UTH_ZONE_SECONDARY;
3457cbbb4a00SRobert Watson 
3458fc03d22bSJeff Roberson 			LIST_FOREACH(bucket, &z->uz_buckets, ub_link)
34597a52a97eSRobert Watson 				uth.uth_zone_free += bucket->ub_cnt;
34607a52a97eSRobert Watson 			uth.uth_allocs = z->uz_allocs;
34617a52a97eSRobert Watson 			uth.uth_frees = z->uz_frees;
34622019094aSRobert Watson 			uth.uth_fails = z->uz_fails;
3463bf965959SSean Bruno 			uth.uth_sleeps = z->uz_sleeps;
34644e657159SMatthew D Fleming 			(void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
34657a52a97eSRobert Watson 			/*
34662450bbb8SRobert Watson 			 * While it is not normally safe to access the cache
34672450bbb8SRobert Watson 			 * bucket pointers while not on the CPU that owns the
34682450bbb8SRobert Watson 			 * cache, we only allow the pointers to be exchanged
34692450bbb8SRobert Watson 			 * without the zone lock held, not invalidated, so
34702450bbb8SRobert Watson 			 * accept the possible race associated with bucket
34712450bbb8SRobert Watson 			 * exchange during monitoring.
34727a52a97eSRobert Watson 			 */
3473ab3a57c0SRobert Watson 			for (i = 0; i < (mp_maxid + 1); i++) {
34747a52a97eSRobert Watson 				bzero(&ups, sizeof(ups));
34757a52a97eSRobert Watson 				if (kz->uk_flags & UMA_ZFLAG_INTERNAL)
34767a52a97eSRobert Watson 					goto skip;
3477082dc776SRobert Watson 				if (CPU_ABSENT(i))
3478082dc776SRobert Watson 					goto skip;
34797a52a97eSRobert Watson 				cache = &z->uz_cpu[i];
34807a52a97eSRobert Watson 				if (cache->uc_allocbucket != NULL)
34817a52a97eSRobert Watson 					ups.ups_cache_free +=
34827a52a97eSRobert Watson 					    cache->uc_allocbucket->ub_cnt;
34837a52a97eSRobert Watson 				if (cache->uc_freebucket != NULL)
34847a52a97eSRobert Watson 					ups.ups_cache_free +=
34857a52a97eSRobert Watson 					    cache->uc_freebucket->ub_cnt;
34867a52a97eSRobert Watson 				ups.ups_allocs = cache->uc_allocs;
34877a52a97eSRobert Watson 				ups.ups_frees = cache->uc_frees;
34887a52a97eSRobert Watson skip:
34894e657159SMatthew D Fleming 				(void)sbuf_bcat(&sbuf, &ups, sizeof(ups));
34907a52a97eSRobert Watson 			}
34912450bbb8SRobert Watson 			ZONE_UNLOCK(z);
34927a52a97eSRobert Watson 		}
34937a52a97eSRobert Watson 	}
3494111fbcd5SBryan Venteicher 	rw_runlock(&uma_rwlock);
34954e657159SMatthew D Fleming 	error = sbuf_finish(&sbuf);
34964e657159SMatthew D Fleming 	sbuf_delete(&sbuf);
34977a52a97eSRobert Watson 	return (error);
34987a52a97eSRobert Watson }
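
/*
 * Consumer sketch (hypothetical, not part of this file): the stream
 * emitted above is one uma_stream_header followed, for each of the
 * ush_count zones, by a uma_type_header and then ush_maxcpus
 * uma_percpu_stat records (absent CPUs and internal kegs still emit
 * zeroed records).  A userland parser over a buffer "buf" might walk
 * it as follows, with bounds and alignment checks omitted for brevity:
 *
 *	struct uma_stream_header *ush = (struct uma_stream_header *)buf;
 *	char *p = buf + sizeof(*ush);
 *
 *	for (int i = 0; i < ush->ush_count; i++) {
 *		struct uma_type_header *uth = (struct uma_type_header *)p;
 *		printf("%s\n", uth->uth_name);
 *		p += sizeof(*uth) +
 *		    ush->ush_maxcpus * sizeof(struct uma_percpu_stat);
 *	}
 */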
349948c5777eSRobert Watson 
35000a5a3ccbSGleb Smirnoff int
35010a5a3ccbSGleb Smirnoff sysctl_handle_uma_zone_max(SYSCTL_HANDLER_ARGS)
35020a5a3ccbSGleb Smirnoff {
35030a5a3ccbSGleb Smirnoff 	uma_zone_t zone = *(uma_zone_t *)arg1;
350416be9f54SGleb Smirnoff 	int error, max;
35050a5a3ccbSGleb Smirnoff 
350616be9f54SGleb Smirnoff 	max = uma_zone_get_max(zone);
35070a5a3ccbSGleb Smirnoff 	error = sysctl_handle_int(oidp, &max, 0, req);
35080a5a3ccbSGleb Smirnoff 	if (error || !req->newptr)
35090a5a3ccbSGleb Smirnoff 		return (error);
35100a5a3ccbSGleb Smirnoff 
35110a5a3ccbSGleb Smirnoff 	uma_zone_set_max(zone, max);
35120a5a3ccbSGleb Smirnoff 
35130a5a3ccbSGleb Smirnoff 	return (0);
35140a5a3ccbSGleb Smirnoff }
35150a5a3ccbSGleb Smirnoff 
35160a5a3ccbSGleb Smirnoff int
35170a5a3ccbSGleb Smirnoff sysctl_handle_uma_zone_cur(SYSCTL_HANDLER_ARGS)
35180a5a3ccbSGleb Smirnoff {
35190a5a3ccbSGleb Smirnoff 	uma_zone_t zone = *(uma_zone_t *)arg1;
35200a5a3ccbSGleb Smirnoff 	int cur;
35210a5a3ccbSGleb Smirnoff 
35220a5a3ccbSGleb Smirnoff 	cur = uma_zone_get_cur(zone);
35230a5a3ccbSGleb Smirnoff 	return (sysctl_handle_int(oidp, &cur, 0, req));
35240a5a3ccbSGleb Smirnoff }
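
/*
 * Registration sketch: the two handlers above are intended for
 * SYSCTL_PROC nodes whose arg1 points at a uma_zone_t.  A hypothetical
 * consumer ("my_zone" and the OID names are assumptions) might do:
 *
 *	SYSCTL_PROC(_debug, OID_AUTO, my_zone_max,
 *	    CTLTYPE_INT | CTLFLAG_RW, &my_zone, 0,
 *	    sysctl_handle_uma_zone_max, "I", "Maximum items in my_zone");
 *	SYSCTL_PROC(_debug, OID_AUTO, my_zone_cur,
 *	    CTLTYPE_INT | CTLFLAG_RD, &my_zone, 0,
 *	    sysctl_handle_uma_zone_cur, "I", "Current items in my_zone");
 */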
35250a5a3ccbSGleb Smirnoff 
35269542ea7bSGleb Smirnoff #ifdef INVARIANTS
35279542ea7bSGleb Smirnoff static uma_slab_t
35289542ea7bSGleb Smirnoff uma_dbg_getslab(uma_zone_t zone, void *item)
35299542ea7bSGleb Smirnoff {
35309542ea7bSGleb Smirnoff 	uma_slab_t slab;
35319542ea7bSGleb Smirnoff 	uma_keg_t keg;
35329542ea7bSGleb Smirnoff 	uint8_t *mem;
35339542ea7bSGleb Smirnoff 
35349542ea7bSGleb Smirnoff 	mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
35359542ea7bSGleb Smirnoff 	if (zone->uz_flags & UMA_ZONE_VTOSLAB) {
35369542ea7bSGleb Smirnoff 		slab = vtoslab((vm_offset_t)mem);
35379542ea7bSGleb Smirnoff 	} else {
35389542ea7bSGleb Smirnoff 		/*
35399542ea7bSGleb Smirnoff 		 * It is safe to return the slab here even though the
35409542ea7bSGleb Smirnoff 		 * zone is unlocked because the item's allocation state
35419542ea7bSGleb Smirnoff 		 * essentially holds a reference.
35429542ea7bSGleb Smirnoff 		 */
35439542ea7bSGleb Smirnoff 		ZONE_LOCK(zone);
35449542ea7bSGleb Smirnoff 		keg = LIST_FIRST(&zone->uz_kegs)->kl_keg;
35459542ea7bSGleb Smirnoff 		if (keg->uk_flags & UMA_ZONE_HASH)
35469542ea7bSGleb Smirnoff 			slab = hash_sfind(&keg->uk_hash, mem);
35479542ea7bSGleb Smirnoff 		else
35489542ea7bSGleb Smirnoff 			slab = (uma_slab_t)(mem + keg->uk_pgoff);
35499542ea7bSGleb Smirnoff 		ZONE_UNLOCK(zone);
35509542ea7bSGleb Smirnoff 	}
35519542ea7bSGleb Smirnoff 
35529542ea7bSGleb Smirnoff 	return (slab);
35539542ea7bSGleb Smirnoff }
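
/*
 * Worked example of the mask trick above, assuming UMA_SLAB_MASK ==
 * PAGE_SIZE - 1 == 0xfff: an item at 0xfffff80012345678 rounds down to
 * its slab's base address 0xfffff80012345000, from which the slab
 * header is located via vtoslab(), the keg hash, or the fixed
 * uk_pgoff offset, depending on the keg's flags.
 */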
35549542ea7bSGleb Smirnoff 
35559542ea7bSGleb Smirnoff /*
35569542ea7bSGleb Smirnoff  * Set up the slab's freei data such that uma_dbg_free can function.
35579542ea7bSGleb Smirnoff  */
35599542ea7bSGleb Smirnoff static void
35609542ea7bSGleb Smirnoff uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item)
35619542ea7bSGleb Smirnoff {
35629542ea7bSGleb Smirnoff 	uma_keg_t keg;
35639542ea7bSGleb Smirnoff 	int freei;
35649542ea7bSGleb Smirnoff 
35659542ea7bSGleb Smirnoff 	if (zone_first_keg(zone) == NULL)
35669542ea7bSGleb Smirnoff 		return;
35679542ea7bSGleb Smirnoff 	if (slab == NULL) {
35689542ea7bSGleb Smirnoff 		slab = uma_dbg_getslab(zone, item);
35699542ea7bSGleb Smirnoff 		if (slab == NULL)
35709542ea7bSGleb Smirnoff 			panic("uma: item %p did not belong to zone %s\n",
35719542ea7bSGleb Smirnoff 			    item, zone->uz_name);
35729542ea7bSGleb Smirnoff 	}
35739542ea7bSGleb Smirnoff 	keg = slab->us_keg;
35749542ea7bSGleb Smirnoff 	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
35759542ea7bSGleb Smirnoff 
35769542ea7bSGleb Smirnoff 	if (BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree))
35779542ea7bSGleb Smirnoff 		panic("Duplicate alloc of %p from zone %p(%s) slab %p(%d)\n",
35789542ea7bSGleb Smirnoff 		    item, zone, zone->uz_name, slab, freei);
35799542ea7bSGleb Smirnoff 	BIT_SET_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree);
35829542ea7bSGleb Smirnoff }
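
/*
 * Worked example of the index computation above: with uk_rsize == 256
 * and an item 0x600 (1536) bytes past us_data, freei == 1536 / 256 ==
 * 6, so bit 6 of us_debugfree tracks this item's allocation state.
 */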
35839542ea7bSGleb Smirnoff 
35849542ea7bSGleb Smirnoff /*
35859542ea7bSGleb Smirnoff  * Verifies freed addresses.  Checks for alignment, valid slab membership
35869542ea7bSGleb Smirnoff  * and duplicate frees.
35879542ea7bSGleb Smirnoff  */
35899542ea7bSGleb Smirnoff static void
35909542ea7bSGleb Smirnoff uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
35919542ea7bSGleb Smirnoff {
35929542ea7bSGleb Smirnoff 	uma_keg_t keg;
35939542ea7bSGleb Smirnoff 	int freei;
35949542ea7bSGleb Smirnoff 
35959542ea7bSGleb Smirnoff 	if (zone_first_keg(zone) == NULL)
35969542ea7bSGleb Smirnoff 		return;
35979542ea7bSGleb Smirnoff 	if (slab == NULL) {
35989542ea7bSGleb Smirnoff 		slab = uma_dbg_getslab(zone, item);
35999542ea7bSGleb Smirnoff 		if (slab == NULL)
36009542ea7bSGleb Smirnoff 			panic("uma: Freed item %p did not belong to zone %s\n",
36019542ea7bSGleb Smirnoff 			    item, zone->uz_name);
36029542ea7bSGleb Smirnoff 	}
36039542ea7bSGleb Smirnoff 	keg = slab->us_keg;
36049542ea7bSGleb Smirnoff 	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
36059542ea7bSGleb Smirnoff 
36069542ea7bSGleb Smirnoff 	if (freei >= keg->uk_ipers)
36079542ea7bSGleb Smirnoff 		panic("Invalid free of %p from zone %p(%s) slab %p(%d)\n",
36089542ea7bSGleb Smirnoff 		    item, zone, zone->uz_name, slab, freei);
36099542ea7bSGleb Smirnoff 
36109542ea7bSGleb Smirnoff 	if (((freei * keg->uk_rsize) + slab->us_data) != item)
36119542ea7bSGleb Smirnoff 		panic("Unaligned free of %p from zone %p(%s) slab %p(%d)\n",
36129542ea7bSGleb Smirnoff 		    item, zone, zone->uz_name, slab, freei);
36139542ea7bSGleb Smirnoff 
36149542ea7bSGleb Smirnoff 	if (!BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree))
36159542ea7bSGleb Smirnoff 		panic("Duplicate free of %p from zone %p(%s) slab %p(%d)\n",
36169542ea7bSGleb Smirnoff 		    item, zone, zone->uz_name, slab, freei);
36179542ea7bSGleb Smirnoff 
36189542ea7bSGleb Smirnoff 	BIT_CLR_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree);
36199542ea7bSGleb Smirnoff }
36209542ea7bSGleb Smirnoff #endif /* INVARIANTS */
36219542ea7bSGleb Smirnoff 
362248c5777eSRobert Watson #ifdef DDB
362348c5777eSRobert Watson DB_SHOW_COMMAND(uma, db_show_uma)
362448c5777eSRobert Watson {
362585dcf349SGleb Smirnoff 	uint64_t allocs, frees, sleeps;
362648c5777eSRobert Watson 	uma_bucket_t bucket;
362748c5777eSRobert Watson 	uma_keg_t kz;
362848c5777eSRobert Watson 	uma_zone_t z;
362948c5777eSRobert Watson 	int cachefree;
363048c5777eSRobert Watson 
363103175483SAlexander Motin 	db_printf("%18s %8s %8s %8s %12s %8s %8s\n", "Zone", "Size", "Used",
363203175483SAlexander Motin 	    "Free", "Requests", "Sleeps", "Bucket");
363348c5777eSRobert Watson 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
363448c5777eSRobert Watson 		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
363548c5777eSRobert Watson 			if (kz->uk_flags & UMA_ZFLAG_INTERNAL) {
363648c5777eSRobert Watson 				allocs = z->uz_allocs;
363748c5777eSRobert Watson 				frees = z->uz_frees;
3638bf965959SSean Bruno 				sleeps = z->uz_sleeps;
363948c5777eSRobert Watson 				cachefree = 0;
364048c5777eSRobert Watson 			} else
364148c5777eSRobert Watson 				uma_zone_sumstat(z, &cachefree, &allocs,
3642bf965959SSean Bruno 				    &frees, &sleeps);
3643e20a199fSJeff Roberson 			if (!((z->uz_flags & UMA_ZONE_SECONDARY) &&
364448c5777eSRobert Watson 			    (LIST_FIRST(&kz->uk_zones) != z)))
364548c5777eSRobert Watson 				cachefree += kz->uk_free;
3646fc03d22bSJeff Roberson 			LIST_FOREACH(bucket, &z->uz_buckets, ub_link)
364748c5777eSRobert Watson 				cachefree += bucket->ub_cnt;
364803175483SAlexander Motin 			db_printf("%18s %8ju %8jd %8d %12ju %8ju %8u\n",
364903175483SAlexander Motin 			    z->uz_name, (uintmax_t)kz->uk_size,
3650ae4e9636SRobert Watson 			    (intmax_t)(allocs - frees), cachefree,
365103175483SAlexander Motin 			    (uintmax_t)allocs, sleeps, z->uz_count);
3652687c94aaSJohn Baldwin 			if (db_pager_quit)
3653687c94aaSJohn Baldwin 				return;
365448c5777eSRobert Watson 		}
365548c5777eSRobert Watson 	}
365648c5777eSRobert Watson }
365703175483SAlexander Motin 
365803175483SAlexander Motin DB_SHOW_COMMAND(umacache, db_show_umacache)
365903175483SAlexander Motin {
366003175483SAlexander Motin 	uint64_t allocs, frees;
366103175483SAlexander Motin 	uma_bucket_t bucket;
366203175483SAlexander Motin 	uma_zone_t z;
366303175483SAlexander Motin 	int cachefree;
366403175483SAlexander Motin 
366503175483SAlexander Motin 	db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free",
366603175483SAlexander Motin 	    "Requests", "Bucket");
366703175483SAlexander Motin 	LIST_FOREACH(z, &uma_cachezones, uz_link) {
366803175483SAlexander Motin 		uma_zone_sumstat(z, &cachefree, &allocs, &frees, NULL);
366903175483SAlexander Motin 		LIST_FOREACH(bucket, &z->uz_buckets, ub_link)
367003175483SAlexander Motin 			cachefree += bucket->ub_cnt;
367103175483SAlexander Motin 		db_printf("%18s %8ju %8jd %8d %12ju %8u\n",
367203175483SAlexander Motin 		    z->uz_name, (uintmax_t)z->uz_size,
367303175483SAlexander Motin 		    (intmax_t)(allocs - frees), cachefree,
367403175483SAlexander Motin 		    (uintmax_t)allocs, z->uz_count);
367503175483SAlexander Motin 		if (db_pager_quit)
367603175483SAlexander Motin 			return;
367703175483SAlexander Motin 	}
367803175483SAlexander Motin }
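
/*
 * DDB usage sketch: the commands above are invoked from the kernel
 * debugger prompt.  The output below is illustrative, not captured
 * from a live system; the column layout follows the db_printf format
 * strings above.
 *
 *	db> show uma
 *	              Zone     Size     Used     Free     Requests   Sleeps   Bucket
 *	...
 *	db> show umacache
 */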
36799542ea7bSGleb Smirnoff #endif	/* DDB */
3680