xref: /freebsd/sys/vm/uma_int.h (revision fe267a559009cbf34f9341666fe4d88a92c02d5e)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson <jeff@FreeBSD.org>
 * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

#include <sys/_bitset.h>
#include <sys/_task.h>

/*
 * This file includes definitions, structures, prototypes, and inlines that
 * should not be used outside of the actual implementation of UMA.
 */

/*
 * Here's a quick description of the relationship between the objects:
 *
 * Kegs contain lists of slabs which are stored in either the full bin, empty
 * bin, or partially allocated bin, to reduce fragmentation.  They also
 * contain the user-supplied item size, which is adjusted for alignment
 * purposes; rsize is the result of that adjustment.  The Keg also stores
 * information for managing a hash of page addresses that maps pages to
 * uma_slab_t structures for pages that don't have embedded uma_slab_t's.
 *
 * The uma_slab_t may be embedded in a UMA_SLAB_SIZE chunk of memory or it may
 * be allocated off the page from a special slab zone.  The free list within a
 * slab is managed with a bitmask.  For item sizes that would yield more than
 * 10% memory waste we may allocate a separate uma_slab_t if doing so
 * improves the number of items that fit per slab.
 *
 * The only really gross cases, with regard to memory waste, are for those
 * items that are just over half the page size.  You can get nearly 50% waste,
 * so you fall back to the memory footprint of the power of two allocator.  I
 * have looked at memory allocation sizes on many of the machines available to
 * me, and there does not seem to be an abundance of allocations in this
 * range, so at this time it may not make sense to optimize for it.  This can,
 * of course, be solved with dynamic slab sizes.
 *
 * Kegs may serve multiple Zones, but by far most of the time they only serve
 * one.  When a Zone is created, a Keg is allocated and set up for it.  While
 * the backing Keg stores slabs, the Zone caches Buckets of items allocated
 * from the slabs.  Each Zone is equipped with an init/fini and ctor/dtor
 * pair, as well as with its own set of small per-CPU caches, layered above
 * the Zone's general Bucket cache.
 *
 * The PCPU caches are protected by critical sections, and may be accessed
 * safely only from their associated CPU, while the Zones backed by the same
 * Keg all share a common Keg lock (to coalesce contention on the backing
 * slabs).  The backing Keg typically only serves one Zone, but in the case of
 * multiple Zones, one of the Zones is considered the Master Zone and all
 * Zone-related stats from the Keg are accounted to the Master Zone.  For an
 * example of a Multi-Zone setup, refer to the Mbuf allocation code.
 */
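
/*
 * To make the per-CPU layering concrete, below is a simplified sketch of the
 * allocation fast path.  It is illustrative only: the function name is
 * hypothetical, the real logic lives in uma_zalloc_arg() in uma_core.c, and
 * the sketch elides bucket swapping, constructor application, zone locking,
 * and the slab-backed slow path.
 *
 *	void *
 *	uma_fast_path_sketch(uma_zone_t zone)
 *	{
 *		uma_cache_t cache;
 *		uma_bucket_t bucket;
 *		void *item;
 *
 *		critical_enter();	(pin to this CPU's private cache)
 *		cache = &zone->uz_cpu[curcpu];
 *		bucket = cache->uc_allocbucket;
 *		if (bucket != NULL && bucket->ub_cnt > 0) {
 *			item = bucket->ub_bucket[--bucket->ub_cnt];
 *			cache->uc_allocs++;
 *			critical_exit();
 *			return (item);	(no lock taken at all)
 *		}
 *		critical_exit();
 *		return (NULL);	(slow path: refill from the Keg's slabs)
 *	}
 */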

/*
 *	This is the representation of a normal (non-OFFPAGE) slab:
 *
 *	i == item
 *	s == slab pointer
 *
 *	<----------------  Page (UMA_SLAB_SIZE) ------------------>
 *	___________________________________________________________
 *     | _  _  _  _  _  _  _  _  _  _  _  _  _  _  _   ___________ |
 *     ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i| |slab header||
 *     ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_| |___________||
 *     |___________________________________________________________|
 *
 *
 *	This is an OFFPAGE slab.  These can be larger than UMA_SLAB_SIZE.
 *
 *	___________________________________________________________
 *     | _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _   |
 *     ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i|  |
 *     ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_|  |
 *     |___________________________________________________________|
 *       ___________    ^
 *	|slab header|   |
 *	|___________|---*
 *
 */
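
/*
 * A sketch (not compiled here) of how an item address is mapped back to its
 * slab header under these two layouts; the authoritative lookups live in
 * uma_core.c, and the single-page case is assumed.  With an embedded header,
 * the header sits at a fixed offset (uk_pgoff) from the start of the slab's
 * memory, while OFFPAGE slabs are found through the Keg's hash (see
 * hash_sfind() below).
 *
 *	uint8_t *mem;
 *	uma_slab_t slab;
 *
 *	mem = (uint8_t *)((uintptr_t)item & ~UMA_SLAB_MASK);
 *	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) == 0)
 *		slab = (uma_slab_t)(mem + keg->uk_pgoff);
 *	else
 *		slab = hash_sfind(&keg->uk_hash, mem);
 */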

#ifndef VM_UMA_INT_H
#define VM_UMA_INT_H

#define UMA_SLAB_SIZE	PAGE_SIZE	/* How big are our slabs? */
#define UMA_SLAB_MASK	(PAGE_SIZE - 1)	/* Mask to get back to the page */
#define UMA_SLAB_SHIFT	PAGE_SHIFT	/* Number of bits in PAGE_MASK */

#define UMA_BOOT_PAGES		64	/* Pages allocated for startup */
#define UMA_BOOT_PAGES_ZONES	32	/* Multiplier for pages to reserve */
					/* if uma_zone > PAGE_SIZE */

/* Max waste percentage before going to off page slab management */
#define UMA_MAX_WASTE	10
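
/*
 * A worked example of the waste rule with 4 KB pages, assuming an embedded
 * slab header of roughly 80 bytes (the exact size varies with options such
 * as INVARIANTS): for 1024-byte items only three fit alongside an embedded
 * header, wasting 4096 - 3 * 1024 - 80 = 944 bytes, about 23% of the page,
 * while an off-page header lets exactly four items fit, so such a keg is a
 * candidate for OFFPAGE management.  A 536-byte item fits seven times even
 * with the header embedded (7 * 536 + 80 = 3832 bytes), leaving ~6% waste,
 * which is under UMA_MAX_WASTE, so its header stays on the slab.
 */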

/*
 * I doubt there will be many cases where this is exceeded. This is the initial
 * size of the hash table for uma_slabs that are managed off page. This hash
 * does expand by powers of two.  Currently it doesn't get smaller.
 */
#define UMA_HASH_SIZE_INIT	32

/*
 * I should investigate other hashing algorithms.  This should yield a low
 * number of collisions if the pages are relatively contiguous.
 */

#define UMA_HASH(h, s) ((((uintptr_t)s) >> UMA_SLAB_SHIFT) & (h)->uh_hashmask)

#define UMA_HASH_INSERT(h, s, mem)					\
		SLIST_INSERT_HEAD(&(h)->uh_slab_hash[UMA_HASH((h),	\
		    (mem))], (s), us_hlink)
#define UMA_HASH_REMOVE(h, s, mem)					\
		SLIST_REMOVE(&(h)->uh_slab_hash[UMA_HASH((h),		\
		    (mem))], (s), uma_slab, us_hlink)
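
/*
 * Illustrative use, following what uma_core.c does when an OFFPAGE slab is
 * created and destroyed (a sketch, not the authoritative sequence): the
 * slab's page address is the hash key, so the free path can recover the
 * header from an item pointer alone.
 *
 *	UMA_HASH_INSERT(&keg->uk_hash, slab, slab->us_data);
 *	...
 *	slab = hash_sfind(&keg->uk_hash, mem);
 *	...
 *	UMA_HASH_REMOVE(&keg->uk_hash, slab, slab->us_data);
 */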

/* Hash table for freed address -> slab translation */

SLIST_HEAD(slabhead, uma_slab);

struct uma_hash {
	struct slabhead	*uh_slab_hash;	/* Hash table for slabs */
	int		uh_hashsize;	/* Current size of the hash table */
	int		uh_hashmask;	/* Mask used during hashing */
};

/*
 * Align a field or structure to a cache line.
 */
#if defined(__amd64__)
#define UMA_ALIGN	__aligned(CACHE_LINE_SIZE)
#else
#define UMA_ALIGN
#endif

/*
 * Structures for per cpu queues.
 */

struct uma_bucket {
	LIST_ENTRY(uma_bucket)	ub_link;	/* Link into the zone */
	int16_t	ub_cnt;				/* Count of free items. */
	int16_t	ub_entries;			/* Max items. */
	void	*ub_bucket[];			/* actual allocation storage */
};

typedef struct uma_bucket * uma_bucket_t;

struct uma_cache {
	uma_bucket_t	uc_freebucket;	/* Bucket we're freeing to */
	uma_bucket_t	uc_allocbucket;	/* Bucket to allocate from */
	uint64_t	uc_allocs;	/* Count of allocations */
	uint64_t	uc_frees;	/* Count of frees */
} UMA_ALIGN;

typedef struct uma_cache * uma_cache_t;

/*
 * Keg management structure
 *
 * TODO: Optimize for cache line size
 *
 */
struct uma_keg {
	struct mtx_padalign	uk_lock;	/* Lock for the keg */
	struct uma_hash	uk_hash;

	LIST_HEAD(,uma_zone)	uk_zones;	/* Keg's zones */
	LIST_HEAD(,uma_slab)	uk_part_slab;	/* partially allocated slabs */
	LIST_HEAD(,uma_slab)	uk_free_slab;	/* empty slab list */
	LIST_HEAD(,uma_slab)	uk_full_slab;	/* full slabs */

	uint32_t	uk_align;	/* Alignment mask */
	uint32_t	uk_pages;	/* Total page count */
	uint32_t	uk_free;	/* Count of items free in slabs */
	uint32_t	uk_reserve;	/* Number of reserved items. */
	uint32_t	uk_size;	/* Requested size of each item */
	uint32_t	uk_rsize;	/* Real size of each item */
	uint32_t	uk_maxpages;	/* Maximum number of pages to alloc */

	uma_init	uk_init;	/* Keg's init routine */
	uma_fini	uk_fini;	/* Keg's fini routine */
	uma_alloc	uk_allocf;	/* Allocation function */
	uma_free	uk_freef;	/* Free routine */

	u_long		uk_offset;	/* Next free offset from base KVA */
	vm_offset_t	uk_kva;		/* Zone base KVA */
	uma_zone_t	uk_slabzone;	/* Slab zone backing us, if OFFPAGE */

	uint32_t	uk_pgoff;	/* Offset to uma_slab struct */
	uint16_t	uk_ppera;	/* pages per allocation from backend */
	uint16_t	uk_ipers;	/* Items per slab */
	uint32_t	uk_flags;	/* Internal flags */

	/* Least used fields go to the last cache line. */
	const char	*uk_name;		/* Name of creating zone. */
	LIST_ENTRY(uma_keg)	uk_link;	/* List of all kegs */
};
typedef struct uma_keg	* uma_keg_t;

/*
 * Free bits per-slab.
 */
#define	SLAB_SETSIZE	(PAGE_SIZE / UMA_SMALLEST_UNIT)
BITSET_DEFINE(slabbits, SLAB_SETSIZE);
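
/*
 * A sketch of how a slab's free mask is consumed and replenished; the real
 * code is slab_alloc_item() and slab_free_item() in uma_core.c.  BIT_FFS()
 * returns a 1-based index, hence the adjustment.
 *
 *	freei = BIT_FFS(SLAB_SETSIZE, &slab->us_free) - 1;
 *	BIT_CLR(SLAB_SETSIZE, freei, &slab->us_free);
 *	item = slab->us_data + (keg->uk_rsize * freei);
 *	...
 *	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
 *	BIT_SET(SLAB_SETSIZE, freei, &slab->us_free);
 */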

/*
 * The slab structure manages a single contiguous allocation from backing
 * store and subdivides it into individually allocatable items.
 */
struct uma_slab {
	uma_keg_t	us_keg;			/* Keg we live in */
	union {
		LIST_ENTRY(uma_slab)	_us_link;	/* slabs in zone */
		unsigned long	_us_size;	/* Size of allocation */
	} us_type;
	SLIST_ENTRY(uma_slab)	us_hlink;	/* Link for hash table */
	uint8_t		*us_data;		/* First item */
	struct slabbits	us_free;		/* Free bitmask. */
#ifdef INVARIANTS
	struct slabbits	us_debugfree;		/* Debug bitmask. */
#endif
	uint16_t	us_freecount;		/* How many are free? */
	uint8_t		us_flags;		/* Page flags see uma.h */
	uint8_t		us_pad;			/* Pad to 32 bits, unused. */
};

#define	us_link	us_type._us_link
#define	us_size	us_type._us_size

typedef struct uma_slab * uma_slab_t;
typedef uma_slab_t (*uma_slaballoc)(uma_zone_t, uma_keg_t, int);

struct uma_klink {
	LIST_ENTRY(uma_klink)	kl_link;
	uma_keg_t		kl_keg;
};
typedef struct uma_klink *uma_klink_t;

/*
 * Zone management structure
 *
 * TODO: Optimize for cache line size
 *
 */
struct uma_zone {
	struct mtx_padalign	uz_lock;	/* Lock for the zone */
	struct mtx_padalign	*uz_lockptr;
	const char		*uz_name;	/* Text name of the zone */

	LIST_ENTRY(uma_zone)	uz_link;	/* List of all zones in keg */
	LIST_HEAD(,uma_bucket)	uz_buckets;	/* full buckets */

	LIST_HEAD(,uma_klink)	uz_kegs;	/* List of kegs. */
	struct uma_klink	uz_klink;	/* klink for first keg. */

	uma_slaballoc	uz_slab;	/* Allocate a slab from the backend. */
	uma_ctor	uz_ctor;	/* Constructor for each allocation */
	uma_dtor	uz_dtor;	/* Destructor */
	uma_init	uz_init;	/* Initializer for each item */
	uma_fini	uz_fini;	/* Finalizer for each item. */
	uma_import	uz_import;	/* Import new memory to cache. */
	uma_release	uz_release;	/* Release memory from cache. */
	void		*uz_arg;	/* Import/release argument. */

	uint32_t	uz_flags;	/* Flags inherited from kegs */
	uint32_t	uz_size;	/* Size inherited from kegs */

	volatile u_long	uz_allocs UMA_ALIGN; /* Total number of allocations */
	volatile u_long	uz_fails;	/* Total number of alloc failures */
	volatile u_long	uz_frees;	/* Total number of frees */
	uint64_t	uz_sleeps;	/* Total number of alloc sleeps */
	uint16_t	uz_count;	/* Amount of items in full bucket */
	uint16_t	uz_count_min;	/* Minimal amount of items there */

	/* The next two fields are used to print rate-limited warnings. */
	const char	*uz_warning;	/* Warning to print on failure */
	struct timeval	uz_ratecheck;	/* Warnings rate-limiting */

	struct task	uz_maxaction;	/* Task to run when at limit */

	/*
	 * This HAS to be the last item because we adjust the zone size
	 * based on NCPU and then allocate the space for the zones.
	 */
	struct uma_cache	uz_cpu[1]; /* Per cpu caches */
};

/*
 * These flags must not overlap with the UMA_ZONE flags specified in uma.h.
 */
#define	UMA_ZFLAG_MULTI		0x04000000	/* Multiple kegs in the zone. */
#define	UMA_ZFLAG_DRAINING	0x08000000	/* Running zone_drain. */
#define	UMA_ZFLAG_BUCKET	0x10000000	/* Bucket zone. */
#define	UMA_ZFLAG_INTERNAL	0x20000000	/* No offpage no PCPU. */
#define	UMA_ZFLAG_FULL		0x40000000	/* Reached uz_maxpages */
#define	UMA_ZFLAG_CACHEONLY	0x80000000	/* Don't ask VM for buckets. */

#define	UMA_ZFLAG_INHERIT						\
    (UMA_ZFLAG_INTERNAL | UMA_ZFLAG_CACHEONLY | UMA_ZFLAG_BUCKET)

static inline uma_keg_t
zone_first_keg(uma_zone_t zone)
{
	uma_klink_t klink;

	klink = LIST_FIRST(&zone->uz_kegs);
	return (klink != NULL) ? klink->kl_keg : NULL;
}

#undef UMA_ALIGN

#ifdef _KERNEL
/* Internal prototypes */
static __inline uma_slab_t hash_sfind(struct uma_hash *hash, uint8_t *data);
void *uma_large_malloc(vm_size_t size, int wait);
void uma_large_free(uma_slab_t slab);

/* Lock Macros */

#define	KEG_LOCK_INIT(k, lc)					\
	do {							\
		if ((lc))					\
			mtx_init(&(k)->uk_lock, (k)->uk_name,	\
			    (k)->uk_name, MTX_DEF | MTX_DUPOK);	\
		else						\
			mtx_init(&(k)->uk_lock, (k)->uk_name,	\
			    "UMA zone", MTX_DEF | MTX_DUPOK);	\
	} while (0)

#define	KEG_LOCK_FINI(k)	mtx_destroy(&(k)->uk_lock)
#define	KEG_LOCK(k)	mtx_lock(&(k)->uk_lock)
#define	KEG_UNLOCK(k)	mtx_unlock(&(k)->uk_lock)

#define	ZONE_LOCK_INIT(z, lc)					\
	do {							\
		if ((lc))					\
			mtx_init(&(z)->uz_lock, (z)->uz_name,	\
			    (z)->uz_name, MTX_DEF | MTX_DUPOK);	\
		else						\
			mtx_init(&(z)->uz_lock, (z)->uz_name,	\
			    "UMA zone", MTX_DEF | MTX_DUPOK);	\
	} while (0)

#define	ZONE_LOCK(z)	mtx_lock((z)->uz_lockptr)
#define	ZONE_TRYLOCK(z)	mtx_trylock((z)->uz_lockptr)
#define	ZONE_UNLOCK(z)	mtx_unlock((z)->uz_lockptr)
#define	ZONE_LOCK_FINI(z)	mtx_destroy(&(z)->uz_lock)
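
/*
 * Note that ZONE_LOCK() goes through uz_lockptr while KEG_LOCK() takes the
 * keg's mutex directly: a zone backed by a single keg points uz_lockptr at
 * that keg's lock, so the two share one mutex.  A typical (sketched) use:
 *
 *	ZONE_LOCK(zone);
 *	bucket = LIST_FIRST(&zone->uz_buckets);
 *	...
 *	ZONE_UNLOCK(zone);
 */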

/*
 * Find a slab within a hash table.  This is used for OFFPAGE zones to lookup
 * the slab structure.
 *
 * Arguments:
 *	hash  The hash table to search.
 *	data  The base page of the item.
 *
 * Returns:
 *	A pointer to a slab if successful, else NULL.
 */
static __inline uma_slab_t
hash_sfind(struct uma_hash *hash, uint8_t *data)
{
	uma_slab_t slab;
	int hval;

	hval = UMA_HASH(hash, data);

	SLIST_FOREACH(slab, &hash->uh_slab_hash[hval], us_hlink) {
		if ((uint8_t *)slab->us_data == data)
			return (slab);
	}
	return (NULL);
}

static __inline uma_slab_t
vtoslab(vm_offset_t va)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	return ((uma_slab_t)p->plinks.s.pv);
}

static __inline void
vsetslab(vm_offset_t va, uma_slab_t slab)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	p->plinks.s.pv = slab;
}
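
/*
 * These two pair up across the allocator (a sketch, not the authoritative
 * sequence): slab setup paths stamp each backing page with its slab so that
 * free paths can walk from a virtual address back to the slab header
 * without a hash lookup.
 *
 *	vsetslab(va, slab);				(at slab setup)
 *	...
 *	slab = vtoslab((vm_offset_t)mem & ~UMA_SLAB_MASK);	(at free)
 */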

/*
 * The following two functions may be defined by architecture specific code
 * if they can provide more efficient allocation functions.  This is useful
 * for using direct mapped addresses.
 */
void *uma_small_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *pflag,
    int wait);
void uma_small_free(void *mem, vm_size_t size, uint8_t flags);
#endif /* _KERNEL */

#endif /* VM_UMA_INT_H */