/*-
 * Copyright (c) 2004, 2005,
 *     Bosko Milekic <bmilekic@freebsd.org>
 * Copyright (c) 2002, 2003, 2004, 2005,
 *     Jeffrey Roberson <jeff@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

/*
 * This file includes definitions, structures, prototypes, and inlines that
 * should not be used outside of the actual implementation of UMA.
 */

/*
 * Here's a quick description of the relationship between the objects:
 *
 * Kegs contain lists of slabs which are stored in either the full bin, empty
 * bin, or partially allocated bin, to reduce fragmentation.  They also contain
 * the user-supplied value for size, which is adjusted for alignment; rsize is
 * the result of that adjustment.  The Keg also stores information for
 * managing a hash of page addresses that maps pages to uma_slab_t structures
 * for pages that don't have embedded uma_slab_t's.
 *
 * The uma_slab_t may be embedded in a UMA_SLAB_SIZE chunk of memory or it may
 * be allocated off the page from a special slab zone.  The free list within a
 * slab is managed with a linked list of indexes, which are 8-bit values.  If
 * UMA_SLAB_SIZE is defined to be too large I will have to switch to 16-bit
 * values.  Currently on alpha you can get 250 or so 32-byte items and on x86
 * you can get 250 or so 16-byte items.  For item sizes that would yield more
 * than 10% memory waste we potentially allocate a separate uma_slab_t if this
 * will improve the number of items per slab that will fit.
 *
 * Another potential space optimization is storing the 8 bits of linkage in
 * the space wasted between items due to alignment padding.  This may yield a
 * much better memory footprint for certain sizes of objects.  Another
 * alternative is to increase UMA_SLAB_SIZE, or allow for dynamic slab sizes.
 * I prefer dynamic slab sizes because we could stick with 8-bit indexes and
 * only use large slab sizes for zones with a lot of waste per slab.  This may
 * create inefficiencies in the vm subsystem due to fragmentation in the
 * address space.
 *
 * The only really gross cases, with regard to memory waste, are for those
 * items that are just over half the page size.  You can get nearly 50% waste,
 * so you fall back to the memory footprint of the power-of-two allocator.  I
 * have looked at memory allocation sizes on many of the machines available to
 * me, and there does not seem to be an abundance of allocations in this range,
 * so at this time it may not make sense to optimize for it.  This can, of
 * course, be solved with dynamic slab sizes.
 *
 * Kegs may serve multiple Zones but by far most of the time they only serve
 * one.  When a Zone is created, a Keg is allocated and set up for it.  While
 * the backing Keg stores slabs, the Zone caches Buckets of items allocated
 * from the slabs.  Each Zone is equipped with an init/fini and ctor/dtor
 * pair, as well as with its own set of small per-CPU caches, layered above
 * the Zone's general Bucket cache.
 *
 * The PCPU caches are protected by their own locks, while the Zones backed
 * by the same Keg all share a common Keg lock (to coalesce contention on
 * the backing slabs).  The backing Keg typically only serves one Zone but
 * in the case of multiple Zones, one of the Zones is considered the
 * Master Zone and all Zone-related stats from the Keg are done in the
 * Master Zone.  For an example of a Multi-Zone setup, refer to the
 * Mbuf allocation code.
 */

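/*
 * A worked example of the worst case described above (illustrative
 * arithmetic, assuming 4K pages): a 2100-byte item fits only once in a
 * 4096-byte slab, so 4096 - 2100 = 1996 bytes, roughly 49% of the page,
 * are wasted on every slab of that zone.
 */
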
/*
 *	This is the representation of a normal (non-OFFPAGE) slab:
 *
 *	i == item
 *	s == slab pointer
 *
 *	<----------------  Page (UMA_SLAB_SIZE) ------------------>
 *	___________________________________________________________
 *     | _  _  _  _  _  _  _  _  _  _  _  _  _  _  _   ___________ |
 *     ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i| |slab header||
 *     ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_| |___________||
 *     |___________________________________________________________|
 *
 *
 *	This is an OFFPAGE slab.  These can be larger than UMA_SLAB_SIZE.
 *
 *	___________________________________________________________
 *     | _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _   |
 *     ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i|  |
 *     ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_|  |
 *     |___________________________________________________________|
 *       ___________    ^
 *	|slab header|   |
 *	|___________|---*
 *
 */

#ifndef VM_UMA_INT_H
#define VM_UMA_INT_H

#define UMA_SLAB_SIZE	PAGE_SIZE	/* How big are our slabs? */
#define UMA_SLAB_MASK	(PAGE_SIZE - 1)	/* Mask to get back to the page */
#define UMA_SLAB_SHIFT	PAGE_SHIFT	/* Number of bits in PAGE_MASK */

#define UMA_BOOT_PAGES		40	/* Pages allocated for startup */

/* Max waste before going to off-page slab management */
#define UMA_MAX_WASTE	(UMA_SLAB_SIZE / 10)

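/*
 * A minimal sketch (hypothetical helper, not part of UMA proper) of how
 * the threshold above is applied: compute the bytes a slab would leave
 * unused for a given aligned item size and compare against UMA_MAX_WASTE.
 */
static __inline int
uma_waste_exceeded_sketch(u_int32_t rsize)
{
	u_int32_t ipers;	/* Items that fit in one slab. */
	u_int32_t waste;	/* Bytes left unused in that slab. */

	ipers = UMA_SLAB_SIZE / rsize;
	waste = UMA_SLAB_SIZE - (ipers * rsize);
	return (waste > UMA_MAX_WASTE);
}
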
/*
 * I doubt there will be many cases where this is exceeded. This is the initial
 * size of the hash table for uma_slabs that are managed off page. This hash
 * does expand by powers of two.  Currently it doesn't get smaller.
 */
#define UMA_HASH_SIZE_INIT	32

/*
 * I should investigate other hashing algorithms.  This should yield a low
 * number of collisions if the pages are relatively contiguous.
 *
 * This is the same algorithm that most processor caches use.
 *
 * I'm shifting and masking instead of % because it should be faster.
 */

#define UMA_HASH(h, s) ((((unsigned long)s) >> UMA_SLAB_SHIFT) &	\
    (h)->uh_hashmask)

#define UMA_HASH_INSERT(h, s, mem)					\
		SLIST_INSERT_HEAD(&(h)->uh_slab_hash[UMA_HASH((h),	\
		    (mem))], (s), us_hlink);
#define UMA_HASH_REMOVE(h, s, mem)					\
		SLIST_REMOVE(&(h)->uh_slab_hash[UMA_HASH((h),		\
		    (mem))], (s), uma_slab, us_hlink);

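/*
 * Worked example for UMA_HASH (illustrative, assuming 4K pages and the
 * initial 32-bucket table, i.e. uh_hashmask == 31): an item at 0x12345abc
 * lies in the page at 0x12345000, 0x12345000 >> 12 == 0x12345, and
 * 0x12345 & 31 == 5, so its slab hangs off bucket 5.  Consecutive pages
 * land in consecutive buckets, which is why relatively contiguous pages
 * collide rarely.
 */
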
/* Hash table for freed address -> slab translation */

SLIST_HEAD(slabhead, uma_slab);

struct uma_hash {
	struct slabhead	*uh_slab_hash;	/* Hash table for slabs */
	int		uh_hashsize;	/* Current size of the hash table */
	int		uh_hashmask;	/* Mask used during hashing */
};

/*
 * Structures for per cpu queues.
 */

struct uma_bucket {
	LIST_ENTRY(uma_bucket)	ub_link;	/* Link into the zone */
	int16_t	ub_cnt;				/* Count of free items. */
	int16_t	ub_entries;			/* Max items. */
	void	*ub_bucket[];			/* actual allocation storage */
};

typedef struct uma_bucket * uma_bucket_t;

struct uma_cache {
	uma_bucket_t	uc_freebucket;	/* Bucket we're freeing to */
	uma_bucket_t	uc_allocbucket;	/* Bucket to allocate from */
	u_int64_t	uc_allocs;	/* Count of allocations */
};

typedef struct uma_cache * uma_cache_t;

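/*
 * A minimal sketch (hypothetical helper; the real fast path lives in
 * uma_core.c and also handles locking, bucket swapping, and refills) of
 * the allocation pattern the cache above enables: pop the next item off
 * the current alloc bucket, touching only per-CPU state.
 */
static __inline void *
cache_alloc_sketch(uma_cache_t cache)
{
	uma_bucket_t bucket;

	bucket = cache->uc_allocbucket;
	if (bucket == NULL || bucket->ub_cnt == 0)
		return (NULL);		/* Slow path would refill here. */
	cache->uc_allocs++;
	return (bucket->ub_bucket[--bucket->ub_cnt]);
}
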
/*
 * Keg management structure
 *
 * TODO: Optimize for cache line size
 *
 */
struct uma_keg {
	LIST_ENTRY(uma_keg)	uk_link;	/* List of all kegs */

	struct mtx	uk_lock;	/* Lock for the keg */
	struct uma_hash	uk_hash;

	LIST_HEAD(,uma_zone)	uk_zones;	/* Keg's zones */
	LIST_HEAD(,uma_slab)	uk_part_slab;	/* partially allocated slabs */
	LIST_HEAD(,uma_slab)	uk_free_slab;	/* empty slab list */
	LIST_HEAD(,uma_slab)	uk_full_slab;	/* full slabs */

	u_int32_t	uk_recurse;	/* Allocation recursion count */
	u_int32_t	uk_align;	/* Alignment mask */
	u_int32_t	uk_pages;	/* Total page count */
	u_int32_t	uk_free;	/* Count of items free in slabs */
	u_int32_t	uk_size;	/* Requested size of each item */
	u_int32_t	uk_rsize;	/* Real size of each item */
	u_int32_t	uk_maxpages;	/* Maximum number of pages to alloc */

	uma_init	uk_init;	/* Keg's init routine */
	uma_fini	uk_fini;	/* Keg's fini routine */
	uma_alloc	uk_allocf;	/* Allocation function */
	uma_free	uk_freef;	/* Free routine */

	struct vm_object	*uk_obj;	/* Zone specific object */
	vm_offset_t	uk_kva;		/* Base kva for zones with objs */
	uma_zone_t	uk_slabzone;	/* Slab zone backing us, if OFFPAGE */

	u_int16_t	uk_pgoff;	/* Offset to uma_slab struct */
	u_int16_t	uk_ppera;	/* pages per allocation from backend */
	u_int16_t	uk_ipers;	/* Items per slab */
	u_int16_t	uk_flags;	/* Internal flags */
};

/* Simpler reference to uma_keg for internal use. */
typedef struct uma_keg * uma_keg_t;

/* Page management structure */

/* Sorry for the union, but space efficiency is important */
struct uma_slab_head {
	uma_keg_t	us_keg;			/* Keg we live in */
	union {
		LIST_ENTRY(uma_slab)	_us_link;	/* slabs in zone */
		unsigned long	_us_size;	/* Size of allocation */
	} us_type;
	SLIST_ENTRY(uma_slab)	us_hlink;	/* Link for hash table */
	u_int8_t	*us_data;		/* First item */
	u_int8_t	us_flags;		/* Page flags see uma.h */
	u_int8_t	us_freecount;		/* How many are free? */
	u_int8_t	us_firstfree;		/* First free item index */
};

/* The standard slab structure */
struct uma_slab {
	struct uma_slab_head	us_head;	/* slab header data */
	struct {
		u_int8_t	us_item;
	} us_freelist[1];			/* actual number bigger */
};

/*
 * The slab structure for UMA_ZONE_REFCNT zones, whose items have
 * reference counters maintained in the slab.
 */
struct uma_slab_refcnt {
	struct uma_slab_head	us_head;	/* slab header data */
	struct {
		u_int8_t	us_item;
		u_int32_t	us_refcnt;
	} us_freelist[1];			/* actual number bigger */
};

#define	us_keg		us_head.us_keg
#define	us_link		us_head.us_type._us_link
#define	us_size		us_head.us_type._us_size
#define	us_hlink	us_head.us_hlink
#define	us_data		us_head.us_data
#define	us_flags	us_head.us_flags
#define	us_freecount	us_head.us_freecount
#define	us_firstfree	us_head.us_firstfree

typedef struct uma_slab * uma_slab_t;
typedef struct uma_slab_refcnt * uma_slabrefcnt_t;

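/*
 * A minimal sketch (hypothetical helper, no locking, not part of UMA
 * proper) of how the embedded free list above is consumed: us_firstfree
 * names the next free item index, and each free-list entry's us_item
 * links to the index after it.
 */
static __inline void *
slab_item_pop_sketch(uma_keg_t keg, uma_slab_t slab)
{
	u_int8_t freei;

	freei = slab->us_firstfree;
	slab->us_firstfree = slab->us_freelist[freei].us_item;
	slab->us_freecount--;

	return (slab->us_data + (keg->uk_rsize * freei));
}
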
/*
 * These give us the size of one free item reference within our corresponding
 * uma_slab structures, so that our calculations during zone setup are correct
 * regardless of what the compiler decides to do with padding the structure
 * arrays within uma_slab.
 */
#define	UMA_FRITM_SZ	(sizeof(struct uma_slab) - sizeof(struct uma_slab_head))
#define	UMA_FRITMREF_SZ	(sizeof(struct uma_slab_refcnt) -	\
    sizeof(struct uma_slab_head))

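/*
 * A sketch (hypothetical helper, illustrative only) of the kind of
 * zone-setup arithmetic these sizes exist for: each item in a slab costs
 * its aligned size plus one free-list entry, and both must fit alongside
 * the embedded header.
 */
static __inline int
keg_ipers_sketch(u_int32_t rsize, int refcnt)
{
	size_t fritm;

	fritm = refcnt ? UMA_FRITMREF_SZ : UMA_FRITM_SZ;
	return ((UMA_SLAB_SIZE - sizeof(struct uma_slab_head)) /
	    (rsize + fritm));
}
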
/*
 * Zone management structure
 *
 * TODO: Optimize for cache line size
 *
 */
struct uma_zone {
	char		*uz_name;	/* Text name of the zone */
	struct mtx	*uz_lock;	/* Lock for the zone (keg's lock) */
	uma_keg_t	uz_keg;		/* Our underlying Keg */

	LIST_ENTRY(uma_zone)	uz_link;	/* List of all zones in keg */
	LIST_HEAD(,uma_bucket)	uz_full_bucket;	/* full buckets */
	LIST_HEAD(,uma_bucket)	uz_free_bucket;	/* Buckets for frees */

	uma_ctor	uz_ctor;	/* Constructor for each allocation */
	uma_dtor	uz_dtor;	/* Destructor */
	uma_init	uz_init;	/* Initializer for each item */
	uma_fini	uz_fini;	/* Discards memory */

	u_int64_t	uz_allocs;	/* Total number of allocations */
	uint16_t	uz_fills;	/* Outstanding bucket fills */
	uint16_t	uz_count;	/* Highest value ub_cnt can have */

	/*
	 * This HAS to be the last item because we adjust the zone size
	 * based on NCPU and then allocate the space for the zones.
	 */
	struct uma_cache	uz_cpu[1];	/* Per cpu caches */
};

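/*
 * A sketch (hypothetical helper) of the sizing trick the comment above
 * refers to: uz_cpu[1] already provides one cache, so a zone serving
 * "ncpus" processors is allocated with ncpus - 1 extra cache slots
 * appended past the end of the structure.
 */
static __inline size_t
zone_size_sketch(int ncpus)
{
	return (sizeof(struct uma_zone) +
	    (ncpus - 1) * sizeof(struct uma_cache));
}
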
/*
 * These flags must not overlap with the UMA_ZONE flags specified in uma.h.
 */
#define UMA_ZFLAG_PRIVALLOC	0x1000		/* Use uz_allocf. */
#define UMA_ZFLAG_INTERNAL	0x2000		/* No offpage no PCPU. */
#define UMA_ZFLAG_FULL		0x4000		/* Reached uz_maxpages */
#define UMA_ZFLAG_CACHEONLY	0x8000		/* Don't ask VM for buckets. */

/* Internal prototypes */
static __inline uma_slab_t hash_sfind(struct uma_hash *hash, u_int8_t *data);
void *uma_large_malloc(int size, int wait);
void uma_large_free(uma_slab_t slab);

/* Lock Macros */

#define	ZONE_LOCK_INIT(z, lc)					\
	do {							\
		if ((lc))					\
			mtx_init((z)->uz_lock, (z)->uz_name,	\
			    (z)->uz_name, MTX_DEF | MTX_DUPOK);	\
		else						\
			mtx_init((z)->uz_lock, (z)->uz_name,	\
			    "UMA zone", MTX_DEF | MTX_DUPOK);	\
	} while (0)

#define	ZONE_LOCK_FINI(z)	mtx_destroy((z)->uz_lock)
#define	ZONE_LOCK(z)	mtx_lock((z)->uz_lock)
#define	ZONE_UNLOCK(z)	mtx_unlock((z)->uz_lock)

#define	CPU_LOCK_INIT(cpu)					\
	mtx_init(&uma_pcpu_mtx[(cpu)], "UMA pcpu", "UMA pcpu",	\
	    MTX_DEF | MTX_DUPOK)

#define	CPU_LOCK(cpu)						\
	mtx_lock(&uma_pcpu_mtx[(cpu)])

#define	CPU_UNLOCK(cpu)						\
	mtx_unlock(&uma_pcpu_mtx[(cpu)])

/*
 * Find a slab within a hash table.  This is used for OFFPAGE zones to lookup
 * the slab structure.
 *
 * Arguments:
 *	hash  The hash table to search.
 *	data  The base page of the item.
 *
 * Returns:
 *	A pointer to a slab if successful, else NULL.
 */
static __inline uma_slab_t
hash_sfind(struct uma_hash *hash, u_int8_t *data)
{
	uma_slab_t slab;
	int hval;

	hval = UMA_HASH(hash, data);

	SLIST_FOREACH(slab, &hash->uh_slab_hash[hval], us_hlink) {
		if ((u_int8_t *)slab->us_data == data)
			return (slab);
	}
	return (NULL);
}

/*
 * These inlines (ab)use the vm_page's object pointer to stash the slab
 * that a page belongs to: vsetslab() stores the slab there and marks the
 * page PG_SLAB, while vtoslab() recovers it, returning NULL for pages
 * that are not slab-backed.  vsetobj() restores a real object pointer.
 */
static __inline uma_slab_t
vtoslab(vm_offset_t va)
{
	vm_page_t p;
	uma_slab_t slab;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	slab = (uma_slab_t)p->object;

	if (p->flags & PG_SLAB)
		return (slab);
	else
		return (NULL);
}

static __inline void
vsetslab(vm_offset_t va, uma_slab_t slab)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	p->object = (vm_object_t)slab;
	p->flags |= PG_SLAB;
}

static __inline void
vsetobj(vm_offset_t va, vm_object_t obj)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	p->object = obj;
	p->flags &= ~PG_SLAB;
}

/*
 * The following two functions may be defined by architecture-specific code
 * if they can provide more efficient allocation functions.  This is useful
 * for using direct mapped addresses.
 */
void *uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait);
void uma_small_free(void *mem, int size, u_int8_t flags);

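/*
 * A rough sketch (assumptions: a direct-mapped architecture with a
 * PHYS_TO_DMAP() translation such as amd64, simplified error handling;
 * this is NOT the actual machine-dependent implementation) of what such
 * an override can look like: take a page from the VM and hand back its
 * direct-map address instead of building a new kernel mapping:
 *
 *	void *
 *	uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
 *	{
 *		vm_page_t m;
 *
 *		*flags = UMA_SLAB_PRIV;
 *		m = vm_page_alloc(NULL, 0,
 *		    VM_ALLOC_SYSTEM | VM_ALLOC_NOOBJ);
 *		if (m == NULL)
 *			return (NULL);
 *		return ((void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
 *	}
 */
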
#endif /* VM_UMA_INT_H */