/*
 * Copyright (c) 2002, Jeffrey Roberson <jroberson@chesapeake.net>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

/*
 *
 * Jeff Roberson <jroberson@chesapeake.net>
 *
 * This file includes definitions, structures, prototypes, and inlines that
 * should not be used outside of the actual implementation of UMA.
 *
 */

/*
 * Here's a quick description of the relationship between the objects:
 *
 * Zones contain lists of slabs which are kept on either the full, empty,
 * or partially allocated list, to reduce fragmentation.  They also contain
 * the user supplied value for size, which is adjusted for alignment to
 * produce rsize.  The zone also stores information for managing a hash of
 * page addresses that maps pages to uma_slab_t structures for pages that
 * don't have embedded uma_slab_t's.
 *
 * The uma_slab_t may be embedded in a UMA_SLAB_SIZE chunk of memory or it may
 * be allocated off the page from a special slab zone.  The free list within a
 * slab is managed with a linked list of indices, which are 8 bit values.  If
 * UMA_SLAB_SIZE is defined to be too large I will have to switch to 16 bit
 * values.  Currently on alpha you can get 250 or so 32 byte items and on x86
 * you can get 250 or so 16 byte items.  For item sizes that would yield more
 * than 10% memory waste we potentially allocate a separate uma_slab_t if this
 * will improve the number of items per slab that will fit.
 *
 * Another potential space optimization is storing the 8 bits of linkage in
 * the space wasted between items due to alignment, which may yield a much
 * better memory footprint for certain sizes of objects.  Another alternative
 * is to increase UMA_SLAB_SIZE, or allow for dynamic slab sizes.  I prefer
 * dynamic slab sizes because we could stick with 8 bit indices and only use
 * large slab sizes for zones with a lot of waste per slab.  This may create
 * inefficiencies in the vm subsystem due to fragmentation in the address
 * space.
 *
 * The only really gross cases, with regard to memory waste, are for those
 * items that are just over half the page size.  You can get nearly 50% waste,
 * so you fall back to the memory footprint of the power of two allocator.  I
 * have looked at memory allocation sizes on many of the machines available to
 * me, and there does not seem to be an abundance of allocations in this range,
 * so at this time it may not make sense to optimize for it.  This can, of
 * course, be solved with dynamic slab sizes.
 *
 */

/*
 *	This is the layout of a normal (non-OFFPAGE) slab:
 *
 *	i == item
 *	s == slab pointer
 *
 *	<----------------  Page (UMA_SLAB_SIZE) ------------------>
 *	___________________________________________________________
 *     | _  _  _  _  _  _  _  _  _  _  _  _  _  _  _   ___________ |
 *     ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i| |slab header||
 *     ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_| |___________||
 *     |___________________________________________________________|
 *
 *
 *	This is an OFFPAGE slab.  These can be larger than UMA_SLAB_SIZE.
 *
 *	___________________________________________________________
 *     | _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _   |
 *     ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i|  |
 *     ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_|  |
 *     |___________________________________________________________|
 *       ___________    ^
 *	|slab header|   |
 *	|___________|---*
 *
 */

#ifndef VM_UMA_INT_H
#define VM_UMA_INT_H

#define UMA_SLAB_SIZE	PAGE_SIZE	/* How big are our slabs? */
#define UMA_SLAB_MASK	(PAGE_SIZE - 1)	/* Mask to get back to the page */
#define UMA_SLAB_SHIFT	PAGE_SHIFT	/* Number of bits in PAGE_MASK */

#define UMA_BOOT_PAGES		30	/* Number of pages allocated for startup */
#define UMA_WORKING_TIME	20	/* Seconds' worth of items to keep */


/* Max waste before going to off page slab management */
#define UMA_MAX_WASTE	(UMA_SLAB_SIZE / 10)
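
/*
 * Illustrative arithmetic (not from the code; assumes 4 KB pages): the limit
 * above works out to 409 bytes.  A zone of 1024 byte items fits only 3 items
 * in a slab that also holds an embedded slab header, wasting close to a full
 * kilobyte, while moving the header off page would let the same page hold 4
 * items with no waste, so offpage management pays off there.
 */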

/*
 * This is the initial size of the hash table for uma_slabs that are managed
 * off page.  The hash expands by powers of two; currently it does not
 * shrink.  I doubt there will be many cases where the initial size is
 * exceeded.
 */
#define UMA_HASH_SIZE_INIT	32


/*
 * I should investigate other hashing algorithms.  This should yield a low
 * number of collisions if the pages are relatively contiguous.
 *
 * This is the same algorithm that most processor caches use.
 *
 * I'm shifting and masking instead of % because it should be faster.
 */

#define UMA_HASH(h, s) ((((unsigned long)s) >> UMA_SLAB_SHIFT) &	\
    (h)->uh_hashmask)

#define UMA_HASH_INSERT(h, s, mem)					\
		SLIST_INSERT_HEAD(&(h)->uh_slab_hash[UMA_HASH((h),	\
		    (mem))], (s), us_hlink);
#define UMA_HASH_REMOVE(h, s, mem)					\
		SLIST_REMOVE(&(h)->uh_slab_hash[UMA_HASH((h),		\
		    (mem))], (s), uma_slab, us_hlink);

/* Page management structure */

/* Sorry for the union, but space efficiency is important */
struct uma_slab {
	uma_zone_t	us_zone;		/* Zone we live in */
	union {
		LIST_ENTRY(uma_slab)	us_link;	/* slabs in zone */
		unsigned long	us_size;	/* Size of allocation */
	} us_type;
	SLIST_ENTRY(uma_slab)	us_hlink;	/* Link for hash table */
	u_int8_t	*us_data;		/* First item */
	u_int8_t	us_flags;		/* Page flags, see uma.h */
	u_int8_t	us_freecount;	/* How many are free? */
	u_int8_t	us_firstfree;	/* First free item index */
	u_int8_t	us_freelist[1];	/* Free list (actually larger) */
};

#define us_link	us_type.us_link
#define us_size	us_type.us_size

typedef struct uma_slab * uma_slab_t;
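
/*
 * Illustrative sketch, not part of UMA: how the index based free list in a
 * slab is consumed.  us_firstfree names the first free item and each
 * us_freelist[] entry names the next free index.  The function name is
 * hypothetical and rsize would really come from the zone (uz_rsize); the
 * real code lives in uma_core.c and runs with the zone lock held.
 */
static __inline void *
uma_example_slab_pop(uma_slab_t slab, u_int32_t rsize)
{
	u_int8_t freei;

	freei = slab->us_firstfree;			/* Take the head index. */
	slab->us_firstfree = slab->us_freelist[freei];	/* Unlink it. */
	slab->us_freecount--;

	return (slab->us_data + (rsize * freei));	/* Address of the item. */
}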

/* Hash table for freed address -> slab translation */

SLIST_HEAD(slabhead, uma_slab);

struct uma_hash {
	struct slabhead	*uh_slab_hash;	/* Hash table for slabs */
	int		uh_hashsize;	/* Current size of the hash table */
	int		uh_hashmask;	/* Mask used during hashing */
};

/*
 * Structures for per cpu queues.
 */

/*
 * This size was chosen so that the struct bucket size is roughly
 * 128 * sizeof(void *).  This is exactly true for x86, and for alpha
 * it would be 32 bits smaller if it didn't have alignment adjustments.
 */

#define UMA_BUCKET_SIZE	125

struct uma_bucket {
	LIST_ENTRY(uma_bucket)	ub_link;	/* Link into the zone */
	int16_t	ub_ptr;				/* Index of current item */
	void	*ub_bucket[UMA_BUCKET_SIZE];	/* actual allocation storage */
};

typedef struct uma_bucket * uma_bucket_t;
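
/*
 * Illustrative sketch, not part of UMA: buckets are simple LIFO arrays of
 * item pointers.  ub_ptr indexes the most recently filled slot and is -1
 * when the bucket is empty.  Hypothetical helper; the real fast path is in
 * uma_zalloc_arg()/uma_zfree_arg() in uma_core.c.
 */
static __inline void *
uma_example_bucket_pop(uma_bucket_t bucket)
{
	void *item;

	if (bucket->ub_ptr < 0)			/* Nothing cached. */
		return (NULL);
	item = bucket->ub_bucket[bucket->ub_ptr];
	bucket->ub_bucket[bucket->ub_ptr] = NULL;
	bucket->ub_ptr--;

	return (item);
}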

struct uma_cache {
	struct mtx	uc_lock;	/* Lock for this cpu's buckets */
	uma_bucket_t	uc_freebucket;	/* Bucket we're freeing to */
	uma_bucket_t	uc_allocbucket;	/* Bucket to allocate from */
	u_int64_t	uc_allocs;	/* Count of allocations */
};

typedef struct uma_cache * uma_cache_t;

#define LOCKNAME_LEN	16		/* Length of the name for cpu locks */

/*
 * Zone management structure
 *
 * TODO: Optimize for cache line size
 *
 */
struct uma_zone {
	char		uz_lname[LOCKNAME_LEN];	/* Text name for the cpu lock */
	char		*uz_name;	/* Text name of the zone */
	LIST_ENTRY(uma_zone)	uz_link;	/* List of all zones */
	u_int32_t	uz_align;	/* Alignment mask */
	u_int32_t	uz_pages;	/* Total page count */

/* Used during alloc / free */
	struct mtx	uz_lock;	/* Lock for the zone */
	u_int32_t	uz_free;	/* Count of items free in slabs */
	u_int16_t	uz_ipers;	/* Items per slab */
	u_int16_t	uz_flags;	/* Internal flags */

	LIST_HEAD(,uma_slab)	uz_part_slab;	/* partially allocated slabs */
	LIST_HEAD(,uma_slab)	uz_free_slab;	/* empty slab list */
	LIST_HEAD(,uma_slab)	uz_full_slab;	/* full slabs */
	LIST_HEAD(,uma_bucket)	uz_full_bucket;	/* full buckets */
	LIST_HEAD(,uma_bucket)	uz_free_bucket;	/* Buckets for frees */
	u_int32_t	uz_size;	/* Requested size of each item */
	u_int32_t	uz_rsize;	/* Real size of each item */

	struct uma_hash	uz_hash;
	u_int16_t	uz_pgoff;	/* Offset to uma_slab struct */
	u_int16_t	uz_ppera;	/* pages per allocation from backend */
	u_int16_t	uz_cacheoff;	/* Next cache offset */
	u_int16_t	uz_cachemax;	/* Max cache offset */

	uma_ctor	uz_ctor;	/* Constructor for each allocation */
	uma_dtor	uz_dtor;	/* Destructor */
	u_int64_t	uz_allocs;	/* Total number of allocations */

	uma_init	uz_init;	/* Initializer for each item */
	uma_fini	uz_fini;	/* Discards memory */
	uma_alloc	uz_allocf;	/* Allocation function */
	uma_free	uz_freef;	/* Free routine */
	struct vm_object	*uz_obj;	/* Zone specific object */
	vm_offset_t	uz_kva;		/* Base kva for zones with objs */
	u_int32_t	uz_maxpages;	/* Maximum number of pages to alloc */
	u_int32_t	uz_cachefree;	/* Last count of items free in caches */
	u_int64_t	uz_oallocs;	/* old allocs count */
	u_int64_t	uz_wssize;	/* Working set size */
	int		uz_recurse;	/* Allocation recursion count */
	uint16_t	uz_fills;	/* Outstanding bucket fills */
	uint16_t	uz_count;	/* Highest value ub_ptr can have */
	/*
	 * This HAS to be the last item because we adjust the zone size
	 * based on NCPU and then allocate the space for the zones.
	 */
	struct uma_cache	uz_cpu[1];	/* Per cpu caches */
};
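
/*
 * Because uz_cpu[] is the trailing member, the space for a zone is carved
 * out as roughly sizeof(struct uma_zone) plus one struct uma_cache per
 * additional possible cpu (one cache is already counted inside the struct),
 * so that uz_cpu[cpu] is valid for every cpu in the system.
 */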

#define UMA_CACHE_INC	16	/* How much will we move data */

#define UMA_ZFLAG_OFFPAGE	0x0001	/* Struct slab/freelist off page */
#define UMA_ZFLAG_PRIVALLOC	0x0002	/* Zone has supplied its own alloc */
#define UMA_ZFLAG_INTERNAL	0x0004	/* Internal zone, no offpage no PCPU */
#define UMA_ZFLAG_MALLOC	0x0008	/* Zone created by malloc */
#define UMA_ZFLAG_NOFREE	0x0010	/* Don't free data from this zone */
#define UMA_ZFLAG_FULL		0x0020	/* This zone reached uz_maxpages */
#define UMA_ZFLAG_BUCKETCACHE	0x0040	/* Only allocate buckets from cache */
#define	UMA_ZFLAG_HASH		0x0080	/* Look up slab via hash */

/* This lives in uflags */
#define UMA_ZONE_INTERNAL	0x1000	/* Internal zone for uflags */

/* Internal prototypes */
static __inline uma_slab_t hash_sfind(struct uma_hash *hash, u_int8_t *data);
void *uma_large_malloc(int size, int wait);
void uma_large_free(uma_slab_t slab);

/* Lock Macros */

#define	ZONE_LOCK_INIT(z, lc)					\
	do {							\
		if ((lc))					\
			mtx_init(&(z)->uz_lock, (z)->uz_name,	\
			    (z)->uz_name, MTX_DEF | MTX_DUPOK);	\
		else						\
			mtx_init(&(z)->uz_lock, (z)->uz_name,	\
			    "UMA zone", MTX_DEF | MTX_DUPOK);	\
	} while (0)

#define	ZONE_LOCK_FINI(z)	mtx_destroy(&(z)->uz_lock)
#define	ZONE_LOCK(z)	mtx_lock(&(z)->uz_lock)
#define ZONE_UNLOCK(z)	mtx_unlock(&(z)->uz_lock)

#define	CPU_LOCK_INIT(z, cpu, lc)				\
	do {							\
		if ((lc))					\
			mtx_init(&(z)->uz_cpu[(cpu)].uc_lock,	\
			    (z)->uz_lname, (z)->uz_lname,	\
			    MTX_DEF | MTX_DUPOK);		\
		else						\
			mtx_init(&(z)->uz_cpu[(cpu)].uc_lock,	\
			    (z)->uz_lname, "UMA cpu",		\
			    MTX_DEF | MTX_DUPOK);		\
	} while (0)

#define	CPU_LOCK_FINI(z, cpu)	\
	mtx_destroy(&(z)->uz_cpu[(cpu)].uc_lock)

#define CPU_LOCK(z, cpu)	\
	mtx_lock(&(z)->uz_cpu[(cpu)].uc_lock)

#define CPU_UNLOCK(z, cpu)	\
	mtx_unlock(&(z)->uz_cpu[(cpu)].uc_lock)
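
/*
 * Illustrative sketch, not part of UMA: the shape of the per cpu allocation
 * fast path.  Under the cpu's bucket lock an item is popped straight off
 * uc_allocbucket without touching the zone lock.  The helper name is
 * hypothetical; the real path (including bucket swapping and refills) is
 * uma_zalloc_arg() in uma_core.c.
 */
static __inline void *
uma_example_cache_alloc(uma_zone_t zone, int cpu)
{
	uma_cache_t cache;
	uma_bucket_t bucket;
	void *item;

	item = NULL;
	CPU_LOCK(zone, cpu);
	cache = &zone->uz_cpu[cpu];
	bucket = cache->uc_allocbucket;
	if (bucket != NULL && bucket->ub_ptr > -1) {
		item = bucket->ub_bucket[bucket->ub_ptr];
		bucket->ub_bucket[bucket->ub_ptr] = NULL;
		bucket->ub_ptr--;
		cache->uc_allocs++;
	}
	CPU_UNLOCK(zone, cpu);

	return (item);
}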

/*
 * Find a slab within a hash table.  This is used for OFFPAGE zones to look up
 * the slab structure.
 *
 * Arguments:
 *	hash  The hash table to search.
 *	data  The base page of the item.
 *
 * Returns:
 *	A pointer to a slab if successful, else NULL.
 */
static __inline uma_slab_t
hash_sfind(struct uma_hash *hash, u_int8_t *data)
{
	uma_slab_t slab;
	int hval;

	hval = UMA_HASH(hash, data);

	SLIST_FOREACH(slab, &hash->uh_slab_hash[hval], us_hlink) {
		if ((u_int8_t *)slab->us_data == data)
			return (slab);
	}
	return (NULL);
}
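
/*
 * Illustrative sketch, not part of UMA: how a free for a zone that keeps its
 * slab headers off page finds the right slab.  The item address is masked
 * back to the start of its page and looked up in the hash that
 * UMA_HASH_INSERT populated when the slab was created.  Hypothetical helper
 * name; compare uma_zfree_internal() in uma_core.c.
 */
static __inline uma_slab_t
uma_example_item_to_slab(uma_zone_t zone, void *item)
{
	u_int8_t *mem;

	mem = (u_int8_t *)((unsigned long)item & (~UMA_SLAB_MASK));
	return (hash_sfind(&zone->uz_hash, mem));
}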

static __inline uma_slab_t
vtoslab(vm_offset_t va)
{
	vm_page_t p;
	uma_slab_t slab;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	slab = (uma_slab_t)p->object;

	if (p->flags & PG_SLAB)
		return (slab);
	else
		return (NULL);
}

static __inline void
vsetslab(vm_offset_t va, uma_slab_t slab)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)va));
	p->object = (vm_object_t)slab;
	p->flags |= PG_SLAB;
}

static __inline void
vsetobj(vm_offset_t va, vm_object_t obj)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)va));
	p->object = obj;
	p->flags &= ~PG_SLAB;
}
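
/*
 * Illustrative sketch, not part of UMA: the companion lookup for zones that
 * do not hash their slabs.  The item is masked back to its page and the slab
 * header that vsetslab() stashed in the page's object pointer is recovered
 * with vtoslab().  Hypothetical helper name.
 */
static __inline uma_slab_t
uma_example_fast_item_to_slab(void *item)
{
	u_int8_t *mem;

	mem = (u_int8_t *)((unsigned long)item & (~UMA_SLAB_MASK));
	return (vtoslab((vm_offset_t)mem));
}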

#endif /* VM_UMA_INT_H */