/*-
 * Copyright (c) 2004, 2005,
 *     Bosko Milekic <bmilekic@freebsd.org>
 * Copyright (c) 2002, 2003, 2004, 2005,
 *     Jeffrey Roberson <jeff@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

/*
 * This file includes definitions, structures, prototypes, and inlines that
 * should not be used outside of the actual implementation of UMA.
 */

/*
 * Here's a quick description of the relationship between the objects:
 *
 * Kegs contain lists of slabs, which are kept on one of three lists (full,
 * empty, or partially allocated) to reduce fragmentation.  They also store
 * the user-supplied size, which is adjusted for alignment purposes; rsize is
 * the result of that adjustment.  The Keg also stores information for
 * managing a hash of page addresses that maps pages to uma_slab_t structures
 * for pages that don't have embedded uma_slab_t's.
 *
 * The uma_slab_t may be embedded in a UMA_SLAB_SIZE chunk of memory or it may
 * be allocated off the page from a special slab zone.  The free list within a
 * slab is managed with a linked list of indexes, which are 8-bit values.  If
 * UMA_SLAB_SIZE is defined to be too large we will have to switch to 16-bit
 * values.  Currently on alpha you can get 250 or so 32-byte items and on x86
 * you can get 250 or so 16-byte items.  For item sizes that would yield more
 * than 10% memory waste we potentially allocate a separate uma_slab_t if this
 * will improve the number of items per slab that will fit.
 *
 * One potential space optimization is to store the 8-bit linkage in the space
 * wasted between items due to alignment; this may yield a much better memory
 * footprint for certain sizes of objects.  Another alternative is to increase
 * UMA_SLAB_SIZE, or allow for dynamic slab sizes.  Dynamic slab sizes are
 * preferable because we could stick with 8-bit indexes and only use large
 * slab sizes for zones with a lot of waste per slab.  This may create
 * inefficiencies in the vm subsystem due to fragmentation in the address
 * space.
 *
 * The only really gross cases, with regards to memory waste, are items that
 * are just over half the page size.  You can get nearly 50% waste, so you
 * fall back to the memory footprint of the power-of-two allocator.  Having
 * looked at memory allocation sizes on many of the machines available to us,
 * there does not seem to be an abundance of allocations in this range, so at
 * this time it may not make sense to optimize for it.  This can, of course,
 * be solved with dynamic slab sizes.
 *
 * Kegs may serve multiple Zones, but by far most of the time they serve only
 * one.  When a Zone is created, a Keg is allocated and set up for it.  While
 * the backing Keg stores slabs, the Zone caches Buckets of items allocated
 * from the slabs.  Each Zone is equipped with an init/fini and ctor/dtor
 * pair, as well as with its own set of small per-CPU caches, layered above
 * the Zone's general Bucket cache.
 *
 * The PCPU caches are protected by their own locks, while the Zones backed
 * by the same Keg all share a common Keg lock (to coalesce contention on
 * the backing slabs).  The backing Keg typically serves only one Zone, but
 * in the case of multiple Zones, one of the Zones is considered the
 * Master Zone and all Zone-related stats from the Keg are accounted to the
 * Master Zone.  For an example of a Multi-Zone setup, refer to the
 * Mbuf allocation code.
 */
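
/*
 * To make the layering concrete, here is a sketch of the allocation path
 * (simplified pseudocode only; the real logic, with locking and error
 * handling, lives in uma_core.c):
 *
 *	uma_zalloc(zone):
 *		take an item from this CPU's uc_allocbucket, if it has one;
 *		else take a full Bucket from the Zone's uz_full_bucket list;
 *		else carve an item out of a slab on the Keg's uk_part_slab
 *		    list (or uk_free_slab, or a freshly allocated slab);
 *		finally run uz_ctor (and uk_init, for a pristine item).
 */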

/*
 *	This is the representation of a normal (non-OFFPAGE) slab:
 *
 *	i == item
 *	s == slab pointer
 *
 *	<----------------  Page (UMA_SLAB_SIZE) ------------------>
 *	___________________________________________________________
 *     | _  _  _  _  _  _  _  _  _  _  _  _  _  _  _   ___________ |
 *     ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i| |slab header||
 *     ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_| |___________||
 *     |___________________________________________________________|
 *
 *
 *	This is an OFFPAGE slab.  These can be larger than UMA_SLAB_SIZE.
 *
 *	___________________________________________________________
 *     | _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _   |
 *     ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i|  |
 *     ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_|  |
 *     |___________________________________________________________|
 *       ___________    ^
 *	|slab header|   |
 *	|___________|---*
 *
 */

#ifndef VM_UMA_INT_H
#define VM_UMA_INT_H

#define UMA_SLAB_SIZE	PAGE_SIZE	/* How big are our slabs? */
#define UMA_SLAB_MASK	(PAGE_SIZE - 1)	/* Mask to get back to the page */
#define UMA_SLAB_SHIFT	PAGE_SHIFT	/* Number of bits in PAGE_MASK */

#define UMA_BOOT_PAGES		40	/* Pages allocated for startup */

/* Max waste before going to off-page slab management */
#define UMA_MAX_WASTE	(UMA_SLAB_SIZE / 10)
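
/*
 * An illustrative calculation (header sizes are approximate and vary by
 * architecture): with 4KB pages, UMA_MAX_WASTE is 409 bytes.  A 1024-byte
 * item sharing its page with an embedded slab header fits only 3 items
 * per page and wastes nearly 1KB, well over the limit, so the header is
 * pushed off-page and exactly 4 items fit with no in-page waste.
 */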

/*
 * This is the initial size of the hash table for uma_slabs that are managed
 * off-page.  I doubt there will be many cases where it is exceeded.  The
 * hash expands by powers of two; currently it does not shrink.
 */
#define UMA_HASH_SIZE_INIT	32

/*
 * Other hashing algorithms may be worth investigating, but this one should
 * yield a low number of collisions if the pages are relatively contiguous.
 *
 * This is the same algorithm that most processor caches use.
 *
 * We shift and mask instead of using % because it should be faster.
 */

#define UMA_HASH(h, s) ((((unsigned long)s) >> UMA_SLAB_SHIFT) &	\
    (h)->uh_hashmask)
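
/*
 * A worked example, assuming 4KB pages (UMA_SLAB_SHIFT == 12) and the
 * initial 32-bucket table (uh_hashmask == 31):
 *
 *	UMA_HASH(h, 0xc4321000) == (0xc4321000 >> 12) & 31
 *				== 0xc4321 & 0x1f
 *				== 1
 *
 * Consecutive pages land in consecutive buckets, so collisions stay low
 * when slab pages are relatively contiguous.
 */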

#define UMA_HASH_INSERT(h, s, mem)					\
		SLIST_INSERT_HEAD(&(h)->uh_slab_hash[UMA_HASH((h),	\
		    (mem))], (s), us_hlink);
#define UMA_HASH_REMOVE(h, s, mem)					\
		SLIST_REMOVE(&(h)->uh_slab_hash[UMA_HASH((h),		\
		    (mem))], (s), uma_slab, us_hlink);

/* Hash table for freed address -> slab translation */

SLIST_HEAD(slabhead, uma_slab);

struct uma_hash {
	struct slabhead	*uh_slab_hash;	/* Hash table for slabs */
	int		uh_hashsize;	/* Current size of the hash table */
	int		uh_hashmask;	/* Mask used during hashing */
};

/*
 * Structures for per-CPU queues.
 */

struct uma_bucket {
	LIST_ENTRY(uma_bucket)	ub_link;	/* Link into the zone */
	int16_t	ub_cnt;				/* Count of free items. */
	int16_t	ub_entries;			/* Max items. */
	void	*ub_bucket[];			/* actual allocation storage */
};

typedef struct uma_bucket * uma_bucket_t;

struct uma_cache {
	uma_bucket_t	uc_freebucket;	/* Bucket we're freeing to */
	uma_bucket_t	uc_allocbucket;	/* Bucket to allocate from */
	u_int64_t	uc_allocs;	/* Count of allocations */
};

typedef struct uma_cache * uma_cache_t;
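
/*
 * A sketch of how the two buckets are used on the fast path (simplified;
 * the real code in uma_core.c also swaps and refills buckets when one
 * side runs dry):
 *
 *	alloc:	bucket = cache->uc_allocbucket;
 *		if (bucket != NULL && bucket->ub_cnt > 0)
 *			item = bucket->ub_bucket[--bucket->ub_cnt];
 *
 *	free:	bucket = cache->uc_freebucket;
 *		if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries)
 *			bucket->ub_bucket[bucket->ub_cnt++] = item;
 */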

/*
 * Keg management structure
 *
 * TODO: Optimize for cache line size
 *
 */
struct uma_keg {
	LIST_ENTRY(uma_keg)	uk_link;	/* List of all kegs */

	struct mtx	uk_lock;	/* Lock for the keg */
	struct uma_hash	uk_hash;

	LIST_HEAD(,uma_zone)	uk_zones;	/* Keg's zones */
	LIST_HEAD(,uma_slab)	uk_part_slab;	/* partially allocated slabs */
	LIST_HEAD(,uma_slab)	uk_free_slab;	/* empty slab list */
	LIST_HEAD(,uma_slab)	uk_full_slab;	/* full slabs */

	u_int32_t	uk_recurse;	/* Allocation recursion count */
	u_int32_t	uk_align;	/* Alignment mask */
	u_int32_t	uk_pages;	/* Total page count */
	u_int32_t	uk_free;	/* Count of items free in slabs */
	u_int32_t	uk_size;	/* Requested size of each item */
	u_int32_t	uk_rsize;	/* Real size of each item */
	u_int32_t	uk_maxpages;	/* Maximum number of pages to alloc */

	uma_init	uk_init;	/* Keg's init routine */
	uma_fini	uk_fini;	/* Keg's fini routine */
	uma_alloc	uk_allocf;	/* Allocation function */
	uma_free	uk_freef;	/* Free routine */

	struct vm_object	*uk_obj;	/* Zone specific object */
	vm_offset_t	uk_kva;		/* Base kva for zones with objs */
	uma_zone_t	uk_slabzone;	/* Slab zone backing us, if OFFPAGE */

	u_int16_t	uk_pgoff;	/* Offset to uma_slab struct */
	u_int16_t	uk_ppera;	/* pages per allocation from backend */
	u_int16_t	uk_ipers;	/* Items per slab */
	u_int16_t	uk_flags;	/* Internal flags */
};

/* Simpler reference to uma_keg for internal use. */
typedef struct uma_keg * uma_keg_t;

/* Page management structure */

/* Sorry for the union, but space efficiency is important */
struct uma_slab_head {
	uma_keg_t	us_keg;			/* Keg we live in */
	union {
		LIST_ENTRY(uma_slab)	_us_link;	/* slabs in zone */
		unsigned long	_us_size;	/* Size of allocation */
	} us_type;
	SLIST_ENTRY(uma_slab)	us_hlink;	/* Link for hash table */
	u_int8_t	*us_data;		/* First item */
	u_int8_t	us_flags;		/* Page flags see uma.h */
	u_int8_t	us_freecount;		/* How many are free? */
	u_int8_t	us_firstfree;		/* First free item index */
};

/* The standard slab structure */
struct uma_slab {
	struct uma_slab_head	us_head;	/* slab header data */
	struct {
		u_int8_t	us_item;
	} us_freelist[1];			/* actual number is bigger */
};

/*
 * The slab structure for UMA_ZONE_REFCNT zones, whose items we maintain
 * reference counters for within the slab.
 */
struct uma_slab_refcnt {
	struct uma_slab_head	us_head;	/* slab header data */
	struct {
		u_int8_t	us_item;
		u_int32_t	us_refcnt;
	} us_freelist[1];			/* actual number is bigger */
};

#define	us_keg		us_head.us_keg
#define	us_link		us_head.us_type._us_link
#define	us_size		us_head.us_type._us_size
#define	us_hlink	us_head.us_hlink
#define	us_data		us_head.us_data
#define	us_flags	us_head.us_flags
#define	us_freecount	us_head.us_freecount
#define	us_firstfree	us_head.us_firstfree

typedef struct uma_slab * uma_slab_t;
typedef struct uma_slab_refcnt * uma_slabrefcnt_t;
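
/*
 * A sketch of how the index-linked free list is consumed when an item is
 * carved out of a slab (simplified from the slab allocation code in
 * uma_core.c):
 *
 *	freei = slab->us_firstfree;
 *	slab->us_firstfree = slab->us_freelist[freei].us_item;
 *	item = slab->us_data + (keg->uk_rsize * freei);
 *	slab->us_freecount--;
 *
 * Because the links are item indexes rather than pointers, one byte per
 * item suffices for any slab of up to 256 items.
 */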

/*
 * These give us the size of one free item reference within our corresponding
 * uma_slab structures, so that our calculations during zone setup are correct
 * regardless of what the compiler decides to do with padding the structure
 * arrays within uma_slab.
 */
#define	UMA_FRITM_SZ	(sizeof(struct uma_slab) - sizeof(struct uma_slab_head))
#define	UMA_FRITMREF_SZ	(sizeof(struct uma_slab_refcnt) -	\
    sizeof(struct uma_slab_head))
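
/*
 * For example, the items-per-slab calculation for a non-OFFPAGE slab is
 * roughly (a sketch of the sizing math in uma_core.c, not the exact code):
 *
 *	ipers = (UMA_SLAB_SIZE - sizeof(struct uma_slab_head)) /
 *	    (rsize + UMA_FRITM_SZ);
 *
 * Using UMA_FRITM_SZ instead of sizeof(u_int8_t) keeps this correct even
 * if the compiler pads the us_freelist array elements.
 */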

/*
 * Zone management structure
 *
 * TODO: Optimize for cache line size
 *
 */
struct uma_zone {
	char		*uz_name;	/* Text name of the zone */
	struct mtx	*uz_lock;	/* Lock for the zone (keg's lock) */
	uma_keg_t	uz_keg;		/* Our underlying Keg */

	LIST_ENTRY(uma_zone)	uz_link;	/* List of all zones in keg */
	LIST_HEAD(,uma_bucket)	uz_full_bucket;	/* full buckets */
	LIST_HEAD(,uma_bucket)	uz_free_bucket;	/* Buckets for frees */

	uma_ctor	uz_ctor;	/* Constructor for each allocation */
	uma_dtor	uz_dtor;	/* Destructor */
	uma_init	uz_init;	/* Initializer for each item */
	uma_fini	uz_fini;	/* Discards memory */

	u_int64_t	uz_allocs;	/* Total number of allocations */
	uint16_t	uz_fills;	/* Outstanding bucket fills */
	uint16_t	uz_count;	/* Highest value ub_cnt can have */

	/*
	 * This HAS to be the last item because we adjust the zone size
	 * based on NCPU and then allocate the space for the zones.
	 */
	struct uma_cache	uz_cpu[1];	/* Per-CPU caches */
};

/*
 * These flags must not overlap with the UMA_ZONE flags specified in uma.h.
 */
#define UMA_ZFLAG_PRIVALLOC	0x1000		/* Use uz_allocf. */
#define UMA_ZFLAG_INTERNAL	0x2000		/* No offpage, no PCPU. */
#define UMA_ZFLAG_FULL		0x4000		/* Reached uz_maxpages */
#define UMA_ZFLAG_CACHEONLY	0x8000		/* Don't ask VM for buckets. */

/* Internal prototypes */
static __inline uma_slab_t hash_sfind(struct uma_hash *hash, u_int8_t *data);
void *uma_large_malloc(int size, int wait);
void uma_large_free(uma_slab_t slab);

/* Lock Macros */

#define	ZONE_LOCK_INIT(z, lc)					\
	do {							\
		if ((lc))					\
			mtx_init((z)->uz_lock, (z)->uz_name,	\
			    (z)->uz_name, MTX_DEF | MTX_DUPOK);	\
		else						\
			mtx_init((z)->uz_lock, (z)->uz_name,	\
			    "UMA zone", MTX_DEF | MTX_DUPOK);	\
	} while (0)

#define	ZONE_LOCK_FINI(z)	mtx_destroy((z)->uz_lock)
#define	ZONE_LOCK(z)	mtx_lock((z)->uz_lock)
#define	ZONE_UNLOCK(z)	mtx_unlock((z)->uz_lock)

#define	CPU_LOCK_INIT(cpu)					\
	mtx_init(&uma_pcpu_mtx[(cpu)], "UMA pcpu", "UMA pcpu",	\
	    MTX_DEF | MTX_DUPOK)

#define	CPU_LOCK(cpu)						\
	mtx_lock(&uma_pcpu_mtx[(cpu)])

#define	CPU_UNLOCK(cpu)						\
	mtx_unlock(&uma_pcpu_mtx[(cpu)])
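
/*
 * Typical usage (a sketch; see uma_core.c for the real fast path): the
 * per-CPU lock covers only cache manipulation, and the zone lock is taken
 * only when we must fall back to the bucket lists or the keg:
 *
 *	CPU_LOCK(cpu);
 *	cache = &zone->uz_cpu[cpu];
 *	... fast path on the cache buckets ...
 *	CPU_UNLOCK(cpu);
 *
 *	ZONE_LOCK(zone);
 *	... slow path on uz_full_bucket / uz_free_bucket / keg slabs ...
 *	ZONE_UNLOCK(zone);
 */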

/*
 * Find a slab within a hash table.  This is used for OFFPAGE zones to look
 * up the slab structure.
 *
 * Arguments:
 *	hash  The hash table to search.
 *	data  The base page of the item.
 *
 * Returns:
 *	A pointer to a slab if successful, else NULL.
 */
static __inline uma_slab_t
hash_sfind(struct uma_hash *hash, u_int8_t *data)
{
	uma_slab_t slab;
	int hval;

	hval = UMA_HASH(hash, data);

	SLIST_FOREACH(slab, &hash->uh_slab_hash[hval], us_hlink) {
		if ((u_int8_t *)slab->us_data == data)
			return (slab);
	}
	return (NULL);
}

static __inline uma_slab_t
vtoslab(vm_offset_t va)
{
	vm_page_t p;
	uma_slab_t slab;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	slab = (uma_slab_t)p->object;

	if (p->flags & PG_SLAB)
		return (slab);
	else
		return (NULL);
}

static __inline void
vsetslab(vm_offset_t va, uma_slab_t slab)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	p->object = (vm_object_t)slab;
	p->flags |= PG_SLAB;
}

static __inline void
vsetobj(vm_offset_t va, vm_object_t obj)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	p->object = obj;
	p->flags &= ~PG_SLAB;
}
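
/*
 * A sketch of how the inlines above are used (simplified): for zones whose
 * slab headers are not hashed (non-OFFPAGE), the free path recovers the
 * slab for an arbitrary item pointer straight from the vm_page:
 *
 *	slab = vtoslab((vm_offset_t)item & (~UMA_SLAB_MASK));
 *
 * which avoids the hash lookup that hash_sfind() must do for OFFPAGE
 * zones.
 */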

/*
 * The following two functions may be defined by architecture-specific code
 * if they can provide more efficient allocation functions.  This is useful
 * for using direct-mapped addresses.
 */
void *uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait);
void uma_small_free(void *mem, int size, u_int8_t flags);
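
/*
 * A minimal sketch of such an allocator for a direct-mapped architecture
 * (loosely modeled on the amd64 version; the vm_page_alloc() flags are
 * elided and illustrative only):
 *
 *	void *
 *	uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
 *	{
 *		vm_page_t m;
 *
 *		*pflag = UMA_SLAB_PRIV;
 *		m = vm_page_alloc(NULL, 0, ...);
 *		if (m == NULL)
 *			return (NULL);
 *		return ((void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
 *	}
 */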

#endif /* VM_UMA_INT_H */