xref: /freebsd/sys/vm/uma_int.h (revision 1de7b4b805ddbf2429da511c053686ac4591ed89)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson <jeff@FreeBSD.org>
 * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

#include <sys/_bitset.h>
#include <sys/_task.h>

/*
 * This file includes definitions, structures, prototypes, and inlines that
 * should not be used outside of the actual implementation of UMA.
 */

/*
 * Here's a quick description of the relationship between the objects:
 *
 * Kegs contain lists of slabs.  Each slab is kept on one of three lists
 * (full, empty, or partially allocated) to reduce fragmentation.  Kegs also
 * record the user-supplied item size, which is adjusted for alignment;
 * rsize is the result of that adjustment.  The Keg also stores the
 * information needed to manage a hash of page addresses that maps pages to
 * uma_slab_t structures for pages that don't have embedded uma_slab_t's.
 *
 * The uma_slab_t may be embedded in a UMA_SLAB_SIZE chunk of memory or it may
 * be allocated off the page from a special slab zone.  The free list within a
 * slab is managed with a bitmask.  For item sizes that would yield more than
 * 10% memory waste we potentially allocate a separate uma_slab_t if this will
 * improve the number of items that fit per slab.
 *
 * The only really gross cases, with regard to memory waste, are for those
 * items that are just over half the page size.  You can get nearly 50% waste,
 * so you fall back to the memory footprint of the power of two allocator.  I
 * have looked at memory allocation sizes on many of the machines available to
 * me, and there does not seem to be an abundance of allocations in this range
 * so at this time it may not make sense to optimize for it.  This can, of
 * course, be solved with dynamic slab sizes.
 *
 * Kegs may serve multiple Zones but by far most of the time they only serve
 * one.  When a Zone is created, a Keg is allocated and set up for it.  While
 * the backing Keg stores slabs, the Zone caches Buckets of items allocated
 * from the slabs.  Each Zone is equipped with an init/fini and ctor/dtor
 * pair, as well as with its own set of small per-CPU caches, layered above
 * the Zone's general Bucket cache.
 *
 * The PCPU caches are protected by critical sections, and may be accessed
 * safely only from their associated CPU, while the Zones backed by the same
 * Keg all share a common Keg lock (to coalesce contention on the backing
 * slabs).  The backing Keg typically only serves one Zone but in the case of
 * multiple Zones, one of the Zones is considered the Master Zone and all
 * Zone-related stats from the Keg are accounted to the Master Zone.  For an
 * example of a Multi-Zone setup, refer to the Mbuf allocation code.
 */
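
/*
 * For reference, a consumer only ever deals with the Zone: it creates one
 * with uma_zcreate() and allocates and frees items through it.  A minimal
 * sketch using the public API from uma.h ("struct foo" is a hypothetical
 * consumer type):
 *
 *	static uma_zone_t foo_zone;
 *
 *	foo_zone = uma_zcreate("foo", sizeof(struct foo), NULL, NULL,
 *	    NULL, NULL, UMA_ALIGN_PTR, 0);
 *	struct foo *fp = uma_zalloc(foo_zone, M_WAITOK);
 *	...
 *	uma_zfree(foo_zone, fp);
 *
 * Everything below exists to make that fast path cheap.
 */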

/*
 *	This is the representation for a normal (non-OFFPAGE) slab
 *
 *	i == item
 *	s == slab pointer
 *
 *	<----------------  Page (UMA_SLAB_SIZE) ------------------>
 *	___________________________________________________________
 *     | _  _  _  _  _  _  _  _  _  _  _  _  _  _  _   ___________ |
 *     ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i| |slab header||
 *     ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_| |___________||
 *     |___________________________________________________________|
 *
 *
 *	This is an OFFPAGE slab.  These can be larger than UMA_SLAB_SIZE.
 *
 *	___________________________________________________________
 *     | _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _   |
 *     ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i|  |
 *     ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_|  |
 *     |___________________________________________________________|
 *       ___________    ^
 *	|slab header|   |
 *	|___________|---*
 *
 */

#ifndef VM_UMA_INT_H
#define VM_UMA_INT_H

#define UMA_SLAB_SIZE	PAGE_SIZE	/* How big are our slabs? */
#define UMA_SLAB_MASK	(PAGE_SIZE - 1)	/* Mask to get back to the page */
#define UMA_SLAB_SHIFT	PAGE_SHIFT	/* Number of bits in PAGE_MASK */

#define UMA_BOOT_PAGES		64	/* Pages allocated for startup */
#define UMA_BOOT_PAGES_ZONES	32	/* Multiplier for pages to reserve */
					/* if sizeof(struct uma_zone) > PAGE_SIZE */

/* Max waste percentage before going to off page slab management */
#define UMA_MAX_WASTE	10
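
/*
 * A rough worked example of the waste rule (numbers are illustrative and
 * ignore the exact slab header size): with 4KB pages, a 680-byte item fits
 * 5 times in a page that also embeds its slab header, wasting
 * (4096 - 5 * 680) / 4096, or about 17%, of the page.  That exceeds
 * UMA_MAX_WASTE, and moving the header off page lets a 6th item fit, so
 * the keg switches to off page slab management.
 */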

/*
 * This is the initial size of the hash table for uma_slabs that are managed
 * off page.  The hash expands by powers of two; currently it does not
 * shrink.  I doubt there will be many cases where the initial size is
 * exceeded.
 */
#define UMA_HASH_SIZE_INIT	32

/*
 * I should investigate other hashing algorithms.  This should yield a low
 * number of collisions if the pages are relatively contiguous.
 */

#define UMA_HASH(h, s) ((((uintptr_t)s) >> UMA_SLAB_SHIFT) & (h)->uh_hashmask)

#define UMA_HASH_INSERT(h, s, mem)					\
		SLIST_INSERT_HEAD(&(h)->uh_slab_hash[UMA_HASH((h),	\
		    (mem))], (s), us_hlink)
#define UMA_HASH_REMOVE(h, s, mem)					\
		SLIST_REMOVE(&(h)->uh_slab_hash[UMA_HASH((h),		\
		    (mem))], (s), uma_slab, us_hlink)
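
/*
 * For example, with 4KB pages (UMA_SLAB_SHIFT == 12) and the initial
 * uh_hashmask of 31, the page address 0xfffff80012345000 hashes to
 * (0xfffff80012345000 >> 12) & 31 == 5.  Pages that are contiguous in the
 * address space therefore land in consecutive hash buckets.
 */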

/* Hash table for freed address -> slab translation */

SLIST_HEAD(slabhead, uma_slab);

struct uma_hash {
	struct slabhead	*uh_slab_hash;	/* Hash table for slabs */
	int		uh_hashsize;	/* Current size of the hash table */
	int		uh_hashmask;	/* Mask used during hashing */
};

/*
 * Align a field or structure to a cache line
 */
#if defined(__amd64__)
#define UMA_ALIGN	__aligned(CACHE_LINE_SIZE)
#else
#define UMA_ALIGN
#endif

/*
 * Structures for per cpu queues.
 */

struct uma_bucket {
	LIST_ENTRY(uma_bucket)	ub_link;	/* Link into the zone */
	int16_t	ub_cnt;				/* Count of free items. */
	int16_t	ub_entries;			/* Max items. */
	void	*ub_bucket[];			/* actual allocation storage */
};

typedef struct uma_bucket * uma_bucket_t;

struct uma_cache {
	uma_bucket_t	uc_freebucket;	/* Bucket we're freeing to */
	uma_bucket_t	uc_allocbucket;	/* Bucket to allocate from */
	uint64_t	uc_allocs;	/* Count of allocations */
	uint64_t	uc_frees;	/* Count of frees */
} UMA_ALIGN;

typedef struct uma_cache * uma_cache_t;
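
/*
 * The per-CPU caches are what make the common case cheap.  A simplified
 * sketch of the allocation fast path in uma_core.c (bucket exchange and
 * error handling omitted):
 *
 *	critical_enter();
 *	cache = &zone->uz_cpu[curcpu];
 *	bucket = cache->uc_allocbucket;
 *	if (bucket != NULL && bucket->ub_cnt > 0) {
 *		item = bucket->ub_bucket[--bucket->ub_cnt];
 *		cache->uc_allocs++;
 *		critical_exit();
 *		return (item);
 *	}
 *	critical_exit();
 *	(fall back to the zone's bucket list, then to the keg)
 *
 * The critical section pins the thread to its CPU, so the cache needs no
 * lock; only the fallback paths take the zone or keg locks.
 */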

/*
 * Keg management structure
 *
 * TODO: Optimize for cache line size
 *
 */
struct uma_keg {
	struct mtx_padalign	uk_lock;	/* Lock for the keg */
	struct uma_hash	uk_hash;

	LIST_HEAD(,uma_zone)	uk_zones;	/* Keg's zones */
	LIST_HEAD(,uma_slab)	uk_part_slab;	/* partially allocated slabs */
	LIST_HEAD(,uma_slab)	uk_free_slab;	/* empty slab list */
	LIST_HEAD(,uma_slab)	uk_full_slab;	/* full slabs */

	uint32_t	uk_align;	/* Alignment mask */
	uint32_t	uk_pages;	/* Total page count */
	uint32_t	uk_free;	/* Count of items free in slabs */
	uint32_t	uk_reserve;	/* Number of reserved items. */
	uint32_t	uk_size;	/* Requested size of each item */
	uint32_t	uk_rsize;	/* Real size of each item */
	uint32_t	uk_maxpages;	/* Maximum number of pages to alloc */

	uma_init	uk_init;	/* Keg's init routine */
	uma_fini	uk_fini;	/* Keg's fini routine */
	uma_alloc	uk_allocf;	/* Allocation function */
	uma_free	uk_freef;	/* Free routine */

	u_long		uk_offset;	/* Next free offset from base KVA */
	vm_offset_t	uk_kva;		/* Zone base KVA */
	uma_zone_t	uk_slabzone;	/* Slab zone backing us, if OFFPAGE */

	uint32_t	uk_pgoff;	/* Offset to uma_slab struct */
	uint16_t	uk_ppera;	/* pages per allocation from backend */
	uint16_t	uk_ipers;	/* Items per slab */
	uint32_t	uk_flags;	/* Internal flags */

	/* Least used fields go to the last cache line. */
	const char	*uk_name;		/* Name of creating zone. */
	LIST_ENTRY(uma_keg)	uk_link;	/* List of all kegs */
};
typedef struct uma_keg	* uma_keg_t;

/*
 * Free bits per-slab.
 */
#define	SLAB_SETSIZE	(PAGE_SIZE / UMA_SMALLEST_UNIT)
BITSET_DEFINE(slabbits, SLAB_SETSIZE);

/*
 * The slab structure manages a single contiguous allocation from backing
 * store and subdivides it into individually allocatable items.
 */
struct uma_slab {
	uma_keg_t	us_keg;			/* Keg we live in */
	union {
		LIST_ENTRY(uma_slab)	_us_link;	/* slabs in zone */
		unsigned long	_us_size;	/* Size of allocation */
	} us_type;
	SLIST_ENTRY(uma_slab)	us_hlink;	/* Link for hash table */
	uint8_t		*us_data;		/* First item */
	struct slabbits	us_free;		/* Free bitmask. */
#ifdef INVARIANTS
	struct slabbits	us_debugfree;		/* Debug bitmask. */
#endif
	uint16_t	us_freecount;		/* How many are free? */
	uint8_t		us_flags;		/* Page flags, see uma.h */
	uint8_t		us_pad;			/* Pad to 32 bits, unused. */
};

#define	us_link	us_type._us_link
#define	us_size	us_type._us_size

typedef struct uma_slab * uma_slab_t;
typedef uma_slab_t (*uma_slaballoc)(uma_zone_t, uma_keg_t, int);
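
/*
 * Items within a slab are found through the free bitmask.  A simplified
 * sketch of how uma_core.c carves an item out of a slab using the
 * bitset(9) macros (locking and accounting omitted):
 *
 *	freei = BIT_FFS(SLAB_SETSIZE, &slab->us_free) - 1;
 *	BIT_CLR(SLAB_SETSIZE, freei, &slab->us_free);
 *	slab->us_freecount--;
 *	item = slab->us_data + (slab->us_keg->uk_rsize * freei);
 *
 * Freeing reverses this: the item's index is recovered as
 * (item - us_data) / uk_rsize and the corresponding bit is set again.
 */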

struct uma_klink {
	LIST_ENTRY(uma_klink)	kl_link;
	uma_keg_t		kl_keg;
};
typedef struct uma_klink *uma_klink_t;

/*
 * Zone management structure
 *
 * TODO: Optimize for cache line size
 *
 */
struct uma_zone {
	struct mtx_padalign	uz_lock;	/* Lock for the zone */
	struct mtx_padalign	*uz_lockptr;
	const char		*uz_name;	/* Text name of the zone */

	LIST_ENTRY(uma_zone)	uz_link;	/* List of all zones in keg */
	LIST_HEAD(,uma_bucket)	uz_buckets;	/* full buckets */

	LIST_HEAD(,uma_klink)	uz_kegs;	/* List of kegs. */
	struct uma_klink	uz_klink;	/* klink for first keg. */

	uma_slaballoc	uz_slab;	/* Allocate a slab from the backend. */
	uma_ctor	uz_ctor;	/* Constructor for each allocation */
	uma_dtor	uz_dtor;	/* Destructor */
	uma_init	uz_init;	/* Initializer for each item */
	uma_fini	uz_fini;	/* Finalizer for each item. */
	uma_import	uz_import;	/* Import new memory to cache. */
	uma_release	uz_release;	/* Release memory from cache. */
	void		*uz_arg;	/* Import/release argument. */

	uint32_t	uz_flags;	/* Flags inherited from kegs */
	uint32_t	uz_size;	/* Size inherited from kegs */

	volatile u_long	uz_allocs UMA_ALIGN; /* Total number of allocations */
	volatile u_long	uz_fails;	/* Total number of alloc failures */
	volatile u_long	uz_frees;	/* Total number of frees */
	uint64_t	uz_sleeps;	/* Total number of alloc sleeps */
	uint16_t	uz_count;	/* Number of items in a full bucket */
	uint16_t	uz_count_min;	/* Minimum number of items there */

	/* The next two fields are used to print rate-limited warnings. */
	const char	*uz_warning;	/* Warning to print on failure */
	struct timeval	uz_ratecheck;	/* Warnings rate-limiting */

	struct task	uz_maxaction;	/* Task to run when at limit */

	/*
	 * This HAS to be the last item because we adjust the zone size
	 * based on NCPU and then allocate the space for the zones.
	 */
	struct uma_cache	uz_cpu[1]; /* Per cpu caches */
};
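
/*
 * At startup the zone allocation size is computed as roughly
 * sizeof(struct uma_zone) plus one struct uma_cache per possible CPU
 * beyond the single cache declared above; the exact computation lives in
 * uma_core.c.
 */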

/*
 * These flags must not overlap with the UMA_ZONE flags specified in uma.h.
 */
#define	UMA_ZFLAG_MULTI		0x04000000	/* Multiple kegs in the zone. */
#define	UMA_ZFLAG_DRAINING	0x08000000	/* Running zone_drain. */
#define	UMA_ZFLAG_BUCKET	0x10000000	/* Bucket zone. */
#define UMA_ZFLAG_INTERNAL	0x20000000	/* No offpage, no PCPU. */
#define UMA_ZFLAG_FULL		0x40000000	/* Reached uz_maxpages */
#define UMA_ZFLAG_CACHEONLY	0x80000000	/* Don't ask VM for buckets. */

#define	UMA_ZFLAG_INHERIT						\
    (UMA_ZFLAG_INTERNAL | UMA_ZFLAG_CACHEONLY | UMA_ZFLAG_BUCKET)

static inline uma_keg_t
zone_first_keg(uma_zone_t zone)
{
	uma_klink_t klink;

	klink = LIST_FIRST(&zone->uz_kegs);
	return ((klink != NULL) ? klink->kl_keg : NULL);
}

#undef UMA_ALIGN

#ifdef _KERNEL
/* Internal prototypes */
static __inline uma_slab_t hash_sfind(struct uma_hash *hash, uint8_t *data);
void *uma_large_malloc(vm_size_t size, int wait);
void uma_large_free(uma_slab_t slab);

/* Lock Macros */

#define	KEG_LOCK_INIT(k, lc)					\
	do {							\
		if ((lc))					\
			mtx_init(&(k)->uk_lock, (k)->uk_name,	\
			    (k)->uk_name, MTX_DEF | MTX_DUPOK);	\
		else						\
			mtx_init(&(k)->uk_lock, (k)->uk_name,	\
			    "UMA zone", MTX_DEF | MTX_DUPOK);	\
	} while (0)

#define	KEG_LOCK_FINI(k)	mtx_destroy(&(k)->uk_lock)
#define	KEG_LOCK(k)	mtx_lock(&(k)->uk_lock)
#define	KEG_UNLOCK(k)	mtx_unlock(&(k)->uk_lock)

#define	ZONE_LOCK_INIT(z, lc)					\
	do {							\
		if ((lc))					\
			mtx_init(&(z)->uz_lock, (z)->uz_name,	\
			    (z)->uz_name, MTX_DEF | MTX_DUPOK);	\
		else						\
			mtx_init(&(z)->uz_lock, (z)->uz_name,	\
			    "UMA zone", MTX_DEF | MTX_DUPOK);	\
	} while (0)

#define	ZONE_LOCK(z)	mtx_lock((z)->uz_lockptr)
#define	ZONE_TRYLOCK(z)	mtx_trylock((z)->uz_lockptr)
#define	ZONE_UNLOCK(z)	mtx_unlock((z)->uz_lockptr)
#define	ZONE_LOCK_FINI(z)	mtx_destroy(&(z)->uz_lock)

/*
 * Find a slab within a hash table.  This is used for OFFPAGE zones to look
 * up the slab structure.
 *
 * Arguments:
 *	hash  The hash table to search.
 *	data  The base page of the item.
 *
 * Returns:
 *	A pointer to a slab if successful, else NULL.
 */
static __inline uma_slab_t
hash_sfind(struct uma_hash *hash, uint8_t *data)
{
	uma_slab_t slab;
	int hval;

	hval = UMA_HASH(hash, data);

	SLIST_FOREACH(slab, &hash->uh_slab_hash[hval], us_hlink) {
		if ((uint8_t *)slab->us_data == data)
			return (slab);
	}
	return (NULL);
}
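
/*
 * The caller is expected to mask an item's address down to its page base
 * first, e.g. (illustrative, as done by the free path in uma_core.c):
 *
 *	slab = hash_sfind(&keg->uk_hash,
 *	    (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK)));
 */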

static __inline uma_slab_t
vtoslab(vm_offset_t va)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	return ((uma_slab_t)p->plinks.s.pv);
}

static __inline void
vsetslab(vm_offset_t va, uma_slab_t slab)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	p->plinks.s.pv = slab;
}

/*
 * The following two functions may be defined by architecture-specific code
 * if they can provide more efficient allocation functions.  This is useful
 * for using direct mapped addresses.
 */
void *uma_small_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *pflag,
    int wait);
void uma_small_free(void *mem, vm_size_t size, uint8_t flags);
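
/*
 * On architectures with a direct map these can skip KVA allocation
 * entirely.  An illustrative sketch of the allocation side only (the real
 * per-architecture implementations differ in details such as flag
 * handling):
 *
 *	m = vm_page_alloc(NULL, 0, malloc2vm_flags(wait) | VM_ALLOC_NOOBJ |
 *	    VM_ALLOC_WIRED);
 *	if (m == NULL)
 *		return (NULL);
 *	return ((void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
 */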
#endif /* _KERNEL */

#endif /* VM_UMA_INT_H */