/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2002-2019 Jeffrey Roberson <jeff@FreeBSD.org>
 * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

#include <sys/counter.h>
#include <sys/_bitset.h>
#include <sys/_domainset.h>
#include <sys/_task.h>

/*
 * This file includes definitions, structures, prototypes, and inlines that
 * should not be used outside of the actual implementation of UMA.
 */

/*
 * The brief summary:  Zones describe unique allocation types.  Zones are
 * organized into per-CPU caches which are filled by buckets.  Buckets are
 * organized according to memory domains.  Buckets are filled from kegs which
 * are also organized according to memory domains.  Kegs describe a unique
 * allocation type, backend memory provider, and layout.  Kegs are associated
 * with one or more zones and zones reference one or more kegs.  Kegs provide
 * slabs which are virtually contiguous collections of pages.  Each slab is
 * broken down into one or more items that will satisfy an individual
 * allocation.
 *
 * Allocation is satisfied in the following order:
 * 1) Per-CPU cache
 * 2) Per-domain cache of buckets
 * 3) Slab from any of N kegs
 * 4) Backend page provider
 *
 * More detail on individual objects is contained below:
 *
 * Kegs contain lists of slabs which are stored in either the full bin, empty
 * bin, or partially allocated bin, to reduce fragmentation.  They also contain
 * the user-supplied size of each item, which is adjusted for alignment; the
 * adjusted value is stored in rsize.  The Keg also stores information for
 * managing a hash of page addresses that maps pages to uma_slab_t structures
 * for pages that don't have embedded uma_slab_t's.
 *
 * Keg slab lists are organized by memory domain to support NUMA allocation
 * policies.  By default allocations are spread across domains to reduce the
 * potential for hotspots.  Special keg creation flags may be specified to
 * prefer local allocation.  However, there is no strict enforcement, as frees
 * may happen on any CPU and these are returned to the CPU-local cache
 * regardless of the originating domain.
 *
 * The uma_slab_t may be embedded in a UMA_SLAB_SIZE chunk of memory or it may
 * be allocated off the page from a special slab zone.  The free list within a
 * slab is managed with a bitmask.  For item sizes that would yield more than
 * 10% memory waste we potentially allocate a separate uma_slab_t if this will
 * improve the number of items per slab that will fit.
 *
 * The only really gross cases, with regard to memory waste, are for those
 * items that are just over half the page size.  You can get nearly 50%
 * waste, so you fall back to the memory footprint of the power-of-two
 * allocator.  I have looked at memory allocation sizes on many of the
 * machines available to me, and there does not seem to be an abundance of
 * allocations in this range, so at this time it may not make sense to
 * optimize for it.  This can, of course, be solved with dynamic slab sizes.
 *
 * Kegs may serve multiple Zones but by far most of the time they only serve
 * one.  When a Zone is created, a Keg is allocated and set up for it.  While
 * the backing Keg stores slabs, the Zone caches Buckets of items allocated
 * from the slabs.  Each Zone is equipped with an init/fini and ctor/dtor
 * pair, as well as with its own set of small per-CPU caches, layered above
 * the Zone's general Bucket cache.
 *
 * The PCPU caches are protected by critical sections, and may be accessed
 * safely only from their associated CPU, while the Zones backed by the same
 * Keg all share a common Keg lock (to coalesce contention on the backing
 * slabs).  The backing Keg typically only serves one Zone, but in the case of
 * multiple Zones, one of the Zones is considered the Master Zone and all
 * Zone-related stats from the Keg are accounted to the Master Zone.  For an
 * example of a Multi-Zone setup, refer to the Mbuf allocation code.
 */
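
/*
 * A minimal usage sketch (illustrative only; the "foo" zone and struct foo
 * are hypothetical).  Consumers go through the public API in uma.h, which
 * the structures in this file back:
 *
 *	foo_zone = uma_zcreate("foo", sizeof(struct foo),
 *	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
 *	item = uma_zalloc(foo_zone, M_WAITOK);	(walks the order above)
 *	uma_zfree(foo_zone, item);
 *	uma_zdestroy(foo_zone);
 */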

/*
 *	This is the representation of a normal (non-OFFPAGE) slab:
 *
 *	i == item
 *	s == slab pointer
 *
 *	<----------------  Page (UMA_SLAB_SIZE) ------------------>
 *	___________________________________________________________
 *     | _  _  _  _  _  _  _  _  _  _  _  _  _  _  _   ___________ |
 *     ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i| |slab header||
 *     ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_| |___________||
 *     |___________________________________________________________|
 *
 *
 *	This is an OFFPAGE slab.  These can be larger than UMA_SLAB_SIZE.
 *
 *	___________________________________________________________
 *     | _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _   |
 *     ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i|  |
 *     ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_|  |
 *     |___________________________________________________________|
 *       ___________    ^
 *	|slab header|   |
 *	|___________|---*
 *
 */

#ifndef VM_UMA_INT_H
#define VM_UMA_INT_H

#define UMA_SLAB_SIZE	PAGE_SIZE	/* How big are our slabs? */
#define UMA_SLAB_MASK	(PAGE_SIZE - 1)	/* Mask to get back to the page */
#define UMA_SLAB_SHIFT	PAGE_SHIFT	/* Number of bits in PAGE_MASK */

/* Max waste percentage before going to off-page slab management */
#define UMA_MAX_WASTE	10
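
/*
 * Worked example (illustrative, assuming 4 KB pages): 10% of a 4096-byte
 * slab is roughly 409 bytes.  If keeping the slab header embedded in the
 * page would waste more than that, and allocating the header separately
 * lets more items fit, an OFFPAGE layout is preferred.  A hypothetical
 * 1024-byte item fits only 3 times next to an embedded header (~1024 bytes
 * unused, 25%), but exactly 4 times when the header lives off page.
 */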

/*
 * I doubt there will be many cases where this is exceeded. This is the initial
 * size of the hash table for uma_slabs that are managed off page. This hash
 * does expand by powers of two.  Currently it doesn't get smaller.
 */
#define UMA_HASH_SIZE_INIT	32

/*
 * I should investigate other hashing algorithms.  This should yield a low
 * number of collisions if the pages are relatively contiguous.
 */

#define UMA_HASH(h, s) ((((uintptr_t)s) >> UMA_SLAB_SHIFT) & (h)->uh_hashmask)

#define UMA_HASH_INSERT(h, s, mem)					\
		SLIST_INSERT_HEAD(&(h)->uh_slab_hash[UMA_HASH((h),	\
		    (mem))], (s), us_hlink)
#define UMA_HASH_REMOVE(h, s, mem)					\
		SLIST_REMOVE(&(h)->uh_slab_hash[UMA_HASH((h),		\
		    (mem))], (s), uma_slab, us_hlink)
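
/*
 * Hypothetical usage sketch: when an off-page slab backed by the page at
 * "mem" is created, it is entered into the keg's hash so a later free of
 * any item on that page can find the slab again (see hash_sfind() below):
 *
 *	UMA_HASH_INSERT(&keg->uk_hash, slab, mem);
 *	...
 *	slab = hash_sfind(&keg->uk_hash, mem);
 *	...
 *	UMA_HASH_REMOVE(&keg->uk_hash, slab, mem);
 */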

/* Hash table for freed address -> slab translation */

SLIST_HEAD(slabhead, uma_slab);

struct uma_hash {
	struct slabhead	*uh_slab_hash;	/* Hash table for slabs */
	u_int		uh_hashsize;	/* Current size of the hash table */
	u_int		uh_hashmask;	/* Mask used during hashing */
};

/*
 * Align a field or structure to a cache line.
 */
#if defined(__amd64__) || defined(__powerpc64__)
#define UMA_ALIGN	__aligned(128)
#else
#define UMA_ALIGN
#endif

/*
 * Structures for per-CPU queues.
 */

struct uma_bucket {
	TAILQ_ENTRY(uma_bucket)	ub_link;	/* Link into the zone */
	int16_t	ub_cnt;				/* Count of items in bucket. */
	int16_t	ub_entries;			/* Max items. */
	void	*ub_bucket[];			/* actual allocation storage */
};

typedef struct uma_bucket * uma_bucket_t;

struct uma_cache {
	uma_bucket_t	uc_freebucket;	/* Bucket we're freeing to */
	uma_bucket_t	uc_allocbucket;	/* Bucket to allocate from */
	uma_bucket_t	uc_crossbucket;	/* Cross-domain bucket */
	uint64_t	uc_allocs;	/* Count of allocations */
	uint64_t	uc_frees;	/* Count of frees */
} UMA_ALIGN;

typedef struct uma_cache * uma_cache_t;
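
/*
 * Simplified fast-path sketch (illustrative; the real logic lives in
 * uma_core.c): the allocation path runs in a critical section so the
 * per-CPU cache can be used without locks, roughly:
 *
 *	critical_enter();
 *	cache = &zone->uz_cpu[curcpu];
 *	bucket = cache->uc_allocbucket;
 *	if (bucket != NULL && bucket->ub_cnt > 0) {
 *		item = bucket->ub_bucket[--bucket->ub_cnt];
 *		cache->uc_allocs++;
 *	}
 *	critical_exit();
 *
 * Frees go to uc_freebucket; full and empty buckets are exchanged with the
 * per-domain bucket caches under the zone lock.
 */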

/*
 * Per-domain memory list.  Embedded in the kegs.
 */
struct uma_domain {
	LIST_HEAD(,uma_slab)	ud_part_slab;	/* partially allocated slabs */
	LIST_HEAD(,uma_slab)	ud_free_slab;	/* empty slab list */
	LIST_HEAD(,uma_slab)	ud_full_slab;	/* full slabs */
};

typedef struct uma_domain * uma_domain_t;

/*
 * Keg management structure
 *
 * TODO: Optimize for cache line size
 *
 */
struct uma_keg {
	struct mtx	uk_lock;	/* Lock for the keg.  Must be first;
					 * see the shared uz_keg/uz_lockptr
					 * member of struct uma_zone. */
	struct uma_hash	uk_hash;
	LIST_HEAD(,uma_zone)	uk_zones;	/* Keg's zones */

	struct domainset_ref uk_dr;	/* Domain selection policy. */
	uint32_t	uk_align;	/* Alignment mask */
	uint32_t	uk_pages;	/* Total page count */
	uint32_t	uk_free;	/* Count of items free in slabs */
	uint32_t	uk_reserve;	/* Number of reserved items. */
	uint32_t	uk_size;	/* Requested size of each item */
	uint32_t	uk_rsize;	/* Real size of each item */

	uma_init	uk_init;	/* Keg's init routine */
	uma_fini	uk_fini;	/* Keg's fini routine */
	uma_alloc	uk_allocf;	/* Allocation function */
	uma_free	uk_freef;	/* Free routine */

	u_long		uk_offset;	/* Next free offset from base KVA */
	vm_offset_t	uk_kva;		/* Zone base KVA */
	uma_zone_t	uk_slabzone;	/* Slab zone backing us, if OFFPAGE */

	uint32_t	uk_pgoff;	/* Offset to uma_slab struct */
	uint16_t	uk_ppera;	/* Pages per allocation from backend */
	uint16_t	uk_ipers;	/* Items per slab */
	uint32_t	uk_flags;	/* Internal flags */

	/* Least used fields go to the last cache line. */
	const char	*uk_name;		/* Name of creating zone. */
	LIST_ENTRY(uma_keg)	uk_link;	/* List of all kegs */

	/* Must be last, variable sized. */
	struct uma_domain	uk_domain[];	/* Keg's slab lists. */
};
typedef struct uma_keg	* uma_keg_t;

/*
 * Free bits per-slab.
 */
#define	SLAB_MAX_SETSIZE	(PAGE_SIZE / UMA_SMALLEST_UNIT)
#define	SLAB_MIN_SETSIZE	_BITSET_BITS
BITSET_DEFINE(slabbits, SLAB_MAX_SETSIZE);
BITSET_DEFINE(noslabbits, 0);

/*
 * The slab structure manages a single contiguous allocation from backing
 * store and subdivides it into individually allocatable items.
 */
struct uma_slab {
	LIST_ENTRY(uma_slab)	us_link;	/* slabs in zone */
	SLIST_ENTRY(uma_slab)	us_hlink;	/* Link for hash table */
	uint8_t		*us_data;		/* First item */
	uint16_t	us_freecount;		/* How many are free? */
	uint8_t		us_flags;		/* Page flags see uma.h */
	uint8_t		us_domain;		/* Backing NUMA domain. */
#ifdef INVARIANTS
	struct slabbits	us_debugfree;		/* Debug bitmask. */
#endif
	struct noslabbits us_free;		/* Free bitmask. */
};

#if MAXMEMDOM >= 255
#error "Slab domain type insufficient"
#endif

typedef struct uma_slab * uma_slab_t;

/* These three functions are for embedded (!OFFPAGE) use only. */
size_t slab_sizeof(int nitems);
size_t slab_space(int nitems);
int slab_ipers(size_t size, int align);
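
/*
 * Rough relationship between these helpers (a sketch, not a contract):
 * slab_sizeof(nitems) is the size of a struct uma_slab able to track
 * nitems items (the trailing free bitset scales with nitems),
 * slab_space(nitems) is what is left of UMA_SLAB_SIZE after such an
 * embedded header, and slab_ipers(size, align) picks the largest nitems
 * whose rounded-up items still fit in slab_space(nitems).
 */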

TAILQ_HEAD(uma_bucketlist, uma_bucket);

struct uma_zone_domain {
	struct uma_bucketlist uzd_buckets; /* full buckets */
	long		uzd_nitems;	/* total item count */
	long		uzd_imax;	/* maximum item count this period */
	long		uzd_imin;	/* minimum item count this period */
	long		uzd_wss;	/* working set size estimate */
};

typedef struct uma_zone_domain * uma_zone_domain_t;

/*
 * Zone management structure
 *
 * TODO: Optimize for cache line size
 *
 */
struct uma_zone {
	/* Offset 0, used in alloc/free fast/medium fast path and const. */
	union {
		uma_keg_t	uz_keg;		/* This zone's keg */
		struct mtx	*uz_lockptr;	/* To keg or to self */
	};
	struct uma_zone_domain	*uz_domain;	/* per-domain buckets */
	uint32_t	uz_flags;	/* Flags inherited from kegs */
	uint32_t	uz_size;	/* Size inherited from kegs */
	uma_ctor	uz_ctor;	/* Constructor for each allocation */
	uma_dtor	uz_dtor;	/* Destructor */
	uint64_t	uz_items;	/* Total items count */
	uint64_t	uz_max_items;	/* Maximum number of items to alloc */
	uint32_t	uz_sleepers;	/* Number of sleepers on memory */
	uint16_t	uz_bucket_size;	/* Number of items in full bucket */
	uint16_t	uz_bucket_size_max; /* Maximum number of bucket items */

	/* Offset 64, used in bucket replenish. */
	uma_import	uz_import;	/* Import new memory to cache. */
	uma_release	uz_release;	/* Release memory from cache. */
	void		*uz_arg;	/* Import/release argument. */
	uma_init	uz_init;	/* Initializer for each item */
	uma_fini	uz_fini;	/* Finalizer for each item. */
	void		*uz_spare;
	uint64_t	uz_bkt_count;	/* Items in bucket cache */
	uint64_t	uz_bkt_max;	/* Maximum bucket cache size */

	/* Offset 128, rarely used fields. */
	/*
	 * The lock is placed here to avoid the adjacent cache line
	 * prefetcher in fast paths and to take up space near infrequently
	 * accessed members to reduce alignment overhead.
	 */
	struct mtx	uz_lock;	/* Lock for the zone */
	LIST_ENTRY(uma_zone) uz_link;	/* List of all zones in keg */
	const char	*uz_name;	/* Text name of the zone */
	/* The next two fields are used to print rate-limited warnings. */
	const char	*uz_warning;	/* Warning to print on failure */
	struct timeval	uz_ratecheck;	/* Warnings rate-limiting */
	struct task	uz_maxaction;	/* Task to run when at limit */
	uint16_t	uz_bucket_size_min; /* Min number of items in bucket */

	/* Offset 256+, stats and misc. */
	counter_u64_t	uz_allocs;	/* Total number of allocations */
	counter_u64_t	uz_frees;	/* Total number of frees */
	counter_u64_t	uz_fails;	/* Total number of alloc failures */
	uint64_t	uz_sleeps;	/* Total number of alloc sleeps */
	uint64_t	uz_xdomain;	/* Total number of cross-domain frees */
	char		*uz_ctlname;	/* sysctl safe name string. */
	struct sysctl_oid *uz_oid;	/* sysctl oid pointer. */
	int		uz_namecnt;	/* duplicate name count. */

	/*
	 * This HAS to be the last item because we adjust the zone size
	 * based on NCPU and then allocate the space for the zones.
	 */
	struct uma_cache	uz_cpu[]; /* Per-CPU caches */

	/* uz_domain follows here. */
};

/*
 * These flags must not overlap with the UMA_ZONE flags specified in uma.h.
 */
#define	UMA_ZFLAG_CACHE		0x04000000	/* uma_zcache_create()d it */
#define	UMA_ZFLAG_RECLAIMING	0x08000000	/* Running zone_reclaim(). */
#define	UMA_ZFLAG_BUCKET	0x10000000	/* Bucket zone. */
#define	UMA_ZFLAG_INTERNAL	0x20000000	/* No offpage no PCPU. */
#define	UMA_ZFLAG_TRASH		0x40000000	/* Add trash ctor/dtor. */
#define	UMA_ZFLAG_CACHEONLY	0x80000000	/* Don't ask VM for buckets. */

#define	UMA_ZFLAG_INHERIT						\
    (UMA_ZFLAG_INTERNAL | UMA_ZFLAG_CACHEONLY | UMA_ZFLAG_BUCKET)

#undef UMA_ALIGN

#ifdef _KERNEL
/* Internal prototypes */
static __inline uma_slab_t hash_sfind(struct uma_hash *hash, uint8_t *data);

/* Lock Macros */

#define	KEG_LOCK_INIT(k, lc)					\
	do {							\
		if ((lc))					\
			mtx_init(&(k)->uk_lock, (k)->uk_name,	\
			    (k)->uk_name, MTX_DEF | MTX_DUPOK);	\
		else						\
			mtx_init(&(k)->uk_lock, (k)->uk_name,	\
			    "UMA zone", MTX_DEF | MTX_DUPOK);	\
	} while (0)

#define	KEG_LOCK_FINI(k)	mtx_destroy(&(k)->uk_lock)
#define	KEG_LOCK(k)	mtx_lock(&(k)->uk_lock)
#define	KEG_UNLOCK(k)	mtx_unlock(&(k)->uk_lock)
#define	KEG_LOCK_ASSERT(k)	mtx_assert(&(k)->uk_lock, MA_OWNED)

#define	KEG_GET(zone, keg) do {					\
	(keg) = (zone)->uz_keg;					\
	KASSERT((void *)(keg) != (void *)&(zone)->uz_lock,	\
	    ("%s: Invalid zone %p type", __func__, (zone)));	\
	} while (0)

#define	ZONE_LOCK_INIT(z, lc)					\
	do {							\
		if ((lc))					\
			mtx_init(&(z)->uz_lock, (z)->uz_name,	\
			    (z)->uz_name, MTX_DEF | MTX_DUPOK);	\
		else						\
			mtx_init(&(z)->uz_lock, (z)->uz_name,	\
			    "UMA zone", MTX_DEF | MTX_DUPOK);	\
	} while (0)

#define	ZONE_LOCK(z)	mtx_lock((z)->uz_lockptr)
#define	ZONE_TRYLOCK(z)	mtx_trylock((z)->uz_lockptr)
#define	ZONE_UNLOCK(z)	mtx_unlock((z)->uz_lockptr)
#define	ZONE_LOCK_FINI(z)	mtx_destroy(&(z)->uz_lock)
#define	ZONE_LOCK_ASSERT(z)	mtx_assert((z)->uz_lockptr, MA_OWNED)

/*
 * Find a slab within a hash table.  This is used for OFFPAGE zones to look
 * up the slab structure.
 *
 * Arguments:
 *	hash  The hash table to search.
 *	data  The base page of the item.
 *
 * Returns:
 *	A pointer to a slab if successful, else NULL.
 */
static __inline uma_slab_t
hash_sfind(struct uma_hash *hash, uint8_t *data)
{
	uma_slab_t slab;
	u_int hval;

	hval = UMA_HASH(hash, data);

	SLIST_FOREACH(slab, &hash->uh_slab_hash[hval], us_hlink) {
		if ((uint8_t *)slab->us_data == data)
			return (slab);
	}
	return (NULL);
}

static __inline uma_slab_t
vtoslab(vm_offset_t va)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	return (p->plinks.uma.slab);
}

static __inline void
vtozoneslab(vm_offset_t va, uma_zone_t *zone, uma_slab_t *slab)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	*slab = p->plinks.uma.slab;
	*zone = p->plinks.uma.zone;
}

static __inline void
vsetzoneslab(vm_offset_t va, uma_zone_t zone, uma_slab_t slab)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	p->plinks.uma.slab = slab;
	p->plinks.uma.zone = zone;
}
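
/*
 * Illustrative lifecycle note: for kegs that do not use the slab hash,
 * vsetzoneslab() records the zone and slab pointers in the vm_page of each
 * backing page when a slab is created; on free, vtoslab()/vtozoneslab()
 * recover them directly from the item's address instead of consulting the
 * keg hash.
 */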

extern unsigned long uma_kmem_limit;
extern unsigned long uma_kmem_total;

/* Adjust bytes under management by UMA. */
static inline void
uma_total_dec(unsigned long size)
{

	atomic_subtract_long(&uma_kmem_total, size);
}

static inline void
uma_total_inc(unsigned long size)
{

	if (atomic_fetchadd_long(&uma_kmem_total, size) > uma_kmem_limit)
		uma_reclaim_wakeup();
}

/*
 * The following two functions may be defined by architecture-specific code
 * if they can provide more efficient allocation functions.  This is useful
 * when direct-mapped addresses can be used.
 */
void *uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain,
    uint8_t *pflag, int wait);
void uma_small_free(void *mem, vm_size_t size, uint8_t flags);
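
/*
 * A hedged sketch of what an architecture-specific version might look like
 * on a platform with a direct map (details vary; the authoritative
 * implementations live in each machine-dependent uma_machdep.c):
 *
 *	m = vm_page_alloc_domain(NULL, 0, domain,
 *	    malloc2vm_flags(wait) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
 *	if (m == NULL)
 *		return (NULL);
 *	return ((void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
 *
 * uma_small_free() would reverse this via PHYS_TO_VM_PAGE() on the
 * direct-map address and vm_page_free().
 */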

/* Set a global soft limit on UMA managed memory. */
void uma_set_limit(unsigned long limit);
#endif /* _KERNEL */

#endif /* VM_UMA_INT_H */