xref: /freebsd/sys/vm/uma_core.c (revision 5eb61f6c6549f134a4f3bed4c164345d4f616bad)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2002-2019 Jeffrey Roberson <jeff@FreeBSD.org>
5  * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
6  * Copyright (c) 2004-2006 Robert N. M. Watson
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice unmodified, this list of conditions, and the following
14  *    disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /*
32  * uma_core.c  Implementation of the Universal Memory allocator
33  *
34  * This allocator is intended to replace the multitude of similar object caches
35  * in the standard FreeBSD kernel.  The intent is to be flexible as well as
36  * efficient.  A primary design goal is to return unused memory to the rest of
37  * the system.  This will make the system as a whole more flexible due to the
38  * ability to move memory to subsystems which most need it instead of leaving
39  * pools of reserved memory unused.
40  *
41  * The basic ideas stem from similar slab/zone based allocators whose algorithms
42  * are well known.
43  *
44  */
45 
46 /*
47  * TODO:
48  *	- Improve memory usage for large allocations
49  *	- Investigate cache size adjustments
50  */
51 
52 #include <sys/cdefs.h>
53 __FBSDID("$FreeBSD$");
54 
55 #include "opt_ddb.h"
56 #include "opt_param.h"
57 #include "opt_vm.h"
58 
59 #include <sys/param.h>
60 #include <sys/systm.h>
61 #include <sys/asan.h>
62 #include <sys/bitset.h>
63 #include <sys/domainset.h>
64 #include <sys/eventhandler.h>
65 #include <sys/kernel.h>
66 #include <sys/types.h>
67 #include <sys/limits.h>
68 #include <sys/queue.h>
69 #include <sys/malloc.h>
70 #include <sys/ktr.h>
71 #include <sys/lock.h>
72 #include <sys/sysctl.h>
73 #include <sys/mutex.h>
74 #include <sys/proc.h>
75 #include <sys/random.h>
76 #include <sys/rwlock.h>
77 #include <sys/sbuf.h>
78 #include <sys/sched.h>
79 #include <sys/sleepqueue.h>
80 #include <sys/smp.h>
81 #include <sys/smr.h>
82 #include <sys/taskqueue.h>
83 #include <sys/vmmeter.h>
84 
85 #include <vm/vm.h>
86 #include <vm/vm_param.h>
87 #include <vm/vm_domainset.h>
88 #include <vm/vm_object.h>
89 #include <vm/vm_page.h>
90 #include <vm/vm_pageout.h>
91 #include <vm/vm_phys.h>
92 #include <vm/vm_pagequeue.h>
93 #include <vm/vm_map.h>
94 #include <vm/vm_kern.h>
95 #include <vm/vm_extern.h>
96 #include <vm/vm_dumpset.h>
97 #include <vm/uma.h>
98 #include <vm/uma_int.h>
99 #include <vm/uma_dbg.h>
100 
101 #include <ddb/ddb.h>
102 
103 #ifdef DEBUG_MEMGUARD
104 #include <vm/memguard.h>
105 #endif
106 
107 #include <machine/md_var.h>
108 
109 #ifdef INVARIANTS
110 #define	UMA_ALWAYS_CTORDTOR	1
111 #else
112 #define	UMA_ALWAYS_CTORDTOR	0
113 #endif
114 
115 /*
116  * This is the zone and keg from which all zones are spawned.
117  */
118 static uma_zone_t kegs;
119 static uma_zone_t zones;
120 
121 /*
122  * On INVARIANTS builds, the slab contains a second bitset of the same size,
123  * "dbg_bits", which is laid out immediately after us_free.
124  */
125 #ifdef INVARIANTS
126 #define	SLAB_BITSETS	2
127 #else
128 #define	SLAB_BITSETS	1
129 #endif
130 
131 /*
132  * These are the two zones from which all offpage uma_slab_ts are allocated.
133  *
134  * One zone is for slab headers that can represent a larger number of items,
135  * making the slabs themselves more efficient, and the other zone is for
136  * headers that are smaller and represent fewer items, making the headers more
137  * efficient.
138  */
139 #define	SLABZONE_SIZE(setsize)					\
140     (sizeof(struct uma_hash_slab) + BITSET_SIZE(setsize) * SLAB_BITSETS)
141 #define	SLABZONE0_SETSIZE	(PAGE_SIZE / 16)
142 #define	SLABZONE1_SETSIZE	SLAB_MAX_SETSIZE
143 #define	SLABZONE0_SIZE	SLABZONE_SIZE(SLABZONE0_SETSIZE)
144 #define	SLABZONE1_SIZE	SLABZONE_SIZE(SLABZONE1_SETSIZE)
145 static uma_zone_t slabzones[2];
146 
147 /*
148  * The initial hash tables come out of this zone so they can be allocated
149  * prior to malloc coming up.
150  */
151 static uma_zone_t hashzone;
152 
153 /* The boot-time adjusted value for cache line alignment. */
154 int uma_align_cache = 64 - 1;
155 
156 static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");
157 static MALLOC_DEFINE(M_UMA, "UMA", "UMA Misc");
158 
159 /*
160  * Are we allowed to allocate buckets?
161  */
162 static int bucketdisable = 1;
163 
164 /* Linked list of all kegs in the system */
165 static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);
166 
167 /* Linked list of all cache-only zones in the system */
168 static LIST_HEAD(,uma_zone) uma_cachezones =
169     LIST_HEAD_INITIALIZER(uma_cachezones);
170 
171 /*
172  * Mutex for global lists: uma_kegs, uma_cachezones, and the per-keg list of
173  * zones.
174  */
175 static struct rwlock_padalign __exclusive_cache_line uma_rwlock;
176 
177 static struct sx uma_reclaim_lock;
178 
179 /*
180  * First available virual address for boot time allocations.
181  */
182 static vm_offset_t bootstart;
183 static vm_offset_t bootmem;
184 
185 /*
186  * kmem soft limit, initialized by uma_set_limit().  Ensure that early
187  * allocations don't trigger a wakeup of the reclaim thread.
188  */
189 unsigned long uma_kmem_limit = LONG_MAX;
190 SYSCTL_ULONG(_vm, OID_AUTO, uma_kmem_limit, CTLFLAG_RD, &uma_kmem_limit, 0,
191     "UMA kernel memory soft limit");
192 unsigned long uma_kmem_total;
193 SYSCTL_ULONG(_vm, OID_AUTO, uma_kmem_total, CTLFLAG_RD, &uma_kmem_total, 0,
194     "UMA kernel memory usage");
195 
196 /* Is the VM done starting up? */
197 static enum {
198 	BOOT_COLD,
199 	BOOT_KVA,
200 	BOOT_PCPU,
201 	BOOT_RUNNING,
202 	BOOT_SHUTDOWN,
203 } booted = BOOT_COLD;
204 
205 /*
206  * This is the handle used to schedule events that need to happen
207  * outside of the allocation fast path.
208  */
209 static struct callout uma_callout;
210 #define	UMA_TIMEOUT	20		/* Seconds for callout interval. */
211 
212 /*
213  * This structure is passed as the zone ctor arg so that I don't have to create
214  * a special allocation function just for zones.
215  */
216 struct uma_zctor_args {
217 	const char *name;
218 	size_t size;
219 	uma_ctor ctor;
220 	uma_dtor dtor;
221 	uma_init uminit;
222 	uma_fini fini;
223 	uma_import import;
224 	uma_release release;
225 	void *arg;
226 	uma_keg_t keg;
227 	int align;
228 	uint32_t flags;
229 };
230 
231 struct uma_kctor_args {
232 	uma_zone_t zone;
233 	size_t size;
234 	uma_init uminit;
235 	uma_fini fini;
236 	int align;
237 	uint32_t flags;
238 };
239 
240 struct uma_bucket_zone {
241 	uma_zone_t	ubz_zone;
242 	const char	*ubz_name;
243 	int		ubz_entries;	/* Number of items it can hold. */
244 	int		ubz_maxsize;	/* Maximum allocation size per-item. */
245 };
246 
247 /*
248  * Compute the actual number of bucket entries to pack them in power
249  * of two sizes for more efficient space utilization.
250  */
251 #define	BUCKET_SIZE(n)						\
252     (((sizeof(void *) * (n)) - sizeof(struct uma_bucket)) / sizeof(void *))
253 
254 #define	BUCKET_MAX	BUCKET_SIZE(256)
255 
256 struct uma_bucket_zone bucket_zones[] = {
257 	/* Literal bucket sizes. */
258 	{ NULL, "2 Bucket", 2, 4096 },
259 	{ NULL, "4 Bucket", 4, 3072 },
260 	{ NULL, "8 Bucket", 8, 2048 },
261 	{ NULL, "16 Bucket", 16, 1024 },
262 	/* Rounded down power of 2 sizes for efficiency. */
263 	{ NULL, "32 Bucket", BUCKET_SIZE(32), 512 },
264 	{ NULL, "64 Bucket", BUCKET_SIZE(64), 256 },
265 	{ NULL, "128 Bucket", BUCKET_SIZE(128), 128 },
266 	{ NULL, "256 Bucket", BUCKET_SIZE(256), 64 },
267 	{ NULL, NULL, 0}
268 };
269 
270 /*
271  * Flags and enumerations to be passed to internal functions.
272  */
273 enum zfreeskip {
274 	SKIP_NONE =	0,
275 	SKIP_CNT =	0x00000001,
276 	SKIP_DTOR =	0x00010000,
277 	SKIP_FINI =	0x00020000,
278 };
279 
280 /* Prototypes.. */
281 
282 void	uma_startup1(vm_offset_t);
283 void	uma_startup2(void);
284 
285 static void *noobj_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
286 static void *page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
287 static void *pcpu_page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
288 static void *startup_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
289 static void *contig_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
290 static void page_free(void *, vm_size_t, uint8_t);
291 static void pcpu_page_free(void *, vm_size_t, uint8_t);
292 static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int, int, int);
293 static void cache_drain(uma_zone_t);
294 static void bucket_drain(uma_zone_t, uma_bucket_t);
295 static void bucket_cache_reclaim(uma_zone_t zone, bool, int);
296 static bool bucket_cache_reclaim_domain(uma_zone_t, bool, bool, int);
297 static int keg_ctor(void *, int, void *, int);
298 static void keg_dtor(void *, int, void *);
299 static void keg_drain(uma_keg_t keg, int domain);
300 static int zone_ctor(void *, int, void *, int);
301 static void zone_dtor(void *, int, void *);
302 static inline void item_dtor(uma_zone_t zone, void *item, int size,
303     void *udata, enum zfreeskip skip);
304 static int zero_init(void *, int, int);
305 static void zone_free_bucket(uma_zone_t zone, uma_bucket_t bucket, void *udata,
306     int itemdomain, bool ws);
307 static void zone_foreach(void (*zfunc)(uma_zone_t, void *), void *);
308 static void zone_foreach_unlocked(void (*zfunc)(uma_zone_t, void *), void *);
309 static void zone_timeout(uma_zone_t zone, void *);
310 static int hash_alloc(struct uma_hash *, u_int);
311 static int hash_expand(struct uma_hash *, struct uma_hash *);
312 static void hash_free(struct uma_hash *hash);
313 static void uma_timeout(void *);
314 static void uma_shutdown(void);
315 static void *zone_alloc_item(uma_zone_t, void *, int, int);
316 static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip);
317 static int zone_alloc_limit(uma_zone_t zone, int count, int flags);
318 static void zone_free_limit(uma_zone_t zone, int count);
319 static void bucket_enable(void);
320 static void bucket_init(void);
321 static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int);
322 static void bucket_free(uma_zone_t zone, uma_bucket_t, void *);
323 static void bucket_zone_drain(int domain);
324 static uma_bucket_t zone_alloc_bucket(uma_zone_t, void *, int, int);
325 static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
326 static void slab_free_item(uma_zone_t zone, uma_slab_t slab, void *item);
327 static size_t slab_sizeof(int nitems);
328 static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
329     uma_fini fini, int align, uint32_t flags);
330 static int zone_import(void *, void **, int, int, int);
331 static void zone_release(void *, void **, int);
332 static bool cache_alloc(uma_zone_t, uma_cache_t, void *, int);
333 static bool cache_free(uma_zone_t, uma_cache_t, void *, void *, int);
334 
335 static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
336 static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);
337 static int sysctl_handle_uma_zone_allocs(SYSCTL_HANDLER_ARGS);
338 static int sysctl_handle_uma_zone_frees(SYSCTL_HANDLER_ARGS);
339 static int sysctl_handle_uma_zone_flags(SYSCTL_HANDLER_ARGS);
340 static int sysctl_handle_uma_slab_efficiency(SYSCTL_HANDLER_ARGS);
341 static int sysctl_handle_uma_zone_items(SYSCTL_HANDLER_ARGS);
342 
343 static uint64_t uma_zone_get_allocs(uma_zone_t zone);
344 
345 static SYSCTL_NODE(_vm, OID_AUTO, debug, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
346     "Memory allocation debugging");
347 
348 #ifdef INVARIANTS
349 static uint64_t uma_keg_get_allocs(uma_keg_t zone);
350 static inline struct noslabbits *slab_dbg_bits(uma_slab_t slab, uma_keg_t keg);
351 
352 static bool uma_dbg_kskip(uma_keg_t keg, void *mem);
353 static bool uma_dbg_zskip(uma_zone_t zone, void *mem);
354 static void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item);
355 static void uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item);
356 
357 static u_int dbg_divisor = 1;
358 SYSCTL_UINT(_vm_debug, OID_AUTO, divisor,
359     CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &dbg_divisor, 0,
360     "Debug & thrash every this item in memory allocator");
361 
362 static counter_u64_t uma_dbg_cnt = EARLY_COUNTER;
363 static counter_u64_t uma_skip_cnt = EARLY_COUNTER;
364 SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, trashed, CTLFLAG_RD,
365     &uma_dbg_cnt, "memory items debugged");
366 SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, skipped, CTLFLAG_RD,
367     &uma_skip_cnt, "memory items skipped, not debugged");
368 #endif
369 
370 SYSCTL_NODE(_vm, OID_AUTO, uma, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
371     "Universal Memory Allocator");
372 
373 SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLFLAG_MPSAFE|CTLTYPE_INT,
374     0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones");
375 
376 SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLFLAG_MPSAFE|CTLTYPE_STRUCT,
377     0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");
378 
379 static int zone_warnings = 1;
380 SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RWTUN, &zone_warnings, 0,
381     "Warn when UMA zones becomes full");
382 
383 static int multipage_slabs = 1;
384 TUNABLE_INT("vm.debug.uma_multipage_slabs", &multipage_slabs);
385 SYSCTL_INT(_vm_debug, OID_AUTO, uma_multipage_slabs,
386     CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &multipage_slabs, 0,
387     "UMA may choose larger slab sizes for better efficiency");
388 
389 /*
390  * Select the slab zone for an offpage slab with the given maximum item count.
391  */
392 static inline uma_zone_t
393 slabzone(int ipers)
394 {
395 
396 	return (slabzones[ipers > SLABZONE0_SETSIZE]);
397 }
398 
399 /*
400  * This routine checks to see whether or not it's safe to enable buckets.
401  */
402 static void
403 bucket_enable(void)
404 {
405 
406 	KASSERT(booted >= BOOT_KVA, ("Bucket enable before init"));
407 	bucketdisable = vm_page_count_min();
408 }
409 
410 /*
411  * Initialize bucket_zones, the array of zones of buckets of various sizes.
412  *
413  * For each zone, calculate the memory required for each bucket, consisting
414  * of the header and an array of pointers.
415  */
416 static void
417 bucket_init(void)
418 {
419 	struct uma_bucket_zone *ubz;
420 	int size;
421 
422 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) {
423 		size = roundup(sizeof(struct uma_bucket), sizeof(void *));
424 		size += sizeof(void *) * ubz->ubz_entries;
425 		ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
426 		    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
427 		    UMA_ZONE_MTXCLASS | UMA_ZFLAG_BUCKET |
428 		    UMA_ZONE_FIRSTTOUCH);
429 	}
430 }
431 
432 /*
433  * Given a desired number of entries for a bucket, return the zone from which
434  * to allocate the bucket.
435  */
436 static struct uma_bucket_zone *
437 bucket_zone_lookup(int entries)
438 {
439 	struct uma_bucket_zone *ubz;
440 
441 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
442 		if (ubz->ubz_entries >= entries)
443 			return (ubz);
444 	ubz--;
445 	return (ubz);
446 }
447 
448 static int
449 bucket_select(int size)
450 {
451 	struct uma_bucket_zone *ubz;
452 
453 	ubz = &bucket_zones[0];
454 	if (size > ubz->ubz_maxsize)
455 		return MAX((ubz->ubz_maxsize * ubz->ubz_entries) / size, 1);
456 
457 	for (; ubz->ubz_entries != 0; ubz++)
458 		if (ubz->ubz_maxsize < size)
459 			break;
460 	ubz--;
461 	return (ubz->ubz_entries);
462 }
463 
464 static uma_bucket_t
465 bucket_alloc(uma_zone_t zone, void *udata, int flags)
466 {
467 	struct uma_bucket_zone *ubz;
468 	uma_bucket_t bucket;
469 
470 	/*
471 	 * Don't allocate buckets early in boot.
472 	 */
473 	if (__predict_false(booted < BOOT_KVA))
474 		return (NULL);
475 
476 	/*
477 	 * To limit bucket recursion we store the original zone flags
478 	 * in a cookie passed via zalloc_arg/zfree_arg.  This allows the
479 	 * NOVM flag to persist even through deep recursions.  We also
480 	 * store ZFLAG_BUCKET once we have recursed attempting to allocate
481 	 * a bucket for a bucket zone so we do not allow infinite bucket
482 	 * recursion.  This cookie will even persist to frees of unused
483 	 * buckets via the allocation path or bucket allocations in the
484 	 * free path.
485 	 */
486 	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
487 		udata = (void *)(uintptr_t)zone->uz_flags;
488 	else {
489 		if ((uintptr_t)udata & UMA_ZFLAG_BUCKET)
490 			return (NULL);
491 		udata = (void *)((uintptr_t)udata | UMA_ZFLAG_BUCKET);
492 	}
493 	if (((uintptr_t)udata & UMA_ZONE_VM) != 0)
494 		flags |= M_NOVM;
495 	ubz = bucket_zone_lookup(atomic_load_16(&zone->uz_bucket_size));
496 	if (ubz->ubz_zone == zone && (ubz + 1)->ubz_entries != 0)
497 		ubz++;
498 	bucket = uma_zalloc_arg(ubz->ubz_zone, udata, flags);
499 	if (bucket) {
500 #ifdef INVARIANTS
501 		bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
502 #endif
503 		bucket->ub_cnt = 0;
504 		bucket->ub_entries = min(ubz->ubz_entries,
505 		    zone->uz_bucket_size_max);
506 		bucket->ub_seq = SMR_SEQ_INVALID;
507 		CTR3(KTR_UMA, "bucket_alloc: zone %s(%p) allocated bucket %p",
508 		    zone->uz_name, zone, bucket);
509 	}
510 
511 	return (bucket);
512 }
513 
514 static void
515 bucket_free(uma_zone_t zone, uma_bucket_t bucket, void *udata)
516 {
517 	struct uma_bucket_zone *ubz;
518 
519 	if (bucket->ub_cnt != 0)
520 		bucket_drain(zone, bucket);
521 
522 	KASSERT(bucket->ub_cnt == 0,
523 	    ("bucket_free: Freeing a non free bucket."));
524 	KASSERT(bucket->ub_seq == SMR_SEQ_INVALID,
525 	    ("bucket_free: Freeing an SMR bucket."));
526 	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
527 		udata = (void *)(uintptr_t)zone->uz_flags;
528 	ubz = bucket_zone_lookup(bucket->ub_entries);
529 	uma_zfree_arg(ubz->ubz_zone, bucket, udata);
530 }
531 
532 static void
533 bucket_zone_drain(int domain)
534 {
535 	struct uma_bucket_zone *ubz;
536 
537 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
538 		uma_zone_reclaim_domain(ubz->ubz_zone, UMA_RECLAIM_DRAIN,
539 		    domain);
540 }
541 
542 #ifdef KASAN
543 _Static_assert(UMA_SMALLEST_UNIT % KASAN_SHADOW_SCALE == 0,
544     "Base UMA allocation size not a multiple of the KASAN scale factor");
545 
546 static void
547 kasan_mark_item_valid(uma_zone_t zone, void *item)
548 {
549 	void *pcpu_item;
550 	size_t sz, rsz;
551 	int i;
552 
553 	if ((zone->uz_flags & UMA_ZONE_NOKASAN) != 0)
554 		return;
555 
556 	sz = zone->uz_size;
557 	rsz = roundup2(sz, KASAN_SHADOW_SCALE);
558 	if ((zone->uz_flags & UMA_ZONE_PCPU) == 0) {
559 		kasan_mark(item, sz, rsz, KASAN_GENERIC_REDZONE);
560 	} else {
561 		pcpu_item = zpcpu_base_to_offset(item);
562 		for (i = 0; i <= mp_maxid; i++)
563 			kasan_mark(zpcpu_get_cpu(pcpu_item, i), sz, rsz,
564 			    KASAN_GENERIC_REDZONE);
565 	}
566 }
567 
568 static void
569 kasan_mark_item_invalid(uma_zone_t zone, void *item)
570 {
571 	void *pcpu_item;
572 	size_t sz;
573 	int i;
574 
575 	if ((zone->uz_flags & UMA_ZONE_NOKASAN) != 0)
576 		return;
577 
578 	sz = roundup2(zone->uz_size, KASAN_SHADOW_SCALE);
579 	if ((zone->uz_flags & UMA_ZONE_PCPU) == 0) {
580 		kasan_mark(item, 0, sz, KASAN_UMA_FREED);
581 	} else {
582 		pcpu_item = zpcpu_base_to_offset(item);
583 		for (i = 0; i <= mp_maxid; i++)
584 			kasan_mark(zpcpu_get_cpu(pcpu_item, i), 0, sz,
585 			    KASAN_UMA_FREED);
586 	}
587 }
588 
589 static void
590 kasan_mark_slab_valid(uma_keg_t keg, void *mem)
591 {
592 	size_t sz;
593 
594 	if ((keg->uk_flags & UMA_ZONE_NOKASAN) == 0) {
595 		sz = keg->uk_ppera * PAGE_SIZE;
596 		kasan_mark(mem, sz, sz, 0);
597 	}
598 }
599 
600 static void
601 kasan_mark_slab_invalid(uma_keg_t keg, void *mem)
602 {
603 	size_t sz;
604 
605 	if ((keg->uk_flags & UMA_ZONE_NOKASAN) == 0) {
606 		if ((keg->uk_flags & UMA_ZFLAG_OFFPAGE) != 0)
607 			sz = keg->uk_ppera * PAGE_SIZE;
608 		else
609 			sz = keg->uk_pgoff;
610 		kasan_mark(mem, 0, sz, KASAN_UMA_FREED);
611 	}
612 }
613 #else /* !KASAN */
614 static void
615 kasan_mark_item_valid(uma_zone_t zone __unused, void *item __unused)
616 {
617 }
618 
619 static void
620 kasan_mark_item_invalid(uma_zone_t zone __unused, void *item __unused)
621 {
622 }
623 
624 static void
625 kasan_mark_slab_valid(uma_keg_t keg __unused, void *mem __unused)
626 {
627 }
628 
629 static void
630 kasan_mark_slab_invalid(uma_keg_t keg __unused, void *mem __unused)
631 {
632 }
633 #endif /* KASAN */
634 
635 /*
636  * Acquire the domain lock and record contention.
637  */
638 static uma_zone_domain_t
639 zone_domain_lock(uma_zone_t zone, int domain)
640 {
641 	uma_zone_domain_t zdom;
642 	bool lockfail;
643 
644 	zdom = ZDOM_GET(zone, domain);
645 	lockfail = false;
646 	if (ZDOM_OWNED(zdom))
647 		lockfail = true;
648 	ZDOM_LOCK(zdom);
649 	/* This is unsynchronized.  The counter does not need to be precise. */
650 	if (lockfail && zone->uz_bucket_size < zone->uz_bucket_size_max)
651 		zone->uz_bucket_size++;
652 	return (zdom);
653 }
654 
655 /*
656  * Search for the domain with the least cached items and return it if it
657  * is out of balance with the preferred domain.
658  */
659 static __noinline int
660 zone_domain_lowest(uma_zone_t zone, int pref)
661 {
662 	long least, nitems, prefitems;
663 	int domain;
664 	int i;
665 
666 	prefitems = least = LONG_MAX;
667 	domain = 0;
668 	for (i = 0; i < vm_ndomains; i++) {
669 		nitems = ZDOM_GET(zone, i)->uzd_nitems;
670 		if (nitems < least) {
671 			domain = i;
672 			least = nitems;
673 		}
674 		if (domain == pref)
675 			prefitems = nitems;
676 	}
677 	if (prefitems < least * 2)
678 		return (pref);
679 
680 	return (domain);
681 }
682 
683 /*
684  * Search for the domain with the most cached items and return it or the
685  * preferred domain if it has enough to proceed.
686  */
687 static __noinline int
688 zone_domain_highest(uma_zone_t zone, int pref)
689 {
690 	long most, nitems;
691 	int domain;
692 	int i;
693 
694 	if (ZDOM_GET(zone, pref)->uzd_nitems > BUCKET_MAX)
695 		return (pref);
696 
697 	most = 0;
698 	domain = 0;
699 	for (i = 0; i < vm_ndomains; i++) {
700 		nitems = ZDOM_GET(zone, i)->uzd_nitems;
701 		if (nitems > most) {
702 			domain = i;
703 			most = nitems;
704 		}
705 	}
706 
707 	return (domain);
708 }
709 
710 /*
711  * Set the maximum imax value.
712  */
713 static void
714 zone_domain_imax_set(uma_zone_domain_t zdom, int nitems)
715 {
716 	long old;
717 
718 	old = zdom->uzd_imax;
719 	do {
720 		if (old >= nitems)
721 			return;
722 	} while (atomic_fcmpset_long(&zdom->uzd_imax, &old, nitems) == 0);
723 
724 	/*
725 	 * We are at new maximum, so do the last WSS update for the old
726 	 * bimin and prepare to measure next allocation batch.
727 	 */
728 	if (zdom->uzd_wss < old - zdom->uzd_bimin)
729 		zdom->uzd_wss = old - zdom->uzd_bimin;
730 	zdom->uzd_bimin = nitems;
731 }
732 
733 /*
734  * Attempt to satisfy an allocation by retrieving a full bucket from one of the
735  * zone's caches.  If a bucket is found the zone is not locked on return.
736  */
737 static uma_bucket_t
738 zone_fetch_bucket(uma_zone_t zone, uma_zone_domain_t zdom, bool reclaim)
739 {
740 	uma_bucket_t bucket;
741 	long cnt;
742 	int i;
743 	bool dtor = false;
744 
745 	ZDOM_LOCK_ASSERT(zdom);
746 
747 	if ((bucket = STAILQ_FIRST(&zdom->uzd_buckets)) == NULL)
748 		return (NULL);
749 
750 	/* SMR Buckets can not be re-used until readers expire. */
751 	if ((zone->uz_flags & UMA_ZONE_SMR) != 0 &&
752 	    bucket->ub_seq != SMR_SEQ_INVALID) {
753 		if (!smr_poll(zone->uz_smr, bucket->ub_seq, false))
754 			return (NULL);
755 		bucket->ub_seq = SMR_SEQ_INVALID;
756 		dtor = (zone->uz_dtor != NULL) || UMA_ALWAYS_CTORDTOR;
757 		if (STAILQ_NEXT(bucket, ub_link) != NULL)
758 			zdom->uzd_seq = STAILQ_NEXT(bucket, ub_link)->ub_seq;
759 	}
760 	STAILQ_REMOVE_HEAD(&zdom->uzd_buckets, ub_link);
761 
762 	KASSERT(zdom->uzd_nitems >= bucket->ub_cnt,
763 	    ("%s: item count underflow (%ld, %d)",
764 	    __func__, zdom->uzd_nitems, bucket->ub_cnt));
765 	KASSERT(bucket->ub_cnt > 0,
766 	    ("%s: empty bucket in bucket cache", __func__));
767 	zdom->uzd_nitems -= bucket->ub_cnt;
768 
769 	if (reclaim) {
770 		/*
771 		 * Shift the bounds of the current WSS interval to avoid
772 		 * perturbing the estimates.
773 		 */
774 		cnt = lmin(zdom->uzd_bimin, bucket->ub_cnt);
775 		atomic_subtract_long(&zdom->uzd_imax, cnt);
776 		zdom->uzd_bimin -= cnt;
777 		zdom->uzd_imin -= lmin(zdom->uzd_imin, bucket->ub_cnt);
778 		if (zdom->uzd_limin >= bucket->ub_cnt) {
779 			zdom->uzd_limin -= bucket->ub_cnt;
780 		} else {
781 			zdom->uzd_limin = 0;
782 			zdom->uzd_timin = 0;
783 		}
784 	} else if (zdom->uzd_bimin > zdom->uzd_nitems) {
785 		zdom->uzd_bimin = zdom->uzd_nitems;
786 		if (zdom->uzd_imin > zdom->uzd_nitems)
787 			zdom->uzd_imin = zdom->uzd_nitems;
788 	}
789 
790 	ZDOM_UNLOCK(zdom);
791 	if (dtor)
792 		for (i = 0; i < bucket->ub_cnt; i++)
793 			item_dtor(zone, bucket->ub_bucket[i], zone->uz_size,
794 			    NULL, SKIP_NONE);
795 
796 	return (bucket);
797 }
798 
799 /*
800  * Insert a full bucket into the specified cache.  The "ws" parameter indicates
801  * whether the bucket's contents should be counted as part of the zone's working
802  * set.  The bucket may be freed if it exceeds the bucket limit.
803  */
804 static void
805 zone_put_bucket(uma_zone_t zone, int domain, uma_bucket_t bucket, void *udata,
806     const bool ws)
807 {
808 	uma_zone_domain_t zdom;
809 
810 	/* We don't cache empty buckets.  This can happen after a reclaim. */
811 	if (bucket->ub_cnt == 0)
812 		goto out;
813 	zdom = zone_domain_lock(zone, domain);
814 
815 	/*
816 	 * Conditionally set the maximum number of items.
817 	 */
818 	zdom->uzd_nitems += bucket->ub_cnt;
819 	if (__predict_true(zdom->uzd_nitems < zone->uz_bucket_max)) {
820 		if (ws) {
821 			zone_domain_imax_set(zdom, zdom->uzd_nitems);
822 		} else {
823 			/*
824 			 * Shift the bounds of the current WSS interval to
825 			 * avoid perturbing the estimates.
826 			 */
827 			atomic_add_long(&zdom->uzd_imax, bucket->ub_cnt);
828 			zdom->uzd_imin += bucket->ub_cnt;
829 			zdom->uzd_bimin += bucket->ub_cnt;
830 			zdom->uzd_limin += bucket->ub_cnt;
831 		}
832 		if (STAILQ_EMPTY(&zdom->uzd_buckets))
833 			zdom->uzd_seq = bucket->ub_seq;
834 
835 		/*
836 		 * Try to promote reuse of recently used items.  For items
837 		 * protected by SMR, try to defer reuse to minimize polling.
838 		 */
839 		if (bucket->ub_seq == SMR_SEQ_INVALID)
840 			STAILQ_INSERT_HEAD(&zdom->uzd_buckets, bucket, ub_link);
841 		else
842 			STAILQ_INSERT_TAIL(&zdom->uzd_buckets, bucket, ub_link);
843 		ZDOM_UNLOCK(zdom);
844 		return;
845 	}
846 	zdom->uzd_nitems -= bucket->ub_cnt;
847 	ZDOM_UNLOCK(zdom);
848 out:
849 	bucket_free(zone, bucket, udata);
850 }
851 
852 /* Pops an item out of a per-cpu cache bucket. */
853 static inline void *
854 cache_bucket_pop(uma_cache_t cache, uma_cache_bucket_t bucket)
855 {
856 	void *item;
857 
858 	CRITICAL_ASSERT(curthread);
859 
860 	bucket->ucb_cnt--;
861 	item = bucket->ucb_bucket->ub_bucket[bucket->ucb_cnt];
862 #ifdef INVARIANTS
863 	bucket->ucb_bucket->ub_bucket[bucket->ucb_cnt] = NULL;
864 	KASSERT(item != NULL, ("uma_zalloc: Bucket pointer mangled."));
865 #endif
866 	cache->uc_allocs++;
867 
868 	return (item);
869 }
870 
871 /* Pushes an item into a per-cpu cache bucket. */
872 static inline void
873 cache_bucket_push(uma_cache_t cache, uma_cache_bucket_t bucket, void *item)
874 {
875 
876 	CRITICAL_ASSERT(curthread);
877 	KASSERT(bucket->ucb_bucket->ub_bucket[bucket->ucb_cnt] == NULL,
878 	    ("uma_zfree: Freeing to non free bucket index."));
879 
880 	bucket->ucb_bucket->ub_bucket[bucket->ucb_cnt] = item;
881 	bucket->ucb_cnt++;
882 	cache->uc_frees++;
883 }
884 
885 /*
886  * Unload a UMA bucket from a per-cpu cache.
887  */
888 static inline uma_bucket_t
889 cache_bucket_unload(uma_cache_bucket_t bucket)
890 {
891 	uma_bucket_t b;
892 
893 	b = bucket->ucb_bucket;
894 	if (b != NULL) {
895 		MPASS(b->ub_entries == bucket->ucb_entries);
896 		b->ub_cnt = bucket->ucb_cnt;
897 		bucket->ucb_bucket = NULL;
898 		bucket->ucb_entries = bucket->ucb_cnt = 0;
899 	}
900 
901 	return (b);
902 }
903 
904 static inline uma_bucket_t
905 cache_bucket_unload_alloc(uma_cache_t cache)
906 {
907 
908 	return (cache_bucket_unload(&cache->uc_allocbucket));
909 }
910 
911 static inline uma_bucket_t
912 cache_bucket_unload_free(uma_cache_t cache)
913 {
914 
915 	return (cache_bucket_unload(&cache->uc_freebucket));
916 }
917 
918 static inline uma_bucket_t
919 cache_bucket_unload_cross(uma_cache_t cache)
920 {
921 
922 	return (cache_bucket_unload(&cache->uc_crossbucket));
923 }
924 
925 /*
926  * Load a bucket into a per-cpu cache bucket.
927  */
928 static inline void
929 cache_bucket_load(uma_cache_bucket_t bucket, uma_bucket_t b)
930 {
931 
932 	CRITICAL_ASSERT(curthread);
933 	MPASS(bucket->ucb_bucket == NULL);
934 	MPASS(b->ub_seq == SMR_SEQ_INVALID);
935 
936 	bucket->ucb_bucket = b;
937 	bucket->ucb_cnt = b->ub_cnt;
938 	bucket->ucb_entries = b->ub_entries;
939 }
940 
941 static inline void
942 cache_bucket_load_alloc(uma_cache_t cache, uma_bucket_t b)
943 {
944 
945 	cache_bucket_load(&cache->uc_allocbucket, b);
946 }
947 
948 static inline void
949 cache_bucket_load_free(uma_cache_t cache, uma_bucket_t b)
950 {
951 
952 	cache_bucket_load(&cache->uc_freebucket, b);
953 }
954 
955 #ifdef NUMA
956 static inline void
957 cache_bucket_load_cross(uma_cache_t cache, uma_bucket_t b)
958 {
959 
960 	cache_bucket_load(&cache->uc_crossbucket, b);
961 }
962 #endif
963 
964 /*
965  * Copy and preserve ucb_spare.
966  */
967 static inline void
968 cache_bucket_copy(uma_cache_bucket_t b1, uma_cache_bucket_t b2)
969 {
970 
971 	b1->ucb_bucket = b2->ucb_bucket;
972 	b1->ucb_entries = b2->ucb_entries;
973 	b1->ucb_cnt = b2->ucb_cnt;
974 }
975 
976 /*
977  * Swap two cache buckets.
978  */
979 static inline void
980 cache_bucket_swap(uma_cache_bucket_t b1, uma_cache_bucket_t b2)
981 {
982 	struct uma_cache_bucket b3;
983 
984 	CRITICAL_ASSERT(curthread);
985 
986 	cache_bucket_copy(&b3, b1);
987 	cache_bucket_copy(b1, b2);
988 	cache_bucket_copy(b2, &b3);
989 }
990 
991 /*
992  * Attempt to fetch a bucket from a zone on behalf of the current cpu cache.
993  */
994 static uma_bucket_t
995 cache_fetch_bucket(uma_zone_t zone, uma_cache_t cache, int domain)
996 {
997 	uma_zone_domain_t zdom;
998 	uma_bucket_t bucket;
999 
1000 	/*
1001 	 * Avoid the lock if possible.
1002 	 */
1003 	zdom = ZDOM_GET(zone, domain);
1004 	if (zdom->uzd_nitems == 0)
1005 		return (NULL);
1006 
1007 	if ((cache_uz_flags(cache) & UMA_ZONE_SMR) != 0 &&
1008 	    !smr_poll(zone->uz_smr, zdom->uzd_seq, false))
1009 		return (NULL);
1010 
1011 	/*
1012 	 * Check the zone's cache of buckets.
1013 	 */
1014 	zdom = zone_domain_lock(zone, domain);
1015 	if ((bucket = zone_fetch_bucket(zone, zdom, false)) != NULL)
1016 		return (bucket);
1017 	ZDOM_UNLOCK(zdom);
1018 
1019 	return (NULL);
1020 }
1021 
1022 static void
1023 zone_log_warning(uma_zone_t zone)
1024 {
1025 	static const struct timeval warninterval = { 300, 0 };
1026 
1027 	if (!zone_warnings || zone->uz_warning == NULL)
1028 		return;
1029 
1030 	if (ratecheck(&zone->uz_ratecheck, &warninterval))
1031 		printf("[zone: %s] %s\n", zone->uz_name, zone->uz_warning);
1032 }
1033 
1034 static inline void
1035 zone_maxaction(uma_zone_t zone)
1036 {
1037 
1038 	if (zone->uz_maxaction.ta_func != NULL)
1039 		taskqueue_enqueue(taskqueue_thread, &zone->uz_maxaction);
1040 }
1041 
1042 /*
1043  * Routine called by timeout which is used to fire off some time interval
1044  * based calculations.  (stats, hash size, etc.)
1045  *
1046  * Arguments:
1047  *	arg   Unused
1048  *
1049  * Returns:
1050  *	Nothing
1051  */
1052 static void
1053 uma_timeout(void *unused)
1054 {
1055 	bucket_enable();
1056 	zone_foreach(zone_timeout, NULL);
1057 
1058 	/* Reschedule this event */
1059 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
1060 }
1061 
1062 /*
1063  * Update the working set size estimates for the zone's bucket cache.
1064  * The constants chosen here are somewhat arbitrary.
1065  */
1066 static void
1067 zone_domain_update_wss(uma_zone_domain_t zdom)
1068 {
1069 	long m;
1070 
1071 	ZDOM_LOCK_ASSERT(zdom);
1072 	MPASS(zdom->uzd_imax >= zdom->uzd_nitems);
1073 	MPASS(zdom->uzd_nitems >= zdom->uzd_bimin);
1074 	MPASS(zdom->uzd_bimin >= zdom->uzd_imin);
1075 
1076 	/*
1077 	 * Estimate WSS as modified moving average of biggest allocation
1078 	 * batches for each period over few minutes (UMA_TIMEOUT of 20s).
1079 	 */
1080 	zdom->uzd_wss = lmax(zdom->uzd_wss * 3 / 4,
1081 	    zdom->uzd_imax - zdom->uzd_bimin);
1082 
1083 	/*
1084 	 * Estimate longtime minimum item count as a combination of recent
1085 	 * minimum item count, adjusted by WSS for safety, and the modified
1086 	 * moving average over the last several hours (UMA_TIMEOUT of 20s).
1087 	 * timin measures time since limin tried to go negative, that means
1088 	 * we were dangerously close to or got out of cache.
1089 	 */
1090 	m = zdom->uzd_imin - zdom->uzd_wss;
1091 	if (m >= 0) {
1092 		if (zdom->uzd_limin >= m)
1093 			zdom->uzd_limin = m;
1094 		else
1095 			zdom->uzd_limin = (m + zdom->uzd_limin * 255) / 256;
1096 		zdom->uzd_timin++;
1097 	} else {
1098 		zdom->uzd_limin = 0;
1099 		zdom->uzd_timin = 0;
1100 	}
1101 
1102 	/* To reduce period edge effects on WSS keep half of the imax. */
1103 	atomic_subtract_long(&zdom->uzd_imax,
1104 	    (zdom->uzd_imax - zdom->uzd_nitems + 1) / 2);
1105 	zdom->uzd_imin = zdom->uzd_bimin = zdom->uzd_nitems;
1106 }
1107 
1108 /*
1109  * Routine to perform timeout driven calculations.  This expands the
1110  * hashes and does per cpu statistics aggregation.
1111  *
1112  *  Returns nothing.
1113  */
1114 static void
1115 zone_timeout(uma_zone_t zone, void *unused)
1116 {
1117 	uma_keg_t keg;
1118 	u_int slabs, pages;
1119 
1120 	if ((zone->uz_flags & UMA_ZFLAG_HASH) == 0)
1121 		goto trim;
1122 
1123 	keg = zone->uz_keg;
1124 
1125 	/*
1126 	 * Hash zones are non-numa by definition so the first domain
1127 	 * is the only one present.
1128 	 */
1129 	KEG_LOCK(keg, 0);
1130 	pages = keg->uk_domain[0].ud_pages;
1131 
1132 	/*
1133 	 * Expand the keg hash table.
1134 	 *
1135 	 * This is done if the number of slabs is larger than the hash size.
1136 	 * What I'm trying to do here is completely reduce collisions.  This
1137 	 * may be a little aggressive.  Should I allow for two collisions max?
1138 	 */
1139 	if ((slabs = pages / keg->uk_ppera) > keg->uk_hash.uh_hashsize) {
1140 		struct uma_hash newhash;
1141 		struct uma_hash oldhash;
1142 		int ret;
1143 
1144 		/*
1145 		 * This is so involved because allocating and freeing
1146 		 * while the keg lock is held will lead to deadlock.
1147 		 * I have to do everything in stages and check for
1148 		 * races.
1149 		 */
1150 		KEG_UNLOCK(keg, 0);
1151 		ret = hash_alloc(&newhash, 1 << fls(slabs));
1152 		KEG_LOCK(keg, 0);
1153 		if (ret) {
1154 			if (hash_expand(&keg->uk_hash, &newhash)) {
1155 				oldhash = keg->uk_hash;
1156 				keg->uk_hash = newhash;
1157 			} else
1158 				oldhash = newhash;
1159 
1160 			KEG_UNLOCK(keg, 0);
1161 			hash_free(&oldhash);
1162 			goto trim;
1163 		}
1164 	}
1165 	KEG_UNLOCK(keg, 0);
1166 
1167 trim:
1168 	/* Trim caches not used for a long time. */
1169 	for (int i = 0; i < vm_ndomains; i++) {
1170 		if (bucket_cache_reclaim_domain(zone, false, false, i) &&
1171 		    (zone->uz_flags & UMA_ZFLAG_CACHE) == 0)
1172 			keg_drain(zone->uz_keg, i);
1173 	}
1174 }
1175 
1176 /*
1177  * Allocate and zero fill the next sized hash table from the appropriate
1178  * backing store.
1179  *
1180  * Arguments:
1181  *	hash  A new hash structure with the old hash size in uh_hashsize
1182  *
1183  * Returns:
1184  *	1 on success and 0 on failure.
1185  */
1186 static int
1187 hash_alloc(struct uma_hash *hash, u_int size)
1188 {
1189 	size_t alloc;
1190 
1191 	KASSERT(powerof2(size), ("hash size must be power of 2"));
1192 	if (size > UMA_HASH_SIZE_INIT)  {
1193 		hash->uh_hashsize = size;
1194 		alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
1195 		hash->uh_slab_hash = malloc(alloc, M_UMAHASH, M_NOWAIT);
1196 	} else {
1197 		alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
1198 		hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
1199 		    UMA_ANYDOMAIN, M_WAITOK);
1200 		hash->uh_hashsize = UMA_HASH_SIZE_INIT;
1201 	}
1202 	if (hash->uh_slab_hash) {
1203 		bzero(hash->uh_slab_hash, alloc);
1204 		hash->uh_hashmask = hash->uh_hashsize - 1;
1205 		return (1);
1206 	}
1207 
1208 	return (0);
1209 }
1210 
1211 /*
1212  * Expands the hash table for HASH zones.  This is done from zone_timeout
1213  * to reduce collisions.  This must not be done in the regular allocation
1214  * path, otherwise, we can recurse on the vm while allocating pages.
1215  *
1216  * Arguments:
1217  *	oldhash  The hash you want to expand
1218  *	newhash  The hash structure for the new table
1219  *
1220  * Returns:
1221  *	Nothing
1222  *
1223  * Discussion:
1224  */
1225 static int
1226 hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
1227 {
1228 	uma_hash_slab_t slab;
1229 	u_int hval;
1230 	u_int idx;
1231 
1232 	if (!newhash->uh_slab_hash)
1233 		return (0);
1234 
1235 	if (oldhash->uh_hashsize >= newhash->uh_hashsize)
1236 		return (0);
1237 
1238 	/*
1239 	 * I need to investigate hash algorithms for resizing without a
1240 	 * full rehash.
1241 	 */
1242 
1243 	for (idx = 0; idx < oldhash->uh_hashsize; idx++)
1244 		while (!LIST_EMPTY(&oldhash->uh_slab_hash[idx])) {
1245 			slab = LIST_FIRST(&oldhash->uh_slab_hash[idx]);
1246 			LIST_REMOVE(slab, uhs_hlink);
1247 			hval = UMA_HASH(newhash, slab->uhs_data);
1248 			LIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
1249 			    slab, uhs_hlink);
1250 		}
1251 
1252 	return (1);
1253 }
1254 
1255 /*
1256  * Free the hash bucket to the appropriate backing store.
1257  *
1258  * Arguments:
1259  *	slab_hash  The hash bucket we're freeing
1260  *	hashsize   The number of entries in that hash bucket
1261  *
1262  * Returns:
1263  *	Nothing
1264  */
1265 static void
1266 hash_free(struct uma_hash *hash)
1267 {
1268 	if (hash->uh_slab_hash == NULL)
1269 		return;
1270 	if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
1271 		zone_free_item(hashzone, hash->uh_slab_hash, NULL, SKIP_NONE);
1272 	else
1273 		free(hash->uh_slab_hash, M_UMAHASH);
1274 }
1275 
1276 /*
1277  * Frees all outstanding items in a bucket
1278  *
1279  * Arguments:
1280  *	zone   The zone to free to, must be unlocked.
1281  *	bucket The free/alloc bucket with items.
1282  *
1283  * Returns:
1284  *	Nothing
1285  */
1286 static void
1287 bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
1288 {
1289 	int i;
1290 
1291 	if (bucket->ub_cnt == 0)
1292 		return;
1293 
1294 	if ((zone->uz_flags & UMA_ZONE_SMR) != 0 &&
1295 	    bucket->ub_seq != SMR_SEQ_INVALID) {
1296 		smr_wait(zone->uz_smr, bucket->ub_seq);
1297 		bucket->ub_seq = SMR_SEQ_INVALID;
1298 		for (i = 0; i < bucket->ub_cnt; i++)
1299 			item_dtor(zone, bucket->ub_bucket[i],
1300 			    zone->uz_size, NULL, SKIP_NONE);
1301 	}
1302 	if (zone->uz_fini)
1303 		for (i = 0; i < bucket->ub_cnt; i++) {
1304 			kasan_mark_item_valid(zone, bucket->ub_bucket[i]);
1305 			zone->uz_fini(bucket->ub_bucket[i], zone->uz_size);
1306 			kasan_mark_item_invalid(zone, bucket->ub_bucket[i]);
1307 		}
1308 	zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt);
1309 	if (zone->uz_max_items > 0)
1310 		zone_free_limit(zone, bucket->ub_cnt);
1311 #ifdef INVARIANTS
1312 	bzero(bucket->ub_bucket, sizeof(void *) * bucket->ub_cnt);
1313 #endif
1314 	bucket->ub_cnt = 0;
1315 }
1316 
1317 /*
1318  * Drains the per cpu caches for a zone.
1319  *
1320  * NOTE: This may only be called while the zone is being torn down, and not
1321  * during normal operation.  This is necessary in order that we do not have
1322  * to migrate CPUs to drain the per-CPU caches.
1323  *
1324  * Arguments:
1325  *	zone     The zone to drain, must be unlocked.
1326  *
1327  * Returns:
1328  *	Nothing
1329  */
1330 static void
1331 cache_drain(uma_zone_t zone)
1332 {
1333 	uma_cache_t cache;
1334 	uma_bucket_t bucket;
1335 	smr_seq_t seq;
1336 	int cpu;
1337 
1338 	/*
1339 	 * XXX: It is safe to not lock the per-CPU caches, because we're
1340 	 * tearing down the zone anyway.  I.e., there will be no further use
1341 	 * of the caches at this point.
1342 	 *
1343 	 * XXX: It would good to be able to assert that the zone is being
1344 	 * torn down to prevent improper use of cache_drain().
1345 	 */
1346 	seq = SMR_SEQ_INVALID;
1347 	if ((zone->uz_flags & UMA_ZONE_SMR) != 0)
1348 		seq = smr_advance(zone->uz_smr);
1349 	CPU_FOREACH(cpu) {
1350 		cache = &zone->uz_cpu[cpu];
1351 		bucket = cache_bucket_unload_alloc(cache);
1352 		if (bucket != NULL)
1353 			bucket_free(zone, bucket, NULL);
1354 		bucket = cache_bucket_unload_free(cache);
1355 		if (bucket != NULL) {
1356 			bucket->ub_seq = seq;
1357 			bucket_free(zone, bucket, NULL);
1358 		}
1359 		bucket = cache_bucket_unload_cross(cache);
1360 		if (bucket != NULL) {
1361 			bucket->ub_seq = seq;
1362 			bucket_free(zone, bucket, NULL);
1363 		}
1364 	}
1365 	bucket_cache_reclaim(zone, true, UMA_ANYDOMAIN);
1366 }
1367 
1368 static void
1369 cache_shrink(uma_zone_t zone, void *unused)
1370 {
1371 
1372 	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
1373 		return;
1374 
1375 	ZONE_LOCK(zone);
1376 	zone->uz_bucket_size =
1377 	    (zone->uz_bucket_size_min + zone->uz_bucket_size) / 2;
1378 	ZONE_UNLOCK(zone);
1379 }
1380 
1381 static void
1382 cache_drain_safe_cpu(uma_zone_t zone, void *unused)
1383 {
1384 	uma_cache_t cache;
1385 	uma_bucket_t b1, b2, b3;
1386 	int domain;
1387 
1388 	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
1389 		return;
1390 
1391 	b1 = b2 = b3 = NULL;
1392 	critical_enter();
1393 	cache = &zone->uz_cpu[curcpu];
1394 	domain = PCPU_GET(domain);
1395 	b1 = cache_bucket_unload_alloc(cache);
1396 
1397 	/*
1398 	 * Don't flush SMR zone buckets.  This leaves the zone without a
1399 	 * bucket and forces every free to synchronize().
1400 	 */
1401 	if ((zone->uz_flags & UMA_ZONE_SMR) == 0) {
1402 		b2 = cache_bucket_unload_free(cache);
1403 		b3 = cache_bucket_unload_cross(cache);
1404 	}
1405 	critical_exit();
1406 
1407 	if (b1 != NULL)
1408 		zone_free_bucket(zone, b1, NULL, domain, false);
1409 	if (b2 != NULL)
1410 		zone_free_bucket(zone, b2, NULL, domain, false);
1411 	if (b3 != NULL) {
1412 		/* Adjust the domain so it goes to zone_free_cross. */
1413 		domain = (domain + 1) % vm_ndomains;
1414 		zone_free_bucket(zone, b3, NULL, domain, false);
1415 	}
1416 }
1417 
1418 /*
1419  * Safely drain per-CPU caches of a zone(s) to alloc bucket.
1420  * This is an expensive call because it needs to bind to all CPUs
1421  * one by one and enter a critical section on each of them in order
1422  * to safely access their cache buckets.
1423  * Zone lock must not be held on call this function.
1424  */
1425 static void
1426 pcpu_cache_drain_safe(uma_zone_t zone)
1427 {
1428 	int cpu;
1429 
1430 	/*
1431 	 * Polite bucket sizes shrinking was not enough, shrink aggressively.
1432 	 */
1433 	if (zone)
1434 		cache_shrink(zone, NULL);
1435 	else
1436 		zone_foreach(cache_shrink, NULL);
1437 
1438 	CPU_FOREACH(cpu) {
1439 		thread_lock(curthread);
1440 		sched_bind(curthread, cpu);
1441 		thread_unlock(curthread);
1442 
1443 		if (zone)
1444 			cache_drain_safe_cpu(zone, NULL);
1445 		else
1446 			zone_foreach(cache_drain_safe_cpu, NULL);
1447 	}
1448 	thread_lock(curthread);
1449 	sched_unbind(curthread);
1450 	thread_unlock(curthread);
1451 }
1452 
1453 /*
1454  * Reclaim cached buckets from a zone.  All buckets are reclaimed if the caller
1455  * requested a drain, otherwise the per-domain caches are trimmed to either
1456  * estimated working set size.
1457  */
1458 static bool
1459 bucket_cache_reclaim_domain(uma_zone_t zone, bool drain, bool trim, int domain)
1460 {
1461 	uma_zone_domain_t zdom;
1462 	uma_bucket_t bucket;
1463 	long target;
1464 	bool done = false;
1465 
1466 	/*
1467 	 * The cross bucket is partially filled and not part of
1468 	 * the item count.  Reclaim it individually here.
1469 	 */
1470 	zdom = ZDOM_GET(zone, domain);
1471 	if ((zone->uz_flags & UMA_ZONE_SMR) == 0 || drain) {
1472 		ZONE_CROSS_LOCK(zone);
1473 		bucket = zdom->uzd_cross;
1474 		zdom->uzd_cross = NULL;
1475 		ZONE_CROSS_UNLOCK(zone);
1476 		if (bucket != NULL)
1477 			bucket_free(zone, bucket, NULL);
1478 	}
1479 
1480 	/*
1481 	 * If we were asked to drain the zone, we are done only once
1482 	 * this bucket cache is empty.  If trim, we reclaim items in
1483 	 * excess of the zone's estimated working set size.  Multiple
1484 	 * consecutive calls will shrink the WSS and so reclaim more.
1485 	 * If neither drain nor trim, then voluntarily reclaim 1/4
1486 	 * (to reduce first spike) of items not used for a long time.
1487 	 */
1488 	ZDOM_LOCK(zdom);
1489 	zone_domain_update_wss(zdom);
1490 	if (drain)
1491 		target = 0;
1492 	else if (trim)
1493 		target = zdom->uzd_wss;
1494 	else if (zdom->uzd_timin > 900 / UMA_TIMEOUT)
1495 		target = zdom->uzd_nitems - zdom->uzd_limin / 4;
1496 	else {
1497 		ZDOM_UNLOCK(zdom);
1498 		return (done);
1499 	}
1500 	while ((bucket = STAILQ_FIRST(&zdom->uzd_buckets)) != NULL &&
1501 	    zdom->uzd_nitems >= target + bucket->ub_cnt) {
1502 		bucket = zone_fetch_bucket(zone, zdom, true);
1503 		if (bucket == NULL)
1504 			break;
1505 		bucket_free(zone, bucket, NULL);
1506 		done = true;
1507 		ZDOM_LOCK(zdom);
1508 	}
1509 	ZDOM_UNLOCK(zdom);
1510 	return (done);
1511 }
1512 
1513 static void
1514 bucket_cache_reclaim(uma_zone_t zone, bool drain, int domain)
1515 {
1516 	int i;
1517 
1518 	/*
1519 	 * Shrink the zone bucket size to ensure that the per-CPU caches
1520 	 * don't grow too large.
1521 	 */
1522 	if (zone->uz_bucket_size > zone->uz_bucket_size_min)
1523 		zone->uz_bucket_size--;
1524 
1525 	if (domain != UMA_ANYDOMAIN &&
1526 	    (zone->uz_flags & UMA_ZONE_ROUNDROBIN) == 0) {
1527 		bucket_cache_reclaim_domain(zone, drain, true, domain);
1528 	} else {
1529 		for (i = 0; i < vm_ndomains; i++)
1530 			bucket_cache_reclaim_domain(zone, drain, true, i);
1531 	}
1532 }
1533 
1534 static void
1535 keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
1536 {
1537 	uint8_t *mem;
1538 	size_t size;
1539 	int i;
1540 	uint8_t flags;
1541 
1542 	CTR4(KTR_UMA, "keg_free_slab keg %s(%p) slab %p, returning %d bytes",
1543 	    keg->uk_name, keg, slab, PAGE_SIZE * keg->uk_ppera);
1544 
1545 	mem = slab_data(slab, keg);
1546 	size = PAGE_SIZE * keg->uk_ppera;
1547 
1548 	kasan_mark_slab_valid(keg, mem);
1549 	if (keg->uk_fini != NULL) {
1550 		for (i = start - 1; i > -1; i--)
1551 #ifdef INVARIANTS
1552 		/*
1553 		 * trash_fini implies that dtor was trash_dtor. trash_fini
1554 		 * would check that memory hasn't been modified since free,
1555 		 * which executed trash_dtor.
1556 		 * That's why we need to run uma_dbg_kskip() check here,
1557 		 * albeit we don't make skip check for other init/fini
1558 		 * invocations.
1559 		 */
1560 		if (!uma_dbg_kskip(keg, slab_item(slab, keg, i)) ||
1561 		    keg->uk_fini != trash_fini)
1562 #endif
1563 			keg->uk_fini(slab_item(slab, keg, i), keg->uk_size);
1564 	}
1565 	flags = slab->us_flags;
1566 	if (keg->uk_flags & UMA_ZFLAG_OFFPAGE) {
1567 		zone_free_item(slabzone(keg->uk_ipers), slab_tohashslab(slab),
1568 		    NULL, SKIP_NONE);
1569 	}
1570 	keg->uk_freef(mem, size, flags);
1571 	uma_total_dec(size);
1572 }
1573 
1574 static void
1575 keg_drain_domain(uma_keg_t keg, int domain)
1576 {
1577 	struct slabhead freeslabs;
1578 	uma_domain_t dom;
1579 	uma_slab_t slab, tmp;
1580 	uint32_t i, stofree, stokeep, partial;
1581 
1582 	dom = &keg->uk_domain[domain];
1583 	LIST_INIT(&freeslabs);
1584 
1585 	CTR4(KTR_UMA, "keg_drain %s(%p) domain %d free items: %u",
1586 	    keg->uk_name, keg, domain, dom->ud_free_items);
1587 
1588 	KEG_LOCK(keg, domain);
1589 
1590 	/*
1591 	 * Are the free items in partially allocated slabs sufficient to meet
1592 	 * the reserve? If not, compute the number of fully free slabs that must
1593 	 * be kept.
1594 	 */
1595 	partial = dom->ud_free_items - dom->ud_free_slabs * keg->uk_ipers;
1596 	if (partial < keg->uk_reserve) {
1597 		stokeep = min(dom->ud_free_slabs,
1598 		    howmany(keg->uk_reserve - partial, keg->uk_ipers));
1599 	} else {
1600 		stokeep = 0;
1601 	}
1602 	stofree = dom->ud_free_slabs - stokeep;
1603 
1604 	/*
1605 	 * Partition the free slabs into two sets: those that must be kept in
1606 	 * order to maintain the reserve, and those that may be released back to
1607 	 * the system.  Since one set may be much larger than the other,
1608 	 * populate the smaller of the two sets and swap them if necessary.
1609 	 */
1610 	for (i = min(stofree, stokeep); i > 0; i--) {
1611 		slab = LIST_FIRST(&dom->ud_free_slab);
1612 		LIST_REMOVE(slab, us_link);
1613 		LIST_INSERT_HEAD(&freeslabs, slab, us_link);
1614 	}
1615 	if (stofree > stokeep)
1616 		LIST_SWAP(&freeslabs, &dom->ud_free_slab, uma_slab, us_link);
1617 
1618 	if ((keg->uk_flags & UMA_ZFLAG_HASH) != 0) {
1619 		LIST_FOREACH(slab, &freeslabs, us_link)
1620 			UMA_HASH_REMOVE(&keg->uk_hash, slab);
1621 	}
1622 	dom->ud_free_items -= stofree * keg->uk_ipers;
1623 	dom->ud_free_slabs -= stofree;
1624 	dom->ud_pages -= stofree * keg->uk_ppera;
1625 	KEG_UNLOCK(keg, domain);
1626 
1627 	LIST_FOREACH_SAFE(slab, &freeslabs, us_link, tmp)
1628 		keg_free_slab(keg, slab, keg->uk_ipers);
1629 }
1630 
1631 /*
1632  * Frees pages from a keg back to the system.  This is done on demand from
1633  * the pageout daemon.
1634  *
1635  * Returns nothing.
1636  */
1637 static void
1638 keg_drain(uma_keg_t keg, int domain)
1639 {
1640 	int i;
1641 
1642 	if ((keg->uk_flags & UMA_ZONE_NOFREE) != 0)
1643 		return;
1644 	if (domain != UMA_ANYDOMAIN) {
1645 		keg_drain_domain(keg, domain);
1646 	} else {
1647 		for (i = 0; i < vm_ndomains; i++)
1648 			keg_drain_domain(keg, i);
1649 	}
1650 }
1651 
1652 static void
1653 zone_reclaim(uma_zone_t zone, int domain, int waitok, bool drain)
1654 {
1655 	/*
1656 	 * Count active reclaim operations in order to interlock with
1657 	 * zone_dtor(), which removes the zone from global lists before
1658 	 * attempting to reclaim items itself.
1659 	 *
1660 	 * The zone may be destroyed while sleeping, so only zone_dtor() should
1661 	 * specify M_WAITOK.
1662 	 */
1663 	ZONE_LOCK(zone);
1664 	if (waitok == M_WAITOK) {
1665 		while (zone->uz_reclaimers > 0)
1666 			msleep(zone, ZONE_LOCKPTR(zone), PVM, "zonedrain", 1);
1667 	}
1668 	zone->uz_reclaimers++;
1669 	ZONE_UNLOCK(zone);
1670 	bucket_cache_reclaim(zone, drain, domain);
1671 
1672 	if ((zone->uz_flags & UMA_ZFLAG_CACHE) == 0)
1673 		keg_drain(zone->uz_keg, domain);
1674 	ZONE_LOCK(zone);
1675 	zone->uz_reclaimers--;
1676 	if (zone->uz_reclaimers == 0)
1677 		wakeup(zone);
1678 	ZONE_UNLOCK(zone);
1679 }
1680 
1681 static void
1682 zone_drain(uma_zone_t zone, void *arg)
1683 {
1684 	int domain;
1685 
1686 	domain = (int)(uintptr_t)arg;
1687 	zone_reclaim(zone, domain, M_NOWAIT, true);
1688 }
1689 
1690 static void
1691 zone_trim(uma_zone_t zone, void *arg)
1692 {
1693 	int domain;
1694 
1695 	domain = (int)(uintptr_t)arg;
1696 	zone_reclaim(zone, domain, M_NOWAIT, false);
1697 }
1698 
1699 /*
1700  * Allocate a new slab for a keg and inserts it into the partial slab list.
1701  * The keg should be unlocked on entry.  If the allocation succeeds it will
1702  * be locked on return.
1703  *
1704  * Arguments:
1705  *	flags   Wait flags for the item initialization routine
1706  *	aflags  Wait flags for the slab allocation
1707  *
1708  * Returns:
1709  *	The slab that was allocated or NULL if there is no memory and the
1710  *	caller specified M_NOWAIT.
1711  */
1712 static uma_slab_t
1713 keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int domain, int flags,
1714     int aflags)
1715 {
1716 	uma_domain_t dom;
1717 	uma_slab_t slab;
1718 	unsigned long size;
1719 	uint8_t *mem;
1720 	uint8_t sflags;
1721 	int i;
1722 
1723 	KASSERT(domain >= 0 && domain < vm_ndomains,
1724 	    ("keg_alloc_slab: domain %d out of range", domain));
1725 
1726 	slab = NULL;
1727 	mem = NULL;
1728 	if (keg->uk_flags & UMA_ZFLAG_OFFPAGE) {
1729 		uma_hash_slab_t hslab;
1730 		hslab = zone_alloc_item(slabzone(keg->uk_ipers), NULL,
1731 		    domain, aflags);
1732 		if (hslab == NULL)
1733 			goto fail;
1734 		slab = &hslab->uhs_slab;
1735 	}
1736 
1737 	/*
1738 	 * This reproduces the old vm_zone behavior of zero filling pages the
1739 	 * first time they are added to a zone.
1740 	 *
1741 	 * Malloced items are zeroed in uma_zalloc.
1742 	 */
1743 
1744 	if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
1745 		aflags |= M_ZERO;
1746 	else
1747 		aflags &= ~M_ZERO;
1748 
1749 	if (keg->uk_flags & UMA_ZONE_NODUMP)
1750 		aflags |= M_NODUMP;
1751 
1752 	/* zone is passed for legacy reasons. */
1753 	size = keg->uk_ppera * PAGE_SIZE;
1754 	mem = keg->uk_allocf(zone, size, domain, &sflags, aflags);
1755 	if (mem == NULL) {
1756 		if (keg->uk_flags & UMA_ZFLAG_OFFPAGE)
1757 			zone_free_item(slabzone(keg->uk_ipers),
1758 			    slab_tohashslab(slab), NULL, SKIP_NONE);
1759 		goto fail;
1760 	}
1761 	uma_total_inc(size);
1762 
1763 	/* For HASH zones all pages go to the same uma_domain. */
1764 	if ((keg->uk_flags & UMA_ZFLAG_HASH) != 0)
1765 		domain = 0;
1766 
1767 	/* Point the slab into the allocated memory */
1768 	if (!(keg->uk_flags & UMA_ZFLAG_OFFPAGE))
1769 		slab = (uma_slab_t)(mem + keg->uk_pgoff);
1770 	else
1771 		slab_tohashslab(slab)->uhs_data = mem;
1772 
1773 	if (keg->uk_flags & UMA_ZFLAG_VTOSLAB)
1774 		for (i = 0; i < keg->uk_ppera; i++)
1775 			vsetzoneslab((vm_offset_t)mem + (i * PAGE_SIZE),
1776 			    zone, slab);
1777 
1778 	slab->us_freecount = keg->uk_ipers;
1779 	slab->us_flags = sflags;
1780 	slab->us_domain = domain;
1781 
1782 	BIT_FILL(keg->uk_ipers, &slab->us_free);
1783 #ifdef INVARIANTS
1784 	BIT_ZERO(keg->uk_ipers, slab_dbg_bits(slab, keg));
1785 #endif
1786 
1787 	if (keg->uk_init != NULL) {
1788 		for (i = 0; i < keg->uk_ipers; i++)
1789 			if (keg->uk_init(slab_item(slab, keg, i),
1790 			    keg->uk_size, flags) != 0)
1791 				break;
1792 		if (i != keg->uk_ipers) {
1793 			keg_free_slab(keg, slab, i);
1794 			goto fail;
1795 		}
1796 	}
1797 	kasan_mark_slab_invalid(keg, mem);
1798 	KEG_LOCK(keg, domain);
1799 
1800 	CTR3(KTR_UMA, "keg_alloc_slab: allocated slab %p for %s(%p)",
1801 	    slab, keg->uk_name, keg);
1802 
1803 	if (keg->uk_flags & UMA_ZFLAG_HASH)
1804 		UMA_HASH_INSERT(&keg->uk_hash, slab, mem);
1805 
1806 	/*
1807 	 * If we got a slab here it's safe to mark it partially used
1808 	 * and return.  We assume that the caller is going to remove
1809 	 * at least one item.
1810 	 */
1811 	dom = &keg->uk_domain[domain];
1812 	LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
1813 	dom->ud_pages += keg->uk_ppera;
1814 	dom->ud_free_items += keg->uk_ipers;
1815 
1816 	return (slab);
1817 
1818 fail:
1819 	return (NULL);
1820 }
1821 
1822 /*
1823  * This function is intended to be used early on in place of page_alloc().  It
1824  * performs contiguous physical memory allocations and uses a bump allocator for
1825  * KVA, so is usable before the kernel map is initialized.
1826  */
1827 static void *
1828 startup_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
1829     int wait)
1830 {
1831 	vm_paddr_t pa;
1832 	vm_page_t m;
1833 	void *mem;
1834 	int pages;
1835 	int i;
1836 
1837 	pages = howmany(bytes, PAGE_SIZE);
1838 	KASSERT(pages > 0, ("%s can't reserve 0 pages", __func__));
1839 
1840 	*pflag = UMA_SLAB_BOOT;
1841 	m = vm_page_alloc_contig_domain(NULL, 0, domain,
1842 	    malloc2vm_flags(wait) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED, pages,
1843 	    (vm_paddr_t)0, ~(vm_paddr_t)0, 1, 0, VM_MEMATTR_DEFAULT);
1844 	if (m == NULL)
1845 		return (NULL);
1846 
1847 	pa = VM_PAGE_TO_PHYS(m);
1848 	for (i = 0; i < pages; i++, pa += PAGE_SIZE) {
1849 #if defined(__aarch64__) || defined(__amd64__) || defined(__mips__) || \
1850     defined(__riscv) || defined(__powerpc64__)
1851 		if ((wait & M_NODUMP) == 0)
1852 			dump_add_page(pa);
1853 #endif
1854 	}
1855 	/* Allocate KVA and indirectly advance bootmem. */
1856 	mem = (void *)pmap_map(&bootmem, m->phys_addr,
1857 	    m->phys_addr + (pages * PAGE_SIZE), VM_PROT_READ | VM_PROT_WRITE);
1858         if ((wait & M_ZERO) != 0)
1859                 bzero(mem, pages * PAGE_SIZE);
1860 
1861         return (mem);
1862 }
1863 
1864 static void
1865 startup_free(void *mem, vm_size_t bytes)
1866 {
1867 	vm_offset_t va;
1868 	vm_page_t m;
1869 
1870 	va = (vm_offset_t)mem;
1871 	m = PHYS_TO_VM_PAGE(pmap_kextract(va));
1872 
1873 	/*
1874 	 * startup_alloc() returns direct-mapped slabs on some platforms.  Avoid
1875 	 * unmapping ranges of the direct map.
1876 	 */
1877 	if (va >= bootstart && va + bytes <= bootmem)
1878 		pmap_remove(kernel_pmap, va, va + bytes);
1879 	for (; bytes != 0; bytes -= PAGE_SIZE, m++) {
1880 #if defined(__aarch64__) || defined(__amd64__) || defined(__mips__) || \
1881     defined(__riscv) || defined(__powerpc64__)
1882 		dump_drop_page(VM_PAGE_TO_PHYS(m));
1883 #endif
1884 		vm_page_unwire_noq(m);
1885 		vm_page_free(m);
1886 	}
1887 }
1888 
1889 /*
1890  * Allocates a number of pages from the system
1891  *
1892  * Arguments:
1893  *	bytes  The number of bytes requested
1894  *	wait  Shall we wait?
1895  *
1896  * Returns:
1897  *	A pointer to the alloced memory or possibly
1898  *	NULL if M_NOWAIT is set.
1899  */
1900 static void *
1901 page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
1902     int wait)
1903 {
1904 	void *p;	/* Returned page */
1905 
1906 	*pflag = UMA_SLAB_KERNEL;
1907 	p = (void *)kmem_malloc_domainset(DOMAINSET_FIXED(domain), bytes, wait);
1908 
1909 	return (p);
1910 }
1911 
1912 static void *
1913 pcpu_page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
1914     int wait)
1915 {
1916 	struct pglist alloctail;
1917 	vm_offset_t addr, zkva;
1918 	int cpu, flags;
1919 	vm_page_t p, p_next;
1920 #ifdef NUMA
1921 	struct pcpu *pc;
1922 #endif
1923 
1924 	MPASS(bytes == (mp_maxid + 1) * PAGE_SIZE);
1925 
1926 	TAILQ_INIT(&alloctail);
1927 	flags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
1928 	    malloc2vm_flags(wait);
1929 	*pflag = UMA_SLAB_KERNEL;
1930 	for (cpu = 0; cpu <= mp_maxid; cpu++) {
1931 		if (CPU_ABSENT(cpu)) {
1932 			p = vm_page_alloc(NULL, 0, flags);
1933 		} else {
1934 #ifndef NUMA
1935 			p = vm_page_alloc(NULL, 0, flags);
1936 #else
1937 			pc = pcpu_find(cpu);
1938 			if (__predict_false(VM_DOMAIN_EMPTY(pc->pc_domain)))
1939 				p = NULL;
1940 			else
1941 				p = vm_page_alloc_domain(NULL, 0,
1942 				    pc->pc_domain, flags);
1943 			if (__predict_false(p == NULL))
1944 				p = vm_page_alloc(NULL, 0, flags);
1945 #endif
1946 		}
1947 		if (__predict_false(p == NULL))
1948 			goto fail;
1949 		TAILQ_INSERT_TAIL(&alloctail, p, listq);
1950 	}
1951 	if ((addr = kva_alloc(bytes)) == 0)
1952 		goto fail;
1953 	zkva = addr;
1954 	TAILQ_FOREACH(p, &alloctail, listq) {
1955 		pmap_qenter(zkva, &p, 1);
1956 		zkva += PAGE_SIZE;
1957 	}
1958 	return ((void*)addr);
1959 fail:
1960 	TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
1961 		vm_page_unwire_noq(p);
1962 		vm_page_free(p);
1963 	}
1964 	return (NULL);
1965 }
1966 
1967 /*
1968  * Allocates a number of pages from within an object
1969  *
1970  * Arguments:
1971  *	bytes  The number of bytes requested
1972  *	wait   Shall we wait?
1973  *
1974  * Returns:
1975  *	A pointer to the alloced memory or possibly
1976  *	NULL if M_NOWAIT is set.
1977  */
1978 static void *
1979 noobj_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags,
1980     int wait)
1981 {
1982 	TAILQ_HEAD(, vm_page) alloctail;
1983 	u_long npages;
1984 	vm_offset_t retkva, zkva;
1985 	vm_page_t p, p_next;
1986 	uma_keg_t keg;
1987 
1988 	TAILQ_INIT(&alloctail);
1989 	keg = zone->uz_keg;
1990 
1991 	npages = howmany(bytes, PAGE_SIZE);
1992 	while (npages > 0) {
1993 		p = vm_page_alloc_domain(NULL, 0, domain, VM_ALLOC_INTERRUPT |
1994 		    VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
1995 		    ((wait & M_WAITOK) != 0 ? VM_ALLOC_WAITOK :
1996 		    VM_ALLOC_NOWAIT));
1997 		if (p != NULL) {
1998 			/*
1999 			 * Since the page does not belong to an object, its
2000 			 * listq is unused.
2001 			 */
2002 			TAILQ_INSERT_TAIL(&alloctail, p, listq);
2003 			npages--;
2004 			continue;
2005 		}
2006 		/*
2007 		 * Page allocation failed, free intermediate pages and
2008 		 * exit.
2009 		 */
2010 		TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
2011 			vm_page_unwire_noq(p);
2012 			vm_page_free(p);
2013 		}
2014 		return (NULL);
2015 	}
2016 	*flags = UMA_SLAB_PRIV;
2017 	zkva = keg->uk_kva +
2018 	    atomic_fetchadd_long(&keg->uk_offset, round_page(bytes));
2019 	retkva = zkva;
2020 	TAILQ_FOREACH(p, &alloctail, listq) {
2021 		pmap_qenter(zkva, &p, 1);
2022 		zkva += PAGE_SIZE;
2023 	}
2024 
2025 	return ((void *)retkva);
2026 }
2027 
2028 /*
2029  * Allocate physically contiguous pages.
2030  */
2031 static void *
2032 contig_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
2033     int wait)
2034 {
2035 
2036 	*pflag = UMA_SLAB_KERNEL;
2037 	return ((void *)kmem_alloc_contig_domainset(DOMAINSET_FIXED(domain),
2038 	    bytes, wait, 0, ~(vm_paddr_t)0, 1, 0, VM_MEMATTR_DEFAULT));
2039 }
2040 
2041 /*
2042  * Frees a number of pages to the system
2043  *
2044  * Arguments:
2045  *	mem   A pointer to the memory to be freed
2046  *	size  The size of the memory being freed
2047  *	flags The original p->us_flags field
2048  *
2049  * Returns:
2050  *	Nothing
2051  */
2052 static void
2053 page_free(void *mem, vm_size_t size, uint8_t flags)
2054 {
2055 
2056 	if ((flags & UMA_SLAB_BOOT) != 0) {
2057 		startup_free(mem, size);
2058 		return;
2059 	}
2060 
2061 	KASSERT((flags & UMA_SLAB_KERNEL) != 0,
2062 	    ("UMA: page_free used with invalid flags %x", flags));
2063 
2064 	kmem_free((vm_offset_t)mem, size);
2065 }
2066 
2067 /*
2068  * Frees pcpu zone allocations
2069  *
2070  * Arguments:
2071  *	mem   A pointer to the memory to be freed
2072  *	size  The size of the memory being freed
2073  *	flags The original p->us_flags field
2074  *
2075  * Returns:
2076  *	Nothing
2077  */
2078 static void
2079 pcpu_page_free(void *mem, vm_size_t size, uint8_t flags)
2080 {
2081 	vm_offset_t sva, curva;
2082 	vm_paddr_t paddr;
2083 	vm_page_t m;
2084 
2085 	MPASS(size == (mp_maxid+1)*PAGE_SIZE);
2086 
2087 	if ((flags & UMA_SLAB_BOOT) != 0) {
2088 		startup_free(mem, size);
2089 		return;
2090 	}
2091 
2092 	sva = (vm_offset_t)mem;
2093 	for (curva = sva; curva < sva + size; curva += PAGE_SIZE) {
2094 		paddr = pmap_kextract(curva);
2095 		m = PHYS_TO_VM_PAGE(paddr);
2096 		vm_page_unwire_noq(m);
2097 		vm_page_free(m);
2098 	}
2099 	pmap_qremove(sva, size >> PAGE_SHIFT);
2100 	kva_free(sva, size);
2101 }
2102 
2103 /*
2104  * Zero fill initializer
2105  *
2106  * Arguments/Returns follow uma_init specifications
2107  */
2108 static int
2109 zero_init(void *mem, int size, int flags)
2110 {
2111 	bzero(mem, size);
2112 	return (0);
2113 }
2114 
2115 #ifdef INVARIANTS
2116 static struct noslabbits *
2117 slab_dbg_bits(uma_slab_t slab, uma_keg_t keg)
2118 {
2119 
2120 	return ((void *)((char *)&slab->us_free + BITSET_SIZE(keg->uk_ipers)));
2121 }
2122 #endif
2123 
2124 /*
2125  * Actual size of embedded struct slab (!OFFPAGE).
2126  */
2127 static size_t
2128 slab_sizeof(int nitems)
2129 {
2130 	size_t s;
2131 
2132 	s = sizeof(struct uma_slab) + BITSET_SIZE(nitems) * SLAB_BITSETS;
2133 	return (roundup(s, UMA_ALIGN_PTR + 1));
2134 }
2135 
2136 #define	UMA_FIXPT_SHIFT	31
2137 #define	UMA_FRAC_FIXPT(n, d)						\
2138 	((uint32_t)(((uint64_t)(n) << UMA_FIXPT_SHIFT) / (d)))
2139 #define	UMA_FIXPT_PCT(f)						\
2140 	((u_int)(((uint64_t)100 * (f)) >> UMA_FIXPT_SHIFT))
2141 #define	UMA_PCT_FIXPT(pct)	UMA_FRAC_FIXPT((pct), 100)
2142 #define	UMA_MIN_EFF	UMA_PCT_FIXPT(100 - UMA_MAX_WASTE)
2143 
2144 /*
2145  * Compute the number of items that will fit in a slab.  If hdr is true, the
2146  * item count may be limited to provide space in the slab for an inline slab
2147  * header.  Otherwise, all slab space will be provided for item storage.
2148  */
2149 static u_int
2150 slab_ipers_hdr(u_int size, u_int rsize, u_int slabsize, bool hdr)
2151 {
2152 	u_int ipers;
2153 	u_int padpi;
2154 
2155 	/* The padding between items is not needed after the last item. */
2156 	padpi = rsize - size;
2157 
2158 	if (hdr) {
2159 		/*
2160 		 * Start with the maximum item count and remove items until
2161 		 * the slab header first alongside the allocatable memory.
2162 		 */
2163 		for (ipers = MIN(SLAB_MAX_SETSIZE,
2164 		    (slabsize + padpi - slab_sizeof(1)) / rsize);
2165 		    ipers > 0 &&
2166 		    ipers * rsize - padpi + slab_sizeof(ipers) > slabsize;
2167 		    ipers--)
2168 			continue;
2169 	} else {
2170 		ipers = MIN((slabsize + padpi) / rsize, SLAB_MAX_SETSIZE);
2171 	}
2172 
2173 	return (ipers);
2174 }
2175 
2176 struct keg_layout_result {
2177 	u_int format;
2178 	u_int slabsize;
2179 	u_int ipers;
2180 	u_int eff;
2181 };
2182 
2183 static void
2184 keg_layout_one(uma_keg_t keg, u_int rsize, u_int slabsize, u_int fmt,
2185     struct keg_layout_result *kl)
2186 {
2187 	u_int total;
2188 
2189 	kl->format = fmt;
2190 	kl->slabsize = slabsize;
2191 
2192 	/* Handle INTERNAL as inline with an extra page. */
2193 	if ((fmt & UMA_ZFLAG_INTERNAL) != 0) {
2194 		kl->format &= ~UMA_ZFLAG_INTERNAL;
2195 		kl->slabsize += PAGE_SIZE;
2196 	}
2197 
2198 	kl->ipers = slab_ipers_hdr(keg->uk_size, rsize, kl->slabsize,
2199 	    (fmt & UMA_ZFLAG_OFFPAGE) == 0);
2200 
2201 	/* Account for memory used by an offpage slab header. */
2202 	total = kl->slabsize;
2203 	if ((fmt & UMA_ZFLAG_OFFPAGE) != 0)
2204 		total += slabzone(kl->ipers)->uz_keg->uk_rsize;
2205 
2206 	kl->eff = UMA_FRAC_FIXPT(kl->ipers * rsize, total);
2207 }
2208 
2209 /*
2210  * Determine the format of a uma keg.  This determines where the slab header
2211  * will be placed (inline or offpage) and calculates ipers, rsize, and ppera.
2212  *
2213  * Arguments
2214  *	keg  The zone we should initialize
2215  *
2216  * Returns
2217  *	Nothing
2218  */
2219 static void
2220 keg_layout(uma_keg_t keg)
2221 {
2222 	struct keg_layout_result kl = {}, kl_tmp;
2223 	u_int fmts[2];
2224 	u_int alignsize;
2225 	u_int nfmt;
2226 	u_int pages;
2227 	u_int rsize;
2228 	u_int slabsize;
2229 	u_int i, j;
2230 
2231 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0 ||
2232 	    (keg->uk_size <= UMA_PCPU_ALLOC_SIZE &&
2233 	     (keg->uk_flags & UMA_ZONE_CACHESPREAD) == 0),
2234 	    ("%s: cannot configure for PCPU: keg=%s, size=%u, flags=0x%b",
2235 	     __func__, keg->uk_name, keg->uk_size, keg->uk_flags,
2236 	     PRINT_UMA_ZFLAGS));
2237 	KASSERT((keg->uk_flags & (UMA_ZFLAG_INTERNAL | UMA_ZONE_VM)) == 0 ||
2238 	    (keg->uk_flags & (UMA_ZONE_NOTOUCH | UMA_ZONE_PCPU)) == 0,
2239 	    ("%s: incompatible flags 0x%b", __func__, keg->uk_flags,
2240 	     PRINT_UMA_ZFLAGS));
2241 
2242 	alignsize = keg->uk_align + 1;
2243 #ifdef KASAN
2244 	/*
2245 	 * ASAN requires that each allocation be aligned to the shadow map
2246 	 * scale factor.
2247 	 */
2248 	if (alignsize < KASAN_SHADOW_SCALE)
2249 		alignsize = KASAN_SHADOW_SCALE;
2250 #endif
2251 
2252 	/*
2253 	 * Calculate the size of each allocation (rsize) according to
2254 	 * alignment.  If the requested size is smaller than we have
2255 	 * allocation bits for we round it up.
2256 	 */
2257 	rsize = MAX(keg->uk_size, UMA_SMALLEST_UNIT);
2258 	rsize = roundup2(rsize, alignsize);
2259 
2260 	if ((keg->uk_flags & UMA_ZONE_CACHESPREAD) != 0) {
2261 		/*
2262 		 * We want one item to start on every align boundary in a page.
2263 		 * To do this we will span pages.  We will also extend the item
2264 		 * by the size of align if it is an even multiple of align.
2265 		 * Otherwise, it would fall on the same boundary every time.
2266 		 */
2267 		if ((rsize & alignsize) == 0)
2268 			rsize += alignsize;
2269 		slabsize = rsize * (PAGE_SIZE / alignsize);
2270 		slabsize = MIN(slabsize, rsize * SLAB_MAX_SETSIZE);
2271 		slabsize = MIN(slabsize, UMA_CACHESPREAD_MAX_SIZE);
2272 		slabsize = round_page(slabsize);
2273 	} else {
2274 		/*
2275 		 * Start with a slab size of as many pages as it takes to
2276 		 * represent a single item.  We will try to fit as many
2277 		 * additional items into the slab as possible.
2278 		 */
2279 		slabsize = round_page(keg->uk_size);
2280 	}
2281 
2282 	/* Build a list of all of the available formats for this keg. */
2283 	nfmt = 0;
2284 
2285 	/* Evaluate an inline slab layout. */
2286 	if ((keg->uk_flags & (UMA_ZONE_NOTOUCH | UMA_ZONE_PCPU)) == 0)
2287 		fmts[nfmt++] = 0;
2288 
2289 	/* TODO: vm_page-embedded slab. */
2290 
2291 	/*
2292 	 * We can't do OFFPAGE if we're internal or if we've been
2293 	 * asked to not go to the VM for buckets.  If we do this we
2294 	 * may end up going to the VM for slabs which we do not want
2295 	 * to do if we're UMA_ZONE_VM, which clearly forbids it.
2296 	 * In those cases, evaluate a pseudo-format called INTERNAL
2297 	 * which has an inline slab header and one extra page to
2298 	 * guarantee that it fits.
2299 	 *
2300 	 * Otherwise, see if using an OFFPAGE slab will improve our
2301 	 * efficiency.
2302 	 */
2303 	if ((keg->uk_flags & (UMA_ZFLAG_INTERNAL | UMA_ZONE_VM)) != 0)
2304 		fmts[nfmt++] = UMA_ZFLAG_INTERNAL;
2305 	else
2306 		fmts[nfmt++] = UMA_ZFLAG_OFFPAGE;
2307 
2308 	/*
2309 	 * Choose a slab size and format which satisfy the minimum efficiency.
2310 	 * Prefer the smallest slab size that meets the constraints.
2311 	 *
2312 	 * Start with a minimum slab size, to accommodate CACHESPREAD.  Then,
2313 	 * for small items (up to PAGE_SIZE), the iteration increment is one
2314 	 * page; and for large items, the increment is one item.
2315 	 */
2316 	i = (slabsize + rsize - keg->uk_size) / MAX(PAGE_SIZE, rsize);
2317 	KASSERT(i >= 1, ("keg %s(%p) flags=0x%b slabsize=%u, rsize=%u, i=%u",
2318 	    keg->uk_name, keg, keg->uk_flags, PRINT_UMA_ZFLAGS, slabsize,
2319 	    rsize, i));
2320 	for ( ; ; i++) {
2321 		slabsize = (rsize <= PAGE_SIZE) ? ptoa(i) :
2322 		    round_page(rsize * (i - 1) + keg->uk_size);
2323 
2324 		for (j = 0; j < nfmt; j++) {
2325 			/* Only if we have no viable format yet. */
2326 			if ((fmts[j] & UMA_ZFLAG_INTERNAL) != 0 &&
2327 			    kl.ipers > 0)
2328 				continue;
2329 
2330 			keg_layout_one(keg, rsize, slabsize, fmts[j], &kl_tmp);
2331 			if (kl_tmp.eff <= kl.eff)
2332 				continue;
2333 
2334 			kl = kl_tmp;
2335 
2336 			CTR6(KTR_UMA, "keg %s layout: format %#x "
2337 			    "(ipers %u * rsize %u) / slabsize %#x = %u%% eff",
2338 			    keg->uk_name, kl.format, kl.ipers, rsize,
2339 			    kl.slabsize, UMA_FIXPT_PCT(kl.eff));
2340 
2341 			/* Stop when we reach the minimum efficiency. */
2342 			if (kl.eff >= UMA_MIN_EFF)
2343 				break;
2344 		}
2345 
2346 		if (kl.eff >= UMA_MIN_EFF || !multipage_slabs ||
2347 		    slabsize >= SLAB_MAX_SETSIZE * rsize ||
2348 		    (keg->uk_flags & (UMA_ZONE_PCPU | UMA_ZONE_CONTIG)) != 0)
2349 			break;
2350 	}
2351 
2352 	pages = atop(kl.slabsize);
2353 	if ((keg->uk_flags & UMA_ZONE_PCPU) != 0)
2354 		pages *= mp_maxid + 1;
2355 
2356 	keg->uk_rsize = rsize;
2357 	keg->uk_ipers = kl.ipers;
2358 	keg->uk_ppera = pages;
2359 	keg->uk_flags |= kl.format;
2360 
2361 	/*
2362 	 * How do we find the slab header if it is offpage or if not all item
2363 	 * start addresses are in the same page?  We could solve the latter
2364 	 * case with vaddr alignment, but we don't.
2365 	 */
2366 	if ((keg->uk_flags & UMA_ZFLAG_OFFPAGE) != 0 ||
2367 	    (keg->uk_ipers - 1) * rsize >= PAGE_SIZE) {
2368 		if ((keg->uk_flags & UMA_ZONE_NOTPAGE) != 0)
2369 			keg->uk_flags |= UMA_ZFLAG_HASH;
2370 		else
2371 			keg->uk_flags |= UMA_ZFLAG_VTOSLAB;
2372 	}
2373 
2374 	CTR6(KTR_UMA, "%s: keg=%s, flags=%#x, rsize=%u, ipers=%u, ppera=%u",
2375 	    __func__, keg->uk_name, keg->uk_flags, rsize, keg->uk_ipers,
2376 	    pages);
2377 	KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_MAX_SETSIZE,
2378 	    ("%s: keg=%s, flags=0x%b, rsize=%u, ipers=%u, ppera=%u", __func__,
2379 	     keg->uk_name, keg->uk_flags, PRINT_UMA_ZFLAGS, rsize,
2380 	     keg->uk_ipers, pages));
2381 }
2382 
2383 /*
2384  * Keg header ctor.  This initializes all fields, locks, etc.  And inserts
2385  * the keg onto the global keg list.
2386  *
2387  * Arguments/Returns follow uma_ctor specifications
2388  *	udata  Actually uma_kctor_args
2389  */
2390 static int
2391 keg_ctor(void *mem, int size, void *udata, int flags)
2392 {
2393 	struct uma_kctor_args *arg = udata;
2394 	uma_keg_t keg = mem;
2395 	uma_zone_t zone;
2396 	int i;
2397 
2398 	bzero(keg, size);
2399 	keg->uk_size = arg->size;
2400 	keg->uk_init = arg->uminit;
2401 	keg->uk_fini = arg->fini;
2402 	keg->uk_align = arg->align;
2403 	keg->uk_reserve = 0;
2404 	keg->uk_flags = arg->flags;
2405 
2406 	/*
2407 	 * We use a global round-robin policy by default.  Zones with
2408 	 * UMA_ZONE_FIRSTTOUCH set will use first-touch instead, in which
2409 	 * case the iterator is never run.
2410 	 */
2411 	keg->uk_dr.dr_policy = DOMAINSET_RR();
2412 	keg->uk_dr.dr_iter = 0;
2413 
2414 	/*
2415 	 * The primary zone is passed to us at keg-creation time.
2416 	 */
2417 	zone = arg->zone;
2418 	keg->uk_name = zone->uz_name;
2419 
2420 	if (arg->flags & UMA_ZONE_ZINIT)
2421 		keg->uk_init = zero_init;
2422 
2423 	if (arg->flags & UMA_ZONE_MALLOC)
2424 		keg->uk_flags |= UMA_ZFLAG_VTOSLAB;
2425 
2426 #ifndef SMP
2427 	keg->uk_flags &= ~UMA_ZONE_PCPU;
2428 #endif
2429 
2430 	keg_layout(keg);
2431 
2432 	/*
2433 	 * Use a first-touch NUMA policy for kegs that pmap_extract() will
2434 	 * work on.  Use round-robin for everything else.
2435 	 *
2436 	 * Zones may override the default by specifying either.
2437 	 */
2438 #ifdef NUMA
2439 	if ((keg->uk_flags &
2440 	    (UMA_ZONE_ROUNDROBIN | UMA_ZFLAG_CACHE | UMA_ZONE_NOTPAGE)) == 0)
2441 		keg->uk_flags |= UMA_ZONE_FIRSTTOUCH;
2442 	else if ((keg->uk_flags & UMA_ZONE_FIRSTTOUCH) == 0)
2443 		keg->uk_flags |= UMA_ZONE_ROUNDROBIN;
2444 #endif
2445 
2446 	/*
2447 	 * If we haven't booted yet we need allocations to go through the
2448 	 * startup cache until the vm is ready.
2449 	 */
2450 #ifdef UMA_MD_SMALL_ALLOC
2451 	if (keg->uk_ppera == 1)
2452 		keg->uk_allocf = uma_small_alloc;
2453 	else
2454 #endif
2455 	if (booted < BOOT_KVA)
2456 		keg->uk_allocf = startup_alloc;
2457 	else if (keg->uk_flags & UMA_ZONE_PCPU)
2458 		keg->uk_allocf = pcpu_page_alloc;
2459 	else if ((keg->uk_flags & UMA_ZONE_CONTIG) != 0 && keg->uk_ppera > 1)
2460 		keg->uk_allocf = contig_alloc;
2461 	else
2462 		keg->uk_allocf = page_alloc;
2463 #ifdef UMA_MD_SMALL_ALLOC
2464 	if (keg->uk_ppera == 1)
2465 		keg->uk_freef = uma_small_free;
2466 	else
2467 #endif
2468 	if (keg->uk_flags & UMA_ZONE_PCPU)
2469 		keg->uk_freef = pcpu_page_free;
2470 	else
2471 		keg->uk_freef = page_free;
2472 
2473 	/*
2474 	 * Initialize keg's locks.
2475 	 */
2476 	for (i = 0; i < vm_ndomains; i++)
2477 		KEG_LOCK_INIT(keg, i, (arg->flags & UMA_ZONE_MTXCLASS));
2478 
2479 	/*
2480 	 * If we're putting the slab header in the actual page we need to
2481 	 * figure out where in each page it goes.  See slab_sizeof
2482 	 * definition.
2483 	 */
2484 	if (!(keg->uk_flags & UMA_ZFLAG_OFFPAGE)) {
2485 		size_t shsize;
2486 
2487 		shsize = slab_sizeof(keg->uk_ipers);
2488 		keg->uk_pgoff = (PAGE_SIZE * keg->uk_ppera) - shsize;
2489 		/*
2490 		 * The only way the following is possible is if with our
2491 		 * UMA_ALIGN_PTR adjustments we are now bigger than
2492 		 * UMA_SLAB_SIZE.  I haven't checked whether this is
2493 		 * mathematically possible for all cases, so we make
2494 		 * sure here anyway.
2495 		 */
2496 		KASSERT(keg->uk_pgoff + shsize <= PAGE_SIZE * keg->uk_ppera,
2497 		    ("zone %s ipers %d rsize %d size %d slab won't fit",
2498 		    zone->uz_name, keg->uk_ipers, keg->uk_rsize, keg->uk_size));
2499 	}
2500 
2501 	if (keg->uk_flags & UMA_ZFLAG_HASH)
2502 		hash_alloc(&keg->uk_hash, 0);
2503 
2504 	CTR3(KTR_UMA, "keg_ctor %p zone %s(%p)", keg, zone->uz_name, zone);
2505 
2506 	LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);
2507 
2508 	rw_wlock(&uma_rwlock);
2509 	LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
2510 	rw_wunlock(&uma_rwlock);
2511 	return (0);
2512 }
2513 
2514 static void
2515 zone_kva_available(uma_zone_t zone, void *unused)
2516 {
2517 	uma_keg_t keg;
2518 
2519 	if ((zone->uz_flags & UMA_ZFLAG_CACHE) != 0)
2520 		return;
2521 	KEG_GET(zone, keg);
2522 
2523 	if (keg->uk_allocf == startup_alloc) {
2524 		/* Switch to the real allocator. */
2525 		if (keg->uk_flags & UMA_ZONE_PCPU)
2526 			keg->uk_allocf = pcpu_page_alloc;
2527 		else if ((keg->uk_flags & UMA_ZONE_CONTIG) != 0 &&
2528 		    keg->uk_ppera > 1)
2529 			keg->uk_allocf = contig_alloc;
2530 		else
2531 			keg->uk_allocf = page_alloc;
2532 	}
2533 }
2534 
2535 static void
2536 zone_alloc_counters(uma_zone_t zone, void *unused)
2537 {
2538 
2539 	zone->uz_allocs = counter_u64_alloc(M_WAITOK);
2540 	zone->uz_frees = counter_u64_alloc(M_WAITOK);
2541 	zone->uz_fails = counter_u64_alloc(M_WAITOK);
2542 	zone->uz_xdomain = counter_u64_alloc(M_WAITOK);
2543 }
2544 
2545 static void
2546 zone_alloc_sysctl(uma_zone_t zone, void *unused)
2547 {
2548 	uma_zone_domain_t zdom;
2549 	uma_domain_t dom;
2550 	uma_keg_t keg;
2551 	struct sysctl_oid *oid, *domainoid;
2552 	int domains, i, cnt;
2553 	static const char *nokeg = "cache zone";
2554 	char *c;
2555 
2556 	/*
2557 	 * Make a sysctl safe copy of the zone name by removing
2558 	 * any special characters and handling dups by appending
2559 	 * an index.
2560 	 */
2561 	if (zone->uz_namecnt != 0) {
2562 		/* Count the number of decimal digits and '_' separator. */
2563 		for (i = 1, cnt = zone->uz_namecnt; cnt != 0; i++)
2564 			cnt /= 10;
2565 		zone->uz_ctlname = malloc(strlen(zone->uz_name) + i + 1,
2566 		    M_UMA, M_WAITOK);
2567 		sprintf(zone->uz_ctlname, "%s_%d", zone->uz_name,
2568 		    zone->uz_namecnt);
2569 	} else
2570 		zone->uz_ctlname = strdup(zone->uz_name, M_UMA);
2571 	for (c = zone->uz_ctlname; *c != '\0'; c++)
2572 		if (strchr("./\\ -", *c) != NULL)
2573 			*c = '_';
2574 
2575 	/*
2576 	 * Basic parameters at the root.
2577 	 */
2578 	zone->uz_oid = SYSCTL_ADD_NODE(NULL, SYSCTL_STATIC_CHILDREN(_vm_uma),
2579 	    OID_AUTO, zone->uz_ctlname, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
2580 	oid = zone->uz_oid;
2581 	SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2582 	    "size", CTLFLAG_RD, &zone->uz_size, 0, "Allocation size");
2583 	SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2584 	    "flags", CTLFLAG_RD | CTLTYPE_STRING | CTLFLAG_MPSAFE,
2585 	    zone, 0, sysctl_handle_uma_zone_flags, "A",
2586 	    "Allocator configuration flags");
2587 	SYSCTL_ADD_U16(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2588 	    "bucket_size", CTLFLAG_RD, &zone->uz_bucket_size, 0,
2589 	    "Desired per-cpu cache size");
2590 	SYSCTL_ADD_U16(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2591 	    "bucket_size_max", CTLFLAG_RD, &zone->uz_bucket_size_max, 0,
2592 	    "Maximum allowed per-cpu cache size");
2593 
2594 	/*
2595 	 * keg if present.
2596 	 */
2597 	if ((zone->uz_flags & UMA_ZFLAG_HASH) == 0)
2598 		domains = vm_ndomains;
2599 	else
2600 		domains = 1;
2601 	oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(zone->uz_oid), OID_AUTO,
2602 	    "keg", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
2603 	keg = zone->uz_keg;
2604 	if ((zone->uz_flags & UMA_ZFLAG_CACHE) == 0) {
2605 		SYSCTL_ADD_CONST_STRING(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2606 		    "name", CTLFLAG_RD, keg->uk_name, "Keg name");
2607 		SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2608 		    "rsize", CTLFLAG_RD, &keg->uk_rsize, 0,
2609 		    "Real object size with alignment");
2610 		SYSCTL_ADD_U16(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2611 		    "ppera", CTLFLAG_RD, &keg->uk_ppera, 0,
2612 		    "pages per-slab allocation");
2613 		SYSCTL_ADD_U16(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2614 		    "ipers", CTLFLAG_RD, &keg->uk_ipers, 0,
2615 		    "items available per-slab");
2616 		SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2617 		    "align", CTLFLAG_RD, &keg->uk_align, 0,
2618 		    "item alignment mask");
2619 		SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2620 		    "reserve", CTLFLAG_RD, &keg->uk_reserve, 0,
2621 		    "number of reserved items");
2622 		SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2623 		    "efficiency", CTLFLAG_RD | CTLTYPE_INT | CTLFLAG_MPSAFE,
2624 		    keg, 0, sysctl_handle_uma_slab_efficiency, "I",
2625 		    "Slab utilization (100 - internal fragmentation %)");
2626 		domainoid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(oid),
2627 		    OID_AUTO, "domain", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
2628 		for (i = 0; i < domains; i++) {
2629 			dom = &keg->uk_domain[i];
2630 			oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(domainoid),
2631 			    OID_AUTO, VM_DOMAIN(i)->vmd_name,
2632 			    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
2633 			SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2634 			    "pages", CTLFLAG_RD, &dom->ud_pages, 0,
2635 			    "Total pages currently allocated from VM");
2636 			SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2637 			    "free_items", CTLFLAG_RD, &dom->ud_free_items, 0,
2638 			    "items free in the slab layer");
2639 		}
2640 	} else
2641 		SYSCTL_ADD_CONST_STRING(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2642 		    "name", CTLFLAG_RD, nokeg, "Keg name");
2643 
2644 	/*
2645 	 * Information about zone limits.
2646 	 */
2647 	oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(zone->uz_oid), OID_AUTO,
2648 	    "limit", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
2649 	SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2650 	    "items", CTLFLAG_RD | CTLTYPE_U64 | CTLFLAG_MPSAFE,
2651 	    zone, 0, sysctl_handle_uma_zone_items, "QU",
2652 	    "Current number of allocated items if limit is set");
2653 	SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2654 	    "max_items", CTLFLAG_RD, &zone->uz_max_items, 0,
2655 	    "Maximum number of allocated and cached items");
2656 	SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2657 	    "sleepers", CTLFLAG_RD, &zone->uz_sleepers, 0,
2658 	    "Number of threads sleeping at limit");
2659 	SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2660 	    "sleeps", CTLFLAG_RD, &zone->uz_sleeps, 0,
2661 	    "Total zone limit sleeps");
2662 	SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2663 	    "bucket_max", CTLFLAG_RD, &zone->uz_bucket_max, 0,
2664 	    "Maximum number of items in each domain's bucket cache");
2665 
2666 	/*
2667 	 * Per-domain zone information.
2668 	 */
2669 	domainoid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(zone->uz_oid),
2670 	    OID_AUTO, "domain", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
2671 	for (i = 0; i < domains; i++) {
2672 		zdom = ZDOM_GET(zone, i);
2673 		oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(domainoid),
2674 		    OID_AUTO, VM_DOMAIN(i)->vmd_name,
2675 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
2676 		SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2677 		    "nitems", CTLFLAG_RD, &zdom->uzd_nitems,
2678 		    "number of items in this domain");
2679 		SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2680 		    "imax", CTLFLAG_RD, &zdom->uzd_imax,
2681 		    "maximum item count in this period");
2682 		SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2683 		    "imin", CTLFLAG_RD, &zdom->uzd_imin,
2684 		    "minimum item count in this period");
2685 		SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2686 		    "bimin", CTLFLAG_RD, &zdom->uzd_bimin,
2687 		    "Minimum item count in this batch");
2688 		SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2689 		    "wss", CTLFLAG_RD, &zdom->uzd_wss,
2690 		    "Working set size");
2691 		SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2692 		    "limin", CTLFLAG_RD, &zdom->uzd_limin,
2693 		    "Long time minimum item count");
2694 		SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2695 		    "timin", CTLFLAG_RD, &zdom->uzd_timin, 0,
2696 		    "Time since zero long time minimum item count");
2697 	}
2698 
2699 	/*
2700 	 * General statistics.
2701 	 */
2702 	oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(zone->uz_oid), OID_AUTO,
2703 	    "stats", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
2704 	SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2705 	    "current", CTLFLAG_RD | CTLTYPE_INT | CTLFLAG_MPSAFE,
2706 	    zone, 1, sysctl_handle_uma_zone_cur, "I",
2707 	    "Current number of allocated items");
2708 	SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2709 	    "allocs", CTLFLAG_RD | CTLTYPE_U64 | CTLFLAG_MPSAFE,
2710 	    zone, 0, sysctl_handle_uma_zone_allocs, "QU",
2711 	    "Total allocation calls");
2712 	SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2713 	    "frees", CTLFLAG_RD | CTLTYPE_U64 | CTLFLAG_MPSAFE,
2714 	    zone, 0, sysctl_handle_uma_zone_frees, "QU",
2715 	    "Total free calls");
2716 	SYSCTL_ADD_COUNTER_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2717 	    "fails", CTLFLAG_RD, &zone->uz_fails,
2718 	    "Number of allocation failures");
2719 	SYSCTL_ADD_COUNTER_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2720 	    "xdomain", CTLFLAG_RD, &zone->uz_xdomain,
2721 	    "Free calls from the wrong domain");
2722 }
2723 
2724 struct uma_zone_count {
2725 	const char	*name;
2726 	int		count;
2727 };
2728 
2729 static void
2730 zone_count(uma_zone_t zone, void *arg)
2731 {
2732 	struct uma_zone_count *cnt;
2733 
2734 	cnt = arg;
2735 	/*
2736 	 * Some zones are rapidly created with identical names and
2737 	 * destroyed out of order.  This can lead to gaps in the count.
2738 	 * Use one greater than the maximum observed for this name.
2739 	 */
2740 	if (strcmp(zone->uz_name, cnt->name) == 0)
2741 		cnt->count = MAX(cnt->count,
2742 		    zone->uz_namecnt + 1);
2743 }
2744 
2745 static void
2746 zone_update_caches(uma_zone_t zone)
2747 {
2748 	int i;
2749 
2750 	for (i = 0; i <= mp_maxid; i++) {
2751 		cache_set_uz_size(&zone->uz_cpu[i], zone->uz_size);
2752 		cache_set_uz_flags(&zone->uz_cpu[i], zone->uz_flags);
2753 	}
2754 }
2755 
2756 /*
2757  * Zone header ctor.  This initializes all fields, locks, etc.
2758  *
2759  * Arguments/Returns follow uma_ctor specifications
2760  *	udata  Actually uma_zctor_args
2761  */
2762 static int
2763 zone_ctor(void *mem, int size, void *udata, int flags)
2764 {
2765 	struct uma_zone_count cnt;
2766 	struct uma_zctor_args *arg = udata;
2767 	uma_zone_domain_t zdom;
2768 	uma_zone_t zone = mem;
2769 	uma_zone_t z;
2770 	uma_keg_t keg;
2771 	int i;
2772 
2773 	bzero(zone, size);
2774 	zone->uz_name = arg->name;
2775 	zone->uz_ctor = arg->ctor;
2776 	zone->uz_dtor = arg->dtor;
2777 	zone->uz_init = NULL;
2778 	zone->uz_fini = NULL;
2779 	zone->uz_sleeps = 0;
2780 	zone->uz_bucket_size = 0;
2781 	zone->uz_bucket_size_min = 0;
2782 	zone->uz_bucket_size_max = BUCKET_MAX;
2783 	zone->uz_flags = (arg->flags & UMA_ZONE_SMR);
2784 	zone->uz_warning = NULL;
2785 	/* The domain structures follow the cpu structures. */
2786 	zone->uz_bucket_max = ULONG_MAX;
2787 	timevalclear(&zone->uz_ratecheck);
2788 
2789 	/* Count the number of duplicate names. */
2790 	cnt.name = arg->name;
2791 	cnt.count = 0;
2792 	zone_foreach(zone_count, &cnt);
2793 	zone->uz_namecnt = cnt.count;
2794 	ZONE_CROSS_LOCK_INIT(zone);
2795 
2796 	for (i = 0; i < vm_ndomains; i++) {
2797 		zdom = ZDOM_GET(zone, i);
2798 		ZDOM_LOCK_INIT(zone, zdom, (arg->flags & UMA_ZONE_MTXCLASS));
2799 		STAILQ_INIT(&zdom->uzd_buckets);
2800 	}
2801 
2802 #if defined(INVARIANTS) && !defined(KASAN)
2803 	if (arg->uminit == trash_init && arg->fini == trash_fini)
2804 		zone->uz_flags |= UMA_ZFLAG_TRASH | UMA_ZFLAG_CTORDTOR;
2805 #elif defined(KASAN)
2806 	if ((arg->flags & (UMA_ZONE_NOFREE | UMA_ZFLAG_CACHE)) != 0)
2807 		arg->flags |= UMA_ZONE_NOKASAN;
2808 #endif
2809 
2810 	/*
2811 	 * This is a pure cache zone, no kegs.
2812 	 */
2813 	if (arg->import) {
2814 		KASSERT((arg->flags & UMA_ZFLAG_CACHE) != 0,
2815 		    ("zone_ctor: Import specified for non-cache zone."));
2816 		zone->uz_flags = arg->flags;
2817 		zone->uz_size = arg->size;
2818 		zone->uz_import = arg->import;
2819 		zone->uz_release = arg->release;
2820 		zone->uz_arg = arg->arg;
2821 #ifdef NUMA
2822 		/*
2823 		 * Cache zones are round-robin unless a policy is
2824 		 * specified because they may have incompatible
2825 		 * constraints.
2826 		 */
2827 		if ((zone->uz_flags & UMA_ZONE_FIRSTTOUCH) == 0)
2828 			zone->uz_flags |= UMA_ZONE_ROUNDROBIN;
2829 #endif
2830 		rw_wlock(&uma_rwlock);
2831 		LIST_INSERT_HEAD(&uma_cachezones, zone, uz_link);
2832 		rw_wunlock(&uma_rwlock);
2833 		goto out;
2834 	}
2835 
2836 	/*
2837 	 * Use the regular zone/keg/slab allocator.
2838 	 */
2839 	zone->uz_import = zone_import;
2840 	zone->uz_release = zone_release;
2841 	zone->uz_arg = zone;
2842 	keg = arg->keg;
2843 
2844 	if (arg->flags & UMA_ZONE_SECONDARY) {
2845 		KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0,
2846 		    ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
2847 		KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
2848 		zone->uz_init = arg->uminit;
2849 		zone->uz_fini = arg->fini;
2850 		zone->uz_flags |= UMA_ZONE_SECONDARY;
2851 		rw_wlock(&uma_rwlock);
2852 		ZONE_LOCK(zone);
2853 		LIST_FOREACH(z, &keg->uk_zones, uz_link) {
2854 			if (LIST_NEXT(z, uz_link) == NULL) {
2855 				LIST_INSERT_AFTER(z, zone, uz_link);
2856 				break;
2857 			}
2858 		}
2859 		ZONE_UNLOCK(zone);
2860 		rw_wunlock(&uma_rwlock);
2861 	} else if (keg == NULL) {
2862 		if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
2863 		    arg->align, arg->flags)) == NULL)
2864 			return (ENOMEM);
2865 	} else {
2866 		struct uma_kctor_args karg;
2867 		int error;
2868 
2869 		/* We should only be here from uma_startup() */
2870 		karg.size = arg->size;
2871 		karg.uminit = arg->uminit;
2872 		karg.fini = arg->fini;
2873 		karg.align = arg->align;
2874 		karg.flags = (arg->flags & ~UMA_ZONE_SMR);
2875 		karg.zone = zone;
2876 		error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
2877 		    flags);
2878 		if (error)
2879 			return (error);
2880 	}
2881 
2882 	/* Inherit properties from the keg. */
2883 	zone->uz_keg = keg;
2884 	zone->uz_size = keg->uk_size;
2885 	zone->uz_flags |= (keg->uk_flags &
2886 	    (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT));
2887 
2888 out:
2889 	if (booted >= BOOT_PCPU) {
2890 		zone_alloc_counters(zone, NULL);
2891 		if (booted >= BOOT_RUNNING)
2892 			zone_alloc_sysctl(zone, NULL);
2893 	} else {
2894 		zone->uz_allocs = EARLY_COUNTER;
2895 		zone->uz_frees = EARLY_COUNTER;
2896 		zone->uz_fails = EARLY_COUNTER;
2897 	}
2898 
2899 	/* Caller requests a private SMR context. */
2900 	if ((zone->uz_flags & UMA_ZONE_SMR) != 0)
2901 		zone->uz_smr = smr_create(zone->uz_name, 0, 0);
2902 
2903 	KASSERT((arg->flags & (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET)) !=
2904 	    (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET),
2905 	    ("Invalid zone flag combination"));
2906 	if (arg->flags & UMA_ZFLAG_INTERNAL)
2907 		zone->uz_bucket_size_max = zone->uz_bucket_size = 0;
2908 	if ((arg->flags & UMA_ZONE_MAXBUCKET) != 0)
2909 		zone->uz_bucket_size = BUCKET_MAX;
2910 	else if ((arg->flags & UMA_ZONE_NOBUCKET) != 0)
2911 		zone->uz_bucket_size = 0;
2912 	else
2913 		zone->uz_bucket_size = bucket_select(zone->uz_size);
2914 	zone->uz_bucket_size_min = zone->uz_bucket_size;
2915 	if (zone->uz_dtor != NULL || zone->uz_ctor != NULL)
2916 		zone->uz_flags |= UMA_ZFLAG_CTORDTOR;
2917 	zone_update_caches(zone);
2918 
2919 	return (0);
2920 }
2921 
2922 /*
2923  * Keg header dtor.  This frees all data, destroys locks, frees the hash
2924  * table and removes the keg from the global list.
2925  *
2926  * Arguments/Returns follow uma_dtor specifications
2927  *	udata  unused
2928  */
2929 static void
2930 keg_dtor(void *arg, int size, void *udata)
2931 {
2932 	uma_keg_t keg;
2933 	uint32_t free, pages;
2934 	int i;
2935 
2936 	keg = (uma_keg_t)arg;
2937 	free = pages = 0;
2938 	for (i = 0; i < vm_ndomains; i++) {
2939 		free += keg->uk_domain[i].ud_free_items;
2940 		pages += keg->uk_domain[i].ud_pages;
2941 		KEG_LOCK_FINI(keg, i);
2942 	}
2943 	if (pages != 0)
2944 		printf("Freed UMA keg (%s) was not empty (%u items). "
2945 		    " Lost %u pages of memory.\n",
2946 		    keg->uk_name ? keg->uk_name : "",
2947 		    pages / keg->uk_ppera * keg->uk_ipers - free, pages);
2948 
2949 	hash_free(&keg->uk_hash);
2950 }
2951 
2952 /*
2953  * Zone header dtor.
2954  *
2955  * Arguments/Returns follow uma_dtor specifications
2956  *	udata  unused
2957  */
2958 static void
2959 zone_dtor(void *arg, int size, void *udata)
2960 {
2961 	uma_zone_t zone;
2962 	uma_keg_t keg;
2963 	int i;
2964 
2965 	zone = (uma_zone_t)arg;
2966 
2967 	sysctl_remove_oid(zone->uz_oid, 1, 1);
2968 
2969 	if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
2970 		cache_drain(zone);
2971 
2972 	rw_wlock(&uma_rwlock);
2973 	LIST_REMOVE(zone, uz_link);
2974 	rw_wunlock(&uma_rwlock);
2975 	if ((zone->uz_flags & (UMA_ZONE_SECONDARY | UMA_ZFLAG_CACHE)) == 0) {
2976 		keg = zone->uz_keg;
2977 		keg->uk_reserve = 0;
2978 	}
2979 	zone_reclaim(zone, UMA_ANYDOMAIN, M_WAITOK, true);
2980 
2981 	/*
2982 	 * We only destroy kegs from non secondary/non cache zones.
2983 	 */
2984 	if ((zone->uz_flags & (UMA_ZONE_SECONDARY | UMA_ZFLAG_CACHE)) == 0) {
2985 		keg = zone->uz_keg;
2986 		rw_wlock(&uma_rwlock);
2987 		LIST_REMOVE(keg, uk_link);
2988 		rw_wunlock(&uma_rwlock);
2989 		zone_free_item(kegs, keg, NULL, SKIP_NONE);
2990 	}
2991 	counter_u64_free(zone->uz_allocs);
2992 	counter_u64_free(zone->uz_frees);
2993 	counter_u64_free(zone->uz_fails);
2994 	counter_u64_free(zone->uz_xdomain);
2995 	free(zone->uz_ctlname, M_UMA);
2996 	for (i = 0; i < vm_ndomains; i++)
2997 		ZDOM_LOCK_FINI(ZDOM_GET(zone, i));
2998 	ZONE_CROSS_LOCK_FINI(zone);
2999 }
3000 
3001 static void
3002 zone_foreach_unlocked(void (*zfunc)(uma_zone_t, void *arg), void *arg)
3003 {
3004 	uma_keg_t keg;
3005 	uma_zone_t zone;
3006 
3007 	LIST_FOREACH(keg, &uma_kegs, uk_link) {
3008 		LIST_FOREACH(zone, &keg->uk_zones, uz_link)
3009 			zfunc(zone, arg);
3010 	}
3011 	LIST_FOREACH(zone, &uma_cachezones, uz_link)
3012 		zfunc(zone, arg);
3013 }
3014 
3015 /*
3016  * Traverses every zone in the system and calls a callback
3017  *
3018  * Arguments:
3019  *	zfunc  A pointer to a function which accepts a zone
3020  *		as an argument.
3021  *
3022  * Returns:
3023  *	Nothing
3024  */
3025 static void
3026 zone_foreach(void (*zfunc)(uma_zone_t, void *arg), void *arg)
3027 {
3028 
3029 	rw_rlock(&uma_rwlock);
3030 	zone_foreach_unlocked(zfunc, arg);
3031 	rw_runlock(&uma_rwlock);
3032 }
3033 
3034 /*
3035  * Initialize the kernel memory allocator.  This is done after pages can be
3036  * allocated but before general KVA is available.
3037  */
3038 void
3039 uma_startup1(vm_offset_t virtual_avail)
3040 {
3041 	struct uma_zctor_args args;
3042 	size_t ksize, zsize, size;
3043 	uma_keg_t primarykeg;
3044 	uintptr_t m;
3045 	int domain;
3046 	uint8_t pflag;
3047 
3048 	bootstart = bootmem = virtual_avail;
3049 
3050 	rw_init(&uma_rwlock, "UMA lock");
3051 	sx_init(&uma_reclaim_lock, "umareclaim");
3052 
3053 	ksize = sizeof(struct uma_keg) +
3054 	    (sizeof(struct uma_domain) * vm_ndomains);
3055 	ksize = roundup(ksize, UMA_SUPER_ALIGN);
3056 	zsize = sizeof(struct uma_zone) +
3057 	    (sizeof(struct uma_cache) * (mp_maxid + 1)) +
3058 	    (sizeof(struct uma_zone_domain) * vm_ndomains);
3059 	zsize = roundup(zsize, UMA_SUPER_ALIGN);
3060 
3061 	/* Allocate the zone of zones, zone of kegs, and zone of zones keg. */
3062 	size = (zsize * 2) + ksize;
3063 	for (domain = 0; domain < vm_ndomains; domain++) {
3064 		m = (uintptr_t)startup_alloc(NULL, size, domain, &pflag,
3065 		    M_NOWAIT | M_ZERO);
3066 		if (m != 0)
3067 			break;
3068 	}
3069 	zones = (uma_zone_t)m;
3070 	m += zsize;
3071 	kegs = (uma_zone_t)m;
3072 	m += zsize;
3073 	primarykeg = (uma_keg_t)m;
3074 
3075 	/* "manually" create the initial zone */
3076 	memset(&args, 0, sizeof(args));
3077 	args.name = "UMA Kegs";
3078 	args.size = ksize;
3079 	args.ctor = keg_ctor;
3080 	args.dtor = keg_dtor;
3081 	args.uminit = zero_init;
3082 	args.fini = NULL;
3083 	args.keg = primarykeg;
3084 	args.align = UMA_SUPER_ALIGN - 1;
3085 	args.flags = UMA_ZFLAG_INTERNAL;
3086 	zone_ctor(kegs, zsize, &args, M_WAITOK);
3087 
3088 	args.name = "UMA Zones";
3089 	args.size = zsize;
3090 	args.ctor = zone_ctor;
3091 	args.dtor = zone_dtor;
3092 	args.uminit = zero_init;
3093 	args.fini = NULL;
3094 	args.keg = NULL;
3095 	args.align = UMA_SUPER_ALIGN - 1;
3096 	args.flags = UMA_ZFLAG_INTERNAL;
3097 	zone_ctor(zones, zsize, &args, M_WAITOK);
3098 
3099 	/* Now make zones for slab headers */
3100 	slabzones[0] = uma_zcreate("UMA Slabs 0", SLABZONE0_SIZE,
3101 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
3102 	slabzones[1] = uma_zcreate("UMA Slabs 1", SLABZONE1_SIZE,
3103 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
3104 
3105 	hashzone = uma_zcreate("UMA Hash",
3106 	    sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
3107 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
3108 
3109 	bucket_init();
3110 	smr_init();
3111 }
3112 
3113 #ifndef UMA_MD_SMALL_ALLOC
3114 extern void vm_radix_reserve_kva(void);
3115 #endif
3116 
3117 /*
3118  * Advertise the availability of normal kva allocations and switch to
3119  * the default back-end allocator.  Marks the KVA we consumed on startup
3120  * as used in the map.
3121  */
3122 void
3123 uma_startup2(void)
3124 {
3125 
3126 	if (bootstart != bootmem) {
3127 		vm_map_lock(kernel_map);
3128 		(void)vm_map_insert(kernel_map, NULL, 0, bootstart, bootmem,
3129 		    VM_PROT_RW, VM_PROT_RW, MAP_NOFAULT);
3130 		vm_map_unlock(kernel_map);
3131 	}
3132 
3133 #ifndef UMA_MD_SMALL_ALLOC
3134 	/* Set up radix zone to use noobj_alloc. */
3135 	vm_radix_reserve_kva();
3136 #endif
3137 
3138 	booted = BOOT_KVA;
3139 	zone_foreach_unlocked(zone_kva_available, NULL);
3140 	bucket_enable();
3141 }
3142 
3143 /*
3144  * Allocate counters as early as possible so that boot-time allocations are
3145  * accounted more precisely.
3146  */
3147 static void
3148 uma_startup_pcpu(void *arg __unused)
3149 {
3150 
3151 	zone_foreach_unlocked(zone_alloc_counters, NULL);
3152 	booted = BOOT_PCPU;
3153 }
3154 SYSINIT(uma_startup_pcpu, SI_SUB_COUNTER, SI_ORDER_ANY, uma_startup_pcpu, NULL);
3155 
3156 /*
3157  * Finish our initialization steps.
3158  */
3159 static void
3160 uma_startup3(void *arg __unused)
3161 {
3162 
3163 #ifdef INVARIANTS
3164 	TUNABLE_INT_FETCH("vm.debug.divisor", &dbg_divisor);
3165 	uma_dbg_cnt = counter_u64_alloc(M_WAITOK);
3166 	uma_skip_cnt = counter_u64_alloc(M_WAITOK);
3167 #endif
3168 	zone_foreach_unlocked(zone_alloc_sysctl, NULL);
3169 	callout_init(&uma_callout, 1);
3170 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
3171 	booted = BOOT_RUNNING;
3172 
3173 	EVENTHANDLER_REGISTER(shutdown_post_sync, uma_shutdown, NULL,
3174 	    EVENTHANDLER_PRI_FIRST);
3175 }
3176 SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);
3177 
3178 static void
3179 uma_shutdown(void)
3180 {
3181 
3182 	booted = BOOT_SHUTDOWN;
3183 }
3184 
3185 static uma_keg_t
3186 uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
3187 		int align, uint32_t flags)
3188 {
3189 	struct uma_kctor_args args;
3190 
3191 	args.size = size;
3192 	args.uminit = uminit;
3193 	args.fini = fini;
3194 	args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align;
3195 	args.flags = flags;
3196 	args.zone = zone;
3197 	return (zone_alloc_item(kegs, &args, UMA_ANYDOMAIN, M_WAITOK));
3198 }
3199 
3200 /* Public functions */
3201 /* See uma.h */
3202 void
3203 uma_set_align(int align)
3204 {
3205 
3206 	if (align != UMA_ALIGN_CACHE)
3207 		uma_align_cache = align;
3208 }
3209 
3210 /* See uma.h */
3211 uma_zone_t
3212 uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
3213 		uma_init uminit, uma_fini fini, int align, uint32_t flags)
3214 
3215 {
3216 	struct uma_zctor_args args;
3217 	uma_zone_t res;
3218 
3219 	KASSERT(powerof2(align + 1), ("invalid zone alignment %d for \"%s\"",
3220 	    align, name));
3221 
3222 	/* This stuff is essential for the zone ctor */
3223 	memset(&args, 0, sizeof(args));
3224 	args.name = name;
3225 	args.size = size;
3226 	args.ctor = ctor;
3227 	args.dtor = dtor;
3228 	args.uminit = uminit;
3229 	args.fini = fini;
3230 #if defined(INVARIANTS) && !defined(KASAN)
3231 	/*
3232 	 * Inject procedures which check for memory use after free if we are
3233 	 * allowed to scramble the memory while it is not allocated.  This
3234 	 * requires that: UMA is actually able to access the memory, no init
3235 	 * or fini procedures, no dependency on the initial value of the
3236 	 * memory, and no (legitimate) use of the memory after free.  Note,
3237 	 * the ctor and dtor do not need to be empty.
3238 	 */
3239 	if ((!(flags & (UMA_ZONE_ZINIT | UMA_ZONE_NOTOUCH |
3240 	    UMA_ZONE_NOFREE))) && uminit == NULL && fini == NULL) {
3241 		args.uminit = trash_init;
3242 		args.fini = trash_fini;
3243 	}
3244 #endif
3245 	args.align = align;
3246 	args.flags = flags;
3247 	args.keg = NULL;
3248 
3249 	sx_xlock(&uma_reclaim_lock);
3250 	res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK);
3251 	sx_xunlock(&uma_reclaim_lock);
3252 
3253 	return (res);
3254 }
3255 
3256 /* See uma.h */
3257 uma_zone_t
3258 uma_zsecond_create(const char *name, uma_ctor ctor, uma_dtor dtor,
3259     uma_init zinit, uma_fini zfini, uma_zone_t primary)
3260 {
3261 	struct uma_zctor_args args;
3262 	uma_keg_t keg;
3263 	uma_zone_t res;
3264 
3265 	keg = primary->uz_keg;
3266 	memset(&args, 0, sizeof(args));
3267 	args.name = name;
3268 	args.size = keg->uk_size;
3269 	args.ctor = ctor;
3270 	args.dtor = dtor;
3271 	args.uminit = zinit;
3272 	args.fini = zfini;
3273 	args.align = keg->uk_align;
3274 	args.flags = keg->uk_flags | UMA_ZONE_SECONDARY;
3275 	args.keg = keg;
3276 
3277 	sx_xlock(&uma_reclaim_lock);
3278 	res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK);
3279 	sx_xunlock(&uma_reclaim_lock);
3280 
3281 	return (res);
3282 }
3283 
3284 /* See uma.h */
3285 uma_zone_t
3286 uma_zcache_create(const char *name, int size, uma_ctor ctor, uma_dtor dtor,
3287     uma_init zinit, uma_fini zfini, uma_import zimport, uma_release zrelease,
3288     void *arg, int flags)
3289 {
3290 	struct uma_zctor_args args;
3291 
3292 	memset(&args, 0, sizeof(args));
3293 	args.name = name;
3294 	args.size = size;
3295 	args.ctor = ctor;
3296 	args.dtor = dtor;
3297 	args.uminit = zinit;
3298 	args.fini = zfini;
3299 	args.import = zimport;
3300 	args.release = zrelease;
3301 	args.arg = arg;
3302 	args.align = 0;
3303 	args.flags = flags | UMA_ZFLAG_CACHE;
3304 
3305 	return (zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK));
3306 }
3307 
3308 /* See uma.h */
3309 void
3310 uma_zdestroy(uma_zone_t zone)
3311 {
3312 
3313 	/*
3314 	 * Large slabs are expensive to reclaim, so don't bother doing
3315 	 * unnecessary work if we're shutting down.
3316 	 */
3317 	if (booted == BOOT_SHUTDOWN &&
3318 	    zone->uz_fini == NULL && zone->uz_release == zone_release)
3319 		return;
3320 	sx_xlock(&uma_reclaim_lock);
3321 	zone_free_item(zones, zone, NULL, SKIP_NONE);
3322 	sx_xunlock(&uma_reclaim_lock);
3323 }
3324 
3325 void
3326 uma_zwait(uma_zone_t zone)
3327 {
3328 
3329 	if ((zone->uz_flags & UMA_ZONE_SMR) != 0)
3330 		uma_zfree_smr(zone, uma_zalloc_smr(zone, M_WAITOK));
3331 	else if ((zone->uz_flags & UMA_ZONE_PCPU) != 0)
3332 		uma_zfree_pcpu(zone, uma_zalloc_pcpu(zone, M_WAITOK));
3333 	else
3334 		uma_zfree(zone, uma_zalloc(zone, M_WAITOK));
3335 }
3336 
3337 void *
3338 uma_zalloc_pcpu_arg(uma_zone_t zone, void *udata, int flags)
3339 {
3340 	void *item, *pcpu_item;
3341 #ifdef SMP
3342 	int i;
3343 
3344 	MPASS(zone->uz_flags & UMA_ZONE_PCPU);
3345 #endif
3346 	item = uma_zalloc_arg(zone, udata, flags & ~M_ZERO);
3347 	if (item == NULL)
3348 		return (NULL);
3349 	pcpu_item = zpcpu_base_to_offset(item);
3350 	if (flags & M_ZERO) {
3351 #ifdef SMP
3352 		for (i = 0; i <= mp_maxid; i++)
3353 			bzero(zpcpu_get_cpu(pcpu_item, i), zone->uz_size);
3354 #else
3355 		bzero(item, zone->uz_size);
3356 #endif
3357 	}
3358 	return (pcpu_item);
3359 }
3360 
3361 /*
3362  * A stub while both regular and pcpu cases are identical.
3363  */
3364 void
3365 uma_zfree_pcpu_arg(uma_zone_t zone, void *pcpu_item, void *udata)
3366 {
3367 	void *item;
3368 
3369 #ifdef SMP
3370 	MPASS(zone->uz_flags & UMA_ZONE_PCPU);
3371 #endif
3372 
3373         /* uma_zfree_pcu_*(..., NULL) does nothing, to match free(9). */
3374         if (pcpu_item == NULL)
3375                 return;
3376 
3377 	item = zpcpu_offset_to_base(pcpu_item);
3378 	uma_zfree_arg(zone, item, udata);
3379 }
3380 
3381 static inline void *
3382 item_ctor(uma_zone_t zone, int uz_flags, int size, void *udata, int flags,
3383     void *item)
3384 {
3385 #ifdef INVARIANTS
3386 	bool skipdbg;
3387 #endif
3388 
3389 	kasan_mark_item_valid(zone, item);
3390 
3391 #ifdef INVARIANTS
3392 	skipdbg = uma_dbg_zskip(zone, item);
3393 	if (!skipdbg && (uz_flags & UMA_ZFLAG_TRASH) != 0 &&
3394 	    zone->uz_ctor != trash_ctor)
3395 		trash_ctor(item, size, udata, flags);
3396 #endif
3397 
3398 	/* Check flags before loading ctor pointer. */
3399 	if (__predict_false((uz_flags & UMA_ZFLAG_CTORDTOR) != 0) &&
3400 	    __predict_false(zone->uz_ctor != NULL) &&
3401 	    zone->uz_ctor(item, size, udata, flags) != 0) {
3402 		counter_u64_add(zone->uz_fails, 1);
3403 		zone_free_item(zone, item, udata, SKIP_DTOR | SKIP_CNT);
3404 		return (NULL);
3405 	}
3406 #ifdef INVARIANTS
3407 	if (!skipdbg)
3408 		uma_dbg_alloc(zone, NULL, item);
3409 #endif
3410 	if (__predict_false(flags & M_ZERO))
3411 		return (memset(item, 0, size));
3412 
3413 	return (item);
3414 }
3415 
3416 static inline void
3417 item_dtor(uma_zone_t zone, void *item, int size, void *udata,
3418     enum zfreeskip skip)
3419 {
3420 #ifdef INVARIANTS
3421 	bool skipdbg;
3422 
3423 	skipdbg = uma_dbg_zskip(zone, item);
3424 	if (skip == SKIP_NONE && !skipdbg) {
3425 		if ((zone->uz_flags & UMA_ZONE_MALLOC) != 0)
3426 			uma_dbg_free(zone, udata, item);
3427 		else
3428 			uma_dbg_free(zone, NULL, item);
3429 	}
3430 #endif
3431 	if (__predict_true(skip < SKIP_DTOR)) {
3432 		if (zone->uz_dtor != NULL)
3433 			zone->uz_dtor(item, size, udata);
3434 #ifdef INVARIANTS
3435 		if (!skipdbg && (zone->uz_flags & UMA_ZFLAG_TRASH) != 0 &&
3436 		    zone->uz_dtor != trash_dtor)
3437 			trash_dtor(item, size, udata);
3438 #endif
3439 	}
3440 	kasan_mark_item_invalid(zone, item);
3441 }
3442 
3443 #ifdef NUMA
3444 static int
3445 item_domain(void *item)
3446 {
3447 	int domain;
3448 
3449 	domain = vm_phys_domain(vtophys(item));
3450 	KASSERT(domain >= 0 && domain < vm_ndomains,
3451 	    ("%s: unknown domain for item %p", __func__, item));
3452 	return (domain);
3453 }
3454 #endif
3455 
3456 #if defined(INVARIANTS) || defined(DEBUG_MEMGUARD) || defined(WITNESS)
3457 #define	UMA_ZALLOC_DEBUG
3458 static int
3459 uma_zalloc_debug(uma_zone_t zone, void **itemp, void *udata, int flags)
3460 {
3461 	int error;
3462 
3463 	error = 0;
3464 #ifdef WITNESS
3465 	if (flags & M_WAITOK) {
3466 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
3467 		    "uma_zalloc_debug: zone \"%s\"", zone->uz_name);
3468 	}
3469 #endif
3470 
3471 #ifdef INVARIANTS
3472 	KASSERT((flags & M_EXEC) == 0,
3473 	    ("uma_zalloc_debug: called with M_EXEC"));
3474 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
3475 	    ("uma_zalloc_debug: called within spinlock or critical section"));
3476 	KASSERT((zone->uz_flags & UMA_ZONE_PCPU) == 0 || (flags & M_ZERO) == 0,
3477 	    ("uma_zalloc_debug: allocating from a pcpu zone with M_ZERO"));
3478 #endif
3479 
3480 #ifdef DEBUG_MEMGUARD
3481 	if ((zone->uz_flags & UMA_ZONE_SMR) == 0 && memguard_cmp_zone(zone)) {
3482 		void *item;
3483 		item = memguard_alloc(zone->uz_size, flags);
3484 		if (item != NULL) {
3485 			error = EJUSTRETURN;
3486 			if (zone->uz_init != NULL &&
3487 			    zone->uz_init(item, zone->uz_size, flags) != 0) {
3488 				*itemp = NULL;
3489 				return (error);
3490 			}
3491 			if (zone->uz_ctor != NULL &&
3492 			    zone->uz_ctor(item, zone->uz_size, udata,
3493 			    flags) != 0) {
3494 				counter_u64_add(zone->uz_fails, 1);
3495 			    	zone->uz_fini(item, zone->uz_size);
3496 				*itemp = NULL;
3497 				return (error);
3498 			}
3499 			*itemp = item;
3500 			return (error);
3501 		}
3502 		/* This is unfortunate but should not be fatal. */
3503 	}
3504 #endif
3505 	return (error);
3506 }
3507 
3508 static int
3509 uma_zfree_debug(uma_zone_t zone, void *item, void *udata)
3510 {
3511 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
3512 	    ("uma_zfree_debug: called with spinlock or critical section held"));
3513 
3514 #ifdef DEBUG_MEMGUARD
3515 	if ((zone->uz_flags & UMA_ZONE_SMR) == 0 && is_memguard_addr(item)) {
3516 		if (zone->uz_dtor != NULL)
3517 			zone->uz_dtor(item, zone->uz_size, udata);
3518 		if (zone->uz_fini != NULL)
3519 			zone->uz_fini(item, zone->uz_size);
3520 		memguard_free(item);
3521 		return (EJUSTRETURN);
3522 	}
3523 #endif
3524 	return (0);
3525 }
3526 #endif
3527 
3528 static inline void *
3529 cache_alloc_item(uma_zone_t zone, uma_cache_t cache, uma_cache_bucket_t bucket,
3530     void *udata, int flags)
3531 {
3532 	void *item;
3533 	int size, uz_flags;
3534 
3535 	item = cache_bucket_pop(cache, bucket);
3536 	size = cache_uz_size(cache);
3537 	uz_flags = cache_uz_flags(cache);
3538 	critical_exit();
3539 	return (item_ctor(zone, uz_flags, size, udata, flags, item));
3540 }
3541 
3542 static __noinline void *
3543 cache_alloc_retry(uma_zone_t zone, uma_cache_t cache, void *udata, int flags)
3544 {
3545 	uma_cache_bucket_t bucket;
3546 	int domain;
3547 
3548 	while (cache_alloc(zone, cache, udata, flags)) {
3549 		cache = &zone->uz_cpu[curcpu];
3550 		bucket = &cache->uc_allocbucket;
3551 		if (__predict_false(bucket->ucb_cnt == 0))
3552 			continue;
3553 		return (cache_alloc_item(zone, cache, bucket, udata, flags));
3554 	}
3555 	critical_exit();
3556 
3557 	/*
3558 	 * We can not get a bucket so try to return a single item.
3559 	 */
3560 	if (zone->uz_flags & UMA_ZONE_FIRSTTOUCH)
3561 		domain = PCPU_GET(domain);
3562 	else
3563 		domain = UMA_ANYDOMAIN;
3564 	return (zone_alloc_item(zone, udata, domain, flags));
3565 }
3566 
3567 /* See uma.h */
3568 void *
3569 uma_zalloc_smr(uma_zone_t zone, int flags)
3570 {
3571 	uma_cache_bucket_t bucket;
3572 	uma_cache_t cache;
3573 
3574 #ifdef UMA_ZALLOC_DEBUG
3575 	void *item;
3576 
3577 	KASSERT((zone->uz_flags & UMA_ZONE_SMR) != 0,
3578 	    ("uma_zalloc_arg: called with non-SMR zone."));
3579 	if (uma_zalloc_debug(zone, &item, NULL, flags) == EJUSTRETURN)
3580 		return (item);
3581 #endif
3582 
3583 	critical_enter();
3584 	cache = &zone->uz_cpu[curcpu];
3585 	bucket = &cache->uc_allocbucket;
3586 	if (__predict_false(bucket->ucb_cnt == 0))
3587 		return (cache_alloc_retry(zone, cache, NULL, flags));
3588 	return (cache_alloc_item(zone, cache, bucket, NULL, flags));
3589 }
3590 
3591 /* See uma.h */
3592 void *
3593 uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
3594 {
3595 	uma_cache_bucket_t bucket;
3596 	uma_cache_t cache;
3597 
3598 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
3599 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
3600 
3601 	/* This is the fast path allocation */
3602 	CTR3(KTR_UMA, "uma_zalloc_arg zone %s(%p) flags %d", zone->uz_name,
3603 	    zone, flags);
3604 
3605 #ifdef UMA_ZALLOC_DEBUG
3606 	void *item;
3607 
3608 	KASSERT((zone->uz_flags & UMA_ZONE_SMR) == 0,
3609 	    ("uma_zalloc_arg: called with SMR zone."));
3610 	if (uma_zalloc_debug(zone, &item, udata, flags) == EJUSTRETURN)
3611 		return (item);
3612 #endif
3613 
3614 	/*
3615 	 * If possible, allocate from the per-CPU cache.  There are two
3616 	 * requirements for safe access to the per-CPU cache: (1) the thread
3617 	 * accessing the cache must not be preempted or yield during access,
3618 	 * and (2) the thread must not migrate CPUs without switching which
3619 	 * cache it accesses.  We rely on a critical section to prevent
3620 	 * preemption and migration.  We release the critical section in
3621 	 * order to acquire the zone mutex if we are unable to allocate from
3622 	 * the current cache; when we re-acquire the critical section, we
3623 	 * must detect and handle migration if it has occurred.
3624 	 */
3625 	critical_enter();
3626 	cache = &zone->uz_cpu[curcpu];
3627 	bucket = &cache->uc_allocbucket;
3628 	if (__predict_false(bucket->ucb_cnt == 0))
3629 		return (cache_alloc_retry(zone, cache, udata, flags));
3630 	return (cache_alloc_item(zone, cache, bucket, udata, flags));
3631 }
3632 
3633 /*
3634  * Replenish an alloc bucket and possibly restore an old one.  Called in
3635  * a critical section.  Returns in a critical section.
3636  *
3637  * A false return value indicates an allocation failure.
3638  * A true return value indicates success and the caller should retry.
3639  */
3640 static __noinline bool
3641 cache_alloc(uma_zone_t zone, uma_cache_t cache, void *udata, int flags)
3642 {
3643 	uma_bucket_t bucket;
3644 	int curdomain, domain;
3645 	bool new;
3646 
3647 	CRITICAL_ASSERT(curthread);
3648 
3649 	/*
3650 	 * If we have run out of items in our alloc bucket see
3651 	 * if we can switch with the free bucket.
3652 	 *
3653 	 * SMR Zones can't re-use the free bucket until the sequence has
3654 	 * expired.
3655 	 */
3656 	if ((cache_uz_flags(cache) & UMA_ZONE_SMR) == 0 &&
3657 	    cache->uc_freebucket.ucb_cnt != 0) {
3658 		cache_bucket_swap(&cache->uc_freebucket,
3659 		    &cache->uc_allocbucket);
3660 		return (true);
3661 	}
3662 
3663 	/*
3664 	 * Discard any empty allocation bucket while we hold no locks.
3665 	 */
3666 	bucket = cache_bucket_unload_alloc(cache);
3667 	critical_exit();
3668 
3669 	if (bucket != NULL) {
3670 		KASSERT(bucket->ub_cnt == 0,
3671 		    ("cache_alloc: Entered with non-empty alloc bucket."));
3672 		bucket_free(zone, bucket, udata);
3673 	}
3674 
3675 	/*
3676 	 * Attempt to retrieve the item from the per-CPU cache has failed, so
3677 	 * we must go back to the zone.  This requires the zdom lock, so we
3678 	 * must drop the critical section, then re-acquire it when we go back
3679 	 * to the cache.  Since the critical section is released, we may be
3680 	 * preempted or migrate.  As such, make sure not to maintain any
3681 	 * thread-local state specific to the cache from prior to releasing
3682 	 * the critical section.
3683 	 */
3684 	domain = PCPU_GET(domain);
3685 	if ((cache_uz_flags(cache) & UMA_ZONE_ROUNDROBIN) != 0 ||
3686 	    VM_DOMAIN_EMPTY(domain))
3687 		domain = zone_domain_highest(zone, domain);
3688 	bucket = cache_fetch_bucket(zone, cache, domain);
3689 	if (bucket == NULL && zone->uz_bucket_size != 0 && !bucketdisable) {
3690 		bucket = zone_alloc_bucket(zone, udata, domain, flags);
3691 		new = true;
3692 	} else {
3693 		new = false;
3694 	}
3695 
3696 	CTR3(KTR_UMA, "uma_zalloc: zone %s(%p) bucket zone returned %p",
3697 	    zone->uz_name, zone, bucket);
3698 	if (bucket == NULL) {
3699 		critical_enter();
3700 		return (false);
3701 	}
3702 
3703 	/*
3704 	 * See if we lost the race or were migrated.  Cache the
3705 	 * initialized bucket to make this less likely or claim
3706 	 * the memory directly.
3707 	 */
3708 	critical_enter();
3709 	cache = &zone->uz_cpu[curcpu];
3710 	if (cache->uc_allocbucket.ucb_bucket == NULL &&
3711 	    ((cache_uz_flags(cache) & UMA_ZONE_FIRSTTOUCH) == 0 ||
3712 	    (curdomain = PCPU_GET(domain)) == domain ||
3713 	    VM_DOMAIN_EMPTY(curdomain))) {
3714 		if (new)
3715 			atomic_add_long(&ZDOM_GET(zone, domain)->uzd_imax,
3716 			    bucket->ub_cnt);
3717 		cache_bucket_load_alloc(cache, bucket);
3718 		return (true);
3719 	}
3720 
3721 	/*
3722 	 * We lost the race, release this bucket and start over.
3723 	 */
3724 	critical_exit();
3725 	zone_put_bucket(zone, domain, bucket, udata, !new);
3726 	critical_enter();
3727 
3728 	return (true);
3729 }
3730 
3731 void *
3732 uma_zalloc_domain(uma_zone_t zone, void *udata, int domain, int flags)
3733 {
3734 #ifdef NUMA
3735 	uma_bucket_t bucket;
3736 	uma_zone_domain_t zdom;
3737 	void *item;
3738 #endif
3739 
3740 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
3741 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
3742 
3743 	/* This is the fast path allocation */
3744 	CTR4(KTR_UMA, "uma_zalloc_domain zone %s(%p) domain %d flags %d",
3745 	    zone->uz_name, zone, domain, flags);
3746 
3747 	if (flags & M_WAITOK) {
3748 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
3749 		    "uma_zalloc_domain: zone \"%s\"", zone->uz_name);
3750 	}
3751 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
3752 	    ("uma_zalloc_domain: called with spinlock or critical section held"));
3753 	KASSERT((zone->uz_flags & UMA_ZONE_SMR) == 0,
3754 	    ("uma_zalloc_domain: called with SMR zone."));
3755 #ifdef NUMA
3756 	KASSERT((zone->uz_flags & UMA_ZONE_FIRSTTOUCH) != 0,
3757 	    ("uma_zalloc_domain: called with non-FIRSTTOUCH zone."));
3758 
3759 	if (vm_ndomains == 1)
3760 		return (uma_zalloc_arg(zone, udata, flags));
3761 
3762 	/*
3763 	 * Try to allocate from the bucket cache before falling back to the keg.
3764 	 * We could try harder and attempt to allocate from per-CPU caches or
3765 	 * the per-domain cross-domain buckets, but the complexity is probably
3766 	 * not worth it.  It is more important that frees of previous
3767 	 * cross-domain allocations do not blow up the cache.
3768 	 */
3769 	zdom = zone_domain_lock(zone, domain);
3770 	if ((bucket = zone_fetch_bucket(zone, zdom, false)) != NULL) {
3771 		item = bucket->ub_bucket[bucket->ub_cnt - 1];
3772 #ifdef INVARIANTS
3773 		bucket->ub_bucket[bucket->ub_cnt - 1] = NULL;
3774 #endif
3775 		bucket->ub_cnt--;
3776 		zone_put_bucket(zone, domain, bucket, udata, true);
3777 		item = item_ctor(zone, zone->uz_flags, zone->uz_size, udata,
3778 		    flags, item);
3779 		if (item != NULL) {
3780 			KASSERT(item_domain(item) == domain,
3781 			    ("%s: bucket cache item %p from wrong domain",
3782 			    __func__, item));
3783 			counter_u64_add(zone->uz_allocs, 1);
3784 		}
3785 		return (item);
3786 	}
3787 	ZDOM_UNLOCK(zdom);
3788 	return (zone_alloc_item(zone, udata, domain, flags));
3789 #else
3790 	return (uma_zalloc_arg(zone, udata, flags));
3791 #endif
3792 }
3793 
3794 /*
3795  * Find a slab with some space.  Prefer slabs that are partially used over those
3796  * that are totally full.  This helps to reduce fragmentation.
3797  *
3798  * If 'rr' is 1, search all domains starting from 'domain'.  Otherwise check
3799  * only 'domain'.
3800  */
3801 static uma_slab_t
3802 keg_first_slab(uma_keg_t keg, int domain, bool rr)
3803 {
3804 	uma_domain_t dom;
3805 	uma_slab_t slab;
3806 	int start;
3807 
3808 	KASSERT(domain >= 0 && domain < vm_ndomains,
3809 	    ("keg_first_slab: domain %d out of range", domain));
3810 	KEG_LOCK_ASSERT(keg, domain);
3811 
3812 	slab = NULL;
3813 	start = domain;
3814 	do {
3815 		dom = &keg->uk_domain[domain];
3816 		if ((slab = LIST_FIRST(&dom->ud_part_slab)) != NULL)
3817 			return (slab);
3818 		if ((slab = LIST_FIRST(&dom->ud_free_slab)) != NULL) {
3819 			LIST_REMOVE(slab, us_link);
3820 			dom->ud_free_slabs--;
3821 			LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
3822 			return (slab);
3823 		}
3824 		if (rr)
3825 			domain = (domain + 1) % vm_ndomains;
3826 	} while (domain != start);
3827 
3828 	return (NULL);
3829 }
3830 
3831 /*
3832  * Fetch an existing slab from a free or partial list.  Returns with the
3833  * keg domain lock held if a slab was found or unlocked if not.
3834  */
3835 static uma_slab_t
3836 keg_fetch_free_slab(uma_keg_t keg, int domain, bool rr, int flags)
3837 {
3838 	uma_slab_t slab;
3839 	uint32_t reserve;
3840 
3841 	/* HASH has a single free list. */
3842 	if ((keg->uk_flags & UMA_ZFLAG_HASH) != 0)
3843 		domain = 0;
3844 
3845 	KEG_LOCK(keg, domain);
3846 	reserve = (flags & M_USE_RESERVE) != 0 ? 0 : keg->uk_reserve;
3847 	if (keg->uk_domain[domain].ud_free_items <= reserve ||
3848 	    (slab = keg_first_slab(keg, domain, rr)) == NULL) {
3849 		KEG_UNLOCK(keg, domain);
3850 		return (NULL);
3851 	}
3852 	return (slab);
3853 }
3854 
3855 static uma_slab_t
3856 keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int rdomain, const int flags)
3857 {
3858 	struct vm_domainset_iter di;
3859 	uma_slab_t slab;
3860 	int aflags, domain;
3861 	bool rr;
3862 
3863 restart:
3864 	/*
3865 	 * Use the keg's policy if upper layers haven't already specified a
3866 	 * domain (as happens with first-touch zones).
3867 	 *
3868 	 * To avoid races we run the iterator with the keg lock held, but that
3869 	 * means that we cannot allow the vm_domainset layer to sleep.  Thus,
3870 	 * clear M_WAITOK and handle low memory conditions locally.
3871 	 */
3872 	rr = rdomain == UMA_ANYDOMAIN;
3873 	if (rr) {
3874 		aflags = (flags & ~M_WAITOK) | M_NOWAIT;
3875 		vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain,
3876 		    &aflags);
3877 	} else {
3878 		aflags = flags;
3879 		domain = rdomain;
3880 	}
3881 
3882 	for (;;) {
3883 		slab = keg_fetch_free_slab(keg, domain, rr, flags);
3884 		if (slab != NULL)
3885 			return (slab);
3886 
3887 		/*
3888 		 * M_NOVM means don't ask at all!
3889 		 */
3890 		if (flags & M_NOVM)
3891 			break;
3892 
3893 		slab = keg_alloc_slab(keg, zone, domain, flags, aflags);
3894 		if (slab != NULL)
3895 			return (slab);
3896 		if (!rr && (flags & M_WAITOK) == 0)
3897 			break;
3898 		if (rr && vm_domainset_iter_policy(&di, &domain) != 0) {
3899 			if ((flags & M_WAITOK) != 0) {
3900 				vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask, 0);
3901 				goto restart;
3902 			}
3903 			break;
3904 		}
3905 	}
3906 
3907 	/*
3908 	 * We might not have been able to get a slab but another cpu
3909 	 * could have while we were unlocked.  Check again before we
3910 	 * fail.
3911 	 */
3912 	if ((slab = keg_fetch_free_slab(keg, domain, rr, flags)) != NULL)
3913 		return (slab);
3914 
3915 	return (NULL);
3916 }
3917 
3918 static void *
3919 slab_alloc_item(uma_keg_t keg, uma_slab_t slab)
3920 {
3921 	uma_domain_t dom;
3922 	void *item;
3923 	int freei;
3924 
3925 	KEG_LOCK_ASSERT(keg, slab->us_domain);
3926 
3927 	dom = &keg->uk_domain[slab->us_domain];
3928 	freei = BIT_FFS(keg->uk_ipers, &slab->us_free) - 1;
3929 	BIT_CLR(keg->uk_ipers, freei, &slab->us_free);
3930 	item = slab_item(slab, keg, freei);
3931 	slab->us_freecount--;
3932 	dom->ud_free_items--;
3933 
3934 	/*
3935 	 * Move this slab to the full list.  It must be on the partial list, so
3936 	 * we do not need to update the free slab count.  In particular,
3937 	 * keg_fetch_slab() always returns slabs on the partial list.
3938 	 */
3939 	if (slab->us_freecount == 0) {
3940 		LIST_REMOVE(slab, us_link);
3941 		LIST_INSERT_HEAD(&dom->ud_full_slab, slab, us_link);
3942 	}
3943 
3944 	return (item);
3945 }
3946 
3947 static int
3948 zone_import(void *arg, void **bucket, int max, int domain, int flags)
3949 {
3950 	uma_domain_t dom;
3951 	uma_zone_t zone;
3952 	uma_slab_t slab;
3953 	uma_keg_t keg;
3954 #ifdef NUMA
3955 	int stripe;
3956 #endif
3957 	int i;
3958 
3959 	zone = arg;
3960 	slab = NULL;
3961 	keg = zone->uz_keg;
3962 	/* Try to keep the buckets totally full */
3963 	for (i = 0; i < max; ) {
3964 		if ((slab = keg_fetch_slab(keg, zone, domain, flags)) == NULL)
3965 			break;
3966 #ifdef NUMA
3967 		stripe = howmany(max, vm_ndomains);
3968 #endif
3969 		dom = &keg->uk_domain[slab->us_domain];
3970 		do {
3971 			bucket[i++] = slab_alloc_item(keg, slab);
3972 			if (dom->ud_free_items <= keg->uk_reserve) {
3973 				/*
3974 				 * Avoid depleting the reserve after a
3975 				 * successful item allocation, even if
3976 				 * M_USE_RESERVE is specified.
3977 				 */
3978 				KEG_UNLOCK(keg, slab->us_domain);
3979 				goto out;
3980 			}
3981 #ifdef NUMA
3982 			/*
3983 			 * If the zone is striped we pick a new slab for every
3984 			 * N allocations.  Eliminating this conditional will
3985 			 * instead pick a new domain for each bucket rather
3986 			 * than stripe within each bucket.  The current option
3987 			 * produces more fragmentation and requires more cpu
3988 			 * time but yields better distribution.
3989 			 */
3990 			if ((zone->uz_flags & UMA_ZONE_ROUNDROBIN) != 0 &&
3991 			    vm_ndomains > 1 && --stripe == 0)
3992 				break;
3993 #endif
3994 		} while (slab->us_freecount != 0 && i < max);
3995 		KEG_UNLOCK(keg, slab->us_domain);
3996 
3997 		/* Don't block if we allocated any successfully. */
3998 		flags &= ~M_WAITOK;
3999 		flags |= M_NOWAIT;
4000 	}
4001 out:
4002 	return i;
4003 }
4004 
4005 static int
4006 zone_alloc_limit_hard(uma_zone_t zone, int count, int flags)
4007 {
4008 	uint64_t old, new, total, max;
4009 
4010 	/*
4011 	 * The hard case.  We're going to sleep because there were existing
4012 	 * sleepers or because we ran out of items.  This routine enforces
4013 	 * fairness by keeping fifo order.
4014 	 *
4015 	 * First release our ill gotten gains and make some noise.
4016 	 */
4017 	for (;;) {
4018 		zone_free_limit(zone, count);
4019 		zone_log_warning(zone);
4020 		zone_maxaction(zone);
4021 		if (flags & M_NOWAIT)
4022 			return (0);
4023 
4024 		/*
4025 		 * We need to allocate an item or set ourself as a sleeper
4026 		 * while the sleepq lock is held to avoid wakeup races.  This
4027 		 * is essentially a home rolled semaphore.
4028 		 */
4029 		sleepq_lock(&zone->uz_max_items);
4030 		old = zone->uz_items;
4031 		do {
4032 			MPASS(UZ_ITEMS_SLEEPERS(old) < UZ_ITEMS_SLEEPERS_MAX);
4033 			/* Cache the max since we will evaluate twice. */
4034 			max = zone->uz_max_items;
4035 			if (UZ_ITEMS_SLEEPERS(old) != 0 ||
4036 			    UZ_ITEMS_COUNT(old) >= max)
4037 				new = old + UZ_ITEMS_SLEEPER;
4038 			else
4039 				new = old + MIN(count, max - old);
4040 		} while (atomic_fcmpset_64(&zone->uz_items, &old, new) == 0);
4041 
4042 		/* We may have successfully allocated under the sleepq lock. */
4043 		if (UZ_ITEMS_SLEEPERS(new) == 0) {
4044 			sleepq_release(&zone->uz_max_items);
4045 			return (new - old);
4046 		}
4047 
4048 		/*
4049 		 * This is in a different cacheline from uz_items so that we
4050 		 * don't constantly invalidate the fastpath cacheline when we
4051 		 * adjust item counts.  This could be limited to toggling on
4052 		 * transitions.
4053 		 */
4054 		atomic_add_32(&zone->uz_sleepers, 1);
4055 		atomic_add_64(&zone->uz_sleeps, 1);
4056 
4057 		/*
4058 		 * We have added ourselves as a sleeper.  The sleepq lock
4059 		 * protects us from wakeup races.  Sleep now and then retry.
4060 		 */
4061 		sleepq_add(&zone->uz_max_items, NULL, "zonelimit", 0, 0);
4062 		sleepq_wait(&zone->uz_max_items, PVM);
4063 
4064 		/*
4065 		 * After wakeup, remove ourselves as a sleeper and try
4066 		 * again.  We no longer have the sleepq lock for protection.
4067 		 *
4068 		 * Subract ourselves as a sleeper while attempting to add
4069 		 * our count.
4070 		 */
4071 		atomic_subtract_32(&zone->uz_sleepers, 1);
4072 		old = atomic_fetchadd_64(&zone->uz_items,
4073 		    -(UZ_ITEMS_SLEEPER - count));
4074 		/* We're no longer a sleeper. */
4075 		old -= UZ_ITEMS_SLEEPER;
4076 
4077 		/*
4078 		 * If we're still at the limit, restart.  Notably do not
4079 		 * block on other sleepers.  Cache the max value to protect
4080 		 * against changes via sysctl.
4081 		 */
4082 		total = UZ_ITEMS_COUNT(old);
4083 		max = zone->uz_max_items;
4084 		if (total >= max)
4085 			continue;
4086 		/* Truncate if necessary, otherwise wake other sleepers. */
4087 		if (total + count > max) {
4088 			zone_free_limit(zone, total + count - max);
4089 			count = max - total;
4090 		} else if (total + count < max && UZ_ITEMS_SLEEPERS(old) != 0)
4091 			wakeup_one(&zone->uz_max_items);
4092 
4093 		return (count);
4094 	}
4095 }
4096 
4097 /*
4098  * Allocate 'count' items from our max_items limit.  Returns the number
4099  * available.  If M_NOWAIT is not specified it will sleep until at least
4100  * one item can be allocated.
4101  */
4102 static int
4103 zone_alloc_limit(uma_zone_t zone, int count, int flags)
4104 {
4105 	uint64_t old;
4106 	uint64_t max;
4107 
4108 	max = zone->uz_max_items;
4109 	MPASS(max > 0);
4110 
4111 	/*
4112 	 * We expect normal allocations to succeed with a simple
4113 	 * fetchadd.
4114 	 */
4115 	old = atomic_fetchadd_64(&zone->uz_items, count);
4116 	if (__predict_true(old + count <= max))
4117 		return (count);
4118 
4119 	/*
4120 	 * If we had some items and no sleepers just return the
4121 	 * truncated value.  We have to release the excess space
4122 	 * though because that may wake sleepers who weren't woken
4123 	 * because we were temporarily over the limit.
4124 	 */
4125 	if (old < max) {
4126 		zone_free_limit(zone, (old + count) - max);
4127 		return (max - old);
4128 	}
4129 	return (zone_alloc_limit_hard(zone, count, flags));
4130 }
4131 
4132 /*
4133  * Free a number of items back to the limit.
4134  */
4135 static void
4136 zone_free_limit(uma_zone_t zone, int count)
4137 {
4138 	uint64_t old;
4139 
4140 	MPASS(count > 0);
4141 
4142 	/*
4143 	 * In the common case we either have no sleepers or
4144 	 * are still over the limit and can just return.
4145 	 */
4146 	old = atomic_fetchadd_64(&zone->uz_items, -count);
4147 	if (__predict_true(UZ_ITEMS_SLEEPERS(old) == 0 ||
4148 	   UZ_ITEMS_COUNT(old) - count >= zone->uz_max_items))
4149 		return;
4150 
4151 	/*
4152 	 * Moderate the rate of wakeups.  Sleepers will continue
4153 	 * to generate wakeups if necessary.
4154 	 */
4155 	wakeup_one(&zone->uz_max_items);
4156 }
4157 
4158 static uma_bucket_t
4159 zone_alloc_bucket(uma_zone_t zone, void *udata, int domain, int flags)
4160 {
4161 	uma_bucket_t bucket;
4162 	int error, maxbucket, cnt;
4163 
4164 	CTR3(KTR_UMA, "zone_alloc_bucket zone %s(%p) domain %d", zone->uz_name,
4165 	    zone, domain);
4166 
4167 	/* Avoid allocs targeting empty domains. */
4168 	if (domain != UMA_ANYDOMAIN && VM_DOMAIN_EMPTY(domain))
4169 		domain = UMA_ANYDOMAIN;
4170 	else if ((zone->uz_flags & UMA_ZONE_ROUNDROBIN) != 0)
4171 		domain = UMA_ANYDOMAIN;
4172 
4173 	if (zone->uz_max_items > 0)
4174 		maxbucket = zone_alloc_limit(zone, zone->uz_bucket_size,
4175 		    M_NOWAIT);
4176 	else
4177 		maxbucket = zone->uz_bucket_size;
4178 	if (maxbucket == 0)
4179 		return (false);
4180 
4181 	/* Don't wait for buckets, preserve caller's NOVM setting. */
4182 	bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM));
4183 	if (bucket == NULL) {
4184 		cnt = 0;
4185 		goto out;
4186 	}
4187 
4188 	bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket,
4189 	    MIN(maxbucket, bucket->ub_entries), domain, flags);
4190 
4191 	/*
4192 	 * Initialize the memory if necessary.
4193 	 */
4194 	if (bucket->ub_cnt != 0 && zone->uz_init != NULL) {
4195 		int i;
4196 
4197 		for (i = 0; i < bucket->ub_cnt; i++) {
4198 			kasan_mark_item_valid(zone, bucket->ub_bucket[i]);
4199 			error = zone->uz_init(bucket->ub_bucket[i],
4200 			    zone->uz_size, flags);
4201 			kasan_mark_item_invalid(zone, bucket->ub_bucket[i]);
4202 			if (error != 0)
4203 				break;
4204 		}
4205 
4206 		/*
4207 		 * If we couldn't initialize the whole bucket, put the
4208 		 * rest back onto the freelist.
4209 		 */
4210 		if (i != bucket->ub_cnt) {
4211 			zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i],
4212 			    bucket->ub_cnt - i);
4213 #ifdef INVARIANTS
4214 			bzero(&bucket->ub_bucket[i],
4215 			    sizeof(void *) * (bucket->ub_cnt - i));
4216 #endif
4217 			bucket->ub_cnt = i;
4218 		}
4219 	}
4220 
4221 	cnt = bucket->ub_cnt;
4222 	if (bucket->ub_cnt == 0) {
4223 		bucket_free(zone, bucket, udata);
4224 		counter_u64_add(zone->uz_fails, 1);
4225 		bucket = NULL;
4226 	}
4227 out:
4228 	if (zone->uz_max_items > 0 && cnt < maxbucket)
4229 		zone_free_limit(zone, maxbucket - cnt);
4230 
4231 	return (bucket);
4232 }
4233 
4234 /*
4235  * Allocates a single item from a zone.
4236  *
4237  * Arguments
4238  *	zone   The zone to alloc for.
4239  *	udata  The data to be passed to the constructor.
4240  *	domain The domain to allocate from or UMA_ANYDOMAIN.
4241  *	flags  M_WAITOK, M_NOWAIT, M_ZERO.
4242  *
4243  * Returns
4244  *	NULL if there is no memory and M_NOWAIT is set
4245  *	An item if successful
4246  */
4247 
4248 static void *
4249 zone_alloc_item(uma_zone_t zone, void *udata, int domain, int flags)
4250 {
4251 	void *item;
4252 
4253 	if (zone->uz_max_items > 0 && zone_alloc_limit(zone, 1, flags) == 0) {
4254 		counter_u64_add(zone->uz_fails, 1);
4255 		return (NULL);
4256 	}
4257 
4258 	/* Avoid allocs targeting empty domains. */
4259 	if (domain != UMA_ANYDOMAIN && VM_DOMAIN_EMPTY(domain))
4260 		domain = UMA_ANYDOMAIN;
4261 
4262 	if (zone->uz_import(zone->uz_arg, &item, 1, domain, flags) != 1)
4263 		goto fail_cnt;
4264 
4265 	/*
4266 	 * We have to call both the zone's init (not the keg's init)
4267 	 * and the zone's ctor.  This is because the item is going from
4268 	 * a keg slab directly to the user, and the user is expecting it
4269 	 * to be both zone-init'd as well as zone-ctor'd.
4270 	 */
4271 	if (zone->uz_init != NULL) {
4272 		int error;
4273 
4274 		kasan_mark_item_valid(zone, item);
4275 		error = zone->uz_init(item, zone->uz_size, flags);
4276 		kasan_mark_item_invalid(zone, item);
4277 		if (error != 0) {
4278 			zone_free_item(zone, item, udata, SKIP_FINI | SKIP_CNT);
4279 			goto fail_cnt;
4280 		}
4281 	}
4282 	item = item_ctor(zone, zone->uz_flags, zone->uz_size, udata, flags,
4283 	    item);
4284 	if (item == NULL)
4285 		goto fail;
4286 
4287 	counter_u64_add(zone->uz_allocs, 1);
4288 	CTR3(KTR_UMA, "zone_alloc_item item %p from %s(%p)", item,
4289 	    zone->uz_name, zone);
4290 
4291 	return (item);
4292 
4293 fail_cnt:
4294 	counter_u64_add(zone->uz_fails, 1);
4295 fail:
4296 	if (zone->uz_max_items > 0)
4297 		zone_free_limit(zone, 1);
4298 	CTR2(KTR_UMA, "zone_alloc_item failed from %s(%p)",
4299 	    zone->uz_name, zone);
4300 
4301 	return (NULL);
4302 }
4303 
4304 /* See uma.h */
4305 void
4306 uma_zfree_smr(uma_zone_t zone, void *item)
4307 {
4308 	uma_cache_t cache;
4309 	uma_cache_bucket_t bucket;
4310 	int itemdomain, uz_flags;
4311 
4312 #ifdef UMA_ZALLOC_DEBUG
4313 	KASSERT((zone->uz_flags & UMA_ZONE_SMR) != 0,
4314 	    ("uma_zfree_smr: called with non-SMR zone."));
4315 	KASSERT(item != NULL, ("uma_zfree_smr: Called with NULL pointer."));
4316 	SMR_ASSERT_NOT_ENTERED(zone->uz_smr);
4317 	if (uma_zfree_debug(zone, item, NULL) == EJUSTRETURN)
4318 		return;
4319 #endif
4320 	cache = &zone->uz_cpu[curcpu];
4321 	uz_flags = cache_uz_flags(cache);
4322 	itemdomain = 0;
4323 #ifdef NUMA
4324 	if ((uz_flags & UMA_ZONE_FIRSTTOUCH) != 0)
4325 		itemdomain = item_domain(item);
4326 #endif
4327 	critical_enter();
4328 	do {
4329 		cache = &zone->uz_cpu[curcpu];
4330 		/* SMR Zones must free to the free bucket. */
4331 		bucket = &cache->uc_freebucket;
4332 #ifdef NUMA
4333 		if ((uz_flags & UMA_ZONE_FIRSTTOUCH) != 0 &&
4334 		    PCPU_GET(domain) != itemdomain) {
4335 			bucket = &cache->uc_crossbucket;
4336 		}
4337 #endif
4338 		if (__predict_true(bucket->ucb_cnt < bucket->ucb_entries)) {
4339 			cache_bucket_push(cache, bucket, item);
4340 			critical_exit();
4341 			return;
4342 		}
4343 	} while (cache_free(zone, cache, NULL, item, itemdomain));
4344 	critical_exit();
4345 
4346 	/*
4347 	 * If nothing else caught this, we'll just do an internal free.
4348 	 */
4349 	zone_free_item(zone, item, NULL, SKIP_NONE);
4350 }
4351 
4352 /* See uma.h */
4353 void
4354 uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
4355 {
4356 	uma_cache_t cache;
4357 	uma_cache_bucket_t bucket;
4358 	int itemdomain, uz_flags;
4359 
4360 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
4361 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
4362 
4363 	CTR2(KTR_UMA, "uma_zfree_arg zone %s(%p)", zone->uz_name, zone);
4364 
4365 #ifdef UMA_ZALLOC_DEBUG
4366 	KASSERT((zone->uz_flags & UMA_ZONE_SMR) == 0,
4367 	    ("uma_zfree_arg: called with SMR zone."));
4368 	if (uma_zfree_debug(zone, item, udata) == EJUSTRETURN)
4369 		return;
4370 #endif
4371         /* uma_zfree(..., NULL) does nothing, to match free(9). */
4372         if (item == NULL)
4373                 return;
4374 
4375 	/*
4376 	 * We are accessing the per-cpu cache without a critical section to
4377 	 * fetch size and flags.  This is acceptable, if we are preempted we
4378 	 * will simply read another cpu's line.
4379 	 */
4380 	cache = &zone->uz_cpu[curcpu];
4381 	uz_flags = cache_uz_flags(cache);
4382 	if (UMA_ALWAYS_CTORDTOR ||
4383 	    __predict_false((uz_flags & UMA_ZFLAG_CTORDTOR) != 0))
4384 		item_dtor(zone, item, cache_uz_size(cache), udata, SKIP_NONE);
4385 
4386 	/*
4387 	 * The race here is acceptable.  If we miss it we'll just have to wait
4388 	 * a little longer for the limits to be reset.
4389 	 */
4390 	if (__predict_false(uz_flags & UMA_ZFLAG_LIMIT)) {
4391 		if (atomic_load_32(&zone->uz_sleepers) > 0)
4392 			goto zfree_item;
4393 	}
4394 
4395 	/*
4396 	 * If possible, free to the per-CPU cache.  There are two
4397 	 * requirements for safe access to the per-CPU cache: (1) the thread
4398 	 * accessing the cache must not be preempted or yield during access,
4399 	 * and (2) the thread must not migrate CPUs without switching which
4400 	 * cache it accesses.  We rely on a critical section to prevent
4401 	 * preemption and migration.  We release the critical section in
4402 	 * order to acquire the zone mutex if we are unable to free to the
4403 	 * current cache; when we re-acquire the critical section, we must
4404 	 * detect and handle migration if it has occurred.
4405 	 */
4406 	itemdomain = 0;
4407 #ifdef NUMA
4408 	if ((uz_flags & UMA_ZONE_FIRSTTOUCH) != 0)
4409 		itemdomain = item_domain(item);
4410 #endif
4411 	critical_enter();
4412 	do {
4413 		cache = &zone->uz_cpu[curcpu];
4414 		/*
4415 		 * Try to free into the allocbucket first to give LIFO
4416 		 * ordering for cache-hot datastructures.  Spill over
4417 		 * into the freebucket if necessary.  Alloc will swap
4418 		 * them if one runs dry.
4419 		 */
4420 		bucket = &cache->uc_allocbucket;
4421 #ifdef NUMA
4422 		if ((uz_flags & UMA_ZONE_FIRSTTOUCH) != 0 &&
4423 		    PCPU_GET(domain) != itemdomain) {
4424 			bucket = &cache->uc_crossbucket;
4425 		} else
4426 #endif
4427 		if (bucket->ucb_cnt == bucket->ucb_entries &&
4428 		   cache->uc_freebucket.ucb_cnt <
4429 		   cache->uc_freebucket.ucb_entries)
4430 			cache_bucket_swap(&cache->uc_freebucket,
4431 			    &cache->uc_allocbucket);
4432 		if (__predict_true(bucket->ucb_cnt < bucket->ucb_entries)) {
4433 			cache_bucket_push(cache, bucket, item);
4434 			critical_exit();
4435 			return;
4436 		}
4437 	} while (cache_free(zone, cache, udata, item, itemdomain));
4438 	critical_exit();
4439 
4440 	/*
4441 	 * If nothing else caught this, we'll just do an internal free.
4442 	 */
4443 zfree_item:
4444 	zone_free_item(zone, item, udata, SKIP_DTOR);
4445 }
4446 
4447 #ifdef NUMA
4448 /*
4449  * sort crossdomain free buckets to domain correct buckets and cache
4450  * them.
4451  */
4452 static void
4453 zone_free_cross(uma_zone_t zone, uma_bucket_t bucket, void *udata)
4454 {
4455 	struct uma_bucketlist emptybuckets, fullbuckets;
4456 	uma_zone_domain_t zdom;
4457 	uma_bucket_t b;
4458 	smr_seq_t seq;
4459 	void *item;
4460 	int domain;
4461 
4462 	CTR3(KTR_UMA,
4463 	    "uma_zfree: zone %s(%p) draining cross bucket %p",
4464 	    zone->uz_name, zone, bucket);
4465 
4466 	/*
4467 	 * It is possible for buckets to arrive here out of order so we fetch
4468 	 * the current smr seq rather than accepting the bucket's.
4469 	 */
4470 	seq = SMR_SEQ_INVALID;
4471 	if ((zone->uz_flags & UMA_ZONE_SMR) != 0)
4472 		seq = smr_advance(zone->uz_smr);
4473 
4474 	/*
4475 	 * To avoid having ndomain * ndomain buckets for sorting we have a
4476 	 * lock on the current crossfree bucket.  A full matrix with
4477 	 * per-domain locking could be used if necessary.
4478 	 */
4479 	STAILQ_INIT(&emptybuckets);
4480 	STAILQ_INIT(&fullbuckets);
4481 	ZONE_CROSS_LOCK(zone);
4482 	for (; bucket->ub_cnt > 0; bucket->ub_cnt--) {
4483 		item = bucket->ub_bucket[bucket->ub_cnt - 1];
4484 		domain = item_domain(item);
4485 		zdom = ZDOM_GET(zone, domain);
4486 		if (zdom->uzd_cross == NULL) {
4487 			if ((b = STAILQ_FIRST(&emptybuckets)) != NULL) {
4488 				STAILQ_REMOVE_HEAD(&emptybuckets, ub_link);
4489 				zdom->uzd_cross = b;
4490 			} else {
4491 				/*
4492 				 * Avoid allocating a bucket with the cross lock
4493 				 * held, since allocation can trigger a
4494 				 * cross-domain free and bucket zones may
4495 				 * allocate from each other.
4496 				 */
4497 				ZONE_CROSS_UNLOCK(zone);
4498 				b = bucket_alloc(zone, udata, M_NOWAIT);
4499 				if (b == NULL)
4500 					goto out;
4501 				ZONE_CROSS_LOCK(zone);
4502 				if (zdom->uzd_cross != NULL) {
4503 					STAILQ_INSERT_HEAD(&emptybuckets, b,
4504 					    ub_link);
4505 				} else {
4506 					zdom->uzd_cross = b;
4507 				}
4508 			}
4509 		}
4510 		b = zdom->uzd_cross;
4511 		b->ub_bucket[b->ub_cnt++] = item;
4512 		b->ub_seq = seq;
4513 		if (b->ub_cnt == b->ub_entries) {
4514 			STAILQ_INSERT_HEAD(&fullbuckets, b, ub_link);
4515 			if ((b = STAILQ_FIRST(&emptybuckets)) != NULL)
4516 				STAILQ_REMOVE_HEAD(&emptybuckets, ub_link);
4517 			zdom->uzd_cross = b;
4518 		}
4519 	}
4520 	ZONE_CROSS_UNLOCK(zone);
4521 out:
4522 	if (bucket->ub_cnt == 0)
4523 		bucket->ub_seq = SMR_SEQ_INVALID;
4524 	bucket_free(zone, bucket, udata);
4525 
4526 	while ((b = STAILQ_FIRST(&emptybuckets)) != NULL) {
4527 		STAILQ_REMOVE_HEAD(&emptybuckets, ub_link);
4528 		bucket_free(zone, b, udata);
4529 	}
4530 	while ((b = STAILQ_FIRST(&fullbuckets)) != NULL) {
4531 		STAILQ_REMOVE_HEAD(&fullbuckets, ub_link);
4532 		domain = item_domain(b->ub_bucket[0]);
4533 		zone_put_bucket(zone, domain, b, udata, true);
4534 	}
4535 }
4536 #endif
4537 
4538 static void
4539 zone_free_bucket(uma_zone_t zone, uma_bucket_t bucket, void *udata,
4540     int itemdomain, bool ws)
4541 {
4542 
4543 #ifdef NUMA
4544 	/*
4545 	 * Buckets coming from the wrong domain will be entirely for the
4546 	 * only other domain on two domain systems.  In this case we can
4547 	 * simply cache them.  Otherwise we need to sort them back to
4548 	 * correct domains.
4549 	 */
4550 	if ((zone->uz_flags & UMA_ZONE_FIRSTTOUCH) != 0 &&
4551 	    vm_ndomains > 2 && PCPU_GET(domain) != itemdomain) {
4552 		zone_free_cross(zone, bucket, udata);
4553 		return;
4554 	}
4555 #endif
4556 
4557 	/*
4558 	 * Attempt to save the bucket in the zone's domain bucket cache.
4559 	 */
4560 	CTR3(KTR_UMA,
4561 	    "uma_zfree: zone %s(%p) putting bucket %p on free list",
4562 	    zone->uz_name, zone, bucket);
4563 	/* ub_cnt is pointing to the last free item */
4564 	if ((zone->uz_flags & UMA_ZONE_ROUNDROBIN) != 0)
4565 		itemdomain = zone_domain_lowest(zone, itemdomain);
4566 	zone_put_bucket(zone, itemdomain, bucket, udata, ws);
4567 }
4568 
4569 /*
4570  * Populate a free or cross bucket for the current cpu cache.  Free any
4571  * existing full bucket either to the zone cache or back to the slab layer.
4572  *
4573  * Enters and returns in a critical section.  false return indicates that
4574  * we can not satisfy this free in the cache layer.  true indicates that
4575  * the caller should retry.
4576  */
4577 static __noinline bool
4578 cache_free(uma_zone_t zone, uma_cache_t cache, void *udata, void *item,
4579     int itemdomain)
4580 {
4581 	uma_cache_bucket_t cbucket;
4582 	uma_bucket_t newbucket, bucket;
4583 
4584 	CRITICAL_ASSERT(curthread);
4585 
4586 	if (zone->uz_bucket_size == 0)
4587 		return false;
4588 
4589 	cache = &zone->uz_cpu[curcpu];
4590 	newbucket = NULL;
4591 
4592 	/*
4593 	 * FIRSTTOUCH domains need to free to the correct zdom.  When
4594 	 * enabled this is the zdom of the item.   The bucket is the
4595 	 * cross bucket if the current domain and itemdomain do not match.
4596 	 */
4597 	cbucket = &cache->uc_freebucket;
4598 #ifdef NUMA
4599 	if ((cache_uz_flags(cache) & UMA_ZONE_FIRSTTOUCH) != 0) {
4600 		if (PCPU_GET(domain) != itemdomain) {
4601 			cbucket = &cache->uc_crossbucket;
4602 			if (cbucket->ucb_cnt != 0)
4603 				counter_u64_add(zone->uz_xdomain,
4604 				    cbucket->ucb_cnt);
4605 		}
4606 	}
4607 #endif
4608 	bucket = cache_bucket_unload(cbucket);
4609 	KASSERT(bucket == NULL || bucket->ub_cnt == bucket->ub_entries,
4610 	    ("cache_free: Entered with non-full free bucket."));
4611 
4612 	/* We are no longer associated with this CPU. */
4613 	critical_exit();
4614 
4615 	/*
4616 	 * Don't let SMR zones operate without a free bucket.  Force
4617 	 * a synchronize and re-use this one.  We will only degrade
4618 	 * to a synchronize every bucket_size items rather than every
4619 	 * item if we fail to allocate a bucket.
4620 	 */
4621 	if ((zone->uz_flags & UMA_ZONE_SMR) != 0) {
4622 		if (bucket != NULL)
4623 			bucket->ub_seq = smr_advance(zone->uz_smr);
4624 		newbucket = bucket_alloc(zone, udata, M_NOWAIT);
4625 		if (newbucket == NULL && bucket != NULL) {
4626 			bucket_drain(zone, bucket);
4627 			newbucket = bucket;
4628 			bucket = NULL;
4629 		}
4630 	} else if (!bucketdisable)
4631 		newbucket = bucket_alloc(zone, udata, M_NOWAIT);
4632 
4633 	if (bucket != NULL)
4634 		zone_free_bucket(zone, bucket, udata, itemdomain, true);
4635 
4636 	critical_enter();
4637 	if ((bucket = newbucket) == NULL)
4638 		return (false);
4639 	cache = &zone->uz_cpu[curcpu];
4640 #ifdef NUMA
4641 	/*
4642 	 * Check to see if we should be populating the cross bucket.  If it
4643 	 * is already populated we will fall through and attempt to populate
4644 	 * the free bucket.
4645 	 */
4646 	if ((cache_uz_flags(cache) & UMA_ZONE_FIRSTTOUCH) != 0) {
4647 		if (PCPU_GET(domain) != itemdomain &&
4648 		    cache->uc_crossbucket.ucb_bucket == NULL) {
4649 			cache_bucket_load_cross(cache, bucket);
4650 			return (true);
4651 		}
4652 	}
4653 #endif
4654 	/*
4655 	 * We may have lost the race to fill the bucket or switched CPUs.
4656 	 */
4657 	if (cache->uc_freebucket.ucb_bucket != NULL) {
4658 		critical_exit();
4659 		bucket_free(zone, bucket, udata);
4660 		critical_enter();
4661 	} else
4662 		cache_bucket_load_free(cache, bucket);
4663 
4664 	return (true);
4665 }
4666 
4667 static void
4668 slab_free_item(uma_zone_t zone, uma_slab_t slab, void *item)
4669 {
4670 	uma_keg_t keg;
4671 	uma_domain_t dom;
4672 	int freei;
4673 
4674 	keg = zone->uz_keg;
4675 	KEG_LOCK_ASSERT(keg, slab->us_domain);
4676 
4677 	/* Do we need to remove from any lists? */
4678 	dom = &keg->uk_domain[slab->us_domain];
4679 	if (slab->us_freecount + 1 == keg->uk_ipers) {
4680 		LIST_REMOVE(slab, us_link);
4681 		LIST_INSERT_HEAD(&dom->ud_free_slab, slab, us_link);
4682 		dom->ud_free_slabs++;
4683 	} else if (slab->us_freecount == 0) {
4684 		LIST_REMOVE(slab, us_link);
4685 		LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
4686 	}
4687 
4688 	/* Slab management. */
4689 	freei = slab_item_index(slab, keg, item);
4690 	BIT_SET(keg->uk_ipers, freei, &slab->us_free);
4691 	slab->us_freecount++;
4692 
4693 	/* Keg statistics. */
4694 	dom->ud_free_items++;
4695 }
4696 
4697 static void
4698 zone_release(void *arg, void **bucket, int cnt)
4699 {
4700 	struct mtx *lock;
4701 	uma_zone_t zone;
4702 	uma_slab_t slab;
4703 	uma_keg_t keg;
4704 	uint8_t *mem;
4705 	void *item;
4706 	int i;
4707 
4708 	zone = arg;
4709 	keg = zone->uz_keg;
4710 	lock = NULL;
4711 	if (__predict_false((zone->uz_flags & UMA_ZFLAG_HASH) != 0))
4712 		lock = KEG_LOCK(keg, 0);
4713 	for (i = 0; i < cnt; i++) {
4714 		item = bucket[i];
4715 		if (__predict_true((zone->uz_flags & UMA_ZFLAG_VTOSLAB) != 0)) {
4716 			slab = vtoslab((vm_offset_t)item);
4717 		} else {
4718 			mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
4719 			if ((zone->uz_flags & UMA_ZFLAG_HASH) != 0)
4720 				slab = hash_sfind(&keg->uk_hash, mem);
4721 			else
4722 				slab = (uma_slab_t)(mem + keg->uk_pgoff);
4723 		}
4724 		if (lock != KEG_LOCKPTR(keg, slab->us_domain)) {
4725 			if (lock != NULL)
4726 				mtx_unlock(lock);
4727 			lock = KEG_LOCK(keg, slab->us_domain);
4728 		}
4729 		slab_free_item(zone, slab, item);
4730 	}
4731 	if (lock != NULL)
4732 		mtx_unlock(lock);
4733 }
4734 
4735 /*
4736  * Frees a single item to any zone.
4737  *
4738  * Arguments:
4739  *	zone   The zone to free to
4740  *	item   The item we're freeing
4741  *	udata  User supplied data for the dtor
4742  *	skip   Skip dtors and finis
4743  */
4744 static __noinline void
4745 zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip)
4746 {
4747 
4748 	/*
4749 	 * If a free is sent directly to an SMR zone we have to
4750 	 * synchronize immediately because the item can instantly
4751 	 * be reallocated. This should only happen in degenerate
4752 	 * cases when no memory is available for per-cpu caches.
4753 	 */
4754 	if ((zone->uz_flags & UMA_ZONE_SMR) != 0 && skip == SKIP_NONE)
4755 		smr_synchronize(zone->uz_smr);
4756 
4757 	item_dtor(zone, item, zone->uz_size, udata, skip);
4758 
4759 	if (skip < SKIP_FINI && zone->uz_fini) {
4760 		kasan_mark_item_valid(zone, item);
4761 		zone->uz_fini(item, zone->uz_size);
4762 		kasan_mark_item_invalid(zone, item);
4763 	}
4764 
4765 	zone->uz_release(zone->uz_arg, &item, 1);
4766 
4767 	if (skip & SKIP_CNT)
4768 		return;
4769 
4770 	counter_u64_add(zone->uz_frees, 1);
4771 
4772 	if (zone->uz_max_items > 0)
4773 		zone_free_limit(zone, 1);
4774 }
4775 
4776 /* See uma.h */
4777 int
4778 uma_zone_set_max(uma_zone_t zone, int nitems)
4779 {
4780 
4781 	/*
4782 	 * If the limit is small, we may need to constrain the maximum per-CPU
4783 	 * cache size, or disable caching entirely.
4784 	 */
4785 	uma_zone_set_maxcache(zone, nitems);
4786 
4787 	/*
4788 	 * XXX This can misbehave if the zone has any allocations with
4789 	 * no limit and a limit is imposed.  There is currently no
4790 	 * way to clear a limit.
4791 	 */
4792 	ZONE_LOCK(zone);
4793 	zone->uz_max_items = nitems;
4794 	zone->uz_flags |= UMA_ZFLAG_LIMIT;
4795 	zone_update_caches(zone);
4796 	/* We may need to wake waiters. */
4797 	wakeup(&zone->uz_max_items);
4798 	ZONE_UNLOCK(zone);
4799 
4800 	return (nitems);
4801 }
4802 
4803 /* See uma.h */
4804 void
4805 uma_zone_set_maxcache(uma_zone_t zone, int nitems)
4806 {
4807 	int bpcpu, bpdom, bsize, nb;
4808 
4809 	ZONE_LOCK(zone);
4810 
4811 	/*
4812 	 * Compute a lower bound on the number of items that may be cached in
4813 	 * the zone.  Each CPU gets at least two buckets, and for cross-domain
4814 	 * frees we use an additional bucket per CPU and per domain.  Select the
4815 	 * largest bucket size that does not exceed half of the requested limit,
4816 	 * with the left over space given to the full bucket cache.
4817 	 */
4818 	bpdom = 0;
4819 	bpcpu = 2;
4820 #ifdef NUMA
4821 	if ((zone->uz_flags & UMA_ZONE_FIRSTTOUCH) != 0 && vm_ndomains > 1) {
4822 		bpcpu++;
4823 		bpdom++;
4824 	}
4825 #endif
4826 	nb = bpcpu * mp_ncpus + bpdom * vm_ndomains;
4827 	bsize = nitems / nb / 2;
4828 	if (bsize > BUCKET_MAX)
4829 		bsize = BUCKET_MAX;
4830 	else if (bsize == 0 && nitems / nb > 0)
4831 		bsize = 1;
4832 	zone->uz_bucket_size_max = zone->uz_bucket_size = bsize;
4833 	if (zone->uz_bucket_size_min > zone->uz_bucket_size_max)
4834 		zone->uz_bucket_size_min = zone->uz_bucket_size_max;
4835 	zone->uz_bucket_max = nitems - nb * bsize;
4836 	ZONE_UNLOCK(zone);
4837 }
4838 
4839 /* See uma.h */
4840 int
4841 uma_zone_get_max(uma_zone_t zone)
4842 {
4843 	int nitems;
4844 
4845 	nitems = atomic_load_64(&zone->uz_max_items);
4846 
4847 	return (nitems);
4848 }
4849 
4850 /* See uma.h */
4851 void
4852 uma_zone_set_warning(uma_zone_t zone, const char *warning)
4853 {
4854 
4855 	ZONE_ASSERT_COLD(zone);
4856 	zone->uz_warning = warning;
4857 }
4858 
4859 /* See uma.h */
4860 void
4861 uma_zone_set_maxaction(uma_zone_t zone, uma_maxaction_t maxaction)
4862 {
4863 
4864 	ZONE_ASSERT_COLD(zone);
4865 	TASK_INIT(&zone->uz_maxaction, 0, (task_fn_t *)maxaction, zone);
4866 }
4867 
4868 /* See uma.h */
4869 int
4870 uma_zone_get_cur(uma_zone_t zone)
4871 {
4872 	int64_t nitems;
4873 	u_int i;
4874 
4875 	nitems = 0;
4876 	if (zone->uz_allocs != EARLY_COUNTER && zone->uz_frees != EARLY_COUNTER)
4877 		nitems = counter_u64_fetch(zone->uz_allocs) -
4878 		    counter_u64_fetch(zone->uz_frees);
4879 	CPU_FOREACH(i)
4880 		nitems += atomic_load_64(&zone->uz_cpu[i].uc_allocs) -
4881 		    atomic_load_64(&zone->uz_cpu[i].uc_frees);
4882 
4883 	return (nitems < 0 ? 0 : nitems);
4884 }
4885 
4886 static uint64_t
4887 uma_zone_get_allocs(uma_zone_t zone)
4888 {
4889 	uint64_t nitems;
4890 	u_int i;
4891 
4892 	nitems = 0;
4893 	if (zone->uz_allocs != EARLY_COUNTER)
4894 		nitems = counter_u64_fetch(zone->uz_allocs);
4895 	CPU_FOREACH(i)
4896 		nitems += atomic_load_64(&zone->uz_cpu[i].uc_allocs);
4897 
4898 	return (nitems);
4899 }
4900 
4901 static uint64_t
4902 uma_zone_get_frees(uma_zone_t zone)
4903 {
4904 	uint64_t nitems;
4905 	u_int i;
4906 
4907 	nitems = 0;
4908 	if (zone->uz_frees != EARLY_COUNTER)
4909 		nitems = counter_u64_fetch(zone->uz_frees);
4910 	CPU_FOREACH(i)
4911 		nitems += atomic_load_64(&zone->uz_cpu[i].uc_frees);
4912 
4913 	return (nitems);
4914 }
4915 
4916 #ifdef INVARIANTS
4917 /* Used only for KEG_ASSERT_COLD(). */
4918 static uint64_t
4919 uma_keg_get_allocs(uma_keg_t keg)
4920 {
4921 	uma_zone_t z;
4922 	uint64_t nitems;
4923 
4924 	nitems = 0;
4925 	LIST_FOREACH(z, &keg->uk_zones, uz_link)
4926 		nitems += uma_zone_get_allocs(z);
4927 
4928 	return (nitems);
4929 }
4930 #endif
4931 
4932 /* See uma.h */
4933 void
4934 uma_zone_set_init(uma_zone_t zone, uma_init uminit)
4935 {
4936 	uma_keg_t keg;
4937 
4938 	KEG_GET(zone, keg);
4939 	KEG_ASSERT_COLD(keg);
4940 	keg->uk_init = uminit;
4941 }
4942 
4943 /* See uma.h */
4944 void
4945 uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
4946 {
4947 	uma_keg_t keg;
4948 
4949 	KEG_GET(zone, keg);
4950 	KEG_ASSERT_COLD(keg);
4951 	keg->uk_fini = fini;
4952 }
4953 
4954 /* See uma.h */
4955 void
4956 uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
4957 {
4958 
4959 	ZONE_ASSERT_COLD(zone);
4960 	zone->uz_init = zinit;
4961 }
4962 
4963 /* See uma.h */
4964 void
4965 uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
4966 {
4967 
4968 	ZONE_ASSERT_COLD(zone);
4969 	zone->uz_fini = zfini;
4970 }
4971 
4972 /* See uma.h */
4973 void
4974 uma_zone_set_freef(uma_zone_t zone, uma_free freef)
4975 {
4976 	uma_keg_t keg;
4977 
4978 	KEG_GET(zone, keg);
4979 	KEG_ASSERT_COLD(keg);
4980 	keg->uk_freef = freef;
4981 }
4982 
4983 /* See uma.h */
4984 void
4985 uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
4986 {
4987 	uma_keg_t keg;
4988 
4989 	KEG_GET(zone, keg);
4990 	KEG_ASSERT_COLD(keg);
4991 	keg->uk_allocf = allocf;
4992 }
4993 
4994 /* See uma.h */
4995 void
4996 uma_zone_set_smr(uma_zone_t zone, smr_t smr)
4997 {
4998 
4999 	ZONE_ASSERT_COLD(zone);
5000 
5001 	KASSERT(smr != NULL, ("Got NULL smr"));
5002 	KASSERT((zone->uz_flags & UMA_ZONE_SMR) == 0,
5003 	    ("zone %p (%s) already uses SMR", zone, zone->uz_name));
5004 	zone->uz_flags |= UMA_ZONE_SMR;
5005 	zone->uz_smr = smr;
5006 	zone_update_caches(zone);
5007 }
5008 
5009 smr_t
5010 uma_zone_get_smr(uma_zone_t zone)
5011 {
5012 
5013 	return (zone->uz_smr);
5014 }
5015 
5016 /* See uma.h */
5017 void
5018 uma_zone_reserve(uma_zone_t zone, int items)
5019 {
5020 	uma_keg_t keg;
5021 
5022 	KEG_GET(zone, keg);
5023 	KEG_ASSERT_COLD(keg);
5024 	keg->uk_reserve = items;
5025 }
5026 
5027 /* See uma.h */
5028 int
5029 uma_zone_reserve_kva(uma_zone_t zone, int count)
5030 {
5031 	uma_keg_t keg;
5032 	vm_offset_t kva;
5033 	u_int pages;
5034 
5035 	KEG_GET(zone, keg);
5036 	KEG_ASSERT_COLD(keg);
5037 	ZONE_ASSERT_COLD(zone);
5038 
5039 	pages = howmany(count, keg->uk_ipers) * keg->uk_ppera;
5040 
5041 #ifdef UMA_MD_SMALL_ALLOC
5042 	if (keg->uk_ppera > 1) {
5043 #else
5044 	if (1) {
5045 #endif
5046 		kva = kva_alloc((vm_size_t)pages * PAGE_SIZE);
5047 		if (kva == 0)
5048 			return (0);
5049 	} else
5050 		kva = 0;
5051 
5052 	MPASS(keg->uk_kva == 0);
5053 	keg->uk_kva = kva;
5054 	keg->uk_offset = 0;
5055 	zone->uz_max_items = pages * keg->uk_ipers;
5056 #ifdef UMA_MD_SMALL_ALLOC
5057 	keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc;
5058 #else
5059 	keg->uk_allocf = noobj_alloc;
5060 #endif
5061 	keg->uk_flags |= UMA_ZFLAG_LIMIT | UMA_ZONE_NOFREE;
5062 	zone->uz_flags |= UMA_ZFLAG_LIMIT | UMA_ZONE_NOFREE;
5063 	zone_update_caches(zone);
5064 
5065 	return (1);
5066 }
5067 
5068 /* See uma.h */
5069 void
5070 uma_prealloc(uma_zone_t zone, int items)
5071 {
5072 	struct vm_domainset_iter di;
5073 	uma_domain_t dom;
5074 	uma_slab_t slab;
5075 	uma_keg_t keg;
5076 	int aflags, domain, slabs;
5077 
5078 	KEG_GET(zone, keg);
5079 	slabs = howmany(items, keg->uk_ipers);
5080 	while (slabs-- > 0) {
5081 		aflags = M_NOWAIT;
5082 		vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain,
5083 		    &aflags);
5084 		for (;;) {
5085 			slab = keg_alloc_slab(keg, zone, domain, M_WAITOK,
5086 			    aflags);
5087 			if (slab != NULL) {
5088 				dom = &keg->uk_domain[slab->us_domain];
5089 				/*
5090 				 * keg_alloc_slab() always returns a slab on the
5091 				 * partial list.
5092 				 */
5093 				LIST_REMOVE(slab, us_link);
5094 				LIST_INSERT_HEAD(&dom->ud_free_slab, slab,
5095 				    us_link);
5096 				dom->ud_free_slabs++;
5097 				KEG_UNLOCK(keg, slab->us_domain);
5098 				break;
5099 			}
5100 			if (vm_domainset_iter_policy(&di, &domain) != 0)
5101 				vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask, 0);
5102 		}
5103 	}
5104 }
5105 
5106 /*
5107  * Returns a snapshot of memory consumption in bytes.
5108  */
5109 size_t
5110 uma_zone_memory(uma_zone_t zone)
5111 {
5112 	size_t sz;
5113 	int i;
5114 
5115 	sz = 0;
5116 	if (zone->uz_flags & UMA_ZFLAG_CACHE) {
5117 		for (i = 0; i < vm_ndomains; i++)
5118 			sz += ZDOM_GET(zone, i)->uzd_nitems;
5119 		return (sz * zone->uz_size);
5120 	}
5121 	for (i = 0; i < vm_ndomains; i++)
5122 		sz += zone->uz_keg->uk_domain[i].ud_pages;
5123 
5124 	return (sz * PAGE_SIZE);
5125 }
5126 
5127 /* See uma.h */
5128 void
5129 uma_reclaim(int req)
5130 {
5131 	uma_reclaim_domain(req, UMA_ANYDOMAIN);
5132 }
5133 
5134 void
5135 uma_reclaim_domain(int req, int domain)
5136 {
5137 	void *arg;
5138 
5139 	bucket_enable();
5140 
5141 	arg = (void *)(uintptr_t)domain;
5142 	sx_slock(&uma_reclaim_lock);
5143 	switch (req) {
5144 	case UMA_RECLAIM_TRIM:
5145 		zone_foreach(zone_trim, arg);
5146 		break;
5147 	case UMA_RECLAIM_DRAIN:
5148 		zone_foreach(zone_drain, arg);
5149 		break;
5150 	case UMA_RECLAIM_DRAIN_CPU:
5151 		zone_foreach(zone_drain, arg);
5152 		pcpu_cache_drain_safe(NULL);
5153 		zone_foreach(zone_drain, arg);
5154 		break;
5155 	default:
5156 		panic("unhandled reclamation request %d", req);
5157 	}
5158 
5159 	/*
5160 	 * Some slabs may have been freed but this zone will be visited early
5161 	 * we visit again so that we can free pages that are empty once other
5162 	 * zones are drained.  We have to do the same for buckets.
5163 	 */
5164 	zone_drain(slabzones[0], arg);
5165 	zone_drain(slabzones[1], arg);
5166 	bucket_zone_drain(domain);
5167 	sx_sunlock(&uma_reclaim_lock);
5168 }
5169 
5170 static volatile int uma_reclaim_needed;
5171 
5172 void
5173 uma_reclaim_wakeup(void)
5174 {
5175 
5176 	if (atomic_fetchadd_int(&uma_reclaim_needed, 1) == 0)
5177 		wakeup(uma_reclaim);
5178 }
5179 
5180 void
5181 uma_reclaim_worker(void *arg __unused)
5182 {
5183 
5184 	for (;;) {
5185 		sx_xlock(&uma_reclaim_lock);
5186 		while (atomic_load_int(&uma_reclaim_needed) == 0)
5187 			sx_sleep(uma_reclaim, &uma_reclaim_lock, PVM, "umarcl",
5188 			    hz);
5189 		sx_xunlock(&uma_reclaim_lock);
5190 		EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_KMEM);
5191 		uma_reclaim(UMA_RECLAIM_DRAIN_CPU);
5192 		atomic_store_int(&uma_reclaim_needed, 0);
5193 		/* Don't fire more than once per-second. */
5194 		pause("umarclslp", hz);
5195 	}
5196 }
5197 
5198 /* See uma.h */
5199 void
5200 uma_zone_reclaim(uma_zone_t zone, int req)
5201 {
5202 	uma_zone_reclaim_domain(zone, req, UMA_ANYDOMAIN);
5203 }
5204 
5205 void
5206 uma_zone_reclaim_domain(uma_zone_t zone, int req, int domain)
5207 {
5208 	void *arg;
5209 
5210 	arg = (void *)(uintptr_t)domain;
5211 	switch (req) {
5212 	case UMA_RECLAIM_TRIM:
5213 		zone_trim(zone, arg);
5214 		break;
5215 	case UMA_RECLAIM_DRAIN:
5216 		zone_drain(zone, arg);
5217 		break;
5218 	case UMA_RECLAIM_DRAIN_CPU:
5219 		pcpu_cache_drain_safe(zone);
5220 		zone_drain(zone, arg);
5221 		break;
5222 	default:
5223 		panic("unhandled reclamation request %d", req);
5224 	}
5225 }
5226 
5227 /* See uma.h */
5228 int
5229 uma_zone_exhausted(uma_zone_t zone)
5230 {
5231 
5232 	return (atomic_load_32(&zone->uz_sleepers) > 0);
5233 }
5234 
5235 unsigned long
5236 uma_limit(void)
5237 {
5238 
5239 	return (uma_kmem_limit);
5240 }
5241 
5242 void
5243 uma_set_limit(unsigned long limit)
5244 {
5245 
5246 	uma_kmem_limit = limit;
5247 }
5248 
5249 unsigned long
5250 uma_size(void)
5251 {
5252 
5253 	return (atomic_load_long(&uma_kmem_total));
5254 }
5255 
5256 long
5257 uma_avail(void)
5258 {
5259 
5260 	return (uma_kmem_limit - uma_size());
5261 }
5262 
5263 #ifdef DDB
5264 /*
5265  * Generate statistics across both the zone and its per-cpu cache's.  Return
5266  * desired statistics if the pointer is non-NULL for that statistic.
5267  *
5268  * Note: does not update the zone statistics, as it can't safely clear the
5269  * per-CPU cache statistic.
5270  *
5271  */
5272 static void
5273 uma_zone_sumstat(uma_zone_t z, long *cachefreep, uint64_t *allocsp,
5274     uint64_t *freesp, uint64_t *sleepsp, uint64_t *xdomainp)
5275 {
5276 	uma_cache_t cache;
5277 	uint64_t allocs, frees, sleeps, xdomain;
5278 	int cachefree, cpu;
5279 
5280 	allocs = frees = sleeps = xdomain = 0;
5281 	cachefree = 0;
5282 	CPU_FOREACH(cpu) {
5283 		cache = &z->uz_cpu[cpu];
5284 		cachefree += cache->uc_allocbucket.ucb_cnt;
5285 		cachefree += cache->uc_freebucket.ucb_cnt;
5286 		xdomain += cache->uc_crossbucket.ucb_cnt;
5287 		cachefree += cache->uc_crossbucket.ucb_cnt;
5288 		allocs += cache->uc_allocs;
5289 		frees += cache->uc_frees;
5290 	}
5291 	allocs += counter_u64_fetch(z->uz_allocs);
5292 	frees += counter_u64_fetch(z->uz_frees);
5293 	xdomain += counter_u64_fetch(z->uz_xdomain);
5294 	sleeps += z->uz_sleeps;
5295 	if (cachefreep != NULL)
5296 		*cachefreep = cachefree;
5297 	if (allocsp != NULL)
5298 		*allocsp = allocs;
5299 	if (freesp != NULL)
5300 		*freesp = frees;
5301 	if (sleepsp != NULL)
5302 		*sleepsp = sleeps;
5303 	if (xdomainp != NULL)
5304 		*xdomainp = xdomain;
5305 }
5306 #endif /* DDB */
5307 
5308 static int
5309 sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS)
5310 {
5311 	uma_keg_t kz;
5312 	uma_zone_t z;
5313 	int count;
5314 
5315 	count = 0;
5316 	rw_rlock(&uma_rwlock);
5317 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
5318 		LIST_FOREACH(z, &kz->uk_zones, uz_link)
5319 			count++;
5320 	}
5321 	LIST_FOREACH(z, &uma_cachezones, uz_link)
5322 		count++;
5323 
5324 	rw_runlock(&uma_rwlock);
5325 	return (sysctl_handle_int(oidp, &count, 0, req));
5326 }
5327 
5328 static void
5329 uma_vm_zone_stats(struct uma_type_header *uth, uma_zone_t z, struct sbuf *sbuf,
5330     struct uma_percpu_stat *ups, bool internal)
5331 {
5332 	uma_zone_domain_t zdom;
5333 	uma_cache_t cache;
5334 	int i;
5335 
5336 	for (i = 0; i < vm_ndomains; i++) {
5337 		zdom = ZDOM_GET(z, i);
5338 		uth->uth_zone_free += zdom->uzd_nitems;
5339 	}
5340 	uth->uth_allocs = counter_u64_fetch(z->uz_allocs);
5341 	uth->uth_frees = counter_u64_fetch(z->uz_frees);
5342 	uth->uth_fails = counter_u64_fetch(z->uz_fails);
5343 	uth->uth_xdomain = counter_u64_fetch(z->uz_xdomain);
5344 	uth->uth_sleeps = z->uz_sleeps;
5345 
5346 	for (i = 0; i < mp_maxid + 1; i++) {
5347 		bzero(&ups[i], sizeof(*ups));
5348 		if (internal || CPU_ABSENT(i))
5349 			continue;
5350 		cache = &z->uz_cpu[i];
5351 		ups[i].ups_cache_free += cache->uc_allocbucket.ucb_cnt;
5352 		ups[i].ups_cache_free += cache->uc_freebucket.ucb_cnt;
5353 		ups[i].ups_cache_free += cache->uc_crossbucket.ucb_cnt;
5354 		ups[i].ups_allocs = cache->uc_allocs;
5355 		ups[i].ups_frees = cache->uc_frees;
5356 	}
5357 }
5358 
5359 static int
5360 sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
5361 {
5362 	struct uma_stream_header ush;
5363 	struct uma_type_header uth;
5364 	struct uma_percpu_stat *ups;
5365 	struct sbuf sbuf;
5366 	uma_keg_t kz;
5367 	uma_zone_t z;
5368 	uint64_t items;
5369 	uint32_t kfree, pages;
5370 	int count, error, i;
5371 
5372 	error = sysctl_wire_old_buffer(req, 0);
5373 	if (error != 0)
5374 		return (error);
5375 	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
5376 	sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL);
5377 	ups = malloc((mp_maxid + 1) * sizeof(*ups), M_TEMP, M_WAITOK);
5378 
5379 	count = 0;
5380 	rw_rlock(&uma_rwlock);
5381 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
5382 		LIST_FOREACH(z, &kz->uk_zones, uz_link)
5383 			count++;
5384 	}
5385 
5386 	LIST_FOREACH(z, &uma_cachezones, uz_link)
5387 		count++;
5388 
5389 	/*
5390 	 * Insert stream header.
5391 	 */
5392 	bzero(&ush, sizeof(ush));
5393 	ush.ush_version = UMA_STREAM_VERSION;
5394 	ush.ush_maxcpus = (mp_maxid + 1);
5395 	ush.ush_count = count;
5396 	(void)sbuf_bcat(&sbuf, &ush, sizeof(ush));
5397 
5398 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
5399 		kfree = pages = 0;
5400 		for (i = 0; i < vm_ndomains; i++) {
5401 			kfree += kz->uk_domain[i].ud_free_items;
5402 			pages += kz->uk_domain[i].ud_pages;
5403 		}
5404 		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
5405 			bzero(&uth, sizeof(uth));
5406 			strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
5407 			uth.uth_align = kz->uk_align;
5408 			uth.uth_size = kz->uk_size;
5409 			uth.uth_rsize = kz->uk_rsize;
5410 			if (z->uz_max_items > 0) {
5411 				items = UZ_ITEMS_COUNT(z->uz_items);
5412 				uth.uth_pages = (items / kz->uk_ipers) *
5413 					kz->uk_ppera;
5414 			} else
5415 				uth.uth_pages = pages;
5416 			uth.uth_maxpages = (z->uz_max_items / kz->uk_ipers) *
5417 			    kz->uk_ppera;
5418 			uth.uth_limit = z->uz_max_items;
5419 			uth.uth_keg_free = kfree;
5420 
5421 			/*
5422 			 * A zone is secondary is it is not the first entry
5423 			 * on the keg's zone list.
5424 			 */
5425 			if ((z->uz_flags & UMA_ZONE_SECONDARY) &&
5426 			    (LIST_FIRST(&kz->uk_zones) != z))
5427 				uth.uth_zone_flags = UTH_ZONE_SECONDARY;
5428 			uma_vm_zone_stats(&uth, z, &sbuf, ups,
5429 			    kz->uk_flags & UMA_ZFLAG_INTERNAL);
5430 			(void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
5431 			for (i = 0; i < mp_maxid + 1; i++)
5432 				(void)sbuf_bcat(&sbuf, &ups[i], sizeof(ups[i]));
5433 		}
5434 	}
5435 	LIST_FOREACH(z, &uma_cachezones, uz_link) {
5436 		bzero(&uth, sizeof(uth));
5437 		strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
5438 		uth.uth_size = z->uz_size;
5439 		uma_vm_zone_stats(&uth, z, &sbuf, ups, false);
5440 		(void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
5441 		for (i = 0; i < mp_maxid + 1; i++)
5442 			(void)sbuf_bcat(&sbuf, &ups[i], sizeof(ups[i]));
5443 	}
5444 
5445 	rw_runlock(&uma_rwlock);
5446 	error = sbuf_finish(&sbuf);
5447 	sbuf_delete(&sbuf);
5448 	free(ups, M_TEMP);
5449 	return (error);
5450 }
5451 
5452 int
5453 sysctl_handle_uma_zone_max(SYSCTL_HANDLER_ARGS)
5454 {
5455 	uma_zone_t zone = *(uma_zone_t *)arg1;
5456 	int error, max;
5457 
5458 	max = uma_zone_get_max(zone);
5459 	error = sysctl_handle_int(oidp, &max, 0, req);
5460 	if (error || !req->newptr)
5461 		return (error);
5462 
5463 	uma_zone_set_max(zone, max);
5464 
5465 	return (0);
5466 }
5467 
5468 int
5469 sysctl_handle_uma_zone_cur(SYSCTL_HANDLER_ARGS)
5470 {
5471 	uma_zone_t zone;
5472 	int cur;
5473 
5474 	/*
5475 	 * Some callers want to add sysctls for global zones that
5476 	 * may not yet exist so they pass a pointer to a pointer.
5477 	 */
5478 	if (arg2 == 0)
5479 		zone = *(uma_zone_t *)arg1;
5480 	else
5481 		zone = arg1;
5482 	cur = uma_zone_get_cur(zone);
5483 	return (sysctl_handle_int(oidp, &cur, 0, req));
5484 }
5485 
5486 static int
5487 sysctl_handle_uma_zone_allocs(SYSCTL_HANDLER_ARGS)
5488 {
5489 	uma_zone_t zone = arg1;
5490 	uint64_t cur;
5491 
5492 	cur = uma_zone_get_allocs(zone);
5493 	return (sysctl_handle_64(oidp, &cur, 0, req));
5494 }
5495 
5496 static int
5497 sysctl_handle_uma_zone_frees(SYSCTL_HANDLER_ARGS)
5498 {
5499 	uma_zone_t zone = arg1;
5500 	uint64_t cur;
5501 
5502 	cur = uma_zone_get_frees(zone);
5503 	return (sysctl_handle_64(oidp, &cur, 0, req));
5504 }
5505 
5506 static int
5507 sysctl_handle_uma_zone_flags(SYSCTL_HANDLER_ARGS)
5508 {
5509 	struct sbuf sbuf;
5510 	uma_zone_t zone = arg1;
5511 	int error;
5512 
5513 	sbuf_new_for_sysctl(&sbuf, NULL, 0, req);
5514 	if (zone->uz_flags != 0)
5515 		sbuf_printf(&sbuf, "0x%b", zone->uz_flags, PRINT_UMA_ZFLAGS);
5516 	else
5517 		sbuf_printf(&sbuf, "0");
5518 	error = sbuf_finish(&sbuf);
5519 	sbuf_delete(&sbuf);
5520 
5521 	return (error);
5522 }
5523 
5524 static int
5525 sysctl_handle_uma_slab_efficiency(SYSCTL_HANDLER_ARGS)
5526 {
5527 	uma_keg_t keg = arg1;
5528 	int avail, effpct, total;
5529 
5530 	total = keg->uk_ppera * PAGE_SIZE;
5531 	if ((keg->uk_flags & UMA_ZFLAG_OFFPAGE) != 0)
5532 		total += slabzone(keg->uk_ipers)->uz_keg->uk_rsize;
5533 	/*
5534 	 * We consider the client's requested size and alignment here, not the
5535 	 * real size determination uk_rsize, because we also adjust the real
5536 	 * size for internal implementation reasons (max bitset size).
5537 	 */
5538 	avail = keg->uk_ipers * roundup2(keg->uk_size, keg->uk_align + 1);
5539 	if ((keg->uk_flags & UMA_ZONE_PCPU) != 0)
5540 		avail *= mp_maxid + 1;
5541 	effpct = 100 * avail / total;
5542 	return (sysctl_handle_int(oidp, &effpct, 0, req));
5543 }
5544 
5545 static int
5546 sysctl_handle_uma_zone_items(SYSCTL_HANDLER_ARGS)
5547 {
5548 	uma_zone_t zone = arg1;
5549 	uint64_t cur;
5550 
5551 	cur = UZ_ITEMS_COUNT(atomic_load_64(&zone->uz_items));
5552 	return (sysctl_handle_64(oidp, &cur, 0, req));
5553 }
5554 
5555 #ifdef INVARIANTS
5556 static uma_slab_t
5557 uma_dbg_getslab(uma_zone_t zone, void *item)
5558 {
5559 	uma_slab_t slab;
5560 	uma_keg_t keg;
5561 	uint8_t *mem;
5562 
5563 	/*
5564 	 * It is safe to return the slab here even though the
5565 	 * zone is unlocked because the item's allocation state
5566 	 * essentially holds a reference.
5567 	 */
5568 	mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
5569 	if ((zone->uz_flags & UMA_ZFLAG_CACHE) != 0)
5570 		return (NULL);
5571 	if (zone->uz_flags & UMA_ZFLAG_VTOSLAB)
5572 		return (vtoslab((vm_offset_t)mem));
5573 	keg = zone->uz_keg;
5574 	if ((keg->uk_flags & UMA_ZFLAG_HASH) == 0)
5575 		return ((uma_slab_t)(mem + keg->uk_pgoff));
5576 	KEG_LOCK(keg, 0);
5577 	slab = hash_sfind(&keg->uk_hash, mem);
5578 	KEG_UNLOCK(keg, 0);
5579 
5580 	return (slab);
5581 }
5582 
5583 static bool
5584 uma_dbg_zskip(uma_zone_t zone, void *mem)
5585 {
5586 
5587 	if ((zone->uz_flags & UMA_ZFLAG_CACHE) != 0)
5588 		return (true);
5589 
5590 	return (uma_dbg_kskip(zone->uz_keg, mem));
5591 }
5592 
5593 static bool
5594 uma_dbg_kskip(uma_keg_t keg, void *mem)
5595 {
5596 	uintptr_t idx;
5597 
5598 	if (dbg_divisor == 0)
5599 		return (true);
5600 
5601 	if (dbg_divisor == 1)
5602 		return (false);
5603 
5604 	idx = (uintptr_t)mem >> PAGE_SHIFT;
5605 	if (keg->uk_ipers > 1) {
5606 		idx *= keg->uk_ipers;
5607 		idx += ((uintptr_t)mem & PAGE_MASK) / keg->uk_rsize;
5608 	}
5609 
5610 	if ((idx / dbg_divisor) * dbg_divisor != idx) {
5611 		counter_u64_add(uma_skip_cnt, 1);
5612 		return (true);
5613 	}
5614 	counter_u64_add(uma_dbg_cnt, 1);
5615 
5616 	return (false);
5617 }
5618 
5619 /*
5620  * Set up the slab's freei data such that uma_dbg_free can function.
5621  *
5622  */
5623 static void
5624 uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item)
5625 {
5626 	uma_keg_t keg;
5627 	int freei;
5628 
5629 	if (slab == NULL) {
5630 		slab = uma_dbg_getslab(zone, item);
5631 		if (slab == NULL)
5632 			panic("uma: item %p did not belong to zone %s",
5633 			    item, zone->uz_name);
5634 	}
5635 	keg = zone->uz_keg;
5636 	freei = slab_item_index(slab, keg, item);
5637 
5638 	if (BIT_TEST_SET_ATOMIC(keg->uk_ipers, freei,
5639 	    slab_dbg_bits(slab, keg)))
5640 		panic("Duplicate alloc of %p from zone %p(%s) slab %p(%d)",
5641 		    item, zone, zone->uz_name, slab, freei);
5642 }
5643 
5644 /*
5645  * Verifies freed addresses.  Checks for alignment, valid slab membership
5646  * and duplicate frees.
5647  *
5648  */
5649 static void
5650 uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
5651 {
5652 	uma_keg_t keg;
5653 	int freei;
5654 
5655 	if (slab == NULL) {
5656 		slab = uma_dbg_getslab(zone, item);
5657 		if (slab == NULL)
5658 			panic("uma: Freed item %p did not belong to zone %s",
5659 			    item, zone->uz_name);
5660 	}
5661 	keg = zone->uz_keg;
5662 	freei = slab_item_index(slab, keg, item);
5663 
5664 	if (freei >= keg->uk_ipers)
5665 		panic("Invalid free of %p from zone %p(%s) slab %p(%d)",
5666 		    item, zone, zone->uz_name, slab, freei);
5667 
5668 	if (slab_item(slab, keg, freei) != item)
5669 		panic("Unaligned free of %p from zone %p(%s) slab %p(%d)",
5670 		    item, zone, zone->uz_name, slab, freei);
5671 
5672 	if (!BIT_TEST_CLR_ATOMIC(keg->uk_ipers, freei,
5673 	    slab_dbg_bits(slab, keg)))
5674 		panic("Duplicate free of %p from zone %p(%s) slab %p(%d)",
5675 		    item, zone, zone->uz_name, slab, freei);
5676 }
5677 #endif /* INVARIANTS */
5678 
5679 #ifdef DDB
5680 static int64_t
5681 get_uma_stats(uma_keg_t kz, uma_zone_t z, uint64_t *allocs, uint64_t *used,
5682     uint64_t *sleeps, long *cachefree, uint64_t *xdomain)
5683 {
5684 	uint64_t frees;
5685 	int i;
5686 
5687 	if (kz->uk_flags & UMA_ZFLAG_INTERNAL) {
5688 		*allocs = counter_u64_fetch(z->uz_allocs);
5689 		frees = counter_u64_fetch(z->uz_frees);
5690 		*sleeps = z->uz_sleeps;
5691 		*cachefree = 0;
5692 		*xdomain = 0;
5693 	} else
5694 		uma_zone_sumstat(z, cachefree, allocs, &frees, sleeps,
5695 		    xdomain);
5696 	for (i = 0; i < vm_ndomains; i++) {
5697 		*cachefree += ZDOM_GET(z, i)->uzd_nitems;
5698 		if (!((z->uz_flags & UMA_ZONE_SECONDARY) &&
5699 		    (LIST_FIRST(&kz->uk_zones) != z)))
5700 			*cachefree += kz->uk_domain[i].ud_free_items;
5701 	}
5702 	*used = *allocs - frees;
5703 	return (((int64_t)*used + *cachefree) * kz->uk_size);
5704 }
5705 
5706 DB_SHOW_COMMAND(uma, db_show_uma)
5707 {
5708 	const char *fmt_hdr, *fmt_entry;
5709 	uma_keg_t kz;
5710 	uma_zone_t z;
5711 	uint64_t allocs, used, sleeps, xdomain;
5712 	long cachefree;
5713 	/* variables for sorting */
5714 	uma_keg_t cur_keg;
5715 	uma_zone_t cur_zone, last_zone;
5716 	int64_t cur_size, last_size, size;
5717 	int ties;
5718 
5719 	/* /i option produces machine-parseable CSV output */
5720 	if (modif[0] == 'i') {
5721 		fmt_hdr = "%s,%s,%s,%s,%s,%s,%s,%s,%s\n";
5722 		fmt_entry = "\"%s\",%ju,%jd,%ld,%ju,%ju,%u,%jd,%ju\n";
5723 	} else {
5724 		fmt_hdr = "%18s %6s %7s %7s %11s %7s %7s %10s %8s\n";
5725 		fmt_entry = "%18s %6ju %7jd %7ld %11ju %7ju %7u %10jd %8ju\n";
5726 	}
5727 
5728 	db_printf(fmt_hdr, "Zone", "Size", "Used", "Free", "Requests",
5729 	    "Sleeps", "Bucket", "Total Mem", "XFree");
5730 
5731 	/* Sort the zones with largest size first. */
5732 	last_zone = NULL;
5733 	last_size = INT64_MAX;
5734 	for (;;) {
5735 		cur_zone = NULL;
5736 		cur_size = -1;
5737 		ties = 0;
5738 		LIST_FOREACH(kz, &uma_kegs, uk_link) {
5739 			LIST_FOREACH(z, &kz->uk_zones, uz_link) {
5740 				/*
5741 				 * In the case of size ties, print out zones
5742 				 * in the order they are encountered.  That is,
5743 				 * when we encounter the most recently output
5744 				 * zone, we have already printed all preceding
5745 				 * ties, and we must print all following ties.
5746 				 */
5747 				if (z == last_zone) {
5748 					ties = 1;
5749 					continue;
5750 				}
5751 				size = get_uma_stats(kz, z, &allocs, &used,
5752 				    &sleeps, &cachefree, &xdomain);
5753 				if (size > cur_size && size < last_size + ties)
5754 				{
5755 					cur_size = size;
5756 					cur_zone = z;
5757 					cur_keg = kz;
5758 				}
5759 			}
5760 		}
5761 		if (cur_zone == NULL)
5762 			break;
5763 
5764 		size = get_uma_stats(cur_keg, cur_zone, &allocs, &used,
5765 		    &sleeps, &cachefree, &xdomain);
5766 		db_printf(fmt_entry, cur_zone->uz_name,
5767 		    (uintmax_t)cur_keg->uk_size, (intmax_t)used, cachefree,
5768 		    (uintmax_t)allocs, (uintmax_t)sleeps,
5769 		    (unsigned)cur_zone->uz_bucket_size, (intmax_t)size,
5770 		    xdomain);
5771 
5772 		if (db_pager_quit)
5773 			return;
5774 		last_zone = cur_zone;
5775 		last_size = cur_size;
5776 	}
5777 }
5778 
5779 DB_SHOW_COMMAND(umacache, db_show_umacache)
5780 {
5781 	uma_zone_t z;
5782 	uint64_t allocs, frees;
5783 	long cachefree;
5784 	int i;
5785 
5786 	db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free",
5787 	    "Requests", "Bucket");
5788 	LIST_FOREACH(z, &uma_cachezones, uz_link) {
5789 		uma_zone_sumstat(z, &cachefree, &allocs, &frees, NULL, NULL);
5790 		for (i = 0; i < vm_ndomains; i++)
5791 			cachefree += ZDOM_GET(z, i)->uzd_nitems;
5792 		db_printf("%18s %8ju %8jd %8ld %12ju %8u\n",
5793 		    z->uz_name, (uintmax_t)z->uz_size,
5794 		    (intmax_t)(allocs - frees), cachefree,
5795 		    (uintmax_t)allocs, z->uz_bucket_size);
5796 		if (db_pager_quit)
5797 			return;
5798 	}
5799 }
5800 #endif	/* DDB */
5801