xref: /freebsd/sys/vm/uma_core.c (revision 25ecdc7d52770caf1c9b44b5ec11f468f6b636f3)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2002-2019 Jeffrey Roberson <jeff@FreeBSD.org>
5  * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
6  * Copyright (c) 2004-2006 Robert N. M. Watson
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice unmodified, this list of conditions, and the following
14  *    disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /*
32  * uma_core.c  Implementation of the Universal Memory allocator
33  *
34  * This allocator is intended to replace the multitude of similar object caches
35  * in the standard FreeBSD kernel.  The intent is to be flexible as well as
36  * efficient.  A primary design goal is to return unused memory to the rest of
37  * the system.  This will make the system as a whole more flexible due to the
38  * ability to move memory to subsystems which most need it instead of leaving
39  * pools of reserved memory unused.
40  *
41  * The basic ideas stem from similar slab/zone based allocators whose algorithms
42  * are well known.
43  *
44  */
45 
46 /*
47  * TODO:
48  *	- Improve memory usage for large allocations
49  *	- Investigate cache size adjustments
50  */
51 
52 #include <sys/cdefs.h>
53 __FBSDID("$FreeBSD$");
54 
55 #include "opt_ddb.h"
56 #include "opt_param.h"
57 #include "opt_vm.h"
58 
59 #include <sys/param.h>
60 #include <sys/systm.h>
61 #include <sys/asan.h>
62 #include <sys/bitset.h>
63 #include <sys/domainset.h>
64 #include <sys/eventhandler.h>
65 #include <sys/kernel.h>
66 #include <sys/types.h>
67 #include <sys/limits.h>
68 #include <sys/queue.h>
69 #include <sys/malloc.h>
70 #include <sys/ktr.h>
71 #include <sys/lock.h>
72 #include <sys/sysctl.h>
73 #include <sys/mutex.h>
74 #include <sys/proc.h>
75 #include <sys/random.h>
76 #include <sys/rwlock.h>
77 #include <sys/sbuf.h>
78 #include <sys/sched.h>
79 #include <sys/sleepqueue.h>
80 #include <sys/smp.h>
81 #include <sys/smr.h>
82 #include <sys/taskqueue.h>
83 #include <sys/vmmeter.h>
84 
85 #include <vm/vm.h>
86 #include <vm/vm_param.h>
87 #include <vm/vm_domainset.h>
88 #include <vm/vm_object.h>
89 #include <vm/vm_page.h>
90 #include <vm/vm_pageout.h>
91 #include <vm/vm_phys.h>
92 #include <vm/vm_pagequeue.h>
93 #include <vm/vm_map.h>
94 #include <vm/vm_kern.h>
95 #include <vm/vm_extern.h>
96 #include <vm/vm_dumpset.h>
97 #include <vm/uma.h>
98 #include <vm/uma_int.h>
99 #include <vm/uma_dbg.h>
100 
101 #include <ddb/ddb.h>
102 
103 #ifdef DEBUG_MEMGUARD
104 #include <vm/memguard.h>
105 #endif
106 
107 #include <machine/md_var.h>
108 
109 #ifdef INVARIANTS
110 #define	UMA_ALWAYS_CTORDTOR	1
111 #else
112 #define	UMA_ALWAYS_CTORDTOR	0
113 #endif
114 
115 /*
116  * This is the zone and keg from which all zones are spawned.
117  */
118 static uma_zone_t kegs;
119 static uma_zone_t zones;
120 
121 /*
122  * On INVARIANTS builds, the slab contains a second bitset of the same size,
123  * "dbg_bits", which is laid out immediately after us_free.
124  */
125 #ifdef INVARIANTS
126 #define	SLAB_BITSETS	2
127 #else
128 #define	SLAB_BITSETS	1
129 #endif
130 
131 /*
132  * These are the two zones from which all offpage uma_slab_ts are allocated.
133  *
134  * One zone is for slab headers that can represent a larger number of items,
135  * making the slabs themselves more efficient, and the other zone is for
136  * headers that are smaller and represent fewer items, making the headers more
137  * efficient.
138  */
139 #define	SLABZONE_SIZE(setsize)					\
140     (sizeof(struct uma_hash_slab) + BITSET_SIZE(setsize) * SLAB_BITSETS)
141 #define	SLABZONE0_SETSIZE	(PAGE_SIZE / 16)
142 #define	SLABZONE1_SETSIZE	SLAB_MAX_SETSIZE
143 #define	SLABZONE0_SIZE	SLABZONE_SIZE(SLABZONE0_SETSIZE)
144 #define	SLABZONE1_SIZE	SLABZONE_SIZE(SLABZONE1_SETSIZE)
145 static uma_zone_t slabzones[2];
146 
147 /*
148  * The initial hash tables come out of this zone so they can be allocated
149  * prior to malloc coming up.
150  */
151 static uma_zone_t hashzone;
152 
153 /* The boot-time adjusted value for cache line alignment. */
154 int uma_align_cache = 64 - 1;
155 
156 static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");
157 static MALLOC_DEFINE(M_UMA, "UMA", "UMA Misc");
158 
159 /*
160  * Are we allowed to allocate buckets?
161  */
162 static int bucketdisable = 1;
163 
164 /* Linked list of all kegs in the system */
165 static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);
166 
167 /* Linked list of all cache-only zones in the system */
168 static LIST_HEAD(,uma_zone) uma_cachezones =
169     LIST_HEAD_INITIALIZER(uma_cachezones);
170 
171 /*
172  * Mutex for global lists: uma_kegs, uma_cachezones, and the per-keg list of
173  * zones.
174  */
175 static struct rwlock_padalign __exclusive_cache_line uma_rwlock;
176 
177 static struct sx uma_reclaim_lock;
178 
179 /*
180  * First available virual address for boot time allocations.
181  */
182 static vm_offset_t bootstart;
183 static vm_offset_t bootmem;
184 
185 /*
186  * kmem soft limit, initialized by uma_set_limit().  Ensure that early
187  * allocations don't trigger a wakeup of the reclaim thread.
188  */
189 unsigned long uma_kmem_limit = LONG_MAX;
190 SYSCTL_ULONG(_vm, OID_AUTO, uma_kmem_limit, CTLFLAG_RD, &uma_kmem_limit, 0,
191     "UMA kernel memory soft limit");
192 unsigned long uma_kmem_total;
193 SYSCTL_ULONG(_vm, OID_AUTO, uma_kmem_total, CTLFLAG_RD, &uma_kmem_total, 0,
194     "UMA kernel memory usage");
195 
196 /* Is the VM done starting up? */
197 static enum {
198 	BOOT_COLD,
199 	BOOT_KVA,
200 	BOOT_PCPU,
201 	BOOT_RUNNING,
202 	BOOT_SHUTDOWN,
203 } booted = BOOT_COLD;
204 
205 /*
206  * This is the handle used to schedule events that need to happen
207  * outside of the allocation fast path.
208  */
209 static struct callout uma_callout;
210 #define	UMA_TIMEOUT	20		/* Seconds for callout interval. */
211 
212 /*
213  * This structure is passed as the zone ctor arg so that I don't have to create
214  * a special allocation function just for zones.
215  */
216 struct uma_zctor_args {
217 	const char *name;
218 	size_t size;
219 	uma_ctor ctor;
220 	uma_dtor dtor;
221 	uma_init uminit;
222 	uma_fini fini;
223 	uma_import import;
224 	uma_release release;
225 	void *arg;
226 	uma_keg_t keg;
227 	int align;
228 	uint32_t flags;
229 };
230 
231 struct uma_kctor_args {
232 	uma_zone_t zone;
233 	size_t size;
234 	uma_init uminit;
235 	uma_fini fini;
236 	int align;
237 	uint32_t flags;
238 };
239 
240 struct uma_bucket_zone {
241 	uma_zone_t	ubz_zone;
242 	const char	*ubz_name;
243 	int		ubz_entries;	/* Number of items it can hold. */
244 	int		ubz_maxsize;	/* Maximum allocation size per-item. */
245 };
246 
247 /*
248  * Compute the actual number of bucket entries to pack them in power
249  * of two sizes for more efficient space utilization.
250  */
251 #define	BUCKET_SIZE(n)						\
252     (((sizeof(void *) * (n)) - sizeof(struct uma_bucket)) / sizeof(void *))
253 
254 #define	BUCKET_MAX	BUCKET_SIZE(256)
255 
256 struct uma_bucket_zone bucket_zones[] = {
257 	/* Literal bucket sizes. */
258 	{ NULL, "2 Bucket", 2, 4096 },
259 	{ NULL, "4 Bucket", 4, 3072 },
260 	{ NULL, "8 Bucket", 8, 2048 },
261 	{ NULL, "16 Bucket", 16, 1024 },
262 	/* Rounded down power of 2 sizes for efficiency. */
263 	{ NULL, "32 Bucket", BUCKET_SIZE(32), 512 },
264 	{ NULL, "64 Bucket", BUCKET_SIZE(64), 256 },
265 	{ NULL, "128 Bucket", BUCKET_SIZE(128), 128 },
266 	{ NULL, "256 Bucket", BUCKET_SIZE(256), 64 },
267 	{ NULL, NULL, 0}
268 };
269 
270 /*
271  * Flags and enumerations to be passed to internal functions.
272  */
273 enum zfreeskip {
274 	SKIP_NONE =	0,
275 	SKIP_CNT =	0x00000001,
276 	SKIP_DTOR =	0x00010000,
277 	SKIP_FINI =	0x00020000,
278 };
279 
280 /* Prototypes.. */
281 
282 void	uma_startup1(vm_offset_t);
283 void	uma_startup2(void);
284 
285 static void *noobj_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
286 static void *page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
287 static void *pcpu_page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
288 static void *startup_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
289 static void *contig_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
290 static void page_free(void *, vm_size_t, uint8_t);
291 static void pcpu_page_free(void *, vm_size_t, uint8_t);
292 static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int, int, int);
293 static void cache_drain(uma_zone_t);
294 static void bucket_drain(uma_zone_t, uma_bucket_t);
295 static void bucket_cache_reclaim(uma_zone_t zone, bool, int);
296 static bool bucket_cache_reclaim_domain(uma_zone_t, bool, bool, int);
297 static int keg_ctor(void *, int, void *, int);
298 static void keg_dtor(void *, int, void *);
299 static void keg_drain(uma_keg_t keg, int domain);
300 static int zone_ctor(void *, int, void *, int);
301 static void zone_dtor(void *, int, void *);
302 static inline void item_dtor(uma_zone_t zone, void *item, int size,
303     void *udata, enum zfreeskip skip);
304 static int zero_init(void *, int, int);
305 static void zone_free_bucket(uma_zone_t zone, uma_bucket_t bucket, void *udata,
306     int itemdomain, bool ws);
307 static void zone_foreach(void (*zfunc)(uma_zone_t, void *), void *);
308 static void zone_foreach_unlocked(void (*zfunc)(uma_zone_t, void *), void *);
309 static void zone_timeout(uma_zone_t zone, void *);
310 static int hash_alloc(struct uma_hash *, u_int);
311 static int hash_expand(struct uma_hash *, struct uma_hash *);
312 static void hash_free(struct uma_hash *hash);
313 static void uma_timeout(void *);
314 static void uma_shutdown(void);
315 static void *zone_alloc_item(uma_zone_t, void *, int, int);
316 static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip);
317 static int zone_alloc_limit(uma_zone_t zone, int count, int flags);
318 static void zone_free_limit(uma_zone_t zone, int count);
319 static void bucket_enable(void);
320 static void bucket_init(void);
321 static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int);
322 static void bucket_free(uma_zone_t zone, uma_bucket_t, void *);
323 static void bucket_zone_drain(int domain);
324 static uma_bucket_t zone_alloc_bucket(uma_zone_t, void *, int, int);
325 static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
326 static void slab_free_item(uma_zone_t zone, uma_slab_t slab, void *item);
327 static size_t slab_sizeof(int nitems);
328 static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
329     uma_fini fini, int align, uint32_t flags);
330 static int zone_import(void *, void **, int, int, int);
331 static void zone_release(void *, void **, int);
332 static bool cache_alloc(uma_zone_t, uma_cache_t, void *, int);
333 static bool cache_free(uma_zone_t, uma_cache_t, void *, void *, int);
334 
335 static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
336 static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);
337 static int sysctl_handle_uma_zone_allocs(SYSCTL_HANDLER_ARGS);
338 static int sysctl_handle_uma_zone_frees(SYSCTL_HANDLER_ARGS);
339 static int sysctl_handle_uma_zone_flags(SYSCTL_HANDLER_ARGS);
340 static int sysctl_handle_uma_slab_efficiency(SYSCTL_HANDLER_ARGS);
341 static int sysctl_handle_uma_zone_items(SYSCTL_HANDLER_ARGS);
342 
343 static uint64_t uma_zone_get_allocs(uma_zone_t zone);
344 
345 static SYSCTL_NODE(_vm, OID_AUTO, debug, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
346     "Memory allocation debugging");
347 
348 #ifdef INVARIANTS
349 static uint64_t uma_keg_get_allocs(uma_keg_t zone);
350 static inline struct noslabbits *slab_dbg_bits(uma_slab_t slab, uma_keg_t keg);
351 
352 static bool uma_dbg_kskip(uma_keg_t keg, void *mem);
353 static bool uma_dbg_zskip(uma_zone_t zone, void *mem);
354 static void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item);
355 static void uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item);
356 
357 static u_int dbg_divisor = 1;
358 SYSCTL_UINT(_vm_debug, OID_AUTO, divisor,
359     CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &dbg_divisor, 0,
360     "Debug & thrash every this item in memory allocator");
361 
362 static counter_u64_t uma_dbg_cnt = EARLY_COUNTER;
363 static counter_u64_t uma_skip_cnt = EARLY_COUNTER;
364 SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, trashed, CTLFLAG_RD,
365     &uma_dbg_cnt, "memory items debugged");
366 SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, skipped, CTLFLAG_RD,
367     &uma_skip_cnt, "memory items skipped, not debugged");
368 #endif
369 
370 SYSCTL_NODE(_vm, OID_AUTO, uma, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
371     "Universal Memory Allocator");
372 
373 SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLFLAG_MPSAFE|CTLTYPE_INT,
374     0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones");
375 
376 SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLFLAG_MPSAFE|CTLTYPE_STRUCT,
377     0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");
378 
379 static int zone_warnings = 1;
380 SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RWTUN, &zone_warnings, 0,
381     "Warn when UMA zones becomes full");
382 
383 static int multipage_slabs = 1;
384 TUNABLE_INT("vm.debug.uma_multipage_slabs", &multipage_slabs);
385 SYSCTL_INT(_vm_debug, OID_AUTO, uma_multipage_slabs,
386     CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &multipage_slabs, 0,
387     "UMA may choose larger slab sizes for better efficiency");
388 
389 /*
390  * Select the slab zone for an offpage slab with the given maximum item count.
391  */
392 static inline uma_zone_t
393 slabzone(int ipers)
394 {
395 
396 	return (slabzones[ipers > SLABZONE0_SETSIZE]);
397 }
398 
399 /*
400  * This routine checks to see whether or not it's safe to enable buckets.
401  */
402 static void
403 bucket_enable(void)
404 {
405 
406 	KASSERT(booted >= BOOT_KVA, ("Bucket enable before init"));
407 	bucketdisable = vm_page_count_min();
408 }
409 
410 /*
411  * Initialize bucket_zones, the array of zones of buckets of various sizes.
412  *
413  * For each zone, calculate the memory required for each bucket, consisting
414  * of the header and an array of pointers.
415  */
416 static void
417 bucket_init(void)
418 {
419 	struct uma_bucket_zone *ubz;
420 	int size;
421 
422 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) {
423 		size = roundup(sizeof(struct uma_bucket), sizeof(void *));
424 		size += sizeof(void *) * ubz->ubz_entries;
425 		ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
426 		    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
427 		    UMA_ZONE_MTXCLASS | UMA_ZFLAG_BUCKET |
428 		    UMA_ZONE_FIRSTTOUCH);
429 	}
430 }
431 
432 /*
433  * Given a desired number of entries for a bucket, return the zone from which
434  * to allocate the bucket.
435  */
436 static struct uma_bucket_zone *
437 bucket_zone_lookup(int entries)
438 {
439 	struct uma_bucket_zone *ubz;
440 
441 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
442 		if (ubz->ubz_entries >= entries)
443 			return (ubz);
444 	ubz--;
445 	return (ubz);
446 }
447 
448 static int
449 bucket_select(int size)
450 {
451 	struct uma_bucket_zone *ubz;
452 
453 	ubz = &bucket_zones[0];
454 	if (size > ubz->ubz_maxsize)
455 		return MAX((ubz->ubz_maxsize * ubz->ubz_entries) / size, 1);
456 
457 	for (; ubz->ubz_entries != 0; ubz++)
458 		if (ubz->ubz_maxsize < size)
459 			break;
460 	ubz--;
461 	return (ubz->ubz_entries);
462 }
463 
464 static uma_bucket_t
465 bucket_alloc(uma_zone_t zone, void *udata, int flags)
466 {
467 	struct uma_bucket_zone *ubz;
468 	uma_bucket_t bucket;
469 
470 	/*
471 	 * Don't allocate buckets early in boot.
472 	 */
473 	if (__predict_false(booted < BOOT_KVA))
474 		return (NULL);
475 
476 	/*
477 	 * To limit bucket recursion we store the original zone flags
478 	 * in a cookie passed via zalloc_arg/zfree_arg.  This allows the
479 	 * NOVM flag to persist even through deep recursions.  We also
480 	 * store ZFLAG_BUCKET once we have recursed attempting to allocate
481 	 * a bucket for a bucket zone so we do not allow infinite bucket
482 	 * recursion.  This cookie will even persist to frees of unused
483 	 * buckets via the allocation path or bucket allocations in the
484 	 * free path.
485 	 */
486 	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
487 		udata = (void *)(uintptr_t)zone->uz_flags;
488 	else {
489 		if ((uintptr_t)udata & UMA_ZFLAG_BUCKET)
490 			return (NULL);
491 		udata = (void *)((uintptr_t)udata | UMA_ZFLAG_BUCKET);
492 	}
493 	if (((uintptr_t)udata & UMA_ZONE_VM) != 0)
494 		flags |= M_NOVM;
495 	ubz = bucket_zone_lookup(atomic_load_16(&zone->uz_bucket_size));
496 	if (ubz->ubz_zone == zone && (ubz + 1)->ubz_entries != 0)
497 		ubz++;
498 	bucket = uma_zalloc_arg(ubz->ubz_zone, udata, flags);
499 	if (bucket) {
500 #ifdef INVARIANTS
501 		bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
502 #endif
503 		bucket->ub_cnt = 0;
504 		bucket->ub_entries = min(ubz->ubz_entries,
505 		    zone->uz_bucket_size_max);
506 		bucket->ub_seq = SMR_SEQ_INVALID;
507 		CTR3(KTR_UMA, "bucket_alloc: zone %s(%p) allocated bucket %p",
508 		    zone->uz_name, zone, bucket);
509 	}
510 
511 	return (bucket);
512 }
513 
514 static void
515 bucket_free(uma_zone_t zone, uma_bucket_t bucket, void *udata)
516 {
517 	struct uma_bucket_zone *ubz;
518 
519 	if (bucket->ub_cnt != 0)
520 		bucket_drain(zone, bucket);
521 
522 	KASSERT(bucket->ub_cnt == 0,
523 	    ("bucket_free: Freeing a non free bucket."));
524 	KASSERT(bucket->ub_seq == SMR_SEQ_INVALID,
525 	    ("bucket_free: Freeing an SMR bucket."));
526 	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
527 		udata = (void *)(uintptr_t)zone->uz_flags;
528 	ubz = bucket_zone_lookup(bucket->ub_entries);
529 	uma_zfree_arg(ubz->ubz_zone, bucket, udata);
530 }
531 
532 static void
533 bucket_zone_drain(int domain)
534 {
535 	struct uma_bucket_zone *ubz;
536 
537 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
538 		uma_zone_reclaim_domain(ubz->ubz_zone, UMA_RECLAIM_DRAIN,
539 		    domain);
540 }
541 
542 #ifdef KASAN
543 _Static_assert(UMA_SMALLEST_UNIT % KASAN_SHADOW_SCALE == 0,
544     "Base UMA allocation size not a multiple of the KASAN scale factor");
545 
546 static void
547 kasan_mark_item_valid(uma_zone_t zone, void *item)
548 {
549 	void *pcpu_item;
550 	size_t sz, rsz;
551 	int i;
552 
553 	if ((zone->uz_flags & UMA_ZONE_NOKASAN) != 0)
554 		return;
555 
556 	sz = zone->uz_size;
557 	rsz = roundup2(sz, KASAN_SHADOW_SCALE);
558 	if ((zone->uz_flags & UMA_ZONE_PCPU) == 0) {
559 		kasan_mark(item, sz, rsz, 0);
560 	} else {
561 		pcpu_item = zpcpu_base_to_offset(item);
562 		for (i = 0; i <= mp_maxid; i++)
563 			kasan_mark(zpcpu_get_cpu(pcpu_item, i), sz, rsz, 0);
564 	}
565 }
566 
567 static void
568 kasan_mark_item_invalid(uma_zone_t zone, void *item)
569 {
570 	void *pcpu_item;
571 	size_t sz;
572 	int i;
573 
574 	if ((zone->uz_flags & UMA_ZONE_NOKASAN) != 0)
575 		return;
576 
577 	sz = roundup2(zone->uz_size, KASAN_SHADOW_SCALE);
578 	if ((zone->uz_flags & UMA_ZONE_PCPU) == 0) {
579 		kasan_mark(item, 0, sz, KASAN_UMA_FREED);
580 	} else {
581 		pcpu_item = zpcpu_base_to_offset(item);
582 		for (i = 0; i <= mp_maxid; i++)
583 			kasan_mark(zpcpu_get_cpu(pcpu_item, i), 0, sz, 0);
584 	}
585 }
586 
587 static void
588 kasan_mark_slab_valid(uma_keg_t keg, void *mem)
589 {
590 	size_t sz;
591 
592 	if ((keg->uk_flags & UMA_ZONE_NOKASAN) == 0) {
593 		sz = keg->uk_ppera * PAGE_SIZE;
594 		kasan_mark(mem, sz, sz, 0);
595 	}
596 }
597 
598 static void
599 kasan_mark_slab_invalid(uma_keg_t keg, void *mem)
600 {
601 	size_t sz;
602 
603 	if ((keg->uk_flags & UMA_ZONE_NOKASAN) == 0) {
604 		if ((keg->uk_flags & UMA_ZFLAG_OFFPAGE) != 0)
605 			sz = keg->uk_ppera * PAGE_SIZE;
606 		else
607 			sz = keg->uk_pgoff;
608 		kasan_mark(mem, 0, sz, KASAN_UMA_FREED);
609 	}
610 }
611 #else /* !KASAN */
612 static void
613 kasan_mark_item_valid(uma_zone_t zone __unused, void *item __unused)
614 {
615 }
616 
617 static void
618 kasan_mark_item_invalid(uma_zone_t zone __unused, void *item __unused)
619 {
620 }
621 
622 static void
623 kasan_mark_slab_valid(uma_keg_t keg __unused, void *mem __unused)
624 {
625 }
626 
627 static void
628 kasan_mark_slab_invalid(uma_keg_t keg __unused, void *mem __unused)
629 {
630 }
631 #endif /* KASAN */
632 
633 /*
634  * Acquire the domain lock and record contention.
635  */
636 static uma_zone_domain_t
637 zone_domain_lock(uma_zone_t zone, int domain)
638 {
639 	uma_zone_domain_t zdom;
640 	bool lockfail;
641 
642 	zdom = ZDOM_GET(zone, domain);
643 	lockfail = false;
644 	if (ZDOM_OWNED(zdom))
645 		lockfail = true;
646 	ZDOM_LOCK(zdom);
647 	/* This is unsynchronized.  The counter does not need to be precise. */
648 	if (lockfail && zone->uz_bucket_size < zone->uz_bucket_size_max)
649 		zone->uz_bucket_size++;
650 	return (zdom);
651 }
652 
653 /*
654  * Search for the domain with the least cached items and return it if it
655  * is out of balance with the preferred domain.
656  */
657 static __noinline int
658 zone_domain_lowest(uma_zone_t zone, int pref)
659 {
660 	long least, nitems, prefitems;
661 	int domain;
662 	int i;
663 
664 	prefitems = least = LONG_MAX;
665 	domain = 0;
666 	for (i = 0; i < vm_ndomains; i++) {
667 		nitems = ZDOM_GET(zone, i)->uzd_nitems;
668 		if (nitems < least) {
669 			domain = i;
670 			least = nitems;
671 		}
672 		if (domain == pref)
673 			prefitems = nitems;
674 	}
675 	if (prefitems < least * 2)
676 		return (pref);
677 
678 	return (domain);
679 }
680 
681 /*
682  * Search for the domain with the most cached items and return it or the
683  * preferred domain if it has enough to proceed.
684  */
685 static __noinline int
686 zone_domain_highest(uma_zone_t zone, int pref)
687 {
688 	long most, nitems;
689 	int domain;
690 	int i;
691 
692 	if (ZDOM_GET(zone, pref)->uzd_nitems > BUCKET_MAX)
693 		return (pref);
694 
695 	most = 0;
696 	domain = 0;
697 	for (i = 0; i < vm_ndomains; i++) {
698 		nitems = ZDOM_GET(zone, i)->uzd_nitems;
699 		if (nitems > most) {
700 			domain = i;
701 			most = nitems;
702 		}
703 	}
704 
705 	return (domain);
706 }
707 
708 /*
709  * Set the maximum imax value.
710  */
711 static void
712 zone_domain_imax_set(uma_zone_domain_t zdom, int nitems)
713 {
714 	long old;
715 
716 	old = zdom->uzd_imax;
717 	do {
718 		if (old >= nitems)
719 			return;
720 	} while (atomic_fcmpset_long(&zdom->uzd_imax, &old, nitems) == 0);
721 
722 	/*
723 	 * We are at new maximum, so do the last WSS update for the old
724 	 * bimin and prepare to measure next allocation batch.
725 	 */
726 	if (zdom->uzd_wss < old - zdom->uzd_bimin)
727 		zdom->uzd_wss = old - zdom->uzd_bimin;
728 	zdom->uzd_bimin = nitems;
729 }
730 
731 /*
732  * Attempt to satisfy an allocation by retrieving a full bucket from one of the
733  * zone's caches.  If a bucket is found the zone is not locked on return.
734  */
735 static uma_bucket_t
736 zone_fetch_bucket(uma_zone_t zone, uma_zone_domain_t zdom, bool reclaim)
737 {
738 	uma_bucket_t bucket;
739 	long cnt;
740 	int i;
741 	bool dtor = false;
742 
743 	ZDOM_LOCK_ASSERT(zdom);
744 
745 	if ((bucket = STAILQ_FIRST(&zdom->uzd_buckets)) == NULL)
746 		return (NULL);
747 
748 	/* SMR Buckets can not be re-used until readers expire. */
749 	if ((zone->uz_flags & UMA_ZONE_SMR) != 0 &&
750 	    bucket->ub_seq != SMR_SEQ_INVALID) {
751 		if (!smr_poll(zone->uz_smr, bucket->ub_seq, false))
752 			return (NULL);
753 		bucket->ub_seq = SMR_SEQ_INVALID;
754 		dtor = (zone->uz_dtor != NULL) || UMA_ALWAYS_CTORDTOR;
755 		if (STAILQ_NEXT(bucket, ub_link) != NULL)
756 			zdom->uzd_seq = STAILQ_NEXT(bucket, ub_link)->ub_seq;
757 	}
758 	STAILQ_REMOVE_HEAD(&zdom->uzd_buckets, ub_link);
759 
760 	KASSERT(zdom->uzd_nitems >= bucket->ub_cnt,
761 	    ("%s: item count underflow (%ld, %d)",
762 	    __func__, zdom->uzd_nitems, bucket->ub_cnt));
763 	KASSERT(bucket->ub_cnt > 0,
764 	    ("%s: empty bucket in bucket cache", __func__));
765 	zdom->uzd_nitems -= bucket->ub_cnt;
766 
767 	if (reclaim) {
768 		/*
769 		 * Shift the bounds of the current WSS interval to avoid
770 		 * perturbing the estimates.
771 		 */
772 		cnt = lmin(zdom->uzd_bimin, bucket->ub_cnt);
773 		atomic_subtract_long(&zdom->uzd_imax, cnt);
774 		zdom->uzd_bimin -= cnt;
775 		zdom->uzd_imin -= lmin(zdom->uzd_imin, bucket->ub_cnt);
776 		if (zdom->uzd_limin >= bucket->ub_cnt) {
777 			zdom->uzd_limin -= bucket->ub_cnt;
778 		} else {
779 			zdom->uzd_limin = 0;
780 			zdom->uzd_timin = 0;
781 		}
782 	} else if (zdom->uzd_bimin > zdom->uzd_nitems) {
783 		zdom->uzd_bimin = zdom->uzd_nitems;
784 		if (zdom->uzd_imin > zdom->uzd_nitems)
785 			zdom->uzd_imin = zdom->uzd_nitems;
786 	}
787 
788 	ZDOM_UNLOCK(zdom);
789 	if (dtor)
790 		for (i = 0; i < bucket->ub_cnt; i++)
791 			item_dtor(zone, bucket->ub_bucket[i], zone->uz_size,
792 			    NULL, SKIP_NONE);
793 
794 	return (bucket);
795 }
796 
797 /*
798  * Insert a full bucket into the specified cache.  The "ws" parameter indicates
799  * whether the bucket's contents should be counted as part of the zone's working
800  * set.  The bucket may be freed if it exceeds the bucket limit.
801  */
802 static void
803 zone_put_bucket(uma_zone_t zone, int domain, uma_bucket_t bucket, void *udata,
804     const bool ws)
805 {
806 	uma_zone_domain_t zdom;
807 
808 	/* We don't cache empty buckets.  This can happen after a reclaim. */
809 	if (bucket->ub_cnt == 0)
810 		goto out;
811 	zdom = zone_domain_lock(zone, domain);
812 
813 	/*
814 	 * Conditionally set the maximum number of items.
815 	 */
816 	zdom->uzd_nitems += bucket->ub_cnt;
817 	if (__predict_true(zdom->uzd_nitems < zone->uz_bucket_max)) {
818 		if (ws) {
819 			zone_domain_imax_set(zdom, zdom->uzd_nitems);
820 		} else {
821 			/*
822 			 * Shift the bounds of the current WSS interval to
823 			 * avoid perturbing the estimates.
824 			 */
825 			atomic_add_long(&zdom->uzd_imax, bucket->ub_cnt);
826 			zdom->uzd_imin += bucket->ub_cnt;
827 			zdom->uzd_bimin += bucket->ub_cnt;
828 			zdom->uzd_limin += bucket->ub_cnt;
829 		}
830 		if (STAILQ_EMPTY(&zdom->uzd_buckets))
831 			zdom->uzd_seq = bucket->ub_seq;
832 
833 		/*
834 		 * Try to promote reuse of recently used items.  For items
835 		 * protected by SMR, try to defer reuse to minimize polling.
836 		 */
837 		if (bucket->ub_seq == SMR_SEQ_INVALID)
838 			STAILQ_INSERT_HEAD(&zdom->uzd_buckets, bucket, ub_link);
839 		else
840 			STAILQ_INSERT_TAIL(&zdom->uzd_buckets, bucket, ub_link);
841 		ZDOM_UNLOCK(zdom);
842 		return;
843 	}
844 	zdom->uzd_nitems -= bucket->ub_cnt;
845 	ZDOM_UNLOCK(zdom);
846 out:
847 	bucket_free(zone, bucket, udata);
848 }
849 
850 /* Pops an item out of a per-cpu cache bucket. */
851 static inline void *
852 cache_bucket_pop(uma_cache_t cache, uma_cache_bucket_t bucket)
853 {
854 	void *item;
855 
856 	CRITICAL_ASSERT(curthread);
857 
858 	bucket->ucb_cnt--;
859 	item = bucket->ucb_bucket->ub_bucket[bucket->ucb_cnt];
860 #ifdef INVARIANTS
861 	bucket->ucb_bucket->ub_bucket[bucket->ucb_cnt] = NULL;
862 	KASSERT(item != NULL, ("uma_zalloc: Bucket pointer mangled."));
863 #endif
864 	cache->uc_allocs++;
865 
866 	return (item);
867 }
868 
869 /* Pushes an item into a per-cpu cache bucket. */
870 static inline void
871 cache_bucket_push(uma_cache_t cache, uma_cache_bucket_t bucket, void *item)
872 {
873 
874 	CRITICAL_ASSERT(curthread);
875 	KASSERT(bucket->ucb_bucket->ub_bucket[bucket->ucb_cnt] == NULL,
876 	    ("uma_zfree: Freeing to non free bucket index."));
877 
878 	bucket->ucb_bucket->ub_bucket[bucket->ucb_cnt] = item;
879 	bucket->ucb_cnt++;
880 	cache->uc_frees++;
881 }
882 
883 /*
884  * Unload a UMA bucket from a per-cpu cache.
885  */
886 static inline uma_bucket_t
887 cache_bucket_unload(uma_cache_bucket_t bucket)
888 {
889 	uma_bucket_t b;
890 
891 	b = bucket->ucb_bucket;
892 	if (b != NULL) {
893 		MPASS(b->ub_entries == bucket->ucb_entries);
894 		b->ub_cnt = bucket->ucb_cnt;
895 		bucket->ucb_bucket = NULL;
896 		bucket->ucb_entries = bucket->ucb_cnt = 0;
897 	}
898 
899 	return (b);
900 }
901 
902 static inline uma_bucket_t
903 cache_bucket_unload_alloc(uma_cache_t cache)
904 {
905 
906 	return (cache_bucket_unload(&cache->uc_allocbucket));
907 }
908 
909 static inline uma_bucket_t
910 cache_bucket_unload_free(uma_cache_t cache)
911 {
912 
913 	return (cache_bucket_unload(&cache->uc_freebucket));
914 }
915 
916 static inline uma_bucket_t
917 cache_bucket_unload_cross(uma_cache_t cache)
918 {
919 
920 	return (cache_bucket_unload(&cache->uc_crossbucket));
921 }
922 
923 /*
924  * Load a bucket into a per-cpu cache bucket.
925  */
926 static inline void
927 cache_bucket_load(uma_cache_bucket_t bucket, uma_bucket_t b)
928 {
929 
930 	CRITICAL_ASSERT(curthread);
931 	MPASS(bucket->ucb_bucket == NULL);
932 	MPASS(b->ub_seq == SMR_SEQ_INVALID);
933 
934 	bucket->ucb_bucket = b;
935 	bucket->ucb_cnt = b->ub_cnt;
936 	bucket->ucb_entries = b->ub_entries;
937 }
938 
939 static inline void
940 cache_bucket_load_alloc(uma_cache_t cache, uma_bucket_t b)
941 {
942 
943 	cache_bucket_load(&cache->uc_allocbucket, b);
944 }
945 
946 static inline void
947 cache_bucket_load_free(uma_cache_t cache, uma_bucket_t b)
948 {
949 
950 	cache_bucket_load(&cache->uc_freebucket, b);
951 }
952 
953 #ifdef NUMA
954 static inline void
955 cache_bucket_load_cross(uma_cache_t cache, uma_bucket_t b)
956 {
957 
958 	cache_bucket_load(&cache->uc_crossbucket, b);
959 }
960 #endif
961 
962 /*
963  * Copy and preserve ucb_spare.
964  */
965 static inline void
966 cache_bucket_copy(uma_cache_bucket_t b1, uma_cache_bucket_t b2)
967 {
968 
969 	b1->ucb_bucket = b2->ucb_bucket;
970 	b1->ucb_entries = b2->ucb_entries;
971 	b1->ucb_cnt = b2->ucb_cnt;
972 }
973 
974 /*
975  * Swap two cache buckets.
976  */
977 static inline void
978 cache_bucket_swap(uma_cache_bucket_t b1, uma_cache_bucket_t b2)
979 {
980 	struct uma_cache_bucket b3;
981 
982 	CRITICAL_ASSERT(curthread);
983 
984 	cache_bucket_copy(&b3, b1);
985 	cache_bucket_copy(b1, b2);
986 	cache_bucket_copy(b2, &b3);
987 }
988 
989 /*
990  * Attempt to fetch a bucket from a zone on behalf of the current cpu cache.
991  */
992 static uma_bucket_t
993 cache_fetch_bucket(uma_zone_t zone, uma_cache_t cache, int domain)
994 {
995 	uma_zone_domain_t zdom;
996 	uma_bucket_t bucket;
997 
998 	/*
999 	 * Avoid the lock if possible.
1000 	 */
1001 	zdom = ZDOM_GET(zone, domain);
1002 	if (zdom->uzd_nitems == 0)
1003 		return (NULL);
1004 
1005 	if ((cache_uz_flags(cache) & UMA_ZONE_SMR) != 0 &&
1006 	    !smr_poll(zone->uz_smr, zdom->uzd_seq, false))
1007 		return (NULL);
1008 
1009 	/*
1010 	 * Check the zone's cache of buckets.
1011 	 */
1012 	zdom = zone_domain_lock(zone, domain);
1013 	if ((bucket = zone_fetch_bucket(zone, zdom, false)) != NULL)
1014 		return (bucket);
1015 	ZDOM_UNLOCK(zdom);
1016 
1017 	return (NULL);
1018 }
1019 
1020 static void
1021 zone_log_warning(uma_zone_t zone)
1022 {
1023 	static const struct timeval warninterval = { 300, 0 };
1024 
1025 	if (!zone_warnings || zone->uz_warning == NULL)
1026 		return;
1027 
1028 	if (ratecheck(&zone->uz_ratecheck, &warninterval))
1029 		printf("[zone: %s] %s\n", zone->uz_name, zone->uz_warning);
1030 }
1031 
1032 static inline void
1033 zone_maxaction(uma_zone_t zone)
1034 {
1035 
1036 	if (zone->uz_maxaction.ta_func != NULL)
1037 		taskqueue_enqueue(taskqueue_thread, &zone->uz_maxaction);
1038 }
1039 
1040 /*
1041  * Routine called by timeout which is used to fire off some time interval
1042  * based calculations.  (stats, hash size, etc.)
1043  *
1044  * Arguments:
1045  *	arg   Unused
1046  *
1047  * Returns:
1048  *	Nothing
1049  */
1050 static void
1051 uma_timeout(void *unused)
1052 {
1053 	bucket_enable();
1054 	zone_foreach(zone_timeout, NULL);
1055 
1056 	/* Reschedule this event */
1057 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
1058 }
1059 
1060 /*
1061  * Update the working set size estimates for the zone's bucket cache.
1062  * The constants chosen here are somewhat arbitrary.
1063  */
1064 static void
1065 zone_domain_update_wss(uma_zone_domain_t zdom)
1066 {
1067 	long m;
1068 
1069 	ZDOM_LOCK_ASSERT(zdom);
1070 	MPASS(zdom->uzd_imax >= zdom->uzd_nitems);
1071 	MPASS(zdom->uzd_nitems >= zdom->uzd_bimin);
1072 	MPASS(zdom->uzd_bimin >= zdom->uzd_imin);
1073 
1074 	/*
1075 	 * Estimate WSS as modified moving average of biggest allocation
1076 	 * batches for each period over few minutes (UMA_TIMEOUT of 20s).
1077 	 */
1078 	zdom->uzd_wss = lmax(zdom->uzd_wss * 3 / 4,
1079 	    zdom->uzd_imax - zdom->uzd_bimin);
1080 
1081 	/*
1082 	 * Estimate longtime minimum item count as a combination of recent
1083 	 * minimum item count, adjusted by WSS for safety, and the modified
1084 	 * moving average over the last several hours (UMA_TIMEOUT of 20s).
1085 	 * timin measures time since limin tried to go negative, that means
1086 	 * we were dangerously close to or got out of cache.
1087 	 */
1088 	m = zdom->uzd_imin - zdom->uzd_wss;
1089 	if (m >= 0) {
1090 		if (zdom->uzd_limin >= m)
1091 			zdom->uzd_limin = m;
1092 		else
1093 			zdom->uzd_limin = (m + zdom->uzd_limin * 255) / 256;
1094 		zdom->uzd_timin++;
1095 	} else {
1096 		zdom->uzd_limin = 0;
1097 		zdom->uzd_timin = 0;
1098 	}
1099 
1100 	/* To reduce period edge effects on WSS keep half of the imax. */
1101 	atomic_subtract_long(&zdom->uzd_imax,
1102 	    (zdom->uzd_imax - zdom->uzd_nitems + 1) / 2);
1103 	zdom->uzd_imin = zdom->uzd_bimin = zdom->uzd_nitems;
1104 }
1105 
1106 /*
1107  * Routine to perform timeout driven calculations.  This expands the
1108  * hashes and does per cpu statistics aggregation.
1109  *
1110  *  Returns nothing.
1111  */
1112 static void
1113 zone_timeout(uma_zone_t zone, void *unused)
1114 {
1115 	uma_keg_t keg;
1116 	u_int slabs, pages;
1117 
1118 	if ((zone->uz_flags & UMA_ZFLAG_HASH) == 0)
1119 		goto trim;
1120 
1121 	keg = zone->uz_keg;
1122 
1123 	/*
1124 	 * Hash zones are non-numa by definition so the first domain
1125 	 * is the only one present.
1126 	 */
1127 	KEG_LOCK(keg, 0);
1128 	pages = keg->uk_domain[0].ud_pages;
1129 
1130 	/*
1131 	 * Expand the keg hash table.
1132 	 *
1133 	 * This is done if the number of slabs is larger than the hash size.
1134 	 * What I'm trying to do here is completely reduce collisions.  This
1135 	 * may be a little aggressive.  Should I allow for two collisions max?
1136 	 */
1137 	if ((slabs = pages / keg->uk_ppera) > keg->uk_hash.uh_hashsize) {
1138 		struct uma_hash newhash;
1139 		struct uma_hash oldhash;
1140 		int ret;
1141 
1142 		/*
1143 		 * This is so involved because allocating and freeing
1144 		 * while the keg lock is held will lead to deadlock.
1145 		 * I have to do everything in stages and check for
1146 		 * races.
1147 		 */
1148 		KEG_UNLOCK(keg, 0);
1149 		ret = hash_alloc(&newhash, 1 << fls(slabs));
1150 		KEG_LOCK(keg, 0);
1151 		if (ret) {
1152 			if (hash_expand(&keg->uk_hash, &newhash)) {
1153 				oldhash = keg->uk_hash;
1154 				keg->uk_hash = newhash;
1155 			} else
1156 				oldhash = newhash;
1157 
1158 			KEG_UNLOCK(keg, 0);
1159 			hash_free(&oldhash);
1160 			goto trim;
1161 		}
1162 	}
1163 	KEG_UNLOCK(keg, 0);
1164 
1165 trim:
1166 	/* Trim caches not used for a long time. */
1167 	for (int i = 0; i < vm_ndomains; i++) {
1168 		if (bucket_cache_reclaim_domain(zone, false, false, i) &&
1169 		    (zone->uz_flags & UMA_ZFLAG_CACHE) == 0)
1170 			keg_drain(zone->uz_keg, i);
1171 	}
1172 }
1173 
1174 /*
1175  * Allocate and zero fill the next sized hash table from the appropriate
1176  * backing store.
1177  *
1178  * Arguments:
1179  *	hash  A new hash structure with the old hash size in uh_hashsize
1180  *
1181  * Returns:
1182  *	1 on success and 0 on failure.
1183  */
1184 static int
1185 hash_alloc(struct uma_hash *hash, u_int size)
1186 {
1187 	size_t alloc;
1188 
1189 	KASSERT(powerof2(size), ("hash size must be power of 2"));
1190 	if (size > UMA_HASH_SIZE_INIT)  {
1191 		hash->uh_hashsize = size;
1192 		alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
1193 		hash->uh_slab_hash = malloc(alloc, M_UMAHASH, M_NOWAIT);
1194 	} else {
1195 		alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
1196 		hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
1197 		    UMA_ANYDOMAIN, M_WAITOK);
1198 		hash->uh_hashsize = UMA_HASH_SIZE_INIT;
1199 	}
1200 	if (hash->uh_slab_hash) {
1201 		bzero(hash->uh_slab_hash, alloc);
1202 		hash->uh_hashmask = hash->uh_hashsize - 1;
1203 		return (1);
1204 	}
1205 
1206 	return (0);
1207 }
1208 
1209 /*
1210  * Expands the hash table for HASH zones.  This is done from zone_timeout
1211  * to reduce collisions.  This must not be done in the regular allocation
1212  * path, otherwise, we can recurse on the vm while allocating pages.
1213  *
1214  * Arguments:
1215  *	oldhash  The hash you want to expand
1216  *	newhash  The hash structure for the new table
1217  *
1218  * Returns:
1219  *	Nothing
1220  *
1221  * Discussion:
1222  */
1223 static int
1224 hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
1225 {
1226 	uma_hash_slab_t slab;
1227 	u_int hval;
1228 	u_int idx;
1229 
1230 	if (!newhash->uh_slab_hash)
1231 		return (0);
1232 
1233 	if (oldhash->uh_hashsize >= newhash->uh_hashsize)
1234 		return (0);
1235 
1236 	/*
1237 	 * I need to investigate hash algorithms for resizing without a
1238 	 * full rehash.
1239 	 */
1240 
1241 	for (idx = 0; idx < oldhash->uh_hashsize; idx++)
1242 		while (!LIST_EMPTY(&oldhash->uh_slab_hash[idx])) {
1243 			slab = LIST_FIRST(&oldhash->uh_slab_hash[idx]);
1244 			LIST_REMOVE(slab, uhs_hlink);
1245 			hval = UMA_HASH(newhash, slab->uhs_data);
1246 			LIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
1247 			    slab, uhs_hlink);
1248 		}
1249 
1250 	return (1);
1251 }
1252 
1253 /*
1254  * Free the hash bucket to the appropriate backing store.
1255  *
1256  * Arguments:
1257  *	slab_hash  The hash bucket we're freeing
1258  *	hashsize   The number of entries in that hash bucket
1259  *
1260  * Returns:
1261  *	Nothing
1262  */
1263 static void
1264 hash_free(struct uma_hash *hash)
1265 {
1266 	if (hash->uh_slab_hash == NULL)
1267 		return;
1268 	if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
1269 		zone_free_item(hashzone, hash->uh_slab_hash, NULL, SKIP_NONE);
1270 	else
1271 		free(hash->uh_slab_hash, M_UMAHASH);
1272 }
1273 
1274 /*
1275  * Frees all outstanding items in a bucket
1276  *
1277  * Arguments:
1278  *	zone   The zone to free to, must be unlocked.
1279  *	bucket The free/alloc bucket with items.
1280  *
1281  * Returns:
1282  *	Nothing
1283  */
1284 static void
1285 bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
1286 {
1287 	int i;
1288 
1289 	if (bucket->ub_cnt == 0)
1290 		return;
1291 
1292 	if ((zone->uz_flags & UMA_ZONE_SMR) != 0 &&
1293 	    bucket->ub_seq != SMR_SEQ_INVALID) {
1294 		smr_wait(zone->uz_smr, bucket->ub_seq);
1295 		bucket->ub_seq = SMR_SEQ_INVALID;
1296 		for (i = 0; i < bucket->ub_cnt; i++)
1297 			item_dtor(zone, bucket->ub_bucket[i],
1298 			    zone->uz_size, NULL, SKIP_NONE);
1299 	}
1300 	if (zone->uz_fini)
1301 		for (i = 0; i < bucket->ub_cnt; i++) {
1302 			kasan_mark_item_valid(zone, bucket->ub_bucket[i]);
1303 			zone->uz_fini(bucket->ub_bucket[i], zone->uz_size);
1304 			kasan_mark_item_invalid(zone, bucket->ub_bucket[i]);
1305 		}
1306 	zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt);
1307 	if (zone->uz_max_items > 0)
1308 		zone_free_limit(zone, bucket->ub_cnt);
1309 #ifdef INVARIANTS
1310 	bzero(bucket->ub_bucket, sizeof(void *) * bucket->ub_cnt);
1311 #endif
1312 	bucket->ub_cnt = 0;
1313 }
1314 
1315 /*
1316  * Drains the per cpu caches for a zone.
1317  *
1318  * NOTE: This may only be called while the zone is being torn down, and not
1319  * during normal operation.  This is necessary in order that we do not have
1320  * to migrate CPUs to drain the per-CPU caches.
1321  *
1322  * Arguments:
1323  *	zone     The zone to drain, must be unlocked.
1324  *
1325  * Returns:
1326  *	Nothing
1327  */
1328 static void
1329 cache_drain(uma_zone_t zone)
1330 {
1331 	uma_cache_t cache;
1332 	uma_bucket_t bucket;
1333 	smr_seq_t seq;
1334 	int cpu;
1335 
1336 	/*
1337 	 * XXX: It is safe to not lock the per-CPU caches, because we're
1338 	 * tearing down the zone anyway.  I.e., there will be no further use
1339 	 * of the caches at this point.
1340 	 *
1341 	 * XXX: It would good to be able to assert that the zone is being
1342 	 * torn down to prevent improper use of cache_drain().
1343 	 */
1344 	seq = SMR_SEQ_INVALID;
1345 	if ((zone->uz_flags & UMA_ZONE_SMR) != 0)
1346 		seq = smr_advance(zone->uz_smr);
1347 	CPU_FOREACH(cpu) {
1348 		cache = &zone->uz_cpu[cpu];
1349 		bucket = cache_bucket_unload_alloc(cache);
1350 		if (bucket != NULL)
1351 			bucket_free(zone, bucket, NULL);
1352 		bucket = cache_bucket_unload_free(cache);
1353 		if (bucket != NULL) {
1354 			bucket->ub_seq = seq;
1355 			bucket_free(zone, bucket, NULL);
1356 		}
1357 		bucket = cache_bucket_unload_cross(cache);
1358 		if (bucket != NULL) {
1359 			bucket->ub_seq = seq;
1360 			bucket_free(zone, bucket, NULL);
1361 		}
1362 	}
1363 	bucket_cache_reclaim(zone, true, UMA_ANYDOMAIN);
1364 }
1365 
1366 static void
1367 cache_shrink(uma_zone_t zone, void *unused)
1368 {
1369 
1370 	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
1371 		return;
1372 
1373 	ZONE_LOCK(zone);
1374 	zone->uz_bucket_size =
1375 	    (zone->uz_bucket_size_min + zone->uz_bucket_size) / 2;
1376 	ZONE_UNLOCK(zone);
1377 }
1378 
1379 static void
1380 cache_drain_safe_cpu(uma_zone_t zone, void *unused)
1381 {
1382 	uma_cache_t cache;
1383 	uma_bucket_t b1, b2, b3;
1384 	int domain;
1385 
1386 	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
1387 		return;
1388 
1389 	b1 = b2 = b3 = NULL;
1390 	critical_enter();
1391 	cache = &zone->uz_cpu[curcpu];
1392 	domain = PCPU_GET(domain);
1393 	b1 = cache_bucket_unload_alloc(cache);
1394 
1395 	/*
1396 	 * Don't flush SMR zone buckets.  This leaves the zone without a
1397 	 * bucket and forces every free to synchronize().
1398 	 */
1399 	if ((zone->uz_flags & UMA_ZONE_SMR) == 0) {
1400 		b2 = cache_bucket_unload_free(cache);
1401 		b3 = cache_bucket_unload_cross(cache);
1402 	}
1403 	critical_exit();
1404 
1405 	if (b1 != NULL)
1406 		zone_free_bucket(zone, b1, NULL, domain, false);
1407 	if (b2 != NULL)
1408 		zone_free_bucket(zone, b2, NULL, domain, false);
1409 	if (b3 != NULL) {
1410 		/* Adjust the domain so it goes to zone_free_cross. */
1411 		domain = (domain + 1) % vm_ndomains;
1412 		zone_free_bucket(zone, b3, NULL, domain, false);
1413 	}
1414 }
1415 
1416 /*
1417  * Safely drain per-CPU caches of a zone(s) to alloc bucket.
1418  * This is an expensive call because it needs to bind to all CPUs
1419  * one by one and enter a critical section on each of them in order
1420  * to safely access their cache buckets.
1421  * Zone lock must not be held on call this function.
1422  */
1423 static void
1424 pcpu_cache_drain_safe(uma_zone_t zone)
1425 {
1426 	int cpu;
1427 
1428 	/*
1429 	 * Polite bucket sizes shrinking was not enough, shrink aggressively.
1430 	 */
1431 	if (zone)
1432 		cache_shrink(zone, NULL);
1433 	else
1434 		zone_foreach(cache_shrink, NULL);
1435 
1436 	CPU_FOREACH(cpu) {
1437 		thread_lock(curthread);
1438 		sched_bind(curthread, cpu);
1439 		thread_unlock(curthread);
1440 
1441 		if (zone)
1442 			cache_drain_safe_cpu(zone, NULL);
1443 		else
1444 			zone_foreach(cache_drain_safe_cpu, NULL);
1445 	}
1446 	thread_lock(curthread);
1447 	sched_unbind(curthread);
1448 	thread_unlock(curthread);
1449 }
1450 
1451 /*
1452  * Reclaim cached buckets from a zone.  All buckets are reclaimed if the caller
1453  * requested a drain, otherwise the per-domain caches are trimmed to either
1454  * estimated working set size.
1455  */
1456 static bool
1457 bucket_cache_reclaim_domain(uma_zone_t zone, bool drain, bool trim, int domain)
1458 {
1459 	uma_zone_domain_t zdom;
1460 	uma_bucket_t bucket;
1461 	long target;
1462 	bool done = false;
1463 
1464 	/*
1465 	 * The cross bucket is partially filled and not part of
1466 	 * the item count.  Reclaim it individually here.
1467 	 */
1468 	zdom = ZDOM_GET(zone, domain);
1469 	if ((zone->uz_flags & UMA_ZONE_SMR) == 0 || drain) {
1470 		ZONE_CROSS_LOCK(zone);
1471 		bucket = zdom->uzd_cross;
1472 		zdom->uzd_cross = NULL;
1473 		ZONE_CROSS_UNLOCK(zone);
1474 		if (bucket != NULL)
1475 			bucket_free(zone, bucket, NULL);
1476 	}
1477 
1478 	/*
1479 	 * If we were asked to drain the zone, we are done only once
1480 	 * this bucket cache is empty.  If trim, we reclaim items in
1481 	 * excess of the zone's estimated working set size.  Multiple
1482 	 * consecutive calls will shrink the WSS and so reclaim more.
1483 	 * If neither drain nor trim, then voluntarily reclaim 1/4
1484 	 * (to reduce first spike) of items not used for a long time.
1485 	 */
1486 	ZDOM_LOCK(zdom);
1487 	zone_domain_update_wss(zdom);
1488 	if (drain)
1489 		target = 0;
1490 	else if (trim)
1491 		target = zdom->uzd_wss;
1492 	else if (zdom->uzd_timin > 900 / UMA_TIMEOUT)
1493 		target = zdom->uzd_nitems - zdom->uzd_limin / 4;
1494 	else {
1495 		ZDOM_UNLOCK(zdom);
1496 		return (done);
1497 	}
1498 	while ((bucket = STAILQ_FIRST(&zdom->uzd_buckets)) != NULL &&
1499 	    zdom->uzd_nitems >= target + bucket->ub_cnt) {
1500 		bucket = zone_fetch_bucket(zone, zdom, true);
1501 		if (bucket == NULL)
1502 			break;
1503 		bucket_free(zone, bucket, NULL);
1504 		done = true;
1505 		ZDOM_LOCK(zdom);
1506 	}
1507 	ZDOM_UNLOCK(zdom);
1508 	return (done);
1509 }
1510 
1511 static void
1512 bucket_cache_reclaim(uma_zone_t zone, bool drain, int domain)
1513 {
1514 	int i;
1515 
1516 	/*
1517 	 * Shrink the zone bucket size to ensure that the per-CPU caches
1518 	 * don't grow too large.
1519 	 */
1520 	if (zone->uz_bucket_size > zone->uz_bucket_size_min)
1521 		zone->uz_bucket_size--;
1522 
1523 	if (domain != UMA_ANYDOMAIN &&
1524 	    (zone->uz_flags & UMA_ZONE_ROUNDROBIN) == 0) {
1525 		bucket_cache_reclaim_domain(zone, drain, true, domain);
1526 	} else {
1527 		for (i = 0; i < vm_ndomains; i++)
1528 			bucket_cache_reclaim_domain(zone, drain, true, i);
1529 	}
1530 }
1531 
1532 static void
1533 keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
1534 {
1535 	uint8_t *mem;
1536 	size_t size;
1537 	int i;
1538 	uint8_t flags;
1539 
1540 	CTR4(KTR_UMA, "keg_free_slab keg %s(%p) slab %p, returning %d bytes",
1541 	    keg->uk_name, keg, slab, PAGE_SIZE * keg->uk_ppera);
1542 
1543 	mem = slab_data(slab, keg);
1544 	size = PAGE_SIZE * keg->uk_ppera;
1545 
1546 	kasan_mark_slab_valid(keg, mem);
1547 	if (keg->uk_fini != NULL) {
1548 		for (i = start - 1; i > -1; i--)
1549 #ifdef INVARIANTS
1550 		/*
1551 		 * trash_fini implies that dtor was trash_dtor. trash_fini
1552 		 * would check that memory hasn't been modified since free,
1553 		 * which executed trash_dtor.
1554 		 * That's why we need to run uma_dbg_kskip() check here,
1555 		 * albeit we don't make skip check for other init/fini
1556 		 * invocations.
1557 		 */
1558 		if (!uma_dbg_kskip(keg, slab_item(slab, keg, i)) ||
1559 		    keg->uk_fini != trash_fini)
1560 #endif
1561 			keg->uk_fini(slab_item(slab, keg, i), keg->uk_size);
1562 	}
1563 	flags = slab->us_flags;
1564 	if (keg->uk_flags & UMA_ZFLAG_OFFPAGE) {
1565 		zone_free_item(slabzone(keg->uk_ipers), slab_tohashslab(slab),
1566 		    NULL, SKIP_NONE);
1567 	}
1568 	keg->uk_freef(mem, size, flags);
1569 	uma_total_dec(size);
1570 }
1571 
1572 static void
1573 keg_drain_domain(uma_keg_t keg, int domain)
1574 {
1575 	struct slabhead freeslabs;
1576 	uma_domain_t dom;
1577 	uma_slab_t slab, tmp;
1578 	uint32_t i, stofree, stokeep, partial;
1579 
1580 	dom = &keg->uk_domain[domain];
1581 	LIST_INIT(&freeslabs);
1582 
1583 	CTR4(KTR_UMA, "keg_drain %s(%p) domain %d free items: %u",
1584 	    keg->uk_name, keg, domain, dom->ud_free_items);
1585 
1586 	KEG_LOCK(keg, domain);
1587 
1588 	/*
1589 	 * Are the free items in partially allocated slabs sufficient to meet
1590 	 * the reserve? If not, compute the number of fully free slabs that must
1591 	 * be kept.
1592 	 */
1593 	partial = dom->ud_free_items - dom->ud_free_slabs * keg->uk_ipers;
1594 	if (partial < keg->uk_reserve) {
1595 		stokeep = min(dom->ud_free_slabs,
1596 		    howmany(keg->uk_reserve - partial, keg->uk_ipers));
1597 	} else {
1598 		stokeep = 0;
1599 	}
1600 	stofree = dom->ud_free_slabs - stokeep;
1601 
1602 	/*
1603 	 * Partition the free slabs into two sets: those that must be kept in
1604 	 * order to maintain the reserve, and those that may be released back to
1605 	 * the system.  Since one set may be much larger than the other,
1606 	 * populate the smaller of the two sets and swap them if necessary.
1607 	 */
1608 	for (i = min(stofree, stokeep); i > 0; i--) {
1609 		slab = LIST_FIRST(&dom->ud_free_slab);
1610 		LIST_REMOVE(slab, us_link);
1611 		LIST_INSERT_HEAD(&freeslabs, slab, us_link);
1612 	}
1613 	if (stofree > stokeep)
1614 		LIST_SWAP(&freeslabs, &dom->ud_free_slab, uma_slab, us_link);
1615 
1616 	if ((keg->uk_flags & UMA_ZFLAG_HASH) != 0) {
1617 		LIST_FOREACH(slab, &freeslabs, us_link)
1618 			UMA_HASH_REMOVE(&keg->uk_hash, slab);
1619 	}
1620 	dom->ud_free_items -= stofree * keg->uk_ipers;
1621 	dom->ud_free_slabs -= stofree;
1622 	dom->ud_pages -= stofree * keg->uk_ppera;
1623 	KEG_UNLOCK(keg, domain);
1624 
1625 	LIST_FOREACH_SAFE(slab, &freeslabs, us_link, tmp)
1626 		keg_free_slab(keg, slab, keg->uk_ipers);
1627 }
1628 
1629 /*
1630  * Frees pages from a keg back to the system.  This is done on demand from
1631  * the pageout daemon.
1632  *
1633  * Returns nothing.
1634  */
1635 static void
1636 keg_drain(uma_keg_t keg, int domain)
1637 {
1638 	int i;
1639 
1640 	if ((keg->uk_flags & UMA_ZONE_NOFREE) != 0)
1641 		return;
1642 	if (domain != UMA_ANYDOMAIN) {
1643 		keg_drain_domain(keg, domain);
1644 	} else {
1645 		for (i = 0; i < vm_ndomains; i++)
1646 			keg_drain_domain(keg, i);
1647 	}
1648 }
1649 
1650 static void
1651 zone_reclaim(uma_zone_t zone, int domain, int waitok, bool drain)
1652 {
1653 	/*
1654 	 * Count active reclaim operations in order to interlock with
1655 	 * zone_dtor(), which removes the zone from global lists before
1656 	 * attempting to reclaim items itself.
1657 	 *
1658 	 * The zone may be destroyed while sleeping, so only zone_dtor() should
1659 	 * specify M_WAITOK.
1660 	 */
1661 	ZONE_LOCK(zone);
1662 	if (waitok == M_WAITOK) {
1663 		while (zone->uz_reclaimers > 0)
1664 			msleep(zone, ZONE_LOCKPTR(zone), PVM, "zonedrain", 1);
1665 	}
1666 	zone->uz_reclaimers++;
1667 	ZONE_UNLOCK(zone);
1668 	bucket_cache_reclaim(zone, drain, domain);
1669 
1670 	if ((zone->uz_flags & UMA_ZFLAG_CACHE) == 0)
1671 		keg_drain(zone->uz_keg, domain);
1672 	ZONE_LOCK(zone);
1673 	zone->uz_reclaimers--;
1674 	if (zone->uz_reclaimers == 0)
1675 		wakeup(zone);
1676 	ZONE_UNLOCK(zone);
1677 }
1678 
1679 static void
1680 zone_drain(uma_zone_t zone, void *arg)
1681 {
1682 	int domain;
1683 
1684 	domain = (int)(uintptr_t)arg;
1685 	zone_reclaim(zone, domain, M_NOWAIT, true);
1686 }
1687 
1688 static void
1689 zone_trim(uma_zone_t zone, void *arg)
1690 {
1691 	int domain;
1692 
1693 	domain = (int)(uintptr_t)arg;
1694 	zone_reclaim(zone, domain, M_NOWAIT, false);
1695 }
1696 
1697 /*
1698  * Allocate a new slab for a keg and inserts it into the partial slab list.
1699  * The keg should be unlocked on entry.  If the allocation succeeds it will
1700  * be locked on return.
1701  *
1702  * Arguments:
1703  *	flags   Wait flags for the item initialization routine
1704  *	aflags  Wait flags for the slab allocation
1705  *
1706  * Returns:
1707  *	The slab that was allocated or NULL if there is no memory and the
1708  *	caller specified M_NOWAIT.
1709  */
1710 static uma_slab_t
1711 keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int domain, int flags,
1712     int aflags)
1713 {
1714 	uma_domain_t dom;
1715 	uma_slab_t slab;
1716 	unsigned long size;
1717 	uint8_t *mem;
1718 	uint8_t sflags;
1719 	int i;
1720 
1721 	KASSERT(domain >= 0 && domain < vm_ndomains,
1722 	    ("keg_alloc_slab: domain %d out of range", domain));
1723 
1724 	slab = NULL;
1725 	mem = NULL;
1726 	if (keg->uk_flags & UMA_ZFLAG_OFFPAGE) {
1727 		uma_hash_slab_t hslab;
1728 		hslab = zone_alloc_item(slabzone(keg->uk_ipers), NULL,
1729 		    domain, aflags);
1730 		if (hslab == NULL)
1731 			goto fail;
1732 		slab = &hslab->uhs_slab;
1733 	}
1734 
1735 	/*
1736 	 * This reproduces the old vm_zone behavior of zero filling pages the
1737 	 * first time they are added to a zone.
1738 	 *
1739 	 * Malloced items are zeroed in uma_zalloc.
1740 	 */
1741 
1742 	if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
1743 		aflags |= M_ZERO;
1744 	else
1745 		aflags &= ~M_ZERO;
1746 
1747 	if (keg->uk_flags & UMA_ZONE_NODUMP)
1748 		aflags |= M_NODUMP;
1749 
1750 	/* zone is passed for legacy reasons. */
1751 	size = keg->uk_ppera * PAGE_SIZE;
1752 	mem = keg->uk_allocf(zone, size, domain, &sflags, aflags);
1753 	if (mem == NULL) {
1754 		if (keg->uk_flags & UMA_ZFLAG_OFFPAGE)
1755 			zone_free_item(slabzone(keg->uk_ipers),
1756 			    slab_tohashslab(slab), NULL, SKIP_NONE);
1757 		goto fail;
1758 	}
1759 	uma_total_inc(size);
1760 
1761 	/* For HASH zones all pages go to the same uma_domain. */
1762 	if ((keg->uk_flags & UMA_ZFLAG_HASH) != 0)
1763 		domain = 0;
1764 
1765 	/* Point the slab into the allocated memory */
1766 	if (!(keg->uk_flags & UMA_ZFLAG_OFFPAGE))
1767 		slab = (uma_slab_t)(mem + keg->uk_pgoff);
1768 	else
1769 		slab_tohashslab(slab)->uhs_data = mem;
1770 
1771 	if (keg->uk_flags & UMA_ZFLAG_VTOSLAB)
1772 		for (i = 0; i < keg->uk_ppera; i++)
1773 			vsetzoneslab((vm_offset_t)mem + (i * PAGE_SIZE),
1774 			    zone, slab);
1775 
1776 	slab->us_freecount = keg->uk_ipers;
1777 	slab->us_flags = sflags;
1778 	slab->us_domain = domain;
1779 
1780 	BIT_FILL(keg->uk_ipers, &slab->us_free);
1781 #ifdef INVARIANTS
1782 	BIT_ZERO(keg->uk_ipers, slab_dbg_bits(slab, keg));
1783 #endif
1784 
1785 	if (keg->uk_init != NULL) {
1786 		for (i = 0; i < keg->uk_ipers; i++)
1787 			if (keg->uk_init(slab_item(slab, keg, i),
1788 			    keg->uk_size, flags) != 0)
1789 				break;
1790 		if (i != keg->uk_ipers) {
1791 			keg_free_slab(keg, slab, i);
1792 			goto fail;
1793 		}
1794 	}
1795 	kasan_mark_slab_invalid(keg, mem);
1796 	KEG_LOCK(keg, domain);
1797 
1798 	CTR3(KTR_UMA, "keg_alloc_slab: allocated slab %p for %s(%p)",
1799 	    slab, keg->uk_name, keg);
1800 
1801 	if (keg->uk_flags & UMA_ZFLAG_HASH)
1802 		UMA_HASH_INSERT(&keg->uk_hash, slab, mem);
1803 
1804 	/*
1805 	 * If we got a slab here it's safe to mark it partially used
1806 	 * and return.  We assume that the caller is going to remove
1807 	 * at least one item.
1808 	 */
1809 	dom = &keg->uk_domain[domain];
1810 	LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
1811 	dom->ud_pages += keg->uk_ppera;
1812 	dom->ud_free_items += keg->uk_ipers;
1813 
1814 	return (slab);
1815 
1816 fail:
1817 	return (NULL);
1818 }
1819 
1820 /*
1821  * This function is intended to be used early on in place of page_alloc().  It
1822  * performs contiguous physical memory allocations and uses a bump allocator for
1823  * KVA, so is usable before the kernel map is initialized.
1824  */
1825 static void *
1826 startup_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
1827     int wait)
1828 {
1829 	vm_paddr_t pa;
1830 	vm_page_t m;
1831 	void *mem;
1832 	int pages;
1833 	int i;
1834 
1835 	pages = howmany(bytes, PAGE_SIZE);
1836 	KASSERT(pages > 0, ("%s can't reserve 0 pages", __func__));
1837 
1838 	*pflag = UMA_SLAB_BOOT;
1839 	m = vm_page_alloc_contig_domain(NULL, 0, domain,
1840 	    malloc2vm_flags(wait) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED, pages,
1841 	    (vm_paddr_t)0, ~(vm_paddr_t)0, 1, 0, VM_MEMATTR_DEFAULT);
1842 	if (m == NULL)
1843 		return (NULL);
1844 
1845 	pa = VM_PAGE_TO_PHYS(m);
1846 	for (i = 0; i < pages; i++, pa += PAGE_SIZE) {
1847 #if defined(__aarch64__) || defined(__amd64__) || defined(__mips__) || \
1848     defined(__riscv) || defined(__powerpc64__)
1849 		if ((wait & M_NODUMP) == 0)
1850 			dump_add_page(pa);
1851 #endif
1852 	}
1853 	/* Allocate KVA and indirectly advance bootmem. */
1854 	mem = (void *)pmap_map(&bootmem, m->phys_addr,
1855 	    m->phys_addr + (pages * PAGE_SIZE), VM_PROT_READ | VM_PROT_WRITE);
1856         if ((wait & M_ZERO) != 0)
1857                 bzero(mem, pages * PAGE_SIZE);
1858 
1859         return (mem);
1860 }
1861 
1862 static void
1863 startup_free(void *mem, vm_size_t bytes)
1864 {
1865 	vm_offset_t va;
1866 	vm_page_t m;
1867 
1868 	va = (vm_offset_t)mem;
1869 	m = PHYS_TO_VM_PAGE(pmap_kextract(va));
1870 
1871 	/*
1872 	 * startup_alloc() returns direct-mapped slabs on some platforms.  Avoid
1873 	 * unmapping ranges of the direct map.
1874 	 */
1875 	if (va >= bootstart && va + bytes <= bootmem)
1876 		pmap_remove(kernel_pmap, va, va + bytes);
1877 	for (; bytes != 0; bytes -= PAGE_SIZE, m++) {
1878 #if defined(__aarch64__) || defined(__amd64__) || defined(__mips__) || \
1879     defined(__riscv) || defined(__powerpc64__)
1880 		dump_drop_page(VM_PAGE_TO_PHYS(m));
1881 #endif
1882 		vm_page_unwire_noq(m);
1883 		vm_page_free(m);
1884 	}
1885 }
1886 
1887 /*
1888  * Allocates a number of pages from the system
1889  *
1890  * Arguments:
1891  *	bytes  The number of bytes requested
1892  *	wait  Shall we wait?
1893  *
1894  * Returns:
1895  *	A pointer to the alloced memory or possibly
1896  *	NULL if M_NOWAIT is set.
1897  */
1898 static void *
1899 page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
1900     int wait)
1901 {
1902 	void *p;	/* Returned page */
1903 
1904 	*pflag = UMA_SLAB_KERNEL;
1905 	p = (void *)kmem_malloc_domainset(DOMAINSET_FIXED(domain), bytes, wait);
1906 
1907 	return (p);
1908 }
1909 
1910 static void *
1911 pcpu_page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
1912     int wait)
1913 {
1914 	struct pglist alloctail;
1915 	vm_offset_t addr, zkva;
1916 	int cpu, flags;
1917 	vm_page_t p, p_next;
1918 #ifdef NUMA
1919 	struct pcpu *pc;
1920 #endif
1921 
1922 	MPASS(bytes == (mp_maxid + 1) * PAGE_SIZE);
1923 
1924 	TAILQ_INIT(&alloctail);
1925 	flags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
1926 	    malloc2vm_flags(wait);
1927 	*pflag = UMA_SLAB_KERNEL;
1928 	for (cpu = 0; cpu <= mp_maxid; cpu++) {
1929 		if (CPU_ABSENT(cpu)) {
1930 			p = vm_page_alloc(NULL, 0, flags);
1931 		} else {
1932 #ifndef NUMA
1933 			p = vm_page_alloc(NULL, 0, flags);
1934 #else
1935 			pc = pcpu_find(cpu);
1936 			if (__predict_false(VM_DOMAIN_EMPTY(pc->pc_domain)))
1937 				p = NULL;
1938 			else
1939 				p = vm_page_alloc_domain(NULL, 0,
1940 				    pc->pc_domain, flags);
1941 			if (__predict_false(p == NULL))
1942 				p = vm_page_alloc(NULL, 0, flags);
1943 #endif
1944 		}
1945 		if (__predict_false(p == NULL))
1946 			goto fail;
1947 		TAILQ_INSERT_TAIL(&alloctail, p, listq);
1948 	}
1949 	if ((addr = kva_alloc(bytes)) == 0)
1950 		goto fail;
1951 	zkva = addr;
1952 	TAILQ_FOREACH(p, &alloctail, listq) {
1953 		pmap_qenter(zkva, &p, 1);
1954 		zkva += PAGE_SIZE;
1955 	}
1956 	return ((void*)addr);
1957 fail:
1958 	TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
1959 		vm_page_unwire_noq(p);
1960 		vm_page_free(p);
1961 	}
1962 	return (NULL);
1963 }
1964 
1965 /*
1966  * Allocates a number of pages from within an object
1967  *
1968  * Arguments:
1969  *	bytes  The number of bytes requested
1970  *	wait   Shall we wait?
1971  *
1972  * Returns:
1973  *	A pointer to the alloced memory or possibly
1974  *	NULL if M_NOWAIT is set.
1975  */
1976 static void *
1977 noobj_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags,
1978     int wait)
1979 {
1980 	TAILQ_HEAD(, vm_page) alloctail;
1981 	u_long npages;
1982 	vm_offset_t retkva, zkva;
1983 	vm_page_t p, p_next;
1984 	uma_keg_t keg;
1985 
1986 	TAILQ_INIT(&alloctail);
1987 	keg = zone->uz_keg;
1988 
1989 	npages = howmany(bytes, PAGE_SIZE);
1990 	while (npages > 0) {
1991 		p = vm_page_alloc_domain(NULL, 0, domain, VM_ALLOC_INTERRUPT |
1992 		    VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
1993 		    ((wait & M_WAITOK) != 0 ? VM_ALLOC_WAITOK :
1994 		    VM_ALLOC_NOWAIT));
1995 		if (p != NULL) {
1996 			/*
1997 			 * Since the page does not belong to an object, its
1998 			 * listq is unused.
1999 			 */
2000 			TAILQ_INSERT_TAIL(&alloctail, p, listq);
2001 			npages--;
2002 			continue;
2003 		}
2004 		/*
2005 		 * Page allocation failed, free intermediate pages and
2006 		 * exit.
2007 		 */
2008 		TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
2009 			vm_page_unwire_noq(p);
2010 			vm_page_free(p);
2011 		}
2012 		return (NULL);
2013 	}
2014 	*flags = UMA_SLAB_PRIV;
2015 	zkva = keg->uk_kva +
2016 	    atomic_fetchadd_long(&keg->uk_offset, round_page(bytes));
2017 	retkva = zkva;
2018 	TAILQ_FOREACH(p, &alloctail, listq) {
2019 		pmap_qenter(zkva, &p, 1);
2020 		zkva += PAGE_SIZE;
2021 	}
2022 
2023 	return ((void *)retkva);
2024 }
2025 
2026 /*
2027  * Allocate physically contiguous pages.
2028  */
2029 static void *
2030 contig_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
2031     int wait)
2032 {
2033 
2034 	*pflag = UMA_SLAB_KERNEL;
2035 	return ((void *)kmem_alloc_contig_domainset(DOMAINSET_FIXED(domain),
2036 	    bytes, wait, 0, ~(vm_paddr_t)0, 1, 0, VM_MEMATTR_DEFAULT));
2037 }
2038 
2039 /*
2040  * Frees a number of pages to the system
2041  *
2042  * Arguments:
2043  *	mem   A pointer to the memory to be freed
2044  *	size  The size of the memory being freed
2045  *	flags The original p->us_flags field
2046  *
2047  * Returns:
2048  *	Nothing
2049  */
2050 static void
2051 page_free(void *mem, vm_size_t size, uint8_t flags)
2052 {
2053 
2054 	if ((flags & UMA_SLAB_BOOT) != 0) {
2055 		startup_free(mem, size);
2056 		return;
2057 	}
2058 
2059 	KASSERT((flags & UMA_SLAB_KERNEL) != 0,
2060 	    ("UMA: page_free used with invalid flags %x", flags));
2061 
2062 	kmem_free((vm_offset_t)mem, size);
2063 }
2064 
2065 /*
2066  * Frees pcpu zone allocations
2067  *
2068  * Arguments:
2069  *	mem   A pointer to the memory to be freed
2070  *	size  The size of the memory being freed
2071  *	flags The original p->us_flags field
2072  *
2073  * Returns:
2074  *	Nothing
2075  */
2076 static void
2077 pcpu_page_free(void *mem, vm_size_t size, uint8_t flags)
2078 {
2079 	vm_offset_t sva, curva;
2080 	vm_paddr_t paddr;
2081 	vm_page_t m;
2082 
2083 	MPASS(size == (mp_maxid+1)*PAGE_SIZE);
2084 
2085 	if ((flags & UMA_SLAB_BOOT) != 0) {
2086 		startup_free(mem, size);
2087 		return;
2088 	}
2089 
2090 	sva = (vm_offset_t)mem;
2091 	for (curva = sva; curva < sva + size; curva += PAGE_SIZE) {
2092 		paddr = pmap_kextract(curva);
2093 		m = PHYS_TO_VM_PAGE(paddr);
2094 		vm_page_unwire_noq(m);
2095 		vm_page_free(m);
2096 	}
2097 	pmap_qremove(sva, size >> PAGE_SHIFT);
2098 	kva_free(sva, size);
2099 }
2100 
2101 /*
2102  * Zero fill initializer
2103  *
2104  * Arguments/Returns follow uma_init specifications
2105  */
2106 static int
2107 zero_init(void *mem, int size, int flags)
2108 {
2109 	bzero(mem, size);
2110 	return (0);
2111 }
2112 
2113 #ifdef INVARIANTS
2114 static struct noslabbits *
2115 slab_dbg_bits(uma_slab_t slab, uma_keg_t keg)
2116 {
2117 
2118 	return ((void *)((char *)&slab->us_free + BITSET_SIZE(keg->uk_ipers)));
2119 }
2120 #endif
2121 
2122 /*
2123  * Actual size of embedded struct slab (!OFFPAGE).
2124  */
2125 static size_t
2126 slab_sizeof(int nitems)
2127 {
2128 	size_t s;
2129 
2130 	s = sizeof(struct uma_slab) + BITSET_SIZE(nitems) * SLAB_BITSETS;
2131 	return (roundup(s, UMA_ALIGN_PTR + 1));
2132 }
2133 
2134 #define	UMA_FIXPT_SHIFT	31
2135 #define	UMA_FRAC_FIXPT(n, d)						\
2136 	((uint32_t)(((uint64_t)(n) << UMA_FIXPT_SHIFT) / (d)))
2137 #define	UMA_FIXPT_PCT(f)						\
2138 	((u_int)(((uint64_t)100 * (f)) >> UMA_FIXPT_SHIFT))
2139 #define	UMA_PCT_FIXPT(pct)	UMA_FRAC_FIXPT((pct), 100)
2140 #define	UMA_MIN_EFF	UMA_PCT_FIXPT(100 - UMA_MAX_WASTE)
2141 
2142 /*
2143  * Compute the number of items that will fit in a slab.  If hdr is true, the
2144  * item count may be limited to provide space in the slab for an inline slab
2145  * header.  Otherwise, all slab space will be provided for item storage.
2146  */
2147 static u_int
2148 slab_ipers_hdr(u_int size, u_int rsize, u_int slabsize, bool hdr)
2149 {
2150 	u_int ipers;
2151 	u_int padpi;
2152 
2153 	/* The padding between items is not needed after the last item. */
2154 	padpi = rsize - size;
2155 
2156 	if (hdr) {
2157 		/*
2158 		 * Start with the maximum item count and remove items until
2159 		 * the slab header first alongside the allocatable memory.
2160 		 */
2161 		for (ipers = MIN(SLAB_MAX_SETSIZE,
2162 		    (slabsize + padpi - slab_sizeof(1)) / rsize);
2163 		    ipers > 0 &&
2164 		    ipers * rsize - padpi + slab_sizeof(ipers) > slabsize;
2165 		    ipers--)
2166 			continue;
2167 	} else {
2168 		ipers = MIN((slabsize + padpi) / rsize, SLAB_MAX_SETSIZE);
2169 	}
2170 
2171 	return (ipers);
2172 }
2173 
2174 struct keg_layout_result {
2175 	u_int format;
2176 	u_int slabsize;
2177 	u_int ipers;
2178 	u_int eff;
2179 };
2180 
2181 static void
2182 keg_layout_one(uma_keg_t keg, u_int rsize, u_int slabsize, u_int fmt,
2183     struct keg_layout_result *kl)
2184 {
2185 	u_int total;
2186 
2187 	kl->format = fmt;
2188 	kl->slabsize = slabsize;
2189 
2190 	/* Handle INTERNAL as inline with an extra page. */
2191 	if ((fmt & UMA_ZFLAG_INTERNAL) != 0) {
2192 		kl->format &= ~UMA_ZFLAG_INTERNAL;
2193 		kl->slabsize += PAGE_SIZE;
2194 	}
2195 
2196 	kl->ipers = slab_ipers_hdr(keg->uk_size, rsize, kl->slabsize,
2197 	    (fmt & UMA_ZFLAG_OFFPAGE) == 0);
2198 
2199 	/* Account for memory used by an offpage slab header. */
2200 	total = kl->slabsize;
2201 	if ((fmt & UMA_ZFLAG_OFFPAGE) != 0)
2202 		total += slabzone(kl->ipers)->uz_keg->uk_rsize;
2203 
2204 	kl->eff = UMA_FRAC_FIXPT(kl->ipers * rsize, total);
2205 }
2206 
2207 /*
2208  * Determine the format of a uma keg.  This determines where the slab header
2209  * will be placed (inline or offpage) and calculates ipers, rsize, and ppera.
2210  *
2211  * Arguments
2212  *	keg  The zone we should initialize
2213  *
2214  * Returns
2215  *	Nothing
2216  */
2217 static void
2218 keg_layout(uma_keg_t keg)
2219 {
2220 	struct keg_layout_result kl = {}, kl_tmp;
2221 	u_int fmts[2];
2222 	u_int alignsize;
2223 	u_int nfmt;
2224 	u_int pages;
2225 	u_int rsize;
2226 	u_int slabsize;
2227 	u_int i, j;
2228 
2229 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0 ||
2230 	    (keg->uk_size <= UMA_PCPU_ALLOC_SIZE &&
2231 	     (keg->uk_flags & UMA_ZONE_CACHESPREAD) == 0),
2232 	    ("%s: cannot configure for PCPU: keg=%s, size=%u, flags=0x%b",
2233 	     __func__, keg->uk_name, keg->uk_size, keg->uk_flags,
2234 	     PRINT_UMA_ZFLAGS));
2235 	KASSERT((keg->uk_flags & (UMA_ZFLAG_INTERNAL | UMA_ZONE_VM)) == 0 ||
2236 	    (keg->uk_flags & (UMA_ZONE_NOTOUCH | UMA_ZONE_PCPU)) == 0,
2237 	    ("%s: incompatible flags 0x%b", __func__, keg->uk_flags,
2238 	     PRINT_UMA_ZFLAGS));
2239 
2240 	alignsize = keg->uk_align + 1;
2241 
2242 	/*
2243 	 * Calculate the size of each allocation (rsize) according to
2244 	 * alignment.  If the requested size is smaller than we have
2245 	 * allocation bits for we round it up.
2246 	 */
2247 	rsize = MAX(keg->uk_size, UMA_SMALLEST_UNIT);
2248 	rsize = roundup2(rsize, alignsize);
2249 
2250 	if ((keg->uk_flags & UMA_ZONE_CACHESPREAD) != 0) {
2251 		/*
2252 		 * We want one item to start on every align boundary in a page.
2253 		 * To do this we will span pages.  We will also extend the item
2254 		 * by the size of align if it is an even multiple of align.
2255 		 * Otherwise, it would fall on the same boundary every time.
2256 		 */
2257 		if ((rsize & alignsize) == 0)
2258 			rsize += alignsize;
2259 		slabsize = rsize * (PAGE_SIZE / alignsize);
2260 		slabsize = MIN(slabsize, rsize * SLAB_MAX_SETSIZE);
2261 		slabsize = MIN(slabsize, UMA_CACHESPREAD_MAX_SIZE);
2262 		slabsize = round_page(slabsize);
2263 	} else {
2264 		/*
2265 		 * Start with a slab size of as many pages as it takes to
2266 		 * represent a single item.  We will try to fit as many
2267 		 * additional items into the slab as possible.
2268 		 */
2269 		slabsize = round_page(keg->uk_size);
2270 	}
2271 
2272 	/* Build a list of all of the available formats for this keg. */
2273 	nfmt = 0;
2274 
2275 	/* Evaluate an inline slab layout. */
2276 	if ((keg->uk_flags & (UMA_ZONE_NOTOUCH | UMA_ZONE_PCPU)) == 0)
2277 		fmts[nfmt++] = 0;
2278 
2279 	/* TODO: vm_page-embedded slab. */
2280 
2281 	/*
2282 	 * We can't do OFFPAGE if we're internal or if we've been
2283 	 * asked to not go to the VM for buckets.  If we do this we
2284 	 * may end up going to the VM for slabs which we do not want
2285 	 * to do if we're UMA_ZONE_VM, which clearly forbids it.
2286 	 * In those cases, evaluate a pseudo-format called INTERNAL
2287 	 * which has an inline slab header and one extra page to
2288 	 * guarantee that it fits.
2289 	 *
2290 	 * Otherwise, see if using an OFFPAGE slab will improve our
2291 	 * efficiency.
2292 	 */
2293 	if ((keg->uk_flags & (UMA_ZFLAG_INTERNAL | UMA_ZONE_VM)) != 0)
2294 		fmts[nfmt++] = UMA_ZFLAG_INTERNAL;
2295 	else
2296 		fmts[nfmt++] = UMA_ZFLAG_OFFPAGE;
2297 
2298 	/*
2299 	 * Choose a slab size and format which satisfy the minimum efficiency.
2300 	 * Prefer the smallest slab size that meets the constraints.
2301 	 *
2302 	 * Start with a minimum slab size, to accommodate CACHESPREAD.  Then,
2303 	 * for small items (up to PAGE_SIZE), the iteration increment is one
2304 	 * page; and for large items, the increment is one item.
2305 	 */
2306 	i = (slabsize + rsize - keg->uk_size) / MAX(PAGE_SIZE, rsize);
2307 	KASSERT(i >= 1, ("keg %s(%p) flags=0x%b slabsize=%u, rsize=%u, i=%u",
2308 	    keg->uk_name, keg, keg->uk_flags, PRINT_UMA_ZFLAGS, slabsize,
2309 	    rsize, i));
2310 	for ( ; ; i++) {
2311 		slabsize = (rsize <= PAGE_SIZE) ? ptoa(i) :
2312 		    round_page(rsize * (i - 1) + keg->uk_size);
2313 
2314 		for (j = 0; j < nfmt; j++) {
2315 			/* Only if we have no viable format yet. */
2316 			if ((fmts[j] & UMA_ZFLAG_INTERNAL) != 0 &&
2317 			    kl.ipers > 0)
2318 				continue;
2319 
2320 			keg_layout_one(keg, rsize, slabsize, fmts[j], &kl_tmp);
2321 			if (kl_tmp.eff <= kl.eff)
2322 				continue;
2323 
2324 			kl = kl_tmp;
2325 
2326 			CTR6(KTR_UMA, "keg %s layout: format %#x "
2327 			    "(ipers %u * rsize %u) / slabsize %#x = %u%% eff",
2328 			    keg->uk_name, kl.format, kl.ipers, rsize,
2329 			    kl.slabsize, UMA_FIXPT_PCT(kl.eff));
2330 
2331 			/* Stop when we reach the minimum efficiency. */
2332 			if (kl.eff >= UMA_MIN_EFF)
2333 				break;
2334 		}
2335 
2336 		if (kl.eff >= UMA_MIN_EFF || !multipage_slabs ||
2337 		    slabsize >= SLAB_MAX_SETSIZE * rsize ||
2338 		    (keg->uk_flags & (UMA_ZONE_PCPU | UMA_ZONE_CONTIG)) != 0)
2339 			break;
2340 	}
2341 
2342 	pages = atop(kl.slabsize);
2343 	if ((keg->uk_flags & UMA_ZONE_PCPU) != 0)
2344 		pages *= mp_maxid + 1;
2345 
2346 	keg->uk_rsize = rsize;
2347 	keg->uk_ipers = kl.ipers;
2348 	keg->uk_ppera = pages;
2349 	keg->uk_flags |= kl.format;
2350 
2351 	/*
2352 	 * How do we find the slab header if it is offpage or if not all item
2353 	 * start addresses are in the same page?  We could solve the latter
2354 	 * case with vaddr alignment, but we don't.
2355 	 */
2356 	if ((keg->uk_flags & UMA_ZFLAG_OFFPAGE) != 0 ||
2357 	    (keg->uk_ipers - 1) * rsize >= PAGE_SIZE) {
2358 		if ((keg->uk_flags & UMA_ZONE_NOTPAGE) != 0)
2359 			keg->uk_flags |= UMA_ZFLAG_HASH;
2360 		else
2361 			keg->uk_flags |= UMA_ZFLAG_VTOSLAB;
2362 	}
2363 
2364 	CTR6(KTR_UMA, "%s: keg=%s, flags=%#x, rsize=%u, ipers=%u, ppera=%u",
2365 	    __func__, keg->uk_name, keg->uk_flags, rsize, keg->uk_ipers,
2366 	    pages);
2367 	KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_MAX_SETSIZE,
2368 	    ("%s: keg=%s, flags=0x%b, rsize=%u, ipers=%u, ppera=%u", __func__,
2369 	     keg->uk_name, keg->uk_flags, PRINT_UMA_ZFLAGS, rsize,
2370 	     keg->uk_ipers, pages));
2371 }
2372 
2373 /*
2374  * Keg header ctor.  This initializes all fields, locks, etc.  And inserts
2375  * the keg onto the global keg list.
2376  *
2377  * Arguments/Returns follow uma_ctor specifications
2378  *	udata  Actually uma_kctor_args
2379  */
2380 static int
2381 keg_ctor(void *mem, int size, void *udata, int flags)
2382 {
2383 	struct uma_kctor_args *arg = udata;
2384 	uma_keg_t keg = mem;
2385 	uma_zone_t zone;
2386 	int i;
2387 
2388 	bzero(keg, size);
2389 	keg->uk_size = arg->size;
2390 	keg->uk_init = arg->uminit;
2391 	keg->uk_fini = arg->fini;
2392 	keg->uk_align = arg->align;
2393 	keg->uk_reserve = 0;
2394 	keg->uk_flags = arg->flags;
2395 
2396 	/*
2397 	 * We use a global round-robin policy by default.  Zones with
2398 	 * UMA_ZONE_FIRSTTOUCH set will use first-touch instead, in which
2399 	 * case the iterator is never run.
2400 	 */
2401 	keg->uk_dr.dr_policy = DOMAINSET_RR();
2402 	keg->uk_dr.dr_iter = 0;
2403 
2404 	/*
2405 	 * The primary zone is passed to us at keg-creation time.
2406 	 */
2407 	zone = arg->zone;
2408 	keg->uk_name = zone->uz_name;
2409 
2410 	if (arg->flags & UMA_ZONE_ZINIT)
2411 		keg->uk_init = zero_init;
2412 
2413 	if (arg->flags & UMA_ZONE_MALLOC)
2414 		keg->uk_flags |= UMA_ZFLAG_VTOSLAB;
2415 
2416 #ifndef SMP
2417 	keg->uk_flags &= ~UMA_ZONE_PCPU;
2418 #endif
2419 
2420 	keg_layout(keg);
2421 
2422 	/*
2423 	 * Use a first-touch NUMA policy for kegs that pmap_extract() will
2424 	 * work on.  Use round-robin for everything else.
2425 	 *
2426 	 * Zones may override the default by specifying either.
2427 	 */
2428 #ifdef NUMA
2429 	if ((keg->uk_flags &
2430 	    (UMA_ZONE_ROUNDROBIN | UMA_ZFLAG_CACHE | UMA_ZONE_NOTPAGE)) == 0)
2431 		keg->uk_flags |= UMA_ZONE_FIRSTTOUCH;
2432 	else if ((keg->uk_flags & UMA_ZONE_FIRSTTOUCH) == 0)
2433 		keg->uk_flags |= UMA_ZONE_ROUNDROBIN;
2434 #endif
2435 
2436 	/*
2437 	 * If we haven't booted yet we need allocations to go through the
2438 	 * startup cache until the vm is ready.
2439 	 */
2440 #ifdef UMA_MD_SMALL_ALLOC
2441 	if (keg->uk_ppera == 1)
2442 		keg->uk_allocf = uma_small_alloc;
2443 	else
2444 #endif
2445 	if (booted < BOOT_KVA)
2446 		keg->uk_allocf = startup_alloc;
2447 	else if (keg->uk_flags & UMA_ZONE_PCPU)
2448 		keg->uk_allocf = pcpu_page_alloc;
2449 	else if ((keg->uk_flags & UMA_ZONE_CONTIG) != 0 && keg->uk_ppera > 1)
2450 		keg->uk_allocf = contig_alloc;
2451 	else
2452 		keg->uk_allocf = page_alloc;
2453 #ifdef UMA_MD_SMALL_ALLOC
2454 	if (keg->uk_ppera == 1)
2455 		keg->uk_freef = uma_small_free;
2456 	else
2457 #endif
2458 	if (keg->uk_flags & UMA_ZONE_PCPU)
2459 		keg->uk_freef = pcpu_page_free;
2460 	else
2461 		keg->uk_freef = page_free;
2462 
2463 	/*
2464 	 * Initialize keg's locks.
2465 	 */
2466 	for (i = 0; i < vm_ndomains; i++)
2467 		KEG_LOCK_INIT(keg, i, (arg->flags & UMA_ZONE_MTXCLASS));
2468 
2469 	/*
2470 	 * If we're putting the slab header in the actual page we need to
2471 	 * figure out where in each page it goes.  See slab_sizeof
2472 	 * definition.
2473 	 */
2474 	if (!(keg->uk_flags & UMA_ZFLAG_OFFPAGE)) {
2475 		size_t shsize;
2476 
2477 		shsize = slab_sizeof(keg->uk_ipers);
2478 		keg->uk_pgoff = (PAGE_SIZE * keg->uk_ppera) - shsize;
2479 		/*
2480 		 * The only way the following is possible is if with our
2481 		 * UMA_ALIGN_PTR adjustments we are now bigger than
2482 		 * UMA_SLAB_SIZE.  I haven't checked whether this is
2483 		 * mathematically possible for all cases, so we make
2484 		 * sure here anyway.
2485 		 */
2486 		KASSERT(keg->uk_pgoff + shsize <= PAGE_SIZE * keg->uk_ppera,
2487 		    ("zone %s ipers %d rsize %d size %d slab won't fit",
2488 		    zone->uz_name, keg->uk_ipers, keg->uk_rsize, keg->uk_size));
2489 	}
2490 
2491 	if (keg->uk_flags & UMA_ZFLAG_HASH)
2492 		hash_alloc(&keg->uk_hash, 0);
2493 
2494 	CTR3(KTR_UMA, "keg_ctor %p zone %s(%p)", keg, zone->uz_name, zone);
2495 
2496 	LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);
2497 
2498 	rw_wlock(&uma_rwlock);
2499 	LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
2500 	rw_wunlock(&uma_rwlock);
2501 	return (0);
2502 }
2503 
2504 static void
2505 zone_kva_available(uma_zone_t zone, void *unused)
2506 {
2507 	uma_keg_t keg;
2508 
2509 	if ((zone->uz_flags & UMA_ZFLAG_CACHE) != 0)
2510 		return;
2511 	KEG_GET(zone, keg);
2512 
2513 	if (keg->uk_allocf == startup_alloc) {
2514 		/* Switch to the real allocator. */
2515 		if (keg->uk_flags & UMA_ZONE_PCPU)
2516 			keg->uk_allocf = pcpu_page_alloc;
2517 		else if ((keg->uk_flags & UMA_ZONE_CONTIG) != 0 &&
2518 		    keg->uk_ppera > 1)
2519 			keg->uk_allocf = contig_alloc;
2520 		else
2521 			keg->uk_allocf = page_alloc;
2522 	}
2523 }
2524 
2525 static void
2526 zone_alloc_counters(uma_zone_t zone, void *unused)
2527 {
2528 
2529 	zone->uz_allocs = counter_u64_alloc(M_WAITOK);
2530 	zone->uz_frees = counter_u64_alloc(M_WAITOK);
2531 	zone->uz_fails = counter_u64_alloc(M_WAITOK);
2532 	zone->uz_xdomain = counter_u64_alloc(M_WAITOK);
2533 }
2534 
2535 static void
2536 zone_alloc_sysctl(uma_zone_t zone, void *unused)
2537 {
2538 	uma_zone_domain_t zdom;
2539 	uma_domain_t dom;
2540 	uma_keg_t keg;
2541 	struct sysctl_oid *oid, *domainoid;
2542 	int domains, i, cnt;
2543 	static const char *nokeg = "cache zone";
2544 	char *c;
2545 
2546 	/*
2547 	 * Make a sysctl safe copy of the zone name by removing
2548 	 * any special characters and handling dups by appending
2549 	 * an index.
2550 	 */
2551 	if (zone->uz_namecnt != 0) {
2552 		/* Count the number of decimal digits and '_' separator. */
2553 		for (i = 1, cnt = zone->uz_namecnt; cnt != 0; i++)
2554 			cnt /= 10;
2555 		zone->uz_ctlname = malloc(strlen(zone->uz_name) + i + 1,
2556 		    M_UMA, M_WAITOK);
2557 		sprintf(zone->uz_ctlname, "%s_%d", zone->uz_name,
2558 		    zone->uz_namecnt);
2559 	} else
2560 		zone->uz_ctlname = strdup(zone->uz_name, M_UMA);
2561 	for (c = zone->uz_ctlname; *c != '\0'; c++)
2562 		if (strchr("./\\ -", *c) != NULL)
2563 			*c = '_';
2564 
2565 	/*
2566 	 * Basic parameters at the root.
2567 	 */
2568 	zone->uz_oid = SYSCTL_ADD_NODE(NULL, SYSCTL_STATIC_CHILDREN(_vm_uma),
2569 	    OID_AUTO, zone->uz_ctlname, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
2570 	oid = zone->uz_oid;
2571 	SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2572 	    "size", CTLFLAG_RD, &zone->uz_size, 0, "Allocation size");
2573 	SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2574 	    "flags", CTLFLAG_RD | CTLTYPE_STRING | CTLFLAG_MPSAFE,
2575 	    zone, 0, sysctl_handle_uma_zone_flags, "A",
2576 	    "Allocator configuration flags");
2577 	SYSCTL_ADD_U16(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2578 	    "bucket_size", CTLFLAG_RD, &zone->uz_bucket_size, 0,
2579 	    "Desired per-cpu cache size");
2580 	SYSCTL_ADD_U16(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2581 	    "bucket_size_max", CTLFLAG_RD, &zone->uz_bucket_size_max, 0,
2582 	    "Maximum allowed per-cpu cache size");
2583 
2584 	/*
2585 	 * keg if present.
2586 	 */
2587 	if ((zone->uz_flags & UMA_ZFLAG_HASH) == 0)
2588 		domains = vm_ndomains;
2589 	else
2590 		domains = 1;
2591 	oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(zone->uz_oid), OID_AUTO,
2592 	    "keg", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
2593 	keg = zone->uz_keg;
2594 	if ((zone->uz_flags & UMA_ZFLAG_CACHE) == 0) {
2595 		SYSCTL_ADD_CONST_STRING(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2596 		    "name", CTLFLAG_RD, keg->uk_name, "Keg name");
2597 		SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2598 		    "rsize", CTLFLAG_RD, &keg->uk_rsize, 0,
2599 		    "Real object size with alignment");
2600 		SYSCTL_ADD_U16(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2601 		    "ppera", CTLFLAG_RD, &keg->uk_ppera, 0,
2602 		    "pages per-slab allocation");
2603 		SYSCTL_ADD_U16(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2604 		    "ipers", CTLFLAG_RD, &keg->uk_ipers, 0,
2605 		    "items available per-slab");
2606 		SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2607 		    "align", CTLFLAG_RD, &keg->uk_align, 0,
2608 		    "item alignment mask");
2609 		SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2610 		    "reserve", CTLFLAG_RD, &keg->uk_reserve, 0,
2611 		    "number of reserved items");
2612 		SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2613 		    "efficiency", CTLFLAG_RD | CTLTYPE_INT | CTLFLAG_MPSAFE,
2614 		    keg, 0, sysctl_handle_uma_slab_efficiency, "I",
2615 		    "Slab utilization (100 - internal fragmentation %)");
2616 		domainoid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(oid),
2617 		    OID_AUTO, "domain", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
2618 		for (i = 0; i < domains; i++) {
2619 			dom = &keg->uk_domain[i];
2620 			oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(domainoid),
2621 			    OID_AUTO, VM_DOMAIN(i)->vmd_name,
2622 			    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
2623 			SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2624 			    "pages", CTLFLAG_RD, &dom->ud_pages, 0,
2625 			    "Total pages currently allocated from VM");
2626 			SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2627 			    "free_items", CTLFLAG_RD, &dom->ud_free_items, 0,
2628 			    "items free in the slab layer");
2629 		}
2630 	} else
2631 		SYSCTL_ADD_CONST_STRING(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2632 		    "name", CTLFLAG_RD, nokeg, "Keg name");
2633 
2634 	/*
2635 	 * Information about zone limits.
2636 	 */
2637 	oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(zone->uz_oid), OID_AUTO,
2638 	    "limit", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
2639 	SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2640 	    "items", CTLFLAG_RD | CTLTYPE_U64 | CTLFLAG_MPSAFE,
2641 	    zone, 0, sysctl_handle_uma_zone_items, "QU",
2642 	    "Current number of allocated items if limit is set");
2643 	SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2644 	    "max_items", CTLFLAG_RD, &zone->uz_max_items, 0,
2645 	    "Maximum number of allocated and cached items");
2646 	SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2647 	    "sleepers", CTLFLAG_RD, &zone->uz_sleepers, 0,
2648 	    "Number of threads sleeping at limit");
2649 	SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2650 	    "sleeps", CTLFLAG_RD, &zone->uz_sleeps, 0,
2651 	    "Total zone limit sleeps");
2652 	SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2653 	    "bucket_max", CTLFLAG_RD, &zone->uz_bucket_max, 0,
2654 	    "Maximum number of items in each domain's bucket cache");
2655 
2656 	/*
2657 	 * Per-domain zone information.
2658 	 */
2659 	domainoid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(zone->uz_oid),
2660 	    OID_AUTO, "domain", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
2661 	for (i = 0; i < domains; i++) {
2662 		zdom = ZDOM_GET(zone, i);
2663 		oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(domainoid),
2664 		    OID_AUTO, VM_DOMAIN(i)->vmd_name,
2665 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
2666 		SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2667 		    "nitems", CTLFLAG_RD, &zdom->uzd_nitems,
2668 		    "number of items in this domain");
2669 		SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2670 		    "imax", CTLFLAG_RD, &zdom->uzd_imax,
2671 		    "maximum item count in this period");
2672 		SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2673 		    "imin", CTLFLAG_RD, &zdom->uzd_imin,
2674 		    "minimum item count in this period");
2675 		SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2676 		    "bimin", CTLFLAG_RD, &zdom->uzd_bimin,
2677 		    "Minimum item count in this batch");
2678 		SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2679 		    "wss", CTLFLAG_RD, &zdom->uzd_wss,
2680 		    "Working set size");
2681 		SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2682 		    "limin", CTLFLAG_RD, &zdom->uzd_limin,
2683 		    "Long time minimum item count");
2684 		SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2685 		    "timin", CTLFLAG_RD, &zdom->uzd_timin, 0,
2686 		    "Time since zero long time minimum item count");
2687 	}
2688 
2689 	/*
2690 	 * General statistics.
2691 	 */
2692 	oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(zone->uz_oid), OID_AUTO,
2693 	    "stats", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "");
2694 	SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2695 	    "current", CTLFLAG_RD | CTLTYPE_INT | CTLFLAG_MPSAFE,
2696 	    zone, 1, sysctl_handle_uma_zone_cur, "I",
2697 	    "Current number of allocated items");
2698 	SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2699 	    "allocs", CTLFLAG_RD | CTLTYPE_U64 | CTLFLAG_MPSAFE,
2700 	    zone, 0, sysctl_handle_uma_zone_allocs, "QU",
2701 	    "Total allocation calls");
2702 	SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2703 	    "frees", CTLFLAG_RD | CTLTYPE_U64 | CTLFLAG_MPSAFE,
2704 	    zone, 0, sysctl_handle_uma_zone_frees, "QU",
2705 	    "Total free calls");
2706 	SYSCTL_ADD_COUNTER_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2707 	    "fails", CTLFLAG_RD, &zone->uz_fails,
2708 	    "Number of allocation failures");
2709 	SYSCTL_ADD_COUNTER_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2710 	    "xdomain", CTLFLAG_RD, &zone->uz_xdomain,
2711 	    "Free calls from the wrong domain");
2712 }
2713 
2714 struct uma_zone_count {
2715 	const char	*name;
2716 	int		count;
2717 };
2718 
2719 static void
2720 zone_count(uma_zone_t zone, void *arg)
2721 {
2722 	struct uma_zone_count *cnt;
2723 
2724 	cnt = arg;
2725 	/*
2726 	 * Some zones are rapidly created with identical names and
2727 	 * destroyed out of order.  This can lead to gaps in the count.
2728 	 * Use one greater than the maximum observed for this name.
2729 	 */
2730 	if (strcmp(zone->uz_name, cnt->name) == 0)
2731 		cnt->count = MAX(cnt->count,
2732 		    zone->uz_namecnt + 1);
2733 }
2734 
2735 static void
2736 zone_update_caches(uma_zone_t zone)
2737 {
2738 	int i;
2739 
2740 	for (i = 0; i <= mp_maxid; i++) {
2741 		cache_set_uz_size(&zone->uz_cpu[i], zone->uz_size);
2742 		cache_set_uz_flags(&zone->uz_cpu[i], zone->uz_flags);
2743 	}
2744 }
2745 
2746 /*
2747  * Zone header ctor.  This initializes all fields, locks, etc.
2748  *
2749  * Arguments/Returns follow uma_ctor specifications
2750  *	udata  Actually uma_zctor_args
2751  */
2752 static int
2753 zone_ctor(void *mem, int size, void *udata, int flags)
2754 {
2755 	struct uma_zone_count cnt;
2756 	struct uma_zctor_args *arg = udata;
2757 	uma_zone_domain_t zdom;
2758 	uma_zone_t zone = mem;
2759 	uma_zone_t z;
2760 	uma_keg_t keg;
2761 	int i;
2762 
2763 	bzero(zone, size);
2764 	zone->uz_name = arg->name;
2765 	zone->uz_ctor = arg->ctor;
2766 	zone->uz_dtor = arg->dtor;
2767 	zone->uz_init = NULL;
2768 	zone->uz_fini = NULL;
2769 	zone->uz_sleeps = 0;
2770 	zone->uz_bucket_size = 0;
2771 	zone->uz_bucket_size_min = 0;
2772 	zone->uz_bucket_size_max = BUCKET_MAX;
2773 	zone->uz_flags = (arg->flags & UMA_ZONE_SMR);
2774 	zone->uz_warning = NULL;
2775 	/* The domain structures follow the cpu structures. */
2776 	zone->uz_bucket_max = ULONG_MAX;
2777 	timevalclear(&zone->uz_ratecheck);
2778 
2779 	/* Count the number of duplicate names. */
2780 	cnt.name = arg->name;
2781 	cnt.count = 0;
2782 	zone_foreach(zone_count, &cnt);
2783 	zone->uz_namecnt = cnt.count;
2784 	ZONE_CROSS_LOCK_INIT(zone);
2785 
2786 	for (i = 0; i < vm_ndomains; i++) {
2787 		zdom = ZDOM_GET(zone, i);
2788 		ZDOM_LOCK_INIT(zone, zdom, (arg->flags & UMA_ZONE_MTXCLASS));
2789 		STAILQ_INIT(&zdom->uzd_buckets);
2790 	}
2791 
2792 #if defined(INVARIANTS) && !defined(KASAN)
2793 	if (arg->uminit == trash_init && arg->fini == trash_fini)
2794 		zone->uz_flags |= UMA_ZFLAG_TRASH | UMA_ZFLAG_CTORDTOR;
2795 #elif defined(KASAN)
2796 	if ((arg->flags & (UMA_ZONE_NOFREE | UMA_ZFLAG_CACHE)) != 0)
2797 		arg->flags |= UMA_ZONE_NOKASAN;
2798 #endif
2799 
2800 	/*
2801 	 * This is a pure cache zone, no kegs.
2802 	 */
2803 	if (arg->import) {
2804 		KASSERT((arg->flags & UMA_ZFLAG_CACHE) != 0,
2805 		    ("zone_ctor: Import specified for non-cache zone."));
2806 		zone->uz_flags = arg->flags;
2807 		zone->uz_size = arg->size;
2808 		zone->uz_import = arg->import;
2809 		zone->uz_release = arg->release;
2810 		zone->uz_arg = arg->arg;
2811 #ifdef NUMA
2812 		/*
2813 		 * Cache zones are round-robin unless a policy is
2814 		 * specified because they may have incompatible
2815 		 * constraints.
2816 		 */
2817 		if ((zone->uz_flags & UMA_ZONE_FIRSTTOUCH) == 0)
2818 			zone->uz_flags |= UMA_ZONE_ROUNDROBIN;
2819 #endif
2820 		rw_wlock(&uma_rwlock);
2821 		LIST_INSERT_HEAD(&uma_cachezones, zone, uz_link);
2822 		rw_wunlock(&uma_rwlock);
2823 		goto out;
2824 	}
2825 
2826 	/*
2827 	 * Use the regular zone/keg/slab allocator.
2828 	 */
2829 	zone->uz_import = zone_import;
2830 	zone->uz_release = zone_release;
2831 	zone->uz_arg = zone;
2832 	keg = arg->keg;
2833 
2834 	if (arg->flags & UMA_ZONE_SECONDARY) {
2835 		KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0,
2836 		    ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
2837 		KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
2838 		zone->uz_init = arg->uminit;
2839 		zone->uz_fini = arg->fini;
2840 		zone->uz_flags |= UMA_ZONE_SECONDARY;
2841 		rw_wlock(&uma_rwlock);
2842 		ZONE_LOCK(zone);
2843 		LIST_FOREACH(z, &keg->uk_zones, uz_link) {
2844 			if (LIST_NEXT(z, uz_link) == NULL) {
2845 				LIST_INSERT_AFTER(z, zone, uz_link);
2846 				break;
2847 			}
2848 		}
2849 		ZONE_UNLOCK(zone);
2850 		rw_wunlock(&uma_rwlock);
2851 	} else if (keg == NULL) {
2852 		if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
2853 		    arg->align, arg->flags)) == NULL)
2854 			return (ENOMEM);
2855 	} else {
2856 		struct uma_kctor_args karg;
2857 		int error;
2858 
2859 		/* We should only be here from uma_startup() */
2860 		karg.size = arg->size;
2861 		karg.uminit = arg->uminit;
2862 		karg.fini = arg->fini;
2863 		karg.align = arg->align;
2864 		karg.flags = (arg->flags & ~UMA_ZONE_SMR);
2865 		karg.zone = zone;
2866 		error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
2867 		    flags);
2868 		if (error)
2869 			return (error);
2870 	}
2871 
2872 	/* Inherit properties from the keg. */
2873 	zone->uz_keg = keg;
2874 	zone->uz_size = keg->uk_size;
2875 	zone->uz_flags |= (keg->uk_flags &
2876 	    (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT));
2877 
2878 out:
2879 	if (booted >= BOOT_PCPU) {
2880 		zone_alloc_counters(zone, NULL);
2881 		if (booted >= BOOT_RUNNING)
2882 			zone_alloc_sysctl(zone, NULL);
2883 	} else {
2884 		zone->uz_allocs = EARLY_COUNTER;
2885 		zone->uz_frees = EARLY_COUNTER;
2886 		zone->uz_fails = EARLY_COUNTER;
2887 	}
2888 
2889 	/* Caller requests a private SMR context. */
2890 	if ((zone->uz_flags & UMA_ZONE_SMR) != 0)
2891 		zone->uz_smr = smr_create(zone->uz_name, 0, 0);
2892 
2893 	KASSERT((arg->flags & (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET)) !=
2894 	    (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET),
2895 	    ("Invalid zone flag combination"));
2896 	if (arg->flags & UMA_ZFLAG_INTERNAL)
2897 		zone->uz_bucket_size_max = zone->uz_bucket_size = 0;
2898 	if ((arg->flags & UMA_ZONE_MAXBUCKET) != 0)
2899 		zone->uz_bucket_size = BUCKET_MAX;
2900 	else if ((arg->flags & UMA_ZONE_NOBUCKET) != 0)
2901 		zone->uz_bucket_size = 0;
2902 	else
2903 		zone->uz_bucket_size = bucket_select(zone->uz_size);
2904 	zone->uz_bucket_size_min = zone->uz_bucket_size;
2905 	if (zone->uz_dtor != NULL || zone->uz_ctor != NULL)
2906 		zone->uz_flags |= UMA_ZFLAG_CTORDTOR;
2907 	zone_update_caches(zone);
2908 
2909 	return (0);
2910 }
2911 
2912 /*
2913  * Keg header dtor.  This frees all data, destroys locks, frees the hash
2914  * table and removes the keg from the global list.
2915  *
2916  * Arguments/Returns follow uma_dtor specifications
2917  *	udata  unused
2918  */
2919 static void
2920 keg_dtor(void *arg, int size, void *udata)
2921 {
2922 	uma_keg_t keg;
2923 	uint32_t free, pages;
2924 	int i;
2925 
2926 	keg = (uma_keg_t)arg;
2927 	free = pages = 0;
2928 	for (i = 0; i < vm_ndomains; i++) {
2929 		free += keg->uk_domain[i].ud_free_items;
2930 		pages += keg->uk_domain[i].ud_pages;
2931 		KEG_LOCK_FINI(keg, i);
2932 	}
2933 	if (pages != 0)
2934 		printf("Freed UMA keg (%s) was not empty (%u items). "
2935 		    " Lost %u pages of memory.\n",
2936 		    keg->uk_name ? keg->uk_name : "",
2937 		    pages / keg->uk_ppera * keg->uk_ipers - free, pages);
2938 
2939 	hash_free(&keg->uk_hash);
2940 }
2941 
2942 /*
2943  * Zone header dtor.
2944  *
2945  * Arguments/Returns follow uma_dtor specifications
2946  *	udata  unused
2947  */
2948 static void
2949 zone_dtor(void *arg, int size, void *udata)
2950 {
2951 	uma_zone_t zone;
2952 	uma_keg_t keg;
2953 	int i;
2954 
2955 	zone = (uma_zone_t)arg;
2956 
2957 	sysctl_remove_oid(zone->uz_oid, 1, 1);
2958 
2959 	if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
2960 		cache_drain(zone);
2961 
2962 	rw_wlock(&uma_rwlock);
2963 	LIST_REMOVE(zone, uz_link);
2964 	rw_wunlock(&uma_rwlock);
2965 	if ((zone->uz_flags & (UMA_ZONE_SECONDARY | UMA_ZFLAG_CACHE)) == 0) {
2966 		keg = zone->uz_keg;
2967 		keg->uk_reserve = 0;
2968 	}
2969 	zone_reclaim(zone, UMA_ANYDOMAIN, M_WAITOK, true);
2970 
2971 	/*
2972 	 * We only destroy kegs from non secondary/non cache zones.
2973 	 */
2974 	if ((zone->uz_flags & (UMA_ZONE_SECONDARY | UMA_ZFLAG_CACHE)) == 0) {
2975 		keg = zone->uz_keg;
2976 		rw_wlock(&uma_rwlock);
2977 		LIST_REMOVE(keg, uk_link);
2978 		rw_wunlock(&uma_rwlock);
2979 		zone_free_item(kegs, keg, NULL, SKIP_NONE);
2980 	}
2981 	counter_u64_free(zone->uz_allocs);
2982 	counter_u64_free(zone->uz_frees);
2983 	counter_u64_free(zone->uz_fails);
2984 	counter_u64_free(zone->uz_xdomain);
2985 	free(zone->uz_ctlname, M_UMA);
2986 	for (i = 0; i < vm_ndomains; i++)
2987 		ZDOM_LOCK_FINI(ZDOM_GET(zone, i));
2988 	ZONE_CROSS_LOCK_FINI(zone);
2989 }
2990 
2991 static void
2992 zone_foreach_unlocked(void (*zfunc)(uma_zone_t, void *arg), void *arg)
2993 {
2994 	uma_keg_t keg;
2995 	uma_zone_t zone;
2996 
2997 	LIST_FOREACH(keg, &uma_kegs, uk_link) {
2998 		LIST_FOREACH(zone, &keg->uk_zones, uz_link)
2999 			zfunc(zone, arg);
3000 	}
3001 	LIST_FOREACH(zone, &uma_cachezones, uz_link)
3002 		zfunc(zone, arg);
3003 }
3004 
3005 /*
3006  * Traverses every zone in the system and calls a callback
3007  *
3008  * Arguments:
3009  *	zfunc  A pointer to a function which accepts a zone
3010  *		as an argument.
3011  *
3012  * Returns:
3013  *	Nothing
3014  */
3015 static void
3016 zone_foreach(void (*zfunc)(uma_zone_t, void *arg), void *arg)
3017 {
3018 
3019 	rw_rlock(&uma_rwlock);
3020 	zone_foreach_unlocked(zfunc, arg);
3021 	rw_runlock(&uma_rwlock);
3022 }
3023 
3024 /*
3025  * Initialize the kernel memory allocator.  This is done after pages can be
3026  * allocated but before general KVA is available.
3027  */
3028 void
3029 uma_startup1(vm_offset_t virtual_avail)
3030 {
3031 	struct uma_zctor_args args;
3032 	size_t ksize, zsize, size;
3033 	uma_keg_t primarykeg;
3034 	uintptr_t m;
3035 	int domain;
3036 	uint8_t pflag;
3037 
3038 	bootstart = bootmem = virtual_avail;
3039 
3040 	rw_init(&uma_rwlock, "UMA lock");
3041 	sx_init(&uma_reclaim_lock, "umareclaim");
3042 
3043 	ksize = sizeof(struct uma_keg) +
3044 	    (sizeof(struct uma_domain) * vm_ndomains);
3045 	ksize = roundup(ksize, UMA_SUPER_ALIGN);
3046 	zsize = sizeof(struct uma_zone) +
3047 	    (sizeof(struct uma_cache) * (mp_maxid + 1)) +
3048 	    (sizeof(struct uma_zone_domain) * vm_ndomains);
3049 	zsize = roundup(zsize, UMA_SUPER_ALIGN);
3050 
3051 	/* Allocate the zone of zones, zone of kegs, and zone of zones keg. */
3052 	size = (zsize * 2) + ksize;
3053 	for (domain = 0; domain < vm_ndomains; domain++) {
3054 		m = (uintptr_t)startup_alloc(NULL, size, domain, &pflag,
3055 		    M_NOWAIT | M_ZERO);
3056 		if (m != 0)
3057 			break;
3058 	}
3059 	zones = (uma_zone_t)m;
3060 	m += zsize;
3061 	kegs = (uma_zone_t)m;
3062 	m += zsize;
3063 	primarykeg = (uma_keg_t)m;
3064 
3065 	/* "manually" create the initial zone */
3066 	memset(&args, 0, sizeof(args));
3067 	args.name = "UMA Kegs";
3068 	args.size = ksize;
3069 	args.ctor = keg_ctor;
3070 	args.dtor = keg_dtor;
3071 	args.uminit = zero_init;
3072 	args.fini = NULL;
3073 	args.keg = primarykeg;
3074 	args.align = UMA_SUPER_ALIGN - 1;
3075 	args.flags = UMA_ZFLAG_INTERNAL;
3076 	zone_ctor(kegs, zsize, &args, M_WAITOK);
3077 
3078 	args.name = "UMA Zones";
3079 	args.size = zsize;
3080 	args.ctor = zone_ctor;
3081 	args.dtor = zone_dtor;
3082 	args.uminit = zero_init;
3083 	args.fini = NULL;
3084 	args.keg = NULL;
3085 	args.align = UMA_SUPER_ALIGN - 1;
3086 	args.flags = UMA_ZFLAG_INTERNAL;
3087 	zone_ctor(zones, zsize, &args, M_WAITOK);
3088 
3089 	/* Now make zones for slab headers */
3090 	slabzones[0] = uma_zcreate("UMA Slabs 0", SLABZONE0_SIZE,
3091 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
3092 	slabzones[1] = uma_zcreate("UMA Slabs 1", SLABZONE1_SIZE,
3093 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
3094 
3095 	hashzone = uma_zcreate("UMA Hash",
3096 	    sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
3097 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
3098 
3099 	bucket_init();
3100 	smr_init();
3101 }
3102 
3103 #ifndef UMA_MD_SMALL_ALLOC
3104 extern void vm_radix_reserve_kva(void);
3105 #endif
3106 
3107 /*
3108  * Advertise the availability of normal kva allocations and switch to
3109  * the default back-end allocator.  Marks the KVA we consumed on startup
3110  * as used in the map.
3111  */
3112 void
3113 uma_startup2(void)
3114 {
3115 
3116 	if (bootstart != bootmem) {
3117 		vm_map_lock(kernel_map);
3118 		(void)vm_map_insert(kernel_map, NULL, 0, bootstart, bootmem,
3119 		    VM_PROT_RW, VM_PROT_RW, MAP_NOFAULT);
3120 		vm_map_unlock(kernel_map);
3121 	}
3122 
3123 #ifndef UMA_MD_SMALL_ALLOC
3124 	/* Set up radix zone to use noobj_alloc. */
3125 	vm_radix_reserve_kva();
3126 #endif
3127 
3128 	booted = BOOT_KVA;
3129 	zone_foreach_unlocked(zone_kva_available, NULL);
3130 	bucket_enable();
3131 }
3132 
3133 /*
3134  * Allocate counters as early as possible so that boot-time allocations are
3135  * accounted more precisely.
3136  */
3137 static void
3138 uma_startup_pcpu(void *arg __unused)
3139 {
3140 
3141 	zone_foreach_unlocked(zone_alloc_counters, NULL);
3142 	booted = BOOT_PCPU;
3143 }
3144 SYSINIT(uma_startup_pcpu, SI_SUB_COUNTER, SI_ORDER_ANY, uma_startup_pcpu, NULL);
3145 
3146 /*
3147  * Finish our initialization steps.
3148  */
3149 static void
3150 uma_startup3(void *arg __unused)
3151 {
3152 
3153 #ifdef INVARIANTS
3154 	TUNABLE_INT_FETCH("vm.debug.divisor", &dbg_divisor);
3155 	uma_dbg_cnt = counter_u64_alloc(M_WAITOK);
3156 	uma_skip_cnt = counter_u64_alloc(M_WAITOK);
3157 #endif
3158 	zone_foreach_unlocked(zone_alloc_sysctl, NULL);
3159 	callout_init(&uma_callout, 1);
3160 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
3161 	booted = BOOT_RUNNING;
3162 
3163 	EVENTHANDLER_REGISTER(shutdown_post_sync, uma_shutdown, NULL,
3164 	    EVENTHANDLER_PRI_FIRST);
3165 }
3166 SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);
3167 
3168 static void
3169 uma_shutdown(void)
3170 {
3171 
3172 	booted = BOOT_SHUTDOWN;
3173 }
3174 
3175 static uma_keg_t
3176 uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
3177 		int align, uint32_t flags)
3178 {
3179 	struct uma_kctor_args args;
3180 
3181 	args.size = size;
3182 	args.uminit = uminit;
3183 	args.fini = fini;
3184 	args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align;
3185 	args.flags = flags;
3186 	args.zone = zone;
3187 	return (zone_alloc_item(kegs, &args, UMA_ANYDOMAIN, M_WAITOK));
3188 }
3189 
3190 /* Public functions */
3191 /* See uma.h */
3192 void
3193 uma_set_align(int align)
3194 {
3195 
3196 	if (align != UMA_ALIGN_CACHE)
3197 		uma_align_cache = align;
3198 }
3199 
3200 /* See uma.h */
3201 uma_zone_t
3202 uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
3203 		uma_init uminit, uma_fini fini, int align, uint32_t flags)
3204 
3205 {
3206 	struct uma_zctor_args args;
3207 	uma_zone_t res;
3208 
3209 	KASSERT(powerof2(align + 1), ("invalid zone alignment %d for \"%s\"",
3210 	    align, name));
3211 
3212 	/* This stuff is essential for the zone ctor */
3213 	memset(&args, 0, sizeof(args));
3214 	args.name = name;
3215 	args.size = size;
3216 	args.ctor = ctor;
3217 	args.dtor = dtor;
3218 	args.uminit = uminit;
3219 	args.fini = fini;
3220 #if defined(INVARIANTS) && !defined(KASAN)
3221 	/*
3222 	 * Inject procedures which check for memory use after free if we are
3223 	 * allowed to scramble the memory while it is not allocated.  This
3224 	 * requires that: UMA is actually able to access the memory, no init
3225 	 * or fini procedures, no dependency on the initial value of the
3226 	 * memory, and no (legitimate) use of the memory after free.  Note,
3227 	 * the ctor and dtor do not need to be empty.
3228 	 */
3229 	if ((!(flags & (UMA_ZONE_ZINIT | UMA_ZONE_NOTOUCH |
3230 	    UMA_ZONE_NOFREE))) && uminit == NULL && fini == NULL) {
3231 		args.uminit = trash_init;
3232 		args.fini = trash_fini;
3233 	}
3234 #endif
3235 	args.align = align;
3236 	args.flags = flags;
3237 	args.keg = NULL;
3238 
3239 	sx_xlock(&uma_reclaim_lock);
3240 	res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK);
3241 	sx_xunlock(&uma_reclaim_lock);
3242 
3243 	return (res);
3244 }
3245 
3246 /* See uma.h */
3247 uma_zone_t
3248 uma_zsecond_create(const char *name, uma_ctor ctor, uma_dtor dtor,
3249     uma_init zinit, uma_fini zfini, uma_zone_t primary)
3250 {
3251 	struct uma_zctor_args args;
3252 	uma_keg_t keg;
3253 	uma_zone_t res;
3254 
3255 	keg = primary->uz_keg;
3256 	memset(&args, 0, sizeof(args));
3257 	args.name = name;
3258 	args.size = keg->uk_size;
3259 	args.ctor = ctor;
3260 	args.dtor = dtor;
3261 	args.uminit = zinit;
3262 	args.fini = zfini;
3263 	args.align = keg->uk_align;
3264 	args.flags = keg->uk_flags | UMA_ZONE_SECONDARY;
3265 	args.keg = keg;
3266 
3267 	sx_xlock(&uma_reclaim_lock);
3268 	res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK);
3269 	sx_xunlock(&uma_reclaim_lock);
3270 
3271 	return (res);
3272 }
3273 
3274 /* See uma.h */
3275 uma_zone_t
3276 uma_zcache_create(const char *name, int size, uma_ctor ctor, uma_dtor dtor,
3277     uma_init zinit, uma_fini zfini, uma_import zimport, uma_release zrelease,
3278     void *arg, int flags)
3279 {
3280 	struct uma_zctor_args args;
3281 
3282 	memset(&args, 0, sizeof(args));
3283 	args.name = name;
3284 	args.size = size;
3285 	args.ctor = ctor;
3286 	args.dtor = dtor;
3287 	args.uminit = zinit;
3288 	args.fini = zfini;
3289 	args.import = zimport;
3290 	args.release = zrelease;
3291 	args.arg = arg;
3292 	args.align = 0;
3293 	args.flags = flags | UMA_ZFLAG_CACHE;
3294 
3295 	return (zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK));
3296 }
3297 
3298 /* See uma.h */
3299 void
3300 uma_zdestroy(uma_zone_t zone)
3301 {
3302 
3303 	/*
3304 	 * Large slabs are expensive to reclaim, so don't bother doing
3305 	 * unnecessary work if we're shutting down.
3306 	 */
3307 	if (booted == BOOT_SHUTDOWN &&
3308 	    zone->uz_fini == NULL && zone->uz_release == zone_release)
3309 		return;
3310 	sx_xlock(&uma_reclaim_lock);
3311 	zone_free_item(zones, zone, NULL, SKIP_NONE);
3312 	sx_xunlock(&uma_reclaim_lock);
3313 }
3314 
3315 void
3316 uma_zwait(uma_zone_t zone)
3317 {
3318 
3319 	if ((zone->uz_flags & UMA_ZONE_SMR) != 0)
3320 		uma_zfree_smr(zone, uma_zalloc_smr(zone, M_WAITOK));
3321 	else if ((zone->uz_flags & UMA_ZONE_PCPU) != 0)
3322 		uma_zfree_pcpu(zone, uma_zalloc_pcpu(zone, M_WAITOK));
3323 	else
3324 		uma_zfree(zone, uma_zalloc(zone, M_WAITOK));
3325 }
3326 
3327 void *
3328 uma_zalloc_pcpu_arg(uma_zone_t zone, void *udata, int flags)
3329 {
3330 	void *item, *pcpu_item;
3331 #ifdef SMP
3332 	int i;
3333 
3334 	MPASS(zone->uz_flags & UMA_ZONE_PCPU);
3335 #endif
3336 	item = uma_zalloc_arg(zone, udata, flags & ~M_ZERO);
3337 	if (item == NULL)
3338 		return (NULL);
3339 	pcpu_item = zpcpu_base_to_offset(item);
3340 	if (flags & M_ZERO) {
3341 #ifdef SMP
3342 		for (i = 0; i <= mp_maxid; i++)
3343 			bzero(zpcpu_get_cpu(pcpu_item, i), zone->uz_size);
3344 #else
3345 		bzero(item, zone->uz_size);
3346 #endif
3347 	}
3348 	return (pcpu_item);
3349 }
3350 
3351 /*
3352  * A stub while both regular and pcpu cases are identical.
3353  */
3354 void
3355 uma_zfree_pcpu_arg(uma_zone_t zone, void *pcpu_item, void *udata)
3356 {
3357 	void *item;
3358 
3359 #ifdef SMP
3360 	MPASS(zone->uz_flags & UMA_ZONE_PCPU);
3361 #endif
3362 
3363         /* uma_zfree_pcu_*(..., NULL) does nothing, to match free(9). */
3364         if (pcpu_item == NULL)
3365                 return;
3366 
3367 	item = zpcpu_offset_to_base(pcpu_item);
3368 	uma_zfree_arg(zone, item, udata);
3369 }
3370 
3371 static inline void *
3372 item_ctor(uma_zone_t zone, int uz_flags, int size, void *udata, int flags,
3373     void *item)
3374 {
3375 #ifdef INVARIANTS
3376 	bool skipdbg;
3377 #endif
3378 
3379 	kasan_mark_item_valid(zone, item);
3380 
3381 #ifdef INVARIANTS
3382 	skipdbg = uma_dbg_zskip(zone, item);
3383 	if (!skipdbg && (uz_flags & UMA_ZFLAG_TRASH) != 0 &&
3384 	    zone->uz_ctor != trash_ctor)
3385 		trash_ctor(item, size, udata, flags);
3386 #endif
3387 
3388 	/* Check flags before loading ctor pointer. */
3389 	if (__predict_false((uz_flags & UMA_ZFLAG_CTORDTOR) != 0) &&
3390 	    __predict_false(zone->uz_ctor != NULL) &&
3391 	    zone->uz_ctor(item, size, udata, flags) != 0) {
3392 		counter_u64_add(zone->uz_fails, 1);
3393 		zone_free_item(zone, item, udata, SKIP_DTOR | SKIP_CNT);
3394 		return (NULL);
3395 	}
3396 #ifdef INVARIANTS
3397 	if (!skipdbg)
3398 		uma_dbg_alloc(zone, NULL, item);
3399 #endif
3400 	if (__predict_false(flags & M_ZERO))
3401 		return (memset(item, 0, size));
3402 
3403 	return (item);
3404 }
3405 
3406 static inline void
3407 item_dtor(uma_zone_t zone, void *item, int size, void *udata,
3408     enum zfreeskip skip)
3409 {
3410 #ifdef INVARIANTS
3411 	bool skipdbg;
3412 
3413 	skipdbg = uma_dbg_zskip(zone, item);
3414 	if (skip == SKIP_NONE && !skipdbg) {
3415 		if ((zone->uz_flags & UMA_ZONE_MALLOC) != 0)
3416 			uma_dbg_free(zone, udata, item);
3417 		else
3418 			uma_dbg_free(zone, NULL, item);
3419 	}
3420 #endif
3421 	if (__predict_true(skip < SKIP_DTOR)) {
3422 		if (zone->uz_dtor != NULL)
3423 			zone->uz_dtor(item, size, udata);
3424 #ifdef INVARIANTS
3425 		if (!skipdbg && (zone->uz_flags & UMA_ZFLAG_TRASH) != 0 &&
3426 		    zone->uz_dtor != trash_dtor)
3427 			trash_dtor(item, size, udata);
3428 #endif
3429 	}
3430 	kasan_mark_item_invalid(zone, item);
3431 }
3432 
3433 #ifdef NUMA
3434 static int
3435 item_domain(void *item)
3436 {
3437 	int domain;
3438 
3439 	domain = vm_phys_domain(vtophys(item));
3440 	KASSERT(domain >= 0 && domain < vm_ndomains,
3441 	    ("%s: unknown domain for item %p", __func__, item));
3442 	return (domain);
3443 }
3444 #endif
3445 
3446 #if defined(INVARIANTS) || defined(DEBUG_MEMGUARD) || defined(WITNESS)
3447 #define	UMA_ZALLOC_DEBUG
3448 static int
3449 uma_zalloc_debug(uma_zone_t zone, void **itemp, void *udata, int flags)
3450 {
3451 	int error;
3452 
3453 	error = 0;
3454 #ifdef WITNESS
3455 	if (flags & M_WAITOK) {
3456 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
3457 		    "uma_zalloc_debug: zone \"%s\"", zone->uz_name);
3458 	}
3459 #endif
3460 
3461 #ifdef INVARIANTS
3462 	KASSERT((flags & M_EXEC) == 0,
3463 	    ("uma_zalloc_debug: called with M_EXEC"));
3464 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
3465 	    ("uma_zalloc_debug: called within spinlock or critical section"));
3466 	KASSERT((zone->uz_flags & UMA_ZONE_PCPU) == 0 || (flags & M_ZERO) == 0,
3467 	    ("uma_zalloc_debug: allocating from a pcpu zone with M_ZERO"));
3468 #endif
3469 
3470 #ifdef DEBUG_MEMGUARD
3471 	if ((zone->uz_flags & UMA_ZONE_SMR) == 0 && memguard_cmp_zone(zone)) {
3472 		void *item;
3473 		item = memguard_alloc(zone->uz_size, flags);
3474 		if (item != NULL) {
3475 			error = EJUSTRETURN;
3476 			if (zone->uz_init != NULL &&
3477 			    zone->uz_init(item, zone->uz_size, flags) != 0) {
3478 				*itemp = NULL;
3479 				return (error);
3480 			}
3481 			if (zone->uz_ctor != NULL &&
3482 			    zone->uz_ctor(item, zone->uz_size, udata,
3483 			    flags) != 0) {
3484 				counter_u64_add(zone->uz_fails, 1);
3485 			    	zone->uz_fini(item, zone->uz_size);
3486 				*itemp = NULL;
3487 				return (error);
3488 			}
3489 			*itemp = item;
3490 			return (error);
3491 		}
3492 		/* This is unfortunate but should not be fatal. */
3493 	}
3494 #endif
3495 	return (error);
3496 }
3497 
3498 static int
3499 uma_zfree_debug(uma_zone_t zone, void *item, void *udata)
3500 {
3501 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
3502 	    ("uma_zfree_debug: called with spinlock or critical section held"));
3503 
3504 #ifdef DEBUG_MEMGUARD
3505 	if ((zone->uz_flags & UMA_ZONE_SMR) == 0 && is_memguard_addr(item)) {
3506 		if (zone->uz_dtor != NULL)
3507 			zone->uz_dtor(item, zone->uz_size, udata);
3508 		if (zone->uz_fini != NULL)
3509 			zone->uz_fini(item, zone->uz_size);
3510 		memguard_free(item);
3511 		return (EJUSTRETURN);
3512 	}
3513 #endif
3514 	return (0);
3515 }
3516 #endif
3517 
3518 static inline void *
3519 cache_alloc_item(uma_zone_t zone, uma_cache_t cache, uma_cache_bucket_t bucket,
3520     void *udata, int flags)
3521 {
3522 	void *item;
3523 	int size, uz_flags;
3524 
3525 	item = cache_bucket_pop(cache, bucket);
3526 	size = cache_uz_size(cache);
3527 	uz_flags = cache_uz_flags(cache);
3528 	critical_exit();
3529 	return (item_ctor(zone, uz_flags, size, udata, flags, item));
3530 }
3531 
3532 static __noinline void *
3533 cache_alloc_retry(uma_zone_t zone, uma_cache_t cache, void *udata, int flags)
3534 {
3535 	uma_cache_bucket_t bucket;
3536 	int domain;
3537 
3538 	while (cache_alloc(zone, cache, udata, flags)) {
3539 		cache = &zone->uz_cpu[curcpu];
3540 		bucket = &cache->uc_allocbucket;
3541 		if (__predict_false(bucket->ucb_cnt == 0))
3542 			continue;
3543 		return (cache_alloc_item(zone, cache, bucket, udata, flags));
3544 	}
3545 	critical_exit();
3546 
3547 	/*
3548 	 * We can not get a bucket so try to return a single item.
3549 	 */
3550 	if (zone->uz_flags & UMA_ZONE_FIRSTTOUCH)
3551 		domain = PCPU_GET(domain);
3552 	else
3553 		domain = UMA_ANYDOMAIN;
3554 	return (zone_alloc_item(zone, udata, domain, flags));
3555 }
3556 
3557 /* See uma.h */
3558 void *
3559 uma_zalloc_smr(uma_zone_t zone, int flags)
3560 {
3561 	uma_cache_bucket_t bucket;
3562 	uma_cache_t cache;
3563 
3564 #ifdef UMA_ZALLOC_DEBUG
3565 	void *item;
3566 
3567 	KASSERT((zone->uz_flags & UMA_ZONE_SMR) != 0,
3568 	    ("uma_zalloc_arg: called with non-SMR zone."));
3569 	if (uma_zalloc_debug(zone, &item, NULL, flags) == EJUSTRETURN)
3570 		return (item);
3571 #endif
3572 
3573 	critical_enter();
3574 	cache = &zone->uz_cpu[curcpu];
3575 	bucket = &cache->uc_allocbucket;
3576 	if (__predict_false(bucket->ucb_cnt == 0))
3577 		return (cache_alloc_retry(zone, cache, NULL, flags));
3578 	return (cache_alloc_item(zone, cache, bucket, NULL, flags));
3579 }
3580 
3581 /* See uma.h */
3582 void *
3583 uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
3584 {
3585 	uma_cache_bucket_t bucket;
3586 	uma_cache_t cache;
3587 
3588 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
3589 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
3590 
3591 	/* This is the fast path allocation */
3592 	CTR3(KTR_UMA, "uma_zalloc_arg zone %s(%p) flags %d", zone->uz_name,
3593 	    zone, flags);
3594 
3595 #ifdef UMA_ZALLOC_DEBUG
3596 	void *item;
3597 
3598 	KASSERT((zone->uz_flags & UMA_ZONE_SMR) == 0,
3599 	    ("uma_zalloc_arg: called with SMR zone."));
3600 	if (uma_zalloc_debug(zone, &item, udata, flags) == EJUSTRETURN)
3601 		return (item);
3602 #endif
3603 
3604 	/*
3605 	 * If possible, allocate from the per-CPU cache.  There are two
3606 	 * requirements for safe access to the per-CPU cache: (1) the thread
3607 	 * accessing the cache must not be preempted or yield during access,
3608 	 * and (2) the thread must not migrate CPUs without switching which
3609 	 * cache it accesses.  We rely on a critical section to prevent
3610 	 * preemption and migration.  We release the critical section in
3611 	 * order to acquire the zone mutex if we are unable to allocate from
3612 	 * the current cache; when we re-acquire the critical section, we
3613 	 * must detect and handle migration if it has occurred.
3614 	 */
3615 	critical_enter();
3616 	cache = &zone->uz_cpu[curcpu];
3617 	bucket = &cache->uc_allocbucket;
3618 	if (__predict_false(bucket->ucb_cnt == 0))
3619 		return (cache_alloc_retry(zone, cache, udata, flags));
3620 	return (cache_alloc_item(zone, cache, bucket, udata, flags));
3621 }
3622 
3623 /*
3624  * Replenish an alloc bucket and possibly restore an old one.  Called in
3625  * a critical section.  Returns in a critical section.
3626  *
3627  * A false return value indicates an allocation failure.
3628  * A true return value indicates success and the caller should retry.
3629  */
3630 static __noinline bool
3631 cache_alloc(uma_zone_t zone, uma_cache_t cache, void *udata, int flags)
3632 {
3633 	uma_bucket_t bucket;
3634 	int curdomain, domain;
3635 	bool new;
3636 
3637 	CRITICAL_ASSERT(curthread);
3638 
3639 	/*
3640 	 * If we have run out of items in our alloc bucket see
3641 	 * if we can switch with the free bucket.
3642 	 *
3643 	 * SMR Zones can't re-use the free bucket until the sequence has
3644 	 * expired.
3645 	 */
3646 	if ((cache_uz_flags(cache) & UMA_ZONE_SMR) == 0 &&
3647 	    cache->uc_freebucket.ucb_cnt != 0) {
3648 		cache_bucket_swap(&cache->uc_freebucket,
3649 		    &cache->uc_allocbucket);
3650 		return (true);
3651 	}
3652 
3653 	/*
3654 	 * Discard any empty allocation bucket while we hold no locks.
3655 	 */
3656 	bucket = cache_bucket_unload_alloc(cache);
3657 	critical_exit();
3658 
3659 	if (bucket != NULL) {
3660 		KASSERT(bucket->ub_cnt == 0,
3661 		    ("cache_alloc: Entered with non-empty alloc bucket."));
3662 		bucket_free(zone, bucket, udata);
3663 	}
3664 
3665 	/*
3666 	 * Attempt to retrieve the item from the per-CPU cache has failed, so
3667 	 * we must go back to the zone.  This requires the zdom lock, so we
3668 	 * must drop the critical section, then re-acquire it when we go back
3669 	 * to the cache.  Since the critical section is released, we may be
3670 	 * preempted or migrate.  As such, make sure not to maintain any
3671 	 * thread-local state specific to the cache from prior to releasing
3672 	 * the critical section.
3673 	 */
3674 	domain = PCPU_GET(domain);
3675 	if ((cache_uz_flags(cache) & UMA_ZONE_ROUNDROBIN) != 0 ||
3676 	    VM_DOMAIN_EMPTY(domain))
3677 		domain = zone_domain_highest(zone, domain);
3678 	bucket = cache_fetch_bucket(zone, cache, domain);
3679 	if (bucket == NULL && zone->uz_bucket_size != 0 && !bucketdisable) {
3680 		bucket = zone_alloc_bucket(zone, udata, domain, flags);
3681 		new = true;
3682 	} else {
3683 		new = false;
3684 	}
3685 
3686 	CTR3(KTR_UMA, "uma_zalloc: zone %s(%p) bucket zone returned %p",
3687 	    zone->uz_name, zone, bucket);
3688 	if (bucket == NULL) {
3689 		critical_enter();
3690 		return (false);
3691 	}
3692 
3693 	/*
3694 	 * See if we lost the race or were migrated.  Cache the
3695 	 * initialized bucket to make this less likely or claim
3696 	 * the memory directly.
3697 	 */
3698 	critical_enter();
3699 	cache = &zone->uz_cpu[curcpu];
3700 	if (cache->uc_allocbucket.ucb_bucket == NULL &&
3701 	    ((cache_uz_flags(cache) & UMA_ZONE_FIRSTTOUCH) == 0 ||
3702 	    (curdomain = PCPU_GET(domain)) == domain ||
3703 	    VM_DOMAIN_EMPTY(curdomain))) {
3704 		if (new)
3705 			atomic_add_long(&ZDOM_GET(zone, domain)->uzd_imax,
3706 			    bucket->ub_cnt);
3707 		cache_bucket_load_alloc(cache, bucket);
3708 		return (true);
3709 	}
3710 
3711 	/*
3712 	 * We lost the race, release this bucket and start over.
3713 	 */
3714 	critical_exit();
3715 	zone_put_bucket(zone, domain, bucket, udata, !new);
3716 	critical_enter();
3717 
3718 	return (true);
3719 }
3720 
3721 void *
3722 uma_zalloc_domain(uma_zone_t zone, void *udata, int domain, int flags)
3723 {
3724 #ifdef NUMA
3725 	uma_bucket_t bucket;
3726 	uma_zone_domain_t zdom;
3727 	void *item;
3728 #endif
3729 
3730 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
3731 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
3732 
3733 	/* This is the fast path allocation */
3734 	CTR4(KTR_UMA, "uma_zalloc_domain zone %s(%p) domain %d flags %d",
3735 	    zone->uz_name, zone, domain, flags);
3736 
3737 	if (flags & M_WAITOK) {
3738 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
3739 		    "uma_zalloc_domain: zone \"%s\"", zone->uz_name);
3740 	}
3741 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
3742 	    ("uma_zalloc_domain: called with spinlock or critical section held"));
3743 	KASSERT((zone->uz_flags & UMA_ZONE_SMR) == 0,
3744 	    ("uma_zalloc_domain: called with SMR zone."));
3745 #ifdef NUMA
3746 	KASSERT((zone->uz_flags & UMA_ZONE_FIRSTTOUCH) != 0,
3747 	    ("uma_zalloc_domain: called with non-FIRSTTOUCH zone."));
3748 
3749 	if (vm_ndomains == 1)
3750 		return (uma_zalloc_arg(zone, udata, flags));
3751 
3752 	/*
3753 	 * Try to allocate from the bucket cache before falling back to the keg.
3754 	 * We could try harder and attempt to allocate from per-CPU caches or
3755 	 * the per-domain cross-domain buckets, but the complexity is probably
3756 	 * not worth it.  It is more important that frees of previous
3757 	 * cross-domain allocations do not blow up the cache.
3758 	 */
3759 	zdom = zone_domain_lock(zone, domain);
3760 	if ((bucket = zone_fetch_bucket(zone, zdom, false)) != NULL) {
3761 		item = bucket->ub_bucket[bucket->ub_cnt - 1];
3762 #ifdef INVARIANTS
3763 		bucket->ub_bucket[bucket->ub_cnt - 1] = NULL;
3764 #endif
3765 		bucket->ub_cnt--;
3766 		zone_put_bucket(zone, domain, bucket, udata, true);
3767 		item = item_ctor(zone, zone->uz_flags, zone->uz_size, udata,
3768 		    flags, item);
3769 		if (item != NULL) {
3770 			KASSERT(item_domain(item) == domain,
3771 			    ("%s: bucket cache item %p from wrong domain",
3772 			    __func__, item));
3773 			counter_u64_add(zone->uz_allocs, 1);
3774 		}
3775 		return (item);
3776 	}
3777 	ZDOM_UNLOCK(zdom);
3778 	return (zone_alloc_item(zone, udata, domain, flags));
3779 #else
3780 	return (uma_zalloc_arg(zone, udata, flags));
3781 #endif
3782 }
3783 
3784 /*
3785  * Find a slab with some space.  Prefer slabs that are partially used over those
3786  * that are totally full.  This helps to reduce fragmentation.
3787  *
3788  * If 'rr' is 1, search all domains starting from 'domain'.  Otherwise check
3789  * only 'domain'.
3790  */
3791 static uma_slab_t
3792 keg_first_slab(uma_keg_t keg, int domain, bool rr)
3793 {
3794 	uma_domain_t dom;
3795 	uma_slab_t slab;
3796 	int start;
3797 
3798 	KASSERT(domain >= 0 && domain < vm_ndomains,
3799 	    ("keg_first_slab: domain %d out of range", domain));
3800 	KEG_LOCK_ASSERT(keg, domain);
3801 
3802 	slab = NULL;
3803 	start = domain;
3804 	do {
3805 		dom = &keg->uk_domain[domain];
3806 		if ((slab = LIST_FIRST(&dom->ud_part_slab)) != NULL)
3807 			return (slab);
3808 		if ((slab = LIST_FIRST(&dom->ud_free_slab)) != NULL) {
3809 			LIST_REMOVE(slab, us_link);
3810 			dom->ud_free_slabs--;
3811 			LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
3812 			return (slab);
3813 		}
3814 		if (rr)
3815 			domain = (domain + 1) % vm_ndomains;
3816 	} while (domain != start);
3817 
3818 	return (NULL);
3819 }
3820 
3821 /*
3822  * Fetch an existing slab from a free or partial list.  Returns with the
3823  * keg domain lock held if a slab was found or unlocked if not.
3824  */
3825 static uma_slab_t
3826 keg_fetch_free_slab(uma_keg_t keg, int domain, bool rr, int flags)
3827 {
3828 	uma_slab_t slab;
3829 	uint32_t reserve;
3830 
3831 	/* HASH has a single free list. */
3832 	if ((keg->uk_flags & UMA_ZFLAG_HASH) != 0)
3833 		domain = 0;
3834 
3835 	KEG_LOCK(keg, domain);
3836 	reserve = (flags & M_USE_RESERVE) != 0 ? 0 : keg->uk_reserve;
3837 	if (keg->uk_domain[domain].ud_free_items <= reserve ||
3838 	    (slab = keg_first_slab(keg, domain, rr)) == NULL) {
3839 		KEG_UNLOCK(keg, domain);
3840 		return (NULL);
3841 	}
3842 	return (slab);
3843 }
3844 
3845 static uma_slab_t
3846 keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int rdomain, const int flags)
3847 {
3848 	struct vm_domainset_iter di;
3849 	uma_slab_t slab;
3850 	int aflags, domain;
3851 	bool rr;
3852 
3853 restart:
3854 	/*
3855 	 * Use the keg's policy if upper layers haven't already specified a
3856 	 * domain (as happens with first-touch zones).
3857 	 *
3858 	 * To avoid races we run the iterator with the keg lock held, but that
3859 	 * means that we cannot allow the vm_domainset layer to sleep.  Thus,
3860 	 * clear M_WAITOK and handle low memory conditions locally.
3861 	 */
3862 	rr = rdomain == UMA_ANYDOMAIN;
3863 	if (rr) {
3864 		aflags = (flags & ~M_WAITOK) | M_NOWAIT;
3865 		vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain,
3866 		    &aflags);
3867 	} else {
3868 		aflags = flags;
3869 		domain = rdomain;
3870 	}
3871 
3872 	for (;;) {
3873 		slab = keg_fetch_free_slab(keg, domain, rr, flags);
3874 		if (slab != NULL)
3875 			return (slab);
3876 
3877 		/*
3878 		 * M_NOVM means don't ask at all!
3879 		 */
3880 		if (flags & M_NOVM)
3881 			break;
3882 
3883 		slab = keg_alloc_slab(keg, zone, domain, flags, aflags);
3884 		if (slab != NULL)
3885 			return (slab);
3886 		if (!rr && (flags & M_WAITOK) == 0)
3887 			break;
3888 		if (rr && vm_domainset_iter_policy(&di, &domain) != 0) {
3889 			if ((flags & M_WAITOK) != 0) {
3890 				vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask, 0);
3891 				goto restart;
3892 			}
3893 			break;
3894 		}
3895 	}
3896 
3897 	/*
3898 	 * We might not have been able to get a slab but another cpu
3899 	 * could have while we were unlocked.  Check again before we
3900 	 * fail.
3901 	 */
3902 	if ((slab = keg_fetch_free_slab(keg, domain, rr, flags)) != NULL)
3903 		return (slab);
3904 
3905 	return (NULL);
3906 }
3907 
3908 static void *
3909 slab_alloc_item(uma_keg_t keg, uma_slab_t slab)
3910 {
3911 	uma_domain_t dom;
3912 	void *item;
3913 	int freei;
3914 
3915 	KEG_LOCK_ASSERT(keg, slab->us_domain);
3916 
3917 	dom = &keg->uk_domain[slab->us_domain];
3918 	freei = BIT_FFS(keg->uk_ipers, &slab->us_free) - 1;
3919 	BIT_CLR(keg->uk_ipers, freei, &slab->us_free);
3920 	item = slab_item(slab, keg, freei);
3921 	slab->us_freecount--;
3922 	dom->ud_free_items--;
3923 
3924 	/*
3925 	 * Move this slab to the full list.  It must be on the partial list, so
3926 	 * we do not need to update the free slab count.  In particular,
3927 	 * keg_fetch_slab() always returns slabs on the partial list.
3928 	 */
3929 	if (slab->us_freecount == 0) {
3930 		LIST_REMOVE(slab, us_link);
3931 		LIST_INSERT_HEAD(&dom->ud_full_slab, slab, us_link);
3932 	}
3933 
3934 	return (item);
3935 }
3936 
3937 static int
3938 zone_import(void *arg, void **bucket, int max, int domain, int flags)
3939 {
3940 	uma_domain_t dom;
3941 	uma_zone_t zone;
3942 	uma_slab_t slab;
3943 	uma_keg_t keg;
3944 #ifdef NUMA
3945 	int stripe;
3946 #endif
3947 	int i;
3948 
3949 	zone = arg;
3950 	slab = NULL;
3951 	keg = zone->uz_keg;
3952 	/* Try to keep the buckets totally full */
3953 	for (i = 0; i < max; ) {
3954 		if ((slab = keg_fetch_slab(keg, zone, domain, flags)) == NULL)
3955 			break;
3956 #ifdef NUMA
3957 		stripe = howmany(max, vm_ndomains);
3958 #endif
3959 		dom = &keg->uk_domain[slab->us_domain];
3960 		do {
3961 			bucket[i++] = slab_alloc_item(keg, slab);
3962 			if (dom->ud_free_items <= keg->uk_reserve) {
3963 				/*
3964 				 * Avoid depleting the reserve after a
3965 				 * successful item allocation, even if
3966 				 * M_USE_RESERVE is specified.
3967 				 */
3968 				KEG_UNLOCK(keg, slab->us_domain);
3969 				goto out;
3970 			}
3971 #ifdef NUMA
3972 			/*
3973 			 * If the zone is striped we pick a new slab for every
3974 			 * N allocations.  Eliminating this conditional will
3975 			 * instead pick a new domain for each bucket rather
3976 			 * than stripe within each bucket.  The current option
3977 			 * produces more fragmentation and requires more cpu
3978 			 * time but yields better distribution.
3979 			 */
3980 			if ((zone->uz_flags & UMA_ZONE_ROUNDROBIN) != 0 &&
3981 			    vm_ndomains > 1 && --stripe == 0)
3982 				break;
3983 #endif
3984 		} while (slab->us_freecount != 0 && i < max);
3985 		KEG_UNLOCK(keg, slab->us_domain);
3986 
3987 		/* Don't block if we allocated any successfully. */
3988 		flags &= ~M_WAITOK;
3989 		flags |= M_NOWAIT;
3990 	}
3991 out:
3992 	return i;
3993 }
3994 
3995 static int
3996 zone_alloc_limit_hard(uma_zone_t zone, int count, int flags)
3997 {
3998 	uint64_t old, new, total, max;
3999 
4000 	/*
4001 	 * The hard case.  We're going to sleep because there were existing
4002 	 * sleepers or because we ran out of items.  This routine enforces
4003 	 * fairness by keeping fifo order.
4004 	 *
4005 	 * First release our ill gotten gains and make some noise.
4006 	 */
4007 	for (;;) {
4008 		zone_free_limit(zone, count);
4009 		zone_log_warning(zone);
4010 		zone_maxaction(zone);
4011 		if (flags & M_NOWAIT)
4012 			return (0);
4013 
4014 		/*
4015 		 * We need to allocate an item or set ourself as a sleeper
4016 		 * while the sleepq lock is held to avoid wakeup races.  This
4017 		 * is essentially a home rolled semaphore.
4018 		 */
4019 		sleepq_lock(&zone->uz_max_items);
4020 		old = zone->uz_items;
4021 		do {
4022 			MPASS(UZ_ITEMS_SLEEPERS(old) < UZ_ITEMS_SLEEPERS_MAX);
4023 			/* Cache the max since we will evaluate twice. */
4024 			max = zone->uz_max_items;
4025 			if (UZ_ITEMS_SLEEPERS(old) != 0 ||
4026 			    UZ_ITEMS_COUNT(old) >= max)
4027 				new = old + UZ_ITEMS_SLEEPER;
4028 			else
4029 				new = old + MIN(count, max - old);
4030 		} while (atomic_fcmpset_64(&zone->uz_items, &old, new) == 0);
4031 
4032 		/* We may have successfully allocated under the sleepq lock. */
4033 		if (UZ_ITEMS_SLEEPERS(new) == 0) {
4034 			sleepq_release(&zone->uz_max_items);
4035 			return (new - old);
4036 		}
4037 
4038 		/*
4039 		 * This is in a different cacheline from uz_items so that we
4040 		 * don't constantly invalidate the fastpath cacheline when we
4041 		 * adjust item counts.  This could be limited to toggling on
4042 		 * transitions.
4043 		 */
4044 		atomic_add_32(&zone->uz_sleepers, 1);
4045 		atomic_add_64(&zone->uz_sleeps, 1);
4046 
4047 		/*
4048 		 * We have added ourselves as a sleeper.  The sleepq lock
4049 		 * protects us from wakeup races.  Sleep now and then retry.
4050 		 */
4051 		sleepq_add(&zone->uz_max_items, NULL, "zonelimit", 0, 0);
4052 		sleepq_wait(&zone->uz_max_items, PVM);
4053 
4054 		/*
4055 		 * After wakeup, remove ourselves as a sleeper and try
4056 		 * again.  We no longer have the sleepq lock for protection.
4057 		 *
4058 		 * Subract ourselves as a sleeper while attempting to add
4059 		 * our count.
4060 		 */
4061 		atomic_subtract_32(&zone->uz_sleepers, 1);
4062 		old = atomic_fetchadd_64(&zone->uz_items,
4063 		    -(UZ_ITEMS_SLEEPER - count));
4064 		/* We're no longer a sleeper. */
4065 		old -= UZ_ITEMS_SLEEPER;
4066 
4067 		/*
4068 		 * If we're still at the limit, restart.  Notably do not
4069 		 * block on other sleepers.  Cache the max value to protect
4070 		 * against changes via sysctl.
4071 		 */
4072 		total = UZ_ITEMS_COUNT(old);
4073 		max = zone->uz_max_items;
4074 		if (total >= max)
4075 			continue;
4076 		/* Truncate if necessary, otherwise wake other sleepers. */
4077 		if (total + count > max) {
4078 			zone_free_limit(zone, total + count - max);
4079 			count = max - total;
4080 		} else if (total + count < max && UZ_ITEMS_SLEEPERS(old) != 0)
4081 			wakeup_one(&zone->uz_max_items);
4082 
4083 		return (count);
4084 	}
4085 }
4086 
4087 /*
4088  * Allocate 'count' items from our max_items limit.  Returns the number
4089  * available.  If M_NOWAIT is not specified it will sleep until at least
4090  * one item can be allocated.
4091  */
4092 static int
4093 zone_alloc_limit(uma_zone_t zone, int count, int flags)
4094 {
4095 	uint64_t old;
4096 	uint64_t max;
4097 
4098 	max = zone->uz_max_items;
4099 	MPASS(max > 0);
4100 
4101 	/*
4102 	 * We expect normal allocations to succeed with a simple
4103 	 * fetchadd.
4104 	 */
4105 	old = atomic_fetchadd_64(&zone->uz_items, count);
4106 	if (__predict_true(old + count <= max))
4107 		return (count);
4108 
4109 	/*
4110 	 * If we had some items and no sleepers just return the
4111 	 * truncated value.  We have to release the excess space
4112 	 * though because that may wake sleepers who weren't woken
4113 	 * because we were temporarily over the limit.
4114 	 */
4115 	if (old < max) {
4116 		zone_free_limit(zone, (old + count) - max);
4117 		return (max - old);
4118 	}
4119 	return (zone_alloc_limit_hard(zone, count, flags));
4120 }
4121 
4122 /*
4123  * Free a number of items back to the limit.
4124  */
4125 static void
4126 zone_free_limit(uma_zone_t zone, int count)
4127 {
4128 	uint64_t old;
4129 
4130 	MPASS(count > 0);
4131 
4132 	/*
4133 	 * In the common case we either have no sleepers or
4134 	 * are still over the limit and can just return.
4135 	 */
4136 	old = atomic_fetchadd_64(&zone->uz_items, -count);
4137 	if (__predict_true(UZ_ITEMS_SLEEPERS(old) == 0 ||
4138 	   UZ_ITEMS_COUNT(old) - count >= zone->uz_max_items))
4139 		return;
4140 
4141 	/*
4142 	 * Moderate the rate of wakeups.  Sleepers will continue
4143 	 * to generate wakeups if necessary.
4144 	 */
4145 	wakeup_one(&zone->uz_max_items);
4146 }
4147 
4148 static uma_bucket_t
4149 zone_alloc_bucket(uma_zone_t zone, void *udata, int domain, int flags)
4150 {
4151 	uma_bucket_t bucket;
4152 	int error, maxbucket, cnt;
4153 
4154 	CTR3(KTR_UMA, "zone_alloc_bucket zone %s(%p) domain %d", zone->uz_name,
4155 	    zone, domain);
4156 
4157 	/* Avoid allocs targeting empty domains. */
4158 	if (domain != UMA_ANYDOMAIN && VM_DOMAIN_EMPTY(domain))
4159 		domain = UMA_ANYDOMAIN;
4160 	else if ((zone->uz_flags & UMA_ZONE_ROUNDROBIN) != 0)
4161 		domain = UMA_ANYDOMAIN;
4162 
4163 	if (zone->uz_max_items > 0)
4164 		maxbucket = zone_alloc_limit(zone, zone->uz_bucket_size,
4165 		    M_NOWAIT);
4166 	else
4167 		maxbucket = zone->uz_bucket_size;
4168 	if (maxbucket == 0)
4169 		return (false);
4170 
4171 	/* Don't wait for buckets, preserve caller's NOVM setting. */
4172 	bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM));
4173 	if (bucket == NULL) {
4174 		cnt = 0;
4175 		goto out;
4176 	}
4177 
4178 	bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket,
4179 	    MIN(maxbucket, bucket->ub_entries), domain, flags);
4180 
4181 	/*
4182 	 * Initialize the memory if necessary.
4183 	 */
4184 	if (bucket->ub_cnt != 0 && zone->uz_init != NULL) {
4185 		int i;
4186 
4187 		for (i = 0; i < bucket->ub_cnt; i++) {
4188 			kasan_mark_item_valid(zone, bucket->ub_bucket[i]);
4189 			error = zone->uz_init(bucket->ub_bucket[i],
4190 			    zone->uz_size, flags);
4191 			kasan_mark_item_invalid(zone, bucket->ub_bucket[i]);
4192 			if (error != 0)
4193 				break;
4194 		}
4195 
4196 		/*
4197 		 * If we couldn't initialize the whole bucket, put the
4198 		 * rest back onto the freelist.
4199 		 */
4200 		if (i != bucket->ub_cnt) {
4201 			zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i],
4202 			    bucket->ub_cnt - i);
4203 #ifdef INVARIANTS
4204 			bzero(&bucket->ub_bucket[i],
4205 			    sizeof(void *) * (bucket->ub_cnt - i));
4206 #endif
4207 			bucket->ub_cnt = i;
4208 		}
4209 	}
4210 
4211 	cnt = bucket->ub_cnt;
4212 	if (bucket->ub_cnt == 0) {
4213 		bucket_free(zone, bucket, udata);
4214 		counter_u64_add(zone->uz_fails, 1);
4215 		bucket = NULL;
4216 	}
4217 out:
4218 	if (zone->uz_max_items > 0 && cnt < maxbucket)
4219 		zone_free_limit(zone, maxbucket - cnt);
4220 
4221 	return (bucket);
4222 }
4223 
4224 /*
4225  * Allocates a single item from a zone.
4226  *
4227  * Arguments
4228  *	zone   The zone to alloc for.
4229  *	udata  The data to be passed to the constructor.
4230  *	domain The domain to allocate from or UMA_ANYDOMAIN.
4231  *	flags  M_WAITOK, M_NOWAIT, M_ZERO.
4232  *
4233  * Returns
4234  *	NULL if there is no memory and M_NOWAIT is set
4235  *	An item if successful
4236  */
4237 
4238 static void *
4239 zone_alloc_item(uma_zone_t zone, void *udata, int domain, int flags)
4240 {
4241 	void *item;
4242 
4243 	if (zone->uz_max_items > 0 && zone_alloc_limit(zone, 1, flags) == 0) {
4244 		counter_u64_add(zone->uz_fails, 1);
4245 		return (NULL);
4246 	}
4247 
4248 	/* Avoid allocs targeting empty domains. */
4249 	if (domain != UMA_ANYDOMAIN && VM_DOMAIN_EMPTY(domain))
4250 		domain = UMA_ANYDOMAIN;
4251 
4252 	if (zone->uz_import(zone->uz_arg, &item, 1, domain, flags) != 1)
4253 		goto fail_cnt;
4254 
4255 	/*
4256 	 * We have to call both the zone's init (not the keg's init)
4257 	 * and the zone's ctor.  This is because the item is going from
4258 	 * a keg slab directly to the user, and the user is expecting it
4259 	 * to be both zone-init'd as well as zone-ctor'd.
4260 	 */
4261 	if (zone->uz_init != NULL) {
4262 		int error;
4263 
4264 		kasan_mark_item_valid(zone, item);
4265 		error = zone->uz_init(item, zone->uz_size, flags);
4266 		kasan_mark_item_invalid(zone, item);
4267 		if (error != 0) {
4268 			zone_free_item(zone, item, udata, SKIP_FINI | SKIP_CNT);
4269 			goto fail_cnt;
4270 		}
4271 	}
4272 	item = item_ctor(zone, zone->uz_flags, zone->uz_size, udata, flags,
4273 	    item);
4274 	if (item == NULL)
4275 		goto fail;
4276 
4277 	counter_u64_add(zone->uz_allocs, 1);
4278 	CTR3(KTR_UMA, "zone_alloc_item item %p from %s(%p)", item,
4279 	    zone->uz_name, zone);
4280 
4281 	return (item);
4282 
4283 fail_cnt:
4284 	counter_u64_add(zone->uz_fails, 1);
4285 fail:
4286 	if (zone->uz_max_items > 0)
4287 		zone_free_limit(zone, 1);
4288 	CTR2(KTR_UMA, "zone_alloc_item failed from %s(%p)",
4289 	    zone->uz_name, zone);
4290 
4291 	return (NULL);
4292 }
4293 
4294 /* See uma.h */
4295 void
4296 uma_zfree_smr(uma_zone_t zone, void *item)
4297 {
4298 	uma_cache_t cache;
4299 	uma_cache_bucket_t bucket;
4300 	int itemdomain, uz_flags;
4301 
4302 #ifdef UMA_ZALLOC_DEBUG
4303 	KASSERT((zone->uz_flags & UMA_ZONE_SMR) != 0,
4304 	    ("uma_zfree_smr: called with non-SMR zone."));
4305 	KASSERT(item != NULL, ("uma_zfree_smr: Called with NULL pointer."));
4306 	SMR_ASSERT_NOT_ENTERED(zone->uz_smr);
4307 	if (uma_zfree_debug(zone, item, NULL) == EJUSTRETURN)
4308 		return;
4309 #endif
4310 	cache = &zone->uz_cpu[curcpu];
4311 	uz_flags = cache_uz_flags(cache);
4312 	itemdomain = 0;
4313 #ifdef NUMA
4314 	if ((uz_flags & UMA_ZONE_FIRSTTOUCH) != 0)
4315 		itemdomain = item_domain(item);
4316 #endif
4317 	critical_enter();
4318 	do {
4319 		cache = &zone->uz_cpu[curcpu];
4320 		/* SMR Zones must free to the free bucket. */
4321 		bucket = &cache->uc_freebucket;
4322 #ifdef NUMA
4323 		if ((uz_flags & UMA_ZONE_FIRSTTOUCH) != 0 &&
4324 		    PCPU_GET(domain) != itemdomain) {
4325 			bucket = &cache->uc_crossbucket;
4326 		}
4327 #endif
4328 		if (__predict_true(bucket->ucb_cnt < bucket->ucb_entries)) {
4329 			cache_bucket_push(cache, bucket, item);
4330 			critical_exit();
4331 			return;
4332 		}
4333 	} while (cache_free(zone, cache, NULL, item, itemdomain));
4334 	critical_exit();
4335 
4336 	/*
4337 	 * If nothing else caught this, we'll just do an internal free.
4338 	 */
4339 	zone_free_item(zone, item, NULL, SKIP_NONE);
4340 }
4341 
4342 /* See uma.h */
4343 void
4344 uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
4345 {
4346 	uma_cache_t cache;
4347 	uma_cache_bucket_t bucket;
4348 	int itemdomain, uz_flags;
4349 
4350 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
4351 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
4352 
4353 	CTR2(KTR_UMA, "uma_zfree_arg zone %s(%p)", zone->uz_name, zone);
4354 
4355 #ifdef UMA_ZALLOC_DEBUG
4356 	KASSERT((zone->uz_flags & UMA_ZONE_SMR) == 0,
4357 	    ("uma_zfree_arg: called with SMR zone."));
4358 	if (uma_zfree_debug(zone, item, udata) == EJUSTRETURN)
4359 		return;
4360 #endif
4361         /* uma_zfree(..., NULL) does nothing, to match free(9). */
4362         if (item == NULL)
4363                 return;
4364 
4365 	/*
4366 	 * We are accessing the per-cpu cache without a critical section to
4367 	 * fetch size and flags.  This is acceptable, if we are preempted we
4368 	 * will simply read another cpu's line.
4369 	 */
4370 	cache = &zone->uz_cpu[curcpu];
4371 	uz_flags = cache_uz_flags(cache);
4372 	if (UMA_ALWAYS_CTORDTOR ||
4373 	    __predict_false((uz_flags & UMA_ZFLAG_CTORDTOR) != 0))
4374 		item_dtor(zone, item, cache_uz_size(cache), udata, SKIP_NONE);
4375 
4376 	/*
4377 	 * The race here is acceptable.  If we miss it we'll just have to wait
4378 	 * a little longer for the limits to be reset.
4379 	 */
4380 	if (__predict_false(uz_flags & UMA_ZFLAG_LIMIT)) {
4381 		if (atomic_load_32(&zone->uz_sleepers) > 0)
4382 			goto zfree_item;
4383 	}
4384 
4385 	/*
4386 	 * If possible, free to the per-CPU cache.  There are two
4387 	 * requirements for safe access to the per-CPU cache: (1) the thread
4388 	 * accessing the cache must not be preempted or yield during access,
4389 	 * and (2) the thread must not migrate CPUs without switching which
4390 	 * cache it accesses.  We rely on a critical section to prevent
4391 	 * preemption and migration.  We release the critical section in
4392 	 * order to acquire the zone mutex if we are unable to free to the
4393 	 * current cache; when we re-acquire the critical section, we must
4394 	 * detect and handle migration if it has occurred.
4395 	 */
4396 	itemdomain = 0;
4397 #ifdef NUMA
4398 	if ((uz_flags & UMA_ZONE_FIRSTTOUCH) != 0)
4399 		itemdomain = item_domain(item);
4400 #endif
4401 	critical_enter();
4402 	do {
4403 		cache = &zone->uz_cpu[curcpu];
4404 		/*
4405 		 * Try to free into the allocbucket first to give LIFO
4406 		 * ordering for cache-hot datastructures.  Spill over
4407 		 * into the freebucket if necessary.  Alloc will swap
4408 		 * them if one runs dry.
4409 		 */
4410 		bucket = &cache->uc_allocbucket;
4411 #ifdef NUMA
4412 		if ((uz_flags & UMA_ZONE_FIRSTTOUCH) != 0 &&
4413 		    PCPU_GET(domain) != itemdomain) {
4414 			bucket = &cache->uc_crossbucket;
4415 		} else
4416 #endif
4417 		if (bucket->ucb_cnt == bucket->ucb_entries &&
4418 		   cache->uc_freebucket.ucb_cnt <
4419 		   cache->uc_freebucket.ucb_entries)
4420 			cache_bucket_swap(&cache->uc_freebucket,
4421 			    &cache->uc_allocbucket);
4422 		if (__predict_true(bucket->ucb_cnt < bucket->ucb_entries)) {
4423 			cache_bucket_push(cache, bucket, item);
4424 			critical_exit();
4425 			return;
4426 		}
4427 	} while (cache_free(zone, cache, udata, item, itemdomain));
4428 	critical_exit();
4429 
4430 	/*
4431 	 * If nothing else caught this, we'll just do an internal free.
4432 	 */
4433 zfree_item:
4434 	zone_free_item(zone, item, udata, SKIP_DTOR);
4435 }
4436 
4437 #ifdef NUMA
4438 /*
4439  * sort crossdomain free buckets to domain correct buckets and cache
4440  * them.
4441  */
4442 static void
4443 zone_free_cross(uma_zone_t zone, uma_bucket_t bucket, void *udata)
4444 {
4445 	struct uma_bucketlist emptybuckets, fullbuckets;
4446 	uma_zone_domain_t zdom;
4447 	uma_bucket_t b;
4448 	smr_seq_t seq;
4449 	void *item;
4450 	int domain;
4451 
4452 	CTR3(KTR_UMA,
4453 	    "uma_zfree: zone %s(%p) draining cross bucket %p",
4454 	    zone->uz_name, zone, bucket);
4455 
4456 	/*
4457 	 * It is possible for buckets to arrive here out of order so we fetch
4458 	 * the current smr seq rather than accepting the bucket's.
4459 	 */
4460 	seq = SMR_SEQ_INVALID;
4461 	if ((zone->uz_flags & UMA_ZONE_SMR) != 0)
4462 		seq = smr_advance(zone->uz_smr);
4463 
4464 	/*
4465 	 * To avoid having ndomain * ndomain buckets for sorting we have a
4466 	 * lock on the current crossfree bucket.  A full matrix with
4467 	 * per-domain locking could be used if necessary.
4468 	 */
4469 	STAILQ_INIT(&emptybuckets);
4470 	STAILQ_INIT(&fullbuckets);
4471 	ZONE_CROSS_LOCK(zone);
4472 	for (; bucket->ub_cnt > 0; bucket->ub_cnt--) {
4473 		item = bucket->ub_bucket[bucket->ub_cnt - 1];
4474 		domain = item_domain(item);
4475 		zdom = ZDOM_GET(zone, domain);
4476 		if (zdom->uzd_cross == NULL) {
4477 			if ((b = STAILQ_FIRST(&emptybuckets)) != NULL) {
4478 				STAILQ_REMOVE_HEAD(&emptybuckets, ub_link);
4479 				zdom->uzd_cross = b;
4480 			} else {
4481 				/*
4482 				 * Avoid allocating a bucket with the cross lock
4483 				 * held, since allocation can trigger a
4484 				 * cross-domain free and bucket zones may
4485 				 * allocate from each other.
4486 				 */
4487 				ZONE_CROSS_UNLOCK(zone);
4488 				b = bucket_alloc(zone, udata, M_NOWAIT);
4489 				if (b == NULL)
4490 					goto out;
4491 				ZONE_CROSS_LOCK(zone);
4492 				if (zdom->uzd_cross != NULL) {
4493 					STAILQ_INSERT_HEAD(&emptybuckets, b,
4494 					    ub_link);
4495 				} else {
4496 					zdom->uzd_cross = b;
4497 				}
4498 			}
4499 		}
4500 		b = zdom->uzd_cross;
4501 		b->ub_bucket[b->ub_cnt++] = item;
4502 		b->ub_seq = seq;
4503 		if (b->ub_cnt == b->ub_entries) {
4504 			STAILQ_INSERT_HEAD(&fullbuckets, b, ub_link);
4505 			if ((b = STAILQ_FIRST(&emptybuckets)) != NULL)
4506 				STAILQ_REMOVE_HEAD(&emptybuckets, ub_link);
4507 			zdom->uzd_cross = b;
4508 		}
4509 	}
4510 	ZONE_CROSS_UNLOCK(zone);
4511 out:
4512 	if (bucket->ub_cnt == 0)
4513 		bucket->ub_seq = SMR_SEQ_INVALID;
4514 	bucket_free(zone, bucket, udata);
4515 
4516 	while ((b = STAILQ_FIRST(&emptybuckets)) != NULL) {
4517 		STAILQ_REMOVE_HEAD(&emptybuckets, ub_link);
4518 		bucket_free(zone, b, udata);
4519 	}
4520 	while ((b = STAILQ_FIRST(&fullbuckets)) != NULL) {
4521 		STAILQ_REMOVE_HEAD(&fullbuckets, ub_link);
4522 		domain = item_domain(b->ub_bucket[0]);
4523 		zone_put_bucket(zone, domain, b, udata, true);
4524 	}
4525 }
4526 #endif
4527 
4528 static void
4529 zone_free_bucket(uma_zone_t zone, uma_bucket_t bucket, void *udata,
4530     int itemdomain, bool ws)
4531 {
4532 
4533 #ifdef NUMA
4534 	/*
4535 	 * Buckets coming from the wrong domain will be entirely for the
4536 	 * only other domain on two domain systems.  In this case we can
4537 	 * simply cache them.  Otherwise we need to sort them back to
4538 	 * correct domains.
4539 	 */
4540 	if ((zone->uz_flags & UMA_ZONE_FIRSTTOUCH) != 0 &&
4541 	    vm_ndomains > 2 && PCPU_GET(domain) != itemdomain) {
4542 		zone_free_cross(zone, bucket, udata);
4543 		return;
4544 	}
4545 #endif
4546 
4547 	/*
4548 	 * Attempt to save the bucket in the zone's domain bucket cache.
4549 	 */
4550 	CTR3(KTR_UMA,
4551 	    "uma_zfree: zone %s(%p) putting bucket %p on free list",
4552 	    zone->uz_name, zone, bucket);
4553 	/* ub_cnt is pointing to the last free item */
4554 	if ((zone->uz_flags & UMA_ZONE_ROUNDROBIN) != 0)
4555 		itemdomain = zone_domain_lowest(zone, itemdomain);
4556 	zone_put_bucket(zone, itemdomain, bucket, udata, ws);
4557 }
4558 
4559 /*
4560  * Populate a free or cross bucket for the current cpu cache.  Free any
4561  * existing full bucket either to the zone cache or back to the slab layer.
4562  *
4563  * Enters and returns in a critical section.  false return indicates that
4564  * we can not satisfy this free in the cache layer.  true indicates that
4565  * the caller should retry.
4566  */
4567 static __noinline bool
4568 cache_free(uma_zone_t zone, uma_cache_t cache, void *udata, void *item,
4569     int itemdomain)
4570 {
4571 	uma_cache_bucket_t cbucket;
4572 	uma_bucket_t newbucket, bucket;
4573 
4574 	CRITICAL_ASSERT(curthread);
4575 
4576 	if (zone->uz_bucket_size == 0)
4577 		return false;
4578 
4579 	cache = &zone->uz_cpu[curcpu];
4580 	newbucket = NULL;
4581 
4582 	/*
4583 	 * FIRSTTOUCH domains need to free to the correct zdom.  When
4584 	 * enabled this is the zdom of the item.   The bucket is the
4585 	 * cross bucket if the current domain and itemdomain do not match.
4586 	 */
4587 	cbucket = &cache->uc_freebucket;
4588 #ifdef NUMA
4589 	if ((cache_uz_flags(cache) & UMA_ZONE_FIRSTTOUCH) != 0) {
4590 		if (PCPU_GET(domain) != itemdomain) {
4591 			cbucket = &cache->uc_crossbucket;
4592 			if (cbucket->ucb_cnt != 0)
4593 				counter_u64_add(zone->uz_xdomain,
4594 				    cbucket->ucb_cnt);
4595 		}
4596 	}
4597 #endif
4598 	bucket = cache_bucket_unload(cbucket);
4599 	KASSERT(bucket == NULL || bucket->ub_cnt == bucket->ub_entries,
4600 	    ("cache_free: Entered with non-full free bucket."));
4601 
4602 	/* We are no longer associated with this CPU. */
4603 	critical_exit();
4604 
4605 	/*
4606 	 * Don't let SMR zones operate without a free bucket.  Force
4607 	 * a synchronize and re-use this one.  We will only degrade
4608 	 * to a synchronize every bucket_size items rather than every
4609 	 * item if we fail to allocate a bucket.
4610 	 */
4611 	if ((zone->uz_flags & UMA_ZONE_SMR) != 0) {
4612 		if (bucket != NULL)
4613 			bucket->ub_seq = smr_advance(zone->uz_smr);
4614 		newbucket = bucket_alloc(zone, udata, M_NOWAIT);
4615 		if (newbucket == NULL && bucket != NULL) {
4616 			bucket_drain(zone, bucket);
4617 			newbucket = bucket;
4618 			bucket = NULL;
4619 		}
4620 	} else if (!bucketdisable)
4621 		newbucket = bucket_alloc(zone, udata, M_NOWAIT);
4622 
4623 	if (bucket != NULL)
4624 		zone_free_bucket(zone, bucket, udata, itemdomain, true);
4625 
4626 	critical_enter();
4627 	if ((bucket = newbucket) == NULL)
4628 		return (false);
4629 	cache = &zone->uz_cpu[curcpu];
4630 #ifdef NUMA
4631 	/*
4632 	 * Check to see if we should be populating the cross bucket.  If it
4633 	 * is already populated we will fall through and attempt to populate
4634 	 * the free bucket.
4635 	 */
4636 	if ((cache_uz_flags(cache) & UMA_ZONE_FIRSTTOUCH) != 0) {
4637 		if (PCPU_GET(domain) != itemdomain &&
4638 		    cache->uc_crossbucket.ucb_bucket == NULL) {
4639 			cache_bucket_load_cross(cache, bucket);
4640 			return (true);
4641 		}
4642 	}
4643 #endif
4644 	/*
4645 	 * We may have lost the race to fill the bucket or switched CPUs.
4646 	 */
4647 	if (cache->uc_freebucket.ucb_bucket != NULL) {
4648 		critical_exit();
4649 		bucket_free(zone, bucket, udata);
4650 		critical_enter();
4651 	} else
4652 		cache_bucket_load_free(cache, bucket);
4653 
4654 	return (true);
4655 }
4656 
4657 static void
4658 slab_free_item(uma_zone_t zone, uma_slab_t slab, void *item)
4659 {
4660 	uma_keg_t keg;
4661 	uma_domain_t dom;
4662 	int freei;
4663 
4664 	keg = zone->uz_keg;
4665 	KEG_LOCK_ASSERT(keg, slab->us_domain);
4666 
4667 	/* Do we need to remove from any lists? */
4668 	dom = &keg->uk_domain[slab->us_domain];
4669 	if (slab->us_freecount + 1 == keg->uk_ipers) {
4670 		LIST_REMOVE(slab, us_link);
4671 		LIST_INSERT_HEAD(&dom->ud_free_slab, slab, us_link);
4672 		dom->ud_free_slabs++;
4673 	} else if (slab->us_freecount == 0) {
4674 		LIST_REMOVE(slab, us_link);
4675 		LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
4676 	}
4677 
4678 	/* Slab management. */
4679 	freei = slab_item_index(slab, keg, item);
4680 	BIT_SET(keg->uk_ipers, freei, &slab->us_free);
4681 	slab->us_freecount++;
4682 
4683 	/* Keg statistics. */
4684 	dom->ud_free_items++;
4685 }
4686 
4687 static void
4688 zone_release(void *arg, void **bucket, int cnt)
4689 {
4690 	struct mtx *lock;
4691 	uma_zone_t zone;
4692 	uma_slab_t slab;
4693 	uma_keg_t keg;
4694 	uint8_t *mem;
4695 	void *item;
4696 	int i;
4697 
4698 	zone = arg;
4699 	keg = zone->uz_keg;
4700 	lock = NULL;
4701 	if (__predict_false((zone->uz_flags & UMA_ZFLAG_HASH) != 0))
4702 		lock = KEG_LOCK(keg, 0);
4703 	for (i = 0; i < cnt; i++) {
4704 		item = bucket[i];
4705 		if (__predict_true((zone->uz_flags & UMA_ZFLAG_VTOSLAB) != 0)) {
4706 			slab = vtoslab((vm_offset_t)item);
4707 		} else {
4708 			mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
4709 			if ((zone->uz_flags & UMA_ZFLAG_HASH) != 0)
4710 				slab = hash_sfind(&keg->uk_hash, mem);
4711 			else
4712 				slab = (uma_slab_t)(mem + keg->uk_pgoff);
4713 		}
4714 		if (lock != KEG_LOCKPTR(keg, slab->us_domain)) {
4715 			if (lock != NULL)
4716 				mtx_unlock(lock);
4717 			lock = KEG_LOCK(keg, slab->us_domain);
4718 		}
4719 		slab_free_item(zone, slab, item);
4720 	}
4721 	if (lock != NULL)
4722 		mtx_unlock(lock);
4723 }
4724 
4725 /*
4726  * Frees a single item to any zone.
4727  *
4728  * Arguments:
4729  *	zone   The zone to free to
4730  *	item   The item we're freeing
4731  *	udata  User supplied data for the dtor
4732  *	skip   Skip dtors and finis
4733  */
4734 static __noinline void
4735 zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip)
4736 {
4737 
4738 	/*
4739 	 * If a free is sent directly to an SMR zone we have to
4740 	 * synchronize immediately because the item can instantly
4741 	 * be reallocated. This should only happen in degenerate
4742 	 * cases when no memory is available for per-cpu caches.
4743 	 */
4744 	if ((zone->uz_flags & UMA_ZONE_SMR) != 0 && skip == SKIP_NONE)
4745 		smr_synchronize(zone->uz_smr);
4746 
4747 	item_dtor(zone, item, zone->uz_size, udata, skip);
4748 
4749 	if (skip < SKIP_FINI && zone->uz_fini) {
4750 		kasan_mark_item_valid(zone, item);
4751 		zone->uz_fini(item, zone->uz_size);
4752 		kasan_mark_item_invalid(zone, item);
4753 	}
4754 
4755 	zone->uz_release(zone->uz_arg, &item, 1);
4756 
4757 	if (skip & SKIP_CNT)
4758 		return;
4759 
4760 	counter_u64_add(zone->uz_frees, 1);
4761 
4762 	if (zone->uz_max_items > 0)
4763 		zone_free_limit(zone, 1);
4764 }
4765 
4766 /* See uma.h */
4767 int
4768 uma_zone_set_max(uma_zone_t zone, int nitems)
4769 {
4770 
4771 	/*
4772 	 * If the limit is small, we may need to constrain the maximum per-CPU
4773 	 * cache size, or disable caching entirely.
4774 	 */
4775 	uma_zone_set_maxcache(zone, nitems);
4776 
4777 	/*
4778 	 * XXX This can misbehave if the zone has any allocations with
4779 	 * no limit and a limit is imposed.  There is currently no
4780 	 * way to clear a limit.
4781 	 */
4782 	ZONE_LOCK(zone);
4783 	zone->uz_max_items = nitems;
4784 	zone->uz_flags |= UMA_ZFLAG_LIMIT;
4785 	zone_update_caches(zone);
4786 	/* We may need to wake waiters. */
4787 	wakeup(&zone->uz_max_items);
4788 	ZONE_UNLOCK(zone);
4789 
4790 	return (nitems);
4791 }
4792 
4793 /* See uma.h */
4794 void
4795 uma_zone_set_maxcache(uma_zone_t zone, int nitems)
4796 {
4797 	int bpcpu, bpdom, bsize, nb;
4798 
4799 	ZONE_LOCK(zone);
4800 
4801 	/*
4802 	 * Compute a lower bound on the number of items that may be cached in
4803 	 * the zone.  Each CPU gets at least two buckets, and for cross-domain
4804 	 * frees we use an additional bucket per CPU and per domain.  Select the
4805 	 * largest bucket size that does not exceed half of the requested limit,
4806 	 * with the left over space given to the full bucket cache.
4807 	 */
4808 	bpdom = 0;
4809 	bpcpu = 2;
4810 #ifdef NUMA
4811 	if ((zone->uz_flags & UMA_ZONE_FIRSTTOUCH) != 0 && vm_ndomains > 1) {
4812 		bpcpu++;
4813 		bpdom++;
4814 	}
4815 #endif
4816 	nb = bpcpu * mp_ncpus + bpdom * vm_ndomains;
4817 	bsize = nitems / nb / 2;
4818 	if (bsize > BUCKET_MAX)
4819 		bsize = BUCKET_MAX;
4820 	else if (bsize == 0 && nitems / nb > 0)
4821 		bsize = 1;
4822 	zone->uz_bucket_size_max = zone->uz_bucket_size = bsize;
4823 	if (zone->uz_bucket_size_min > zone->uz_bucket_size_max)
4824 		zone->uz_bucket_size_min = zone->uz_bucket_size_max;
4825 	zone->uz_bucket_max = nitems - nb * bsize;
4826 	ZONE_UNLOCK(zone);
4827 }
4828 
4829 /* See uma.h */
4830 int
4831 uma_zone_get_max(uma_zone_t zone)
4832 {
4833 	int nitems;
4834 
4835 	nitems = atomic_load_64(&zone->uz_max_items);
4836 
4837 	return (nitems);
4838 }
4839 
4840 /* See uma.h */
4841 void
4842 uma_zone_set_warning(uma_zone_t zone, const char *warning)
4843 {
4844 
4845 	ZONE_ASSERT_COLD(zone);
4846 	zone->uz_warning = warning;
4847 }
4848 
4849 /* See uma.h */
4850 void
4851 uma_zone_set_maxaction(uma_zone_t zone, uma_maxaction_t maxaction)
4852 {
4853 
4854 	ZONE_ASSERT_COLD(zone);
4855 	TASK_INIT(&zone->uz_maxaction, 0, (task_fn_t *)maxaction, zone);
4856 }
4857 
4858 /* See uma.h */
4859 int
4860 uma_zone_get_cur(uma_zone_t zone)
4861 {
4862 	int64_t nitems;
4863 	u_int i;
4864 
4865 	nitems = 0;
4866 	if (zone->uz_allocs != EARLY_COUNTER && zone->uz_frees != EARLY_COUNTER)
4867 		nitems = counter_u64_fetch(zone->uz_allocs) -
4868 		    counter_u64_fetch(zone->uz_frees);
4869 	CPU_FOREACH(i)
4870 		nitems += atomic_load_64(&zone->uz_cpu[i].uc_allocs) -
4871 		    atomic_load_64(&zone->uz_cpu[i].uc_frees);
4872 
4873 	return (nitems < 0 ? 0 : nitems);
4874 }
4875 
4876 static uint64_t
4877 uma_zone_get_allocs(uma_zone_t zone)
4878 {
4879 	uint64_t nitems;
4880 	u_int i;
4881 
4882 	nitems = 0;
4883 	if (zone->uz_allocs != EARLY_COUNTER)
4884 		nitems = counter_u64_fetch(zone->uz_allocs);
4885 	CPU_FOREACH(i)
4886 		nitems += atomic_load_64(&zone->uz_cpu[i].uc_allocs);
4887 
4888 	return (nitems);
4889 }
4890 
4891 static uint64_t
4892 uma_zone_get_frees(uma_zone_t zone)
4893 {
4894 	uint64_t nitems;
4895 	u_int i;
4896 
4897 	nitems = 0;
4898 	if (zone->uz_frees != EARLY_COUNTER)
4899 		nitems = counter_u64_fetch(zone->uz_frees);
4900 	CPU_FOREACH(i)
4901 		nitems += atomic_load_64(&zone->uz_cpu[i].uc_frees);
4902 
4903 	return (nitems);
4904 }
4905 
4906 #ifdef INVARIANTS
4907 /* Used only for KEG_ASSERT_COLD(). */
4908 static uint64_t
4909 uma_keg_get_allocs(uma_keg_t keg)
4910 {
4911 	uma_zone_t z;
4912 	uint64_t nitems;
4913 
4914 	nitems = 0;
4915 	LIST_FOREACH(z, &keg->uk_zones, uz_link)
4916 		nitems += uma_zone_get_allocs(z);
4917 
4918 	return (nitems);
4919 }
4920 #endif
4921 
4922 /* See uma.h */
4923 void
4924 uma_zone_set_init(uma_zone_t zone, uma_init uminit)
4925 {
4926 	uma_keg_t keg;
4927 
4928 	KEG_GET(zone, keg);
4929 	KEG_ASSERT_COLD(keg);
4930 	keg->uk_init = uminit;
4931 }
4932 
4933 /* See uma.h */
4934 void
4935 uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
4936 {
4937 	uma_keg_t keg;
4938 
4939 	KEG_GET(zone, keg);
4940 	KEG_ASSERT_COLD(keg);
4941 	keg->uk_fini = fini;
4942 }
4943 
4944 /* See uma.h */
4945 void
4946 uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
4947 {
4948 
4949 	ZONE_ASSERT_COLD(zone);
4950 	zone->uz_init = zinit;
4951 }
4952 
4953 /* See uma.h */
4954 void
4955 uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
4956 {
4957 
4958 	ZONE_ASSERT_COLD(zone);
4959 	zone->uz_fini = zfini;
4960 }
4961 
4962 /* See uma.h */
4963 void
4964 uma_zone_set_freef(uma_zone_t zone, uma_free freef)
4965 {
4966 	uma_keg_t keg;
4967 
4968 	KEG_GET(zone, keg);
4969 	KEG_ASSERT_COLD(keg);
4970 	keg->uk_freef = freef;
4971 }
4972 
4973 /* See uma.h */
4974 void
4975 uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
4976 {
4977 	uma_keg_t keg;
4978 
4979 	KEG_GET(zone, keg);
4980 	KEG_ASSERT_COLD(keg);
4981 	keg->uk_allocf = allocf;
4982 }
4983 
4984 /* See uma.h */
4985 void
4986 uma_zone_set_smr(uma_zone_t zone, smr_t smr)
4987 {
4988 
4989 	ZONE_ASSERT_COLD(zone);
4990 
4991 	KASSERT(smr != NULL, ("Got NULL smr"));
4992 	KASSERT((zone->uz_flags & UMA_ZONE_SMR) == 0,
4993 	    ("zone %p (%s) already uses SMR", zone, zone->uz_name));
4994 	zone->uz_flags |= UMA_ZONE_SMR;
4995 	zone->uz_smr = smr;
4996 	zone_update_caches(zone);
4997 }
4998 
4999 smr_t
5000 uma_zone_get_smr(uma_zone_t zone)
5001 {
5002 
5003 	return (zone->uz_smr);
5004 }
5005 
5006 /* See uma.h */
5007 void
5008 uma_zone_reserve(uma_zone_t zone, int items)
5009 {
5010 	uma_keg_t keg;
5011 
5012 	KEG_GET(zone, keg);
5013 	KEG_ASSERT_COLD(keg);
5014 	keg->uk_reserve = items;
5015 }
5016 
5017 /* See uma.h */
5018 int
5019 uma_zone_reserve_kva(uma_zone_t zone, int count)
5020 {
5021 	uma_keg_t keg;
5022 	vm_offset_t kva;
5023 	u_int pages;
5024 
5025 	KEG_GET(zone, keg);
5026 	KEG_ASSERT_COLD(keg);
5027 	ZONE_ASSERT_COLD(zone);
5028 
5029 	pages = howmany(count, keg->uk_ipers) * keg->uk_ppera;
5030 
5031 #ifdef UMA_MD_SMALL_ALLOC
5032 	if (keg->uk_ppera > 1) {
5033 #else
5034 	if (1) {
5035 #endif
5036 		kva = kva_alloc((vm_size_t)pages * PAGE_SIZE);
5037 		if (kva == 0)
5038 			return (0);
5039 	} else
5040 		kva = 0;
5041 
5042 	MPASS(keg->uk_kva == 0);
5043 	keg->uk_kva = kva;
5044 	keg->uk_offset = 0;
5045 	zone->uz_max_items = pages * keg->uk_ipers;
5046 #ifdef UMA_MD_SMALL_ALLOC
5047 	keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc;
5048 #else
5049 	keg->uk_allocf = noobj_alloc;
5050 #endif
5051 	keg->uk_flags |= UMA_ZFLAG_LIMIT | UMA_ZONE_NOFREE;
5052 	zone->uz_flags |= UMA_ZFLAG_LIMIT | UMA_ZONE_NOFREE;
5053 	zone_update_caches(zone);
5054 
5055 	return (1);
5056 }
5057 
5058 /* See uma.h */
5059 void
5060 uma_prealloc(uma_zone_t zone, int items)
5061 {
5062 	struct vm_domainset_iter di;
5063 	uma_domain_t dom;
5064 	uma_slab_t slab;
5065 	uma_keg_t keg;
5066 	int aflags, domain, slabs;
5067 
5068 	KEG_GET(zone, keg);
5069 	slabs = howmany(items, keg->uk_ipers);
5070 	while (slabs-- > 0) {
5071 		aflags = M_NOWAIT;
5072 		vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain,
5073 		    &aflags);
5074 		for (;;) {
5075 			slab = keg_alloc_slab(keg, zone, domain, M_WAITOK,
5076 			    aflags);
5077 			if (slab != NULL) {
5078 				dom = &keg->uk_domain[slab->us_domain];
5079 				/*
5080 				 * keg_alloc_slab() always returns a slab on the
5081 				 * partial list.
5082 				 */
5083 				LIST_REMOVE(slab, us_link);
5084 				LIST_INSERT_HEAD(&dom->ud_free_slab, slab,
5085 				    us_link);
5086 				dom->ud_free_slabs++;
5087 				KEG_UNLOCK(keg, slab->us_domain);
5088 				break;
5089 			}
5090 			if (vm_domainset_iter_policy(&di, &domain) != 0)
5091 				vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask, 0);
5092 		}
5093 	}
5094 }
5095 
5096 /*
5097  * Returns a snapshot of memory consumption in bytes.
5098  */
5099 size_t
5100 uma_zone_memory(uma_zone_t zone)
5101 {
5102 	size_t sz;
5103 	int i;
5104 
5105 	sz = 0;
5106 	if (zone->uz_flags & UMA_ZFLAG_CACHE) {
5107 		for (i = 0; i < vm_ndomains; i++)
5108 			sz += ZDOM_GET(zone, i)->uzd_nitems;
5109 		return (sz * zone->uz_size);
5110 	}
5111 	for (i = 0; i < vm_ndomains; i++)
5112 		sz += zone->uz_keg->uk_domain[i].ud_pages;
5113 
5114 	return (sz * PAGE_SIZE);
5115 }
5116 
5117 /* See uma.h */
5118 void
5119 uma_reclaim(int req)
5120 {
5121 	uma_reclaim_domain(req, UMA_ANYDOMAIN);
5122 }
5123 
5124 void
5125 uma_reclaim_domain(int req, int domain)
5126 {
5127 	void *arg;
5128 
5129 	bucket_enable();
5130 
5131 	arg = (void *)(uintptr_t)domain;
5132 	sx_slock(&uma_reclaim_lock);
5133 	switch (req) {
5134 	case UMA_RECLAIM_TRIM:
5135 		zone_foreach(zone_trim, arg);
5136 		break;
5137 	case UMA_RECLAIM_DRAIN:
5138 		zone_foreach(zone_drain, arg);
5139 		break;
5140 	case UMA_RECLAIM_DRAIN_CPU:
5141 		zone_foreach(zone_drain, arg);
5142 		pcpu_cache_drain_safe(NULL);
5143 		zone_foreach(zone_drain, arg);
5144 		break;
5145 	default:
5146 		panic("unhandled reclamation request %d", req);
5147 	}
5148 
5149 	/*
5150 	 * Some slabs may have been freed but this zone will be visited early
5151 	 * we visit again so that we can free pages that are empty once other
5152 	 * zones are drained.  We have to do the same for buckets.
5153 	 */
5154 	zone_drain(slabzones[0], arg);
5155 	zone_drain(slabzones[1], arg);
5156 	bucket_zone_drain(domain);
5157 	sx_sunlock(&uma_reclaim_lock);
5158 }
5159 
5160 static volatile int uma_reclaim_needed;
5161 
5162 void
5163 uma_reclaim_wakeup(void)
5164 {
5165 
5166 	if (atomic_fetchadd_int(&uma_reclaim_needed, 1) == 0)
5167 		wakeup(uma_reclaim);
5168 }
5169 
5170 void
5171 uma_reclaim_worker(void *arg __unused)
5172 {
5173 
5174 	for (;;) {
5175 		sx_xlock(&uma_reclaim_lock);
5176 		while (atomic_load_int(&uma_reclaim_needed) == 0)
5177 			sx_sleep(uma_reclaim, &uma_reclaim_lock, PVM, "umarcl",
5178 			    hz);
5179 		sx_xunlock(&uma_reclaim_lock);
5180 		EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_KMEM);
5181 		uma_reclaim(UMA_RECLAIM_DRAIN_CPU);
5182 		atomic_store_int(&uma_reclaim_needed, 0);
5183 		/* Don't fire more than once per-second. */
5184 		pause("umarclslp", hz);
5185 	}
5186 }
5187 
5188 /* See uma.h */
5189 void
5190 uma_zone_reclaim(uma_zone_t zone, int req)
5191 {
5192 	uma_zone_reclaim_domain(zone, req, UMA_ANYDOMAIN);
5193 }
5194 
5195 void
5196 uma_zone_reclaim_domain(uma_zone_t zone, int req, int domain)
5197 {
5198 	void *arg;
5199 
5200 	arg = (void *)(uintptr_t)domain;
5201 	switch (req) {
5202 	case UMA_RECLAIM_TRIM:
5203 		zone_trim(zone, arg);
5204 		break;
5205 	case UMA_RECLAIM_DRAIN:
5206 		zone_drain(zone, arg);
5207 		break;
5208 	case UMA_RECLAIM_DRAIN_CPU:
5209 		pcpu_cache_drain_safe(zone);
5210 		zone_drain(zone, arg);
5211 		break;
5212 	default:
5213 		panic("unhandled reclamation request %d", req);
5214 	}
5215 }
5216 
5217 /* See uma.h */
5218 int
5219 uma_zone_exhausted(uma_zone_t zone)
5220 {
5221 
5222 	return (atomic_load_32(&zone->uz_sleepers) > 0);
5223 }
5224 
5225 unsigned long
5226 uma_limit(void)
5227 {
5228 
5229 	return (uma_kmem_limit);
5230 }
5231 
5232 void
5233 uma_set_limit(unsigned long limit)
5234 {
5235 
5236 	uma_kmem_limit = limit;
5237 }
5238 
5239 unsigned long
5240 uma_size(void)
5241 {
5242 
5243 	return (atomic_load_long(&uma_kmem_total));
5244 }
5245 
5246 long
5247 uma_avail(void)
5248 {
5249 
5250 	return (uma_kmem_limit - uma_size());
5251 }
5252 
5253 #ifdef DDB
5254 /*
5255  * Generate statistics across both the zone and its per-cpu cache's.  Return
5256  * desired statistics if the pointer is non-NULL for that statistic.
5257  *
5258  * Note: does not update the zone statistics, as it can't safely clear the
5259  * per-CPU cache statistic.
5260  *
5261  */
5262 static void
5263 uma_zone_sumstat(uma_zone_t z, long *cachefreep, uint64_t *allocsp,
5264     uint64_t *freesp, uint64_t *sleepsp, uint64_t *xdomainp)
5265 {
5266 	uma_cache_t cache;
5267 	uint64_t allocs, frees, sleeps, xdomain;
5268 	int cachefree, cpu;
5269 
5270 	allocs = frees = sleeps = xdomain = 0;
5271 	cachefree = 0;
5272 	CPU_FOREACH(cpu) {
5273 		cache = &z->uz_cpu[cpu];
5274 		cachefree += cache->uc_allocbucket.ucb_cnt;
5275 		cachefree += cache->uc_freebucket.ucb_cnt;
5276 		xdomain += cache->uc_crossbucket.ucb_cnt;
5277 		cachefree += cache->uc_crossbucket.ucb_cnt;
5278 		allocs += cache->uc_allocs;
5279 		frees += cache->uc_frees;
5280 	}
5281 	allocs += counter_u64_fetch(z->uz_allocs);
5282 	frees += counter_u64_fetch(z->uz_frees);
5283 	xdomain += counter_u64_fetch(z->uz_xdomain);
5284 	sleeps += z->uz_sleeps;
5285 	if (cachefreep != NULL)
5286 		*cachefreep = cachefree;
5287 	if (allocsp != NULL)
5288 		*allocsp = allocs;
5289 	if (freesp != NULL)
5290 		*freesp = frees;
5291 	if (sleepsp != NULL)
5292 		*sleepsp = sleeps;
5293 	if (xdomainp != NULL)
5294 		*xdomainp = xdomain;
5295 }
5296 #endif /* DDB */
5297 
5298 static int
5299 sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS)
5300 {
5301 	uma_keg_t kz;
5302 	uma_zone_t z;
5303 	int count;
5304 
5305 	count = 0;
5306 	rw_rlock(&uma_rwlock);
5307 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
5308 		LIST_FOREACH(z, &kz->uk_zones, uz_link)
5309 			count++;
5310 	}
5311 	LIST_FOREACH(z, &uma_cachezones, uz_link)
5312 		count++;
5313 
5314 	rw_runlock(&uma_rwlock);
5315 	return (sysctl_handle_int(oidp, &count, 0, req));
5316 }
5317 
5318 static void
5319 uma_vm_zone_stats(struct uma_type_header *uth, uma_zone_t z, struct sbuf *sbuf,
5320     struct uma_percpu_stat *ups, bool internal)
5321 {
5322 	uma_zone_domain_t zdom;
5323 	uma_cache_t cache;
5324 	int i;
5325 
5326 	for (i = 0; i < vm_ndomains; i++) {
5327 		zdom = ZDOM_GET(z, i);
5328 		uth->uth_zone_free += zdom->uzd_nitems;
5329 	}
5330 	uth->uth_allocs = counter_u64_fetch(z->uz_allocs);
5331 	uth->uth_frees = counter_u64_fetch(z->uz_frees);
5332 	uth->uth_fails = counter_u64_fetch(z->uz_fails);
5333 	uth->uth_xdomain = counter_u64_fetch(z->uz_xdomain);
5334 	uth->uth_sleeps = z->uz_sleeps;
5335 
5336 	for (i = 0; i < mp_maxid + 1; i++) {
5337 		bzero(&ups[i], sizeof(*ups));
5338 		if (internal || CPU_ABSENT(i))
5339 			continue;
5340 		cache = &z->uz_cpu[i];
5341 		ups[i].ups_cache_free += cache->uc_allocbucket.ucb_cnt;
5342 		ups[i].ups_cache_free += cache->uc_freebucket.ucb_cnt;
5343 		ups[i].ups_cache_free += cache->uc_crossbucket.ucb_cnt;
5344 		ups[i].ups_allocs = cache->uc_allocs;
5345 		ups[i].ups_frees = cache->uc_frees;
5346 	}
5347 }
5348 
5349 static int
5350 sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
5351 {
5352 	struct uma_stream_header ush;
5353 	struct uma_type_header uth;
5354 	struct uma_percpu_stat *ups;
5355 	struct sbuf sbuf;
5356 	uma_keg_t kz;
5357 	uma_zone_t z;
5358 	uint64_t items;
5359 	uint32_t kfree, pages;
5360 	int count, error, i;
5361 
5362 	error = sysctl_wire_old_buffer(req, 0);
5363 	if (error != 0)
5364 		return (error);
5365 	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
5366 	sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL);
5367 	ups = malloc((mp_maxid + 1) * sizeof(*ups), M_TEMP, M_WAITOK);
5368 
5369 	count = 0;
5370 	rw_rlock(&uma_rwlock);
5371 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
5372 		LIST_FOREACH(z, &kz->uk_zones, uz_link)
5373 			count++;
5374 	}
5375 
5376 	LIST_FOREACH(z, &uma_cachezones, uz_link)
5377 		count++;
5378 
5379 	/*
5380 	 * Insert stream header.
5381 	 */
5382 	bzero(&ush, sizeof(ush));
5383 	ush.ush_version = UMA_STREAM_VERSION;
5384 	ush.ush_maxcpus = (mp_maxid + 1);
5385 	ush.ush_count = count;
5386 	(void)sbuf_bcat(&sbuf, &ush, sizeof(ush));
5387 
5388 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
5389 		kfree = pages = 0;
5390 		for (i = 0; i < vm_ndomains; i++) {
5391 			kfree += kz->uk_domain[i].ud_free_items;
5392 			pages += kz->uk_domain[i].ud_pages;
5393 		}
5394 		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
5395 			bzero(&uth, sizeof(uth));
5396 			strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
5397 			uth.uth_align = kz->uk_align;
5398 			uth.uth_size = kz->uk_size;
5399 			uth.uth_rsize = kz->uk_rsize;
5400 			if (z->uz_max_items > 0) {
5401 				items = UZ_ITEMS_COUNT(z->uz_items);
5402 				uth.uth_pages = (items / kz->uk_ipers) *
5403 					kz->uk_ppera;
5404 			} else
5405 				uth.uth_pages = pages;
5406 			uth.uth_maxpages = (z->uz_max_items / kz->uk_ipers) *
5407 			    kz->uk_ppera;
5408 			uth.uth_limit = z->uz_max_items;
5409 			uth.uth_keg_free = kfree;
5410 
5411 			/*
5412 			 * A zone is secondary is it is not the first entry
5413 			 * on the keg's zone list.
5414 			 */
5415 			if ((z->uz_flags & UMA_ZONE_SECONDARY) &&
5416 			    (LIST_FIRST(&kz->uk_zones) != z))
5417 				uth.uth_zone_flags = UTH_ZONE_SECONDARY;
5418 			uma_vm_zone_stats(&uth, z, &sbuf, ups,
5419 			    kz->uk_flags & UMA_ZFLAG_INTERNAL);
5420 			(void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
5421 			for (i = 0; i < mp_maxid + 1; i++)
5422 				(void)sbuf_bcat(&sbuf, &ups[i], sizeof(ups[i]));
5423 		}
5424 	}
5425 	LIST_FOREACH(z, &uma_cachezones, uz_link) {
5426 		bzero(&uth, sizeof(uth));
5427 		strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
5428 		uth.uth_size = z->uz_size;
5429 		uma_vm_zone_stats(&uth, z, &sbuf, ups, false);
5430 		(void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
5431 		for (i = 0; i < mp_maxid + 1; i++)
5432 			(void)sbuf_bcat(&sbuf, &ups[i], sizeof(ups[i]));
5433 	}
5434 
5435 	rw_runlock(&uma_rwlock);
5436 	error = sbuf_finish(&sbuf);
5437 	sbuf_delete(&sbuf);
5438 	free(ups, M_TEMP);
5439 	return (error);
5440 }
5441 
5442 int
5443 sysctl_handle_uma_zone_max(SYSCTL_HANDLER_ARGS)
5444 {
5445 	uma_zone_t zone = *(uma_zone_t *)arg1;
5446 	int error, max;
5447 
5448 	max = uma_zone_get_max(zone);
5449 	error = sysctl_handle_int(oidp, &max, 0, req);
5450 	if (error || !req->newptr)
5451 		return (error);
5452 
5453 	uma_zone_set_max(zone, max);
5454 
5455 	return (0);
5456 }
5457 
5458 int
5459 sysctl_handle_uma_zone_cur(SYSCTL_HANDLER_ARGS)
5460 {
5461 	uma_zone_t zone;
5462 	int cur;
5463 
5464 	/*
5465 	 * Some callers want to add sysctls for global zones that
5466 	 * may not yet exist so they pass a pointer to a pointer.
5467 	 */
5468 	if (arg2 == 0)
5469 		zone = *(uma_zone_t *)arg1;
5470 	else
5471 		zone = arg1;
5472 	cur = uma_zone_get_cur(zone);
5473 	return (sysctl_handle_int(oidp, &cur, 0, req));
5474 }
5475 
5476 static int
5477 sysctl_handle_uma_zone_allocs(SYSCTL_HANDLER_ARGS)
5478 {
5479 	uma_zone_t zone = arg1;
5480 	uint64_t cur;
5481 
5482 	cur = uma_zone_get_allocs(zone);
5483 	return (sysctl_handle_64(oidp, &cur, 0, req));
5484 }
5485 
5486 static int
5487 sysctl_handle_uma_zone_frees(SYSCTL_HANDLER_ARGS)
5488 {
5489 	uma_zone_t zone = arg1;
5490 	uint64_t cur;
5491 
5492 	cur = uma_zone_get_frees(zone);
5493 	return (sysctl_handle_64(oidp, &cur, 0, req));
5494 }
5495 
5496 static int
5497 sysctl_handle_uma_zone_flags(SYSCTL_HANDLER_ARGS)
5498 {
5499 	struct sbuf sbuf;
5500 	uma_zone_t zone = arg1;
5501 	int error;
5502 
5503 	sbuf_new_for_sysctl(&sbuf, NULL, 0, req);
5504 	if (zone->uz_flags != 0)
5505 		sbuf_printf(&sbuf, "0x%b", zone->uz_flags, PRINT_UMA_ZFLAGS);
5506 	else
5507 		sbuf_printf(&sbuf, "0");
5508 	error = sbuf_finish(&sbuf);
5509 	sbuf_delete(&sbuf);
5510 
5511 	return (error);
5512 }
5513 
5514 static int
5515 sysctl_handle_uma_slab_efficiency(SYSCTL_HANDLER_ARGS)
5516 {
5517 	uma_keg_t keg = arg1;
5518 	int avail, effpct, total;
5519 
5520 	total = keg->uk_ppera * PAGE_SIZE;
5521 	if ((keg->uk_flags & UMA_ZFLAG_OFFPAGE) != 0)
5522 		total += slabzone(keg->uk_ipers)->uz_keg->uk_rsize;
5523 	/*
5524 	 * We consider the client's requested size and alignment here, not the
5525 	 * real size determination uk_rsize, because we also adjust the real
5526 	 * size for internal implementation reasons (max bitset size).
5527 	 */
5528 	avail = keg->uk_ipers * roundup2(keg->uk_size, keg->uk_align + 1);
5529 	if ((keg->uk_flags & UMA_ZONE_PCPU) != 0)
5530 		avail *= mp_maxid + 1;
5531 	effpct = 100 * avail / total;
5532 	return (sysctl_handle_int(oidp, &effpct, 0, req));
5533 }
5534 
5535 static int
5536 sysctl_handle_uma_zone_items(SYSCTL_HANDLER_ARGS)
5537 {
5538 	uma_zone_t zone = arg1;
5539 	uint64_t cur;
5540 
5541 	cur = UZ_ITEMS_COUNT(atomic_load_64(&zone->uz_items));
5542 	return (sysctl_handle_64(oidp, &cur, 0, req));
5543 }
5544 
5545 #ifdef INVARIANTS
5546 static uma_slab_t
5547 uma_dbg_getslab(uma_zone_t zone, void *item)
5548 {
5549 	uma_slab_t slab;
5550 	uma_keg_t keg;
5551 	uint8_t *mem;
5552 
5553 	/*
5554 	 * It is safe to return the slab here even though the
5555 	 * zone is unlocked because the item's allocation state
5556 	 * essentially holds a reference.
5557 	 */
5558 	mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
5559 	if ((zone->uz_flags & UMA_ZFLAG_CACHE) != 0)
5560 		return (NULL);
5561 	if (zone->uz_flags & UMA_ZFLAG_VTOSLAB)
5562 		return (vtoslab((vm_offset_t)mem));
5563 	keg = zone->uz_keg;
5564 	if ((keg->uk_flags & UMA_ZFLAG_HASH) == 0)
5565 		return ((uma_slab_t)(mem + keg->uk_pgoff));
5566 	KEG_LOCK(keg, 0);
5567 	slab = hash_sfind(&keg->uk_hash, mem);
5568 	KEG_UNLOCK(keg, 0);
5569 
5570 	return (slab);
5571 }
5572 
5573 static bool
5574 uma_dbg_zskip(uma_zone_t zone, void *mem)
5575 {
5576 
5577 	if ((zone->uz_flags & UMA_ZFLAG_CACHE) != 0)
5578 		return (true);
5579 
5580 	return (uma_dbg_kskip(zone->uz_keg, mem));
5581 }
5582 
5583 static bool
5584 uma_dbg_kskip(uma_keg_t keg, void *mem)
5585 {
5586 	uintptr_t idx;
5587 
5588 	if (dbg_divisor == 0)
5589 		return (true);
5590 
5591 	if (dbg_divisor == 1)
5592 		return (false);
5593 
5594 	idx = (uintptr_t)mem >> PAGE_SHIFT;
5595 	if (keg->uk_ipers > 1) {
5596 		idx *= keg->uk_ipers;
5597 		idx += ((uintptr_t)mem & PAGE_MASK) / keg->uk_rsize;
5598 	}
5599 
5600 	if ((idx / dbg_divisor) * dbg_divisor != idx) {
5601 		counter_u64_add(uma_skip_cnt, 1);
5602 		return (true);
5603 	}
5604 	counter_u64_add(uma_dbg_cnt, 1);
5605 
5606 	return (false);
5607 }
5608 
5609 /*
5610  * Set up the slab's freei data such that uma_dbg_free can function.
5611  *
5612  */
5613 static void
5614 uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item)
5615 {
5616 	uma_keg_t keg;
5617 	int freei;
5618 
5619 	if (slab == NULL) {
5620 		slab = uma_dbg_getslab(zone, item);
5621 		if (slab == NULL)
5622 			panic("uma: item %p did not belong to zone %s",
5623 			    item, zone->uz_name);
5624 	}
5625 	keg = zone->uz_keg;
5626 	freei = slab_item_index(slab, keg, item);
5627 
5628 	if (BIT_TEST_SET_ATOMIC(keg->uk_ipers, freei,
5629 	    slab_dbg_bits(slab, keg)))
5630 		panic("Duplicate alloc of %p from zone %p(%s) slab %p(%d)",
5631 		    item, zone, zone->uz_name, slab, freei);
5632 }
5633 
5634 /*
5635  * Verifies freed addresses.  Checks for alignment, valid slab membership
5636  * and duplicate frees.
5637  *
5638  */
5639 static void
5640 uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
5641 {
5642 	uma_keg_t keg;
5643 	int freei;
5644 
5645 	if (slab == NULL) {
5646 		slab = uma_dbg_getslab(zone, item);
5647 		if (slab == NULL)
5648 			panic("uma: Freed item %p did not belong to zone %s",
5649 			    item, zone->uz_name);
5650 	}
5651 	keg = zone->uz_keg;
5652 	freei = slab_item_index(slab, keg, item);
5653 
5654 	if (freei >= keg->uk_ipers)
5655 		panic("Invalid free of %p from zone %p(%s) slab %p(%d)",
5656 		    item, zone, zone->uz_name, slab, freei);
5657 
5658 	if (slab_item(slab, keg, freei) != item)
5659 		panic("Unaligned free of %p from zone %p(%s) slab %p(%d)",
5660 		    item, zone, zone->uz_name, slab, freei);
5661 
5662 	if (!BIT_TEST_CLR_ATOMIC(keg->uk_ipers, freei,
5663 	    slab_dbg_bits(slab, keg)))
5664 		panic("Duplicate free of %p from zone %p(%s) slab %p(%d)",
5665 		    item, zone, zone->uz_name, slab, freei);
5666 }
5667 #endif /* INVARIANTS */
5668 
5669 #ifdef DDB
5670 static int64_t
5671 get_uma_stats(uma_keg_t kz, uma_zone_t z, uint64_t *allocs, uint64_t *used,
5672     uint64_t *sleeps, long *cachefree, uint64_t *xdomain)
5673 {
5674 	uint64_t frees;
5675 	int i;
5676 
5677 	if (kz->uk_flags & UMA_ZFLAG_INTERNAL) {
5678 		*allocs = counter_u64_fetch(z->uz_allocs);
5679 		frees = counter_u64_fetch(z->uz_frees);
5680 		*sleeps = z->uz_sleeps;
5681 		*cachefree = 0;
5682 		*xdomain = 0;
5683 	} else
5684 		uma_zone_sumstat(z, cachefree, allocs, &frees, sleeps,
5685 		    xdomain);
5686 	for (i = 0; i < vm_ndomains; i++) {
5687 		*cachefree += ZDOM_GET(z, i)->uzd_nitems;
5688 		if (!((z->uz_flags & UMA_ZONE_SECONDARY) &&
5689 		    (LIST_FIRST(&kz->uk_zones) != z)))
5690 			*cachefree += kz->uk_domain[i].ud_free_items;
5691 	}
5692 	*used = *allocs - frees;
5693 	return (((int64_t)*used + *cachefree) * kz->uk_size);
5694 }
5695 
5696 DB_SHOW_COMMAND(uma, db_show_uma)
5697 {
5698 	const char *fmt_hdr, *fmt_entry;
5699 	uma_keg_t kz;
5700 	uma_zone_t z;
5701 	uint64_t allocs, used, sleeps, xdomain;
5702 	long cachefree;
5703 	/* variables for sorting */
5704 	uma_keg_t cur_keg;
5705 	uma_zone_t cur_zone, last_zone;
5706 	int64_t cur_size, last_size, size;
5707 	int ties;
5708 
5709 	/* /i option produces machine-parseable CSV output */
5710 	if (modif[0] == 'i') {
5711 		fmt_hdr = "%s,%s,%s,%s,%s,%s,%s,%s,%s\n";
5712 		fmt_entry = "\"%s\",%ju,%jd,%ld,%ju,%ju,%u,%jd,%ju\n";
5713 	} else {
5714 		fmt_hdr = "%18s %6s %7s %7s %11s %7s %7s %10s %8s\n";
5715 		fmt_entry = "%18s %6ju %7jd %7ld %11ju %7ju %7u %10jd %8ju\n";
5716 	}
5717 
5718 	db_printf(fmt_hdr, "Zone", "Size", "Used", "Free", "Requests",
5719 	    "Sleeps", "Bucket", "Total Mem", "XFree");
5720 
5721 	/* Sort the zones with largest size first. */
5722 	last_zone = NULL;
5723 	last_size = INT64_MAX;
5724 	for (;;) {
5725 		cur_zone = NULL;
5726 		cur_size = -1;
5727 		ties = 0;
5728 		LIST_FOREACH(kz, &uma_kegs, uk_link) {
5729 			LIST_FOREACH(z, &kz->uk_zones, uz_link) {
5730 				/*
5731 				 * In the case of size ties, print out zones
5732 				 * in the order they are encountered.  That is,
5733 				 * when we encounter the most recently output
5734 				 * zone, we have already printed all preceding
5735 				 * ties, and we must print all following ties.
5736 				 */
5737 				if (z == last_zone) {
5738 					ties = 1;
5739 					continue;
5740 				}
5741 				size = get_uma_stats(kz, z, &allocs, &used,
5742 				    &sleeps, &cachefree, &xdomain);
5743 				if (size > cur_size && size < last_size + ties)
5744 				{
5745 					cur_size = size;
5746 					cur_zone = z;
5747 					cur_keg = kz;
5748 				}
5749 			}
5750 		}
5751 		if (cur_zone == NULL)
5752 			break;
5753 
5754 		size = get_uma_stats(cur_keg, cur_zone, &allocs, &used,
5755 		    &sleeps, &cachefree, &xdomain);
5756 		db_printf(fmt_entry, cur_zone->uz_name,
5757 		    (uintmax_t)cur_keg->uk_size, (intmax_t)used, cachefree,
5758 		    (uintmax_t)allocs, (uintmax_t)sleeps,
5759 		    (unsigned)cur_zone->uz_bucket_size, (intmax_t)size,
5760 		    xdomain);
5761 
5762 		if (db_pager_quit)
5763 			return;
5764 		last_zone = cur_zone;
5765 		last_size = cur_size;
5766 	}
5767 }
5768 
5769 DB_SHOW_COMMAND(umacache, db_show_umacache)
5770 {
5771 	uma_zone_t z;
5772 	uint64_t allocs, frees;
5773 	long cachefree;
5774 	int i;
5775 
5776 	db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free",
5777 	    "Requests", "Bucket");
5778 	LIST_FOREACH(z, &uma_cachezones, uz_link) {
5779 		uma_zone_sumstat(z, &cachefree, &allocs, &frees, NULL, NULL);
5780 		for (i = 0; i < vm_ndomains; i++)
5781 			cachefree += ZDOM_GET(z, i)->uzd_nitems;
5782 		db_printf("%18s %8ju %8jd %8ld %12ju %8u\n",
5783 		    z->uz_name, (uintmax_t)z->uz_size,
5784 		    (intmax_t)(allocs - frees), cachefree,
5785 		    (uintmax_t)allocs, z->uz_bucket_size);
5786 		if (db_pager_quit)
5787 			return;
5788 	}
5789 }
5790 #endif	/* DDB */
5791