xref: /freebsd/sys/vm/uma_core.c (revision 43e8403953cb97ffa6306dd13d2e92fdec91e47c)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2002-2019 Jeffrey Roberson <jeff@FreeBSD.org>
5  * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
6  * Copyright (c) 2004-2006 Robert N. M. Watson
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice unmodified, this list of conditions, and the following
14  *    disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /*
32  * uma_core.c  Implementation of the Universal Memory allocator
33  *
34  * This allocator is intended to replace the multitude of similar object caches
35  * in the standard FreeBSD kernel.  The intent is to be flexible as well as
36  * efficient.  A primary design goal is to return unused memory to the rest of
37  * the system.  This will make the system as a whole more flexible due to the
38  * ability to move memory to subsystems which most need it instead of leaving
39  * pools of reserved memory unused.
40  *
41  * The basic ideas stem from similar slab/zone based allocators whose algorithms
42  * are well known.
43  *
44  */
45 
46 /*
47  * TODO:
48  *	- Improve memory usage for large allocations
49  *	- Investigate cache size adjustments
50  */
51 
52 #include <sys/cdefs.h>
53 __FBSDID("$FreeBSD$");
54 
55 #include "opt_ddb.h"
56 #include "opt_param.h"
57 #include "opt_vm.h"
58 
59 #include <sys/param.h>
60 #include <sys/systm.h>
61 #include <sys/bitset.h>
62 #include <sys/domainset.h>
63 #include <sys/eventhandler.h>
64 #include <sys/kernel.h>
65 #include <sys/types.h>
66 #include <sys/limits.h>
67 #include <sys/queue.h>
68 #include <sys/malloc.h>
69 #include <sys/ktr.h>
70 #include <sys/lock.h>
71 #include <sys/sysctl.h>
72 #include <sys/mutex.h>
73 #include <sys/proc.h>
74 #include <sys/random.h>
75 #include <sys/rwlock.h>
76 #include <sys/sbuf.h>
77 #include <sys/sched.h>
78 #include <sys/sleepqueue.h>
79 #include <sys/smp.h>
80 #include <sys/smr.h>
81 #include <sys/taskqueue.h>
82 #include <sys/vmmeter.h>
83 
84 #include <vm/vm.h>
85 #include <vm/vm_domainset.h>
86 #include <vm/vm_object.h>
87 #include <vm/vm_page.h>
88 #include <vm/vm_pageout.h>
89 #include <vm/vm_param.h>
90 #include <vm/vm_phys.h>
91 #include <vm/vm_pagequeue.h>
92 #include <vm/vm_map.h>
93 #include <vm/vm_kern.h>
94 #include <vm/vm_extern.h>
95 #include <vm/uma.h>
96 #include <vm/uma_int.h>
97 #include <vm/uma_dbg.h>
98 
99 #include <ddb/ddb.h>
100 
101 #ifdef DEBUG_MEMGUARD
102 #include <vm/memguard.h>
103 #endif
104 
105 #include <machine/md_var.h>
106 
107 #ifdef INVARIANTS
108 #define	UMA_ALWAYS_CTORDTOR	1
109 #else
110 #define	UMA_ALWAYS_CTORDTOR	0
111 #endif
112 
113 /*
114  * This is the zone and keg from which all zones are spawned.
115  */
116 static uma_zone_t kegs;
117 static uma_zone_t zones;
118 
119 /*
120  * These are the two zones from which all offpage uma_slab_ts are allocated.
121  *
122  * One zone is for slab headers that can represent a larger number of items,
123  * making the slabs themselves more efficient, and the other zone is for
124  * headers that are smaller and represent fewer items, making the headers more
125  * efficient.
126  */
127 #define	SLABZONE_SIZE(setsize)					\
128     (sizeof(struct uma_hash_slab) + BITSET_SIZE(setsize) * SLAB_BITSETS)
129 #define	SLABZONE0_SETSIZE	(PAGE_SIZE / 16)
130 #define	SLABZONE1_SETSIZE	SLAB_MAX_SETSIZE
131 #define	SLABZONE0_SIZE	SLABZONE_SIZE(SLABZONE0_SETSIZE)
132 #define	SLABZONE1_SIZE	SLABZONE_SIZE(SLABZONE1_SETSIZE)
133 static uma_zone_t slabzones[2];
134 
135 /*
136  * The initial hash tables come out of this zone so they can be allocated
137  * prior to malloc coming up.
138  */
139 static uma_zone_t hashzone;
140 
141 /* The boot-time adjusted value for cache line alignment. */
142 int uma_align_cache = 64 - 1;
143 
144 static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");
145 static MALLOC_DEFINE(M_UMA, "UMA", "UMA Misc");
146 
147 /*
148  * Are we allowed to allocate buckets?
149  */
150 static int bucketdisable = 1;
151 
152 /* Linked list of all kegs in the system */
153 static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);
154 
155 /* Linked list of all cache-only zones in the system */
156 static LIST_HEAD(,uma_zone) uma_cachezones =
157     LIST_HEAD_INITIALIZER(uma_cachezones);
158 
159 /* This RW lock protects the keg list */
160 static struct rwlock_padalign __exclusive_cache_line uma_rwlock;
161 
162 /*
 163  * First available virtual address for boot time allocations.
164  */
165 static vm_offset_t bootstart;
166 static vm_offset_t bootmem;
167 
168 static struct sx uma_reclaim_lock;
169 
170 /*
171  * kmem soft limit, initialized by uma_set_limit().  Ensure that early
172  * allocations don't trigger a wakeup of the reclaim thread.
173  */
174 unsigned long uma_kmem_limit = LONG_MAX;
175 SYSCTL_ULONG(_vm, OID_AUTO, uma_kmem_limit, CTLFLAG_RD, &uma_kmem_limit, 0,
176     "UMA kernel memory soft limit");
177 unsigned long uma_kmem_total;
178 SYSCTL_ULONG(_vm, OID_AUTO, uma_kmem_total, CTLFLAG_RD, &uma_kmem_total, 0,
179     "UMA kernel memory usage");
180 
181 /* Is the VM done starting up? */
182 static enum {
183 	BOOT_COLD,
184 	BOOT_KVA,
185 	BOOT_RUNNING,
186 	BOOT_SHUTDOWN,
187 } booted = BOOT_COLD;
188 
189 /*
190  * This is the handle used to schedule events that need to happen
191  * outside of the allocation fast path.
192  */
193 static struct callout uma_callout;
194 #define	UMA_TIMEOUT	20		/* Seconds for callout interval. */
195 
196 /*
197  * This structure is passed as the zone ctor arg so that I don't have to create
198  * a special allocation function just for zones.
199  */
200 struct uma_zctor_args {
201 	const char *name;
202 	size_t size;
203 	uma_ctor ctor;
204 	uma_dtor dtor;
205 	uma_init uminit;
206 	uma_fini fini;
207 	uma_import import;
208 	uma_release release;
209 	void *arg;
210 	uma_keg_t keg;
211 	int align;
212 	uint32_t flags;
213 };
214 
215 struct uma_kctor_args {
216 	uma_zone_t zone;
217 	size_t size;
218 	uma_init uminit;
219 	uma_fini fini;
220 	int align;
221 	uint32_t flags;
222 };
223 
224 struct uma_bucket_zone {
225 	uma_zone_t	ubz_zone;
226 	char		*ubz_name;
227 	int		ubz_entries;	/* Number of items it can hold. */
228 	int		ubz_maxsize;	/* Maximum allocation size per-item. */
229 };
230 
231 /*
 232  * Compute the actual number of bucket entries so that buckets pack into
 233  * power-of-two sizes for more efficient space utilization.
234  */
235 #define	BUCKET_SIZE(n)						\
236     (((sizeof(void *) * (n)) - sizeof(struct uma_bucket)) / sizeof(void *))
237 
238 #define	BUCKET_MAX	BUCKET_SIZE(256)
239 #define	BUCKET_MIN	BUCKET_SIZE(4)
240 
241 struct uma_bucket_zone bucket_zones[] = {
242 	{ NULL, "4 Bucket", BUCKET_SIZE(4), 4096 },
243 	{ NULL, "6 Bucket", BUCKET_SIZE(6), 3072 },
244 	{ NULL, "8 Bucket", BUCKET_SIZE(8), 2048 },
245 	{ NULL, "12 Bucket", BUCKET_SIZE(12), 1536 },
246 	{ NULL, "16 Bucket", BUCKET_SIZE(16), 1024 },
247 	{ NULL, "32 Bucket", BUCKET_SIZE(32), 512 },
248 	{ NULL, "64 Bucket", BUCKET_SIZE(64), 256 },
249 	{ NULL, "128 Bucket", BUCKET_SIZE(128), 128 },
250 	{ NULL, "256 Bucket", BUCKET_SIZE(256), 64 },
251 	{ NULL, NULL, 0}
252 };
253 
254 /*
255  * Flags and enumerations to be passed to internal functions.
256  */
257 enum zfreeskip {
258 	SKIP_NONE =	0,
259 	SKIP_CNT =	0x00000001,
260 	SKIP_DTOR =	0x00010000,
261 	SKIP_FINI =	0x00020000,
262 };
263 
 264 /* Prototypes. */
265 
266 void	uma_startup1(vm_offset_t);
267 void	uma_startup2(void);
268 
269 static void *noobj_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
270 static void *page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
271 static void *pcpu_page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
272 static void *startup_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
273 static void page_free(void *, vm_size_t, uint8_t);
274 static void pcpu_page_free(void *, vm_size_t, uint8_t);
275 static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int, int, int);
276 static void cache_drain(uma_zone_t);
277 static void bucket_drain(uma_zone_t, uma_bucket_t);
278 static void bucket_cache_reclaim(uma_zone_t zone, bool);
279 static int keg_ctor(void *, int, void *, int);
280 static void keg_dtor(void *, int, void *);
281 static int zone_ctor(void *, int, void *, int);
282 static void zone_dtor(void *, int, void *);
283 static inline void item_dtor(uma_zone_t zone, void *item, int size,
284     void *udata, enum zfreeskip skip);
285 static int zero_init(void *, int, int);
286 static void zone_foreach(void (*zfunc)(uma_zone_t, void *), void *);
287 static void zone_foreach_unlocked(void (*zfunc)(uma_zone_t, void *), void *);
288 static void zone_timeout(uma_zone_t zone, void *);
289 static int hash_alloc(struct uma_hash *, u_int);
290 static int hash_expand(struct uma_hash *, struct uma_hash *);
291 static void hash_free(struct uma_hash *hash);
292 static void uma_timeout(void *);
293 static void uma_startup3(void);
294 static void uma_shutdown(void);
295 static void *zone_alloc_item(uma_zone_t, void *, int, int);
296 static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip);
297 static int zone_alloc_limit(uma_zone_t zone, int count, int flags);
298 static void zone_free_limit(uma_zone_t zone, int count);
299 static void bucket_enable(void);
300 static void bucket_init(void);
301 static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int);
302 static void bucket_free(uma_zone_t zone, uma_bucket_t, void *);
303 static void bucket_zone_drain(void);
304 static uma_bucket_t zone_alloc_bucket(uma_zone_t, void *, int, int);
305 static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
306 static void slab_free_item(uma_zone_t zone, uma_slab_t slab, void *item);
307 static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
308     uma_fini fini, int align, uint32_t flags);
309 static int zone_import(void *, void **, int, int, int);
310 static void zone_release(void *, void **, int);
311 static bool cache_alloc(uma_zone_t, uma_cache_t, void *, int);
312 static bool cache_free(uma_zone_t, uma_cache_t, void *, void *, int);
313 
314 static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
315 static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);
316 static int sysctl_handle_uma_zone_allocs(SYSCTL_HANDLER_ARGS);
317 static int sysctl_handle_uma_zone_frees(SYSCTL_HANDLER_ARGS);
318 static int sysctl_handle_uma_zone_flags(SYSCTL_HANDLER_ARGS);
319 static int sysctl_handle_uma_slab_efficiency(SYSCTL_HANDLER_ARGS);
320 static int sysctl_handle_uma_zone_items(SYSCTL_HANDLER_ARGS);
321 
322 static uint64_t uma_zone_get_allocs(uma_zone_t zone);
323 
324 #ifdef INVARIANTS
325 static uint64_t uma_keg_get_allocs(uma_keg_t zone);
326 static inline struct noslabbits *slab_dbg_bits(uma_slab_t slab, uma_keg_t keg);
327 
328 static bool uma_dbg_kskip(uma_keg_t keg, void *mem);
329 static bool uma_dbg_zskip(uma_zone_t zone, void *mem);
330 static void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item);
331 static void uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item);
332 
333 static SYSCTL_NODE(_vm, OID_AUTO, debug, CTLFLAG_RD, 0,
334     "Memory allocation debugging");
335 
336 static u_int dbg_divisor = 1;
337 SYSCTL_UINT(_vm_debug, OID_AUTO, divisor,
338     CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &dbg_divisor, 0,
 339     "Debug & thrash every Nth item in the memory allocator");
340 
341 static counter_u64_t uma_dbg_cnt = EARLY_COUNTER;
342 static counter_u64_t uma_skip_cnt = EARLY_COUNTER;
343 SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, trashed, CTLFLAG_RD,
344     &uma_dbg_cnt, "memory items debugged");
345 SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, skipped, CTLFLAG_RD,
346     &uma_skip_cnt, "memory items skipped, not debugged");
347 #endif
348 
349 SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);
350 
351 SYSCTL_NODE(_vm, OID_AUTO, uma, CTLFLAG_RW, 0, "Universal Memory Allocator");
352 
353 SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLFLAG_MPSAFE|CTLTYPE_INT,
354     0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones");
355 
356 SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLFLAG_MPSAFE|CTLTYPE_STRUCT,
357     0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");
358 
359 static int zone_warnings = 1;
360 SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RWTUN, &zone_warnings, 0,
 361     "Warn when UMA zones become full");
362 
363 /*
364  * Select the slab zone for an offpage slab with the given maximum item count.
365  */
366 static inline uma_zone_t
367 slabzone(int ipers)
368 {
369 
370 	return (slabzones[ipers > SLABZONE0_SETSIZE]);
371 }
372 
373 /*
374  * This routine checks to see whether or not it's safe to enable buckets.
375  */
376 static void
377 bucket_enable(void)
378 {
379 
380 	KASSERT(booted >= BOOT_KVA, ("Bucket enable before init"));
381 	bucketdisable = vm_page_count_min();
382 }
383 
384 /*
385  * Initialize bucket_zones, the array of zones of buckets of various sizes.
386  *
387  * For each zone, calculate the memory required for each bucket, consisting
388  * of the header and an array of pointers.
389  */
390 static void
391 bucket_init(void)
392 {
393 	struct uma_bucket_zone *ubz;
394 	int size;
395 
396 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) {
397 		size = roundup(sizeof(struct uma_bucket), sizeof(void *));
398 		size += sizeof(void *) * ubz->ubz_entries;
399 		ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
400 		    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
401 		    UMA_ZONE_MTXCLASS | UMA_ZFLAG_BUCKET |
402 		    UMA_ZONE_FIRSTTOUCH);
403 	}
404 }
405 
406 /*
407  * Given a desired number of entries for a bucket, return the zone from which
408  * to allocate the bucket.
409  */
410 static struct uma_bucket_zone *
411 bucket_zone_lookup(int entries)
412 {
413 	struct uma_bucket_zone *ubz;
414 
415 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
416 		if (ubz->ubz_entries >= entries)
417 			return (ubz);
418 	ubz--;
419 	return (ubz);
420 }
421 
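/*
 * Given a cap on the total number of cached items in a zone, return the
 * largest bucket zone whose buckets fit under that cap when every per-CPU
 * cache holds its full complement of buckets, or NULL if even the smallest
 * bucket size is too large.
 */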
422 static struct uma_bucket_zone *
423 bucket_zone_max(uma_zone_t zone, int nitems)
424 {
425 	struct uma_bucket_zone *ubz;
426 	int bpcpu;
427 
428 	bpcpu = 2;
429 	if ((zone->uz_flags & UMA_ZONE_FIRSTTOUCH) != 0)
430 		/* Count the cross-domain bucket. */
431 		bpcpu++;
432 
433 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
434 		if (ubz->ubz_entries * bpcpu * mp_ncpus > nitems)
435 			break;
436 	if (ubz == &bucket_zones[0])
437 		ubz = NULL;
438 	else
439 		ubz--;
440 	return (ubz);
441 }
442 
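/*
 * Select a bucket entry count for a zone based on its item size.  Larger
 * items get smaller buckets so that a full bucket stays within a bounded
 * memory footprint.
 */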
443 static int
444 bucket_select(int size)
445 {
446 	struct uma_bucket_zone *ubz;
447 
448 	ubz = &bucket_zones[0];
449 	if (size > ubz->ubz_maxsize)
450 		return MAX((ubz->ubz_maxsize * ubz->ubz_entries) / size, 1);
451 
452 	for (; ubz->ubz_entries != 0; ubz++)
453 		if (ubz->ubz_maxsize < size)
454 			break;
455 	ubz--;
456 	return (ubz->ubz_entries);
457 }
458 
459 static uma_bucket_t
460 bucket_alloc(uma_zone_t zone, void *udata, int flags)
461 {
462 	struct uma_bucket_zone *ubz;
463 	uma_bucket_t bucket;
464 
465 	/*
466 	 * Don't allocate buckets early in boot.
467 	 */
468 	if (__predict_false(booted < BOOT_KVA))
469 		return (NULL);
470 
471 	/*
472 	 * To limit bucket recursion we store the original zone flags
473 	 * in a cookie passed via zalloc_arg/zfree_arg.  This allows the
474 	 * NOVM flag to persist even through deep recursions.  We also
475 	 * store ZFLAG_BUCKET once we have recursed attempting to allocate
476 	 * a bucket for a bucket zone so we do not allow infinite bucket
477 	 * recursion.  This cookie will even persist to frees of unused
478 	 * buckets via the allocation path or bucket allocations in the
479 	 * free path.
480 	 */
481 	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
482 		udata = (void *)(uintptr_t)zone->uz_flags;
483 	else {
484 		if ((uintptr_t)udata & UMA_ZFLAG_BUCKET)
485 			return (NULL);
486 		udata = (void *)((uintptr_t)udata | UMA_ZFLAG_BUCKET);
487 	}
488 	if ((uintptr_t)udata & UMA_ZFLAG_CACHEONLY)
489 		flags |= M_NOVM;
490 	ubz = bucket_zone_lookup(zone->uz_bucket_size);
491 	if (ubz->ubz_zone == zone && (ubz + 1)->ubz_entries != 0)
492 		ubz++;
493 	bucket = uma_zalloc_arg(ubz->ubz_zone, udata, flags);
494 	if (bucket) {
495 #ifdef INVARIANTS
496 		bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
497 #endif
498 		bucket->ub_cnt = 0;
499 		bucket->ub_entries = ubz->ubz_entries;
500 		bucket->ub_seq = SMR_SEQ_INVALID;
501 		CTR3(KTR_UMA, "bucket_alloc: zone %s(%p) allocated bucket %p",
502 		    zone->uz_name, zone, bucket);
503 	}
504 
505 	return (bucket);
506 }
507 
508 static void
509 bucket_free(uma_zone_t zone, uma_bucket_t bucket, void *udata)
510 {
511 	struct uma_bucket_zone *ubz;
512 
513 	KASSERT(bucket->ub_cnt == 0,
514 	    ("bucket_free: Freeing a non free bucket."));
515 	KASSERT(bucket->ub_seq == SMR_SEQ_INVALID,
516 	    ("bucket_free: Freeing an SMR bucket."));
517 	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
518 		udata = (void *)(uintptr_t)zone->uz_flags;
519 	ubz = bucket_zone_lookup(bucket->ub_entries);
520 	uma_zfree_arg(ubz->ubz_zone, bucket, udata);
521 }
522 
523 static void
524 bucket_zone_drain(void)
525 {
526 	struct uma_bucket_zone *ubz;
527 
528 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
529 		uma_zone_reclaim(ubz->ubz_zone, UMA_RECLAIM_DRAIN);
530 }
531 
532 /*
533  * Attempt to satisfy an allocation by retrieving a full bucket from one of the
534  * zone's caches.  If a bucket is found the zone is not locked on return.
535  */
536 static uma_bucket_t
537 zone_fetch_bucket(uma_zone_t zone, uma_zone_domain_t zdom)
538 {
539 	uma_bucket_t bucket;
540 	int i;
541 	bool dtor = false;
542 
543 	ZONE_LOCK_ASSERT(zone);
544 
545 	if ((bucket = TAILQ_FIRST(&zdom->uzd_buckets)) == NULL)
546 		return (NULL);
547 
548 	if ((zone->uz_flags & UMA_ZONE_SMR) != 0 &&
549 	    bucket->ub_seq != SMR_SEQ_INVALID) {
550 		if (!smr_poll(zone->uz_smr, bucket->ub_seq, false))
551 			return (NULL);
552 		bucket->ub_seq = SMR_SEQ_INVALID;
553 		dtor = (zone->uz_dtor != NULL) | UMA_ALWAYS_CTORDTOR;
554 	}
555 	MPASS(zdom->uzd_nitems >= bucket->ub_cnt);
556 	TAILQ_REMOVE(&zdom->uzd_buckets, bucket, ub_link);
557 	zdom->uzd_nitems -= bucket->ub_cnt;
558 	if (zdom->uzd_imin > zdom->uzd_nitems)
559 		zdom->uzd_imin = zdom->uzd_nitems;
560 	zone->uz_bkt_count -= bucket->ub_cnt;
561 	ZONE_UNLOCK(zone);
562 	if (dtor)
563 		for (i = 0; i < bucket->ub_cnt; i++)
564 			item_dtor(zone, bucket->ub_bucket[i], zone->uz_size,
565 			    NULL, SKIP_NONE);
566 
567 	return (bucket);
568 }
569 
570 /*
571  * Insert a full bucket into the specified cache.  The "ws" parameter indicates
572  * whether the bucket's contents should be counted as part of the zone's working
573  * set.
574  */
575 static void
576 zone_put_bucket(uma_zone_t zone, uma_zone_domain_t zdom, uma_bucket_t bucket,
577     const bool ws)
578 {
579 
580 	ZONE_LOCK_ASSERT(zone);
581 	KASSERT(!ws || zone->uz_bkt_count < zone->uz_bkt_max,
582 	    ("%s: zone %p overflow", __func__, zone));
583 
584 	if (ws && bucket->ub_seq == SMR_SEQ_INVALID)
585 		TAILQ_INSERT_HEAD(&zdom->uzd_buckets, bucket, ub_link);
586 	else
587 		TAILQ_INSERT_TAIL(&zdom->uzd_buckets, bucket, ub_link);
588 	zdom->uzd_nitems += bucket->ub_cnt;
589 	if (ws && zdom->uzd_imax < zdom->uzd_nitems)
590 		zdom->uzd_imax = zdom->uzd_nitems;
591 	zone->uz_bkt_count += bucket->ub_cnt;
592 }
593 
594 /* Pops an item out of a per-cpu cache bucket. */
595 static inline void *
596 cache_bucket_pop(uma_cache_t cache, uma_cache_bucket_t bucket)
597 {
598 	void *item;
599 
600 	CRITICAL_ASSERT(curthread);
601 
602 	bucket->ucb_cnt--;
603 	item = bucket->ucb_bucket->ub_bucket[bucket->ucb_cnt];
604 #ifdef INVARIANTS
605 	bucket->ucb_bucket->ub_bucket[bucket->ucb_cnt] = NULL;
606 	KASSERT(item != NULL, ("uma_zalloc: Bucket pointer mangled."));
607 #endif
608 	cache->uc_allocs++;
609 
610 	return (item);
611 }
612 
613 /* Pushes an item into a per-cpu cache bucket. */
614 static inline void
615 cache_bucket_push(uma_cache_t cache, uma_cache_bucket_t bucket, void *item)
616 {
617 
618 	CRITICAL_ASSERT(curthread);
619 	KASSERT(bucket->ucb_bucket->ub_bucket[bucket->ucb_cnt] == NULL,
620 	    ("uma_zfree: Freeing to non free bucket index."));
621 
622 	bucket->ucb_bucket->ub_bucket[bucket->ucb_cnt] = item;
623 	bucket->ucb_cnt++;
624 	cache->uc_frees++;
625 }
626 
627 /*
628  * Unload a UMA bucket from a per-cpu cache.
629  */
630 static inline uma_bucket_t
631 cache_bucket_unload(uma_cache_bucket_t bucket)
632 {
633 	uma_bucket_t b;
634 
635 	b = bucket->ucb_bucket;
636 	if (b != NULL) {
637 		MPASS(b->ub_entries == bucket->ucb_entries);
638 		b->ub_cnt = bucket->ucb_cnt;
639 		bucket->ucb_bucket = NULL;
640 		bucket->ucb_entries = bucket->ucb_cnt = 0;
641 	}
642 
643 	return (b);
644 }
645 
646 static inline uma_bucket_t
647 cache_bucket_unload_alloc(uma_cache_t cache)
648 {
649 
650 	return (cache_bucket_unload(&cache->uc_allocbucket));
651 }
652 
653 static inline uma_bucket_t
654 cache_bucket_unload_free(uma_cache_t cache)
655 {
656 
657 	return (cache_bucket_unload(&cache->uc_freebucket));
658 }
659 
660 static inline uma_bucket_t
661 cache_bucket_unload_cross(uma_cache_t cache)
662 {
663 
664 	return (cache_bucket_unload(&cache->uc_crossbucket));
665 }
666 
667 /*
668  * Load a bucket into a per-cpu cache bucket.
669  */
670 static inline void
671 cache_bucket_load(uma_cache_bucket_t bucket, uma_bucket_t b)
672 {
673 
674 	CRITICAL_ASSERT(curthread);
675 	MPASS(bucket->ucb_bucket == NULL);
676 
677 	bucket->ucb_bucket = b;
678 	bucket->ucb_cnt = b->ub_cnt;
679 	bucket->ucb_entries = b->ub_entries;
680 }
681 
682 static inline void
683 cache_bucket_load_alloc(uma_cache_t cache, uma_bucket_t b)
684 {
685 
686 	cache_bucket_load(&cache->uc_allocbucket, b);
687 }
688 
689 static inline void
690 cache_bucket_load_free(uma_cache_t cache, uma_bucket_t b)
691 {
692 
693 	cache_bucket_load(&cache->uc_freebucket, b);
694 }
695 
696 #ifdef NUMA
697 static inline void
698 cache_bucket_load_cross(uma_cache_t cache, uma_bucket_t b)
699 {
700 
701 	cache_bucket_load(&cache->uc_crossbucket, b);
702 }
703 #endif
704 
705 /*
706  * Copy and preserve ucb_spare.
707  */
708 static inline void
709 cache_bucket_copy(uma_cache_bucket_t b1, uma_cache_bucket_t b2)
710 {
711 
712 	b1->ucb_bucket = b2->ucb_bucket;
713 	b1->ucb_entries = b2->ucb_entries;
714 	b1->ucb_cnt = b2->ucb_cnt;
715 }
716 
717 /*
718  * Swap two cache buckets.
719  */
720 static inline void
721 cache_bucket_swap(uma_cache_bucket_t b1, uma_cache_bucket_t b2)
722 {
723 	struct uma_cache_bucket b3;
724 
725 	CRITICAL_ASSERT(curthread);
726 
727 	cache_bucket_copy(&b3, b1);
728 	cache_bucket_copy(b1, b2);
729 	cache_bucket_copy(b2, &b3);
730 }
731 
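/*
 * Print the zone's registered warning message, rate limited to one message
 * per zone every five minutes.
 */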
732 static void
733 zone_log_warning(uma_zone_t zone)
734 {
735 	static const struct timeval warninterval = { 300, 0 };
736 
737 	if (!zone_warnings || zone->uz_warning == NULL)
738 		return;
739 
740 	if (ratecheck(&zone->uz_ratecheck, &warninterval))
741 		printf("[zone: %s] %s\n", zone->uz_name, zone->uz_warning);
742 }
743 
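/* Schedule the zone's limit-reached task, if one is registered. */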
744 static inline void
745 zone_maxaction(uma_zone_t zone)
746 {
747 
748 	if (zone->uz_maxaction.ta_func != NULL)
749 		taskqueue_enqueue(taskqueue_thread, &zone->uz_maxaction);
750 }
751 
752 /*
753  * Routine called by timeout which is used to fire off some time interval
754  * based calculations.  (stats, hash size, etc.)
755  *
756  * Arguments:
757  *	arg   Unused
758  *
759  * Returns:
760  *	Nothing
761  */
762 static void
763 uma_timeout(void *unused)
764 {
765 	bucket_enable();
766 	zone_foreach(zone_timeout, NULL);
767 
768 	/* Reschedule this event */
769 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
770 }
771 
772 /*
773  * Update the working set size estimate for the zone's bucket cache.
774  * The constants chosen here are somewhat arbitrary.  With an update period of
775  * 20s (UMA_TIMEOUT), this estimate is dominated by zone activity over the
776  * last 100s.
777  */
778 static void
779 zone_domain_update_wss(uma_zone_domain_t zdom)
780 {
781 	long wss;
782 
783 	MPASS(zdom->uzd_imax >= zdom->uzd_imin);
784 	wss = zdom->uzd_imax - zdom->uzd_imin;
785 	zdom->uzd_imax = zdom->uzd_imin = zdom->uzd_nitems;
786 	zdom->uzd_wss = (4 * wss + zdom->uzd_wss) / 5;
787 }
788 
789 /*
790  * Routine to perform timeout driven calculations.  This expands the
791  * hashes and does per cpu statistics aggregation.
792  *
793  *  Returns nothing.
794  */
795 static void
796 zone_timeout(uma_zone_t zone, void *unused)
797 {
798 	uma_keg_t keg;
799 	u_int slabs, pages;
800 
801 	if ((zone->uz_flags & UMA_ZFLAG_HASH) == 0)
802 		goto update_wss;
803 
804 	keg = zone->uz_keg;
805 
806 	/*
807 	 * Hash zones are non-numa by definition so the first domain
808 	 * is the only one present.
809 	 */
810 	KEG_LOCK(keg, 0);
811 	pages = keg->uk_domain[0].ud_pages;
812 
813 	/*
814 	 * Expand the keg hash table.
815 	 *
816 	 * This is done if the number of slabs is larger than the hash size.
 817  * What I'm trying to do here is completely eliminate collisions.  This
818 	 * may be a little aggressive.  Should I allow for two collisions max?
819 	 */
820 	if ((slabs = pages / keg->uk_ppera) > keg->uk_hash.uh_hashsize) {
821 		struct uma_hash newhash;
822 		struct uma_hash oldhash;
823 		int ret;
824 
825 		/*
826 		 * This is so involved because allocating and freeing
827 		 * while the keg lock is held will lead to deadlock.
828 		 * I have to do everything in stages and check for
829 		 * races.
830 		 */
831 		KEG_UNLOCK(keg, 0);
832 		ret = hash_alloc(&newhash, 1 << fls(slabs));
833 		KEG_LOCK(keg, 0);
834 		if (ret) {
835 			if (hash_expand(&keg->uk_hash, &newhash)) {
836 				oldhash = keg->uk_hash;
837 				keg->uk_hash = newhash;
838 			} else
839 				oldhash = newhash;
840 
841 			KEG_UNLOCK(keg, 0);
842 			hash_free(&oldhash);
843 			goto update_wss;
844 		}
845 	}
846 	KEG_UNLOCK(keg, 0);
847 
848 update_wss:
849 	ZONE_LOCK(zone);
850 	for (int i = 0; i < vm_ndomains; i++)
851 		zone_domain_update_wss(&zone->uz_domain[i]);
852 	ZONE_UNLOCK(zone);
853 }
854 
855 /*
856  * Allocate and zero fill the next sized hash table from the appropriate
857  * backing store.
858  *
859  * Arguments:
860  *	hash  A new hash structure with the old hash size in uh_hashsize
861  *
862  * Returns:
863  *	1 on success and 0 on failure.
864  */
865 static int
866 hash_alloc(struct uma_hash *hash, u_int size)
867 {
868 	size_t alloc;
869 
870 	KASSERT(powerof2(size), ("hash size must be power of 2"));
871 	if (size > UMA_HASH_SIZE_INIT)  {
872 		hash->uh_hashsize = size;
873 		alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
874 		hash->uh_slab_hash = malloc(alloc, M_UMAHASH, M_NOWAIT);
875 	} else {
876 		alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
877 		hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
878 		    UMA_ANYDOMAIN, M_WAITOK);
879 		hash->uh_hashsize = UMA_HASH_SIZE_INIT;
880 	}
881 	if (hash->uh_slab_hash) {
882 		bzero(hash->uh_slab_hash, alloc);
883 		hash->uh_hashmask = hash->uh_hashsize - 1;
884 		return (1);
885 	}
886 
887 	return (0);
888 }
889 
890 /*
891  * Expands the hash table for HASH zones.  This is done from zone_timeout
892  * to reduce collisions.  This must not be done in the regular allocation
893  * path, otherwise, we can recurse on the vm while allocating pages.
894  *
895  * Arguments:
896  *	oldhash  The hash you want to expand
897  *	newhash  The hash structure for the new table
898  *
899  * Returns:
 900  *	1 if the table was expanded and 0 otherwise.
901  *
902  * Discussion:
903  */
904 static int
905 hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
906 {
907 	uma_hash_slab_t slab;
908 	u_int hval;
909 	u_int idx;
910 
911 	if (!newhash->uh_slab_hash)
912 		return (0);
913 
914 	if (oldhash->uh_hashsize >= newhash->uh_hashsize)
915 		return (0);
916 
917 	/*
918 	 * I need to investigate hash algorithms for resizing without a
919 	 * full rehash.
920 	 */
921 
922 	for (idx = 0; idx < oldhash->uh_hashsize; idx++)
923 		while (!LIST_EMPTY(&oldhash->uh_slab_hash[idx])) {
924 			slab = LIST_FIRST(&oldhash->uh_slab_hash[idx]);
925 			LIST_REMOVE(slab, uhs_hlink);
926 			hval = UMA_HASH(newhash, slab->uhs_data);
927 			LIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
928 			    slab, uhs_hlink);
929 		}
930 
931 	return (1);
932 }
933 
934 /*
935  * Free the hash bucket to the appropriate backing store.
936  *
937  * Arguments:
938  *	slab_hash  The hash bucket we're freeing
939  *	hashsize   The number of entries in that hash bucket
940  *
941  * Returns:
942  *	Nothing
943  */
944 static void
945 hash_free(struct uma_hash *hash)
946 {
947 	if (hash->uh_slab_hash == NULL)
948 		return;
949 	if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
950 		zone_free_item(hashzone, hash->uh_slab_hash, NULL, SKIP_NONE);
951 	else
952 		free(hash->uh_slab_hash, M_UMAHASH);
953 }
954 
955 /*
956  * Frees all outstanding items in a bucket
957  *
958  * Arguments:
959  *	zone   The zone to free to, must be unlocked.
960  *	bucket The free/alloc bucket with items.
961  *
962  * Returns:
963  *	Nothing
964  */
965 
966 static void
967 bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
968 {
969 	int i;
970 
971 	if (bucket == NULL || bucket->ub_cnt == 0)
972 		return;
973 
974 	if ((zone->uz_flags & UMA_ZONE_SMR) != 0 &&
975 	    bucket->ub_seq != SMR_SEQ_INVALID) {
976 		smr_wait(zone->uz_smr, bucket->ub_seq);
977 		for (i = 0; i < bucket->ub_cnt; i++)
978 			item_dtor(zone, bucket->ub_bucket[i],
979 			    zone->uz_size, NULL, SKIP_NONE);
980 		bucket->ub_seq = SMR_SEQ_INVALID;
981 	}
982 	if (zone->uz_fini)
983 		for (i = 0; i < bucket->ub_cnt; i++)
984 			zone->uz_fini(bucket->ub_bucket[i], zone->uz_size);
985 	zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt);
986 	if (zone->uz_max_items > 0)
987 		zone_free_limit(zone, bucket->ub_cnt);
988 #ifdef INVARIANTS
989 	bzero(bucket->ub_bucket, sizeof(void *) * bucket->ub_cnt);
990 #endif
991 	bucket->ub_cnt = 0;
992 }
993 
994 /*
995  * Drains the per cpu caches for a zone.
996  *
997  * NOTE: This may only be called while the zone is being torn down, and not
998  * during normal operation.  This is necessary in order that we do not have
999  * to migrate CPUs to drain the per-CPU caches.
1000  *
1001  * Arguments:
1002  *	zone     The zone to drain, must be unlocked.
1003  *
1004  * Returns:
1005  *	Nothing
1006  */
1007 static void
1008 cache_drain(uma_zone_t zone)
1009 {
1010 	uma_cache_t cache;
1011 	uma_bucket_t bucket;
1012 	int cpu;
1013 
1014 	/*
1015 	 * XXX: It is safe to not lock the per-CPU caches, because we're
1016 	 * tearing down the zone anyway.  I.e., there will be no further use
1017 	 * of the caches at this point.
1018 	 *
 1019 	 * XXX: It would be good to be able to assert that the zone is being
1020 	 * torn down to prevent improper use of cache_drain().
1021 	 */
1022 	CPU_FOREACH(cpu) {
1023 		cache = &zone->uz_cpu[cpu];
1024 		bucket = cache_bucket_unload_alloc(cache);
1025 		if (bucket != NULL) {
1026 			bucket_drain(zone, bucket);
1027 			bucket_free(zone, bucket, NULL);
1028 		}
1029 		bucket = cache_bucket_unload_free(cache);
1030 		if (bucket != NULL) {
1031 			bucket_drain(zone, bucket);
1032 			bucket_free(zone, bucket, NULL);
1033 		}
1034 		bucket = cache_bucket_unload_cross(cache);
1035 		if (bucket != NULL) {
1036 			bucket_drain(zone, bucket);
1037 			bucket_free(zone, bucket, NULL);
1038 		}
1039 	}
1040 	bucket_cache_reclaim(zone, true);
1041 }
1042 
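/*
 * Shrink the zone's bucket size estimate halfway back towards its minimum so
 * that per-CPU caches refill with smaller buckets.  Internal zones are left
 * alone.
 */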
1043 static void
1044 cache_shrink(uma_zone_t zone, void *unused)
1045 {
1046 
1047 	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
1048 		return;
1049 
1050 	ZONE_LOCK(zone);
1051 	zone->uz_bucket_size =
1052 	    (zone->uz_bucket_size_min + zone->uz_bucket_size) / 2;
1053 	ZONE_UNLOCK(zone);
1054 }
1055 
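/*
 * Flush the calling CPU's cached buckets back into the zone's per-domain
 * bucket cache.  The free and cross buckets of SMR zones are left in place;
 * the cross bucket, when unloaded, is drained outright because its items
 * belong to remote domains.
 */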
1056 static void
1057 cache_drain_safe_cpu(uma_zone_t zone, void *unused)
1058 {
1059 	uma_cache_t cache;
1060 	uma_bucket_t b1, b2, b3;
1061 	int domain;
1062 
1063 	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
1064 		return;
1065 
1066 	b1 = b2 = b3 = NULL;
1067 	ZONE_LOCK(zone);
1068 	critical_enter();
1069 	if (zone->uz_flags & UMA_ZONE_FIRSTTOUCH)
1070 		domain = PCPU_GET(domain);
1071 	else
1072 		domain = 0;
1073 	cache = &zone->uz_cpu[curcpu];
1074 	b1 = cache_bucket_unload_alloc(cache);
1075 	if (b1 != NULL && b1->ub_cnt != 0) {
1076 		zone_put_bucket(zone, &zone->uz_domain[domain], b1, false);
1077 		b1 = NULL;
1078 	}
1079 
1080 	/*
1081 	 * Don't flush SMR zone buckets.  This leaves the zone without a
1082 	 * bucket and forces every free to synchronize().
1083 	 */
1084 	if ((zone->uz_flags & UMA_ZONE_SMR) != 0)
1085 		goto out;
1086 	b2 = cache_bucket_unload_free(cache);
1087 	if (b2 != NULL && b2->ub_cnt != 0) {
1088 		zone_put_bucket(zone, &zone->uz_domain[domain], b2, false);
1089 		b2 = NULL;
1090 	}
1091 	b3 = cache_bucket_unload_cross(cache);
1092 
1093 out:
1094 	critical_exit();
1095 	ZONE_UNLOCK(zone);
1096 	if (b1)
1097 		bucket_free(zone, b1, NULL);
1098 	if (b2)
1099 		bucket_free(zone, b2, NULL);
1100 	if (b3) {
1101 		bucket_drain(zone, b3);
1102 		bucket_free(zone, b3, NULL);
1103 	}
1104 }
1105 
1106 /*
 1107  * Safely drain the per-CPU caches of a zone (or of all zones) into the
 1108  * zone's bucket cache.  This is an expensive call because it needs to
 1109  * bind to each CPU one by one and enter a critical section on it in
 1110  * order to safely access its cache buckets.
 1111  * The zone lock must not be held when calling this function.
1112  */
1113 static void
1114 pcpu_cache_drain_safe(uma_zone_t zone)
1115 {
1116 	int cpu;
1117 
1118 	/*
 1119 	 * Polite bucket-size shrinking was not enough; shrink aggressively.
1120 	 */
1121 	if (zone)
1122 		cache_shrink(zone, NULL);
1123 	else
1124 		zone_foreach(cache_shrink, NULL);
1125 
1126 	CPU_FOREACH(cpu) {
1127 		thread_lock(curthread);
1128 		sched_bind(curthread, cpu);
1129 		thread_unlock(curthread);
1130 
1131 		if (zone)
1132 			cache_drain_safe_cpu(zone, NULL);
1133 		else
1134 			zone_foreach(cache_drain_safe_cpu, NULL);
1135 	}
1136 	thread_lock(curthread);
1137 	sched_unbind(curthread);
1138 	thread_unlock(curthread);
1139 }
1140 
1141 /*
1142  * Reclaim cached buckets from a zone.  All buckets are reclaimed if the caller
 1143  * requested a drain, otherwise the per-domain caches are trimmed to their
 1144  * estimated working set size.
1145  */
1146 static void
1147 bucket_cache_reclaim(uma_zone_t zone, bool drain)
1148 {
1149 	uma_zone_domain_t zdom;
1150 	uma_bucket_t bucket;
1151 	long target, tofree;
1152 	int i;
1153 
1154 	for (i = 0; i < vm_ndomains; i++) {
1155 		/*
1156 		 * The cross bucket is partially filled and not part of
1157 		 * the item count.  Reclaim it individually here.
1158 		 */
1159 		zdom = &zone->uz_domain[i];
1160 		ZONE_CROSS_LOCK(zone);
1161 		bucket = zdom->uzd_cross;
1162 		zdom->uzd_cross = NULL;
1163 		ZONE_CROSS_UNLOCK(zone);
1164 		if (bucket != NULL) {
1165 			bucket_drain(zone, bucket);
1166 			bucket_free(zone, bucket, NULL);
1167 		}
1168 
1169 		/*
1170 		 * Shrink the zone bucket size to ensure that the per-CPU caches
1171 		 * don't grow too large.
1172 		 */
1173 		ZONE_LOCK(zone);
1174 		if (i == 0 && zone->uz_bucket_size > zone->uz_bucket_size_min)
1175 			zone->uz_bucket_size--;
1176 
1177 		/*
1178 		 * If we were asked to drain the zone, we are done only once
1179 		 * this bucket cache is empty.  Otherwise, we reclaim items in
1180 		 * excess of the zone's estimated working set size.  If the
1181 		 * difference nitems - imin is larger than the WSS estimate,
1182 		 * then the estimate will grow at the end of this interval and
1183 		 * we ignore the historical average.
1184 		 */
1185 		target = drain ? 0 : lmax(zdom->uzd_wss, zdom->uzd_nitems -
1186 		    zdom->uzd_imin);
1187 		while (zdom->uzd_nitems > target) {
1188 			bucket = TAILQ_FIRST(&zdom->uzd_buckets);
1189 			if (bucket == NULL)
1190 				break;
1191 			tofree = bucket->ub_cnt;
1192 			TAILQ_REMOVE(&zdom->uzd_buckets, bucket, ub_link);
1193 			zdom->uzd_nitems -= tofree;
1194 
1195 			/*
1196 			 * Shift the bounds of the current WSS interval to avoid
1197 			 * perturbing the estimate.
1198 			 */
1199 			zdom->uzd_imax -= lmin(zdom->uzd_imax, tofree);
1200 			zdom->uzd_imin -= lmin(zdom->uzd_imin, tofree);
1201 
1202 			ZONE_UNLOCK(zone);
1203 			bucket_drain(zone, bucket);
1204 			bucket_free(zone, bucket, NULL);
1205 			ZONE_LOCK(zone);
1206 		}
1207 		ZONE_UNLOCK(zone);
1208 	}
1209 }
1210 
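/*
 * Run fini on the first "start" items of a slab, release any offpage slab
 * header and return the slab's pages to the backing allocator.
 */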
1211 static void
1212 keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
1213 {
1214 	uint8_t *mem;
1215 	int i;
1216 	uint8_t flags;
1217 
1218 	CTR4(KTR_UMA, "keg_free_slab keg %s(%p) slab %p, returning %d bytes",
1219 	    keg->uk_name, keg, slab, PAGE_SIZE * keg->uk_ppera);
1220 
1221 	mem = slab_data(slab, keg);
1222 	flags = slab->us_flags;
1223 	i = start;
1224 	if (keg->uk_fini != NULL) {
1225 		for (i--; i > -1; i--)
1226 #ifdef INVARIANTS
1227 		/*
1228 		 * trash_fini implies that dtor was trash_dtor. trash_fini
1229 		 * would check that memory hasn't been modified since free,
1230 		 * which executed trash_dtor.
1231 		 * That's why we need to run uma_dbg_kskip() check here,
1232 		 * albeit we don't make skip check for other init/fini
1233 		 * invocations.
1234 		 */
1235 		if (!uma_dbg_kskip(keg, slab_item(slab, keg, i)) ||
1236 		    keg->uk_fini != trash_fini)
1237 #endif
1238 			keg->uk_fini(slab_item(slab, keg, i), keg->uk_size);
1239 	}
1240 	if (keg->uk_flags & UMA_ZFLAG_OFFPAGE)
1241 		zone_free_item(slabzone(keg->uk_ipers), slab_tohashslab(slab),
1242 		    NULL, SKIP_NONE);
1243 	keg->uk_freef(mem, PAGE_SIZE * keg->uk_ppera, flags);
1244 	uma_total_dec(PAGE_SIZE * keg->uk_ppera);
1245 }
1246 
1247 /*
1248  * Frees pages from a keg back to the system.  This is done on demand from
1249  * the pageout daemon.
1250  *
1251  * Returns nothing.
1252  */
1253 static void
1254 keg_drain(uma_keg_t keg)
1255 {
1256 	struct slabhead freeslabs = { 0 };
1257 	uma_domain_t dom;
1258 	uma_slab_t slab, tmp;
1259 	int i, n;
1260 
1261 	/*
1262 	 * We don't want to take pages from statically allocated kegs at this
1263 	 * time
1264 	 */
1265 	if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
1266 		return;
1267 
1268 	for (i = 0; i < vm_ndomains; i++) {
 1269 		dom = &keg->uk_domain[i];
 1270 		CTR4(KTR_UMA, "keg_drain %s(%p) domain %d free items: %u",
 1271 		    keg->uk_name, keg, i, dom->ud_free);
 1272 		n = 0;
1273 		KEG_LOCK(keg, i);
1274 		LIST_FOREACH_SAFE(slab, &dom->ud_free_slab, us_link, tmp) {
1275 			if (keg->uk_flags & UMA_ZFLAG_HASH)
1276 				UMA_HASH_REMOVE(&keg->uk_hash, slab);
1277 			n++;
1278 			LIST_REMOVE(slab, us_link);
1279 			LIST_INSERT_HEAD(&freeslabs, slab, us_link);
1280 		}
1281 		dom->ud_pages -= n * keg->uk_ppera;
1282 		dom->ud_free -= n * keg->uk_ipers;
1283 		KEG_UNLOCK(keg, i);
1284 	}
1285 
1286 	while ((slab = LIST_FIRST(&freeslabs)) != NULL) {
1287 		LIST_REMOVE(slab, us_link);
1288 		keg_free_slab(keg, slab, keg->uk_ipers);
1289 	}
1290 }
1291 
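/*
 * Reclaim a zone's cached memory: drain or trim the bucket cache and, for
 * keg-backed zones, return free slabs to the VM.  The RECLAIMING flag
 * serializes this with concurrent reclaims and with zone_dtor().
 */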
1292 static void
1293 zone_reclaim(uma_zone_t zone, int waitok, bool drain)
1294 {
1295 
1296 	/*
1297 	 * Set draining to interlock with zone_dtor() so we can release our
1298 	 * locks as we go.  Only dtor() should do a WAITOK call since it
1299 	 * is the only call that knows the structure will still be available
1300 	 * when it wakes up.
1301 	 */
1302 	ZONE_LOCK(zone);
1303 	while (zone->uz_flags & UMA_ZFLAG_RECLAIMING) {
1304 		if (waitok == M_NOWAIT)
1305 			goto out;
1306 		msleep(zone, &zone->uz_lock, PVM, "zonedrain", 1);
1307 	}
1308 	zone->uz_flags |= UMA_ZFLAG_RECLAIMING;
1309 	ZONE_UNLOCK(zone);
1310 	bucket_cache_reclaim(zone, drain);
1311 
1312 	/*
1313 	 * The DRAINING flag protects us from being freed while
1314 	 * we're running.  Normally the uma_rwlock would protect us but we
1315 	 * must be able to release and acquire the right lock for each keg.
1316 	 */
1317 	if ((zone->uz_flags & UMA_ZFLAG_CACHE) == 0)
1318 		keg_drain(zone->uz_keg);
1319 	ZONE_LOCK(zone);
1320 	zone->uz_flags &= ~UMA_ZFLAG_RECLAIMING;
1321 	wakeup(zone);
1322 out:
1323 	ZONE_UNLOCK(zone);
1324 }
1325 
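/* Fully drain a zone's bucket cache and return free slabs to the VM. */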
1326 static void
1327 zone_drain(uma_zone_t zone, void *unused)
1328 {
1329 
1330 	zone_reclaim(zone, M_NOWAIT, true);
1331 }
1332 
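/*
 * Trim a zone's bucket cache to its estimated working set size and return
 * free slabs to the VM.
 */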
1333 static void
1334 zone_trim(uma_zone_t zone, void *unused)
1335 {
1336 
1337 	zone_reclaim(zone, M_NOWAIT, false);
1338 }
1339 
1340 /*
 1341  * Allocate a new slab for a keg and insert it into the partial slab list.
1342  * The keg should be unlocked on entry.  If the allocation succeeds it will
1343  * be locked on return.
1344  *
1345  * Arguments:
1346  *	flags   Wait flags for the item initialization routine
1347  *	aflags  Wait flags for the slab allocation
1348  *
1349  * Returns:
1350  *	The slab that was allocated or NULL if there is no memory and the
1351  *	caller specified M_NOWAIT.
1352  */
1353 static uma_slab_t
1354 keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int domain, int flags,
1355     int aflags)
1356 {
1357 	uma_domain_t dom;
1358 	uma_alloc allocf;
1359 	uma_slab_t slab;
1360 	unsigned long size;
1361 	uint8_t *mem;
1362 	uint8_t sflags;
1363 	int i;
1364 
1365 	KASSERT(domain >= 0 && domain < vm_ndomains,
1366 	    ("keg_alloc_slab: domain %d out of range", domain));
1367 
1368 	allocf = keg->uk_allocf;
1369 	slab = NULL;
1370 	mem = NULL;
1371 	if (keg->uk_flags & UMA_ZFLAG_OFFPAGE) {
1372 		uma_hash_slab_t hslab;
1373 		hslab = zone_alloc_item(slabzone(keg->uk_ipers), NULL,
1374 		    domain, aflags);
1375 		if (hslab == NULL)
1376 			goto fail;
1377 		slab = &hslab->uhs_slab;
1378 	}
1379 
1380 	/*
1381 	 * This reproduces the old vm_zone behavior of zero filling pages the
1382 	 * first time they are added to a zone.
1383 	 *
1384 	 * Malloced items are zeroed in uma_zalloc.
1385 	 */
1386 
1387 	if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
1388 		aflags |= M_ZERO;
1389 	else
1390 		aflags &= ~M_ZERO;
1391 
1392 	if (keg->uk_flags & UMA_ZONE_NODUMP)
1393 		aflags |= M_NODUMP;
1394 
1395 	/* zone is passed for legacy reasons. */
1396 	size = keg->uk_ppera * PAGE_SIZE;
1397 	mem = allocf(zone, size, domain, &sflags, aflags);
1398 	if (mem == NULL) {
1399 		if (keg->uk_flags & UMA_ZFLAG_OFFPAGE)
1400 			zone_free_item(slabzone(keg->uk_ipers),
1401 			    slab_tohashslab(slab), NULL, SKIP_NONE);
1402 		goto fail;
1403 	}
1404 	uma_total_inc(size);
1405 
1406 	/* For HASH zones all pages go to the same uma_domain. */
1407 	if ((keg->uk_flags & UMA_ZFLAG_HASH) != 0)
1408 		domain = 0;
1409 
1410 	/* Point the slab into the allocated memory */
1411 	if (!(keg->uk_flags & UMA_ZFLAG_OFFPAGE))
1412 		slab = (uma_slab_t )(mem + keg->uk_pgoff);
1413 	else
1414 		slab_tohashslab(slab)->uhs_data = mem;
1415 
1416 	if (keg->uk_flags & UMA_ZFLAG_VTOSLAB)
1417 		for (i = 0; i < keg->uk_ppera; i++)
1418 			vsetzoneslab((vm_offset_t)mem + (i * PAGE_SIZE),
1419 			    zone, slab);
1420 
1421 	slab->us_freecount = keg->uk_ipers;
1422 	slab->us_flags = sflags;
1423 	slab->us_domain = domain;
1424 
1425 	BIT_FILL(keg->uk_ipers, &slab->us_free);
1426 #ifdef INVARIANTS
1427 	BIT_ZERO(keg->uk_ipers, slab_dbg_bits(slab, keg));
1428 #endif
1429 
1430 	if (keg->uk_init != NULL) {
1431 		for (i = 0; i < keg->uk_ipers; i++)
1432 			if (keg->uk_init(slab_item(slab, keg, i),
1433 			    keg->uk_size, flags) != 0)
1434 				break;
1435 		if (i != keg->uk_ipers) {
1436 			keg_free_slab(keg, slab, i);
1437 			goto fail;
1438 		}
1439 	}
1440 	KEG_LOCK(keg, domain);
1441 
1442 	CTR3(KTR_UMA, "keg_alloc_slab: allocated slab %p for %s(%p)",
1443 	    slab, keg->uk_name, keg);
1444 
1445 	if (keg->uk_flags & UMA_ZFLAG_HASH)
1446 		UMA_HASH_INSERT(&keg->uk_hash, slab, mem);
1447 
1448 	/*
1449 	 * If we got a slab here it's safe to mark it partially used
1450 	 * and return.  We assume that the caller is going to remove
1451 	 * at least one item.
1452 	 */
1453 	dom = &keg->uk_domain[domain];
1454 	LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
1455 	dom->ud_pages += keg->uk_ppera;
1456 	dom->ud_free += keg->uk_ipers;
1457 
1458 	return (slab);
1459 
1460 fail:
1461 	return (NULL);
1462 }
1463 
1464 /*
1465  * This function is intended to be used early on in place of page_alloc() so
1466  * that we may use the boot time page cache to satisfy allocations before
1467  * the VM is ready.
1468  */
1469 static void *
1470 startup_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
1471     int wait)
1472 {
1473 	vm_paddr_t pa;
1474 	vm_page_t m;
1475 	void *mem;
1476 	int pages;
1477 	int i;
1478 
1479 	pages = howmany(bytes, PAGE_SIZE);
1480 	KASSERT(pages > 0, ("%s can't reserve 0 pages", __func__));
1481 
1482 	*pflag = UMA_SLAB_BOOT;
1483 	m = vm_page_alloc_contig_domain(NULL, 0, domain,
1484 	    malloc2vm_flags(wait) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED, pages,
1485 	    (vm_paddr_t)0, ~(vm_paddr_t)0, 1, 0, VM_MEMATTR_DEFAULT);
1486 	if (m == NULL)
1487 		return (NULL);
1488 
1489 	pa = VM_PAGE_TO_PHYS(m);
1490 	for (i = 0; i < pages; i++, pa += PAGE_SIZE) {
1491 #if defined(__aarch64__) || defined(__amd64__) || defined(__mips__) || \
1492     defined(__riscv) || defined(__powerpc64__)
1493 		if ((wait & M_NODUMP) == 0)
1494 			dump_add_page(pa);
1495 #endif
1496 	}
1497 	/* Allocate KVA and indirectly advance bootmem. */
1498 	mem = (void *)pmap_map(&bootmem, m->phys_addr,
1499 	    m->phys_addr + (pages * PAGE_SIZE), VM_PROT_READ | VM_PROT_WRITE);
 1500 	if ((wait & M_ZERO) != 0)
 1501 		bzero(mem, pages * PAGE_SIZE);
 1502 
 1503 	return (mem);
1504 }
1505 
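/*
 * Free pages handed out by startup_alloc(): unmap them, drop them from
 * minidumps and release them back to the page allocator.
 */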
1506 static void
1507 startup_free(void *mem, vm_size_t bytes)
1508 {
1509 	vm_offset_t va;
1510 	vm_page_t m;
1511 
1512 	va = (vm_offset_t)mem;
1513 	m = PHYS_TO_VM_PAGE(pmap_kextract(va));
1514 	pmap_remove(kernel_pmap, va, va + bytes);
1515 	for (; bytes != 0; bytes -= PAGE_SIZE, m++) {
1516 #if defined(__aarch64__) || defined(__amd64__) || defined(__mips__) || \
1517     defined(__riscv) || defined(__powerpc64__)
1518 		dump_drop_page(VM_PAGE_TO_PHYS(m));
1519 #endif
1520 		vm_page_unwire_noq(m);
1521 		vm_page_free(m);
1522 	}
1523 }
1524 
1525 /*
1526  * Allocates a number of pages from the system
1527  *
1528  * Arguments:
1529  *	bytes  The number of bytes requested
1530  *	wait  Shall we wait?
1531  *
1532  * Returns:
1533  *	A pointer to the alloced memory or possibly
1534  *	NULL if M_NOWAIT is set.
1535  */
1536 static void *
1537 page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
1538     int wait)
1539 {
1540 	void *p;	/* Returned page */
1541 
1542 	*pflag = UMA_SLAB_KERNEL;
1543 	p = (void *)kmem_malloc_domainset(DOMAINSET_FIXED(domain), bytes, wait);
1544 
1545 	return (p);
1546 }
1547 
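/*
 * Allocate the backing pages for a pcpu zone slab: one page per CPU slot,
 * preferably from the domain local to that CPU, mapped into a contiguous
 * KVA range.
 */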
1548 static void *
1549 pcpu_page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
1550     int wait)
1551 {
1552 	struct pglist alloctail;
1553 	vm_offset_t addr, zkva;
1554 	int cpu, flags;
1555 	vm_page_t p, p_next;
1556 #ifdef NUMA
1557 	struct pcpu *pc;
1558 #endif
1559 
1560 	MPASS(bytes == (mp_maxid + 1) * PAGE_SIZE);
1561 
1562 	TAILQ_INIT(&alloctail);
1563 	flags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
1564 	    malloc2vm_flags(wait);
1565 	*pflag = UMA_SLAB_KERNEL;
1566 	for (cpu = 0; cpu <= mp_maxid; cpu++) {
1567 		if (CPU_ABSENT(cpu)) {
1568 			p = vm_page_alloc(NULL, 0, flags);
1569 		} else {
1570 #ifndef NUMA
1571 			p = vm_page_alloc(NULL, 0, flags);
1572 #else
1573 			pc = pcpu_find(cpu);
1574 			if (__predict_false(VM_DOMAIN_EMPTY(pc->pc_domain)))
1575 				p = NULL;
1576 			else
1577 				p = vm_page_alloc_domain(NULL, 0,
1578 				    pc->pc_domain, flags);
1579 			if (__predict_false(p == NULL))
1580 				p = vm_page_alloc(NULL, 0, flags);
1581 #endif
1582 		}
1583 		if (__predict_false(p == NULL))
1584 			goto fail;
1585 		TAILQ_INSERT_TAIL(&alloctail, p, listq);
1586 	}
1587 	if ((addr = kva_alloc(bytes)) == 0)
1588 		goto fail;
1589 	zkva = addr;
1590 	TAILQ_FOREACH(p, &alloctail, listq) {
1591 		pmap_qenter(zkva, &p, 1);
1592 		zkva += PAGE_SIZE;
1593 	}
1594 	return ((void*)addr);
1595 fail:
1596 	TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
1597 		vm_page_unwire_noq(p);
1598 		vm_page_free(p);
1599 	}
1600 	return (NULL);
1601 }
1602 
1603 /*
 1604  * Allocates a number of pages not belonging to a VM object
1605  *
1606  * Arguments:
1607  *	bytes  The number of bytes requested
1608  *	wait   Shall we wait?
1609  *
1610  * Returns:
1611  *	A pointer to the alloced memory or possibly
1612  *	NULL if M_NOWAIT is set.
1613  */
1614 static void *
1615 noobj_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags,
1616     int wait)
1617 {
1618 	TAILQ_HEAD(, vm_page) alloctail;
1619 	u_long npages;
1620 	vm_offset_t retkva, zkva;
1621 	vm_page_t p, p_next;
1622 	uma_keg_t keg;
1623 
1624 	TAILQ_INIT(&alloctail);
1625 	keg = zone->uz_keg;
1626 
1627 	npages = howmany(bytes, PAGE_SIZE);
1628 	while (npages > 0) {
1629 		p = vm_page_alloc_domain(NULL, 0, domain, VM_ALLOC_INTERRUPT |
1630 		    VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
1631 		    ((wait & M_WAITOK) != 0 ? VM_ALLOC_WAITOK :
1632 		    VM_ALLOC_NOWAIT));
1633 		if (p != NULL) {
1634 			/*
1635 			 * Since the page does not belong to an object, its
1636 			 * listq is unused.
1637 			 */
1638 			TAILQ_INSERT_TAIL(&alloctail, p, listq);
1639 			npages--;
1640 			continue;
1641 		}
1642 		/*
1643 		 * Page allocation failed, free intermediate pages and
1644 		 * exit.
1645 		 */
1646 		TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
1647 			vm_page_unwire_noq(p);
1648 			vm_page_free(p);
1649 		}
1650 		return (NULL);
1651 	}
1652 	*flags = UMA_SLAB_PRIV;
1653 	zkva = keg->uk_kva +
1654 	    atomic_fetchadd_long(&keg->uk_offset, round_page(bytes));
1655 	retkva = zkva;
1656 	TAILQ_FOREACH(p, &alloctail, listq) {
1657 		pmap_qenter(zkva, &p, 1);
1658 		zkva += PAGE_SIZE;
1659 	}
1660 
1661 	return ((void *)retkva);
1662 }
1663 
1664 /*
1665  * Frees a number of pages to the system
1666  *
1667  * Arguments:
1668  *	mem   A pointer to the memory to be freed
1669  *	size  The size of the memory being freed
1670  *	flags The original p->us_flags field
1671  *
1672  * Returns:
1673  *	Nothing
1674  */
1675 static void
1676 page_free(void *mem, vm_size_t size, uint8_t flags)
1677 {
1678 
1679 	if ((flags & UMA_SLAB_BOOT) != 0) {
1680 		startup_free(mem, size);
1681 		return;
1682 	}
1683 
1684 	if ((flags & UMA_SLAB_KERNEL) == 0)
1685 		panic("UMA: page_free used with invalid flags %x", flags);
1686 
1687 	kmem_free((vm_offset_t)mem, size);
1688 }
1689 
1690 /*
1691  * Frees pcpu zone allocations
1692  *
1693  * Arguments:
1694  *	mem   A pointer to the memory to be freed
1695  *	size  The size of the memory being freed
1696  *	flags The original p->us_flags field
1697  *
1698  * Returns:
1699  *	Nothing
1700  */
1701 static void
1702 pcpu_page_free(void *mem, vm_size_t size, uint8_t flags)
1703 {
1704 	vm_offset_t sva, curva;
1705 	vm_paddr_t paddr;
1706 	vm_page_t m;
1707 
1708 	MPASS(size == (mp_maxid+1)*PAGE_SIZE);
1709 	sva = (vm_offset_t)mem;
1710 	for (curva = sva; curva < sva + size; curva += PAGE_SIZE) {
1711 		paddr = pmap_kextract(curva);
1712 		m = PHYS_TO_VM_PAGE(paddr);
1713 		vm_page_unwire_noq(m);
1714 		vm_page_free(m);
1715 	}
1716 	pmap_qremove(sva, size >> PAGE_SHIFT);
1717 	kva_free(sva, size);
1718 }
1719 
1720 
1721 /*
1722  * Zero fill initializer
1723  *
1724  * Arguments/Returns follow uma_init specifications
1725  */
1726 static int
1727 zero_init(void *mem, int size, int flags)
1728 {
1729 	bzero(mem, size);
1730 	return (0);
1731 }
1732 
1733 #ifdef INVARIANTS
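/* Return the slab's debug bitset, which follows the allocation bitset. */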
1734 struct noslabbits *
1735 slab_dbg_bits(uma_slab_t slab, uma_keg_t keg)
1736 {
1737 
1738 	return ((void *)((char *)&slab->us_free + BITSET_SIZE(keg->uk_ipers)));
1739 }
1740 #endif
1741 
1742 /*
1743  * Actual size of embedded struct slab (!OFFPAGE).
1744  */
1745 size_t
1746 slab_sizeof(int nitems)
1747 {
1748 	size_t s;
1749 
1750 	s = sizeof(struct uma_slab) + BITSET_SIZE(nitems) * SLAB_BITSETS;
1751 	return (roundup(s, UMA_ALIGN_PTR + 1));
1752 }
1753 
1754 /*
1755  * Size of memory for embedded slabs (!OFFPAGE).
1756  */
1757 size_t
1758 slab_space(int nitems)
1759 {
1760 	return (UMA_SLAB_SIZE - slab_sizeof(nitems));
1761 }
1762 
1763 #define	UMA_FIXPT_SHIFT	31
1764 #define	UMA_FRAC_FIXPT(n, d)						\
1765 	((uint32_t)(((uint64_t)(n) << UMA_FIXPT_SHIFT) / (d)))
1766 #define	UMA_FIXPT_PCT(f)						\
1767 	((u_int)(((uint64_t)100 * (f)) >> UMA_FIXPT_SHIFT))
1768 #define	UMA_PCT_FIXPT(pct)	UMA_FRAC_FIXPT((pct), 100)
1769 #define	UMA_MIN_EFF	UMA_PCT_FIXPT(100 - UMA_MAX_WASTE)
1770 
1771 /*
1772  * Compute the number of items that will fit in a slab.  If hdr is true, the
1773  * item count may be limited to provide space in the slab for an inline slab
1774  * header.  Otherwise, all slab space will be provided for item storage.
1775  */
1776 static u_int
1777 slab_ipers_hdr(u_int size, u_int rsize, u_int slabsize, bool hdr)
1778 {
1779 	u_int ipers;
1780 	u_int padpi;
1781 
1782 	/* The padding between items is not needed after the last item. */
1783 	padpi = rsize - size;
1784 
1785 	if (hdr) {
1786 		/*
1787 		 * Start with the maximum item count and remove items until
 1788 		 * the slab header fits alongside the allocatable memory.
1789 		 */
1790 		for (ipers = MIN(SLAB_MAX_SETSIZE,
1791 		    (slabsize + padpi - slab_sizeof(1)) / rsize);
1792 		    ipers > 0 &&
1793 		    ipers * rsize - padpi + slab_sizeof(ipers) > slabsize;
1794 		    ipers--)
1795 			continue;
1796 	} else {
1797 		ipers = MIN((slabsize + padpi) / rsize, SLAB_MAX_SETSIZE);
1798 	}
1799 
1800 	return (ipers);
1801 }
1802 
1803 /*
1804  * Compute the number of items that will fit in a slab for a startup zone.
1805  */
1806 int
1807 slab_ipers(size_t size, int align)
1808 {
1809 	int rsize;
1810 
1811 	rsize = roundup(size, align + 1); /* Assume no CACHESPREAD */
1812 	return (slab_ipers_hdr(size, rsize, UMA_SLAB_SIZE, true));
1813 }
1814 
1815 /*
1816  * Determine the format of a uma keg.  This determines where the slab header
1817  * will be placed (inline or offpage) and calculates ipers, rsize, and ppera.
1818  *
1819  * Arguments
1820  *	keg  The zone we should initialize
1821  *
1822  * Returns
1823  *	Nothing
1824  */
1825 static void
1826 keg_layout(uma_keg_t keg)
1827 {
1828 	u_int alignsize;
1829 	u_int eff;
1830 	u_int eff_offpage;
1831 	u_int format;
1832 	u_int ipers;
1833 	u_int ipers_offpage;
1834 	u_int pages;
1835 	u_int rsize;
1836 	u_int slabsize;
1837 
1838 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0 ||
1839 	    (keg->uk_size <= UMA_PCPU_ALLOC_SIZE &&
1840 	     (keg->uk_flags & UMA_ZONE_CACHESPREAD) == 0),
1841 	    ("%s: cannot configure for PCPU: keg=%s, size=%u, flags=0x%b",
1842 	     __func__, keg->uk_name, keg->uk_size, keg->uk_flags,
1843 	     PRINT_UMA_ZFLAGS));
1844 	KASSERT((keg->uk_flags &
1845 	    (UMA_ZFLAG_INTERNAL | UMA_ZFLAG_CACHEONLY)) == 0 ||
1846 	    (keg->uk_flags & (UMA_ZONE_NOTOUCH | UMA_ZONE_PCPU)) == 0,
1847 	    ("%s: incompatible flags 0x%b", __func__, keg->uk_flags,
1848 	     PRINT_UMA_ZFLAGS));
1849 
1850 	alignsize = keg->uk_align + 1;
1851 	format = 0;
1852 	ipers = 0;
1853 
1854 	/*
1855 	 * Calculate the size of each allocation (rsize) according to
1856 	 * alignment.  If the requested size is smaller than we have
1857 	 * allocation bits for we round it up.
1858 	 */
1859 	rsize = MAX(keg->uk_size, UMA_SMALLEST_UNIT);
1860 	rsize = roundup2(rsize, alignsize);
1861 
1862 	if ((keg->uk_flags & UMA_ZONE_PCPU) != 0) {
1863 		slabsize = UMA_PCPU_ALLOC_SIZE;
1864 		pages = mp_maxid + 1;
1865 	} else if ((keg->uk_flags & UMA_ZONE_CACHESPREAD) != 0) {
1866 		/*
1867 		 * We want one item to start on every align boundary in a page.
1868 		 * To do this we will span pages.  We will also extend the item
1869 		 * by the size of align if it is an even multiple of align.
1870 		 * Otherwise, it would fall on the same boundary every time.
1871 		 */
1872 		if ((rsize & alignsize) == 0)
1873 			rsize += alignsize;
1874 		slabsize = rsize * (PAGE_SIZE / alignsize);
1875 		slabsize = MIN(slabsize, rsize * SLAB_MAX_SETSIZE);
1876 		slabsize = MIN(slabsize, UMA_CACHESPREAD_MAX_SIZE);
1877 		pages = howmany(slabsize, PAGE_SIZE);
1878 		slabsize = ptoa(pages);
1879 	} else {
1880 		/*
1881 		 * Choose a slab size of as many pages as it takes to represent
1882 		 * a single item.  We will then try to fit as many additional
1883 		 * items into the slab as possible.  At some point, we may want
1884 		 * to increase the slab size for awkward item sizes in order to
1885 		 * increase efficiency.
1886 		 */
1887 		pages = howmany(keg->uk_size, PAGE_SIZE);
1888 		slabsize = ptoa(pages);
1889 	}
1890 
1891 	/* Evaluate an inline slab layout. */
1892 	if ((keg->uk_flags & (UMA_ZONE_NOTOUCH | UMA_ZONE_PCPU)) == 0)
1893 		ipers = slab_ipers_hdr(keg->uk_size, rsize, slabsize, true);
1894 
1895 	/* TODO: vm_page-embedded slab. */
1896 
1897 	/*
1898 	 * We can't do OFFPAGE if we're internal or if we've been
1899 	 * asked not to go to the VM for buckets.  If we did, we might
1900 	 * end up going to the VM for slabs, which we do not want
1901 	 * when we're UMA_ZFLAG_CACHEONLY as a result of UMA_ZONE_VM,
1902 	 * which clearly forbids it.
1903 	 */
1904 	if ((keg->uk_flags &
1905 	    (UMA_ZFLAG_INTERNAL | UMA_ZFLAG_CACHEONLY)) != 0) {
1906 		if (ipers == 0) {
1907 			/* We need an extra page for the slab header. */
1908 			pages++;
1909 			slabsize = ptoa(pages);
1910 			ipers = slab_ipers_hdr(keg->uk_size, rsize, slabsize,
1911 			    true);
1912 		}
1913 		goto out;
1914 	}
1915 
1916 	/*
1917 	 * See if using an OFFPAGE slab will improve our efficiency.
1918 	 * Only do this if we are below our efficiency threshold.
1919 	 *
1920 	 * XXX We could try growing slabsize to limit max waste as well.
1921 	 * Historically this was not done because the VM could not
1922 	 * efficiently handle contiguous allocations.
1923 	 */
1924 	eff = UMA_FRAC_FIXPT(ipers * rsize, slabsize);
1925 	ipers_offpage = slab_ipers_hdr(keg->uk_size, rsize, slabsize, false);
1926 	eff_offpage = UMA_FRAC_FIXPT(ipers_offpage * rsize,
1927 	    slabsize + slabzone(ipers_offpage)->uz_keg->uk_rsize);
1928 	if (ipers == 0 || (eff < UMA_MIN_EFF && eff < eff_offpage)) {
1929 		CTR5(KTR_UMA, "UMA decided we need offpage slab headers for "
1930 		    "keg: %s(%p), minimum efficiency allowed = %u%%, "
1931 		    "old efficiency = %u%%, offpage efficiency = %u%%",
1932 		    keg->uk_name, keg, UMA_FIXPT_PCT(UMA_MIN_EFF),
1933 		    UMA_FIXPT_PCT(eff), UMA_FIXPT_PCT(eff_offpage));
1934 		format = UMA_ZFLAG_OFFPAGE;
1935 		ipers = ipers_offpage;
1936 	}
1937 
1938 out:
1939 	/*
1940 	 * How do we find the slab header if it is offpage or if not all item
1941 	 * start addresses are in the same page?  We could solve the latter
1942 	 * case with vaddr alignment, but we don't.
1943 	 */
1944 	if ((format & UMA_ZFLAG_OFFPAGE) != 0 ||
1945 	    (ipers - 1) * rsize >= PAGE_SIZE) {
1946 		if ((keg->uk_flags & UMA_ZONE_NOTPAGE) != 0)
1947 			format |= UMA_ZFLAG_HASH;
1948 		else
1949 			format |= UMA_ZFLAG_VTOSLAB;
1950 	}
1951 	keg->uk_ipers = ipers;
1952 	keg->uk_rsize = rsize;
1953 	keg->uk_flags |= format;
1954 	keg->uk_ppera = pages;
1955 	CTR6(KTR_UMA, "%s: keg=%s, flags=%#x, rsize=%u, ipers=%u, ppera=%u",
1956 	    __func__, keg->uk_name, keg->uk_flags, rsize, ipers, pages);
1957 	KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_MAX_SETSIZE,
1958 	    ("%s: keg=%s, flags=0x%b, rsize=%u, ipers=%u, ppera=%u", __func__,
1959 	     keg->uk_name, keg->uk_flags, PRINT_UMA_ZFLAGS, rsize, ipers,
1960 	     pages));
1961 }
1962 
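/*
 * Illustrative CACHESPREAD sizing for keg_layout() above (hypothetical
 * numbers, 4KB pages): for a 96-byte item with 64-byte cache-line
 * alignment, rsize rounds up to 128 and, being an even multiple of the
 * alignment, is extended to 192.  The slab then spans
 * 192 * (4096 / 64) = 12288 bytes (3 pages, subject to the
 * SLAB_MAX_SETSIZE and UMA_CACHESPREAD_MAX_SIZE clamps), so successive
 * items start on every cache-line boundary within a page instead of
 * always landing on the same lines.
 */
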
1963 /*
1964  * Keg header ctor.  This initializes all fields and locks, and inserts
1965  * the keg onto the global keg list.
1966  *
1967  * Arguments/Returns follow uma_ctor specifications
1968  *	udata  Actually uma_kctor_args
1969  */
1970 static int
1971 keg_ctor(void *mem, int size, void *udata, int flags)
1972 {
1973 	struct uma_kctor_args *arg = udata;
1974 	uma_keg_t keg = mem;
1975 	uma_zone_t zone;
1976 	int i;
1977 
1978 	bzero(keg, size);
1979 	keg->uk_size = arg->size;
1980 	keg->uk_init = arg->uminit;
1981 	keg->uk_fini = arg->fini;
1982 	keg->uk_align = arg->align;
1983 	keg->uk_reserve = 0;
1984 	keg->uk_flags = arg->flags;
1985 
1986 	/*
1987 	 * We use a global round-robin policy by default.  Zones with
1988 	 * UMA_ZONE_FIRSTTOUCH set will use first-touch instead, in which
1989 	 * case the iterator is never run.
1990 	 */
1991 	keg->uk_dr.dr_policy = DOMAINSET_RR();
1992 	keg->uk_dr.dr_iter = 0;
1993 
1994 	/*
1995 	 * The master zone is passed to us at keg-creation time.
1996 	 */
1997 	zone = arg->zone;
1998 	keg->uk_name = zone->uz_name;
1999 
2000 	if (arg->flags & UMA_ZONE_VM)
2001 		keg->uk_flags |= UMA_ZFLAG_CACHEONLY;
2002 
2003 	if (arg->flags & UMA_ZONE_ZINIT)
2004 		keg->uk_init = zero_init;
2005 
2006 	if (arg->flags & UMA_ZONE_MALLOC)
2007 		keg->uk_flags |= UMA_ZFLAG_VTOSLAB;
2008 
2009 #ifndef SMP
2010 	keg->uk_flags &= ~UMA_ZONE_PCPU;
2011 #endif
2012 
2013 	keg_layout(keg);
2014 
2015 	/*
2016 	 * Use a first-touch NUMA policy for all kegs that pmap_extract()
2017 	 * will work on with the exception of critical VM structures
2018 	 * necessary for paging.
2019 	 *
2020 	 * Zones may override the default by specifying either policy.
2021 	 */
2022 #ifdef NUMA
2023 	if ((keg->uk_flags &
2024 	    (UMA_ZFLAG_HASH | UMA_ZONE_VM | UMA_ZONE_ROUNDROBIN)) == 0)
2025 		keg->uk_flags |= UMA_ZONE_FIRSTTOUCH;
2026 	else if ((keg->uk_flags & UMA_ZONE_FIRSTTOUCH) == 0)
2027 		keg->uk_flags |= UMA_ZONE_ROUNDROBIN;
2028 #endif
2029 
2030 	/*
2031 	 * If we haven't booted yet, we need allocations to go through the
2032 	 * startup cache until the VM is ready.
2033 	 */
2034 #ifdef UMA_MD_SMALL_ALLOC
2035 	if (keg->uk_ppera == 1)
2036 		keg->uk_allocf = uma_small_alloc;
2037 	else
2038 #endif
2039 	if (booted < BOOT_KVA)
2040 		keg->uk_allocf = startup_alloc;
2041 	else if (keg->uk_flags & UMA_ZONE_PCPU)
2042 		keg->uk_allocf = pcpu_page_alloc;
2043 	else
2044 		keg->uk_allocf = page_alloc;
2045 #ifdef UMA_MD_SMALL_ALLOC
2046 	if (keg->uk_ppera == 1)
2047 		keg->uk_freef = uma_small_free;
2048 	else
2049 #endif
2050 	if (keg->uk_flags & UMA_ZONE_PCPU)
2051 		keg->uk_freef = pcpu_page_free;
2052 	else
2053 		keg->uk_freef = page_free;
2054 
2055 	/*
2056 	 * Initialize keg's locks.
2057 	 */
2058 	for (i = 0; i < vm_ndomains; i++)
2059 		KEG_LOCK_INIT(keg, i, (arg->flags & UMA_ZONE_MTXCLASS));
2060 
2061 	/*
2062 	 * If we're putting the slab header in the actual page we need to
2063 	 * figure out where in each page it goes.  See slab_sizeof
2064 	 * definition.
2065 	 */
2066 	if (!(keg->uk_flags & UMA_ZFLAG_OFFPAGE)) {
2067 		size_t shsize;
2068 
2069 		shsize = slab_sizeof(keg->uk_ipers);
2070 		keg->uk_pgoff = (PAGE_SIZE * keg->uk_ppera) - shsize;
2071 		/*
2072 		 * The only way the following is possible is if, with our
2073 		 * UMA_ALIGN_PTR adjustments, we are now bigger than
2074 		 * UMA_SLAB_SIZE.  I haven't checked whether this is
2075 		 * mathematically possible for all cases, so we make
2076 		 * sure here anyway.
2077 		 */
2078 		KASSERT(keg->uk_pgoff + shsize <= PAGE_SIZE * keg->uk_ppera,
2079 		    ("zone %s ipers %d rsize %d size %d slab won't fit",
2080 		    zone->uz_name, keg->uk_ipers, keg->uk_rsize, keg->uk_size));
2081 	}
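	/*
	 * The uk_pgoff computed above places the embedded header at the very
	 * end of the slab: uk_pgoff = PAGE_SIZE * uk_ppera -
	 * slab_sizeof(uk_ipers).  As a hypothetical example, a one-page slab
	 * on a 4KB-page system whose header needs 96 bytes stores items from
	 * offset 0 and the header at offset 4000.
	 */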
2082 
2083 	if (keg->uk_flags & UMA_ZFLAG_HASH)
2084 		hash_alloc(&keg->uk_hash, 0);
2085 
2086 	CTR3(KTR_UMA, "keg_ctor %p zone %s(%p)", keg, zone->uz_name, zone);
2087 
2088 	LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);
2089 
2090 	rw_wlock(&uma_rwlock);
2091 	LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
2092 	rw_wunlock(&uma_rwlock);
2093 	return (0);
2094 }
2095 
2096 static void
2097 zone_kva_available(uma_zone_t zone, void *unused)
2098 {
2099 	uma_keg_t keg;
2100 
2101 	if ((zone->uz_flags & UMA_ZFLAG_CACHE) != 0)
2102 		return;
2103 	KEG_GET(zone, keg);
2104 	if (keg->uk_allocf == startup_alloc)
2105 		keg->uk_allocf = page_alloc;
2106 }
2107 
2108 static void
2109 zone_alloc_counters(uma_zone_t zone, void *unused)
2110 {
2111 
2112 	zone->uz_allocs = counter_u64_alloc(M_WAITOK);
2113 	zone->uz_frees = counter_u64_alloc(M_WAITOK);
2114 	zone->uz_fails = counter_u64_alloc(M_WAITOK);
2115 }
2116 
2117 static void
2118 zone_alloc_sysctl(uma_zone_t zone, void *unused)
2119 {
2120 	uma_zone_domain_t zdom;
2121 	uma_domain_t dom;
2122 	uma_keg_t keg;
2123 	struct sysctl_oid *oid, *domainoid;
2124 	int domains, i, cnt;
2125 	static const char *nokeg = "cache zone";
2126 	char *c;
2127 
2128 	/*
2129 	 * Make a sysctl-safe copy of the zone name by replacing
2130 	 * any special characters and handling duplicates by
2131 	 * appending an index.
2132 	 */
2133 	if (zone->uz_namecnt != 0) {
2134 		/* Count the number of decimal digits and '_' separator. */
2135 		for (i = 1, cnt = zone->uz_namecnt; cnt != 0; i++)
2136 			cnt /= 10;
2137 		zone->uz_ctlname = malloc(strlen(zone->uz_name) + i + 1,
2138 		    M_UMA, M_WAITOK);
2139 		sprintf(zone->uz_ctlname, "%s_%d", zone->uz_name,
2140 		    zone->uz_namecnt);
2141 	} else
2142 		zone->uz_ctlname = strdup(zone->uz_name, M_UMA);
2143 	for (c = zone->uz_ctlname; *c != '\0'; c++)
2144 		if (strchr("./\\ -", *c) != NULL)
2145 			*c = '_';
2146 
2147 	/*
2148 	 * Basic parameters at the root.
2149 	 */
2150 	zone->uz_oid = SYSCTL_ADD_NODE(NULL, SYSCTL_STATIC_CHILDREN(_vm_uma),
2151 	    OID_AUTO, zone->uz_ctlname, CTLFLAG_RD, NULL, "");
2152 	oid = zone->uz_oid;
2153 	SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2154 	    "size", CTLFLAG_RD, &zone->uz_size, 0, "Allocation size");
2155 	SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2156 	    "flags", CTLFLAG_RD | CTLTYPE_STRING | CTLFLAG_MPSAFE,
2157 	    zone, 0, sysctl_handle_uma_zone_flags, "A",
2158 	    "Allocator configuration flags");
2159 	SYSCTL_ADD_U16(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2160 	    "bucket_size", CTLFLAG_RD, &zone->uz_bucket_size, 0,
2161 	    "Desired per-cpu cache size");
2162 	SYSCTL_ADD_U16(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2163 	    "bucket_size_max", CTLFLAG_RD, &zone->uz_bucket_size_max, 0,
2164 	    "Maximum allowed per-cpu cache size");
2165 
2166 	/*
2167 	 * keg if present.
2168 	 */
2169 	if ((zone->uz_flags & UMA_ZFLAG_HASH) == 0)
2170 		domains = vm_ndomains;
2171 	else
2172 		domains = 1;
2173 	oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(zone->uz_oid), OID_AUTO,
2174 	    "keg", CTLFLAG_RD, NULL, "");
2175 	keg = zone->uz_keg;
2176 	if ((zone->uz_flags & UMA_ZFLAG_CACHE) == 0) {
2177 		SYSCTL_ADD_CONST_STRING(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2178 		    "name", CTLFLAG_RD, keg->uk_name, "Keg name");
2179 		SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2180 		    "rsize", CTLFLAG_RD, &keg->uk_rsize, 0,
2181 		    "Real object size with alignment");
2182 		SYSCTL_ADD_U16(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2183 		    "ppera", CTLFLAG_RD, &keg->uk_ppera, 0,
2184 		    "pages per-slab allocation");
2185 		SYSCTL_ADD_U16(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2186 		    "ipers", CTLFLAG_RD, &keg->uk_ipers, 0,
2187 		    "items available per-slab");
2188 		SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2189 		    "align", CTLFLAG_RD, &keg->uk_align, 0,
2190 		    "item alignment mask");
2191 		SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2192 		    "efficiency", CTLFLAG_RD | CTLTYPE_INT | CTLFLAG_MPSAFE,
2193 		    keg, 0, sysctl_handle_uma_slab_efficiency, "I",
2194 		    "Slab utilization (100 - internal fragmentation %)");
2195 		domainoid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(oid),
2196 		    OID_AUTO, "domain", CTLFLAG_RD, NULL, "");
2197 		for (i = 0; i < domains; i++) {
2198 			dom = &keg->uk_domain[i];
2199 			oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(domainoid),
2200 			    OID_AUTO, VM_DOMAIN(i)->vmd_name, CTLFLAG_RD,
2201 			    NULL, "");
2202 			SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2203 			    "pages", CTLFLAG_RD, &dom->ud_pages, 0,
2204 			    "Total pages currently allocated from VM");
2205 			SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2206 			    "free", CTLFLAG_RD, &dom->ud_free, 0,
2207 			    "items free in the slab layer");
2208 		}
2209 	} else
2210 		SYSCTL_ADD_CONST_STRING(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2211 		    "name", CTLFLAG_RD, nokeg, "Keg name");
2212 
2213 	/*
2214 	 * Information about zone limits.
2215 	 */
2216 	oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(zone->uz_oid), OID_AUTO,
2217 	    "limit", CTLFLAG_RD, NULL, "");
2218 	SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2219 	    "items", CTLFLAG_RD | CTLTYPE_U64 | CTLFLAG_MPSAFE,
2220 	    zone, 0, sysctl_handle_uma_zone_items, "QU",
2221 	    "current number of allocated items if limit is set");
2222 	SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2223 	    "max_items", CTLFLAG_RD, &zone->uz_max_items, 0,
2224 	    "Maximum number of cached items");
2225 	SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2226 	    "sleepers", CTLFLAG_RD, &zone->uz_sleepers, 0,
2227 	    "Number of threads sleeping at limit");
2228 	SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2229 	    "sleeps", CTLFLAG_RD, &zone->uz_sleeps, 0,
2230 	    "Total zone limit sleeps");
2231 	SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2232 	    "bucket_max", CTLFLAG_RD, &zone->uz_bkt_max, 0,
2233 	    "Maximum number of items in the bucket cache");
2234 	SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2235 	    "bucket_cnt", CTLFLAG_RD, &zone->uz_bkt_count, 0,
2236 	    "Number of items in the bucket cache");
2237 
2238 	/*
2239 	 * Per-domain zone information.
2240 	 */
2241 	domainoid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(zone->uz_oid),
2242 	    OID_AUTO, "domain", CTLFLAG_RD, NULL, "");
2243 	if ((zone->uz_flags & UMA_ZONE_FIRSTTOUCH) == 0)
2244 		domains = 1;
2245 	for (i = 0; i < domains; i++) {
2246 		zdom = &zone->uz_domain[i];
2247 		oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(domainoid),
2248 		    OID_AUTO, VM_DOMAIN(i)->vmd_name, CTLFLAG_RD, NULL, "");
2249 		SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2250 		    "nitems", CTLFLAG_RD, &zdom->uzd_nitems,
2251 		    "number of items in this domain");
2252 		SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2253 		    "imax", CTLFLAG_RD, &zdom->uzd_imax,
2254 		    "maximum item count in this period");
2255 		SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2256 		    "imin", CTLFLAG_RD, &zdom->uzd_imin,
2257 		    "minimum item count in this period");
2258 		SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2259 		    "wss", CTLFLAG_RD, &zdom->uzd_wss,
2260 		    "Working set size");
2261 	}
2262 
2263 	/*
2264 	 * General statistics.
2265 	 */
2266 	oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(zone->uz_oid), OID_AUTO,
2267 	    "stats", CTLFLAG_RD, NULL, "");
2268 	SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2269 	    "current", CTLFLAG_RD | CTLTYPE_INT | CTLFLAG_MPSAFE,
2270 	    zone, 1, sysctl_handle_uma_zone_cur, "I",
2271 	    "Current number of allocated items");
2272 	SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2273 	    "allocs", CTLFLAG_RD | CTLTYPE_U64 | CTLFLAG_MPSAFE,
2274 	    zone, 0, sysctl_handle_uma_zone_allocs, "QU",
2275 	    "Total allocation calls");
2276 	SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2277 	    "frees", CTLFLAG_RD | CTLTYPE_U64 | CTLFLAG_MPSAFE,
2278 	    zone, 0, sysctl_handle_uma_zone_frees, "QU",
2279 	    "Total free calls");
2280 	SYSCTL_ADD_COUNTER_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2281 	    "fails", CTLFLAG_RD, &zone->uz_fails,
2282 	    "Number of allocation failures");
2283 	SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2284 	    "xdomain", CTLFLAG_RD, &zone->uz_xdomain, 0,
2285 	    "Free calls from the wrong domain");
2286 }
2287 
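/*
 * Example of the sysctl naming in zone_alloc_sysctl() above (hypothetical
 * zone names): the second zone created as "foo buf" gets uz_namecnt = 1
 * and, after the space is replaced, is published as vm.uma.foo_buf_1,
 * while the first such zone appears as vm.uma.foo_buf.
 */
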
2288 struct uma_zone_count {
2289 	const char	*name;
2290 	int		count;
2291 };
2292 
2293 static void
2294 zone_count(uma_zone_t zone, void *arg)
2295 {
2296 	struct uma_zone_count *cnt;
2297 
2298 	cnt = arg;
2299 	/*
2300 	 * Some zones are rapidly created with identical names and
2301 	 * destroyed out of order.  This can lead to gaps in the count.
2302 	 * Use one greater than the maximum observed for this name.
2303 	 */
2304 	if (strcmp(zone->uz_name, cnt->name) == 0)
2305 		cnt->count = MAX(cnt->count,
2306 		    zone->uz_namecnt + 1);
2307 }
2308 
2309 static void
2310 zone_update_caches(uma_zone_t zone)
2311 {
2312 	int i;
2313 
2314 	for (i = 0; i <= mp_maxid; i++) {
2315 		cache_set_uz_size(&zone->uz_cpu[i], zone->uz_size);
2316 		cache_set_uz_flags(&zone->uz_cpu[i], zone->uz_flags);
2317 	}
2318 }
2319 
2320 /*
2321  * Zone header ctor.  This initializes all fields, locks, etc.
2322  *
2323  * Arguments/Returns follow uma_ctor specifications
2324  *	udata  Actually uma_zctor_args
2325  */
2326 static int
2327 zone_ctor(void *mem, int size, void *udata, int flags)
2328 {
2329 	struct uma_zone_count cnt;
2330 	struct uma_zctor_args *arg = udata;
2331 	uma_zone_t zone = mem;
2332 	uma_zone_t z;
2333 	uma_keg_t keg;
2334 	int i;
2335 
2336 	bzero(zone, size);
2337 	zone->uz_name = arg->name;
2338 	zone->uz_ctor = arg->ctor;
2339 	zone->uz_dtor = arg->dtor;
2340 	zone->uz_init = NULL;
2341 	zone->uz_fini = NULL;
2342 	zone->uz_sleeps = 0;
2343 	zone->uz_xdomain = 0;
2344 	zone->uz_bucket_size = 0;
2345 	zone->uz_bucket_size_min = 0;
2346 	zone->uz_bucket_size_max = BUCKET_MAX;
2347 	zone->uz_flags = (arg->flags & UMA_ZONE_SMR);
2348 	zone->uz_warning = NULL;
2349 	/* The domain structures follow the cpu structures. */
2350 	zone->uz_domain =
2351 	    (struct uma_zone_domain *)&zone->uz_cpu[mp_maxid + 1];
2352 	zone->uz_bkt_max = ULONG_MAX;
2353 	timevalclear(&zone->uz_ratecheck);
2354 
2355 	/* Count the number of duplicate names. */
2356 	cnt.name = arg->name;
2357 	cnt.count = 0;
2358 	zone_foreach(zone_count, &cnt);
2359 	zone->uz_namecnt = cnt.count;
2360 	ZONE_LOCK_INIT(zone, (arg->flags & UMA_ZONE_MTXCLASS));
2361 	ZONE_CROSS_LOCK_INIT(zone);
2362 
2363 	for (i = 0; i < vm_ndomains; i++)
2364 		TAILQ_INIT(&zone->uz_domain[i].uzd_buckets);
2365 
2366 #ifdef INVARIANTS
2367 	if (arg->uminit == trash_init && arg->fini == trash_fini)
2368 		zone->uz_flags |= UMA_ZFLAG_TRASH | UMA_ZFLAG_CTORDTOR;
2369 #endif
2370 
2371 	/*
2372 	 * This is a pure cache zone, no kegs.
2373 	 */
2374 	if (arg->import) {
2375 		KASSERT((arg->flags & UMA_ZFLAG_CACHE) != 0,
2376 		    ("zone_ctor: Import specified for non-cache zone."));
2377 		if (arg->flags & UMA_ZONE_VM)
2378 			arg->flags |= UMA_ZFLAG_CACHEONLY;
2379 		zone->uz_flags = arg->flags;
2380 		zone->uz_size = arg->size;
2381 		zone->uz_import = arg->import;
2382 		zone->uz_release = arg->release;
2383 		zone->uz_arg = arg->arg;
2384 		rw_wlock(&uma_rwlock);
2385 		LIST_INSERT_HEAD(&uma_cachezones, zone, uz_link);
2386 		rw_wunlock(&uma_rwlock);
2387 		goto out;
2388 	}
2389 
2390 	/*
2391 	 * Use the regular zone/keg/slab allocator.
2392 	 */
2393 	zone->uz_import = zone_import;
2394 	zone->uz_release = zone_release;
2395 	zone->uz_arg = zone;
2396 	keg = arg->keg;
2397 
2398 	if (arg->flags & UMA_ZONE_SECONDARY) {
2399 		KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0,
2400 		    ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
2401 		KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
2402 		zone->uz_init = arg->uminit;
2403 		zone->uz_fini = arg->fini;
2404 		zone->uz_flags |= UMA_ZONE_SECONDARY;
2405 		rw_wlock(&uma_rwlock);
2406 		ZONE_LOCK(zone);
2407 		LIST_FOREACH(z, &keg->uk_zones, uz_link) {
2408 			if (LIST_NEXT(z, uz_link) == NULL) {
2409 				LIST_INSERT_AFTER(z, zone, uz_link);
2410 				break;
2411 			}
2412 		}
2413 		ZONE_UNLOCK(zone);
2414 		rw_wunlock(&uma_rwlock);
2415 	} else if (keg == NULL) {
2416 		if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
2417 		    arg->align, arg->flags)) == NULL)
2418 			return (ENOMEM);
2419 	} else {
2420 		struct uma_kctor_args karg;
2421 		int error;
2422 
2423 		/* We should only be here from uma_startup() */
2424 		karg.size = arg->size;
2425 		karg.uminit = arg->uminit;
2426 		karg.fini = arg->fini;
2427 		karg.align = arg->align;
2428 		karg.flags = (arg->flags & ~UMA_ZONE_SMR);
2429 		karg.zone = zone;
2430 		error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
2431 		    flags);
2432 		if (error)
2433 			return (error);
2434 	}
2435 
2436 	/* Inherit properties from the keg. */
2437 	zone->uz_keg = keg;
2438 	zone->uz_size = keg->uk_size;
2439 	zone->uz_flags |= (keg->uk_flags &
2440 	    (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT));
2441 
2442 out:
2443 	if (__predict_true(booted >= BOOT_RUNNING)) {
2444 		zone_alloc_counters(zone, NULL);
2445 		zone_alloc_sysctl(zone, NULL);
2446 	} else {
2447 		zone->uz_allocs = EARLY_COUNTER;
2448 		zone->uz_frees = EARLY_COUNTER;
2449 		zone->uz_fails = EARLY_COUNTER;
2450 	}
2451 
2452 	/* Caller requests a private SMR context. */
2453 	if ((zone->uz_flags & UMA_ZONE_SMR) != 0)
2454 		zone->uz_smr = smr_create(zone->uz_name);
2455 
2456 	KASSERT((arg->flags & (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET)) !=
2457 	    (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET),
2458 	    ("Invalid zone flag combination"));
2459 	if (arg->flags & UMA_ZFLAG_INTERNAL)
2460 		zone->uz_bucket_size_max = zone->uz_bucket_size = 0;
2461 	if ((arg->flags & UMA_ZONE_MAXBUCKET) != 0)
2462 		zone->uz_bucket_size = BUCKET_MAX;
2463 	else if ((arg->flags & UMA_ZONE_MINBUCKET) != 0)
2464 		zone->uz_bucket_size_max = zone->uz_bucket_size = BUCKET_MIN;
2465 	else if ((arg->flags & UMA_ZONE_NOBUCKET) != 0)
2466 		zone->uz_bucket_size = 0;
2467 	else
2468 		zone->uz_bucket_size = bucket_select(zone->uz_size);
2469 	zone->uz_bucket_size_min = zone->uz_bucket_size;
2470 	if (zone->uz_dtor != NULL || zone->uz_ctor != NULL)
2471 		zone->uz_flags |= UMA_ZFLAG_CTORDTOR;
2472 	zone_update_caches(zone);
2473 
2474 	return (0);
2475 }
2476 
2477 /*
2478  * Keg header dtor.  This frees all data, destroys locks, frees the hash
2479  * table and removes the keg from the global list.
2480  *
2481  * Arguments/Returns follow uma_dtor specifications
2482  *	udata  unused
2483  */
2484 static void
2485 keg_dtor(void *arg, int size, void *udata)
2486 {
2487 	uma_keg_t keg;
2488 	uint32_t free, pages;
2489 	int i;
2490 
2491 	keg = (uma_keg_t)arg;
2492 	free = pages = 0;
2493 	for (i = 0; i < vm_ndomains; i++) {
2494 		free += keg->uk_domain[i].ud_free;
2495 		pages += keg->uk_domain[i].ud_pages;
2496 		KEG_LOCK_FINI(keg, i);
2497 	}
2498 	if (pages != 0)
2499 		printf("Freed UMA keg (%s) was not empty (%u items).  "
2500 		    "Lost %u pages of memory.\n",
2501 		    keg->uk_name ? keg->uk_name : "",
2502 		    pages / keg->uk_ppera * keg->uk_ipers - free, pages);
2503 
2504 	hash_free(&keg->uk_hash);
2505 }
2506 
2507 /*
2508  * Zone header dtor.
2509  *
2510  * Arguments/Returns follow uma_dtor specifications
2511  *	udata  unused
2512  */
2513 static void
2514 zone_dtor(void *arg, int size, void *udata)
2515 {
2516 	uma_zone_t zone;
2517 	uma_keg_t keg;
2518 
2519 	zone = (uma_zone_t)arg;
2520 
2521 	sysctl_remove_oid(zone->uz_oid, 1, 1);
2522 
2523 	if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
2524 		cache_drain(zone);
2525 
2526 	rw_wlock(&uma_rwlock);
2527 	LIST_REMOVE(zone, uz_link);
2528 	rw_wunlock(&uma_rwlock);
2529 	/*
2530 	 * XXX there are some races here where
2531 	 * the zone can be drained but the zone lock
2532 	 * released and then refilled before we
2533 	 * remove it... we don't care for now
2534 	 */
2535 	zone_reclaim(zone, M_WAITOK, true);
2536 	/*
2537 	 * We only destroy kegs from non secondary/non cache zones.
2538 	 */
2539 	if ((zone->uz_flags & (UMA_ZONE_SECONDARY | UMA_ZFLAG_CACHE)) == 0) {
2540 		keg = zone->uz_keg;
2541 		rw_wlock(&uma_rwlock);
2542 		LIST_REMOVE(keg, uk_link);
2543 		rw_wunlock(&uma_rwlock);
2544 		zone_free_item(kegs, keg, NULL, SKIP_NONE);
2545 	}
2546 	counter_u64_free(zone->uz_allocs);
2547 	counter_u64_free(zone->uz_frees);
2548 	counter_u64_free(zone->uz_fails);
2549 	free(zone->uz_ctlname, M_UMA);
2550 	ZONE_LOCK_FINI(zone);
2551 	ZONE_CROSS_LOCK_FINI(zone);
2552 }
2553 
2554 static void
2555 zone_foreach_unlocked(void (*zfunc)(uma_zone_t, void *arg), void *arg)
2556 {
2557 	uma_keg_t keg;
2558 	uma_zone_t zone;
2559 
2560 	LIST_FOREACH(keg, &uma_kegs, uk_link) {
2561 		LIST_FOREACH(zone, &keg->uk_zones, uz_link)
2562 			zfunc(zone, arg);
2563 	}
2564 	LIST_FOREACH(zone, &uma_cachezones, uz_link)
2565 		zfunc(zone, arg);
2566 }
2567 
2568 /*
2569  * Traverses every zone in the system and calls a callback
2570  *
2571  * Arguments:
2572  *	zfunc  A pointer to a function which accepts a zone
2573  *		as an argument.
2574  *
2575  * Returns:
2576  *	Nothing
2577  */
2578 static void
2579 zone_foreach(void (*zfunc)(uma_zone_t, void *arg), void *arg)
2580 {
2581 
2582 	rw_rlock(&uma_rwlock);
2583 	zone_foreach_unlocked(zfunc, arg);
2584 	rw_runlock(&uma_rwlock);
2585 }
2586 
2587 /*
2588  * Initialize the kernel memory allocator.  This is done after pages can be
2589  * allocated but before general KVA is available.
2590  */
2591 void
2592 uma_startup1(vm_offset_t virtual_avail)
2593 {
2594 	struct uma_zctor_args args;
2595 	size_t ksize, zsize, size;
2596 	uma_keg_t masterkeg;
2597 	uintptr_t m;
2598 	uint8_t pflag;
2599 
2600 	bootstart = bootmem = virtual_avail;
2601 
2602 	rw_init(&uma_rwlock, "UMA lock");
2603 	sx_init(&uma_reclaim_lock, "umareclaim");
2604 
2605 	ksize = sizeof(struct uma_keg) +
2606 	    (sizeof(struct uma_domain) * vm_ndomains);
2607 	ksize = roundup(ksize, UMA_SUPER_ALIGN);
2608 	zsize = sizeof(struct uma_zone) +
2609 	    (sizeof(struct uma_cache) * (mp_maxid + 1)) +
2610 	    (sizeof(struct uma_zone_domain) * vm_ndomains);
2611 	zsize = roundup(zsize, UMA_SUPER_ALIGN);
2612 
2613 	/* Allocate the zone of zones, zone of kegs, and zone of zones keg. */
2614 	size = (zsize * 2) + ksize;
2615 	m = (uintptr_t)startup_alloc(NULL, size, 0, &pflag, M_NOWAIT | M_ZERO);
2616 	zones = (uma_zone_t)m;
2617 	m += zsize;
2618 	kegs = (uma_zone_t)m;
2619 	m += zsize;
2620 	masterkeg = (uma_keg_t)m;
2621 
2622 	/* "manually" create the initial zone */
2623 	memset(&args, 0, sizeof(args));
2624 	args.name = "UMA Kegs";
2625 	args.size = ksize;
2626 	args.ctor = keg_ctor;
2627 	args.dtor = keg_dtor;
2628 	args.uminit = zero_init;
2629 	args.fini = NULL;
2630 	args.keg = masterkeg;
2631 	args.align = UMA_SUPER_ALIGN - 1;
2632 	args.flags = UMA_ZFLAG_INTERNAL;
2633 	zone_ctor(kegs, zsize, &args, M_WAITOK);
2634 
2635 	args.name = "UMA Zones";
2636 	args.size = zsize;
2637 	args.ctor = zone_ctor;
2638 	args.dtor = zone_dtor;
2639 	args.uminit = zero_init;
2640 	args.fini = NULL;
2641 	args.keg = NULL;
2642 	args.align = UMA_SUPER_ALIGN - 1;
2643 	args.flags = UMA_ZFLAG_INTERNAL;
2644 	zone_ctor(zones, zsize, &args, M_WAITOK);
2645 
2646 	/* Now make zones for slab headers */
2647 	slabzones[0] = uma_zcreate("UMA Slabs 0", SLABZONE0_SIZE,
2648 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
2649 	slabzones[1] = uma_zcreate("UMA Slabs 1", SLABZONE1_SIZE,
2650 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
2651 
2652 	hashzone = uma_zcreate("UMA Hash",
2653 	    sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
2654 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
2655 
2656 	bucket_init();
2657 	smr_init();
2658 }
2659 
2660 #ifndef UMA_MD_SMALL_ALLOC
2661 extern void vm_radix_reserve_kva(void);
2662 #endif
2663 
2664 /*
2665  * Advertise the availability of normal kva allocations and switch to
2666  * the default back-end allocator.  Marks the KVA we consumed on startup
2667  * as used in the map.
2668  */
2669 void
2670 uma_startup2(void)
2671 {
2672 
2673 	if (bootstart != bootmem) {
2674 		vm_map_lock(kernel_map);
2675 		(void)vm_map_insert(kernel_map, NULL, 0, bootstart, bootmem,
2676 		    VM_PROT_RW, VM_PROT_RW, MAP_NOFAULT);
2677 		vm_map_unlock(kernel_map);
2678 	}
2679 
2680 #ifndef UMA_MD_SMALL_ALLOC
2681 	/* Set up radix zone to use noobj_alloc. */
2682 	vm_radix_reserve_kva();
2683 #endif
2684 
2685 	booted = BOOT_KVA;
2686 	zone_foreach_unlocked(zone_kva_available, NULL);
2687 	bucket_enable();
2688 }
2689 
2690 /*
2691  * Finish our initialization steps.
2692  */
2693 static void
2694 uma_startup3(void)
2695 {
2696 
2697 #ifdef INVARIANTS
2698 	TUNABLE_INT_FETCH("vm.debug.divisor", &dbg_divisor);
2699 	uma_dbg_cnt = counter_u64_alloc(M_WAITOK);
2700 	uma_skip_cnt = counter_u64_alloc(M_WAITOK);
2701 #endif
2702 	zone_foreach_unlocked(zone_alloc_counters, NULL);
2703 	zone_foreach_unlocked(zone_alloc_sysctl, NULL);
2704 	callout_init(&uma_callout, 1);
2705 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
2706 	booted = BOOT_RUNNING;
2707 
2708 	EVENTHANDLER_REGISTER(shutdown_post_sync, uma_shutdown, NULL,
2709 	    EVENTHANDLER_PRI_FIRST);
2710 }
2711 
2712 static void
2713 uma_shutdown(void)
2714 {
2715 
2716 	booted = BOOT_SHUTDOWN;
2717 }
2718 
2719 static uma_keg_t
2720 uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
2721 		int align, uint32_t flags)
2722 {
2723 	struct uma_kctor_args args;
2724 
2725 	args.size = size;
2726 	args.uminit = uminit;
2727 	args.fini = fini;
2728 	args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align;
2729 	args.flags = flags;
2730 	args.zone = zone;
2731 	return (zone_alloc_item(kegs, &args, UMA_ANYDOMAIN, M_WAITOK));
2732 }
2733 
2734 /* Public functions */
2735 /* See uma.h */
2736 void
2737 uma_set_align(int align)
2738 {
2739 
2740 	if (align != UMA_ALIGN_CACHE)
2741 		uma_align_cache = align;
2742 }
2743 
2744 /* See uma.h */
2745 uma_zone_t
2746 uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
2747 		uma_init uminit, uma_fini fini, int align, uint32_t flags)
2748 
2749 {
2750 	struct uma_zctor_args args;
2751 	uma_zone_t res;
2752 
2753 	KASSERT(powerof2(align + 1), ("invalid zone alignment %d for \"%s\"",
2754 	    align, name));
2755 
2756 	/* This stuff is essential for the zone ctor */
2757 	memset(&args, 0, sizeof(args));
2758 	args.name = name;
2759 	args.size = size;
2760 	args.ctor = ctor;
2761 	args.dtor = dtor;
2762 	args.uminit = uminit;
2763 	args.fini = fini;
2764 #ifdef  INVARIANTS
2765 	/*
2766 	 * Inject procedures which check for memory use after free if we are
2767 	 * allowed to scramble the memory while it is not allocated.  This
2768 	 * requires that: UMA is actually able to access the memory, no init
2769 	 * or fini procedures, no dependency on the initial value of the
2770 	 * memory, and no (legitimate) use of the memory after free.  Note,
2771 	 * the ctor and dtor do not need to be empty.
2772 	 */
2773 	if ((!(flags & (UMA_ZONE_ZINIT | UMA_ZONE_NOTOUCH |
2774 	    UMA_ZONE_NOFREE))) && uminit == NULL && fini == NULL) {
2775 		args.uminit = trash_init;
2776 		args.fini = trash_fini;
2777 	}
2778 #endif
2779 	args.align = align;
2780 	args.flags = flags;
2781 	args.keg = NULL;
2782 
2783 	sx_slock(&uma_reclaim_lock);
2784 	res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK);
2785 	sx_sunlock(&uma_reclaim_lock);
2786 
2787 	return (res);
2788 }
2789 
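/*
 * A minimal sketch of uma_zcreate() consumer usage; the zone name, item
 * type and the choice of no callbacks are hypothetical:
 *
 *	static uma_zone_t foo_zone;
 *	struct foo *fp;
 *
 *	foo_zone = uma_zcreate("foo", sizeof(struct foo),
 *	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
 *	fp = uma_zalloc(foo_zone, M_WAITOK | M_ZERO);
 *	...
 *	uma_zfree(foo_zone, fp);
 *	uma_zdestroy(foo_zone);
 */
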
2790 /* See uma.h */
2791 uma_zone_t
2792 uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
2793 		    uma_init zinit, uma_fini zfini, uma_zone_t master)
2794 {
2795 	struct uma_zctor_args args;
2796 	uma_keg_t keg;
2797 	uma_zone_t res;
2798 
2799 	keg = master->uz_keg;
2800 	memset(&args, 0, sizeof(args));
2801 	args.name = name;
2802 	args.size = keg->uk_size;
2803 	args.ctor = ctor;
2804 	args.dtor = dtor;
2805 	args.uminit = zinit;
2806 	args.fini = zfini;
2807 	args.align = keg->uk_align;
2808 	args.flags = keg->uk_flags | UMA_ZONE_SECONDARY;
2809 	args.keg = keg;
2810 
2811 	sx_slock(&uma_reclaim_lock);
2812 	res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK);
2813 	sx_sunlock(&uma_reclaim_lock);
2814 
2815 	return (res);
2816 }
2817 
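/*
 * A secondary zone shares the master's keg (and therefore its slabs)
 * while layering its own ctor/dtor and init/fini on top.  A hedged
 * sketch with hypothetical names, similar in spirit to the mbuf packet
 * zone:
 *
 *	pkt_zone = uma_zsecond_create("foo_packet", foo_pkt_ctor,
 *	    foo_pkt_dtor, foo_pkt_init, foo_pkt_fini, raw_zone);
 */
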
2818 /* See uma.h */
2819 uma_zone_t
2820 uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor,
2821 		    uma_init zinit, uma_fini zfini, uma_import zimport,
2822 		    uma_release zrelease, void *arg, int flags)
2823 {
2824 	struct uma_zctor_args args;
2825 
2826 	memset(&args, 0, sizeof(args));
2827 	args.name = name;
2828 	args.size = size;
2829 	args.ctor = ctor;
2830 	args.dtor = dtor;
2831 	args.uminit = zinit;
2832 	args.fini = zfini;
2833 	args.import = zimport;
2834 	args.release = zrelease;
2835 	args.arg = arg;
2836 	args.align = 0;
2837 	args.flags = flags | UMA_ZFLAG_CACHE;
2838 
2839 	return (zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK));
2840 }
2841 
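/*
 * Cache zones have no keg; the import and release callbacks move items
 * in bulk to and from some external backing store.  A hedged sketch with
 * hypothetical callbacks:
 *
 *	cache = uma_zcache_create("foo_cache", sizeof(struct foo),
 *	    NULL, NULL, NULL, NULL, foo_import, foo_release, foo_arg, 0);
 *
 * where foo_import() fills an array of item pointers on request and
 * foo_release() takes back an array of items, matching the uma_import
 * and uma_release signatures.
 */
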
2842 /* See uma.h */
2843 void
2844 uma_zdestroy(uma_zone_t zone)
2845 {
2846 
2847 	/*
2848 	 * Large slabs are expensive to reclaim, so don't bother doing
2849 	 * unnecessary work if we're shutting down.
2850 	 */
2851 	if (booted == BOOT_SHUTDOWN &&
2852 	    zone->uz_fini == NULL && zone->uz_release == zone_release)
2853 		return;
2854 	sx_slock(&uma_reclaim_lock);
2855 	zone_free_item(zones, zone, NULL, SKIP_NONE);
2856 	sx_sunlock(&uma_reclaim_lock);
2857 }
2858 
2859 void
2860 uma_zwait(uma_zone_t zone)
2861 {
2862 	void *item;
2863 
2864 	item = uma_zalloc_arg(zone, NULL, M_WAITOK);
2865 	uma_zfree(zone, item);
2866 }
2867 
2868 void *
2869 uma_zalloc_pcpu_arg(uma_zone_t zone, void *udata, int flags)
2870 {
2871 	void *item;
2872 #ifdef SMP
2873 	int i;
2874 
2875 	MPASS(zone->uz_flags & UMA_ZONE_PCPU);
2876 #endif
2877 	item = uma_zalloc_arg(zone, udata, flags & ~M_ZERO);
2878 	if (item != NULL && (flags & M_ZERO)) {
2879 #ifdef SMP
2880 		for (i = 0; i <= mp_maxid; i++)
2881 			bzero(zpcpu_get_cpu(item, i), zone->uz_size);
2882 #else
2883 		bzero(item, zone->uz_size);
2884 #endif
2885 	}
2886 	return (item);
2887 }
2888 
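/*
 * Sketch of per-CPU zone usage with hypothetical names; the
 * uma_zalloc_pcpu() and uma_zfree_pcpu() wrappers simply pass a NULL
 * udata to the functions above:
 *
 *	p = uma_zalloc_pcpu(pcpu_zone, M_WAITOK | M_ZERO);
 *	counter = (uint64_t *)zpcpu_get(p);	(this CPU's copy)
 *	uma_zfree_pcpu(pcpu_zone, p);
 *
 * Each CPU owns its own UMA_PCPU_ALLOC_SIZE region within the item.
 */
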
2889 /*
2890  * A stub while both regular and pcpu cases are identical.
2891  */
2892 void
2893 uma_zfree_pcpu_arg(uma_zone_t zone, void *item, void *udata)
2894 {
2895 
2896 #ifdef SMP
2897 	MPASS(zone->uz_flags & UMA_ZONE_PCPU);
2898 #endif
2899 	uma_zfree_arg(zone, item, udata);
2900 }
2901 
2902 static inline void *
2903 item_ctor(uma_zone_t zone, int uz_flags, int size, void *udata, int flags,
2904     void *item)
2905 {
2906 #ifdef INVARIANTS
2907 	bool skipdbg;
2908 
2909 	skipdbg = uma_dbg_zskip(zone, item);
2910 	if (!skipdbg && (zone->uz_flags & UMA_ZFLAG_TRASH) != 0 &&
2911 	    zone->uz_ctor != trash_ctor)
2912 		trash_ctor(item, size, udata, flags);
2913 #endif
2914 	/* Check flags before loading ctor pointer. */
2915 	if (__predict_false((uz_flags & UMA_ZFLAG_CTORDTOR) != 0) &&
2916 	    __predict_false(zone->uz_ctor != NULL) &&
2917 	    zone->uz_ctor(item, size, udata, flags) != 0) {
2918 		counter_u64_add(zone->uz_fails, 1);
2919 		zone_free_item(zone, item, udata, SKIP_DTOR | SKIP_CNT);
2920 		return (NULL);
2921 	}
2922 #ifdef INVARIANTS
2923 	if (!skipdbg)
2924 		uma_dbg_alloc(zone, NULL, item);
2925 #endif
2926 	if (flags & M_ZERO)
2927 		bzero(item, size);
2928 
2929 	return (item);
2930 }
2931 
2932 static inline void
2933 item_dtor(uma_zone_t zone, void *item, int size, void *udata,
2934     enum zfreeskip skip)
2935 {
2936 #ifdef INVARIANTS
2937 	bool skipdbg;
2938 
2939 	skipdbg = uma_dbg_zskip(zone, item);
2940 	if (skip == SKIP_NONE && !skipdbg) {
2941 		if ((zone->uz_flags & UMA_ZONE_MALLOC) != 0)
2942 			uma_dbg_free(zone, udata, item);
2943 		else
2944 			uma_dbg_free(zone, NULL, item);
2945 	}
2946 #endif
2947 	if (__predict_true(skip < SKIP_DTOR)) {
2948 		if (zone->uz_dtor != NULL)
2949 			zone->uz_dtor(item, size, udata);
2950 #ifdef INVARIANTS
2951 		if (!skipdbg && (zone->uz_flags & UMA_ZFLAG_TRASH) != 0 &&
2952 		    zone->uz_dtor != trash_dtor)
2953 			trash_dtor(item, size, udata);
2954 #endif
2955 	}
2956 }
2957 
2958 #if defined(INVARIANTS) || defined(DEBUG_MEMGUARD) || defined(WITNESS)
2959 #define	UMA_ZALLOC_DEBUG
2960 static int
2961 uma_zalloc_debug(uma_zone_t zone, void **itemp, void *udata, int flags)
2962 {
2963 	int error;
2964 
2965 	error = 0;
2966 #ifdef WITNESS
2967 	if (flags & M_WAITOK) {
2968 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2969 		    "uma_zalloc_debug: zone \"%s\"", zone->uz_name);
2970 	}
2971 #endif
2972 
2973 #ifdef INVARIANTS
2974 	KASSERT((flags & M_EXEC) == 0,
2975 	    ("uma_zalloc_debug: called with M_EXEC"));
2976 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
2977 	    ("uma_zalloc_debug: called within spinlock or critical section"));
2978 	KASSERT((zone->uz_flags & UMA_ZONE_PCPU) == 0 || (flags & M_ZERO) == 0,
2979 	    ("uma_zalloc_debug: allocating from a pcpu zone with M_ZERO"));
2980 #endif
2981 
2982 #ifdef DEBUG_MEMGUARD
2983 	if ((zone->uz_flags & UMA_ZONE_SMR) == 0 && memguard_cmp_zone(zone)) {
2984 		void *item;
2985 		item = memguard_alloc(zone->uz_size, flags);
2986 		if (item != NULL) {
2987 			error = EJUSTRETURN;
2988 			if (zone->uz_init != NULL &&
2989 			    zone->uz_init(item, zone->uz_size, flags) != 0) {
2990 				*itemp = NULL;
2991 				return (error);
2992 			}
2993 			if (zone->uz_ctor != NULL &&
2994 			    zone->uz_ctor(item, zone->uz_size, udata,
2995 			    flags) != 0) {
2996 				counter_u64_add(zone->uz_fails, 1);
2997 			    	zone->uz_fini(item, zone->uz_size);
2998 				*itemp = NULL;
2999 				return (error);
3000 			}
3001 			*itemp = item;
3002 			return (error);
3003 		}
3004 		/* This is unfortunate but should not be fatal. */
3005 	}
3006 #endif
3007 	return (error);
3008 }
3009 
3010 static int
3011 uma_zfree_debug(uma_zone_t zone, void *item, void *udata)
3012 {
3013 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
3014 	    ("uma_zfree_debug: called with spinlock or critical section held"));
3015 
3016 #ifdef DEBUG_MEMGUARD
3017 	if ((zone->uz_flags & UMA_ZONE_SMR) == 0 && is_memguard_addr(item)) {
3018 		if (zone->uz_dtor != NULL)
3019 			zone->uz_dtor(item, zone->uz_size, udata);
3020 		if (zone->uz_fini != NULL)
3021 			zone->uz_fini(item, zone->uz_size);
3022 		memguard_free(item);
3023 		return (EJUSTRETURN);
3024 	}
3025 #endif
3026 	return (0);
3027 }
3028 #endif
3029 
3030 static __noinline void *
3031 uma_zalloc_single(uma_zone_t zone, void *udata, int flags)
3032 {
3033 	int domain;
3034 
3035 	/*
3036 	 * We could not get a bucket, so try to return a single item.
3037 	 */
3038 	if (zone->uz_flags & UMA_ZONE_FIRSTTOUCH)
3039 		domain = PCPU_GET(domain);
3040 	else
3041 		domain = UMA_ANYDOMAIN;
3042 	return (zone_alloc_item(zone, udata, domain, flags));
3043 }
3044 
3045 /* See uma.h */
3046 void *
3047 uma_zalloc_smr(uma_zone_t zone, int flags)
3048 {
3049 	uma_cache_bucket_t bucket;
3050 	uma_cache_t cache;
3051 	void *item;
3052 	int size, uz_flags;
3053 
3054 #ifdef UMA_ZALLOC_DEBUG
3055 	KASSERT((zone->uz_flags & UMA_ZONE_SMR) != 0,
3056 	    ("uma_zalloc_smr: called with non-SMR zone.\n"));
3057 	if (uma_zalloc_debug(zone, &item, NULL, flags) == EJUSTRETURN)
3058 		return (item);
3059 #endif
3060 
3061 	critical_enter();
3062 	do {
3063 		cache = &zone->uz_cpu[curcpu];
3064 		bucket = &cache->uc_allocbucket;
3065 		size = cache_uz_size(cache);
3066 		uz_flags = cache_uz_flags(cache);
3067 		if (__predict_true(bucket->ucb_cnt != 0)) {
3068 			item = cache_bucket_pop(cache, bucket);
3069 			critical_exit();
3070 			return (item_ctor(zone, uz_flags, size, NULL, flags,
3071 			    item));
3072 		}
3073 	} while (cache_alloc(zone, cache, NULL, flags));
3074 	critical_exit();
3075 
3076 	return (uma_zalloc_single(zone, NULL, flags));
3077 }
3078 
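/*
 * Hedged sketch of SMR zone usage (no specific consumer implied): the
 * zone is created with UMA_ZONE_SMR, writers allocate with
 * uma_zalloc_smr() and free with uma_zfree_smr(), and readers bracket
 * their lockless accesses with smr_enter()/smr_exit() on the zone's SMR
 * context so that freed items are not reused until every reader of the
 * old sequence number has drained.
 */
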
3079 /* See uma.h */
3080 void *
3081 uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
3082 {
3083 	uma_cache_bucket_t bucket;
3084 	uma_cache_t cache;
3085 	void *item;
3086 	int size, uz_flags;
3087 
3088 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
3089 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
3090 
3091 	/* This is the fast path allocation */
3092 	CTR3(KTR_UMA, "uma_zalloc_arg zone %s(%p) flags %d", zone->uz_name,
3093 	    zone, flags);
3094 
3095 #ifdef UMA_ZALLOC_DEBUG
3096 	KASSERT((zone->uz_flags & UMA_ZONE_SMR) == 0,
3097 	    ("uma_zalloc_arg: called with SMR zone.\n"));
3098 	if (uma_zalloc_debug(zone, &item, udata, flags) == EJUSTRETURN)
3099 		return (item);
3100 #endif
3101 
3102 	/*
3103 	 * If possible, allocate from the per-CPU cache.  There are two
3104 	 * requirements for safe access to the per-CPU cache: (1) the thread
3105 	 * accessing the cache must not be preempted or yield during access,
3106 	 * and (2) the thread must not migrate CPUs without switching which
3107 	 * cache it accesses.  We rely on a critical section to prevent
3108 	 * preemption and migration.  We release the critical section in
3109 	 * order to acquire the zone mutex if we are unable to allocate from
3110 	 * the current cache; when we re-acquire the critical section, we
3111 	 * must detect and handle migration if it has occurred.
3112 	 */
3113 	critical_enter();
3114 	do {
3115 		cache = &zone->uz_cpu[curcpu];
3116 		bucket = &cache->uc_allocbucket;
3117 		size = cache_uz_size(cache);
3118 		uz_flags = cache_uz_flags(cache);
3119 		if (__predict_true(bucket->ucb_cnt != 0)) {
3120 			item = cache_bucket_pop(cache, bucket);
3121 			critical_exit();
3122 			return (item_ctor(zone, uz_flags, size, udata, flags,
3123 			    item));
3124 		}
3125 	} while (cache_alloc(zone, cache, udata, flags));
3126 	critical_exit();
3127 
3128 	return (uma_zalloc_single(zone, udata, flags));
3129 }
3130 
3131 /*
3132  * Replenish an alloc bucket and possibly restore an old one.  Called in
3133  * a critical section.  Returns in a critical section.
3134  *
3135  * A false return value indicates an allocation failure.
3136  * A true return value indicates success and the caller should retry.
3137  */
3138 static __noinline bool
3139 cache_alloc(uma_zone_t zone, uma_cache_t cache, void *udata, int flags)
3140 {
3141 	uma_zone_domain_t zdom;
3142 	uma_bucket_t bucket;
3143 	int domain;
3144 	bool lockfail;
3145 
3146 	CRITICAL_ASSERT(curthread);
3147 
3148 	/*
3149 	 * If we have run out of items in our alloc bucket, see
3150 	 * if we can switch with the free bucket.
3151 	 *
3152 	 * SMR zones can't re-use the free bucket until the sequence has
3153 	 * expired.
3154 	 */
3155 	if ((zone->uz_flags & UMA_ZONE_SMR) == 0 &&
3156 	    cache->uc_freebucket.ucb_cnt != 0) {
3157 		cache_bucket_swap(&cache->uc_freebucket,
3158 		    &cache->uc_allocbucket);
3159 		return (true);
3160 	}
3161 
3162 	/*
3163 	 * Discard any empty allocation bucket while we hold no locks.
3164 	 */
3165 	bucket = cache_bucket_unload_alloc(cache);
3166 	critical_exit();
3167 	if (bucket != NULL)
3168 		bucket_free(zone, bucket, udata);
3169 
3170 	/* Short-circuit for zones without buckets or when buckets are disabled. */
3171 	if (zone->uz_bucket_size == 0 || bucketdisable) {
3172 		critical_enter();
3173 		return (false);
3174 	}
3175 
3176 	/*
3177 	 * The attempt to retrieve the item from the per-CPU cache has failed, so
3178 	 * we must go back to the zone.  This requires the zone lock, so we
3179 	 * must drop the critical section, then re-acquire it when we go back
3180 	 * to the cache.  Since the critical section is released, we may be
3181 	 * preempted or migrate.  As such, make sure not to maintain any
3182 	 * thread-local state specific to the cache from prior to releasing
3183 	 * the critical section.
3184 	 */
3185 	lockfail = false;
3186 	if (ZONE_TRYLOCK(zone) == 0) {
3187 		/* Record contention to size the buckets. */
3188 		ZONE_LOCK(zone);
3189 		lockfail = true;
3190 	}
3191 
3192 	/* See if we lost the race to fill the cache. */
3193 	critical_enter();
3194 	cache = &zone->uz_cpu[curcpu];
3195 	if (cache->uc_allocbucket.ucb_bucket != NULL) {
3196 		ZONE_UNLOCK(zone);
3197 		return (true);
3198 	}
3199 
3200 	/*
3201 	 * Check the zone's cache of buckets.
3202 	 */
3203 	if (zone->uz_flags & UMA_ZONE_FIRSTTOUCH) {
3204 		domain = PCPU_GET(domain);
3205 		zdom = &zone->uz_domain[domain];
3206 	} else {
3207 		domain = UMA_ANYDOMAIN;
3208 		zdom = &zone->uz_domain[0];
3209 	}
3210 
3211 	if ((bucket = zone_fetch_bucket(zone, zdom)) != NULL) {
3212 		KASSERT(bucket->ub_cnt != 0,
3213 		    ("uma_zalloc_arg: Returning an empty bucket."));
3214 		cache_bucket_load_alloc(cache, bucket);
3215 		return (true);
3216 	}
3217 	/* We are no longer associated with this CPU. */
3218 	critical_exit();
3219 
3220 	/*
3221 	 * We bump uz_bucket_size when lock contention indicates that the
3222 	 * current cache size is insufficient to handle the working set.
3223 	 */
3224 	if (lockfail && zone->uz_bucket_size < zone->uz_bucket_size_max)
3225 		zone->uz_bucket_size++;
3226 	ZONE_UNLOCK(zone);
3227 
3228 	/*
3229 	 * Fill a bucket and attempt to use it as the alloc bucket.
3230 	 */
3231 	bucket = zone_alloc_bucket(zone, udata, domain, flags);
3232 	CTR3(KTR_UMA, "uma_zalloc: zone %s(%p) bucket zone returned %p",
3233 	    zone->uz_name, zone, bucket);
3234 	if (bucket == NULL) {
3235 		critical_enter();
3236 		return (false);
3237 	}
3238 
3239 	/*
3240 	 * See if we lost the race or were migrated.  Cache the
3241 	 * initialized bucket to make this less likely or claim
3242 	 * the memory directly.
3243 	 */
3244 	ZONE_LOCK(zone);
3245 	critical_enter();
3246 	cache = &zone->uz_cpu[curcpu];
3247 	if (cache->uc_allocbucket.ucb_bucket == NULL &&
3248 	    ((zone->uz_flags & UMA_ZONE_FIRSTTOUCH) == 0 ||
3249 	    domain == PCPU_GET(domain))) {
3250 		cache_bucket_load_alloc(cache, bucket);
3251 		zdom->uzd_imax += bucket->ub_cnt;
3252 	} else if (zone->uz_bkt_count >= zone->uz_bkt_max) {
3253 		critical_exit();
3254 		ZONE_UNLOCK(zone);
3255 		bucket_drain(zone, bucket);
3256 		bucket_free(zone, bucket, udata);
3257 		critical_enter();
3258 		return (true);
3259 	} else
3260 		zone_put_bucket(zone, zdom, bucket, false);
3261 	ZONE_UNLOCK(zone);
3262 	return (true);
3263 }
3264 
3265 void *
3266 uma_zalloc_domain(uma_zone_t zone, void *udata, int domain, int flags)
3267 {
3268 
3269 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
3270 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
3271 
3272 	/* This is the fast path allocation */
3273 	CTR4(KTR_UMA, "uma_zalloc_domain zone %s(%p) domain %d flags %d",
3274 	    zone->uz_name, zone, domain, flags);
3275 
3276 	if (flags & M_WAITOK) {
3277 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
3278 		    "uma_zalloc_domain: zone \"%s\"", zone->uz_name);
3279 	}
3280 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
3281 	    ("uma_zalloc_domain: called with spinlock or critical section held"));
3282 
3283 	return (zone_alloc_item(zone, udata, domain, flags));
3284 }
3285 
3286 /*
3287  * Find a slab with some space.  Prefer partially used slabs over completely
3288  * free ones.  This helps to reduce fragmentation.
3289  *
3290  * If 'rr' is 1, search all domains starting from 'domain'.  Otherwise check
3291  * only 'domain'.
3292  */
3293 static uma_slab_t
3294 keg_first_slab(uma_keg_t keg, int domain, bool rr)
3295 {
3296 	uma_domain_t dom;
3297 	uma_slab_t slab;
3298 	int start;
3299 
3300 	KASSERT(domain >= 0 && domain < vm_ndomains,
3301 	    ("keg_first_slab: domain %d out of range", domain));
3302 	KEG_LOCK_ASSERT(keg, domain);
3303 
3304 	slab = NULL;
3305 	start = domain;
3306 	do {
3307 		dom = &keg->uk_domain[domain];
3308 		if (!LIST_EMPTY(&dom->ud_part_slab))
3309 			return (LIST_FIRST(&dom->ud_part_slab));
3310 		if (!LIST_EMPTY(&dom->ud_free_slab)) {
3311 			slab = LIST_FIRST(&dom->ud_free_slab);
3312 			LIST_REMOVE(slab, us_link);
3313 			LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
3314 			return (slab);
3315 		}
3316 		if (rr)
3317 			domain = (domain + 1) % vm_ndomains;
3318 	} while (domain != start);
3319 
3320 	return (NULL);
3321 }
3322 
3323 /*
3324  * Fetch an existing slab from a free or partial list.  Returns with the
3325  * keg domain lock held if a slab was found or unlocked if not.
3326  */
3327 static uma_slab_t
3328 keg_fetch_free_slab(uma_keg_t keg, int domain, bool rr, int flags)
3329 {
3330 	uma_slab_t slab;
3331 	uint32_t reserve;
3332 
3333 	/* HASH has a single free list. */
3334 	if ((keg->uk_flags & UMA_ZFLAG_HASH) != 0)
3335 		domain = 0;
3336 
3337 	KEG_LOCK(keg, domain);
3338 	reserve = (flags & M_USE_RESERVE) != 0 ? 0 : keg->uk_reserve;
3339 	if (keg->uk_domain[domain].ud_free <= reserve ||
3340 	    (slab = keg_first_slab(keg, domain, rr)) == NULL) {
3341 		KEG_UNLOCK(keg, domain);
3342 		return (NULL);
3343 	}
3344 	return (slab);
3345 }
3346 
3347 static uma_slab_t
3348 keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int rdomain, const int flags)
3349 {
3350 	struct vm_domainset_iter di;
3351 	uma_slab_t slab;
3352 	int aflags, domain;
3353 	bool rr;
3354 
3355 restart:
3356 	/*
3357 	 * Use the keg's policy if upper layers haven't already specified a
3358 	 * domain (as happens with first-touch zones).
3359 	 *
3360 	 * To avoid races we run the iterator with the keg lock held, but that
3361 	 * means that we cannot allow the vm_domainset layer to sleep.  Thus,
3362 	 * clear M_WAITOK and handle low memory conditions locally.
3363 	 */
3364 	rr = rdomain == UMA_ANYDOMAIN;
3365 	if (rr) {
3366 		aflags = (flags & ~M_WAITOK) | M_NOWAIT;
3367 		vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain,
3368 		    &aflags);
3369 	} else {
3370 		aflags = flags;
3371 		domain = rdomain;
3372 	}
3373 
3374 	for (;;) {
3375 		slab = keg_fetch_free_slab(keg, domain, rr, flags);
3376 		if (slab != NULL)
3377 			return (slab);
3378 
3379 		/*
3380 		 * M_NOVM means don't ask at all!
3381 		 */
3382 		if (flags & M_NOVM)
3383 			break;
3384 
3385 		slab = keg_alloc_slab(keg, zone, domain, flags, aflags);
3386 		if (slab != NULL)
3387 			return (slab);
3388 		if (!rr && (flags & M_WAITOK) == 0)
3389 			break;
3390 		if (rr && vm_domainset_iter_policy(&di, &domain) != 0) {
3391 			if ((flags & M_WAITOK) != 0) {
3392 				vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask);
3393 				goto restart;
3394 			}
3395 			break;
3396 		}
3397 	}
3398 
3399 	/*
3400 	 * We might not have been able to get a slab but another cpu
3401 	 * could have while we were unlocked.  Check again before we
3402 	 * fail.
3403 	 */
3404 	if ((slab = keg_fetch_free_slab(keg, domain, rr, flags)) != NULL)
3405 		return (slab);
3406 
3407 	return (NULL);
3408 }
3409 
3410 static void *
3411 slab_alloc_item(uma_keg_t keg, uma_slab_t slab)
3412 {
3413 	uma_domain_t dom;
3414 	void *item;
3415 	int freei;
3416 
3417 	KEG_LOCK_ASSERT(keg, slab->us_domain);
3418 
3419 	dom = &keg->uk_domain[slab->us_domain];
3420 	freei = BIT_FFS(keg->uk_ipers, &slab->us_free) - 1;
3421 	BIT_CLR(keg->uk_ipers, freei, &slab->us_free);
3422 	item = slab_item(slab, keg, freei);
3423 	slab->us_freecount--;
3424 	dom->ud_free--;
3425 
3426 	/* Move this slab to the full list */
3427 	if (slab->us_freecount == 0) {
3428 		LIST_REMOVE(slab, us_link);
3429 		LIST_INSERT_HEAD(&dom->ud_full_slab, slab, us_link);
3430 	}
3431 
3432 	return (item);
3433 }
3434 
3435 static int
3436 zone_import(void *arg, void **bucket, int max, int domain, int flags)
3437 {
3438 	uma_domain_t dom;
3439 	uma_zone_t zone;
3440 	uma_slab_t slab;
3441 	uma_keg_t keg;
3442 #ifdef NUMA
3443 	int stripe;
3444 #endif
3445 	int i;
3446 
3447 	zone = arg;
3448 	slab = NULL;
3449 	keg = zone->uz_keg;
3450 	/* Try to keep the buckets totally full */
3451 	for (i = 0; i < max; ) {
3452 		if ((slab = keg_fetch_slab(keg, zone, domain, flags)) == NULL)
3453 			break;
3454 #ifdef NUMA
3455 		stripe = howmany(max, vm_ndomains);
3456 #endif
3457 		dom = &keg->uk_domain[slab->us_domain];
3458 		while (slab->us_freecount && i < max) {
3459 			bucket[i++] = slab_alloc_item(keg, slab);
3460 			if (dom->ud_free <= keg->uk_reserve)
3461 				break;
3462 #ifdef NUMA
3463 			/*
3464 			 * If the zone is striped, we pick a new slab for every
3465 			 * N allocations.  Eliminating this conditional would
3466 			 * instead pick a new domain for each bucket rather
3467 			 * than stripe within each bucket.  The current option
3468 			 * produces more fragmentation and requires more CPU
3469 			 * time but yields better distribution.
3470 			 */
3471 			if ((zone->uz_flags & UMA_ZONE_ROUNDROBIN) != 0 &&
3472 			    vm_ndomains > 1 && --stripe == 0)
3473 				break;
3474 #endif
3475 		}
3476 		KEG_UNLOCK(keg, slab->us_domain);
3477 		/* Don't block if we allocated any successfully. */
3478 		flags &= ~M_WAITOK;
3479 		flags |= M_NOWAIT;
3480 	}
3481 
3482 	return (i);
3483 }
3484 
3485 static int
3486 zone_alloc_limit_hard(uma_zone_t zone, int count, int flags)
3487 {
3488 	uint64_t old, new, total, max;
3489 
3490 	/*
3491 	 * The hard case.  We're going to sleep because there were existing
3492 	 * sleepers or because we ran out of items.  This routine enforces
3493 	 * fairness by keeping FIFO order.
3494 	 *
3495 	 * First release our ill-gotten gains and make some noise.
3496 	 */
3497 	for (;;) {
3498 		zone_free_limit(zone, count);
3499 		zone_log_warning(zone);
3500 		zone_maxaction(zone);
3501 		if (flags & M_NOWAIT)
3502 			return (0);
3503 
3504 		/*
3505 		 * We need to allocate an item or set ourselves as a sleeper
3506 		 * while the sleepq lock is held to avoid wakeup races.  This
3507 		 * is essentially a home-rolled semaphore.
3508 		 */
3509 		sleepq_lock(&zone->uz_max_items);
3510 		old = zone->uz_items;
3511 		do {
3512 			MPASS(UZ_ITEMS_SLEEPERS(old) < UZ_ITEMS_SLEEPERS_MAX);
3513 			/* Cache the max since we will evaluate twice. */
3514 			max = zone->uz_max_items;
3515 			if (UZ_ITEMS_SLEEPERS(old) != 0 ||
3516 			    UZ_ITEMS_COUNT(old) >= max)
3517 				new = old + UZ_ITEMS_SLEEPER;
3518 			else
3519 				new = old + MIN(count, max - old);
3520 		} while (atomic_fcmpset_64(&zone->uz_items, &old, new) == 0);
3521 
3522 		/* We may have successfully allocated under the sleepq lock. */
3523 		if (UZ_ITEMS_SLEEPERS(new) == 0) {
3524 			sleepq_release(&zone->uz_max_items);
3525 			return (new - old);
3526 		}
3527 
3528 		/*
3529 		 * This is in a different cacheline from uz_items so that we
3530 		 * don't constantly invalidate the fastpath cacheline when we
3531 		 * adjust item counts.  This could be limited to toggling on
3532 		 * transitions.
3533 		 */
3534 		atomic_add_32(&zone->uz_sleepers, 1);
3535 		atomic_add_64(&zone->uz_sleeps, 1);
3536 
3537 		/*
3538 		 * We have added ourselves as a sleeper.  The sleepq lock
3539 		 * protects us from wakeup races.  Sleep now and then retry.
3540 		 */
3541 		sleepq_add(&zone->uz_max_items, NULL, "zonelimit", 0, 0);
3542 		sleepq_wait(&zone->uz_max_items, PVM);
3543 
3544 		/*
3545 		 * After wakeup, remove ourselves as a sleeper and try
3546 		 * again.  We no longer have the sleepq lock for protection.
3547 		 *
3548 		 * Subtract ourselves as a sleeper while attempting to add
3549 		 * our count.
3550 		 */
3551 		atomic_subtract_32(&zone->uz_sleepers, 1);
3552 		old = atomic_fetchadd_64(&zone->uz_items,
3553 		    -(UZ_ITEMS_SLEEPER - count));
3554 		/* We're no longer a sleeper. */
3555 		old -= UZ_ITEMS_SLEEPER;
3556 
3557 		/*
3558 		 * If we're still at the limit, restart.  Notably do not
3559 		 * block on other sleepers.  Cache the max value to protect
3560 		 * against changes via sysctl.
3561 		 */
3562 		total = UZ_ITEMS_COUNT(old);
3563 		max = zone->uz_max_items;
3564 		if (total >= max)
3565 			continue;
3566 		/* Truncate if necessary, otherwise wake other sleepers. */
3567 		if (total + count > max) {
3568 			zone_free_limit(zone, total + count - max);
3569 			count = max - total;
3570 		} else if (total + count < max && UZ_ITEMS_SLEEPERS(old) != 0)
3571 			wakeup_one(&zone->uz_max_items);
3572 
3573 		return (count);
3574 	}
3575 }
3576 
3577 /*
3578  * Allocate 'count' items from our max_items limit.  Returns the number
3579  * available.  If M_NOWAIT is not specified it will sleep until at least
3580  * one item can be allocated.
3581  */
3582 static int
3583 zone_alloc_limit(uma_zone_t zone, int count, int flags)
3584 {
3585 	uint64_t old;
3586 	uint64_t max;
3587 
3588 	max = zone->uz_max_items;
3589 	MPASS(max > 0);
3590 
3591 	/*
3592 	 * We expect normal allocations to succeed with a simple
3593 	 * fetchadd.
3594 	 */
3595 	old = atomic_fetchadd_64(&zone->uz_items, count);
3596 	if (__predict_true(old + count <= max))
3597 		return (count);
3598 
3599 	/*
3600 	 * If we had some items and no sleepers just return the
3601 	 * truncated value.  We have to release the excess space
3602 	 * though because that may wake sleepers who weren't woken
3603 	 * because we were temporarily over the limit.
3604 	 */
3605 	if (old < max) {
3606 		zone_free_limit(zone, (old + count) - max);
3607 		return (max - old);
3608 	}
3609 	return (zone_alloc_limit_hard(zone, count, flags));
3610 }
3611 
3612 /*
3613  * Free a number of items back to the limit.
3614  */
3615 static void
3616 zone_free_limit(uma_zone_t zone, int count)
3617 {
3618 	uint64_t old;
3619 
3620 	MPASS(count > 0);
3621 
3622 	/*
3623 	 * In the common case we either have no sleepers or
3624 	 * are still over the limit and can just return.
3625 	 */
3626 	old = atomic_fetchadd_64(&zone->uz_items, -count);
3627 	if (__predict_true(UZ_ITEMS_SLEEPERS(old) == 0 ||
3628 	   UZ_ITEMS_COUNT(old) - count >= zone->uz_max_items))
3629 		return;
3630 
3631 	/*
3632 	 * Moderate the rate of wakeups.  Sleepers will continue
3633 	 * to generate wakeups if necessary.
3634 	 */
3635 	wakeup_one(&zone->uz_max_items);
3636 }
3637 
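/*
 * A note on the limit accounting above: uz_items is a single 64-bit word
 * that packs the allocated-item count in its low bits and the sleeper count
 * in its high bits (see the UZ_ITEMS_COUNT(), UZ_ITEMS_SLEEPERS() and
 * UZ_ITEMS_SLEEPER definitions).  The common case in zone_alloc_limit() is
 * one fetchadd of 'count'; only when the count field would exceed
 * uz_max_items does zone_alloc_limit_hard() add UZ_ITEMS_SLEEPER, which
 * bumps the sleeper field without disturbing the count so that waiters are
 * visible to zone_free_limit() and can be woken as items are released.
 */
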
3638 static uma_bucket_t
3639 zone_alloc_bucket(uma_zone_t zone, void *udata, int domain, int flags)
3640 {
3641 	uma_bucket_t bucket;
3642 	int maxbucket, cnt;
3643 
3644 	CTR3(KTR_UMA, "zone_alloc_bucket zone %s(%p) domain %d", zone->uz_name,
3645 	    zone, domain);
3646 
3647 	/* Avoid allocs targeting empty domains. */
3648 	if (domain != UMA_ANYDOMAIN && VM_DOMAIN_EMPTY(domain))
3649 		domain = UMA_ANYDOMAIN;
3650 
3651 	if (zone->uz_max_items > 0)
3652 		maxbucket = zone_alloc_limit(zone, zone->uz_bucket_size,
3653 		    M_NOWAIT);
3654 	else
3655 		maxbucket = zone->uz_bucket_size;
3656 	if (maxbucket == 0)
3657 		return (NULL);
3658 
3659 	/* Don't wait for buckets, preserve caller's NOVM setting. */
3660 	bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM));
3661 	if (bucket == NULL) {
3662 		cnt = 0;
3663 		goto out;
3664 	}
3665 
3666 	bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket,
3667 	    MIN(maxbucket, bucket->ub_entries), domain, flags);
3668 
3669 	/*
3670 	 * Initialize the memory if necessary.
3671 	 */
3672 	if (bucket->ub_cnt != 0 && zone->uz_init != NULL) {
3673 		int i;
3674 
3675 		for (i = 0; i < bucket->ub_cnt; i++)
3676 			if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size,
3677 			    flags) != 0)
3678 				break;
3679 		/*
3680 		 * If we couldn't initialize the whole bucket, put the
3681 		 * rest back onto the freelist.
3682 		 */
3683 		if (i != bucket->ub_cnt) {
3684 			zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i],
3685 			    bucket->ub_cnt - i);
3686 #ifdef INVARIANTS
3687 			bzero(&bucket->ub_bucket[i],
3688 			    sizeof(void *) * (bucket->ub_cnt - i));
3689 #endif
3690 			bucket->ub_cnt = i;
3691 		}
3692 	}
3693 
3694 	cnt = bucket->ub_cnt;
3695 	if (bucket->ub_cnt == 0) {
3696 		bucket_free(zone, bucket, udata);
3697 		counter_u64_add(zone->uz_fails, 1);
3698 		bucket = NULL;
3699 	}
3700 out:
3701 	if (zone->uz_max_items > 0 && cnt < maxbucket)
3702 		zone_free_limit(zone, maxbucket - cnt);
3703 
3704 	return (bucket);
3705 }
3706 
3707 /*
3708  * Allocates a single item from a zone.
3709  *
3710  * Arguments
3711  *	zone   The zone to alloc for.
3712  *	udata  The data to be passed to the constructor.
3713  *	domain The domain to allocate from or UMA_ANYDOMAIN.
3714  *	flags  M_WAITOK, M_NOWAIT, M_ZERO.
3715  *
3716  * Returns
3717  *	NULL if there is no memory and M_NOWAIT is set
3718  *	An item if successful
3719  */
3720 
3721 static void *
3722 zone_alloc_item(uma_zone_t zone, void *udata, int domain, int flags)
3723 {
3724 	void *item;
3725 
3726 	if (zone->uz_max_items > 0 && zone_alloc_limit(zone, 1, flags) == 0)
3727 		return (NULL);
3728 
3729 	/* Avoid allocs targeting empty domains. */
3730 	if (domain != UMA_ANYDOMAIN && VM_DOMAIN_EMPTY(domain))
3731 		domain = UMA_ANYDOMAIN;
3732 
3733 	if (zone->uz_import(zone->uz_arg, &item, 1, domain, flags) != 1)
3734 		goto fail_cnt;
3735 
3736 	/*
3737 	 * We have to call both the zone's init (not the keg's init)
3738 	 * and the zone's ctor.  This is because the item is going from
3739 	 * a keg slab directly to the user, and the user is expecting it
3740 	 * to be both zone-init'd as well as zone-ctor'd.
3741 	 */
3742 	if (zone->uz_init != NULL) {
3743 		if (zone->uz_init(item, zone->uz_size, flags) != 0) {
3744 			zone_free_item(zone, item, udata, SKIP_FINI | SKIP_CNT);
3745 			goto fail_cnt;
3746 		}
3747 	}
3748 	item = item_ctor(zone, zone->uz_flags, zone->uz_size, udata, flags,
3749 	    item);
3750 	if (item == NULL)
3751 		goto fail;
3752 
3753 	counter_u64_add(zone->uz_allocs, 1);
3754 	CTR3(KTR_UMA, "zone_alloc_item item %p from %s(%p)", item,
3755 	    zone->uz_name, zone);
3756 
3757 	return (item);
3758 
3759 fail_cnt:
3760 	counter_u64_add(zone->uz_fails, 1);
3761 fail:
3762 	if (zone->uz_max_items > 0)
3763 		zone_free_limit(zone, 1);
3764 	CTR2(KTR_UMA, "zone_alloc_item failed from %s(%p)",
3765 	    zone->uz_name, zone);
3766 
3767 	return (NULL);
3768 }
3769 
3770 /* See uma.h */
3771 void
3772 uma_zfree_smr(uma_zone_t zone, void *item)
3773 {
3774 	uma_cache_t cache;
3775 	uma_cache_bucket_t bucket;
3776 	int domain, itemdomain, uz_flags;
3777 
3778 #ifdef UMA_ZALLOC_DEBUG
3779 	KASSERT((zone->uz_flags & UMA_ZONE_SMR) != 0,
3780 	    ("uma_zfree_smr: called with non-SMR zone.\n"));
3781 	KASSERT(item != NULL, ("uma_zfree_smr: Called with NULL pointer."));
3782 	if (uma_zfree_debug(zone, item, NULL) == EJUSTRETURN)
3783 		return;
3784 #endif
3785 	cache = &zone->uz_cpu[curcpu];
3786 	uz_flags = cache_uz_flags(cache);
3787 	domain = itemdomain = 0;
3788 #ifdef NUMA
3789 	if ((uz_flags & UMA_ZONE_FIRSTTOUCH) != 0)
3790 		itemdomain = _vm_phys_domain(pmap_kextract((vm_offset_t)item));
3791 #endif
3792 	critical_enter();
3793 	do {
3794 		cache = &zone->uz_cpu[curcpu];
3795 		/* SMR Zones must free to the free bucket. */
3796 		bucket = &cache->uc_freebucket;
3797 #ifdef NUMA
3798 		domain = PCPU_GET(domain);
3799 		if ((uz_flags & UMA_ZONE_FIRSTTOUCH) != 0 &&
3800 		    domain != itemdomain) {
3801 			bucket = &cache->uc_crossbucket;
3802 		}
3803 #endif
3804 		if (__predict_true(bucket->ucb_cnt < bucket->ucb_entries)) {
3805 			cache_bucket_push(cache, bucket, item);
3806 			critical_exit();
3807 			return;
3808 		}
3809 	} while (cache_free(zone, cache, NULL, item, itemdomain));
3810 	critical_exit();
3811 
3812 	/*
3813 	 * If nothing else caught this, we'll just do an internal free.
3814 	 */
3815 	zone_free_item(zone, item, NULL, SKIP_NONE);
3816 }
3817 
3818 /* See uma.h */
3819 void
3820 uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
3821 {
3822 	uma_cache_t cache;
3823 	uma_cache_bucket_t bucket;
3824 	int domain, itemdomain, uz_flags;
3825 
3826 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
3827 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
3828 
3829 	CTR2(KTR_UMA, "uma_zfree_arg zone %s(%p)", zone->uz_name, zone);
3830 
3831 #ifdef UMA_ZALLOC_DEBUG
3832 	KASSERT((zone->uz_flags & UMA_ZONE_SMR) == 0,
3833 	    ("uma_zfree_arg: called with SMR zone.\n"));
3834 	if (uma_zfree_debug(zone, item, udata) == EJUSTRETURN)
3835 		return;
3836 #endif
3837 	/* uma_zfree(..., NULL) does nothing, to match free(9). */
3838 	if (item == NULL)
3839 		return;
3840 
3841 	/*
3842 	 * We are accessing the per-cpu cache without a critical section to
3843 	 * fetch size and flags.  This is acceptable, if we are preempted we
3844 	 * will simply read another cpu's line.
3845 	 */
3846 	cache = &zone->uz_cpu[curcpu];
3847 	uz_flags = cache_uz_flags(cache);
3848 	if (UMA_ALWAYS_CTORDTOR ||
3849 	    __predict_false((uz_flags & UMA_ZFLAG_CTORDTOR) != 0))
3850 		item_dtor(zone, item, cache_uz_size(cache), udata, SKIP_NONE);
3851 
3852 	/*
3853 	 * The race here is acceptable.  If we miss it we'll just have to wait
3854 	 * a little longer for the limits to be reset.
3855 	 */
3856 	if (__predict_false(uz_flags & UMA_ZFLAG_LIMIT)) {
3857 		if (zone->uz_sleepers > 0)
3858 			goto zfree_item;
3859 	}
3860 
3861 	/*
3862 	 * If possible, free to the per-CPU cache.  There are two
3863 	 * requirements for safe access to the per-CPU cache: (1) the thread
3864 	 * accessing the cache must not be preempted or yield during access,
3865 	 * and (2) the thread must not migrate CPUs without switching which
3866 	 * cache it accesses.  We rely on a critical section to prevent
3867 	 * preemption and migration.  We release the critical section in
3868 	 * order to acquire the zone mutex if we are unable to free to the
3869 	 * current cache; when we re-acquire the critical section, we must
3870 	 * detect and handle migration if it has occurred.
3871 	 */
3872 	domain = itemdomain = 0;
3873 #ifdef NUMA
3874 	if ((uz_flags & UMA_ZONE_FIRSTTOUCH) != 0)
3875 		itemdomain = _vm_phys_domain(pmap_kextract((vm_offset_t)item));
3876 #endif
3877 	critical_enter();
3878 	do {
3879 		cache = &zone->uz_cpu[curcpu];
3880 		/*
3881 		 * Try to free into the allocbucket first to give LIFO
3882 		 * ordering for cache-hot datastructures.  Spill over
3883 		 * into the freebucket if necessary.  Alloc will swap
3884 		 * them if one runs dry.
3885 		 */
3886 		bucket = &cache->uc_allocbucket;
3887 #ifdef NUMA
3888 		domain = PCPU_GET(domain);
3889 		if ((uz_flags & UMA_ZONE_FIRSTTOUCH) != 0 &&
3890 		    domain != itemdomain) {
3891 			bucket = &cache->uc_crossbucket;
3892 		} else
3893 #endif
3894 		if (bucket->ucb_cnt >= bucket->ucb_entries)
3895 			bucket = &cache->uc_freebucket;
3896 		if (__predict_true(bucket->ucb_cnt < bucket->ucb_entries)) {
3897 			cache_bucket_push(cache, bucket, item);
3898 			critical_exit();
3899 			return;
3900 		}
3901 	} while (cache_free(zone, cache, udata, item, itemdomain));
3902 	critical_exit();
3903 
3904 	/*
3905 	 * If nothing else caught this, we'll just do an internal free.
3906 	 */
3907 zfree_item:
3908 	zone_free_item(zone, item, udata, SKIP_DTOR);
3909 }
3910 
3911 #ifdef NUMA
3912 /*
3913  * Sort the contents of a cross-domain free bucket into buckets for the
3914  * correct domains and cache them.
3915  */
3916 static void
3917 zone_free_cross(uma_zone_t zone, uma_bucket_t bucket, void *udata)
3918 {
3919 	struct uma_bucketlist fullbuckets;
3920 	uma_zone_domain_t zdom;
3921 	uma_bucket_t b;
3922 	void *item;
3923 	int domain;
3924 
3925 	CTR3(KTR_UMA,
3926 	    "uma_zfree: zone %s(%p) draining cross bucket %p",
3927 	    zone->uz_name, zone, bucket);
3928 
3929 	TAILQ_INIT(&fullbuckets);
3930 
3931 	/*
3932 	 * To avoid having ndomain * ndomain buckets for sorting we have a
3933 	 * lock on the current crossfree bucket.  A full matrix with
3934 	 * per-domain locking could be used if necessary.
3935 	 */
3936 	ZONE_CROSS_LOCK(zone);
3937 	while (bucket->ub_cnt > 0) {
3938 		item = bucket->ub_bucket[bucket->ub_cnt - 1];
3939 		domain = _vm_phys_domain(pmap_kextract((vm_offset_t)item));
3940 		zdom = &zone->uz_domain[domain];
3941 		if (zdom->uzd_cross == NULL) {
3942 			zdom->uzd_cross = bucket_alloc(zone, udata, M_NOWAIT);
3943 			if (zdom->uzd_cross == NULL)
3944 				break;
3945 		}
3946 		zdom->uzd_cross->ub_bucket[zdom->uzd_cross->ub_cnt++] = item;
3947 		if (zdom->uzd_cross->ub_cnt == zdom->uzd_cross->ub_entries) {
3948 			TAILQ_INSERT_HEAD(&fullbuckets, zdom->uzd_cross,
3949 			    ub_link);
3950 			zdom->uzd_cross = NULL;
3951 		}
3952 		bucket->ub_cnt--;
3953 	}
3954 	ZONE_CROSS_UNLOCK(zone);
3955 	if (!TAILQ_EMPTY(&fullbuckets)) {
3956 		ZONE_LOCK(zone);
3957 		while ((b = TAILQ_FIRST(&fullbuckets)) != NULL) {
3958 			if ((zone->uz_flags & UMA_ZONE_SMR) != 0)
3959 				b->ub_seq = smr_current(zone->uz_smr);
3960 			TAILQ_REMOVE(&fullbuckets, b, ub_link);
3961 			if (zone->uz_bkt_count >= zone->uz_bkt_max) {
3962 				ZONE_UNLOCK(zone);
3963 				bucket_drain(zone, b);
3964 				bucket_free(zone, b, udata);
3965 				ZONE_LOCK(zone);
3966 			} else {
3967 				domain = _vm_phys_domain(
3968 				    pmap_kextract(
3969 				    (vm_offset_t)b->ub_bucket[0]));
3970 				zdom = &zone->uz_domain[domain];
3971 				zone_put_bucket(zone, zdom, b, true);
3972 			}
3973 		}
3974 		ZONE_UNLOCK(zone);
3975 	}
3976 	if (bucket->ub_cnt != 0)
3977 		bucket_drain(zone, bucket);
3978 	bucket->ub_seq = SMR_SEQ_INVALID;
3979 	bucket_free(zone, bucket, udata);
3980 }
3981 #endif
3982 
3983 static void
3984 zone_free_bucket(uma_zone_t zone, uma_bucket_t bucket, void *udata,
3985     int domain, int itemdomain)
3986 {
3987 	uma_zone_domain_t zdom;
3988 
3989 #ifdef NUMA
3990 	/*
3991 	 * Buckets coming from the wrong domain will be entirely for the
3992 	 * only other domain on two domain systems.  In this case we can
3993 	 * simply cache them.  Otherwise we need to sort them back to the
3994 	 * correct domains.
3995 	 */
3996 	if (domain != itemdomain && vm_ndomains > 2) {
3997 		zone_free_cross(zone, bucket, udata);
3998 		return;
3999 	}
4000 #endif
4001 
4002 	/*
4003 	 * Attempt to save the bucket in the zone's domain bucket cache.
4004 	 *
4005 	 * We bump the zone's bucket size when lock contention indicates that
4006 	 * the current cache size is insufficient to handle the working set.
4007 	 */
4008 	if (ZONE_TRYLOCK(zone) == 0) {
4009 		/* Record contention to size the buckets. */
4010 		ZONE_LOCK(zone);
4011 		if (zone->uz_bucket_size < zone->uz_bucket_size_max)
4012 			zone->uz_bucket_size++;
4013 	}
4014 
4015 	CTR3(KTR_UMA,
4016 	    "uma_zfree: zone %s(%p) putting bucket %p on free list",
4017 	    zone->uz_name, zone, bucket);
4018 	/* ub_cnt is pointing to the last free item */
4019 	KASSERT(bucket->ub_cnt == bucket->ub_entries,
4020 	    ("uma_zfree: Attempting to insert partial bucket onto the full list.\n"));
4021 	if (zone->uz_bkt_count >= zone->uz_bkt_max) {
4022 		ZONE_UNLOCK(zone);
4023 		bucket_drain(zone, bucket);
4024 		bucket_free(zone, bucket, udata);
4025 	} else {
4026 		zdom = &zone->uz_domain[itemdomain];
4027 		zone_put_bucket(zone, zdom, bucket, true);
4028 		ZONE_UNLOCK(zone);
4029 	}
4030 }
4031 
4032 /*
4033  * Populate a free or cross bucket for the current cpu cache.  Free any
4034  * existing full bucket either to the zone cache or back to the slab layer.
4035  *
4036  * Enters and returns in a critical section.  false return indicates that
4037  * we cannot satisfy this free in the cache layer.  true indicates that
4038  * the caller should retry.
4039  */
4040 static __noinline bool
4041 cache_free(uma_zone_t zone, uma_cache_t cache, void *udata, void *item,
4042     int itemdomain)
4043 {
4044 	uma_cache_bucket_t cbucket;
4045 	uma_bucket_t newbucket, bucket;
4046 	int domain;
4047 
4048 	CRITICAL_ASSERT(curthread);
4049 
4050 	if (zone->uz_bucket_size == 0)
4051 		return (false);
4052 
4053 	cache = &zone->uz_cpu[curcpu];
4054 	newbucket = NULL;
4055 
4056 	/*
4057 	 * FIRSTTOUCH domains need to free to the correct zdom.  When
4058 	 * enabled this is the zdom of the item.   The bucket is the
4059 	 * cross bucket if the current domain and itemdomain do not match.
4060 	 */
4061 	cbucket = &cache->uc_freebucket;
4062 #ifdef NUMA
4063 	if ((zone->uz_flags & UMA_ZONE_FIRSTTOUCH) != 0) {
4064 		domain = PCPU_GET(domain);
4065 		if (domain != itemdomain) {
4066 			cbucket = &cache->uc_crossbucket;
4067 			if (cbucket->ucb_cnt != 0)
4068 				atomic_add_64(&zone->uz_xdomain,
4069 				    cbucket->ucb_cnt);
4070 		}
4071 	} else
4072 #endif
4073 		itemdomain = domain = 0;
4074 	bucket = cache_bucket_unload(cbucket);
4075 
4076 	/* We are no longer associated with this CPU. */
4077 	critical_exit();
4078 
4079 	/*
4080 	 * Don't let SMR zones operate without a free bucket.  Force
4081 	 * a synchronize and re-use this one.  We will only degrade
4082 	 * to a synchronize every bucket_size items rather than every
4083 	 * item if we fail to allocate a bucket.
4084 	 */
4085 	if ((zone->uz_flags & UMA_ZONE_SMR) != 0) {
4086 		if (bucket != NULL)
4087 			bucket->ub_seq = smr_advance(zone->uz_smr);
4088 		newbucket = bucket_alloc(zone, udata, M_NOWAIT);
4089 		if (newbucket == NULL && bucket != NULL) {
4090 			bucket_drain(zone, bucket);
4091 			newbucket = bucket;
4092 			bucket = NULL;
4093 		}
4094 	} else if (!bucketdisable)
4095 		newbucket = bucket_alloc(zone, udata, M_NOWAIT);
4096 
4097 	if (bucket != NULL)
4098 		zone_free_bucket(zone, bucket, udata, domain, itemdomain);
4099 
4100 	critical_enter();
4101 	if ((bucket = newbucket) == NULL)
4102 		return (false);
4103 	cache = &zone->uz_cpu[curcpu];
4104 #ifdef NUMA
4105 	/*
4106 	 * Check to see if we should be populating the cross bucket.  If it
4107 	 * is already populated we will fall through and attempt to populate
4108 	 * the free bucket.
4109 	 */
4110 	if ((zone->uz_flags & UMA_ZONE_FIRSTTOUCH) != 0) {
4111 		domain = PCPU_GET(domain);
4112 		if (domain != itemdomain &&
4113 		    cache->uc_crossbucket.ucb_bucket == NULL) {
4114 			cache_bucket_load_cross(cache, bucket);
4115 			return (true);
4116 		}
4117 	}
4118 #endif
4119 	/*
4120 	 * We may have lost the race to fill the bucket or switched CPUs.
4121 	 */
4122 	if (cache->uc_freebucket.ucb_bucket != NULL) {
4123 		critical_exit();
4124 		bucket_free(zone, bucket, udata);
4125 		critical_enter();
4126 	} else
4127 		cache_bucket_load_free(cache, bucket);
4128 
4129 	return (true);
4130 }
4131 
4132 void
4133 uma_zfree_domain(uma_zone_t zone, void *item, void *udata)
4134 {
4135 
4136 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
4137 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
4138 
4139 	CTR2(KTR_UMA, "uma_zfree_domain zone %s(%p)", zone->uz_name, zone);
4140 
4141 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
4142 	    ("uma_zfree_domain: called with spinlock or critical section held"));
4143 
4144 	/* uma_zfree(..., NULL) does nothing, to match free(9). */
4145 	if (item == NULL)
4146 		return;
4147 	zone_free_item(zone, item, udata, SKIP_NONE);
4148 }
4149 
4150 static void
4151 slab_free_item(uma_zone_t zone, uma_slab_t slab, void *item)
4152 {
4153 	uma_keg_t keg;
4154 	uma_domain_t dom;
4155 	int freei;
4156 
4157 	keg = zone->uz_keg;
4158 	KEG_LOCK_ASSERT(keg, slab->us_domain);
4159 
4160 	/* Do we need to remove from any lists? */
4161 	dom = &keg->uk_domain[slab->us_domain];
4162 	if (slab->us_freecount+1 == keg->uk_ipers) {
4163 		LIST_REMOVE(slab, us_link);
4164 		LIST_INSERT_HEAD(&dom->ud_free_slab, slab, us_link);
4165 	} else if (slab->us_freecount == 0) {
4166 		LIST_REMOVE(slab, us_link);
4167 		LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
4168 	}
4169 
4170 	/* Slab management. */
4171 	freei = slab_item_index(slab, keg, item);
4172 	BIT_SET(keg->uk_ipers, freei, &slab->us_free);
4173 	slab->us_freecount++;
4174 
4175 	/* Keg statistics. */
4176 	dom->ud_free++;
4177 }
4178 
4179 static void
4180 zone_release(void *arg, void **bucket, int cnt)
4181 {
4182 	struct mtx *lock;
4183 	uma_zone_t zone;
4184 	uma_slab_t slab;
4185 	uma_keg_t keg;
4186 	uint8_t *mem;
4187 	void *item;
4188 	int i;
4189 
4190 	zone = arg;
4191 	keg = zone->uz_keg;
4192 	lock = NULL;
4193 	if (__predict_false((zone->uz_flags & UMA_ZFLAG_HASH) != 0))
4194 		lock = KEG_LOCK(keg, 0);
4195 	for (i = 0; i < cnt; i++) {
4196 		item = bucket[i];
4197 		if (__predict_true((zone->uz_flags & UMA_ZFLAG_VTOSLAB) != 0)) {
4198 			slab = vtoslab((vm_offset_t)item);
4199 		} else {
4200 			mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
4201 			if ((zone->uz_flags & UMA_ZFLAG_HASH) != 0)
4202 				slab = hash_sfind(&keg->uk_hash, mem);
4203 			else
4204 				slab = (uma_slab_t)(mem + keg->uk_pgoff);
4205 		}
4206 		if (lock != KEG_LOCKPTR(keg, slab->us_domain)) {
4207 			if (lock != NULL)
4208 				mtx_unlock(lock);
4209 			lock = KEG_LOCK(keg, slab->us_domain);
4210 		}
4211 		slab_free_item(zone, slab, item);
4212 	}
4213 	if (lock != NULL)
4214 		mtx_unlock(lock);
4215 }
4216 
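/*
 * A note on the lookup above: zone_release() finds the slab for each freed
 * item either from the page's slab pointer (VTOSLAB), the keg hash table
 * (HASH), or at a fixed offset within the slab's pages (uk_pgoff), and it
 * only switches keg locks when consecutive items belong to different
 * domains, so a mostly single-domain bucket takes each lock once.
 */
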
4217 /*
4218  * Frees a single item to any zone.
4219  *
4220  * Arguments:
4221  *	zone   The zone to free to
4222  *	item   The item we're freeing
4223  *	udata  User supplied data for the dtor
4224  *	skip   Skip dtors and finis
4225  */
4226 static void
4227 zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip)
4228 {
4229 
4230 	/*
4231 	 * If a free is sent directly to an SMR zone we have to
4232 	 * synchronize immediately because the item can instantly
4233 	 * be reallocated. This should only happen in degenerate
4234 	 * cases when no memory is available for per-cpu caches.
4235 	 */
4236 	if ((zone->uz_flags & UMA_ZONE_SMR) != 0 && skip == SKIP_NONE)
4237 		smr_synchronize(zone->uz_smr);
4238 
4239 	item_dtor(zone, item, zone->uz_size, udata, skip);
4240 
4241 	if (skip < SKIP_FINI && zone->uz_fini)
4242 		zone->uz_fini(item, zone->uz_size);
4243 
4244 	zone->uz_release(zone->uz_arg, &item, 1);
4245 
4246 	if (skip & SKIP_CNT)
4247 		return;
4248 
4249 	counter_u64_add(zone->uz_frees, 1);
4250 
4251 	if (zone->uz_max_items > 0)
4252 		zone_free_limit(zone, 1);
4253 }
4254 
4255 /* See uma.h */
4256 int
4257 uma_zone_set_max(uma_zone_t zone, int nitems)
4258 {
4259 	struct uma_bucket_zone *ubz;
4260 	int count;
4261 
4262 	/*
4263 	 * XXX This can misbehave if the zone has any allocations with
4264 	 * no limit and a limit is imposed.  There is currently no
4265 	 * way to clear a limit.
4266 	 */
4267 	ZONE_LOCK(zone);
4268 	ubz = bucket_zone_max(zone, nitems);
4269 	count = ubz != NULL ? ubz->ubz_entries : 0;
4270 	zone->uz_bucket_size_max = zone->uz_bucket_size = count;
4271 	if (zone->uz_bucket_size_min > zone->uz_bucket_size_max)
4272 		zone->uz_bucket_size_min = zone->uz_bucket_size_max;
4273 	zone->uz_max_items = nitems;
4274 	zone->uz_flags |= UMA_ZFLAG_LIMIT;
4275 	zone_update_caches(zone);
4276 	/* We may need to wake waiters. */
4277 	wakeup(&zone->uz_max_items);
4278 	ZONE_UNLOCK(zone);
4279 
4280 	return (nitems);
4281 }
4282 
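/*
 * Hypothetical consumer-side sketch (illustrative only, not part of this
 * file) of imposing a limit with uma_zone_set_max(); the zone name, item
 * type and limit are made up:
 *
 *	zone = uma_zcreate("foo cache", sizeof(struct foo), NULL, NULL,
 *	    NULL, NULL, UMA_ALIGN_PTR, 0);
 *	uma_zone_set_max(zone, 1024);
 *
 * Once the limit is set, allocations beyond it sleep in
 * zone_alloc_limit_hard() for M_WAITOK requests or fail for M_NOWAIT,
 * as implemented earlier in this file.
 */
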
4283 /* See uma.h */
4284 void
4285 uma_zone_set_maxcache(uma_zone_t zone, int nitems)
4286 {
4287 	struct uma_bucket_zone *ubz;
4288 	int bpcpu;
4289 
4290 	ZONE_LOCK(zone);
4291 	ubz = bucket_zone_max(zone, nitems);
4292 	if (ubz != NULL) {
4293 		bpcpu = 2;
4294 		if ((zone->uz_flags & UMA_ZONE_FIRSTTOUCH) != 0)
4295 			/* Count the cross-domain bucket. */
4296 			bpcpu++;
4297 		nitems -= ubz->ubz_entries * bpcpu * mp_ncpus;
4298 		zone->uz_bucket_size_max = ubz->ubz_entries;
4299 	} else {
4300 		zone->uz_bucket_size_max = zone->uz_bucket_size = 0;
4301 	}
4302 	if (zone->uz_bucket_size_min > zone->uz_bucket_size_max)
4303 		zone->uz_bucket_size_min = zone->uz_bucket_size_max;
4304 	zone->uz_bkt_max = nitems;
4305 	ZONE_UNLOCK(zone);
4306 }
4307 
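/*
 * Worked example for the cache sizing above, with hypothetical numbers:
 * given 64-entry buckets, two buckets per CPU (three on FIRSTTOUCH zones,
 * which also keep a cross-domain bucket) and 16 CPUs, a request of
 * nitems = 4096 reserves 64 * 2 * 16 = 2048 items for the per-CPU caches
 * and leaves uz_bkt_max = 2048 items for the zone's bucket cache.
 */
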
4308 /* See uma.h */
4309 int
4310 uma_zone_get_max(uma_zone_t zone)
4311 {
4312 	int nitems;
4313 
4314 	nitems = atomic_load_64(&zone->uz_max_items);
4315 
4316 	return (nitems);
4317 }
4318 
4319 /* See uma.h */
4320 void
4321 uma_zone_set_warning(uma_zone_t zone, const char *warning)
4322 {
4323 
4324 	ZONE_ASSERT_COLD(zone);
4325 	zone->uz_warning = warning;
4326 }
4327 
4328 /* See uma.h */
4329 void
4330 uma_zone_set_maxaction(uma_zone_t zone, uma_maxaction_t maxaction)
4331 {
4332 
4333 	ZONE_ASSERT_COLD(zone);
4334 	TASK_INIT(&zone->uz_maxaction, 0, (task_fn_t *)maxaction, zone);
4335 }
4336 
4337 /* See uma.h */
4338 int
4339 uma_zone_get_cur(uma_zone_t zone)
4340 {
4341 	int64_t nitems;
4342 	u_int i;
4343 
4344 	nitems = 0;
4345 	if (zone->uz_allocs != EARLY_COUNTER && zone->uz_frees != EARLY_COUNTER)
4346 		nitems = counter_u64_fetch(zone->uz_allocs) -
4347 		    counter_u64_fetch(zone->uz_frees);
4348 	CPU_FOREACH(i)
4349 		nitems += atomic_load_64(&zone->uz_cpu[i].uc_allocs) -
4350 		    atomic_load_64(&zone->uz_cpu[i].uc_frees);
4351 
4352 	return (nitems < 0 ? 0 : nitems);
4353 }
4354 
4355 static uint64_t
4356 uma_zone_get_allocs(uma_zone_t zone)
4357 {
4358 	uint64_t nitems;
4359 	u_int i;
4360 
4361 	nitems = 0;
4362 	if (zone->uz_allocs != EARLY_COUNTER)
4363 		nitems = counter_u64_fetch(zone->uz_allocs);
4364 	CPU_FOREACH(i)
4365 		nitems += atomic_load_64(&zone->uz_cpu[i].uc_allocs);
4366 
4367 	return (nitems);
4368 }
4369 
4370 static uint64_t
4371 uma_zone_get_frees(uma_zone_t zone)
4372 {
4373 	uint64_t nitems;
4374 	u_int i;
4375 
4376 	nitems = 0;
4377 	if (zone->uz_frees != EARLY_COUNTER)
4378 		nitems = counter_u64_fetch(zone->uz_frees);
4379 	CPU_FOREACH(i)
4380 		nitems += atomic_load_64(&zone->uz_cpu[i].uc_frees);
4381 
4382 	return (nitems);
4383 }
4384 
4385 #ifdef INVARIANTS
4386 /* Used only for KEG_ASSERT_COLD(). */
4387 static uint64_t
4388 uma_keg_get_allocs(uma_keg_t keg)
4389 {
4390 	uma_zone_t z;
4391 	uint64_t nitems;
4392 
4393 	nitems = 0;
4394 	LIST_FOREACH(z, &keg->uk_zones, uz_link)
4395 		nitems += uma_zone_get_allocs(z);
4396 
4397 	return (nitems);
4398 }
4399 #endif
4400 
4401 /* See uma.h */
4402 void
4403 uma_zone_set_init(uma_zone_t zone, uma_init uminit)
4404 {
4405 	uma_keg_t keg;
4406 
4407 	KEG_GET(zone, keg);
4408 	KEG_ASSERT_COLD(keg);
4409 	keg->uk_init = uminit;
4410 }
4411 
4412 /* See uma.h */
4413 void
4414 uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
4415 {
4416 	uma_keg_t keg;
4417 
4418 	KEG_GET(zone, keg);
4419 	KEG_ASSERT_COLD(keg);
4420 	keg->uk_fini = fini;
4421 }
4422 
4423 /* See uma.h */
4424 void
4425 uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
4426 {
4427 
4428 	ZONE_ASSERT_COLD(zone);
4429 	zone->uz_init = zinit;
4430 }
4431 
4432 /* See uma.h */
4433 void
4434 uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
4435 {
4436 
4437 	ZONE_ASSERT_COLD(zone);
4438 	zone->uz_fini = zfini;
4439 }
4440 
4441 /* See uma.h */
4442 void
4443 uma_zone_set_freef(uma_zone_t zone, uma_free freef)
4444 {
4445 	uma_keg_t keg;
4446 
4447 	KEG_GET(zone, keg);
4448 	KEG_ASSERT_COLD(keg);
4449 	keg->uk_freef = freef;
4450 }
4451 
4452 /* See uma.h */
4453 void
4454 uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
4455 {
4456 	uma_keg_t keg;
4457 
4458 	KEG_GET(zone, keg);
4459 	KEG_ASSERT_COLD(keg);
4460 	keg->uk_allocf = allocf;
4461 }
4462 
4463 /* See uma.h */
4464 void
4465 uma_zone_set_smr(uma_zone_t zone, smr_t smr)
4466 {
4467 
4468 	ZONE_ASSERT_COLD(zone);
4469 
4470 	zone->uz_flags |= UMA_ZONE_SMR;
4471 	zone->uz_smr = smr;
4472 	zone_update_caches(zone);
4473 }
4474 
4475 smr_t
4476 uma_zone_get_smr(uma_zone_t zone)
4477 {
4478 
4479 	return (zone->uz_smr);
4480 }
4481 
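/*
 * Illustrative sketch of the SMR zone contract (the reader side is
 * hypothetical consumer code, not part of this file): the zone is bound to
 * an SMR domain once with uma_zone_set_smr(), writers allocate and free
 * with uma_zalloc_smr()/uma_zfree_smr(), and readers bracket lockless
 * accesses with smr_enter()/smr_exit() on the same smr_t:
 *
 *	smr_enter(uma_zone_get_smr(zone));
 *	... examine items that may be freed concurrently ...
 *	smr_exit(uma_zone_get_smr(zone));
 *
 * uma_zfree_smr() defers reuse until readers drain, via the per-bucket
 * sequence numbers set in cache_free() or, in the degenerate case, via
 * smr_synchronize() in zone_free_item().
 */
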
4482 /* See uma.h */
4483 void
4484 uma_zone_reserve(uma_zone_t zone, int items)
4485 {
4486 	uma_keg_t keg;
4487 
4488 	KEG_GET(zone, keg);
4489 	KEG_ASSERT_COLD(keg);
4490 	keg->uk_reserve = items;
4491 }
4492 
4493 /* See uma.h */
4494 int
4495 uma_zone_reserve_kva(uma_zone_t zone, int count)
4496 {
4497 	uma_keg_t keg;
4498 	vm_offset_t kva;
4499 	u_int pages;
4500 
4501 	KEG_GET(zone, keg);
4502 	KEG_ASSERT_COLD(keg);
4503 	ZONE_ASSERT_COLD(zone);
4504 
4505 	pages = howmany(count, keg->uk_ipers) * keg->uk_ppera;
4506 
4507 #ifdef UMA_MD_SMALL_ALLOC
4508 	if (keg->uk_ppera > 1) {
4509 #else
4510 	if (1) {
4511 #endif
4512 		kva = kva_alloc((vm_size_t)pages * PAGE_SIZE);
4513 		if (kva == 0)
4514 			return (0);
4515 	} else
4516 		kva = 0;
4517 
4518 	ZONE_LOCK(zone);
4519 	MPASS(keg->uk_kva == 0);
4520 	keg->uk_kva = kva;
4521 	keg->uk_offset = 0;
4522 	zone->uz_max_items = pages * keg->uk_ipers;
4523 #ifdef UMA_MD_SMALL_ALLOC
4524 	keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc;
4525 #else
4526 	keg->uk_allocf = noobj_alloc;
4527 #endif
4528 	keg->uk_flags |= UMA_ZFLAG_LIMIT | UMA_ZONE_NOFREE;
4529 	zone->uz_flags |= UMA_ZFLAG_LIMIT | UMA_ZONE_NOFREE;
4530 	zone_update_caches(zone);
4531 	ZONE_UNLOCK(zone);
4532 
4533 	return (1);
4534 }
4535 
4536 /* See uma.h */
4537 void
4538 uma_prealloc(uma_zone_t zone, int items)
4539 {
4540 	struct vm_domainset_iter di;
4541 	uma_domain_t dom;
4542 	uma_slab_t slab;
4543 	uma_keg_t keg;
4544 	int aflags, domain, slabs;
4545 
4546 	KEG_GET(zone, keg);
4547 	slabs = howmany(items, keg->uk_ipers);
4548 	while (slabs-- > 0) {
4549 		aflags = M_NOWAIT;
4550 		vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain,
4551 		    &aflags);
4552 		for (;;) {
4553 			slab = keg_alloc_slab(keg, zone, domain, M_WAITOK,
4554 			    aflags);
4555 			if (slab != NULL) {
4556 				dom = &keg->uk_domain[slab->us_domain];
4557 				LIST_REMOVE(slab, us_link);
4558 				LIST_INSERT_HEAD(&dom->ud_free_slab, slab,
4559 				    us_link);
4560 				KEG_UNLOCK(keg, slab->us_domain);
4561 				break;
4562 			}
4563 			if (vm_domainset_iter_policy(&di, &domain) != 0)
4564 				vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask);
4565 		}
4566 	}
4567 }
4568 
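/*
 * A note on the preallocation above: uma_prealloc() builds
 * howmany(items, uk_ipers) whole slabs up front and places them on the
 * per-domain free-slab lists, so early consumers are satisfied without
 * reaching the page allocator.  As a hypothetical example, requesting 256
 * items from a zone with 32 items per slab builds 8 slabs.
 */
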
4569 /* See uma.h */
4570 void
4571 uma_reclaim(int req)
4572 {
4573 
4574 	CTR0(KTR_UMA, "UMA: vm asked us to release pages!");
4575 	sx_xlock(&uma_reclaim_lock);
4576 	bucket_enable();
4577 
4578 	switch (req) {
4579 	case UMA_RECLAIM_TRIM:
4580 		zone_foreach(zone_trim, NULL);
4581 		break;
4582 	case UMA_RECLAIM_DRAIN:
4583 	case UMA_RECLAIM_DRAIN_CPU:
4584 		zone_foreach(zone_drain, NULL);
4585 		if (req == UMA_RECLAIM_DRAIN_CPU) {
4586 			pcpu_cache_drain_safe(NULL);
4587 			zone_foreach(zone_drain, NULL);
4588 		}
4589 		break;
4590 	default:
4591 		panic("unhandled reclamation request %d", req);
4592 	}
4593 
4594 	/*
4595 	 * Some slabs may have been freed after the slab zones were visited
4596 	 * earlier in the pass; drain them again so that pages emptied by
4597 	 * draining other zones can now be freed.  The same applies to buckets.
4598 	 */
4599 	zone_drain(slabzones[0], NULL);
4600 	zone_drain(slabzones[1], NULL);
4601 	bucket_zone_drain();
4602 	sx_xunlock(&uma_reclaim_lock);
4603 }
4604 
4605 static volatile int uma_reclaim_needed;
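/*
 * Summary of the request levels handled above and in uma_zone_reclaim()
 * below: UMA_RECLAIM_TRIM invokes zone_trim() on each zone,
 * UMA_RECLAIM_DRAIN drains the zone bucket caches with zone_drain(), and
 * UMA_RECLAIM_DRAIN_CPU additionally flushes the per-CPU caches first, as
 * uma_reclaim_worker() below does when woken.
 */
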
4606 
4607 void
4608 uma_reclaim_wakeup(void)
4609 {
4610 
4611 	if (atomic_fetchadd_int(&uma_reclaim_needed, 1) == 0)
4612 		wakeup(uma_reclaim);
4613 }
4614 
4615 void
4616 uma_reclaim_worker(void *arg __unused)
4617 {
4618 
4619 	for (;;) {
4620 		sx_xlock(&uma_reclaim_lock);
4621 		while (atomic_load_int(&uma_reclaim_needed) == 0)
4622 			sx_sleep(uma_reclaim, &uma_reclaim_lock, PVM, "umarcl",
4623 			    hz);
4624 		sx_xunlock(&uma_reclaim_lock);
4625 		EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_KMEM);
4626 		uma_reclaim(UMA_RECLAIM_DRAIN_CPU);
4627 		atomic_store_int(&uma_reclaim_needed, 0);
4628 		/* Don't fire more than once per-second. */
4629 		pause("umarclslp", hz);
4630 	}
4631 }
4632 
4633 /* See uma.h */
4634 void
4635 uma_zone_reclaim(uma_zone_t zone, int req)
4636 {
4637 
4638 	switch (req) {
4639 	case UMA_RECLAIM_TRIM:
4640 		zone_trim(zone, NULL);
4641 		break;
4642 	case UMA_RECLAIM_DRAIN:
4643 		zone_drain(zone, NULL);
4644 		break;
4645 	case UMA_RECLAIM_DRAIN_CPU:
4646 		pcpu_cache_drain_safe(zone);
4647 		zone_drain(zone, NULL);
4648 		break;
4649 	default:
4650 		panic("unhandled reclamation request %d", req);
4651 	}
4652 }
4653 
4654 /* See uma.h */
4655 int
4656 uma_zone_exhausted(uma_zone_t zone)
4657 {
4658 
4659 	return (atomic_load_32(&zone->uz_sleepers) > 0);
4660 }
4661 
4662 unsigned long
4663 uma_limit(void)
4664 {
4665 
4666 	return (uma_kmem_limit);
4667 }
4668 
4669 void
4670 uma_set_limit(unsigned long limit)
4671 {
4672 
4673 	uma_kmem_limit = limit;
4674 }
4675 
4676 unsigned long
4677 uma_size(void)
4678 {
4679 
4680 	return (atomic_load_long(&uma_kmem_total));
4681 }
4682 
4683 long
4684 uma_avail(void)
4685 {
4686 
4687 	return (uma_kmem_limit - uma_size());
4688 }
4689 
4690 #ifdef DDB
4691 /*
4692  * Generate statistics across both the zone and its per-cpu caches.  Return
4693  * each statistic only if its corresponding pointer is non-NULL.
4694  *
4695  * Note: does not update the zone statistics, as it can't safely clear the
4696  * per-CPU cache statistic.
4697  *
4698  */
4699 static void
4700 uma_zone_sumstat(uma_zone_t z, long *cachefreep, uint64_t *allocsp,
4701     uint64_t *freesp, uint64_t *sleepsp, uint64_t *xdomainp)
4702 {
4703 	uma_cache_t cache;
4704 	uint64_t allocs, frees, sleeps, xdomain;
4705 	int cachefree, cpu;
4706 
4707 	allocs = frees = sleeps = xdomain = 0;
4708 	cachefree = 0;
4709 	CPU_FOREACH(cpu) {
4710 		cache = &z->uz_cpu[cpu];
4711 		cachefree += cache->uc_allocbucket.ucb_cnt;
4712 		cachefree += cache->uc_freebucket.ucb_cnt;
4713 		xdomain += cache->uc_crossbucket.ucb_cnt;
4714 		cachefree += cache->uc_crossbucket.ucb_cnt;
4715 		allocs += cache->uc_allocs;
4716 		frees += cache->uc_frees;
4717 	}
4718 	allocs += counter_u64_fetch(z->uz_allocs);
4719 	frees += counter_u64_fetch(z->uz_frees);
4720 	sleeps += z->uz_sleeps;
4721 	xdomain += z->uz_xdomain;
4722 	if (cachefreep != NULL)
4723 		*cachefreep = cachefree;
4724 	if (allocsp != NULL)
4725 		*allocsp = allocs;
4726 	if (freesp != NULL)
4727 		*freesp = frees;
4728 	if (sleepsp != NULL)
4729 		*sleepsp = sleeps;
4730 	if (xdomainp != NULL)
4731 		*xdomainp = xdomain;
4732 }
4733 #endif /* DDB */
4734 
4735 static int
4736 sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS)
4737 {
4738 	uma_keg_t kz;
4739 	uma_zone_t z;
4740 	int count;
4741 
4742 	count = 0;
4743 	rw_rlock(&uma_rwlock);
4744 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
4745 		LIST_FOREACH(z, &kz->uk_zones, uz_link)
4746 			count++;
4747 	}
4748 	LIST_FOREACH(z, &uma_cachezones, uz_link)
4749 		count++;
4750 
4751 	rw_runlock(&uma_rwlock);
4752 	return (sysctl_handle_int(oidp, &count, 0, req));
4753 }
4754 
4755 static void
4756 uma_vm_zone_stats(struct uma_type_header *uth, uma_zone_t z, struct sbuf *sbuf,
4757     struct uma_percpu_stat *ups, bool internal)
4758 {
4759 	uma_zone_domain_t zdom;
4760 	uma_cache_t cache;
4761 	int i;
4762 
4763 
4764 	for (i = 0; i < vm_ndomains; i++) {
4765 		zdom = &z->uz_domain[i];
4766 		uth->uth_zone_free += zdom->uzd_nitems;
4767 	}
4768 	uth->uth_allocs = counter_u64_fetch(z->uz_allocs);
4769 	uth->uth_frees = counter_u64_fetch(z->uz_frees);
4770 	uth->uth_fails = counter_u64_fetch(z->uz_fails);
4771 	uth->uth_sleeps = z->uz_sleeps;
4772 	uth->uth_xdomain = z->uz_xdomain;
4773 
4774 	/*
4775 	 * While it is not normally safe to access the cache bucket pointers
4776 	 * while not on the CPU that owns the cache, we only allow the pointers
4777 	 * to be exchanged without the zone lock held, not invalidated, so
4778 	 * accept the possible race associated with bucket exchange during
4779 	 * monitoring.  Use atomic_load_ptr() to ensure that the bucket pointers
4780 	 * are loaded only once.
4781 	 */
4782 	for (i = 0; i < mp_maxid + 1; i++) {
4783 		bzero(&ups[i], sizeof(*ups));
4784 		if (internal || CPU_ABSENT(i))
4785 			continue;
4786 		cache = &z->uz_cpu[i];
4787 		ups[i].ups_cache_free += cache->uc_allocbucket.ucb_cnt;
4788 		ups[i].ups_cache_free += cache->uc_freebucket.ucb_cnt;
4789 		ups[i].ups_cache_free += cache->uc_crossbucket.ucb_cnt;
4790 		ups[i].ups_allocs = cache->uc_allocs;
4791 		ups[i].ups_frees = cache->uc_frees;
4792 	}
4793 }
4794 
4795 static int
4796 sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
4797 {
4798 	struct uma_stream_header ush;
4799 	struct uma_type_header uth;
4800 	struct uma_percpu_stat *ups;
4801 	struct sbuf sbuf;
4802 	uma_keg_t kz;
4803 	uma_zone_t z;
4804 	uint64_t items;
4805 	uint32_t kfree, pages;
4806 	int count, error, i;
4807 
4808 	error = sysctl_wire_old_buffer(req, 0);
4809 	if (error != 0)
4810 		return (error);
4811 	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
4812 	sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL);
4813 	ups = malloc((mp_maxid + 1) * sizeof(*ups), M_TEMP, M_WAITOK);
4814 
4815 	count = 0;
4816 	rw_rlock(&uma_rwlock);
4817 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
4818 		LIST_FOREACH(z, &kz->uk_zones, uz_link)
4819 			count++;
4820 	}
4821 
4822 	LIST_FOREACH(z, &uma_cachezones, uz_link)
4823 		count++;
4824 
4825 	/*
4826 	 * Insert stream header.
4827 	 */
4828 	bzero(&ush, sizeof(ush));
4829 	ush.ush_version = UMA_STREAM_VERSION;
4830 	ush.ush_maxcpus = (mp_maxid + 1);
4831 	ush.ush_count = count;
4832 	(void)sbuf_bcat(&sbuf, &ush, sizeof(ush));
4833 
4834 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
4835 		kfree = pages = 0;
4836 		for (i = 0; i < vm_ndomains; i++) {
4837 			kfree += kz->uk_domain[i].ud_free;
4838 			pages += kz->uk_domain[i].ud_pages;
4839 		}
4840 		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
4841 			bzero(&uth, sizeof(uth));
4842 			ZONE_LOCK(z);
4843 			strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
4844 			uth.uth_align = kz->uk_align;
4845 			uth.uth_size = kz->uk_size;
4846 			uth.uth_rsize = kz->uk_rsize;
4847 			if (z->uz_max_items > 0) {
4848 				items = UZ_ITEMS_COUNT(z->uz_items);
4849 				uth.uth_pages = (items / kz->uk_ipers) *
4850 					kz->uk_ppera;
4851 			} else
4852 				uth.uth_pages = pages;
4853 			uth.uth_maxpages = (z->uz_max_items / kz->uk_ipers) *
4854 			    kz->uk_ppera;
4855 			uth.uth_limit = z->uz_max_items;
4856 			uth.uth_keg_free = kfree;
4857 
4858 			/*
4859 			 * A zone is secondary if it is not the first entry
4860 			 * on the keg's zone list.
4861 			 */
4862 			if ((z->uz_flags & UMA_ZONE_SECONDARY) &&
4863 			    (LIST_FIRST(&kz->uk_zones) != z))
4864 				uth.uth_zone_flags = UTH_ZONE_SECONDARY;
4865 			uma_vm_zone_stats(&uth, z, &sbuf, ups,
4866 			    kz->uk_flags & UMA_ZFLAG_INTERNAL);
4867 			ZONE_UNLOCK(z);
4868 			(void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
4869 			for (i = 0; i < mp_maxid + 1; i++)
4870 				(void)sbuf_bcat(&sbuf, &ups[i], sizeof(ups[i]));
4871 		}
4872 	}
4873 	LIST_FOREACH(z, &uma_cachezones, uz_link) {
4874 		bzero(&uth, sizeof(uth));
4875 		ZONE_LOCK(z);
4876 		strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
4877 		uth.uth_size = z->uz_size;
4878 		uma_vm_zone_stats(&uth, z, &sbuf, ups, false);
4879 		ZONE_UNLOCK(z);
4880 		(void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
4881 		for (i = 0; i < mp_maxid + 1; i++)
4882 			(void)sbuf_bcat(&sbuf, &ups[i], sizeof(ups[i]));
4883 	}
4884 
4885 	rw_runlock(&uma_rwlock);
4886 	error = sbuf_finish(&sbuf);
4887 	sbuf_delete(&sbuf);
4888 	free(ups, M_TEMP);
4889 	return (error);
4890 }
4891 
4892 int
4893 sysctl_handle_uma_zone_max(SYSCTL_HANDLER_ARGS)
4894 {
4895 	uma_zone_t zone = *(uma_zone_t *)arg1;
4896 	int error, max;
4897 
4898 	max = uma_zone_get_max(zone);
4899 	error = sysctl_handle_int(oidp, &max, 0, req);
4900 	if (error || !req->newptr)
4901 		return (error);
4902 
4903 	uma_zone_set_max(zone, max);
4904 
4905 	return (0);
4906 }
4907 
4908 int
4909 sysctl_handle_uma_zone_cur(SYSCTL_HANDLER_ARGS)
4910 {
4911 	uma_zone_t zone;
4912 	int cur;
4913 
4914 	/*
4915 	 * Some callers want to add sysctls for global zones that
4916 	 * may not yet exist so they pass a pointer to a pointer.
4917 	 */
4918 	if (arg2 == 0)
4919 		zone = *(uma_zone_t *)arg1;
4920 	else
4921 		zone = arg1;
4922 	cur = uma_zone_get_cur(zone);
4923 	return (sysctl_handle_int(oidp, &cur, 0, req));
4924 }
4925 
4926 static int
4927 sysctl_handle_uma_zone_allocs(SYSCTL_HANDLER_ARGS)
4928 {
4929 	uma_zone_t zone = arg1;
4930 	uint64_t cur;
4931 
4932 	cur = uma_zone_get_allocs(zone);
4933 	return (sysctl_handle_64(oidp, &cur, 0, req));
4934 }
4935 
4936 static int
4937 sysctl_handle_uma_zone_frees(SYSCTL_HANDLER_ARGS)
4938 {
4939 	uma_zone_t zone = arg1;
4940 	uint64_t cur;
4941 
4942 	cur = uma_zone_get_frees(zone);
4943 	return (sysctl_handle_64(oidp, &cur, 0, req));
4944 }
4945 
4946 static int
4947 sysctl_handle_uma_zone_flags(SYSCTL_HANDLER_ARGS)
4948 {
4949 	struct sbuf sbuf;
4950 	uma_zone_t zone = arg1;
4951 	int error;
4952 
4953 	sbuf_new_for_sysctl(&sbuf, NULL, 0, req);
4954 	if (zone->uz_flags != 0)
4955 		sbuf_printf(&sbuf, "0x%b", zone->uz_flags, PRINT_UMA_ZFLAGS);
4956 	else
4957 		sbuf_printf(&sbuf, "0");
4958 	error = sbuf_finish(&sbuf);
4959 	sbuf_delete(&sbuf);
4960 
4961 	return (error);
4962 }
4963 
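/*
 * Worked example for the efficiency calculation below, using a hypothetical
 * keg with neither the OFFPAGE nor the PCPU adjustment: with uk_ppera = 1,
 * PAGE_SIZE = 4096, uk_ipers = 15, a client size of 256 bytes and 8-byte
 * alignment (uk_align = 7), avail = 15 * roundup2(256, 8) = 3840 and
 * effpct = 100 * 3840 / 4096 = 93.
 */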
4964 static int
4965 sysctl_handle_uma_slab_efficiency(SYSCTL_HANDLER_ARGS)
4966 {
4967 	uma_keg_t keg = arg1;
4968 	int avail, effpct, total;
4969 
4970 	total = keg->uk_ppera * PAGE_SIZE;
4971 	if ((keg->uk_flags & UMA_ZFLAG_OFFPAGE) != 0)
4972 		total += slabzone(keg->uk_ipers)->uz_keg->uk_rsize;
4973 	/*
4974 	 * We consider the client's requested size and alignment here, not the
4975 	 * keg's computed real size (uk_rsize), because the real size is also
4976 	 * adjusted for internal implementation reasons (max bitset size).
4977 	 */
4978 	avail = keg->uk_ipers * roundup2(keg->uk_size, keg->uk_align + 1);
4979 	if ((keg->uk_flags & UMA_ZONE_PCPU) != 0)
4980 		avail *= mp_maxid + 1;
4981 	effpct = 100 * avail / total;
4982 	return (sysctl_handle_int(oidp, &effpct, 0, req));
4983 }
4984 
4985 static int
4986 sysctl_handle_uma_zone_items(SYSCTL_HANDLER_ARGS)
4987 {
4988 	uma_zone_t zone = arg1;
4989 	uint64_t cur;
4990 
4991 	cur = UZ_ITEMS_COUNT(atomic_load_64(&zone->uz_items));
4992 	return (sysctl_handle_64(oidp, &cur, 0, req));
4993 }
4994 
4995 #ifdef INVARIANTS
4996 static uma_slab_t
4997 uma_dbg_getslab(uma_zone_t zone, void *item)
4998 {
4999 	uma_slab_t slab;
5000 	uma_keg_t keg;
5001 	uint8_t *mem;
5002 
5003 	/*
5004 	 * It is safe to return the slab here even though the
5005 	 * zone is unlocked because the item's allocation state
5006 	 * essentially holds a reference.
5007 	 */
5008 	mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
5009 	if ((zone->uz_flags & UMA_ZFLAG_CACHE) != 0)
5010 		return (NULL);
5011 	if (zone->uz_flags & UMA_ZFLAG_VTOSLAB)
5012 		return (vtoslab((vm_offset_t)mem));
5013 	keg = zone->uz_keg;
5014 	if ((keg->uk_flags & UMA_ZFLAG_HASH) == 0)
5015 		return ((uma_slab_t)(mem + keg->uk_pgoff));
5016 	KEG_LOCK(keg, 0);
5017 	slab = hash_sfind(&keg->uk_hash, mem);
5018 	KEG_UNLOCK(keg, 0);
5019 
5020 	return (slab);
5021 }
5022 
5023 static bool
5024 uma_dbg_zskip(uma_zone_t zone, void *mem)
5025 {
5026 
5027 	if ((zone->uz_flags & UMA_ZFLAG_CACHE) != 0)
5028 		return (true);
5029 
5030 	return (uma_dbg_kskip(zone->uz_keg, mem));
5031 }
5032 
5033 static bool
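/*
 * A note on the sampling below: dbg_divisor selects roughly one out of
 * every dbg_divisor items for the per-item INVARIANTS checks; an item is
 * checked only when its computed index is an exact multiple of the divisor.
 * For example, a hypothetical dbg_divisor of 3 verifies about one third of
 * items, 1 verifies every item, and 0 skips the checks entirely.
 */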
5034 uma_dbg_kskip(uma_keg_t keg, void *mem)
5035 {
5036 	uintptr_t idx;
5037 
5038 	if (dbg_divisor == 0)
5039 		return (true);
5040 
5041 	if (dbg_divisor == 1)
5042 		return (false);
5043 
5044 	idx = (uintptr_t)mem >> PAGE_SHIFT;
5045 	if (keg->uk_ipers > 1) {
5046 		idx *= keg->uk_ipers;
5047 		idx += ((uintptr_t)mem & PAGE_MASK) / keg->uk_rsize;
5048 	}
5049 
5050 	if ((idx / dbg_divisor) * dbg_divisor != idx) {
5051 		counter_u64_add(uma_skip_cnt, 1);
5052 		return (true);
5053 	}
5054 	counter_u64_add(uma_dbg_cnt, 1);
5055 
5056 	return (false);
5057 }
5058 
5059 /*
5060  * Set up the slab's freei data such that uma_dbg_free can function.
5061  *
5062  */
5063 static void
5064 uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item)
5065 {
5066 	uma_keg_t keg;
5067 	int freei;
5068 
5069 	if (slab == NULL) {
5070 		slab = uma_dbg_getslab(zone, item);
5071 		if (slab == NULL)
5072 			panic("uma: item %p did not belong to zone %s\n",
5073 			    item, zone->uz_name);
5074 	}
5075 	keg = zone->uz_keg;
5076 	freei = slab_item_index(slab, keg, item);
5077 
5078 	if (BIT_ISSET(keg->uk_ipers, freei, slab_dbg_bits(slab, keg)))
5079 		panic("Duplicate alloc of %p from zone %p(%s) slab %p(%d)\n",
5080 		    item, zone, zone->uz_name, slab, freei);
5081 	BIT_SET_ATOMIC(keg->uk_ipers, freei, slab_dbg_bits(slab, keg));
5082 }
5083 
5084 /*
5085  * Verifies freed addresses.  Checks for alignment, valid slab membership
5086  * and duplicate frees.
5087  *
5088  */
5089 static void
5090 uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
5091 {
5092 	uma_keg_t keg;
5093 	int freei;
5094 
5095 	if (slab == NULL) {
5096 		slab = uma_dbg_getslab(zone, item);
5097 		if (slab == NULL)
5098 			panic("uma: Freed item %p did not belong to zone %s\n",
5099 			    item, zone->uz_name);
5100 	}
5101 	keg = zone->uz_keg;
5102 	freei = slab_item_index(slab, keg, item);
5103 
5104 	if (freei >= keg->uk_ipers)
5105 		panic("Invalid free of %p from zone %p(%s) slab %p(%d)\n",
5106 		    item, zone, zone->uz_name, slab, freei);
5107 
5108 	if (slab_item(slab, keg, freei) != item)
5109 		panic("Unaligned free of %p from zone %p(%s) slab %p(%d)\n",
5110 		    item, zone, zone->uz_name, slab, freei);
5111 
5112 	if (!BIT_ISSET(keg->uk_ipers, freei, slab_dbg_bits(slab, keg)))
5113 		panic("Duplicate free of %p from zone %p(%s) slab %p(%d)\n",
5114 		    item, zone, zone->uz_name, slab, freei);
5115 
5116 	BIT_CLR_ATOMIC(keg->uk_ipers, freei, slab_dbg_bits(slab, keg));
5117 }
5118 #endif /* INVARIANTS */
5119 
5120 #ifdef DDB
5121 static int64_t
5122 get_uma_stats(uma_keg_t kz, uma_zone_t z, uint64_t *allocs, uint64_t *used,
5123     uint64_t *sleeps, long *cachefree, uint64_t *xdomain)
5124 {
5125 	uint64_t frees;
5126 	int i;
5127 
5128 	if (kz->uk_flags & UMA_ZFLAG_INTERNAL) {
5129 		*allocs = counter_u64_fetch(z->uz_allocs);
5130 		frees = counter_u64_fetch(z->uz_frees);
5131 		*sleeps = z->uz_sleeps;
5132 		*cachefree = 0;
5133 		*xdomain = 0;
5134 	} else
5135 		uma_zone_sumstat(z, cachefree, allocs, &frees, sleeps,
5136 		    xdomain);
5137 	for (i = 0; i < vm_ndomains; i++) {
5138 		*cachefree += z->uz_domain[i].uzd_nitems;
5139 		if (!((z->uz_flags & UMA_ZONE_SECONDARY) &&
5140 		    (LIST_FIRST(&kz->uk_zones) != z)))
5141 			*cachefree += kz->uk_domain[i].ud_free;
5142 	}
5143 	*used = *allocs - frees;
5144 	return (((int64_t)*used + *cachefree) * kz->uk_size);
5145 }
5146 
5147 DB_SHOW_COMMAND(uma, db_show_uma)
5148 {
5149 	const char *fmt_hdr, *fmt_entry;
5150 	uma_keg_t kz;
5151 	uma_zone_t z;
5152 	uint64_t allocs, used, sleeps, xdomain;
5153 	long cachefree;
5154 	/* variables for sorting */
5155 	uma_keg_t cur_keg;
5156 	uma_zone_t cur_zone, last_zone;
5157 	int64_t cur_size, last_size, size;
5158 	int ties;
5159 
5160 	/* /i option produces machine-parseable CSV output */
5161 	if (modif[0] == 'i') {
5162 		fmt_hdr = "%s,%s,%s,%s,%s,%s,%s,%s,%s\n";
5163 		fmt_entry = "\"%s\",%ju,%jd,%ld,%ju,%ju,%u,%jd,%ju\n";
5164 	} else {
5165 		fmt_hdr = "%18s %6s %7s %7s %11s %7s %7s %10s %8s\n";
5166 		fmt_entry = "%18s %6ju %7jd %7ld %11ju %7ju %7u %10jd %8ju\n";
5167 	}
5168 
5169 	db_printf(fmt_hdr, "Zone", "Size", "Used", "Free", "Requests",
5170 	    "Sleeps", "Bucket", "Total Mem", "XFree");
5171 
5172 	/* Sort the zones with largest size first. */
5173 	last_zone = NULL;
5174 	last_size = INT64_MAX;
5175 	for (;;) {
5176 		cur_zone = NULL;
5177 		cur_size = -1;
5178 		ties = 0;
5179 		LIST_FOREACH(kz, &uma_kegs, uk_link) {
5180 			LIST_FOREACH(z, &kz->uk_zones, uz_link) {
5181 				/*
5182 				 * In the case of size ties, print out zones
5183 				 * in the order they are encountered.  That is,
5184 				 * when we encounter the most recently output
5185 				 * zone, we have already printed all preceding
5186 				 * ties, and we must print all following ties.
5187 				 */
5188 				if (z == last_zone) {
5189 					ties = 1;
5190 					continue;
5191 				}
5192 				size = get_uma_stats(kz, z, &allocs, &used,
5193 				    &sleeps, &cachefree, &xdomain);
5194 				if (size > cur_size && size < last_size + ties)
5195 				{
5196 					cur_size = size;
5197 					cur_zone = z;
5198 					cur_keg = kz;
5199 				}
5200 			}
5201 		}
5202 		if (cur_zone == NULL)
5203 			break;
5204 
5205 		size = get_uma_stats(cur_keg, cur_zone, &allocs, &used,
5206 		    &sleeps, &cachefree, &xdomain);
5207 		db_printf(fmt_entry, cur_zone->uz_name,
5208 		    (uintmax_t)cur_keg->uk_size, (intmax_t)used, cachefree,
5209 		    (uintmax_t)allocs, (uintmax_t)sleeps,
5210 		    (unsigned)cur_zone->uz_bucket_size, (intmax_t)size,
5211 		    xdomain);
5212 
5213 		if (db_pager_quit)
5214 			return;
5215 		last_zone = cur_zone;
5216 		last_size = cur_size;
5217 	}
5218 }
5219 
5220 DB_SHOW_COMMAND(umacache, db_show_umacache)
5221 {
5222 	uma_zone_t z;
5223 	uint64_t allocs, frees;
5224 	long cachefree;
5225 	int i;
5226 
5227 	db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free",
5228 	    "Requests", "Bucket");
5229 	LIST_FOREACH(z, &uma_cachezones, uz_link) {
5230 		uma_zone_sumstat(z, &cachefree, &allocs, &frees, NULL, NULL);
5231 		for (i = 0; i < vm_ndomains; i++)
5232 			cachefree += z->uz_domain[i].uzd_nitems;
5233 		db_printf("%18s %8ju %8jd %8ld %12ju %8u\n",
5234 		    z->uz_name, (uintmax_t)z->uz_size,
5235 		    (intmax_t)(allocs - frees), cachefree,
5236 		    (uintmax_t)allocs, z->uz_bucket_size);
5237 		if (db_pager_quit)
5238 			return;
5239 	}
5240 }
5241 #endif	/* DDB */
5242