1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson <jeff@FreeBSD.org>
5  * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
6  * Copyright (c) 2004-2006 Robert N. M. Watson
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice unmodified, this list of conditions, and the following
14  *    disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /*
32  * uma_core.c  Implementation of the Universal Memory allocator
33  *
34  * This allocator is intended to replace the multitude of similar object caches
35  * in the standard FreeBSD kernel.  The intent is to be flexible as well as
36  * efficient.  A primary design goal is to return unused memory to the rest of
37  * the system.  This will make the system as a whole more flexible due to the
38  * ability to move memory to subsystems which most need it instead of leaving
39  * pools of reserved memory unused.
40  *
41  * The basic ideas stem from similar slab/zone based allocators whose algorithms
42  * are well known.
43  *
44  */
45 
46 /*
47  * TODO:
48  *	- Improve memory usage for large allocations
49  *	- Investigate cache size adjustments
50  */
51 
52 #include <sys/cdefs.h>
53 __FBSDID("$FreeBSD$");
54 
55 #include "opt_ddb.h"
56 #include "opt_param.h"
57 #include "opt_vm.h"
58 
59 #include <sys/param.h>
60 #include <sys/systm.h>
61 #include <sys/bitset.h>
62 #include <sys/domainset.h>
63 #include <sys/eventhandler.h>
64 #include <sys/kernel.h>
65 #include <sys/types.h>
66 #include <sys/limits.h>
67 #include <sys/queue.h>
68 #include <sys/malloc.h>
69 #include <sys/ktr.h>
70 #include <sys/lock.h>
71 #include <sys/sysctl.h>
72 #include <sys/mutex.h>
73 #include <sys/proc.h>
74 #include <sys/random.h>
75 #include <sys/rwlock.h>
76 #include <sys/sbuf.h>
77 #include <sys/sched.h>
78 #include <sys/smp.h>
79 #include <sys/taskqueue.h>
80 #include <sys/vmmeter.h>
81 
82 #include <vm/vm.h>
83 #include <vm/vm_domainset.h>
84 #include <vm/vm_object.h>
85 #include <vm/vm_page.h>
86 #include <vm/vm_pageout.h>
87 #include <vm/vm_param.h>
88 #include <vm/vm_phys.h>
89 #include <vm/vm_pagequeue.h>
90 #include <vm/vm_map.h>
91 #include <vm/vm_kern.h>
92 #include <vm/vm_extern.h>
93 #include <vm/uma.h>
94 #include <vm/uma_int.h>
95 #include <vm/uma_dbg.h>
96 
97 #include <ddb/ddb.h>
98 
99 #ifdef DEBUG_MEMGUARD
100 #include <vm/memguard.h>
101 #endif
102 
103 /*
104  * These are the zones from which all kegs and zones are allocated.
105  */
106 static uma_zone_t kegs;
107 static uma_zone_t zones;
108 
109 /* This is the zone from which all offpage uma_slab_ts are allocated. */
110 static uma_zone_t slabzone;
111 
112 /*
113  * The initial hash tables come out of this zone so they can be allocated
114  * prior to malloc coming up.
115  */
116 static uma_zone_t hashzone;
117 
118 /* The boot-time adjusted value for cache line alignment. */
119 int uma_align_cache = 64 - 1;
120 
121 static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");
122 
123 /*
124  * Are we allowed to allocate buckets?
125  */
126 static int bucketdisable = 1;
127 
128 /* Linked list of all kegs in the system */
129 static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);
130 
131 /* Linked list of all cache-only zones in the system */
132 static LIST_HEAD(,uma_zone) uma_cachezones =
133     LIST_HEAD_INITIALIZER(uma_cachezones);
134 
135 /* This RW lock protects the keg list */
136 static struct rwlock_padalign __exclusive_cache_line uma_rwlock;
137 
138 /*
139  * Pointer to and page count of the pool of memory that is preallocated
140  * at startup to bootstrap UMA.
141  */
142 static char *bootmem;
143 static int boot_pages;
144 
145 static struct sx uma_drain_lock;
146 
147 /*
148  * kmem soft limit, initialized by uma_set_limit().  Ensure that early
149  * allocations don't trigger a wakeup of the reclaim thread.
150  */
151 static unsigned long uma_kmem_limit = LONG_MAX;
152 SYSCTL_ULONG(_vm, OID_AUTO, uma_kmem_limit, CTLFLAG_RD, &uma_kmem_limit, 0,
153     "UMA kernel memory soft limit");
154 static unsigned long uma_kmem_total;
155 SYSCTL_ULONG(_vm, OID_AUTO, uma_kmem_total, CTLFLAG_RD, &uma_kmem_total, 0,
156     "UMA kernel memory usage");
157 
158 /* Is the VM done starting up? */
159 static enum { BOOT_COLD = 0, BOOT_STRAPPED, BOOT_PAGEALLOC, BOOT_BUCKETS,
160     BOOT_RUNNING } booted = BOOT_COLD;
161 
162 /*
163  * This is the handle used to schedule events that need to happen
164  * outside of the allocation fast path.
165  */
166 static struct callout uma_callout;
167 #define	UMA_TIMEOUT	20		/* Seconds for callout interval. */
168 
169 /*
170  * This structure is passed as the zone ctor arg so that I don't have to create
171  * a special allocation function just for zones.
172  */
173 struct uma_zctor_args {
174 	const char *name;
175 	size_t size;
176 	uma_ctor ctor;
177 	uma_dtor dtor;
178 	uma_init uminit;
179 	uma_fini fini;
180 	uma_import import;
181 	uma_release release;
182 	void *arg;
183 	uma_keg_t keg;
184 	int align;
185 	uint32_t flags;
186 };
187 
188 struct uma_kctor_args {
189 	uma_zone_t zone;
190 	size_t size;
191 	uma_init uminit;
192 	uma_fini fini;
193 	int align;
194 	uint32_t flags;
195 };
196 
197 struct uma_bucket_zone {
198 	uma_zone_t	ubz_zone;
199 	char		*ubz_name;
200 	int		ubz_entries;	/* Number of items it can hold. */
201 	int		ubz_maxsize;	/* Maximum allocation size per-item. */
202 };
203 
204 /*
205  * Compute the actual number of bucket entries so that buckets pack into
206  * power-of-two allocation sizes for more efficient space utilization.
207  */
208 #define	BUCKET_SIZE(n)						\
209     (((sizeof(void *) * (n)) - sizeof(struct uma_bucket)) / sizeof(void *))
210 
211 #define	BUCKET_MAX	BUCKET_SIZE(256)
212 #define	BUCKET_MIN	BUCKET_SIZE(4)
213 
214 struct uma_bucket_zone bucket_zones[] = {
215 	{ NULL, "4 Bucket", BUCKET_SIZE(4), 4096 },
216 	{ NULL, "6 Bucket", BUCKET_SIZE(6), 3072 },
217 	{ NULL, "8 Bucket", BUCKET_SIZE(8), 2048 },
218 	{ NULL, "12 Bucket", BUCKET_SIZE(12), 1536 },
219 	{ NULL, "16 Bucket", BUCKET_SIZE(16), 1024 },
220 	{ NULL, "32 Bucket", BUCKET_SIZE(32), 512 },
221 	{ NULL, "64 Bucket", BUCKET_SIZE(64), 256 },
222 	{ NULL, "128 Bucket", BUCKET_SIZE(128), 128 },
223 	{ NULL, "256 Bucket", BUCKET_SIZE(256), 64 },
224 	{ NULL, NULL, 0}
225 };
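
/*
 * A worked example of the packing (a sketch; the real header size is
 * whatever sizeof(struct uma_bucket) comes to in uma_int.h).  On a
 * hypothetical LP64 machine where the header occupies 24 bytes:
 *
 *	BUCKET_SIZE(4)   == ((8 * 4)   - 24) / 8 == 1 item pointer
 *	BUCKET_SIZE(32)  == ((8 * 32)  - 24) / 8 == 29 item pointers
 *	BUCKET_SIZE(256) == ((8 * 256) - 24) / 8 == 253 item pointers
 *
 * That is, an "N Bucket" allocation occupies exactly N pointer-sized
 * words including its header, which keeps the bucket zones' items at
 * sizes that pack efficiently.
 */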
226 
227 /*
228  * Flags and enumerations to be passed to internal functions.
229  */
230 enum zfreeskip {
231 	SKIP_NONE =	0,
232 	SKIP_CNT =	0x00000001,
233 	SKIP_DTOR =	0x00010000,
234 	SKIP_FINI =	0x00020000,
235 };
236 
237 /* Prototypes. */
238 
239 int	uma_startup_count(int);
240 void	uma_startup(void *, int);
241 void	uma_startup1(void);
242 void	uma_startup2(void);
243 
244 static void *noobj_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
245 static void *page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
246 static void *pcpu_page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
247 static void *startup_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
248 static void page_free(void *, vm_size_t, uint8_t);
249 static void pcpu_page_free(void *, vm_size_t, uint8_t);
250 static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int, int, int);
251 static void cache_drain(uma_zone_t);
252 static void bucket_drain(uma_zone_t, uma_bucket_t);
253 static void bucket_cache_drain(uma_zone_t zone);
254 static int keg_ctor(void *, int, void *, int);
255 static void keg_dtor(void *, int, void *);
256 static int zone_ctor(void *, int, void *, int);
257 static void zone_dtor(void *, int, void *);
258 static int zero_init(void *, int, int);
259 static void keg_small_init(uma_keg_t keg);
260 static void keg_large_init(uma_keg_t keg);
261 static void zone_foreach(void (*zfunc)(uma_zone_t));
262 static void zone_timeout(uma_zone_t zone);
263 static int hash_alloc(struct uma_hash *, u_int);
264 static int hash_expand(struct uma_hash *, struct uma_hash *);
265 static void hash_free(struct uma_hash *hash);
266 static void uma_timeout(void *);
267 static void uma_startup3(void);
268 static void *zone_alloc_item(uma_zone_t, void *, int, int);
269 static void *zone_alloc_item_locked(uma_zone_t, void *, int, int);
270 static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip);
271 static void bucket_enable(void);
272 static void bucket_init(void);
273 static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int);
274 static void bucket_free(uma_zone_t zone, uma_bucket_t, void *);
275 static void bucket_zone_drain(void);
276 static uma_bucket_t zone_alloc_bucket(uma_zone_t, void *, int, int, int);
277 static uma_slab_t zone_fetch_slab(uma_zone_t, uma_keg_t, int, int);
278 static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
279 static void slab_free_item(uma_zone_t zone, uma_slab_t slab, void *item);
280 static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
281     uma_fini fini, int align, uint32_t flags);
282 static int zone_import(uma_zone_t, void **, int, int, int);
283 static void zone_release(uma_zone_t, void **, int);
284 static void uma_zero_item(void *, uma_zone_t);
285 
286 void uma_print_zone(uma_zone_t);
287 void uma_print_stats(void);
288 static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
289 static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);
290 
291 #ifdef INVARIANTS
292 static bool uma_dbg_kskip(uma_keg_t keg, void *mem);
293 static bool uma_dbg_zskip(uma_zone_t zone, void *mem);
294 static void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item);
295 static void uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item);
296 
297 static SYSCTL_NODE(_vm, OID_AUTO, debug, CTLFLAG_RD, 0,
298     "Memory allocation debugging");
299 
300 static u_int dbg_divisor = 1;
301 SYSCTL_UINT(_vm_debug, OID_AUTO, divisor,
302     CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &dbg_divisor, 0,
303     "Debug & thrash every this item in memory allocator");
304 
305 static counter_u64_t uma_dbg_cnt = EARLY_COUNTER;
306 static counter_u64_t uma_skip_cnt = EARLY_COUNTER;
307 SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, trashed, CTLFLAG_RD,
308     &uma_dbg_cnt, "memory items debugged");
309 SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, skipped, CTLFLAG_RD,
310     &uma_skip_cnt, "memory items skipped, not debugged");
311 #endif
312 
313 SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);
314 
315 SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLTYPE_INT,
316     0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones");
317 
318 SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
319     0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");
320 
321 static int zone_warnings = 1;
322 SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RWTUN, &zone_warnings, 0,
323     "Warn when UMA zones becomes full");
324 
325 /* Adjust bytes under management by UMA. */
326 static inline void
327 uma_total_dec(unsigned long size)
328 {
329 
330 	atomic_subtract_long(&uma_kmem_total, size);
331 }
332 
333 static inline void
334 uma_total_inc(unsigned long size)
335 {
336 
337 	if (atomic_fetchadd_long(&uma_kmem_total, size) > uma_kmem_limit)
338 		uma_reclaim_wakeup();
339 }
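
/*
 * Note that the test above compares the value fetched *before* the
 * addition, so the allocation that first pushes uma_kmem_total past
 * uma_kmem_limit completes without a wakeup; the reclaim thread is
 * poked by the next uma_total_inc() that runs while usage is still
 * above the limit.
 */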
340 
341 /*
342  * This routine checks to see whether or not it's safe to enable buckets.
343  */
344 static void
345 bucket_enable(void)
346 {
347 	bucketdisable = vm_page_count_min();
348 }
349 
350 /*
351  * Initialize bucket_zones, the array of zones of buckets of various sizes.
352  *
353  * For each zone, calculate the memory required for each bucket, consisting
354  * of the header and an array of pointers.
355  */
356 static void
357 bucket_init(void)
358 {
359 	struct uma_bucket_zone *ubz;
360 	int size;
361 
362 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) {
363 		size = roundup(sizeof(struct uma_bucket), sizeof(void *));
364 		size += sizeof(void *) * ubz->ubz_entries;
365 		ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
366 		    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
367 		    UMA_ZONE_MTXCLASS | UMA_ZFLAG_BUCKET | UMA_ZONE_NUMA);
368 	}
369 }
370 
371 /*
372  * Given a desired number of entries for a bucket, return the zone from which
373  * to allocate the bucket.
374  */
375 static struct uma_bucket_zone *
376 bucket_zone_lookup(int entries)
377 {
378 	struct uma_bucket_zone *ubz;
379 
380 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
381 		if (ubz->ubz_entries >= entries)
382 			return (ubz);
383 	ubz--;
384 	return (ubz);
385 }
386 
387 static int
388 bucket_select(int size)
389 {
390 	struct uma_bucket_zone *ubz;
391 
392 	ubz = &bucket_zones[0];
393 	if (size > ubz->ubz_maxsize)
394 		return MAX((ubz->ubz_maxsize * ubz->ubz_entries) / size, 1);
395 
396 	for (; ubz->ubz_entries != 0; ubz++)
397 		if (ubz->ubz_maxsize < size)
398 			break;
399 	ubz--;
400 	return (ubz->ubz_entries);
401 }
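
/*
 * For illustration, with the bucket_zones[] table above (values taken
 * from that table, a sketch rather than a runtime guarantee):
 *
 *	bucket_select(64)   -> BUCKET_SIZE(256)	(smallest items get the
 *						 largest buckets)
 *	bucket_select(1024) -> BUCKET_SIZE(16)	(last row with
 *						 ubz_maxsize >= 1024)
 *	bucket_select(8192) -> MAX((4096 * BUCKET_SIZE(4)) / 8192, 1)
 *						(oversized items get a
 *						 scaled-down count)
 */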
402 
403 static uma_bucket_t
404 bucket_alloc(uma_zone_t zone, void *udata, int flags)
405 {
406 	struct uma_bucket_zone *ubz;
407 	uma_bucket_t bucket;
408 
409 	/*
410 	 * This is to stop us from allocating per cpu buckets while we're
411 	 * running out of vm.boot_pages.  Otherwise, we would exhaust the
412 	 * boot pages.  This also prevents us from allocating buckets in
413 	 * low memory situations.
414 	 */
415 	if (bucketdisable)
416 		return (NULL);
417 	/*
418 	 * To limit bucket recursion we store the original zone flags
419 	 * in a cookie passed via zalloc_arg/zfree_arg.  This allows the
420 	 * NOVM flag to persist even through deep recursions.  We also
421 	 * store ZFLAG_BUCKET once we have recursed attempting to allocate
422 	 * a bucket for a bucket zone so we do not allow infinite bucket
423 	 * recursion.  This cookie will even persist to frees of unused
424 	 * buckets via the allocation path or bucket allocations in the
425 	 * free path.
426 	 */
427 	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
428 		udata = (void *)(uintptr_t)zone->uz_flags;
429 	else {
430 		if ((uintptr_t)udata & UMA_ZFLAG_BUCKET)
431 			return (NULL);
432 		udata = (void *)((uintptr_t)udata | UMA_ZFLAG_BUCKET);
433 	}
434 	if ((uintptr_t)udata & UMA_ZFLAG_CACHEONLY)
435 		flags |= M_NOVM;
436 	ubz = bucket_zone_lookup(zone->uz_count);
437 	if (ubz->ubz_zone == zone && (ubz + 1)->ubz_entries != 0)
438 		ubz++;
439 	bucket = uma_zalloc_arg(ubz->ubz_zone, udata, flags);
440 	if (bucket) {
441 #ifdef INVARIANTS
442 		bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
443 #endif
444 		bucket->ub_cnt = 0;
445 		bucket->ub_entries = ubz->ubz_entries;
446 	}
447 
448 	return (bucket);
449 }
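
/*
 * A sketch of how the recursion cookie above bottoms out when a bucket
 * zone needs a bucket for its own per-CPU cache:
 *
 *	bucket_alloc(bucket zone A, udata, ...)
 *	    cookie |= UMA_ZFLAG_BUCKET, allocate from bucket zone B
 *	bucket_alloc(bucket zone B, cookie, ...)
 *	    cookie already has UMA_ZFLAG_BUCKET -> return NULL
 *
 * so the caller at the second level simply proceeds without a bucket
 * and the recursion never goes deeper.
 */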
450 
451 static void
452 bucket_free(uma_zone_t zone, uma_bucket_t bucket, void *udata)
453 {
454 	struct uma_bucket_zone *ubz;
455 
456 	KASSERT(bucket->ub_cnt == 0,
457 	    ("bucket_free: Freeing a non free bucket."));
458 	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
459 		udata = (void *)(uintptr_t)zone->uz_flags;
460 	ubz = bucket_zone_lookup(bucket->ub_entries);
461 	uma_zfree_arg(ubz->ubz_zone, bucket, udata);
462 }
463 
464 static void
465 bucket_zone_drain(void)
466 {
467 	struct uma_bucket_zone *ubz;
468 
469 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
470 		zone_drain(ubz->ubz_zone);
471 }
472 
473 static uma_bucket_t
474 zone_try_fetch_bucket(uma_zone_t zone, uma_zone_domain_t zdom, const bool ws)
475 {
476 	uma_bucket_t bucket;
477 
478 	ZONE_LOCK_ASSERT(zone);
479 
480 	if ((bucket = LIST_FIRST(&zdom->uzd_buckets)) != NULL) {
481 		MPASS(zdom->uzd_nitems >= bucket->ub_cnt);
482 		LIST_REMOVE(bucket, ub_link);
483 		zdom->uzd_nitems -= bucket->ub_cnt;
484 		if (ws && zdom->uzd_imin > zdom->uzd_nitems)
485 			zdom->uzd_imin = zdom->uzd_nitems;
486 		zone->uz_bkt_count -= bucket->ub_cnt;
487 	}
488 	return (bucket);
489 }
490 
491 static void
492 zone_put_bucket(uma_zone_t zone, uma_zone_domain_t zdom, uma_bucket_t bucket,
493     const bool ws)
494 {
495 
496 	ZONE_LOCK_ASSERT(zone);
497 	KASSERT(zone->uz_bkt_count < zone->uz_bkt_max, ("%s: zone %p overflow",
498 	    __func__, zone));
499 
500 	LIST_INSERT_HEAD(&zdom->uzd_buckets, bucket, ub_link);
501 	zdom->uzd_nitems += bucket->ub_cnt;
502 	if (ws && zdom->uzd_imax < zdom->uzd_nitems)
503 		zdom->uzd_imax = zdom->uzd_nitems;
504 	zone->uz_bkt_count += bucket->ub_cnt;
505 }
506 
507 static void
508 zone_log_warning(uma_zone_t zone)
509 {
510 	static const struct timeval warninterval = { 300, 0 };
511 
512 	if (!zone_warnings || zone->uz_warning == NULL)
513 		return;
514 
515 	if (ratecheck(&zone->uz_ratecheck, &warninterval))
516 		printf("[zone: %s] %s\n", zone->uz_name, zone->uz_warning);
517 }
518 
519 static inline void
520 zone_maxaction(uma_zone_t zone)
521 {
522 
523 	if (zone->uz_maxaction.ta_func != NULL)
524 		taskqueue_enqueue(taskqueue_thread, &zone->uz_maxaction);
525 }
526 
527 /*
528  * Routine called by the callout to fire off time-interval-based
529  * calculations (stats, hash size, etc.).
530  *
531  * Arguments:
532  *	arg   Unused
533  *
534  * Returns:
535  *	Nothing
536  */
537 static void
538 uma_timeout(void *unused)
539 {
540 	bucket_enable();
541 	zone_foreach(zone_timeout);
542 
543 	/* Reschedule this event */
544 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
545 }
546 
547 /*
548  * Update the working set size estimate for the zone's bucket cache.
549  * The constants chosen here are somewhat arbitrary.  With an update period of
550  * 20s (UMA_TIMEOUT), this estimate is dominated by zone activity over the
551  * last 100s.
552  */
553 static void
554 zone_domain_update_wss(uma_zone_domain_t zdom)
555 {
556 	long wss;
557 
558 	MPASS(zdom->uzd_imax >= zdom->uzd_imin);
559 	wss = zdom->uzd_imax - zdom->uzd_imin;
560 	zdom->uzd_imax = zdom->uzd_imin = zdom->uzd_nitems;
561 	zdom->uzd_wss = (3 * wss + 2 * zdom->uzd_wss) / 5;
562 }
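
/*
 * A worked example of the filter above: starting from uzd_wss == 0 and
 * seeing a steady (imax - imin) delta of 100 items each interval, the
 * estimate converges as
 *
 *	wss = (3 * 100 + 2 * 0)  / 5 = 60
 *	wss = (3 * 100 + 2 * 60) / 5 = 84
 *	wss = (3 * 100 + 2 * 84) / 5 = 93, ...
 *
 * i.e. an exponential moving average weighting the newest interval by
 * 3/5, which is where the "last 100s" figure above comes from.
 */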
563 
564 /*
565  * Routine to perform timeout-driven calculations.  This expands the
566  * keg hash and updates the per-domain working-set size estimates.
567  *
568  *  Returns nothing.
569  */
570 static void
571 zone_timeout(uma_zone_t zone)
572 {
573 	uma_keg_t keg = zone->uz_keg;
574 	u_int slabs;
575 
576 	KEG_LOCK(keg);
577 	/*
578 	 * Expand the keg hash table.
579 	 *
580 	 * This is done if the number of slabs is larger than the hash size.
581 	 * What I'm trying to do here is eliminate collisions entirely.  This
582 	 * may be a little aggressive.  Should I allow for two collisions max?
583 	 */
584 	if (keg->uk_flags & UMA_ZONE_HASH &&
585 	    (slabs = keg->uk_pages / keg->uk_ppera) >
586 	     keg->uk_hash.uh_hashsize) {
587 		struct uma_hash newhash;
588 		struct uma_hash oldhash;
589 		int ret;
590 
591 		/*
592 		 * This is so involved because allocating and freeing
593 		 * while the keg lock is held will lead to deadlock.
594 		 * I have to do everything in stages and check for
595 		 * races.
596 		 */
597 		KEG_UNLOCK(keg);
598 		ret = hash_alloc(&newhash, 1 << fls(slabs));
599 		KEG_LOCK(keg);
600 		if (ret) {
601 			if (hash_expand(&keg->uk_hash, &newhash)) {
602 				oldhash = keg->uk_hash;
603 				keg->uk_hash = newhash;
604 			} else
605 				oldhash = newhash;
606 
607 			KEG_UNLOCK(keg);
608 			hash_free(&oldhash);
609 			return;
610 		}
611 	}
612 
613 	for (int i = 0; i < vm_ndomains; i++)
614 		zone_domain_update_wss(&zone->uz_domain[i]);
615 
616 	KEG_UNLOCK(keg);
617 }
618 
619 /*
620  * Allocate and zero-fill the next-larger hash table from the appropriate
621  * backing store.
622  *
623  * Arguments:
624  *	hash  A new hash structure to be filled in
625  *	size  The requested number of buckets, which must be a power of 2
626  * Returns:
627  *	1 on success and 0 on failure.
628  */
629 static int
630 hash_alloc(struct uma_hash *hash, u_int size)
631 {
632 	size_t alloc;
633 
634 	KASSERT(powerof2(size), ("hash size must be power of 2"));
635 	if (size > UMA_HASH_SIZE_INIT)  {
636 		hash->uh_hashsize = size;
637 		alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
638 		hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
639 		    M_UMAHASH, M_NOWAIT);
640 	} else {
641 		alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
642 		hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
643 		    UMA_ANYDOMAIN, M_WAITOK);
644 		hash->uh_hashsize = UMA_HASH_SIZE_INIT;
645 	}
646 	if (hash->uh_slab_hash) {
647 		bzero(hash->uh_slab_hash, alloc);
648 		hash->uh_hashmask = hash->uh_hashsize - 1;
649 		return (1);
650 	}
651 
652 	return (0);
653 }
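
/*
 * For example, zone_timeout() above asks for 1 << fls(slabs) buckets,
 * so a keg that has grown to 300 slabs requests a 512-bucket table
 * (fls(300) == 9) and is served by malloc(9) here; only the initial
 * UMA_HASH_SIZE_INIT-bucket table comes from hashzone, which exists so
 * that hashes can be allocated before malloc() itself is running.
 */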
654 
655 /*
656  * Expands the hash table for HASH zones.  This is done from zone_timeout
657  * to reduce collisions.  This must not be done in the regular allocation
658  * path; otherwise we can recurse on the VM while allocating pages.
659  *
660  * Arguments:
661  *	oldhash  The hash you want to expand
662  *	newhash  The hash structure for the new table
663  *
664  * Returns:
665  *	1 on success and 0 if the new table could not be used (it was
666  *	not allocated or is not larger than the old one).
667  */
669 static int
670 hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
671 {
672 	uma_slab_t slab;
673 	u_int hval;
674 	u_int idx;
675 
676 	if (!newhash->uh_slab_hash)
677 		return (0);
678 
679 	if (oldhash->uh_hashsize >= newhash->uh_hashsize)
680 		return (0);
681 
682 	/*
683 	 * I need to investigate hash algorithms for resizing without a
684 	 * full rehash.
685 	 */
686 
687 	for (idx = 0; idx < oldhash->uh_hashsize; idx++)
688 		while (!SLIST_EMPTY(&oldhash->uh_slab_hash[idx])) {
689 			slab = SLIST_FIRST(&oldhash->uh_slab_hash[idx]);
690 			SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[idx], us_hlink);
691 			hval = UMA_HASH(newhash, slab->us_data);
692 			SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
693 			    slab, us_hlink);
694 		}
695 
696 	return (1);
697 }
698 
699 /*
700  * Free the hash table's bucket array to the appropriate backing store.
701  *
702  * Arguments:
703  *	hash  The hash structure whose bucket array we're freeing
705  *
706  * Returns:
707  *	Nothing
708  */
709 static void
710 hash_free(struct uma_hash *hash)
711 {
712 	if (hash->uh_slab_hash == NULL)
713 		return;
714 	if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
715 		zone_free_item(hashzone, hash->uh_slab_hash, NULL, SKIP_NONE);
716 	else
717 		free(hash->uh_slab_hash, M_UMAHASH);
718 }
719 
720 /*
721  * Frees all outstanding items in a bucket
722  *
723  * Arguments:
724  *	zone   The zone to free to, must be unlocked.
725  *	bucket The free/alloc bucket with items, cpu queue must be locked.
726  *
727  * Returns:
728  *	Nothing
729  */
730 
731 static void
732 bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
733 {
734 	int i;
735 
736 	if (bucket == NULL)
737 		return;
738 
739 	if (zone->uz_fini)
740 		for (i = 0; i < bucket->ub_cnt; i++)
741 			zone->uz_fini(bucket->ub_bucket[i], zone->uz_size);
742 	zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt);
743 	if (zone->uz_max_items > 0) {
744 		ZONE_LOCK(zone);
745 		zone->uz_items -= bucket->ub_cnt;
746 		if (zone->uz_sleepers && zone->uz_items < zone->uz_max_items)
747 			wakeup_one(zone);
748 		ZONE_UNLOCK(zone);
749 	}
750 	bucket->ub_cnt = 0;
751 }
752 
753 /*
754  * Drains the per cpu caches for a zone.
755  *
756  * NOTE: This may only be called while the zone is being torn down, and not
757  * during normal operation.  This is necessary in order that we do not have
758  * to migrate CPUs to drain the per-CPU caches.
759  *
760  * Arguments:
761  *	zone     The zone to drain, must be unlocked.
762  *
763  * Returns:
764  *	Nothing
765  */
766 static void
767 cache_drain(uma_zone_t zone)
768 {
769 	uma_cache_t cache;
770 	int cpu;
771 
772 	/*
773 	 * XXX: It is safe to not lock the per-CPU caches, because we're
774 	 * tearing down the zone anyway.  I.e., there will be no further use
775 	 * of the caches at this point.
776 	 *
777 	 * XXX: It would be good to be able to assert that the zone is being
778 	 * torn down to prevent improper use of cache_drain().
779 	 *
780 	 * XXX: We lock the zone before passing into bucket_cache_drain() as
781 	 * it is used elsewhere.  Should the tear-down path be made special
782 	 * there in some form?
783 	 */
784 	CPU_FOREACH(cpu) {
785 		cache = &zone->uz_cpu[cpu];
786 		bucket_drain(zone, cache->uc_allocbucket);
787 		if (cache->uc_allocbucket != NULL)
788 			bucket_free(zone, cache->uc_allocbucket, NULL);
789 		cache->uc_allocbucket = NULL;
790 		bucket_drain(zone, cache->uc_freebucket);
791 		if (cache->uc_freebucket != NULL)
792 			bucket_free(zone, cache->uc_freebucket, NULL);
793 		cache->uc_freebucket = NULL;
794 		bucket_drain(zone, cache->uc_crossbucket);
795 		if (cache->uc_crossbucket != NULL)
796 			bucket_free(zone, cache->uc_crossbucket, NULL);
797 		cache->uc_crossbucket = NULL;
798 	}
799 	ZONE_LOCK(zone);
800 	bucket_cache_drain(zone);
801 	ZONE_UNLOCK(zone);
802 }
803 
804 static void
805 cache_shrink(uma_zone_t zone)
806 {
807 
808 	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
809 		return;
810 
811 	ZONE_LOCK(zone);
812 	zone->uz_count = (zone->uz_count_min + zone->uz_count) / 2;
813 	ZONE_UNLOCK(zone);
814 }
815 
816 static void
817 cache_drain_safe_cpu(uma_zone_t zone)
818 {
819 	uma_cache_t cache;
820 	uma_bucket_t b1, b2, b3;
821 	int domain;
822 
823 	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
824 		return;
825 
826 	b1 = b2 = b3 = NULL;
827 	ZONE_LOCK(zone);
828 	critical_enter();
829 	if (zone->uz_flags & UMA_ZONE_NUMA)
830 		domain = PCPU_GET(domain);
831 	else
832 		domain = 0;
833 	cache = &zone->uz_cpu[curcpu];
834 	if (cache->uc_allocbucket) {
835 		if (cache->uc_allocbucket->ub_cnt != 0)
836 			zone_put_bucket(zone, &zone->uz_domain[domain],
837 			    cache->uc_allocbucket, false);
838 		else
839 			b1 = cache->uc_allocbucket;
840 		cache->uc_allocbucket = NULL;
841 	}
842 	if (cache->uc_freebucket) {
843 		if (cache->uc_freebucket->ub_cnt != 0)
844 			zone_put_bucket(zone, &zone->uz_domain[domain],
845 			    cache->uc_freebucket, false);
846 		else
847 			b2 = cache->uc_freebucket;
848 		cache->uc_freebucket = NULL;
849 	}
850 	b3 = cache->uc_crossbucket;
851 	cache->uc_crossbucket = NULL;
852 	critical_exit();
853 	ZONE_UNLOCK(zone);
854 	if (b1)
855 		bucket_free(zone, b1, NULL);
856 	if (b2)
857 		bucket_free(zone, b2, NULL);
858 	if (b3) {
859 		bucket_drain(zone, b3);
860 		bucket_free(zone, b3, NULL);
861 	}
862 }
863 
864 /*
865  * Safely drain the per-CPU caches of a zone (or of all zones) into the
866  * per-domain bucket caches.  This is an expensive call because it needs
867  * to bind to each CPU in turn and enter a critical section on each of
868  * them in order to safely access their cache buckets.
869  * The zone lock must not be held when calling this function.
870  */
871 static void
872 cache_drain_safe(uma_zone_t zone)
873 {
874 	int cpu;
875 
876 	/*
877 	 * Polite bucket-size shrinking was not enough; shrink aggressively.
878 	 */
879 	if (zone)
880 		cache_shrink(zone);
881 	else
882 		zone_foreach(cache_shrink);
883 
884 	CPU_FOREACH(cpu) {
885 		thread_lock(curthread);
886 		sched_bind(curthread, cpu);
887 		thread_unlock(curthread);
888 
889 		if (zone)
890 			cache_drain_safe_cpu(zone);
891 		else
892 			zone_foreach(cache_drain_safe_cpu);
893 	}
894 	thread_lock(curthread);
895 	sched_unbind(curthread);
896 	thread_unlock(curthread);
897 }
898 
899 /*
900  * Drain the cached buckets from a zone.  Expects a locked zone on entry.
901  */
902 static void
903 bucket_cache_drain(uma_zone_t zone)
904 {
905 	uma_zone_domain_t zdom;
906 	uma_bucket_t bucket;
907 	int i;
908 
909 	/*
910 	 * Drain the bucket queues and free the buckets.
911 	 */
912 	for (i = 0; i < vm_ndomains; i++) {
913 		zdom = &zone->uz_domain[i];
914 		while ((bucket = zone_try_fetch_bucket(zone, zdom, false)) !=
915 		    NULL) {
916 			ZONE_UNLOCK(zone);
917 			bucket_drain(zone, bucket);
918 			bucket_free(zone, bucket, NULL);
919 			ZONE_LOCK(zone);
920 		}
921 	}
922 
923 	/*
924 	 * Shrink bucket sizes further.  The price of a single zone lock
925 	 * collision is probably lower than the price of a global cache drain.
926 	 */
927 	if (zone->uz_count > zone->uz_count_min)
928 		zone->uz_count--;
929 }
930 
931 static void
932 keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
933 {
934 	uint8_t *mem;
935 	int i;
936 	uint8_t flags;
937 
938 	CTR4(KTR_UMA, "keg_free_slab keg %s(%p) slab %p, returning %d bytes",
939 	    keg->uk_name, keg, slab, PAGE_SIZE * keg->uk_ppera);
940 
941 	mem = slab->us_data;
942 	flags = slab->us_flags;
943 	i = start;
944 	if (keg->uk_fini != NULL) {
945 		for (i--; i > -1; i--)
946 #ifdef INVARIANTS
947 		/*
948 		 * trash_fini implies that dtor was trash_dtor. trash_fini
949 		 * would check that memory hasn't been modified since free,
950 		 * which executed trash_dtor.
951 		 * That's why we need to run uma_dbg_kskip() check here,
952 		 * albeit we don't make skip check for other init/fini
953 		 * invocations.
954 		 */
955 		if (!uma_dbg_kskip(keg, slab->us_data + (keg->uk_rsize * i)) ||
956 		    keg->uk_fini != trash_fini)
957 #endif
958 			keg->uk_fini(slab->us_data + (keg->uk_rsize * i),
959 			    keg->uk_size);
960 	}
961 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
962 		zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
963 	keg->uk_freef(mem, PAGE_SIZE * keg->uk_ppera, flags);
964 	uma_total_dec(PAGE_SIZE * keg->uk_ppera);
965 }
966 
967 /*
968  * Frees pages from a keg back to the system.  This is done on demand from
969  * the pageout daemon.
970  *
971  * Returns nothing.
972  */
973 static void
974 keg_drain(uma_keg_t keg)
975 {
976 	struct slabhead freeslabs = { 0 };
977 	uma_domain_t dom;
978 	uma_slab_t slab, tmp;
979 	int i;
980 
981 	/*
982 	 * We don't want to take pages from statically allocated kegs at this
983 	 * time.
984 	 */
985 	if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
986 		return;
987 
988 	CTR3(KTR_UMA, "keg_drain %s(%p) free items: %u",
989 	    keg->uk_name, keg, keg->uk_free);
990 	KEG_LOCK(keg);
991 	if (keg->uk_free == 0)
992 		goto finished;
993 
994 	for (i = 0; i < vm_ndomains; i++) {
995 		dom = &keg->uk_domain[i];
996 		LIST_FOREACH_SAFE(slab, &dom->ud_free_slab, us_link, tmp) {
997 			/* We have nowhere to free these to. */
998 			if (slab->us_flags & UMA_SLAB_BOOT)
999 				continue;
1000 
1001 			LIST_REMOVE(slab, us_link);
1002 			keg->uk_pages -= keg->uk_ppera;
1003 			keg->uk_free -= keg->uk_ipers;
1004 
1005 			if (keg->uk_flags & UMA_ZONE_HASH)
1006 				UMA_HASH_REMOVE(&keg->uk_hash, slab,
1007 				    slab->us_data);
1008 
1009 			SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);
1010 		}
1011 	}
1012 
1013 finished:
1014 	KEG_UNLOCK(keg);
1015 
1016 	while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
1017 		SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
1018 		keg_free_slab(keg, slab, keg->uk_ipers);
1019 	}
1020 }
1021 
1022 static void
1023 zone_drain_wait(uma_zone_t zone, int waitok)
1024 {
1025 
1026 	/*
1027 	 * Set draining to interlock with zone_dtor() so we can release our
1028 	 * locks as we go.  Only dtor() should do a WAITOK call since it
1029 	 * is the only call that knows the structure will still be available
1030 	 * when it wakes up.
1031 	 */
1032 	ZONE_LOCK(zone);
1033 	while (zone->uz_flags & UMA_ZFLAG_DRAINING) {
1034 		if (waitok == M_NOWAIT)
1035 			goto out;
1036 		msleep(zone, zone->uz_lockptr, PVM, "zonedrain", 1);
1037 	}
1038 	zone->uz_flags |= UMA_ZFLAG_DRAINING;
1039 	bucket_cache_drain(zone);
1040 	ZONE_UNLOCK(zone);
1041 	/*
1042 	 * The DRAINING flag protects us from being freed while
1043 	 * we're running.  Normally the uma_rwlock would protect us but we
1044 	 * must be able to release and acquire the right lock for each keg.
1045 	 */
1046 	keg_drain(zone->uz_keg);
1047 	ZONE_LOCK(zone);
1048 	zone->uz_flags &= ~UMA_ZFLAG_DRAINING;
1049 	wakeup(zone);
1050 out:
1051 	ZONE_UNLOCK(zone);
1052 }
1053 
1054 void
1055 zone_drain(uma_zone_t zone)
1056 {
1057 
1058 	zone_drain_wait(zone, M_NOWAIT);
1059 }
1060 
1061 /*
1062  * Allocate a new slab for a keg.  This does not insert the slab onto a list.
1063  * If the allocation was successful, the keg lock will be held upon return,
1064  * otherwise the keg will be left unlocked.
1065  *
1066  * Arguments:
1067  *	flags   Wait flags for the item initialization routine
1068  *	aflags  Wait flags for the slab allocation
1069  *
1070  * Returns:
1071  *	The slab that was allocated or NULL if there is no memory and the
1072  *	caller specified M_NOWAIT.
1073  */
1074 static uma_slab_t
1075 keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int domain, int flags,
1076     int aflags)
1077 {
1078 	uma_alloc allocf;
1079 	uma_slab_t slab;
1080 	unsigned long size;
1081 	uint8_t *mem;
1082 	uint8_t sflags;
1083 	int i;
1084 
1085 	KASSERT(domain >= 0 && domain < vm_ndomains,
1086 	    ("keg_alloc_slab: domain %d out of range", domain));
1087 	KEG_LOCK_ASSERT(keg);
1088 	MPASS(zone->uz_lockptr == &keg->uk_lock);
1089 
1090 	allocf = keg->uk_allocf;
1091 	KEG_UNLOCK(keg);
1092 
1093 	slab = NULL;
1094 	mem = NULL;
1095 	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
1096 		slab = zone_alloc_item(keg->uk_slabzone, NULL, domain, aflags);
1097 		if (slab == NULL)
1098 			goto out;
1099 	}
1100 
1101 	/*
1102 	 * This reproduces the old vm_zone behavior of zero filling pages the
1103 	 * first time they are added to a zone.
1104 	 *
1105 	 * Malloced items are zeroed in uma_zalloc.
1106 	 */
1107 
1108 	if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
1109 		aflags |= M_ZERO;
1110 	else
1111 		aflags &= ~M_ZERO;
1112 
1113 	if (keg->uk_flags & UMA_ZONE_NODUMP)
1114 		aflags |= M_NODUMP;
1115 
1116 	/* zone is passed for legacy reasons. */
1117 	size = keg->uk_ppera * PAGE_SIZE;
1118 	mem = allocf(zone, size, domain, &sflags, aflags);
1119 	if (mem == NULL) {
1120 		if (keg->uk_flags & UMA_ZONE_OFFPAGE)
1121 			zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
1122 		slab = NULL;
1123 		goto out;
1124 	}
1125 	uma_total_inc(size);
1126 
1127 	/* Point the slab into the allocated memory */
1128 	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
1129 		slab = (uma_slab_t )(mem + keg->uk_pgoff);
1130 
1131 	if (keg->uk_flags & UMA_ZONE_VTOSLAB)
1132 		for (i = 0; i < keg->uk_ppera; i++)
1133 			vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);
1134 
1135 	slab->us_keg = keg;
1136 	slab->us_data = mem;
1137 	slab->us_freecount = keg->uk_ipers;
1138 	slab->us_flags = sflags;
1139 	slab->us_domain = domain;
1140 	BIT_FILL(SLAB_SETSIZE, &slab->us_free);
1141 #ifdef INVARIANTS
1142 	BIT_ZERO(SLAB_SETSIZE, &slab->us_debugfree);
1143 #endif
1144 
1145 	if (keg->uk_init != NULL) {
1146 		for (i = 0; i < keg->uk_ipers; i++)
1147 			if (keg->uk_init(slab->us_data + (keg->uk_rsize * i),
1148 			    keg->uk_size, flags) != 0)
1149 				break;
1150 		if (i != keg->uk_ipers) {
1151 			keg_free_slab(keg, slab, i);
1152 			slab = NULL;
1153 			goto out;
1154 		}
1155 	}
1156 	KEG_LOCK(keg);
1157 
1158 	CTR3(KTR_UMA, "keg_alloc_slab: allocated slab %p for %s(%p)",
1159 	    slab, keg->uk_name, keg);
1160 
1161 	if (keg->uk_flags & UMA_ZONE_HASH)
1162 		UMA_HASH_INSERT(&keg->uk_hash, slab, mem);
1163 
1164 	keg->uk_pages += keg->uk_ppera;
1165 	keg->uk_free += keg->uk_ipers;
1166 
1167 out:
1168 	return (slab);
1169 }
1170 
1171 /*
1172  * This function is intended to be used early on in place of page_alloc() so
1173  * that we may use the boot time page cache to satisfy allocations before
1174  * the VM is ready.
1175  */
1176 static void *
1177 startup_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
1178     int wait)
1179 {
1180 	uma_keg_t keg;
1181 	void *mem;
1182 	int pages;
1183 
1184 	keg = zone->uz_keg;
1185 	/*
1186 	 * If we are in BOOT_BUCKETS or higher, then switch to the real
1187 	 * allocator.  Zones with page-sized slabs switch at BOOT_PAGEALLOC.
1188 	 */
1189 	switch (booted) {
1190 		case BOOT_COLD:
1191 		case BOOT_STRAPPED:
1192 			break;
1193 		case BOOT_PAGEALLOC:
1194 			if (keg->uk_ppera > 1)
1195 				break;
1196 		case BOOT_BUCKETS:
1197 		case BOOT_RUNNING:
1198 #ifdef UMA_MD_SMALL_ALLOC
1199 			keg->uk_allocf = (keg->uk_ppera > 1) ?
1200 			    page_alloc : uma_small_alloc;
1201 #else
1202 			keg->uk_allocf = page_alloc;
1203 #endif
1204 			return keg->uk_allocf(zone, bytes, domain, pflag, wait);
1205 	}
1206 
1207 	/*
1208 	 * Check our small startup cache to see if it has pages remaining.
1209 	 */
1210 	pages = howmany(bytes, PAGE_SIZE);
1211 	KASSERT(pages > 0, ("%s can't reserve 0 pages", __func__));
1212 	if (pages > boot_pages)
1213 		panic("UMA zone \"%s\": Increase vm.boot_pages", zone->uz_name);
1214 #ifdef DIAGNOSTIC
1215 	printf("%s from \"%s\", %d boot pages left\n", __func__, zone->uz_name,
1216 	    boot_pages);
1217 #endif
1218 	mem = bootmem;
1219 	boot_pages -= pages;
1220 	bootmem += pages * PAGE_SIZE;
1221 	*pflag = UMA_SLAB_BOOT;
1222 
1223 	return (mem);
1224 }
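
/*
 * For example (a hypothetical request): a keg asking for 3 * PAGE_SIZE
 * during BOOT_COLD consumes howmany(3 * PAGE_SIZE, PAGE_SIZE) == 3 of
 * the remaining boot_pages and advances bootmem by the same amount; if
 * fewer than 3 pages remained we would panic with the advice to raise
 * the vm.boot_pages tunable instead of failing silently.
 */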
1225 
1226 /*
1227  * Allocates a number of pages from the system
1228  *
1229  * Arguments:
1230  *	bytes  The number of bytes requested
1231  *	wait  Shall we wait?
1232  *
1233  * Returns:
1234  *	A pointer to the allocated memory or possibly
1235  *	NULL if M_NOWAIT is set.
1236  */
1237 static void *
1238 page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
1239     int wait)
1240 {
1241 	void *p;	/* Returned page */
1242 
1243 	*pflag = UMA_SLAB_KERNEL;
1244 	p = (void *)kmem_malloc_domainset(DOMAINSET_FIXED(domain), bytes, wait);
1245 
1246 	return (p);
1247 }
1248 
1249 static void *
1250 pcpu_page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
1251     int wait)
1252 {
1253 	struct pglist alloctail;
1254 	vm_offset_t addr, zkva;
1255 	int cpu, flags;
1256 	vm_page_t p, p_next;
1257 #ifdef NUMA
1258 	struct pcpu *pc;
1259 #endif
1260 
1261 	MPASS(bytes == (mp_maxid + 1) * PAGE_SIZE);
1262 
1263 	TAILQ_INIT(&alloctail);
1264 	flags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
1265 	    malloc2vm_flags(wait);
1266 	*pflag = UMA_SLAB_KERNEL;
1267 	for (cpu = 0; cpu <= mp_maxid; cpu++) {
1268 		if (CPU_ABSENT(cpu)) {
1269 			p = vm_page_alloc(NULL, 0, flags);
1270 		} else {
1271 #ifndef NUMA
1272 			p = vm_page_alloc(NULL, 0, flags);
1273 #else
1274 			pc = pcpu_find(cpu);
1275 			p = vm_page_alloc_domain(NULL, 0, pc->pc_domain, flags);
1276 			if (__predict_false(p == NULL))
1277 				p = vm_page_alloc(NULL, 0, flags);
1278 #endif
1279 		}
1280 		if (__predict_false(p == NULL))
1281 			goto fail;
1282 		TAILQ_INSERT_TAIL(&alloctail, p, listq);
1283 	}
1284 	if ((addr = kva_alloc(bytes)) == 0)
1285 		goto fail;
1286 	zkva = addr;
1287 	TAILQ_FOREACH(p, &alloctail, listq) {
1288 		pmap_qenter(zkva, &p, 1);
1289 		zkva += PAGE_SIZE;
1290 	}
1291 	return ((void*)addr);
1292 fail:
1293 	TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
1294 		vm_page_unwire_noq(p);
1295 		vm_page_free(p);
1296 	}
1297 	return (NULL);
1298 }
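
/*
 * The resulting layout, as a sketch: the returned KVA covers
 * mp_maxid + 1 contiguous pages and page i backs CPU i, drawn from
 * that CPU's own NUMA domain when possible:
 *
 *	addr + 0 * PAGE_SIZE -> CPU 0's slice
 *	addr + 1 * PAGE_SIZE -> CPU 1's slice
 *	...
 *
 * Absent CPUs still get a backing page so that the mapping stays
 * contiguous and per-CPU pointer arithmetic works for any CPU id.
 */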
1299 
1300 /*
1301  * Allocates a number of pages not belonging to a VM object
1302  *
1303  * Arguments:
1304  *	bytes  The number of bytes requested
1305  *	wait   Shall we wait?
1306  *
1307  * Returns:
1308  *	A pointer to the allocated memory or possibly
1309  *	NULL if M_NOWAIT is set.
1310  */
1311 static void *
1312 noobj_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags,
1313     int wait)
1314 {
1315 	TAILQ_HEAD(, vm_page) alloctail;
1316 	u_long npages;
1317 	vm_offset_t retkva, zkva;
1318 	vm_page_t p, p_next;
1319 	uma_keg_t keg;
1320 
1321 	TAILQ_INIT(&alloctail);
1322 	keg = zone->uz_keg;
1323 
1324 	npages = howmany(bytes, PAGE_SIZE);
1325 	while (npages > 0) {
1326 		p = vm_page_alloc_domain(NULL, 0, domain, VM_ALLOC_INTERRUPT |
1327 		    VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
1328 		    ((wait & M_WAITOK) != 0 ? VM_ALLOC_WAITOK :
1329 		    VM_ALLOC_NOWAIT));
1330 		if (p != NULL) {
1331 			/*
1332 			 * Since the page does not belong to an object, its
1333 			 * listq is unused.
1334 			 */
1335 			TAILQ_INSERT_TAIL(&alloctail, p, listq);
1336 			npages--;
1337 			continue;
1338 		}
1339 		/*
1340 		 * Page allocation failed, free intermediate pages and
1341 		 * exit.
1342 		 */
1343 		TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
1344 			vm_page_unwire_noq(p);
1345 			vm_page_free(p);
1346 		}
1347 		return (NULL);
1348 	}
1349 	*flags = UMA_SLAB_PRIV;
1350 	zkva = keg->uk_kva +
1351 	    atomic_fetchadd_long(&keg->uk_offset, round_page(bytes));
1352 	retkva = zkva;
1353 	TAILQ_FOREACH(p, &alloctail, listq) {
1354 		pmap_qenter(zkva, &p, 1);
1355 		zkva += PAGE_SIZE;
1356 	}
1357 
1358 	return ((void *)retkva);
1359 }
1360 
1361 /*
1362  * Frees a number of pages to the system
1363  *
1364  * Arguments:
1365  *	mem   A pointer to the memory to be freed
1366  *	size  The size of the memory being freed
1367  *	flags The original p->us_flags field
1368  *
1369  * Returns:
1370  *	Nothing
1371  */
1372 static void
1373 page_free(void *mem, vm_size_t size, uint8_t flags)
1374 {
1375 
1376 	if ((flags & UMA_SLAB_KERNEL) == 0)
1377 		panic("UMA: page_free used with invalid flags %x", flags);
1378 
1379 	kmem_free((vm_offset_t)mem, size);
1380 }
1381 
1382 /*
1383  * Frees pcpu zone allocations
1384  *
1385  * Arguments:
1386  *	mem   A pointer to the memory to be freed
1387  *	size  The size of the memory being freed
1388  *	flags The original p->us_flags field
1389  *
1390  * Returns:
1391  *	Nothing
1392  */
1393 static void
1394 pcpu_page_free(void *mem, vm_size_t size, uint8_t flags)
1395 {
1396 	vm_offset_t sva, curva;
1397 	vm_paddr_t paddr;
1398 	vm_page_t m;
1399 
1400 	MPASS(size == (mp_maxid+1)*PAGE_SIZE);
1401 	sva = (vm_offset_t)mem;
1402 	for (curva = sva; curva < sva + size; curva += PAGE_SIZE) {
1403 		paddr = pmap_kextract(curva);
1404 		m = PHYS_TO_VM_PAGE(paddr);
1405 		vm_page_unwire_noq(m);
1406 		vm_page_free(m);
1407 	}
1408 	pmap_qremove(sva, size >> PAGE_SHIFT);
1409 	kva_free(sva, size);
1410 }
1411 
1413 /*
1414  * Zero fill initializer
1415  *
1416  * Arguments/Returns follow uma_init specifications
1417  */
1418 static int
1419 zero_init(void *mem, int size, int flags)
1420 {
1421 	bzero(mem, size);
1422 	return (0);
1423 }
1424 
1425 /*
1426  * Finish creating a small uma keg.  This calculates ipers and the keg size.
1427  *
1428  * Arguments
1429  *	keg  The keg we should initialize
1430  *
1431  * Returns
1432  *	Nothing
1433  */
1434 static void
1435 keg_small_init(uma_keg_t keg)
1436 {
1437 	u_int rsize;
1438 	u_int memused;
1439 	u_int wastedspace;
1440 	u_int shsize;
1441 	u_int slabsize;
1442 
1443 	if (keg->uk_flags & UMA_ZONE_PCPU) {
1444 		u_int ncpus = (mp_maxid + 1) ? (mp_maxid + 1) : MAXCPU;
1445 
1446 		slabsize = UMA_PCPU_ALLOC_SIZE;
1447 		keg->uk_ppera = ncpus;
1448 	} else {
1449 		slabsize = UMA_SLAB_SIZE;
1450 		keg->uk_ppera = 1;
1451 	}
1452 
1453 	/*
1454 	 * Calculate the size of each allocation (rsize) according to
1455 	 * alignment.  If the requested size is smaller than we have
1456 	 * allocation bits for, we round it up.
1457 	 */
1458 	rsize = keg->uk_size;
1459 	if (rsize < slabsize / SLAB_SETSIZE)
1460 		rsize = slabsize / SLAB_SETSIZE;
1461 	if (rsize & keg->uk_align)
1462 		rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1);
1463 	keg->uk_rsize = rsize;
1464 
1465 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0 ||
1466 	    keg->uk_rsize < UMA_PCPU_ALLOC_SIZE,
1467 	    ("%s: size %u too large", __func__, keg->uk_rsize));
1468 
1469 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
1470 		shsize = 0;
1471 	else
1472 		shsize = SIZEOF_UMA_SLAB;
1473 
1474 	if (rsize <= slabsize - shsize)
1475 		keg->uk_ipers = (slabsize - shsize) / rsize;
1476 	else {
1477 		/* Handle special case when we have 1 item per slab, so
1478 		 * alignment requirement can be relaxed. */
1479 		KASSERT(keg->uk_size <= slabsize - shsize,
1480 		    ("%s: size %u greater than slab", __func__, keg->uk_size));
1481 		keg->uk_ipers = 1;
1482 	}
1483 	KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
1484 	    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
1485 
1486 	memused = keg->uk_ipers * rsize + shsize;
1487 	wastedspace = slabsize - memused;
1488 
1489 	/*
1490 	 * We can't do OFFPAGE if we're internal or if we've been
1491 	 * asked not to go to the VM for buckets.  If we did, we
1492 	 * might end up going to the VM for slabs, which we do not
1493 	 * want to do if we're UMA_ZFLAG_CACHEONLY as a result
1494 	 * of UMA_ZONE_VM, which clearly forbids it.
1495 	 */
1496 	if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
1497 	    (keg->uk_flags & UMA_ZFLAG_CACHEONLY))
1498 		return;
1499 
1500 	/*
1501 	 * See if using an OFFPAGE slab will limit our waste.  Only do
1502 	 * this if it permits more items per-slab.
1503 	 *
1504 	 * XXX We could try growing slabsize to limit max waste as well.
1505 	 * Historically this was not done because the VM could not
1506 	 * efficiently handle contiguous allocations.
1507 	 */
1508 	if ((wastedspace >= slabsize / UMA_MAX_WASTE) &&
1509 	    (keg->uk_ipers < (slabsize / keg->uk_rsize))) {
1510 		keg->uk_ipers = slabsize / keg->uk_rsize;
1511 		KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
1512 		    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
1513 		CTR6(KTR_UMA, "UMA decided we need offpage slab headers for "
1514 		    "keg: %s(%p), calculated wastedspace = %d, "
1515 		    "maximum wasted space allowed = %d, "
1516 		    "calculated ipers = %d, "
1517 		    "new wasted space = %d\n", keg->uk_name, keg, wastedspace,
1518 		    slabsize / UMA_MAX_WASTE, keg->uk_ipers,
1519 		    slabsize - keg->uk_ipers * keg->uk_rsize);
1520 		keg->uk_flags |= UMA_ZONE_OFFPAGE;
1521 	}
1522 
1523 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
1524 	    (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1525 		keg->uk_flags |= UMA_ZONE_HASH;
1526 }
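
/*
 * A worked example of the sizing above, with assumed numbers: 4KB
 * slabs, a 256-byte pointer-aligned item, an on-page header of
 * SIZEOF_UMA_SLAB == 48 bytes, and UMA_MAX_WASTE == 10:
 *
 *	rsize       = 256			(already aligned)
 *	uk_ipers    = (4096 - 48) / 256	= 15
 *	memused     = 15 * 256 + 48	= 3888
 *	wastedspace = 4096 - 3888	= 208
 *
 * 208 is below the 4096 / 10 = 409 waste threshold, so this keg keeps
 * its slab header on-page; OFFPAGE would only be chosen if it also
 * raised uk_ipers.
 */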
1527 
1528 /*
1529  * Finish creating a large (> UMA_SLAB_SIZE) uma keg.  Just give in and do
1530  * OFFPAGE for now.  When I can allow for more dynamic slab sizes this will be
1531  * more complicated.
1532  *
1533  * Arguments
1534  *	keg  The keg we should initialize
1535  *
1536  * Returns
1537  *	Nothing
1538  */
1539 static void
1540 keg_large_init(uma_keg_t keg)
1541 {
1542 
1543 	KASSERT(keg != NULL, ("Keg is null in keg_large_init"));
1544 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
1545 	    ("%s: Cannot large-init a UMA_ZONE_PCPU keg", __func__));
1546 
1547 	keg->uk_ppera = howmany(keg->uk_size, PAGE_SIZE);
1548 	keg->uk_ipers = 1;
1549 	keg->uk_rsize = keg->uk_size;
1550 
1551 	/* Check whether we have enough space to not do OFFPAGE. */
1552 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) == 0 &&
1553 	    PAGE_SIZE * keg->uk_ppera - keg->uk_rsize < SIZEOF_UMA_SLAB) {
1554 		/*
1555 		 * We can't do OFFPAGE if we're internal, in which case
1556 		 * we need an extra page per allocation to contain the
1557 		 * slab header.
1558 		 */
1559 		if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) == 0)
1560 			keg->uk_flags |= UMA_ZONE_OFFPAGE;
1561 		else
1562 			keg->uk_ppera++;
1563 	}
1564 
1565 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
1566 	    (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1567 		keg->uk_flags |= UMA_ZONE_HASH;
1568 }
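
/*
 * Example of the arithmetic above (4KB pages assumed): a 5000-byte
 * item yields uk_ppera = howmany(5000, 4096) = 2 and uk_ipers = 1,
 * leaving 8192 - 5000 = 3192 bytes of tail room, comfortably enough
 * for the slab header.  An 8150-byte item leaves only 42 bytes, so if
 * SIZEOF_UMA_SLAB exceeds that the keg goes OFFPAGE (or, if internal,
 * grows uk_ppera by one instead).
 */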
1569 
1570 static void
1571 keg_cachespread_init(uma_keg_t keg)
1572 {
1573 	int alignsize;
1574 	int trailer;
1575 	int pages;
1576 	int rsize;
1577 
1578 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
1579 	    ("%s: Cannot cachespread-init a UMA_ZONE_PCPU keg", __func__));
1580 
1581 	alignsize = keg->uk_align + 1;
1582 	rsize = keg->uk_size;
1583 	/*
1584 	 * We want one item to start on every align boundary in a page.  To
1585 	 * do this we will span pages.  We will also extend the item by the
1586 	 * size of align if it is an even multiple of align.  Otherwise, it
1587 	 * would fall on the same boundary every time.
1588 	 */
1589 	if (rsize & keg->uk_align)
1590 		rsize = (rsize & ~keg->uk_align) + alignsize;
1591 	if ((rsize & alignsize) == 0)
1592 		rsize += alignsize;
1593 	trailer = rsize - keg->uk_size;
1594 	pages = (rsize * (PAGE_SIZE / alignsize)) / PAGE_SIZE;
1595 	pages = MIN(pages, (128 * 1024) / PAGE_SIZE);
1596 	keg->uk_rsize = rsize;
1597 	keg->uk_ppera = pages;
1598 	keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize;
1599 	keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
1600 	KASSERT(keg->uk_ipers <= SLAB_SETSIZE,
1601 	    ("%s: keg->uk_ipers too high(%d) increase max_ipers", __func__,
1602 	    keg->uk_ipers));
1603 }
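
/*
 * A worked example of the spread, with assumed numbers (4KB pages and
 * 64-byte cache-line alignment): a 256-byte item is an even multiple
 * of the alignment, so rsize becomes 256 + 64 = 320; then
 *
 *	pages    = (320 * (4096 / 64)) / 4096	= 5
 *	uk_ipers = (5 * 4096 + 64) / 320	= 64
 *
 * and successive items start on successive cache-line boundaries,
 * cycling through every boundary offset within a page.
 */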
1604 
1605 /*
1606  * Keg header ctor.  This initializes all fields, locks, etc., and inserts
1607  * the keg onto the global keg list.
1608  *
1609  * Arguments/Returns follow uma_ctor specifications
1610  *	udata  Actually uma_kctor_args
1611  */
1612 static int
1613 keg_ctor(void *mem, int size, void *udata, int flags)
1614 {
1615 	struct uma_kctor_args *arg = udata;
1616 	uma_keg_t keg = mem;
1617 	uma_zone_t zone;
1618 
1619 	bzero(keg, size);
1620 	keg->uk_size = arg->size;
1621 	keg->uk_init = arg->uminit;
1622 	keg->uk_fini = arg->fini;
1623 	keg->uk_align = arg->align;
1624 	keg->uk_free = 0;
1625 	keg->uk_reserve = 0;
1626 	keg->uk_pages = 0;
1627 	keg->uk_flags = arg->flags;
1628 	keg->uk_slabzone = NULL;
1629 
1630 	/*
1631 	 * We use a global round-robin policy by default.  Zones with
1632 	 * UMA_ZONE_NUMA set will use first-touch instead, in which case the
1633 	 * iterator is never run.
1634 	 */
1635 	keg->uk_dr.dr_policy = DOMAINSET_RR();
1636 	keg->uk_dr.dr_iter = 0;
1637 
1638 	/*
1639 	 * The master zone is passed to us at keg-creation time.
1640 	 */
1641 	zone = arg->zone;
1642 	keg->uk_name = zone->uz_name;
1643 
1644 	if (arg->flags & UMA_ZONE_VM)
1645 		keg->uk_flags |= UMA_ZFLAG_CACHEONLY;
1646 
1647 	if (arg->flags & UMA_ZONE_ZINIT)
1648 		keg->uk_init = zero_init;
1649 
1650 	if (arg->flags & UMA_ZONE_MALLOC)
1651 		keg->uk_flags |= UMA_ZONE_VTOSLAB;
1652 
1653 	if (arg->flags & UMA_ZONE_PCPU)
1654 #ifdef SMP
1655 		keg->uk_flags |= UMA_ZONE_OFFPAGE;
1656 #else
1657 		keg->uk_flags &= ~UMA_ZONE_PCPU;
1658 #endif
1659 
1660 	if (keg->uk_flags & UMA_ZONE_CACHESPREAD) {
1661 		keg_cachespread_init(keg);
1662 	} else {
1663 		if (keg->uk_size > UMA_SLAB_SPACE)
1664 			keg_large_init(keg);
1665 		else
1666 			keg_small_init(keg);
1667 	}
1668 
1669 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
1670 		keg->uk_slabzone = slabzone;
1671 
1672 	/*
1673 	 * If we haven't booted yet we need allocations to go through the
1674 	 * startup cache until the vm is ready.
1675 	 */
1676 	if (booted < BOOT_PAGEALLOC)
1677 		keg->uk_allocf = startup_alloc;
1678 #ifdef UMA_MD_SMALL_ALLOC
1679 	else if (keg->uk_ppera == 1)
1680 		keg->uk_allocf = uma_small_alloc;
1681 #endif
1682 	else if (keg->uk_flags & UMA_ZONE_PCPU)
1683 		keg->uk_allocf = pcpu_page_alloc;
1684 	else
1685 		keg->uk_allocf = page_alloc;
1686 #ifdef UMA_MD_SMALL_ALLOC
1687 	if (keg->uk_ppera == 1)
1688 		keg->uk_freef = uma_small_free;
1689 	else
1690 #endif
1691 	if (keg->uk_flags & UMA_ZONE_PCPU)
1692 		keg->uk_freef = pcpu_page_free;
1693 	else
1694 		keg->uk_freef = page_free;
1695 
1696 	/*
1697 	 * Initialize keg's lock
1698 	 */
1699 	KEG_LOCK_INIT(keg, (arg->flags & UMA_ZONE_MTXCLASS));
1700 
1701 	/*
1702 	 * If we're putting the slab header in the actual page we need to
1703 	 * figure out where in each page it goes.  See SIZEOF_UMA_SLAB
1704 	 * macro definition.
1705 	 */
1706 	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) {
1707 		keg->uk_pgoff = (PAGE_SIZE * keg->uk_ppera) - SIZEOF_UMA_SLAB;
1708 		/*
1709 		 * The only way the following is possible is if our
1710 		 * UMA_ALIGN_PTR adjustments have made us bigger than
1711 		 * UMA_SLAB_SIZE.  I haven't checked whether this is
1712 		 * mathematically possible for all cases, so we make
1713 		 * sure here anyway.
1714 		 */
1715 		KASSERT(keg->uk_pgoff + sizeof(struct uma_slab) <=
1716 		    PAGE_SIZE * keg->uk_ppera,
1717 		    ("zone %s ipers %d rsize %d size %d slab won't fit",
1718 		    zone->uz_name, keg->uk_ipers, keg->uk_rsize, keg->uk_size));
1719 	}
1720 
1721 	if (keg->uk_flags & UMA_ZONE_HASH)
1722 		hash_alloc(&keg->uk_hash, 0);
1723 
1724 	CTR5(KTR_UMA, "keg_ctor %p zone %s(%p) out %d free %d\n",
1725 	    keg, zone->uz_name, zone,
1726 	    (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free,
1727 	    keg->uk_free);
1728 
1729 	LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);
1730 
1731 	rw_wlock(&uma_rwlock);
1732 	LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
1733 	rw_wunlock(&uma_rwlock);
1734 	return (0);
1735 }
1736 
1737 static void
1738 zone_alloc_counters(uma_zone_t zone)
1739 {
1740 
1741 	zone->uz_allocs = counter_u64_alloc(M_WAITOK);
1742 	zone->uz_frees = counter_u64_alloc(M_WAITOK);
1743 	zone->uz_fails = counter_u64_alloc(M_WAITOK);
1744 }
1745 
1746 /*
1747  * Zone header ctor.  This initializes all fields, locks, etc.
1748  *
1749  * Arguments/Returns follow uma_ctor specifications
1750  *	udata  Actually uma_zctor_args
1751  */
1752 static int
1753 zone_ctor(void *mem, int size, void *udata, int flags)
1754 {
1755 	struct uma_zctor_args *arg = udata;
1756 	uma_zone_t zone = mem;
1757 	uma_zone_t z;
1758 	uma_keg_t keg;
1759 
1760 	bzero(zone, size);
1761 	zone->uz_name = arg->name;
1762 	zone->uz_ctor = arg->ctor;
1763 	zone->uz_dtor = arg->dtor;
1764 	zone->uz_init = NULL;
1765 	zone->uz_fini = NULL;
1766 	zone->uz_sleeps = 0;
1767 	zone->uz_xdomain = 0;
1768 	zone->uz_count = 0;
1769 	zone->uz_count_min = 0;
1770 	zone->uz_count_max = BUCKET_MAX;
1771 	zone->uz_flags = 0;
1772 	zone->uz_warning = NULL;
1773 	/* The domain structures follow the cpu structures. */
1774 	zone->uz_domain = (struct uma_zone_domain *)&zone->uz_cpu[mp_ncpus];
1775 	zone->uz_bkt_max = ULONG_MAX;
1776 	timevalclear(&zone->uz_ratecheck);
1777 
1778 	if (__predict_true(booted == BOOT_RUNNING))
1779 		zone_alloc_counters(zone);
1780 	else {
1781 		zone->uz_allocs = EARLY_COUNTER;
1782 		zone->uz_frees = EARLY_COUNTER;
1783 		zone->uz_fails = EARLY_COUNTER;
1784 	}
1785 
1786 	/*
1787 	 * This is a pure cache zone, no kegs.
1788 	 */
1789 	if (arg->import) {
1790 		if (arg->flags & UMA_ZONE_VM)
1791 			arg->flags |= UMA_ZFLAG_CACHEONLY;
1792 		zone->uz_flags = arg->flags;
1793 		zone->uz_size = arg->size;
1794 		zone->uz_import = arg->import;
1795 		zone->uz_release = arg->release;
1796 		zone->uz_arg = arg->arg;
1797 		zone->uz_lockptr = &zone->uz_lock;
1798 		ZONE_LOCK_INIT(zone, (arg->flags & UMA_ZONE_MTXCLASS));
1799 		rw_wlock(&uma_rwlock);
1800 		LIST_INSERT_HEAD(&uma_cachezones, zone, uz_link);
1801 		rw_wunlock(&uma_rwlock);
1802 		goto out;
1803 	}
1804 
1805 	/*
1806 	 * Use the regular zone/keg/slab allocator.
1807 	 */
1808 	zone->uz_import = (uma_import)zone_import;
1809 	zone->uz_release = (uma_release)zone_release;
1810 	zone->uz_arg = zone;
1811 	keg = arg->keg;
1812 
1813 	if (arg->flags & UMA_ZONE_SECONDARY) {
1814 		KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
1815 		zone->uz_init = arg->uminit;
1816 		zone->uz_fini = arg->fini;
1817 		zone->uz_lockptr = &keg->uk_lock;
1818 		zone->uz_flags |= UMA_ZONE_SECONDARY;
1819 		rw_wlock(&uma_rwlock);
1820 		ZONE_LOCK(zone);
1821 		LIST_FOREACH(z, &keg->uk_zones, uz_link) {
1822 			if (LIST_NEXT(z, uz_link) == NULL) {
1823 				LIST_INSERT_AFTER(z, zone, uz_link);
1824 				break;
1825 			}
1826 		}
1827 		ZONE_UNLOCK(zone);
1828 		rw_wunlock(&uma_rwlock);
1829 	} else if (keg == NULL) {
1830 		if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
1831 		    arg->align, arg->flags)) == NULL)
1832 			return (ENOMEM);
1833 	} else {
1834 		struct uma_kctor_args karg;
1835 		int error;
1836 
1837 		/* We should only be here from uma_startup() */
1838 		karg.size = arg->size;
1839 		karg.uminit = arg->uminit;
1840 		karg.fini = arg->fini;
1841 		karg.align = arg->align;
1842 		karg.flags = arg->flags;
1843 		karg.zone = zone;
1844 		error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
1845 		    flags);
1846 		if (error)
1847 			return (error);
1848 	}
1849 
1850 	zone->uz_keg = keg;
1851 	zone->uz_size = keg->uk_size;
1852 	zone->uz_flags |= (keg->uk_flags &
1853 	    (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT));
1854 
1855 	/*
1856 	 * Some internal zones don't have room allocated for the per-cpu
1857 	 * caches.  If we're internal, bail out here.
1858 	 */
1859 	if (keg->uk_flags & UMA_ZFLAG_INTERNAL) {
1860 		KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0,
1861 		    ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
1862 		return (0);
1863 	}
1864 
1865 out:
1866 	KASSERT((arg->flags & (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET)) !=
1867 	    (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET),
1868 	    ("Invalid zone flag combination"));
1869 	if ((arg->flags & UMA_ZONE_MAXBUCKET) != 0) {
1870 		zone->uz_count = BUCKET_MAX;
1871 	} else if ((arg->flags & UMA_ZONE_MINBUCKET) != 0) {
1872 		zone->uz_count = BUCKET_MIN;
1873 		zone->uz_count_max = BUCKET_MIN;
1874 	} else if ((arg->flags & UMA_ZONE_NOBUCKET) != 0)
1875 		zone->uz_count = 0;
1876 	else
1877 		zone->uz_count = bucket_select(zone->uz_size);
1878 	zone->uz_count_min = zone->uz_count;
1879 
1880 	return (0);
1881 }
1882 
1883 /*
1884  * Keg header dtor.  This frees all data, destroys locks, frees the hash
1885  * table and removes the keg from the global list.
1886  *
1887  * Arguments/Returns follow uma_dtor specifications
1888  *	udata  unused
1889  */
1890 static void
1891 keg_dtor(void *arg, int size, void *udata)
1892 {
1893 	uma_keg_t keg;
1894 
1895 	keg = (uma_keg_t)arg;
1896 	KEG_LOCK(keg);
1897 	if (keg->uk_free != 0) {
1898 		printf("Freed UMA keg (%s) was not empty (%d items). "
1899 		    "Lost %d pages of memory.\n",
1900 		    keg->uk_name ? keg->uk_name : "",
1901 		    keg->uk_free, keg->uk_pages);
1902 	}
1903 	KEG_UNLOCK(keg);
1904 
1905 	hash_free(&keg->uk_hash);
1906 
1907 	KEG_LOCK_FINI(keg);
1908 }
1909 
1910 /*
1911  * Zone header dtor.
1912  *
1913  * Arguments/Returns follow uma_dtor specifications
1914  *	udata  unused
1915  */
1916 static void
1917 zone_dtor(void *arg, int size, void *udata)
1918 {
1919 	uma_zone_t zone;
1920 	uma_keg_t keg;
1921 
1922 	zone = (uma_zone_t)arg;
1923 
1924 	if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
1925 		cache_drain(zone);
1926 
1927 	rw_wlock(&uma_rwlock);
1928 	LIST_REMOVE(zone, uz_link);
1929 	rw_wunlock(&uma_rwlock);
1930 	/*
1931 	 * XXX there are races here where the zone can be drained but
1932 	 * the zone lock released, and the zone then refilled before we
1933 	 * remove it... we don't care for now.
1935 	 */
1936 	zone_drain_wait(zone, M_WAITOK);
1937 	/*
1938 	 * We only destroy kegs from non-secondary/non-cache zones.
1939 	 */
1940 	if ((zone->uz_flags & (UMA_ZONE_SECONDARY | UMA_ZFLAG_CACHE)) == 0) {
1941 		keg = zone->uz_keg;
1942 		rw_wlock(&uma_rwlock);
1943 		LIST_REMOVE(keg, uk_link);
1944 		rw_wunlock(&uma_rwlock);
1945 		zone_free_item(kegs, keg, NULL, SKIP_NONE);
1946 	}
1947 	counter_u64_free(zone->uz_allocs);
1948 	counter_u64_free(zone->uz_frees);
1949 	counter_u64_free(zone->uz_fails);
1950 	if (zone->uz_lockptr == &zone->uz_lock)
1951 		ZONE_LOCK_FINI(zone);
1952 }
1953 
1954 /*
1955  * Traverses every zone in the system and calls a callback
1956  *
1957  * Arguments:
1958  *	zfunc  A pointer to a function which accepts a zone
1959  *		as an argument.
1960  *
1961  * Returns:
1962  *	Nothing
1963  */
1964 static void
1965 zone_foreach(void (*zfunc)(uma_zone_t))
1966 {
1967 	uma_keg_t keg;
1968 	uma_zone_t zone;
1969 
1970 	/*
1971 	 * Before BOOT_RUNNING we are guaranteed to be single
1972 	 * threaded, so locking isn't needed. Startup functions
1973 	 * are allowed to use M_WAITOK.
1974 	 */
1975 	if (__predict_true(booted == BOOT_RUNNING))
1976 		rw_rlock(&uma_rwlock);
1977 	LIST_FOREACH(keg, &uma_kegs, uk_link) {
1978 		LIST_FOREACH(zone, &keg->uk_zones, uz_link)
1979 			zfunc(zone);
1980 	}
1981 	if (__predict_true(booted == BOOT_RUNNING))
1982 		rw_runlock(&uma_rwlock);
1983 }
1984 
1985 /*
1986  * Count how many pages we need to bootstrap.  The VM supplies its
1987  * need for early zones in the argument; we add our own zones, which
1988  * consist of: UMA Slabs, UMA Hash and 9 Bucket zones.  The zone of
1989  * zones and zone of kegs are accounted for separately.
1990  */
1991 #define	UMA_BOOT_ZONES	11
1992 /* Zone of zones and zone of kegs have arbitrary alignment. */
1993 #define	UMA_BOOT_ALIGN	32
1994 static int zsize, ksize;
1995 int
1996 uma_startup_count(int vm_zones)
1997 {
1998 	int zones, pages;
1999 
2000 	ksize = sizeof(struct uma_keg) +
2001 	    (sizeof(struct uma_domain) * vm_ndomains);
2002 	zsize = sizeof(struct uma_zone) +
2003 	    (sizeof(struct uma_cache) * (mp_maxid + 1)) +
2004 	    (sizeof(struct uma_zone_domain) * vm_ndomains);
2005 
2006 	/*
2007 	 * Memory for the zone of kegs and its keg,
2008 	 * and for zone of zones.
2009 	 */
2010 	pages = howmany(roundup(zsize, CACHE_LINE_SIZE) * 2 +
2011 	    roundup(ksize, CACHE_LINE_SIZE), PAGE_SIZE);
2012 
2013 #ifdef	UMA_MD_SMALL_ALLOC
2014 	zones = UMA_BOOT_ZONES;
2015 #else
2016 	zones = UMA_BOOT_ZONES + vm_zones;
2017 	vm_zones = 0;
2018 #endif
2019 
2020 	/* Memory for the rest of startup zones, UMA and VM, ... */
2021 	if (zsize > UMA_SLAB_SPACE) {
2022 		/* See keg_large_init(). */
2023 		u_int ppera;
2024 
2025 		ppera = howmany(roundup2(zsize, UMA_BOOT_ALIGN), PAGE_SIZE);
2026 		if (PAGE_SIZE * ppera - roundup2(zsize, UMA_BOOT_ALIGN) <
2027 		    SIZEOF_UMA_SLAB)
2028 			ppera++;
2029 		pages += (zones + vm_zones) * ppera;
2030 	} else if (roundup2(zsize, UMA_BOOT_ALIGN) > UMA_SLAB_SPACE)
2031 		/* See keg_small_init() special case for uk_ppera = 1. */
2032 		pages += zones;
2033 	else
2034 		pages += howmany(zones,
2035 		    UMA_SLAB_SPACE / roundup2(zsize, UMA_BOOT_ALIGN));
2036 
2037 	/* ... and their kegs. Note that zone of zones allocates a keg! */
2038 	pages += howmany(zones + 1,
2039 	    UMA_SLAB_SPACE / roundup2(ksize, UMA_BOOT_ALIGN));
2040 
2041 	/*
2042 	 * Most of the startup zones are not going to be offpage, which is
2043 	 * why we use UMA_SLAB_SPACE instead of UMA_SLAB_SIZE in all of the
2044 	 * calculations above.  Some large bucket zones will be offpage and
2045 	 * thus will allocate hashes.  We take the conservative approach
2046 	 * and assume that all zones may allocate a hash.  This may give
2047 	 * us some positive inaccuracy, usually an extra single page.
2048 	 */
2049 	pages += howmany(zones, UMA_SLAB_SPACE /
2050 	    (sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT));
2051 
2052 	return (pages);
2053 }
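/*
 * Worked example of the sizing above (illustrative figures only, not
 * any real platform): with PAGE_SIZE = 4096, roundup2(zsize,
 * UMA_BOOT_ALIGN) = 2048 and UMA_SLAB_SPACE just under a page, only
 * one zone header fits per slab, so the startup zones cost one page
 * apiece; with roundup2(ksize, UMA_BOOT_ALIGN) = 384, ten kegs fit
 * per slab, so the kegs of 11 zones plus the extra keg for the zone
 * of zones need howmany(12, 10) = 2 pages.
 */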
2054 
2055 void
2056 uma_startup(void *mem, int npages)
2057 {
2058 	struct uma_zctor_args args;
2059 	uma_keg_t masterkeg;
2060 	uintptr_t m;
2061 
2062 #ifdef DIAGNOSTIC
2063 	printf("Entering %s with %d boot pages configured\n", __func__, npages);
2064 #endif
2065 
2066 	rw_init(&uma_rwlock, "UMA lock");
2067 
2068 	/* Use bootpages memory for the zone of zones and zone of kegs. */
2069 	m = (uintptr_t)mem;
2070 	zones = (uma_zone_t)m;
2071 	m += roundup(zsize, CACHE_LINE_SIZE);
2072 	kegs = (uma_zone_t)m;
2073 	m += roundup(zsize, CACHE_LINE_SIZE);
2074 	masterkeg = (uma_keg_t)m;
2075 	m += roundup(ksize, CACHE_LINE_SIZE);
2076 	m = roundup(m, PAGE_SIZE);
2077 	npages -= (m - (uintptr_t)mem) / PAGE_SIZE;
2078 	mem = (void *)m;
2079 
2080 	/* "manually" create the initial zone */
2081 	memset(&args, 0, sizeof(args));
2082 	args.name = "UMA Kegs";
2083 	args.size = ksize;
2084 	args.ctor = keg_ctor;
2085 	args.dtor = keg_dtor;
2086 	args.uminit = zero_init;
2087 	args.fini = NULL;
2088 	args.keg = masterkeg;
2089 	args.align = UMA_BOOT_ALIGN - 1;
2090 	args.flags = UMA_ZFLAG_INTERNAL;
2091 	zone_ctor(kegs, zsize, &args, M_WAITOK);
2092 
2093 	bootmem = mem;
2094 	boot_pages = npages;
2095 
2096 	args.name = "UMA Zones";
2097 	args.size = zsize;
2098 	args.ctor = zone_ctor;
2099 	args.dtor = zone_dtor;
2100 	args.uminit = zero_init;
2101 	args.fini = NULL;
2102 	args.keg = NULL;
2103 	args.align = UMA_BOOT_ALIGN - 1;
2104 	args.flags = UMA_ZFLAG_INTERNAL;
2105 	zone_ctor(zones, zsize, &args, M_WAITOK);
2106 
2107 	/* Now make a zone for slab headers */
2108 	slabzone = uma_zcreate("UMA Slabs", sizeof(struct uma_slab),
2109 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
2112 
2113 	hashzone = uma_zcreate("UMA Hash",
2114 	    sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
2115 	    NULL, NULL, NULL, NULL,
2116 	    UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
2117 
2118 	bucket_init();
2119 
2120 	booted = BOOT_STRAPPED;
2121 }
2122 
2123 void
2124 uma_startup1(void)
2125 {
2126 
2127 #ifdef DIAGNOSTIC
2128 	printf("Entering %s with %d boot pages left\n", __func__, boot_pages);
2129 #endif
2130 	booted = BOOT_PAGEALLOC;
2131 }
2132 
2133 void
2134 uma_startup2(void)
2135 {
2136 
2137 #ifdef DIAGNOSTIC
2138 	printf("Entering %s with %d boot pages left\n", __func__, boot_pages);
2139 #endif
2140 	booted = BOOT_BUCKETS;
2141 	sx_init(&uma_drain_lock, "umadrain");
2142 	bucket_enable();
2143 }
2144 
2145 /*
2146  * Allocate counters for zones created before counters were available,
2147  * initialize our callout handle and mark the allocator fully booted.
2148  */
2149 static void
2150 uma_startup3(void)
2151 {
2152 
2153 #ifdef INVARIANTS
2154 	TUNABLE_INT_FETCH("vm.debug.divisor", &dbg_divisor);
2155 	uma_dbg_cnt = counter_u64_alloc(M_WAITOK);
2156 	uma_skip_cnt = counter_u64_alloc(M_WAITOK);
2157 #endif
2158 	zone_foreach(zone_alloc_counters);
2159 	callout_init(&uma_callout, 1);
2160 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
2161 	booted = BOOT_RUNNING;
2162 }
2163 
2164 static uma_keg_t
2165 uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
2166 		int align, uint32_t flags)
2167 {
2168 	struct uma_kctor_args args;
2169 
2170 	args.size = size;
2171 	args.uminit = uminit;
2172 	args.fini = fini;
2173 	args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align;
2174 	args.flags = flags;
2175 	args.zone = zone;
2176 	return (zone_alloc_item(kegs, &args, UMA_ANYDOMAIN, M_WAITOK));
2177 }
2178 
2179 /* Public functions */
2180 /* See uma.h */
2181 void
2182 uma_set_align(int align)
2183 {
2184 
2185 	if (align != UMA_ALIGN_CACHE)
2186 		uma_align_cache = align;
2187 }
2188 
2189 /* See uma.h */
2190 uma_zone_t
2191 uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
2192 		uma_init uminit, uma_fini fini, int align, uint32_t flags)
2194 {
2195 	struct uma_zctor_args args;
2196 	uma_zone_t res;
2197 	bool locked;
2198 
2199 	KASSERT(powerof2(align + 1), ("invalid zone alignment %d for \"%s\"",
2200 	    align, name));
2201 
2202 	/* Sets all zones to a first-touch domain policy. */
2203 #ifdef UMA_FIRSTTOUCH
2204 	flags |= UMA_ZONE_NUMA;
2205 #endif
2206 
2207 	/* This stuff is essential for the zone ctor */
2208 	memset(&args, 0, sizeof(args));
2209 	args.name = name;
2210 	args.size = size;
2211 	args.ctor = ctor;
2212 	args.dtor = dtor;
2213 	args.uminit = uminit;
2214 	args.fini = fini;
2215 #ifdef  INVARIANTS
2216 	/*
2217 	 * If a zone is being created with an empty constructor and
2218 	 * destructor, pass UMA constructor/destructor which checks for
2219 	 * memory use after free.
2220 	 */
2221 	if ((!(flags & (UMA_ZONE_ZINIT | UMA_ZONE_NOFREE))) &&
2222 	    ctor == NULL && dtor == NULL && uminit == NULL && fini == NULL) {
2223 		args.ctor = trash_ctor;
2224 		args.dtor = trash_dtor;
2225 		args.uminit = trash_init;
2226 		args.fini = trash_fini;
2227 	}
2228 #endif
2229 	args.align = align;
2230 	args.flags = flags;
2231 	args.keg = NULL;
2232 
2233 	if (booted < BOOT_BUCKETS) {
2234 		locked = false;
2235 	} else {
2236 		sx_slock(&uma_drain_lock);
2237 		locked = true;
2238 	}
2239 	res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK);
2240 	if (locked)
2241 		sx_sunlock(&uma_drain_lock);
2242 	return (res);
2243 }
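/*
 * Illustrative usage sketch (hypothetical struct and zone names, not
 * part of UMA): a subsystem creates its zone once at initialization
 * time and then allocates and frees items through it:
 *
 *	static uma_zone_t foo_zone;
 *
 *	foo_zone = uma_zcreate("foo", sizeof(struct foo),
 *	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
 *	p = uma_zalloc(foo_zone, M_WAITOK | M_ZERO);
 *	...
 *	uma_zfree(foo_zone, p);
 */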
2244 
2245 /* See uma.h */
2246 uma_zone_t
2247 uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
2248 		    uma_init zinit, uma_fini zfini, uma_zone_t master)
2249 {
2250 	struct uma_zctor_args args;
2251 	uma_keg_t keg;
2252 	uma_zone_t res;
2253 	bool locked;
2254 
2255 	keg = master->uz_keg;
2256 	memset(&args, 0, sizeof(args));
2257 	args.name = name;
2258 	args.size = keg->uk_size;
2259 	args.ctor = ctor;
2260 	args.dtor = dtor;
2261 	args.uminit = zinit;
2262 	args.fini = zfini;
2263 	args.align = keg->uk_align;
2264 	args.flags = keg->uk_flags | UMA_ZONE_SECONDARY;
2265 	args.keg = keg;
2266 
2267 	if (booted < BOOT_BUCKETS) {
2268 		locked = false;
2269 	} else {
2270 		sx_slock(&uma_drain_lock);
2271 		locked = true;
2272 	}
2273 	/* XXX Attaches only one keg of potentially many. */
2274 	res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK);
2275 	if (locked)
2276 		sx_sunlock(&uma_drain_lock);
2277 	return (res);
2278 }
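/*
 * Illustrative sketch (hypothetical names): a secondary zone shares
 * the master zone's keg, and therefore its slabs, item size and
 * alignment, while layering its own zone-level init/fini on top:
 *
 *	bar_zone = uma_zsecond_create("bar", NULL, NULL,
 *	    bar_zinit, bar_zfini, foo_zone);
 */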
2279 
2280 /* See uma.h */
2281 uma_zone_t
2282 uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor,
2283 		    uma_init zinit, uma_fini zfini, uma_import zimport,
2284 		    uma_release zrelease, void *arg, int flags)
2285 {
2286 	struct uma_zctor_args args;
2287 
2288 	memset(&args, 0, sizeof(args));
2289 	args.name = name;
2290 	args.size = size;
2291 	args.ctor = ctor;
2292 	args.dtor = dtor;
2293 	args.uminit = zinit;
2294 	args.fini = zfini;
2295 	args.import = zimport;
2296 	args.release = zrelease;
2297 	args.arg = arg;
2298 	args.align = 0;
2299 	args.flags = flags | UMA_ZFLAG_CACHE;
2300 
2301 	return (zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK));
2302 }
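/*
 * Illustrative sketch (hypothetical callbacks): a cache zone has no
 * keg; the caller instead supplies import/release routines that move
 * batches of items between a backing store and UMA's buckets:
 *
 *	zone = uma_zcache_create("baz", size, NULL, NULL, NULL, NULL,
 *	    baz_import, baz_release, baz_arg, 0);
 *
 * baz_import() fills an array with up to the requested count of item
 * pointers and returns how many it produced; baz_release() takes such
 * an array back.
 */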
2303 
2304 /* See uma.h */
2305 void
2306 uma_zdestroy(uma_zone_t zone)
2307 {
2308 
2309 	sx_slock(&uma_drain_lock);
2310 	zone_free_item(zones, zone, NULL, SKIP_NONE);
2311 	sx_sunlock(&uma_drain_lock);
2312 }
2313 
2314 void
2315 uma_zwait(uma_zone_t zone)
2316 {
2317 	void *item;
2318 
2319 	item = uma_zalloc_arg(zone, NULL, M_WAITOK);
2320 	uma_zfree(zone, item);
2321 }
2322 
2323 void *
2324 uma_zalloc_pcpu_arg(uma_zone_t zone, void *udata, int flags)
2325 {
2326 	void *item;
2327 #ifdef SMP
2328 	int i;
2329 
2330 	MPASS(zone->uz_flags & UMA_ZONE_PCPU);
2331 #endif
2332 	item = uma_zalloc_arg(zone, udata, flags & ~M_ZERO);
2333 	if (item != NULL && (flags & M_ZERO)) {
2334 #ifdef SMP
2335 		for (i = 0; i <= mp_maxid; i++)
2336 			bzero(zpcpu_get_cpu(item, i), zone->uz_size);
2337 #else
2338 		bzero(item, zone->uz_size);
2339 #endif
2340 	}
2341 	return (item);
2342 }
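/*
 * Illustrative sketch (hypothetical zone; uma_zalloc_pcpu() is
 * assumed to be the udata-less wrapper for the function above): a
 * pcpu zone hands back one copy of the item per CPU, and each CPU's
 * copy is reached through zpcpu_get().  For a per-CPU 64-bit counter:
 *
 *	zone = uma_zcreate("mycnt", sizeof(uint64_t), NULL, NULL,
 *	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_PCPU);
 *	base = uma_zalloc_pcpu(zone, M_WAITOK | M_ZERO);
 *	counter = zpcpu_get(base);
 *	(*counter)++;
 */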
2343 
2344 /*
2345  * A stub while both regular and pcpu cases are identical.
2346  */
2347 void
2348 uma_zfree_pcpu_arg(uma_zone_t zone, void *item, void *udata)
2349 {
2350 
2351 #ifdef SMP
2352 	MPASS(zone->uz_flags & UMA_ZONE_PCPU);
2353 #endif
2354 	uma_zfree_arg(zone, item, udata);
2355 }
2356 
2357 /* See uma.h */
2358 void *
2359 uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
2360 {
2361 	uma_zone_domain_t zdom;
2362 	uma_bucket_t bucket;
2363 	uma_cache_t cache;
2364 	void *item;
2365 	int cpu, domain, lockfail, maxbucket;
2366 #ifdef INVARIANTS
2367 	bool skipdbg;
2368 #endif
2369 
2370 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
2371 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
2372 
2373 	/* This is the fast path allocation */
2374 	CTR4(KTR_UMA, "uma_zalloc_arg thread %x zone %s(%p) flags %d",
2375 	    curthread, zone->uz_name, zone, flags);
2376 
2377 	if (flags & M_WAITOK) {
2378 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2379 		    "uma_zalloc_arg: zone \"%s\"", zone->uz_name);
2380 	}
2381 	KASSERT((flags & M_EXEC) == 0, ("uma_zalloc_arg: called with M_EXEC"));
2382 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
2383 	    ("uma_zalloc_arg: called with spinlock or critical section held"));
2384 	if (zone->uz_flags & UMA_ZONE_PCPU)
2385 		KASSERT((flags & M_ZERO) == 0, ("allocating from a pcpu zone "
2386 		    "with M_ZERO passed"));
2387 
2388 #ifdef DEBUG_MEMGUARD
2389 	if (memguard_cmp_zone(zone)) {
2390 		item = memguard_alloc(zone->uz_size, flags);
2391 		if (item != NULL) {
2392 			if (zone->uz_init != NULL &&
2393 			    zone->uz_init(item, zone->uz_size, flags) != 0)
2394 				return (NULL);
2395 			if (zone->uz_ctor != NULL &&
2396 			    zone->uz_ctor(item, zone->uz_size, udata,
2397 			    flags) != 0) {
2398 				if (zone->uz_fini != NULL)
2399 					zone->uz_fini(item, zone->uz_size);
2400 				return (NULL);
2401 			}
2401 			return (item);
2402 		}
2403 		/* This is unfortunate but should not be fatal. */
2404 	}
2405 #endif
2406 	/*
2407 	 * If possible, allocate from the per-CPU cache.  There are two
2408 	 * requirements for safe access to the per-CPU cache: (1) the thread
2409 	 * accessing the cache must not be preempted or yield during access,
2410 	 * and (2) the thread must not migrate CPUs without switching which
2411 	 * cache it accesses.  We rely on a critical section to prevent
2412 	 * preemption and migration.  We release the critical section in
2413 	 * order to acquire the zone mutex if we are unable to allocate from
2414 	 * the current cache; when we re-acquire the critical section, we
2415 	 * must detect and handle migration if it has occurred.
2416 	 */
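	/*
	 * In outline, the fast path below is (sketch only):
	 *
	 *	critical_enter();
	 *	cache = &zone->uz_cpu[curcpu];
	 *	... pop an item from cache->uc_allocbucket ...
	 *	critical_exit();
	 *
	 * Any path that drops the critical section re-reads curcpu and
	 * the cache pointer before touching the cache again.
	 */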
2417 zalloc_restart:
2418 	critical_enter();
2419 	cpu = curcpu;
2420 	cache = &zone->uz_cpu[cpu];
2421 
2422 zalloc_start:
2423 	bucket = cache->uc_allocbucket;
2424 	if (bucket != NULL && bucket->ub_cnt > 0) {
2425 		bucket->ub_cnt--;
2426 		item = bucket->ub_bucket[bucket->ub_cnt];
2427 #ifdef INVARIANTS
2428 		bucket->ub_bucket[bucket->ub_cnt] = NULL;
2429 #endif
2430 		KASSERT(item != NULL, ("uma_zalloc: Bucket pointer mangled."));
2431 		cache->uc_allocs++;
2432 		critical_exit();
2433 #ifdef INVARIANTS
2434 		skipdbg = uma_dbg_zskip(zone, item);
2435 #endif
2436 		if (zone->uz_ctor != NULL &&
2437 #ifdef INVARIANTS
2438 		    (!skipdbg || zone->uz_ctor != trash_ctor ||
2439 		    zone->uz_dtor != trash_dtor) &&
2440 #endif
2441 		    zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
2442 			counter_u64_add(zone->uz_fails, 1);
2443 			zone_free_item(zone, item, udata, SKIP_DTOR | SKIP_CNT);
2444 			return (NULL);
2445 		}
2446 #ifdef INVARIANTS
2447 		if (!skipdbg)
2448 			uma_dbg_alloc(zone, NULL, item);
2449 #endif
2450 		if (flags & M_ZERO)
2451 			uma_zero_item(item, zone);
2452 		return (item);
2453 	}
2454 
2455 	/*
2456 	 * We have run out of items in our alloc bucket.
2457 	 * See if we can switch with our free bucket.
2458 	 */
2459 	bucket = cache->uc_freebucket;
2460 	if (bucket != NULL && bucket->ub_cnt > 0) {
2461 		CTR2(KTR_UMA,
2462 		    "uma_zalloc: zone %s(%p) swapping empty alloc with free bucket",
2463 		    zone->uz_name, zone);
2464 		cache->uc_freebucket = cache->uc_allocbucket;
2465 		cache->uc_allocbucket = bucket;
2466 		goto zalloc_start;
2467 	}
2468 
2469 	/*
2470 	 * Discard any empty allocation bucket while we hold no locks.
2471 	 */
2472 	bucket = cache->uc_allocbucket;
2473 	cache->uc_allocbucket = NULL;
2474 	critical_exit();
2475 	if (bucket != NULL)
2476 		bucket_free(zone, bucket, udata);
2477 
2478 	/* Short-circuit for zones without buckets and low memory. */
2479 	if (zone->uz_count == 0 || bucketdisable) {
2480 		ZONE_LOCK(zone);
2481 		if (zone->uz_flags & UMA_ZONE_NUMA)
2482 			domain = PCPU_GET(domain);
2483 		else
2484 			domain = UMA_ANYDOMAIN;
2485 		goto zalloc_item;
2486 	}
2487 
2488 	/*
2489 	 * The attempt to retrieve the item from the per-CPU cache has failed, so
2490 	 * we must go back to the zone.  This requires the zone lock, so we
2491 	 * must drop the critical section, then re-acquire it when we go back
2492 	 * to the cache.  Since the critical section is released, we may be
2493 	 * preempted or migrate.  As such, make sure not to maintain any
2494 	 * thread-local state specific to the cache from prior to releasing
2495 	 * the critical section.
2496 	 */
2497 	lockfail = 0;
2498 	if (ZONE_TRYLOCK(zone) == 0) {
2499 		/* Record contention to size the buckets. */
2500 		ZONE_LOCK(zone);
2501 		lockfail = 1;
2502 	}
2503 	critical_enter();
2504 	cpu = curcpu;
2505 	cache = &zone->uz_cpu[cpu];
2506 
2507 	/* See if we lost the race to fill the cache. */
2508 	if (cache->uc_allocbucket != NULL) {
2509 		ZONE_UNLOCK(zone);
2510 		goto zalloc_start;
2511 	}
2512 
2513 	/*
2514 	 * Check the zone's cache of buckets.
2515 	 */
2516 	if (zone->uz_flags & UMA_ZONE_NUMA) {
2517 		domain = PCPU_GET(domain);
2518 		zdom = &zone->uz_domain[domain];
2519 	} else {
2520 		domain = UMA_ANYDOMAIN;
2521 		zdom = &zone->uz_domain[0];
2522 	}
2523 
2524 	if ((bucket = zone_try_fetch_bucket(zone, zdom, true)) != NULL) {
2525 		KASSERT(bucket->ub_cnt != 0,
2526 		    ("uma_zalloc_arg: Returning an empty bucket."));
2527 		cache->uc_allocbucket = bucket;
2528 		ZONE_UNLOCK(zone);
2529 		goto zalloc_start;
2530 	}
2531 	/* We are no longer associated with this CPU. */
2532 	critical_exit();
2533 
2534 	/*
2535 	 * We bump the uz count when the cache size is insufficient to
2536 	 * handle the working set.
2537 	 */
2538 	if (lockfail && zone->uz_count < zone->uz_count_max)
2539 		zone->uz_count++;
2540 
2541 	if (zone->uz_max_items > 0) {
2542 		if (zone->uz_items >= zone->uz_max_items)
2543 			goto zalloc_item;
2544 		maxbucket = MIN(zone->uz_count,
2545 		    zone->uz_max_items - zone->uz_items);
2546 		zone->uz_items += maxbucket;
2547 	} else
2548 		maxbucket = zone->uz_count;
2549 	ZONE_UNLOCK(zone);
2550 
2551 	/*
2552 	 * Now let's just fill a bucket and put it on the free list.  If
2553 	 * that works, we'll restart the allocation from the beginning and
2554 	 * it will use the just-filled bucket.
2555 	 */
2556 	bucket = zone_alloc_bucket(zone, udata, domain, flags, maxbucket);
2557 	CTR3(KTR_UMA, "uma_zalloc: zone %s(%p) bucket zone returned %p",
2558 	    zone->uz_name, zone, bucket);
2559 	ZONE_LOCK(zone);
2560 	if (bucket != NULL) {
2561 		if (zone->uz_max_items > 0 && bucket->ub_cnt < maxbucket) {
2562 			MPASS(zone->uz_items >= maxbucket - bucket->ub_cnt);
2563 			zone->uz_items -= maxbucket - bucket->ub_cnt;
2564 			if (zone->uz_sleepers > 0 &&
2565 			    zone->uz_items < zone->uz_max_items)
2566 				wakeup_one(zone);
2567 		}
2568 		critical_enter();
2569 		cpu = curcpu;
2570 		cache = &zone->uz_cpu[cpu];
2571 
2572 		/*
2573 		 * See if we lost the race or were migrated.  Cache the
2574 		 * initialized bucket to make this less likely or claim
2575 		 * the memory directly.
2576 		 */
2577 		if (cache->uc_allocbucket == NULL &&
2578 		    ((zone->uz_flags & UMA_ZONE_NUMA) == 0 ||
2579 		    domain == PCPU_GET(domain))) {
2580 			cache->uc_allocbucket = bucket;
2581 			zdom->uzd_imax += bucket->ub_cnt;
2582 		} else if (zone->uz_bkt_count >= zone->uz_bkt_max) {
2583 			critical_exit();
2584 			ZONE_UNLOCK(zone);
2585 			bucket_drain(zone, bucket);
2586 			bucket_free(zone, bucket, udata);
2587 			goto zalloc_restart;
2588 		} else
2589 			zone_put_bucket(zone, zdom, bucket, false);
2590 		ZONE_UNLOCK(zone);
2591 		goto zalloc_start;
2592 	} else if (zone->uz_max_items > 0) {
2593 		zone->uz_items -= maxbucket;
2594 		if (zone->uz_sleepers > 0 &&
2595 		    zone->uz_items + 1 < zone->uz_max_items)
2596 			wakeup_one(zone);
2597 	}
2598 
2599 	/*
2600 	 * We may not be able to get a bucket, so return an actual item.
2601 	 */
2602 zalloc_item:
2603 	item = zone_alloc_item_locked(zone, udata, domain, flags);
2604 
2605 	return (item);
2606 }
2607 
2608 void *
2609 uma_zalloc_domain(uma_zone_t zone, void *udata, int domain, int flags)
2610 {
2611 
2612 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
2613 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
2614 
2615 	/* This is the fast path allocation */
2616 	CTR5(KTR_UMA,
2617 	    "uma_zalloc_domain thread %x zone %s(%p) domain %d flags %d",
2618 	    curthread, zone->uz_name, zone, domain, flags);
2619 
2620 	if (flags & M_WAITOK) {
2621 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2622 		    "uma_zalloc_domain: zone \"%s\"", zone->uz_name);
2623 	}
2624 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
2625 	    ("uma_zalloc_domain: called with spinlock or critical section held"));
2626 
2627 	return (zone_alloc_item(zone, udata, domain, flags));
2628 }
2629 
2630 /*
2631  * Find a slab with some space.  Prefer partially used slabs over
2632  * completely free slabs; this helps to reduce fragmentation.
2633  *
2634  * If 'rr' is 1, search all domains starting from 'domain'.  Otherwise check
2635  * only 'domain'.
2636  */
2637 static uma_slab_t
2638 keg_first_slab(uma_keg_t keg, int domain, bool rr)
2639 {
2640 	uma_domain_t dom;
2641 	uma_slab_t slab;
2642 	int start;
2643 
2644 	KASSERT(domain >= 0 && domain < vm_ndomains,
2645 	    ("keg_first_slab: domain %d out of range", domain));
2646 	KEG_LOCK_ASSERT(keg);
2647 
2648 	slab = NULL;
2649 	start = domain;
2650 	do {
2651 		dom = &keg->uk_domain[domain];
2652 		if (!LIST_EMPTY(&dom->ud_part_slab))
2653 			return (LIST_FIRST(&dom->ud_part_slab));
2654 		if (!LIST_EMPTY(&dom->ud_free_slab)) {
2655 			slab = LIST_FIRST(&dom->ud_free_slab);
2656 			LIST_REMOVE(slab, us_link);
2657 			LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
2658 			return (slab);
2659 		}
2660 		if (rr)
2661 			domain = (domain + 1) % vm_ndomains;
2662 	} while (domain != start);
2663 
2664 	return (NULL);
2665 }
2666 
2667 static uma_slab_t
2668 keg_fetch_free_slab(uma_keg_t keg, int domain, bool rr, int flags)
2669 {
2670 	uint32_t reserve;
2671 
2672 	KEG_LOCK_ASSERT(keg);
2673 
2674 	reserve = (flags & M_USE_RESERVE) != 0 ? 0 : keg->uk_reserve;
2675 	if (keg->uk_free <= reserve)
2676 		return (NULL);
2677 	return (keg_first_slab(keg, domain, rr));
2678 }
2679 
2680 static uma_slab_t
2681 keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int rdomain, const int flags)
2682 {
2683 	struct vm_domainset_iter di;
2684 	uma_domain_t dom;
2685 	uma_slab_t slab;
2686 	int aflags, domain;
2687 	bool rr;
2688 
2689 restart:
2690 	KEG_LOCK_ASSERT(keg);
2691 
2692 	/*
2693 	 * Use the keg's policy if upper layers haven't already specified a
2694 	 * domain (as happens with first-touch zones).
2695 	 *
2696 	 * To avoid races we run the iterator with the keg lock held, but that
2697 	 * means that we cannot allow the vm_domainset layer to sleep.  Thus,
2698 	 * clear M_WAITOK and handle low memory conditions locally.
2699 	 */
2700 	rr = rdomain == UMA_ANYDOMAIN;
2701 	if (rr) {
2702 		aflags = (flags & ~M_WAITOK) | M_NOWAIT;
2703 		vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain,
2704 		    &aflags);
2705 	} else {
2706 		aflags = flags;
2707 		domain = rdomain;
2708 	}
2709 
2710 	for (;;) {
2711 		slab = keg_fetch_free_slab(keg, domain, rr, flags);
2712 		if (slab != NULL) {
2713 			MPASS(slab->us_keg == keg);
2714 			return (slab);
2715 		}
2716 
2717 		/*
2718 		 * M_NOVM means don't ask at all!
2719 		 */
2720 		if (flags & M_NOVM)
2721 			break;
2722 
2723 		KASSERT(zone->uz_max_items == 0 ||
2724 		    zone->uz_items <= zone->uz_max_items,
2725 		    ("%s: zone %p overflow", __func__, zone));
2726 
2727 		slab = keg_alloc_slab(keg, zone, domain, flags, aflags);
2728 		/*
2729 		 * If we got a slab here it's safe to mark it partially used
2730 		 * and return.  We assume that the caller is going to remove
2731 		 * at least one item.
2732 		 */
2733 		if (slab) {
2734 			MPASS(slab->us_keg == keg);
2735 			dom = &keg->uk_domain[slab->us_domain];
2736 			LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
2737 			return (slab);
2738 		}
2739 		KEG_LOCK(keg);
2740 		if (rr && vm_domainset_iter_policy(&di, &domain) != 0) {
2741 			if ((flags & M_WAITOK) != 0) {
2742 				KEG_UNLOCK(keg);
2743 				vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask);
2744 				KEG_LOCK(keg);
2745 				goto restart;
2746 			}
2747 			break;
2748 		}
2749 	}
2750 
2751 	/*
2752 	 * We might not have been able to get a slab, but another cpu
2753 	 * could have allocated one while we were unlocked.  Check again
2754 	 * before we fail.
2755 	 */
2756 	if ((slab = keg_fetch_free_slab(keg, domain, rr, flags)) != NULL) {
2757 		MPASS(slab->us_keg == keg);
2758 		return (slab);
2759 	}
2760 	return (NULL);
2761 }
2762 
2763 static uma_slab_t
2764 zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int domain, int flags)
2765 {
2766 	uma_slab_t slab;
2767 
2768 	if (keg == NULL) {
2769 		keg = zone->uz_keg;
2770 		KEG_LOCK(keg);
2771 	}
2772 
2773 	for (;;) {
2774 		slab = keg_fetch_slab(keg, zone, domain, flags);
2775 		if (slab)
2776 			return (slab);
2777 		if (flags & (M_NOWAIT | M_NOVM))
2778 			break;
2779 	}
2780 	KEG_UNLOCK(keg);
2781 	return (NULL);
2782 }
2783 
2784 static void *
2785 slab_alloc_item(uma_keg_t keg, uma_slab_t slab)
2786 {
2787 	uma_domain_t dom;
2788 	void *item;
2789 	uint8_t freei;
2790 
2791 	MPASS(keg == slab->us_keg);
2792 	KEG_LOCK_ASSERT(keg);
2793 
2794 	freei = BIT_FFS(SLAB_SETSIZE, &slab->us_free) - 1;
2795 	BIT_CLR(SLAB_SETSIZE, freei, &slab->us_free);
2796 	item = slab->us_data + (keg->uk_rsize * freei);
2797 	slab->us_freecount--;
2798 	keg->uk_free--;
2799 
2800 	/* Move this slab to the full list */
2801 	if (slab->us_freecount == 0) {
2802 		LIST_REMOVE(slab, us_link);
2803 		dom = &keg->uk_domain[slab->us_domain];
2804 		LIST_INSERT_HEAD(&dom->ud_full_slab, slab, us_link);
2805 	}
2806 
2807 	return (item);
2808 }
2809 
2810 static int
2811 zone_import(uma_zone_t zone, void **bucket, int max, int domain, int flags)
2812 {
2813 	uma_slab_t slab;
2814 	uma_keg_t keg;
2815 #ifdef NUMA
2816 	int stripe;
2817 #endif
2818 	int i;
2819 
2820 	slab = NULL;
2821 	keg = NULL;
2822 	/* Try to keep the buckets totally full */
2823 	for (i = 0; i < max; ) {
2824 		if ((slab = zone_fetch_slab(zone, keg, domain, flags)) == NULL)
2825 			break;
2826 		keg = slab->us_keg;
2827 #ifdef NUMA
2828 		stripe = howmany(max, vm_ndomains);
2829 #endif
2830 		while (slab->us_freecount && i < max) {
2831 			bucket[i++] = slab_alloc_item(keg, slab);
2832 			if (keg->uk_free <= keg->uk_reserve)
2833 				break;
2834 #ifdef NUMA
2835 			/*
2836 			 * If the zone is striped we pick a new slab for every
2837 			 * N allocations.  Eliminating this conditional will
2838 			 * instead pick a new domain for each bucket rather
2839 			 * than stripe within each bucket.  The current option
2840 			 * produces more fragmentation and requires more cpu
2841 			 * time but yields better distribution.
2842 			 */
2843 			if ((zone->uz_flags & UMA_ZONE_NUMA) == 0 &&
2844 			    vm_ndomains > 1 && --stripe == 0)
2845 				break;
2846 #endif
2847 		}
2848 		/* Don't block if we allocated any successfully. */
2849 		flags &= ~M_WAITOK;
2850 		flags |= M_NOWAIT;
2851 	}
2852 	if (slab != NULL)
2853 		KEG_UNLOCK(keg);
2854 
2855 	return (i);
2856 }
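/*
 * Worked example of the striping above (illustrative numbers): with
 * max = 128 and vm_ndomains = 4, stripe = howmany(128, 4) = 32, so a
 * zone without UMA_ZONE_NUMA takes at most 32 consecutive items from
 * one slab before fetching a new one, typically from the next domain
 * in the round-robin policy.
 */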
2857 
2858 static uma_bucket_t
2859 zone_alloc_bucket(uma_zone_t zone, void *udata, int domain, int flags, int max)
2860 {
2861 	uma_bucket_t bucket;
2862 
2863 	CTR1(KTR_UMA, "zone_alloc_bucket: domain %d", domain);
2864 
2865 	/* Avoid allocs targeting empty domains. */
2866 	if (domain != UMA_ANYDOMAIN && VM_DOMAIN_EMPTY(domain))
2867 		domain = UMA_ANYDOMAIN;
2868 
2869 	/* Don't wait for buckets, preserve caller's NOVM setting. */
2870 	bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM));
2871 	if (bucket == NULL)
2872 		return (NULL);
2873 
2874 	bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket,
2875 	    MIN(max, bucket->ub_entries), domain, flags);
2876 
2877 	/*
2878 	 * Initialize the memory if necessary.
2879 	 */
2880 	if (bucket->ub_cnt != 0 && zone->uz_init != NULL) {
2881 		int i;
2882 
2883 		for (i = 0; i < bucket->ub_cnt; i++)
2884 			if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size,
2885 			    flags) != 0)
2886 				break;
2887 		/*
2888 		 * If we couldn't initialize the whole bucket, put the
2889 		 * rest back onto the freelist.
2890 		 */
2891 		if (i != bucket->ub_cnt) {
2892 			zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i],
2893 			    bucket->ub_cnt - i);
2894 #ifdef INVARIANTS
2895 			bzero(&bucket->ub_bucket[i],
2896 			    sizeof(void *) * (bucket->ub_cnt - i));
2897 #endif
2898 			bucket->ub_cnt = i;
2899 		}
2900 	}
2901 
2902 	if (bucket->ub_cnt == 0) {
2903 		bucket_free(zone, bucket, udata);
2904 		counter_u64_add(zone->uz_fails, 1);
2905 		return (NULL);
2906 	}
2907 
2908 	return (bucket);
2909 }
2910 
2911 /*
2912  * Allocates a single item from a zone.
2913  *
2914  * Arguments
2915  *	zone   The zone to alloc for.
2916  *	udata  The data to be passed to the constructor.
2917  *	domain The domain to allocate from or UMA_ANYDOMAIN.
2918  *	flags  M_WAITOK, M_NOWAIT, M_ZERO.
2919  *
2920  * Returns
2921  *	NULL if there is no memory and M_NOWAIT is set
2922  *	An item if successful
2923  */
2925 static void *
2926 zone_alloc_item(uma_zone_t zone, void *udata, int domain, int flags)
2927 {
2928 
2929 	ZONE_LOCK(zone);
2930 	return (zone_alloc_item_locked(zone, udata, domain, flags));
2931 }
2932 
2933 /*
2934  * Returns with zone unlocked.
2935  */
2936 static void *
2937 zone_alloc_item_locked(uma_zone_t zone, void *udata, int domain, int flags)
2938 {
2939 	void *item;
2940 #ifdef INVARIANTS
2941 	bool skipdbg;
2942 #endif
2943 
2944 	ZONE_LOCK_ASSERT(zone);
2945 
2946 	if (zone->uz_max_items > 0) {
2947 		if (zone->uz_items >= zone->uz_max_items) {
2948 			zone_log_warning(zone);
2949 			zone_maxaction(zone);
2950 			if (flags & M_NOWAIT) {
2951 				ZONE_UNLOCK(zone);
2952 				return (NULL);
2953 			}
2954 			zone->uz_sleeps++;
2955 			zone->uz_sleepers++;
2956 			while (zone->uz_items >= zone->uz_max_items)
2957 				mtx_sleep(zone, zone->uz_lockptr, PVM,
2958 				    "zonelimit", 0);
2959 			zone->uz_sleepers--;
2960 			if (zone->uz_sleepers > 0 &&
2961 			    zone->uz_items + 1 < zone->uz_max_items)
2962 				wakeup_one(zone);
2963 		}
2964 		zone->uz_items++;
2965 	}
2966 	ZONE_UNLOCK(zone);
2967 
2968 	/* Avoid allocs targeting empty domains. */
2969 	if (domain != UMA_ANYDOMAIN && VM_DOMAIN_EMPTY(domain))
2970 		domain = UMA_ANYDOMAIN;
2971 
2972 	if (zone->uz_import(zone->uz_arg, &item, 1, domain, flags) != 1)
2973 		goto fail;
2974 
2975 #ifdef INVARIANTS
2976 	skipdbg = uma_dbg_zskip(zone, item);
2977 #endif
2978 	/*
2979 	 * We have to call both the zone's init (not the keg's init)
2980 	 * and the zone's ctor.  This is because the item is going from
2981 	 * a keg slab directly to the user, and the user is expecting it
2982 	 * to be both zone-init'd as well as zone-ctor'd.
2983 	 */
2984 	if (zone->uz_init != NULL) {
2985 		if (zone->uz_init(item, zone->uz_size, flags) != 0) {
2986 			zone_free_item(zone, item, udata, SKIP_FINI | SKIP_CNT);
2987 			goto fail;
2988 		}
2989 	}
2990 	if (zone->uz_ctor != NULL &&
2991 #ifdef INVARIANTS
2992 	    (!skipdbg || zone->uz_ctor != trash_ctor ||
2993 	    zone->uz_dtor != trash_dtor) &&
2994 #endif
2995 	    zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
2996 		zone_free_item(zone, item, udata, SKIP_DTOR | SKIP_CNT);
2997 		goto fail;
2998 	}
2999 #ifdef INVARIANTS
3000 	if (!skipdbg)
3001 		uma_dbg_alloc(zone, NULL, item);
3002 #endif
3003 	if (flags & M_ZERO)
3004 		uma_zero_item(item, zone);
3005 
3006 	counter_u64_add(zone->uz_allocs, 1);
3007 	CTR3(KTR_UMA, "zone_alloc_item item %p from %s(%p)", item,
3008 	    zone->uz_name, zone);
3009 
3010 	return (item);
3011 
3012 fail:
3013 	if (zone->uz_max_items > 0) {
3014 		ZONE_LOCK(zone);
3015 		zone->uz_items--;
3016 		ZONE_UNLOCK(zone);
3017 	}
3018 	counter_u64_add(zone->uz_fails, 1);
3019 	CTR2(KTR_UMA, "zone_alloc_item failed from %s(%p)",
3020 	    zone->uz_name, zone);
3021 	return (NULL);
3022 }
3023 
3024 /* See uma.h */
3025 void
3026 uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
3027 {
3028 	uma_cache_t cache;
3029 	uma_bucket_t bucket;
3030 	uma_zone_domain_t zdom;
3031 	int cpu, domain;
3032 #ifdef UMA_XDOMAIN
3033 	int itemdomain;
3034 #endif
3035 	bool lockfail;
3036 #ifdef INVARIANTS
3037 	bool skipdbg;
3038 #endif
3039 
3040 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
3041 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
3042 
3043 	CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread,
3044 	    zone->uz_name);
3045 
3046 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
3047 	    ("uma_zfree_arg: called with spinlock or critical section held"));
3048 
3049 	/* uma_zfree(..., NULL) does nothing, to match free(9). */
3050 	if (item == NULL)
3051 		return;
3052 #ifdef DEBUG_MEMGUARD
3053 	if (is_memguard_addr(item)) {
3054 		if (zone->uz_dtor != NULL)
3055 			zone->uz_dtor(item, zone->uz_size, udata);
3056 		if (zone->uz_fini != NULL)
3057 			zone->uz_fini(item, zone->uz_size);
3058 		memguard_free(item);
3059 		return;
3060 	}
3061 #endif
3062 #ifdef INVARIANTS
3063 	skipdbg = uma_dbg_zskip(zone, item);
3064 	if (skipdbg == false) {
3065 		if (zone->uz_flags & UMA_ZONE_MALLOC)
3066 			uma_dbg_free(zone, udata, item);
3067 		else
3068 			uma_dbg_free(zone, NULL, item);
3069 	}
3070 	if (zone->uz_dtor != NULL && (!skipdbg ||
3071 	    zone->uz_dtor != trash_dtor || zone->uz_ctor != trash_ctor))
3072 #else
3073 	if (zone->uz_dtor != NULL)
3074 #endif
3075 		zone->uz_dtor(item, zone->uz_size, udata);
3076 
3077 	/*
3078 	 * The race here is acceptable.  If we miss it we'll just have to wait
3079 	 * a little longer for the limits to be reset.
3080 	 */
3081 	if (zone->uz_sleepers > 0)
3082 		goto zfree_item;
3083 
3084 #ifdef UMA_XDOMAIN
3085 	if ((zone->uz_flags & UMA_ZONE_NUMA) != 0)
3086 		itemdomain = _vm_phys_domain(pmap_kextract((vm_offset_t)item));
3087 #endif
3088 
3089 	/*
3090 	 * If possible, free to the per-CPU cache.  There are two
3091 	 * requirements for safe access to the per-CPU cache: (1) the thread
3092 	 * accessing the cache must not be preempted or yield during access,
3093 	 * and (2) the thread must not migrate CPUs without switching which
3094 	 * cache it accesses.  We rely on a critical section to prevent
3095 	 * preemption and migration.  We release the critical section in
3096 	 * order to acquire the zone mutex if we are unable to free to the
3097 	 * current cache; when we re-acquire the critical section, we must
3098 	 * detect and handle migration if it has occurred.
3099 	 */
3100 zfree_restart:
3101 	critical_enter();
3102 	cpu = curcpu;
3103 	cache = &zone->uz_cpu[cpu];
3104 
3105 zfree_start:
3106 	domain = PCPU_GET(domain);
3107 #ifdef UMA_XDOMAIN
3108 	if ((zone->uz_flags & UMA_ZONE_NUMA) == 0)
3109 		itemdomain = domain;
3110 #endif
3111 	/*
3112 	 * Try to free into the allocbucket first to give LIFO ordering
3113 	 * for cache-hot data structures.  Spill over into the freebucket
3114 	 * if necessary.  Alloc will swap them if one runs dry.
3115 	 */
3116 #ifdef UMA_XDOMAIN
3117 	if (domain != itemdomain) {
3118 		bucket = cache->uc_crossbucket;
3119 	} else
3120 #endif
3121 	{
3122 		bucket = cache->uc_allocbucket;
3123 		if (bucket == NULL || bucket->ub_cnt >= bucket->ub_entries)
3124 			bucket = cache->uc_freebucket;
3125 	}
3126 	if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
3127 		KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL,
3128 		    ("uma_zfree: Freeing to non free bucket index."));
3129 		bucket->ub_bucket[bucket->ub_cnt] = item;
3130 		bucket->ub_cnt++;
3131 		cache->uc_frees++;
3132 		critical_exit();
3133 		return;
3134 	}
3135 
3136 	/*
3137 	 * We must go back to the zone, which requires acquiring the zone lock,
3138 	 * which in turn means we must release and re-acquire the critical
3139 	 * section.  Since the critical section is released, we may be
3140 	 * preempted or migrate.  As such, make sure not to maintain any
3141 	 * thread-local state specific to the cache from prior to releasing
3142 	 * the critical section.
3143 	 */
3144 	critical_exit();
3145 	if (zone->uz_count == 0 || bucketdisable)
3146 		goto zfree_item;
3147 
3148 	lockfail = false;
3149 	if (ZONE_TRYLOCK(zone) == 0) {
3150 		/* Record contention to size the buckets. */
3151 		ZONE_LOCK(zone);
3152 		lockfail = true;
3153 	}
3154 	critical_enter();
3155 	cpu = curcpu;
3156 	domain = PCPU_GET(domain);
3157 	cache = &zone->uz_cpu[cpu];
3158 
3159 #ifdef UMA_XDOMAIN
3160 	if (domain != itemdomain)
3161 		bucket = cache->uc_crossbucket;
3162 	else
3163 #endif
3164 		bucket = cache->uc_freebucket;
3165 	if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
3166 		ZONE_UNLOCK(zone);
3167 		goto zfree_start;
3168 	}
3169 #ifdef UMA_XDOMAIN
3170 	if (domain != itemdomain)
3171 		cache->uc_crossbucket = NULL;
3172 	else
3173 #endif
3174 		cache->uc_freebucket = NULL;
3175 	/* We are no longer associated with this CPU. */
3176 	critical_exit();
3177 
3178 #ifdef UMA_XDOMAIN
3179 	if (domain != itemdomain) {
3180 		if (bucket != NULL) {
3181 			zone->uz_xdomain += bucket->ub_cnt;
3182 			if (vm_ndomains > 2 ||
3183 			    zone->uz_bkt_count >= zone->uz_bkt_max) {
3184 				ZONE_UNLOCK(zone);
3185 				bucket_drain(zone, bucket);
3186 				bucket_free(zone, bucket, udata);
3187 			} else {
3188 				zdom = &zone->uz_domain[itemdomain];
3189 				zone_put_bucket(zone, zdom, bucket, true);
3190 				ZONE_UNLOCK(zone);
3191 			}
3192 		} else
3193 			ZONE_UNLOCK(zone);
3194 		bucket = bucket_alloc(zone, udata, M_NOWAIT);
3195 		if (bucket == NULL)
3196 			goto zfree_item;
3197 		critical_enter();
3198 		cpu = curcpu;
3199 		cache = &zone->uz_cpu[cpu];
3200 		if (cache->uc_crossbucket == NULL) {
3201 			cache->uc_crossbucket = bucket;
3202 			goto zfree_start;
3203 		}
3204 		critical_exit();
3205 		bucket_free(zone, bucket, udata);
3206 		goto zfree_restart;
3207 	}
3208 #endif
3209 
3210 	if ((zone->uz_flags & UMA_ZONE_NUMA) != 0) {
3211 		zdom = &zone->uz_domain[domain];
3212 	} else {
3213 		domain = 0;
3214 		zdom = &zone->uz_domain[0];
3215 	}
3216 
3217 	/* Can we throw this on the zone full list? */
3218 	if (bucket != NULL) {
3219 		CTR3(KTR_UMA,
3220 		    "uma_zfree: zone %s(%p) putting bucket %p on free list",
3221 		    zone->uz_name, zone, bucket);
3222 		/* ub_cnt is pointing to the last free item */
3223 		KASSERT(bucket->ub_cnt == bucket->ub_entries,
3224 		    ("uma_zfree: Attempting to insert a non-full bucket onto the full list."));
3225 		if (zone->uz_bkt_count >= zone->uz_bkt_max) {
3226 			ZONE_UNLOCK(zone);
3227 			bucket_drain(zone, bucket);
3228 			bucket_free(zone, bucket, udata);
3229 			goto zfree_restart;
3230 		} else
3231 			zone_put_bucket(zone, zdom, bucket, true);
3232 	}
3233 
3234 	/*
3235 	 * We bump the uz count when the cache size is insufficient to
3236 	 * handle the working set.
3237 	 */
3238 	if (lockfail && zone->uz_count < zone->uz_count_max)
3239 		zone->uz_count++;
3240 	ZONE_UNLOCK(zone);
3241 
3242 	bucket = bucket_alloc(zone, udata, M_NOWAIT);
3243 	CTR3(KTR_UMA, "uma_zfree: zone %s(%p) allocated bucket %p",
3244 	    zone->uz_name, zone, bucket);
3245 	if (bucket) {
3246 		critical_enter();
3247 		cpu = curcpu;
3248 		cache = &zone->uz_cpu[cpu];
3249 		if (cache->uc_freebucket == NULL &&
3250 		    ((zone->uz_flags & UMA_ZONE_NUMA) == 0 ||
3251 		    domain == PCPU_GET(domain))) {
3252 			cache->uc_freebucket = bucket;
3253 			goto zfree_start;
3254 		}
3255 		/*
3256 		 * We lost the race, start over.  We have to drop our
3257 		 * critical section to free the bucket.
3258 		 */
3259 		critical_exit();
3260 		bucket_free(zone, bucket, udata);
3261 		goto zfree_restart;
3262 	}
3263 
3264 	/*
3265 	 * If nothing else caught this, we'll just do an internal free.
3266 	 */
3267 zfree_item:
3268 	zone_free_item(zone, item, udata, SKIP_DTOR);
3269 }
3270 
3271 void
3272 uma_zfree_domain(uma_zone_t zone, void *item, void *udata)
3273 {
3274 
3275 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
3276 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
3277 
3278 	CTR2(KTR_UMA, "uma_zfree_domain thread %x zone %s", curthread,
3279 	    zone->uz_name);
3280 
3281 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
3282 	    ("uma_zfree_domain: called with spinlock or critical section held"));
3283 
3284 	/* uma_zfree(..., NULL) does nothing, to match free(9). */
3285 	if (item == NULL)
3286 		return;
3287 	zone_free_item(zone, item, udata, SKIP_NONE);
3288 }
3289 
3290 static void
3291 slab_free_item(uma_zone_t zone, uma_slab_t slab, void *item)
3292 {
3293 	uma_keg_t keg;
3294 	uma_domain_t dom;
3295 	uint8_t freei;
3296 
3297 	keg = zone->uz_keg;
3298 	MPASS(zone->uz_lockptr == &keg->uk_lock);
3299 	KEG_LOCK_ASSERT(keg);
3300 	MPASS(keg == slab->us_keg);
3301 
3302 	dom = &keg->uk_domain[slab->us_domain];
3303 
3304 	/* Do we need to remove from any lists? */
3305 	if (slab->us_freecount + 1 == keg->uk_ipers) {
3306 		LIST_REMOVE(slab, us_link);
3307 		LIST_INSERT_HEAD(&dom->ud_free_slab, slab, us_link);
3308 	} else if (slab->us_freecount == 0) {
3309 		LIST_REMOVE(slab, us_link);
3310 		LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
3311 	}
3312 
3313 	/* Slab management. */
3314 	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
3315 	BIT_SET(SLAB_SETSIZE, freei, &slab->us_free);
3316 	slab->us_freecount++;
3317 
3318 	/* Keg statistics. */
3319 	keg->uk_free++;
3320 }
3321 
3322 static void
3323 zone_release(uma_zone_t zone, void **bucket, int cnt)
3324 {
3325 	void *item;
3326 	uma_slab_t slab;
3327 	uma_keg_t keg;
3328 	uint8_t *mem;
3329 	int i;
3330 
3331 	keg = zone->uz_keg;
3332 	KEG_LOCK(keg);
3333 	for (i = 0; i < cnt; i++) {
3334 		item = bucket[i];
3335 		if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) {
3336 			mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
3337 			if (zone->uz_flags & UMA_ZONE_HASH) {
3338 				slab = hash_sfind(&keg->uk_hash, mem);
3339 			} else {
3340 				mem += keg->uk_pgoff;
3341 				slab = (uma_slab_t)mem;
3342 			}
3343 		} else {
3344 			slab = vtoslab((vm_offset_t)item);
3345 			MPASS(slab->us_keg == keg);
3346 		}
3347 		slab_free_item(zone, slab, item);
3348 	}
3349 	KEG_UNLOCK(keg);
3350 }
3351 
3352 /*
3353  * Frees a single item to any zone.
3354  *
3355  * Arguments:
3356  *	zone   The zone to free to
3357  *	item   The item we're freeing
3358  *	udata  User supplied data for the dtor
3359  *	skip   Skip dtors and finis
3360  */
3361 static void
3362 zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip)
3363 {
3364 #ifdef INVARIANTS
3365 	bool skipdbg;
3366 
3367 	skipdbg = uma_dbg_zskip(zone, item);
3368 	if (skip == SKIP_NONE && !skipdbg) {
3369 		if (zone->uz_flags & UMA_ZONE_MALLOC)
3370 			uma_dbg_free(zone, udata, item);
3371 		else
3372 			uma_dbg_free(zone, NULL, item);
3373 	}
3374 
3375 	if (skip < SKIP_DTOR && zone->uz_dtor != NULL &&
3376 	    (!skipdbg || zone->uz_dtor != trash_dtor ||
3377 	    zone->uz_ctor != trash_ctor))
3378 #else
3379 	if (skip < SKIP_DTOR && zone->uz_dtor != NULL)
3380 #endif
3381 		zone->uz_dtor(item, zone->uz_size, udata);
3382 
3383 	if (skip < SKIP_FINI && zone->uz_fini)
3384 		zone->uz_fini(item, zone->uz_size);
3385 
3386 	zone->uz_release(zone->uz_arg, &item, 1);
3387 
3388 	if (skip & SKIP_CNT)
3389 		return;
3390 
3391 	counter_u64_add(zone->uz_frees, 1);
3392 
3393 	if (zone->uz_max_items > 0) {
3394 		ZONE_LOCK(zone);
3395 		zone->uz_items--;
3396 		if (zone->uz_sleepers > 0 &&
3397 		    zone->uz_items < zone->uz_max_items)
3398 			wakeup_one(zone);
3399 		ZONE_UNLOCK(zone);
3400 	}
3401 }
3402 
3403 /* See uma.h */
3404 int
3405 uma_zone_set_max(uma_zone_t zone, int nitems)
3406 {
3407 	struct uma_bucket_zone *ubz;
3408 
3409 	/*
3410 	 * If the limit is very low, we may need to limit how many
3411 	 * items are allowed in the CPU caches.
3412 	 */
3413 	ubz = &bucket_zones[0];
3414 	for (; ubz->ubz_entries != 0; ubz++)
3415 		if (ubz->ubz_entries * 2 * mp_ncpus > nitems)
3416 			break;
3417 	if (ubz == &bucket_zones[0])
3418 		nitems = ubz->ubz_entries * 2 * mp_ncpus;
3419 	else
3420 		ubz--;
3421 
3422 	ZONE_LOCK(zone);
3423 	zone->uz_count_max = zone->uz_count = ubz->ubz_entries;
3424 	if (zone->uz_count_min > zone->uz_count_max)
3425 		zone->uz_count_min = zone->uz_count_max;
3426 	zone->uz_max_items = nitems;
3427 	ZONE_UNLOCK(zone);
3428 
3429 	return (nitems);
3430 }
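/*
 * Illustrative sketch (hypothetical zone): a request smaller than two
 * minimal buckets per CPU is raised, so callers that care about the
 * exact ceiling should use the returned value rather than the one
 * they passed in:
 *
 *	limit = uma_zone_set_max(foo_zone, 1000);
 */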
3431 
3432 /* See uma.h */
3433 int
3434 uma_zone_set_maxcache(uma_zone_t zone, int nitems)
3435 {
3436 
3437 	ZONE_LOCK(zone);
3438 	zone->uz_bkt_max = nitems;
3439 	ZONE_UNLOCK(zone);
3440 
3441 	return (nitems);
3442 }
3443 
3444 /* See uma.h */
3445 int
3446 uma_zone_get_max(uma_zone_t zone)
3447 {
3448 	int nitems;
3449 
3450 	ZONE_LOCK(zone);
3451 	nitems = zone->uz_max_items;
3452 	ZONE_UNLOCK(zone);
3453 
3454 	return (nitems);
3455 }
3456 
3457 /* See uma.h */
3458 void
3459 uma_zone_set_warning(uma_zone_t zone, const char *warning)
3460 {
3461 
3462 	ZONE_LOCK(zone);
3463 	zone->uz_warning = warning;
3464 	ZONE_UNLOCK(zone);
3465 }
3466 
3467 /* See uma.h */
3468 void
3469 uma_zone_set_maxaction(uma_zone_t zone, uma_maxaction_t maxaction)
3470 {
3471 
3472 	ZONE_LOCK(zone);
3473 	TASK_INIT(&zone->uz_maxaction, 0, (task_fn_t *)maxaction, zone);
3474 	ZONE_UNLOCK(zone);
3475 }
3476 
3477 /* See uma.h */
3478 int
3479 uma_zone_get_cur(uma_zone_t zone)
3480 {
3481 	int64_t nitems;
3482 	u_int i;
3483 
3484 	ZONE_LOCK(zone);
3485 	nitems = counter_u64_fetch(zone->uz_allocs) -
3486 	    counter_u64_fetch(zone->uz_frees);
3487 	CPU_FOREACH(i) {
3488 		/*
3489 		 * See the comment in uma_vm_zone_stats() regarding the
3490 		 * safety of accessing the per-cpu caches. With the zone lock
3491 		 * held, it is safe, but can potentially result in stale data.
3492 		 */
3493 		nitems += zone->uz_cpu[i].uc_allocs -
3494 		    zone->uz_cpu[i].uc_frees;
3495 	}
3496 	ZONE_UNLOCK(zone);
3497 
3498 	return (nitems < 0 ? 0 : nitems);
3499 }
3500 
3501 /* See uma.h */
3502 void
3503 uma_zone_set_init(uma_zone_t zone, uma_init uminit)
3504 {
3505 	uma_keg_t keg;
3506 
3507 	KEG_GET(zone, keg);
3508 	KEG_LOCK(keg);
3509 	KASSERT(keg->uk_pages == 0,
3510 	    ("uma_zone_set_init on non-empty keg"));
3511 	keg->uk_init = uminit;
3512 	KEG_UNLOCK(keg);
3513 }
3514 
3515 /* See uma.h */
3516 void
3517 uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
3518 {
3519 	uma_keg_t keg;
3520 
3521 	KEG_GET(zone, keg);
3522 	KEG_LOCK(keg);
3523 	KASSERT(keg->uk_pages == 0,
3524 	    ("uma_zone_set_fini on non-empty keg"));
3525 	keg->uk_fini = fini;
3526 	KEG_UNLOCK(keg);
3527 }
3528 
3529 /* See uma.h */
3530 void
3531 uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
3532 {
3533 
3534 	ZONE_LOCK(zone);
3535 	KASSERT(zone->uz_keg->uk_pages == 0,
3536 	    ("uma_zone_set_zinit on non-empty keg"));
3537 	zone->uz_init = zinit;
3538 	ZONE_UNLOCK(zone);
3539 }
3540 
3541 /* See uma.h */
3542 void
3543 uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
3544 {
3545 
3546 	ZONE_LOCK(zone);
3547 	KASSERT(zone->uz_keg->uk_pages == 0,
3548 	    ("uma_zone_set_zfini on non-empty keg"));
3549 	zone->uz_fini = zfini;
3550 	ZONE_UNLOCK(zone);
3551 }
3552 
3553 /* See uma.h */
3554 /* XXX uk_freef is not actually used with the zone locked */
3555 void
3556 uma_zone_set_freef(uma_zone_t zone, uma_free freef)
3557 {
3558 	uma_keg_t keg;
3559 
3560 	KEG_GET(zone, keg);
3561 	KASSERT(keg != NULL, ("uma_zone_set_freef: Invalid zone type"));
3562 	KEG_LOCK(keg);
3563 	keg->uk_freef = freef;
3564 	KEG_UNLOCK(keg);
3565 }
3566 
3567 /* See uma.h */
3568 /* XXX uk_allocf is not actually used with the zone locked */
3569 void
3570 uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
3571 {
3572 	uma_keg_t keg;
3573 
3574 	KEG_GET(zone, keg);
3575 	KEG_LOCK(keg);
3576 	keg->uk_allocf = allocf;
3577 	KEG_UNLOCK(keg);
3578 }
3579 
3580 /* See uma.h */
3581 void
3582 uma_zone_reserve(uma_zone_t zone, int items)
3583 {
3584 	uma_keg_t keg;
3585 
3586 	KEG_GET(zone, keg);
3587 	KEG_LOCK(keg);
3588 	keg->uk_reserve = items;
3589 	KEG_UNLOCK(keg);
3590 }
3591 
3592 /* See uma.h */
3593 int
3594 uma_zone_reserve_kva(uma_zone_t zone, int count)
3595 {
3596 	uma_keg_t keg;
3597 	vm_offset_t kva;
3598 	u_int pages;
3599 
3600 	KEG_GET(zone, keg);
3601 
3602 	pages = howmany(count, keg->uk_ipers) * keg->uk_ppera;
3606 
3607 #ifdef UMA_MD_SMALL_ALLOC
3608 	if (keg->uk_ppera > 1) {
3609 #else
3610 	if (1) {
3611 #endif
3612 		kva = kva_alloc((vm_size_t)pages * PAGE_SIZE);
3613 		if (kva == 0)
3614 			return (0);
3615 	} else
3616 		kva = 0;
3617 
3618 	ZONE_LOCK(zone);
3619 	MPASS(keg->uk_kva == 0);
3620 	keg->uk_kva = kva;
3621 	keg->uk_offset = 0;
3622 	zone->uz_max_items = pages * keg->uk_ipers;
3623 #ifdef UMA_MD_SMALL_ALLOC
3624 	keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc;
3625 #else
3626 	keg->uk_allocf = noobj_alloc;
3627 #endif
3628 	keg->uk_flags |= UMA_ZONE_NOFREE;
3629 	ZONE_UNLOCK(zone);
3630 
3631 	return (1);
3632 }
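/*
 * Illustrative sketch (hypothetical caller): backing a zone with a
 * private KVA window sized for 'count' items; the return value
 * signals whether the reservation succeeded:
 *
 *	if (uma_zone_reserve_kva(foo_zone, 10000) == 0)
 *		printf("foo: could not reserve KVA\n");
 */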
3633 
3634 /* See uma.h */
3635 void
3636 uma_prealloc(uma_zone_t zone, int items)
3637 {
3638 	struct vm_domainset_iter di;
3639 	uma_domain_t dom;
3640 	uma_slab_t slab;
3641 	uma_keg_t keg;
3642 	int aflags, domain, slabs;
3643 
3644 	KEG_GET(zone, keg);
3645 	KEG_LOCK(keg);
3646 	slabs = howmany(items, keg->uk_ipers);
3649 	while (slabs-- > 0) {
3650 		aflags = M_NOWAIT;
3651 		vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain,
3652 		    &aflags);
3653 		for (;;) {
3654 			slab = keg_alloc_slab(keg, zone, domain, M_WAITOK,
3655 			    aflags);
3656 			if (slab != NULL) {
3657 				MPASS(slab->us_keg == keg);
3658 				dom = &keg->uk_domain[slab->us_domain];
3659 				LIST_INSERT_HEAD(&dom->ud_free_slab, slab,
3660 				    us_link);
3661 				break;
3662 			}
3663 			KEG_LOCK(keg);
3664 			if (vm_domainset_iter_policy(&di, &domain) != 0) {
3665 				KEG_UNLOCK(keg);
3666 				vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask);
3667 				KEG_LOCK(keg);
3668 			}
3669 		}
3670 	}
3671 	KEG_UNLOCK(keg);
3672 }
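/*
 * Illustrative sketch (hypothetical zone): a consumer that must make
 * progress under memory pressure typically pairs a reserve with a
 * prealloc, then taps the reserve with M_USE_RESERVE on its critical
 * path:
 *
 *	uma_zone_reserve(foo_zone, 8);
 *	uma_prealloc(foo_zone, 8);
 *	...
 *	p = uma_zalloc(foo_zone, M_NOWAIT | M_USE_RESERVE);
 */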
3673 
3674 /* See uma.h */
3675 static void
3676 uma_reclaim_locked(bool kmem_danger)
3677 {
3678 
3679 	CTR0(KTR_UMA, "UMA: vm asked us to release pages!");
3680 	sx_assert(&uma_drain_lock, SA_XLOCKED);
3681 	bucket_enable();
3682 	zone_foreach(zone_drain);
3683 	if (vm_page_count_min() || kmem_danger) {
3684 		cache_drain_safe(NULL);
3685 		zone_foreach(zone_drain);
3686 	}
3687 
3688 	/*
3689 	 * Some slabs may have been freed, but this zone will be visited
3690 	 * early in the loop above, so we visit it again here to free pages
3691 	 * that became empty while the other zones were drained.  We have
3692 	 * to do the same for buckets.
3692 	 */
3693 	zone_drain(slabzone);
3694 	bucket_zone_drain();
3695 }
3696 
3697 void
3698 uma_reclaim(void)
3699 {
3700 
3701 	sx_xlock(&uma_drain_lock);
3702 	uma_reclaim_locked(false);
3703 	sx_xunlock(&uma_drain_lock);
3704 }
3705 
3706 static volatile int uma_reclaim_needed;
3707 
3708 void
3709 uma_reclaim_wakeup(void)
3710 {
3711 
3712 	if (atomic_fetchadd_int(&uma_reclaim_needed, 1) == 0)
3713 		wakeup(uma_reclaim);
3714 }
3715 
3716 void
3717 uma_reclaim_worker(void *arg __unused)
3718 {
3719 
3720 	for (;;) {
3721 		sx_xlock(&uma_drain_lock);
3722 		while (atomic_load_int(&uma_reclaim_needed) == 0)
3723 			sx_sleep(uma_reclaim, &uma_drain_lock, PVM, "umarcl",
3724 			    hz);
3725 		sx_xunlock(&uma_drain_lock);
3726 		EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_KMEM);
3727 		sx_xlock(&uma_drain_lock);
3728 		uma_reclaim_locked(true);
3729 		atomic_store_int(&uma_reclaim_needed, 0);
3730 		sx_xunlock(&uma_drain_lock);
3731 		/* Don't fire more than once per second. */
3732 		pause("umarclslp", hz);
3733 	}
3734 }
3735 
3736 /* See uma.h */
3737 int
3738 uma_zone_exhausted(uma_zone_t zone)
3739 {
3740 	int full;
3741 
3742 	ZONE_LOCK(zone);
3743 	full = zone->uz_sleepers > 0;
3744 	ZONE_UNLOCK(zone);
3745 	return (full);
3746 }
3747 
3748 int
3749 uma_zone_exhausted_nolock(uma_zone_t zone)
3750 {
3751 	return (zone->uz_sleepers > 0);
3752 }
3753 
3754 void *
3755 uma_large_malloc_domain(vm_size_t size, int domain, int wait)
3756 {
3757 	struct domainset *policy;
3758 	vm_offset_t addr;
3759 	uma_slab_t slab;
3760 
3761 	if (domain != UMA_ANYDOMAIN) {
3762 		/* avoid allocs targeting empty domains */
3763 		if (VM_DOMAIN_EMPTY(domain))
3764 			domain = UMA_ANYDOMAIN;
3765 	}
3766 	slab = zone_alloc_item(slabzone, NULL, domain, wait);
3767 	if (slab == NULL)
3768 		return (NULL);
3769 	policy = (domain == UMA_ANYDOMAIN) ? DOMAINSET_RR() :
3770 	    DOMAINSET_FIXED(domain);
3771 	addr = kmem_malloc_domainset(policy, size, wait);
3772 	if (addr != 0) {
3773 		vsetslab(addr, slab);
3774 		slab->us_data = (void *)addr;
3775 		slab->us_flags = UMA_SLAB_KERNEL | UMA_SLAB_MALLOC;
3776 		slab->us_size = size;
3777 		slab->us_domain = vm_phys_domain(PHYS_TO_VM_PAGE(
3778 		    pmap_kextract(addr)));
3779 		uma_total_inc(size);
3780 	} else {
3781 		zone_free_item(slabzone, slab, NULL, SKIP_NONE);
3782 	}
3783 
3784 	return ((void *)addr);
3785 }
3786 
3787 void *
3788 uma_large_malloc(vm_size_t size, int wait)
3789 {
3790 
3791 	return uma_large_malloc_domain(size, UMA_ANYDOMAIN, wait);
3792 }
3793 
3794 void
3795 uma_large_free(uma_slab_t slab)
3796 {
3797 
3798 	KASSERT((slab->us_flags & UMA_SLAB_KERNEL) != 0,
3799 	    ("uma_large_free:  Memory not allocated with uma_large_malloc."));
3800 	kmem_free((vm_offset_t)slab->us_data, slab->us_size);
3801 	uma_total_dec(slab->us_size);
3802 	zone_free_item(slabzone, slab, NULL, SKIP_NONE);
3803 }
3804 
3805 static void
3806 uma_zero_item(void *item, uma_zone_t zone)
3807 {
3808 
3809 	bzero(item, zone->uz_size);
3810 }
3811 
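/*
 * Accessors for UMA's global memory accounting: uma_kmem_limit caps the
 * total bytes of kernel memory UMA may consume, and uma_kmem_total
 * (adjusted via uma_total_inc()/uma_total_dec()) tracks current usage.
 */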
3812 unsigned long
3813 uma_limit(void)
3814 {
3815 
3816 	return (uma_kmem_limit);
3817 }
3818 
3819 void
3820 uma_set_limit(unsigned long limit)
3821 {
3822 
3823 	uma_kmem_limit = limit;
3824 }
3825 
3826 unsigned long
3827 uma_size(void)
3828 {
3829 
3830 	return (atomic_load_long(&uma_kmem_total));
3831 }
3832 
3833 long
3834 uma_avail(void)
3835 {
3836 
3837 	return (uma_kmem_limit - uma_size());
3838 }
3839 
3840 void
3841 uma_print_stats(void)
3842 {
3843 	zone_foreach(uma_print_zone);
3844 }
3845 
3846 static void
3847 slab_print(uma_slab_t slab)
3848 {
3849 	printf("slab: keg %p, data %p, freecount %d\n",
3850 		slab->us_keg, slab->us_data, slab->us_freecount);
3851 }
3852 
3853 static void
3854 cache_print(uma_cache_t cache)
3855 {
3856 	printf("alloc: %p(%d), free: %p(%d), cross: %p(%d)\n",
3857 		cache->uc_allocbucket,
3858 		cache->uc_allocbucket != NULL ? cache->uc_allocbucket->ub_cnt : 0,
3859 		cache->uc_freebucket,
3860 		cache->uc_freebucket != NULL ? cache->uc_freebucket->ub_cnt : 0,
3861 		cache->uc_crossbucket,
3862 		cache->uc_crossbucket != NULL ? cache->uc_crossbucket->ub_cnt : 0);
3863 }
3864 
3865 static void
3866 uma_print_keg(uma_keg_t keg)
3867 {
3868 	uma_domain_t dom;
3869 	uma_slab_t slab;
3870 	int i;
3871 
3872 	printf("keg: %s(%p) size %d(%d) flags %#x ipers %d ppera %d "
3873 	    "out %d free %d\n",
3874 	    keg->uk_name, keg, keg->uk_size, keg->uk_rsize, keg->uk_flags,
3875 	    keg->uk_ipers, keg->uk_ppera,
3876 	    (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free,
3877 	    keg->uk_free);
3878 	for (i = 0; i < vm_ndomains; i++) {
3879 		dom = &keg->uk_domain[i];
3880 		printf("Part slabs:\n");
3881 		LIST_FOREACH(slab, &dom->ud_part_slab, us_link)
3882 			slab_print(slab);
3883 		printf("Free slabs:\n");
3884 		LIST_FOREACH(slab, &dom->ud_free_slab, us_link)
3885 			slab_print(slab);
3886 		printf("Full slabs:\n");
3887 		LIST_FOREACH(slab, &dom->ud_full_slab, us_link)
3888 			slab_print(slab);
3889 	}
3890 }
3891 
3892 void
3893 uma_print_zone(uma_zone_t zone)
3894 {
3895 	uma_cache_t cache;
3896 	int i;
3897 
3898 	printf("zone: %s(%p) size %d maxitems %ju flags %#x\n",
3899 	    zone->uz_name, zone, zone->uz_size, (uintmax_t)zone->uz_max_items,
3900 	    zone->uz_flags);
3901 	if (zone->uz_lockptr != &zone->uz_lock)
3902 		uma_print_keg(zone->uz_keg);
3903 	CPU_FOREACH(i) {
3904 		cache = &zone->uz_cpu[i];
3905 		printf("CPU %d Cache:\n", i);
3906 		cache_print(cache);
3907 	}
3908 }
3909 
3910 #ifdef DDB
3911 /*
3912  * Generate statistics across both the zone and its per-CPU caches.  Return
3913  * the desired statistic through each pointer that is non-NULL.
3914  *
3915  * Note: does not update the zone statistics, as it can't safely clear the
3916  * per-CPU cache statistic.
3917  *
3918  * XXXRW: Following the uc_allocbucket and uc_freebucket pointers here isn't
3919  * safe from off-CPU; we should modify the caches to track this information
3920  * directly so that we don't have to.
3921  */
3922 static void
3923 uma_zone_sumstat(uma_zone_t z, long *cachefreep, uint64_t *allocsp,
3924     uint64_t *freesp, uint64_t *sleepsp, uint64_t *xdomainp)
3925 {
3926 	uma_cache_t cache;
3927 	uint64_t allocs, frees, sleeps, xdomain;
3928 	int cachefree, cpu;
3929 
3930 	allocs = frees = sleeps = xdomain = 0;
3931 	cachefree = 0;
3932 	CPU_FOREACH(cpu) {
3933 		cache = &z->uz_cpu[cpu];
3934 		if (cache->uc_allocbucket != NULL)
3935 			cachefree += cache->uc_allocbucket->ub_cnt;
3936 		if (cache->uc_freebucket != NULL)
3937 			cachefree += cache->uc_freebucket->ub_cnt;
3938 		if (cache->uc_crossbucket != NULL) {
3939 			xdomain += cache->uc_crossbucket->ub_cnt;
3940 			cachefree += cache->uc_crossbucket->ub_cnt;
3941 		}
3942 		allocs += cache->uc_allocs;
3943 		frees += cache->uc_frees;
3944 	}
3945 	allocs += counter_u64_fetch(z->uz_allocs);
3946 	frees += counter_u64_fetch(z->uz_frees);
3947 	sleeps += z->uz_sleeps;
3948 	xdomain += z->uz_xdomain;
3949 	if (cachefreep != NULL)
3950 		*cachefreep = cachefree;
3951 	if (allocsp != NULL)
3952 		*allocsp = allocs;
3953 	if (freesp != NULL)
3954 		*freesp = frees;
3955 	if (sleepsp != NULL)
3956 		*sleepsp = sleeps;
3957 	if (xdomainp != NULL)
3958 		*xdomainp = xdomain;
3959 }
3960 #endif /* DDB */
3961 
3962 static int
3963 sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS)
3964 {
3965 	uma_keg_t kz;
3966 	uma_zone_t z;
3967 	int count;
3968 
3969 	count = 0;
3970 	rw_rlock(&uma_rwlock);
3971 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
3972 		LIST_FOREACH(z, &kz->uk_zones, uz_link)
3973 			count++;
3974 	}
3975 	LIST_FOREACH(z, &uma_cachezones, uz_link)
3976 		count++;
3977 
3978 	rw_runlock(&uma_rwlock);
3979 	return (sysctl_handle_int(oidp, &count, 0, req));
3980 }
3981 
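/*
 * Fill in a uma_type_header and one uma_percpu_stat record per possible
 * CPU for a single zone.  The caller holds the zone lock; per-CPU bucket
 * contents are read racily, as explained below.
 */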
3982 static void
3983 uma_vm_zone_stats(struct uma_type_header *uth, uma_zone_t z, struct sbuf *sbuf,
3984     struct uma_percpu_stat *ups, bool internal)
3985 {
3986 	uma_zone_domain_t zdom;
3987 	uma_cache_t cache;
3988 	int i;
3989 
3991 	for (i = 0; i < vm_ndomains; i++) {
3992 		zdom = &z->uz_domain[i];
3993 		uth->uth_zone_free += zdom->uzd_nitems;
3994 	}
3995 	uth->uth_allocs = counter_u64_fetch(z->uz_allocs);
3996 	uth->uth_frees = counter_u64_fetch(z->uz_frees);
3997 	uth->uth_fails = counter_u64_fetch(z->uz_fails);
3998 	uth->uth_sleeps = z->uz_sleeps;
3999 	uth->uth_xdomain = z->uz_xdomain;
4000 	/*
4001 	 * While it is not normally safe to access the cache
4002 	 * bucket pointers while not on the CPU that owns the
4003 	 * cache, we only allow the pointers to be exchanged
4004 	 * without the zone lock held, not invalidated, so
4005 	 * accept the possible race associated with bucket
4006 	 * exchange during monitoring.
4007 	 */
4008 	for (i = 0; i < mp_maxid + 1; i++) {
4009 		bzero(&ups[i], sizeof(*ups));
4010 		if (internal || CPU_ABSENT(i))
4011 			continue;
4012 		cache = &z->uz_cpu[i];
4013 		if (cache->uc_allocbucket != NULL)
4014 			ups[i].ups_cache_free +=
4015 			    cache->uc_allocbucket->ub_cnt;
4016 		if (cache->uc_freebucket != NULL)
4017 			ups[i].ups_cache_free +=
4018 			    cache->uc_freebucket->ub_cnt;
4019 		if (cache->uc_crossbucket != NULL)
4020 			ups[i].ups_cache_free +=
4021 			    cache->uc_crossbucket->ub_cnt;
4022 		ups[i].ups_allocs = cache->uc_allocs;
4023 		ups[i].ups_frees = cache->uc_frees;
4024 	}
4025 }
4026 
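/*
 * Export zone statistics as a binary stream for userland consumers such
 * as vmstat(8): one uma_stream_header, then for each zone a
 * uma_type_header followed by one uma_percpu_stat per possible CPU.
 */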
4027 static int
4028 sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
4029 {
4030 	struct uma_stream_header ush;
4031 	struct uma_type_header uth;
4032 	struct uma_percpu_stat *ups;
4033 	struct sbuf sbuf;
4034 	uma_keg_t kz;
4035 	uma_zone_t z;
4036 	int count, error, i;
4037 
4038 	error = sysctl_wire_old_buffer(req, 0);
4039 	if (error != 0)
4040 		return (error);
4041 	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
4042 	sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL);
4043 	ups = malloc((mp_maxid + 1) * sizeof(*ups), M_TEMP, M_WAITOK);
4044 
4045 	count = 0;
4046 	rw_rlock(&uma_rwlock);
4047 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
4048 		LIST_FOREACH(z, &kz->uk_zones, uz_link)
4049 			count++;
4050 	}
4051 
4052 	LIST_FOREACH(z, &uma_cachezones, uz_link)
4053 		count++;
4054 
4055 	/*
4056 	 * Insert stream header.
4057 	 */
4058 	bzero(&ush, sizeof(ush));
4059 	ush.ush_version = UMA_STREAM_VERSION;
4060 	ush.ush_maxcpus = (mp_maxid + 1);
4061 	ush.ush_count = count;
4062 	(void)sbuf_bcat(&sbuf, &ush, sizeof(ush));
4063 
4064 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
4065 		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
4066 			bzero(&uth, sizeof(uth));
4067 			ZONE_LOCK(z);
4068 			strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
4069 			uth.uth_align = kz->uk_align;
4070 			uth.uth_size = kz->uk_size;
4071 			uth.uth_rsize = kz->uk_rsize;
4072 			if (z->uz_max_items > 0)
4073 				uth.uth_pages = (z->uz_items / kz->uk_ipers) *
4074 					kz->uk_ppera;
4075 			else
4076 				uth.uth_pages = kz->uk_pages;
4077 			uth.uth_maxpages = (z->uz_max_items / kz->uk_ipers) *
4078 			    kz->uk_ppera;
4079 			uth.uth_limit = z->uz_max_items;
4080 			uth.uth_keg_free = z->uz_keg->uk_free;
4081 
4082 			/*
4083 			 * A zone is secondary if it is not the first entry
4084 			 * on the keg's zone list.
4085 			 */
4086 			if ((z->uz_flags & UMA_ZONE_SECONDARY) &&
4087 			    (LIST_FIRST(&kz->uk_zones) != z))
4088 				uth.uth_zone_flags = UTH_ZONE_SECONDARY;
4089 			uma_vm_zone_stats(&uth, z, &sbuf, ups,
4090 			    kz->uk_flags & UMA_ZFLAG_INTERNAL);
4091 			ZONE_UNLOCK(z);
4092 			(void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
4093 			for (i = 0; i < mp_maxid + 1; i++)
4094 				(void)sbuf_bcat(&sbuf, &ups[i], sizeof(ups[i]));
4095 		}
4096 	}
4097 	LIST_FOREACH(z, &uma_cachezones, uz_link) {
4098 		bzero(&uth, sizeof(uth));
4099 		ZONE_LOCK(z);
4100 		strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
4101 		uth.uth_size = z->uz_size;
4102 		uma_vm_zone_stats(&uth, z, &sbuf, ups, false);
4103 		ZONE_UNLOCK(z);
4104 		(void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
4105 		for (i = 0; i < mp_maxid + 1; i++)
4106 			(void)sbuf_bcat(&sbuf, &ups[i], sizeof(ups[i]));
4107 	}
4108 
4109 	rw_runlock(&uma_rwlock);
4110 	error = sbuf_finish(&sbuf);
4111 	sbuf_delete(&sbuf);
4112 	free(ups, M_TEMP);
4113 	return (error);
4114 }
4115 
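/*
 * Read, and optionally update, a zone's item limit through a sysctl;
 * arg1 points at the uma_zone_t.  The companion handler below reports
 * the zone's current item count read-only.
 */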
4116 int
4117 sysctl_handle_uma_zone_max(SYSCTL_HANDLER_ARGS)
4118 {
4119 	uma_zone_t zone = *(uma_zone_t *)arg1;
4120 	int error, max;
4121 
4122 	max = uma_zone_get_max(zone);
4123 	error = sysctl_handle_int(oidp, &max, 0, req);
4124 	if (error || !req->newptr)
4125 		return (error);
4126 
4127 	uma_zone_set_max(zone, max);
4128 
4129 	return (0);
4130 }
4131 
4132 int
4133 sysctl_handle_uma_zone_cur(SYSCTL_HANDLER_ARGS)
4134 {
4135 	uma_zone_t zone = *(uma_zone_t *)arg1;
4136 	int cur;
4137 
4138 	cur = uma_zone_get_cur(zone);
4139 	return (sysctl_handle_int(oidp, &cur, 0, req));
4140 }
4141 
4142 #ifdef INVARIANTS
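/*
 * Look up the slab backing an item: mask the item's address down to its
 * slab boundary and consult either the page's slab pointer (for
 * UMA_ZONE_VTOSLAB zones) or the keg's hash/page offset.  Cache zones
 * have no keg backing, so NULL is returned for them.
 */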
4143 static uma_slab_t
4144 uma_dbg_getslab(uma_zone_t zone, void *item)
4145 {
4146 	uma_slab_t slab;
4147 	uma_keg_t keg;
4148 	uint8_t *mem;
4149 
4150 	mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
4151 	if (zone->uz_flags & UMA_ZONE_VTOSLAB) {
4152 		slab = vtoslab((vm_offset_t)mem);
4153 	} else {
4154 		/*
4155 		 * It is safe to return the slab here even though the
4156 		 * zone is unlocked because the item's allocation state
4157 		 * essentially holds a reference.
4158 		 */
4159 		if (zone->uz_lockptr == &zone->uz_lock)
4160 			return (NULL);
4161 		ZONE_LOCK(zone);
4162 		keg = zone->uz_keg;
4163 		if (keg->uk_flags & UMA_ZONE_HASH)
4164 			slab = hash_sfind(&keg->uk_hash, mem);
4165 		else
4166 			slab = (uma_slab_t)(mem + keg->uk_pgoff);
4167 		ZONE_UNLOCK(zone);
4168 	}
4169 
4170 	return (slab);
4171 }
4172 
4173 static bool
4174 uma_dbg_zskip(uma_zone_t zone, void *mem)
4175 {
4176 
4177 	if (zone->uz_lockptr == &zone->uz_lock)
4178 		return (true);
4179 
4180 	return (uma_dbg_kskip(zone->uz_keg, mem));
4181 }
4182 
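/*
 * Decide whether to skip the expensive item checks: a dbg_divisor of 0
 * disables checking entirely, 1 checks every item, and larger values
 * audit roughly one item in dbg_divisor, selected by the item's index
 * within its keg.
 */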
4183 static bool
4184 uma_dbg_kskip(uma_keg_t keg, void *mem)
4185 {
4186 	uintptr_t idx;
4187 
4188 	if (dbg_divisor == 0)
4189 		return (true);
4190 
4191 	if (dbg_divisor == 1)
4192 		return (false);
4193 
4194 	idx = (uintptr_t)mem >> PAGE_SHIFT;
4195 	if (keg->uk_ipers > 1) {
4196 		idx *= keg->uk_ipers;
4197 		idx += ((uintptr_t)mem & PAGE_MASK) / keg->uk_rsize;
4198 	}
4199 
4200 	if ((idx / dbg_divisor) * dbg_divisor != idx) {
4201 		counter_u64_add(uma_skip_cnt, 1);
4202 		return (true);
4203 	}
4204 	counter_u64_add(uma_dbg_cnt, 1);
4205 
4206 	return (false);
4207 }
4208 
4209 /*
4210  * Set up the slab's freei data such that uma_dbg_free can function.
4211  * Called when items are allocated with INVARIANTS enabled.
4212  */
4213 static void
4214 uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item)
4215 {
4216 	uma_keg_t keg;
4217 	int freei;
4218 
4219 	if (slab == NULL) {
4220 		slab = uma_dbg_getslab(zone, item);
4221 		if (slab == NULL)
4222 			panic("uma: item %p did not belong to zone %s\n",
4223 			    item, zone->uz_name);
4224 	}
4225 	keg = slab->us_keg;
4226 	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
4227 
4228 	if (BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree))
4229 		panic("Duplicate alloc of %p from zone %p(%s) slab %p(%d)\n",
4230 		    item, zone, zone->uz_name, slab, freei);
4231 	BIT_SET_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree);
4232 
4233 	return;
4234 }
4235 
4236 /*
4237  * Verifies freed addresses.  Checks for alignment, valid slab membership
4238  * and duplicate frees.
4239  * Called when items are freed with INVARIANTS enabled.
4240  */
4241 static void
4242 uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
4243 {
4244 	uma_keg_t keg;
4245 	int freei;
4246 
4247 	if (slab == NULL) {
4248 		slab = uma_dbg_getslab(zone, item);
4249 		if (slab == NULL)
4250 			panic("uma: Freed item %p did not belong to zone %s\n",
4251 			    item, zone->uz_name);
4252 	}
4253 	keg = slab->us_keg;
4254 	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
4255 
4256 	if (freei >= keg->uk_ipers)
4257 		panic("Invalid free of %p from zone %p(%s) slab %p(%d)\n",
4258 		    item, zone, zone->uz_name, slab, freei);
4259 
4260 	if (((freei * keg->uk_rsize) + slab->us_data) != item)
4261 		panic("Unaligned free of %p from zone %p(%s) slab %p(%d)\n",
4262 		    item, zone, zone->uz_name, slab, freei);
4263 
4264 	if (!BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree))
4265 		panic("Duplicate free of %p from zone %p(%s) slab %p(%d)\n",
4266 		    item, zone, zone->uz_name, slab, freei);
4267 
4268 	BIT_CLR_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree);
4269 }
4270 #endif /* INVARIANTS */
4271 
4272 #ifdef DDB
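/*
 * "show uma" summarizes every zone: item size, items in use, cached
 * items, total requests, sleeps, bucket size and cross-domain frees.
 */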
4273 DB_SHOW_COMMAND(uma, db_show_uma)
4274 {
4275 	uma_keg_t kz;
4276 	uma_zone_t z;
4277 	uint64_t allocs, frees, sleeps, xdomain;
4278 	long cachefree;
4279 	int i;
4280 
4281 	db_printf("%18s %8s %8s %8s %12s %8s %8s %8s\n", "Zone", "Size", "Used",
4282 	    "Free", "Requests", "Sleeps", "Bucket", "XFree");
4283 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
4284 		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
4285 			if (kz->uk_flags & UMA_ZFLAG_INTERNAL) {
4286 				allocs = counter_u64_fetch(z->uz_allocs);
4287 				frees = counter_u64_fetch(z->uz_frees);
4288 				sleeps = z->uz_sleeps;
4289 				cachefree = xdomain = 0;
4290 			} else
4291 				uma_zone_sumstat(z, &cachefree, &allocs,
4292 				    &frees, &sleeps, &xdomain);
4293 			if (!((z->uz_flags & UMA_ZONE_SECONDARY) &&
4294 			    (LIST_FIRST(&kz->uk_zones) != z)))
4295 				cachefree += kz->uk_free;
4296 			for (i = 0; i < vm_ndomains; i++)
4297 				cachefree += z->uz_domain[i].uzd_nitems;
4298 
4299 			db_printf("%18s %8ju %8jd %8ld %12ju %8ju %8u %8ju\n",
4300 			    z->uz_name, (uintmax_t)kz->uk_size,
4301 			    (intmax_t)(allocs - frees), cachefree,
4302 			    (uintmax_t)allocs, sleeps, z->uz_count, xdomain);
4303 			if (db_pager_quit)
4304 				return;
4305 		}
4306 	}
4307 }
4308 
4309 DB_SHOW_COMMAND(umacache, db_show_umacache)
4310 {
4311 	uma_zone_t z;
4312 	uint64_t allocs, frees;
4313 	long cachefree;
4314 	int i;
4315 
4316 	db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free",
4317 	    "Requests", "Bucket");
4318 	LIST_FOREACH(z, &uma_cachezones, uz_link) {
4319 		uma_zone_sumstat(z, &cachefree, &allocs, &frees, NULL, NULL);
4320 		for (i = 0; i < vm_ndomains; i++)
4321 			cachefree += z->uz_domain[i].uzd_nitems;
4322 		db_printf("%18s %8ju %8jd %8ld %12ju %8u\n",
4323 		    z->uz_name, (uintmax_t)z->uz_size,
4324 		    (intmax_t)(allocs - frees), cachefree,
4325 		    (uintmax_t)allocs, z->uz_count);
4326 		if (db_pager_quit)
4327 			return;
4328 	}
4329 }
4330 #endif	/* DDB */
4331