xref: /freebsd/sys/vm/uma_core.c (revision 48c779cdecb5f803e5fe5d761987e976ca9609db)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson <jeff@FreeBSD.org>
5  * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
6  * Copyright (c) 2004-2006 Robert N. M. Watson
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice unmodified, this list of conditions, and the following
14  *    disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /*
32  * uma_core.c  Implementation of the Universal Memory allocator
33  *
34  * This allocator is intended to replace the multitude of similar object caches
35  * in the standard FreeBSD kernel.  The intent is to be flexible as well as
36  * efficient.  A primary design goal is to return unused memory to the rest of
37  * the system.  This will make the system as a whole more flexible due to the
38  * ability to move memory to subsystems which most need it instead of leaving
39  * pools of reserved memory unused.
40  *
41  * The basic ideas stem from similar slab/zone based allocators whose algorithms
42  * are well known.
43  *
44  */
45 
46 /*
47  * TODO:
48  *	- Improve memory usage for large allocations
49  *	- Investigate cache size adjustments
50  */
51 
52 #include <sys/cdefs.h>
53 __FBSDID("$FreeBSD$");
54 
55 #include "opt_ddb.h"
56 #include "opt_param.h"
57 #include "opt_vm.h"
58 
59 #include <sys/param.h>
60 #include <sys/systm.h>
61 #include <sys/bitset.h>
62 #include <sys/domainset.h>
63 #include <sys/eventhandler.h>
64 #include <sys/kernel.h>
65 #include <sys/types.h>
66 #include <sys/limits.h>
67 #include <sys/queue.h>
68 #include <sys/malloc.h>
69 #include <sys/ktr.h>
70 #include <sys/lock.h>
71 #include <sys/sysctl.h>
72 #include <sys/mutex.h>
73 #include <sys/proc.h>
74 #include <sys/random.h>
75 #include <sys/rwlock.h>
76 #include <sys/sbuf.h>
77 #include <sys/sched.h>
78 #include <sys/smp.h>
79 #include <sys/taskqueue.h>
80 #include <sys/vmmeter.h>
81 
82 #include <vm/vm.h>
83 #include <vm/vm_domainset.h>
84 #include <vm/vm_object.h>
85 #include <vm/vm_page.h>
86 #include <vm/vm_pageout.h>
87 #include <vm/vm_param.h>
88 #include <vm/vm_phys.h>
89 #include <vm/vm_pagequeue.h>
90 #include <vm/vm_map.h>
91 #include <vm/vm_kern.h>
92 #include <vm/vm_extern.h>
93 #include <vm/uma.h>
94 #include <vm/uma_int.h>
95 #include <vm/uma_dbg.h>
96 
97 #include <ddb/ddb.h>
98 
99 #ifdef DEBUG_MEMGUARD
100 #include <vm/memguard.h>
101 #endif
102 
103 /*
104  * This is the zone and keg from which all zones are spawned.
105  */
106 static uma_zone_t kegs;
107 static uma_zone_t zones;
108 
109 /* This is the zone from which all offpage uma_slab_ts are allocated. */
110 static uma_zone_t slabzone;
111 
112 /*
113  * The initial hash tables come out of this zone so they can be allocated
114  * prior to malloc coming up.
115  */
116 static uma_zone_t hashzone;
117 
118 /* The boot-time adjusted value for cache line alignment. */
119 int uma_align_cache = 64 - 1;
120 
121 static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");
122 
123 /*
124  * Are we allowed to allocate buckets?
125  */
126 static int bucketdisable = 1;
127 
128 /* Linked list of all kegs in the system */
129 static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);
130 
131 /* Linked list of all cache-only zones in the system */
132 static LIST_HEAD(,uma_zone) uma_cachezones =
133     LIST_HEAD_INITIALIZER(uma_cachezones);
134 
135 /* This RW lock protects the keg list */
136 static struct rwlock_padalign __exclusive_cache_line uma_rwlock;
137 
138 /*
139  * Pointer and counter to pool of pages, that is preallocated at
140  * startup to bootstrap UMA.
141  */
142 static char *bootmem;
143 static int boot_pages;
144 
145 static struct sx uma_reclaim_lock;
146 
147 /*
148  * kmem soft limit, initialized by uma_set_limit().  Ensure that early
149  * allocations don't trigger a wakeup of the reclaim thread.
150  */
151 static unsigned long uma_kmem_limit = LONG_MAX;
152 SYSCTL_ULONG(_vm, OID_AUTO, uma_kmem_limit, CTLFLAG_RD, &uma_kmem_limit, 0,
153     "UMA kernel memory soft limit");
154 static unsigned long uma_kmem_total;
155 SYSCTL_ULONG(_vm, OID_AUTO, uma_kmem_total, CTLFLAG_RD, &uma_kmem_total, 0,
156     "UMA kernel memory usage");
157 
158 /* Is the VM done starting up? */
159 static enum { BOOT_COLD = 0, BOOT_STRAPPED, BOOT_PAGEALLOC, BOOT_BUCKETS,
160     BOOT_RUNNING } booted = BOOT_COLD;
161 
162 /*
163  * This is the handle used to schedule events that need to happen
164  * outside of the allocation fast path.
165  */
166 static struct callout uma_callout;
167 #define	UMA_TIMEOUT	20		/* Seconds for callout interval. */
168 
169 /*
170  * This structure is passed as the zone ctor arg so that I don't have to create
171  * a special allocation function just for zones.
172  */
173 struct uma_zctor_args {
174 	const char *name;
175 	size_t size;
176 	uma_ctor ctor;
177 	uma_dtor dtor;
178 	uma_init uminit;
179 	uma_fini fini;
180 	uma_import import;
181 	uma_release release;
182 	void *arg;
183 	uma_keg_t keg;
184 	int align;
185 	uint32_t flags;
186 };
187 
188 struct uma_kctor_args {
189 	uma_zone_t zone;
190 	size_t size;
191 	uma_init uminit;
192 	uma_fini fini;
193 	int align;
194 	uint32_t flags;
195 };
196 
197 struct uma_bucket_zone {
198 	uma_zone_t	ubz_zone;
199 	char		*ubz_name;
200 	int		ubz_entries;	/* Number of items it can hold. */
201 	int		ubz_maxsize;	/* Maximum allocation size per-item. */
202 };
203 
204 /*
205  * Compute the actual number of bucket entries to pack them in power
206  * of two sizes for more efficient space utilization.
207  */
208 #define	BUCKET_SIZE(n)						\
209     (((sizeof(void *) * (n)) - sizeof(struct uma_bucket)) / sizeof(void *))
210 
211 #define	BUCKET_MAX	BUCKET_SIZE(256)
212 #define	BUCKET_MIN	BUCKET_SIZE(4)
213 
214 struct uma_bucket_zone bucket_zones[] = {
215 	{ NULL, "4 Bucket", BUCKET_SIZE(4), 4096 },
216 	{ NULL, "6 Bucket", BUCKET_SIZE(6), 3072 },
217 	{ NULL, "8 Bucket", BUCKET_SIZE(8), 2048 },
218 	{ NULL, "12 Bucket", BUCKET_SIZE(12), 1536 },
219 	{ NULL, "16 Bucket", BUCKET_SIZE(16), 1024 },
220 	{ NULL, "32 Bucket", BUCKET_SIZE(32), 512 },
221 	{ NULL, "64 Bucket", BUCKET_SIZE(64), 256 },
222 	{ NULL, "128 Bucket", BUCKET_SIZE(128), 128 },
223 	{ NULL, "256 Bucket", BUCKET_SIZE(256), 64 },
224 	{ NULL, NULL, 0}
225 };
226 
227 /*
228  * Flags and enumerations to be passed to internal functions.
229  */
230 enum zfreeskip {
231 	SKIP_NONE =	0,
232 	SKIP_CNT =	0x00000001,
233 	SKIP_DTOR =	0x00010000,
234 	SKIP_FINI =	0x00020000,
235 };
236 
237 /* Prototypes.. */
238 
239 int	uma_startup_count(int);
240 void	uma_startup(void *, int);
241 void	uma_startup1(void);
242 void	uma_startup2(void);
243 
244 static void *noobj_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
245 static void *page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
246 static void *pcpu_page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
247 static void *startup_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
248 static void page_free(void *, vm_size_t, uint8_t);
249 static void pcpu_page_free(void *, vm_size_t, uint8_t);
250 static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int, int, int);
251 static void cache_drain(uma_zone_t);
252 static void bucket_drain(uma_zone_t, uma_bucket_t);
253 static void bucket_cache_reclaim(uma_zone_t zone, bool);
254 static int keg_ctor(void *, int, void *, int);
255 static void keg_dtor(void *, int, void *);
256 static int zone_ctor(void *, int, void *, int);
257 static void zone_dtor(void *, int, void *);
258 static int zero_init(void *, int, int);
259 static void keg_small_init(uma_keg_t keg);
260 static void keg_large_init(uma_keg_t keg);
261 static void zone_foreach(void (*zfunc)(uma_zone_t));
262 static void zone_timeout(uma_zone_t zone);
263 static int hash_alloc(struct uma_hash *, u_int);
264 static int hash_expand(struct uma_hash *, struct uma_hash *);
265 static void hash_free(struct uma_hash *hash);
266 static void uma_timeout(void *);
267 static void uma_startup3(void);
268 static void *zone_alloc_item(uma_zone_t, void *, int, int);
269 static void *zone_alloc_item_locked(uma_zone_t, void *, int, int);
270 static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip);
271 static void bucket_enable(void);
272 static void bucket_init(void);
273 static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int);
274 static void bucket_free(uma_zone_t zone, uma_bucket_t, void *);
275 static void bucket_zone_drain(void);
276 static uma_bucket_t zone_alloc_bucket(uma_zone_t, void *, int, int, int);
277 static uma_slab_t zone_fetch_slab(uma_zone_t, uma_keg_t, int, int);
278 static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
279 static void slab_free_item(uma_zone_t zone, uma_slab_t slab, void *item);
280 static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
281     uma_fini fini, int align, uint32_t flags);
282 static int zone_import(uma_zone_t, void **, int, int, int);
283 static void zone_release(uma_zone_t, void **, int);
284 static void uma_zero_item(void *, uma_zone_t);
285 
286 void uma_print_zone(uma_zone_t);
287 void uma_print_stats(void);
288 static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
289 static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);
290 
291 #ifdef INVARIANTS
292 static bool uma_dbg_kskip(uma_keg_t keg, void *mem);
293 static bool uma_dbg_zskip(uma_zone_t zone, void *mem);
294 static void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item);
295 static void uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item);
296 
297 static SYSCTL_NODE(_vm, OID_AUTO, debug, CTLFLAG_RD, 0,
298     "Memory allocation debugging");
299 
300 static u_int dbg_divisor = 1;
301 SYSCTL_UINT(_vm_debug, OID_AUTO, divisor,
302     CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &dbg_divisor, 0,
303     "Debug & thrash every this item in memory allocator");
304 
305 static counter_u64_t uma_dbg_cnt = EARLY_COUNTER;
306 static counter_u64_t uma_skip_cnt = EARLY_COUNTER;
307 SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, trashed, CTLFLAG_RD,
308     &uma_dbg_cnt, "memory items debugged");
309 SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, skipped, CTLFLAG_RD,
310     &uma_skip_cnt, "memory items skipped, not debugged");
311 #endif
312 
313 SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);
314 
315 SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLTYPE_INT,
316     0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones");
317 
318 SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
319     0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");
320 
321 static int zone_warnings = 1;
322 SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RWTUN, &zone_warnings, 0,
323     "Warn when UMA zones becomes full");
324 
325 /* Adjust bytes under management by UMA. */
326 static inline void
327 uma_total_dec(unsigned long size)
328 {
329 
330 	atomic_subtract_long(&uma_kmem_total, size);
331 }
332 
333 static inline void
334 uma_total_inc(unsigned long size)
335 {
336 
337 	if (atomic_fetchadd_long(&uma_kmem_total, size) > uma_kmem_limit)
338 		uma_reclaim_wakeup();
339 }
340 
341 /*
342  * This routine checks to see whether or not it's safe to enable buckets.
343  */
344 static void
345 bucket_enable(void)
346 {
347 	bucketdisable = vm_page_count_min();
348 }
349 
350 /*
351  * Initialize bucket_zones, the array of zones of buckets of various sizes.
352  *
353  * For each zone, calculate the memory required for each bucket, consisting
354  * of the header and an array of pointers.
355  */
356 static void
357 bucket_init(void)
358 {
359 	struct uma_bucket_zone *ubz;
360 	int size;
361 
362 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) {
363 		size = roundup(sizeof(struct uma_bucket), sizeof(void *));
364 		size += sizeof(void *) * ubz->ubz_entries;
365 		ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
366 		    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
367 		    UMA_ZONE_MTXCLASS | UMA_ZFLAG_BUCKET | UMA_ZONE_NUMA);
368 	}
369 }
370 
371 /*
372  * Given a desired number of entries for a bucket, return the zone from which
373  * to allocate the bucket.
374  */
375 static struct uma_bucket_zone *
376 bucket_zone_lookup(int entries)
377 {
378 	struct uma_bucket_zone *ubz;
379 
380 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
381 		if (ubz->ubz_entries >= entries)
382 			return (ubz);
383 	ubz--;
384 	return (ubz);
385 }
386 
387 static int
388 bucket_select(int size)
389 {
390 	struct uma_bucket_zone *ubz;
391 
392 	ubz = &bucket_zones[0];
393 	if (size > ubz->ubz_maxsize)
394 		return MAX((ubz->ubz_maxsize * ubz->ubz_entries) / size, 1);
395 
396 	for (; ubz->ubz_entries != 0; ubz++)
397 		if (ubz->ubz_maxsize < size)
398 			break;
399 	ubz--;
400 	return (ubz->ubz_entries);
401 }
402 
403 static uma_bucket_t
404 bucket_alloc(uma_zone_t zone, void *udata, int flags)
405 {
406 	struct uma_bucket_zone *ubz;
407 	uma_bucket_t bucket;
408 
409 	/*
410 	 * This is to stop us from allocating per cpu buckets while we're
411 	 * running out of vm.boot_pages.  Otherwise, we would exhaust the
412 	 * boot pages.  This also prevents us from allocating buckets in
413 	 * low memory situations.
414 	 */
415 	if (bucketdisable)
416 		return (NULL);
417 	/*
418 	 * To limit bucket recursion we store the original zone flags
419 	 * in a cookie passed via zalloc_arg/zfree_arg.  This allows the
420 	 * NOVM flag to persist even through deep recursions.  We also
421 	 * store ZFLAG_BUCKET once we have recursed attempting to allocate
422 	 * a bucket for a bucket zone so we do not allow infinite bucket
423 	 * recursion.  This cookie will even persist to frees of unused
424 	 * buckets via the allocation path or bucket allocations in the
425 	 * free path.
426 	 */
427 	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
428 		udata = (void *)(uintptr_t)zone->uz_flags;
429 	else {
430 		if ((uintptr_t)udata & UMA_ZFLAG_BUCKET)
431 			return (NULL);
432 		udata = (void *)((uintptr_t)udata | UMA_ZFLAG_BUCKET);
433 	}
434 	if ((uintptr_t)udata & UMA_ZFLAG_CACHEONLY)
435 		flags |= M_NOVM;
436 	ubz = bucket_zone_lookup(zone->uz_count);
437 	if (ubz->ubz_zone == zone && (ubz + 1)->ubz_entries != 0)
438 		ubz++;
439 	bucket = uma_zalloc_arg(ubz->ubz_zone, udata, flags);
440 	if (bucket) {
441 #ifdef INVARIANTS
442 		bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
443 #endif
444 		bucket->ub_cnt = 0;
445 		bucket->ub_entries = ubz->ubz_entries;
446 	}
447 
448 	return (bucket);
449 }
450 
451 static void
452 bucket_free(uma_zone_t zone, uma_bucket_t bucket, void *udata)
453 {
454 	struct uma_bucket_zone *ubz;
455 
456 	KASSERT(bucket->ub_cnt == 0,
457 	    ("bucket_free: Freeing a non free bucket."));
458 	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
459 		udata = (void *)(uintptr_t)zone->uz_flags;
460 	ubz = bucket_zone_lookup(bucket->ub_entries);
461 	uma_zfree_arg(ubz->ubz_zone, bucket, udata);
462 }
463 
464 static void
465 bucket_zone_drain(void)
466 {
467 	struct uma_bucket_zone *ubz;
468 
469 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
470 		uma_zone_reclaim(ubz->ubz_zone, UMA_RECLAIM_DRAIN);
471 }
472 
473 /*
474  * Attempt to satisfy an allocation by retrieving a full bucket from one of the
475  * zone's caches.
476  */
477 static uma_bucket_t
478 zone_fetch_bucket(uma_zone_t zone, uma_zone_domain_t zdom)
479 {
480 	uma_bucket_t bucket;
481 
482 	ZONE_LOCK_ASSERT(zone);
483 
484 	if ((bucket = TAILQ_FIRST(&zdom->uzd_buckets)) != NULL) {
485 		MPASS(zdom->uzd_nitems >= bucket->ub_cnt);
486 		TAILQ_REMOVE(&zdom->uzd_buckets, bucket, ub_link);
487 		zdom->uzd_nitems -= bucket->ub_cnt;
488 		if (zdom->uzd_imin > zdom->uzd_nitems)
489 			zdom->uzd_imin = zdom->uzd_nitems;
490 		zone->uz_bkt_count -= bucket->ub_cnt;
491 	}
492 	return (bucket);
493 }
494 
495 /*
496  * Insert a full bucket into the specified cache.  The "ws" parameter indicates
497  * whether the bucket's contents should be counted as part of the zone's working
498  * set.
499  */
500 static void
501 zone_put_bucket(uma_zone_t zone, uma_zone_domain_t zdom, uma_bucket_t bucket,
502     const bool ws)
503 {
504 
505 	ZONE_LOCK_ASSERT(zone);
506 	KASSERT(!ws || zone->uz_bkt_count < zone->uz_bkt_max,
507 	    ("%s: zone %p overflow", __func__, zone));
508 
509 	if (ws)
510 		TAILQ_INSERT_HEAD(&zdom->uzd_buckets, bucket, ub_link);
511 	else
512 		TAILQ_INSERT_TAIL(&zdom->uzd_buckets, bucket, ub_link);
513 	zdom->uzd_nitems += bucket->ub_cnt;
514 	if (ws && zdom->uzd_imax < zdom->uzd_nitems)
515 		zdom->uzd_imax = zdom->uzd_nitems;
516 	zone->uz_bkt_count += bucket->ub_cnt;
517 }
518 
519 static void
520 zone_log_warning(uma_zone_t zone)
521 {
522 	static const struct timeval warninterval = { 300, 0 };
523 
524 	if (!zone_warnings || zone->uz_warning == NULL)
525 		return;
526 
527 	if (ratecheck(&zone->uz_ratecheck, &warninterval))
528 		printf("[zone: %s] %s\n", zone->uz_name, zone->uz_warning);
529 }
530 
531 static inline void
532 zone_maxaction(uma_zone_t zone)
533 {
534 
535 	if (zone->uz_maxaction.ta_func != NULL)
536 		taskqueue_enqueue(taskqueue_thread, &zone->uz_maxaction);
537 }
538 
539 /*
540  * Routine called by timeout which is used to fire off some time interval
541  * based calculations.  (stats, hash size, etc.)
542  *
543  * Arguments:
544  *	arg   Unused
545  *
546  * Returns:
547  *	Nothing
548  */
549 static void
550 uma_timeout(void *unused)
551 {
552 	bucket_enable();
553 	zone_foreach(zone_timeout);
554 
555 	/* Reschedule this event */
556 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
557 }
558 
559 /*
560  * Update the working set size estimate for the zone's bucket cache.
561  * The constants chosen here are somewhat arbitrary.  With an update period of
562  * 20s (UMA_TIMEOUT), this estimate is dominated by zone activity over the
563  * last 100s.
564  */
565 static void
566 zone_domain_update_wss(uma_zone_domain_t zdom)
567 {
568 	long wss;
569 
570 	MPASS(zdom->uzd_imax >= zdom->uzd_imin);
571 	wss = zdom->uzd_imax - zdom->uzd_imin;
572 	zdom->uzd_imax = zdom->uzd_imin = zdom->uzd_nitems;
573 	zdom->uzd_wss = (4 * wss + zdom->uzd_wss) / 5;
574 }
575 
576 /*
577  * Routine to perform timeout driven calculations.  This expands the
578  * hashes and does per cpu statistics aggregation.
579  *
580  *  Returns nothing.
581  */
582 static void
583 zone_timeout(uma_zone_t zone)
584 {
585 	uma_keg_t keg;
586 	u_int slabs;
587 
588 	if ((zone->uz_flags & UMA_ZFLAG_CACHE) != 0)
589 		goto update_wss;
590 
591 	keg = zone->uz_keg;
592 	KEG_LOCK(keg);
593 	/*
594 	 * Expand the keg hash table.
595 	 *
596 	 * This is done if the number of slabs is larger than the hash size.
597 	 * What I'm trying to do here is completely reduce collisions.  This
598 	 * may be a little aggressive.  Should I allow for two collisions max?
599 	 */
600 	if (keg->uk_flags & UMA_ZONE_HASH &&
601 	    (slabs = keg->uk_pages / keg->uk_ppera) >
602 	     keg->uk_hash.uh_hashsize) {
603 		struct uma_hash newhash;
604 		struct uma_hash oldhash;
605 		int ret;
606 
607 		/*
608 		 * This is so involved because allocating and freeing
609 		 * while the keg lock is held will lead to deadlock.
610 		 * I have to do everything in stages and check for
611 		 * races.
612 		 */
613 		KEG_UNLOCK(keg);
614 		ret = hash_alloc(&newhash, 1 << fls(slabs));
615 		KEG_LOCK(keg);
616 		if (ret) {
617 			if (hash_expand(&keg->uk_hash, &newhash)) {
618 				oldhash = keg->uk_hash;
619 				keg->uk_hash = newhash;
620 			} else
621 				oldhash = newhash;
622 
623 			KEG_UNLOCK(keg);
624 			hash_free(&oldhash);
625 			return;
626 		}
627 	}
628 	KEG_UNLOCK(keg);
629 
630 update_wss:
631 	ZONE_LOCK(zone);
632 	for (int i = 0; i < vm_ndomains; i++)
633 		zone_domain_update_wss(&zone->uz_domain[i]);
634 	ZONE_UNLOCK(zone);
635 }
636 
637 /*
638  * Allocate and zero fill the next sized hash table from the appropriate
639  * backing store.
640  *
641  * Arguments:
642  *	hash  A new hash structure with the old hash size in uh_hashsize
643  *
644  * Returns:
645  *	1 on success and 0 on failure.
646  */
647 static int
648 hash_alloc(struct uma_hash *hash, u_int size)
649 {
650 	size_t alloc;
651 
652 	KASSERT(powerof2(size), ("hash size must be power of 2"));
653 	if (size > UMA_HASH_SIZE_INIT)  {
654 		hash->uh_hashsize = size;
655 		alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
656 		hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
657 		    M_UMAHASH, M_NOWAIT);
658 	} else {
659 		alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
660 		hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
661 		    UMA_ANYDOMAIN, M_WAITOK);
662 		hash->uh_hashsize = UMA_HASH_SIZE_INIT;
663 	}
664 	if (hash->uh_slab_hash) {
665 		bzero(hash->uh_slab_hash, alloc);
666 		hash->uh_hashmask = hash->uh_hashsize - 1;
667 		return (1);
668 	}
669 
670 	return (0);
671 }
672 
673 /*
674  * Expands the hash table for HASH zones.  This is done from zone_timeout
675  * to reduce collisions.  This must not be done in the regular allocation
676  * path, otherwise, we can recurse on the vm while allocating pages.
677  *
678  * Arguments:
679  *	oldhash  The hash you want to expand
680  *	newhash  The hash structure for the new table
681  *
682  * Returns:
683  *	Nothing
684  *
685  * Discussion:
686  */
687 static int
688 hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
689 {
690 	uma_slab_t slab;
691 	u_int hval;
692 	u_int idx;
693 
694 	if (!newhash->uh_slab_hash)
695 		return (0);
696 
697 	if (oldhash->uh_hashsize >= newhash->uh_hashsize)
698 		return (0);
699 
700 	/*
701 	 * I need to investigate hash algorithms for resizing without a
702 	 * full rehash.
703 	 */
704 
705 	for (idx = 0; idx < oldhash->uh_hashsize; idx++)
706 		while (!SLIST_EMPTY(&oldhash->uh_slab_hash[idx])) {
707 			slab = SLIST_FIRST(&oldhash->uh_slab_hash[idx]);
708 			SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[idx], us_hlink);
709 			hval = UMA_HASH(newhash, slab->us_data);
710 			SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
711 			    slab, us_hlink);
712 		}
713 
714 	return (1);
715 }
716 
717 /*
718  * Free the hash bucket to the appropriate backing store.
719  *
720  * Arguments:
721  *	slab_hash  The hash bucket we're freeing
722  *	hashsize   The number of entries in that hash bucket
723  *
724  * Returns:
725  *	Nothing
726  */
727 static void
728 hash_free(struct uma_hash *hash)
729 {
730 	if (hash->uh_slab_hash == NULL)
731 		return;
732 	if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
733 		zone_free_item(hashzone, hash->uh_slab_hash, NULL, SKIP_NONE);
734 	else
735 		free(hash->uh_slab_hash, M_UMAHASH);
736 }
737 
738 /*
739  * Frees all outstanding items in a bucket
740  *
741  * Arguments:
742  *	zone   The zone to free to, must be unlocked.
743  *	bucket The free/alloc bucket with items, cpu queue must be locked.
744  *
745  * Returns:
746  *	Nothing
747  */
748 
749 static void
750 bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
751 {
752 	int i;
753 
754 	if (bucket == NULL)
755 		return;
756 
757 	if (zone->uz_fini)
758 		for (i = 0; i < bucket->ub_cnt; i++)
759 			zone->uz_fini(bucket->ub_bucket[i], zone->uz_size);
760 	zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt);
761 	if (zone->uz_max_items > 0) {
762 		ZONE_LOCK(zone);
763 		zone->uz_items -= bucket->ub_cnt;
764 		if (zone->uz_sleepers && zone->uz_items < zone->uz_max_items)
765 			wakeup_one(zone);
766 		ZONE_UNLOCK(zone);
767 	}
768 	bucket->ub_cnt = 0;
769 }
770 
771 /*
772  * Drains the per cpu caches for a zone.
773  *
774  * NOTE: This may only be called while the zone is being turn down, and not
775  * during normal operation.  This is necessary in order that we do not have
776  * to migrate CPUs to drain the per-CPU caches.
777  *
778  * Arguments:
779  *	zone     The zone to drain, must be unlocked.
780  *
781  * Returns:
782  *	Nothing
783  */
784 static void
785 cache_drain(uma_zone_t zone)
786 {
787 	uma_cache_t cache;
788 	int cpu;
789 
790 	/*
791 	 * XXX: It is safe to not lock the per-CPU caches, because we're
792 	 * tearing down the zone anyway.  I.e., there will be no further use
793 	 * of the caches at this point.
794 	 *
795 	 * XXX: It would good to be able to assert that the zone is being
796 	 * torn down to prevent improper use of cache_drain().
797 	 *
798 	 * XXX: We lock the zone before passing into bucket_cache_reclaim() as
799 	 * it is used elsewhere.  Should the tear-down path be made special
800 	 * there in some form?
801 	 */
802 	CPU_FOREACH(cpu) {
803 		cache = &zone->uz_cpu[cpu];
804 		bucket_drain(zone, cache->uc_allocbucket);
805 		if (cache->uc_allocbucket != NULL)
806 			bucket_free(zone, cache->uc_allocbucket, NULL);
807 		cache->uc_allocbucket = NULL;
808 		bucket_drain(zone, cache->uc_freebucket);
809 		if (cache->uc_freebucket != NULL)
810 			bucket_free(zone, cache->uc_freebucket, NULL);
811 		cache->uc_freebucket = NULL;
812 		bucket_drain(zone, cache->uc_crossbucket);
813 		if (cache->uc_crossbucket != NULL)
814 			bucket_free(zone, cache->uc_crossbucket, NULL);
815 		cache->uc_crossbucket = NULL;
816 	}
817 	ZONE_LOCK(zone);
818 	bucket_cache_reclaim(zone, true);
819 	ZONE_UNLOCK(zone);
820 }
821 
822 static void
823 cache_shrink(uma_zone_t zone)
824 {
825 
826 	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
827 		return;
828 
829 	ZONE_LOCK(zone);
830 	zone->uz_count = (zone->uz_count_min + zone->uz_count) / 2;
831 	ZONE_UNLOCK(zone);
832 }
833 
834 static void
835 cache_drain_safe_cpu(uma_zone_t zone)
836 {
837 	uma_cache_t cache;
838 	uma_bucket_t b1, b2, b3;
839 	int domain;
840 
841 	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
842 		return;
843 
844 	b1 = b2 = b3 = NULL;
845 	ZONE_LOCK(zone);
846 	critical_enter();
847 	if (zone->uz_flags & UMA_ZONE_NUMA)
848 		domain = PCPU_GET(domain);
849 	else
850 		domain = 0;
851 	cache = &zone->uz_cpu[curcpu];
852 	if (cache->uc_allocbucket) {
853 		if (cache->uc_allocbucket->ub_cnt != 0)
854 			zone_put_bucket(zone, &zone->uz_domain[domain],
855 			    cache->uc_allocbucket, false);
856 		else
857 			b1 = cache->uc_allocbucket;
858 		cache->uc_allocbucket = NULL;
859 	}
860 	if (cache->uc_freebucket) {
861 		if (cache->uc_freebucket->ub_cnt != 0)
862 			zone_put_bucket(zone, &zone->uz_domain[domain],
863 			    cache->uc_freebucket, false);
864 		else
865 			b2 = cache->uc_freebucket;
866 		cache->uc_freebucket = NULL;
867 	}
868 	b3 = cache->uc_crossbucket;
869 	cache->uc_crossbucket = NULL;
870 	critical_exit();
871 	ZONE_UNLOCK(zone);
872 	if (b1)
873 		bucket_free(zone, b1, NULL);
874 	if (b2)
875 		bucket_free(zone, b2, NULL);
876 	if (b3) {
877 		bucket_drain(zone, b3);
878 		bucket_free(zone, b3, NULL);
879 	}
880 }
881 
882 /*
883  * Safely drain per-CPU caches of a zone(s) to alloc bucket.
884  * This is an expensive call because it needs to bind to all CPUs
885  * one by one and enter a critical section on each of them in order
886  * to safely access their cache buckets.
887  * Zone lock must not be held on call this function.
888  */
889 static void
890 pcpu_cache_drain_safe(uma_zone_t zone)
891 {
892 	int cpu;
893 
894 	/*
895 	 * Polite bucket sizes shrinking was not enouth, shrink aggressively.
896 	 */
897 	if (zone)
898 		cache_shrink(zone);
899 	else
900 		zone_foreach(cache_shrink);
901 
902 	CPU_FOREACH(cpu) {
903 		thread_lock(curthread);
904 		sched_bind(curthread, cpu);
905 		thread_unlock(curthread);
906 
907 		if (zone)
908 			cache_drain_safe_cpu(zone);
909 		else
910 			zone_foreach(cache_drain_safe_cpu);
911 	}
912 	thread_lock(curthread);
913 	sched_unbind(curthread);
914 	thread_unlock(curthread);
915 }
916 
917 /*
918  * Reclaim cached buckets from a zone.  All buckets are reclaimed if the caller
919  * requested a drain, otherwise the per-domain caches are trimmed to either
920  * estimated working set size.
921  */
922 static void
923 bucket_cache_reclaim(uma_zone_t zone, bool drain)
924 {
925 	uma_zone_domain_t zdom;
926 	uma_bucket_t bucket;
927 	long target, tofree;
928 	int i;
929 
930 	for (i = 0; i < vm_ndomains; i++) {
931 		zdom = &zone->uz_domain[i];
932 
933 		/*
934 		 * If we were asked to drain the zone, we are done only once
935 		 * this bucket cache is empty.  Otherwise, we reclaim items in
936 		 * excess of the zone's estimated working set size.  If the
937 		 * difference nitems - imin is larger than the WSS estimate,
938 		 * then the estimate will grow at the end of this interval and
939 		 * we ignore the historical average.
940 		 */
941 		target = drain ? 0 : lmax(zdom->uzd_wss, zdom->uzd_nitems -
942 		    zdom->uzd_imin);
943 		while (zdom->uzd_nitems > target) {
944 			bucket = TAILQ_LAST(&zdom->uzd_buckets, uma_bucketlist);
945 			if (bucket == NULL)
946 				break;
947 			tofree = bucket->ub_cnt;
948 			TAILQ_REMOVE(&zdom->uzd_buckets, bucket, ub_link);
949 			zdom->uzd_nitems -= tofree;
950 
951 			/*
952 			 * Shift the bounds of the current WSS interval to avoid
953 			 * perturbing the estimate.
954 			 */
955 			zdom->uzd_imax -= lmin(zdom->uzd_imax, tofree);
956 			zdom->uzd_imin -= lmin(zdom->uzd_imin, tofree);
957 
958 			ZONE_UNLOCK(zone);
959 			bucket_drain(zone, bucket);
960 			bucket_free(zone, bucket, NULL);
961 			ZONE_LOCK(zone);
962 		}
963 	}
964 
965 	/*
966 	 * Shrink the zone bucket size to ensure that the per-CPU caches
967 	 * don't grow too large.
968 	 */
969 	if (zone->uz_count > zone->uz_count_min)
970 		zone->uz_count--;
971 }
972 
973 static void
974 keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
975 {
976 	uint8_t *mem;
977 	int i;
978 	uint8_t flags;
979 
980 	CTR4(KTR_UMA, "keg_free_slab keg %s(%p) slab %p, returning %d bytes",
981 	    keg->uk_name, keg, slab, PAGE_SIZE * keg->uk_ppera);
982 
983 	mem = slab->us_data;
984 	flags = slab->us_flags;
985 	i = start;
986 	if (keg->uk_fini != NULL) {
987 		for (i--; i > -1; i--)
988 #ifdef INVARIANTS
989 		/*
990 		 * trash_fini implies that dtor was trash_dtor. trash_fini
991 		 * would check that memory hasn't been modified since free,
992 		 * which executed trash_dtor.
993 		 * That's why we need to run uma_dbg_kskip() check here,
994 		 * albeit we don't make skip check for other init/fini
995 		 * invocations.
996 		 */
997 		if (!uma_dbg_kskip(keg, slab->us_data + (keg->uk_rsize * i)) ||
998 		    keg->uk_fini != trash_fini)
999 #endif
1000 			keg->uk_fini(slab->us_data + (keg->uk_rsize * i),
1001 			    keg->uk_size);
1002 	}
1003 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
1004 		zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
1005 	keg->uk_freef(mem, PAGE_SIZE * keg->uk_ppera, flags);
1006 	uma_total_dec(PAGE_SIZE * keg->uk_ppera);
1007 }
1008 
1009 /*
1010  * Frees pages from a keg back to the system.  This is done on demand from
1011  * the pageout daemon.
1012  *
1013  * Returns nothing.
1014  */
1015 static void
1016 keg_drain(uma_keg_t keg)
1017 {
1018 	struct slabhead freeslabs = { 0 };
1019 	uma_domain_t dom;
1020 	uma_slab_t slab, tmp;
1021 	int i;
1022 
1023 	/*
1024 	 * We don't want to take pages from statically allocated kegs at this
1025 	 * time
1026 	 */
1027 	if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
1028 		return;
1029 
1030 	CTR3(KTR_UMA, "keg_drain %s(%p) free items: %u",
1031 	    keg->uk_name, keg, keg->uk_free);
1032 	KEG_LOCK(keg);
1033 	if (keg->uk_free == 0)
1034 		goto finished;
1035 
1036 	for (i = 0; i < vm_ndomains; i++) {
1037 		dom = &keg->uk_domain[i];
1038 		LIST_FOREACH_SAFE(slab, &dom->ud_free_slab, us_link, tmp) {
1039 			/* We have nowhere to free these to. */
1040 			if (slab->us_flags & UMA_SLAB_BOOT)
1041 				continue;
1042 
1043 			LIST_REMOVE(slab, us_link);
1044 			keg->uk_pages -= keg->uk_ppera;
1045 			keg->uk_free -= keg->uk_ipers;
1046 
1047 			if (keg->uk_flags & UMA_ZONE_HASH)
1048 				UMA_HASH_REMOVE(&keg->uk_hash, slab,
1049 				    slab->us_data);
1050 
1051 			SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);
1052 		}
1053 	}
1054 
1055 finished:
1056 	KEG_UNLOCK(keg);
1057 
1058 	while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
1059 		SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
1060 		keg_free_slab(keg, slab, keg->uk_ipers);
1061 	}
1062 }
1063 
1064 static void
1065 zone_reclaim(uma_zone_t zone, int waitok, bool drain)
1066 {
1067 
1068 	/*
1069 	 * Set draining to interlock with zone_dtor() so we can release our
1070 	 * locks as we go.  Only dtor() should do a WAITOK call since it
1071 	 * is the only call that knows the structure will still be available
1072 	 * when it wakes up.
1073 	 */
1074 	ZONE_LOCK(zone);
1075 	while (zone->uz_flags & UMA_ZFLAG_RECLAIMING) {
1076 		if (waitok == M_NOWAIT)
1077 			goto out;
1078 		msleep(zone, zone->uz_lockptr, PVM, "zonedrain", 1);
1079 	}
1080 	zone->uz_flags |= UMA_ZFLAG_RECLAIMING;
1081 	bucket_cache_reclaim(zone, drain);
1082 	ZONE_UNLOCK(zone);
1083 
1084 	/*
1085 	 * The DRAINING flag protects us from being freed while
1086 	 * we're running.  Normally the uma_rwlock would protect us but we
1087 	 * must be able to release and acquire the right lock for each keg.
1088 	 */
1089 	if ((zone->uz_flags & UMA_ZFLAG_CACHE) == 0)
1090 		keg_drain(zone->uz_keg);
1091 	ZONE_LOCK(zone);
1092 	zone->uz_flags &= ~UMA_ZFLAG_RECLAIMING;
1093 	wakeup(zone);
1094 out:
1095 	ZONE_UNLOCK(zone);
1096 }
1097 
1098 static void
1099 zone_drain(uma_zone_t zone)
1100 {
1101 
1102 	zone_reclaim(zone, M_NOWAIT, true);
1103 }
1104 
1105 static void
1106 zone_trim(uma_zone_t zone)
1107 {
1108 
1109 	zone_reclaim(zone, M_NOWAIT, false);
1110 }
1111 
1112 /*
1113  * Allocate a new slab for a keg.  This does not insert the slab onto a list.
1114  * If the allocation was successful, the keg lock will be held upon return,
1115  * otherwise the keg will be left unlocked.
1116  *
1117  * Arguments:
1118  *	flags   Wait flags for the item initialization routine
1119  *	aflags  Wait flags for the slab allocation
1120  *
1121  * Returns:
1122  *	The slab that was allocated or NULL if there is no memory and the
1123  *	caller specified M_NOWAIT.
1124  */
1125 static uma_slab_t
1126 keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int domain, int flags,
1127     int aflags)
1128 {
1129 	uma_alloc allocf;
1130 	uma_slab_t slab;
1131 	unsigned long size;
1132 	uint8_t *mem;
1133 	uint8_t sflags;
1134 	int i;
1135 
1136 	KASSERT(domain >= 0 && domain < vm_ndomains,
1137 	    ("keg_alloc_slab: domain %d out of range", domain));
1138 	KEG_LOCK_ASSERT(keg);
1139 	MPASS(zone->uz_lockptr == &keg->uk_lock);
1140 
1141 	allocf = keg->uk_allocf;
1142 	KEG_UNLOCK(keg);
1143 
1144 	slab = NULL;
1145 	mem = NULL;
1146 	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
1147 		slab = zone_alloc_item(keg->uk_slabzone, NULL, domain, aflags);
1148 		if (slab == NULL)
1149 			goto out;
1150 	}
1151 
1152 	/*
1153 	 * This reproduces the old vm_zone behavior of zero filling pages the
1154 	 * first time they are added to a zone.
1155 	 *
1156 	 * Malloced items are zeroed in uma_zalloc.
1157 	 */
1158 
1159 	if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
1160 		aflags |= M_ZERO;
1161 	else
1162 		aflags &= ~M_ZERO;
1163 
1164 	if (keg->uk_flags & UMA_ZONE_NODUMP)
1165 		aflags |= M_NODUMP;
1166 
1167 	/* zone is passed for legacy reasons. */
1168 	size = keg->uk_ppera * PAGE_SIZE;
1169 	mem = allocf(zone, size, domain, &sflags, aflags);
1170 	if (mem == NULL) {
1171 		if (keg->uk_flags & UMA_ZONE_OFFPAGE)
1172 			zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
1173 		slab = NULL;
1174 		goto out;
1175 	}
1176 	uma_total_inc(size);
1177 
1178 	/* Point the slab into the allocated memory */
1179 	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
1180 		slab = (uma_slab_t )(mem + keg->uk_pgoff);
1181 
1182 	if (keg->uk_flags & UMA_ZONE_VTOSLAB)
1183 		for (i = 0; i < keg->uk_ppera; i++)
1184 			vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);
1185 
1186 	slab->us_keg = keg;
1187 	slab->us_data = mem;
1188 	slab->us_freecount = keg->uk_ipers;
1189 	slab->us_flags = sflags;
1190 	slab->us_domain = domain;
1191 	BIT_FILL(SLAB_SETSIZE, &slab->us_free);
1192 #ifdef INVARIANTS
1193 	BIT_ZERO(SLAB_SETSIZE, &slab->us_debugfree);
1194 #endif
1195 
1196 	if (keg->uk_init != NULL) {
1197 		for (i = 0; i < keg->uk_ipers; i++)
1198 			if (keg->uk_init(slab->us_data + (keg->uk_rsize * i),
1199 			    keg->uk_size, flags) != 0)
1200 				break;
1201 		if (i != keg->uk_ipers) {
1202 			keg_free_slab(keg, slab, i);
1203 			slab = NULL;
1204 			goto out;
1205 		}
1206 	}
1207 	KEG_LOCK(keg);
1208 
1209 	CTR3(KTR_UMA, "keg_alloc_slab: allocated slab %p for %s(%p)",
1210 	    slab, keg->uk_name, keg);
1211 
1212 	if (keg->uk_flags & UMA_ZONE_HASH)
1213 		UMA_HASH_INSERT(&keg->uk_hash, slab, mem);
1214 
1215 	keg->uk_pages += keg->uk_ppera;
1216 	keg->uk_free += keg->uk_ipers;
1217 
1218 out:
1219 	return (slab);
1220 }
1221 
1222 /*
1223  * This function is intended to be used early on in place of page_alloc() so
1224  * that we may use the boot time page cache to satisfy allocations before
1225  * the VM is ready.
1226  */
1227 static void *
1228 startup_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
1229     int wait)
1230 {
1231 	uma_keg_t keg;
1232 	void *mem;
1233 	int pages;
1234 
1235 	keg = zone->uz_keg;
1236 	/*
1237 	 * If we are in BOOT_BUCKETS or higher, than switch to real
1238 	 * allocator.  Zones with page sized slabs switch at BOOT_PAGEALLOC.
1239 	 */
1240 	switch (booted) {
1241 		case BOOT_COLD:
1242 		case BOOT_STRAPPED:
1243 			break;
1244 		case BOOT_PAGEALLOC:
1245 			if (keg->uk_ppera > 1)
1246 				break;
1247 		case BOOT_BUCKETS:
1248 		case BOOT_RUNNING:
1249 #ifdef UMA_MD_SMALL_ALLOC
1250 			keg->uk_allocf = (keg->uk_ppera > 1) ?
1251 			    page_alloc : uma_small_alloc;
1252 #else
1253 			keg->uk_allocf = page_alloc;
1254 #endif
1255 			return keg->uk_allocf(zone, bytes, domain, pflag, wait);
1256 	}
1257 
1258 	/*
1259 	 * Check our small startup cache to see if it has pages remaining.
1260 	 */
1261 	pages = howmany(bytes, PAGE_SIZE);
1262 	KASSERT(pages > 0, ("%s can't reserve 0 pages", __func__));
1263 	if (pages > boot_pages)
1264 		panic("UMA zone \"%s\": Increase vm.boot_pages", zone->uz_name);
1265 #ifdef DIAGNOSTIC
1266 	printf("%s from \"%s\", %d boot pages left\n", __func__, zone->uz_name,
1267 	    boot_pages);
1268 #endif
1269 	mem = bootmem;
1270 	boot_pages -= pages;
1271 	bootmem += pages * PAGE_SIZE;
1272 	*pflag = UMA_SLAB_BOOT;
1273 
1274 	return (mem);
1275 }
1276 
1277 /*
1278  * Allocates a number of pages from the system
1279  *
1280  * Arguments:
1281  *	bytes  The number of bytes requested
1282  *	wait  Shall we wait?
1283  *
1284  * Returns:
1285  *	A pointer to the alloced memory or possibly
1286  *	NULL if M_NOWAIT is set.
1287  */
1288 static void *
1289 page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
1290     int wait)
1291 {
1292 	void *p;	/* Returned page */
1293 
1294 	*pflag = UMA_SLAB_KERNEL;
1295 	p = (void *)kmem_malloc_domainset(DOMAINSET_FIXED(domain), bytes, wait);
1296 
1297 	return (p);
1298 }
1299 
1300 static void *
1301 pcpu_page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
1302     int wait)
1303 {
1304 	struct pglist alloctail;
1305 	vm_offset_t addr, zkva;
1306 	int cpu, flags;
1307 	vm_page_t p, p_next;
1308 #ifdef NUMA
1309 	struct pcpu *pc;
1310 #endif
1311 
1312 	MPASS(bytes == (mp_maxid + 1) * PAGE_SIZE);
1313 
1314 	TAILQ_INIT(&alloctail);
1315 	flags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
1316 	    malloc2vm_flags(wait);
1317 	*pflag = UMA_SLAB_KERNEL;
1318 	for (cpu = 0; cpu <= mp_maxid; cpu++) {
1319 		if (CPU_ABSENT(cpu)) {
1320 			p = vm_page_alloc(NULL, 0, flags);
1321 		} else {
1322 #ifndef NUMA
1323 			p = vm_page_alloc(NULL, 0, flags);
1324 #else
1325 			pc = pcpu_find(cpu);
1326 			p = vm_page_alloc_domain(NULL, 0, pc->pc_domain, flags);
1327 			if (__predict_false(p == NULL))
1328 				p = vm_page_alloc(NULL, 0, flags);
1329 #endif
1330 		}
1331 		if (__predict_false(p == NULL))
1332 			goto fail;
1333 		TAILQ_INSERT_TAIL(&alloctail, p, listq);
1334 	}
1335 	if ((addr = kva_alloc(bytes)) == 0)
1336 		goto fail;
1337 	zkva = addr;
1338 	TAILQ_FOREACH(p, &alloctail, listq) {
1339 		pmap_qenter(zkva, &p, 1);
1340 		zkva += PAGE_SIZE;
1341 	}
1342 	return ((void*)addr);
1343 fail:
1344 	TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
1345 		vm_page_unwire_noq(p);
1346 		vm_page_free(p);
1347 	}
1348 	return (NULL);
1349 }
1350 
1351 /*
1352  * Allocates a number of pages from within an object
1353  *
1354  * Arguments:
1355  *	bytes  The number of bytes requested
1356  *	wait   Shall we wait?
1357  *
1358  * Returns:
1359  *	A pointer to the alloced memory or possibly
1360  *	NULL if M_NOWAIT is set.
1361  */
1362 static void *
1363 noobj_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags,
1364     int wait)
1365 {
1366 	TAILQ_HEAD(, vm_page) alloctail;
1367 	u_long npages;
1368 	vm_offset_t retkva, zkva;
1369 	vm_page_t p, p_next;
1370 	uma_keg_t keg;
1371 
1372 	TAILQ_INIT(&alloctail);
1373 	keg = zone->uz_keg;
1374 
1375 	npages = howmany(bytes, PAGE_SIZE);
1376 	while (npages > 0) {
1377 		p = vm_page_alloc_domain(NULL, 0, domain, VM_ALLOC_INTERRUPT |
1378 		    VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
1379 		    ((wait & M_WAITOK) != 0 ? VM_ALLOC_WAITOK :
1380 		    VM_ALLOC_NOWAIT));
1381 		if (p != NULL) {
1382 			/*
1383 			 * Since the page does not belong to an object, its
1384 			 * listq is unused.
1385 			 */
1386 			TAILQ_INSERT_TAIL(&alloctail, p, listq);
1387 			npages--;
1388 			continue;
1389 		}
1390 		/*
1391 		 * Page allocation failed, free intermediate pages and
1392 		 * exit.
1393 		 */
1394 		TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
1395 			vm_page_unwire_noq(p);
1396 			vm_page_free(p);
1397 		}
1398 		return (NULL);
1399 	}
1400 	*flags = UMA_SLAB_PRIV;
1401 	zkva = keg->uk_kva +
1402 	    atomic_fetchadd_long(&keg->uk_offset, round_page(bytes));
1403 	retkva = zkva;
1404 	TAILQ_FOREACH(p, &alloctail, listq) {
1405 		pmap_qenter(zkva, &p, 1);
1406 		zkva += PAGE_SIZE;
1407 	}
1408 
1409 	return ((void *)retkva);
1410 }
1411 
1412 /*
1413  * Frees a number of pages to the system
1414  *
1415  * Arguments:
1416  *	mem   A pointer to the memory to be freed
1417  *	size  The size of the memory being freed
1418  *	flags The original p->us_flags field
1419  *
1420  * Returns:
1421  *	Nothing
1422  */
1423 static void
1424 page_free(void *mem, vm_size_t size, uint8_t flags)
1425 {
1426 
1427 	if ((flags & UMA_SLAB_KERNEL) == 0)
1428 		panic("UMA: page_free used with invalid flags %x", flags);
1429 
1430 	kmem_free((vm_offset_t)mem, size);
1431 }
1432 
1433 /*
1434  * Frees pcpu zone allocations
1435  *
1436  * Arguments:
1437  *	mem   A pointer to the memory to be freed
1438  *	size  The size of the memory being freed
1439  *	flags The original p->us_flags field
1440  *
1441  * Returns:
1442  *	Nothing
1443  */
1444 static void
1445 pcpu_page_free(void *mem, vm_size_t size, uint8_t flags)
1446 {
1447 	vm_offset_t sva, curva;
1448 	vm_paddr_t paddr;
1449 	vm_page_t m;
1450 
1451 	MPASS(size == (mp_maxid+1)*PAGE_SIZE);
1452 	sva = (vm_offset_t)mem;
1453 	for (curva = sva; curva < sva + size; curva += PAGE_SIZE) {
1454 		paddr = pmap_kextract(curva);
1455 		m = PHYS_TO_VM_PAGE(paddr);
1456 		vm_page_unwire_noq(m);
1457 		vm_page_free(m);
1458 	}
1459 	pmap_qremove(sva, size >> PAGE_SHIFT);
1460 	kva_free(sva, size);
1461 }
1462 
1463 
1464 /*
1465  * Zero fill initializer
1466  *
1467  * Arguments/Returns follow uma_init specifications
1468  */
1469 static int
1470 zero_init(void *mem, int size, int flags)
1471 {
1472 	bzero(mem, size);
1473 	return (0);
1474 }
1475 
1476 /*
1477  * Finish creating a small uma keg.  This calculates ipers, and the keg size.
1478  *
1479  * Arguments
1480  *	keg  The zone we should initialize
1481  *
1482  * Returns
1483  *	Nothing
1484  */
1485 static void
1486 keg_small_init(uma_keg_t keg)
1487 {
1488 	u_int rsize;
1489 	u_int memused;
1490 	u_int wastedspace;
1491 	u_int shsize;
1492 	u_int slabsize;
1493 
1494 	if (keg->uk_flags & UMA_ZONE_PCPU) {
1495 		u_int ncpus = (mp_maxid + 1) ? (mp_maxid + 1) : MAXCPU;
1496 
1497 		slabsize = UMA_PCPU_ALLOC_SIZE;
1498 		keg->uk_ppera = ncpus;
1499 	} else {
1500 		slabsize = UMA_SLAB_SIZE;
1501 		keg->uk_ppera = 1;
1502 	}
1503 
1504 	/*
1505 	 * Calculate the size of each allocation (rsize) according to
1506 	 * alignment.  If the requested size is smaller than we have
1507 	 * allocation bits for we round it up.
1508 	 */
1509 	rsize = keg->uk_size;
1510 	if (rsize < slabsize / SLAB_SETSIZE)
1511 		rsize = slabsize / SLAB_SETSIZE;
1512 	if (rsize & keg->uk_align)
1513 		rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1);
1514 	keg->uk_rsize = rsize;
1515 
1516 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0 ||
1517 	    keg->uk_rsize < UMA_PCPU_ALLOC_SIZE,
1518 	    ("%s: size %u too large", __func__, keg->uk_rsize));
1519 
1520 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
1521 		shsize = 0;
1522 	else
1523 		shsize = SIZEOF_UMA_SLAB;
1524 
1525 	if (rsize <= slabsize - shsize)
1526 		keg->uk_ipers = (slabsize - shsize) / rsize;
1527 	else {
1528 		/* Handle special case when we have 1 item per slab, so
1529 		 * alignment requirement can be relaxed. */
1530 		KASSERT(keg->uk_size <= slabsize - shsize,
1531 		    ("%s: size %u greater than slab", __func__, keg->uk_size));
1532 		keg->uk_ipers = 1;
1533 	}
1534 	KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
1535 	    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
1536 
1537 	memused = keg->uk_ipers * rsize + shsize;
1538 	wastedspace = slabsize - memused;
1539 
1540 	/*
1541 	 * We can't do OFFPAGE if we're internal or if we've been
1542 	 * asked to not go to the VM for buckets.  If we do this we
1543 	 * may end up going to the VM  for slabs which we do not
1544 	 * want to do if we're UMA_ZFLAG_CACHEONLY as a result
1545 	 * of UMA_ZONE_VM, which clearly forbids it.
1546 	 */
1547 	if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
1548 	    (keg->uk_flags & UMA_ZFLAG_CACHEONLY))
1549 		return;
1550 
1551 	/*
1552 	 * See if using an OFFPAGE slab will limit our waste.  Only do
1553 	 * this if it permits more items per-slab.
1554 	 *
1555 	 * XXX We could try growing slabsize to limit max waste as well.
1556 	 * Historically this was not done because the VM could not
1557 	 * efficiently handle contiguous allocations.
1558 	 */
1559 	if ((wastedspace >= slabsize / UMA_MAX_WASTE) &&
1560 	    (keg->uk_ipers < (slabsize / keg->uk_rsize))) {
1561 		keg->uk_ipers = slabsize / keg->uk_rsize;
1562 		KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
1563 		    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
1564 		CTR6(KTR_UMA, "UMA decided we need offpage slab headers for "
1565 		    "keg: %s(%p), calculated wastedspace = %d, "
1566 		    "maximum wasted space allowed = %d, "
1567 		    "calculated ipers = %d, "
1568 		    "new wasted space = %d\n", keg->uk_name, keg, wastedspace,
1569 		    slabsize / UMA_MAX_WASTE, keg->uk_ipers,
1570 		    slabsize - keg->uk_ipers * keg->uk_rsize);
1571 		keg->uk_flags |= UMA_ZONE_OFFPAGE;
1572 	}
1573 
1574 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
1575 	    (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1576 		keg->uk_flags |= UMA_ZONE_HASH;
1577 }
1578 
1579 /*
1580  * Finish creating a large (> UMA_SLAB_SIZE) uma kegs.  Just give in and do
1581  * OFFPAGE for now.  When I can allow for more dynamic slab sizes this will be
1582  * more complicated.
1583  *
1584  * Arguments
1585  *	keg  The keg we should initialize
1586  *
1587  * Returns
1588  *	Nothing
1589  */
1590 static void
1591 keg_large_init(uma_keg_t keg)
1592 {
1593 
1594 	KASSERT(keg != NULL, ("Keg is null in keg_large_init"));
1595 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
1596 	    ("%s: Cannot large-init a UMA_ZONE_PCPU keg", __func__));
1597 
1598 	keg->uk_ppera = howmany(keg->uk_size, PAGE_SIZE);
1599 	keg->uk_ipers = 1;
1600 	keg->uk_rsize = keg->uk_size;
1601 
1602 	/* Check whether we have enough space to not do OFFPAGE. */
1603 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) == 0 &&
1604 	    PAGE_SIZE * keg->uk_ppera - keg->uk_rsize < SIZEOF_UMA_SLAB) {
1605 		/*
1606 		 * We can't do OFFPAGE if we're internal, in which case
1607 		 * we need an extra page per allocation to contain the
1608 		 * slab header.
1609 		 */
1610 		if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) == 0)
1611 			keg->uk_flags |= UMA_ZONE_OFFPAGE;
1612 		else
1613 			keg->uk_ppera++;
1614 	}
1615 
1616 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
1617 	    (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1618 		keg->uk_flags |= UMA_ZONE_HASH;
1619 }
1620 
1621 static void
1622 keg_cachespread_init(uma_keg_t keg)
1623 {
1624 	int alignsize;
1625 	int trailer;
1626 	int pages;
1627 	int rsize;
1628 
1629 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
1630 	    ("%s: Cannot cachespread-init a UMA_ZONE_PCPU keg", __func__));
1631 
1632 	alignsize = keg->uk_align + 1;
1633 	rsize = keg->uk_size;
1634 	/*
1635 	 * We want one item to start on every align boundary in a page.  To
1636 	 * do this we will span pages.  We will also extend the item by the
1637 	 * size of align if it is an even multiple of align.  Otherwise, it
1638 	 * would fall on the same boundary every time.
1639 	 */
1640 	if (rsize & keg->uk_align)
1641 		rsize = (rsize & ~keg->uk_align) + alignsize;
1642 	if ((rsize & alignsize) == 0)
1643 		rsize += alignsize;
1644 	trailer = rsize - keg->uk_size;
1645 	pages = (rsize * (PAGE_SIZE / alignsize)) / PAGE_SIZE;
1646 	pages = MIN(pages, (128 * 1024) / PAGE_SIZE);
1647 	keg->uk_rsize = rsize;
1648 	keg->uk_ppera = pages;
1649 	keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize;
1650 	keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
1651 	KASSERT(keg->uk_ipers <= SLAB_SETSIZE,
1652 	    ("%s: keg->uk_ipers too high(%d) increase max_ipers", __func__,
1653 	    keg->uk_ipers));
1654 }
1655 
1656 /*
1657  * Keg header ctor.  This initializes all fields, locks, etc.  And inserts
1658  * the keg onto the global keg list.
1659  *
1660  * Arguments/Returns follow uma_ctor specifications
1661  *	udata  Actually uma_kctor_args
1662  */
1663 static int
1664 keg_ctor(void *mem, int size, void *udata, int flags)
1665 {
1666 	struct uma_kctor_args *arg = udata;
1667 	uma_keg_t keg = mem;
1668 	uma_zone_t zone;
1669 
1670 	bzero(keg, size);
1671 	keg->uk_size = arg->size;
1672 	keg->uk_init = arg->uminit;
1673 	keg->uk_fini = arg->fini;
1674 	keg->uk_align = arg->align;
1675 	keg->uk_free = 0;
1676 	keg->uk_reserve = 0;
1677 	keg->uk_pages = 0;
1678 	keg->uk_flags = arg->flags;
1679 	keg->uk_slabzone = NULL;
1680 
1681 	/*
1682 	 * We use a global round-robin policy by default.  Zones with
1683 	 * UMA_ZONE_NUMA set will use first-touch instead, in which case the
1684 	 * iterator is never run.
1685 	 */
1686 	keg->uk_dr.dr_policy = DOMAINSET_RR();
1687 	keg->uk_dr.dr_iter = 0;
1688 
1689 	/*
1690 	 * The master zone is passed to us at keg-creation time.
1691 	 */
1692 	zone = arg->zone;
1693 	keg->uk_name = zone->uz_name;
1694 
1695 	if (arg->flags & UMA_ZONE_VM)
1696 		keg->uk_flags |= UMA_ZFLAG_CACHEONLY;
1697 
1698 	if (arg->flags & UMA_ZONE_ZINIT)
1699 		keg->uk_init = zero_init;
1700 
1701 	if (arg->flags & UMA_ZONE_MALLOC)
1702 		keg->uk_flags |= UMA_ZONE_VTOSLAB;
1703 
1704 	if (arg->flags & UMA_ZONE_PCPU)
1705 #ifdef SMP
1706 		keg->uk_flags |= UMA_ZONE_OFFPAGE;
1707 #else
1708 		keg->uk_flags &= ~UMA_ZONE_PCPU;
1709 #endif
1710 
1711 	if (keg->uk_flags & UMA_ZONE_CACHESPREAD) {
1712 		keg_cachespread_init(keg);
1713 	} else {
1714 		if (keg->uk_size > UMA_SLAB_SPACE)
1715 			keg_large_init(keg);
1716 		else
1717 			keg_small_init(keg);
1718 	}
1719 
1720 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
1721 		keg->uk_slabzone = slabzone;
1722 
1723 	/*
1724 	 * If we haven't booted yet we need allocations to go through the
1725 	 * startup cache until the vm is ready.
1726 	 */
1727 	if (booted < BOOT_PAGEALLOC)
1728 		keg->uk_allocf = startup_alloc;
1729 #ifdef UMA_MD_SMALL_ALLOC
1730 	else if (keg->uk_ppera == 1)
1731 		keg->uk_allocf = uma_small_alloc;
1732 #endif
1733 	else if (keg->uk_flags & UMA_ZONE_PCPU)
1734 		keg->uk_allocf = pcpu_page_alloc;
1735 	else
1736 		keg->uk_allocf = page_alloc;
1737 #ifdef UMA_MD_SMALL_ALLOC
1738 	if (keg->uk_ppera == 1)
1739 		keg->uk_freef = uma_small_free;
1740 	else
1741 #endif
1742 	if (keg->uk_flags & UMA_ZONE_PCPU)
1743 		keg->uk_freef = pcpu_page_free;
1744 	else
1745 		keg->uk_freef = page_free;
1746 
1747 	/*
1748 	 * Initialize keg's lock
1749 	 */
1750 	KEG_LOCK_INIT(keg, (arg->flags & UMA_ZONE_MTXCLASS));
1751 
1752 	/*
1753 	 * If we're putting the slab header in the actual page we need to
1754 	 * figure out where in each page it goes.  See SIZEOF_UMA_SLAB
1755 	 * macro definition.
1756 	 */
1757 	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) {
1758 		keg->uk_pgoff = (PAGE_SIZE * keg->uk_ppera) - SIZEOF_UMA_SLAB;
1759 		/*
1760 		 * The only way the following is possible is if with our
1761 		 * UMA_ALIGN_PTR adjustments we are now bigger than
1762 		 * UMA_SLAB_SIZE.  I haven't checked whether this is
1763 		 * mathematically possible for all cases, so we make
1764 		 * sure here anyway.
1765 		 */
1766 		KASSERT(keg->uk_pgoff + sizeof(struct uma_slab) <=
1767 		    PAGE_SIZE * keg->uk_ppera,
1768 		    ("zone %s ipers %d rsize %d size %d slab won't fit",
1769 		    zone->uz_name, keg->uk_ipers, keg->uk_rsize, keg->uk_size));
1770 	}
1771 
1772 	if (keg->uk_flags & UMA_ZONE_HASH)
1773 		hash_alloc(&keg->uk_hash, 0);
1774 
1775 	CTR5(KTR_UMA, "keg_ctor %p zone %s(%p) out %d free %d\n",
1776 	    keg, zone->uz_name, zone,
1777 	    (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free,
1778 	    keg->uk_free);
1779 
1780 	LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);
1781 
1782 	rw_wlock(&uma_rwlock);
1783 	LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
1784 	rw_wunlock(&uma_rwlock);
1785 	return (0);
1786 }
1787 
1788 static void
1789 zone_alloc_counters(uma_zone_t zone)
1790 {
1791 
1792 	zone->uz_allocs = counter_u64_alloc(M_WAITOK);
1793 	zone->uz_frees = counter_u64_alloc(M_WAITOK);
1794 	zone->uz_fails = counter_u64_alloc(M_WAITOK);
1795 }
1796 
1797 /*
1798  * Zone header ctor.  This initializes all fields, locks, etc.
1799  *
1800  * Arguments/Returns follow uma_ctor specifications
1801  *	udata  Actually uma_zctor_args
1802  */
1803 static int
1804 zone_ctor(void *mem, int size, void *udata, int flags)
1805 {
1806 	struct uma_zctor_args *arg = udata;
1807 	uma_zone_t zone = mem;
1808 	uma_zone_t z;
1809 	uma_keg_t keg;
1810 	int i;
1811 
1812 	bzero(zone, size);
1813 	zone->uz_name = arg->name;
1814 	zone->uz_ctor = arg->ctor;
1815 	zone->uz_dtor = arg->dtor;
1816 	zone->uz_init = NULL;
1817 	zone->uz_fini = NULL;
1818 	zone->uz_sleeps = 0;
1819 	zone->uz_xdomain = 0;
1820 	zone->uz_count = 0;
1821 	zone->uz_count_min = 0;
1822 	zone->uz_count_max = BUCKET_MAX;
1823 	zone->uz_flags = 0;
1824 	zone->uz_warning = NULL;
1825 	/* The domain structures follow the cpu structures. */
1826 	zone->uz_domain = (struct uma_zone_domain *)&zone->uz_cpu[mp_ncpus];
1827 	zone->uz_bkt_max = ULONG_MAX;
1828 	timevalclear(&zone->uz_ratecheck);
1829 
1830 	if (__predict_true(booted == BOOT_RUNNING))
1831 		zone_alloc_counters(zone);
1832 	else {
1833 		zone->uz_allocs = EARLY_COUNTER;
1834 		zone->uz_frees = EARLY_COUNTER;
1835 		zone->uz_fails = EARLY_COUNTER;
1836 	}
1837 
1838 	for (i = 0; i < vm_ndomains; i++)
1839 		TAILQ_INIT(&zone->uz_domain[i].uzd_buckets);
1840 
1841 	/*
1842 	 * This is a pure cache zone, no kegs.
1843 	 */
1844 	if (arg->import) {
1845 		if (arg->flags & UMA_ZONE_VM)
1846 			arg->flags |= UMA_ZFLAG_CACHEONLY;
1847 		zone->uz_flags = arg->flags;
1848 		zone->uz_size = arg->size;
1849 		zone->uz_import = arg->import;
1850 		zone->uz_release = arg->release;
1851 		zone->uz_arg = arg->arg;
1852 		zone->uz_lockptr = &zone->uz_lock;
1853 		ZONE_LOCK_INIT(zone, (arg->flags & UMA_ZONE_MTXCLASS));
1854 		rw_wlock(&uma_rwlock);
1855 		LIST_INSERT_HEAD(&uma_cachezones, zone, uz_link);
1856 		rw_wunlock(&uma_rwlock);
1857 		goto out;
1858 	}
1859 
1860 	/*
1861 	 * Use the regular zone/keg/slab allocator.
1862 	 */
1863 	zone->uz_import = (uma_import)zone_import;
1864 	zone->uz_release = (uma_release)zone_release;
1865 	zone->uz_arg = zone;
1866 	keg = arg->keg;
1867 
1868 	if (arg->flags & UMA_ZONE_SECONDARY) {
1869 		KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
1870 		zone->uz_init = arg->uminit;
1871 		zone->uz_fini = arg->fini;
1872 		zone->uz_lockptr = &keg->uk_lock;
1873 		zone->uz_flags |= UMA_ZONE_SECONDARY;
1874 		rw_wlock(&uma_rwlock);
1875 		ZONE_LOCK(zone);
1876 		LIST_FOREACH(z, &keg->uk_zones, uz_link) {
1877 			if (LIST_NEXT(z, uz_link) == NULL) {
1878 				LIST_INSERT_AFTER(z, zone, uz_link);
1879 				break;
1880 			}
1881 		}
1882 		ZONE_UNLOCK(zone);
1883 		rw_wunlock(&uma_rwlock);
1884 	} else if (keg == NULL) {
1885 		if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
1886 		    arg->align, arg->flags)) == NULL)
1887 			return (ENOMEM);
1888 	} else {
1889 		struct uma_kctor_args karg;
1890 		int error;
1891 
1892 		/* We should only be here from uma_startup() */
1893 		karg.size = arg->size;
1894 		karg.uminit = arg->uminit;
1895 		karg.fini = arg->fini;
1896 		karg.align = arg->align;
1897 		karg.flags = arg->flags;
1898 		karg.zone = zone;
1899 		error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
1900 		    flags);
1901 		if (error)
1902 			return (error);
1903 	}
1904 
1905 	zone->uz_keg = keg;
1906 	zone->uz_size = keg->uk_size;
1907 	zone->uz_flags |= (keg->uk_flags &
1908 	    (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT));
1909 
1910 	/*
1911 	 * Some internal zones don't have room allocated for the per cpu
1912 	 * caches.  If we're internal, bail out here.
1913 	 */
1914 	if (keg->uk_flags & UMA_ZFLAG_INTERNAL) {
1915 		KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0,
1916 		    ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
1917 		return (0);
1918 	}
1919 
1920 out:
1921 	KASSERT((arg->flags & (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET)) !=
1922 	    (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET),
1923 	    ("Invalid zone flag combination"));
1924 	if ((arg->flags & UMA_ZONE_MAXBUCKET) != 0) {
1925 		zone->uz_count = BUCKET_MAX;
1926 	} else if ((arg->flags & UMA_ZONE_MINBUCKET) != 0) {
1927 		zone->uz_count = BUCKET_MIN;
1928 		zone->uz_count_max = BUCKET_MIN;
1929 	} else if ((arg->flags & UMA_ZONE_NOBUCKET) != 0)
1930 		zone->uz_count = 0;
1931 	else
1932 		zone->uz_count = bucket_select(zone->uz_size);
1933 	zone->uz_count_min = zone->uz_count;
1934 
1935 	return (0);
1936 }
1937 
1938 /*
1939  * Keg header dtor.  This frees all data, destroys locks, frees the hash
1940  * table and removes the keg from the global list.
1941  *
1942  * Arguments/Returns follow uma_dtor specifications
1943  *	udata  unused
1944  */
1945 static void
1946 keg_dtor(void *arg, int size, void *udata)
1947 {
1948 	uma_keg_t keg;
1949 
1950 	keg = (uma_keg_t)arg;
1951 	KEG_LOCK(keg);
1952 	if (keg->uk_free != 0) {
1953 		printf("Freed UMA keg (%s) was not empty (%d items). "
1954 		    " Lost %d pages of memory.\n",
1955 		    keg->uk_name ? keg->uk_name : "",
1956 		    keg->uk_free, keg->uk_pages);
1957 	}
1958 	KEG_UNLOCK(keg);
1959 
1960 	hash_free(&keg->uk_hash);
1961 
1962 	KEG_LOCK_FINI(keg);
1963 }
1964 
1965 /*
1966  * Zone header dtor.
1967  *
1968  * Arguments/Returns follow uma_dtor specifications
1969  *	udata  unused
1970  */
1971 static void
1972 zone_dtor(void *arg, int size, void *udata)
1973 {
1974 	uma_zone_t zone;
1975 	uma_keg_t keg;
1976 
1977 	zone = (uma_zone_t)arg;
1978 
1979 	if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
1980 		cache_drain(zone);
1981 
1982 	rw_wlock(&uma_rwlock);
1983 	LIST_REMOVE(zone, uz_link);
1984 	rw_wunlock(&uma_rwlock);
1985 	/*
1986 	 * XXX there are some races here where
1987 	 * the zone can be drained but zone lock
1988 	 * released and then refilled before we
1989 	 * remove it... we dont care for now
1990 	 */
1991 	zone_reclaim(zone, M_WAITOK, true);
1992 	/*
1993 	 * We only destroy kegs from non secondary/non cache zones.
1994 	 */
1995 	if ((zone->uz_flags & (UMA_ZONE_SECONDARY | UMA_ZFLAG_CACHE)) == 0) {
1996 		keg = zone->uz_keg;
1997 		rw_wlock(&uma_rwlock);
1998 		LIST_REMOVE(keg, uk_link);
1999 		rw_wunlock(&uma_rwlock);
2000 		zone_free_item(kegs, keg, NULL, SKIP_NONE);
2001 	}
2002 	counter_u64_free(zone->uz_allocs);
2003 	counter_u64_free(zone->uz_frees);
2004 	counter_u64_free(zone->uz_fails);
2005 	if (zone->uz_lockptr == &zone->uz_lock)
2006 		ZONE_LOCK_FINI(zone);
2007 }
2008 
2009 /*
2010  * Traverses every zone in the system and calls a callback
2011  *
2012  * Arguments:
2013  *	zfunc  A pointer to a function which accepts a zone
2014  *		as an argument.
2015  *
2016  * Returns:
2017  *	Nothing
2018  */
2019 static void
2020 zone_foreach(void (*zfunc)(uma_zone_t))
2021 {
2022 	uma_keg_t keg;
2023 	uma_zone_t zone;
2024 
2025 	/*
2026 	 * Before BOOT_RUNNING we are guaranteed to be single
2027 	 * threaded, so locking isn't needed. Startup functions
2028 	 * are allowed to use M_WAITOK.
2029 	 */
2030 	if (__predict_true(booted == BOOT_RUNNING))
2031 		rw_rlock(&uma_rwlock);
2032 	LIST_FOREACH(keg, &uma_kegs, uk_link) {
2033 		LIST_FOREACH(zone, &keg->uk_zones, uz_link)
2034 			zfunc(zone);
2035 	}
2036 	LIST_FOREACH(zone, &uma_cachezones, uz_link)
2037 		zfunc(zone);
2038 	if (__predict_true(booted == BOOT_RUNNING))
2039 		rw_runlock(&uma_rwlock);
2040 }
2041 
2042 /*
2043  * Count how many pages do we need to bootstrap.  VM supplies
2044  * its need in early zones in the argument, we add up our zones,
2045  * which consist of: UMA Slabs, UMA Hash and 9 Bucket zones. The
2046  * zone of zones and zone of kegs are accounted separately.
2047  */
2048 #define	UMA_BOOT_ZONES	11
2049 /* Zone of zones and zone of kegs have arbitrary alignment. */
2050 #define	UMA_BOOT_ALIGN	32
2051 static int zsize, ksize;
2052 int
2053 uma_startup_count(int vm_zones)
2054 {
2055 	int zones, pages;
2056 
2057 	ksize = sizeof(struct uma_keg) +
2058 	    (sizeof(struct uma_domain) * vm_ndomains);
2059 	zsize = sizeof(struct uma_zone) +
2060 	    (sizeof(struct uma_cache) * (mp_maxid + 1)) +
2061 	    (sizeof(struct uma_zone_domain) * vm_ndomains);
2062 
2063 	/*
2064 	 * Memory for the zone of kegs and its keg,
2065 	 * and for zone of zones.
2066 	 */
2067 	pages = howmany(roundup(zsize, CACHE_LINE_SIZE) * 2 +
2068 	    roundup(ksize, CACHE_LINE_SIZE), PAGE_SIZE);
2069 
2070 #ifdef	UMA_MD_SMALL_ALLOC
2071 	zones = UMA_BOOT_ZONES;
2072 #else
2073 	zones = UMA_BOOT_ZONES + vm_zones;
2074 	vm_zones = 0;
2075 #endif
2076 
2077 	/* Memory for the rest of startup zones, UMA and VM, ... */
2078 	if (zsize > UMA_SLAB_SPACE) {
2079 		/* See keg_large_init(). */
2080 		u_int ppera;
2081 
2082 		ppera = howmany(roundup2(zsize, UMA_BOOT_ALIGN), PAGE_SIZE);
2083 		if (PAGE_SIZE * ppera - roundup2(zsize, UMA_BOOT_ALIGN) <
2084 		    SIZEOF_UMA_SLAB)
2085 			ppera++;
2086 		pages += (zones + vm_zones) * ppera;
2087 	} else if (roundup2(zsize, UMA_BOOT_ALIGN) > UMA_SLAB_SPACE)
2088 		/* See keg_small_init() special case for uk_ppera = 1. */
2089 		pages += zones;
2090 	else
2091 		pages += howmany(zones,
2092 		    UMA_SLAB_SPACE / roundup2(zsize, UMA_BOOT_ALIGN));
2093 
2094 	/* ... and their kegs. Note that zone of zones allocates a keg! */
2095 	pages += howmany(zones + 1,
2096 	    UMA_SLAB_SPACE / roundup2(ksize, UMA_BOOT_ALIGN));
2097 
2098 	/*
2099 	 * Most of startup zones are not going to be offpages, that's
2100 	 * why we use UMA_SLAB_SPACE instead of UMA_SLAB_SIZE in all
2101 	 * calculations.  Some large bucket zones will be offpage, and
2102 	 * thus will allocate hashes.  We take conservative approach
2103 	 * and assume that all zones may allocate hash.  This may give
2104 	 * us some positive inaccuracy, usually an extra single page.
2105 	 */
2106 	pages += howmany(zones, UMA_SLAB_SPACE /
2107 	    (sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT));
2108 
2109 	return (pages);
2110 }
2111 
2112 void
2113 uma_startup(void *mem, int npages)
2114 {
2115 	struct uma_zctor_args args;
2116 	uma_keg_t masterkeg;
2117 	uintptr_t m;
2118 
2119 #ifdef DIAGNOSTIC
2120 	printf("Entering %s with %d boot pages configured\n", __func__, npages);
2121 #endif
2122 
2123 	rw_init(&uma_rwlock, "UMA lock");
2124 
2125 	/* Use bootpages memory for the zone of zones and zone of kegs. */
2126 	m = (uintptr_t)mem;
2127 	zones = (uma_zone_t)m;
2128 	m += roundup(zsize, CACHE_LINE_SIZE);
2129 	kegs = (uma_zone_t)m;
2130 	m += roundup(zsize, CACHE_LINE_SIZE);
2131 	masterkeg = (uma_keg_t)m;
2132 	m += roundup(ksize, CACHE_LINE_SIZE);
2133 	m = roundup(m, PAGE_SIZE);
2134 	npages -= (m - (uintptr_t)mem) / PAGE_SIZE;
2135 	mem = (void *)m;
2136 
2137 	/* "manually" create the initial zone */
2138 	memset(&args, 0, sizeof(args));
2139 	args.name = "UMA Kegs";
2140 	args.size = ksize;
2141 	args.ctor = keg_ctor;
2142 	args.dtor = keg_dtor;
2143 	args.uminit = zero_init;
2144 	args.fini = NULL;
2145 	args.keg = masterkeg;
2146 	args.align = UMA_BOOT_ALIGN - 1;
2147 	args.flags = UMA_ZFLAG_INTERNAL;
2148 	zone_ctor(kegs, zsize, &args, M_WAITOK);
2149 
2150 	bootmem = mem;
2151 	boot_pages = npages;
2152 
2153 	args.name = "UMA Zones";
2154 	args.size = zsize;
2155 	args.ctor = zone_ctor;
2156 	args.dtor = zone_dtor;
2157 	args.uminit = zero_init;
2158 	args.fini = NULL;
2159 	args.keg = NULL;
2160 	args.align = UMA_BOOT_ALIGN - 1;
2161 	args.flags = UMA_ZFLAG_INTERNAL;
2162 	zone_ctor(zones, zsize, &args, M_WAITOK);
2163 
2164 	/* Now make a zone for slab headers */
2165 	slabzone = uma_zcreate("UMA Slabs",
2166 				sizeof(struct uma_slab),
2167 				NULL, NULL, NULL, NULL,
2168 				UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
2169 
2170 	hashzone = uma_zcreate("UMA Hash",
2171 	    sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
2172 	    NULL, NULL, NULL, NULL,
2173 	    UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
2174 
2175 	bucket_init();
2176 
2177 	booted = BOOT_STRAPPED;
2178 }
2179 
2180 void
2181 uma_startup1(void)
2182 {
2183 
2184 #ifdef DIAGNOSTIC
2185 	printf("Entering %s with %d boot pages left\n", __func__, boot_pages);
2186 #endif
2187 	booted = BOOT_PAGEALLOC;
2188 }
2189 
2190 void
2191 uma_startup2(void)
2192 {
2193 
2194 #ifdef DIAGNOSTIC
2195 	printf("Entering %s with %d boot pages left\n", __func__, boot_pages);
2196 #endif
2197 	booted = BOOT_BUCKETS;
2198 	sx_init(&uma_reclaim_lock, "umareclaim");
2199 	bucket_enable();
2200 }
2201 
2202 /*
2203  * Initialize our callout handle
2204  *
2205  */
2206 static void
2207 uma_startup3(void)
2208 {
2209 
2210 #ifdef INVARIANTS
2211 	TUNABLE_INT_FETCH("vm.debug.divisor", &dbg_divisor);
2212 	uma_dbg_cnt = counter_u64_alloc(M_WAITOK);
2213 	uma_skip_cnt = counter_u64_alloc(M_WAITOK);
2214 #endif
2215 	zone_foreach(zone_alloc_counters);
2216 	callout_init(&uma_callout, 1);
2217 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
2218 	booted = BOOT_RUNNING;
2219 }
2220 
2221 static uma_keg_t
2222 uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
2223 		int align, uint32_t flags)
2224 {
2225 	struct uma_kctor_args args;
2226 
2227 	args.size = size;
2228 	args.uminit = uminit;
2229 	args.fini = fini;
2230 	args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align;
2231 	args.flags = flags;
2232 	args.zone = zone;
2233 	return (zone_alloc_item(kegs, &args, UMA_ANYDOMAIN, M_WAITOK));
2234 }
2235 
2236 /* Public functions */
2237 /* See uma.h */
2238 void
2239 uma_set_align(int align)
2240 {
2241 
2242 	if (align != UMA_ALIGN_CACHE)
2243 		uma_align_cache = align;
2244 }
2245 
2246 /* See uma.h */
2247 uma_zone_t
2248 uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
2249 		uma_init uminit, uma_fini fini, int align, uint32_t flags)
2250 
2251 {
2252 	struct uma_zctor_args args;
2253 	uma_zone_t res;
2254 	bool locked;
2255 
2256 	KASSERT(powerof2(align + 1), ("invalid zone alignment %d for \"%s\"",
2257 	    align, name));
2258 
2259 	/* Sets all zones to a first-touch domain policy. */
2260 #ifdef UMA_FIRSTTOUCH
2261 	flags |= UMA_ZONE_NUMA;
2262 #endif
2263 
2264 	/* This stuff is essential for the zone ctor */
2265 	memset(&args, 0, sizeof(args));
2266 	args.name = name;
2267 	args.size = size;
2268 	args.ctor = ctor;
2269 	args.dtor = dtor;
2270 	args.uminit = uminit;
2271 	args.fini = fini;
2272 #ifdef  INVARIANTS
2273 	/*
2274 	 * If a zone is being created with an empty constructor and
2275 	 * destructor, pass UMA constructor/destructor which checks for
2276 	 * memory use after free.
2277 	 */
2278 	if ((!(flags & (UMA_ZONE_ZINIT | UMA_ZONE_NOFREE))) &&
2279 	    ctor == NULL && dtor == NULL && uminit == NULL && fini == NULL) {
2280 		args.ctor = trash_ctor;
2281 		args.dtor = trash_dtor;
2282 		args.uminit = trash_init;
2283 		args.fini = trash_fini;
2284 	}
2285 #endif
2286 	args.align = align;
2287 	args.flags = flags;
2288 	args.keg = NULL;
2289 
2290 	if (booted < BOOT_BUCKETS) {
2291 		locked = false;
2292 	} else {
2293 		sx_slock(&uma_reclaim_lock);
2294 		locked = true;
2295 	}
2296 	res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK);
2297 	if (locked)
2298 		sx_sunlock(&uma_reclaim_lock);
2299 	return (res);
2300 }
2301 
2302 /* See uma.h */
2303 uma_zone_t
2304 uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
2305 		    uma_init zinit, uma_fini zfini, uma_zone_t master)
2306 {
2307 	struct uma_zctor_args args;
2308 	uma_keg_t keg;
2309 	uma_zone_t res;
2310 	bool locked;
2311 
2312 	keg = master->uz_keg;
2313 	memset(&args, 0, sizeof(args));
2314 	args.name = name;
2315 	args.size = keg->uk_size;
2316 	args.ctor = ctor;
2317 	args.dtor = dtor;
2318 	args.uminit = zinit;
2319 	args.fini = zfini;
2320 	args.align = keg->uk_align;
2321 	args.flags = keg->uk_flags | UMA_ZONE_SECONDARY;
2322 	args.keg = keg;
2323 
2324 	if (booted < BOOT_BUCKETS) {
2325 		locked = false;
2326 	} else {
2327 		sx_slock(&uma_reclaim_lock);
2328 		locked = true;
2329 	}
2330 	/* XXX Attaches only one keg of potentially many. */
2331 	res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK);
2332 	if (locked)
2333 		sx_sunlock(&uma_reclaim_lock);
2334 	return (res);
2335 }
2336 
2337 /* See uma.h */
2338 uma_zone_t
2339 uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor,
2340 		    uma_init zinit, uma_fini zfini, uma_import zimport,
2341 		    uma_release zrelease, void *arg, int flags)
2342 {
2343 	struct uma_zctor_args args;
2344 
2345 	memset(&args, 0, sizeof(args));
2346 	args.name = name;
2347 	args.size = size;
2348 	args.ctor = ctor;
2349 	args.dtor = dtor;
2350 	args.uminit = zinit;
2351 	args.fini = zfini;
2352 	args.import = zimport;
2353 	args.release = zrelease;
2354 	args.arg = arg;
2355 	args.align = 0;
2356 	args.flags = flags | UMA_ZFLAG_CACHE;
2357 
2358 	return (zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK));
2359 }
2360 
2361 /* See uma.h */
2362 void
2363 uma_zdestroy(uma_zone_t zone)
2364 {
2365 
2366 	sx_slock(&uma_reclaim_lock);
2367 	zone_free_item(zones, zone, NULL, SKIP_NONE);
2368 	sx_sunlock(&uma_reclaim_lock);
2369 }
2370 
2371 void
2372 uma_zwait(uma_zone_t zone)
2373 {
2374 	void *item;
2375 
2376 	item = uma_zalloc_arg(zone, NULL, M_WAITOK);
2377 	uma_zfree(zone, item);
2378 }
2379 
2380 void *
2381 uma_zalloc_pcpu_arg(uma_zone_t zone, void *udata, int flags)
2382 {
2383 	void *item;
2384 #ifdef SMP
2385 	int i;
2386 
2387 	MPASS(zone->uz_flags & UMA_ZONE_PCPU);
2388 #endif
2389 	item = uma_zalloc_arg(zone, udata, flags & ~M_ZERO);
2390 	if (item != NULL && (flags & M_ZERO)) {
2391 #ifdef SMP
2392 		for (i = 0; i <= mp_maxid; i++)
2393 			bzero(zpcpu_get_cpu(item, i), zone->uz_size);
2394 #else
2395 		bzero(item, zone->uz_size);
2396 #endif
2397 	}
2398 	return (item);
2399 }
2400 
2401 /*
2402  * A stub while both regular and pcpu cases are identical.
2403  */
2404 void
2405 uma_zfree_pcpu_arg(uma_zone_t zone, void *item, void *udata)
2406 {
2407 
2408 #ifdef SMP
2409 	MPASS(zone->uz_flags & UMA_ZONE_PCPU);
2410 #endif
2411 	uma_zfree_arg(zone, item, udata);
2412 }
2413 
2414 /* See uma.h */
2415 void *
2416 uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
2417 {
2418 	uma_zone_domain_t zdom;
2419 	uma_bucket_t bucket;
2420 	uma_cache_t cache;
2421 	void *item;
2422 	int cpu, domain, lockfail, maxbucket;
2423 #ifdef INVARIANTS
2424 	bool skipdbg;
2425 #endif
2426 
2427 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
2428 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
2429 
2430 	/* This is the fast path allocation */
2431 	CTR4(KTR_UMA, "uma_zalloc_arg thread %x zone %s(%p) flags %d",
2432 	    curthread, zone->uz_name, zone, flags);
2433 
2434 	if (flags & M_WAITOK) {
2435 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2436 		    "uma_zalloc_arg: zone \"%s\"", zone->uz_name);
2437 	}
2438 	KASSERT((flags & M_EXEC) == 0, ("uma_zalloc_arg: called with M_EXEC"));
2439 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
2440 	    ("uma_zalloc_arg: called with spinlock or critical section held"));
2441 	if (zone->uz_flags & UMA_ZONE_PCPU)
2442 		KASSERT((flags & M_ZERO) == 0, ("allocating from a pcpu zone "
2443 		    "with M_ZERO passed"));
2444 
2445 #ifdef DEBUG_MEMGUARD
2446 	if (memguard_cmp_zone(zone)) {
2447 		item = memguard_alloc(zone->uz_size, flags);
2448 		if (item != NULL) {
2449 			if (zone->uz_init != NULL &&
2450 			    zone->uz_init(item, zone->uz_size, flags) != 0)
2451 				return (NULL);
2452 			if (zone->uz_ctor != NULL &&
2453 			    zone->uz_ctor(item, zone->uz_size, udata,
2454 			    flags) != 0) {
2455 			    	zone->uz_fini(item, zone->uz_size);
2456 				return (NULL);
2457 			}
2458 			return (item);
2459 		}
2460 		/* This is unfortunate but should not be fatal. */
2461 	}
2462 #endif
2463 	/*
2464 	 * If possible, allocate from the per-CPU cache.  There are two
2465 	 * requirements for safe access to the per-CPU cache: (1) the thread
2466 	 * accessing the cache must not be preempted or yield during access,
2467 	 * and (2) the thread must not migrate CPUs without switching which
2468 	 * cache it accesses.  We rely on a critical section to prevent
2469 	 * preemption and migration.  We release the critical section in
2470 	 * order to acquire the zone mutex if we are unable to allocate from
2471 	 * the current cache; when we re-acquire the critical section, we
2472 	 * must detect and handle migration if it has occurred.
2473 	 */
2474 zalloc_restart:
2475 	critical_enter();
2476 	cpu = curcpu;
2477 	cache = &zone->uz_cpu[cpu];
2478 
2479 zalloc_start:
2480 	bucket = cache->uc_allocbucket;
2481 	if (bucket != NULL && bucket->ub_cnt > 0) {
2482 		bucket->ub_cnt--;
2483 		item = bucket->ub_bucket[bucket->ub_cnt];
2484 #ifdef INVARIANTS
2485 		bucket->ub_bucket[bucket->ub_cnt] = NULL;
2486 #endif
2487 		KASSERT(item != NULL, ("uma_zalloc: Bucket pointer mangled."));
2488 		cache->uc_allocs++;
2489 		critical_exit();
2490 #ifdef INVARIANTS
2491 		skipdbg = uma_dbg_zskip(zone, item);
2492 #endif
2493 		if (zone->uz_ctor != NULL &&
2494 #ifdef INVARIANTS
2495 		    (!skipdbg || zone->uz_ctor != trash_ctor ||
2496 		    zone->uz_dtor != trash_dtor) &&
2497 #endif
2498 		    zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
2499 			counter_u64_add(zone->uz_fails, 1);
2500 			zone_free_item(zone, item, udata, SKIP_DTOR | SKIP_CNT);
2501 			return (NULL);
2502 		}
2503 #ifdef INVARIANTS
2504 		if (!skipdbg)
2505 			uma_dbg_alloc(zone, NULL, item);
2506 #endif
2507 		if (flags & M_ZERO)
2508 			uma_zero_item(item, zone);
2509 		return (item);
2510 	}
2511 
2512 	/*
2513 	 * We have run out of items in our alloc bucket.
2514 	 * See if we can switch with our free bucket.
2515 	 */
2516 	bucket = cache->uc_freebucket;
2517 	if (bucket != NULL && bucket->ub_cnt > 0) {
2518 		CTR2(KTR_UMA,
2519 		    "uma_zalloc: zone %s(%p) swapping empty with alloc",
2520 		    zone->uz_name, zone);
2521 		cache->uc_freebucket = cache->uc_allocbucket;
2522 		cache->uc_allocbucket = bucket;
2523 		goto zalloc_start;
2524 	}
2525 
2526 	/*
2527 	 * Discard any empty allocation bucket while we hold no locks.
2528 	 */
2529 	bucket = cache->uc_allocbucket;
2530 	cache->uc_allocbucket = NULL;
2531 	critical_exit();
2532 	if (bucket != NULL)
2533 		bucket_free(zone, bucket, udata);
2534 
2535 	/* Short-circuit for zones without buckets and low memory. */
2536 	if (zone->uz_count == 0 || bucketdisable) {
2537 		ZONE_LOCK(zone);
2538 		if (zone->uz_flags & UMA_ZONE_NUMA)
2539 			domain = PCPU_GET(domain);
2540 		else
2541 			domain = UMA_ANYDOMAIN;
2542 		goto zalloc_item;
2543 	}
2544 
2545 	/*
2546 	 * Attempt to retrieve the item from the per-CPU cache has failed, so
2547 	 * we must go back to the zone.  This requires the zone lock, so we
2548 	 * must drop the critical section, then re-acquire it when we go back
2549 	 * to the cache.  Since the critical section is released, we may be
2550 	 * preempted or migrate.  As such, make sure not to maintain any
2551 	 * thread-local state specific to the cache from prior to releasing
2552 	 * the critical section.
2553 	 */
2554 	lockfail = 0;
2555 	if (ZONE_TRYLOCK(zone) == 0) {
2556 		/* Record contention to size the buckets. */
2557 		ZONE_LOCK(zone);
2558 		lockfail = 1;
2559 	}
2560 	critical_enter();
2561 	cpu = curcpu;
2562 	cache = &zone->uz_cpu[cpu];
2563 
2564 	/* See if we lost the race to fill the cache. */
2565 	if (cache->uc_allocbucket != NULL) {
2566 		ZONE_UNLOCK(zone);
2567 		goto zalloc_start;
2568 	}
2569 
2570 	/*
2571 	 * Check the zone's cache of buckets.
2572 	 */
2573 	if (zone->uz_flags & UMA_ZONE_NUMA) {
2574 		domain = PCPU_GET(domain);
2575 		zdom = &zone->uz_domain[domain];
2576 	} else {
2577 		domain = UMA_ANYDOMAIN;
2578 		zdom = &zone->uz_domain[0];
2579 	}
2580 
2581 	if ((bucket = zone_fetch_bucket(zone, zdom)) != NULL) {
2582 		KASSERT(bucket->ub_cnt != 0,
2583 		    ("uma_zalloc_arg: Returning an empty bucket."));
2584 		cache->uc_allocbucket = bucket;
2585 		ZONE_UNLOCK(zone);
2586 		goto zalloc_start;
2587 	}
2588 	/* We are no longer associated with this CPU. */
2589 	critical_exit();
2590 
2591 	/*
2592 	 * We bump the uz count when the cache size is insufficient to
2593 	 * handle the working set.
2594 	 */
2595 	if (lockfail && zone->uz_count < zone->uz_count_max)
2596 		zone->uz_count++;
2597 
2598 	if (zone->uz_max_items > 0) {
2599 		if (zone->uz_items >= zone->uz_max_items)
2600 			goto zalloc_item;
2601 		maxbucket = MIN(zone->uz_count,
2602 		    zone->uz_max_items - zone->uz_items);
2603 		zone->uz_items += maxbucket;
2604 	} else
2605 		maxbucket = zone->uz_count;
2606 	ZONE_UNLOCK(zone);
2607 
2608 	/*
2609 	 * Now lets just fill a bucket and put it on the free list.  If that
2610 	 * works we'll restart the allocation from the beginning and it
2611 	 * will use the just filled bucket.
2612 	 */
2613 	bucket = zone_alloc_bucket(zone, udata, domain, flags, maxbucket);
2614 	CTR3(KTR_UMA, "uma_zalloc: zone %s(%p) bucket zone returned %p",
2615 	    zone->uz_name, zone, bucket);
2616 	ZONE_LOCK(zone);
2617 	if (bucket != NULL) {
2618 		if (zone->uz_max_items > 0 && bucket->ub_cnt < maxbucket) {
2619 			MPASS(zone->uz_items >= maxbucket - bucket->ub_cnt);
2620 			zone->uz_items -= maxbucket - bucket->ub_cnt;
2621 			if (zone->uz_sleepers > 0 &&
2622 			    zone->uz_items < zone->uz_max_items)
2623 				wakeup_one(zone);
2624 		}
2625 		critical_enter();
2626 		cpu = curcpu;
2627 		cache = &zone->uz_cpu[cpu];
2628 
2629 		/*
2630 		 * See if we lost the race or were migrated.  Cache the
2631 		 * initialized bucket to make this less likely or claim
2632 		 * the memory directly.
2633 		 */
2634 		if (cache->uc_allocbucket == NULL &&
2635 		    ((zone->uz_flags & UMA_ZONE_NUMA) == 0 ||
2636 		    domain == PCPU_GET(domain))) {
2637 			cache->uc_allocbucket = bucket;
2638 			zdom->uzd_imax += bucket->ub_cnt;
2639 		} else if (zone->uz_bkt_count >= zone->uz_bkt_max) {
2640 			critical_exit();
2641 			ZONE_UNLOCK(zone);
2642 			bucket_drain(zone, bucket);
2643 			bucket_free(zone, bucket, udata);
2644 			goto zalloc_restart;
2645 		} else
2646 			zone_put_bucket(zone, zdom, bucket, false);
2647 		ZONE_UNLOCK(zone);
2648 		goto zalloc_start;
2649 	} else if (zone->uz_max_items > 0) {
2650 		zone->uz_items -= maxbucket;
2651 		if (zone->uz_sleepers > 0 &&
2652 		    zone->uz_items + 1 < zone->uz_max_items)
2653 			wakeup_one(zone);
2654 	}
2655 
2656 	/*
2657 	 * We may not be able to get a bucket so return an actual item.
2658 	 */
2659 zalloc_item:
2660 	item = zone_alloc_item_locked(zone, udata, domain, flags);
2661 
2662 	return (item);
2663 }
2664 
2665 void *
2666 uma_zalloc_domain(uma_zone_t zone, void *udata, int domain, int flags)
2667 {
2668 
2669 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
2670 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
2671 
2672 	/* This is the fast path allocation */
2673 	CTR5(KTR_UMA,
2674 	    "uma_zalloc_domain thread %x zone %s(%p) domain %d flags %d",
2675 	    curthread, zone->uz_name, zone, domain, flags);
2676 
2677 	if (flags & M_WAITOK) {
2678 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2679 		    "uma_zalloc_domain: zone \"%s\"", zone->uz_name);
2680 	}
2681 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
2682 	    ("uma_zalloc_domain: called with spinlock or critical section held"));
2683 
2684 	return (zone_alloc_item(zone, udata, domain, flags));
2685 }
2686 
2687 /*
2688  * Find a slab with some space.  Prefer slabs that are partially used over those
2689  * that are totally full.  This helps to reduce fragmentation.
2690  *
2691  * If 'rr' is 1, search all domains starting from 'domain'.  Otherwise check
2692  * only 'domain'.
2693  */
2694 static uma_slab_t
2695 keg_first_slab(uma_keg_t keg, int domain, bool rr)
2696 {
2697 	uma_domain_t dom;
2698 	uma_slab_t slab;
2699 	int start;
2700 
2701 	KASSERT(domain >= 0 && domain < vm_ndomains,
2702 	    ("keg_first_slab: domain %d out of range", domain));
2703 	KEG_LOCK_ASSERT(keg);
2704 
2705 	slab = NULL;
2706 	start = domain;
2707 	do {
2708 		dom = &keg->uk_domain[domain];
2709 		if (!LIST_EMPTY(&dom->ud_part_slab))
2710 			return (LIST_FIRST(&dom->ud_part_slab));
2711 		if (!LIST_EMPTY(&dom->ud_free_slab)) {
2712 			slab = LIST_FIRST(&dom->ud_free_slab);
2713 			LIST_REMOVE(slab, us_link);
2714 			LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
2715 			return (slab);
2716 		}
2717 		if (rr)
2718 			domain = (domain + 1) % vm_ndomains;
2719 	} while (domain != start);
2720 
2721 	return (NULL);
2722 }
2723 
2724 static uma_slab_t
2725 keg_fetch_free_slab(uma_keg_t keg, int domain, bool rr, int flags)
2726 {
2727 	uint32_t reserve;
2728 
2729 	KEG_LOCK_ASSERT(keg);
2730 
2731 	reserve = (flags & M_USE_RESERVE) != 0 ? 0 : keg->uk_reserve;
2732 	if (keg->uk_free <= reserve)
2733 		return (NULL);
2734 	return (keg_first_slab(keg, domain, rr));
2735 }
2736 
2737 static uma_slab_t
2738 keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int rdomain, const int flags)
2739 {
2740 	struct vm_domainset_iter di;
2741 	uma_domain_t dom;
2742 	uma_slab_t slab;
2743 	int aflags, domain;
2744 	bool rr;
2745 
2746 restart:
2747 	KEG_LOCK_ASSERT(keg);
2748 
2749 	/*
2750 	 * Use the keg's policy if upper layers haven't already specified a
2751 	 * domain (as happens with first-touch zones).
2752 	 *
2753 	 * To avoid races we run the iterator with the keg lock held, but that
2754 	 * means that we cannot allow the vm_domainset layer to sleep.  Thus,
2755 	 * clear M_WAITOK and handle low memory conditions locally.
2756 	 */
2757 	rr = rdomain == UMA_ANYDOMAIN;
2758 	if (rr) {
2759 		aflags = (flags & ~M_WAITOK) | M_NOWAIT;
2760 		vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain,
2761 		    &aflags);
2762 	} else {
2763 		aflags = flags;
2764 		domain = rdomain;
2765 	}
2766 
2767 	for (;;) {
2768 		slab = keg_fetch_free_slab(keg, domain, rr, flags);
2769 		if (slab != NULL) {
2770 			MPASS(slab->us_keg == keg);
2771 			return (slab);
2772 		}
2773 
2774 		/*
2775 		 * M_NOVM means don't ask at all!
2776 		 */
2777 		if (flags & M_NOVM)
2778 			break;
2779 
2780 		KASSERT(zone->uz_max_items == 0 ||
2781 		    zone->uz_items <= zone->uz_max_items,
2782 		    ("%s: zone %p overflow", __func__, zone));
2783 
2784 		slab = keg_alloc_slab(keg, zone, domain, flags, aflags);
2785 		/*
2786 		 * If we got a slab here it's safe to mark it partially used
2787 		 * and return.  We assume that the caller is going to remove
2788 		 * at least one item.
2789 		 */
2790 		if (slab) {
2791 			MPASS(slab->us_keg == keg);
2792 			dom = &keg->uk_domain[slab->us_domain];
2793 			LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
2794 			return (slab);
2795 		}
2796 		KEG_LOCK(keg);
2797 		if (rr && vm_domainset_iter_policy(&di, &domain) != 0) {
2798 			if ((flags & M_WAITOK) != 0) {
2799 				KEG_UNLOCK(keg);
2800 				vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask);
2801 				KEG_LOCK(keg);
2802 				goto restart;
2803 			}
2804 			break;
2805 		}
2806 	}
2807 
2808 	/*
2809 	 * We might not have been able to get a slab but another cpu
2810 	 * could have while we were unlocked.  Check again before we
2811 	 * fail.
2812 	 */
2813 	if ((slab = keg_fetch_free_slab(keg, domain, rr, flags)) != NULL) {
2814 		MPASS(slab->us_keg == keg);
2815 		return (slab);
2816 	}
2817 	return (NULL);
2818 }
2819 
2820 static uma_slab_t
2821 zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int domain, int flags)
2822 {
2823 	uma_slab_t slab;
2824 
2825 	if (keg == NULL) {
2826 		keg = zone->uz_keg;
2827 		KEG_LOCK(keg);
2828 	}
2829 
2830 	for (;;) {
2831 		slab = keg_fetch_slab(keg, zone, domain, flags);
2832 		if (slab)
2833 			return (slab);
2834 		if (flags & (M_NOWAIT | M_NOVM))
2835 			break;
2836 	}
2837 	KEG_UNLOCK(keg);
2838 	return (NULL);
2839 }
2840 
2841 static void *
2842 slab_alloc_item(uma_keg_t keg, uma_slab_t slab)
2843 {
2844 	uma_domain_t dom;
2845 	void *item;
2846 	uint8_t freei;
2847 
2848 	MPASS(keg == slab->us_keg);
2849 	KEG_LOCK_ASSERT(keg);
2850 
2851 	freei = BIT_FFS(SLAB_SETSIZE, &slab->us_free) - 1;
2852 	BIT_CLR(SLAB_SETSIZE, freei, &slab->us_free);
2853 	item = slab->us_data + (keg->uk_rsize * freei);
2854 	slab->us_freecount--;
2855 	keg->uk_free--;
2856 
2857 	/* Move this slab to the full list */
2858 	if (slab->us_freecount == 0) {
2859 		LIST_REMOVE(slab, us_link);
2860 		dom = &keg->uk_domain[slab->us_domain];
2861 		LIST_INSERT_HEAD(&dom->ud_full_slab, slab, us_link);
2862 	}
2863 
2864 	return (item);
2865 }
2866 
2867 static int
2868 zone_import(uma_zone_t zone, void **bucket, int max, int domain, int flags)
2869 {
2870 	uma_slab_t slab;
2871 	uma_keg_t keg;
2872 #ifdef NUMA
2873 	int stripe;
2874 #endif
2875 	int i;
2876 
2877 	slab = NULL;
2878 	keg = NULL;
2879 	/* Try to keep the buckets totally full */
2880 	for (i = 0; i < max; ) {
2881 		if ((slab = zone_fetch_slab(zone, keg, domain, flags)) == NULL)
2882 			break;
2883 		keg = slab->us_keg;
2884 #ifdef NUMA
2885 		stripe = howmany(max, vm_ndomains);
2886 #endif
2887 		while (slab->us_freecount && i < max) {
2888 			bucket[i++] = slab_alloc_item(keg, slab);
2889 			if (keg->uk_free <= keg->uk_reserve)
2890 				break;
2891 #ifdef NUMA
2892 			/*
2893 			 * If the zone is striped we pick a new slab for every
2894 			 * N allocations.  Eliminating this conditional will
2895 			 * instead pick a new domain for each bucket rather
2896 			 * than stripe within each bucket.  The current option
2897 			 * produces more fragmentation and requires more cpu
2898 			 * time but yields better distribution.
2899 			 */
2900 			if ((zone->uz_flags & UMA_ZONE_NUMA) == 0 &&
2901 			    vm_ndomains > 1 && --stripe == 0)
2902 				break;
2903 #endif
2904 		}
2905 		/* Don't block if we allocated any successfully. */
2906 		flags &= ~M_WAITOK;
2907 		flags |= M_NOWAIT;
2908 	}
2909 	if (slab != NULL)
2910 		KEG_UNLOCK(keg);
2911 
2912 	return i;
2913 }
2914 
2915 static uma_bucket_t
2916 zone_alloc_bucket(uma_zone_t zone, void *udata, int domain, int flags, int max)
2917 {
2918 	uma_bucket_t bucket;
2919 
2920 	CTR1(KTR_UMA, "zone_alloc:_bucket domain %d)", domain);
2921 
2922 	/* Avoid allocs targeting empty domains. */
2923 	if (domain != UMA_ANYDOMAIN && VM_DOMAIN_EMPTY(domain))
2924 		domain = UMA_ANYDOMAIN;
2925 
2926 	/* Don't wait for buckets, preserve caller's NOVM setting. */
2927 	bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM));
2928 	if (bucket == NULL)
2929 		return (NULL);
2930 
2931 	bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket,
2932 	    MIN(max, bucket->ub_entries), domain, flags);
2933 
2934 	/*
2935 	 * Initialize the memory if necessary.
2936 	 */
2937 	if (bucket->ub_cnt != 0 && zone->uz_init != NULL) {
2938 		int i;
2939 
2940 		for (i = 0; i < bucket->ub_cnt; i++)
2941 			if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size,
2942 			    flags) != 0)
2943 				break;
2944 		/*
2945 		 * If we couldn't initialize the whole bucket, put the
2946 		 * rest back onto the freelist.
2947 		 */
2948 		if (i != bucket->ub_cnt) {
2949 			zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i],
2950 			    bucket->ub_cnt - i);
2951 #ifdef INVARIANTS
2952 			bzero(&bucket->ub_bucket[i],
2953 			    sizeof(void *) * (bucket->ub_cnt - i));
2954 #endif
2955 			bucket->ub_cnt = i;
2956 		}
2957 	}
2958 
2959 	if (bucket->ub_cnt == 0) {
2960 		bucket_free(zone, bucket, udata);
2961 		counter_u64_add(zone->uz_fails, 1);
2962 		return (NULL);
2963 	}
2964 
2965 	return (bucket);
2966 }
2967 
2968 /*
2969  * Allocates a single item from a zone.
2970  *
2971  * Arguments
2972  *	zone   The zone to alloc for.
2973  *	udata  The data to be passed to the constructor.
2974  *	domain The domain to allocate from or UMA_ANYDOMAIN.
2975  *	flags  M_WAITOK, M_NOWAIT, M_ZERO.
2976  *
2977  * Returns
2978  *	NULL if there is no memory and M_NOWAIT is set
2979  *	An item if successful
2980  */
2981 
2982 static void *
2983 zone_alloc_item(uma_zone_t zone, void *udata, int domain, int flags)
2984 {
2985 
2986 	ZONE_LOCK(zone);
2987 	return (zone_alloc_item_locked(zone, udata, domain, flags));
2988 }
2989 
2990 /*
2991  * Returns with zone unlocked.
2992  */
2993 static void *
2994 zone_alloc_item_locked(uma_zone_t zone, void *udata, int domain, int flags)
2995 {
2996 	void *item;
2997 #ifdef INVARIANTS
2998 	bool skipdbg;
2999 #endif
3000 
3001 	ZONE_LOCK_ASSERT(zone);
3002 
3003 	if (zone->uz_max_items > 0) {
3004 		if (zone->uz_items >= zone->uz_max_items) {
3005 			zone_log_warning(zone);
3006 			zone_maxaction(zone);
3007 			if (flags & M_NOWAIT) {
3008 				ZONE_UNLOCK(zone);
3009 				return (NULL);
3010 			}
3011 			zone->uz_sleeps++;
3012 			zone->uz_sleepers++;
3013 			while (zone->uz_items >= zone->uz_max_items)
3014 				mtx_sleep(zone, zone->uz_lockptr, PVM,
3015 				    "zonelimit", 0);
3016 			zone->uz_sleepers--;
3017 			if (zone->uz_sleepers > 0 &&
3018 			    zone->uz_items + 1 < zone->uz_max_items)
3019 				wakeup_one(zone);
3020 		}
3021 		zone->uz_items++;
3022 	}
3023 	ZONE_UNLOCK(zone);
3024 
3025 	/* Avoid allocs targeting empty domains. */
3026 	if (domain != UMA_ANYDOMAIN && VM_DOMAIN_EMPTY(domain))
3027 		domain = UMA_ANYDOMAIN;
3028 
3029 	if (zone->uz_import(zone->uz_arg, &item, 1, domain, flags) != 1)
3030 		goto fail;
3031 
3032 #ifdef INVARIANTS
3033 	skipdbg = uma_dbg_zskip(zone, item);
3034 #endif
3035 	/*
3036 	 * We have to call both the zone's init (not the keg's init)
3037 	 * and the zone's ctor.  This is because the item is going from
3038 	 * a keg slab directly to the user, and the user is expecting it
3039 	 * to be both zone-init'd as well as zone-ctor'd.
3040 	 */
3041 	if (zone->uz_init != NULL) {
3042 		if (zone->uz_init(item, zone->uz_size, flags) != 0) {
3043 			zone_free_item(zone, item, udata, SKIP_FINI | SKIP_CNT);
3044 			goto fail;
3045 		}
3046 	}
3047 	if (zone->uz_ctor != NULL &&
3048 #ifdef INVARIANTS
3049 	    (!skipdbg || zone->uz_ctor != trash_ctor ||
3050 	    zone->uz_dtor != trash_dtor) &&
3051 #endif
3052 	    zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
3053 		zone_free_item(zone, item, udata, SKIP_DTOR | SKIP_CNT);
3054 		goto fail;
3055 	}
3056 #ifdef INVARIANTS
3057 	if (!skipdbg)
3058 		uma_dbg_alloc(zone, NULL, item);
3059 #endif
3060 	if (flags & M_ZERO)
3061 		uma_zero_item(item, zone);
3062 
3063 	counter_u64_add(zone->uz_allocs, 1);
3064 	CTR3(KTR_UMA, "zone_alloc_item item %p from %s(%p)", item,
3065 	    zone->uz_name, zone);
3066 
3067 	return (item);
3068 
3069 fail:
3070 	if (zone->uz_max_items > 0) {
3071 		ZONE_LOCK(zone);
3072 		zone->uz_items--;
3073 		ZONE_UNLOCK(zone);
3074 	}
3075 	counter_u64_add(zone->uz_fails, 1);
3076 	CTR2(KTR_UMA, "zone_alloc_item failed from %s(%p)",
3077 	    zone->uz_name, zone);
3078 	return (NULL);
3079 }
3080 
3081 /* See uma.h */
3082 void
3083 uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
3084 {
3085 	uma_cache_t cache;
3086 	uma_bucket_t bucket;
3087 	uma_zone_domain_t zdom;
3088 	int cpu, domain;
3089 #ifdef UMA_XDOMAIN
3090 	int itemdomain;
3091 #endif
3092 	bool lockfail;
3093 #ifdef INVARIANTS
3094 	bool skipdbg;
3095 #endif
3096 
3097 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
3098 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
3099 
3100 	CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread,
3101 	    zone->uz_name);
3102 
3103 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
3104 	    ("uma_zfree_arg: called with spinlock or critical section held"));
3105 
3106         /* uma_zfree(..., NULL) does nothing, to match free(9). */
3107         if (item == NULL)
3108                 return;
3109 #ifdef DEBUG_MEMGUARD
3110 	if (is_memguard_addr(item)) {
3111 		if (zone->uz_dtor != NULL)
3112 			zone->uz_dtor(item, zone->uz_size, udata);
3113 		if (zone->uz_fini != NULL)
3114 			zone->uz_fini(item, zone->uz_size);
3115 		memguard_free(item);
3116 		return;
3117 	}
3118 #endif
3119 #ifdef INVARIANTS
3120 	skipdbg = uma_dbg_zskip(zone, item);
3121 	if (skipdbg == false) {
3122 		if (zone->uz_flags & UMA_ZONE_MALLOC)
3123 			uma_dbg_free(zone, udata, item);
3124 		else
3125 			uma_dbg_free(zone, NULL, item);
3126 	}
3127 	if (zone->uz_dtor != NULL && (!skipdbg ||
3128 	    zone->uz_dtor != trash_dtor || zone->uz_ctor != trash_ctor))
3129 #else
3130 	if (zone->uz_dtor != NULL)
3131 #endif
3132 		zone->uz_dtor(item, zone->uz_size, udata);
3133 
3134 	/*
3135 	 * The race here is acceptable.  If we miss it we'll just have to wait
3136 	 * a little longer for the limits to be reset.
3137 	 */
3138 	if (zone->uz_sleepers > 0)
3139 		goto zfree_item;
3140 
3141 #ifdef UMA_XDOMAIN
3142 	if ((zone->uz_flags & UMA_ZONE_NUMA) != 0)
3143 		itemdomain = _vm_phys_domain(pmap_kextract((vm_offset_t)item));
3144 #endif
3145 
3146 	/*
3147 	 * If possible, free to the per-CPU cache.  There are two
3148 	 * requirements for safe access to the per-CPU cache: (1) the thread
3149 	 * accessing the cache must not be preempted or yield during access,
3150 	 * and (2) the thread must not migrate CPUs without switching which
3151 	 * cache it accesses.  We rely on a critical section to prevent
3152 	 * preemption and migration.  We release the critical section in
3153 	 * order to acquire the zone mutex if we are unable to free to the
3154 	 * current cache; when we re-acquire the critical section, we must
3155 	 * detect and handle migration if it has occurred.
3156 	 */
3157 zfree_restart:
3158 	critical_enter();
3159 	cpu = curcpu;
3160 	cache = &zone->uz_cpu[cpu];
3161 
3162 zfree_start:
3163 	domain = PCPU_GET(domain);
3164 #ifdef UMA_XDOMAIN
3165 	if ((zone->uz_flags & UMA_ZONE_NUMA) == 0)
3166 		itemdomain = domain;
3167 #endif
3168 	/*
3169 	 * Try to free into the allocbucket first to give LIFO ordering
3170 	 * for cache-hot datastructures.  Spill over into the freebucket
3171 	 * if necessary.  Alloc will swap them if one runs dry.
3172 	 */
3173 #ifdef UMA_XDOMAIN
3174 	if (domain != itemdomain) {
3175 		bucket = cache->uc_crossbucket;
3176 	} else
3177 #endif
3178 	{
3179 		bucket = cache->uc_allocbucket;
3180 		if (bucket == NULL || bucket->ub_cnt >= bucket->ub_entries)
3181 			bucket = cache->uc_freebucket;
3182 	}
3183 	if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
3184 		KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL,
3185 		    ("uma_zfree: Freeing to non free bucket index."));
3186 		bucket->ub_bucket[bucket->ub_cnt] = item;
3187 		bucket->ub_cnt++;
3188 		cache->uc_frees++;
3189 		critical_exit();
3190 		return;
3191 	}
3192 
3193 	/*
3194 	 * We must go back the zone, which requires acquiring the zone lock,
3195 	 * which in turn means we must release and re-acquire the critical
3196 	 * section.  Since the critical section is released, we may be
3197 	 * preempted or migrate.  As such, make sure not to maintain any
3198 	 * thread-local state specific to the cache from prior to releasing
3199 	 * the critical section.
3200 	 */
3201 	critical_exit();
3202 	if (zone->uz_count == 0 || bucketdisable)
3203 		goto zfree_item;
3204 
3205 	lockfail = false;
3206 	if (ZONE_TRYLOCK(zone) == 0) {
3207 		/* Record contention to size the buckets. */
3208 		ZONE_LOCK(zone);
3209 		lockfail = true;
3210 	}
3211 	critical_enter();
3212 	cpu = curcpu;
3213 	domain = PCPU_GET(domain);
3214 	cache = &zone->uz_cpu[cpu];
3215 
3216 #ifdef UMA_XDOMAIN
3217 	if (domain != itemdomain)
3218 		bucket = cache->uc_crossbucket;
3219 	else
3220 #endif
3221 		bucket = cache->uc_freebucket;
3222 	if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
3223 		ZONE_UNLOCK(zone);
3224 		goto zfree_start;
3225 	}
3226 #ifdef UMA_XDOMAIN
3227 	if (domain != itemdomain)
3228 		cache->uc_crossbucket = NULL;
3229 	else
3230 #endif
3231 		cache->uc_freebucket = NULL;
3232 	/* We are no longer associated with this CPU. */
3233 	critical_exit();
3234 
3235 #ifdef UMA_XDOMAIN
3236 	if (domain != itemdomain) {
3237 		if (bucket != NULL) {
3238 			zone->uz_xdomain += bucket->ub_cnt;
3239 			if (vm_ndomains > 2 ||
3240 			    zone->uz_bkt_count >= zone->uz_bkt_max) {
3241 				ZONE_UNLOCK(zone);
3242 				bucket_drain(zone, bucket);
3243 				bucket_free(zone, bucket, udata);
3244 			} else {
3245 				zdom = &zone->uz_domain[itemdomain];
3246 				zone_put_bucket(zone, zdom, bucket, true);
3247 				ZONE_UNLOCK(zone);
3248 			}
3249 		} else
3250 			ZONE_UNLOCK(zone);
3251 		bucket = bucket_alloc(zone, udata, M_NOWAIT);
3252 		if (bucket == NULL)
3253 			goto zfree_item;
3254 		critical_enter();
3255 		cpu = curcpu;
3256 		cache = &zone->uz_cpu[cpu];
3257 		if (cache->uc_crossbucket == NULL) {
3258 			cache->uc_crossbucket = bucket;
3259 			goto zfree_start;
3260 		}
3261 		critical_exit();
3262 		bucket_free(zone, bucket, udata);
3263 		goto zfree_restart;
3264 	}
3265 #endif
3266 
3267 	if ((zone->uz_flags & UMA_ZONE_NUMA) != 0) {
3268 		zdom = &zone->uz_domain[domain];
3269 	} else {
3270 		domain = 0;
3271 		zdom = &zone->uz_domain[0];
3272 	}
3273 
3274 	/* Can we throw this on the zone full list? */
3275 	if (bucket != NULL) {
3276 		CTR3(KTR_UMA,
3277 		    "uma_zfree: zone %s(%p) putting bucket %p on free list",
3278 		    zone->uz_name, zone, bucket);
3279 		/* ub_cnt is pointing to the last free item */
3280 		KASSERT(bucket->ub_cnt == bucket->ub_entries,
3281 		    ("uma_zfree: Attempting to insert not full bucket onto the full list.\n"));
3282 		if (zone->uz_bkt_count >= zone->uz_bkt_max) {
3283 			ZONE_UNLOCK(zone);
3284 			bucket_drain(zone, bucket);
3285 			bucket_free(zone, bucket, udata);
3286 			goto zfree_restart;
3287 		} else
3288 			zone_put_bucket(zone, zdom, bucket, true);
3289 	}
3290 
3291 	/*
3292 	 * We bump the uz count when the cache size is insufficient to
3293 	 * handle the working set.
3294 	 */
3295 	if (lockfail && zone->uz_count < zone->uz_count_max)
3296 		zone->uz_count++;
3297 	ZONE_UNLOCK(zone);
3298 
3299 	bucket = bucket_alloc(zone, udata, M_NOWAIT);
3300 	CTR3(KTR_UMA, "uma_zfree: zone %s(%p) allocated bucket %p",
3301 	    zone->uz_name, zone, bucket);
3302 	if (bucket) {
3303 		critical_enter();
3304 		cpu = curcpu;
3305 		cache = &zone->uz_cpu[cpu];
3306 		if (cache->uc_freebucket == NULL &&
3307 		    ((zone->uz_flags & UMA_ZONE_NUMA) == 0 ||
3308 		    domain == PCPU_GET(domain))) {
3309 			cache->uc_freebucket = bucket;
3310 			goto zfree_start;
3311 		}
3312 		/*
3313 		 * We lost the race, start over.  We have to drop our
3314 		 * critical section to free the bucket.
3315 		 */
3316 		critical_exit();
3317 		bucket_free(zone, bucket, udata);
3318 		goto zfree_restart;
3319 	}
3320 
3321 	/*
3322 	 * If nothing else caught this, we'll just do an internal free.
3323 	 */
3324 zfree_item:
3325 	zone_free_item(zone, item, udata, SKIP_DTOR);
3326 }
3327 
3328 void
3329 uma_zfree_domain(uma_zone_t zone, void *item, void *udata)
3330 {
3331 
3332 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
3333 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
3334 
3335 	CTR2(KTR_UMA, "uma_zfree_domain thread %x zone %s", curthread,
3336 	    zone->uz_name);
3337 
3338 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
3339 	    ("uma_zfree_domain: called with spinlock or critical section held"));
3340 
3341         /* uma_zfree(..., NULL) does nothing, to match free(9). */
3342         if (item == NULL)
3343                 return;
3344 	zone_free_item(zone, item, udata, SKIP_NONE);
3345 }
3346 
3347 static void
3348 slab_free_item(uma_zone_t zone, uma_slab_t slab, void *item)
3349 {
3350 	uma_keg_t keg;
3351 	uma_domain_t dom;
3352 	uint8_t freei;
3353 
3354 	keg = zone->uz_keg;
3355 	MPASS(zone->uz_lockptr == &keg->uk_lock);
3356 	KEG_LOCK_ASSERT(keg);
3357 	MPASS(keg == slab->us_keg);
3358 
3359 	dom = &keg->uk_domain[slab->us_domain];
3360 
3361 	/* Do we need to remove from any lists? */
3362 	if (slab->us_freecount+1 == keg->uk_ipers) {
3363 		LIST_REMOVE(slab, us_link);
3364 		LIST_INSERT_HEAD(&dom->ud_free_slab, slab, us_link);
3365 	} else if (slab->us_freecount == 0) {
3366 		LIST_REMOVE(slab, us_link);
3367 		LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
3368 	}
3369 
3370 	/* Slab management. */
3371 	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
3372 	BIT_SET(SLAB_SETSIZE, freei, &slab->us_free);
3373 	slab->us_freecount++;
3374 
3375 	/* Keg statistics. */
3376 	keg->uk_free++;
3377 }
3378 
3379 static void
3380 zone_release(uma_zone_t zone, void **bucket, int cnt)
3381 {
3382 	void *item;
3383 	uma_slab_t slab;
3384 	uma_keg_t keg;
3385 	uint8_t *mem;
3386 	int i;
3387 
3388 	keg = zone->uz_keg;
3389 	KEG_LOCK(keg);
3390 	for (i = 0; i < cnt; i++) {
3391 		item = bucket[i];
3392 		if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) {
3393 			mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
3394 			if (zone->uz_flags & UMA_ZONE_HASH) {
3395 				slab = hash_sfind(&keg->uk_hash, mem);
3396 			} else {
3397 				mem += keg->uk_pgoff;
3398 				slab = (uma_slab_t)mem;
3399 			}
3400 		} else {
3401 			slab = vtoslab((vm_offset_t)item);
3402 			MPASS(slab->us_keg == keg);
3403 		}
3404 		slab_free_item(zone, slab, item);
3405 	}
3406 	KEG_UNLOCK(keg);
3407 }
3408 
3409 /*
3410  * Frees a single item to any zone.
3411  *
3412  * Arguments:
3413  *	zone   The zone to free to
3414  *	item   The item we're freeing
3415  *	udata  User supplied data for the dtor
3416  *	skip   Skip dtors and finis
3417  */
3418 static void
3419 zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip)
3420 {
3421 #ifdef INVARIANTS
3422 	bool skipdbg;
3423 
3424 	skipdbg = uma_dbg_zskip(zone, item);
3425 	if (skip == SKIP_NONE && !skipdbg) {
3426 		if (zone->uz_flags & UMA_ZONE_MALLOC)
3427 			uma_dbg_free(zone, udata, item);
3428 		else
3429 			uma_dbg_free(zone, NULL, item);
3430 	}
3431 
3432 	if (skip < SKIP_DTOR && zone->uz_dtor != NULL &&
3433 	    (!skipdbg || zone->uz_dtor != trash_dtor ||
3434 	    zone->uz_ctor != trash_ctor))
3435 #else
3436 	if (skip < SKIP_DTOR && zone->uz_dtor != NULL)
3437 #endif
3438 		zone->uz_dtor(item, zone->uz_size, udata);
3439 
3440 	if (skip < SKIP_FINI && zone->uz_fini)
3441 		zone->uz_fini(item, zone->uz_size);
3442 
3443 	zone->uz_release(zone->uz_arg, &item, 1);
3444 
3445 	if (skip & SKIP_CNT)
3446 		return;
3447 
3448 	counter_u64_add(zone->uz_frees, 1);
3449 
3450 	if (zone->uz_max_items > 0) {
3451 		ZONE_LOCK(zone);
3452 		zone->uz_items--;
3453 		if (zone->uz_sleepers > 0 &&
3454 		    zone->uz_items < zone->uz_max_items)
3455 			wakeup_one(zone);
3456 		ZONE_UNLOCK(zone);
3457 	}
3458 }
3459 
3460 /* See uma.h */
3461 int
3462 uma_zone_set_max(uma_zone_t zone, int nitems)
3463 {
3464 	struct uma_bucket_zone *ubz;
3465 
3466 	/*
3467 	 * If limit is very low we may need to limit how
3468 	 * much items are allowed in CPU caches.
3469 	 */
3470 	ubz = &bucket_zones[0];
3471 	for (; ubz->ubz_entries != 0; ubz++)
3472 		if (ubz->ubz_entries * 2 * mp_ncpus > nitems)
3473 			break;
3474 	if (ubz == &bucket_zones[0])
3475 		nitems = ubz->ubz_entries * 2 * mp_ncpus;
3476 	else
3477 		ubz--;
3478 
3479 	ZONE_LOCK(zone);
3480 	zone->uz_count_max = zone->uz_count = ubz->ubz_entries;
3481 	if (zone->uz_count_min > zone->uz_count_max)
3482 		zone->uz_count_min = zone->uz_count_max;
3483 	zone->uz_max_items = nitems;
3484 	ZONE_UNLOCK(zone);
3485 
3486 	return (nitems);
3487 }
3488 
3489 /* See uma.h */
3490 int
3491 uma_zone_set_maxcache(uma_zone_t zone, int nitems)
3492 {
3493 
3494 	ZONE_LOCK(zone);
3495 	zone->uz_bkt_max = nitems;
3496 	ZONE_UNLOCK(zone);
3497 
3498 	return (nitems);
3499 }
3500 
3501 /* See uma.h */
3502 int
3503 uma_zone_get_max(uma_zone_t zone)
3504 {
3505 	int nitems;
3506 
3507 	ZONE_LOCK(zone);
3508 	nitems = zone->uz_max_items;
3509 	ZONE_UNLOCK(zone);
3510 
3511 	return (nitems);
3512 }
3513 
3514 /* See uma.h */
3515 void
3516 uma_zone_set_warning(uma_zone_t zone, const char *warning)
3517 {
3518 
3519 	ZONE_LOCK(zone);
3520 	zone->uz_warning = warning;
3521 	ZONE_UNLOCK(zone);
3522 }
3523 
3524 /* See uma.h */
3525 void
3526 uma_zone_set_maxaction(uma_zone_t zone, uma_maxaction_t maxaction)
3527 {
3528 
3529 	ZONE_LOCK(zone);
3530 	TASK_INIT(&zone->uz_maxaction, 0, (task_fn_t *)maxaction, zone);
3531 	ZONE_UNLOCK(zone);
3532 }
3533 
3534 /* See uma.h */
3535 int
3536 uma_zone_get_cur(uma_zone_t zone)
3537 {
3538 	int64_t nitems;
3539 	u_int i;
3540 
3541 	ZONE_LOCK(zone);
3542 	nitems = counter_u64_fetch(zone->uz_allocs) -
3543 	    counter_u64_fetch(zone->uz_frees);
3544 	CPU_FOREACH(i) {
3545 		/*
3546 		 * See the comment in uma_vm_zone_stats() regarding the
3547 		 * safety of accessing the per-cpu caches. With the zone lock
3548 		 * held, it is safe, but can potentially result in stale data.
3549 		 */
3550 		nitems += zone->uz_cpu[i].uc_allocs -
3551 		    zone->uz_cpu[i].uc_frees;
3552 	}
3553 	ZONE_UNLOCK(zone);
3554 
3555 	return (nitems < 0 ? 0 : nitems);
3556 }
3557 
3558 /* See uma.h */
3559 void
3560 uma_zone_set_init(uma_zone_t zone, uma_init uminit)
3561 {
3562 	uma_keg_t keg;
3563 
3564 	KEG_GET(zone, keg);
3565 	KEG_LOCK(keg);
3566 	KASSERT(keg->uk_pages == 0,
3567 	    ("uma_zone_set_init on non-empty keg"));
3568 	keg->uk_init = uminit;
3569 	KEG_UNLOCK(keg);
3570 }
3571 
3572 /* See uma.h */
3573 void
3574 uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
3575 {
3576 	uma_keg_t keg;
3577 
3578 	KEG_GET(zone, keg);
3579 	KEG_LOCK(keg);
3580 	KASSERT(keg->uk_pages == 0,
3581 	    ("uma_zone_set_fini on non-empty keg"));
3582 	keg->uk_fini = fini;
3583 	KEG_UNLOCK(keg);
3584 }
3585 
3586 /* See uma.h */
3587 void
3588 uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
3589 {
3590 
3591 	ZONE_LOCK(zone);
3592 	KASSERT(zone->uz_keg->uk_pages == 0,
3593 	    ("uma_zone_set_zinit on non-empty keg"));
3594 	zone->uz_init = zinit;
3595 	ZONE_UNLOCK(zone);
3596 }
3597 
3598 /* See uma.h */
3599 void
3600 uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
3601 {
3602 
3603 	ZONE_LOCK(zone);
3604 	KASSERT(zone->uz_keg->uk_pages == 0,
3605 	    ("uma_zone_set_zfini on non-empty keg"));
3606 	zone->uz_fini = zfini;
3607 	ZONE_UNLOCK(zone);
3608 }
3609 
3610 /* See uma.h */
3611 /* XXX uk_freef is not actually used with the zone locked */
3612 void
3613 uma_zone_set_freef(uma_zone_t zone, uma_free freef)
3614 {
3615 	uma_keg_t keg;
3616 
3617 	KEG_GET(zone, keg);
3618 	KASSERT(keg != NULL, ("uma_zone_set_freef: Invalid zone type"));
3619 	KEG_LOCK(keg);
3620 	keg->uk_freef = freef;
3621 	KEG_UNLOCK(keg);
3622 }
3623 
3624 /* See uma.h */
3625 /* XXX uk_allocf is not actually used with the zone locked */
3626 void
3627 uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
3628 {
3629 	uma_keg_t keg;
3630 
3631 	KEG_GET(zone, keg);
3632 	KEG_LOCK(keg);
3633 	keg->uk_allocf = allocf;
3634 	KEG_UNLOCK(keg);
3635 }
3636 
3637 /* See uma.h */
3638 void
3639 uma_zone_reserve(uma_zone_t zone, int items)
3640 {
3641 	uma_keg_t keg;
3642 
3643 	KEG_GET(zone, keg);
3644 	KEG_LOCK(keg);
3645 	keg->uk_reserve = items;
3646 	KEG_UNLOCK(keg);
3647 }
3648 
3649 /* See uma.h */
3650 int
3651 uma_zone_reserve_kva(uma_zone_t zone, int count)
3652 {
3653 	uma_keg_t keg;
3654 	vm_offset_t kva;
3655 	u_int pages;
3656 
3657 	KEG_GET(zone, keg);
3658 
3659 	pages = count / keg->uk_ipers;
3660 	if (pages * keg->uk_ipers < count)
3661 		pages++;
3662 	pages *= keg->uk_ppera;
3663 
3664 #ifdef UMA_MD_SMALL_ALLOC
3665 	if (keg->uk_ppera > 1) {
3666 #else
3667 	if (1) {
3668 #endif
3669 		kva = kva_alloc((vm_size_t)pages * PAGE_SIZE);
3670 		if (kva == 0)
3671 			return (0);
3672 	} else
3673 		kva = 0;
3674 
3675 	ZONE_LOCK(zone);
3676 	MPASS(keg->uk_kva == 0);
3677 	keg->uk_kva = kva;
3678 	keg->uk_offset = 0;
3679 	zone->uz_max_items = pages * keg->uk_ipers;
3680 #ifdef UMA_MD_SMALL_ALLOC
3681 	keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc;
3682 #else
3683 	keg->uk_allocf = noobj_alloc;
3684 #endif
3685 	keg->uk_flags |= UMA_ZONE_NOFREE;
3686 	ZONE_UNLOCK(zone);
3687 
3688 	return (1);
3689 }
3690 
3691 /* See uma.h */
3692 void
3693 uma_prealloc(uma_zone_t zone, int items)
3694 {
3695 	struct vm_domainset_iter di;
3696 	uma_domain_t dom;
3697 	uma_slab_t slab;
3698 	uma_keg_t keg;
3699 	int aflags, domain, slabs;
3700 
3701 	KEG_GET(zone, keg);
3702 	KEG_LOCK(keg);
3703 	slabs = items / keg->uk_ipers;
3704 	if (slabs * keg->uk_ipers < items)
3705 		slabs++;
3706 	while (slabs-- > 0) {
3707 		aflags = M_NOWAIT;
3708 		vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain,
3709 		    &aflags);
3710 		for (;;) {
3711 			slab = keg_alloc_slab(keg, zone, domain, M_WAITOK,
3712 			    aflags);
3713 			if (slab != NULL) {
3714 				MPASS(slab->us_keg == keg);
3715 				dom = &keg->uk_domain[slab->us_domain];
3716 				LIST_INSERT_HEAD(&dom->ud_free_slab, slab,
3717 				    us_link);
3718 				break;
3719 			}
3720 			KEG_LOCK(keg);
3721 			if (vm_domainset_iter_policy(&di, &domain) != 0) {
3722 				KEG_UNLOCK(keg);
3723 				vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask);
3724 				KEG_LOCK(keg);
3725 			}
3726 		}
3727 	}
3728 	KEG_UNLOCK(keg);
3729 }
3730 
3731 /* See uma.h */
3732 void
3733 uma_reclaim(int req)
3734 {
3735 
3736 	CTR0(KTR_UMA, "UMA: vm asked us to release pages!");
3737 	sx_xlock(&uma_reclaim_lock);
3738 	bucket_enable();
3739 
3740 	switch (req) {
3741 	case UMA_RECLAIM_TRIM:
3742 		zone_foreach(zone_trim);
3743 		break;
3744 	case UMA_RECLAIM_DRAIN:
3745 	case UMA_RECLAIM_DRAIN_CPU:
3746 		zone_foreach(zone_drain);
3747 		if (req == UMA_RECLAIM_DRAIN_CPU) {
3748 			pcpu_cache_drain_safe(NULL);
3749 			zone_foreach(zone_drain);
3750 		}
3751 		break;
3752 	default:
3753 		panic("unhandled reclamation request %d", req);
3754 	}
3755 
3756 	/*
3757 	 * Some slabs may have been freed but this zone will be visited early
3758 	 * we visit again so that we can free pages that are empty once other
3759 	 * zones are drained.  We have to do the same for buckets.
3760 	 */
3761 	zone_drain(slabzone);
3762 	bucket_zone_drain();
3763 	sx_xunlock(&uma_reclaim_lock);
3764 }
3765 
3766 static volatile int uma_reclaim_needed;
3767 
3768 void
3769 uma_reclaim_wakeup(void)
3770 {
3771 
3772 	if (atomic_fetchadd_int(&uma_reclaim_needed, 1) == 0)
3773 		wakeup(uma_reclaim);
3774 }
3775 
3776 void
3777 uma_reclaim_worker(void *arg __unused)
3778 {
3779 
3780 	for (;;) {
3781 		sx_xlock(&uma_reclaim_lock);
3782 		while (atomic_load_int(&uma_reclaim_needed) == 0)
3783 			sx_sleep(uma_reclaim, &uma_reclaim_lock, PVM, "umarcl",
3784 			    hz);
3785 		sx_xunlock(&uma_reclaim_lock);
3786 		EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_KMEM);
3787 		uma_reclaim(UMA_RECLAIM_DRAIN_CPU);
3788 		atomic_store_int(&uma_reclaim_needed, 0);
3789 		/* Don't fire more than once per-second. */
3790 		pause("umarclslp", hz);
3791 	}
3792 }
3793 
3794 /* See uma.h */
3795 void
3796 uma_zone_reclaim(uma_zone_t zone, int req)
3797 {
3798 
3799 	switch (req) {
3800 	case UMA_RECLAIM_TRIM:
3801 		zone_trim(zone);
3802 		break;
3803 	case UMA_RECLAIM_DRAIN:
3804 		zone_drain(zone);
3805 		break;
3806 	case UMA_RECLAIM_DRAIN_CPU:
3807 		pcpu_cache_drain_safe(zone);
3808 		zone_drain(zone);
3809 		break;
3810 	default:
3811 		panic("unhandled reclamation request %d", req);
3812 	}
3813 }
3814 
3815 /* See uma.h */
3816 int
3817 uma_zone_exhausted(uma_zone_t zone)
3818 {
3819 	int full;
3820 
3821 	ZONE_LOCK(zone);
3822 	full = zone->uz_sleepers > 0;
3823 	ZONE_UNLOCK(zone);
3824 	return (full);
3825 }
3826 
3827 int
3828 uma_zone_exhausted_nolock(uma_zone_t zone)
3829 {
3830 	return (zone->uz_sleepers > 0);
3831 }
3832 
3833 void *
3834 uma_large_malloc_domain(vm_size_t size, int domain, int wait)
3835 {
3836 	struct domainset *policy;
3837 	vm_offset_t addr;
3838 	uma_slab_t slab;
3839 
3840 	if (domain != UMA_ANYDOMAIN) {
3841 		/* avoid allocs targeting empty domains */
3842 		if (VM_DOMAIN_EMPTY(domain))
3843 			domain = UMA_ANYDOMAIN;
3844 	}
3845 	slab = zone_alloc_item(slabzone, NULL, domain, wait);
3846 	if (slab == NULL)
3847 		return (NULL);
3848 	policy = (domain == UMA_ANYDOMAIN) ? DOMAINSET_RR() :
3849 	    DOMAINSET_FIXED(domain);
3850 	addr = kmem_malloc_domainset(policy, size, wait);
3851 	if (addr != 0) {
3852 		vsetslab(addr, slab);
3853 		slab->us_data = (void *)addr;
3854 		slab->us_flags = UMA_SLAB_KERNEL | UMA_SLAB_MALLOC;
3855 		slab->us_size = size;
3856 		slab->us_domain = vm_phys_domain(PHYS_TO_VM_PAGE(
3857 		    pmap_kextract(addr)));
3858 		uma_total_inc(size);
3859 	} else {
3860 		zone_free_item(slabzone, slab, NULL, SKIP_NONE);
3861 	}
3862 
3863 	return ((void *)addr);
3864 }
3865 
3866 void *
3867 uma_large_malloc(vm_size_t size, int wait)
3868 {
3869 
3870 	return uma_large_malloc_domain(size, UMA_ANYDOMAIN, wait);
3871 }
3872 
3873 void
3874 uma_large_free(uma_slab_t slab)
3875 {
3876 
3877 	KASSERT((slab->us_flags & UMA_SLAB_KERNEL) != 0,
3878 	    ("uma_large_free:  Memory not allocated with uma_large_malloc."));
3879 	kmem_free((vm_offset_t)slab->us_data, slab->us_size);
3880 	uma_total_dec(slab->us_size);
3881 	zone_free_item(slabzone, slab, NULL, SKIP_NONE);
3882 }
3883 
3884 static void
3885 uma_zero_item(void *item, uma_zone_t zone)
3886 {
3887 
3888 	bzero(item, zone->uz_size);
3889 }
3890 
3891 unsigned long
3892 uma_limit(void)
3893 {
3894 
3895 	return (uma_kmem_limit);
3896 }
3897 
3898 void
3899 uma_set_limit(unsigned long limit)
3900 {
3901 
3902 	uma_kmem_limit = limit;
3903 }
3904 
3905 unsigned long
3906 uma_size(void)
3907 {
3908 
3909 	return (atomic_load_long(&uma_kmem_total));
3910 }
3911 
3912 long
3913 uma_avail(void)
3914 {
3915 
3916 	return (uma_kmem_limit - uma_size());
3917 }
3918 
3919 void
3920 uma_print_stats(void)
3921 {
3922 	zone_foreach(uma_print_zone);
3923 }
3924 
3925 static void
3926 slab_print(uma_slab_t slab)
3927 {
3928 	printf("slab: keg %p, data %p, freecount %d\n",
3929 		slab->us_keg, slab->us_data, slab->us_freecount);
3930 }
3931 
3932 static void
3933 cache_print(uma_cache_t cache)
3934 {
3935 	printf("alloc: %p(%d), free: %p(%d), cross: %p(%d)j\n",
3936 		cache->uc_allocbucket,
3937 		cache->uc_allocbucket?cache->uc_allocbucket->ub_cnt:0,
3938 		cache->uc_freebucket,
3939 		cache->uc_freebucket?cache->uc_freebucket->ub_cnt:0,
3940 		cache->uc_crossbucket,
3941 		cache->uc_crossbucket?cache->uc_crossbucket->ub_cnt:0);
3942 }
3943 
3944 static void
3945 uma_print_keg(uma_keg_t keg)
3946 {
3947 	uma_domain_t dom;
3948 	uma_slab_t slab;
3949 	int i;
3950 
3951 	printf("keg: %s(%p) size %d(%d) flags %#x ipers %d ppera %d "
3952 	    "out %d free %d\n",
3953 	    keg->uk_name, keg, keg->uk_size, keg->uk_rsize, keg->uk_flags,
3954 	    keg->uk_ipers, keg->uk_ppera,
3955 	    (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free,
3956 	    keg->uk_free);
3957 	for (i = 0; i < vm_ndomains; i++) {
3958 		dom = &keg->uk_domain[i];
3959 		printf("Part slabs:\n");
3960 		LIST_FOREACH(slab, &dom->ud_part_slab, us_link)
3961 			slab_print(slab);
3962 		printf("Free slabs:\n");
3963 		LIST_FOREACH(slab, &dom->ud_free_slab, us_link)
3964 			slab_print(slab);
3965 		printf("Full slabs:\n");
3966 		LIST_FOREACH(slab, &dom->ud_full_slab, us_link)
3967 			slab_print(slab);
3968 	}
3969 }
3970 
3971 void
3972 uma_print_zone(uma_zone_t zone)
3973 {
3974 	uma_cache_t cache;
3975 	int i;
3976 
3977 	printf("zone: %s(%p) size %d maxitems %ju flags %#x\n",
3978 	    zone->uz_name, zone, zone->uz_size, (uintmax_t)zone->uz_max_items,
3979 	    zone->uz_flags);
3980 	if (zone->uz_lockptr != &zone->uz_lock)
3981 		uma_print_keg(zone->uz_keg);
3982 	CPU_FOREACH(i) {
3983 		cache = &zone->uz_cpu[i];
3984 		printf("CPU %d Cache:\n", i);
3985 		cache_print(cache);
3986 	}
3987 }
3988 
3989 #ifdef DDB
3990 /*
3991  * Generate statistics across both the zone and its per-cpu cache's.  Return
3992  * desired statistics if the pointer is non-NULL for that statistic.
3993  *
3994  * Note: does not update the zone statistics, as it can't safely clear the
3995  * per-CPU cache statistic.
3996  *
3997  * XXXRW: Following the uc_allocbucket and uc_freebucket pointers here isn't
3998  * safe from off-CPU; we should modify the caches to track this information
3999  * directly so that we don't have to.
4000  */
4001 static void
4002 uma_zone_sumstat(uma_zone_t z, long *cachefreep, uint64_t *allocsp,
4003     uint64_t *freesp, uint64_t *sleepsp, uint64_t *xdomainp)
4004 {
4005 	uma_cache_t cache;
4006 	uint64_t allocs, frees, sleeps, xdomain;
4007 	int cachefree, cpu;
4008 
4009 	allocs = frees = sleeps = xdomain = 0;
4010 	cachefree = 0;
4011 	CPU_FOREACH(cpu) {
4012 		cache = &z->uz_cpu[cpu];
4013 		if (cache->uc_allocbucket != NULL)
4014 			cachefree += cache->uc_allocbucket->ub_cnt;
4015 		if (cache->uc_freebucket != NULL)
4016 			cachefree += cache->uc_freebucket->ub_cnt;
4017 		if (cache->uc_crossbucket != NULL) {
4018 			xdomain += cache->uc_crossbucket->ub_cnt;
4019 			cachefree += cache->uc_crossbucket->ub_cnt;
4020 		}
4021 		allocs += cache->uc_allocs;
4022 		frees += cache->uc_frees;
4023 	}
4024 	allocs += counter_u64_fetch(z->uz_allocs);
4025 	frees += counter_u64_fetch(z->uz_frees);
4026 	sleeps += z->uz_sleeps;
4027 	xdomain += z->uz_xdomain;
4028 	if (cachefreep != NULL)
4029 		*cachefreep = cachefree;
4030 	if (allocsp != NULL)
4031 		*allocsp = allocs;
4032 	if (freesp != NULL)
4033 		*freesp = frees;
4034 	if (sleepsp != NULL)
4035 		*sleepsp = sleeps;
4036 	if (xdomainp != NULL)
4037 		*xdomainp = xdomain;
4038 }
4039 #endif /* DDB */
4040 
4041 static int
4042 sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS)
4043 {
4044 	uma_keg_t kz;
4045 	uma_zone_t z;
4046 	int count;
4047 
4048 	count = 0;
4049 	rw_rlock(&uma_rwlock);
4050 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
4051 		LIST_FOREACH(z, &kz->uk_zones, uz_link)
4052 			count++;
4053 	}
4054 	LIST_FOREACH(z, &uma_cachezones, uz_link)
4055 		count++;
4056 
4057 	rw_runlock(&uma_rwlock);
4058 	return (sysctl_handle_int(oidp, &count, 0, req));
4059 }
4060 
4061 static void
4062 uma_vm_zone_stats(struct uma_type_header *uth, uma_zone_t z, struct sbuf *sbuf,
4063     struct uma_percpu_stat *ups, bool internal)
4064 {
4065 	uma_zone_domain_t zdom;
4066 	uma_bucket_t bucket;
4067 	uma_cache_t cache;
4068 	int i;
4069 
4070 
4071 	for (i = 0; i < vm_ndomains; i++) {
4072 		zdom = &z->uz_domain[i];
4073 		uth->uth_zone_free += zdom->uzd_nitems;
4074 	}
4075 	uth->uth_allocs = counter_u64_fetch(z->uz_allocs);
4076 	uth->uth_frees = counter_u64_fetch(z->uz_frees);
4077 	uth->uth_fails = counter_u64_fetch(z->uz_fails);
4078 	uth->uth_sleeps = z->uz_sleeps;
4079 	uth->uth_xdomain = z->uz_xdomain;
4080 
4081 	/*
4082 	 * While it is not normally safe to access the cache bucket pointers
4083 	 * while not on the CPU that owns the cache, we only allow the pointers
4084 	 * to be exchanged without the zone lock held, not invalidated, so
4085 	 * accept the possible race associated with bucket exchange during
4086 	 * monitoring.  Use atomic_load_ptr() to ensure that the bucket pointers
4087 	 * are loaded only once.
4088 	 */
4089 	for (i = 0; i < mp_maxid + 1; i++) {
4090 		bzero(&ups[i], sizeof(*ups));
4091 		if (internal || CPU_ABSENT(i))
4092 			continue;
4093 		cache = &z->uz_cpu[i];
4094 		bucket = (uma_bucket_t)atomic_load_ptr(&cache->uc_allocbucket);
4095 		if (bucket != NULL)
4096 			ups[i].ups_cache_free += bucket->ub_cnt;
4097 		bucket = (uma_bucket_t)atomic_load_ptr(&cache->uc_freebucket);
4098 		if (bucket != NULL)
4099 			ups[i].ups_cache_free += bucket->ub_cnt;
4100 		bucket = (uma_bucket_t)atomic_load_ptr(&cache->uc_crossbucket);
4101 		if (bucket != NULL)
4102 			ups[i].ups_cache_free += bucket->ub_cnt;
4103 		ups[i].ups_allocs = cache->uc_allocs;
4104 		ups[i].ups_frees = cache->uc_frees;
4105 	}
4106 }
4107 
4108 static int
4109 sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
4110 {
4111 	struct uma_stream_header ush;
4112 	struct uma_type_header uth;
4113 	struct uma_percpu_stat *ups;
4114 	struct sbuf sbuf;
4115 	uma_keg_t kz;
4116 	uma_zone_t z;
4117 	int count, error, i;
4118 
4119 	error = sysctl_wire_old_buffer(req, 0);
4120 	if (error != 0)
4121 		return (error);
4122 	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
4123 	sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL);
4124 	ups = malloc((mp_maxid + 1) * sizeof(*ups), M_TEMP, M_WAITOK);
4125 
4126 	count = 0;
4127 	rw_rlock(&uma_rwlock);
4128 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
4129 		LIST_FOREACH(z, &kz->uk_zones, uz_link)
4130 			count++;
4131 	}
4132 
4133 	LIST_FOREACH(z, &uma_cachezones, uz_link)
4134 		count++;
4135 
4136 	/*
4137 	 * Insert stream header.
4138 	 */
4139 	bzero(&ush, sizeof(ush));
4140 	ush.ush_version = UMA_STREAM_VERSION;
4141 	ush.ush_maxcpus = (mp_maxid + 1);
4142 	ush.ush_count = count;
4143 	(void)sbuf_bcat(&sbuf, &ush, sizeof(ush));
4144 
4145 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
4146 		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
4147 			bzero(&uth, sizeof(uth));
4148 			ZONE_LOCK(z);
4149 			strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
4150 			uth.uth_align = kz->uk_align;
4151 			uth.uth_size = kz->uk_size;
4152 			uth.uth_rsize = kz->uk_rsize;
4153 			if (z->uz_max_items > 0)
4154 				uth.uth_pages = (z->uz_items / kz->uk_ipers) *
4155 					kz->uk_ppera;
4156 			else
4157 				uth.uth_pages = kz->uk_pages;
4158 			uth.uth_maxpages = (z->uz_max_items / kz->uk_ipers) *
4159 			    kz->uk_ppera;
4160 			uth.uth_limit = z->uz_max_items;
4161 			uth.uth_keg_free = z->uz_keg->uk_free;
4162 
4163 			/*
4164 			 * A zone is secondary is it is not the first entry
4165 			 * on the keg's zone list.
4166 			 */
4167 			if ((z->uz_flags & UMA_ZONE_SECONDARY) &&
4168 			    (LIST_FIRST(&kz->uk_zones) != z))
4169 				uth.uth_zone_flags = UTH_ZONE_SECONDARY;
4170 			uma_vm_zone_stats(&uth, z, &sbuf, ups,
4171 			    kz->uk_flags & UMA_ZFLAG_INTERNAL);
4172 			ZONE_UNLOCK(z);
4173 			(void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
4174 			for (i = 0; i < mp_maxid + 1; i++)
4175 				(void)sbuf_bcat(&sbuf, &ups[i], sizeof(ups[i]));
4176 		}
4177 	}
4178 	LIST_FOREACH(z, &uma_cachezones, uz_link) {
4179 		bzero(&uth, sizeof(uth));
4180 		ZONE_LOCK(z);
4181 		strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
4182 		uth.uth_size = z->uz_size;
4183 		uma_vm_zone_stats(&uth, z, &sbuf, ups, false);
4184 		ZONE_UNLOCK(z);
4185 		(void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
4186 		for (i = 0; i < mp_maxid + 1; i++)
4187 			(void)sbuf_bcat(&sbuf, &ups[i], sizeof(ups[i]));
4188 	}
4189 
4190 	rw_runlock(&uma_rwlock);
4191 	error = sbuf_finish(&sbuf);
4192 	sbuf_delete(&sbuf);
4193 	free(ups, M_TEMP);
4194 	return (error);
4195 }
4196 
4197 int
4198 sysctl_handle_uma_zone_max(SYSCTL_HANDLER_ARGS)
4199 {
4200 	uma_zone_t zone = *(uma_zone_t *)arg1;
4201 	int error, max;
4202 
4203 	max = uma_zone_get_max(zone);
4204 	error = sysctl_handle_int(oidp, &max, 0, req);
4205 	if (error || !req->newptr)
4206 		return (error);
4207 
4208 	uma_zone_set_max(zone, max);
4209 
4210 	return (0);
4211 }
4212 
4213 int
4214 sysctl_handle_uma_zone_cur(SYSCTL_HANDLER_ARGS)
4215 {
4216 	uma_zone_t zone = *(uma_zone_t *)arg1;
4217 	int cur;
4218 
4219 	cur = uma_zone_get_cur(zone);
4220 	return (sysctl_handle_int(oidp, &cur, 0, req));
4221 }
4222 
4223 #ifdef INVARIANTS
4224 static uma_slab_t
4225 uma_dbg_getslab(uma_zone_t zone, void *item)
4226 {
4227 	uma_slab_t slab;
4228 	uma_keg_t keg;
4229 	uint8_t *mem;
4230 
4231 	mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
4232 	if (zone->uz_flags & UMA_ZONE_VTOSLAB) {
4233 		slab = vtoslab((vm_offset_t)mem);
4234 	} else {
4235 		/*
4236 		 * It is safe to return the slab here even though the
4237 		 * zone is unlocked because the item's allocation state
4238 		 * essentially holds a reference.
4239 		 */
4240 		if (zone->uz_lockptr == &zone->uz_lock)
4241 			return (NULL);
4242 		ZONE_LOCK(zone);
4243 		keg = zone->uz_keg;
4244 		if (keg->uk_flags & UMA_ZONE_HASH)
4245 			slab = hash_sfind(&keg->uk_hash, mem);
4246 		else
4247 			slab = (uma_slab_t)(mem + keg->uk_pgoff);
4248 		ZONE_UNLOCK(zone);
4249 	}
4250 
4251 	return (slab);
4252 }
4253 
4254 static bool
4255 uma_dbg_zskip(uma_zone_t zone, void *mem)
4256 {
4257 
4258 	if (zone->uz_lockptr == &zone->uz_lock)
4259 		return (true);
4260 
4261 	return (uma_dbg_kskip(zone->uz_keg, mem));
4262 }
4263 
4264 static bool
4265 uma_dbg_kskip(uma_keg_t keg, void *mem)
4266 {
4267 	uintptr_t idx;
4268 
4269 	if (dbg_divisor == 0)
4270 		return (true);
4271 
4272 	if (dbg_divisor == 1)
4273 		return (false);
4274 
4275 	idx = (uintptr_t)mem >> PAGE_SHIFT;
4276 	if (keg->uk_ipers > 1) {
4277 		idx *= keg->uk_ipers;
4278 		idx += ((uintptr_t)mem & PAGE_MASK) / keg->uk_rsize;
4279 	}
4280 
4281 	if ((idx / dbg_divisor) * dbg_divisor != idx) {
4282 		counter_u64_add(uma_skip_cnt, 1);
4283 		return (true);
4284 	}
4285 	counter_u64_add(uma_dbg_cnt, 1);
4286 
4287 	return (false);
4288 }
4289 
4290 /*
4291  * Set up the slab's freei data such that uma_dbg_free can function.
4292  *
4293  */
4294 static void
4295 uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item)
4296 {
4297 	uma_keg_t keg;
4298 	int freei;
4299 
4300 	if (slab == NULL) {
4301 		slab = uma_dbg_getslab(zone, item);
4302 		if (slab == NULL)
4303 			panic("uma: item %p did not belong to zone %s\n",
4304 			    item, zone->uz_name);
4305 	}
4306 	keg = slab->us_keg;
4307 	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
4308 
4309 	if (BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree))
4310 		panic("Duplicate alloc of %p from zone %p(%s) slab %p(%d)\n",
4311 		    item, zone, zone->uz_name, slab, freei);
4312 	BIT_SET_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree);
4313 
4314 	return;
4315 }
4316 
4317 /*
4318  * Verifies freed addresses.  Checks for alignment, valid slab membership
4319  * and duplicate frees.
4320  *
4321  */
4322 static void
4323 uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
4324 {
4325 	uma_keg_t keg;
4326 	int freei;
4327 
4328 	if (slab == NULL) {
4329 		slab = uma_dbg_getslab(zone, item);
4330 		if (slab == NULL)
4331 			panic("uma: Freed item %p did not belong to zone %s\n",
4332 			    item, zone->uz_name);
4333 	}
4334 	keg = slab->us_keg;
4335 	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
4336 
4337 	if (freei >= keg->uk_ipers)
4338 		panic("Invalid free of %p from zone %p(%s) slab %p(%d)\n",
4339 		    item, zone, zone->uz_name, slab, freei);
4340 
4341 	if (((freei * keg->uk_rsize) + slab->us_data) != item)
4342 		panic("Unaligned free of %p from zone %p(%s) slab %p(%d)\n",
4343 		    item, zone, zone->uz_name, slab, freei);
4344 
4345 	if (!BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree))
4346 		panic("Duplicate free of %p from zone %p(%s) slab %p(%d)\n",
4347 		    item, zone, zone->uz_name, slab, freei);
4348 
4349 	BIT_CLR_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree);
4350 }
4351 #endif /* INVARIANTS */
4352 
4353 #ifdef DDB
4354 static int64_t
4355 get_uma_stats(uma_keg_t kz, uma_zone_t z, uint64_t *allocs, uint64_t *used,
4356     uint64_t *sleeps, long *cachefree, uint64_t *xdomain)
4357 {
4358 	uint64_t frees;
4359 	int i;
4360 
4361 	if (kz->uk_flags & UMA_ZFLAG_INTERNAL) {
4362 		*allocs = counter_u64_fetch(z->uz_allocs);
4363 		frees = counter_u64_fetch(z->uz_frees);
4364 		*sleeps = z->uz_sleeps;
4365 		*cachefree = 0;
4366 		*xdomain = 0;
4367 	} else
4368 		uma_zone_sumstat(z, cachefree, allocs, &frees, sleeps,
4369 		    xdomain);
4370 	if (!((z->uz_flags & UMA_ZONE_SECONDARY) &&
4371 	    (LIST_FIRST(&kz->uk_zones) != z)))
4372 		*cachefree += kz->uk_free;
4373 	for (i = 0; i < vm_ndomains; i++)
4374 		*cachefree += z->uz_domain[i].uzd_nitems;
4375 	*used = *allocs - frees;
4376 	return (((int64_t)*used + *cachefree) * kz->uk_size);
4377 }
4378 
4379 DB_SHOW_COMMAND(uma, db_show_uma)
4380 {
4381 	const char *fmt_hdr, *fmt_entry;
4382 	uma_keg_t kz;
4383 	uma_zone_t z;
4384 	uint64_t allocs, used, sleeps, xdomain;
4385 	long cachefree;
4386 	/* variables for sorting */
4387 	uma_keg_t cur_keg;
4388 	uma_zone_t cur_zone, last_zone;
4389 	int64_t cur_size, last_size, size;
4390 	int ties;
4391 
4392 	/* /i option produces machine-parseable CSV output */
4393 	if (modif[0] == 'i') {
4394 		fmt_hdr = "%s,%s,%s,%s,%s,%s,%s,%s,%s\n";
4395 		fmt_entry = "\"%s\",%ju,%jd,%ld,%ju,%ju,%u,%jd,%ju\n";
4396 	} else {
4397 		fmt_hdr = "%18s %6s %7s %7s %11s %7s %7s %10s %8s\n";
4398 		fmt_entry = "%18s %6ju %7jd %7ld %11ju %7ju %7u %10jd %8ju\n";
4399 	}
4400 
4401 	db_printf(fmt_hdr, "Zone", "Size", "Used", "Free", "Requests",
4402 	    "Sleeps", "Bucket", "Total Mem", "XFree");
4403 
4404 	/* Sort the zones with largest size first. */
4405 	last_zone = NULL;
4406 	last_size = INT64_MAX;
4407 	for (;;) {
4408 		cur_zone = NULL;
4409 		cur_size = -1;
4410 		ties = 0;
4411 		LIST_FOREACH(kz, &uma_kegs, uk_link) {
4412 			LIST_FOREACH(z, &kz->uk_zones, uz_link) {
4413 				/*
4414 				 * In the case of size ties, print out zones
4415 				 * in the order they are encountered.  That is,
4416 				 * when we encounter the most recently output
4417 				 * zone, we have already printed all preceding
4418 				 * ties, and we must print all following ties.
4419 				 */
4420 				if (z == last_zone) {
4421 					ties = 1;
4422 					continue;
4423 				}
4424 				size = get_uma_stats(kz, z, &allocs, &used,
4425 				    &sleeps, &cachefree, &xdomain);
4426 				if (size > cur_size && size < last_size + ties)
4427 				{
4428 					cur_size = size;
4429 					cur_zone = z;
4430 					cur_keg = kz;
4431 				}
4432 			}
4433 		}
4434 		if (cur_zone == NULL)
4435 			break;
4436 
4437 		size = get_uma_stats(cur_keg, cur_zone, &allocs, &used,
4438 		    &sleeps, &cachefree, &xdomain);
4439 		db_printf(fmt_entry, cur_zone->uz_name,
4440 		    (uintmax_t)cur_keg->uk_size, (intmax_t)used, cachefree,
4441 		    (uintmax_t)allocs, (uintmax_t)sleeps,
4442 		    (unsigned)cur_zone->uz_count, (intmax_t)size, xdomain);
4443 
4444 		if (db_pager_quit)
4445 			return;
4446 		last_zone = cur_zone;
4447 		last_size = cur_size;
4448 	}
4449 }
4450 
4451 DB_SHOW_COMMAND(umacache, db_show_umacache)
4452 {
4453 	uma_zone_t z;
4454 	uint64_t allocs, frees;
4455 	long cachefree;
4456 	int i;
4457 
4458 	db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free",
4459 	    "Requests", "Bucket");
4460 	LIST_FOREACH(z, &uma_cachezones, uz_link) {
4461 		uma_zone_sumstat(z, &cachefree, &allocs, &frees, NULL, NULL);
4462 		for (i = 0; i < vm_ndomains; i++)
4463 			cachefree += z->uz_domain[i].uzd_nitems;
4464 		db_printf("%18s %8ju %8jd %8ld %12ju %8u\n",
4465 		    z->uz_name, (uintmax_t)z->uz_size,
4466 		    (intmax_t)(allocs - frees), cachefree,
4467 		    (uintmax_t)allocs, z->uz_count);
4468 		if (db_pager_quit)
4469 			return;
4470 	}
4471 }
4472 #endif	/* DDB */
4473