xref: /freebsd/sys/vm/uma_core.c (revision adc56f5a383771f594829b7db9c263b6f0dcf1bd)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2002-2019 Jeffrey Roberson <jeff@FreeBSD.org>
5  * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
6  * Copyright (c) 2004-2006 Robert N. M. Watson
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice unmodified, this list of conditions, and the following
14  *    disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /*
32  * uma_core.c  Implementation of the Universal Memory allocator
33  *
34  * This allocator is intended to replace the multitude of similar object caches
35  * in the standard FreeBSD kernel.  The intent is to be flexible as well as
36  * efficient.  A primary design goal is to return unused memory to the rest of
37  * the system.  This will make the system as a whole more flexible due to the
38  * ability to move memory to subsystems which most need it instead of leaving
39  * pools of reserved memory unused.
40  *
41  * The basic ideas stem from similar slab/zone based allocators whose algorithms
42  * are well known.
43  *
44  */
45 
46 /*
47  * TODO:
48  *	- Improve memory usage for large allocations
49  *	- Investigate cache size adjustments
50  */
51 
52 #include <sys/cdefs.h>
53 __FBSDID("$FreeBSD$");
54 
55 #include "opt_ddb.h"
56 #include "opt_param.h"
57 #include "opt_vm.h"
58 
59 #include <sys/param.h>
60 #include <sys/systm.h>
61 #include <sys/bitset.h>
62 #include <sys/domainset.h>
63 #include <sys/eventhandler.h>
64 #include <sys/kernel.h>
65 #include <sys/types.h>
66 #include <sys/limits.h>
67 #include <sys/queue.h>
68 #include <sys/malloc.h>
69 #include <sys/ktr.h>
70 #include <sys/lock.h>
71 #include <sys/sysctl.h>
72 #include <sys/mutex.h>
73 #include <sys/proc.h>
74 #include <sys/random.h>
75 #include <sys/rwlock.h>
76 #include <sys/sbuf.h>
77 #include <sys/sched.h>
78 #include <sys/smp.h>
79 #include <sys/taskqueue.h>
80 #include <sys/vmmeter.h>
81 
82 #include <vm/vm.h>
83 #include <vm/vm_domainset.h>
84 #include <vm/vm_object.h>
85 #include <vm/vm_page.h>
86 #include <vm/vm_pageout.h>
87 #include <vm/vm_param.h>
88 #include <vm/vm_phys.h>
89 #include <vm/vm_pagequeue.h>
90 #include <vm/vm_map.h>
91 #include <vm/vm_kern.h>
92 #include <vm/vm_extern.h>
93 #include <vm/uma.h>
94 #include <vm/uma_int.h>
95 #include <vm/uma_dbg.h>
96 
97 #include <ddb/ddb.h>
98 
99 #ifdef DEBUG_MEMGUARD
100 #include <vm/memguard.h>
101 #endif
102 
103 /*
104  * This is the zone and keg from which all zones are spawned.
105  */
106 static uma_zone_t kegs;
107 static uma_zone_t zones;
108 
109 /* This is the zone from which all offpage uma_slab_ts are allocated. */
110 static uma_zone_t slabzone;
111 
112 /*
113  * The initial hash tables come out of this zone so they can be allocated
114  * prior to malloc coming up.
115  */
116 static uma_zone_t hashzone;
117 
118 /* The boot-time adjusted value for cache line alignment. */
119 int uma_align_cache = 64 - 1;
120 
121 static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");
122 static MALLOC_DEFINE(M_UMA, "UMA", "UMA Misc");
123 
124 /*
125  * Are we allowed to allocate buckets?
126  */
127 static int bucketdisable = 1;
128 
129 /* Linked list of all kegs in the system */
130 static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);
131 
132 /* Linked list of all cache-only zones in the system */
133 static LIST_HEAD(,uma_zone) uma_cachezones =
134     LIST_HEAD_INITIALIZER(uma_cachezones);
135 
136 /* This RW lock protects the keg list */
137 static struct rwlock_padalign __exclusive_cache_line uma_rwlock;
138 
139 /*
140  * Pointer and counter for the pool of pages that is preallocated at
141  * startup to bootstrap UMA.
142  */
143 static char *bootmem;
144 static int boot_pages;
145 
146 static struct sx uma_reclaim_lock;
147 
148 /*
149  * kmem soft limit, initialized by uma_set_limit().  Ensure that early
150  * allocations don't trigger a wakeup of the reclaim thread.
151  */
152 unsigned long uma_kmem_limit = LONG_MAX;
153 SYSCTL_ULONG(_vm, OID_AUTO, uma_kmem_limit, CTLFLAG_RD, &uma_kmem_limit, 0,
154     "UMA kernel memory soft limit");
155 unsigned long uma_kmem_total;
156 SYSCTL_ULONG(_vm, OID_AUTO, uma_kmem_total, CTLFLAG_RD, &uma_kmem_total, 0,
157     "UMA kernel memory usage");
158 
159 /* Is the VM done starting up? */
160 static enum { BOOT_COLD = 0, BOOT_STRAPPED, BOOT_PAGEALLOC, BOOT_BUCKETS,
161     BOOT_RUNNING } booted = BOOT_COLD;
162 
163 /*
164  * This is the handle used to schedule events that need to happen
165  * outside of the allocation fast path.
166  */
167 static struct callout uma_callout;
168 #define	UMA_TIMEOUT	20		/* Seconds for callout interval. */
169 
170 /*
171  * This structure is passed as the zone ctor arg so that I don't have to create
172  * a special allocation function just for zones.
173  */
174 struct uma_zctor_args {
175 	const char *name;
176 	size_t size;
177 	uma_ctor ctor;
178 	uma_dtor dtor;
179 	uma_init uminit;
180 	uma_fini fini;
181 	uma_import import;
182 	uma_release release;
183 	void *arg;
184 	uma_keg_t keg;
185 	int align;
186 	uint32_t flags;
187 };
188 
189 struct uma_kctor_args {
190 	uma_zone_t zone;
191 	size_t size;
192 	uma_init uminit;
193 	uma_fini fini;
194 	int align;
195 	uint32_t flags;
196 };
197 
198 struct uma_bucket_zone {
199 	uma_zone_t	ubz_zone;
200 	char		*ubz_name;
201 	int		ubz_entries;	/* Number of items it can hold. */
202 	int		ubz_maxsize;	/* Maximum allocation size per-item. */
203 };
204 
205 /*
206  * Compute the actual number of bucket entries so that buckets pack into
207  * power-of-two allocation sizes, for more efficient space utilization.
208  */
209 #define	BUCKET_SIZE(n)						\
210     (((sizeof(void *) * (n)) - sizeof(struct uma_bucket)) / sizeof(void *))
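/*
 * Editorial note (not part of the original source): BUCKET_SIZE(n) sizes the
 * bucket so that the whole allocation occupies n pointer slots.  Assuming
 * 8-byte pointers, an n of 128 gives a 1024-byte allocation, of which
 * sizeof(struct uma_bucket) bytes are header and the remainder holds
 * (1024 - sizeof(struct uma_bucket)) / 8 item pointers.
 */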
211 
212 #define	BUCKET_MAX	BUCKET_SIZE(256)
213 #define	BUCKET_MIN	BUCKET_SIZE(4)
214 
215 struct uma_bucket_zone bucket_zones[] = {
216 	{ NULL, "4 Bucket", BUCKET_SIZE(4), 4096 },
217 	{ NULL, "6 Bucket", BUCKET_SIZE(6), 3072 },
218 	{ NULL, "8 Bucket", BUCKET_SIZE(8), 2048 },
219 	{ NULL, "12 Bucket", BUCKET_SIZE(12), 1536 },
220 	{ NULL, "16 Bucket", BUCKET_SIZE(16), 1024 },
221 	{ NULL, "32 Bucket", BUCKET_SIZE(32), 512 },
222 	{ NULL, "64 Bucket", BUCKET_SIZE(64), 256 },
223 	{ NULL, "128 Bucket", BUCKET_SIZE(128), 128 },
224 	{ NULL, "256 Bucket", BUCKET_SIZE(256), 64 },
225 	{ NULL, NULL, 0}
226 };
227 
228 /*
229  * Flags and enumerations to be passed to internal functions.
230  */
231 enum zfreeskip {
232 	SKIP_NONE =	0,
233 	SKIP_CNT =	0x00000001,
234 	SKIP_DTOR =	0x00010000,
235 	SKIP_FINI =	0x00020000,
236 };
237 
238 /* Prototypes. */
239 
240 int	uma_startup_count(int);
241 void	uma_startup(void *, int);
242 void	uma_startup1(void);
243 void	uma_startup2(void);
244 
245 static void *noobj_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
246 static void *page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
247 static void *pcpu_page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
248 static void *startup_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
249 static void page_free(void *, vm_size_t, uint8_t);
250 static void pcpu_page_free(void *, vm_size_t, uint8_t);
251 static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int, int, int);
252 static void cache_drain(uma_zone_t);
253 static void bucket_drain(uma_zone_t, uma_bucket_t);
254 static void bucket_cache_reclaim(uma_zone_t zone, bool);
255 static int keg_ctor(void *, int, void *, int);
256 static void keg_dtor(void *, int, void *);
257 static int zone_ctor(void *, int, void *, int);
258 static void zone_dtor(void *, int, void *);
259 static int zero_init(void *, int, int);
260 static void keg_small_init(uma_keg_t keg);
261 static void keg_large_init(uma_keg_t keg);
262 static void zone_foreach(void (*zfunc)(uma_zone_t, void *), void *);
263 static void zone_timeout(uma_zone_t zone, void *);
264 static int hash_alloc(struct uma_hash *, u_int);
265 static int hash_expand(struct uma_hash *, struct uma_hash *);
266 static void hash_free(struct uma_hash *hash);
267 static void uma_timeout(void *);
268 static void uma_startup3(void);
269 static void *zone_alloc_item(uma_zone_t, void *, int, int);
270 static void *zone_alloc_item_locked(uma_zone_t, void *, int, int);
271 static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip);
272 static void bucket_enable(void);
273 static void bucket_init(void);
274 static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int);
275 static void bucket_free(uma_zone_t zone, uma_bucket_t, void *);
276 static void bucket_zone_drain(void);
277 static uma_bucket_t zone_alloc_bucket(uma_zone_t, void *, int, int);
278 static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
279 static void slab_free_item(uma_zone_t zone, uma_slab_t slab, void *item);
280 static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
281     uma_fini fini, int align, uint32_t flags);
282 static int zone_import(uma_zone_t, void **, int, int, int);
283 static void zone_release(uma_zone_t, void **, int);
284 static void uma_zero_item(void *, uma_zone_t);
285 static bool cache_alloc(uma_zone_t, uma_cache_t, void *, int);
286 static bool cache_free(uma_zone_t, uma_cache_t, void *, void *, int);
287 
288 static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
289 static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);
290 static int sysctl_handle_uma_zone_allocs(SYSCTL_HANDLER_ARGS);
291 static int sysctl_handle_uma_zone_frees(SYSCTL_HANDLER_ARGS);
292 
293 #ifdef INVARIANTS
294 static bool uma_dbg_kskip(uma_keg_t keg, void *mem);
295 static bool uma_dbg_zskip(uma_zone_t zone, void *mem);
296 static void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item);
297 static void uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item);
298 
299 static SYSCTL_NODE(_vm, OID_AUTO, debug, CTLFLAG_RD, 0,
300     "Memory allocation debugging");
301 
302 static u_int dbg_divisor = 1;
303 SYSCTL_UINT(_vm_debug, OID_AUTO, divisor,
304     CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &dbg_divisor, 0,
305     "Debug & thrash every this item in memory allocator");
306 
307 static counter_u64_t uma_dbg_cnt = EARLY_COUNTER;
308 static counter_u64_t uma_skip_cnt = EARLY_COUNTER;
309 SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, trashed, CTLFLAG_RD,
310     &uma_dbg_cnt, "memory items debugged");
311 SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, skipped, CTLFLAG_RD,
312     &uma_skip_cnt, "memory items skipped, not debugged");
313 #endif
314 
315 SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);
316 
317 SYSCTL_NODE(_vm, OID_AUTO, uma, CTLFLAG_RW, 0, "Universal Memory Allocator");
318 
319 SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLTYPE_INT,
320     0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones");
321 
322 SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
323     0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");
324 
325 static int zone_warnings = 1;
326 SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RWTUN, &zone_warnings, 0,
327     "Warn when UMA zones becomes full");
328 
329 /*
330  * This routine checks to see whether or not it's safe to enable buckets.
331  */
332 static void
333 bucket_enable(void)
334 {
335 	bucketdisable = vm_page_count_min();
336 }
337 
338 /*
339  * Initialize bucket_zones, the array of zones of buckets of various sizes.
340  *
341  * For each zone, calculate the memory required for each bucket, consisting
342  * of the header and an array of pointers.
343  */
344 static void
345 bucket_init(void)
346 {
347 	struct uma_bucket_zone *ubz;
348 	int size;
349 
350 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) {
351 		size = roundup(sizeof(struct uma_bucket), sizeof(void *));
352 		size += sizeof(void *) * ubz->ubz_entries;
353 		ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
354 		    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
355 		    UMA_ZONE_MTXCLASS | UMA_ZFLAG_BUCKET | UMA_ZONE_NUMA);
356 	}
357 }
358 
359 /*
360  * Given a desired number of entries for a bucket, return the zone from which
361  * to allocate the bucket.
362  */
363 static struct uma_bucket_zone *
364 bucket_zone_lookup(int entries)
365 {
366 	struct uma_bucket_zone *ubz;
367 
368 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
369 		if (ubz->ubz_entries >= entries)
370 			return (ubz);
371 	ubz--;
372 	return (ubz);
373 }
374 
375 static struct uma_bucket_zone *
376 bucket_zone_max(uma_zone_t zone, int nitems)
377 {
378 	struct uma_bucket_zone *ubz;
379 	int bpcpu;
380 
381 	bpcpu = 2;
382 #ifdef UMA_XDOMAIN
383 	if ((zone->uz_flags & UMA_ZONE_NUMA) != 0)
384 		/* Count the cross-domain bucket. */
385 		bpcpu++;
386 #endif
387 
388 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
389 		if (ubz->ubz_entries * bpcpu * mp_ncpus > nitems)
390 			break;
391 	if (ubz == &bucket_zones[0])
392 		ubz = NULL;
393 	else
394 		ubz--;
395 	return (ubz);
396 }
397 
398 static int
399 bucket_select(int size)
400 {
401 	struct uma_bucket_zone *ubz;
402 
403 	ubz = &bucket_zones[0];
404 	if (size > ubz->ubz_maxsize)
405 		return MAX((ubz->ubz_maxsize * ubz->ubz_entries) / size, 1);
406 
407 	for (; ubz->ubz_entries != 0; ubz++)
408 		if (ubz->ubz_maxsize < size)
409 			break;
410 	ubz--;
411 	return (ubz->ubz_entries);
412 }
413 
414 static uma_bucket_t
415 bucket_alloc(uma_zone_t zone, void *udata, int flags)
416 {
417 	struct uma_bucket_zone *ubz;
418 	uma_bucket_t bucket;
419 
420 	/*
421 	 * This is to stop us from allocating per cpu buckets while we're
422 	 * running out of vm.boot_pages.  Otherwise, we would exhaust the
423 	 * boot pages.  This also prevents us from allocating buckets in
424 	 * low memory situations.
425 	 */
426 	if (bucketdisable)
427 		return (NULL);
428 	/*
429 	 * To limit bucket recursion we store the original zone flags
430 	 * in a cookie passed via zalloc_arg/zfree_arg.  This allows the
431 	 * NOVM flag to persist even through deep recursions.  We also
432 	 * store ZFLAG_BUCKET once we have recursed attempting to allocate
433 	 * a bucket for a bucket zone so we do not allow infinite bucket
434 	 * recursion.  This cookie will even persist to frees of unused
435 	 * buckets via the allocation path or bucket allocations in the
436 	 * free path.
437 	 */
438 	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
439 		udata = (void *)(uintptr_t)zone->uz_flags;
440 	else {
441 		if ((uintptr_t)udata & UMA_ZFLAG_BUCKET)
442 			return (NULL);
443 		udata = (void *)((uintptr_t)udata | UMA_ZFLAG_BUCKET);
444 	}
445 	if ((uintptr_t)udata & UMA_ZFLAG_CACHEONLY)
446 		flags |= M_NOVM;
447 	ubz = bucket_zone_lookup(zone->uz_bucket_size);
448 	if (ubz->ubz_zone == zone && (ubz + 1)->ubz_entries != 0)
449 		ubz++;
450 	bucket = uma_zalloc_arg(ubz->ubz_zone, udata, flags);
451 	if (bucket) {
452 #ifdef INVARIANTS
453 		bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
454 #endif
455 		bucket->ub_cnt = 0;
456 		bucket->ub_entries = ubz->ubz_entries;
457 	}
458 
459 	return (bucket);
460 }
461 
462 static void
463 bucket_free(uma_zone_t zone, uma_bucket_t bucket, void *udata)
464 {
465 	struct uma_bucket_zone *ubz;
466 
467 	KASSERT(bucket->ub_cnt == 0,
468 	    ("bucket_free: Freeing a non free bucket."));
469 	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
470 		udata = (void *)(uintptr_t)zone->uz_flags;
471 	ubz = bucket_zone_lookup(bucket->ub_entries);
472 	uma_zfree_arg(ubz->ubz_zone, bucket, udata);
473 }
474 
475 static void
476 bucket_zone_drain(void)
477 {
478 	struct uma_bucket_zone *ubz;
479 
480 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
481 		uma_zone_reclaim(ubz->ubz_zone, UMA_RECLAIM_DRAIN);
482 }
483 
484 /*
485  * Attempt to satisfy an allocation by retrieving a full bucket from one of the
486  * zone's caches.
487  */
488 static uma_bucket_t
489 zone_fetch_bucket(uma_zone_t zone, uma_zone_domain_t zdom)
490 {
491 	uma_bucket_t bucket;
492 
493 	ZONE_LOCK_ASSERT(zone);
494 
495 	if ((bucket = TAILQ_FIRST(&zdom->uzd_buckets)) != NULL) {
496 		MPASS(zdom->uzd_nitems >= bucket->ub_cnt);
497 		TAILQ_REMOVE(&zdom->uzd_buckets, bucket, ub_link);
498 		zdom->uzd_nitems -= bucket->ub_cnt;
499 		if (zdom->uzd_imin > zdom->uzd_nitems)
500 			zdom->uzd_imin = zdom->uzd_nitems;
501 		zone->uz_bkt_count -= bucket->ub_cnt;
502 	}
503 	return (bucket);
504 }
505 
506 /*
507  * Insert a full bucket into the specified cache.  The "ws" parameter indicates
508  * whether the bucket's contents should be counted as part of the zone's working
509  * set.
510  */
511 static void
512 zone_put_bucket(uma_zone_t zone, uma_zone_domain_t zdom, uma_bucket_t bucket,
513     const bool ws)
514 {
515 
516 	ZONE_LOCK_ASSERT(zone);
517 	KASSERT(!ws || zone->uz_bkt_count < zone->uz_bkt_max,
518 	    ("%s: zone %p overflow", __func__, zone));
519 
520 	if (ws)
521 		TAILQ_INSERT_HEAD(&zdom->uzd_buckets, bucket, ub_link);
522 	else
523 		TAILQ_INSERT_TAIL(&zdom->uzd_buckets, bucket, ub_link);
524 	zdom->uzd_nitems += bucket->ub_cnt;
525 	if (ws && zdom->uzd_imax < zdom->uzd_nitems)
526 		zdom->uzd_imax = zdom->uzd_nitems;
527 	zone->uz_bkt_count += bucket->ub_cnt;
528 }
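/*
 * Editorial note (not part of the original source): head insertion for
 * working-set buckets pairs with bucket_cache_reclaim() below, which frees
 * buckets from the tail of uzd_buckets, so buckets excluded from the working
 * set (ws == false) are the first to be reclaimed.
 */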
529 
530 static void
531 zone_log_warning(uma_zone_t zone)
532 {
533 	static const struct timeval warninterval = { 300, 0 };
534 
535 	if (!zone_warnings || zone->uz_warning == NULL)
536 		return;
537 
538 	if (ratecheck(&zone->uz_ratecheck, &warninterval))
539 		printf("[zone: %s] %s\n", zone->uz_name, zone->uz_warning);
540 }
541 
542 static inline void
543 zone_maxaction(uma_zone_t zone)
544 {
545 
546 	if (zone->uz_maxaction.ta_func != NULL)
547 		taskqueue_enqueue(taskqueue_thread, &zone->uz_maxaction);
548 }
549 
550 /*
551  * Routine called by timeout which fires off time-interval-based
552  * calculations (stats, hash size, etc.).
553  *
554  * Arguments:
555  *	arg   Unused
556  *
557  * Returns:
558  *	Nothing
559  */
560 static void
561 uma_timeout(void *unused)
562 {
563 	bucket_enable();
564 	zone_foreach(zone_timeout, NULL);
565 
566 	/* Reschedule this event */
567 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
568 }
569 
570 /*
571  * Update the working set size estimate for the zone's bucket cache.
572  * The constants chosen here are somewhat arbitrary.  With an update period of
573  * 20s (UMA_TIMEOUT), this estimate is dominated by zone activity over the
574  * last 100s.
575  */
576 static void
577 zone_domain_update_wss(uma_zone_domain_t zdom)
578 {
579 	long wss;
580 
581 	MPASS(zdom->uzd_imax >= zdom->uzd_imin);
582 	wss = zdom->uzd_imax - zdom->uzd_imin;
583 	zdom->uzd_imax = zdom->uzd_imin = zdom->uzd_nitems;
584 	zdom->uzd_wss = (4 * wss + zdom->uzd_wss) / 5;
585 }
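/*
 * Worked example (editorial annotation, not from the original source): the
 * 4/5 weighting above is an exponential moving average in which the previous
 * estimate retains a weight of (1/5)^k after k update periods, so with the
 * 20-second UMA_TIMEOUT any history older than 100 seconds (five periods)
 * contributes well under 0.1% of uzd_wss.
 */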
586 
587 /*
588  * Routine to perform timeout driven calculations.  This expands the
589  * hashes and updates the per-domain working set estimates.
590  *
591  *  Returns nothing.
592  */
593 static void
594 zone_timeout(uma_zone_t zone, void *unused)
595 {
596 	uma_keg_t keg;
597 	u_int slabs;
598 
599 	if ((zone->uz_flags & UMA_ZONE_HASH) == 0)
600 		goto update_wss;
601 
602 	keg = zone->uz_keg;
603 	KEG_LOCK(keg);
604 	/*
605 	 * Expand the keg hash table.
606 	 *
607 	 * This is done if the number of slabs is larger than the hash size.
608 	 * What I'm trying to do here is completely reduce collisions.  This
609 	 * may be a little aggressive.  Should I allow for two collisions max?
610 	 */
611 	if (keg->uk_flags & UMA_ZONE_HASH &&
612 	    (slabs = keg->uk_pages / keg->uk_ppera) >
613 	     keg->uk_hash.uh_hashsize) {
614 		struct uma_hash newhash;
615 		struct uma_hash oldhash;
616 		int ret;
617 
618 		/*
619 		 * This is so involved because allocating and freeing
620 		 * while the keg lock is held will lead to deadlock.
621 		 * I have to do everything in stages and check for
622 		 * races.
623 		 */
624 		KEG_UNLOCK(keg);
625 		ret = hash_alloc(&newhash, 1 << fls(slabs));
626 		KEG_LOCK(keg);
627 		if (ret) {
628 			if (hash_expand(&keg->uk_hash, &newhash)) {
629 				oldhash = keg->uk_hash;
630 				keg->uk_hash = newhash;
631 			} else
632 				oldhash = newhash;
633 
634 			KEG_UNLOCK(keg);
635 			hash_free(&oldhash);
636 			return;
637 		}
638 	}
639 	KEG_UNLOCK(keg);
640 
641 update_wss:
642 	ZONE_LOCK(zone);
643 	for (int i = 0; i < vm_ndomains; i++)
644 		zone_domain_update_wss(&zone->uz_domain[i]);
645 	ZONE_UNLOCK(zone);
646 }
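/*
 * Illustrative note (editorial, not in the original source): the hash is
 * grown once the slab count exceeds the table size, and the new size is the
 * next power of two above the slab count.  For example, a keg with 100
 * one-page slabs and a 64-entry table is expanded to 1 << fls(100), i.e.
 * 128 entries, keeping roughly one slab per hash chain.
 */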
647 
648 /*
649  * Allocate and zero fill a hash table of the requested size from the
650  * backing store.
651  *
652  * Arguments:
653  *	hash  A new hash structure; size is the requested number of entries
654  *
655  * Returns:
656  *	1 on success and 0 on failure.
657  */
658 static int
659 hash_alloc(struct uma_hash *hash, u_int size)
660 {
661 	size_t alloc;
662 
663 	KASSERT(powerof2(size), ("hash size must be power of 2"));
664 	if (size > UMA_HASH_SIZE_INIT)  {
665 		hash->uh_hashsize = size;
666 		alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
667 		hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
668 		    M_UMAHASH, M_NOWAIT);
669 	} else {
670 		alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
671 		hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
672 		    UMA_ANYDOMAIN, M_WAITOK);
673 		hash->uh_hashsize = UMA_HASH_SIZE_INIT;
674 	}
675 	if (hash->uh_slab_hash) {
676 		bzero(hash->uh_slab_hash, alloc);
677 		hash->uh_hashmask = hash->uh_hashsize - 1;
678 		return (1);
679 	}
680 
681 	return (0);
682 }
683 
684 /*
685  * Expands the hash table for HASH zones.  This is done from zone_timeout
686  * to reduce collisions.  This must not be done in the regular allocation
687  * path; otherwise we can recurse on the VM while allocating pages.
688  *
689  * Arguments:
690  *	oldhash  The hash you want to expand
691  *	newhash  The hash structure for the new table
692  *
693  * Returns:
694  *	1 if the entries were rehashed into the new table, 0 otherwise
695  *
696  * Discussion:
697  */
698 static int
699 hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
700 {
701 	uma_slab_t slab;
702 	u_int hval;
703 	u_int idx;
704 
705 	if (!newhash->uh_slab_hash)
706 		return (0);
707 
708 	if (oldhash->uh_hashsize >= newhash->uh_hashsize)
709 		return (0);
710 
711 	/*
712 	 * I need to investigate hash algorithms for resizing without a
713 	 * full rehash.
714 	 */
715 
716 	for (idx = 0; idx < oldhash->uh_hashsize; idx++)
717 		while (!SLIST_EMPTY(&oldhash->uh_slab_hash[idx])) {
718 			slab = SLIST_FIRST(&oldhash->uh_slab_hash[idx]);
719 			SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[idx], us_hlink);
720 			hval = UMA_HASH(newhash, slab->us_data);
721 			SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
722 			    slab, us_hlink);
723 		}
724 
725 	return (1);
726 }
727 
728 /*
729  * Free the hash bucket to the appropriate backing store.
730  *
731  * Arguments:
732  *	hash  The hash structure whose bucket array we're freeing; the
733  *	      backing store is chosen based on uh_hashsize
734  *
735  * Returns:
736  *	Nothing
737  */
738 static void
739 hash_free(struct uma_hash *hash)
740 {
741 	if (hash->uh_slab_hash == NULL)
742 		return;
743 	if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
744 		zone_free_item(hashzone, hash->uh_slab_hash, NULL, SKIP_NONE);
745 	else
746 		free(hash->uh_slab_hash, M_UMAHASH);
747 }
748 
749 /*
750  * Frees all outstanding items in a bucket
751  *
752  * Arguments:
753  *	zone   The zone to free to, must be unlocked.
754  *	bucket The free/alloc bucket with items, cpu queue must be locked.
755  *
756  * Returns:
757  *	Nothing
758  */
759 
760 static void
761 bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
762 {
763 	int i;
764 
765 	if (bucket == NULL)
766 		return;
767 
768 	if (zone->uz_fini)
769 		for (i = 0; i < bucket->ub_cnt; i++)
770 			zone->uz_fini(bucket->ub_bucket[i], zone->uz_size);
771 	zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt);
772 	if (zone->uz_max_items > 0) {
773 		ZONE_LOCK(zone);
774 		zone->uz_items -= bucket->ub_cnt;
775 		if (zone->uz_sleepers && zone->uz_items < zone->uz_max_items)
776 			wakeup_one(zone);
777 		ZONE_UNLOCK(zone);
778 	}
779 	bucket->ub_cnt = 0;
780 }
781 
782 /*
783  * Drains the per cpu caches for a zone.
784  *
785  * NOTE: This may only be called while the zone is being torn down, and not
786  * during normal operation.  This is necessary in order that we do not have
787  * to migrate CPUs to drain the per-CPU caches.
788  *
789  * Arguments:
790  *	zone     The zone to drain, must be unlocked.
791  *
792  * Returns:
793  *	Nothing
794  */
795 static void
796 cache_drain(uma_zone_t zone)
797 {
798 	uma_cache_t cache;
799 	int cpu;
800 
801 	/*
802 	 * XXX: It is safe to not lock the per-CPU caches, because we're
803 	 * tearing down the zone anyway.  I.e., there will be no further use
804 	 * of the caches at this point.
805 	 *
806  * XXX: It would be good to be able to assert that the zone is being
807 	 * torn down to prevent improper use of cache_drain().
808 	 *
809 	 * XXX: We lock the zone before passing into bucket_cache_reclaim() as
810 	 * it is used elsewhere.  Should the tear-down path be made special
811 	 * there in some form?
812 	 */
813 	CPU_FOREACH(cpu) {
814 		cache = &zone->uz_cpu[cpu];
815 		bucket_drain(zone, cache->uc_allocbucket);
816 		if (cache->uc_allocbucket != NULL)
817 			bucket_free(zone, cache->uc_allocbucket, NULL);
818 		cache->uc_allocbucket = NULL;
819 		bucket_drain(zone, cache->uc_freebucket);
820 		if (cache->uc_freebucket != NULL)
821 			bucket_free(zone, cache->uc_freebucket, NULL);
822 		cache->uc_freebucket = NULL;
823 		bucket_drain(zone, cache->uc_crossbucket);
824 		if (cache->uc_crossbucket != NULL)
825 			bucket_free(zone, cache->uc_crossbucket, NULL);
826 		cache->uc_crossbucket = NULL;
827 	}
828 	ZONE_LOCK(zone);
829 	bucket_cache_reclaim(zone, true);
830 	ZONE_UNLOCK(zone);
831 }
832 
833 static void
834 cache_shrink(uma_zone_t zone, void *unused)
835 {
836 
837 	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
838 		return;
839 
840 	ZONE_LOCK(zone);
841 	zone->uz_bucket_size =
842 	    (zone->uz_bucket_size_min + zone->uz_bucket_size) / 2;
843 	ZONE_UNLOCK(zone);
844 }
845 
846 static void
847 cache_drain_safe_cpu(uma_zone_t zone, void *unused)
848 {
849 	uma_cache_t cache;
850 	uma_bucket_t b1, b2, b3;
851 	int domain;
852 
853 	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
854 		return;
855 
856 	b1 = b2 = b3 = NULL;
857 	ZONE_LOCK(zone);
858 	critical_enter();
859 	if (zone->uz_flags & UMA_ZONE_NUMA)
860 		domain = PCPU_GET(domain);
861 	else
862 		domain = 0;
863 	cache = &zone->uz_cpu[curcpu];
864 	if (cache->uc_allocbucket) {
865 		if (cache->uc_allocbucket->ub_cnt != 0)
866 			zone_put_bucket(zone, &zone->uz_domain[domain],
867 			    cache->uc_allocbucket, false);
868 		else
869 			b1 = cache->uc_allocbucket;
870 		cache->uc_allocbucket = NULL;
871 	}
872 	if (cache->uc_freebucket) {
873 		if (cache->uc_freebucket->ub_cnt != 0)
874 			zone_put_bucket(zone, &zone->uz_domain[domain],
875 			    cache->uc_freebucket, false);
876 		else
877 			b2 = cache->uc_freebucket;
878 		cache->uc_freebucket = NULL;
879 	}
880 	b3 = cache->uc_crossbucket;
881 	cache->uc_crossbucket = NULL;
882 	critical_exit();
883 	ZONE_UNLOCK(zone);
884 	if (b1)
885 		bucket_free(zone, b1, NULL);
886 	if (b2)
887 		bucket_free(zone, b2, NULL);
888 	if (b3) {
889 		bucket_drain(zone, b3);
890 		bucket_free(zone, b3, NULL);
891 	}
892 }
893 
894 /*
895  * Safely drain the per-CPU caches of a zone (or of all zones) into the zone bucket cache.
896  * This is an expensive call because it needs to bind to all CPUs
897  * one by one and enter a critical section on each of them in order
898  * to safely access their cache buckets.
899  * The zone lock must not be held when calling this function.
900  */
901 static void
902 pcpu_cache_drain_safe(uma_zone_t zone)
903 {
904 	int cpu;
905 
906 	/*
907 	 * Politely shrinking the bucket sizes was not enough; shrink aggressively.
908 	 */
909 	if (zone)
910 		cache_shrink(zone, NULL);
911 	else
912 		zone_foreach(cache_shrink, NULL);
913 
914 	CPU_FOREACH(cpu) {
915 		thread_lock(curthread);
916 		sched_bind(curthread, cpu);
917 		thread_unlock(curthread);
918 
919 		if (zone)
920 			cache_drain_safe_cpu(zone, NULL);
921 		else
922 			zone_foreach(cache_drain_safe_cpu, NULL);
923 	}
924 	thread_lock(curthread);
925 	sched_unbind(curthread);
926 	thread_unlock(curthread);
927 }
928 
929 /*
930  * Reclaim cached buckets from a zone.  All buckets are reclaimed if the caller
931  * requested a drain, otherwise each per-domain cache is trimmed to its
932  * estimated working set size.
933  */
934 static void
935 bucket_cache_reclaim(uma_zone_t zone, bool drain)
936 {
937 	uma_zone_domain_t zdom;
938 	uma_bucket_t bucket;
939 	long target, tofree;
940 	int i;
941 
942 	for (i = 0; i < vm_ndomains; i++) {
943 		zdom = &zone->uz_domain[i];
944 
945 		/*
946 		 * If we were asked to drain the zone, we are done only once
947 		 * this bucket cache is empty.  Otherwise, we reclaim items in
948 		 * excess of the zone's estimated working set size.  If the
949 		 * difference nitems - imin is larger than the WSS estimate,
950 		 * then the estimate will grow at the end of this interval and
951 		 * we ignore the historical average.
952 		 */
953 		target = drain ? 0 : lmax(zdom->uzd_wss, zdom->uzd_nitems -
954 		    zdom->uzd_imin);
955 		while (zdom->uzd_nitems > target) {
956 			bucket = TAILQ_LAST(&zdom->uzd_buckets, uma_bucketlist);
957 			if (bucket == NULL)
958 				break;
959 			tofree = bucket->ub_cnt;
960 			TAILQ_REMOVE(&zdom->uzd_buckets, bucket, ub_link);
961 			zdom->uzd_nitems -= tofree;
962 
963 			/*
964 			 * Shift the bounds of the current WSS interval to avoid
965 			 * perturbing the estimate.
966 			 */
967 			zdom->uzd_imax -= lmin(zdom->uzd_imax, tofree);
968 			zdom->uzd_imin -= lmin(zdom->uzd_imin, tofree);
969 
970 			ZONE_UNLOCK(zone);
971 			bucket_drain(zone, bucket);
972 			bucket_free(zone, bucket, NULL);
973 			ZONE_LOCK(zone);
974 		}
975 	}
976 
977 	/*
978 	 * Shrink the zone bucket size to ensure that the per-CPU caches
979 	 * don't grow too large.
980 	 */
981 	if (zone->uz_bucket_size > zone->uz_bucket_size_min)
982 		zone->uz_bucket_size--;
983 }
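/*
 * Worked example (editorial annotation, not from the original source): for a
 * trim (drain == false) of a domain with uzd_nitems = 100, uzd_imin = 40 and
 * uzd_wss = 50, the target above is lmax(50, 100 - 40) = 60, so whole
 * buckets are freed from the tail of the list until at most 60 cached items
 * remain in that domain.
 */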
984 
985 static void
986 keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
987 {
988 	uint8_t *mem;
989 	int i;
990 	uint8_t flags;
991 
992 	CTR4(KTR_UMA, "keg_free_slab keg %s(%p) slab %p, returning %d bytes",
993 	    keg->uk_name, keg, slab, PAGE_SIZE * keg->uk_ppera);
994 
995 	mem = slab->us_data;
996 	flags = slab->us_flags;
997 	i = start;
998 	if (keg->uk_fini != NULL) {
999 		for (i--; i > -1; i--)
1000 #ifdef INVARIANTS
1001 		/*
1002 		 * trash_fini implies that the dtor was trash_dtor.  trash_fini
1003 		 * would check that the memory hasn't been modified since free,
1004 		 * which executed trash_dtor.
1005 		 * That's why we need to run the uma_dbg_kskip() check here,
1006 		 * even though we don't make the skip check for other init/fini
1007 		 * invocations.
1008 		 */
1009 		if (!uma_dbg_kskip(keg, slab->us_data + (keg->uk_rsize * i)) ||
1010 		    keg->uk_fini != trash_fini)
1011 #endif
1012 			keg->uk_fini(slab->us_data + (keg->uk_rsize * i),
1013 			    keg->uk_size);
1014 	}
1015 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
1016 		zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
1017 	keg->uk_freef(mem, PAGE_SIZE * keg->uk_ppera, flags);
1018 	uma_total_dec(PAGE_SIZE * keg->uk_ppera);
1019 }
1020 
1021 /*
1022  * Frees pages from a keg back to the system.  This is done on demand from
1023  * the pageout daemon.
1024  *
1025  * Returns nothing.
1026  */
1027 static void
1028 keg_drain(uma_keg_t keg)
1029 {
1030 	struct slabhead freeslabs = { 0 };
1031 	uma_domain_t dom;
1032 	uma_slab_t slab, tmp;
1033 	int i;
1034 
1035 	/*
1036 	 * We don't want to take pages from statically allocated kegs at this
1037 	 * time.
1038 	 */
1039 	if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
1040 		return;
1041 
1042 	CTR3(KTR_UMA, "keg_drain %s(%p) free items: %u",
1043 	    keg->uk_name, keg, keg->uk_free);
1044 	KEG_LOCK(keg);
1045 	if (keg->uk_free == 0)
1046 		goto finished;
1047 
1048 	for (i = 0; i < vm_ndomains; i++) {
1049 		dom = &keg->uk_domain[i];
1050 		LIST_FOREACH_SAFE(slab, &dom->ud_free_slab, us_link, tmp) {
1051 			/* We have nowhere to free these to. */
1052 			if (slab->us_flags & UMA_SLAB_BOOT)
1053 				continue;
1054 
1055 			LIST_REMOVE(slab, us_link);
1056 			keg->uk_pages -= keg->uk_ppera;
1057 			keg->uk_free -= keg->uk_ipers;
1058 
1059 			if (keg->uk_flags & UMA_ZONE_HASH)
1060 				UMA_HASH_REMOVE(&keg->uk_hash, slab,
1061 				    slab->us_data);
1062 
1063 			SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);
1064 		}
1065 	}
1066 
1067 finished:
1068 	KEG_UNLOCK(keg);
1069 
1070 	while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
1071 		SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
1072 		keg_free_slab(keg, slab, keg->uk_ipers);
1073 	}
1074 }
1075 
1076 static void
1077 zone_reclaim(uma_zone_t zone, int waitok, bool drain)
1078 {
1079 
1080 	/*
1081 	 * Set draining to interlock with zone_dtor() so we can release our
1082 	 * locks as we go.  Only dtor() should do a WAITOK call since it
1083 	 * is the only call that knows the structure will still be available
1084 	 * when it wakes up.
1085 	 */
1086 	ZONE_LOCK(zone);
1087 	while (zone->uz_flags & UMA_ZFLAG_RECLAIMING) {
1088 		if (waitok == M_NOWAIT)
1089 			goto out;
1090 		msleep(zone, zone->uz_lockptr, PVM, "zonedrain", 1);
1091 	}
1092 	zone->uz_flags |= UMA_ZFLAG_RECLAIMING;
1093 	bucket_cache_reclaim(zone, drain);
1094 	ZONE_UNLOCK(zone);
1095 
1096 	/*
1097 	 * The RECLAIMING flag protects us from being freed while
1098 	 * we're running.  Normally the uma_rwlock would protect us but we
1099 	 * must be able to release and acquire the right lock for each keg.
1100 	 */
1101 	if ((zone->uz_flags & UMA_ZFLAG_CACHE) == 0)
1102 		keg_drain(zone->uz_keg);
1103 	ZONE_LOCK(zone);
1104 	zone->uz_flags &= ~UMA_ZFLAG_RECLAIMING;
1105 	wakeup(zone);
1106 out:
1107 	ZONE_UNLOCK(zone);
1108 }
1109 
1110 static void
1111 zone_drain(uma_zone_t zone, void *unused)
1112 {
1113 
1114 	zone_reclaim(zone, M_NOWAIT, true);
1115 }
1116 
1117 static void
1118 zone_trim(uma_zone_t zone, void *unused)
1119 {
1120 
1121 	zone_reclaim(zone, M_NOWAIT, false);
1122 }
1123 
1124 /*
1125  * Allocate a new slab for a keg.  This does not insert the slab onto a list.
1126  * If the allocation was successful, the keg lock will be held upon return,
1127  * otherwise the keg will be left unlocked.
1128  *
1129  * Arguments:
1130  *	flags   Wait flags for the item initialization routine
1131  *	aflags  Wait flags for the slab allocation
1132  *
1133  * Returns:
1134  *	The slab that was allocated or NULL if there is no memory and the
1135  *	caller specified M_NOWAIT.
1136  */
1137 static uma_slab_t
1138 keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int domain, int flags,
1139     int aflags)
1140 {
1141 	uma_alloc allocf;
1142 	uma_slab_t slab;
1143 	unsigned long size;
1144 	uint8_t *mem;
1145 	uint8_t sflags;
1146 	int i;
1147 
1148 	KASSERT(domain >= 0 && domain < vm_ndomains,
1149 	    ("keg_alloc_slab: domain %d out of range", domain));
1150 	KEG_LOCK_ASSERT(keg);
1151 	MPASS(zone->uz_lockptr == &keg->uk_lock);
1152 
1153 	allocf = keg->uk_allocf;
1154 	KEG_UNLOCK(keg);
1155 
1156 	slab = NULL;
1157 	mem = NULL;
1158 	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
1159 		slab = zone_alloc_item(keg->uk_slabzone, NULL, domain, aflags);
1160 		if (slab == NULL)
1161 			goto out;
1162 	}
1163 
1164 	/*
1165 	 * This reproduces the old vm_zone behavior of zero filling pages the
1166 	 * first time they are added to a zone.
1167 	 *
1168 	 * Malloced items are zeroed in uma_zalloc.
1169 	 */
1170 
1171 	if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
1172 		aflags |= M_ZERO;
1173 	else
1174 		aflags &= ~M_ZERO;
1175 
1176 	if (keg->uk_flags & UMA_ZONE_NODUMP)
1177 		aflags |= M_NODUMP;
1178 
1179 	/* zone is passed for legacy reasons. */
1180 	size = keg->uk_ppera * PAGE_SIZE;
1181 	mem = allocf(zone, size, domain, &sflags, aflags);
1182 	if (mem == NULL) {
1183 		if (keg->uk_flags & UMA_ZONE_OFFPAGE)
1184 			zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
1185 		slab = NULL;
1186 		goto out;
1187 	}
1188 	uma_total_inc(size);
1189 
1190 	/* Point the slab into the allocated memory */
1191 	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
1192 		slab = (uma_slab_t )(mem + keg->uk_pgoff);
1193 
1194 	if (keg->uk_flags & UMA_ZONE_VTOSLAB)
1195 		for (i = 0; i < keg->uk_ppera; i++)
1196 			vsetzoneslab((vm_offset_t)mem + (i * PAGE_SIZE),
1197 			    zone, slab);
1198 
1199 	slab->us_data = mem;
1200 	slab->us_freecount = keg->uk_ipers;
1201 	slab->us_flags = sflags;
1202 	slab->us_domain = domain;
1203 	BIT_FILL(SLAB_SETSIZE, &slab->us_free);
1204 #ifdef INVARIANTS
1205 	BIT_ZERO(SLAB_SETSIZE, &slab->us_debugfree);
1206 #endif
1207 
1208 	if (keg->uk_init != NULL) {
1209 		for (i = 0; i < keg->uk_ipers; i++)
1210 			if (keg->uk_init(slab->us_data + (keg->uk_rsize * i),
1211 			    keg->uk_size, flags) != 0)
1212 				break;
1213 		if (i != keg->uk_ipers) {
1214 			keg_free_slab(keg, slab, i);
1215 			slab = NULL;
1216 			goto out;
1217 		}
1218 	}
1219 	KEG_LOCK(keg);
1220 
1221 	CTR3(KTR_UMA, "keg_alloc_slab: allocated slab %p for %s(%p)",
1222 	    slab, keg->uk_name, keg);
1223 
1224 	if (keg->uk_flags & UMA_ZONE_HASH)
1225 		UMA_HASH_INSERT(&keg->uk_hash, slab, mem);
1226 
1227 	keg->uk_pages += keg->uk_ppera;
1228 	keg->uk_free += keg->uk_ipers;
1229 
1230 out:
1231 	return (slab);
1232 }
1233 
1234 /*
1235  * This function is intended to be used early on in place of page_alloc() so
1236  * that we may use the boot time page cache to satisfy allocations before
1237  * the VM is ready.
1238  */
1239 static void *
1240 startup_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
1241     int wait)
1242 {
1243 	uma_keg_t keg;
1244 	void *mem;
1245 	int pages;
1246 
1247 	keg = zone->uz_keg;
1248 	/*
1249 	 * If we are in BOOT_BUCKETS or higher, then switch to the real
1250 	 * allocator.  Zones with page-sized slabs switch at BOOT_PAGEALLOC.
1251 	 */
1252 	switch (booted) {
1253 		case BOOT_COLD:
1254 		case BOOT_STRAPPED:
1255 			break;
1256 		case BOOT_PAGEALLOC:
1257 			if (keg->uk_ppera > 1)
1258 				break;
1259 		case BOOT_BUCKETS:
1260 		case BOOT_RUNNING:
1261 #ifdef UMA_MD_SMALL_ALLOC
1262 			keg->uk_allocf = (keg->uk_ppera > 1) ?
1263 			    page_alloc : uma_small_alloc;
1264 #else
1265 			keg->uk_allocf = page_alloc;
1266 #endif
1267 			return keg->uk_allocf(zone, bytes, domain, pflag, wait);
1268 	}
1269 
1270 	/*
1271 	 * Check our small startup cache to see if it has pages remaining.
1272 	 */
1273 	pages = howmany(bytes, PAGE_SIZE);
1274 	KASSERT(pages > 0, ("%s can't reserve 0 pages", __func__));
1275 	if (pages > boot_pages)
1276 		panic("UMA zone \"%s\": Increase vm.boot_pages", zone->uz_name);
1277 #ifdef DIAGNOSTIC
1278 	printf("%s from \"%s\", %d boot pages left\n", __func__, zone->uz_name,
1279 	    boot_pages);
1280 #endif
1281 	mem = bootmem;
1282 	boot_pages -= pages;
1283 	bootmem += pages * PAGE_SIZE;
1284 	*pflag = UMA_SLAB_BOOT;
1285 
1286 	return (mem);
1287 }
1288 
1289 /*
1290  * Allocates a number of pages from the system
1291  *
1292  * Arguments:
1293  *	bytes  The number of bytes requested
1294  *	wait  Shall we wait?
1295  *
1296  * Returns:
1297  *	A pointer to the allocated memory or possibly
1298  *	NULL if M_NOWAIT is set.
1299  */
1300 static void *
1301 page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
1302     int wait)
1303 {
1304 	void *p;	/* Returned page */
1305 
1306 	*pflag = UMA_SLAB_KERNEL;
1307 	p = (void *)kmem_malloc_domainset(DOMAINSET_FIXED(domain), bytes, wait);
1308 
1309 	return (p);
1310 }
1311 
1312 static void *
1313 pcpu_page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
1314     int wait)
1315 {
1316 	struct pglist alloctail;
1317 	vm_offset_t addr, zkva;
1318 	int cpu, flags;
1319 	vm_page_t p, p_next;
1320 #ifdef NUMA
1321 	struct pcpu *pc;
1322 #endif
1323 
1324 	MPASS(bytes == (mp_maxid + 1) * PAGE_SIZE);
1325 
1326 	TAILQ_INIT(&alloctail);
1327 	flags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
1328 	    malloc2vm_flags(wait);
1329 	*pflag = UMA_SLAB_KERNEL;
1330 	for (cpu = 0; cpu <= mp_maxid; cpu++) {
1331 		if (CPU_ABSENT(cpu)) {
1332 			p = vm_page_alloc(NULL, 0, flags);
1333 		} else {
1334 #ifndef NUMA
1335 			p = vm_page_alloc(NULL, 0, flags);
1336 #else
1337 			pc = pcpu_find(cpu);
1338 			p = vm_page_alloc_domain(NULL, 0, pc->pc_domain, flags);
1339 			if (__predict_false(p == NULL))
1340 				p = vm_page_alloc(NULL, 0, flags);
1341 #endif
1342 		}
1343 		if (__predict_false(p == NULL))
1344 			goto fail;
1345 		TAILQ_INSERT_TAIL(&alloctail, p, listq);
1346 	}
1347 	if ((addr = kva_alloc(bytes)) == 0)
1348 		goto fail;
1349 	zkva = addr;
1350 	TAILQ_FOREACH(p, &alloctail, listq) {
1351 		pmap_qenter(zkva, &p, 1);
1352 		zkva += PAGE_SIZE;
1353 	}
1354 	return ((void*)addr);
1355 fail:
1356 	TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
1357 		vm_page_unwire_noq(p);
1358 		vm_page_free(p);
1359 	}
1360 	return (NULL);
1361 }
1362 
1363 /*
1364  * Allocates a number of pages not belonging to a VM object
1365  *
1366  * Arguments:
1367  *	bytes  The number of bytes requested
1368  *	wait   Shall we wait?
1369  *
1370  * Returns:
1371  *	A pointer to the allocated memory or possibly
1372  *	NULL if M_NOWAIT is set.
1373  */
1374 static void *
1375 noobj_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags,
1376     int wait)
1377 {
1378 	TAILQ_HEAD(, vm_page) alloctail;
1379 	u_long npages;
1380 	vm_offset_t retkva, zkva;
1381 	vm_page_t p, p_next;
1382 	uma_keg_t keg;
1383 
1384 	TAILQ_INIT(&alloctail);
1385 	keg = zone->uz_keg;
1386 
1387 	npages = howmany(bytes, PAGE_SIZE);
1388 	while (npages > 0) {
1389 		p = vm_page_alloc_domain(NULL, 0, domain, VM_ALLOC_INTERRUPT |
1390 		    VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
1391 		    ((wait & M_WAITOK) != 0 ? VM_ALLOC_WAITOK :
1392 		    VM_ALLOC_NOWAIT));
1393 		if (p != NULL) {
1394 			/*
1395 			 * Since the page does not belong to an object, its
1396 			 * listq is unused.
1397 			 */
1398 			TAILQ_INSERT_TAIL(&alloctail, p, listq);
1399 			npages--;
1400 			continue;
1401 		}
1402 		/*
1403 		 * Page allocation failed, free intermediate pages and
1404 		 * exit.
1405 		 */
1406 		TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
1407 			vm_page_unwire_noq(p);
1408 			vm_page_free(p);
1409 		}
1410 		return (NULL);
1411 	}
1412 	*flags = UMA_SLAB_PRIV;
1413 	zkva = keg->uk_kva +
1414 	    atomic_fetchadd_long(&keg->uk_offset, round_page(bytes));
1415 	retkva = zkva;
1416 	TAILQ_FOREACH(p, &alloctail, listq) {
1417 		pmap_qenter(zkva, &p, 1);
1418 		zkva += PAGE_SIZE;
1419 	}
1420 
1421 	return ((void *)retkva);
1422 }
1423 
1424 /*
1425  * Frees a number of pages to the system
1426  *
1427  * Arguments:
1428  *	mem   A pointer to the memory to be freed
1429  *	size  The size of the memory being freed
1430  *	flags The original p->us_flags field
1431  *
1432  * Returns:
1433  *	Nothing
1434  */
1435 static void
1436 page_free(void *mem, vm_size_t size, uint8_t flags)
1437 {
1438 
1439 	if ((flags & UMA_SLAB_KERNEL) == 0)
1440 		panic("UMA: page_free used with invalid flags %x", flags);
1441 
1442 	kmem_free((vm_offset_t)mem, size);
1443 }
1444 
1445 /*
1446  * Frees pcpu zone allocations
1447  *
1448  * Arguments:
1449  *	mem   A pointer to the memory to be freed
1450  *	size  The size of the memory being freed
1451  *	flags The original p->us_flags field
1452  *
1453  * Returns:
1454  *	Nothing
1455  */
1456 static void
1457 pcpu_page_free(void *mem, vm_size_t size, uint8_t flags)
1458 {
1459 	vm_offset_t sva, curva;
1460 	vm_paddr_t paddr;
1461 	vm_page_t m;
1462 
1463 	MPASS(size == (mp_maxid+1)*PAGE_SIZE);
1464 	sva = (vm_offset_t)mem;
1465 	for (curva = sva; curva < sva + size; curva += PAGE_SIZE) {
1466 		paddr = pmap_kextract(curva);
1467 		m = PHYS_TO_VM_PAGE(paddr);
1468 		vm_page_unwire_noq(m);
1469 		vm_page_free(m);
1470 	}
1471 	pmap_qremove(sva, size >> PAGE_SHIFT);
1472 	kva_free(sva, size);
1473 }
1474 
1475 
1476 /*
1477  * Zero fill initializer
1478  *
1479  * Arguments/Returns follow uma_init specifications
1480  */
1481 static int
1482 zero_init(void *mem, int size, int flags)
1483 {
1484 	bzero(mem, size);
1485 	return (0);
1486 }
1487 
1488 /*
1489  * Finish creating a small uma keg.  This calculates ipers and the keg size.
1490  *
1491  * Arguments
1492  *	keg  The keg we should initialize
1493  *
1494  * Returns
1495  *	Nothing
1496  */
1497 static void
1498 keg_small_init(uma_keg_t keg)
1499 {
1500 	u_int rsize;
1501 	u_int memused;
1502 	u_int wastedspace;
1503 	u_int shsize;
1504 	u_int slabsize;
1505 
1506 	if (keg->uk_flags & UMA_ZONE_PCPU) {
1507 		u_int ncpus = (mp_maxid + 1) ? (mp_maxid + 1) : MAXCPU;
1508 
1509 		slabsize = UMA_PCPU_ALLOC_SIZE;
1510 		keg->uk_ppera = ncpus;
1511 	} else {
1512 		slabsize = UMA_SLAB_SIZE;
1513 		keg->uk_ppera = 1;
1514 	}
1515 
1516 	/*
1517 	 * Calculate the size of each allocation (rsize) according to
1518 	 * alignment.  If the requested size is smaller than what we can
1519 	 * track with the per-slab allocation bitset, round it up.
1520 	 */
1521 	rsize = keg->uk_size;
1522 	if (rsize < slabsize / SLAB_SETSIZE)
1523 		rsize = slabsize / SLAB_SETSIZE;
1524 	if (rsize & keg->uk_align)
1525 		rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1);
1526 	keg->uk_rsize = rsize;
1527 
1528 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0 ||
1529 	    keg->uk_rsize < UMA_PCPU_ALLOC_SIZE,
1530 	    ("%s: size %u too large", __func__, keg->uk_rsize));
1531 
1532 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
1533 		shsize = 0;
1534 	else
1535 		shsize = SIZEOF_UMA_SLAB;
1536 
1537 	if (rsize <= slabsize - shsize)
1538 		keg->uk_ipers = (slabsize - shsize) / rsize;
1539 	else {
1540 		/* Handle special case when we have 1 item per slab, so
1541 		 * alignment requirement can be relaxed. */
1542 		KASSERT(keg->uk_size <= slabsize - shsize,
1543 		    ("%s: size %u greater than slab", __func__, keg->uk_size));
1544 		keg->uk_ipers = 1;
1545 	}
1546 	KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
1547 	    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
1548 
1549 	memused = keg->uk_ipers * rsize + shsize;
1550 	wastedspace = slabsize - memused;
1551 
1552 	/*
1553 	 * We can't do OFFPAGE if we're internal or if we've been
1554 	 * asked not to go to the VM for buckets.  If we did, we might
1555 	 * end up going to the VM for slabs, which we do not want to
1556 	 * do if we're UMA_ZFLAG_CACHEONLY as a result of
1557 	 * UMA_ZONE_VM, which clearly forbids it.
1558 	 */
1559 	if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
1560 	    (keg->uk_flags & UMA_ZFLAG_CACHEONLY))
1561 		return;
1562 
1563 	/*
1564 	 * See if using an OFFPAGE slab will limit our waste.  Only do
1565 	 * this if it permits more items per-slab.
1566 	 *
1567 	 * XXX We could try growing slabsize to limit max waste as well.
1568 	 * Historically this was not done because the VM could not
1569 	 * efficiently handle contiguous allocations.
1570 	 */
1571 	if ((wastedspace >= slabsize / UMA_MAX_WASTE) &&
1572 	    (keg->uk_ipers < (slabsize / keg->uk_rsize))) {
1573 		keg->uk_ipers = slabsize / keg->uk_rsize;
1574 		KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
1575 		    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
1576 		CTR6(KTR_UMA, "UMA decided we need offpage slab headers for "
1577 		    "keg: %s(%p), calculated wastedspace = %d, "
1578 		    "maximum wasted space allowed = %d, "
1579 		    "calculated ipers = %d, "
1580 		    "new wasted space = %d\n", keg->uk_name, keg, wastedspace,
1581 		    slabsize / UMA_MAX_WASTE, keg->uk_ipers,
1582 		    slabsize - keg->uk_ipers * keg->uk_rsize);
1583 		/*
1584 		 * If we had access to memory to embed a slab header we
1585 		 * also have a page structure to use vtoslab() instead of
1586 		 * hash to find slabs.  If the zone was explicitly created
1587 		 * OFFPAGE we can't necessarily touch the memory.
1588 		 */
1589 		if ((keg->uk_flags & UMA_ZONE_OFFPAGE) == 0)
1590 			keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
1591 	}
1592 
1593 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
1594 	    (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1595 		keg->uk_flags |= UMA_ZONE_HASH;
1596 }
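/*
 * Worked example (editorial annotation, not from the original source;
 * assumes 4 KB pages, pointer alignment, and, purely for illustration, an
 * inline slab header of 48 bytes): for a 256-byte item, rsize remains 256,
 * uk_ipers = (4096 - 48) / 256 = 15, and memused = 15 * 256 + 48 = 3888,
 * leaving 208 bytes of waste per slab.  The UMA_MAX_WASTE comparison in the
 * function above then decides whether moving the header OFFPAGE, which would
 * allow 16 items per slab, is worthwhile.
 */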
1597 
1598 /*
1599  * Finish creating a large (> UMA_SLAB_SIZE) uma keg.  Just give in and do
1600  * OFFPAGE for now.  When I can allow for more dynamic slab sizes this will be
1601  * more complicated.
1602  *
1603  * Arguments
1604  *	keg  The keg we should initialize
1605  *
1606  * Returns
1607  *	Nothing
1608  */
1609 static void
1610 keg_large_init(uma_keg_t keg)
1611 {
1612 
1613 	KASSERT(keg != NULL, ("Keg is null in keg_large_init"));
1614 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
1615 	    ("%s: Cannot large-init a UMA_ZONE_PCPU keg", __func__));
1616 
1617 	keg->uk_ppera = howmany(keg->uk_size, PAGE_SIZE);
1618 	keg->uk_ipers = 1;
1619 	keg->uk_rsize = keg->uk_size;
1620 
1621 	/* Check whether we have enough space to not do OFFPAGE. */
1622 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) == 0 &&
1623 	    PAGE_SIZE * keg->uk_ppera - keg->uk_rsize < SIZEOF_UMA_SLAB) {
1624 		/*
1625 		 * We can't do OFFPAGE if we're internal, in which case
1626 		 * we need an extra page per allocation to contain the
1627 		 * slab header.
1628 		 */
1629 		if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) == 0)
1630 			keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
1631 		else
1632 			keg->uk_ppera++;
1633 	}
1634 
1635 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
1636 	    (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1637 		keg->uk_flags |= UMA_ZONE_HASH;
1638 }
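/*
 * Illustrative note (editorial, not from the original source; assumes 4 KB
 * pages): a keg with 5000-byte items gets uk_ppera = howmany(5000, 4096) = 2
 * pages and uk_ipers = 1.  The 8192 - 5000 = 3192 leftover bytes comfortably
 * hold an inline slab header, so such a keg is not forced OFFPAGE by the
 * check above.
 */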
1639 
1640 static void
1641 keg_cachespread_init(uma_keg_t keg)
1642 {
1643 	int alignsize;
1644 	int trailer;
1645 	int pages;
1646 	int rsize;
1647 
1648 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
1649 	    ("%s: Cannot cachespread-init a UMA_ZONE_PCPU keg", __func__));
1650 
1651 	alignsize = keg->uk_align + 1;
1652 	rsize = keg->uk_size;
1653 	/*
1654 	 * We want one item to start on every align boundary in a page.  To
1655 	 * do this we will span pages.  We will also extend the item by the
1656 	 * size of align if it is an even multiple of align.  Otherwise, it
1657 	 * would fall on the same boundary every time.
1658 	 */
1659 	if (rsize & keg->uk_align)
1660 		rsize = (rsize & ~keg->uk_align) + alignsize;
1661 	if ((rsize & alignsize) == 0)
1662 		rsize += alignsize;
1663 	trailer = rsize - keg->uk_size;
1664 	pages = (rsize * (PAGE_SIZE / alignsize)) / PAGE_SIZE;
1665 	pages = MIN(pages, (128 * 1024) / PAGE_SIZE);
1666 	keg->uk_rsize = rsize;
1667 	keg->uk_ppera = pages;
1668 	keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize;
1669 	keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
1670 	KASSERT(keg->uk_ipers <= SLAB_SETSIZE,
1671 	    ("%s: keg->uk_ipers too high(%d) increase max_ipers", __func__,
1672 	    keg->uk_ipers));
1673 }
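/*
 * Worked example (editorial annotation, not from the original source;
 * assumes 4 KB pages and 64-byte cache-line alignment): for a 128-byte item,
 * rsize is already aligned but is an even multiple of 64, so it is padded to
 * 192 bytes.  pages = (192 * (4096 / 64)) / 4096 = 3 and
 * uk_ipers = (3 * 4096 + 64) / 192 = 64, so consecutive items start at
 * different 64-byte offsets within a page instead of landing on the same
 * cache lines.
 */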
1674 
1675 /*
1676  * Keg header ctor.  This initializes all fields, locks, etc.  And inserts
1677  * the keg onto the global keg list.
1678  *
1679  * Arguments/Returns follow uma_ctor specifications
1680  *	udata  Actually uma_kctor_args
1681  */
1682 static int
1683 keg_ctor(void *mem, int size, void *udata, int flags)
1684 {
1685 	struct uma_kctor_args *arg = udata;
1686 	uma_keg_t keg = mem;
1687 	uma_zone_t zone;
1688 
1689 	bzero(keg, size);
1690 	keg->uk_size = arg->size;
1691 	keg->uk_init = arg->uminit;
1692 	keg->uk_fini = arg->fini;
1693 	keg->uk_align = arg->align;
1694 	keg->uk_free = 0;
1695 	keg->uk_reserve = 0;
1696 	keg->uk_pages = 0;
1697 	keg->uk_flags = arg->flags;
1698 	keg->uk_slabzone = NULL;
1699 
1700 	/*
1701 	 * We use a global round-robin policy by default.  Zones with
1702 	 * UMA_ZONE_NUMA set will use first-touch instead, in which case the
1703 	 * iterator is never run.
1704 	 */
1705 	keg->uk_dr.dr_policy = DOMAINSET_RR();
1706 	keg->uk_dr.dr_iter = 0;
1707 
1708 	/*
1709 	 * The master zone is passed to us at keg-creation time.
1710 	 */
1711 	zone = arg->zone;
1712 	keg->uk_name = zone->uz_name;
1713 
1714 	if (arg->flags & UMA_ZONE_VM)
1715 		keg->uk_flags |= UMA_ZFLAG_CACHEONLY;
1716 
1717 	if (arg->flags & UMA_ZONE_ZINIT)
1718 		keg->uk_init = zero_init;
1719 
1720 	if (arg->flags & UMA_ZONE_MALLOC)
1721 		keg->uk_flags |= UMA_ZONE_VTOSLAB;
1722 
1723 	if (arg->flags & UMA_ZONE_PCPU)
1724 #ifdef SMP
1725 		keg->uk_flags |= UMA_ZONE_OFFPAGE;
1726 #else
1727 		keg->uk_flags &= ~UMA_ZONE_PCPU;
1728 #endif
1729 
1730 	if (keg->uk_flags & UMA_ZONE_CACHESPREAD) {
1731 		keg_cachespread_init(keg);
1732 	} else {
1733 		if (keg->uk_size > UMA_SLAB_SPACE)
1734 			keg_large_init(keg);
1735 		else
1736 			keg_small_init(keg);
1737 	}
1738 
1739 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
1740 		keg->uk_slabzone = slabzone;
1741 
1742 	/*
1743 	 * If we haven't booted yet we need allocations to go through the
1744 	 * startup cache until the vm is ready.
1745 	 */
1746 	if (booted < BOOT_PAGEALLOC)
1747 		keg->uk_allocf = startup_alloc;
1748 #ifdef UMA_MD_SMALL_ALLOC
1749 	else if (keg->uk_ppera == 1)
1750 		keg->uk_allocf = uma_small_alloc;
1751 #endif
1752 	else if (keg->uk_flags & UMA_ZONE_PCPU)
1753 		keg->uk_allocf = pcpu_page_alloc;
1754 	else
1755 		keg->uk_allocf = page_alloc;
1756 #ifdef UMA_MD_SMALL_ALLOC
1757 	if (keg->uk_ppera == 1)
1758 		keg->uk_freef = uma_small_free;
1759 	else
1760 #endif
1761 	if (keg->uk_flags & UMA_ZONE_PCPU)
1762 		keg->uk_freef = pcpu_page_free;
1763 	else
1764 		keg->uk_freef = page_free;
1765 
1766 	/*
1767 	 * Initialize keg's lock
1768 	 */
1769 	KEG_LOCK_INIT(keg, (arg->flags & UMA_ZONE_MTXCLASS));
1770 
1771 	/*
1772 	 * If we're putting the slab header in the actual page we need to
1773 	 * figure out where in each page it goes.  See SIZEOF_UMA_SLAB
1774 	 * macro definition.
1775 	 */
1776 	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) {
1777 		keg->uk_pgoff = (PAGE_SIZE * keg->uk_ppera) - SIZEOF_UMA_SLAB;
1778 		/*
1779 		 * The only way the following is possible is if, with our
1780 		 * UMA_ALIGN_PTR adjustments, we are now bigger than
1781 		 * UMA_SLAB_SIZE.  I haven't checked whether this is
1782 		 * mathematically possible for all cases, so we make
1783 		 * sure here anyway.
1784 		 */
1785 		KASSERT(keg->uk_pgoff + sizeof(struct uma_slab) <=
1786 		    PAGE_SIZE * keg->uk_ppera,
1787 		    ("zone %s ipers %d rsize %d size %d slab won't fit",
1788 		    zone->uz_name, keg->uk_ipers, keg->uk_rsize, keg->uk_size));
1789 	}
1790 
1791 	if (keg->uk_flags & UMA_ZONE_HASH)
1792 		hash_alloc(&keg->uk_hash, 0);
1793 
1794 	CTR5(KTR_UMA, "keg_ctor %p zone %s(%p) out %d free %d\n",
1795 	    keg, zone->uz_name, zone,
1796 	    (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free,
1797 	    keg->uk_free);
1798 
1799 	LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);
1800 
1801 	rw_wlock(&uma_rwlock);
1802 	LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
1803 	rw_wunlock(&uma_rwlock);
1804 	return (0);
1805 }
1806 
1807 static void
1808 zone_alloc_counters(uma_zone_t zone, void *unused)
1809 {
1810 
1811 	zone->uz_allocs = counter_u64_alloc(M_WAITOK);
1812 	zone->uz_frees = counter_u64_alloc(M_WAITOK);
1813 	zone->uz_fails = counter_u64_alloc(M_WAITOK);
1814 }
1815 
1816 #define	UMA_MAX_DUP	999
1817 static void
1818 zone_alloc_sysctl(uma_zone_t zone, void *unused)
1819 {
1820 	uma_zone_domain_t zdom;
1821 	uma_keg_t keg;
1822 	struct sysctl_oid *oid, *domainoid;
1823 	int domains, i;
1824 	static const char *nokeg = "cache zone";
1825 	char *c;
1826 
1827 	/*
1828 	 * Make a sysctl safe copy of the zone name by removing
1829 	 * any special characters and handling dups by appending
1830 	 * an index.
1831 	 */
1832 	if (zone->uz_namecnt != 0) {
1833 		if (zone->uz_namecnt > UMA_MAX_DUP)
1834 			zone->uz_namecnt = UMA_MAX_DUP;
1835 		zone->uz_ctlname = malloc(strlen(zone->uz_name) +
1836 		    sizeof(__XSTRING(UMA_MAX_DUP)) + 1 , M_UMA, M_WAITOK);
1837 		sprintf(zone->uz_ctlname, "%s_%d", zone->uz_name,
1838 		    zone->uz_namecnt);
1839 	} else
1840 		zone->uz_ctlname = strdup(zone->uz_name, M_UMA);
1841 	for (c = zone->uz_ctlname; *c != '\0'; c++)
1842 		if (strchr("./\\ -", *c) != NULL)
1843 			*c = '_';
1844 
1845 	/*
1846 	 * Basic parameters at the root.
1847 	 */
1848 	zone->uz_oid = SYSCTL_ADD_NODE(NULL, SYSCTL_STATIC_CHILDREN(_vm_uma),
1849 	    OID_AUTO, zone->uz_ctlname, CTLFLAG_RD, NULL, "");
1850 	oid = zone->uz_oid;
1851 	SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1852 	    "size", CTLFLAG_RD, &zone->uz_size, 0, "Allocation size");
1853 	SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1854 	    "flags", CTLFLAG_RD, &zone->uz_flags, 0,
1855 	    "Allocator configuration flags");
1856 	SYSCTL_ADD_U16(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1857 	    "bucket_size", CTLFLAG_RD, &zone->uz_bucket_size, 0,
1858 	    "Desired per-cpu cache size");
1859 	SYSCTL_ADD_U16(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1860 	    "bucket_size_max", CTLFLAG_RD, &zone->uz_bucket_size_max, 0,
1861 	    "Maximum allowed per-cpu cache size");
1862 
1863 	/*
1864 	 * Add the keg node; fill in its parameters if the zone has a keg.
1865 	 */
1866 	oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(zone->uz_oid), OID_AUTO,
1867 	    "keg", CTLFLAG_RD, NULL, "");
1868 	keg = zone->uz_keg;
1869 	if ((zone->uz_flags & UMA_ZFLAG_CACHEONLY) == 0) {
1870 		SYSCTL_ADD_CONST_STRING(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1871 		    "name", CTLFLAG_RD, keg->uk_name, "Keg name");
1872 		SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1873 		    "rsize", CTLFLAG_RD, &keg->uk_rsize, 0,
1874 		    "Real object size with alignment");
1875 		SYSCTL_ADD_U16(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1876 		    "ppera", CTLFLAG_RD, &keg->uk_ppera, 0,
1877 		    "pages per-slab allocation");
1878 		SYSCTL_ADD_U16(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1879 		    "ipers", CTLFLAG_RD, &keg->uk_ipers, 0,
1880 		    "items available per-slab");
1881 		SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1882 		    "align", CTLFLAG_RD, &keg->uk_align, 0,
1883 		    "item alignment mask");
1884 		SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1885 		    "pages", CTLFLAG_RD, &keg->uk_pages, 0,
1886 		    "Total pages currently allocated from VM");
1887 		SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1888 		    "free", CTLFLAG_RD, &keg->uk_free, 0,
1889 		    "items free in the slab layer");
1890 	} else
1891 		SYSCTL_ADD_CONST_STRING(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1892 		    "name", CTLFLAG_RD, nokeg, "Keg name");
1893 
1894 	/*
1895 	 * Information about zone limits.
1896 	 */
1897 	oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(zone->uz_oid), OID_AUTO,
1898 	    "limit", CTLFLAG_RD, NULL, "");
1899 	SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1900 	    "items", CTLFLAG_RD, &zone->uz_items, 0,
1901 	    "current number of cached items");
1902 	SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1903 	    "max_items", CTLFLAG_RD, &zone->uz_max_items, 0,
1904 	    "Maximum number of cached items");
1905 	SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1906 	    "sleepers", CTLFLAG_RD, &zone->uz_sleepers, 0,
1907 	    "Number of threads sleeping at limit");
1908 	SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1909 	    "sleeps", CTLFLAG_RD, &zone->uz_sleeps, 0,
1910 	    "Total zone limit sleeps");
1911 
1912 	/*
1913 	 * Per-domain information.
1914 	 */
1915 	if ((zone->uz_flags & UMA_ZONE_NUMA) != 0)
1916 		domains = vm_ndomains;
1917 	else
1918 		domains = 1;
1919 	domainoid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(zone->uz_oid),
1920 	    OID_AUTO, "domain", CTLFLAG_RD, NULL, "");
1921 	for (i = 0; i < domains; i++) {
1922 		zdom = &zone->uz_domain[i];
1923 		oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(domainoid),
1924 		    OID_AUTO, VM_DOMAIN(i)->vmd_name, CTLFLAG_RD, NULL, "");
1925 		SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1926 		    "nitems", CTLFLAG_RD, &zdom->uzd_nitems,
1927 		    "number of items in this domain");
1928 		SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1929 		    "imax", CTLFLAG_RD, &zdom->uzd_imax,
1930 		    "maximum item count in this period");
1931 		SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1932 		    "imin", CTLFLAG_RD, &zdom->uzd_imin,
1933 		    "minimum item count in this period");
1934 		SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1935 		    "wss", CTLFLAG_RD, &zdom->uzd_wss,
1936 		    "Working set size");
1937 	}
1938 
1939 	/*
1940 	 * General statistics.
1941 	 */
1942 	oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(zone->uz_oid), OID_AUTO,
1943 	    "stats", CTLFLAG_RD, NULL, "");
1944 	SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1945 	    "current", CTLFLAG_RD | CTLTYPE_INT | CTLFLAG_MPSAFE,
1946 	    zone, 1, sysctl_handle_uma_zone_cur, "I",
1947 	    "Current number of allocated items");
1948 	SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1949 	    "allocs", CTLFLAG_RD | CTLTYPE_U64 | CTLFLAG_MPSAFE,
1950 	    zone, 0, sysctl_handle_uma_zone_allocs, "QU",
1951 	    "Total allocation calls");
1952 	SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1953 	    "frees", CTLFLAG_RD | CTLTYPE_U64 | CTLFLAG_MPSAFE,
1954 	    zone, 0, sysctl_handle_uma_zone_frees, "QU",
1955 	    "Total free calls");
1956 	SYSCTL_ADD_COUNTER_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1957 	    "fails", CTLFLAG_RD, &zone->uz_fails,
1958 	    "Number of allocation failures");
1959 	SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1960 	    "xdomain", CTLFLAG_RD, &zone->uz_xdomain, 0,
1961 	    "Free calls from the wrong domain");
1962 }
1963 
1964 struct uma_zone_count {
1965 	const char	*name;
1966 	int		count;
1967 };
1968 
1969 static void
1970 zone_count(uma_zone_t zone, void *arg)
1971 {
1972 	struct uma_zone_count *cnt;
1973 
1974 	cnt = arg;
1975 	if (strcmp(zone->uz_name, cnt->name) == 0)
1976 		cnt->count++;
1977 }
1978 
1979 /*
1980  * Zone header ctor.  This initializes all fields, locks, etc.
1981  *
1982  * Arguments/Returns follow uma_ctor specifications
1983  *	udata  Actually uma_zctor_args
1984  */
1985 static int
1986 zone_ctor(void *mem, int size, void *udata, int flags)
1987 {
1988 	struct uma_zone_count cnt;
1989 	struct uma_zctor_args *arg = udata;
1990 	uma_zone_t zone = mem;
1991 	uma_zone_t z;
1992 	uma_keg_t keg;
1993 	int i;
1994 
1995 	bzero(zone, size);
1996 	zone->uz_name = arg->name;
1997 	zone->uz_ctor = arg->ctor;
1998 	zone->uz_dtor = arg->dtor;
1999 	zone->uz_init = NULL;
2000 	zone->uz_fini = NULL;
2001 	zone->uz_sleeps = 0;
2002 	zone->uz_xdomain = 0;
2003 	zone->uz_bucket_size = 0;
2004 	zone->uz_bucket_size_min = 0;
2005 	zone->uz_bucket_size_max = BUCKET_MAX;
2006 	zone->uz_flags = 0;
2007 	zone->uz_warning = NULL;
2008 	/* The domain structures follow the cpu structures. */
2009 	zone->uz_domain = (struct uma_zone_domain *)&zone->uz_cpu[mp_ncpus];
2010 	zone->uz_bkt_max = ULONG_MAX;
2011 	timevalclear(&zone->uz_ratecheck);
2012 
2013 	/* Count the number of duplicate names. */
2014 	cnt.name = arg->name;
2015 	cnt.count = 0;
2016 	zone_foreach(zone_count, &cnt);
2017 	zone->uz_namecnt = cnt.count;
2018 
2019 	for (i = 0; i < vm_ndomains; i++)
2020 		TAILQ_INIT(&zone->uz_domain[i].uzd_buckets);
2021 
2022 #ifdef INVARIANTS
2023 	if (arg->uminit == trash_init && arg->fini == trash_fini)
2024 		zone->uz_flags |= UMA_ZFLAG_TRASH;
2025 #endif
2026 
2027 	/*
2028 	 * This is a pure cache zone, no kegs.
2029 	 */
2030 	if (arg->import) {
2031 		if (arg->flags & UMA_ZONE_VM)
2032 			arg->flags |= UMA_ZFLAG_CACHEONLY;
2033 		zone->uz_flags = arg->flags;
2034 		zone->uz_size = arg->size;
2035 		zone->uz_import = arg->import;
2036 		zone->uz_release = arg->release;
2037 		zone->uz_arg = arg->arg;
2038 		zone->uz_lockptr = &zone->uz_lock;
2039 		ZONE_LOCK_INIT(zone, (arg->flags & UMA_ZONE_MTXCLASS));
2040 		rw_wlock(&uma_rwlock);
2041 		LIST_INSERT_HEAD(&uma_cachezones, zone, uz_link);
2042 		rw_wunlock(&uma_rwlock);
2043 		goto out;
2044 	}
2045 
2046 	/*
2047 	 * Use the regular zone/keg/slab allocator.
2048 	 */
2049 	zone->uz_import = (uma_import)zone_import;
2050 	zone->uz_release = (uma_release)zone_release;
2051 	zone->uz_arg = zone;
2052 	keg = arg->keg;
2053 
2054 	if (arg->flags & UMA_ZONE_SECONDARY) {
2055 		KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0,
2056 		    ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
2057 		KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
2058 		zone->uz_init = arg->uminit;
2059 		zone->uz_fini = arg->fini;
2060 		zone->uz_lockptr = &keg->uk_lock;
2061 		zone->uz_flags |= UMA_ZONE_SECONDARY;
2062 		rw_wlock(&uma_rwlock);
2063 		ZONE_LOCK(zone);
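		/*
		 * Append the secondary zone at the tail of the keg's zone
		 * list so that the primary zone stays at the head.
		 */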
2064 		LIST_FOREACH(z, &keg->uk_zones, uz_link) {
2065 			if (LIST_NEXT(z, uz_link) == NULL) {
2066 				LIST_INSERT_AFTER(z, zone, uz_link);
2067 				break;
2068 			}
2069 		}
2070 		ZONE_UNLOCK(zone);
2071 		rw_wunlock(&uma_rwlock);
2072 	} else if (keg == NULL) {
2073 		if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
2074 		    arg->align, arg->flags)) == NULL)
2075 			return (ENOMEM);
2076 	} else {
2077 		struct uma_kctor_args karg;
2078 		int error;
2079 
2080 		/* We should only be here from uma_startup() */
2081 		karg.size = arg->size;
2082 		karg.uminit = arg->uminit;
2083 		karg.fini = arg->fini;
2084 		karg.align = arg->align;
2085 		karg.flags = arg->flags;
2086 		karg.zone = zone;
2087 		error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
2088 		    flags);
2089 		if (error)
2090 			return (error);
2091 	}
2092 
2093 	/* Inherit properties from the keg. */
2094 	zone->uz_keg = keg;
2095 	zone->uz_size = keg->uk_size;
2096 	zone->uz_flags |= (keg->uk_flags &
2097 	    (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT));
2098 
2099 out:
2100 	if (__predict_true(booted == BOOT_RUNNING)) {
2101 		zone_alloc_counters(zone, NULL);
2102 		zone_alloc_sysctl(zone, NULL);
2103 	} else {
2104 		zone->uz_allocs = EARLY_COUNTER;
2105 		zone->uz_frees = EARLY_COUNTER;
2106 		zone->uz_fails = EARLY_COUNTER;
2107 	}
2108 
2109 	KASSERT((arg->flags & (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET)) !=
2110 	    (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET),
2111 	    ("Invalid zone flag combination"));
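	/*
	 * Pick the initial per-CPU bucket size.  Internal zones never
	 * cache buckets; MAXBUCKET, MINBUCKET and NOBUCKET override the
	 * size that bucket_select() would otherwise choose from the item
	 * size.
	 */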
2112 	if (arg->flags & UMA_ZFLAG_INTERNAL)
2113 		zone->uz_bucket_size_max = zone->uz_bucket_size = 0;
2114 	if ((arg->flags & UMA_ZONE_MAXBUCKET) != 0)
2115 		zone->uz_bucket_size = BUCKET_MAX;
2116 	else if ((arg->flags & UMA_ZONE_MINBUCKET) != 0)
2117 		zone->uz_bucket_size_max = zone->uz_bucket_size = BUCKET_MIN;
2118 	else if ((arg->flags & UMA_ZONE_NOBUCKET) != 0)
2119 		zone->uz_bucket_size = 0;
2120 	else
2121 		zone->uz_bucket_size = bucket_select(zone->uz_size);
2122 	zone->uz_bucket_size_min = zone->uz_bucket_size;
2123 
2124 	return (0);
2125 }
2126 
2127 /*
2128  * Keg header dtor.  This frees all data, destroys locks, frees the hash
2129  * table and removes the keg from the global list.
2130  *
2131  * Arguments/Returns follow uma_dtor specifications
2132  *	udata  unused
2133  */
2134 static void
2135 keg_dtor(void *arg, int size, void *udata)
2136 {
2137 	uma_keg_t keg;
2138 
2139 	keg = (uma_keg_t)arg;
2140 	KEG_LOCK(keg);
2141 	if (keg->uk_free != 0) {
2142 		printf("Freed UMA keg (%s) was not empty (%d items). "
2143 		    " Lost %d pages of memory.\n",
2144 		    keg->uk_name ? keg->uk_name : "",
2145 		    keg->uk_free, keg->uk_pages);
2146 	}
2147 	KEG_UNLOCK(keg);
2148 
2149 	hash_free(&keg->uk_hash);
2150 
2151 	KEG_LOCK_FINI(keg);
2152 }
2153 
2154 /*
2155  * Zone header dtor.
2156  *
2157  * Arguments/Returns follow uma_dtor specifications
2158  *	udata  unused
2159  */
2160 static void
2161 zone_dtor(void *arg, int size, void *udata)
2162 {
2163 	uma_zone_t zone;
2164 	uma_keg_t keg;
2165 
2166 	zone = (uma_zone_t)arg;
2167 
2168 	sysctl_remove_oid(zone->uz_oid, 1, 1);
2169 
2170 	if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
2171 		cache_drain(zone);
2172 
2173 	rw_wlock(&uma_rwlock);
2174 	LIST_REMOVE(zone, uz_link);
2175 	rw_wunlock(&uma_rwlock);
2176 	/*
2177 	 * XXX there are some races here where
2178 	 * the zone can be drained but zone lock
2179 	 * released and then refilled before we
2180 	 * remove it... we don't care for now.
2181 	 */
2182 	zone_reclaim(zone, M_WAITOK, true);
2183 	/*
2184 	 * We only destroy kegs from non secondary/non cache zones.
2185 	 */
2186 	if ((zone->uz_flags & (UMA_ZONE_SECONDARY | UMA_ZFLAG_CACHE)) == 0) {
2187 		keg = zone->uz_keg;
2188 		rw_wlock(&uma_rwlock);
2189 		LIST_REMOVE(keg, uk_link);
2190 		rw_wunlock(&uma_rwlock);
2191 		zone_free_item(kegs, keg, NULL, SKIP_NONE);
2192 	}
2193 	counter_u64_free(zone->uz_allocs);
2194 	counter_u64_free(zone->uz_frees);
2195 	counter_u64_free(zone->uz_fails);
2196 	free(zone->uz_ctlname, M_UMA);
2197 	if (zone->uz_lockptr == &zone->uz_lock)
2198 		ZONE_LOCK_FINI(zone);
2199 }
2200 
2201 /*
2202  * Traverses every zone in the system and calls a callback
2203  *
2204  * Arguments:
2205  *	zfunc  A pointer to a function which accepts a zone
2206  *		and an opaque argument.
2207  *
2208  * Returns:
2209  *	Nothing
2210  */
2211 static void
2212 zone_foreach(void (*zfunc)(uma_zone_t, void *arg), void *arg)
2213 {
2214 	uma_keg_t keg;
2215 	uma_zone_t zone;
2216 
2217 	/*
2218 	 * Before BOOT_RUNNING we are guaranteed to be single
2219 	 * threaded, so locking isn't needed. Startup functions
2220 	 * are allowed to use M_WAITOK.
2221 	 */
2222 	if (__predict_true(booted == BOOT_RUNNING))
2223 		rw_rlock(&uma_rwlock);
2224 	LIST_FOREACH(keg, &uma_kegs, uk_link) {
2225 		LIST_FOREACH(zone, &keg->uk_zones, uz_link)
2226 			zfunc(zone, arg);
2227 	}
2228 	LIST_FOREACH(zone, &uma_cachezones, uz_link)
2229 		zfunc(zone, arg);
2230 	if (__predict_true(booted == BOOT_RUNNING))
2231 		rw_runlock(&uma_rwlock);
2232 }
2233 
2234 /*
2235  * Count how many pages we need to bootstrap.  VM supplies its need
2236  * for early zones in the argument; we add our own zones, which
2237  * consist of: UMA Slabs, UMA Hash and 9 Bucket zones.  The zone of
2238  * zones and zone of kegs are accounted for separately.
2239  */
2240 #define	UMA_BOOT_ZONES	11
2241 /* Zone of zones and zone of kegs have arbitrary alignment. */
2242 #define	UMA_BOOT_ALIGN	32
2243 static int zsize, ksize;
2244 int
2245 uma_startup_count(int vm_zones)
2246 {
2247 	int zones, pages;
2248 
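	/*
	 * ksize and zsize account for the per-domain and per-CPU
	 * structures that trail the fixed keg and zone headers.
	 */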
2249 	ksize = sizeof(struct uma_keg) +
2250 	    (sizeof(struct uma_domain) * vm_ndomains);
2251 	zsize = sizeof(struct uma_zone) +
2252 	    (sizeof(struct uma_cache) * (mp_maxid + 1)) +
2253 	    (sizeof(struct uma_zone_domain) * vm_ndomains);
2254 
2255 	/*
2256 	 * Memory for the zone of kegs and its keg,
2257 	 * and for zone of zones.
2258 	 */
2259 	pages = howmany(roundup(zsize, CACHE_LINE_SIZE) * 2 +
2260 	    roundup(ksize, CACHE_LINE_SIZE), PAGE_SIZE);
2261 
2262 #ifdef	UMA_MD_SMALL_ALLOC
2263 	zones = UMA_BOOT_ZONES;
2264 #else
2265 	zones = UMA_BOOT_ZONES + vm_zones;
2266 	vm_zones = 0;
2267 #endif
2268 
2269 	/* Memory for the rest of startup zones, UMA and VM, ... */
2270 	if (zsize > UMA_SLAB_SPACE) {
2271 		/* See keg_large_init(). */
2272 		u_int ppera;
2273 
2274 		ppera = howmany(roundup2(zsize, UMA_BOOT_ALIGN), PAGE_SIZE);
2275 		if (PAGE_SIZE * ppera - roundup2(zsize, UMA_BOOT_ALIGN) <
2276 		    SIZEOF_UMA_SLAB)
2277 			ppera++;
2278 		pages += (zones + vm_zones) * ppera;
2279 	} else if (roundup2(zsize, UMA_BOOT_ALIGN) > UMA_SLAB_SPACE)
2280 		/* See keg_small_init() special case for uk_ppera = 1. */
2281 		pages += zones;
2282 	else
2283 		pages += howmany(zones,
2284 		    UMA_SLAB_SPACE / roundup2(zsize, UMA_BOOT_ALIGN));
2285 
2286 	/* ... and their kegs. Note that zone of zones allocates a keg! */
2287 	pages += howmany(zones + 1,
2288 	    UMA_SLAB_SPACE / roundup2(ksize, UMA_BOOT_ALIGN));
2289 
2290 	/*
2291 	 * Most of the startup zones are not going to be offpage, which is
2292 	 * why we use UMA_SLAB_SPACE instead of UMA_SLAB_SIZE in all
2293 	 * calculations.  Some large bucket zones will be offpage, and
2294 	 * thus will allocate hashes.  We take the conservative approach
2295 	 * and assume that all zones may allocate a hash.  This may give
2296 	 * us some positive inaccuracy, usually an extra single page.
2297 	 */
2298 	pages += howmany(zones, UMA_SLAB_SPACE /
2299 	    (sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT));
2300 
2301 	return (pages);
2302 }
2303 
2304 void
2305 uma_startup(void *mem, int npages)
2306 {
2307 	struct uma_zctor_args args;
2308 	uma_keg_t masterkeg;
2309 	uintptr_t m;
2310 
2311 #ifdef DIAGNOSTIC
2312 	printf("Entering %s with %d boot pages configured\n", __func__, npages);
2313 #endif
2314 
2315 	rw_init(&uma_rwlock, "UMA lock");
2316 
2317 	/* Use bootpages memory for the zone of zones and zone of kegs. */
2318 	m = (uintptr_t)mem;
2319 	zones = (uma_zone_t)m;
2320 	m += roundup(zsize, CACHE_LINE_SIZE);
2321 	kegs = (uma_zone_t)m;
2322 	m += roundup(zsize, CACHE_LINE_SIZE);
2323 	masterkeg = (uma_keg_t)m;
2324 	m += roundup(ksize, CACHE_LINE_SIZE);
2325 	m = roundup(m, PAGE_SIZE);
2326 	npages -= (m - (uintptr_t)mem) / PAGE_SIZE;
2327 	mem = (void *)m;
2328 
2329 	/* "manually" create the initial zone */
2330 	memset(&args, 0, sizeof(args));
2331 	args.name = "UMA Kegs";
2332 	args.size = ksize;
2333 	args.ctor = keg_ctor;
2334 	args.dtor = keg_dtor;
2335 	args.uminit = zero_init;
2336 	args.fini = NULL;
2337 	args.keg = masterkeg;
2338 	args.align = UMA_BOOT_ALIGN - 1;
2339 	args.flags = UMA_ZFLAG_INTERNAL;
2340 	zone_ctor(kegs, zsize, &args, M_WAITOK);
2341 
2342 	bootmem = mem;
2343 	boot_pages = npages;
2344 
2345 	args.name = "UMA Zones";
2346 	args.size = zsize;
2347 	args.ctor = zone_ctor;
2348 	args.dtor = zone_dtor;
2349 	args.uminit = zero_init;
2350 	args.fini = NULL;
2351 	args.keg = NULL;
2352 	args.align = UMA_BOOT_ALIGN - 1;
2353 	args.flags = UMA_ZFLAG_INTERNAL;
2354 	zone_ctor(zones, zsize, &args, M_WAITOK);
2355 
2356 	/* Now make a zone for slab headers */
2357 	slabzone = uma_zcreate("UMA Slabs",
2358 				sizeof(struct uma_slab),
2359 				NULL, NULL, NULL, NULL,
2360 				UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
2361 
2362 	hashzone = uma_zcreate("UMA Hash",
2363 	    sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
2364 	    NULL, NULL, NULL, NULL,
2365 	    UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
2366 
2367 	bucket_init();
2368 
2369 	booted = BOOT_STRAPPED;
2370 }
2371 
2372 void
2373 uma_startup1(void)
2374 {
2375 
2376 #ifdef DIAGNOSTIC
2377 	printf("Entering %s with %d boot pages left\n", __func__, boot_pages);
2378 #endif
2379 	booted = BOOT_PAGEALLOC;
2380 }
2381 
2382 void
2383 uma_startup2(void)
2384 {
2385 
2386 #ifdef DIAGNOSTIC
2387 	printf("Entering %s with %d boot pages left\n", __func__, boot_pages);
2388 #endif
2389 	booted = BOOT_BUCKETS;
2390 	sx_init(&uma_reclaim_lock, "umareclaim");
2391 	bucket_enable();
2392 }
2393 
2394 /*
2395  * Initialize our callout handle
2396  *
2397  */
2398 static void
2399 uma_startup3(void)
2400 {
2401 
2402 #ifdef INVARIANTS
2403 	TUNABLE_INT_FETCH("vm.debug.divisor", &dbg_divisor);
2404 	uma_dbg_cnt = counter_u64_alloc(M_WAITOK);
2405 	uma_skip_cnt = counter_u64_alloc(M_WAITOK);
2406 #endif
2407 	zone_foreach(zone_alloc_counters, NULL);
2408 	zone_foreach(zone_alloc_sysctl, NULL);
2409 	callout_init(&uma_callout, 1);
2410 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
2411 	booted = BOOT_RUNNING;
2412 }
2413 
2414 static uma_keg_t
2415 uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
2416 		int align, uint32_t flags)
2417 {
2418 	struct uma_kctor_args args;
2419 
2420 	args.size = size;
2421 	args.uminit = uminit;
2422 	args.fini = fini;
2423 	args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align;
2424 	args.flags = flags;
2425 	args.zone = zone;
2426 	return (zone_alloc_item(kegs, &args, UMA_ANYDOMAIN, M_WAITOK));
2427 }
2428 
2429 /* Public functions */
2430 /* See uma.h */
2431 void
2432 uma_set_align(int align)
2433 {
2434 
2435 	if (align != UMA_ALIGN_CACHE)
2436 		uma_align_cache = align;
2437 }
2438 
2439 /* See uma.h */
2440 uma_zone_t
2441 uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
2442 		uma_init uminit, uma_fini fini, int align, uint32_t flags)
2443 
2444 {
2445 	struct uma_zctor_args args;
2446 	uma_zone_t res;
2447 	bool locked;
2448 
2449 	KASSERT(powerof2(align + 1), ("invalid zone alignment %d for \"%s\"",
2450 	    align, name));
2451 
2452 	/* Sets all zones to a first-touch domain policy. */
2453 #ifdef UMA_FIRSTTOUCH
2454 	flags |= UMA_ZONE_NUMA;
2455 #endif
2456 
2457 	/* This stuff is essential for the zone ctor */
2458 	memset(&args, 0, sizeof(args));
2459 	args.name = name;
2460 	args.size = size;
2461 	args.ctor = ctor;
2462 	args.dtor = dtor;
2463 	args.uminit = uminit;
2464 	args.fini = fini;
2465 #ifdef  INVARIANTS
2466 	/*
2467 	 * Inject procedures which check for memory use after free if we are
2468 	 * allowed to scramble the memory while it is not allocated.  This
2469 	 * requires that: UMA is actually able to access the memory, no init
2470 	 * or fini procedures, no dependency on the initial value of the
2471 	 * memory, and no (legitimate) use of the memory after free.  Note,
2472 	 * the ctor and dtor do not need to be empty.
2473 	 *
2474 	 * XXX UMA_ZONE_OFFPAGE.
2475 	 */
2476 	if ((!(flags & (UMA_ZONE_ZINIT | UMA_ZONE_NOFREE))) &&
2477 	    uminit == NULL && fini == NULL) {
2478 		args.uminit = trash_init;
2479 		args.fini = trash_fini;
2480 	}
2481 #endif
2482 	args.align = align;
2483 	args.flags = flags;
2484 	args.keg = NULL;
2485 
2486 	if (booted < BOOT_BUCKETS) {
2487 		locked = false;
2488 	} else {
2489 		sx_slock(&uma_reclaim_lock);
2490 		locked = true;
2491 	}
2492 	res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK);
2493 	if (locked)
2494 		sx_sunlock(&uma_reclaim_lock);
2495 	return (res);
2496 }
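
/*
 * Example use of uma_zcreate() and the resulting zone (an illustrative
 * sketch only; "foo", struct foo and foo_zone are hypothetical):
 *
 *	foo_zone = uma_zcreate("foo", sizeof(struct foo), NULL, NULL,
 *	    NULL, NULL, UMA_ALIGN_PTR, 0);
 *	p = uma_zalloc(foo_zone, M_WAITOK);
 *	...
 *	uma_zfree(foo_zone, p);
 */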
2497 
2498 /* See uma.h */
2499 uma_zone_t
2500 uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
2501 		    uma_init zinit, uma_fini zfini, uma_zone_t master)
2502 {
2503 	struct uma_zctor_args args;
2504 	uma_keg_t keg;
2505 	uma_zone_t res;
2506 	bool locked;
2507 
2508 	keg = master->uz_keg;
2509 	memset(&args, 0, sizeof(args));
2510 	args.name = name;
2511 	args.size = keg->uk_size;
2512 	args.ctor = ctor;
2513 	args.dtor = dtor;
2514 	args.uminit = zinit;
2515 	args.fini = zfini;
2516 	args.align = keg->uk_align;
2517 	args.flags = keg->uk_flags | UMA_ZONE_SECONDARY;
2518 	args.keg = keg;
2519 
2520 	if (booted < BOOT_BUCKETS) {
2521 		locked = false;
2522 	} else {
2523 		sx_slock(&uma_reclaim_lock);
2524 		locked = true;
2525 	}
2526 	/* XXX Attaches only one keg of potentially many. */
2527 	res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK);
2528 	if (locked)
2529 		sx_sunlock(&uma_reclaim_lock);
2530 	return (res);
2531 }
2532 
2533 /* See uma.h */
2534 uma_zone_t
2535 uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor,
2536 		    uma_init zinit, uma_fini zfini, uma_import zimport,
2537 		    uma_release zrelease, void *arg, int flags)
2538 {
2539 	struct uma_zctor_args args;
2540 
2541 	memset(&args, 0, sizeof(args));
2542 	args.name = name;
2543 	args.size = size;
2544 	args.ctor = ctor;
2545 	args.dtor = dtor;
2546 	args.uminit = zinit;
2547 	args.fini = zfini;
2548 	args.import = zimport;
2549 	args.release = zrelease;
2550 	args.arg = arg;
2551 	args.align = 0;
2552 	args.flags = flags | UMA_ZFLAG_CACHE;
2553 
2554 	return (zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK));
2555 }
2556 
2557 /* See uma.h */
2558 void
2559 uma_zdestroy(uma_zone_t zone)
2560 {
2561 
2562 	sx_slock(&uma_reclaim_lock);
2563 	zone_free_item(zones, zone, NULL, SKIP_NONE);
2564 	sx_sunlock(&uma_reclaim_lock);
2565 }
2566 
2567 void
2568 uma_zwait(uma_zone_t zone)
2569 {
2570 	void *item;
2571 
2572 	item = uma_zalloc_arg(zone, NULL, M_WAITOK);
2573 	uma_zfree(zone, item);
2574 }
2575 
2576 void *
2577 uma_zalloc_pcpu_arg(uma_zone_t zone, void *udata, int flags)
2578 {
2579 	void *item;
2580 #ifdef SMP
2581 	int i;
2582 
2583 	MPASS(zone->uz_flags & UMA_ZONE_PCPU);
2584 #endif
2585 	item = uma_zalloc_arg(zone, udata, flags & ~M_ZERO);
2586 	if (item != NULL && (flags & M_ZERO)) {
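		/*
		 * M_ZERO on a pcpu zone means zeroing every CPU's private
		 * copy of the item, not just the base allocation.
		 */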
2587 #ifdef SMP
2588 		for (i = 0; i <= mp_maxid; i++)
2589 			bzero(zpcpu_get_cpu(item, i), zone->uz_size);
2590 #else
2591 		bzero(item, zone->uz_size);
2592 #endif
2593 	}
2594 	return (item);
2595 }
2596 
2597 /*
2598  * A stub while both regular and pcpu cases are identical.
2599  */
2600 void
2601 uma_zfree_pcpu_arg(uma_zone_t zone, void *item, void *udata)
2602 {
2603 
2604 #ifdef SMP
2605 	MPASS(zone->uz_flags & UMA_ZONE_PCPU);
2606 #endif
2607 	uma_zfree_arg(zone, item, udata);
2608 }
2609 
2610 static inline void *
2611 bucket_pop(uma_zone_t zone, uma_cache_t cache, uma_bucket_t bucket)
2612 {
2613 	void *item;
2614 
2615 	bucket->ub_cnt--;
2616 	item = bucket->ub_bucket[bucket->ub_cnt];
2617 #ifdef INVARIANTS
2618 	bucket->ub_bucket[bucket->ub_cnt] = NULL;
2619 	KASSERT(item != NULL, ("uma_zalloc: Bucket pointer mangled."));
2620 #endif
2621 	cache->uc_allocs++;
2622 
2623 	return (item);
2624 }
2625 
2626 static inline void
2627 bucket_push(uma_zone_t zone, uma_cache_t cache, uma_bucket_t bucket,
2628     void *item)
2629 {
2630 	KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL,
2631 	    ("uma_zfree: Freeing to non free bucket index."));
2632 	bucket->ub_bucket[bucket->ub_cnt] = item;
2633 	bucket->ub_cnt++;
2634 	cache->uc_frees++;
2635 }
2636 
2637 static void *
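/*
 * Run an imported item through the zone-level constructor path: trash
 * checking under INVARIANTS, the zone's ctor, debug bookkeeping and
 * M_ZERO handling.  On ctor failure the item is freed back, a failure
 * is counted, and NULL is returned.
 */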
2638 item_ctor(uma_zone_t zone, void *udata, int flags, void *item)
2639 {
2640 #ifdef INVARIANTS
2641 	bool skipdbg;
2642 
2643 	skipdbg = uma_dbg_zskip(zone, item);
2644 	if (!skipdbg && (zone->uz_flags & UMA_ZFLAG_TRASH) != 0 &&
2645 	    zone->uz_ctor != trash_ctor)
2646 		trash_ctor(item, zone->uz_size, udata, flags);
2647 #endif
2648 	if (__predict_false(zone->uz_ctor != NULL) &&
2649 	    zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
2650 		counter_u64_add(zone->uz_fails, 1);
2651 		zone_free_item(zone, item, udata, SKIP_DTOR | SKIP_CNT);
2652 		return (NULL);
2653 	}
2654 #ifdef INVARIANTS
2655 	if (!skipdbg)
2656 		uma_dbg_alloc(zone, NULL, item);
2657 #endif
2658 	if (flags & M_ZERO)
2659 		uma_zero_item(item, zone);
2660 
2661 	return (item);
2662 }
2663 
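/*
 * The destructor-side counterpart of item_ctor(): debug bookkeeping,
 * the zone's dtor and trash filling under INVARIANTS, each gated by
 * the 'skip' level.
 */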
2664 static inline void
2665 item_dtor(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip)
2666 {
2667 #ifdef INVARIANTS
2668 	bool skipdbg;
2669 
2670 	skipdbg = uma_dbg_zskip(zone, item);
2671 	if (skip == SKIP_NONE && !skipdbg) {
2672 		if ((zone->uz_flags & UMA_ZONE_MALLOC) != 0)
2673 			uma_dbg_free(zone, udata, item);
2674 		else
2675 			uma_dbg_free(zone, NULL, item);
2676 	}
2677 #endif
2678 	if (skip < SKIP_DTOR) {
2679 		if (zone->uz_dtor != NULL)
2680 			zone->uz_dtor(item, zone->uz_size, udata);
2681 #ifdef INVARIANTS
2682 		if (!skipdbg && (zone->uz_flags & UMA_ZFLAG_TRASH) != 0 &&
2683 		    zone->uz_dtor != trash_dtor)
2684 			trash_dtor(item, zone->uz_size, udata);
2685 #endif
2686 	}
2687 }
2688 
2689 /* See uma.h */
2690 void *
2691 uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
2692 {
2693 	uma_bucket_t bucket;
2694 	uma_cache_t cache;
2695 	void *item;
2696 	int cpu, domain;
2697 
2698 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
2699 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
2700 
2701 	/* This is the fast path allocation */
2702 	CTR4(KTR_UMA, "uma_zalloc_arg thread %x zone %s(%p) flags %d",
2703 	    curthread, zone->uz_name, zone, flags);
2704 
2705 	if (flags & M_WAITOK) {
2706 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2707 		    "uma_zalloc_arg: zone \"%s\"", zone->uz_name);
2708 	}
2709 	KASSERT((flags & M_EXEC) == 0, ("uma_zalloc_arg: called with M_EXEC"));
2710 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
2711 	    ("uma_zalloc_arg: called with spinlock or critical section held"));
2712 	if (zone->uz_flags & UMA_ZONE_PCPU)
2713 		KASSERT((flags & M_ZERO) == 0, ("allocating from a pcpu zone "
2714 		    "with M_ZERO passed"));
2715 
2716 #ifdef DEBUG_MEMGUARD
2717 	if (memguard_cmp_zone(zone)) {
2718 		item = memguard_alloc(zone->uz_size, flags);
2719 		if (item != NULL) {
2720 			if (zone->uz_init != NULL &&
2721 			    zone->uz_init(item, zone->uz_size, flags) != 0)
2722 				return (NULL);
2723 			if (zone->uz_ctor != NULL &&
2724 			    zone->uz_ctor(item, zone->uz_size, udata,
2725 			    flags) != 0) {
2726 				counter_u64_add(zone->uz_fails, 1);
2727 			    	zone->uz_fini(item, zone->uz_size);
2728 				return (NULL);
2729 			}
2730 			return (item);
2731 		}
2732 		/* This is unfortunate but should not be fatal. */
2733 	}
2734 #endif
2735 	/*
2736 	 * If possible, allocate from the per-CPU cache.  There are two
2737 	 * requirements for safe access to the per-CPU cache: (1) the thread
2738 	 * accessing the cache must not be preempted or yield during access,
2739 	 * and (2) the thread must not migrate CPUs without switching which
2740 	 * cache it accesses.  We rely on a critical section to prevent
2741 	 * preemption and migration.  We release the critical section in
2742 	 * order to acquire the zone mutex if we are unable to allocate from
2743 	 * the current cache; when we re-acquire the critical section, we
2744 	 * must detect and handle migration if it has occurred.
2745 	 */
2746 	critical_enter();
2747 	do {
2748 		cpu = curcpu;
2749 		cache = &zone->uz_cpu[cpu];
2750 		bucket = cache->uc_allocbucket;
2751 		if (__predict_true(bucket != NULL && bucket->ub_cnt != 0)) {
2752 			item = bucket_pop(zone, cache, bucket);
2753 			critical_exit();
2754 			return (item_ctor(zone, udata, flags, item));
2755 		}
2756 	} while (cache_alloc(zone, cache, udata, flags));
2757 	critical_exit();
2758 
2759 	/*
2760 	 * We can not get a bucket so try to return a single item.
2761 	 */
2762 	if (zone->uz_flags & UMA_ZONE_NUMA)
2763 		domain = PCPU_GET(domain);
2764 	else
2765 		domain = UMA_ANYDOMAIN;
2766 	return (zone_alloc_item_locked(zone, udata, domain, flags));
2767 }
2768 
2769 /*
2770  * Replenish an alloc bucket and possibly restore an old one.  Called in
2771  * a critical section.  Returns in a critical section.
2772  *
2773  * A false return value indicates failure and returns with the zone lock
2774  * held.  A true return value indicates success and the caller should retry.
2775  */
2776 static __noinline bool
2777 cache_alloc(uma_zone_t zone, uma_cache_t cache, void *udata, int flags)
2778 {
2779 	uma_zone_domain_t zdom;
2780 	uma_bucket_t bucket;
2781 	int cpu, domain;
2782 	bool lockfail;
2783 
2784 	CRITICAL_ASSERT(curthread);
2785 
2786 	/*
2787 	 * If we have run out of items in our alloc bucket see
2788 	 * If we have run out of items in our alloc bucket, see
2789 	 */
2790 	bucket = cache->uc_freebucket;
2791 	if (bucket != NULL && bucket->ub_cnt != 0) {
2792 		cache->uc_freebucket = cache->uc_allocbucket;
2793 		cache->uc_allocbucket = bucket;
2794 		return (true);
2795 	}
2796 
2797 	/*
2798 	 * Discard any empty allocation bucket while we hold no locks.
2799 	 */
2800 	bucket = cache->uc_allocbucket;
2801 	cache->uc_allocbucket = NULL;
2802 	critical_exit();
2803 	if (bucket != NULL)
2804 		bucket_free(zone, bucket, udata);
2805 
2806 	/*
2807 	 * The attempt to retrieve the item from the per-CPU cache has failed, so
2808 	 * we must go back to the zone.  This requires the zone lock, so we
2809 	 * must drop the critical section, then re-acquire it when we go back
2810 	 * to the cache.  Since the critical section is released, we may be
2811 	 * preempted or migrate.  As such, make sure not to maintain any
2812 	 * thread-local state specific to the cache from prior to releasing
2813 	 * the critical section.
2814 	 */
2815 	lockfail = false;
2816 	if (ZONE_TRYLOCK(zone) == 0) {
2817 		/* Record contention to size the buckets. */
2818 		ZONE_LOCK(zone);
2819 		lockfail = true;
2820 	}
2821 
2822 	critical_enter();
2823 	/* Short-circuit for zones without buckets and low memory. */
2824 	if (zone->uz_bucket_size == 0 || bucketdisable)
2825 		return (false);
2826 
2827 	cpu = curcpu;
2828 	cache = &zone->uz_cpu[cpu];
2829 
2830 	/* See if we lost the race to fill the cache. */
2831 	if (cache->uc_allocbucket != NULL) {
2832 		ZONE_UNLOCK(zone);
2833 		return (true);
2834 	}
2835 
2836 	/*
2837 	 * Check the zone's cache of buckets.
2838 	 */
2839 	if (zone->uz_flags & UMA_ZONE_NUMA) {
2840 		domain = PCPU_GET(domain);
2841 		zdom = &zone->uz_domain[domain];
2842 	} else {
2843 		domain = UMA_ANYDOMAIN;
2844 		zdom = &zone->uz_domain[0];
2845 	}
2846 
2847 	if ((bucket = zone_fetch_bucket(zone, zdom)) != NULL) {
2848 		ZONE_UNLOCK(zone);
2849 		KASSERT(bucket->ub_cnt != 0,
2850 		    ("uma_zalloc_arg: Returning an empty bucket."));
2851 		cache->uc_allocbucket = bucket;
2852 		return (true);
2853 	}
2854 	/* We are no longer associated with this CPU. */
2855 	critical_exit();
2856 
2857 	/*
2858 	 * We bump the zone's desired bucket size when lock contention
2859 	 * indicates the cache size is insufficient to handle the working set.
2860 	 */
2861 	if (lockfail && zone->uz_bucket_size < zone->uz_bucket_size_max)
2862 		zone->uz_bucket_size++;
2863 
2864 	/*
2865 	 * Fill a bucket and attempt to use it as the alloc bucket.
2866 	 */
2867 	bucket = zone_alloc_bucket(zone, udata, domain, flags);
2868 	CTR3(KTR_UMA, "uma_zalloc: zone %s(%p) bucket zone returned %p",
2869 	    zone->uz_name, zone, bucket);
2870 	critical_enter();
2871 	if (bucket == NULL)
2872 		return (false);
2873 
2874 	/*
2875 	 * See if we lost the race or were migrated.  Cache the
2876 	 * initialized bucket to make this less likely or claim
2877 	 * the memory directly.
2878 	 */
2879 	cpu = curcpu;
2880 	cache = &zone->uz_cpu[cpu];
2881 	if (cache->uc_allocbucket == NULL &&
2882 	    ((zone->uz_flags & UMA_ZONE_NUMA) == 0 ||
2883 	    domain == PCPU_GET(domain))) {
2884 		cache->uc_allocbucket = bucket;
2885 		zdom->uzd_imax += bucket->ub_cnt;
2886 	} else if (zone->uz_bkt_count >= zone->uz_bkt_max) {
2887 		critical_exit();
2888 		ZONE_UNLOCK(zone);
2889 		bucket_drain(zone, bucket);
2890 		bucket_free(zone, bucket, udata);
2891 		critical_enter();
2892 		return (true);
2893 	} else
2894 		zone_put_bucket(zone, zdom, bucket, false);
2895 	ZONE_UNLOCK(zone);
2896 	return (true);
2897 }
2898 
2899 void *
2900 uma_zalloc_domain(uma_zone_t zone, void *udata, int domain, int flags)
2901 {
2902 
2903 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
2904 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
2905 
2906 	/* This is the fast path allocation */
2907 	CTR5(KTR_UMA,
2908 	    "uma_zalloc_domain thread %x zone %s(%p) domain %d flags %d",
2909 	    curthread, zone->uz_name, zone, domain, flags);
2910 
2911 	if (flags & M_WAITOK) {
2912 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2913 		    "uma_zalloc_domain: zone \"%s\"", zone->uz_name);
2914 	}
2915 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
2916 	    ("uma_zalloc_domain: called with spinlock or critical section held"));
2917 
2918 	return (zone_alloc_item(zone, udata, domain, flags));
2919 }
2920 
2921 /*
2922  * Find a slab with some space.  Prefer slabs that are partially used over those
2923  * that are totally free.  This helps to reduce fragmentation.
2924  *
2925  * If 'rr' is 1, search all domains starting from 'domain'.  Otherwise check
2926  * only 'domain'.
2927  */
2928 static uma_slab_t
2929 keg_first_slab(uma_keg_t keg, int domain, bool rr)
2930 {
2931 	uma_domain_t dom;
2932 	uma_slab_t slab;
2933 	int start;
2934 
2935 	KASSERT(domain >= 0 && domain < vm_ndomains,
2936 	    ("keg_first_slab: domain %d out of range", domain));
2937 	KEG_LOCK_ASSERT(keg);
2938 
2939 	slab = NULL;
2940 	start = domain;
2941 	do {
2942 		dom = &keg->uk_domain[domain];
2943 		if (!LIST_EMPTY(&dom->ud_part_slab))
2944 			return (LIST_FIRST(&dom->ud_part_slab));
2945 		if (!LIST_EMPTY(&dom->ud_free_slab)) {
2946 			slab = LIST_FIRST(&dom->ud_free_slab);
2947 			LIST_REMOVE(slab, us_link);
2948 			LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
2949 			return (slab);
2950 		}
2951 		if (rr)
2952 			domain = (domain + 1) % vm_ndomains;
2953 	} while (domain != start);
2954 
2955 	return (NULL);
2956 }
2957 
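/*
 * Like keg_first_slab(), but return NULL rather than dip into the
 * keg's reserve, unless the caller passed M_USE_RESERVE.
 */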
2958 static uma_slab_t
2959 keg_fetch_free_slab(uma_keg_t keg, int domain, bool rr, int flags)
2960 {
2961 	uint32_t reserve;
2962 
2963 	KEG_LOCK_ASSERT(keg);
2964 
2965 	reserve = (flags & M_USE_RESERVE) != 0 ? 0 : keg->uk_reserve;
2966 	if (keg->uk_free <= reserve)
2967 		return (NULL);
2968 	return (keg_first_slab(keg, domain, rr));
2969 }
2970 
2971 static uma_slab_t
2972 keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int rdomain, const int flags)
2973 {
2974 	struct vm_domainset_iter di;
2975 	uma_domain_t dom;
2976 	uma_slab_t slab;
2977 	int aflags, domain;
2978 	bool rr;
2979 
2980 restart:
2981 	KEG_LOCK_ASSERT(keg);
2982 
2983 	/*
2984 	 * Use the keg's policy if upper layers haven't already specified a
2985 	 * domain (as happens with first-touch zones).
2986 	 *
2987 	 * To avoid races we run the iterator with the keg lock held, but that
2988 	 * means that we cannot allow the vm_domainset layer to sleep.  Thus,
2989 	 * clear M_WAITOK and handle low memory conditions locally.
2990 	 */
2991 	rr = rdomain == UMA_ANYDOMAIN;
2992 	if (rr) {
2993 		aflags = (flags & ~M_WAITOK) | M_NOWAIT;
2994 		vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain,
2995 		    &aflags);
2996 	} else {
2997 		aflags = flags;
2998 		domain = rdomain;
2999 	}
3000 
3001 	for (;;) {
3002 		slab = keg_fetch_free_slab(keg, domain, rr, flags);
3003 		if (slab != NULL)
3004 			return (slab);
3005 
3006 		/*
3007 		 * M_NOVM means don't ask at all!
3008 		 */
3009 		if (flags & M_NOVM)
3010 			break;
3011 
3012 		KASSERT(zone->uz_max_items == 0 ||
3013 		    zone->uz_items <= zone->uz_max_items,
3014 		    ("%s: zone %p overflow", __func__, zone));
3015 
3016 		slab = keg_alloc_slab(keg, zone, domain, flags, aflags);
3017 		/*
3018 		 * If we got a slab here it's safe to mark it partially used
3019 		 * and return.  We assume that the caller is going to remove
3020 		 * at least one item.
3021 		 */
3022 		if (slab) {
3023 			dom = &keg->uk_domain[slab->us_domain];
3024 			LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
3025 			return (slab);
3026 		}
3027 		KEG_LOCK(keg);
3028 		if (rr && vm_domainset_iter_policy(&di, &domain) != 0) {
3029 			if ((flags & M_WAITOK) != 0) {
3030 				KEG_UNLOCK(keg);
3031 				vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask);
3032 				KEG_LOCK(keg);
3033 				goto restart;
3034 			}
3035 			break;
3036 		}
3037 	}
3038 
3039 	/*
3040 	 * We might not have been able to get a slab but another cpu
3041 	 * could have while we were unlocked.  Check again before we
3042 	 * fail.
3043 	 */
3044 	if ((slab = keg_fetch_free_slab(keg, domain, rr, flags)) != NULL) {
3045 		return (slab);
3046 	}
3047 	return (NULL);
3048 }
3049 
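/*
 * Carve one item out of a slab: claim the first free index in the
 * bitset, update the slab and keg free counts, and move the slab to
 * the domain's full list once it is exhausted.
 */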
3050 static void *
3051 slab_alloc_item(uma_keg_t keg, uma_slab_t slab)
3052 {
3053 	uma_domain_t dom;
3054 	void *item;
3055 	uint8_t freei;
3056 
3057 	KEG_LOCK_ASSERT(keg);
3058 
3059 	freei = BIT_FFS(SLAB_SETSIZE, &slab->us_free) - 1;
3060 	BIT_CLR(SLAB_SETSIZE, freei, &slab->us_free);
3061 	item = slab->us_data + (keg->uk_rsize * freei);
3062 	slab->us_freecount--;
3063 	keg->uk_free--;
3064 
3065 	/* Move this slab to the full list */
3066 	if (slab->us_freecount == 0) {
3067 		LIST_REMOVE(slab, us_link);
3068 		dom = &keg->uk_domain[slab->us_domain];
3069 		LIST_INSERT_HEAD(&dom->ud_full_slab, slab, us_link);
3070 	}
3071 
3072 	return (item);
3073 }
3074 
3075 static int
3076 zone_import(uma_zone_t zone, void **bucket, int max, int domain, int flags)
3077 {
3078 	uma_slab_t slab;
3079 	uma_keg_t keg;
3080 #ifdef NUMA
3081 	int stripe;
3082 #endif
3083 	int i;
3084 
3085 	slab = NULL;
3086 	keg = zone->uz_keg;
3087 	KEG_LOCK(keg);
3088 	/* Try to keep the buckets totally full */
3089 	for (i = 0; i < max; ) {
3090 		if ((slab = keg_fetch_slab(keg, zone, domain, flags)) == NULL)
3091 			break;
3092 #ifdef NUMA
3093 		stripe = howmany(max, vm_ndomains);
3094 #endif
3095 		while (slab->us_freecount && i < max) {
3096 			bucket[i++] = slab_alloc_item(keg, slab);
3097 			if (keg->uk_free <= keg->uk_reserve)
3098 				break;
3099 #ifdef NUMA
3100 			/*
3101 			 * If the zone is striped we pick a new slab for every
3102 			 * N allocations.  Eliminating this conditional will
3103 			 * instead pick a new domain for each bucket rather
3104 			 * than stripe within each bucket.  The current option
3105 			 * produces more fragmentation and requires more cpu
3106 			 * time but yields better distribution.
3107 			 */
3108 			if ((zone->uz_flags & UMA_ZONE_NUMA) == 0 &&
3109 			    vm_ndomains > 1 && --stripe == 0)
3110 				break;
3111 #endif
3112 		}
3113 		/* Don't block if we allocated any successfully. */
3114 		flags &= ~M_WAITOK;
3115 		flags |= M_NOWAIT;
3116 	}
3117 	KEG_UNLOCK(keg);
3118 
3119 	return (i);
3120 }
3121 
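/*
 * Fill a fresh bucket with up to uz_bucket_size items, bounded by the
 * zone's item limit.  Called with the zone lock held; the lock is
 * dropped around the import and re-acquired before returning.
 */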
3122 static uma_bucket_t
3123 zone_alloc_bucket(uma_zone_t zone, void *udata, int domain, int flags)
3124 {
3125 	uma_bucket_t bucket;
3126 	int maxbucket, cnt;
3127 
3128 	CTR1(KTR_UMA, "zone_alloc_bucket: domain %d", domain);
3129 
3130 	/* Avoid allocs targeting empty domains. */
3131 	if (domain != UMA_ANYDOMAIN && VM_DOMAIN_EMPTY(domain))
3132 		domain = UMA_ANYDOMAIN;
3133 
3134 	if (zone->uz_max_items > 0) {
3135 		if (zone->uz_items >= zone->uz_max_items)
3136 			return (NULL);
3137 		maxbucket = MIN(zone->uz_bucket_size,
3138 		    zone->uz_max_items - zone->uz_items);
3139 		zone->uz_items += maxbucket;
3140 	} else
3141 		maxbucket = zone->uz_bucket_size;
3142 	ZONE_UNLOCK(zone);
3143 
3144 	/* Don't wait for buckets, preserve caller's NOVM setting. */
3145 	bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM));
3146 	if (bucket == NULL) {
3147 		cnt = 0;
3148 		goto out;
3149 	}
3150 
3151 	bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket,
3152 	    MIN(maxbucket, bucket->ub_entries), domain, flags);
3153 
3154 	/*
3155 	 * Initialize the memory if necessary.
3156 	 */
3157 	if (bucket->ub_cnt != 0 && zone->uz_init != NULL) {
3158 		int i;
3159 
3160 		for (i = 0; i < bucket->ub_cnt; i++)
3161 			if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size,
3162 			    flags) != 0)
3163 				break;
3164 		/*
3165 		 * If we couldn't initialize the whole bucket, put the
3166 		 * rest back onto the freelist.
3167 		 */
3168 		if (i != bucket->ub_cnt) {
3169 			zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i],
3170 			    bucket->ub_cnt - i);
3171 #ifdef INVARIANTS
3172 			bzero(&bucket->ub_bucket[i],
3173 			    sizeof(void *) * (bucket->ub_cnt - i));
3174 #endif
3175 			bucket->ub_cnt = i;
3176 		}
3177 	}
3178 
3179 	cnt = bucket->ub_cnt;
3180 	if (bucket->ub_cnt == 0) {
3181 		bucket_free(zone, bucket, udata);
3182 		counter_u64_add(zone->uz_fails, 1);
3183 		bucket = NULL;
3184 	}
3185 out:
3186 	ZONE_LOCK(zone);
3187 	if (zone->uz_max_items > 0 && cnt < maxbucket) {
3188 		MPASS(zone->uz_items >= maxbucket - cnt);
3189 		zone->uz_items -= maxbucket - cnt;
3190 		if (zone->uz_sleepers > 0 &&
3191 		    (cnt == 0 ? zone->uz_items + 1 : zone->uz_items) <
3192 		    zone->uz_max_items)
3193 			wakeup_one(zone);
3194 	}
3195 
3196 	return (bucket);
3197 }
3198 
3199 /*
3200  * Allocates a single item from a zone.
3201  *
3202  * Arguments
3203  *	zone   The zone to alloc for.
3204  *	udata  The data to be passed to the constructor.
3205  *	domain The domain to allocate from or UMA_ANYDOMAIN.
3206  *	flags  M_WAITOK, M_NOWAIT, M_ZERO.
3207  *
3208  * Returns
3209  *	NULL if there is no memory and M_NOWAIT is set
3210  *	An item if successful
3211  */
3212 
3213 static void *
3214 zone_alloc_item(uma_zone_t zone, void *udata, int domain, int flags)
3215 {
3216 
3217 	ZONE_LOCK(zone);
3218 	return (zone_alloc_item_locked(zone, udata, domain, flags));
3219 }
3220 
3221 /*
3222  * Returns with zone unlocked.
3223  */
3224 static void *
3225 zone_alloc_item_locked(uma_zone_t zone, void *udata, int domain, int flags)
3226 {
3227 	void *item;
3228 
3229 	ZONE_LOCK_ASSERT(zone);
3230 
3231 	if (zone->uz_max_items > 0) {
3232 		if (zone->uz_items >= zone->uz_max_items) {
3233 			zone_log_warning(zone);
3234 			zone_maxaction(zone);
3235 			if (flags & M_NOWAIT) {
3236 				ZONE_UNLOCK(zone);
3237 				return (NULL);
3238 			}
3239 			zone->uz_sleeps++;
3240 			zone->uz_sleepers++;
3241 			while (zone->uz_items >= zone->uz_max_items)
3242 				mtx_sleep(zone, zone->uz_lockptr, PVM,
3243 				    "zonelimit", 0);
3244 			zone->uz_sleepers--;
3245 			if (zone->uz_sleepers > 0 &&
3246 			    zone->uz_items + 1 < zone->uz_max_items)
3247 				wakeup_one(zone);
3248 		}
3249 		zone->uz_items++;
3250 	}
3251 	ZONE_UNLOCK(zone);
3252 
3253 	/* Avoid allocs targeting empty domains. */
3254 	if (domain != UMA_ANYDOMAIN && VM_DOMAIN_EMPTY(domain))
3255 		domain = UMA_ANYDOMAIN;
3256 
3257 	if (zone->uz_import(zone->uz_arg, &item, 1, domain, flags) != 1)
3258 		goto fail_cnt;
3259 
3260 	/*
3261 	 * We have to call both the zone's init (not the keg's init)
3262 	 * and the zone's ctor.  This is because the item is going from
3263 	 * a keg slab directly to the user, and the user is expecting it
3264 	 * to be both zone-init'd as well as zone-ctor'd.
3265 	 */
3266 	if (zone->uz_init != NULL) {
3267 		if (zone->uz_init(item, zone->uz_size, flags) != 0) {
3268 			zone_free_item(zone, item, udata, SKIP_FINI | SKIP_CNT);
3269 			goto fail_cnt;
3270 		}
3271 	}
3272 	item = item_ctor(zone, udata, flags, item);
3273 	if (item == NULL)
3274 		goto fail;
3275 
3276 	counter_u64_add(zone->uz_allocs, 1);
3277 	CTR3(KTR_UMA, "zone_alloc_item item %p from %s(%p)", item,
3278 	    zone->uz_name, zone);
3279 
3280 	return (item);
3281 
3282 fail_cnt:
3283 	counter_u64_add(zone->uz_fails, 1);
3284 fail:
3285 	if (zone->uz_max_items > 0) {
3286 		ZONE_LOCK(zone);
3287 		/* XXX Decrement without wakeup */
3288 		zone->uz_items--;
3289 		ZONE_UNLOCK(zone);
3290 	}
3291 	CTR2(KTR_UMA, "zone_alloc_item failed from %s(%p)",
3292 	    zone->uz_name, zone);
3293 	return (NULL);
3294 }
3295 
3296 /* See uma.h */
3297 void
3298 uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
3299 {
3300 	uma_cache_t cache;
3301 	uma_bucket_t bucket;
3302 	int cpu, domain, itemdomain;
3303 
3304 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
3305 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
3306 
3307 	CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread,
3308 	    zone->uz_name);
3309 
3310 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
3311 	    ("uma_zfree_arg: called with spinlock or critical section held"));
3312 
3313 	/* uma_zfree(..., NULL) does nothing, to match free(9). */
3314 	if (item == NULL)
3315 		return;
3316 #ifdef DEBUG_MEMGUARD
3317 	if (is_memguard_addr(item)) {
3318 		if (zone->uz_dtor != NULL)
3319 			zone->uz_dtor(item, zone->uz_size, udata);
3320 		if (zone->uz_fini != NULL)
3321 			zone->uz_fini(item, zone->uz_size);
3322 		memguard_free(item);
3323 		return;
3324 	}
3325 #endif
3326 	item_dtor(zone, item, udata, SKIP_NONE);
3327 
3328 	/*
3329 	 * The race here is acceptable.  If we miss it we'll just have to wait
3330 	 * a little longer for the limits to be reset.
3331 	 */
3332 	if (zone->uz_sleepers > 0)
3333 		goto zfree_item;
3334 
3335 	/*
3336 	 * If possible, free to the per-CPU cache.  There are two
3337 	 * requirements for safe access to the per-CPU cache: (1) the thread
3338 	 * accessing the cache must not be preempted or yield during access,
3339 	 * and (2) the thread must not migrate CPUs without switching which
3340 	 * cache it accesses.  We rely on a critical section to prevent
3341 	 * preemption and migration.  We release the critical section in
3342 	 * order to acquire the zone mutex if we are unable to free to the
3343 	 * current cache; when we re-acquire the critical section, we must
3344 	 * detect and handle migration if it has occurred.
3345 	 */
3346 	domain = itemdomain = 0;
3347 	critical_enter();
3348 	do {
3349 		cpu = curcpu;
3350 		cache = &zone->uz_cpu[cpu];
3351 		bucket = cache->uc_allocbucket;
3352 #ifdef UMA_XDOMAIN
3353 		if ((zone->uz_flags & UMA_ZONE_NUMA) != 0) {
3354 			itemdomain = _vm_phys_domain(pmap_kextract((vm_offset_t)item));
3355 			domain = PCPU_GET(domain);
3356 		}
3357 		if ((zone->uz_flags & UMA_ZONE_NUMA) != 0 &&
3358 		    domain != itemdomain) {
3359 			bucket = cache->uc_crossbucket;
3360 		} else
3361 #endif
3362 
3363 		/*
3364 		 * Try to free into the allocbucket first to give LIFO ordering
3365 		 * for cache-hot data structures.  Spill over into the freebucket
3366 		 * if necessary.  Alloc will swap them if one runs dry.
3367 		 */
3368 		if (bucket == NULL || bucket->ub_cnt >= bucket->ub_entries)
3369 			bucket = cache->uc_freebucket;
3370 		if (__predict_true(bucket != NULL &&
3371 		    bucket->ub_cnt < bucket->ub_entries)) {
3372 			bucket_push(zone, cache, bucket, item);
3373 			critical_exit();
3374 			return;
3375 		}
3376 	} while (cache_free(zone, cache, udata, item, itemdomain));
3377 	critical_exit();
3378 
3379 	/*
3380 	 * If nothing else caught this, we'll just do an internal free.
3381 	 */
3382 zfree_item:
3383 	zone_free_item(zone, item, udata, SKIP_DTOR);
3384 }
3385 
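/*
 * Stash a full bucket in the zone's per-domain bucket cache, or drain
 * it back to the slab layer if the cache is over its limit (or, with
 * more than two NUMA domains, if the bucket belongs to a remote
 * domain).
 */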
3386 static void
3387 zone_free_bucket(uma_zone_t zone, uma_bucket_t bucket, void *udata,
3388     int domain, int itemdomain)
3389 {
3390 	uma_zone_domain_t zdom;
3391 
3392 #ifdef UMA_XDOMAIN
3393 	/*
3394 	 * On a two-domain system, a bucket coming from the wrong domain is
3395 	 * entirely for the only other domain, so we can simply cache it.
3396 	 * Otherwise we need to sort the items back to their correct
3397 	 * domains by freeing the contents to the slab layer.
3398 	 */
3399 	if (domain != itemdomain && vm_ndomains > 2) {
3400 		CTR3(KTR_UMA,
3401 		    "uma_zfree: zone %s(%p) draining cross bucket %p",
3402 		    zone->uz_name, zone, bucket);
3403 		bucket_drain(zone, bucket);
3404 		bucket_free(zone, bucket, udata);
3405 		return;
3406 	}
3407 #endif
3408 	/*
3409 	 * Attempt to save the bucket in the zone's domain bucket cache.
3410 	 *
3411 	 * We bump the zone's desired bucket size when lock contention
3412 	 * indicates the cache size is insufficient to handle the working set.
3413 	 */
3414 	if (ZONE_TRYLOCK(zone) == 0) {
3415 		/* Record contention to size the buckets. */
3416 		ZONE_LOCK(zone);
3417 		if (zone->uz_bucket_size < zone->uz_bucket_size_max)
3418 			zone->uz_bucket_size++;
3419 	}
3420 
3421 	CTR3(KTR_UMA,
3422 	    "uma_zfree: zone %s(%p) putting bucket %p on free list",
3423 	    zone->uz_name, zone, bucket);
3424 	/* ub_cnt is pointing to the last free item */
3425 	KASSERT(bucket->ub_cnt == bucket->ub_entries,
3426 	    ("uma_zfree: Attempting to insert a partial bucket onto the full list."));
3427 	if (zone->uz_bkt_count >= zone->uz_bkt_max) {
3428 		ZONE_UNLOCK(zone);
3429 		bucket_drain(zone, bucket);
3430 		bucket_free(zone, bucket, udata);
3431 	} else {
3432 		zdom = &zone->uz_domain[itemdomain];
3433 		zone_put_bucket(zone, zdom, bucket, true);
3434 		ZONE_UNLOCK(zone);
3435 	}
3436 }
3437 
3438 /*
3439  * Populate a free or cross bucket for the current cpu cache.  Free any
3440  * existing full bucket either to the zone cache or back to the slab layer.
3441  *
3442  * Enters and returns in a critical section.  false return indicates that
3443  * we can not satisfy this free in the cache layer.  true indicates that
3444  * the caller should retry.
3445  */
3446 static __noinline bool
3447 cache_free(uma_zone_t zone, uma_cache_t cache, void *udata, void *item,
3448     int itemdomain)
3449 {
3450 	uma_bucket_t bucket;
3451 	int cpu, domain;
3452 
3453 	CRITICAL_ASSERT(curthread);
3454 
3455 	if (zone->uz_bucket_size == 0 || bucketdisable)
3456 		return false;
3457 
3458 	cpu = curcpu;
3459 	cache = &zone->uz_cpu[cpu];
3460 
3461 	/*
3462 	 * NUMA domains need to free to the correct zdom.  When XDOMAIN
3463 	 * is enabled this is the zdom of the item and the bucket may be
3464 	 * the cross bucket if they do not match.
3465 	 */
3466 	if ((zone->uz_flags & UMA_ZONE_NUMA) != 0)
3467 #ifdef UMA_XDOMAIN
3468 		domain = PCPU_GET(domain);
3469 #else
3470 		itemdomain = domain = PCPU_GET(domain);
3471 #endif
3472 	else
3473 		itemdomain = domain = 0;
3474 #ifdef UMA_XDOMAIN
3475 	if (domain != itemdomain) {
3476 		bucket = cache->uc_crossbucket;
3477 		cache->uc_crossbucket = NULL;
3478 		if (bucket != NULL)
3479 			atomic_add_64(&zone->uz_xdomain, bucket->ub_cnt);
3480 	} else
3481 #endif
3482 	{
3483 		bucket = cache->uc_freebucket;
3484 		cache->uc_freebucket = NULL;
3485 	}
3486 
3488 	/* We are no longer associated with this CPU. */
3489 	critical_exit();
3490 
3491 	if (bucket != NULL)
3492 		zone_free_bucket(zone, bucket, udata, domain, itemdomain);
3493 
3494 	bucket = bucket_alloc(zone, udata, M_NOWAIT);
3495 	CTR3(KTR_UMA, "uma_zfree: zone %s(%p) allocated bucket %p",
3496 	    zone->uz_name, zone, bucket);
3497 	critical_enter();
3498 	if (bucket == NULL)
3499 		return (false);
3500 	cpu = curcpu;
3501 	cache = &zone->uz_cpu[cpu];
3502 #ifdef UMA_XDOMAIN
3503 	/*
3504 	 * Check to see if we should be populating the cross bucket.  If it
3505 	 * is already populated we will fall through and attempt to populate
3506 	 * the free bucket.
3507 	 */
3508 	if ((zone->uz_flags & UMA_ZONE_NUMA) != 0) {
3509 		domain = PCPU_GET(domain);
3510 		if (domain != itemdomain && cache->uc_crossbucket == NULL) {
3511 			cache->uc_crossbucket = bucket;
3512 			return (true);
3513 		}
3514 	}
3515 #endif
3516 	/*
3517 	 * We may have lost the race to fill the bucket or switched CPUs.
3518 	 */
3519 	if (cache->uc_freebucket != NULL) {
3520 		critical_exit();
3521 		bucket_free(zone, bucket, udata);
3522 		critical_enter();
3523 	} else
3524 		cache->uc_freebucket = bucket;
3525 
3526 	return (true);
3527 }
3528 
3529 void
3530 uma_zfree_domain(uma_zone_t zone, void *item, void *udata)
3531 {
3532 
3533 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
3534 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
3535 
3536 	CTR2(KTR_UMA, "uma_zfree_domain thread %x zone %s", curthread,
3537 	    zone->uz_name);
3538 
3539 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
3540 	    ("uma_zfree_domain: called with spinlock or critical section held"));
3541 
3542 	/* uma_zfree(..., NULL) does nothing, to match free(9). */
3543 	if (item == NULL)
3544 		return;
3545 	zone_free_item(zone, item, udata, SKIP_NONE);
3546 }
3547 
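/*
 * Return one item to its slab: mark its index free in the bitset,
 * update the free counts, and move the slab back to the domain's
 * partial or free list as appropriate.
 */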
3548 static void
3549 slab_free_item(uma_zone_t zone, uma_slab_t slab, void *item)
3550 {
3551 	uma_keg_t keg;
3552 	uma_domain_t dom;
3553 	uint8_t freei;
3554 
3555 	keg = zone->uz_keg;
3556 	MPASS(zone->uz_lockptr == &keg->uk_lock);
3557 	KEG_LOCK_ASSERT(keg);
3558 
3559 	dom = &keg->uk_domain[slab->us_domain];
3560 
3561 	/* Do we need to remove from any lists? */
3562 	if (slab->us_freecount+1 == keg->uk_ipers) {
3563 		LIST_REMOVE(slab, us_link);
3564 		LIST_INSERT_HEAD(&dom->ud_free_slab, slab, us_link);
3565 	} else if (slab->us_freecount == 0) {
3566 		LIST_REMOVE(slab, us_link);
3567 		LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
3568 	}
3569 
3570 	/* Slab management. */
3571 	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
3572 	BIT_SET(SLAB_SETSIZE, freei, &slab->us_free);
3573 	slab->us_freecount++;
3574 
3575 	/* Keg statistics. */
3576 	keg->uk_free++;
3577 }
3578 
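/*
 * Return a bucket's worth of items to the slab layer, resolving each
 * item to its slab via vtoslab(), the keg hash, or the fixed page
 * offset, depending on the keg's flags.
 */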
3579 static void
3580 zone_release(uma_zone_t zone, void **bucket, int cnt)
3581 {
3582 	void *item;
3583 	uma_slab_t slab;
3584 	uma_keg_t keg;
3585 	uint8_t *mem;
3586 	int i;
3587 
3588 	keg = zone->uz_keg;
3589 	KEG_LOCK(keg);
3590 	for (i = 0; i < cnt; i++) {
3591 		item = bucket[i];
3592 		if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) {
3593 			mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
3594 			if (zone->uz_flags & UMA_ZONE_HASH) {
3595 				slab = hash_sfind(&keg->uk_hash, mem);
3596 			} else {
3597 				mem += keg->uk_pgoff;
3598 				slab = (uma_slab_t)mem;
3599 			}
3600 		} else
3601 			slab = vtoslab((vm_offset_t)item);
3602 		slab_free_item(zone, slab, item);
3603 	}
3604 	KEG_UNLOCK(keg);
3605 }
3606 
3607 /*
3608  * Frees a single item to any zone.
3609  *
3610  * Arguments:
3611  *	zone   The zone to free to
3612  *	item   The item we're freeing
3613  *	udata  User supplied data for the dtor
3614  *	skip   Skip dtors and finis
3615  */
3616 static void
3617 zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip)
3618 {
3619 
3620 	item_dtor(zone, item, udata, skip);
3621 
3622 	if (skip < SKIP_FINI && zone->uz_fini)
3623 		zone->uz_fini(item, zone->uz_size);
3624 
3625 	zone->uz_release(zone->uz_arg, &item, 1);
3626 
3627 	if (skip & SKIP_CNT)
3628 		return;
3629 
3630 	counter_u64_add(zone->uz_frees, 1);
3631 
3632 	if (zone->uz_max_items > 0) {
3633 		ZONE_LOCK(zone);
3634 		zone->uz_items--;
3635 		if (zone->uz_sleepers > 0 &&
3636 		    zone->uz_items < zone->uz_max_items)
3637 			wakeup_one(zone);
3638 		ZONE_UNLOCK(zone);
3639 	}
3640 }
3641 
3642 /* See uma.h */
3643 int
3644 uma_zone_set_max(uma_zone_t zone, int nitems)
3645 {
3646 	struct uma_bucket_zone *ubz;
3647 	int count;
3648 
3649 	ZONE_LOCK(zone);
3650 	ubz = bucket_zone_max(zone, nitems);
3651 	count = ubz != NULL ? ubz->ubz_entries : 0;
3652 	zone->uz_bucket_size_max = zone->uz_bucket_size = count;
3653 	if (zone->uz_bucket_size_min > zone->uz_bucket_size_max)
3654 		zone->uz_bucket_size_min = zone->uz_bucket_size_max;
3655 	zone->uz_max_items = nitems;
3656 	ZONE_UNLOCK(zone);
3657 
3658 	return (nitems);
3659 }
3660 
3661 /* See uma.h */
3662 void
3663 uma_zone_set_maxcache(uma_zone_t zone, int nitems)
3664 {
3665 	struct uma_bucket_zone *ubz;
3666 	int bpcpu;
3667 
3668 	ZONE_LOCK(zone);
3669 	ubz = bucket_zone_max(zone, nitems);
3670 	if (ubz != NULL) {
3671 		bpcpu = 2;
3672 #ifdef UMA_XDOMAIN
3673 		if ((zone->uz_flags & UMA_ZONE_NUMA) != 0)
3674 			/* Count the cross-domain bucket. */
3675 			bpcpu++;
3676 #endif
3677 		nitems -= ubz->ubz_entries * bpcpu * mp_ncpus;
3678 		zone->uz_bucket_size_max = ubz->ubz_entries;
3679 	} else {
3680 		zone->uz_bucket_size_max = zone->uz_bucket_size = 0;
3681 	}
3682 	if (zone->uz_bucket_size_min > zone->uz_bucket_size_max)
3683 		zone->uz_bucket_size_min = zone->uz_bucket_size_max;
3684 	zone->uz_bkt_max = nitems;
3685 	ZONE_UNLOCK(zone);
3686 }
3687 
3688 /* See uma.h */
3689 int
3690 uma_zone_get_max(uma_zone_t zone)
3691 {
3692 	int nitems;
3693 
3694 	ZONE_LOCK(zone);
3695 	nitems = zone->uz_max_items;
3696 	ZONE_UNLOCK(zone);
3697 
3698 	return (nitems);
3699 }
3700 
3701 /* See uma.h */
3702 void
3703 uma_zone_set_warning(uma_zone_t zone, const char *warning)
3704 {
3705 
3706 	ZONE_LOCK(zone);
3707 	zone->uz_warning = warning;
3708 	ZONE_UNLOCK(zone);
3709 }
3710 
3711 /* See uma.h */
3712 void
3713 uma_zone_set_maxaction(uma_zone_t zone, uma_maxaction_t maxaction)
3714 {
3715 
3716 	ZONE_LOCK(zone);
3717 	TASK_INIT(&zone->uz_maxaction, 0, (task_fn_t *)maxaction, zone);
3718 	ZONE_UNLOCK(zone);
3719 }
3720 
3721 /* See uma.h */
3722 int
3723 uma_zone_get_cur(uma_zone_t zone)
3724 {
3725 	int64_t nitems;
3726 	u_int i;
3727 
3728 	ZONE_LOCK(zone);
3729 	nitems = counter_u64_fetch(zone->uz_allocs) -
3730 	    counter_u64_fetch(zone->uz_frees);
3731 	if ((zone->uz_flags & UMA_ZFLAG_INTERNAL) == 0) {
3732 		CPU_FOREACH(i) {
3733 			/*
3734 			 * See the comment in uma_vm_zone_stats() regarding
3735 			 * the safety of accessing the per-cpu caches. With
3736 			 * the zone lock held, it is safe, but can potentially
3737 			 * result in stale data.
3738 			 */
3739 			nitems += zone->uz_cpu[i].uc_allocs -
3740 			    zone->uz_cpu[i].uc_frees;
3741 		}
3742 	}
3743 	ZONE_UNLOCK(zone);
3744 
3745 	return (nitems < 0 ? 0 : nitems);
3746 }
3747 
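/*
 * Return the lifetime allocation count for a zone, folding in the per-CPU
 * cache counters for non-internal zones.
 */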
3748 static uint64_t
3749 uma_zone_get_allocs(uma_zone_t zone)
3750 {
3751 	uint64_t nitems;
3752 	u_int i;
3753 
3754 	ZONE_LOCK(zone);
3755 	nitems = counter_u64_fetch(zone->uz_allocs);
3756 	if ((zone->uz_flags & UMA_ZFLAG_INTERNAL) == 0) {
3757 		CPU_FOREACH(i) {
3758 			/*
3759 			 * See the comment in uma_vm_zone_stats() regarding
3760 			 * the safety of accessing the per-cpu caches. With
3761 			 * the zone lock held, it is safe, but can potentially
3762 			 * result in stale data.
3763 			 */
3764 			nitems += zone->uz_cpu[i].uc_allocs;
3765 		}
3766 	}
3767 	ZONE_UNLOCK(zone);
3768 
3769 	return (nitems);
3770 }
3771 
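/*
 * Return the lifetime free count for a zone, folding in the per-CPU cache
 * counters for non-internal zones.
 */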
3772 static uint64_t
3773 uma_zone_get_frees(uma_zone_t zone)
3774 {
3775 	uint64_t nitems;
3776 	u_int i;
3777 
3778 	ZONE_LOCK(zone);
3779 	nitems = counter_u64_fetch(zone->uz_frees);
3780 	if ((zone->uz_flags & UMA_ZFLAG_INTERNAL) == 0) {
3781 		CPU_FOREACH(i) {
3782 			/*
3783 			 * See the comment in uma_vm_zone_stats() regarding
3784 			 * the safety of accessing the per-cpu caches. With
3785 			 * the zone lock held, it is safe, but can potentially
3786 			 * result in stale data.
3787 			 */
3788 			nitems += zone->uz_cpu[i].uc_frees;
3789 		}
3790 	}
3791 	ZONE_UNLOCK(zone);
3792 
3793 	return (nitems);
3794 }
3795 
3796 /* See uma.h */
3797 void
3798 uma_zone_set_init(uma_zone_t zone, uma_init uminit)
3799 {
3800 	uma_keg_t keg;
3801 
3802 	KEG_GET(zone, keg);
3803 	KEG_LOCK(keg);
3804 	KASSERT(keg->uk_pages == 0,
3805 	    ("uma_zone_set_init on non-empty keg"));
3806 	keg->uk_init = uminit;
3807 	KEG_UNLOCK(keg);
3808 }
3809 
3810 /* See uma.h */
3811 void
3812 uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
3813 {
3814 	uma_keg_t keg;
3815 
3816 	KEG_GET(zone, keg);
3817 	KEG_LOCK(keg);
3818 	KASSERT(keg->uk_pages == 0,
3819 	    ("uma_zone_set_fini on non-empty keg"));
3820 	keg->uk_fini = fini;
3821 	KEG_UNLOCK(keg);
3822 }
3823 
3824 /* See uma.h */
3825 void
3826 uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
3827 {
3828 
3829 	ZONE_LOCK(zone);
3830 	KASSERT(zone->uz_keg->uk_pages == 0,
3831 	    ("uma_zone_set_zinit on non-empty keg"));
3832 	zone->uz_init = zinit;
3833 	ZONE_UNLOCK(zone);
3834 }
3835 
3836 /* See uma.h */
3837 void
3838 uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
3839 {
3840 
3841 	ZONE_LOCK(zone);
3842 	KASSERT(zone->uz_keg->uk_pages == 0,
3843 	    ("uma_zone_set_zfini on non-empty keg"));
3844 	zone->uz_fini = zfini;
3845 	ZONE_UNLOCK(zone);
3846 }
3847 
3848 /* See uma.h */
3849 /* XXX uk_freef is not actually used with the zone locked */
3850 void
3851 uma_zone_set_freef(uma_zone_t zone, uma_free freef)
3852 {
3853 	uma_keg_t keg;
3854 
3855 	KEG_GET(zone, keg);
3856 	KASSERT(keg != NULL, ("uma_zone_set_freef: Invalid zone type"));
3857 	KEG_LOCK(keg);
3858 	keg->uk_freef = freef;
3859 	KEG_UNLOCK(keg);
3860 }
3861 
3862 /* See uma.h */
3863 /* XXX uk_allocf is not actually used with the zone locked */
3864 void
3865 uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
3866 {
3867 	uma_keg_t keg;
3868 
3869 	KEG_GET(zone, keg);
3870 	KEG_LOCK(keg);
3871 	keg->uk_allocf = allocf;
3872 	KEG_UNLOCK(keg);
3873 }
3874 
3875 /* See uma.h */
3876 void
3877 uma_zone_reserve(uma_zone_t zone, int items)
3878 {
3879 	uma_keg_t keg;
3880 
3881 	KEG_GET(zone, keg);
3882 	KEG_LOCK(keg);
3883 	keg->uk_reserve = items;
3884 	KEG_UNLOCK(keg);
3885 }
3886 
3887 /* See uma.h */
3888 int
3889 uma_zone_reserve_kva(uma_zone_t zone, int count)
3890 {
3891 	uma_keg_t keg;
3892 	vm_offset_t kva;
3893 	u_int pages;
3894 
3895 	KEG_GET(zone, keg);
3896 
3897 	pages = count / keg->uk_ipers;
3898 	if (pages * keg->uk_ipers < count)
3899 		pages++;
3900 	pages *= keg->uk_ppera;
3901 
3902 #ifdef UMA_MD_SMALL_ALLOC
3903 	if (keg->uk_ppera > 1) {
3904 #else
3905 	if (1) {
3906 #endif
3907 		kva = kva_alloc((vm_size_t)pages * PAGE_SIZE);
3908 		if (kva == 0)
3909 			return (0);
3910 	} else
3911 		kva = 0;
3912 
3913 	ZONE_LOCK(zone);
3914 	MPASS(keg->uk_kva == 0);
3915 	keg->uk_kva = kva;
3916 	keg->uk_offset = 0;
3917 	zone->uz_max_items = pages * keg->uk_ipers;
3918 #ifdef UMA_MD_SMALL_ALLOC
3919 	keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc;
3920 #else
3921 	keg->uk_allocf = noobj_alloc;
3922 #endif
3923 	keg->uk_flags |= UMA_ZONE_NOFREE;
3924 	ZONE_UNLOCK(zone);
3925 
3926 	return (1);
3927 }
3928 
3929 /* See uma.h */
3930 void
3931 uma_prealloc(uma_zone_t zone, int items)
3932 {
3933 	struct vm_domainset_iter di;
3934 	uma_domain_t dom;
3935 	uma_slab_t slab;
3936 	uma_keg_t keg;
3937 	int aflags, domain, slabs;
3938 
3939 	KEG_GET(zone, keg);
3940 	KEG_LOCK(keg);
3941 	slabs = items / keg->uk_ipers;
3942 	if (slabs * keg->uk_ipers < items)
3943 		slabs++;
3944 	while (slabs-- > 0) {
3945 		aflags = M_NOWAIT;
3946 		vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain,
3947 		    &aflags);
3948 		for (;;) {
3949 			slab = keg_alloc_slab(keg, zone, domain, M_WAITOK,
3950 			    aflags);
3951 			if (slab != NULL) {
3952 				dom = &keg->uk_domain[slab->us_domain];
3953 				LIST_INSERT_HEAD(&dom->ud_free_slab, slab,
3954 				    us_link);
3955 				break;
3956 			}
3957 			KEG_LOCK(keg);
3958 			if (vm_domainset_iter_policy(&di, &domain) != 0) {
3959 				KEG_UNLOCK(keg);
3960 				vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask);
3961 				KEG_LOCK(keg);
3962 			}
3963 		}
3964 	}
3965 	KEG_UNLOCK(keg);
3966 }
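
/*
 * Illustrative use of the reservation interfaces above (a sketch, not code
 * taken from any consumer; the zone name and item counts are invented):
 *
 *	zone = uma_zcreate("frobs", sizeof(struct frob), NULL, NULL, NULL,
 *	    NULL, UMA_ALIGN_PTR, 0);
 *	uma_zone_reserve(zone, 32);
 *	uma_prealloc(zone, 32);
 *
 * The reserved, preallocated items can later be claimed even under memory
 * pressure with uma_zalloc(zone, M_NOWAIT | M_USE_RESERVE).
 */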
3967 
3968 /* See uma.h */
3969 void
3970 uma_reclaim(int req)
3971 {
3972 
3973 	CTR0(KTR_UMA, "UMA: vm asked us to release pages!");
3974 	sx_xlock(&uma_reclaim_lock);
3975 	bucket_enable();
3976 
3977 	switch (req) {
3978 	case UMA_RECLAIM_TRIM:
3979 		zone_foreach(zone_trim, NULL);
3980 		break;
3981 	case UMA_RECLAIM_DRAIN:
3982 	case UMA_RECLAIM_DRAIN_CPU:
3983 		zone_foreach(zone_drain, NULL);
3984 		if (req == UMA_RECLAIM_DRAIN_CPU) {
3985 			pcpu_cache_drain_safe(NULL);
3986 			zone_foreach(zone_drain, NULL);
3987 		}
3988 		break;
3989 	default:
3990 		panic("unhandled reclamation request %d", req);
3991 	}
3992 
3993 	/*
3994 	 * Some slabs may have been freed, but this zone was visited early in
3995 	 * the pass, so visit it again to free pages that became empty once
3996 	 * the other zones were drained.  We have to do the same for buckets.
3997 	 */
3998 	zone_drain(slabzone, NULL);
3999 	bucket_zone_drain();
4000 	sx_xunlock(&uma_reclaim_lock);
4001 }
4002 
4003 static volatile int uma_reclaim_needed;
4004 
4005 void
4006 uma_reclaim_wakeup(void)
4007 {
4008 
4009 	if (atomic_fetchadd_int(&uma_reclaim_needed, 1) == 0)
4010 		wakeup(uma_reclaim);
4011 }
4012 
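/*
 * Body of the UMA reclaim kernel thread: sleep until uma_reclaim_wakeup()
 * signals demand, fire the vm_lowmem event, perform a full per-CPU drain,
 * and rate-limit the whole pass to once per second.
 */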
4013 void
4014 uma_reclaim_worker(void *arg __unused)
4015 {
4016 
4017 	for (;;) {
4018 		sx_xlock(&uma_reclaim_lock);
4019 		while (atomic_load_int(&uma_reclaim_needed) == 0)
4020 			sx_sleep(uma_reclaim, &uma_reclaim_lock, PVM, "umarcl",
4021 			    hz);
4022 		sx_xunlock(&uma_reclaim_lock);
4023 		EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_KMEM);
4024 		uma_reclaim(UMA_RECLAIM_DRAIN_CPU);
4025 		atomic_store_int(&uma_reclaim_needed, 0);
4026 		/* Don't fire more than once per second. */
4027 		pause("umarclslp", hz);
4028 	}
4029 }
4030 
4031 /* See uma.h */
4032 void
4033 uma_zone_reclaim(uma_zone_t zone, int req)
4034 {
4035 
4036 	switch (req) {
4037 	case UMA_RECLAIM_TRIM:
4038 		zone_trim(zone, NULL);
4039 		break;
4040 	case UMA_RECLAIM_DRAIN:
4041 		zone_drain(zone, NULL);
4042 		break;
4043 	case UMA_RECLAIM_DRAIN_CPU:
4044 		pcpu_cache_drain_safe(zone);
4045 		zone_drain(zone, NULL);
4046 		break;
4047 	default:
4048 		panic("unhandled reclamation request %d", req);
4049 	}
4050 }
4051 
4052 /* See uma.h */
4053 int
4054 uma_zone_exhausted(uma_zone_t zone)
4055 {
4056 	int full;
4057 
4058 	ZONE_LOCK(zone);
4059 	full = zone->uz_sleepers > 0;
4060 	ZONE_UNLOCK(zone);
4061 	return (full);
4062 }
4063 
4064 int
4065 uma_zone_exhausted_nolock(uma_zone_t zone)
4066 {
4067 	return (zone->uz_sleepers > 0);
4068 }
4069 
4070 static void
4071 uma_zero_item(void *item, uma_zone_t zone)
4072 {
4073 
4074 	bzero(item, zone->uz_size);
4075 }
4076 
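/*
 * Accessors for the global UMA kernel memory limit and for the amount of
 * memory currently allocated against it.
 */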
4077 unsigned long
4078 uma_limit(void)
4079 {
4080 
4081 	return (uma_kmem_limit);
4082 }
4083 
4084 void
4085 uma_set_limit(unsigned long limit)
4086 {
4087 
4088 	uma_kmem_limit = limit;
4089 }
4090 
4091 unsigned long
4092 uma_size(void)
4093 {
4094 
4095 	return (atomic_load_long(&uma_kmem_total));
4096 }
4097 
4098 long
4099 uma_avail(void)
4100 {
4101 
4102 	return (uma_kmem_limit - uma_size());
4103 }
4104 
4105 #ifdef DDB
4106 /*
4107  * Generate statistics across both the zone and its per-cpu caches.  Return
4108  * each statistic through its pointer argument when that pointer is non-NULL.
4109  *
4110  * Note: does not update the zone statistics, as it can't safely clear the
4111  * per-CPU cache statistic.
4112  *
4113  * XXXRW: Following the uc_allocbucket and uc_freebucket pointers here isn't
4114  * safe from off-CPU; we should modify the caches to track this information
4115  * directly so that we don't have to.
4116  */
4117 static void
4118 uma_zone_sumstat(uma_zone_t z, long *cachefreep, uint64_t *allocsp,
4119     uint64_t *freesp, uint64_t *sleepsp, uint64_t *xdomainp)
4120 {
4121 	uma_cache_t cache;
4122 	uint64_t allocs, frees, sleeps, xdomain;
4123 	int cachefree, cpu;
4124 
4125 	allocs = frees = sleeps = xdomain = 0;
4126 	cachefree = 0;
4127 	CPU_FOREACH(cpu) {
4128 		cache = &z->uz_cpu[cpu];
4129 		if (cache->uc_allocbucket != NULL)
4130 			cachefree += cache->uc_allocbucket->ub_cnt;
4131 		if (cache->uc_freebucket != NULL)
4132 			cachefree += cache->uc_freebucket->ub_cnt;
4133 		if (cache->uc_crossbucket != NULL) {
4134 			xdomain += cache->uc_crossbucket->ub_cnt;
4135 			cachefree += cache->uc_crossbucket->ub_cnt;
4136 		}
4137 		allocs += cache->uc_allocs;
4138 		frees += cache->uc_frees;
4139 	}
4140 	allocs += counter_u64_fetch(z->uz_allocs);
4141 	frees += counter_u64_fetch(z->uz_frees);
4142 	sleeps += z->uz_sleeps;
4143 	xdomain += z->uz_xdomain;
4144 	if (cachefreep != NULL)
4145 		*cachefreep = cachefree;
4146 	if (allocsp != NULL)
4147 		*allocsp = allocs;
4148 	if (freesp != NULL)
4149 		*freesp = frees;
4150 	if (sleepsp != NULL)
4151 		*sleepsp = sleeps;
4152 	if (xdomainp != NULL)
4153 		*xdomainp = xdomain;
4154 }
4155 #endif /* DDB */
4156 
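/*
 * Sysctl handler reporting the number of registered zones: every zone on
 * every keg plus the keg-less cache zones.
 */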
4157 static int
4158 sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS)
4159 {
4160 	uma_keg_t kz;
4161 	uma_zone_t z;
4162 	int count;
4163 
4164 	count = 0;
4165 	rw_rlock(&uma_rwlock);
4166 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
4167 		LIST_FOREACH(z, &kz->uk_zones, uz_link)
4168 			count++;
4169 	}
4170 	LIST_FOREACH(z, &uma_cachezones, uz_link)
4171 		count++;
4172 
4173 	rw_runlock(&uma_rwlock);
4174 	return (sysctl_handle_int(oidp, &count, 0, req));
4175 }
4176 
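/*
 * Fill in the exported uma_type_header counters and the per-CPU
 * uma_percpu_stat array for a single zone.  Called with the zone lock held
 * from sysctl_vm_zone_stats().
 */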
4177 static void
4178 uma_vm_zone_stats(struct uma_type_header *uth, uma_zone_t z, struct sbuf *sbuf,
4179     struct uma_percpu_stat *ups, bool internal)
4180 {
4181 	uma_zone_domain_t zdom;
4182 	uma_bucket_t bucket;
4183 	uma_cache_t cache;
4184 	int i;
4185 
4186 
4188 		zdom = &z->uz_domain[i];
4189 		uth->uth_zone_free += zdom->uzd_nitems;
4190 	}
4191 	uth->uth_allocs = counter_u64_fetch(z->uz_allocs);
4192 	uth->uth_frees = counter_u64_fetch(z->uz_frees);
4193 	uth->uth_fails = counter_u64_fetch(z->uz_fails);
4194 	uth->uth_sleeps = z->uz_sleeps;
4195 	uth->uth_xdomain = z->uz_xdomain;
4196 
4197 	/*
4198 	 * While it is not normally safe to access the cache bucket pointers
4199 	 * while not on the CPU that owns the cache, we only allow the pointers
4200 	 * to be exchanged without the zone lock held, not invalidated, so
4201 	 * accept the possible race associated with bucket exchange during
4202 	 * monitoring.  Use atomic_load_ptr() to ensure that the bucket pointers
4203 	 * are loaded only once.
4204 	 */
4205 	for (i = 0; i < mp_maxid + 1; i++) {
4206 		bzero(&ups[i], sizeof(*ups));
4207 		if (internal || CPU_ABSENT(i))
4208 			continue;
4209 		cache = &z->uz_cpu[i];
4210 		bucket = (uma_bucket_t)atomic_load_ptr(&cache->uc_allocbucket);
4211 		if (bucket != NULL)
4212 			ups[i].ups_cache_free += bucket->ub_cnt;
4213 		bucket = (uma_bucket_t)atomic_load_ptr(&cache->uc_freebucket);
4214 		if (bucket != NULL)
4215 			ups[i].ups_cache_free += bucket->ub_cnt;
4216 		bucket = (uma_bucket_t)atomic_load_ptr(&cache->uc_crossbucket);
4217 		if (bucket != NULL)
4218 			ups[i].ups_cache_free += bucket->ub_cnt;
4219 		ups[i].ups_allocs = cache->uc_allocs;
4220 		ups[i].ups_frees = cache->uc_frees;
4221 	}
4222 }
4223 
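/*
 * Sysctl handler streaming a uma_stream_header followed by a
 * uma_type_header and per-CPU statistics for every zone; userland memory
 * statistics tools (e.g. vmstat -z) consume this stream.
 */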
4224 static int
4225 sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
4226 {
4227 	struct uma_stream_header ush;
4228 	struct uma_type_header uth;
4229 	struct uma_percpu_stat *ups;
4230 	struct sbuf sbuf;
4231 	uma_keg_t kz;
4232 	uma_zone_t z;
4233 	int count, error, i;
4234 
4235 	error = sysctl_wire_old_buffer(req, 0);
4236 	if (error != 0)
4237 		return (error);
4238 	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
4239 	sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL);
4240 	ups = malloc((mp_maxid + 1) * sizeof(*ups), M_TEMP, M_WAITOK);
4241 
4242 	count = 0;
4243 	rw_rlock(&uma_rwlock);
4244 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
4245 		LIST_FOREACH(z, &kz->uk_zones, uz_link)
4246 			count++;
4247 	}
4248 
4249 	LIST_FOREACH(z, &uma_cachezones, uz_link)
4250 		count++;
4251 
4252 	/*
4253 	 * Insert stream header.
4254 	 */
4255 	bzero(&ush, sizeof(ush));
4256 	ush.ush_version = UMA_STREAM_VERSION;
4257 	ush.ush_maxcpus = (mp_maxid + 1);
4258 	ush.ush_count = count;
4259 	(void)sbuf_bcat(&sbuf, &ush, sizeof(ush));
4260 
4261 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
4262 		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
4263 			bzero(&uth, sizeof(uth));
4264 			ZONE_LOCK(z);
4265 			strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
4266 			uth.uth_align = kz->uk_align;
4267 			uth.uth_size = kz->uk_size;
4268 			uth.uth_rsize = kz->uk_rsize;
4269 			if (z->uz_max_items > 0)
4270 				uth.uth_pages = (z->uz_items / kz->uk_ipers) *
4271 					kz->uk_ppera;
4272 			else
4273 				uth.uth_pages = kz->uk_pages;
4274 			uth.uth_maxpages = (z->uz_max_items / kz->uk_ipers) *
4275 			    kz->uk_ppera;
4276 			uth.uth_limit = z->uz_max_items;
4277 			uth.uth_keg_free = z->uz_keg->uk_free;
4278 
4279 			/*
4280 			 * A zone is secondary if it is not the first entry
4281 			 * on the keg's zone list.
4282 			 */
4283 			if ((z->uz_flags & UMA_ZONE_SECONDARY) &&
4284 			    (LIST_FIRST(&kz->uk_zones) != z))
4285 				uth.uth_zone_flags = UTH_ZONE_SECONDARY;
4286 			uma_vm_zone_stats(&uth, z, &sbuf, ups,
4287 			    kz->uk_flags & UMA_ZFLAG_INTERNAL);
4288 			ZONE_UNLOCK(z);
4289 			(void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
4290 			for (i = 0; i < mp_maxid + 1; i++)
4291 				(void)sbuf_bcat(&sbuf, &ups[i], sizeof(ups[i]));
4292 		}
4293 	}
4294 	LIST_FOREACH(z, &uma_cachezones, uz_link) {
4295 		bzero(&uth, sizeof(uth));
4296 		ZONE_LOCK(z);
4297 		strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
4298 		uth.uth_size = z->uz_size;
4299 		uma_vm_zone_stats(&uth, z, &sbuf, ups, false);
4300 		ZONE_UNLOCK(z);
4301 		(void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
4302 		for (i = 0; i < mp_maxid + 1; i++)
4303 			(void)sbuf_bcat(&sbuf, &ups[i], sizeof(ups[i]));
4304 	}
4305 
4306 	rw_runlock(&uma_rwlock);
4307 	error = sbuf_finish(&sbuf);
4308 	sbuf_delete(&sbuf);
4309 	free(ups, M_TEMP);
4310 	return (error);
4311 }
4312 
4313 int
4314 sysctl_handle_uma_zone_max(SYSCTL_HANDLER_ARGS)
4315 {
4316 	uma_zone_t zone = *(uma_zone_t *)arg1;
4317 	int error, max;
4318 
4319 	max = uma_zone_get_max(zone);
4320 	error = sysctl_handle_int(oidp, &max, 0, req);
4321 	if (error || !req->newptr)
4322 		return (error);
4323 
4324 	uma_zone_set_max(zone, max);
4325 
4326 	return (0);
4327 }
4328 
4329 int
4330 sysctl_handle_uma_zone_cur(SYSCTL_HANDLER_ARGS)
4331 {
4332 	uma_zone_t zone;
4333 	int cur;
4334 
4335 	/*
4336 	 * Some callers want to add sysctls for global zones that
4337 	 * may not yet exist, so they pass a pointer to a pointer.
4338 	 */
4339 	if (arg2 == 0)
4340 		zone = *(uma_zone_t *)arg1;
4341 	else
4342 		zone = arg1;
4343 	cur = uma_zone_get_cur(zone);
4344 	return (sysctl_handle_int(oidp, &cur, 0, req));
4345 }
4346 
4347 static int
4348 sysctl_handle_uma_zone_allocs(SYSCTL_HANDLER_ARGS)
4349 {
4350 	uma_zone_t zone = arg1;
4351 	uint64_t cur;
4352 
4353 	cur = uma_zone_get_allocs(zone);
4354 	return (sysctl_handle_64(oidp, &cur, 0, req));
4355 }
4356 
4357 static int
4358 sysctl_handle_uma_zone_frees(SYSCTL_HANDLER_ARGS)
4359 {
4360 	uma_zone_t zone = arg1;
4361 	uint64_t cur;
4362 
4363 	cur = uma_zone_get_frees(zone);
4364 	return (sysctl_handle_64(oidp, &cur, 0, req));
4365 }
4366 
4367 #ifdef INVARIANTS
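/*
 * Look up the slab backing an arbitrary item for the INVARIANTS checks
 * below.  Returns NULL when the zone has no keg to search (cache zones).
 */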
4368 static uma_slab_t
4369 uma_dbg_getslab(uma_zone_t zone, void *item)
4370 {
4371 	uma_slab_t slab;
4372 	uma_keg_t keg;
4373 	uint8_t *mem;
4374 
4375 	mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
4376 	if (zone->uz_flags & UMA_ZONE_VTOSLAB) {
4377 		slab = vtoslab((vm_offset_t)mem);
4378 	} else {
4379 		/*
4380 		 * It is safe to return the slab here even though the
4381 		 * zone is unlocked because the item's allocation state
4382 		 * essentially holds a reference.
4383 		 */
4384 		if (zone->uz_lockptr == &zone->uz_lock)
4385 			return (NULL);
4386 		ZONE_LOCK(zone);
4387 		keg = zone->uz_keg;
4388 		if (keg->uk_flags & UMA_ZONE_HASH)
4389 			slab = hash_sfind(&keg->uk_hash, mem);
4390 		else
4391 			slab = (uma_slab_t)(mem + keg->uk_pgoff);
4392 		ZONE_UNLOCK(zone);
4393 	}
4394 
4395 	return (slab);
4396 }
4397 
4398 static bool
4399 uma_dbg_zskip(uma_zone_t zone, void *mem)
4400 {
4401 
4402 	if (zone->uz_lockptr == &zone->uz_lock)
4403 		return (true);
4404 
4405 	return (uma_dbg_kskip(zone->uz_keg, mem));
4406 }
4407 
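/*
 * Decide whether the debugging audit should be skipped for this item.
 * Only every dbg_divisor'th item is checked to bound the overhead: a
 * divisor of 0 disables auditing entirely and 1 audits every item.
 */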
4408 static bool
4409 uma_dbg_kskip(uma_keg_t keg, void *mem)
4410 {
4411 	uintptr_t idx;
4412 
4413 	if (dbg_divisor == 0)
4414 		return (true);
4415 
4416 	if (dbg_divisor == 1)
4417 		return (false);
4418 
4419 	idx = (uintptr_t)mem >> PAGE_SHIFT;
4420 	if (keg->uk_ipers > 1) {
4421 		idx *= keg->uk_ipers;
4422 		idx += ((uintptr_t)mem & PAGE_MASK) / keg->uk_rsize;
4423 	}
4424 
4425 	if ((idx / dbg_divisor) * dbg_divisor != idx) {
4426 		counter_u64_add(uma_skip_cnt, 1);
4427 		return (true);
4428 	}
4429 	counter_u64_add(uma_dbg_cnt, 1);
4430 
4431 	return (false);
4432 }
4433 
4434 /*
4435  * Set up the slab's freei data such that uma_dbg_free can function.
4436  *
4437  */
4438 static void
4439 uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item)
4440 {
4441 	uma_keg_t keg;
4442 	int freei;
4443 
4444 	if (slab == NULL) {
4445 		slab = uma_dbg_getslab(zone, item);
4446 		if (slab == NULL)
4447 			panic("uma: item %p did not belong to zone %s\n",
4448 			    item, zone->uz_name);
4449 	}
4450 	keg = zone->uz_keg;
4451 	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
4452 
4453 	if (BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree))
4454 		panic("Duplicate alloc of %p from zone %p(%s) slab %p(%d)\n",
4455 		    item, zone, zone->uz_name, slab, freei);
4456 	BIT_SET_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree);
4457 
4458 	return;
4459 }
4460 
4461 /*
4462  * Verifies freed addresses.  Checks for alignment, valid slab membership
4463  * and duplicate frees.
4464  *
4465  */
4466 static void
4467 uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
4468 {
4469 	uma_keg_t keg;
4470 	int freei;
4471 
4472 	if (slab == NULL) {
4473 		slab = uma_dbg_getslab(zone, item);
4474 		if (slab == NULL)
4475 			panic("uma: Freed item %p did not belong to zone %s\n",
4476 			    item, zone->uz_name);
4477 	}
4478 	keg = zone->uz_keg;
4479 	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
4480 
4481 	if (freei >= keg->uk_ipers)
4482 		panic("Invalid free of %p from zone %p(%s) slab %p(%d)\n",
4483 		    item, zone, zone->uz_name, slab, freei);
4484 
4485 	if (((freei * keg->uk_rsize) + slab->us_data) != item)
4486 		panic("Unaligned free of %p from zone %p(%s) slab %p(%d)\n",
4487 		    item, zone, zone->uz_name, slab, freei);
4488 
4489 	if (!BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree))
4490 		panic("Duplicate free of %p from zone %p(%s) slab %p(%d)\n",
4491 		    item, zone, zone->uz_name, slab, freei);
4492 
4493 	BIT_CLR_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree);
4494 }
4495 #endif /* INVARIANTS */
4496 
4497 #ifdef DDB
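/*
 * Gather the per-zone counters displayed by "show uma" and return the
 * approximate number of bytes attributed to the zone.
 */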
4498 static int64_t
4499 get_uma_stats(uma_keg_t kz, uma_zone_t z, uint64_t *allocs, uint64_t *used,
4500     uint64_t *sleeps, long *cachefree, uint64_t *xdomain)
4501 {
4502 	uint64_t frees;
4503 	int i;
4504 
4505 	if (kz->uk_flags & UMA_ZFLAG_INTERNAL) {
4506 		*allocs = counter_u64_fetch(z->uz_allocs);
4507 		frees = counter_u64_fetch(z->uz_frees);
4508 		*sleeps = z->uz_sleeps;
4509 		*cachefree = 0;
4510 		*xdomain = 0;
4511 	} else
4512 		uma_zone_sumstat(z, cachefree, allocs, &frees, sleeps,
4513 		    xdomain);
4514 	if (!((z->uz_flags & UMA_ZONE_SECONDARY) &&
4515 	    (LIST_FIRST(&kz->uk_zones) != z)))
4516 		*cachefree += kz->uk_free;
4517 	for (i = 0; i < vm_ndomains; i++)
4518 		*cachefree += z->uz_domain[i].uzd_nitems;
4519 	*used = *allocs - frees;
4520 	return (((int64_t)*used + *cachefree) * kz->uk_size);
4521 }
4522 
4523 DB_SHOW_COMMAND(uma, db_show_uma)
4524 {
4525 	const char *fmt_hdr, *fmt_entry;
4526 	uma_keg_t kz;
4527 	uma_zone_t z;
4528 	uint64_t allocs, used, sleeps, xdomain;
4529 	long cachefree;
4530 	/* variables for sorting */
4531 	uma_keg_t cur_keg;
4532 	uma_zone_t cur_zone, last_zone;
4533 	int64_t cur_size, last_size, size;
4534 	int ties;
4535 
4536 	/* /i option produces machine-parseable CSV output */
4537 	if (modif[0] == 'i') {
4538 		fmt_hdr = "%s,%s,%s,%s,%s,%s,%s,%s,%s\n";
4539 		fmt_entry = "\"%s\",%ju,%jd,%ld,%ju,%ju,%u,%jd,%ju\n";
4540 	} else {
4541 		fmt_hdr = "%18s %6s %7s %7s %11s %7s %7s %10s %8s\n";
4542 		fmt_entry = "%18s %6ju %7jd %7ld %11ju %7ju %7u %10jd %8ju\n";
4543 	}
4544 
4545 	db_printf(fmt_hdr, "Zone", "Size", "Used", "Free", "Requests",
4546 	    "Sleeps", "Bucket", "Total Mem", "XFree");
4547 
4548 	/* Sort the zones with largest size first. */
4549 	last_zone = NULL;
4550 	last_size = INT64_MAX;
4551 	for (;;) {
4552 		cur_zone = NULL;
4553 		cur_size = -1;
4554 		ties = 0;
4555 		LIST_FOREACH(kz, &uma_kegs, uk_link) {
4556 			LIST_FOREACH(z, &kz->uk_zones, uz_link) {
4557 				/*
4558 				 * In the case of size ties, print out zones
4559 				 * in the order they are encountered.  That is,
4560 				 * when we encounter the most recently output
4561 				 * zone, we have already printed all preceding
4562 				 * ties, and we must print all following ties.
4563 				 */
4564 				if (z == last_zone) {
4565 					ties = 1;
4566 					continue;
4567 				}
4568 				size = get_uma_stats(kz, z, &allocs, &used,
4569 				    &sleeps, &cachefree, &xdomain);
4570 				if (size > cur_size &&
4571 				    size < last_size + ties) {
4572 					cur_size = size;
4573 					cur_zone = z;
4574 					cur_keg = kz;
4575 				}
4576 			}
4577 		}
4578 		if (cur_zone == NULL)
4579 			break;
4580 
4581 		size = get_uma_stats(cur_keg, cur_zone, &allocs, &used,
4582 		    &sleeps, &cachefree, &xdomain);
4583 		db_printf(fmt_entry, cur_zone->uz_name,
4584 		    (uintmax_t)cur_keg->uk_size, (intmax_t)used, cachefree,
4585 		    (uintmax_t)allocs, (uintmax_t)sleeps,
4586 		    (unsigned)cur_zone->uz_bucket_size, (intmax_t)size,
4587 		    xdomain);
4588 
4589 		if (db_pager_quit)
4590 			return;
4591 		last_zone = cur_zone;
4592 		last_size = cur_size;
4593 	}
4594 }
4595 
4596 DB_SHOW_COMMAND(umacache, db_show_umacache)
4597 {
4598 	uma_zone_t z;
4599 	uint64_t allocs, frees;
4600 	long cachefree;
4601 	int i;
4602 
4603 	db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free",
4604 	    "Requests", "Bucket");
4605 	LIST_FOREACH(z, &uma_cachezones, uz_link) {
4606 		uma_zone_sumstat(z, &cachefree, &allocs, &frees, NULL, NULL);
4607 		for (i = 0; i < vm_ndomains; i++)
4608 			cachefree += z->uz_domain[i].uzd_nitems;
4609 		db_printf("%18s %8ju %8jd %8ld %12ju %8u\n",
4610 		    z->uz_name, (uintmax_t)z->uz_size,
4611 		    (intmax_t)(allocs - frees), cachefree,
4612 		    (uintmax_t)allocs, z->uz_bucket_size);
4613 		if (db_pager_quit)
4614 			return;
4615 	}
4616 }
4617 #endif	/* DDB */
4618