xref: /freebsd/sys/vm/uma_core.c (revision 3a3deb00a5e449c9478156b162dfa10ec82a2a3f)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2002-2019 Jeffrey Roberson <jeff@FreeBSD.org>
5  * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
6  * Copyright (c) 2004-2006 Robert N. M. Watson
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice unmodified, this list of conditions, and the following
14  *    disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /*
 * uma_core.c  Implementation of the Universal Memory Allocator
33  *
34  * This allocator is intended to replace the multitude of similar object caches
35  * in the standard FreeBSD kernel.  The intent is to be flexible as well as
36  * efficient.  A primary design goal is to return unused memory to the rest of
37  * the system.  This will make the system as a whole more flexible due to the
38  * ability to move memory to subsystems which most need it instead of leaving
39  * pools of reserved memory unused.
40  *
41  * The basic ideas stem from similar slab/zone based allocators whose algorithms
42  * are well known.
43  *
44  */
45 
46 /*
47  * TODO:
48  *	- Improve memory usage for large allocations
49  *	- Investigate cache size adjustments
50  */
51 
52 #include <sys/cdefs.h>
53 __FBSDID("$FreeBSD$");
54 
55 #include "opt_ddb.h"
56 #include "opt_param.h"
57 #include "opt_vm.h"
58 
59 #include <sys/param.h>
60 #include <sys/systm.h>
61 #include <sys/bitset.h>
62 #include <sys/domainset.h>
63 #include <sys/eventhandler.h>
64 #include <sys/kernel.h>
65 #include <sys/types.h>
66 #include <sys/limits.h>
67 #include <sys/queue.h>
68 #include <sys/malloc.h>
69 #include <sys/ktr.h>
70 #include <sys/lock.h>
71 #include <sys/sysctl.h>
72 #include <sys/mutex.h>
73 #include <sys/proc.h>
74 #include <sys/random.h>
75 #include <sys/rwlock.h>
76 #include <sys/sbuf.h>
77 #include <sys/sched.h>
78 #include <sys/smp.h>
79 #include <sys/taskqueue.h>
80 #include <sys/vmmeter.h>
81 
82 #include <vm/vm.h>
83 #include <vm/vm_domainset.h>
84 #include <vm/vm_object.h>
85 #include <vm/vm_page.h>
86 #include <vm/vm_pageout.h>
87 #include <vm/vm_param.h>
88 #include <vm/vm_phys.h>
89 #include <vm/vm_pagequeue.h>
90 #include <vm/vm_map.h>
91 #include <vm/vm_kern.h>
92 #include <vm/vm_extern.h>
93 #include <vm/uma.h>
94 #include <vm/uma_int.h>
95 #include <vm/uma_dbg.h>
96 
97 #include <ddb/ddb.h>
98 
99 #ifdef DEBUG_MEMGUARD
100 #include <vm/memguard.h>
101 #endif
102 
103 /*
 * These are the zones from which all other kegs and zones are spawned.
105  */
106 static uma_zone_t kegs;
107 static uma_zone_t zones;
108 
109 /* This is the zone from which all offpage uma_slab_ts are allocated. */
110 static uma_zone_t slabzone;
111 
112 /*
113  * The initial hash tables come out of this zone so they can be allocated
114  * prior to malloc coming up.
115  */
116 static uma_zone_t hashzone;
117 
118 /* The boot-time adjusted value for cache line alignment. */
119 int uma_align_cache = 64 - 1;
120 
121 static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");
122 static MALLOC_DEFINE(M_UMA, "UMA", "UMA Misc");
123 
124 /*
125  * Are we allowed to allocate buckets?
126  */
127 static int bucketdisable = 1;
128 
129 /* Linked list of all kegs in the system */
130 static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);
131 
132 /* Linked list of all cache-only zones in the system */
133 static LIST_HEAD(,uma_zone) uma_cachezones =
134     LIST_HEAD_INITIALIZER(uma_cachezones);
135 
136 /* This RW lock protects the keg list */
137 static struct rwlock_padalign __exclusive_cache_line uma_rwlock;
138 
139 /*
 * Pointer to, and count of, the pool of pages preallocated at
 * startup to bootstrap UMA.
142  */
143 static char *bootmem;
144 static int boot_pages;
145 
146 static struct sx uma_reclaim_lock;
147 
148 /*
149  * kmem soft limit, initialized by uma_set_limit().  Ensure that early
150  * allocations don't trigger a wakeup of the reclaim thread.
151  */
152 unsigned long uma_kmem_limit = LONG_MAX;
153 SYSCTL_ULONG(_vm, OID_AUTO, uma_kmem_limit, CTLFLAG_RD, &uma_kmem_limit, 0,
154     "UMA kernel memory soft limit");
155 unsigned long uma_kmem_total;
156 SYSCTL_ULONG(_vm, OID_AUTO, uma_kmem_total, CTLFLAG_RD, &uma_kmem_total, 0,
157     "UMA kernel memory usage");
158 
159 /* Is the VM done starting up? */
160 static enum { BOOT_COLD = 0, BOOT_STRAPPED, BOOT_PAGEALLOC, BOOT_BUCKETS,
161     BOOT_RUNNING } booted = BOOT_COLD;
162 
163 /*
164  * This is the handle used to schedule events that need to happen
165  * outside of the allocation fast path.
166  */
167 static struct callout uma_callout;
168 #define	UMA_TIMEOUT	20		/* Seconds for callout interval. */
169 
170 /*
171  * This structure is passed as the zone ctor arg so that I don't have to create
172  * a special allocation function just for zones.
173  */
174 struct uma_zctor_args {
175 	const char *name;
176 	size_t size;
177 	uma_ctor ctor;
178 	uma_dtor dtor;
179 	uma_init uminit;
180 	uma_fini fini;
181 	uma_import import;
182 	uma_release release;
183 	void *arg;
184 	uma_keg_t keg;
185 	int align;
186 	uint32_t flags;
187 };
188 
189 struct uma_kctor_args {
190 	uma_zone_t zone;
191 	size_t size;
192 	uma_init uminit;
193 	uma_fini fini;
194 	int align;
195 	uint32_t flags;
196 };
197 
198 struct uma_bucket_zone {
199 	uma_zone_t	ubz_zone;
200 	char		*ubz_name;
201 	int		ubz_entries;	/* Number of items it can hold. */
202 	int		ubz_maxsize;	/* Maximum allocation size per-item. */
203 };
204 
205 /*
 * Compute the actual number of bucket entries that fit, header included,
 * in a power-of-two-sized allocation for more efficient space utilization.
208  */
209 #define	BUCKET_SIZE(n)						\
210     (((sizeof(void *) * (n)) - sizeof(struct uma_bucket)) / sizeof(void *))
211 
212 #define	BUCKET_MAX	BUCKET_SIZE(256)
213 #define	BUCKET_MIN	BUCKET_SIZE(4)
214 
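/*
 * Illustrative note (header size assumed, not taken from uma_int.h):
 * BUCKET_SIZE(n) answers "how many item pointers fit if the whole bucket,
 * header included, occupies n pointer-sized words".  If, for example,
 * sizeof(struct uma_bucket) were 24 bytes on LP64, BUCKET_SIZE(16) would
 * be ((8 * 16) - 24) / 8 = 13 entries, and bucket_init() below would size
 * the backing allocation back up to 24 + 13 * 8 = 128 bytes.
 */
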
215 struct uma_bucket_zone bucket_zones[] = {
216 	{ NULL, "4 Bucket", BUCKET_SIZE(4), 4096 },
217 	{ NULL, "6 Bucket", BUCKET_SIZE(6), 3072 },
218 	{ NULL, "8 Bucket", BUCKET_SIZE(8), 2048 },
219 	{ NULL, "12 Bucket", BUCKET_SIZE(12), 1536 },
220 	{ NULL, "16 Bucket", BUCKET_SIZE(16), 1024 },
221 	{ NULL, "32 Bucket", BUCKET_SIZE(32), 512 },
222 	{ NULL, "64 Bucket", BUCKET_SIZE(64), 256 },
223 	{ NULL, "128 Bucket", BUCKET_SIZE(128), 128 },
224 	{ NULL, "256 Bucket", BUCKET_SIZE(256), 64 },
225 	{ NULL, NULL, 0}
226 };
227 
228 /*
229  * Flags and enumerations to be passed to internal functions.
230  */
231 enum zfreeskip {
232 	SKIP_NONE =	0,
233 	SKIP_CNT =	0x00000001,
234 	SKIP_DTOR =	0x00010000,
235 	SKIP_FINI =	0x00020000,
236 };
237 
/* Prototypes. */
239 
240 int	uma_startup_count(int);
241 void	uma_startup(void *, int);
242 void	uma_startup1(void);
243 void	uma_startup2(void);
244 
245 static void *noobj_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
246 static void *page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
247 static void *pcpu_page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
248 static void *startup_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
249 static void page_free(void *, vm_size_t, uint8_t);
250 static void pcpu_page_free(void *, vm_size_t, uint8_t);
251 static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int, int, int);
252 static void cache_drain(uma_zone_t);
253 static void bucket_drain(uma_zone_t, uma_bucket_t);
254 static void bucket_cache_reclaim(uma_zone_t zone, bool);
255 static int keg_ctor(void *, int, void *, int);
256 static void keg_dtor(void *, int, void *);
257 static int zone_ctor(void *, int, void *, int);
258 static void zone_dtor(void *, int, void *);
259 static int zero_init(void *, int, int);
260 static void keg_small_init(uma_keg_t keg);
261 static void keg_large_init(uma_keg_t keg);
262 static void zone_foreach(void (*zfunc)(uma_zone_t, void *), void *);
263 static void zone_timeout(uma_zone_t zone, void *);
264 static int hash_alloc(struct uma_hash *, u_int);
265 static int hash_expand(struct uma_hash *, struct uma_hash *);
266 static void hash_free(struct uma_hash *hash);
267 static void uma_timeout(void *);
268 static void uma_startup3(void);
269 static void *zone_alloc_item(uma_zone_t, void *, int, int);
270 static void *zone_alloc_item_locked(uma_zone_t, void *, int, int);
271 static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip);
272 static void bucket_enable(void);
273 static void bucket_init(void);
274 static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int);
275 static void bucket_free(uma_zone_t zone, uma_bucket_t, void *);
276 static void bucket_zone_drain(void);
277 static uma_bucket_t zone_alloc_bucket(uma_zone_t, void *, int, int);
278 static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
279 static void slab_free_item(uma_zone_t zone, uma_slab_t slab, void *item);
280 static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
281     uma_fini fini, int align, uint32_t flags);
282 static int zone_import(void *, void **, int, int, int);
283 static void zone_release(void *, void **, int);
284 static void uma_zero_item(void *, uma_zone_t);
285 static bool cache_alloc(uma_zone_t, uma_cache_t, void *, int);
286 static bool cache_free(uma_zone_t, uma_cache_t, void *, void *, int);
287 
288 static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
289 static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);
290 static int sysctl_handle_uma_zone_allocs(SYSCTL_HANDLER_ARGS);
291 static int sysctl_handle_uma_zone_frees(SYSCTL_HANDLER_ARGS);
292 static int sysctl_handle_uma_zone_flags(SYSCTL_HANDLER_ARGS);
293 
294 #ifdef INVARIANTS
295 static bool uma_dbg_kskip(uma_keg_t keg, void *mem);
296 static bool uma_dbg_zskip(uma_zone_t zone, void *mem);
297 static void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item);
298 static void uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item);
299 
300 static SYSCTL_NODE(_vm, OID_AUTO, debug, CTLFLAG_RD, 0,
301     "Memory allocation debugging");
302 
303 static u_int dbg_divisor = 1;
304 SYSCTL_UINT(_vm_debug, OID_AUTO, divisor,
305     CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &dbg_divisor, 0,
306     "Debug & thrash every this item in memory allocator");
307 
308 static counter_u64_t uma_dbg_cnt = EARLY_COUNTER;
309 static counter_u64_t uma_skip_cnt = EARLY_COUNTER;
310 SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, trashed, CTLFLAG_RD,
311     &uma_dbg_cnt, "memory items debugged");
312 SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, skipped, CTLFLAG_RD,
313     &uma_skip_cnt, "memory items skipped, not debugged");
314 #endif
315 
316 SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);
317 
318 SYSCTL_NODE(_vm, OID_AUTO, uma, CTLFLAG_RW, 0, "Universal Memory Allocator");
319 
320 SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLTYPE_INT,
321     0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones");
322 
323 SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
324     0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");
325 
326 static int zone_warnings = 1;
327 SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RWTUN, &zone_warnings, 0,
328     "Warn when UMA zones becomes full");
329 
330 /*
331  * This routine checks to see whether or not it's safe to enable buckets.
332  */
333 static void
334 bucket_enable(void)
335 {
336 	bucketdisable = vm_page_count_min();
337 }
338 
339 /*
340  * Initialize bucket_zones, the array of zones of buckets of various sizes.
341  *
342  * For each zone, calculate the memory required for each bucket, consisting
343  * of the header and an array of pointers.
344  */
345 static void
346 bucket_init(void)
347 {
348 	struct uma_bucket_zone *ubz;
349 	int size;
350 
351 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) {
352 		size = roundup(sizeof(struct uma_bucket), sizeof(void *));
353 		size += sizeof(void *) * ubz->ubz_entries;
354 		ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
355 		    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
356 		    UMA_ZONE_MTXCLASS | UMA_ZFLAG_BUCKET | UMA_ZONE_NUMA);
357 	}
358 }
359 
360 /*
361  * Given a desired number of entries for a bucket, return the zone from which
362  * to allocate the bucket.
363  */
364 static struct uma_bucket_zone *
365 bucket_zone_lookup(int entries)
366 {
367 	struct uma_bucket_zone *ubz;
368 
369 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
370 		if (ubz->ubz_entries >= entries)
371 			return (ubz);
372 	ubz--;
373 	return (ubz);
374 }
375 
376 static struct uma_bucket_zone *
377 bucket_zone_max(uma_zone_t zone, int nitems)
378 {
379 	struct uma_bucket_zone *ubz;
380 	int bpcpu;
381 
382 	bpcpu = 2;
383 #ifdef UMA_XDOMAIN
384 	if ((zone->uz_flags & UMA_ZONE_NUMA) != 0)
385 		/* Count the cross-domain bucket. */
386 		bpcpu++;
387 #endif
388 
389 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
390 		if (ubz->ubz_entries * bpcpu * mp_ncpus > nitems)
391 			break;
392 	if (ubz == &bucket_zones[0])
393 		ubz = NULL;
394 	else
395 		ubz--;
396 	return (ubz);
397 }
398 
399 static int
400 bucket_select(int size)
401 {
402 	struct uma_bucket_zone *ubz;
403 
404 	ubz = &bucket_zones[0];
405 	if (size > ubz->ubz_maxsize)
406 		return MAX((ubz->ubz_maxsize * ubz->ubz_entries) / size, 1);
407 
408 	for (; ubz->ubz_entries != 0; ubz++)
409 		if (ubz->ubz_maxsize < size)
410 			break;
411 	ubz--;
412 	return (ubz->ubz_entries);
413 }
414 
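/*
 * Worked example for bucket_select() with the table above: a request for
 * 600-byte items walks until the first zone whose ubz_maxsize is below
 * 600 ("32 Bucket", maxsize 512), steps back to "16 Bucket" (maxsize
 * 1024), and returns its entry count.  An oversized request takes the
 * early return instead: bucket_select(8192) yields
 * MAX((4096 * BUCKET_SIZE(4)) / 8192, 1), scaling the bucket down in
 * proportion to the item size.
 */
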
415 static uma_bucket_t
416 bucket_alloc(uma_zone_t zone, void *udata, int flags)
417 {
418 	struct uma_bucket_zone *ubz;
419 	uma_bucket_t bucket;
420 
421 	/*
	 * This is to stop us from allocating per-CPU buckets while we're
423 	 * running out of vm.boot_pages.  Otherwise, we would exhaust the
424 	 * boot pages.  This also prevents us from allocating buckets in
425 	 * low memory situations.
426 	 */
427 	if (bucketdisable)
428 		return (NULL);
429 	/*
430 	 * To limit bucket recursion we store the original zone flags
431 	 * in a cookie passed via zalloc_arg/zfree_arg.  This allows the
432 	 * NOVM flag to persist even through deep recursions.  We also
433 	 * store ZFLAG_BUCKET once we have recursed attempting to allocate
434 	 * a bucket for a bucket zone so we do not allow infinite bucket
435 	 * recursion.  This cookie will even persist to frees of unused
436 	 * buckets via the allocation path or bucket allocations in the
437 	 * free path.
438 	 */
439 	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
440 		udata = (void *)(uintptr_t)zone->uz_flags;
441 	else {
442 		if ((uintptr_t)udata & UMA_ZFLAG_BUCKET)
443 			return (NULL);
444 		udata = (void *)((uintptr_t)udata | UMA_ZFLAG_BUCKET);
445 	}
446 	if ((uintptr_t)udata & UMA_ZFLAG_CACHEONLY)
447 		flags |= M_NOVM;
448 	ubz = bucket_zone_lookup(zone->uz_bucket_size);
449 	if (ubz->ubz_zone == zone && (ubz + 1)->ubz_entries != 0)
450 		ubz++;
451 	bucket = uma_zalloc_arg(ubz->ubz_zone, udata, flags);
452 	if (bucket) {
453 #ifdef INVARIANTS
454 		bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
455 #endif
456 		bucket->ub_cnt = 0;
457 		bucket->ub_entries = ubz->ubz_entries;
458 	}
459 
460 	return (bucket);
461 }
462 
463 static void
464 bucket_free(uma_zone_t zone, uma_bucket_t bucket, void *udata)
465 {
466 	struct uma_bucket_zone *ubz;
467 
468 	KASSERT(bucket->ub_cnt == 0,
469 	    ("bucket_free: Freeing a non free bucket."));
470 	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
471 		udata = (void *)(uintptr_t)zone->uz_flags;
472 	ubz = bucket_zone_lookup(bucket->ub_entries);
473 	uma_zfree_arg(ubz->ubz_zone, bucket, udata);
474 }
475 
476 static void
477 bucket_zone_drain(void)
478 {
479 	struct uma_bucket_zone *ubz;
480 
481 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
482 		uma_zone_reclaim(ubz->ubz_zone, UMA_RECLAIM_DRAIN);
483 }
484 
485 /*
486  * Attempt to satisfy an allocation by retrieving a full bucket from one of the
487  * zone's caches.
488  */
489 static uma_bucket_t
490 zone_fetch_bucket(uma_zone_t zone, uma_zone_domain_t zdom)
491 {
492 	uma_bucket_t bucket;
493 
494 	ZONE_LOCK_ASSERT(zone);
495 
496 	if ((bucket = TAILQ_FIRST(&zdom->uzd_buckets)) != NULL) {
497 		MPASS(zdom->uzd_nitems >= bucket->ub_cnt);
498 		TAILQ_REMOVE(&zdom->uzd_buckets, bucket, ub_link);
499 		zdom->uzd_nitems -= bucket->ub_cnt;
500 		if (zdom->uzd_imin > zdom->uzd_nitems)
501 			zdom->uzd_imin = zdom->uzd_nitems;
502 		zone->uz_bkt_count -= bucket->ub_cnt;
503 	}
504 	return (bucket);
505 }
506 
507 /*
508  * Insert a full bucket into the specified cache.  The "ws" parameter indicates
509  * whether the bucket's contents should be counted as part of the zone's working
510  * set.
511  */
512 static void
513 zone_put_bucket(uma_zone_t zone, uma_zone_domain_t zdom, uma_bucket_t bucket,
514     const bool ws)
515 {
516 
517 	ZONE_LOCK_ASSERT(zone);
518 	KASSERT(!ws || zone->uz_bkt_count < zone->uz_bkt_max,
519 	    ("%s: zone %p overflow", __func__, zone));
520 
521 	if (ws)
522 		TAILQ_INSERT_HEAD(&zdom->uzd_buckets, bucket, ub_link);
523 	else
524 		TAILQ_INSERT_TAIL(&zdom->uzd_buckets, bucket, ub_link);
525 	zdom->uzd_nitems += bucket->ub_cnt;
526 	if (ws && zdom->uzd_imax < zdom->uzd_nitems)
527 		zdom->uzd_imax = zdom->uzd_nitems;
528 	zone->uz_bkt_count += bucket->ub_cnt;
529 }
530 
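/*
 * Note on the head/tail split in zone_put_bucket() above: working-set
 * buckets are inserted at the head so zone_fetch_bucket() hands out the
 * most recently used (cache-warm) items first, while non-working-set
 * buckets go to the tail, which is where bucket_cache_reclaim() harvests
 * from when trimming the cache.
 */
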
531 static void
532 zone_log_warning(uma_zone_t zone)
533 {
534 	static const struct timeval warninterval = { 300, 0 };
535 
536 	if (!zone_warnings || zone->uz_warning == NULL)
537 		return;
538 
539 	if (ratecheck(&zone->uz_ratecheck, &warninterval))
540 		printf("[zone: %s] %s\n", zone->uz_name, zone->uz_warning);
541 }
542 
543 static inline void
544 zone_maxaction(uma_zone_t zone)
545 {
546 
547 	if (zone->uz_maxaction.ta_func != NULL)
548 		taskqueue_enqueue(taskqueue_thread, &zone->uz_maxaction);
549 }
550 
551 /*
 * Routine called by the callout to fire off time-interval-based
 * calculations (stats, hash size, etc.).
554  *
555  * Arguments:
556  *	arg   Unused
557  *
558  * Returns:
559  *	Nothing
560  */
561 static void
562 uma_timeout(void *unused)
563 {
564 	bucket_enable();
565 	zone_foreach(zone_timeout, NULL);
566 
567 	/* Reschedule this event */
568 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
569 }
570 
571 /*
572  * Update the working set size estimate for the zone's bucket cache.
573  * The constants chosen here are somewhat arbitrary.  With an update period of
574  * 20s (UMA_TIMEOUT), this estimate is dominated by zone activity over the
575  * last 100s.
576  */
577 static void
578 zone_domain_update_wss(uma_zone_domain_t zdom)
579 {
580 	long wss;
581 
582 	MPASS(zdom->uzd_imax >= zdom->uzd_imin);
583 	wss = zdom->uzd_imax - zdom->uzd_imin;
584 	zdom->uzd_imax = zdom->uzd_imin = zdom->uzd_nitems;
585 	zdom->uzd_wss = (4 * wss + zdom->uzd_wss) / 5;
586 }
587 
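/*
 * Worked example: if an interval saw uzd_imax = 100 and uzd_imin = 20,
 * the instantaneous working set is 100 - 20 = 80 items.  With a previous
 * uzd_wss of 30, the new estimate is (4 * 80 + 30) / 5 = 70, i.e. an
 * exponentially weighted average leaning 4:1 toward the latest interval.
 */
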
588 /*
 * Routine to perform timeout-driven calculations.  This expands the keg
 * hash tables and updates each zone's working set size estimate.
 *
 * Returns nothing.
593  */
594 static void
595 zone_timeout(uma_zone_t zone, void *unused)
596 {
597 	uma_keg_t keg;
598 	u_int slabs;
599 
600 	if ((zone->uz_flags & UMA_ZONE_HASH) == 0)
601 		goto update_wss;
602 
603 	keg = zone->uz_keg;
604 	KEG_LOCK(keg);
605 	/*
606 	 * Expand the keg hash table.
607 	 *
608 	 * This is done if the number of slabs is larger than the hash size.
	 * What I'm trying to do here is completely eliminate collisions.  This
610 	 * may be a little aggressive.  Should I allow for two collisions max?
611 	 */
612 	if (keg->uk_flags & UMA_ZONE_HASH &&
613 	    (slabs = keg->uk_pages / keg->uk_ppera) >
614 	     keg->uk_hash.uh_hashsize) {
615 		struct uma_hash newhash;
616 		struct uma_hash oldhash;
617 		int ret;
618 
619 		/*
620 		 * This is so involved because allocating and freeing
621 		 * while the keg lock is held will lead to deadlock.
622 		 * I have to do everything in stages and check for
623 		 * races.
624 		 */
625 		KEG_UNLOCK(keg);
626 		ret = hash_alloc(&newhash, 1 << fls(slabs));
627 		KEG_LOCK(keg);
628 		if (ret) {
629 			if (hash_expand(&keg->uk_hash, &newhash)) {
630 				oldhash = keg->uk_hash;
631 				keg->uk_hash = newhash;
632 			} else
633 				oldhash = newhash;
634 
635 			KEG_UNLOCK(keg);
636 			hash_free(&oldhash);
637 			return;
638 		}
639 	}
640 	KEG_UNLOCK(keg);
641 
642 update_wss:
643 	ZONE_LOCK(zone);
644 	for (int i = 0; i < vm_ndomains; i++)
645 		zone_domain_update_wss(&zone->uz_domain[i]);
646 	ZONE_UNLOCK(zone);
647 }
648 
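/*
 * For instance, a HASH keg holding 70 slabs with a 64-bucket table
 * exceeds one slab per bucket, so zone_timeout() above requests
 * hash_alloc(&newhash, 1 << fls(70)), i.e. a 128-bucket table, and swaps
 * it in under the keg lock via hash_expand().
 */
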
649 /*
 * Allocate and zero fill a hash table of the requested size from the
 * appropriate backing store.
 *
 * Arguments:
 *	hash  The new hash structure to initialize
 *	size  The requested number of hash buckets; must be a power of 2
655  *
656  * Returns:
657  *	1 on success and 0 on failure.
658  */
659 static int
660 hash_alloc(struct uma_hash *hash, u_int size)
661 {
662 	size_t alloc;
663 
664 	KASSERT(powerof2(size), ("hash size must be power of 2"));
665 	if (size > UMA_HASH_SIZE_INIT)  {
666 		hash->uh_hashsize = size;
667 		alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
668 		hash->uh_slab_hash = malloc(alloc, M_UMAHASH, M_NOWAIT);
669 	} else {
670 		alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
671 		hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
672 		    UMA_ANYDOMAIN, M_WAITOK);
673 		hash->uh_hashsize = UMA_HASH_SIZE_INIT;
674 	}
675 	if (hash->uh_slab_hash) {
676 		bzero(hash->uh_slab_hash, alloc);
677 		hash->uh_hashmask = hash->uh_hashsize - 1;
678 		return (1);
679 	}
680 
681 	return (0);
682 }
683 
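/*
 * A sketch of the two backing paths above: the initial
 * UMA_HASH_SIZE_INIT-bucket table comes from hashzone, which works before
 * malloc(9) is up, while every later, larger table comes from
 * malloc(M_UMAHASH).  hash_free() keys off uh_hashsize to return each
 * table to the store it came from.
 */
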
684 /*
685  * Expands the hash table for HASH zones.  This is done from zone_timeout
686  * to reduce collisions.  This must not be done in the regular allocation
687  * path, otherwise, we can recurse on the vm while allocating pages.
688  *
689  * Arguments:
690  *	oldhash  The hash you want to expand
691  *	newhash  The hash structure for the new table
692  *
693  * Returns:
 *	1 on success and 0 on failure.
 */
698 static int
699 hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
700 {
701 	uma_hash_slab_t slab;
702 	u_int hval;
703 	u_int idx;
704 
705 	if (!newhash->uh_slab_hash)
706 		return (0);
707 
708 	if (oldhash->uh_hashsize >= newhash->uh_hashsize)
709 		return (0);
710 
711 	/*
712 	 * I need to investigate hash algorithms for resizing without a
713 	 * full rehash.
714 	 */
715 
716 	for (idx = 0; idx < oldhash->uh_hashsize; idx++)
717 		while (!LIST_EMPTY(&oldhash->uh_slab_hash[idx])) {
718 			slab = LIST_FIRST(&oldhash->uh_slab_hash[idx]);
719 			LIST_REMOVE(slab, uhs_hlink);
720 			hval = UMA_HASH(newhash, slab->uhs_data);
721 			LIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
722 			    slab, uhs_hlink);
723 		}
724 
725 	return (1);
726 }
727 
728 /*
729  * Free the hash bucket to the appropriate backing store.
730  *
731  * Arguments:
 *	hash  The hash whose backing store is being freed
734  *
735  * Returns:
736  *	Nothing
737  */
738 static void
739 hash_free(struct uma_hash *hash)
740 {
741 	if (hash->uh_slab_hash == NULL)
742 		return;
743 	if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
744 		zone_free_item(hashzone, hash->uh_slab_hash, NULL, SKIP_NONE);
745 	else
746 		free(hash->uh_slab_hash, M_UMAHASH);
747 }
748 
749 /*
750  * Frees all outstanding items in a bucket
751  *
752  * Arguments:
753  *	zone   The zone to free to, must be unlocked.
754  *	bucket The free/alloc bucket with items, cpu queue must be locked.
755  *
756  * Returns:
757  *	Nothing
758  */
759 
760 static void
761 bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
762 {
763 	int i;
764 
765 	if (bucket == NULL)
766 		return;
767 
768 	if (zone->uz_fini)
769 		for (i = 0; i < bucket->ub_cnt; i++)
770 			zone->uz_fini(bucket->ub_bucket[i], zone->uz_size);
771 	zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt);
772 	if (zone->uz_max_items > 0) {
773 		ZONE_LOCK(zone);
774 		zone->uz_items -= bucket->ub_cnt;
775 		if (zone->uz_sleepers && zone->uz_items < zone->uz_max_items)
776 			wakeup_one(zone);
777 		ZONE_UNLOCK(zone);
778 	}
779 	bucket->ub_cnt = 0;
780 }
781 
782 /*
 * Drains the per-CPU caches for a zone.
784  *
 * NOTE: This may only be called while the zone is being torn down, and not
786  * during normal operation.  This is necessary in order that we do not have
787  * to migrate CPUs to drain the per-CPU caches.
788  *
789  * Arguments:
790  *	zone     The zone to drain, must be unlocked.
791  *
792  * Returns:
793  *	Nothing
794  */
795 static void
796 cache_drain(uma_zone_t zone)
797 {
798 	uma_cache_t cache;
799 	int cpu;
800 
801 	/*
802 	 * XXX: It is safe to not lock the per-CPU caches, because we're
803 	 * tearing down the zone anyway.  I.e., there will be no further use
804 	 * of the caches at this point.
805 	 *
	 * XXX: It would be good to be able to assert that the zone is being
807 	 * torn down to prevent improper use of cache_drain().
808 	 *
809 	 * XXX: We lock the zone before passing into bucket_cache_reclaim() as
810 	 * it is used elsewhere.  Should the tear-down path be made special
811 	 * there in some form?
812 	 */
813 	CPU_FOREACH(cpu) {
814 		cache = &zone->uz_cpu[cpu];
815 		bucket_drain(zone, cache->uc_allocbucket);
816 		if (cache->uc_allocbucket != NULL)
817 			bucket_free(zone, cache->uc_allocbucket, NULL);
818 		cache->uc_allocbucket = NULL;
819 		bucket_drain(zone, cache->uc_freebucket);
820 		if (cache->uc_freebucket != NULL)
821 			bucket_free(zone, cache->uc_freebucket, NULL);
822 		cache->uc_freebucket = NULL;
823 		bucket_drain(zone, cache->uc_crossbucket);
824 		if (cache->uc_crossbucket != NULL)
825 			bucket_free(zone, cache->uc_crossbucket, NULL);
826 		cache->uc_crossbucket = NULL;
827 	}
828 	ZONE_LOCK(zone);
829 	bucket_cache_reclaim(zone, true);
830 	ZONE_UNLOCK(zone);
831 }
832 
833 static void
834 cache_shrink(uma_zone_t zone, void *unused)
835 {
836 
837 	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
838 		return;
839 
840 	ZONE_LOCK(zone);
841 	zone->uz_bucket_size =
842 	    (zone->uz_bucket_size_min + zone->uz_bucket_size) / 2;
843 	ZONE_UNLOCK(zone);
844 }
845 
846 static void
847 cache_drain_safe_cpu(uma_zone_t zone, void *unused)
848 {
849 	uma_cache_t cache;
850 	uma_bucket_t b1, b2, b3;
851 	int domain;
852 
853 	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
854 		return;
855 
856 	b1 = b2 = b3 = NULL;
857 	ZONE_LOCK(zone);
858 	critical_enter();
859 	if (zone->uz_flags & UMA_ZONE_NUMA)
860 		domain = PCPU_GET(domain);
861 	else
862 		domain = 0;
863 	cache = &zone->uz_cpu[curcpu];
864 	if (cache->uc_allocbucket) {
865 		if (cache->uc_allocbucket->ub_cnt != 0)
866 			zone_put_bucket(zone, &zone->uz_domain[domain],
867 			    cache->uc_allocbucket, false);
868 		else
869 			b1 = cache->uc_allocbucket;
870 		cache->uc_allocbucket = NULL;
871 	}
872 	if (cache->uc_freebucket) {
873 		if (cache->uc_freebucket->ub_cnt != 0)
874 			zone_put_bucket(zone, &zone->uz_domain[domain],
875 			    cache->uc_freebucket, false);
876 		else
877 			b2 = cache->uc_freebucket;
878 		cache->uc_freebucket = NULL;
879 	}
880 	b3 = cache->uc_crossbucket;
881 	cache->uc_crossbucket = NULL;
882 	critical_exit();
883 	ZONE_UNLOCK(zone);
884 	if (b1)
885 		bucket_free(zone, b1, NULL);
886 	if (b2)
887 		bucket_free(zone, b2, NULL);
888 	if (b3) {
889 		bucket_drain(zone, b3);
890 		bucket_free(zone, b3, NULL);
891 	}
892 }
893 
894 /*
 * Safely drain the per-CPU caches of a zone (or of all zones) into the
 * per-domain bucket caches.  This is an expensive call because it needs
 * to bind to all CPUs one by one and enter a critical section on each of
 * them in order to safely access their cache buckets.
 * The zone lock must not be held when calling this function.
900  */
901 static void
902 pcpu_cache_drain_safe(uma_zone_t zone)
903 {
904 	int cpu;
905 
906 	/*
	 * Polite bucket size shrinking was not enough, shrink aggressively.
908 	 */
909 	if (zone)
910 		cache_shrink(zone, NULL);
911 	else
912 		zone_foreach(cache_shrink, NULL);
913 
914 	CPU_FOREACH(cpu) {
915 		thread_lock(curthread);
916 		sched_bind(curthread, cpu);
917 		thread_unlock(curthread);
918 
919 		if (zone)
920 			cache_drain_safe_cpu(zone, NULL);
921 		else
922 			zone_foreach(cache_drain_safe_cpu, NULL);
923 	}
924 	thread_lock(curthread);
925 	sched_unbind(curthread);
926 	thread_unlock(curthread);
927 }
928 
929 /*
930  * Reclaim cached buckets from a zone.  All buckets are reclaimed if the caller
 * requested a drain, otherwise the per-domain caches are trimmed to their
 * estimated working set size.
933  */
934 static void
935 bucket_cache_reclaim(uma_zone_t zone, bool drain)
936 {
937 	uma_zone_domain_t zdom;
938 	uma_bucket_t bucket;
939 	long target, tofree;
940 	int i;
941 
942 	for (i = 0; i < vm_ndomains; i++) {
943 		zdom = &zone->uz_domain[i];
944 
945 		/*
946 		 * If we were asked to drain the zone, we are done only once
947 		 * this bucket cache is empty.  Otherwise, we reclaim items in
948 		 * excess of the zone's estimated working set size.  If the
949 		 * difference nitems - imin is larger than the WSS estimate,
950 		 * then the estimate will grow at the end of this interval and
951 		 * we ignore the historical average.
952 		 */
953 		target = drain ? 0 : lmax(zdom->uzd_wss, zdom->uzd_nitems -
954 		    zdom->uzd_imin);
955 		while (zdom->uzd_nitems > target) {
956 			bucket = TAILQ_LAST(&zdom->uzd_buckets, uma_bucketlist);
957 			if (bucket == NULL)
958 				break;
959 			tofree = bucket->ub_cnt;
960 			TAILQ_REMOVE(&zdom->uzd_buckets, bucket, ub_link);
961 			zdom->uzd_nitems -= tofree;
962 
963 			/*
964 			 * Shift the bounds of the current WSS interval to avoid
965 			 * perturbing the estimate.
966 			 */
967 			zdom->uzd_imax -= lmin(zdom->uzd_imax, tofree);
968 			zdom->uzd_imin -= lmin(zdom->uzd_imin, tofree);
969 
970 			ZONE_UNLOCK(zone);
971 			bucket_drain(zone, bucket);
972 			bucket_free(zone, bucket, NULL);
973 			ZONE_LOCK(zone);
974 		}
975 	}
976 
977 	/*
978 	 * Shrink the zone bucket size to ensure that the per-CPU caches
979 	 * don't grow too large.
980 	 */
981 	if (zone->uz_bucket_size > zone->uz_bucket_size_min)
982 		zone->uz_bucket_size--;
983 }
984 
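/*
 * Example of the trim target above: with uzd_wss = 50, uzd_nitems = 120
 * and uzd_imin = 40, nitems - imin = 80 exceeds the WSS estimate, so
 * target = lmax(50, 80) = 80 and buckets are freed from the tail until at
 * most 80 items remain cached for the domain.
 */
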
985 static void
986 keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
987 {
988 	uint8_t *mem;
989 	int i;
990 	uint8_t flags;
991 
992 	CTR4(KTR_UMA, "keg_free_slab keg %s(%p) slab %p, returning %d bytes",
993 	    keg->uk_name, keg, slab, PAGE_SIZE * keg->uk_ppera);
994 
995 	mem = slab_data(slab, keg);
996 	flags = slab->us_flags;
997 	i = start;
998 	if (keg->uk_fini != NULL) {
999 		for (i--; i > -1; i--)
1000 #ifdef INVARIANTS
1001 		/*
1002 		 * trash_fini implies that dtor was trash_dtor. trash_fini
1003 		 * would check that memory hasn't been modified since free,
1004 		 * which executed trash_dtor.
		 * That's why we need to run the uma_dbg_kskip() check here,
		 * even though we don't apply the skip check to other
		 * init/fini invocations.
1008 		 */
1009 		if (!uma_dbg_kskip(keg, slab_item(slab, keg, i)) ||
1010 		    keg->uk_fini != trash_fini)
1011 #endif
1012 			keg->uk_fini(slab_item(slab, keg, i), keg->uk_size);
1013 	}
1014 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
1015 		zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
1016 	keg->uk_freef(mem, PAGE_SIZE * keg->uk_ppera, flags);
1017 	uma_total_dec(PAGE_SIZE * keg->uk_ppera);
1018 }
1019 
1020 /*
1021  * Frees pages from a keg back to the system.  This is done on demand from
1022  * the pageout daemon.
1023  *
1024  * Returns nothing.
1025  */
1026 static void
1027 keg_drain(uma_keg_t keg)
1028 {
1029 	struct slabhead freeslabs = { 0 };
1030 	uma_domain_t dom;
1031 	uma_slab_t slab, tmp;
1032 	int i;
1033 
1034 	/*
1035 	 * We don't want to take pages from statically allocated kegs at this
	 * time.
1037 	 */
1038 	if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
1039 		return;
1040 
1041 	CTR3(KTR_UMA, "keg_drain %s(%p) free items: %u",
1042 	    keg->uk_name, keg, keg->uk_free);
1043 	KEG_LOCK(keg);
1044 	if (keg->uk_free == 0)
1045 		goto finished;
1046 
1047 	for (i = 0; i < vm_ndomains; i++) {
1048 		dom = &keg->uk_domain[i];
1049 		LIST_FOREACH_SAFE(slab, &dom->ud_free_slab, us_link, tmp) {
1050 			/* We have nowhere to free these to. */
1051 			if (slab->us_flags & UMA_SLAB_BOOT)
1052 				continue;
1053 
1054 			LIST_REMOVE(slab, us_link);
1055 			keg->uk_pages -= keg->uk_ppera;
1056 			keg->uk_free -= keg->uk_ipers;
1057 
1058 			if (keg->uk_flags & UMA_ZONE_HASH)
1059 				UMA_HASH_REMOVE(&keg->uk_hash, slab);
1060 
1061 			LIST_INSERT_HEAD(&freeslabs, slab, us_link);
1062 		}
1063 	}
1064 
1065 finished:
1066 	KEG_UNLOCK(keg);
1067 
1068 	while ((slab = LIST_FIRST(&freeslabs)) != NULL) {
1069 		LIST_REMOVE(slab, us_link);
1070 		keg_free_slab(keg, slab, keg->uk_ipers);
1071 	}
1072 }
1073 
1074 static void
1075 zone_reclaim(uma_zone_t zone, int waitok, bool drain)
1076 {
1077 
1078 	/*
1079 	 * Set draining to interlock with zone_dtor() so we can release our
1080 	 * locks as we go.  Only dtor() should do a WAITOK call since it
1081 	 * is the only call that knows the structure will still be available
1082 	 * when it wakes up.
1083 	 */
1084 	ZONE_LOCK(zone);
1085 	while (zone->uz_flags & UMA_ZFLAG_RECLAIMING) {
1086 		if (waitok == M_NOWAIT)
1087 			goto out;
1088 		msleep(zone, zone->uz_lockptr, PVM, "zonedrain", 1);
1089 	}
1090 	zone->uz_flags |= UMA_ZFLAG_RECLAIMING;
1091 	bucket_cache_reclaim(zone, drain);
1092 	ZONE_UNLOCK(zone);
1093 
1094 	/*
	 * The RECLAIMING flag protects us from being freed while
1096 	 * we're running.  Normally the uma_rwlock would protect us but we
1097 	 * must be able to release and acquire the right lock for each keg.
1098 	 */
1099 	if ((zone->uz_flags & UMA_ZFLAG_CACHE) == 0)
1100 		keg_drain(zone->uz_keg);
1101 	ZONE_LOCK(zone);
1102 	zone->uz_flags &= ~UMA_ZFLAG_RECLAIMING;
1103 	wakeup(zone);
1104 out:
1105 	ZONE_UNLOCK(zone);
1106 }
1107 
1108 static void
1109 zone_drain(uma_zone_t zone, void *unused)
1110 {
1111 
1112 	zone_reclaim(zone, M_NOWAIT, true);
1113 }
1114 
1115 static void
1116 zone_trim(uma_zone_t zone, void *unused)
1117 {
1118 
1119 	zone_reclaim(zone, M_NOWAIT, false);
1120 }
1121 
1122 /*
1123  * Allocate a new slab for a keg.  This does not insert the slab onto a list.
1124  * If the allocation was successful, the keg lock will be held upon return,
1125  * otherwise the keg will be left unlocked.
1126  *
1127  * Arguments:
1128  *	flags   Wait flags for the item initialization routine
1129  *	aflags  Wait flags for the slab allocation
1130  *
1131  * Returns:
1132  *	The slab that was allocated or NULL if there is no memory and the
1133  *	caller specified M_NOWAIT.
1134  */
1135 static uma_slab_t
1136 keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int domain, int flags,
1137     int aflags)
1138 {
1139 	uma_alloc allocf;
1140 	uma_slab_t slab;
1141 	unsigned long size;
1142 	uint8_t *mem;
1143 	uint8_t sflags;
1144 	int i;
1145 
1146 	KASSERT(domain >= 0 && domain < vm_ndomains,
1147 	    ("keg_alloc_slab: domain %d out of range", domain));
1148 	KEG_LOCK_ASSERT(keg);
1149 	MPASS(zone->uz_lockptr == &keg->uk_lock);
1150 
1151 	allocf = keg->uk_allocf;
1152 	KEG_UNLOCK(keg);
1153 
1154 	slab = NULL;
1155 	mem = NULL;
1156 	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
1157 		slab = zone_alloc_item(keg->uk_slabzone, NULL, domain, aflags);
1158 		if (slab == NULL)
1159 			goto out;
1160 	}
1161 
1162 	/*
1163 	 * This reproduces the old vm_zone behavior of zero filling pages the
1164 	 * first time they are added to a zone.
1165 	 *
1166 	 * Malloced items are zeroed in uma_zalloc.
1167 	 */
1168 
1169 	if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
1170 		aflags |= M_ZERO;
1171 	else
1172 		aflags &= ~M_ZERO;
1173 
1174 	if (keg->uk_flags & UMA_ZONE_NODUMP)
1175 		aflags |= M_NODUMP;
1176 
1177 	/* zone is passed for legacy reasons. */
1178 	size = keg->uk_ppera * PAGE_SIZE;
1179 	mem = allocf(zone, size, domain, &sflags, aflags);
1180 	if (mem == NULL) {
1181 		if (keg->uk_flags & UMA_ZONE_OFFPAGE)
1182 			zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
1183 		slab = NULL;
1184 		goto out;
1185 	}
1186 	uma_total_inc(size);
1187 
1188 	/* Point the slab into the allocated memory */
1189 	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
1190 		slab = (uma_slab_t )(mem + keg->uk_pgoff);
1191 	else
1192 		((uma_hash_slab_t)slab)->uhs_data = mem;
1193 
1194 	if (keg->uk_flags & UMA_ZONE_VTOSLAB)
1195 		for (i = 0; i < keg->uk_ppera; i++)
1196 			vsetzoneslab((vm_offset_t)mem + (i * PAGE_SIZE),
1197 			    zone, slab);
1198 
1199 	slab->us_freecount = keg->uk_ipers;
1200 	slab->us_flags = sflags;
1201 	slab->us_domain = domain;
1202 	BIT_FILL(keg->uk_ipers, &slab->us_free);
1203 #ifdef INVARIANTS
1204 	BIT_ZERO(SLAB_MAX_SETSIZE, &slab->us_debugfree);
1205 #endif
1206 
1207 	if (keg->uk_init != NULL) {
1208 		for (i = 0; i < keg->uk_ipers; i++)
1209 			if (keg->uk_init(slab_item(slab, keg, i),
1210 			    keg->uk_size, flags) != 0)
1211 				break;
1212 		if (i != keg->uk_ipers) {
1213 			keg_free_slab(keg, slab, i);
1214 			slab = NULL;
1215 			goto out;
1216 		}
1217 	}
1218 	KEG_LOCK(keg);
1219 
1220 	CTR3(KTR_UMA, "keg_alloc_slab: allocated slab %p for %s(%p)",
1221 	    slab, keg->uk_name, keg);
1222 
1223 	if (keg->uk_flags & UMA_ZONE_HASH)
1224 		UMA_HASH_INSERT(&keg->uk_hash, slab, mem);
1225 
1226 	keg->uk_pages += keg->uk_ppera;
1227 	keg->uk_free += keg->uk_ipers;
1228 
1229 out:
1230 	return (slab);
1231 }
1232 
1233 /*
1234  * This function is intended to be used early on in place of page_alloc() so
1235  * that we may use the boot time page cache to satisfy allocations before
1236  * the VM is ready.
1237  */
1238 static void *
1239 startup_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
1240     int wait)
1241 {
1242 	uma_keg_t keg;
1243 	void *mem;
1244 	int pages;
1245 
1246 	keg = zone->uz_keg;
1247 	/*
	 * If we are in BOOT_BUCKETS or higher, then switch to the real
	 * allocator.  Zones with page-sized slabs switch at BOOT_PAGEALLOC.
1250 	 */
1251 	switch (booted) {
1252 		case BOOT_COLD:
1253 		case BOOT_STRAPPED:
1254 			break;
1255 		case BOOT_PAGEALLOC:
1256 			if (keg->uk_ppera > 1)
1257 				break;
1258 		case BOOT_BUCKETS:
1259 		case BOOT_RUNNING:
1260 #ifdef UMA_MD_SMALL_ALLOC
1261 			keg->uk_allocf = (keg->uk_ppera > 1) ?
1262 			    page_alloc : uma_small_alloc;
1263 #else
1264 			keg->uk_allocf = page_alloc;
1265 #endif
1266 			return keg->uk_allocf(zone, bytes, domain, pflag, wait);
1267 	}
1268 
1269 	/*
1270 	 * Check our small startup cache to see if it has pages remaining.
1271 	 */
1272 	pages = howmany(bytes, PAGE_SIZE);
1273 	KASSERT(pages > 0, ("%s can't reserve 0 pages", __func__));
1274 	if (pages > boot_pages)
1275 		panic("UMA zone \"%s\": Increase vm.boot_pages", zone->uz_name);
1276 #ifdef DIAGNOSTIC
1277 	printf("%s from \"%s\", %d boot pages left\n", __func__, zone->uz_name,
1278 	    boot_pages);
1279 #endif
1280 	mem = bootmem;
1281 	boot_pages -= pages;
1282 	bootmem += pages * PAGE_SIZE;
1283 	*pflag = UMA_SLAB_BOOT;
1284 
1285 	return (mem);
1286 }
1287 
1288 /*
1289  * Allocates a number of pages from the system
1290  *
1291  * Arguments:
1292  *	bytes  The number of bytes requested
1293  *	wait  Shall we wait?
1294  *
1295  * Returns:
 *	A pointer to the allocated memory, or NULL if M_NOWAIT is
 *	set and the allocation failed.
1298  */
1299 static void *
1300 page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
1301     int wait)
1302 {
1303 	void *p;	/* Returned page */
1304 
1305 	*pflag = UMA_SLAB_KERNEL;
1306 	p = (void *)kmem_malloc_domainset(DOMAINSET_FIXED(domain), bytes, wait);
1307 
1308 	return (p);
1309 }
1310 
1311 static void *
1312 pcpu_page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
1313     int wait)
1314 {
1315 	struct pglist alloctail;
1316 	vm_offset_t addr, zkva;
1317 	int cpu, flags;
1318 	vm_page_t p, p_next;
1319 #ifdef NUMA
1320 	struct pcpu *pc;
1321 #endif
1322 
1323 	MPASS(bytes == (mp_maxid + 1) * PAGE_SIZE);
1324 
1325 	TAILQ_INIT(&alloctail);
1326 	flags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
1327 	    malloc2vm_flags(wait);
1328 	*pflag = UMA_SLAB_KERNEL;
1329 	for (cpu = 0; cpu <= mp_maxid; cpu++) {
1330 		if (CPU_ABSENT(cpu)) {
1331 			p = vm_page_alloc(NULL, 0, flags);
1332 		} else {
1333 #ifndef NUMA
1334 			p = vm_page_alloc(NULL, 0, flags);
1335 #else
1336 			pc = pcpu_find(cpu);
1337 			p = vm_page_alloc_domain(NULL, 0, pc->pc_domain, flags);
1338 			if (__predict_false(p == NULL))
1339 				p = vm_page_alloc(NULL, 0, flags);
1340 #endif
1341 		}
1342 		if (__predict_false(p == NULL))
1343 			goto fail;
1344 		TAILQ_INSERT_TAIL(&alloctail, p, listq);
1345 	}
1346 	if ((addr = kva_alloc(bytes)) == 0)
1347 		goto fail;
1348 	zkva = addr;
1349 	TAILQ_FOREACH(p, &alloctail, listq) {
1350 		pmap_qenter(zkva, &p, 1);
1351 		zkva += PAGE_SIZE;
1352 	}
1353 	return ((void*)addr);
1354 fail:
1355 	TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
1356 		vm_page_unwire_noq(p);
1357 		vm_page_free(p);
1358 	}
1359 	return (NULL);
1360 }
1361 
1362 /*
 * Allocates a number of pages not belonging to a VM object
1364  *
1365  * Arguments:
1366  *	bytes  The number of bytes requested
1367  *	wait   Shall we wait?
1368  *
1369  * Returns:
 *	A pointer to the allocated memory, or NULL if M_NOWAIT is
 *	set and the allocation failed.
1372  */
1373 static void *
1374 noobj_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags,
1375     int wait)
1376 {
1377 	TAILQ_HEAD(, vm_page) alloctail;
1378 	u_long npages;
1379 	vm_offset_t retkva, zkva;
1380 	vm_page_t p, p_next;
1381 	uma_keg_t keg;
1382 
1383 	TAILQ_INIT(&alloctail);
1384 	keg = zone->uz_keg;
1385 
1386 	npages = howmany(bytes, PAGE_SIZE);
1387 	while (npages > 0) {
1388 		p = vm_page_alloc_domain(NULL, 0, domain, VM_ALLOC_INTERRUPT |
1389 		    VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
1390 		    ((wait & M_WAITOK) != 0 ? VM_ALLOC_WAITOK :
1391 		    VM_ALLOC_NOWAIT));
1392 		if (p != NULL) {
1393 			/*
1394 			 * Since the page does not belong to an object, its
1395 			 * listq is unused.
1396 			 */
1397 			TAILQ_INSERT_TAIL(&alloctail, p, listq);
1398 			npages--;
1399 			continue;
1400 		}
1401 		/*
1402 		 * Page allocation failed, free intermediate pages and
1403 		 * exit.
1404 		 */
1405 		TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
1406 			vm_page_unwire_noq(p);
1407 			vm_page_free(p);
1408 		}
1409 		return (NULL);
1410 	}
1411 	*flags = UMA_SLAB_PRIV;
1412 	zkva = keg->uk_kva +
1413 	    atomic_fetchadd_long(&keg->uk_offset, round_page(bytes));
1414 	retkva = zkva;
1415 	TAILQ_FOREACH(p, &alloctail, listq) {
1416 		pmap_qenter(zkva, &p, 1);
1417 		zkva += PAGE_SIZE;
1418 	}
1419 
1420 	return ((void *)retkva);
1421 }
1422 
1423 /*
1424  * Frees a number of pages to the system
1425  *
1426  * Arguments:
1427  *	mem   A pointer to the memory to be freed
1428  *	size  The size of the memory being freed
1429  *	flags The original p->us_flags field
1430  *
1431  * Returns:
1432  *	Nothing
1433  */
1434 static void
1435 page_free(void *mem, vm_size_t size, uint8_t flags)
1436 {
1437 
1438 	if ((flags & UMA_SLAB_KERNEL) == 0)
1439 		panic("UMA: page_free used with invalid flags %x", flags);
1440 
1441 	kmem_free((vm_offset_t)mem, size);
1442 }
1443 
1444 /*
1445  * Frees pcpu zone allocations
1446  *
1447  * Arguments:
1448  *	mem   A pointer to the memory to be freed
1449  *	size  The size of the memory being freed
1450  *	flags The original p->us_flags field
1451  *
1452  * Returns:
1453  *	Nothing
1454  */
1455 static void
1456 pcpu_page_free(void *mem, vm_size_t size, uint8_t flags)
1457 {
1458 	vm_offset_t sva, curva;
1459 	vm_paddr_t paddr;
1460 	vm_page_t m;
1461 
1462 	MPASS(size == (mp_maxid+1)*PAGE_SIZE);
1463 	sva = (vm_offset_t)mem;
1464 	for (curva = sva; curva < sva + size; curva += PAGE_SIZE) {
1465 		paddr = pmap_kextract(curva);
1466 		m = PHYS_TO_VM_PAGE(paddr);
1467 		vm_page_unwire_noq(m);
1468 		vm_page_free(m);
1469 	}
1470 	pmap_qremove(sva, size >> PAGE_SHIFT);
1471 	kva_free(sva, size);
1472 }
1473 
1475 /*
1476  * Zero fill initializer
1477  *
1478  * Arguments/Returns follow uma_init specifications
1479  */
1480 static int
1481 zero_init(void *mem, int size, int flags)
1482 {
1483 	bzero(mem, size);
1484 	return (0);
1485 }
1486 
1487 /*
1488  * Actual size of embedded struct slab (!OFFPAGE).
1489  */
1490 size_t
1491 slab_sizeof(int nitems)
1492 {
1493 	size_t s;
1494 
1495 	s = sizeof(struct uma_slab) + BITSET_SIZE(nitems);
1496 	return (roundup(s, UMA_ALIGN_PTR + 1));
1497 }
1498 
1499 /*
1500  * Size of memory for embedded slabs (!OFFPAGE).
1501  */
1502 size_t
1503 slab_space(int nitems)
1504 {
1505 	return (UMA_SLAB_SIZE - slab_sizeof(nitems));
1506 }
1507 
1508 /*
1509  * Compute the number of items that will fit in an embedded (!OFFPAGE) slab
1510  * with a given size and alignment.
1511  */
1512 int
1513 slab_ipers(size_t size, int align)
1514 {
1515 	int rsize;
1516 	int nitems;
1517 
	/*
	 * Compute the ideal number of items that will fit in a page and
	 * then compute the actual number based on a bitset nitems wide.
	 */
	rsize = roundup(size, align + 1);
	nitems = UMA_SLAB_SIZE / rsize;
1524 	return (slab_space(nitems) / rsize);
1525 }
1526 
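/*
 * Worked example for the three helpers above (UMA_SLAB_SIZE and the
 * header size assumed for illustration): with size = 250 and an 8-byte
 * alignment mask (align = 7), rsize = roundup(250, 8) = 256, so a
 * 4096-byte slab ideally holds 4096 / 256 = 16 items.  If slab_sizeof(16)
 * came to 48 bytes, slab_space(16) would be 4096 - 48 = 4048 and
 * slab_ipers() would return 4048 / 256 = 15.
 */
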
1527 /*
1528  * Finish creating a small uma keg.  This calculates ipers, and the keg size.
1529  *
1530  * Arguments
 *	keg  The keg we should initialize
1532  *
1533  * Returns
1534  *	Nothing
1535  */
1536 static void
1537 keg_small_init(uma_keg_t keg)
1538 {
1539 	u_int rsize;
1540 	u_int memused;
1541 	u_int wastedspace;
1542 	u_int shsize;
1543 	u_int slabsize;
1544 
1545 	if (keg->uk_flags & UMA_ZONE_PCPU) {
1546 		u_int ncpus = (mp_maxid + 1) ? (mp_maxid + 1) : MAXCPU;
1547 
1548 		slabsize = UMA_PCPU_ALLOC_SIZE;
1549 		keg->uk_ppera = ncpus;
1550 	} else {
1551 		slabsize = UMA_SLAB_SIZE;
1552 		keg->uk_ppera = 1;
1553 	}
1554 
1555 	/*
1556 	 * Calculate the size of each allocation (rsize) according to
	 * alignment.  If the requested size is smaller than the smallest
	 * unit the allocation bitset can track, we round it up.
1559 	 */
1560 	rsize = keg->uk_size;
1561 	if (rsize < slabsize / SLAB_MAX_SETSIZE)
1562 		rsize = slabsize / SLAB_MAX_SETSIZE;
1563 	if (rsize & keg->uk_align)
1564 		rsize = roundup(rsize, keg->uk_align + 1);
1565 	keg->uk_rsize = rsize;
1566 
1567 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0 ||
1568 	    keg->uk_rsize < UMA_PCPU_ALLOC_SIZE,
1569 	    ("%s: size %u too large", __func__, keg->uk_rsize));
1570 
1571 	/*
1572 	 * Use a pessimistic bit count for shsize.  It may be possible to
1573 	 * squeeze one more item in for very particular sizes if we were
1574 	 * to loop and reduce the bitsize if there is waste.
1575 	 */
1576 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
1577 		shsize = 0;
1578 	else
1579 		shsize = slab_sizeof(slabsize / rsize);
1580 
1581 	if (rsize <= slabsize - shsize)
1582 		keg->uk_ipers = (slabsize - shsize) / rsize;
1583 	else {
1584 		/* Handle special case when we have 1 item per slab, so
1585 		 * alignment requirement can be relaxed. */
1586 		KASSERT(keg->uk_size <= slabsize - shsize,
1587 		    ("%s: size %u greater than slab", __func__, keg->uk_size));
1588 		keg->uk_ipers = 1;
1589 	}
1590 	KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_MAX_SETSIZE,
1591 	    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
1592 
1593 	memused = keg->uk_ipers * rsize + shsize;
1594 	wastedspace = slabsize - memused;
1595 
1596 	/*
	 * We can't do OFFPAGE if we're internal or if we've been
	 * asked to not go to the VM for buckets.  Doing so could send us
	 * to the VM for slabs, which UMA_ZFLAG_CACHEONLY (set as a result
	 * of UMA_ZONE_VM) clearly forbids.
1602 	 */
1603 	if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
1604 	    (keg->uk_flags & UMA_ZFLAG_CACHEONLY))
1605 		return;
1606 
1607 	/*
1608 	 * See if using an OFFPAGE slab will limit our waste.  Only do
1609 	 * this if it permits more items per-slab.
1610 	 *
1611 	 * XXX We could try growing slabsize to limit max waste as well.
1612 	 * Historically this was not done because the VM could not
1613 	 * efficiently handle contiguous allocations.
1614 	 */
1615 	if ((wastedspace >= slabsize / UMA_MAX_WASTE) &&
1616 	    (keg->uk_ipers < (slabsize / keg->uk_rsize))) {
1617 		keg->uk_ipers = slabsize / keg->uk_rsize;
1618 		KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_MAX_SETSIZE,
1619 		    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
1620 		CTR6(KTR_UMA, "UMA decided we need offpage slab headers for "
1621 		    "keg: %s(%p), calculated wastedspace = %d, "
1622 		    "maximum wasted space allowed = %d, "
1623 		    "calculated ipers = %d, "
1624 		    "new wasted space = %d\n", keg->uk_name, keg, wastedspace,
1625 		    slabsize / UMA_MAX_WASTE, keg->uk_ipers,
1626 		    slabsize - keg->uk_ipers * keg->uk_rsize);
1627 		/*
1628 		 * If we had access to memory to embed a slab header we
1629 		 * also have a page structure to use vtoslab() instead of
1630 		 * hash to find slabs.  If the zone was explicitly created
1631 		 * OFFPAGE we can't necessarily touch the memory.
1632 		 */
1633 		if ((keg->uk_flags & UMA_ZONE_OFFPAGE) == 0)
1634 			keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
1635 	}
1636 
1637 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
1638 	    (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1639 		keg->uk_flags |= UMA_ZONE_HASH;
1640 }
1641 
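/*
 * Worked example of the OFFPAGE trade-off in keg_small_init() above
 * (header size and UMA_MAX_WASTE assumed for illustration): with a
 * 4096-byte slab, rsize = 1024 and a ~48-byte embedded header, only
 * (4096 - 48) / 1024 = 3 items fit and ~976 bytes are wasted.  If that
 * exceeds slabsize / UMA_MAX_WASTE, the keg switches to OFFPAGE and fits
 * 4096 / 1024 = 4 items with no waste, at the cost of an external slab
 * header and a vtoslab/hash lookup on free.
 */
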
1642 /*
 * Finish creating a large (> UMA_SLAB_SIZE) uma keg.  Just give in and do
1644  * OFFPAGE for now.  When I can allow for more dynamic slab sizes this will be
1645  * more complicated.
1646  *
1647  * Arguments
1648  *	keg  The keg we should initialize
1649  *
1650  * Returns
1651  *	Nothing
1652  */
1653 static void
1654 keg_large_init(uma_keg_t keg)
1655 {
1656 
1657 	KASSERT(keg != NULL, ("Keg is null in keg_large_init"));
1658 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
1659 	    ("%s: Cannot large-init a UMA_ZONE_PCPU keg", __func__));
1660 
1661 	keg->uk_ppera = howmany(keg->uk_size, PAGE_SIZE);
1662 	keg->uk_ipers = 1;
1663 	keg->uk_rsize = keg->uk_size;
1664 
1665 	/* Check whether we have enough space to not do OFFPAGE. */
1666 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) == 0 &&
1667 	    PAGE_SIZE * keg->uk_ppera - keg->uk_rsize <
1668 	    slab_sizeof(SLAB_MIN_SETSIZE)) {
1669 		/*
1670 		 * We can't do OFFPAGE if we're internal, in which case
1671 		 * we need an extra page per allocation to contain the
1672 		 * slab header.
1673 		 */
1674 		if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) == 0)
1675 			keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
1676 		else
1677 			keg->uk_ppera++;
1678 	}
1679 
1680 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
1681 	    (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1682 		keg->uk_flags |= UMA_ZONE_HASH;
1683 }
1684 
1685 static void
1686 keg_cachespread_init(uma_keg_t keg)
1687 {
1688 	int alignsize;
1689 	int trailer;
1690 	int pages;
1691 	int rsize;
1692 
1693 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
1694 	    ("%s: Cannot cachespread-init a UMA_ZONE_PCPU keg", __func__));
1695 
1696 	alignsize = keg->uk_align + 1;
1697 	rsize = keg->uk_size;
1698 	/*
1699 	 * We want one item to start on every align boundary in a page.  To
1700 	 * do this we will span pages.  We will also extend the item by the
1701 	 * size of align if it is an even multiple of align.  Otherwise, it
1702 	 * would fall on the same boundary every time.
1703 	 */
1704 	if (rsize & keg->uk_align)
1705 		rsize = (rsize & ~keg->uk_align) + alignsize;
1706 	if ((rsize & alignsize) == 0)
1707 		rsize += alignsize;
1708 	trailer = rsize - keg->uk_size;
1709 	pages = (rsize * (PAGE_SIZE / alignsize)) / PAGE_SIZE;
1710 	pages = MIN(pages, (128 * 1024) / PAGE_SIZE);
1711 	keg->uk_rsize = rsize;
1712 	keg->uk_ppera = pages;
1713 	keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize;
1714 	keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
1715 	KASSERT(keg->uk_ipers <= SLAB_MAX_SETSIZE,
1716 	    ("%s: keg->uk_ipers too high(%d) increase max_ipers", __func__,
1717 	    keg->uk_ipers));
1718 }
1719 
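/*
 * Worked example for keg_cachespread_init() above, assuming 4 KB pages:
 * a 128-byte item with 64-byte (cache line) alignment gets rsize =
 * 128 + 64 = 192 so consecutive items start at different line offsets;
 * trailer = 64, pages = (192 * 64) / 4096 = 3, and ipers =
 * (3 * 4096 + 64) / 192 = 64 items spread across the three-page slab.
 */
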
1720 /*
1721  * Keg header ctor.  This initializes all fields, locks, etc.  And inserts
1722  * the keg onto the global keg list.
1723  *
1724  * Arguments/Returns follow uma_ctor specifications
1725  *	udata  Actually uma_kctor_args
1726  */
1727 static int
1728 keg_ctor(void *mem, int size, void *udata, int flags)
1729 {
1730 	struct uma_kctor_args *arg = udata;
1731 	uma_keg_t keg = mem;
1732 	uma_zone_t zone;
1733 
1734 	bzero(keg, size);
1735 	keg->uk_size = arg->size;
1736 	keg->uk_init = arg->uminit;
1737 	keg->uk_fini = arg->fini;
1738 	keg->uk_align = arg->align;
1739 	keg->uk_free = 0;
1740 	keg->uk_reserve = 0;
1741 	keg->uk_pages = 0;
1742 	keg->uk_flags = arg->flags;
1743 	keg->uk_slabzone = NULL;
1744 
1745 	/*
1746 	 * We use a global round-robin policy by default.  Zones with
1747 	 * UMA_ZONE_NUMA set will use first-touch instead, in which case the
1748 	 * iterator is never run.
1749 	 */
1750 	keg->uk_dr.dr_policy = DOMAINSET_RR();
1751 	keg->uk_dr.dr_iter = 0;
1752 
1753 	/*
1754 	 * The master zone is passed to us at keg-creation time.
1755 	 */
1756 	zone = arg->zone;
1757 	keg->uk_name = zone->uz_name;
1758 
1759 	if (arg->flags & UMA_ZONE_VM)
1760 		keg->uk_flags |= UMA_ZFLAG_CACHEONLY;
1761 
1762 	if (arg->flags & UMA_ZONE_ZINIT)
1763 		keg->uk_init = zero_init;
1764 
1765 	if (arg->flags & UMA_ZONE_MALLOC)
1766 		keg->uk_flags |= UMA_ZONE_VTOSLAB;
1767 
1768 	if (arg->flags & UMA_ZONE_PCPU)
1769 #ifdef SMP
1770 		keg->uk_flags |= UMA_ZONE_OFFPAGE;
1771 #else
1772 		keg->uk_flags &= ~UMA_ZONE_PCPU;
1773 #endif
1774 
1775 	if (keg->uk_flags & UMA_ZONE_CACHESPREAD) {
1776 		keg_cachespread_init(keg);
1777 	} else {
1778 		if (keg->uk_size > slab_space(SLAB_MIN_SETSIZE))
1779 			keg_large_init(keg);
1780 		else
1781 			keg_small_init(keg);
1782 	}
1783 
1784 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
1785 		keg->uk_slabzone = slabzone;
1786 
1787 	/*
1788 	 * If we haven't booted yet we need allocations to go through the
1789 	 * startup cache until the vm is ready.
1790 	 */
1791 	if (booted < BOOT_PAGEALLOC)
1792 		keg->uk_allocf = startup_alloc;
1793 #ifdef UMA_MD_SMALL_ALLOC
1794 	else if (keg->uk_ppera == 1)
1795 		keg->uk_allocf = uma_small_alloc;
1796 #endif
1797 	else if (keg->uk_flags & UMA_ZONE_PCPU)
1798 		keg->uk_allocf = pcpu_page_alloc;
1799 	else
1800 		keg->uk_allocf = page_alloc;
1801 #ifdef UMA_MD_SMALL_ALLOC
1802 	if (keg->uk_ppera == 1)
1803 		keg->uk_freef = uma_small_free;
1804 	else
1805 #endif
1806 	if (keg->uk_flags & UMA_ZONE_PCPU)
1807 		keg->uk_freef = pcpu_page_free;
1808 	else
1809 		keg->uk_freef = page_free;
1810 
1811 	/*
1812 	 * Initialize keg's lock
1813 	 */
1814 	KEG_LOCK_INIT(keg, (arg->flags & UMA_ZONE_MTXCLASS));
1815 
1816 	/*
1817 	 * If we're putting the slab header in the actual page we need to
1818 	 * figure out where in each page it goes.  See slab_sizeof
1819 	 * definition.
1820 	 */
1821 	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) {
1822 		size_t shsize;
1823 
1824 		shsize = slab_sizeof(keg->uk_ipers);
1825 		keg->uk_pgoff = (PAGE_SIZE * keg->uk_ppera) - shsize;
1826 		/*
1827 		 * The only way the following is possible is if with our
1828 		 * UMA_ALIGN_PTR adjustments we are now bigger than
1829 		 * UMA_SLAB_SIZE.  I haven't checked whether this is
1830 		 * mathematically possible for all cases, so we make
1831 		 * sure here anyway.
1832 		 */
1833 		KASSERT(keg->uk_pgoff + shsize <= PAGE_SIZE * keg->uk_ppera,
1834 		    ("zone %s ipers %d rsize %d size %d slab won't fit",
1835 		    zone->uz_name, keg->uk_ipers, keg->uk_rsize, keg->uk_size));
1836 	}
1837 
1838 	if (keg->uk_flags & UMA_ZONE_HASH)
1839 		hash_alloc(&keg->uk_hash, 0);
1840 
1841 	CTR5(KTR_UMA, "keg_ctor %p zone %s(%p) out %d free %d\n",
1842 	    keg, zone->uz_name, zone,
1843 	    (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free,
1844 	    keg->uk_free);
1845 
1846 	LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);
1847 
1848 	rw_wlock(&uma_rwlock);
1849 	LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
1850 	rw_wunlock(&uma_rwlock);
1851 	return (0);
1852 }
1853 
1854 static void
1855 zone_alloc_counters(uma_zone_t zone, void *unused)
1856 {
1857 
1858 	zone->uz_allocs = counter_u64_alloc(M_WAITOK);
1859 	zone->uz_frees = counter_u64_alloc(M_WAITOK);
1860 	zone->uz_fails = counter_u64_alloc(M_WAITOK);
1861 }
1862 
1863 static void
1864 zone_alloc_sysctl(uma_zone_t zone, void *unused)
1865 {
1866 	uma_zone_domain_t zdom;
1867 	uma_keg_t keg;
1868 	struct sysctl_oid *oid, *domainoid;
1869 	int domains, i, cnt;
1870 	static const char *nokeg = "cache zone";
1871 	char *c;
1872 
1873 	/*
	 * Make a sysctl-safe copy of the zone name by removing
	 * any special characters and handling duplicates by
	 * appending an index.
1877 	 */
1878 	if (zone->uz_namecnt != 0) {
1879 		/* Count the number of decimal digits and '_' separator. */
1880 		for (i = 1, cnt = zone->uz_namecnt; cnt != 0; i++)
1881 			cnt /= 10;
1882 		zone->uz_ctlname = malloc(strlen(zone->uz_name) + i + 1,
1883 		    M_UMA, M_WAITOK);
1884 		sprintf(zone->uz_ctlname, "%s_%d", zone->uz_name,
1885 		    zone->uz_namecnt);
1886 	} else
1887 		zone->uz_ctlname = strdup(zone->uz_name, M_UMA);
1888 	for (c = zone->uz_ctlname; *c != '\0'; c++)
1889 		if (strchr("./\\ -", *c) != NULL)
1890 			*c = '_';
1891 
1892 	/*
1893 	 * Basic parameters at the root.
1894 	 */
1895 	zone->uz_oid = SYSCTL_ADD_NODE(NULL, SYSCTL_STATIC_CHILDREN(_vm_uma),
1896 	    OID_AUTO, zone->uz_ctlname, CTLFLAG_RD, NULL, "");
1897 	oid = zone->uz_oid;
1898 	SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1899 	    "size", CTLFLAG_RD, &zone->uz_size, 0, "Allocation size");
1900 	SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1901 	    "flags", CTLFLAG_RD | CTLTYPE_STRING | CTLFLAG_MPSAFE,
1902 	    zone, 0, sysctl_handle_uma_zone_flags, "A",
1903 	    "Allocator configuration flags");
1904 	SYSCTL_ADD_U16(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1905 	    "bucket_size", CTLFLAG_RD, &zone->uz_bucket_size, 0,
1906 	    "Desired per-cpu cache size");
1907 	SYSCTL_ADD_U16(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1908 	    "bucket_size_max", CTLFLAG_RD, &zone->uz_bucket_size_max, 0,
1909 	    "Maximum allowed per-cpu cache size");
1910 
1911 	/*
	 * Keg, if present.
1913 	 */
1914 	oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(zone->uz_oid), OID_AUTO,
1915 	    "keg", CTLFLAG_RD, NULL, "");
1916 	keg = zone->uz_keg;
1917 	if ((zone->uz_flags & UMA_ZFLAG_CACHE) == 0) {
1918 		SYSCTL_ADD_CONST_STRING(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1919 		    "name", CTLFLAG_RD, keg->uk_name, "Keg name");
1920 		SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1921 		    "rsize", CTLFLAG_RD, &keg->uk_rsize, 0,
1922 		    "Real object size with alignment");
1923 		SYSCTL_ADD_U16(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1924 		    "ppera", CTLFLAG_RD, &keg->uk_ppera, 0,
1925 		    "pages per-slab allocation");
1926 		SYSCTL_ADD_U16(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1927 		    "ipers", CTLFLAG_RD, &keg->uk_ipers, 0,
1928 		    "items available per-slab");
1929 		SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1930 		    "align", CTLFLAG_RD, &keg->uk_align, 0,
1931 		    "item alignment mask");
1932 		SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1933 		    "pages", CTLFLAG_RD, &keg->uk_pages, 0,
1934 		    "Total pages currently allocated from VM");
1935 		SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1936 		    "free", CTLFLAG_RD, &keg->uk_free, 0,
1937 		    "items free in the slab layer");
1938 	} else
1939 		SYSCTL_ADD_CONST_STRING(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1940 		    "name", CTLFLAG_RD, nokeg, "Keg name");
1941 
1942 	/*
1943 	 * Information about zone limits.
1944 	 */
1945 	oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(zone->uz_oid), OID_AUTO,
1946 	    "limit", CTLFLAG_RD, NULL, "");
1947 	SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1948 	    "items", CTLFLAG_RD, &zone->uz_items, 0,
1949 	    "current number of cached items");
1950 	SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1951 	    "max_items", CTLFLAG_RD, &zone->uz_max_items, 0,
1952 	    "Maximum number of cached items");
1953 	SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1954 	    "sleepers", CTLFLAG_RD, &zone->uz_sleepers, 0,
1955 	    "Number of threads sleeping at limit");
1956 	SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1957 	    "sleeps", CTLFLAG_RD, &zone->uz_sleeps, 0,
1958 	    "Total zone limit sleeps");
1959 
1960 	/*
1961 	 * Per-domain information.
1962 	 */
1963 	if ((zone->uz_flags & UMA_ZONE_NUMA) != 0)
1964 		domains = vm_ndomains;
1965 	else
1966 		domains = 1;
1967 	domainoid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(zone->uz_oid),
1968 	    OID_AUTO, "domain", CTLFLAG_RD, NULL, "");
1969 	for (i = 0; i < domains; i++) {
1970 		zdom = &zone->uz_domain[i];
1971 		oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(domainoid),
1972 		    OID_AUTO, VM_DOMAIN(i)->vmd_name, CTLFLAG_RD, NULL, "");
1973 		SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1974 		    "nitems", CTLFLAG_RD, &zdom->uzd_nitems,
1975 		    "number of items in this domain");
1976 		SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1977 		    "imax", CTLFLAG_RD, &zdom->uzd_imax,
1978 		    "maximum item count in this period");
1979 		SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1980 		    "imin", CTLFLAG_RD, &zdom->uzd_imin,
1981 		    "minimum item count in this period");
1982 		SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1983 		    "wss", CTLFLAG_RD, &zdom->uzd_wss,
1984 		    "Working set size");
1985 	}
1986 
1987 	/*
1988 	 * General statistics.
1989 	 */
1990 	oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(zone->uz_oid), OID_AUTO,
1991 	    "stats", CTLFLAG_RD, NULL, "");
1992 	SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1993 	    "current", CTLFLAG_RD | CTLTYPE_INT | CTLFLAG_MPSAFE,
1994 	    zone, 1, sysctl_handle_uma_zone_cur, "I",
1995 	    "Current number of allocated items");
1996 	SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1997 	    "allocs", CTLFLAG_RD | CTLTYPE_U64 | CTLFLAG_MPSAFE,
1998 	    zone, 0, sysctl_handle_uma_zone_allocs, "QU",
1999 	    "Total allocation calls");
2000 	SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2001 	    "frees", CTLFLAG_RD | CTLTYPE_U64 | CTLFLAG_MPSAFE,
2002 	    zone, 0, sysctl_handle_uma_zone_frees, "QU",
2003 	    "Total free calls");
2004 	SYSCTL_ADD_COUNTER_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2005 	    "fails", CTLFLAG_RD, &zone->uz_fails,
2006 	    "Number of allocation failures");
2007 	SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2008 	    "xdomain", CTLFLAG_RD, &zone->uz_xdomain, 0,
2009 	    "Free calls from the wrong domain");
2010 }
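
/*
 * For reference, the sysctl tree constructed above looks roughly as
 * follows for a hypothetical zone named "foo" (node names come from the
 * SYSCTL_ADD_* calls above; the zone name is illustrative only):
 *
 *	vm.uma.foo.{size,flags,bucket_size,bucket_size_max}
 *	vm.uma.foo.keg.{name,rsize,ppera,ipers,align,pages,free}
 *	vm.uma.foo.limit.{items,max_items,sleepers,sleeps}
 *	vm.uma.foo.domain.<vmd_name>.{nitems,imax,imin,wss}
 *	vm.uma.foo.stats.{current,allocs,frees,fails,xdomain}
 */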
2011 
2012 struct uma_zone_count {
2013 	const char	*name;
2014 	int		count;
2015 };
2016 
2017 static void
2018 zone_count(uma_zone_t zone, void *arg)
2019 {
2020 	struct uma_zone_count *cnt;
2021 
2022 	cnt = arg;
2023 	/*
2024 	 * Some zones are rapidly created with identical names and
2025 	 * destroyed out of order.  This can lead to gaps in the count.
2026 	 * Use one greater than the maximum observed for this name.
2027 	 */
2028 	if (strcmp(zone->uz_name, cnt->name) == 0)
2029 		cnt->count = MAX(cnt->count,
2030 		    zone->uz_namecnt + 1);
2031 }
2032 
2033 /*
2034  * Zone header ctor.  This initializes all fields, locks, etc.
2035  *
2036  * Arguments/Returns follow uma_ctor specifications
2037  *	udata  Actually uma_zctor_args
2038  */
2039 static int
2040 zone_ctor(void *mem, int size, void *udata, int flags)
2041 {
2042 	struct uma_zone_count cnt;
2043 	struct uma_zctor_args *arg = udata;
2044 	uma_zone_t zone = mem;
2045 	uma_zone_t z;
2046 	uma_keg_t keg;
2047 	int i;
2048 
2049 	bzero(zone, size);
2050 	zone->uz_name = arg->name;
2051 	zone->uz_ctor = arg->ctor;
2052 	zone->uz_dtor = arg->dtor;
2053 	zone->uz_init = NULL;
2054 	zone->uz_fini = NULL;
2055 	zone->uz_sleeps = 0;
2056 	zone->uz_xdomain = 0;
2057 	zone->uz_bucket_size = 0;
2058 	zone->uz_bucket_size_min = 0;
2059 	zone->uz_bucket_size_max = BUCKET_MAX;
2060 	zone->uz_flags = 0;
2061 	zone->uz_warning = NULL;
2062 	/* The domain structures follow the cpu structures. */
2063 	zone->uz_domain = (struct uma_zone_domain *)&zone->uz_cpu[mp_ncpus];
2064 	zone->uz_bkt_max = ULONG_MAX;
2065 	timevalclear(&zone->uz_ratecheck);
2066 
2067 	/* Count the number of duplicate names. */
2068 	cnt.name = arg->name;
2069 	cnt.count = 0;
2070 	zone_foreach(zone_count, &cnt);
2071 	zone->uz_namecnt = cnt.count;
2072 
2073 	for (i = 0; i < vm_ndomains; i++)
2074 		TAILQ_INIT(&zone->uz_domain[i].uzd_buckets);
2075 
2076 #ifdef INVARIANTS
2077 	if (arg->uminit == trash_init && arg->fini == trash_fini)
2078 		zone->uz_flags |= UMA_ZFLAG_TRASH;
2079 #endif
2080 
2081 	/*
2082 	 * This is a pure cache zone, no kegs.
2083 	 */
2084 	if (arg->import) {
2085 		if (arg->flags & UMA_ZONE_VM)
2086 			arg->flags |= UMA_ZFLAG_CACHEONLY;
2087 		zone->uz_flags = arg->flags;
2088 		zone->uz_size = arg->size;
2089 		zone->uz_import = arg->import;
2090 		zone->uz_release = arg->release;
2091 		zone->uz_arg = arg->arg;
2092 		zone->uz_lockptr = &zone->uz_lock;
2093 		ZONE_LOCK_INIT(zone, (arg->flags & UMA_ZONE_MTXCLASS));
2094 		rw_wlock(&uma_rwlock);
2095 		LIST_INSERT_HEAD(&uma_cachezones, zone, uz_link);
2096 		rw_wunlock(&uma_rwlock);
2097 		goto out;
2098 	}
2099 
2100 	/*
2101 	 * Use the regular zone/keg/slab allocator.
2102 	 */
2103 	zone->uz_import = zone_import;
2104 	zone->uz_release = zone_release;
2105 	zone->uz_arg = zone;
2106 	keg = arg->keg;
2107 
2108 	if (arg->flags & UMA_ZONE_SECONDARY) {
2109 		KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0,
		    ("zone_ctor: zone is already a secondary zone"));
2111 		KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
2112 		zone->uz_init = arg->uminit;
2113 		zone->uz_fini = arg->fini;
2114 		zone->uz_lockptr = &keg->uk_lock;
2115 		zone->uz_flags |= UMA_ZONE_SECONDARY;
2116 		rw_wlock(&uma_rwlock);
2117 		ZONE_LOCK(zone);
2118 		LIST_FOREACH(z, &keg->uk_zones, uz_link) {
2119 			if (LIST_NEXT(z, uz_link) == NULL) {
2120 				LIST_INSERT_AFTER(z, zone, uz_link);
2121 				break;
2122 			}
2123 		}
2124 		ZONE_UNLOCK(zone);
2125 		rw_wunlock(&uma_rwlock);
2126 	} else if (keg == NULL) {
2127 		if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
2128 		    arg->align, arg->flags)) == NULL)
2129 			return (ENOMEM);
2130 	} else {
2131 		struct uma_kctor_args karg;
2132 		int error;
2133 
2134 		/* We should only be here from uma_startup() */
2135 		karg.size = arg->size;
2136 		karg.uminit = arg->uminit;
2137 		karg.fini = arg->fini;
2138 		karg.align = arg->align;
2139 		karg.flags = arg->flags;
2140 		karg.zone = zone;
2141 		error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
2142 		    flags);
2143 		if (error)
2144 			return (error);
2145 	}
2146 
2147 	/* Inherit properties from the keg. */
2148 	zone->uz_keg = keg;
2149 	zone->uz_size = keg->uk_size;
2150 	zone->uz_flags |= (keg->uk_flags &
2151 	    (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT));
2152 
2153 out:
2154 	if (__predict_true(booted == BOOT_RUNNING)) {
2155 		zone_alloc_counters(zone, NULL);
2156 		zone_alloc_sysctl(zone, NULL);
2157 	} else {
2158 		zone->uz_allocs = EARLY_COUNTER;
2159 		zone->uz_frees = EARLY_COUNTER;
2160 		zone->uz_fails = EARLY_COUNTER;
2161 	}
2162 
2163 	KASSERT((arg->flags & (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET)) !=
2164 	    (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET),
2165 	    ("Invalid zone flag combination"));
2166 	if (arg->flags & UMA_ZFLAG_INTERNAL)
2167 		zone->uz_bucket_size_max = zone->uz_bucket_size = 0;
2168 	if ((arg->flags & UMA_ZONE_MAXBUCKET) != 0)
2169 		zone->uz_bucket_size = BUCKET_MAX;
2170 	else if ((arg->flags & UMA_ZONE_MINBUCKET) != 0)
2171 		zone->uz_bucket_size_max = zone->uz_bucket_size = BUCKET_MIN;
2172 	else if ((arg->flags & UMA_ZONE_NOBUCKET) != 0)
2173 		zone->uz_bucket_size = 0;
2174 	else
2175 		zone->uz_bucket_size = bucket_select(zone->uz_size);
2176 	zone->uz_bucket_size_min = zone->uz_bucket_size;
2177 
2178 	return (0);
2179 }
2180 
2181 /*
2182  * Keg header dtor.  This frees all data, destroys locks, frees the hash
2183  * table and removes the keg from the global list.
2184  *
2185  * Arguments/Returns follow uma_dtor specifications
2186  *	udata  unused
2187  */
2188 static void
2189 keg_dtor(void *arg, int size, void *udata)
2190 {
2191 	uma_keg_t keg;
2192 
2193 	keg = (uma_keg_t)arg;
2194 	KEG_LOCK(keg);
2195 	if (keg->uk_free != 0) {
2196 		printf("Freed UMA keg (%s) was not empty (%d items). "
2197 		    " Lost %d pages of memory.\n",
2198 		    keg->uk_name ? keg->uk_name : "",
2199 		    keg->uk_free, keg->uk_pages);
2200 	}
2201 	KEG_UNLOCK(keg);
2202 
2203 	hash_free(&keg->uk_hash);
2204 
2205 	KEG_LOCK_FINI(keg);
2206 }
2207 
2208 /*
2209  * Zone header dtor.
2210  *
2211  * Arguments/Returns follow uma_dtor specifications
2212  *	udata  unused
2213  */
2214 static void
2215 zone_dtor(void *arg, int size, void *udata)
2216 {
2217 	uma_zone_t zone;
2218 	uma_keg_t keg;
2219 
2220 	zone = (uma_zone_t)arg;
2221 
2222 	sysctl_remove_oid(zone->uz_oid, 1, 1);
2223 
2224 	if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
2225 		cache_drain(zone);
2226 
2227 	rw_wlock(&uma_rwlock);
2228 	LIST_REMOVE(zone, uz_link);
2229 	rw_wunlock(&uma_rwlock);
2230 	/*
	 * XXX there are some races here where the zone can be drained but
	 * the zone lock released and then refilled before we remove it...
	 * we don't care for now.
2235 	 */
2236 	zone_reclaim(zone, M_WAITOK, true);
2237 	/*
	 * We only destroy kegs from non-secondary/non-cache zones.
2239 	 */
2240 	if ((zone->uz_flags & (UMA_ZONE_SECONDARY | UMA_ZFLAG_CACHE)) == 0) {
2241 		keg = zone->uz_keg;
2242 		rw_wlock(&uma_rwlock);
2243 		LIST_REMOVE(keg, uk_link);
2244 		rw_wunlock(&uma_rwlock);
2245 		zone_free_item(kegs, keg, NULL, SKIP_NONE);
2246 	}
2247 	counter_u64_free(zone->uz_allocs);
2248 	counter_u64_free(zone->uz_frees);
2249 	counter_u64_free(zone->uz_fails);
2250 	free(zone->uz_ctlname, M_UMA);
2251 	if (zone->uz_lockptr == &zone->uz_lock)
2252 		ZONE_LOCK_FINI(zone);
2253 }
2254 
2255 /*
2256  * Traverses every zone in the system and calls a callback
2257  *
2258  * Arguments:
2259  *	zfunc  A pointer to a function which accepts a zone
2260  *		as an argument.
2261  *
2262  * Returns:
2263  *	Nothing
2264  */
2265 static void
2266 zone_foreach(void (*zfunc)(uma_zone_t, void *arg), void *arg)
2267 {
2268 	uma_keg_t keg;
2269 	uma_zone_t zone;
2270 
2271 	/*
2272 	 * Before BOOT_RUNNING we are guaranteed to be single
2273 	 * threaded, so locking isn't needed. Startup functions
2274 	 * are allowed to use M_WAITOK.
2275 	 */
2276 	if (__predict_true(booted == BOOT_RUNNING))
2277 		rw_rlock(&uma_rwlock);
2278 	LIST_FOREACH(keg, &uma_kegs, uk_link) {
2279 		LIST_FOREACH(zone, &keg->uk_zones, uz_link)
2280 			zfunc(zone, arg);
2281 	}
2282 	LIST_FOREACH(zone, &uma_cachezones, uz_link)
2283 		zfunc(zone, arg);
2284 	if (__predict_true(booted == BOOT_RUNNING))
2285 		rw_runlock(&uma_rwlock);
2286 }
2287 
2288 /*
 * Count how many pages we need to bootstrap.  The VM supplies its need
 * for early zones in the argument; we add our own zones, which consist
 * of the UMA Slabs, UMA Hash and 9 Bucket zones.  The zone of zones and
 * zone of kegs are accounted for separately.
2293  */
2294 #define	UMA_BOOT_ZONES	11
2295 /* Zone of zones and zone of kegs have arbitrary alignment. */
2296 #define	UMA_BOOT_ALIGN	32
2297 static int zsize, ksize;
2298 int
2299 uma_startup_count(int vm_zones)
2300 {
2301 	int zones, pages;
2302 	size_t space, size;
2303 
2304 	ksize = sizeof(struct uma_keg) +
2305 	    (sizeof(struct uma_domain) * vm_ndomains);
2306 	zsize = sizeof(struct uma_zone) +
2307 	    (sizeof(struct uma_cache) * (mp_maxid + 1)) +
2308 	    (sizeof(struct uma_zone_domain) * vm_ndomains);
2309 
2310 	/*
2311 	 * Memory for the zone of kegs and its keg,
2312 	 * and for zone of zones.
2313 	 */
2314 	pages = howmany(roundup(zsize, CACHE_LINE_SIZE) * 2 +
2315 	    roundup(ksize, CACHE_LINE_SIZE), PAGE_SIZE);
2316 
2317 #ifdef	UMA_MD_SMALL_ALLOC
2318 	zones = UMA_BOOT_ZONES;
2319 #else
2320 	zones = UMA_BOOT_ZONES + vm_zones;
2321 	vm_zones = 0;
2322 #endif
2323 	size = slab_sizeof(SLAB_MAX_SETSIZE);
2324 	space = slab_space(SLAB_MAX_SETSIZE);
2325 
2326 	/* Memory for the rest of startup zones, UMA and VM, ... */
2327 	if (zsize > space) {
2328 		/* See keg_large_init(). */
2329 		u_int ppera;
2330 
2331 		ppera = howmany(roundup2(zsize, UMA_BOOT_ALIGN), PAGE_SIZE);
2332 		if (PAGE_SIZE * ppera - roundup2(zsize, UMA_BOOT_ALIGN) < size)
2333 			ppera++;
2334 		pages += (zones + vm_zones) * ppera;
2335 	} else if (roundup2(zsize, UMA_BOOT_ALIGN) > space)
2336 		/* See keg_small_init() special case for uk_ppera = 1. */
2337 		pages += zones;
2338 	else
2339 		pages += howmany(zones,
2340 		    space / roundup2(zsize, UMA_BOOT_ALIGN));
2341 
2342 	/* ... and their kegs. Note that zone of zones allocates a keg! */
2343 	pages += howmany(zones + 1,
2344 	    space / roundup2(ksize, UMA_BOOT_ALIGN));
2345 
2346 	return (pages);
2347 }
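
/*
 * A worked example of the computation above, with purely illustrative
 * numbers: assume UMA_MD_SMALL_ALLOC, PAGE_SIZE = 4096,
 * CACHE_LINE_SIZE = 64, zsize = 2000, ksize = 600 and
 * slab_space(SLAB_MAX_SETSIZE) = 3800.  The zone of zones, zone of kegs
 * and master keg need howmany(2048 * 2 + 640, 4096) = 2 pages.  Since
 * roundup2(zsize, UMA_BOOT_ALIGN) = 2016 fits in 3800 bytes of slab
 * space, each slab holds 3800 / 2016 = 1 zone, so the 11 boot zones
 * need 11 pages.  Their 12 kegs fit 3800 / 608 = 6 to a slab, adding
 * howmany(12, 6) = 2 pages, for a total of 15 boot pages.
 */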
2348 
2349 void
2350 uma_startup(void *mem, int npages)
2351 {
2352 	struct uma_zctor_args args;
2353 	uma_keg_t masterkeg;
2354 	uintptr_t m;
2355 
2356 #ifdef DIAGNOSTIC
2357 	printf("Entering %s with %d boot pages configured\n", __func__, npages);
2358 #endif
2359 
2360 	rw_init(&uma_rwlock, "UMA lock");
2361 
2362 	/* Use bootpages memory for the zone of zones and zone of kegs. */
2363 	m = (uintptr_t)mem;
2364 	zones = (uma_zone_t)m;
2365 	m += roundup(zsize, CACHE_LINE_SIZE);
2366 	kegs = (uma_zone_t)m;
2367 	m += roundup(zsize, CACHE_LINE_SIZE);
2368 	masterkeg = (uma_keg_t)m;
2369 	m += roundup(ksize, CACHE_LINE_SIZE);
2370 	m = roundup(m, PAGE_SIZE);
2371 	npages -= (m - (uintptr_t)mem) / PAGE_SIZE;
2372 	mem = (void *)m;
2373 
2374 	/* "manually" create the initial zone */
2375 	memset(&args, 0, sizeof(args));
2376 	args.name = "UMA Kegs";
2377 	args.size = ksize;
2378 	args.ctor = keg_ctor;
2379 	args.dtor = keg_dtor;
2380 	args.uminit = zero_init;
2381 	args.fini = NULL;
2382 	args.keg = masterkeg;
2383 	args.align = UMA_BOOT_ALIGN - 1;
2384 	args.flags = UMA_ZFLAG_INTERNAL;
2385 	zone_ctor(kegs, zsize, &args, M_WAITOK);
2386 
2387 	bootmem = mem;
2388 	boot_pages = npages;
2389 
2390 	args.name = "UMA Zones";
2391 	args.size = zsize;
2392 	args.ctor = zone_ctor;
2393 	args.dtor = zone_dtor;
2394 	args.uminit = zero_init;
2395 	args.fini = NULL;
2396 	args.keg = NULL;
2397 	args.align = UMA_BOOT_ALIGN - 1;
2398 	args.flags = UMA_ZFLAG_INTERNAL;
2399 	zone_ctor(zones, zsize, &args, M_WAITOK);
2400 
2401 	/* Now make a zone for slab headers */
2402 	slabzone = uma_zcreate("UMA Slabs", sizeof(struct uma_hash_slab),
2403 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
2404 
2405 	hashzone = uma_zcreate("UMA Hash",
2406 	    sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
2407 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
2408 
2409 	bucket_init();
2410 
2411 	booted = BOOT_STRAPPED;
2412 }
2413 
2414 void
2415 uma_startup1(void)
2416 {
2417 
2418 #ifdef DIAGNOSTIC
2419 	printf("Entering %s with %d boot pages left\n", __func__, boot_pages);
2420 #endif
2421 	booted = BOOT_PAGEALLOC;
2422 }
2423 
2424 void
2425 uma_startup2(void)
2426 {
2427 
2428 #ifdef DIAGNOSTIC
2429 	printf("Entering %s with %d boot pages left\n", __func__, boot_pages);
2430 #endif
2431 	booted = BOOT_BUCKETS;
2432 	sx_init(&uma_reclaim_lock, "umareclaim");
2433 	bucket_enable();
2434 }
2435 
2436 /*
 * Finish UMA startup: allocate counters and sysctl nodes for the zones
 * created earlier in boot and initialize the periodic timeout.
2439  */
2440 static void
2441 uma_startup3(void)
2442 {
2443 
2444 #ifdef INVARIANTS
2445 	TUNABLE_INT_FETCH("vm.debug.divisor", &dbg_divisor);
2446 	uma_dbg_cnt = counter_u64_alloc(M_WAITOK);
2447 	uma_skip_cnt = counter_u64_alloc(M_WAITOK);
2448 #endif
2449 	zone_foreach(zone_alloc_counters, NULL);
2450 	zone_foreach(zone_alloc_sysctl, NULL);
2451 	callout_init(&uma_callout, 1);
2452 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
2453 	booted = BOOT_RUNNING;
2454 }
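
/*
 * To summarize the staged bring-up above: uma_startup() runs on the
 * statically allocated boot pages and sets BOOT_STRAPPED;
 * uma_startup1() lets new kegs bypass the startup cache
 * (BOOT_PAGEALLOC); uma_startup2() enables buckets and the reclaim
 * lock (BOOT_BUCKETS); and uma_startup3() allocates the real counters
 * and sysctl nodes for the early zones and arms the periodic timeout
 * (BOOT_RUNNING).
 */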
2455 
2456 static uma_keg_t
2457 uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
2458 		int align, uint32_t flags)
2459 {
2460 	struct uma_kctor_args args;
2461 
2462 	args.size = size;
2463 	args.uminit = uminit;
2464 	args.fini = fini;
2465 	args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align;
2466 	args.flags = flags;
2467 	args.zone = zone;
2468 	return (zone_alloc_item(kegs, &args, UMA_ANYDOMAIN, M_WAITOK));
2469 }
2470 
2471 /* Public functions */
2472 /* See uma.h */
2473 void
2474 uma_set_align(int align)
2475 {
2476 
2477 	if (align != UMA_ALIGN_CACHE)
2478 		uma_align_cache = align;
2479 }
2480 
2481 /* See uma.h */
2482 uma_zone_t
2483 uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
2484 		uma_init uminit, uma_fini fini, int align, uint32_t flags)
2485 
2486 {
2487 	struct uma_zctor_args args;
2488 	uma_zone_t res;
2489 	bool locked;
2490 
2491 	KASSERT(powerof2(align + 1), ("invalid zone alignment %d for \"%s\"",
2492 	    align, name));
2493 
2494 	/* Sets all zones to a first-touch domain policy. */
2495 #ifdef UMA_FIRSTTOUCH
2496 	flags |= UMA_ZONE_NUMA;
2497 #endif
2498 
2499 	/* This stuff is essential for the zone ctor */
2500 	memset(&args, 0, sizeof(args));
2501 	args.name = name;
2502 	args.size = size;
2503 	args.ctor = ctor;
2504 	args.dtor = dtor;
2505 	args.uminit = uminit;
2506 	args.fini = fini;
2507 #ifdef  INVARIANTS
2508 	/*
2509 	 * Inject procedures which check for memory use after free if we are
2510 	 * allowed to scramble the memory while it is not allocated.  This
	 * requires that UMA is actually able to access the memory, that there
	 * be no init or fini procedures, no dependency on the initial value of
	 * the memory, and no (legitimate) use of the memory after free.  Note
	 * that the ctor and dtor do not need to be empty.
2515 	 *
2516 	 * XXX UMA_ZONE_OFFPAGE.
2517 	 */
2518 	if ((!(flags & (UMA_ZONE_ZINIT | UMA_ZONE_NOFREE))) &&
2519 	    uminit == NULL && fini == NULL) {
2520 		args.uminit = trash_init;
2521 		args.fini = trash_fini;
2522 	}
2523 #endif
2524 	args.align = align;
2525 	args.flags = flags;
2526 	args.keg = NULL;
2527 
2528 	if (booted < BOOT_BUCKETS) {
2529 		locked = false;
2530 	} else {
2531 		sx_slock(&uma_reclaim_lock);
2532 		locked = true;
2533 	}
2534 	res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK);
2535 	if (locked)
2536 		sx_sunlock(&uma_reclaim_lock);
2537 	return (res);
2538 }
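
/*
 * A minimal usage sketch, with hypothetical names ("foo", foo_ctor,
 * foo_dtor); uma_zalloc() and uma_zfree() are the usual uma.h wrappers
 * around uma_zalloc_arg() and uma_zfree_arg():
 *
 *	uma_zone_t foo_zone;
 *
 *	foo_zone = uma_zcreate("foo", sizeof(struct foo), foo_ctor,
 *	    foo_dtor, NULL, NULL, UMA_ALIGN_PTR, 0);
 *	p = uma_zalloc(foo_zone, M_WAITOK);
 *	...
 *	uma_zfree(foo_zone, p);
 *	uma_zdestroy(foo_zone);
 */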
2539 
2540 /* See uma.h */
2541 uma_zone_t
2542 uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
2543 		    uma_init zinit, uma_fini zfini, uma_zone_t master)
2544 {
2545 	struct uma_zctor_args args;
2546 	uma_keg_t keg;
2547 	uma_zone_t res;
2548 	bool locked;
2549 
2550 	keg = master->uz_keg;
2551 	memset(&args, 0, sizeof(args));
2552 	args.name = name;
2553 	args.size = keg->uk_size;
2554 	args.ctor = ctor;
2555 	args.dtor = dtor;
2556 	args.uminit = zinit;
2557 	args.fini = zfini;
2558 	args.align = keg->uk_align;
2559 	args.flags = keg->uk_flags | UMA_ZONE_SECONDARY;
2560 	args.keg = keg;
2561 
2562 	if (booted < BOOT_BUCKETS) {
2563 		locked = false;
2564 	} else {
2565 		sx_slock(&uma_reclaim_lock);
2566 		locked = true;
2567 	}
2568 	/* XXX Attaches only one keg of potentially many. */
2569 	res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK);
2570 	if (locked)
2571 		sx_sunlock(&uma_reclaim_lock);
2572 	return (res);
2573 }
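
/*
 * An illustrative sketch (names hypothetical): a secondary zone shares
 * the master zone's keg, so it inherits the master's item size and
 * alignment and only layers its own zone init/fini on top:
 *
 *	bar_zone = uma_zsecond_create("bar", NULL, NULL, bar_zinit,
 *	    bar_zfini, foo_zone);
 */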
2574 
2575 /* See uma.h */
2576 uma_zone_t
2577 uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor,
2578 		    uma_init zinit, uma_fini zfini, uma_import zimport,
2579 		    uma_release zrelease, void *arg, int flags)
2580 {
2581 	struct uma_zctor_args args;
2582 
2583 	memset(&args, 0, sizeof(args));
2584 	args.name = name;
2585 	args.size = size;
2586 	args.ctor = ctor;
2587 	args.dtor = dtor;
2588 	args.uminit = zinit;
2589 	args.fini = zfini;
2590 	args.import = zimport;
2591 	args.release = zrelease;
2592 	args.arg = arg;
2593 	args.align = 0;
2594 	args.flags = flags | UMA_ZFLAG_CACHE;
2595 
2596 	return (zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK));
2597 }
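
/*
 * An illustrative sketch (names hypothetical): a cache zone has no keg
 * and instead moves items in and out of the bucket layer in bulk via
 * caller-supplied import/release callbacks, which must match the
 * signatures of zone_import() and zone_release() above:
 *
 *	static int baz_import(void *arg, void **store, int count,
 *	    int domain, int flags);
 *	static void baz_release(void *arg, void **store, int count);
 *
 *	baz_zone = uma_zcache_create("baz", size, NULL, NULL, NULL,
 *	    NULL, baz_import, baz_release, baz_arg, 0);
 */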
2598 
2599 /* See uma.h */
2600 void
2601 uma_zdestroy(uma_zone_t zone)
2602 {
2603 
2604 	sx_slock(&uma_reclaim_lock);
2605 	zone_free_item(zones, zone, NULL, SKIP_NONE);
2606 	sx_sunlock(&uma_reclaim_lock);
2607 }
2608 
2609 void
2610 uma_zwait(uma_zone_t zone)
2611 {
2612 	void *item;
2613 
2614 	item = uma_zalloc_arg(zone, NULL, M_WAITOK);
2615 	uma_zfree(zone, item);
2616 }
2617 
2618 void *
2619 uma_zalloc_pcpu_arg(uma_zone_t zone, void *udata, int flags)
2620 {
2621 	void *item;
2622 #ifdef SMP
2623 	int i;
2624 
2625 	MPASS(zone->uz_flags & UMA_ZONE_PCPU);
2626 #endif
2627 	item = uma_zalloc_arg(zone, udata, flags & ~M_ZERO);
2628 	if (item != NULL && (flags & M_ZERO)) {
2629 #ifdef SMP
2630 		for (i = 0; i <= mp_maxid; i++)
2631 			bzero(zpcpu_get_cpu(item, i), zone->uz_size);
2632 #else
2633 		bzero(item, zone->uz_size);
2634 #endif
2635 	}
2636 	return (item);
2637 }
2638 
2639 /*
2640  * A stub while both regular and pcpu cases are identical.
2641  */
2642 void
2643 uma_zfree_pcpu_arg(uma_zone_t zone, void *item, void *udata)
2644 {
2645 
2646 #ifdef SMP
2647 	MPASS(zone->uz_flags & UMA_ZONE_PCPU);
2648 #endif
2649 	uma_zfree_arg(zone, item, udata);
2650 }
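
/*
 * An illustrative sketch of a per-CPU zone (names hypothetical): each
 * allocation hands back one copy of the object per CPU, and individual
 * copies are addressed with zpcpu_get():
 *
 *	pcpu_zone = uma_zcreate("foo pcpu", sizeof(uint64_t), NULL,
 *	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_PCPU);
 *	c = uma_zalloc_pcpu(pcpu_zone, M_WAITOK | M_ZERO);
 *	... *(uint64_t *)zpcpu_get(c) += 1; ...
 *	uma_zfree_pcpu(pcpu_zone, c);
 */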
2651 
2652 static inline void *
2653 bucket_pop(uma_zone_t zone, uma_cache_t cache, uma_bucket_t bucket)
2654 {
2655 	void *item;
2656 
2657 	bucket->ub_cnt--;
2658 	item = bucket->ub_bucket[bucket->ub_cnt];
2659 #ifdef INVARIANTS
2660 	bucket->ub_bucket[bucket->ub_cnt] = NULL;
2661 	KASSERT(item != NULL, ("uma_zalloc: Bucket pointer mangled."));
2662 #endif
2663 	cache->uc_allocs++;
2664 
2665 	return (item);
2666 }
2667 
2668 static inline void
2669 bucket_push(uma_zone_t zone, uma_cache_t cache, uma_bucket_t bucket,
2670     void *item)
2671 {
2672 	KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL,
	    ("uma_zfree: Freeing to non-free bucket index."));
2674 	bucket->ub_bucket[bucket->ub_cnt] = item;
2675 	bucket->ub_cnt++;
2676 	cache->uc_frees++;
2677 }
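
/*
 * Together, bucket_pop() and bucket_push() treat ub_bucket[] as a
 * simple LIFO stack of item pointers with ub_cnt as the stack depth,
 * which keeps recently freed (cache-hot) items at the top for reuse.
 */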
2678 
2679 static void *
2680 item_ctor(uma_zone_t zone, void *udata, int flags, void *item)
2681 {
2682 #ifdef INVARIANTS
2683 	bool skipdbg;
2684 
2685 	skipdbg = uma_dbg_zskip(zone, item);
2686 	if (!skipdbg && (zone->uz_flags & UMA_ZFLAG_TRASH) != 0 &&
2687 	    zone->uz_ctor != trash_ctor)
2688 		trash_ctor(item, zone->uz_size, udata, flags);
2689 #endif
2690 	if (__predict_false(zone->uz_ctor != NULL) &&
2691 	    zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
2692 		counter_u64_add(zone->uz_fails, 1);
2693 		zone_free_item(zone, item, udata, SKIP_DTOR | SKIP_CNT);
2694 		return (NULL);
2695 	}
2696 #ifdef INVARIANTS
2697 	if (!skipdbg)
2698 		uma_dbg_alloc(zone, NULL, item);
2699 #endif
2700 	if (flags & M_ZERO)
2701 		uma_zero_item(item, zone);
2702 
2703 	return (item);
2704 }
2705 
2706 static inline void
2707 item_dtor(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip)
2708 {
2709 #ifdef INVARIANTS
2710 	bool skipdbg;
2711 
2712 	skipdbg = uma_dbg_zskip(zone, item);
2713 	if (skip == SKIP_NONE && !skipdbg) {
2714 		if ((zone->uz_flags & UMA_ZONE_MALLOC) != 0)
2715 			uma_dbg_free(zone, udata, item);
2716 		else
2717 			uma_dbg_free(zone, NULL, item);
2718 	}
2719 #endif
2720 	if (skip < SKIP_DTOR) {
2721 		if (zone->uz_dtor != NULL)
2722 			zone->uz_dtor(item, zone->uz_size, udata);
2723 #ifdef INVARIANTS
2724 		if (!skipdbg && (zone->uz_flags & UMA_ZFLAG_TRASH) != 0 &&
2725 		    zone->uz_dtor != trash_dtor)
2726 			trash_dtor(item, zone->uz_size, udata);
2727 #endif
2728 	}
2729 }
2730 
2731 /* See uma.h */
2732 void *
2733 uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
2734 {
2735 	uma_bucket_t bucket;
2736 	uma_cache_t cache;
2737 	void *item;
2738 	int cpu, domain;
2739 
2740 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
2741 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
2742 
2743 	/* This is the fast path allocation */
2744 	CTR4(KTR_UMA, "uma_zalloc_arg thread %x zone %s(%p) flags %d",
2745 	    curthread, zone->uz_name, zone, flags);
2746 
2747 	if (flags & M_WAITOK) {
2748 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2749 		    "uma_zalloc_arg: zone \"%s\"", zone->uz_name);
2750 	}
2751 	KASSERT((flags & M_EXEC) == 0, ("uma_zalloc_arg: called with M_EXEC"));
2752 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
2753 	    ("uma_zalloc_arg: called with spinlock or critical section held"));
2754 	if (zone->uz_flags & UMA_ZONE_PCPU)
2755 		KASSERT((flags & M_ZERO) == 0, ("allocating from a pcpu zone "
2756 		    "with M_ZERO passed"));
2757 
2758 #ifdef DEBUG_MEMGUARD
2759 	if (memguard_cmp_zone(zone)) {
2760 		item = memguard_alloc(zone->uz_size, flags);
2761 		if (item != NULL) {
2762 			if (zone->uz_init != NULL &&
2763 			    zone->uz_init(item, zone->uz_size, flags) != 0)
2764 				return (NULL);
2765 			if (zone->uz_ctor != NULL &&
2766 			    zone->uz_ctor(item, zone->uz_size, udata,
2767 			    flags) != 0) {
				counter_u64_add(zone->uz_fails, 1);
				if (zone->uz_fini != NULL)
					zone->uz_fini(item, zone->uz_size);
2770 				return (NULL);
2771 			}
2772 			return (item);
2773 		}
2774 		/* This is unfortunate but should not be fatal. */
2775 	}
2776 #endif
2777 	/*
2778 	 * If possible, allocate from the per-CPU cache.  There are two
2779 	 * requirements for safe access to the per-CPU cache: (1) the thread
2780 	 * accessing the cache must not be preempted or yield during access,
2781 	 * and (2) the thread must not migrate CPUs without switching which
2782 	 * cache it accesses.  We rely on a critical section to prevent
2783 	 * preemption and migration.  We release the critical section in
2784 	 * order to acquire the zone mutex if we are unable to allocate from
2785 	 * the current cache; when we re-acquire the critical section, we
2786 	 * must detect and handle migration if it has occurred.
2787 	 */
2788 	critical_enter();
2789 	do {
2790 		cpu = curcpu;
2791 		cache = &zone->uz_cpu[cpu];
2792 		bucket = cache->uc_allocbucket;
2793 		if (__predict_true(bucket != NULL && bucket->ub_cnt != 0)) {
2794 			item = bucket_pop(zone, cache, bucket);
2795 			critical_exit();
2796 			return (item_ctor(zone, udata, flags, item));
2797 		}
2798 	} while (cache_alloc(zone, cache, udata, flags));
2799 	critical_exit();
2800 
2801 	/*
	 * We could not get a bucket, so try to return a single item.
2803 	 */
2804 	if (zone->uz_flags & UMA_ZONE_NUMA)
2805 		domain = PCPU_GET(domain);
2806 	else
2807 		domain = UMA_ANYDOMAIN;
2808 	return (zone_alloc_item_locked(zone, udata, domain, flags));
2809 }
2810 
2811 /*
2812  * Replenish an alloc bucket and possibly restore an old one.  Called in
2813  * a critical section.  Returns in a critical section.
2814  *
2815  * A false return value indicates failure and returns with the zone lock
2816  * held.  A true return value indicates success and the caller should retry.
2817  */
2818 static __noinline bool
2819 cache_alloc(uma_zone_t zone, uma_cache_t cache, void *udata, int flags)
2820 {
2821 	uma_zone_domain_t zdom;
2822 	uma_bucket_t bucket;
2823 	int cpu, domain;
2824 	bool lockfail;
2825 
2826 	CRITICAL_ASSERT(curthread);
2827 
2828 	/*
	 * If we have run out of items in our alloc bucket, see
	 * if we can swap it with the free bucket.
2831 	 */
2832 	bucket = cache->uc_freebucket;
2833 	if (bucket != NULL && bucket->ub_cnt != 0) {
2834 		cache->uc_freebucket = cache->uc_allocbucket;
2835 		cache->uc_allocbucket = bucket;
2836 		return (true);
2837 	}
2838 
2839 	/*
2840 	 * Discard any empty allocation bucket while we hold no locks.
2841 	 */
2842 	bucket = cache->uc_allocbucket;
2843 	cache->uc_allocbucket = NULL;
2844 	critical_exit();
2845 	if (bucket != NULL)
2846 		bucket_free(zone, bucket, udata);
2847 
2848 	/*
	 * The attempt to retrieve the item from the per-CPU cache has failed,
	 * so we must go back to the zone.  This requires the zone lock, so we
2851 	 * must drop the critical section, then re-acquire it when we go back
2852 	 * to the cache.  Since the critical section is released, we may be
2853 	 * preempted or migrate.  As such, make sure not to maintain any
2854 	 * thread-local state specific to the cache from prior to releasing
2855 	 * the critical section.
2856 	 */
	lockfail = false;
2858 	if (ZONE_TRYLOCK(zone) == 0) {
2859 		/* Record contention to size the buckets. */
2860 		ZONE_LOCK(zone);
		lockfail = true;
2862 	}
2863 
2864 	critical_enter();
	/* Short-circuit for zones without buckets or under low memory. */
2866 	if (zone->uz_bucket_size == 0 || bucketdisable)
2867 		return (false);
2868 
2869 	cpu = curcpu;
2870 	cache = &zone->uz_cpu[cpu];
2871 
2872 	/* See if we lost the race to fill the cache. */
2873 	if (cache->uc_allocbucket != NULL) {
2874 		ZONE_UNLOCK(zone);
2875 		return (true);
2876 	}
2877 
2878 	/*
2879 	 * Check the zone's cache of buckets.
2880 	 */
2881 	if (zone->uz_flags & UMA_ZONE_NUMA) {
2882 		domain = PCPU_GET(domain);
2883 		zdom = &zone->uz_domain[domain];
2884 	} else {
2885 		domain = UMA_ANYDOMAIN;
2886 		zdom = &zone->uz_domain[0];
2887 	}
2888 
2889 	if ((bucket = zone_fetch_bucket(zone, zdom)) != NULL) {
2890 		ZONE_UNLOCK(zone);
2891 		KASSERT(bucket->ub_cnt != 0,
2892 		    ("uma_zalloc_arg: Returning an empty bucket."));
2893 		cache->uc_allocbucket = bucket;
2894 		return (true);
2895 	}
2896 	/* We are no longer associated with this CPU. */
2897 	critical_exit();
2898 
2899 	/*
	 * We bump the zone's bucket size when lock contention indicates that
	 * the current cache size is insufficient to handle the working set.
2902 	 */
2903 	if (lockfail && zone->uz_bucket_size < zone->uz_bucket_size_max)
2904 		zone->uz_bucket_size++;
2905 
2906 	/*
2907 	 * Fill a bucket and attempt to use it as the alloc bucket.
2908 	 */
2909 	bucket = zone_alloc_bucket(zone, udata, domain, flags);
2910 	CTR3(KTR_UMA, "uma_zalloc: zone %s(%p) bucket zone returned %p",
2911 	    zone->uz_name, zone, bucket);
2912 	critical_enter();
2913 	if (bucket == NULL)
2914 		return (false);
2915 
2916 	/*
2917 	 * See if we lost the race or were migrated.  Cache the
2918 	 * initialized bucket to make this less likely or claim
2919 	 * the memory directly.
2920 	 */
2921 	cpu = curcpu;
2922 	cache = &zone->uz_cpu[cpu];
2923 	if (cache->uc_allocbucket == NULL &&
2924 	    ((zone->uz_flags & UMA_ZONE_NUMA) == 0 ||
2925 	    domain == PCPU_GET(domain))) {
2926 		cache->uc_allocbucket = bucket;
2927 		zdom->uzd_imax += bucket->ub_cnt;
2928 	} else if (zone->uz_bkt_count >= zone->uz_bkt_max) {
2929 		critical_exit();
2930 		ZONE_UNLOCK(zone);
2931 		bucket_drain(zone, bucket);
2932 		bucket_free(zone, bucket, udata);
2933 		critical_enter();
2934 		return (true);
2935 	} else
2936 		zone_put_bucket(zone, zdom, bucket, false);
2937 	ZONE_UNLOCK(zone);
2938 	return (true);
2939 }
2940 
2941 void *
2942 uma_zalloc_domain(uma_zone_t zone, void *udata, int domain, int flags)
2943 {
2944 
2945 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
2946 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
2947 
2948 	/* This is the fast path allocation */
2949 	CTR5(KTR_UMA,
2950 	    "uma_zalloc_domain thread %x zone %s(%p) domain %d flags %d",
2951 	    curthread, zone->uz_name, zone, domain, flags);
2952 
2953 	if (flags & M_WAITOK) {
2954 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2955 		    "uma_zalloc_domain: zone \"%s\"", zone->uz_name);
2956 	}
2957 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
2958 	    ("uma_zalloc_domain: called with spinlock or critical section held"));
2959 
2960 	return (zone_alloc_item(zone, udata, domain, flags));
2961 }
2962 
2963 /*
 * Find a slab with some space.  Prefer slabs that are partially used over
 * those that are completely free.  This helps to reduce fragmentation.
2966  *
2967  * If 'rr' is 1, search all domains starting from 'domain'.  Otherwise check
2968  * only 'domain'.
2969  */
2970 static uma_slab_t
2971 keg_first_slab(uma_keg_t keg, int domain, bool rr)
2972 {
2973 	uma_domain_t dom;
2974 	uma_slab_t slab;
2975 	int start;
2976 
2977 	KASSERT(domain >= 0 && domain < vm_ndomains,
2978 	    ("keg_first_slab: domain %d out of range", domain));
2979 	KEG_LOCK_ASSERT(keg);
2980 
2981 	slab = NULL;
2982 	start = domain;
2983 	do {
2984 		dom = &keg->uk_domain[domain];
2985 		if (!LIST_EMPTY(&dom->ud_part_slab))
2986 			return (LIST_FIRST(&dom->ud_part_slab));
2987 		if (!LIST_EMPTY(&dom->ud_free_slab)) {
2988 			slab = LIST_FIRST(&dom->ud_free_slab);
2989 			LIST_REMOVE(slab, us_link);
2990 			LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
2991 			return (slab);
2992 		}
2993 		if (rr)
2994 			domain = (domain + 1) % vm_ndomains;
2995 	} while (domain != start);
2996 
2997 	return (NULL);
2998 }
2999 
3000 static uma_slab_t
3001 keg_fetch_free_slab(uma_keg_t keg, int domain, bool rr, int flags)
3002 {
3003 	uint32_t reserve;
3004 
3005 	KEG_LOCK_ASSERT(keg);
3006 
3007 	reserve = (flags & M_USE_RESERVE) != 0 ? 0 : keg->uk_reserve;
3008 	if (keg->uk_free <= reserve)
3009 		return (NULL);
3010 	return (keg_first_slab(keg, domain, rr));
3011 }
3012 
3013 static uma_slab_t
3014 keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int rdomain, const int flags)
3015 {
3016 	struct vm_domainset_iter di;
3017 	uma_domain_t dom;
3018 	uma_slab_t slab;
3019 	int aflags, domain;
3020 	bool rr;
3021 
3022 restart:
3023 	KEG_LOCK_ASSERT(keg);
3024 
3025 	/*
3026 	 * Use the keg's policy if upper layers haven't already specified a
3027 	 * domain (as happens with first-touch zones).
3028 	 *
3029 	 * To avoid races we run the iterator with the keg lock held, but that
3030 	 * means that we cannot allow the vm_domainset layer to sleep.  Thus,
3031 	 * clear M_WAITOK and handle low memory conditions locally.
3032 	 */
3033 	rr = rdomain == UMA_ANYDOMAIN;
3034 	if (rr) {
3035 		aflags = (flags & ~M_WAITOK) | M_NOWAIT;
3036 		vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain,
3037 		    &aflags);
3038 	} else {
3039 		aflags = flags;
3040 		domain = rdomain;
3041 	}
3042 
3043 	for (;;) {
3044 		slab = keg_fetch_free_slab(keg, domain, rr, flags);
3045 		if (slab != NULL)
3046 			return (slab);
3047 
3048 		/*
3049 		 * M_NOVM means don't ask at all!
3050 		 */
3051 		if (flags & M_NOVM)
3052 			break;
3053 
3054 		KASSERT(zone->uz_max_items == 0 ||
3055 		    zone->uz_items <= zone->uz_max_items,
3056 		    ("%s: zone %p overflow", __func__, zone));
3057 
3058 		slab = keg_alloc_slab(keg, zone, domain, flags, aflags);
3059 		/*
3060 		 * If we got a slab here it's safe to mark it partially used
3061 		 * and return.  We assume that the caller is going to remove
3062 		 * at least one item.
3063 		 */
3064 		if (slab) {
3065 			dom = &keg->uk_domain[slab->us_domain];
3066 			LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
3067 			return (slab);
3068 		}
3069 		KEG_LOCK(keg);
3070 		if (rr && vm_domainset_iter_policy(&di, &domain) != 0) {
3071 			if ((flags & M_WAITOK) != 0) {
3072 				KEG_UNLOCK(keg);
3073 				vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask);
3074 				KEG_LOCK(keg);
3075 				goto restart;
3076 			}
3077 			break;
3078 		}
3079 	}
3080 
3081 	/*
3082 	 * We might not have been able to get a slab but another cpu
3083 	 * could have while we were unlocked.  Check again before we
3084 	 * fail.
3085 	 */
	if ((slab = keg_fetch_free_slab(keg, domain, rr, flags)) != NULL)
		return (slab);
3089 	return (NULL);
3090 }
3091 
3092 static void *
3093 slab_alloc_item(uma_keg_t keg, uma_slab_t slab)
3094 {
3095 	uma_domain_t dom;
3096 	void *item;
3097 	uint8_t freei;
3098 
3099 	KEG_LOCK_ASSERT(keg);
3100 
3101 	freei = BIT_FFS(keg->uk_ipers, &slab->us_free) - 1;
3102 	BIT_CLR(keg->uk_ipers, freei, &slab->us_free);
3103 	item = slab_item(slab, keg, freei);
3104 	slab->us_freecount--;
3105 	keg->uk_free--;
3106 
3107 	/* Move this slab to the full list */
3108 	if (slab->us_freecount == 0) {
3109 		LIST_REMOVE(slab, us_link);
3110 		dom = &keg->uk_domain[slab->us_domain];
3111 		LIST_INSERT_HEAD(&dom->ud_full_slab, slab, us_link);
3112 	}
3113 
3114 	return (item);
3115 }
3116 
3117 static int
3118 zone_import(void *arg, void **bucket, int max, int domain, int flags)
3119 {
3120 	uma_zone_t zone;
3121 	uma_slab_t slab;
3122 	uma_keg_t keg;
3123 #ifdef NUMA
3124 	int stripe;
3125 #endif
3126 	int i;
3127 
3128 	zone = arg;
3129 	slab = NULL;
3130 	keg = zone->uz_keg;
3131 	KEG_LOCK(keg);
3132 	/* Try to keep the buckets totally full */
3133 	for (i = 0; i < max; ) {
3134 		if ((slab = keg_fetch_slab(keg, zone, domain, flags)) == NULL)
3135 			break;
3136 #ifdef NUMA
3137 		stripe = howmany(max, vm_ndomains);
3138 #endif
3139 		while (slab->us_freecount && i < max) {
3140 			bucket[i++] = slab_alloc_item(keg, slab);
3141 			if (keg->uk_free <= keg->uk_reserve)
3142 				break;
3143 #ifdef NUMA
3144 			/*
3145 			 * If the zone is striped we pick a new slab for every
3146 			 * N allocations.  Eliminating this conditional will
3147 			 * instead pick a new domain for each bucket rather
3148 			 * than stripe within each bucket.  The current option
3149 			 * produces more fragmentation and requires more cpu
3150 			 * time but yields better distribution.
3151 			 */
3152 			if ((zone->uz_flags & UMA_ZONE_NUMA) == 0 &&
3153 			    vm_ndomains > 1 && --stripe == 0)
3154 				break;
3155 #endif
3156 		}
3157 		/* Don't block if we allocated any successfully. */
3158 		flags &= ~M_WAITOK;
3159 		flags |= M_NOWAIT;
3160 	}
3161 	KEG_UNLOCK(keg);
3162 
	return (i);
3164 }
3165 
3166 static uma_bucket_t
3167 zone_alloc_bucket(uma_zone_t zone, void *udata, int domain, int flags)
3168 {
3169 	uma_bucket_t bucket;
3170 	int maxbucket, cnt;
3171 
	CTR1(KTR_UMA, "zone_alloc_bucket: domain %d", domain);
3173 
3174 	/* Avoid allocs targeting empty domains. */
3175 	if (domain != UMA_ANYDOMAIN && VM_DOMAIN_EMPTY(domain))
3176 		domain = UMA_ANYDOMAIN;
3177 
3178 	if (zone->uz_max_items > 0) {
3179 		if (zone->uz_items >= zone->uz_max_items)
			return (NULL);
3181 		maxbucket = MIN(zone->uz_bucket_size,
3182 		    zone->uz_max_items - zone->uz_items);
3183 		zone->uz_items += maxbucket;
3184 	} else
3185 		maxbucket = zone->uz_bucket_size;
3186 	ZONE_UNLOCK(zone);
3187 
3188 	/* Don't wait for buckets, preserve caller's NOVM setting. */
3189 	bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM));
3190 	if (bucket == NULL) {
3191 		cnt = 0;
3192 		goto out;
3193 	}
3194 
3195 	bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket,
3196 	    MIN(maxbucket, bucket->ub_entries), domain, flags);
3197 
3198 	/*
3199 	 * Initialize the memory if necessary.
3200 	 */
3201 	if (bucket->ub_cnt != 0 && zone->uz_init != NULL) {
3202 		int i;
3203 
3204 		for (i = 0; i < bucket->ub_cnt; i++)
3205 			if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size,
3206 			    flags) != 0)
3207 				break;
3208 		/*
3209 		 * If we couldn't initialize the whole bucket, put the
3210 		 * rest back onto the freelist.
3211 		 */
3212 		if (i != bucket->ub_cnt) {
3213 			zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i],
3214 			    bucket->ub_cnt - i);
3215 #ifdef INVARIANTS
3216 			bzero(&bucket->ub_bucket[i],
3217 			    sizeof(void *) * (bucket->ub_cnt - i));
3218 #endif
3219 			bucket->ub_cnt = i;
3220 		}
3221 	}
3222 
3223 	cnt = bucket->ub_cnt;
3224 	if (bucket->ub_cnt == 0) {
3225 		bucket_free(zone, bucket, udata);
3226 		counter_u64_add(zone->uz_fails, 1);
3227 		bucket = NULL;
3228 	}
3229 out:
3230 	ZONE_LOCK(zone);
3231 	if (zone->uz_max_items > 0 && cnt < maxbucket) {
3232 		MPASS(zone->uz_items >= maxbucket - cnt);
3233 		zone->uz_items -= maxbucket - cnt;
3234 		if (zone->uz_sleepers > 0 &&
3235 		    (cnt == 0 ? zone->uz_items + 1 : zone->uz_items) <
3236 		    zone->uz_max_items)
3237 			wakeup_one(zone);
3238 	}
3239 
3240 	return (bucket);
3241 }
3242 
3243 /*
3244  * Allocates a single item from a zone.
3245  *
3246  * Arguments
3247  *	zone   The zone to alloc for.
3248  *	udata  The data to be passed to the constructor.
3249  *	domain The domain to allocate from or UMA_ANYDOMAIN.
3250  *	flags  M_WAITOK, M_NOWAIT, M_ZERO.
3251  *
3252  * Returns
3253  *	NULL if there is no memory and M_NOWAIT is set
3254  *	An item if successful
3255  */
3257 static void *
3258 zone_alloc_item(uma_zone_t zone, void *udata, int domain, int flags)
3259 {
3260 
3261 	ZONE_LOCK(zone);
3262 	return (zone_alloc_item_locked(zone, udata, domain, flags));
3263 }
3264 
3265 /*
3266  * Returns with zone unlocked.
3267  */
3268 static void *
3269 zone_alloc_item_locked(uma_zone_t zone, void *udata, int domain, int flags)
3270 {
3271 	void *item;
3272 
3273 	ZONE_LOCK_ASSERT(zone);
3274 
3275 	if (zone->uz_max_items > 0) {
3276 		if (zone->uz_items >= zone->uz_max_items) {
3277 			zone_log_warning(zone);
3278 			zone_maxaction(zone);
3279 			if (flags & M_NOWAIT) {
3280 				ZONE_UNLOCK(zone);
3281 				return (NULL);
3282 			}
3283 			zone->uz_sleeps++;
3284 			zone->uz_sleepers++;
3285 			while (zone->uz_items >= zone->uz_max_items)
3286 				mtx_sleep(zone, zone->uz_lockptr, PVM,
3287 				    "zonelimit", 0);
3288 			zone->uz_sleepers--;
3289 			if (zone->uz_sleepers > 0 &&
3290 			    zone->uz_items + 1 < zone->uz_max_items)
3291 				wakeup_one(zone);
3292 		}
3293 		zone->uz_items++;
3294 	}
3295 	ZONE_UNLOCK(zone);
3296 
3297 	/* Avoid allocs targeting empty domains. */
3298 	if (domain != UMA_ANYDOMAIN && VM_DOMAIN_EMPTY(domain))
3299 		domain = UMA_ANYDOMAIN;
3300 
3301 	if (zone->uz_import(zone->uz_arg, &item, 1, domain, flags) != 1)
3302 		goto fail_cnt;
3303 
3304 	/*
3305 	 * We have to call both the zone's init (not the keg's init)
3306 	 * and the zone's ctor.  This is because the item is going from
3307 	 * a keg slab directly to the user, and the user is expecting it
3308 	 * to be both zone-init'd as well as zone-ctor'd.
3309 	 */
3310 	if (zone->uz_init != NULL) {
3311 		if (zone->uz_init(item, zone->uz_size, flags) != 0) {
3312 			zone_free_item(zone, item, udata, SKIP_FINI | SKIP_CNT);
3313 			goto fail_cnt;
3314 		}
3315 	}
3316 	item = item_ctor(zone, udata, flags, item);
3317 	if (item == NULL)
3318 		goto fail;
3319 
3320 	counter_u64_add(zone->uz_allocs, 1);
3321 	CTR3(KTR_UMA, "zone_alloc_item item %p from %s(%p)", item,
3322 	    zone->uz_name, zone);
3323 
3324 	return (item);
3325 
3326 fail_cnt:
3327 	counter_u64_add(zone->uz_fails, 1);
3328 fail:
3329 	if (zone->uz_max_items > 0) {
3330 		ZONE_LOCK(zone);
3331 		/* XXX Decrement without wakeup */
3332 		zone->uz_items--;
3333 		ZONE_UNLOCK(zone);
3334 	}
3335 	CTR2(KTR_UMA, "zone_alloc_item failed from %s(%p)",
3336 	    zone->uz_name, zone);
3337 	return (NULL);
3338 }
3339 
3340 /* See uma.h */
3341 void
3342 uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
3343 {
3344 	uma_cache_t cache;
3345 	uma_bucket_t bucket;
3346 	int cpu, domain, itemdomain;
3347 
3348 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
3349 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
3350 
3351 	CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread,
3352 	    zone->uz_name);
3353 
3354 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
3355 	    ("uma_zfree_arg: called with spinlock or critical section held"));
3356 
	/* uma_zfree(..., NULL) does nothing, to match free(9). */
	if (item == NULL)
		return;
3360 #ifdef DEBUG_MEMGUARD
3361 	if (is_memguard_addr(item)) {
3362 		if (zone->uz_dtor != NULL)
3363 			zone->uz_dtor(item, zone->uz_size, udata);
3364 		if (zone->uz_fini != NULL)
3365 			zone->uz_fini(item, zone->uz_size);
3366 		memguard_free(item);
3367 		return;
3368 	}
3369 #endif
3370 	item_dtor(zone, item, udata, SKIP_NONE);
3371 
3372 	/*
3373 	 * The race here is acceptable.  If we miss it we'll just have to wait
3374 	 * a little longer for the limits to be reset.
3375 	 */
3376 	if (zone->uz_sleepers > 0)
3377 		goto zfree_item;
3378 
3379 	/*
3380 	 * If possible, free to the per-CPU cache.  There are two
3381 	 * requirements for safe access to the per-CPU cache: (1) the thread
3382 	 * accessing the cache must not be preempted or yield during access,
3383 	 * and (2) the thread must not migrate CPUs without switching which
3384 	 * cache it accesses.  We rely on a critical section to prevent
3385 	 * preemption and migration.  We release the critical section in
3386 	 * order to acquire the zone mutex if we are unable to free to the
3387 	 * current cache; when we re-acquire the critical section, we must
3388 	 * detect and handle migration if it has occurred.
3389 	 */
3390 	domain = itemdomain = 0;
3391 	critical_enter();
3392 	do {
3393 		cpu = curcpu;
3394 		cache = &zone->uz_cpu[cpu];
3395 		bucket = cache->uc_allocbucket;
3396 #ifdef UMA_XDOMAIN
3397 		if ((zone->uz_flags & UMA_ZONE_NUMA) != 0) {
			itemdomain = _vm_phys_domain(pmap_kextract(
			    (vm_offset_t)item));
3399 			domain = PCPU_GET(domain);
3400 		}
3401 		if ((zone->uz_flags & UMA_ZONE_NUMA) != 0 &&
3402 		    domain != itemdomain) {
3403 			bucket = cache->uc_crossbucket;
3404 		} else
3405 #endif
3407 		/*
3408 		 * Try to free into the allocbucket first to give LIFO ordering
		 * for cache-hot data structures.  Spill over into the freebucket
3410 		 * if necessary.  Alloc will swap them if one runs dry.
3411 		 */
3412 		if (bucket == NULL || bucket->ub_cnt >= bucket->ub_entries)
3413 			bucket = cache->uc_freebucket;
3414 		if (__predict_true(bucket != NULL &&
3415 		    bucket->ub_cnt < bucket->ub_entries)) {
3416 			bucket_push(zone, cache, bucket, item);
3417 			critical_exit();
3418 			return;
3419 		}
3420 	} while (cache_free(zone, cache, udata, item, itemdomain));
3421 	critical_exit();
3422 
3423 	/*
3424 	 * If nothing else caught this, we'll just do an internal free.
3425 	 */
3426 zfree_item:
3427 	zone_free_item(zone, item, udata, SKIP_DTOR);
3428 }
3429 
3430 static void
3431 zone_free_bucket(uma_zone_t zone, uma_bucket_t bucket, void *udata,
3432     int domain, int itemdomain)
3433 {
3434 	uma_zone_domain_t zdom;
3435 
3436 #ifdef UMA_XDOMAIN
3437 	/*
	 * On a system with exactly two domains, a bucket coming from the
	 * wrong domain consists entirely of items belonging to the only
	 * other domain, so we can simply cache it.  Otherwise we need to
	 * sort the items back to their correct domains by freeing the
	 * contents to the slab layer.
3442 	 */
3443 	if (domain != itemdomain && vm_ndomains > 2) {
3444 		CTR3(KTR_UMA,
3445 		    "uma_zfree: zone %s(%p) draining cross bucket %p",
3446 		    zone->uz_name, zone, bucket);
3447 		bucket_drain(zone, bucket);
3448 		bucket_free(zone, bucket, udata);
3449 		return;
3450 	}
3451 #endif
3452 	/*
3453 	 * Attempt to save the bucket in the zone's domain bucket cache.
3454 	 *
	 * We bump the zone's bucket size when lock contention indicates that
	 * the current cache size is insufficient to handle the working set.
3457 	 */
3458 	if (ZONE_TRYLOCK(zone) == 0) {
3459 		/* Record contention to size the buckets. */
3460 		ZONE_LOCK(zone);
3461 		if (zone->uz_bucket_size < zone->uz_bucket_size_max)
3462 			zone->uz_bucket_size++;
3463 	}
3464 
3465 	CTR3(KTR_UMA,
3466 	    "uma_zfree: zone %s(%p) putting bucket %p on free list",
3467 	    zone->uz_name, zone, bucket);
	/* A bucket cached here must be full. */
	KASSERT(bucket->ub_cnt == bucket->ub_entries,
	    ("uma_zfree: Attempting to insert a partial bucket onto the full list.\n"));
3471 	if (zone->uz_bkt_count >= zone->uz_bkt_max) {
3472 		ZONE_UNLOCK(zone);
3473 		bucket_drain(zone, bucket);
3474 		bucket_free(zone, bucket, udata);
3475 	} else {
3476 		zdom = &zone->uz_domain[itemdomain];
3477 		zone_put_bucket(zone, zdom, bucket, true);
3478 		ZONE_UNLOCK(zone);
3479 	}
3480 }
3481 
3482 /*
3483  * Populate a free or cross bucket for the current cpu cache.  Free any
3484  * existing full bucket either to the zone cache or back to the slab layer.
3485  *
 * Enters and returns in a critical section.  A false return indicates that
 * we cannot satisfy this free in the cache layer.  A true return indicates
 * that the caller should retry.
3489  */
3490 static __noinline bool
3491 cache_free(uma_zone_t zone, uma_cache_t cache, void *udata, void *item,
3492     int itemdomain)
3493 {
3494 	uma_bucket_t bucket;
3495 	int cpu, domain;
3496 
3497 	CRITICAL_ASSERT(curthread);
3498 
3499 	if (zone->uz_bucket_size == 0 || bucketdisable)
		return (false);
3501 
3502 	cpu = curcpu;
3503 	cache = &zone->uz_cpu[cpu];
3504 
3505 	/*
3506 	 * NUMA domains need to free to the correct zdom.  When XDOMAIN
3507 	 * is enabled this is the zdom of the item and the bucket may be
3508 	 * the cross bucket if they do not match.
3509 	 */
3510 	if ((zone->uz_flags & UMA_ZONE_NUMA) != 0)
3511 #ifdef UMA_XDOMAIN
3512 		domain = PCPU_GET(domain);
3513 #else
3514 		itemdomain = domain = PCPU_GET(domain);
3515 #endif
3516 	else
3517 		itemdomain = domain = 0;
3518 #ifdef UMA_XDOMAIN
3519 	if (domain != itemdomain) {
3520 		bucket = cache->uc_crossbucket;
3521 		cache->uc_crossbucket = NULL;
3522 		if (bucket != NULL)
3523 			atomic_add_64(&zone->uz_xdomain, bucket->ub_cnt);
3524 	} else
3525 #endif
3526 	{
3527 		bucket = cache->uc_freebucket;
3528 		cache->uc_freebucket = NULL;
3529 	}
3530 
3531 
3533 	critical_exit();
3534 
3535 	if (bucket != NULL)
3536 		zone_free_bucket(zone, bucket, udata, domain, itemdomain);
3537 
3538 	bucket = bucket_alloc(zone, udata, M_NOWAIT);
3539 	CTR3(KTR_UMA, "uma_zfree: zone %s(%p) allocated bucket %p",
3540 	    zone->uz_name, zone, bucket);
3541 	critical_enter();
3542 	if (bucket == NULL)
3543 		return (false);
3544 	cpu = curcpu;
3545 	cache = &zone->uz_cpu[cpu];
3546 #ifdef UMA_XDOMAIN
3547 	/*
3548 	 * Check to see if we should be populating the cross bucket.  If it
3549 	 * is already populated we will fall through and attempt to populate
3550 	 * the free bucket.
3551 	 */
3552 	if ((zone->uz_flags & UMA_ZONE_NUMA) != 0) {
3553 		domain = PCPU_GET(domain);
3554 		if (domain != itemdomain && cache->uc_crossbucket == NULL) {
3555 			cache->uc_crossbucket = bucket;
3556 			return (true);
3557 		}
3558 	}
3559 #endif
3560 	/*
3561 	 * We may have lost the race to fill the bucket or switched CPUs.
3562 	 */
3563 	if (cache->uc_freebucket != NULL) {
3564 		critical_exit();
3565 		bucket_free(zone, bucket, udata);
3566 		critical_enter();
3567 	} else
3568 		cache->uc_freebucket = bucket;
3569 
3570 	return (true);
3571 }
3572 
3573 void
3574 uma_zfree_domain(uma_zone_t zone, void *item, void *udata)
3575 {
3576 
3577 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
3578 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
3579 
3580 	CTR2(KTR_UMA, "uma_zfree_domain thread %x zone %s", curthread,
3581 	    zone->uz_name);
3582 
3583 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
3584 	    ("uma_zfree_domain: called with spinlock or critical section held"));
3585 
	/* uma_zfree(..., NULL) does nothing, to match free(9). */
	if (item == NULL)
		return;
3589 	zone_free_item(zone, item, udata, SKIP_NONE);
3590 }
3591 
3592 static void
3593 slab_free_item(uma_zone_t zone, uma_slab_t slab, void *item)
3594 {
3595 	uma_keg_t keg;
3596 	uma_domain_t dom;
3597 	uint8_t freei;
3598 
3599 	keg = zone->uz_keg;
3600 	MPASS(zone->uz_lockptr == &keg->uk_lock);
3601 	KEG_LOCK_ASSERT(keg);
3602 
3603 	dom = &keg->uk_domain[slab->us_domain];
3604 
3605 	/* Do we need to remove from any lists? */
	if (slab->us_freecount + 1 == keg->uk_ipers) {
3607 		LIST_REMOVE(slab, us_link);
3608 		LIST_INSERT_HEAD(&dom->ud_free_slab, slab, us_link);
3609 	} else if (slab->us_freecount == 0) {
3610 		LIST_REMOVE(slab, us_link);
3611 		LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
3612 	}
3613 
3614 	/* Slab management. */
3615 	freei = slab_item_index(slab, keg, item);
3616 	BIT_SET(keg->uk_ipers, freei, &slab->us_free);
3617 	slab->us_freecount++;
3618 
3619 	/* Keg statistics. */
3620 	keg->uk_free++;
3621 }
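
/*
 * Worked example, with illustrative numbers: for a keg with uk_ipers == 4,
 * freeing into a slab whose us_freecount is 3 makes it fully free
 * (3 + 1 == 4), so it moves to ud_free_slab; freeing into a slab whose
 * us_freecount is 0 moves it from the full-slab list to ud_part_slab.
 * All other frees leave the slab on its current list.
 */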
3622 
3623 static void
3624 zone_release(void *arg, void **bucket, int cnt)
3625 {
3626 	uma_zone_t zone;
3627 	void *item;
3628 	uma_slab_t slab;
3629 	uma_keg_t keg;
3630 	uint8_t *mem;
3631 	int i;
3632 
3633 	zone = arg;
3634 	keg = zone->uz_keg;
3635 	KEG_LOCK(keg);
3636 	for (i = 0; i < cnt; i++) {
3637 		item = bucket[i];
3638 		if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) {
3639 			mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
3640 			if (zone->uz_flags & UMA_ZONE_HASH) {
3641 				slab = hash_sfind(&keg->uk_hash, mem);
3642 			} else {
3643 				mem += keg->uk_pgoff;
3644 				slab = (uma_slab_t)mem;
3645 			}
3646 		} else
3647 			slab = vtoslab((vm_offset_t)item);
3648 		slab_free_item(zone, slab, item);
3649 	}
3650 	KEG_UNLOCK(keg);
3651 }
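
/*
 * Address-math sketch (illustrative, assuming 4 KB pages so that
 * UMA_SLAB_MASK == 0xfff): an item at 0xfffff80012345678 masks down to
 * the slab base 0xfffff80012345000.  Kegs without UMA_ZONE_HASH keep the
 * slab header inside the slab itself at base + uk_pgoff; hashed kegs
 * look the base address up in uk_hash instead.
 */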
3652 
3653 /*
3654  * Frees a single item to any zone.
3655  *
3656  * Arguments:
3657  *	zone   The zone to free to
3658  *	item   The item we're freeing
3659  *	udata  User supplied data for the dtor
3660  *	skip   Skip dtors and finis
3661  */
3662 static void
3663 zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip)
3664 {
3665 
3666 	item_dtor(zone, item, udata, skip);
3667 
3668 	if (skip < SKIP_FINI && zone->uz_fini)
3669 		zone->uz_fini(item, zone->uz_size);
3670 
3671 	zone->uz_release(zone->uz_arg, &item, 1);
3672 
3673 	if (skip & SKIP_CNT)
3674 		return;
3675 
3676 	counter_u64_add(zone->uz_frees, 1);
3677 
3678 	if (zone->uz_max_items > 0) {
3679 		ZONE_LOCK(zone);
3680 		zone->uz_items--;
3681 		if (zone->uz_sleepers > 0 &&
3682 		    zone->uz_items < zone->uz_max_items)
3683 			wakeup_one(zone);
3684 		ZONE_UNLOCK(zone);
3685 	}
3686 }
3687 
3688 /* See uma.h */
3689 int
3690 uma_zone_set_max(uma_zone_t zone, int nitems)
3691 {
3692 	struct uma_bucket_zone *ubz;
3693 	int count;
3694 
3695 	ZONE_LOCK(zone);
3696 	ubz = bucket_zone_max(zone, nitems);
3697 	count = ubz != NULL ? ubz->ubz_entries : 0;
3698 	zone->uz_bucket_size_max = zone->uz_bucket_size = count;
3699 	if (zone->uz_bucket_size_min > zone->uz_bucket_size_max)
3700 		zone->uz_bucket_size_min = zone->uz_bucket_size_max;
3701 	zone->uz_max_items = nitems;
3702 	ZONE_UNLOCK(zone);
3703 
3704 	return (nitems);
3705 }
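
/*
 * Usage sketch (illustrative; "foo_zone" is a hypothetical zone):
 *
 *	uma_zone_set_max(foo_zone, 4096);
 *	obj = uma_zalloc(foo_zone, M_NOWAIT);
 *
 * Once 4096 items are outstanding, the M_NOWAIT allocation above returns
 * NULL, while an M_WAITOK caller would sleep until uma_zfree() makes
 * room (see the wakeup_one() in zone_free_item()).
 */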
3706 
3707 /* See uma.h */
3708 void
3709 uma_zone_set_maxcache(uma_zone_t zone, int nitems)
3710 {
3711 	struct uma_bucket_zone *ubz;
3712 	int bpcpu;
3713 
3714 	ZONE_LOCK(zone);
3715 	ubz = bucket_zone_max(zone, nitems);
3716 	if (ubz != NULL) {
3717 		bpcpu = 2;
3718 #ifdef UMA_XDOMAIN
3719 		if ((zone->uz_flags & UMA_ZONE_NUMA) != 0)
3720 			/* Count the cross-domain bucket. */
3721 			bpcpu++;
3722 #endif
3723 		nitems -= ubz->ubz_entries * bpcpu * mp_ncpus;
3724 		zone->uz_bucket_size_max = ubz->ubz_entries;
3725 	} else {
3726 		zone->uz_bucket_size_max = zone->uz_bucket_size = 0;
3727 	}
3728 	if (zone->uz_bucket_size_min > zone->uz_bucket_size_max)
3729 		zone->uz_bucket_size_min = zone->uz_bucket_size_max;
3730 	zone->uz_bkt_max = nitems;
3731 	ZONE_UNLOCK(zone);
3732 }
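
/*
 * Worked example, with illustrative numbers: on an 8-CPU machine with
 * UMA_XDOMAIN and a NUMA zone (bpcpu == 3), nitems == 4096 selecting a
 * 64-entry bucket zone reserves 64 * 3 * 8 == 1536 entries for per-CPU
 * buckets, leaving uz_bkt_max == 4096 - 1536 == 2560 for the per-domain
 * full-bucket caches.
 */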
3733 
3734 /* See uma.h */
3735 int
3736 uma_zone_get_max(uma_zone_t zone)
3737 {
3738 	int nitems;
3739 
3740 	ZONE_LOCK(zone);
3741 	nitems = zone->uz_max_items;
3742 	ZONE_UNLOCK(zone);
3743 
3744 	return (nitems);
3745 }
3746 
3747 /* See uma.h */
3748 void
3749 uma_zone_set_warning(uma_zone_t zone, const char *warning)
3750 {
3751 
3752 	ZONE_LOCK(zone);
3753 	zone->uz_warning = warning;
3754 	ZONE_UNLOCK(zone);
3755 }
3756 
3757 /* See uma.h */
3758 void
3759 uma_zone_set_maxaction(uma_zone_t zone, uma_maxaction_t maxaction)
3760 {
3761 
3762 	ZONE_LOCK(zone);
3763 	TASK_INIT(&zone->uz_maxaction, 0, (task_fn_t *)maxaction, zone);
3764 	ZONE_UNLOCK(zone);
3765 }
3766 
3767 /* See uma.h */
3768 int
3769 uma_zone_get_cur(uma_zone_t zone)
3770 {
3771 	int64_t nitems;
3772 	u_int i;
3773 
3774 	ZONE_LOCK(zone);
3775 	nitems = counter_u64_fetch(zone->uz_allocs) -
3776 	    counter_u64_fetch(zone->uz_frees);
3777 	if ((zone->uz_flags & UMA_ZFLAG_INTERNAL) == 0) {
3778 		CPU_FOREACH(i) {
3779 			/*
3780 			 * See the comment in uma_vm_zone_stats() regarding
3781 			 * the safety of accessing the per-cpu caches. With
3782 			 * the zone lock held, it is safe, but can potentially
3783 			 * result in stale data.
3784 			 */
3785 			nitems += zone->uz_cpu[i].uc_allocs -
3786 			    zone->uz_cpu[i].uc_frees;
3787 		}
3788 	}
3789 	ZONE_UNLOCK(zone);
3790 
3791 	return (nitems < 0 ? 0 : nitems);
3792 }
3793 
3794 static uint64_t
3795 uma_zone_get_allocs(uma_zone_t zone)
3796 {
3797 	uint64_t nitems;
3798 	u_int i;
3799 
3800 	ZONE_LOCK(zone);
3801 	nitems = counter_u64_fetch(zone->uz_allocs);
3802 	if ((zone->uz_flags & UMA_ZFLAG_INTERNAL) == 0) {
3803 		CPU_FOREACH(i) {
3804 			/*
3805 			 * See the comment in uma_vm_zone_stats() regarding
3806 			 * the safety of accessing the per-cpu caches. With
3807 			 * the zone lock held, it is safe, but can potentially
3808 			 * result in stale data.
3809 			 */
3810 			nitems += zone->uz_cpu[i].uc_allocs;
3811 		}
3812 	}
3813 	ZONE_UNLOCK(zone);
3814 
3815 	return (nitems);
3816 }
3817 
3818 static uint64_t
3819 uma_zone_get_frees(uma_zone_t zone)
3820 {
3821 	uint64_t nitems;
3822 	u_int i;
3823 
3824 	ZONE_LOCK(zone);
3825 	nitems = counter_u64_fetch(zone->uz_frees);
3826 	if ((zone->uz_flags & UMA_ZFLAG_INTERNAL) == 0) {
3827 		CPU_FOREACH(i) {
3828 			/*
3829 			 * See the comment in uma_vm_zone_stats() regarding
3830 			 * the safety of accessing the per-cpu caches. With
3831 			 * the zone lock held, it is safe, but can potentially
3832 			 * result in stale data.
3833 			 */
3834 			nitems += zone->uz_cpu[i].uc_frees;
3835 		}
3836 	}
3837 	ZONE_UNLOCK(zone);
3838 
3839 	return (nitems);
3840 }
3841 
3842 /* See uma.h */
3843 void
3844 uma_zone_set_init(uma_zone_t zone, uma_init uminit)
3845 {
3846 	uma_keg_t keg;
3847 
3848 	KEG_GET(zone, keg);
3849 	KEG_LOCK(keg);
3850 	KASSERT(keg->uk_pages == 0,
3851 	    ("uma_zone_set_init on non-empty keg"));
3852 	keg->uk_init = uminit;
3853 	KEG_UNLOCK(keg);
3854 }
3855 
3856 /* See uma.h */
3857 void
3858 uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
3859 {
3860 	uma_keg_t keg;
3861 
3862 	KEG_GET(zone, keg);
3863 	KEG_LOCK(keg);
3864 	KASSERT(keg->uk_pages == 0,
3865 	    ("uma_zone_set_fini on non-empty keg"));
3866 	keg->uk_fini = fini;
3867 	KEG_UNLOCK(keg);
3868 }
3869 
3870 /* See uma.h */
3871 void
3872 uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
3873 {
3874 
3875 	ZONE_LOCK(zone);
3876 	KASSERT(zone->uz_keg->uk_pages == 0,
3877 	    ("uma_zone_set_zinit on non-empty keg"));
3878 	zone->uz_init = zinit;
3879 	ZONE_UNLOCK(zone);
3880 }
3881 
3882 /* See uma.h */
3883 void
3884 uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
3885 {
3886 
3887 	ZONE_LOCK(zone);
3888 	KASSERT(zone->uz_keg->uk_pages == 0,
3889 	    ("uma_zone_set_zfini on non-empty keg"));
3890 	zone->uz_fini = zfini;
3891 	ZONE_UNLOCK(zone);
3892 }
3893 
3894 /* See uma.h */
3895 /* XXX uk_freef is not actually used with the zone locked */
3896 void
3897 uma_zone_set_freef(uma_zone_t zone, uma_free freef)
3898 {
3899 	uma_keg_t keg;
3900 
3901 	KEG_GET(zone, keg);
3902 	KASSERT(keg != NULL, ("uma_zone_set_freef: Invalid zone type"));
3903 	KEG_LOCK(keg);
3904 	keg->uk_freef = freef;
3905 	KEG_UNLOCK(keg);
3906 }
3907 
3908 /* See uma.h */
3909 /* XXX uk_allocf is not actually used with the zone locked */
3910 void
3911 uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
3912 {
3913 	uma_keg_t keg;
3914 
3915 	KEG_GET(zone, keg);
3916 	KEG_LOCK(keg);
3917 	keg->uk_allocf = allocf;
3918 	KEG_UNLOCK(keg);
3919 }
3920 
3921 /* See uma.h */
3922 void
3923 uma_zone_reserve(uma_zone_t zone, int items)
3924 {
3925 	uma_keg_t keg;
3926 
3927 	KEG_GET(zone, keg);
3928 	KEG_LOCK(keg);
3929 	keg->uk_reserve = items;
3930 	KEG_UNLOCK(keg);
3931 }
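
/*
 * Usage sketch (illustrative; "foo_zone" is a hypothetical zone): a
 * consumer that must make progress under memory pressure can set aside
 * items and dip into them with M_USE_RESERVE:
 *
 *	uma_zone_reserve(foo_zone, 8);
 *	uma_prealloc(foo_zone, 8);
 *	...
 *	obj = uma_zalloc(foo_zone, M_NOWAIT | M_USE_RESERVE);
 *
 * Allocations without M_USE_RESERVE fail once only uk_reserve items
 * remain in the keg.
 */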
3932 
3933 /* See uma.h */
3934 int
3935 uma_zone_reserve_kva(uma_zone_t zone, int count)
3936 {
3937 	uma_keg_t keg;
3938 	vm_offset_t kva;
3939 	u_int pages;
3940 
3941 	KEG_GET(zone, keg);
3942 
3943 	pages = count / keg->uk_ipers;
3944 	if (pages * keg->uk_ipers < count)
3945 		pages++;
3946 	pages *= keg->uk_ppera;
3947 
3948 #ifdef UMA_MD_SMALL_ALLOC
3949 	if (keg->uk_ppera > 1) {
3950 #else
3951 	if (1) {
3952 #endif
3953 		kva = kva_alloc((vm_size_t)pages * PAGE_SIZE);
3954 		if (kva == 0)
3955 			return (0);
3956 	} else
3957 		kva = 0;
3958 
3959 	ZONE_LOCK(zone);
3960 	MPASS(keg->uk_kva == 0);
3961 	keg->uk_kva = kva;
3962 	keg->uk_offset = 0;
3963 	zone->uz_max_items = pages * keg->uk_ipers;
3964 #ifdef UMA_MD_SMALL_ALLOC
3965 	keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc;
3966 #else
3967 	keg->uk_allocf = noobj_alloc;
3968 #endif
3969 	keg->uk_flags |= UMA_ZONE_NOFREE;
3970 	ZONE_UNLOCK(zone);
3971 
3972 	return (1);
3973 }
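
/*
 * Worked example, with illustrative numbers: reserving KVA for
 * count == 1000 items in a keg with uk_ipers == 10 and uk_ppera == 1
 * rounds up to 100 slabs, so pages == 100; 100 * PAGE_SIZE bytes of KVA
 * are set aside and uz_max_items becomes 100 * 10 == 1000.
 */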
3974 
3975 /* See uma.h */
3976 void
3977 uma_prealloc(uma_zone_t zone, int items)
3978 {
3979 	struct vm_domainset_iter di;
3980 	uma_domain_t dom;
3981 	uma_slab_t slab;
3982 	uma_keg_t keg;
3983 	int aflags, domain, slabs;
3984 
3985 	KEG_GET(zone, keg);
3986 	KEG_LOCK(keg);
3987 	slabs = items / keg->uk_ipers;
3988 	if (slabs * keg->uk_ipers < items)
3989 		slabs++;
3990 	while (slabs-- > 0) {
3991 		aflags = M_NOWAIT;
3992 		vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain,
3993 		    &aflags);
3994 		for (;;) {
3995 			slab = keg_alloc_slab(keg, zone, domain, M_WAITOK,
3996 			    aflags);
3997 			if (slab != NULL) {
3998 				dom = &keg->uk_domain[slab->us_domain];
3999 				LIST_INSERT_HEAD(&dom->ud_free_slab, slab,
4000 				    us_link);
4001 				break;
4002 			}
4003 			KEG_LOCK(keg);
4004 			if (vm_domainset_iter_policy(&di, &domain) != 0) {
4005 				KEG_UNLOCK(keg);
4006 				vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask);
4007 				KEG_LOCK(keg);
4008 			}
4009 		}
4010 	}
4011 	KEG_UNLOCK(keg);
4012 }
4013 
4014 /* See uma.h */
4015 void
4016 uma_reclaim(int req)
4017 {
4018 
4019 	CTR0(KTR_UMA, "UMA: vm asked us to release pages!");
4020 	sx_xlock(&uma_reclaim_lock);
4021 	bucket_enable();
4022 
4023 	switch (req) {
4024 	case UMA_RECLAIM_TRIM:
4025 		zone_foreach(zone_trim, NULL);
4026 		break;
4027 	case UMA_RECLAIM_DRAIN:
4028 	case UMA_RECLAIM_DRAIN_CPU:
4029 		zone_foreach(zone_drain, NULL);
4030 		if (req == UMA_RECLAIM_DRAIN_CPU) {
4031 			pcpu_cache_drain_safe(NULL);
4032 			zone_foreach(zone_drain, NULL);
4033 		}
4034 		break;
4035 	default:
4036 		panic("unhandled reclamation request %d", req);
4037 	}
4038 
4039 	/*
4040 	 * The slab zone is visited early above, but draining other zones
4041 	 * frees slab headers back into it; visit it again so that pages
4042 	 * emptied in the meantime can be freed.  Do the same for buckets.
4043 	 */
4044 	zone_drain(slabzone, NULL);
4045 	bucket_zone_drain();
4046 	sx_xunlock(&uma_reclaim_lock);
4047 }
4048 
4049 static volatile int uma_reclaim_needed;
4050 
4051 void
4052 uma_reclaim_wakeup(void)
4053 {
4054 
4055 	if (atomic_fetchadd_int(&uma_reclaim_needed, 1) == 0)
4056 		wakeup(uma_reclaim);
4057 }
4058 
4059 void
4060 uma_reclaim_worker(void *arg __unused)
4061 {
4062 
4063 	for (;;) {
4064 		sx_xlock(&uma_reclaim_lock);
4065 		while (atomic_load_int(&uma_reclaim_needed) == 0)
4066 			sx_sleep(uma_reclaim, &uma_reclaim_lock, PVM, "umarcl",
4067 			    hz);
4068 		sx_xunlock(&uma_reclaim_lock);
4069 		EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_KMEM);
4070 		uma_reclaim(UMA_RECLAIM_DRAIN_CPU);
4071 		atomic_store_int(&uma_reclaim_needed, 0);
4072 		/* Don't fire more than once per second. */
4073 		pause("umarclslp", hz);
4074 	}
4075 }
4076 
4077 /* See uma.h */
4078 void
4079 uma_zone_reclaim(uma_zone_t zone, int req)
4080 {
4081 
4082 	switch (req) {
4083 	case UMA_RECLAIM_TRIM:
4084 		zone_trim(zone, NULL);
4085 		break;
4086 	case UMA_RECLAIM_DRAIN:
4087 		zone_drain(zone, NULL);
4088 		break;
4089 	case UMA_RECLAIM_DRAIN_CPU:
4090 		pcpu_cache_drain_safe(zone);
4091 		zone_drain(zone, NULL);
4092 		break;
4093 	default:
4094 		panic("unhandled reclamation request %d", req);
4095 	}
4096 }
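
/*
 * Usage sketch (illustrative; "foo_zone" is a hypothetical zone): a
 * subsystem can trim its own zone's excess cache, while the heavier
 * global variants are reserved for real memory pressure:
 *
 *	uma_zone_reclaim(foo_zone, UMA_RECLAIM_TRIM);
 *	uma_reclaim(UMA_RECLAIM_DRAIN_CPU);
 *
 * UMA_RECLAIM_DRAIN_CPU additionally flushes the per-CPU buckets via
 * pcpu_cache_drain_safe() before draining a second time.
 */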
4097 
4098 /* See uma.h */
4099 int
4100 uma_zone_exhausted(uma_zone_t zone)
4101 {
4102 	int full;
4103 
4104 	ZONE_LOCK(zone);
4105 	full = zone->uz_sleepers > 0;
4106 	ZONE_UNLOCK(zone);
4107 	return (full);
4108 }
4109 
4110 int
4111 uma_zone_exhausted_nolock(uma_zone_t zone)
4112 {
4113 	return (zone->uz_sleepers > 0);
4114 }
4115 
4116 static void
4117 uma_zero_item(void *item, uma_zone_t zone)
4118 {
4119 
4120 	bzero(item, zone->uz_size);
4121 }
4122 
4123 unsigned long
4124 uma_limit(void)
4125 {
4126 
4127 	return (uma_kmem_limit);
4128 }
4129 
4130 void
4131 uma_set_limit(unsigned long limit)
4132 {
4133 
4134 	uma_kmem_limit = limit;
4135 }
4136 
4137 unsigned long
4138 uma_size(void)
4139 {
4140 
4141 	return (atomic_load_long(&uma_kmem_total));
4142 }
4143 
4144 long
4145 uma_avail(void)
4146 {
4147 
4148 	return (uma_kmem_limit - uma_size());
4149 }
4150 
4151 #ifdef DDB
4152 /*
4153  * Generate statistics across both the zone and its per-cpu caches.  Each
4154  * statistic is returned via its pointer argument if that pointer is non-NULL.
4155  *
4156  * Note: does not update the zone statistics, as it can't safely clear the
4157  * per-CPU cache statistic.
4158  *
4159  * XXXRW: Following the uc_allocbucket and uc_freebucket pointers here isn't
4160  * safe from off-CPU; we should modify the caches to track this information
4161  * directly so that we don't have to.
4162  */
4163 static void
4164 uma_zone_sumstat(uma_zone_t z, long *cachefreep, uint64_t *allocsp,
4165     uint64_t *freesp, uint64_t *sleepsp, uint64_t *xdomainp)
4166 {
4167 	uma_cache_t cache;
4168 	uint64_t allocs, frees, sleeps, xdomain;
4169 	int cachefree, cpu;
4170 
4171 	allocs = frees = sleeps = xdomain = 0;
4172 	cachefree = 0;
4173 	CPU_FOREACH(cpu) {
4174 		cache = &z->uz_cpu[cpu];
4175 		if (cache->uc_allocbucket != NULL)
4176 			cachefree += cache->uc_allocbucket->ub_cnt;
4177 		if (cache->uc_freebucket != NULL)
4178 			cachefree += cache->uc_freebucket->ub_cnt;
4179 		if (cache->uc_crossbucket != NULL) {
4180 			xdomain += cache->uc_crossbucket->ub_cnt;
4181 			cachefree += cache->uc_crossbucket->ub_cnt;
4182 		}
4183 		allocs += cache->uc_allocs;
4184 		frees += cache->uc_frees;
4185 	}
4186 	allocs += counter_u64_fetch(z->uz_allocs);
4187 	frees += counter_u64_fetch(z->uz_frees);
4188 	sleeps += z->uz_sleeps;
4189 	xdomain += z->uz_xdomain;
4190 	if (cachefreep != NULL)
4191 		*cachefreep = cachefree;
4192 	if (allocsp != NULL)
4193 		*allocsp = allocs;
4194 	if (freesp != NULL)
4195 		*freesp = frees;
4196 	if (sleepsp != NULL)
4197 		*sleepsp = sleeps;
4198 	if (xdomainp != NULL)
4199 		*xdomainp = xdomain;
4200 }
4201 #endif /* DDB */
4202 
4203 static int
4204 sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS)
4205 {
4206 	uma_keg_t kz;
4207 	uma_zone_t z;
4208 	int count;
4209 
4210 	count = 0;
4211 	rw_rlock(&uma_rwlock);
4212 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
4213 		LIST_FOREACH(z, &kz->uk_zones, uz_link)
4214 			count++;
4215 	}
4216 	LIST_FOREACH(z, &uma_cachezones, uz_link)
4217 		count++;
4218 
4219 	rw_runlock(&uma_rwlock);
4220 	return (sysctl_handle_int(oidp, &count, 0, req));
4221 }
4222 
4223 static void
4224 uma_vm_zone_stats(struct uma_type_header *uth, uma_zone_t z, struct sbuf *sbuf,
4225     struct uma_percpu_stat *ups, bool internal)
4226 {
4227 	uma_zone_domain_t zdom;
4228 	uma_bucket_t bucket;
4229 	uma_cache_t cache;
4230 	int i;
4231 
4233 	for (i = 0; i < vm_ndomains; i++) {
4234 		zdom = &z->uz_domain[i];
4235 		uth->uth_zone_free += zdom->uzd_nitems;
4236 	}
4237 	uth->uth_allocs = counter_u64_fetch(z->uz_allocs);
4238 	uth->uth_frees = counter_u64_fetch(z->uz_frees);
4239 	uth->uth_fails = counter_u64_fetch(z->uz_fails);
4240 	uth->uth_sleeps = z->uz_sleeps;
4241 	uth->uth_xdomain = z->uz_xdomain;
4242 
4243 	/*
4244 	 * It is not normally safe to access the cache bucket pointers from a
4245 	 * CPU that does not own the cache.  However, the pointers may only be
4246 	 * exchanged without the zone lock held, never invalidated, so accept
4247 	 * the possible race associated with bucket exchange during monitoring.
4248 	 * Use atomic_load_ptr() to ensure that the bucket pointers are loaded
4249 	 * only once.
4250 	 */
4251 	for (i = 0; i < mp_maxid + 1; i++) {
4252 		bzero(&ups[i], sizeof(*ups));
4253 		if (internal || CPU_ABSENT(i))
4254 			continue;
4255 		cache = &z->uz_cpu[i];
4256 		bucket = (uma_bucket_t)atomic_load_ptr(&cache->uc_allocbucket);
4257 		if (bucket != NULL)
4258 			ups[i].ups_cache_free += bucket->ub_cnt;
4259 		bucket = (uma_bucket_t)atomic_load_ptr(&cache->uc_freebucket);
4260 		if (bucket != NULL)
4261 			ups[i].ups_cache_free += bucket->ub_cnt;
4262 		bucket = (uma_bucket_t)atomic_load_ptr(&cache->uc_crossbucket);
4263 		if (bucket != NULL)
4264 			ups[i].ups_cache_free += bucket->ub_cnt;
4265 		ups[i].ups_allocs = cache->uc_allocs;
4266 		ups[i].ups_frees = cache->uc_frees;
4267 	}
4268 }
4269 
4270 static int
4271 sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
4272 {
4273 	struct uma_stream_header ush;
4274 	struct uma_type_header uth;
4275 	struct uma_percpu_stat *ups;
4276 	struct sbuf sbuf;
4277 	uma_keg_t kz;
4278 	uma_zone_t z;
4279 	int count, error, i;
4280 
4281 	error = sysctl_wire_old_buffer(req, 0);
4282 	if (error != 0)
4283 		return (error);
4284 	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
4285 	sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL);
4286 	ups = malloc((mp_maxid + 1) * sizeof(*ups), M_TEMP, M_WAITOK);
4287 
4288 	count = 0;
4289 	rw_rlock(&uma_rwlock);
4290 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
4291 		LIST_FOREACH(z, &kz->uk_zones, uz_link)
4292 			count++;
4293 	}
4294 
4295 	LIST_FOREACH(z, &uma_cachezones, uz_link)
4296 		count++;
4297 
4298 	/*
4299 	 * Insert stream header.
4300 	 */
4301 	bzero(&ush, sizeof(ush));
4302 	ush.ush_version = UMA_STREAM_VERSION;
4303 	ush.ush_maxcpus = (mp_maxid + 1);
4304 	ush.ush_count = count;
4305 	(void)sbuf_bcat(&sbuf, &ush, sizeof(ush));
4306 
4307 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
4308 		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
4309 			bzero(&uth, sizeof(uth));
4310 			ZONE_LOCK(z);
4311 			strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
4312 			uth.uth_align = kz->uk_align;
4313 			uth.uth_size = kz->uk_size;
4314 			uth.uth_rsize = kz->uk_rsize;
4315 			if (z->uz_max_items > 0)
4316 				uth.uth_pages = (z->uz_items / kz->uk_ipers) *
4317 					kz->uk_ppera;
4318 			else
4319 				uth.uth_pages = kz->uk_pages;
4320 			uth.uth_maxpages = (z->uz_max_items / kz->uk_ipers) *
4321 			    kz->uk_ppera;
4322 			uth.uth_limit = z->uz_max_items;
4323 			uth.uth_keg_free = z->uz_keg->uk_free;
4324 
4325 			/*
4326 			 * A zone is secondary if it is not the first entry
4327 			 * on the keg's zone list.
4328 			 */
4329 			if ((z->uz_flags & UMA_ZONE_SECONDARY) &&
4330 			    (LIST_FIRST(&kz->uk_zones) != z))
4331 				uth.uth_zone_flags = UTH_ZONE_SECONDARY;
4332 			uma_vm_zone_stats(&uth, z, &sbuf, ups,
4333 			    kz->uk_flags & UMA_ZFLAG_INTERNAL);
4334 			ZONE_UNLOCK(z);
4335 			(void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
4336 			for (i = 0; i < mp_maxid + 1; i++)
4337 				(void)sbuf_bcat(&sbuf, &ups[i], sizeof(ups[i]));
4338 		}
4339 	}
4340 	LIST_FOREACH(z, &uma_cachezones, uz_link) {
4341 		bzero(&uth, sizeof(uth));
4342 		ZONE_LOCK(z);
4343 		strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
4344 		uth.uth_size = z->uz_size;
4345 		uma_vm_zone_stats(&uth, z, &sbuf, ups, false);
4346 		ZONE_UNLOCK(z);
4347 		(void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
4348 		for (i = 0; i < mp_maxid + 1; i++)
4349 			(void)sbuf_bcat(&sbuf, &ups[i], sizeof(ups[i]));
4350 	}
4351 
4352 	rw_runlock(&uma_rwlock);
4353 	error = sbuf_finish(&sbuf);
4354 	sbuf_delete(&sbuf);
4355 	free(ups, M_TEMP);
4356 	return (error);
4357 }
4358 
4359 int
4360 sysctl_handle_uma_zone_max(SYSCTL_HANDLER_ARGS)
4361 {
4362 	uma_zone_t zone = *(uma_zone_t *)arg1;
4363 	int error, max;
4364 
4365 	max = uma_zone_get_max(zone);
4366 	error = sysctl_handle_int(oidp, &max, 0, req);
4367 	if (error || !req->newptr)
4368 		return (error);
4369 
4370 	uma_zone_set_max(zone, max);
4371 
4372 	return (0);
4373 }
4374 
4375 int
4376 sysctl_handle_uma_zone_cur(SYSCTL_HANDLER_ARGS)
4377 {
4378 	uma_zone_t zone;
4379 	int cur;
4380 
4381 	/*
4382 	 * Some callers want to add sysctls for global zones that
4383 	 * may not yet exist, so they pass a pointer to a pointer.
4384 	 */
4385 	if (arg2 == 0)
4386 		zone = *(uma_zone_t *)arg1;
4387 	else
4388 		zone = arg1;
4389 	cur = uma_zone_get_cur(zone);
4390 	return (sysctl_handle_int(oidp, &cur, 0, req));
4391 }
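
/*
 * Registration sketch (illustrative; assumes a caller-provided sysctl
 * context "ctx", oid list "parent", and zone "foo_zone"): passing a
 * non-zero arg2 hands the zone pointer to the handler directly:
 *
 *	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "cur",
 *	    CTLTYPE_INT | CTLFLAG_RD, foo_zone, 1,
 *	    sysctl_handle_uma_zone_cur, "I", "Current item count");
 */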
4392 
4393 static int
4394 sysctl_handle_uma_zone_allocs(SYSCTL_HANDLER_ARGS)
4395 {
4396 	uma_zone_t zone = arg1;
4397 	uint64_t cur;
4398 
4399 	cur = uma_zone_get_allocs(zone);
4400 	return (sysctl_handle_64(oidp, &cur, 0, req));
4401 }
4402 
4403 static int
4404 sysctl_handle_uma_zone_frees(SYSCTL_HANDLER_ARGS)
4405 {
4406 	uma_zone_t zone = arg1;
4407 	uint64_t cur;
4408 
4409 	cur = uma_zone_get_frees(zone);
4410 	return (sysctl_handle_64(oidp, &cur, 0, req));
4411 }
4412 
4413 static int
4414 sysctl_handle_uma_zone_flags(SYSCTL_HANDLER_ARGS)
4415 {
4416 	struct sbuf sbuf;
4417 	uma_zone_t zone = arg1;
4418 	int error;
4419 
4420 	sbuf_new_for_sysctl(&sbuf, NULL, 0, req);
4421 	if (zone->uz_flags != 0)
4422 		sbuf_printf(&sbuf, "0x%b", zone->uz_flags, PRINT_UMA_ZFLAGS);
4423 	else
4424 		sbuf_printf(&sbuf, "0");
4425 	error = sbuf_finish(&sbuf);
4426 	sbuf_delete(&sbuf);
4427 
4428 	return (error);
4429 }
4430 
4431 #ifdef INVARIANTS
4432 static uma_slab_t
4433 uma_dbg_getslab(uma_zone_t zone, void *item)
4434 {
4435 	uma_slab_t slab;
4436 	uma_keg_t keg;
4437 	uint8_t *mem;
4438 
4439 	mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
4440 	if (zone->uz_flags & UMA_ZONE_VTOSLAB) {
4441 		slab = vtoslab((vm_offset_t)mem);
4442 	} else {
4443 		/*
4444 		 * It is safe to return the slab here even though the
4445 		 * zone is unlocked because the item's allocation state
4446 		 * essentially holds a reference.
4447 		 */
4448 		if (zone->uz_lockptr == &zone->uz_lock)
4449 			return (NULL);
4450 		ZONE_LOCK(zone);
4451 		keg = zone->uz_keg;
4452 		if (keg->uk_flags & UMA_ZONE_HASH)
4453 			slab = hash_sfind(&keg->uk_hash, mem);
4454 		else
4455 			slab = (uma_slab_t)(mem + keg->uk_pgoff);
4456 		ZONE_UNLOCK(zone);
4457 	}
4458 
4459 	return (slab);
4460 }
4461 
4462 static bool
4463 uma_dbg_zskip(uma_zone_t zone, void *mem)
4464 {
4465 
4466 	if (zone->uz_lockptr == &zone->uz_lock)
4467 		return (true);
4468 
4469 	return (uma_dbg_kskip(zone->uz_keg, mem));
4470 }
4471 
4472 static bool
4473 uma_dbg_kskip(uma_keg_t keg, void *mem)
4474 {
4475 	uintptr_t idx;
4476 
4477 	if (dbg_divisor == 0)
4478 		return (true);
4479 
4480 	if (dbg_divisor == 1)
4481 		return (false);
4482 
4483 	idx = (uintptr_t)mem >> PAGE_SHIFT;
4484 	if (keg->uk_ipers > 1) {
4485 		idx *= keg->uk_ipers;
4486 		idx += ((uintptr_t)mem & PAGE_MASK) / keg->uk_rsize;
4487 	}
4488 
4489 	if ((idx / dbg_divisor) * dbg_divisor != idx) {
4490 		counter_u64_add(uma_skip_cnt, 1);
4491 		return (true);
4492 	}
4493 	counter_u64_add(uma_dbg_cnt, 1);
4494 
4495 	return (false);
4496 }
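
/*
 * Worked example, with illustrative numbers: with dbg_divisor == 3,
 * 4 KB pages, uk_ipers == 4, and uk_rsize == 1024, an item at offset
 * 0x800 into page n gets idx == n * 4 + 2 and is audited only when that
 * idx is a multiple of 3, so roughly one in three items pays for the
 * debugging checks.
 */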
4497 
4498 /*
4499  * Set up the slab's freei data such that uma_dbg_free can function.
4500  */
4502 static void
4503 uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item)
4504 {
4505 	uma_keg_t keg;
4506 	int freei;
4507 
4508 	if (slab == NULL) {
4509 		slab = uma_dbg_getslab(zone, item);
4510 		if (slab == NULL)
4511 			panic("uma: item %p did not belong to zone %s\n",
4512 			    item, zone->uz_name);
4513 	}
4514 	keg = zone->uz_keg;
4515 	freei = slab_item_index(slab, keg, item);
4516 
4517 	if (BIT_ISSET(SLAB_MAX_SETSIZE, freei, &slab->us_debugfree))
4518 		panic("Duplicate alloc of %p from zone %p(%s) slab %p(%d)\n",
4519 		    item, zone, zone->uz_name, slab, freei);
4520 	BIT_SET_ATOMIC(SLAB_MAX_SETSIZE, freei, &slab->us_debugfree);
4521 
4522 	return;
4523 }
4524 
4525 /*
4526  * Verifies freed addresses.  Checks for alignment, valid slab membership
4527  * and duplicate frees.
4528  */
4530 static void
4531 uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
4532 {
4533 	uma_keg_t keg;
4534 	int freei;
4535 
4536 	if (slab == NULL) {
4537 		slab = uma_dbg_getslab(zone, item);
4538 		if (slab == NULL)
4539 			panic("uma: Freed item %p did not belong to zone %s\n",
4540 			    item, zone->uz_name);
4541 	}
4542 	keg = zone->uz_keg;
4543 	freei = slab_item_index(slab, keg, item);
4544 
4545 	if (freei >= keg->uk_ipers)
4546 		panic("Invalid free of %p from zone %p(%s) slab %p(%d)\n",
4547 		    item, zone, zone->uz_name, slab, freei);
4548 
4549 	if (slab_item(slab, keg, freei) != item)
4550 		panic("Unaligned free of %p from zone %p(%s) slab %p(%d)\n",
4551 		    item, zone, zone->uz_name, slab, freei);
4552 
4553 	if (!BIT_ISSET(SLAB_MAX_SETSIZE, freei, &slab->us_debugfree))
4554 		panic("Duplicate free of %p from zone %p(%s) slab %p(%d)\n",
4555 		    item, zone, zone->uz_name, slab, freei);
4556 
4557 	BIT_CLR_ATOMIC(SLAB_MAX_SETSIZE, freei, &slab->us_debugfree);
4558 }
4559 #endif /* INVARIANTS */
4560 
4561 #ifdef DDB
4562 static int64_t
4563 get_uma_stats(uma_keg_t kz, uma_zone_t z, uint64_t *allocs, uint64_t *used,
4564     uint64_t *sleeps, long *cachefree, uint64_t *xdomain)
4565 {
4566 	uint64_t frees;
4567 	int i;
4568 
4569 	if (kz->uk_flags & UMA_ZFLAG_INTERNAL) {
4570 		*allocs = counter_u64_fetch(z->uz_allocs);
4571 		frees = counter_u64_fetch(z->uz_frees);
4572 		*sleeps = z->uz_sleeps;
4573 		*cachefree = 0;
4574 		*xdomain = 0;
4575 	} else
4576 		uma_zone_sumstat(z, cachefree, allocs, &frees, sleeps,
4577 		    xdomain);
4578 	if (!((z->uz_flags & UMA_ZONE_SECONDARY) &&
4579 	    (LIST_FIRST(&kz->uk_zones) != z)))
4580 		*cachefree += kz->uk_free;
4581 	for (i = 0; i < vm_ndomains; i++)
4582 		*cachefree += z->uz_domain[i].uzd_nitems;
4583 	*used = *allocs - frees;
4584 	return (((int64_t)*used + *cachefree) * kz->uk_size);
4585 }
4586 
4587 DB_SHOW_COMMAND(uma, db_show_uma)
4588 {
4589 	const char *fmt_hdr, *fmt_entry;
4590 	uma_keg_t kz;
4591 	uma_zone_t z;
4592 	uint64_t allocs, used, sleeps, xdomain;
4593 	long cachefree;
4594 	/* variables for sorting */
4595 	uma_keg_t cur_keg;
4596 	uma_zone_t cur_zone, last_zone;
4597 	int64_t cur_size, last_size, size;
4598 	int ties;
4599 
4600 	/* /i option produces machine-parseable CSV output */
4601 	if (modif[0] == 'i') {
4602 		fmt_hdr = "%s,%s,%s,%s,%s,%s,%s,%s,%s\n";
4603 		fmt_entry = "\"%s\",%ju,%jd,%ld,%ju,%ju,%u,%jd,%ju\n";
4604 	} else {
4605 		fmt_hdr = "%18s %6s %7s %7s %11s %7s %7s %10s %8s\n";
4606 		fmt_entry = "%18s %6ju %7jd %7ld %11ju %7ju %7u %10jd %8ju\n";
4607 	}
4608 
4609 	db_printf(fmt_hdr, "Zone", "Size", "Used", "Free", "Requests",
4610 	    "Sleeps", "Bucket", "Total Mem", "XFree");
4611 
4612 	/* Sort the zones with largest size first. */
4613 	last_zone = NULL;
4614 	last_size = INT64_MAX;
4615 	for (;;) {
4616 		cur_zone = NULL;
4617 		cur_size = -1;
4618 		ties = 0;
4619 		LIST_FOREACH(kz, &uma_kegs, uk_link) {
4620 			LIST_FOREACH(z, &kz->uk_zones, uz_link) {
4621 				/*
4622 				 * In the case of size ties, print out zones
4623 				 * in the order they are encountered.  That is,
4624 				 * when we encounter the most recently output
4625 				 * zone, we have already printed all preceding
4626 				 * ties, and we must print all following ties.
4627 				 */
4628 				if (z == last_zone) {
4629 					ties = 1;
4630 					continue;
4631 				}
4632 				size = get_uma_stats(kz, z, &allocs, &used,
4633 				    &sleeps, &cachefree, &xdomain);
4634 				if (size > cur_size &&
4635 				    size < last_size + ties) {
4636 					cur_size = size;
4637 					cur_zone = z;
4638 					cur_keg = kz;
4639 				}
4640 			}
4641 		}
4642 		if (cur_zone == NULL)
4643 			break;
4644 
4645 		size = get_uma_stats(cur_keg, cur_zone, &allocs, &used,
4646 		    &sleeps, &cachefree, &xdomain);
4647 		db_printf(fmt_entry, cur_zone->uz_name,
4648 		    (uintmax_t)cur_keg->uk_size, (intmax_t)used, cachefree,
4649 		    (uintmax_t)allocs, (uintmax_t)sleeps,
4650 		    (unsigned)cur_zone->uz_bucket_size, (intmax_t)size,
4651 		    xdomain);
4652 
4653 		if (db_pager_quit)
4654 			return;
4655 		last_zone = cur_zone;
4656 		last_size = cur_size;
4657 	}
4658 }
4659 
4660 DB_SHOW_COMMAND(umacache, db_show_umacache)
4661 {
4662 	uma_zone_t z;
4663 	uint64_t allocs, frees;
4664 	long cachefree;
4665 	int i;
4666 
4667 	db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free",
4668 	    "Requests", "Bucket");
4669 	LIST_FOREACH(z, &uma_cachezones, uz_link) {
4670 		uma_zone_sumstat(z, &cachefree, &allocs, &frees, NULL, NULL);
4671 		for (i = 0; i < vm_ndomains; i++)
4672 			cachefree += z->uz_domain[i].uzd_nitems;
4673 		db_printf("%18s %8ju %8jd %8ld %12ju %8u\n",
4674 		    z->uz_name, (uintmax_t)z->uz_size,
4675 		    (intmax_t)(allocs - frees), cachefree,
4676 		    (uintmax_t)allocs, z->uz_bucket_size);
4677 		if (db_pager_quit)
4678 			return;
4679 	}
4680 }
4681 #endif	/* DDB */
4682