xref: /freebsd/sys/vm/uma_core.c (revision 28f4385e45a2681c14bd04b83fe1796eaefe8265)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson <jeff@FreeBSD.org>
5  * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
6  * Copyright (c) 2004-2006 Robert N. M. Watson
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice unmodified, this list of conditions, and the following
14  *    disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /*
32  * uma_core.c  Implementation of the Universal Memory Allocator
33  *
34  * This allocator is intended to replace the multitude of similar object caches
35  * in the standard FreeBSD kernel.  The intent is to be flexible as well as
36  * efficient.  A primary design goal is to return unused memory to the rest of
37  * the system.  This will make the system as a whole more flexible due to the
38  * ability to move memory to subsystems which most need it instead of leaving
39  * pools of reserved memory unused.
40  *
41  * The basic ideas stem from similar slab/zone based allocators whose algorithms
42  * are well known.
43  *
44  */
45 
46 /*
47  * TODO:
48  *	- Improve memory usage for large allocations
49  *	- Investigate cache size adjustments
50  */
51 
52 #include <sys/cdefs.h>
53 __FBSDID("$FreeBSD$");
54 
55 #include "opt_ddb.h"
56 #include "opt_param.h"
57 #include "opt_vm.h"
58 
59 #include <sys/param.h>
60 #include <sys/systm.h>
61 #include <sys/bitset.h>
62 #include <sys/domainset.h>
63 #include <sys/eventhandler.h>
64 #include <sys/kernel.h>
65 #include <sys/types.h>
66 #include <sys/limits.h>
67 #include <sys/queue.h>
68 #include <sys/malloc.h>
69 #include <sys/ktr.h>
70 #include <sys/lock.h>
71 #include <sys/sysctl.h>
72 #include <sys/mutex.h>
73 #include <sys/proc.h>
74 #include <sys/random.h>
75 #include <sys/rwlock.h>
76 #include <sys/sbuf.h>
77 #include <sys/sched.h>
78 #include <sys/smp.h>
79 #include <sys/taskqueue.h>
80 #include <sys/vmmeter.h>
81 
82 #include <vm/vm.h>
83 #include <vm/vm_domainset.h>
84 #include <vm/vm_object.h>
85 #include <vm/vm_page.h>
86 #include <vm/vm_pageout.h>
87 #include <vm/vm_param.h>
88 #include <vm/vm_phys.h>
89 #include <vm/vm_pagequeue.h>
90 #include <vm/vm_map.h>
91 #include <vm/vm_kern.h>
92 #include <vm/vm_extern.h>
93 #include <vm/uma.h>
94 #include <vm/uma_int.h>
95 #include <vm/uma_dbg.h>
96 
97 #include <ddb/ddb.h>
98 
99 #ifdef DEBUG_MEMGUARD
100 #include <vm/memguard.h>
101 #endif
102 
103 /*
104  * This is the zone and keg from which all zones are spawned.
105  */
106 static uma_zone_t kegs;
107 static uma_zone_t zones;
108 
109 /* This is the zone from which all offpage uma_slab_ts are allocated. */
110 static uma_zone_t slabzone;
111 
112 /*
113  * The initial hash tables come out of this zone so they can be allocated
114  * prior to malloc coming up.
115  */
116 static uma_zone_t hashzone;
117 
118 /* The boot-time adjusted value for cache line alignment. */
119 int uma_align_cache = 64 - 1;
120 
121 static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");
122 
123 /*
124  * Are we allowed to allocate buckets?
125  */
126 static int bucketdisable = 1;
127 
128 /* Linked list of all kegs in the system */
129 static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);
130 
131 /* Linked list of all cache-only zones in the system */
132 static LIST_HEAD(,uma_zone) uma_cachezones =
133     LIST_HEAD_INITIALIZER(uma_cachezones);
134 
135 /* This RW lock protects the keg list */
136 static struct rwlock_padalign __exclusive_cache_line uma_rwlock;
137 
138 /*
139  * Pointer to, and count of, the pool of pages preallocated at
140  * startup to bootstrap UMA.
141  */
142 static char *bootmem;
143 static int boot_pages;
144 
145 static struct sx uma_drain_lock;
146 
147 /* kmem soft limit. */
148 static unsigned long uma_kmem_limit = LONG_MAX;
149 static volatile unsigned long uma_kmem_total;
150 
151 /* Is the VM done starting up? */
152 static enum { BOOT_COLD = 0, BOOT_STRAPPED, BOOT_PAGEALLOC, BOOT_BUCKETS,
153     BOOT_RUNNING } booted = BOOT_COLD;
154 
155 /*
156  * This is the handle used to schedule events that need to happen
157  * outside of the allocation fast path.
158  */
159 static struct callout uma_callout;
160 #define	UMA_TIMEOUT	20		/* Seconds for callout interval. */
161 
162 /*
163  * This structure is passed as the zone ctor arg so that I don't have to create
164  * a special allocation function just for zones.
165  */
166 struct uma_zctor_args {
167 	const char *name;
168 	size_t size;
169 	uma_ctor ctor;
170 	uma_dtor dtor;
171 	uma_init uminit;
172 	uma_fini fini;
173 	uma_import import;
174 	uma_release release;
175 	void *arg;
176 	uma_keg_t keg;
177 	int align;
178 	uint32_t flags;
179 };
180 
181 struct uma_kctor_args {
182 	uma_zone_t zone;
183 	size_t size;
184 	uma_init uminit;
185 	uma_fini fini;
186 	int align;
187 	uint32_t flags;
188 };
189 
190 struct uma_bucket_zone {
191 	uma_zone_t	ubz_zone;
192 	char		*ubz_name;
193 	int		ubz_entries;	/* Number of items it can hold. */
194 	int		ubz_maxsize;	/* Maximum allocation size per-item. */
195 };
196 
197 /*
198  * Compute the actual number of bucket entries so that buckets pack into
199  * power-of-two sizes for more efficient space utilization.
200  */
201 #define	BUCKET_SIZE(n)						\
202     (((sizeof(void *) * (n)) - sizeof(struct uma_bucket)) / sizeof(void *))
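/*
 * A sketch of the arithmetic: BUCKET_SIZE(n) yields n minus the (rounded-up)
 * number of pointer-sized slots the struct uma_bucket header consumes, so the
 * header plus the ub_bucket[] item array together fill exactly
 * n * sizeof(void *) bytes.  For illustration only: if the header happened to
 * occupy two pointer-sized slots, BUCKET_SIZE(4) would evaluate to 2.
 */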
203 
204 #define	BUCKET_MAX	BUCKET_SIZE(256)
205 
206 struct uma_bucket_zone bucket_zones[] = {
207 	{ NULL, "4 Bucket", BUCKET_SIZE(4), 4096 },
208 	{ NULL, "6 Bucket", BUCKET_SIZE(6), 3072 },
209 	{ NULL, "8 Bucket", BUCKET_SIZE(8), 2048 },
210 	{ NULL, "12 Bucket", BUCKET_SIZE(12), 1536 },
211 	{ NULL, "16 Bucket", BUCKET_SIZE(16), 1024 },
212 	{ NULL, "32 Bucket", BUCKET_SIZE(32), 512 },
213 	{ NULL, "64 Bucket", BUCKET_SIZE(64), 256 },
214 	{ NULL, "128 Bucket", BUCKET_SIZE(128), 128 },
215 	{ NULL, "256 Bucket", BUCKET_SIZE(256), 64 },
216 	{ NULL, NULL, 0}
217 };
218 
219 /*
220  * Flags and enumerations to be passed to internal functions.
221  */
222 enum zfreeskip {
223 	SKIP_NONE =	0,
224 	SKIP_CNT =	0x00000001,
225 	SKIP_DTOR =	0x00010000,
226 	SKIP_FINI =	0x00020000,
227 };
228 
229 #define	UMA_ANYDOMAIN	-1	/* Special value for domain search. */
230 
231 /* Prototypes. */
232 
233 int	uma_startup_count(int);
234 void	uma_startup(void *, int);
235 void	uma_startup1(void);
236 void	uma_startup2(void);
237 
238 static void *noobj_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
239 static void *page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
240 static void *pcpu_page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
241 static void *startup_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
242 static void page_free(void *, vm_size_t, uint8_t);
243 static void pcpu_page_free(void *, vm_size_t, uint8_t);
244 static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int, int, int);
245 static void cache_drain(uma_zone_t);
246 static void bucket_drain(uma_zone_t, uma_bucket_t);
247 static void bucket_cache_drain(uma_zone_t zone);
248 static int keg_ctor(void *, int, void *, int);
249 static void keg_dtor(void *, int, void *);
250 static int zone_ctor(void *, int, void *, int);
251 static void zone_dtor(void *, int, void *);
252 static int zero_init(void *, int, int);
253 static void keg_small_init(uma_keg_t keg);
254 static void keg_large_init(uma_keg_t keg);
255 static void zone_foreach(void (*zfunc)(uma_zone_t));
256 static void zone_timeout(uma_zone_t zone);
257 static int hash_alloc(struct uma_hash *);
258 static int hash_expand(struct uma_hash *, struct uma_hash *);
259 static void hash_free(struct uma_hash *hash);
260 static void uma_timeout(void *);
261 static void uma_startup3(void);
262 static void *zone_alloc_item(uma_zone_t, void *, int, int);
263 static void *zone_alloc_item_locked(uma_zone_t, void *, int, int);
264 static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip);
265 static void bucket_enable(void);
266 static void bucket_init(void);
267 static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int);
268 static void bucket_free(uma_zone_t zone, uma_bucket_t, void *);
269 static void bucket_zone_drain(void);
270 static uma_bucket_t zone_alloc_bucket(uma_zone_t, void *, int, int, int);
271 static uma_slab_t zone_fetch_slab(uma_zone_t, uma_keg_t, int, int);
272 static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
273 static void slab_free_item(uma_zone_t zone, uma_slab_t slab, void *item);
274 static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
275     uma_fini fini, int align, uint32_t flags);
276 static int zone_import(uma_zone_t, void **, int, int, int);
277 static void zone_release(uma_zone_t, void **, int);
278 static void uma_zero_item(void *, uma_zone_t);
279 
280 void uma_print_zone(uma_zone_t);
281 void uma_print_stats(void);
282 static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
283 static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);
284 
285 #ifdef INVARIANTS
286 static bool uma_dbg_kskip(uma_keg_t keg, void *mem);
287 static bool uma_dbg_zskip(uma_zone_t zone, void *mem);
288 static void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item);
289 static void uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item);
290 
291 static SYSCTL_NODE(_vm, OID_AUTO, debug, CTLFLAG_RD, 0,
292     "Memory allocation debugging");
293 
294 static u_int dbg_divisor = 1;
295 SYSCTL_UINT(_vm_debug, OID_AUTO, divisor,
296     CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &dbg_divisor, 0,
297     "Debug & thrash every this item in memory allocator");
298 
299 static counter_u64_t uma_dbg_cnt = EARLY_COUNTER;
300 static counter_u64_t uma_skip_cnt = EARLY_COUNTER;
301 SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, trashed, CTLFLAG_RD,
302     &uma_dbg_cnt, "memory items debugged");
303 SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, skipped, CTLFLAG_RD,
304     &uma_skip_cnt, "memory items skipped, not debugged");
305 #endif
306 
307 SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);
308 
309 SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLTYPE_INT,
310     0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones");
311 
312 SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
313     0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");
314 
315 static int zone_warnings = 1;
316 SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RWTUN, &zone_warnings, 0,
317     "Warn when UMA zones becomes full");
318 
319 /* Adjust bytes under management by UMA. */
320 static inline void
321 uma_total_dec(unsigned long size)
322 {
323 
324 	atomic_subtract_long(&uma_kmem_total, size);
325 }
326 
327 static inline void
328 uma_total_inc(unsigned long size)
329 {
330 
331 	if (atomic_fetchadd_long(&uma_kmem_total, size) > uma_kmem_limit)
332 		uma_reclaim_wakeup();
333 }
334 
335 /*
336  * This routine checks to see whether or not it's safe to enable buckets.
337  */
338 static void
339 bucket_enable(void)
340 {
341 	bucketdisable = vm_page_count_min();
342 }
343 
344 /*
345  * Initialize bucket_zones, the array of zones of buckets of various sizes.
346  *
347  * For each zone, calculate the memory required for each bucket, consisting
348  * of the header and an array of pointers.
349  */
350 static void
351 bucket_init(void)
352 {
353 	struct uma_bucket_zone *ubz;
354 	int size;
355 
356 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) {
357 		size = roundup(sizeof(struct uma_bucket), sizeof(void *));
358 		size += sizeof(void *) * ubz->ubz_entries;
359 		ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
360 		    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
361 		    UMA_ZONE_MTXCLASS | UMA_ZFLAG_BUCKET | UMA_ZONE_NUMA);
362 	}
363 }
364 
365 /*
366  * Given a desired number of entries for a bucket, return the zone from which
367  * to allocate the bucket.
368  */
369 static struct uma_bucket_zone *
370 bucket_zone_lookup(int entries)
371 {
372 	struct uma_bucket_zone *ubz;
373 
374 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
375 		if (ubz->ubz_entries >= entries)
376 			return (ubz);
377 	ubz--;
378 	return (ubz);
379 }
380 
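/*
 * Select a bucket size (in entries) for items of the given size, using the
 * bucket_zones[] table above.  A worked example, derived from that table: for
 * a 700-byte item the loop stops at the "32 Bucket" row (maxsize 512), steps
 * back one row, and returns the entry count of the "16 Bucket" zone
 * (maxsize 1024).  Items larger than the first row's maxsize instead get a
 * count scaled down so the total cached bytes stay near that row's budget.
 */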
381 static int
382 bucket_select(int size)
383 {
384 	struct uma_bucket_zone *ubz;
385 
386 	ubz = &bucket_zones[0];
387 	if (size > ubz->ubz_maxsize)
388 		return MAX((ubz->ubz_maxsize * ubz->ubz_entries) / size, 1);
389 
390 	for (; ubz->ubz_entries != 0; ubz++)
391 		if (ubz->ubz_maxsize < size)
392 			break;
393 	ubz--;
394 	return (ubz->ubz_entries);
395 }
396 
397 static uma_bucket_t
398 bucket_alloc(uma_zone_t zone, void *udata, int flags)
399 {
400 	struct uma_bucket_zone *ubz;
401 	uma_bucket_t bucket;
402 
403 	/*
404 	 * This is to stop us from allocating per-CPU buckets while we're
405 	 * running out of vm.boot_pages.  Otherwise, we would exhaust the
406 	 * boot pages.  This also prevents us from allocating buckets in
407 	 * low memory situations.
408 	 */
409 	if (bucketdisable)
410 		return (NULL);
411 	/*
412 	 * To limit bucket recursion we store the original zone flags
413 	 * in a cookie passed via zalloc_arg/zfree_arg.  This allows the
414 	 * NOVM flag to persist even through deep recursions.  We also
415 	 * store ZFLAG_BUCKET once we have recursed attempting to allocate
416 	 * a bucket for a bucket zone so we do not allow infinite bucket
417 	 * recursion.  This cookie will even persist to frees of unused
418 	 * buckets via the allocation path or bucket allocations in the
419 	 * free path.
420 	 */
421 	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
422 		udata = (void *)(uintptr_t)zone->uz_flags;
423 	else {
424 		if ((uintptr_t)udata & UMA_ZFLAG_BUCKET)
425 			return (NULL);
426 		udata = (void *)((uintptr_t)udata | UMA_ZFLAG_BUCKET);
427 	}
428 	if ((uintptr_t)udata & UMA_ZFLAG_CACHEONLY)
429 		flags |= M_NOVM;
430 	ubz = bucket_zone_lookup(zone->uz_count);
431 	if (ubz->ubz_zone == zone && (ubz + 1)->ubz_entries != 0)
432 		ubz++;
433 	bucket = uma_zalloc_arg(ubz->ubz_zone, udata, flags);
434 	if (bucket) {
435 #ifdef INVARIANTS
436 		bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
437 #endif
438 		bucket->ub_cnt = 0;
439 		bucket->ub_entries = ubz->ubz_entries;
440 	}
441 
442 	return (bucket);
443 }
444 
445 static void
446 bucket_free(uma_zone_t zone, uma_bucket_t bucket, void *udata)
447 {
448 	struct uma_bucket_zone *ubz;
449 
450 	KASSERT(bucket->ub_cnt == 0,
451 	    ("bucket_free: Freeing a non free bucket."));
452 	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
453 		udata = (void *)(uintptr_t)zone->uz_flags;
454 	ubz = bucket_zone_lookup(bucket->ub_entries);
455 	uma_zfree_arg(ubz->ubz_zone, bucket, udata);
456 }
457 
458 static void
459 bucket_zone_drain(void)
460 {
461 	struct uma_bucket_zone *ubz;
462 
463 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
464 		zone_drain(ubz->ubz_zone);
465 }
466 
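/*
 * Remove and return the first cached bucket from the per-domain bucket list,
 * or NULL if the list is empty.  The 'ws' argument selects whether the
 * removal should also update the working-set watermark (uzd_imin) consumed by
 * zone_domain_update_wss(); zone_put_bucket() below is its counterpart and
 * updates uzd_imax on insertion.
 */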
467 static uma_bucket_t
468 zone_try_fetch_bucket(uma_zone_t zone, uma_zone_domain_t zdom, const bool ws)
469 {
470 	uma_bucket_t bucket;
471 
472 	ZONE_LOCK_ASSERT(zone);
473 
474 	if ((bucket = LIST_FIRST(&zdom->uzd_buckets)) != NULL) {
475 		MPASS(zdom->uzd_nitems >= bucket->ub_cnt);
476 		LIST_REMOVE(bucket, ub_link);
477 		zdom->uzd_nitems -= bucket->ub_cnt;
478 		if (ws && zdom->uzd_imin > zdom->uzd_nitems)
479 			zdom->uzd_imin = zdom->uzd_nitems;
480 		zone->uz_bkt_count -= bucket->ub_cnt;
481 	}
482 	return (bucket);
483 }
484 
485 static void
486 zone_put_bucket(uma_zone_t zone, uma_zone_domain_t zdom, uma_bucket_t bucket,
487     const bool ws)
488 {
489 
490 	ZONE_LOCK_ASSERT(zone);
491 	KASSERT(zone->uz_bkt_count < zone->uz_bkt_max, ("%s: zone %p overflow",
492 	    __func__, zone));
493 
494 	LIST_INSERT_HEAD(&zdom->uzd_buckets, bucket, ub_link);
495 	zdom->uzd_nitems += bucket->ub_cnt;
496 	if (ws && zdom->uzd_imax < zdom->uzd_nitems)
497 		zdom->uzd_imax = zdom->uzd_nitems;
498 	zone->uz_bkt_count += bucket->ub_cnt;
499 }
500 
501 static void
502 zone_log_warning(uma_zone_t zone)
503 {
504 	static const struct timeval warninterval = { 300, 0 };
505 
506 	if (!zone_warnings || zone->uz_warning == NULL)
507 		return;
508 
509 	if (ratecheck(&zone->uz_ratecheck, &warninterval))
510 		printf("[zone: %s] %s\n", zone->uz_name, zone->uz_warning);
511 }
512 
513 static inline void
514 zone_maxaction(uma_zone_t zone)
515 {
516 
517 	if (zone->uz_maxaction.ta_func != NULL)
518 		taskqueue_enqueue(taskqueue_thread, &zone->uz_maxaction);
519 }
520 
521 /*
522  * Routine called by the timeout mechanism to fire off time-interval-
523  * based calculations.  (stats, hash size, etc.)
524  *
525  * Arguments:
526  *	arg   Unused
527  *
528  * Returns:
529  *	Nothing
530  */
531 static void
532 uma_timeout(void *unused)
533 {
534 	bucket_enable();
535 	zone_foreach(zone_timeout);
536 
537 	/* Reschedule this event */
538 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
539 }
540 
541 /*
542  * Update the working set size estimate for the zone's bucket cache.
543  * The constants chosen here are somewhat arbitrary.  With an update period of
544  * 20s (UMA_TIMEOUT), this estimate is dominated by zone activity over the
545  * last 100s.
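 *
 * Rough arithmetic behind that claim: each update computes
 * uzd_wss = (3 * wss + 2 * uzd_wss) / 5, so an old sample's weight decays by
 * a factor of 2/5 per period; after five 20s periods (100s) it has fallen to
 * (2/5)^5, roughly 1% of its original contribution.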
546  */
547 static void
548 zone_domain_update_wss(uma_zone_domain_t zdom)
549 {
550 	long wss;
551 
552 	MPASS(zdom->uzd_imax >= zdom->uzd_imin);
553 	wss = zdom->uzd_imax - zdom->uzd_imin;
554 	zdom->uzd_imax = zdom->uzd_imin = zdom->uzd_nitems;
555 	zdom->uzd_wss = (3 * wss + 2 * zdom->uzd_wss) / 5;
556 }
557 
558 /*
559  * Routine to perform timeout-driven calculations.  This expands the
560  * hashes and does per-CPU statistics aggregation.
561  *
562  *  Returns nothing.
563  */
564 static void
565 zone_timeout(uma_zone_t zone)
566 {
567 	uma_keg_t keg = zone->uz_keg;
568 
569 	KEG_LOCK(keg);
570 	/*
571 	 * Expand the keg hash table.
572 	 *
573 	 * This is done if the number of slabs is larger than the hash size.
574  * What I'm trying to do here is eliminate collisions entirely.  This
575 	 * may be a little aggressive.  Should I allow for two collisions max?
576 	 */
577 	if (keg->uk_flags & UMA_ZONE_HASH &&
578 	    keg->uk_pages / keg->uk_ppera >= keg->uk_hash.uh_hashsize) {
579 		struct uma_hash newhash;
580 		struct uma_hash oldhash;
581 		int ret;
582 
583 		/*
584 		 * This is so involved because allocating and freeing
585 		 * while the keg lock is held will lead to deadlock.
586 		 * I have to do everything in stages and check for
587 		 * races.
588 		 */
589 		newhash = keg->uk_hash;
590 		KEG_UNLOCK(keg);
591 		ret = hash_alloc(&newhash);
592 		KEG_LOCK(keg);
593 		if (ret) {
594 			if (hash_expand(&keg->uk_hash, &newhash)) {
595 				oldhash = keg->uk_hash;
596 				keg->uk_hash = newhash;
597 			} else
598 				oldhash = newhash;
599 
600 			KEG_UNLOCK(keg);
601 			hash_free(&oldhash);
602 			return;
603 		}
604 	}
605 
606 	for (int i = 0; i < vm_ndomains; i++)
607 		zone_domain_update_wss(&zone->uz_domain[i]);
608 
609 	KEG_UNLOCK(keg);
610 }
611 
612 /*
613  * Allocate and zero fill the next sized hash table from the appropriate
614  * backing store.
615  *
616  * Arguments:
617  *	hash  A new hash structure with the old hash size in uh_hashsize
618  *
619  * Returns:
620  *	1 on success and 0 on failure.
621  */
622 static int
623 hash_alloc(struct uma_hash *hash)
624 {
625 	int oldsize;
626 	int alloc;
627 
628 	oldsize = hash->uh_hashsize;
629 
630 	/* We're just going to go to the next greater power of two. */
631 	if (oldsize)  {
632 		hash->uh_hashsize = oldsize * 2;
633 		alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
634 		hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
635 		    M_UMAHASH, M_NOWAIT);
636 	} else {
637 		alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
638 		hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
639 		    UMA_ANYDOMAIN, M_WAITOK);
640 		hash->uh_hashsize = UMA_HASH_SIZE_INIT;
641 	}
642 	if (hash->uh_slab_hash) {
643 		bzero(hash->uh_slab_hash, alloc);
644 		hash->uh_hashmask = hash->uh_hashsize - 1;
645 		return (1);
646 	}
647 
648 	return (0);
649 }
650 
651 /*
652  * Expands the hash table for HASH zones.  This is done from zone_timeout
653  * to reduce collisions.  This must not be done in the regular allocation
654  * path; otherwise, we can recurse on the VM while allocating pages.
655  *
656  * Arguments:
657  *	oldhash  The hash you want to expand
658  *	newhash  The hash structure for the new table
659  *
660  * Returns:
661  *	Nothing
662  *
663  * Discussion:
664  */
665 static int
666 hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
667 {
668 	uma_slab_t slab;
669 	int hval;
670 	int i;
671 
672 	if (!newhash->uh_slab_hash)
673 		return (0);
674 
675 	if (oldhash->uh_hashsize >= newhash->uh_hashsize)
676 		return (0);
677 
678 	/*
679 	 * I need to investigate hash algorithms for resizing without a
680 	 * full rehash.
681 	 */
682 
683 	for (i = 0; i < oldhash->uh_hashsize; i++)
684 		while (!SLIST_EMPTY(&oldhash->uh_slab_hash[i])) {
685 			slab = SLIST_FIRST(&oldhash->uh_slab_hash[i]);
686 			SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[i], us_hlink);
687 			hval = UMA_HASH(newhash, slab->us_data);
688 			SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
689 			    slab, us_hlink);
690 		}
691 
692 	return (1);
693 }
694 
695 /*
696  * Free the hash bucket to the appropriate backing store.
697  *
698  * Arguments:
699  *	hash  The hash structure we're freeing; its uh_slab_hash table is
700  *	      returned to the appropriate backing store
701  *
702  * Returns:
703  *	Nothing
704  */
705 static void
706 hash_free(struct uma_hash *hash)
707 {
708 	if (hash->uh_slab_hash == NULL)
709 		return;
710 	if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
711 		zone_free_item(hashzone, hash->uh_slab_hash, NULL, SKIP_NONE);
712 	else
713 		free(hash->uh_slab_hash, M_UMAHASH);
714 }
715 
716 /*
717  * Frees all outstanding items in a bucket
718  *
719  * Arguments:
720  *	zone   The zone to free to, must be unlocked.
721  *	bucket The free/alloc bucket with items, cpu queue must be locked.
722  *
723  * Returns:
724  *	Nothing
725  */
726 
727 static void
728 bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
729 {
730 	int i;
731 
732 	if (bucket == NULL)
733 		return;
734 
735 	if (zone->uz_fini)
736 		for (i = 0; i < bucket->ub_cnt; i++)
737 			zone->uz_fini(bucket->ub_bucket[i], zone->uz_size);
738 	zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt);
739 	if (zone->uz_max_items > 0) {
740 		ZONE_LOCK(zone);
741 		zone->uz_items -= bucket->ub_cnt;
742 		if (zone->uz_sleepers && zone->uz_items < zone->uz_max_items)
743 			wakeup_one(zone);
744 		ZONE_UNLOCK(zone);
745 	}
746 	bucket->ub_cnt = 0;
747 }
748 
749 /*
750  * Drains the per cpu caches for a zone.
751  *
752  * NOTE: This may only be called while the zone is being torn down, and not
753  * during normal operation.  This is necessary in order that we do not have
754  * to migrate CPUs to drain the per-CPU caches.
755  *
756  * Arguments:
757  *	zone     The zone to drain, must be unlocked.
758  *
759  * Returns:
760  *	Nothing
761  */
762 static void
763 cache_drain(uma_zone_t zone)
764 {
765 	uma_cache_t cache;
766 	int cpu;
767 
768 	/*
769 	 * XXX: It is safe to not lock the per-CPU caches, because we're
770 	 * tearing down the zone anyway.  I.e., there will be no further use
771 	 * of the caches at this point.
772 	 *
773  * XXX: It would be good to be able to assert that the zone is being
774 	 * torn down to prevent improper use of cache_drain().
775 	 *
776 	 * XXX: We lock the zone before passing into bucket_cache_drain() as
777 	 * it is used elsewhere.  Should the tear-down path be made special
778 	 * there in some form?
779 	 */
780 	CPU_FOREACH(cpu) {
781 		cache = &zone->uz_cpu[cpu];
782 		bucket_drain(zone, cache->uc_allocbucket);
783 		bucket_drain(zone, cache->uc_freebucket);
784 		if (cache->uc_allocbucket != NULL)
785 			bucket_free(zone, cache->uc_allocbucket, NULL);
786 		if (cache->uc_freebucket != NULL)
787 			bucket_free(zone, cache->uc_freebucket, NULL);
788 		cache->uc_allocbucket = cache->uc_freebucket = NULL;
789 	}
790 	ZONE_LOCK(zone);
791 	bucket_cache_drain(zone);
792 	ZONE_UNLOCK(zone);
793 }
794 
795 static void
796 cache_shrink(uma_zone_t zone)
797 {
798 
799 	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
800 		return;
801 
802 	ZONE_LOCK(zone);
803 	zone->uz_count = (zone->uz_count_min + zone->uz_count) / 2;
804 	ZONE_UNLOCK(zone);
805 }
806 
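/*
 * Push the current CPU's alloc and free buckets back into the zone's
 * per-domain bucket cache; empty buckets are freed instead.  The caller,
 * cache_drain_safe(), binds the thread to the target CPU first, and the
 * critical section keeps us on that CPU while the cache is manipulated.
 */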
807 static void
808 cache_drain_safe_cpu(uma_zone_t zone)
809 {
810 	uma_cache_t cache;
811 	uma_bucket_t b1, b2;
812 	int domain;
813 
814 	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
815 		return;
816 
817 	b1 = b2 = NULL;
818 	ZONE_LOCK(zone);
819 	critical_enter();
820 	if (zone->uz_flags & UMA_ZONE_NUMA)
821 		domain = PCPU_GET(domain);
822 	else
823 		domain = 0;
824 	cache = &zone->uz_cpu[curcpu];
825 	if (cache->uc_allocbucket) {
826 		if (cache->uc_allocbucket->ub_cnt != 0)
827 			zone_put_bucket(zone, &zone->uz_domain[domain],
828 			    cache->uc_allocbucket, false);
829 		else
830 			b1 = cache->uc_allocbucket;
831 		cache->uc_allocbucket = NULL;
832 	}
833 	if (cache->uc_freebucket) {
834 		if (cache->uc_freebucket->ub_cnt != 0)
835 			zone_put_bucket(zone, &zone->uz_domain[domain],
836 			    cache->uc_freebucket, false);
837 		else
838 			b2 = cache->uc_freebucket;
839 		cache->uc_freebucket = NULL;
840 	}
841 	critical_exit();
842 	ZONE_UNLOCK(zone);
843 	if (b1)
844 		bucket_free(zone, b1, NULL);
845 	if (b2)
846 		bucket_free(zone, b2, NULL);
847 }
848 
849 /*
850  * Safely drain the per-CPU caches of a zone (or of all zones) into the
851  * zone's bucket cache.  This is an expensive call because it needs to
852  * bind to each CPU one by one and enter a critical section on each of
853  * them in order to safely access their cache buckets.
854  * The zone lock must not be held when calling this function.
855  */
856 static void
857 cache_drain_safe(uma_zone_t zone)
858 {
859 	int cpu;
860 
861 	/*
862 	 * Polite bucket size shrinking was not enough, shrink aggressively.
863 	 */
864 	if (zone)
865 		cache_shrink(zone);
866 	else
867 		zone_foreach(cache_shrink);
868 
869 	CPU_FOREACH(cpu) {
870 		thread_lock(curthread);
871 		sched_bind(curthread, cpu);
872 		thread_unlock(curthread);
873 
874 		if (zone)
875 			cache_drain_safe_cpu(zone);
876 		else
877 			zone_foreach(cache_drain_safe_cpu);
878 	}
879 	thread_lock(curthread);
880 	sched_unbind(curthread);
881 	thread_unlock(curthread);
882 }
883 
884 /*
885  * Drain the cached buckets from a zone.  Expects a locked zone on entry.
886  */
887 static void
888 bucket_cache_drain(uma_zone_t zone)
889 {
890 	uma_zone_domain_t zdom;
891 	uma_bucket_t bucket;
892 	int i;
893 
894 	/*
895 	 * Drain the bucket queues and free the buckets.
896 	 */
897 	for (i = 0; i < vm_ndomains; i++) {
898 		zdom = &zone->uz_domain[i];
899 		while ((bucket = zone_try_fetch_bucket(zone, zdom, false)) !=
900 		    NULL) {
901 			ZONE_UNLOCK(zone);
902 			bucket_drain(zone, bucket);
903 			bucket_free(zone, bucket, NULL);
904 			ZONE_LOCK(zone);
905 		}
906 	}
907 
908 	/*
909 	 * Shrink the bucket size further.  The price of a single zone lock
910 	 * collision is probably lower than the price of a global cache drain.
911 	 */
912 	if (zone->uz_count > zone->uz_count_min)
913 		zone->uz_count--;
914 }
915 
916 static void
917 keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
918 {
919 	uint8_t *mem;
920 	int i;
921 	uint8_t flags;
922 
923 	CTR4(KTR_UMA, "keg_free_slab keg %s(%p) slab %p, returning %d bytes",
924 	    keg->uk_name, keg, slab, PAGE_SIZE * keg->uk_ppera);
925 
926 	mem = slab->us_data;
927 	flags = slab->us_flags;
928 	i = start;
929 	if (keg->uk_fini != NULL) {
930 		for (i--; i > -1; i--)
931 #ifdef INVARIANTS
932 		/*
933 		 * trash_fini implies that dtor was trash_dtor. trash_fini
934 		 * would check that memory hasn't been modified since free,
935 		 * which executed trash_dtor.
936 		 * That's why we need to run the uma_dbg_kskip() check here,
937 		 * although we don't make this skip check for other init/fini
938 		 * invocations.
939 		 */
940 		if (!uma_dbg_kskip(keg, slab->us_data + (keg->uk_rsize * i)) ||
941 		    keg->uk_fini != trash_fini)
942 #endif
943 			keg->uk_fini(slab->us_data + (keg->uk_rsize * i),
944 			    keg->uk_size);
945 	}
946 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
947 		zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
948 	keg->uk_freef(mem, PAGE_SIZE * keg->uk_ppera, flags);
949 	uma_total_dec(PAGE_SIZE * keg->uk_ppera);
950 }
951 
952 /*
953  * Frees pages from a keg back to the system.  This is done on demand from
954  * the pageout daemon.
955  *
956  * Returns nothing.
957  */
958 static void
959 keg_drain(uma_keg_t keg)
960 {
961 	struct slabhead freeslabs = { 0 };
962 	uma_domain_t dom;
963 	uma_slab_t slab, tmp;
964 	int i;
965 
966 	/*
967 	 * We don't want to take pages from statically allocated kegs at this
968 	 * time.
969 	 */
970 	if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
971 		return;
972 
973 	CTR3(KTR_UMA, "keg_drain %s(%p) free items: %u",
974 	    keg->uk_name, keg, keg->uk_free);
975 	KEG_LOCK(keg);
976 	if (keg->uk_free == 0)
977 		goto finished;
978 
979 	for (i = 0; i < vm_ndomains; i++) {
980 		dom = &keg->uk_domain[i];
981 		LIST_FOREACH_SAFE(slab, &dom->ud_free_slab, us_link, tmp) {
982 			/* We have nowhere to free these to. */
983 			if (slab->us_flags & UMA_SLAB_BOOT)
984 				continue;
985 
986 			LIST_REMOVE(slab, us_link);
987 			keg->uk_pages -= keg->uk_ppera;
988 			keg->uk_free -= keg->uk_ipers;
989 
990 			if (keg->uk_flags & UMA_ZONE_HASH)
991 				UMA_HASH_REMOVE(&keg->uk_hash, slab,
992 				    slab->us_data);
993 
994 			SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);
995 		}
996 	}
997 
998 finished:
999 	KEG_UNLOCK(keg);
1000 
1001 	while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
1002 		SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
1003 		keg_free_slab(keg, slab, keg->uk_ipers);
1004 	}
1005 }
1006 
1007 static void
1008 zone_drain_wait(uma_zone_t zone, int waitok)
1009 {
1010 
1011 	/*
1012 	 * Set draining to interlock with zone_dtor() so we can release our
1013 	 * locks as we go.  Only dtor() should do a WAITOK call since it
1014 	 * is the only call that knows the structure will still be available
1015 	 * when it wakes up.
1016 	 */
1017 	ZONE_LOCK(zone);
1018 	while (zone->uz_flags & UMA_ZFLAG_DRAINING) {
1019 		if (waitok == M_NOWAIT)
1020 			goto out;
1021 		msleep(zone, zone->uz_lockptr, PVM, "zonedrain", 1);
1022 	}
1023 	zone->uz_flags |= UMA_ZFLAG_DRAINING;
1024 	bucket_cache_drain(zone);
1025 	ZONE_UNLOCK(zone);
1026 	/*
1027 	 * The DRAINING flag protects us from being freed while
1028 	 * we're running.  Normally the uma_rwlock would protect us but we
1029 	 * must be able to release and acquire the right lock for each keg.
1030 	 */
1031 	keg_drain(zone->uz_keg);
1032 	ZONE_LOCK(zone);
1033 	zone->uz_flags &= ~UMA_ZFLAG_DRAINING;
1034 	wakeup(zone);
1035 out:
1036 	ZONE_UNLOCK(zone);
1037 }
1038 
1039 void
1040 zone_drain(uma_zone_t zone)
1041 {
1042 
1043 	zone_drain_wait(zone, M_NOWAIT);
1044 }
1045 
1046 /*
1047  * Allocate a new slab for a keg.  This does not insert the slab onto a list.
1048  * If the allocation was successful, the keg lock will be held upon return,
1049  * otherwise the keg will be left unlocked.
1050  *
1051  * Arguments:
1052  *	flags   Wait flags for the item initialization routine
1053  *	aflags  Wait flags for the slab allocation
1054  *
1055  * Returns:
1056  *	The slab that was allocated or NULL if there is no memory and the
1057  *	caller specified M_NOWAIT.
1058  */
1059 static uma_slab_t
1060 keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int domain, int flags,
1061     int aflags)
1062 {
1063 	uma_alloc allocf;
1064 	uma_slab_t slab;
1065 	unsigned long size;
1066 	uint8_t *mem;
1067 	uint8_t sflags;
1068 	int i;
1069 
1070 	KASSERT(domain >= 0 && domain < vm_ndomains,
1071 	    ("keg_alloc_slab: domain %d out of range", domain));
1072 	KEG_LOCK_ASSERT(keg);
1073 	MPASS(zone->uz_lockptr == &keg->uk_lock);
1074 
1075 	allocf = keg->uk_allocf;
1076 	KEG_UNLOCK(keg);
1077 
1078 	slab = NULL;
1079 	mem = NULL;
1080 	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
1081 		slab = zone_alloc_item(keg->uk_slabzone, NULL, domain, aflags);
1082 		if (slab == NULL)
1083 			goto out;
1084 	}
1085 
1086 	/*
1087 	 * This reproduces the old vm_zone behavior of zero filling pages the
1088 	 * first time they are added to a zone.
1089 	 *
1090 	 * Malloced items are zeroed in uma_zalloc.
1091 	 */
1092 
1093 	if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
1094 		aflags |= M_ZERO;
1095 	else
1096 		aflags &= ~M_ZERO;
1097 
1098 	if (keg->uk_flags & UMA_ZONE_NODUMP)
1099 		aflags |= M_NODUMP;
1100 
1101 	/* zone is passed for legacy reasons. */
1102 	size = keg->uk_ppera * PAGE_SIZE;
1103 	mem = allocf(zone, size, domain, &sflags, aflags);
1104 	if (mem == NULL) {
1105 		if (keg->uk_flags & UMA_ZONE_OFFPAGE)
1106 			zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
1107 		slab = NULL;
1108 		goto out;
1109 	}
1110 	uma_total_inc(size);
1111 
1112 	/* Point the slab into the allocated memory */
1113 	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
1114 		slab = (uma_slab_t )(mem + keg->uk_pgoff);
1115 
1116 	if (keg->uk_flags & UMA_ZONE_VTOSLAB)
1117 		for (i = 0; i < keg->uk_ppera; i++)
1118 			vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);
1119 
1120 	slab->us_keg = keg;
1121 	slab->us_data = mem;
1122 	slab->us_freecount = keg->uk_ipers;
1123 	slab->us_flags = sflags;
1124 	slab->us_domain = domain;
1125 	BIT_FILL(SLAB_SETSIZE, &slab->us_free);
1126 #ifdef INVARIANTS
1127 	BIT_ZERO(SLAB_SETSIZE, &slab->us_debugfree);
1128 #endif
1129 
1130 	if (keg->uk_init != NULL) {
1131 		for (i = 0; i < keg->uk_ipers; i++)
1132 			if (keg->uk_init(slab->us_data + (keg->uk_rsize * i),
1133 			    keg->uk_size, flags) != 0)
1134 				break;
1135 		if (i != keg->uk_ipers) {
1136 			keg_free_slab(keg, slab, i);
1137 			slab = NULL;
1138 			goto out;
1139 		}
1140 	}
1141 	KEG_LOCK(keg);
1142 
1143 	CTR3(KTR_UMA, "keg_alloc_slab: allocated slab %p for %s(%p)",
1144 	    slab, keg->uk_name, keg);
1145 
1146 	if (keg->uk_flags & UMA_ZONE_HASH)
1147 		UMA_HASH_INSERT(&keg->uk_hash, slab, mem);
1148 
1149 	keg->uk_pages += keg->uk_ppera;
1150 	keg->uk_free += keg->uk_ipers;
1151 
1152 out:
1153 	return (slab);
1154 }
1155 
1156 /*
1157  * This function is intended to be used early on in place of page_alloc() so
1158  * that we may use the boot time page cache to satisfy allocations before
1159  * the VM is ready.
1160  */
1161 static void *
1162 startup_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
1163     int wait)
1164 {
1165 	uma_keg_t keg;
1166 	void *mem;
1167 	int pages;
1168 
1169 	keg = zone->uz_keg;
1170 	/*
1171 	 * If we are in BOOT_BUCKETS or higher, then switch to the real
1172 	 * allocator.  Zones with page-sized slabs switch at BOOT_PAGEALLOC.
1173 	 */
1174 	switch (booted) {
1175 		case BOOT_COLD:
1176 		case BOOT_STRAPPED:
1177 			break;
1178 		case BOOT_PAGEALLOC:
1179 			if (keg->uk_ppera > 1)
1180 				break;
1181 		case BOOT_BUCKETS:
1182 		case BOOT_RUNNING:
1183 #ifdef UMA_MD_SMALL_ALLOC
1184 			keg->uk_allocf = (keg->uk_ppera > 1) ?
1185 			    page_alloc : uma_small_alloc;
1186 #else
1187 			keg->uk_allocf = page_alloc;
1188 #endif
1189 			return keg->uk_allocf(zone, bytes, domain, pflag, wait);
1190 	}
1191 
1192 	/*
1193 	 * Check our small startup cache to see if it has pages remaining.
1194 	 */
1195 	pages = howmany(bytes, PAGE_SIZE);
1196 	KASSERT(pages > 0, ("%s can't reserve 0 pages", __func__));
1197 	if (pages > boot_pages)
1198 		panic("UMA zone \"%s\": Increase vm.boot_pages", zone->uz_name);
1199 #ifdef DIAGNOSTIC
1200 	printf("%s from \"%s\", %d boot pages left\n", __func__, zone->uz_name,
1201 	    boot_pages);
1202 #endif
1203 	mem = bootmem;
1204 	boot_pages -= pages;
1205 	bootmem += pages * PAGE_SIZE;
1206 	*pflag = UMA_SLAB_BOOT;
1207 
1208 	return (mem);
1209 }
1210 
1211 /*
1212  * Allocates a number of pages from the system
1213  *
1214  * Arguments:
1215  *	bytes  The number of bytes requested
1216  *	wait  Shall we wait?
1217  *
1218  * Returns:
1219  *	A pointer to the alloced memory or possibly
1220  *	NULL if M_NOWAIT is set.
1221  */
1222 static void *
1223 page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
1224     int wait)
1225 {
1226 	void *p;	/* Returned page */
1227 
1228 	*pflag = UMA_SLAB_KERNEL;
1229 	p = (void *)kmem_malloc_domainset(DOMAINSET_FIXED(domain), bytes, wait);
1230 
1231 	return (p);
1232 }
1233 
1234 static void *
1235 pcpu_page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
1236     int wait)
1237 {
1238 	struct pglist alloctail;
1239 	vm_offset_t addr, zkva;
1240 	int cpu, flags;
1241 	vm_page_t p, p_next;
1242 #ifdef NUMA
1243 	struct pcpu *pc;
1244 #endif
1245 
1246 	MPASS(bytes == (mp_maxid + 1) * PAGE_SIZE);
1247 
1248 	TAILQ_INIT(&alloctail);
1249 	flags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
1250 	    malloc2vm_flags(wait);
1251 	*pflag = UMA_SLAB_KERNEL;
1252 	for (cpu = 0; cpu <= mp_maxid; cpu++) {
1253 		if (CPU_ABSENT(cpu)) {
1254 			p = vm_page_alloc(NULL, 0, flags);
1255 		} else {
1256 #ifndef NUMA
1257 			p = vm_page_alloc(NULL, 0, flags);
1258 #else
1259 			pc = pcpu_find(cpu);
1260 			p = vm_page_alloc_domain(NULL, 0, pc->pc_domain, flags);
1261 			if (__predict_false(p == NULL))
1262 				p = vm_page_alloc(NULL, 0, flags);
1263 #endif
1264 		}
1265 		if (__predict_false(p == NULL))
1266 			goto fail;
1267 		TAILQ_INSERT_TAIL(&alloctail, p, listq);
1268 	}
1269 	if ((addr = kva_alloc(bytes)) == 0)
1270 		goto fail;
1271 	zkva = addr;
1272 	TAILQ_FOREACH(p, &alloctail, listq) {
1273 		pmap_qenter(zkva, &p, 1);
1274 		zkva += PAGE_SIZE;
1275 	}
1276 	return ((void*)addr);
1277  fail:
1278 	TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
1279 		vm_page_unwire(p, PQ_NONE);
1280 		vm_page_free(p);
1281 	}
1282 	return (NULL);
1283 }
1284 
1285 /*
1286  * Allocates a number of pages that do not belong to a VM object
1287  *
1288  * Arguments:
1289  *	bytes  The number of bytes requested
1290  *	wait   Shall we wait?
1291  *
1292  * Returns:
1293  *	A pointer to the alloced memory or possibly
1294  *	NULL if M_NOWAIT is set.
1295  */
1296 static void *
1297 noobj_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags,
1298     int wait)
1299 {
1300 	TAILQ_HEAD(, vm_page) alloctail;
1301 	u_long npages;
1302 	vm_offset_t retkva, zkva;
1303 	vm_page_t p, p_next;
1304 	uma_keg_t keg;
1305 
1306 	TAILQ_INIT(&alloctail);
1307 	keg = zone->uz_keg;
1308 
1309 	npages = howmany(bytes, PAGE_SIZE);
1310 	while (npages > 0) {
1311 		p = vm_page_alloc_domain(NULL, 0, domain, VM_ALLOC_INTERRUPT |
1312 		    VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
1313 		    ((wait & M_WAITOK) != 0 ? VM_ALLOC_WAITOK :
1314 		    VM_ALLOC_NOWAIT));
1315 		if (p != NULL) {
1316 			/*
1317 			 * Since the page does not belong to an object, its
1318 			 * listq is unused.
1319 			 */
1320 			TAILQ_INSERT_TAIL(&alloctail, p, listq);
1321 			npages--;
1322 			continue;
1323 		}
1324 		/*
1325 		 * Page allocation failed, free intermediate pages and
1326 		 * exit.
1327 		 */
1328 		TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
1329 			vm_page_unwire(p, PQ_NONE);
1330 			vm_page_free(p);
1331 		}
1332 		return (NULL);
1333 	}
1334 	*flags = UMA_SLAB_PRIV;
1335 	zkva = keg->uk_kva +
1336 	    atomic_fetchadd_long(&keg->uk_offset, round_page(bytes));
1337 	retkva = zkva;
1338 	TAILQ_FOREACH(p, &alloctail, listq) {
1339 		pmap_qenter(zkva, &p, 1);
1340 		zkva += PAGE_SIZE;
1341 	}
1342 
1343 	return ((void *)retkva);
1344 }
1345 
1346 /*
1347  * Frees a number of pages to the system
1348  *
1349  * Arguments:
1350  *	mem   A pointer to the memory to be freed
1351  *	size  The size of the memory being freed
1352  *	flags The original p->us_flags field
1353  *
1354  * Returns:
1355  *	Nothing
1356  */
1357 static void
1358 page_free(void *mem, vm_size_t size, uint8_t flags)
1359 {
1360 
1361 	if ((flags & UMA_SLAB_KERNEL) == 0)
1362 		panic("UMA: page_free used with invalid flags %x", flags);
1363 
1364 	kmem_free((vm_offset_t)mem, size);
1365 }
1366 
1367 /*
1368  * Frees pcpu zone allocations
1369  *
1370  * Arguments:
1371  *	mem   A pointer to the memory to be freed
1372  *	size  The size of the memory being freed
1373  *	flags The original p->us_flags field
1374  *
1375  * Returns:
1376  *	Nothing
1377  */
1378 static void
1379 pcpu_page_free(void *mem, vm_size_t size, uint8_t flags)
1380 {
1381 	vm_offset_t sva, curva;
1382 	vm_paddr_t paddr;
1383 	vm_page_t m;
1384 
1385 	MPASS(size == (mp_maxid+1)*PAGE_SIZE);
1386 	sva = (vm_offset_t)mem;
1387 	for (curva = sva; curva < sva + size; curva += PAGE_SIZE) {
1388 		paddr = pmap_kextract(curva);
1389 		m = PHYS_TO_VM_PAGE(paddr);
1390 		vm_page_unwire(m, PQ_NONE);
1391 		vm_page_free(m);
1392 	}
1393 	pmap_qremove(sva, size >> PAGE_SHIFT);
1394 	kva_free(sva, size);
1395 }
1396 
1397 
1398 /*
1399  * Zero fill initializer
1400  *
1401  * Arguments/Returns follow uma_init specifications
1402  */
1403 static int
1404 zero_init(void *mem, int size, int flags)
1405 {
1406 	bzero(mem, size);
1407 	return (0);
1408 }
1409 
1410 /*
1411  * Finish creating a small uma keg.  This calculates ipers and the keg size.
1412  *
1413  * Arguments
1414  *	keg  The keg we should initialize
1415  *
1416  * Returns
1417  *	Nothing
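 *
 * A worked example with illustrative, assumed numbers (not taken from this
 * file): with a 4096-byte slab, a 32-byte in-slab header (shsize) and a
 * 100-byte item aligned to 8 bytes, rsize rounds up to 104 and
 * uk_ipers = (4096 - 32) / 104 = 39, leaving 4096 - (39 * 104 + 32) = 8
 * wasted bytes, well under the slabsize / UMA_MAX_WASTE threshold used below.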
1418  */
1419 static void
1420 keg_small_init(uma_keg_t keg)
1421 {
1422 	u_int rsize;
1423 	u_int memused;
1424 	u_int wastedspace;
1425 	u_int shsize;
1426 	u_int slabsize;
1427 
1428 	if (keg->uk_flags & UMA_ZONE_PCPU) {
1429 		u_int ncpus = (mp_maxid + 1) ? (mp_maxid + 1) : MAXCPU;
1430 
1431 		slabsize = UMA_PCPU_ALLOC_SIZE;
1432 		keg->uk_ppera = ncpus;
1433 	} else {
1434 		slabsize = UMA_SLAB_SIZE;
1435 		keg->uk_ppera = 1;
1436 	}
1437 
1438 	/*
1439 	 * Calculate the size of each allocation (rsize) according to
1440 	 * alignment.  If the requested size is smaller than we have
1441 	 * allocation bits for we round it up.
1442 	 */
1443 	rsize = keg->uk_size;
1444 	if (rsize < slabsize / SLAB_SETSIZE)
1445 		rsize = slabsize / SLAB_SETSIZE;
1446 	if (rsize & keg->uk_align)
1447 		rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1);
1448 	keg->uk_rsize = rsize;
1449 
1450 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0 ||
1451 	    keg->uk_rsize < UMA_PCPU_ALLOC_SIZE,
1452 	    ("%s: size %u too large", __func__, keg->uk_rsize));
1453 
1454 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
1455 		shsize = 0;
1456 	else
1457 		shsize = SIZEOF_UMA_SLAB;
1458 
1459 	if (rsize <= slabsize - shsize)
1460 		keg->uk_ipers = (slabsize - shsize) / rsize;
1461 	else {
1462 		/* Handle special case when we have 1 item per slab, so
1463 		 * alignment requirement can be relaxed. */
1464 		KASSERT(keg->uk_size <= slabsize - shsize,
1465 		    ("%s: size %u greater than slab", __func__, keg->uk_size));
1466 		keg->uk_ipers = 1;
1467 	}
1468 	KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
1469 	    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
1470 
1471 	memused = keg->uk_ipers * rsize + shsize;
1472 	wastedspace = slabsize - memused;
1473 
1474 	/*
1475 	 * We can't do OFFPAGE if we're internal or if we've been
1476 	 * asked to not go to the VM for buckets.  If we do this we
1477 	 * may end up going to the VM for slabs, which we do not
1478 	 * want to do if we're UMA_ZFLAG_CACHEONLY as a result
1479 	 * of UMA_ZONE_VM, which clearly forbids it.
1480 	 */
1481 	if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
1482 	    (keg->uk_flags & UMA_ZFLAG_CACHEONLY))
1483 		return;
1484 
1485 	/*
1486 	 * See if using an OFFPAGE slab will limit our waste.  Only do
1487 	 * this if it permits more items per-slab.
1488 	 *
1489 	 * XXX We could try growing slabsize to limit max waste as well.
1490 	 * Historically this was not done because the VM could not
1491 	 * efficiently handle contiguous allocations.
1492 	 */
1493 	if ((wastedspace >= slabsize / UMA_MAX_WASTE) &&
1494 	    (keg->uk_ipers < (slabsize / keg->uk_rsize))) {
1495 		keg->uk_ipers = slabsize / keg->uk_rsize;
1496 		KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
1497 		    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
1498 		CTR6(KTR_UMA, "UMA decided we need offpage slab headers for "
1499 		    "keg: %s(%p), calculated wastedspace = %d, "
1500 		    "maximum wasted space allowed = %d, "
1501 		    "calculated ipers = %d, "
1502 		    "new wasted space = %d\n", keg->uk_name, keg, wastedspace,
1503 		    slabsize / UMA_MAX_WASTE, keg->uk_ipers,
1504 		    slabsize - keg->uk_ipers * keg->uk_rsize);
1505 		keg->uk_flags |= UMA_ZONE_OFFPAGE;
1506 	}
1507 
1508 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
1509 	    (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1510 		keg->uk_flags |= UMA_ZONE_HASH;
1511 }
1512 
1513 /*
1514  * Finish creating a large (> UMA_SLAB_SIZE) uma keg.  Just give in and do
1515  * OFFPAGE for now.  When I can allow for more dynamic slab sizes this will be
1516  * more complicated.
1517  *
1518  * Arguments
1519  *	keg  The keg we should initialize
1520  *
1521  * Returns
1522  *	Nothing
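 *
 * Illustrative example (assuming 4 KB pages): a 5000-byte item needs
 * uk_ppera = howmany(5000, 4096) = 2 pages and holds a single item per slab;
 * the 8192 - 5000 = 3192 leftover bytes are enough for an inline slab header,
 * so OFFPAGE (or an extra page for internal kegs) is only forced when the
 * leftover space is smaller than SIZEOF_UMA_SLAB.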
1523  */
1524 static void
1525 keg_large_init(uma_keg_t keg)
1526 {
1527 
1528 	KASSERT(keg != NULL, ("Keg is null in keg_large_init"));
1529 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
1530 	    ("%s: Cannot large-init a UMA_ZONE_PCPU keg", __func__));
1531 
1532 	keg->uk_ppera = howmany(keg->uk_size, PAGE_SIZE);
1533 	keg->uk_ipers = 1;
1534 	keg->uk_rsize = keg->uk_size;
1535 
1536 	/* Check whether we have enough space to not do OFFPAGE. */
1537 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) == 0 &&
1538 	    PAGE_SIZE * keg->uk_ppera - keg->uk_rsize < SIZEOF_UMA_SLAB) {
1539 		/*
1540 		 * We can't do OFFPAGE if we're internal, in which case
1541 		 * we need an extra page per allocation to contain the
1542 		 * slab header.
1543 		 */
1544 		if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) == 0)
1545 			keg->uk_flags |= UMA_ZONE_OFFPAGE;
1546 		else
1547 			keg->uk_ppera++;
1548 	}
1549 
1550 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
1551 	    (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1552 		keg->uk_flags |= UMA_ZONE_HASH;
1553 }
1554 
1555 static void
1556 keg_cachespread_init(uma_keg_t keg)
1557 {
1558 	int alignsize;
1559 	int trailer;
1560 	int pages;
1561 	int rsize;
1562 
1563 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
1564 	    ("%s: Cannot cachespread-init a UMA_ZONE_PCPU keg", __func__));
1565 
1566 	alignsize = keg->uk_align + 1;
1567 	rsize = keg->uk_size;
1568 	/*
1569 	 * We want one item to start on every align boundary in a page.  To
1570 	 * do this we will span pages.  We will also extend the item by the
1571 	 * size of align if it is an even multiple of align.  Otherwise, it
1572 	 * would fall on the same boundary every time.
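	 *
	 * A sketch with assumed numbers (64-byte alignment, 4 KB pages): a
	 * 128-byte item is already an even multiple of 64, so rsize becomes
	 * 128 + 64 = 192; successive items then start at offsets 0, 192,
	 * 384, ... and cycle through every 64-byte boundary within a page.
	 * The slab spans pages = 192 / 64 = 3 pages in that case.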
1573 	 */
1574 	if (rsize & keg->uk_align)
1575 		rsize = (rsize & ~keg->uk_align) + alignsize;
1576 	if ((rsize & alignsize) == 0)
1577 		rsize += alignsize;
1578 	trailer = rsize - keg->uk_size;
1579 	pages = (rsize * (PAGE_SIZE / alignsize)) / PAGE_SIZE;
1580 	pages = MIN(pages, (128 * 1024) / PAGE_SIZE);
1581 	keg->uk_rsize = rsize;
1582 	keg->uk_ppera = pages;
1583 	keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize;
1584 	keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
1585 	KASSERT(keg->uk_ipers <= SLAB_SETSIZE,
1586 	    ("%s: keg->uk_ipers too high(%d) increase max_ipers", __func__,
1587 	    keg->uk_ipers));
1588 }
1589 
1590 /*
1591  * Keg header ctor.  This initializes all fields, locks, etc.  And inserts
1592  * the keg onto the global keg list.
1593  *
1594  * Arguments/Returns follow uma_ctor specifications
1595  *	udata  Actually uma_kctor_args
1596  */
1597 static int
1598 keg_ctor(void *mem, int size, void *udata, int flags)
1599 {
1600 	struct uma_kctor_args *arg = udata;
1601 	uma_keg_t keg = mem;
1602 	uma_zone_t zone;
1603 
1604 	bzero(keg, size);
1605 	keg->uk_size = arg->size;
1606 	keg->uk_init = arg->uminit;
1607 	keg->uk_fini = arg->fini;
1608 	keg->uk_align = arg->align;
1609 	keg->uk_free = 0;
1610 	keg->uk_reserve = 0;
1611 	keg->uk_pages = 0;
1612 	keg->uk_flags = arg->flags;
1613 	keg->uk_slabzone = NULL;
1614 
1615 	/*
1616 	 * We use a global round-robin policy by default.  Zones with
1617 	 * UMA_ZONE_NUMA set will use first-touch instead, in which case the
1618 	 * iterator is never run.
1619 	 */
1620 	keg->uk_dr.dr_policy = DOMAINSET_RR();
1621 	keg->uk_dr.dr_iter = 0;
1622 
1623 	/*
1624 	 * The master zone is passed to us at keg-creation time.
1625 	 */
1626 	zone = arg->zone;
1627 	keg->uk_name = zone->uz_name;
1628 
1629 	if (arg->flags & UMA_ZONE_VM)
1630 		keg->uk_flags |= UMA_ZFLAG_CACHEONLY;
1631 
1632 	if (arg->flags & UMA_ZONE_ZINIT)
1633 		keg->uk_init = zero_init;
1634 
1635 	if (arg->flags & UMA_ZONE_MALLOC)
1636 		keg->uk_flags |= UMA_ZONE_VTOSLAB;
1637 
1638 	if (arg->flags & UMA_ZONE_PCPU)
1639 #ifdef SMP
1640 		keg->uk_flags |= UMA_ZONE_OFFPAGE;
1641 #else
1642 		keg->uk_flags &= ~UMA_ZONE_PCPU;
1643 #endif
1644 
1645 	if (keg->uk_flags & UMA_ZONE_CACHESPREAD) {
1646 		keg_cachespread_init(keg);
1647 	} else {
1648 		if (keg->uk_size > UMA_SLAB_SPACE)
1649 			keg_large_init(keg);
1650 		else
1651 			keg_small_init(keg);
1652 	}
1653 
1654 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
1655 		keg->uk_slabzone = slabzone;
1656 
1657 	/*
1658 	 * If we haven't booted yet we need allocations to go through the
1659 	 * startup cache until the vm is ready.
1660 	 */
1661 	if (booted < BOOT_PAGEALLOC)
1662 		keg->uk_allocf = startup_alloc;
1663 #ifdef UMA_MD_SMALL_ALLOC
1664 	else if (keg->uk_ppera == 1)
1665 		keg->uk_allocf = uma_small_alloc;
1666 #endif
1667 	else if (keg->uk_flags & UMA_ZONE_PCPU)
1668 		keg->uk_allocf = pcpu_page_alloc;
1669 	else
1670 		keg->uk_allocf = page_alloc;
1671 #ifdef UMA_MD_SMALL_ALLOC
1672 	if (keg->uk_ppera == 1)
1673 		keg->uk_freef = uma_small_free;
1674 	else
1675 #endif
1676 	if (keg->uk_flags & UMA_ZONE_PCPU)
1677 		keg->uk_freef = pcpu_page_free;
1678 	else
1679 		keg->uk_freef = page_free;
1680 
1681 	/*
1682 	 * Initialize keg's lock
1683 	 */
1684 	KEG_LOCK_INIT(keg, (arg->flags & UMA_ZONE_MTXCLASS));
1685 
1686 	/*
1687 	 * If we're putting the slab header in the actual page we need to
1688 	 * figure out where in each page it goes.  See SIZEOF_UMA_SLAB
1689 	 * macro definition.
1690 	 */
1691 	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) {
1692 		keg->uk_pgoff = (PAGE_SIZE * keg->uk_ppera) - SIZEOF_UMA_SLAB;
1693 		/*
1694 		 * The only way the following is possible is if, with our
1695 		 * UMA_ALIGN_PTR adjustments, we are now bigger than
1696 		 * UMA_SLAB_SIZE.  I haven't checked whether this is
1697 		 * mathematically possible for all cases, so we make
1698 		 * sure here anyway.
1699 		 */
1700 		KASSERT(keg->uk_pgoff + sizeof(struct uma_slab) <=
1701 		    PAGE_SIZE * keg->uk_ppera,
1702 		    ("zone %s ipers %d rsize %d size %d slab won't fit",
1703 		    zone->uz_name, keg->uk_ipers, keg->uk_rsize, keg->uk_size));
1704 	}
1705 
1706 	if (keg->uk_flags & UMA_ZONE_HASH)
1707 		hash_alloc(&keg->uk_hash);
1708 
1709 	CTR5(KTR_UMA, "keg_ctor %p zone %s(%p) out %d free %d\n",
1710 	    keg, zone->uz_name, zone,
1711 	    (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free,
1712 	    keg->uk_free);
1713 
1714 	LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);
1715 
1716 	rw_wlock(&uma_rwlock);
1717 	LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
1718 	rw_wunlock(&uma_rwlock);
1719 	return (0);
1720 }
1721 
1722 static void
1723 zone_alloc_counters(uma_zone_t zone)
1724 {
1725 
1726 	zone->uz_allocs = counter_u64_alloc(M_WAITOK);
1727 	zone->uz_frees = counter_u64_alloc(M_WAITOK);
1728 	zone->uz_fails = counter_u64_alloc(M_WAITOK);
1729 }
1730 
1731 /*
1732  * Zone header ctor.  This initializes all fields, locks, etc.
1733  *
1734  * Arguments/Returns follow uma_ctor specifications
1735  *	udata  Actually uma_zctor_args
1736  */
1737 static int
1738 zone_ctor(void *mem, int size, void *udata, int flags)
1739 {
1740 	struct uma_zctor_args *arg = udata;
1741 	uma_zone_t zone = mem;
1742 	uma_zone_t z;
1743 	uma_keg_t keg;
1744 
1745 	bzero(zone, size);
1746 	zone->uz_name = arg->name;
1747 	zone->uz_ctor = arg->ctor;
1748 	zone->uz_dtor = arg->dtor;
1749 	zone->uz_slab = zone_fetch_slab;
1750 	zone->uz_init = NULL;
1751 	zone->uz_fini = NULL;
1752 	zone->uz_sleeps = 0;
1753 	zone->uz_count = 0;
1754 	zone->uz_count_min = 0;
1755 	zone->uz_count_max = BUCKET_MAX;
1756 	zone->uz_flags = 0;
1757 	zone->uz_warning = NULL;
1758 	/* The domain structures follow the cpu structures. */
1759 	zone->uz_domain = (struct uma_zone_domain *)&zone->uz_cpu[mp_ncpus];
1760 	zone->uz_bkt_max = ULONG_MAX;
1761 	timevalclear(&zone->uz_ratecheck);
1762 
1763 	if (__predict_true(booted == BOOT_RUNNING))
1764 		zone_alloc_counters(zone);
1765 	else {
1766 		zone->uz_allocs = EARLY_COUNTER;
1767 		zone->uz_frees = EARLY_COUNTER;
1768 		zone->uz_fails = EARLY_COUNTER;
1769 	}
1770 
1771 	/*
1772 	 * This is a pure cache zone, no kegs.
1773 	 */
1774 	if (arg->import) {
1775 		if (arg->flags & UMA_ZONE_VM)
1776 			arg->flags |= UMA_ZFLAG_CACHEONLY;
1777 		zone->uz_flags = arg->flags;
1778 		zone->uz_size = arg->size;
1779 		zone->uz_import = arg->import;
1780 		zone->uz_release = arg->release;
1781 		zone->uz_arg = arg->arg;
1782 		zone->uz_lockptr = &zone->uz_lock;
1783 		ZONE_LOCK_INIT(zone, (arg->flags & UMA_ZONE_MTXCLASS));
1784 		rw_wlock(&uma_rwlock);
1785 		LIST_INSERT_HEAD(&uma_cachezones, zone, uz_link);
1786 		rw_wunlock(&uma_rwlock);
1787 		goto out;
1788 	}
1789 
1790 	/*
1791 	 * Use the regular zone/keg/slab allocator.
1792 	 */
1793 	zone->uz_import = (uma_import)zone_import;
1794 	zone->uz_release = (uma_release)zone_release;
1795 	zone->uz_arg = zone;
1796 	keg = arg->keg;
1797 
1798 	if (arg->flags & UMA_ZONE_SECONDARY) {
1799 		KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
1800 		zone->uz_init = arg->uminit;
1801 		zone->uz_fini = arg->fini;
1802 		zone->uz_lockptr = &keg->uk_lock;
1803 		zone->uz_flags |= UMA_ZONE_SECONDARY;
1804 		rw_wlock(&uma_rwlock);
1805 		ZONE_LOCK(zone);
1806 		LIST_FOREACH(z, &keg->uk_zones, uz_link) {
1807 			if (LIST_NEXT(z, uz_link) == NULL) {
1808 				LIST_INSERT_AFTER(z, zone, uz_link);
1809 				break;
1810 			}
1811 		}
1812 		ZONE_UNLOCK(zone);
1813 		rw_wunlock(&uma_rwlock);
1814 	} else if (keg == NULL) {
1815 		if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
1816 		    arg->align, arg->flags)) == NULL)
1817 			return (ENOMEM);
1818 	} else {
1819 		struct uma_kctor_args karg;
1820 		int error;
1821 
1822 		/* We should only be here from uma_startup() */
1823 		karg.size = arg->size;
1824 		karg.uminit = arg->uminit;
1825 		karg.fini = arg->fini;
1826 		karg.align = arg->align;
1827 		karg.flags = arg->flags;
1828 		karg.zone = zone;
1829 		error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
1830 		    flags);
1831 		if (error)
1832 			return (error);
1833 	}
1834 
1835 	zone->uz_keg = keg;
1836 	zone->uz_size = keg->uk_size;
1837 	zone->uz_flags |= (keg->uk_flags &
1838 	    (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT));
1839 
1840 	/*
1841 	 * Some internal zones don't have room allocated for the per cpu
1842 	 * caches.  If we're internal, bail out here.
1843 	 */
1844 	if (keg->uk_flags & UMA_ZFLAG_INTERNAL) {
1845 		KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0,
1846 		    ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
1847 		return (0);
1848 	}
1849 
1850 out:
1851 	KASSERT((arg->flags & (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET)) !=
1852 	    (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET),
1853 	    ("Invalid zone flag combination"));
1854 	if ((arg->flags & UMA_ZONE_MAXBUCKET) != 0)
1855 		zone->uz_count = BUCKET_MAX;
1856 	else if ((arg->flags & UMA_ZONE_NOBUCKET) != 0)
1857 		zone->uz_count = 0;
1858 	else
1859 		zone->uz_count = bucket_select(zone->uz_size);
1860 	zone->uz_count_min = zone->uz_count;
1861 
1862 	return (0);
1863 }
1864 
1865 /*
1866  * Keg header dtor.  This frees all data, destroys locks, frees the hash
1867  * table and removes the keg from the global list.
1868  *
1869  * Arguments/Returns follow uma_dtor specifications
1870  *	udata  unused
1871  */
1872 static void
1873 keg_dtor(void *arg, int size, void *udata)
1874 {
1875 	uma_keg_t keg;
1876 
1877 	keg = (uma_keg_t)arg;
1878 	KEG_LOCK(keg);
1879 	if (keg->uk_free != 0) {
1880 		printf("Freed UMA keg (%s) was not empty (%d items). "
1881 		    "Lost %d pages of memory.\n",
1882 		    keg->uk_name ? keg->uk_name : "",
1883 		    keg->uk_free, keg->uk_pages);
1884 	}
1885 	KEG_UNLOCK(keg);
1886 
1887 	hash_free(&keg->uk_hash);
1888 
1889 	KEG_LOCK_FINI(keg);
1890 }
1891 
1892 /*
1893  * Zone header dtor.
1894  *
1895  * Arguments/Returns follow uma_dtor specifications
1896  *	udata  unused
1897  */
1898 static void
1899 zone_dtor(void *arg, int size, void *udata)
1900 {
1901 	uma_zone_t zone;
1902 	uma_keg_t keg;
1903 
1904 	zone = (uma_zone_t)arg;
1905 
1906 	if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
1907 		cache_drain(zone);
1908 
1909 	rw_wlock(&uma_rwlock);
1910 	LIST_REMOVE(zone, uz_link);
1911 	rw_wunlock(&uma_rwlock);
1912 	/*
1913 	 * XXX there is a race here: the zone can be
1914 	 * drained, but the zone lock is then released
1915 	 * and the zone refilled before we remove it
1916 	 * from the list.  We don't care about that for now.
1917 	 */
1918 	zone_drain_wait(zone, M_WAITOK);
1919 	/*
1920 	 * We only destroy kegs from non secondary zones.
1921 	 */
1922 	if ((keg = zone->uz_keg) != NULL &&
1923 	    (zone->uz_flags & UMA_ZONE_SECONDARY) == 0)  {
1924 		rw_wlock(&uma_rwlock);
1925 		LIST_REMOVE(keg, uk_link);
1926 		rw_wunlock(&uma_rwlock);
1927 		zone_free_item(kegs, keg, NULL, SKIP_NONE);
1928 	}
1929 	counter_u64_free(zone->uz_allocs);
1930 	counter_u64_free(zone->uz_frees);
1931 	counter_u64_free(zone->uz_fails);
1932 	if (zone->uz_lockptr == &zone->uz_lock)
1933 		ZONE_LOCK_FINI(zone);
1934 }
1935 
1936 /*
1937  * Traverses every zone in the system and calls a callback
1938  *
1939  * Arguments:
1940  *	zfunc  A pointer to a function which accepts a zone
1941  *		as an argument.
1942  *
1943  * Returns:
1944  *	Nothing
1945  */
1946 static void
1947 zone_foreach(void (*zfunc)(uma_zone_t))
1948 {
1949 	uma_keg_t keg;
1950 	uma_zone_t zone;
1951 
1952 	/*
1953 	 * Before BOOT_RUNNING we are guaranteed to be single
1954 	 * threaded, so locking isn't needed. Startup functions
1955 	 * are allowed to use M_WAITOK.
1956 	 */
1957 	if (__predict_true(booted == BOOT_RUNNING))
1958 		rw_rlock(&uma_rwlock);
1959 	LIST_FOREACH(keg, &uma_kegs, uk_link) {
1960 		LIST_FOREACH(zone, &keg->uk_zones, uz_link)
1961 			zfunc(zone);
1962 	}
1963 	if (__predict_true(booted == BOOT_RUNNING))
1964 		rw_runlock(&uma_rwlock);
1965 }
1966 
1967 /*
1968  * Count how many pages we need to bootstrap.  The VM supplies its need
1969  * for early zones in the argument; we add our own zones, which consist
1970  * of the UMA Slabs, UMA Hash and 9 Bucket zones.  The zone of zones and
1971  * the zone of kegs are accounted for separately.
1972  */
1973 #define	UMA_BOOT_ZONES	11
1974 /* Zone of zones and zone of kegs have arbitrary alignment. */
1975 #define	UMA_BOOT_ALIGN	32
1976 static int zsize, ksize;
1977 int
1978 uma_startup_count(int vm_zones)
1979 {
1980 	int zones, pages;
1981 
1982 	ksize = sizeof(struct uma_keg) +
1983 	    (sizeof(struct uma_domain) * vm_ndomains);
1984 	zsize = sizeof(struct uma_zone) +
1985 	    (sizeof(struct uma_cache) * (mp_maxid + 1)) +
1986 	    (sizeof(struct uma_zone_domain) * vm_ndomains);
1987 
1988 	/*
1989 	 * Memory for the zone of kegs and its keg,
1990 	 * and for zone of zones.
1991 	 */
1992 	pages = howmany(roundup(zsize, CACHE_LINE_SIZE) * 2 +
1993 	    roundup(ksize, CACHE_LINE_SIZE), PAGE_SIZE);
1994 
1995 #ifdef	UMA_MD_SMALL_ALLOC
1996 	zones = UMA_BOOT_ZONES;
1997 #else
1998 	zones = UMA_BOOT_ZONES + vm_zones;
1999 	vm_zones = 0;
2000 #endif
2001 
2002 	/* Memory for the rest of startup zones, UMA and VM, ... */
2003 	if (zsize > UMA_SLAB_SPACE) {
2004 		/* See keg_large_init(). */
2005 		u_int ppera;
2006 
2007 		ppera = howmany(roundup2(zsize, UMA_BOOT_ALIGN), PAGE_SIZE);
2008 		if (PAGE_SIZE * ppera - roundup2(zsize, UMA_BOOT_ALIGN) <
2009 		    SIZEOF_UMA_SLAB)
2010 			ppera++;
2011 		pages += (zones + vm_zones) * ppera;
2012 	} else if (roundup2(zsize, UMA_BOOT_ALIGN) > UMA_SLAB_SPACE)
2013 		/* See keg_small_init() special case for uk_ppera = 1. */
2014 		pages += zones;
2015 	else
2016 		pages += howmany(zones,
2017 		    UMA_SLAB_SPACE / roundup2(zsize, UMA_BOOT_ALIGN));
2018 
2019 	/* ... and their kegs. Note that zone of zones allocates a keg! */
2020 	pages += howmany(zones + 1,
2021 	    UMA_SLAB_SPACE / roundup2(ksize, UMA_BOOT_ALIGN));
2022 
2023 	/*
2024 	 * Most of the startup zones are not going to be offpage, which is
2025 	 * why we use UMA_SLAB_SPACE instead of UMA_SLAB_SIZE in all of the
2026 	 * calculations above.  Some large bucket zones will be offpage and
2027 	 * thus will allocate hashes.  We take the conservative approach
2028 	 * and assume that all zones may allocate a hash.  This may give
2029 	 * us some positive inaccuracy, usually an extra single page.
2030 	 */
2031 	pages += howmany(zones, UMA_SLAB_SPACE /
2032 	    (sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT));
2033 
2034 	return (pages);
2035 }
2036 
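/*
 * The calculation in uma_startup_count() above is easiest to see with a
 * worked example.  The numbers below are illustrative only (the real
 * zsize and ksize depend on mp_maxid and vm_ndomains): with PAGE_SIZE =
 * 4096, CACHE_LINE_SIZE = 64, and hypothetical zsize = 4200 and
 * ksize = 800, the first term (zone of kegs, its keg, and zone of zones)
 * works out to
 *
 *	howmany(roundup(4200, 64) * 2 + roundup(800, 64), 4096)
 *	    = howmany(4224 * 2 + 832, 4096)
 *	    = howmany(9280, 4096) = 3 pages.
 *
 * The remaining terms are computed the same way, but per slab rather
 * than per page, using UMA_SLAB_SPACE and UMA_BOOT_ALIGN.
 */
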
2037 void
2038 uma_startup(void *mem, int npages)
2039 {
2040 	struct uma_zctor_args args;
2041 	uma_keg_t masterkeg;
2042 	uintptr_t m;
2043 
2044 #ifdef DIAGNOSTIC
2045 	printf("Entering %s with %d boot pages configured\n", __func__, npages);
2046 #endif
2047 
2048 	rw_init(&uma_rwlock, "UMA lock");
2049 
2050 	/* Use bootpages memory for the zone of zones and zone of kegs. */
2051 	m = (uintptr_t)mem;
2052 	zones = (uma_zone_t)m;
2053 	m += roundup(zsize, CACHE_LINE_SIZE);
2054 	kegs = (uma_zone_t)m;
2055 	m += roundup(zsize, CACHE_LINE_SIZE);
2056 	masterkeg = (uma_keg_t)m;
2057 	m += roundup(ksize, CACHE_LINE_SIZE);
2058 	m = roundup(m, PAGE_SIZE);
2059 	npages -= (m - (uintptr_t)mem) / PAGE_SIZE;
2060 	mem = (void *)m;
2061 
2062 	/* "manually" create the initial zone */
2063 	memset(&args, 0, sizeof(args));
2064 	args.name = "UMA Kegs";
2065 	args.size = ksize;
2066 	args.ctor = keg_ctor;
2067 	args.dtor = keg_dtor;
2068 	args.uminit = zero_init;
2069 	args.fini = NULL;
2070 	args.keg = masterkeg;
2071 	args.align = UMA_BOOT_ALIGN - 1;
2072 	args.flags = UMA_ZFLAG_INTERNAL;
2073 	zone_ctor(kegs, zsize, &args, M_WAITOK);
2074 
2075 	bootmem = mem;
2076 	boot_pages = npages;
2077 
2078 	args.name = "UMA Zones";
2079 	args.size = zsize;
2080 	args.ctor = zone_ctor;
2081 	args.dtor = zone_dtor;
2082 	args.uminit = zero_init;
2083 	args.fini = NULL;
2084 	args.keg = NULL;
2085 	args.align = UMA_BOOT_ALIGN - 1;
2086 	args.flags = UMA_ZFLAG_INTERNAL;
2087 	zone_ctor(zones, zsize, &args, M_WAITOK);
2088 
2089 	/* Now make a zone for slab headers */
2090 	slabzone = uma_zcreate("UMA Slabs",
2091 				sizeof(struct uma_slab),
2092 				NULL, NULL, NULL, NULL,
2093 				UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
2094 
2095 	hashzone = uma_zcreate("UMA Hash",
2096 	    sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
2097 	    NULL, NULL, NULL, NULL,
2098 	    UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
2099 
2100 	bucket_init();
2101 
2102 	booted = BOOT_STRAPPED;
2103 }
2104 
2105 void
2106 uma_startup1(void)
2107 {
2108 
2109 #ifdef DIAGNOSTIC
2110 	printf("Entering %s with %d boot pages left\n", __func__, boot_pages);
2111 #endif
2112 	booted = BOOT_PAGEALLOC;
2113 }
2114 
2115 void
2116 uma_startup2(void)
2117 {
2118 
2119 #ifdef DIAGNOSTIC
2120 	printf("Entering %s with %d boot pages left\n", __func__, boot_pages);
2121 #endif
2122 	booted = BOOT_BUCKETS;
2123 	sx_init(&uma_drain_lock, "umadrain");
2124 	bucket_enable();
2125 }
2126 
2127 /*
2128  * Finish UMA startup: allocate counters for the early zones and
2129  * initialize our callout handle for the periodic UMA timer.
2130  */
2131 static void
2132 uma_startup3(void)
2133 {
2134 
2135 #ifdef INVARIANTS
2136 	TUNABLE_INT_FETCH("vm.debug.divisor", &dbg_divisor);
2137 	uma_dbg_cnt = counter_u64_alloc(M_WAITOK);
2138 	uma_skip_cnt = counter_u64_alloc(M_WAITOK);
2139 #endif
2140 	zone_foreach(zone_alloc_counters);
2141 	callout_init(&uma_callout, 1);
2142 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
2143 	booted = BOOT_RUNNING;
2144 }
2145 
2146 static uma_keg_t
2147 uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
2148 		int align, uint32_t flags)
2149 {
2150 	struct uma_kctor_args args;
2151 
2152 	args.size = size;
2153 	args.uminit = uminit;
2154 	args.fini = fini;
2155 	args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align;
2156 	args.flags = flags;
2157 	args.zone = zone;
2158 	return (zone_alloc_item(kegs, &args, UMA_ANYDOMAIN, M_WAITOK));
2159 }
2160 
2161 /* Public functions */
2162 /* See uma.h */
2163 void
2164 uma_set_align(int align)
2165 {
2166 
2167 	if (align != UMA_ALIGN_CACHE)
2168 		uma_align_cache = align;
2169 }
2170 
2171 /* See uma.h */
2172 uma_zone_t
2173 uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
2174 		uma_init uminit, uma_fini fini, int align, uint32_t flags)
2175 
2176 {
2177 	struct uma_zctor_args args;
2178 	uma_zone_t res;
2179 	bool locked;
2180 
2181 	KASSERT(powerof2(align + 1), ("invalid zone alignment %d for \"%s\"",
2182 	    align, name));
2183 
2184 	/* This stuff is essential for the zone ctor */
2185 	memset(&args, 0, sizeof(args));
2186 	args.name = name;
2187 	args.size = size;
2188 	args.ctor = ctor;
2189 	args.dtor = dtor;
2190 	args.uminit = uminit;
2191 	args.fini = fini;
2192 #ifdef  INVARIANTS
2193 	/*
2194 	 * If a zone is being created with an empty constructor and
2195 	 * destructor, pass UMA constructor/destructor which checks for
2196 	 * memory use after free.
2197 	 */
2198 	if ((!(flags & (UMA_ZONE_ZINIT | UMA_ZONE_NOFREE))) &&
2199 	    ctor == NULL && dtor == NULL && uminit == NULL && fini == NULL) {
2200 		args.ctor = trash_ctor;
2201 		args.dtor = trash_dtor;
2202 		args.uminit = trash_init;
2203 		args.fini = trash_fini;
2204 	}
2205 #endif
2206 	args.align = align;
2207 	args.flags = flags;
2208 	args.keg = NULL;
2209 
2210 	if (booted < BOOT_BUCKETS) {
2211 		locked = false;
2212 	} else {
2213 		sx_slock(&uma_drain_lock);
2214 		locked = true;
2215 	}
2216 	res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK);
2217 	if (locked)
2218 		sx_sunlock(&uma_drain_lock);
2219 	return (res);
2220 }
2221 
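/*
 * A minimal usage sketch for uma_zcreate() consumers; the "foo" zone and
 * structure are hypothetical and not part of UMA:
 *
 *	struct foo { int refs; char name[16]; };
 *	static uma_zone_t foo_zone;
 *
 *	foo_zone = uma_zcreate("foo", sizeof(struct foo),
 *	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
 *	fp = uma_zalloc(foo_zone, M_WAITOK | M_ZERO);
 *	...
 *	uma_zfree(foo_zone, fp);
 *	uma_zdestroy(foo_zone);
 *
 * The ctor/dtor pair runs on every allocation and free, while uminit and
 * fini run less often, when backing memory is set up and torn down; see
 * uma.h for the exact contract.
 */
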
2222 /* See uma.h */
2223 uma_zone_t
2224 uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
2225 		    uma_init zinit, uma_fini zfini, uma_zone_t master)
2226 {
2227 	struct uma_zctor_args args;
2228 	uma_keg_t keg;
2229 	uma_zone_t res;
2230 	bool locked;
2231 
2232 	keg = master->uz_keg;
2233 	memset(&args, 0, sizeof(args));
2234 	args.name = name;
2235 	args.size = keg->uk_size;
2236 	args.ctor = ctor;
2237 	args.dtor = dtor;
2238 	args.uminit = zinit;
2239 	args.fini = zfini;
2240 	args.align = keg->uk_align;
2241 	args.flags = keg->uk_flags | UMA_ZONE_SECONDARY;
2242 	args.keg = keg;
2243 
2244 	if (booted < BOOT_BUCKETS) {
2245 		locked = false;
2246 	} else {
2247 		sx_slock(&uma_drain_lock);
2248 		locked = true;
2249 	}
2250 	/* XXX Attaches only one keg of potentially many. */
2251 	res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK);
2252 	if (locked)
2253 		sx_sunlock(&uma_drain_lock);
2254 	return (res);
2255 }
2256 
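/*
 * Sketch of a secondary zone (illustrative names only): it shares the
 * master zone's keg, and thus its slabs, but layers its own ctor/dtor on
 * top, much as the mbuf packet zone does:
 *
 *	master = uma_zcreate("foo", sizeof(struct foo),
 *	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
 *	second = uma_zsecond_create("foo special", foo_sp_ctor, foo_sp_dtor,
 *	    NULL, NULL, master);
 *
 * Items allocated from either zone come from the same keg; only the
 * per-zone init/ctor behaviour differs.
 */
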
2257 /* See uma.h */
2258 uma_zone_t
2259 uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor,
2260 		    uma_init zinit, uma_fini zfini, uma_import zimport,
2261 		    uma_release zrelease, void *arg, int flags)
2262 {
2263 	struct uma_zctor_args args;
2264 
2265 	memset(&args, 0, sizeof(args));
2266 	args.name = name;
2267 	args.size = size;
2268 	args.ctor = ctor;
2269 	args.dtor = dtor;
2270 	args.uminit = zinit;
2271 	args.fini = zfini;
2272 	args.import = zimport;
2273 	args.release = zrelease;
2274 	args.arg = arg;
2275 	args.align = 0;
2276 	args.flags = flags | UMA_ZFLAG_CACHE;
2277 
2278 	return (zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK));
2279 }
2280 
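/*
 * A pure cache zone has no keg; the caller supplies import/release
 * routines that move batches of items to and from its own backend.  A
 * hypothetical sketch (the foo_backend_* helpers are assumptions, not
 * UMA interfaces):
 *
 *	static int
 *	foo_import(void *arg, void **store, int cnt, int domain, int flags)
 *	{
 *		int i;
 *
 *		for (i = 0; i < cnt; i++)
 *			if ((store[i] = foo_backend_get(arg, flags)) == NULL)
 *				break;
 *		return (i);
 *	}
 *
 *	static void
 *	foo_release(void *arg, void **store, int cnt)
 *	{
 *		int i;
 *
 *		for (i = 0; i < cnt; i++)
 *			foo_backend_put(arg, store[i]);
 *	}
 *
 *	cache = uma_zcache_create("foo cache", sizeof(struct foo),
 *	    NULL, NULL, NULL, NULL, foo_import, foo_release, backend, 0);
 */
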
2281 /* See uma.h */
2282 void
2283 uma_zdestroy(uma_zone_t zone)
2284 {
2285 
2286 	sx_slock(&uma_drain_lock);
2287 	zone_free_item(zones, zone, NULL, SKIP_NONE);
2288 	sx_sunlock(&uma_drain_lock);
2289 }
2290 
2291 void
2292 uma_zwait(uma_zone_t zone)
2293 {
2294 	void *item;
2295 
2296 	item = uma_zalloc_arg(zone, NULL, M_WAITOK);
2297 	uma_zfree(zone, item);
2298 }
2299 
2300 void *
2301 uma_zalloc_pcpu_arg(uma_zone_t zone, void *udata, int flags)
2302 {
2303 	void *item;
2304 #ifdef SMP
2305 	int i;
2306 
2307 	MPASS(zone->uz_flags & UMA_ZONE_PCPU);
2308 #endif
2309 	item = uma_zalloc_arg(zone, udata, flags & ~M_ZERO);
2310 	if (item != NULL && (flags & M_ZERO)) {
2311 #ifdef SMP
2312 		for (i = 0; i <= mp_maxid; i++)
2313 			bzero(zpcpu_get_cpu(item, i), zone->uz_size);
2314 #else
2315 		bzero(item, zone->uz_size);
2316 #endif
2317 	}
2318 	return (item);
2319 }
2320 
2321 /*
2322  * A stub while both regular and pcpu cases are identical.
2323  */
2324 void
2325 uma_zfree_pcpu_arg(uma_zone_t zone, void *item, void *udata)
2326 {
2327 
2328 #ifdef SMP
2329 	MPASS(zone->uz_flags & UMA_ZONE_PCPU);
2330 #endif
2331 	uma_zfree_arg(zone, item, udata);
2332 }
2333 
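/*
 * Sketch of a per-CPU zone consumer (hypothetical counter).  A
 * UMA_ZONE_PCPU zone returns an allocation with one uz_size-sized copy
 * per CPU; zpcpu_get_cpu() locates a particular CPU's copy:
 *
 *	pcpu_zone = uma_zcreate("foo pcpu", sizeof(uint64_t),
 *	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_PCPU);
 *	p = uma_zalloc_pcpu_arg(pcpu_zone, NULL, M_WAITOK | M_ZERO);
 *	...
 *	for (total = 0, i = 0; i <= mp_maxid; i++)
 *		total += *(uint64_t *)zpcpu_get_cpu(p, i);
 *	uma_zfree_pcpu_arg(pcpu_zone, p, NULL);
 */
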
2334 /* See uma.h */
2335 void *
2336 uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
2337 {
2338 	uma_zone_domain_t zdom;
2339 	uma_bucket_t bucket;
2340 	uma_cache_t cache;
2341 	void *item;
2342 	int cpu, domain, lockfail, maxbucket;
2343 #ifdef INVARIANTS
2344 	bool skipdbg;
2345 #endif
2346 
2347 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
2348 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
2349 
2350 	/* This is the fast path allocation */
2351 	CTR4(KTR_UMA, "uma_zalloc_arg thread %x zone %s(%p) flags %d",
2352 	    curthread, zone->uz_name, zone, flags);
2353 
2354 	if (flags & M_WAITOK) {
2355 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2356 		    "uma_zalloc_arg: zone \"%s\"", zone->uz_name);
2357 	}
2358 	KASSERT((flags & M_EXEC) == 0, ("uma_zalloc_arg: called with M_EXEC"));
2359 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
2360 	    ("uma_zalloc_arg: called with spinlock or critical section held"));
2361 	if (zone->uz_flags & UMA_ZONE_PCPU)
2362 		KASSERT((flags & M_ZERO) == 0, ("allocating from a pcpu zone "
2363 		    "with M_ZERO passed"));
2364 
2365 #ifdef DEBUG_MEMGUARD
2366 	if (memguard_cmp_zone(zone)) {
2367 		item = memguard_alloc(zone->uz_size, flags);
2368 		if (item != NULL) {
2369 			if (zone->uz_init != NULL &&
2370 			    zone->uz_init(item, zone->uz_size, flags) != 0)
2371 				return (NULL);
2372 			if (zone->uz_ctor != NULL &&
2373 			    zone->uz_ctor(item, zone->uz_size, udata,
2374 			    flags) != 0) {
2375 			    	zone->uz_fini(item, zone->uz_size);
2376 				return (NULL);
2377 			}
2378 			return (item);
2379 		}
2380 		/* This is unfortunate but should not be fatal. */
2381 	}
2382 #endif
2383 	/*
2384 	 * If possible, allocate from the per-CPU cache.  There are two
2385 	 * requirements for safe access to the per-CPU cache: (1) the thread
2386 	 * accessing the cache must not be preempted or yield during access,
2387 	 * and (2) the thread must not migrate CPUs without switching which
2388 	 * cache it accesses.  We rely on a critical section to prevent
2389 	 * preemption and migration.  We release the critical section in
2390 	 * order to acquire the zone mutex if we are unable to allocate from
2391 	 * the current cache; when we re-acquire the critical section, we
2392 	 * must detect and handle migration if it has occurred.
2393 	 */
2394 zalloc_restart:
2395 	critical_enter();
2396 	cpu = curcpu;
2397 	cache = &zone->uz_cpu[cpu];
2398 
2399 zalloc_start:
2400 	bucket = cache->uc_allocbucket;
2401 	if (bucket != NULL && bucket->ub_cnt > 0) {
2402 		bucket->ub_cnt--;
2403 		item = bucket->ub_bucket[bucket->ub_cnt];
2404 #ifdef INVARIANTS
2405 		bucket->ub_bucket[bucket->ub_cnt] = NULL;
2406 #endif
2407 		KASSERT(item != NULL, ("uma_zalloc: Bucket pointer mangled."));
2408 		cache->uc_allocs++;
2409 		critical_exit();
2410 #ifdef INVARIANTS
2411 		skipdbg = uma_dbg_zskip(zone, item);
2412 #endif
2413 		if (zone->uz_ctor != NULL &&
2414 #ifdef INVARIANTS
2415 		    (!skipdbg || zone->uz_ctor != trash_ctor ||
2416 		    zone->uz_dtor != trash_dtor) &&
2417 #endif
2418 		    zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
2419 			counter_u64_add(zone->uz_fails, 1);
2420 			zone_free_item(zone, item, udata, SKIP_DTOR | SKIP_CNT);
2421 			return (NULL);
2422 		}
2423 #ifdef INVARIANTS
2424 		if (!skipdbg)
2425 			uma_dbg_alloc(zone, NULL, item);
2426 #endif
2427 		if (flags & M_ZERO)
2428 			uma_zero_item(item, zone);
2429 		return (item);
2430 	}
2431 
2432 	/*
2433 	 * We have run out of items in our alloc bucket.
2434 	 * See if we can switch with our free bucket.
2435 	 */
2436 	bucket = cache->uc_freebucket;
2437 	if (bucket != NULL && bucket->ub_cnt > 0) {
2438 		CTR2(KTR_UMA,
2439 		    "uma_zalloc: zone %s(%p) swapping empty with alloc",
2440 		    zone->uz_name, zone);
2441 		cache->uc_freebucket = cache->uc_allocbucket;
2442 		cache->uc_allocbucket = bucket;
2443 		goto zalloc_start;
2444 	}
2445 
2446 	/*
2447 	 * Discard any empty allocation bucket while we hold no locks.
2448 	 */
2449 	bucket = cache->uc_allocbucket;
2450 	cache->uc_allocbucket = NULL;
2451 	critical_exit();
2452 	if (bucket != NULL)
2453 		bucket_free(zone, bucket, udata);
2454 
2455 	if (zone->uz_flags & UMA_ZONE_NUMA) {
2456 		domain = PCPU_GET(domain);
2457 		if (VM_DOMAIN_EMPTY(domain))
2458 			domain = UMA_ANYDOMAIN;
2459 	} else
2460 		domain = UMA_ANYDOMAIN;
2461 
2462 	/* Short-circuit for zones without buckets and low memory. */
2463 	if (zone->uz_count == 0 || bucketdisable) {
2464 		ZONE_LOCK(zone);
2465 		goto zalloc_item;
2466 	}
2467 
2468 	/*
2469 	 * The attempt to retrieve the item from the per-CPU cache has failed, so
2470 	 * we must go back to the zone.  This requires the zone lock, so we
2471 	 * must drop the critical section, then re-acquire it when we go back
2472 	 * to the cache.  Since the critical section is released, we may be
2473 	 * preempted or migrate.  As such, make sure not to maintain any
2474 	 * thread-local state specific to the cache from prior to releasing
2475 	 * the critical section.
2476 	 */
2477 	lockfail = 0;
2478 	if (ZONE_TRYLOCK(zone) == 0) {
2479 		/* Record contention to size the buckets. */
2480 		ZONE_LOCK(zone);
2481 		lockfail = 1;
2482 	}
2483 	critical_enter();
2484 	cpu = curcpu;
2485 	cache = &zone->uz_cpu[cpu];
2486 
2487 	/* See if we lost the race to fill the cache. */
2488 	if (cache->uc_allocbucket != NULL) {
2489 		ZONE_UNLOCK(zone);
2490 		goto zalloc_start;
2491 	}
2492 
2493 	/*
2494 	 * Check the zone's cache of buckets.
2495 	 */
2496 	if (domain == UMA_ANYDOMAIN)
2497 		zdom = &zone->uz_domain[0];
2498 	else
2499 		zdom = &zone->uz_domain[domain];
2500 	if ((bucket = zone_try_fetch_bucket(zone, zdom, true)) != NULL) {
2501 		KASSERT(bucket->ub_cnt != 0,
2502 		    ("uma_zalloc_arg: Returning an empty bucket."));
2503 		cache->uc_allocbucket = bucket;
2504 		ZONE_UNLOCK(zone);
2505 		goto zalloc_start;
2506 	}
2507 	/* We are no longer associated with this CPU. */
2508 	critical_exit();
2509 
2510 	/*
2511 	 * We bump the uz count when the cache size is insufficient to
2512 	 * handle the working set.
2513 	 */
2514 	if (lockfail && zone->uz_count < zone->uz_count_max)
2515 		zone->uz_count++;
2516 
2517 	if (zone->uz_max_items > 0) {
2518 		if (zone->uz_items >= zone->uz_max_items)
2519 			goto zalloc_item;
2520 		maxbucket = MIN(zone->uz_count,
2521 		    zone->uz_max_items - zone->uz_items);
2522 		zone->uz_items += maxbucket;
2523 	} else
2524 		maxbucket = zone->uz_count;
2525 	ZONE_UNLOCK(zone);
2526 
2527 	/*
2528 	 * Now let's just fill a bucket and put it on the free list.  If that
2529 	 * works we'll restart the allocation from the beginning and it
2530 	 * will use the just-filled bucket.
2531 	 */
2532 	bucket = zone_alloc_bucket(zone, udata, domain, flags, maxbucket);
2533 	CTR3(KTR_UMA, "uma_zalloc: zone %s(%p) bucket zone returned %p",
2534 	    zone->uz_name, zone, bucket);
2535 	ZONE_LOCK(zone);
2536 	if (bucket != NULL) {
2537 		if (zone->uz_max_items > 0 && bucket->ub_cnt < maxbucket) {
2538 			MPASS(zone->uz_items >= maxbucket - bucket->ub_cnt);
2539 			zone->uz_items -= maxbucket - bucket->ub_cnt;
2540 			if (zone->uz_sleepers > 0 &&
2541 			    zone->uz_items < zone->uz_max_items)
2542 				wakeup_one(zone);
2543 		}
2544 		critical_enter();
2545 		cpu = curcpu;
2546 		cache = &zone->uz_cpu[cpu];
2547 
2548 		/*
2549 		 * See if we lost the race or were migrated.  Cache the
2550 		 * initialized bucket to make this less likely or claim
2551 		 * the memory directly.
2552 		 */
2553 		if (cache->uc_allocbucket == NULL &&
2554 		    ((zone->uz_flags & UMA_ZONE_NUMA) == 0 ||
2555 		    domain == PCPU_GET(domain))) {
2556 			cache->uc_allocbucket = bucket;
2557 			zdom->uzd_imax += bucket->ub_cnt;
2558 		} else if (zone->uz_bkt_count >= zone->uz_bkt_max) {
2559 			critical_exit();
2560 			ZONE_UNLOCK(zone);
2561 			bucket_drain(zone, bucket);
2562 			bucket_free(zone, bucket, udata);
2563 			goto zalloc_restart;
2564 		} else
2565 			zone_put_bucket(zone, zdom, bucket, false);
2566 		ZONE_UNLOCK(zone);
2567 		goto zalloc_start;
2568 	} else if (zone->uz_max_items > 0) {
2569 		zone->uz_items -= maxbucket;
2570 		if (zone->uz_sleepers > 0 &&
2571 		    zone->uz_items + 1 < zone->uz_max_items)
2572 			wakeup_one(zone);
2573 	}
2574 
2575 	/*
2576 	 * We may not be able to get a bucket so return an actual item.
2577 	 */
2578 zalloc_item:
2579 	item = zone_alloc_item_locked(zone, udata, domain, flags);
2580 
2581 	return (item);
2582 }
2583 
2584 void *
2585 uma_zalloc_domain(uma_zone_t zone, void *udata, int domain, int flags)
2586 {
2587 
2588 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
2589 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
2590 
2591 	/* This is the fast path allocation */
2592 	CTR5(KTR_UMA,
2593 	    "uma_zalloc_domain thread %x zone %s(%p) domain %d flags %d",
2594 	    curthread, zone->uz_name, zone, domain, flags);
2595 
2596 	if (flags & M_WAITOK) {
2597 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2598 		    "uma_zalloc_domain: zone \"%s\"", zone->uz_name);
2599 	}
2600 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
2601 	    ("uma_zalloc_domain: called with spinlock or critical section held"));
2602 
2603 	return (zone_alloc_item(zone, udata, domain, flags));
2604 }
2605 
2606 /*
2607  * Find a slab with some space.  Prefer partially used slabs over completely
2608  * free slabs; this helps to reduce fragmentation.
2609  *
2610  * If 'rr' is true, search all domains starting from 'domain'.  Otherwise
2611  * check only 'domain'.
2612  */
2613 static uma_slab_t
2614 keg_first_slab(uma_keg_t keg, int domain, bool rr)
2615 {
2616 	uma_domain_t dom;
2617 	uma_slab_t slab;
2618 	int start;
2619 
2620 	KASSERT(domain >= 0 && domain < vm_ndomains,
2621 	    ("keg_first_slab: domain %d out of range", domain));
2622 	KEG_LOCK_ASSERT(keg);
2623 
2624 	slab = NULL;
2625 	start = domain;
2626 	do {
2627 		dom = &keg->uk_domain[domain];
2628 		if (!LIST_EMPTY(&dom->ud_part_slab))
2629 			return (LIST_FIRST(&dom->ud_part_slab));
2630 		if (!LIST_EMPTY(&dom->ud_free_slab)) {
2631 			slab = LIST_FIRST(&dom->ud_free_slab);
2632 			LIST_REMOVE(slab, us_link);
2633 			LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
2634 			return (slab);
2635 		}
2636 		if (rr)
2637 			domain = (domain + 1) % vm_ndomains;
2638 	} while (domain != start);
2639 
2640 	return (NULL);
2641 }
2642 
2643 static uma_slab_t
2644 keg_fetch_free_slab(uma_keg_t keg, int domain, bool rr, int flags)
2645 {
2646 	uint32_t reserve;
2647 
2648 	KEG_LOCK_ASSERT(keg);
2649 
2650 	reserve = (flags & M_USE_RESERVE) != 0 ? 0 : keg->uk_reserve;
2651 	if (keg->uk_free <= reserve)
2652 		return (NULL);
2653 	return (keg_first_slab(keg, domain, rr));
2654 }
2655 
2656 static uma_slab_t
2657 keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int rdomain, const int flags)
2658 {
2659 	struct vm_domainset_iter di;
2660 	uma_domain_t dom;
2661 	uma_slab_t slab;
2662 	int aflags, domain;
2663 	bool rr;
2664 
2665 restart:
2666 	KEG_LOCK_ASSERT(keg);
2667 
2668 	/*
2669 	 * Use the keg's policy if upper layers haven't already specified a
2670 	 * domain (as happens with first-touch zones).
2671 	 *
2672 	 * To avoid races we run the iterator with the keg lock held, but that
2673 	 * means that we cannot allow the vm_domainset layer to sleep.  Thus,
2674 	 * clear M_WAITOK and handle low memory conditions locally.
2675 	 */
2676 	rr = rdomain == UMA_ANYDOMAIN;
2677 	if (rr) {
2678 		aflags = (flags & ~M_WAITOK) | M_NOWAIT;
2679 		vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain,
2680 		    &aflags);
2681 	} else {
2682 		aflags = flags;
2683 		domain = rdomain;
2684 	}
2685 
2686 	for (;;) {
2687 		slab = keg_fetch_free_slab(keg, domain, rr, flags);
2688 		if (slab != NULL) {
2689 			MPASS(slab->us_keg == keg);
2690 			return (slab);
2691 		}
2692 
2693 		/*
2694 		 * M_NOVM means don't ask at all!
2695 		 */
2696 		if (flags & M_NOVM)
2697 			break;
2698 
2699 		KASSERT(zone->uz_max_items == 0 ||
2700 		    zone->uz_items <= zone->uz_max_items,
2701 		    ("%s: zone %p overflow", __func__, zone));
2702 
2703 		slab = keg_alloc_slab(keg, zone, domain, flags, aflags);
2704 		/*
2705 		 * If we got a slab here it's safe to mark it partially used
2706 		 * and return.  We assume that the caller is going to remove
2707 		 * at least one item.
2708 		 */
2709 		if (slab) {
2710 			MPASS(slab->us_keg == keg);
2711 			dom = &keg->uk_domain[slab->us_domain];
2712 			LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
2713 			return (slab);
2714 		}
2715 		KEG_LOCK(keg);
2716 		if (rr && vm_domainset_iter_policy(&di, &domain) != 0) {
2717 			if ((flags & M_WAITOK) != 0) {
2718 				KEG_UNLOCK(keg);
2719 				vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask);
2720 				KEG_LOCK(keg);
2721 				goto restart;
2722 			}
2723 			break;
2724 		}
2725 	}
2726 
2727 	/*
2728 	 * We might not have been able to get a slab but another cpu
2729 	 * could have while we were unlocked.  Check again before we
2730 	 * fail.
2731 	 */
2732 	if ((slab = keg_fetch_free_slab(keg, domain, rr, flags)) != NULL) {
2733 		MPASS(slab->us_keg == keg);
2734 		return (slab);
2735 	}
2736 	return (NULL);
2737 }
2738 
2739 static uma_slab_t
2740 zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int domain, int flags)
2741 {
2742 	uma_slab_t slab;
2743 
2744 	if (keg == NULL) {
2745 		keg = zone->uz_keg;
2746 		KEG_LOCK(keg);
2747 	}
2748 
2749 	for (;;) {
2750 		slab = keg_fetch_slab(keg, zone, domain, flags);
2751 		if (slab)
2752 			return (slab);
2753 		if (flags & (M_NOWAIT | M_NOVM))
2754 			break;
2755 	}
2756 	KEG_UNLOCK(keg);
2757 	return (NULL);
2758 }
2759 
2760 static void *
2761 slab_alloc_item(uma_keg_t keg, uma_slab_t slab)
2762 {
2763 	uma_domain_t dom;
2764 	void *item;
2765 	uint8_t freei;
2766 
2767 	MPASS(keg == slab->us_keg);
2768 	KEG_LOCK_ASSERT(keg);
2769 
2770 	freei = BIT_FFS(SLAB_SETSIZE, &slab->us_free) - 1;
2771 	BIT_CLR(SLAB_SETSIZE, freei, &slab->us_free);
2772 	item = slab->us_data + (keg->uk_rsize * freei);
2773 	slab->us_freecount--;
2774 	keg->uk_free--;
2775 
2776 	/* Move this slab to the full list */
2777 	if (slab->us_freecount == 0) {
2778 		LIST_REMOVE(slab, us_link);
2779 		dom = &keg->uk_domain[slab->us_domain];
2780 		LIST_INSERT_HEAD(&dom->ud_full_slab, slab, us_link);
2781 	}
2782 
2783 	return (item);
2784 }
2785 
2786 static int
2787 zone_import(uma_zone_t zone, void **bucket, int max, int domain, int flags)
2788 {
2789 	uma_slab_t slab;
2790 	uma_keg_t keg;
2791 #ifdef NUMA
2792 	int stripe;
2793 #endif
2794 	int i;
2795 
2796 	slab = NULL;
2797 	keg = NULL;
2798 	/* Try to keep the buckets totally full */
2799 	for (i = 0; i < max; ) {
2800 		if ((slab = zone->uz_slab(zone, keg, domain, flags)) == NULL)
2801 			break;
2802 		keg = slab->us_keg;
2803 #ifdef NUMA
2804 		stripe = howmany(max, vm_ndomains);
2805 #endif
2806 		while (slab->us_freecount && i < max) {
2807 			bucket[i++] = slab_alloc_item(keg, slab);
2808 			if (keg->uk_free <= keg->uk_reserve)
2809 				break;
2810 #ifdef NUMA
2811 			/*
2812 			 * If the zone is striped we pick a new slab for every
2813 			 * N allocations.  Eliminating this conditional will
2814 			 * instead pick a new domain for each bucket rather
2815 			 * than stripe within each bucket.  The current option
2816 			 * produces more fragmentation and requires more cpu
2817 			 * time but yields better distribution.
2818 			 */
2819 			if ((zone->uz_flags & UMA_ZONE_NUMA) == 0 &&
2820 			    vm_ndomains > 1 && --stripe == 0)
2821 				break;
2822 #endif
2823 		}
2824 		/* Don't block if we allocated any successfully. */
2825 		flags &= ~M_WAITOK;
2826 		flags |= M_NOWAIT;
2827 	}
2828 	if (slab != NULL)
2829 		KEG_UNLOCK(keg);
2830 
2831 	return (i);
2832 }
2833 
2834 static uma_bucket_t
2835 zone_alloc_bucket(uma_zone_t zone, void *udata, int domain, int flags, int max)
2836 {
2837 	uma_bucket_t bucket;
2838 
2839 	CTR1(KTR_UMA, "zone_alloc_bucket: domain %d", domain);
2840 
2841 	/* Don't wait for buckets, preserve caller's NOVM setting. */
2842 	bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM));
2843 	if (bucket == NULL)
2844 		return (NULL);
2845 
2846 	bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket,
2847 	    max, domain, flags);
2848 
2849 	/*
2850 	 * Initialize the memory if necessary.
2851 	 */
2852 	if (bucket->ub_cnt != 0 && zone->uz_init != NULL) {
2853 		int i;
2854 
2855 		for (i = 0; i < bucket->ub_cnt; i++)
2856 			if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size,
2857 			    flags) != 0)
2858 				break;
2859 		/*
2860 		 * If we couldn't initialize the whole bucket, put the
2861 		 * rest back onto the freelist.
2862 		 */
2863 		if (i != bucket->ub_cnt) {
2864 			zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i],
2865 			    bucket->ub_cnt - i);
2866 #ifdef INVARIANTS
2867 			bzero(&bucket->ub_bucket[i],
2868 			    sizeof(void *) * (bucket->ub_cnt - i));
2869 #endif
2870 			bucket->ub_cnt = i;
2871 		}
2872 	}
2873 
2874 	if (bucket->ub_cnt == 0) {
2875 		bucket_free(zone, bucket, udata);
2876 		counter_u64_add(zone->uz_fails, 1);
2877 		return (NULL);
2878 	}
2879 
2880 	return (bucket);
2881 }
2882 
2883 /*
2884  * Allocates a single item from a zone.
2885  *
2886  * Arguments
2887  *	zone   The zone to alloc for.
2888  *	udata  The data to be passed to the constructor.
2889  *	domain The domain to allocate from or UMA_ANYDOMAIN.
2890  *	flags  M_WAITOK, M_NOWAIT, M_ZERO.
2891  *
2892  * Returns
2893  *	NULL if there is no memory and M_NOWAIT is set
2894  *	An item if successful
2895  */
2896 
2897 static void *
2898 zone_alloc_item(uma_zone_t zone, void *udata, int domain, int flags)
2899 {
2900 
2901 	ZONE_LOCK(zone);
2902 	return (zone_alloc_item_locked(zone, udata, domain, flags));
2903 }
2904 
2905 /*
2906  * Returns with zone unlocked.
2907  */
2908 static void *
2909 zone_alloc_item_locked(uma_zone_t zone, void *udata, int domain, int flags)
2910 {
2911 	void *item;
2912 #ifdef INVARIANTS
2913 	bool skipdbg;
2914 #endif
2915 
2916 	ZONE_LOCK_ASSERT(zone);
2917 
2918 	if (zone->uz_max_items > 0) {
2919 		if (zone->uz_items >= zone->uz_max_items) {
2920 			zone_log_warning(zone);
2921 			zone_maxaction(zone);
2922 			if (flags & M_NOWAIT) {
2923 				ZONE_UNLOCK(zone);
2924 				return (NULL);
2925 			}
2926 			zone->uz_sleeps++;
2927 			zone->uz_sleepers++;
2928 			while (zone->uz_items >= zone->uz_max_items)
2929 				mtx_sleep(zone, zone->uz_lockptr, PVM,
2930 				    "zonelimit", 0);
2931 			zone->uz_sleepers--;
2932 			if (zone->uz_sleepers > 0 &&
2933 			    zone->uz_items + 1 < zone->uz_max_items)
2934 				wakeup_one(zone);
2935 		}
2936 		zone->uz_items++;
2937 	}
2938 	ZONE_UNLOCK(zone);
2939 
2940 	if (domain != UMA_ANYDOMAIN) {
2941 		/* avoid allocs targeting empty domains */
2942 		if (VM_DOMAIN_EMPTY(domain))
2943 			domain = UMA_ANYDOMAIN;
2944 	}
2945 	if (zone->uz_import(zone->uz_arg, &item, 1, domain, flags) != 1)
2946 		goto fail;
2947 
2948 #ifdef INVARIANTS
2949 	skipdbg = uma_dbg_zskip(zone, item);
2950 #endif
2951 	/*
2952 	 * We have to call both the zone's init (not the keg's init)
2953 	 * and the zone's ctor.  This is because the item is going from
2954 	 * a keg slab directly to the user, and the user is expecting it
2955 	 * to be both zone-init'd as well as zone-ctor'd.
2956 	 */
2957 	if (zone->uz_init != NULL) {
2958 		if (zone->uz_init(item, zone->uz_size, flags) != 0) {
2959 			zone_free_item(zone, item, udata, SKIP_FINI | SKIP_CNT);
2960 			goto fail;
2961 		}
2962 	}
2963 	if (zone->uz_ctor != NULL &&
2964 #ifdef INVARIANTS
2965 	    (!skipdbg || zone->uz_ctor != trash_ctor ||
2966 	    zone->uz_dtor != trash_dtor) &&
2967 #endif
2968 	    zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
2969 		zone_free_item(zone, item, udata, SKIP_DTOR | SKIP_CNT);
2970 		goto fail;
2971 	}
2972 #ifdef INVARIANTS
2973 	if (!skipdbg)
2974 		uma_dbg_alloc(zone, NULL, item);
2975 #endif
2976 	if (flags & M_ZERO)
2977 		uma_zero_item(item, zone);
2978 
2979 	counter_u64_add(zone->uz_allocs, 1);
2980 	CTR3(KTR_UMA, "zone_alloc_item item %p from %s(%p)", item,
2981 	    zone->uz_name, zone);
2982 
2983 	return (item);
2984 
2985 fail:
2986 	if (zone->uz_max_items > 0) {
2987 		ZONE_LOCK(zone);
2988 		zone->uz_items--;
2989 		ZONE_UNLOCK(zone);
2990 	}
2991 	counter_u64_add(zone->uz_fails, 1);
2992 	CTR2(KTR_UMA, "zone_alloc_item failed from %s(%p)",
2993 	    zone->uz_name, zone);
2994 	return (NULL);
2995 }
2996 
2997 /* See uma.h */
2998 void
2999 uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
3000 {
3001 	uma_cache_t cache;
3002 	uma_bucket_t bucket;
3003 	uma_zone_domain_t zdom;
3004 	int cpu, domain;
3005 	bool lockfail;
3006 #ifdef INVARIANTS
3007 	bool skipdbg;
3008 #endif
3009 
3010 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
3011 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
3012 
3013 	CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread,
3014 	    zone->uz_name);
3015 
3016 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
3017 	    ("uma_zfree_arg: called with spinlock or critical section held"));
3018 
3019         /* uma_zfree(..., NULL) does nothing, to match free(9). */
3020         if (item == NULL)
3021                 return;
3022 #ifdef DEBUG_MEMGUARD
3023 	if (is_memguard_addr(item)) {
3024 		if (zone->uz_dtor != NULL)
3025 			zone->uz_dtor(item, zone->uz_size, udata);
3026 		if (zone->uz_fini != NULL)
3027 			zone->uz_fini(item, zone->uz_size);
3028 		memguard_free(item);
3029 		return;
3030 	}
3031 #endif
3032 #ifdef INVARIANTS
3033 	skipdbg = uma_dbg_zskip(zone, item);
3034 	if (skipdbg == false) {
3035 		if (zone->uz_flags & UMA_ZONE_MALLOC)
3036 			uma_dbg_free(zone, udata, item);
3037 		else
3038 			uma_dbg_free(zone, NULL, item);
3039 	}
3040 	if (zone->uz_dtor != NULL && (!skipdbg ||
3041 	    zone->uz_dtor != trash_dtor || zone->uz_ctor != trash_ctor))
3042 #else
3043 	if (zone->uz_dtor != NULL)
3044 #endif
3045 		zone->uz_dtor(item, zone->uz_size, udata);
3046 
3047 	/*
3048 	 * The race here is acceptable.  If we miss it we'll just have to wait
3049 	 * a little longer for the limits to be reset.
3050 	 */
3051 	if (zone->uz_sleepers > 0)
3052 		goto zfree_item;
3053 
3054 	/*
3055 	 * If possible, free to the per-CPU cache.  There are two
3056 	 * requirements for safe access to the per-CPU cache: (1) the thread
3057 	 * accessing the cache must not be preempted or yield during access,
3058 	 * and (2) the thread must not migrate CPUs without switching which
3059 	 * cache it accesses.  We rely on a critical section to prevent
3060 	 * preemption and migration.  We release the critical section in
3061 	 * order to acquire the zone mutex if we are unable to free to the
3062 	 * current cache; when we re-acquire the critical section, we must
3063 	 * detect and handle migration if it has occurred.
3064 	 */
3065 zfree_restart:
3066 	critical_enter();
3067 	cpu = curcpu;
3068 	cache = &zone->uz_cpu[cpu];
3069 
3070 zfree_start:
3071 	/*
3072 	 * Try to free into the allocbucket first to give LIFO ordering
3073 	 * for cache-hot data structures.  Spill over into the freebucket
3074 	 * if necessary.  Alloc will swap them if one runs dry.
3075 	 */
3076 	bucket = cache->uc_allocbucket;
3077 	if (bucket == NULL || bucket->ub_cnt >= bucket->ub_entries)
3078 		bucket = cache->uc_freebucket;
3079 	if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
3080 		KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL,
3081 		    ("uma_zfree: Freeing to non free bucket index."));
3082 		bucket->ub_bucket[bucket->ub_cnt] = item;
3083 		bucket->ub_cnt++;
3084 		cache->uc_frees++;
3085 		critical_exit();
3086 		return;
3087 	}
3088 
3089 	/*
3090 	 * We must go back to the zone, which requires acquiring the zone lock,
3091 	 * which in turn means we must release and re-acquire the critical
3092 	 * section.  Since the critical section is released, we may be
3093 	 * preempted or migrate.  As such, make sure not to maintain any
3094 	 * thread-local state specific to the cache from prior to releasing
3095 	 * the critical section.
3096 	 */
3097 	critical_exit();
3098 	if (zone->uz_count == 0 || bucketdisable)
3099 		goto zfree_item;
3100 
3101 	lockfail = false;
3102 	if (ZONE_TRYLOCK(zone) == 0) {
3103 		/* Record contention to size the buckets. */
3104 		ZONE_LOCK(zone);
3105 		lockfail = true;
3106 	}
3107 	critical_enter();
3108 	cpu = curcpu;
3109 	cache = &zone->uz_cpu[cpu];
3110 
3111 	bucket = cache->uc_freebucket;
3112 	if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
3113 		ZONE_UNLOCK(zone);
3114 		goto zfree_start;
3115 	}
3116 	cache->uc_freebucket = NULL;
3117 	/* We are no longer associated with this CPU. */
3118 	critical_exit();
3119 
3120 	if ((zone->uz_flags & UMA_ZONE_NUMA) != 0) {
3121 		domain = PCPU_GET(domain);
3122 		if (VM_DOMAIN_EMPTY(domain))
3123 			domain = UMA_ANYDOMAIN;
3124 	} else
3125 		domain = 0;
3126 	zdom = &zone->uz_domain[0];
3127 
3128 	/* Can we throw this on the zone full list? */
3129 	if (bucket != NULL) {
3130 		CTR3(KTR_UMA,
3131 		    "uma_zfree: zone %s(%p) putting bucket %p on free list",
3132 		    zone->uz_name, zone, bucket);
3133 		/* ub_cnt is pointing to the last free item */
3134 		KASSERT(bucket->ub_cnt == bucket->ub_entries,
3135 		    ("uma_zfree: Attempting to insert a partly full bucket onto the full list."));
3136 		if (zone->uz_bkt_count >= zone->uz_bkt_max) {
3137 			ZONE_UNLOCK(zone);
3138 			bucket_drain(zone, bucket);
3139 			bucket_free(zone, bucket, udata);
3140 			goto zfree_restart;
3141 		} else
3142 			zone_put_bucket(zone, zdom, bucket, true);
3143 	}
3144 
3145 	/*
3146 	 * We bump the uz count when the cache size is insufficient to
3147 	 * handle the working set.
3148 	 */
3149 	if (lockfail && zone->uz_count < zone->uz_count_max)
3150 		zone->uz_count++;
3151 	ZONE_UNLOCK(zone);
3152 
3153 	bucket = bucket_alloc(zone, udata, M_NOWAIT);
3154 	CTR3(KTR_UMA, "uma_zfree: zone %s(%p) allocated bucket %p",
3155 	    zone->uz_name, zone, bucket);
3156 	if (bucket) {
3157 		critical_enter();
3158 		cpu = curcpu;
3159 		cache = &zone->uz_cpu[cpu];
3160 		if (cache->uc_freebucket == NULL &&
3161 		    ((zone->uz_flags & UMA_ZONE_NUMA) == 0 ||
3162 		    domain == PCPU_GET(domain))) {
3163 			cache->uc_freebucket = bucket;
3164 			goto zfree_start;
3165 		}
3166 		/*
3167 		 * We lost the race, start over.  We have to drop our
3168 		 * critical section to free the bucket.
3169 		 */
3170 		critical_exit();
3171 		bucket_free(zone, bucket, udata);
3172 		goto zfree_restart;
3173 	}
3174 
3175 	/*
3176 	 * If nothing else caught this, we'll just do an internal free.
3177 	 */
3178 zfree_item:
3179 	zone_free_item(zone, item, udata, SKIP_DTOR);
3180 }
3181 
3182 void
3183 uma_zfree_domain(uma_zone_t zone, void *item, void *udata)
3184 {
3185 
3186 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
3187 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
3188 
3189 	CTR2(KTR_UMA, "uma_zfree_domain thread %x zone %s", curthread,
3190 	    zone->uz_name);
3191 
3192 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
3193 	    ("uma_zfree_domain: called with spinlock or critical section held"));
3194 
3195         /* uma_zfree(..., NULL) does nothing, to match free(9). */
3196         if (item == NULL)
3197                 return;
3198 	zone_free_item(zone, item, udata, SKIP_NONE);
3199 }
3200 
3201 static void
3202 slab_free_item(uma_zone_t zone, uma_slab_t slab, void *item)
3203 {
3204 	uma_keg_t keg;
3205 	uma_domain_t dom;
3206 	uint8_t freei;
3207 
3208 	keg = zone->uz_keg;
3209 	MPASS(zone->uz_lockptr == &keg->uk_lock);
3210 	KEG_LOCK_ASSERT(keg);
3211 	MPASS(keg == slab->us_keg);
3212 
3213 	dom = &keg->uk_domain[slab->us_domain];
3214 
3215 	/* Do we need to remove from any lists? */
3216 	if (slab->us_freecount+1 == keg->uk_ipers) {
3217 		LIST_REMOVE(slab, us_link);
3218 		LIST_INSERT_HEAD(&dom->ud_free_slab, slab, us_link);
3219 	} else if (slab->us_freecount == 0) {
3220 		LIST_REMOVE(slab, us_link);
3221 		LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
3222 	}
3223 
3224 	/* Slab management. */
3225 	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
3226 	BIT_SET(SLAB_SETSIZE, freei, &slab->us_free);
3227 	slab->us_freecount++;
3228 
3229 	/* Keg statistics. */
3230 	keg->uk_free++;
3231 }
3232 
3233 static void
3234 zone_release(uma_zone_t zone, void **bucket, int cnt)
3235 {
3236 	void *item;
3237 	uma_slab_t slab;
3238 	uma_keg_t keg;
3239 	uint8_t *mem;
3240 	int i;
3241 
3242 	keg = zone->uz_keg;
3243 	KEG_LOCK(keg);
3244 	for (i = 0; i < cnt; i++) {
3245 		item = bucket[i];
3246 		if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) {
3247 			mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
3248 			if (zone->uz_flags & UMA_ZONE_HASH) {
3249 				slab = hash_sfind(&keg->uk_hash, mem);
3250 			} else {
3251 				mem += keg->uk_pgoff;
3252 				slab = (uma_slab_t)mem;
3253 			}
3254 		} else {
3255 			slab = vtoslab((vm_offset_t)item);
3256 			MPASS(slab->us_keg == keg);
3257 		}
3258 		slab_free_item(zone, slab, item);
3259 	}
3260 	KEG_UNLOCK(keg);
3261 }
3262 
3263 /*
3264  * Frees a single item to any zone.
3265  *
3266  * Arguments:
3267  *	zone   The zone to free to
3268  *	item   The item we're freeing
3269  *	udata  User supplied data for the dtor
3270  *	skip   Skip dtors and finis
3271  */
3272 static void
3273 zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip)
3274 {
3275 #ifdef INVARIANTS
3276 	bool skipdbg;
3277 
3278 	skipdbg = uma_dbg_zskip(zone, item);
3279 	if (skip == SKIP_NONE && !skipdbg) {
3280 		if (zone->uz_flags & UMA_ZONE_MALLOC)
3281 			uma_dbg_free(zone, udata, item);
3282 		else
3283 			uma_dbg_free(zone, NULL, item);
3284 	}
3285 
3286 	if (skip < SKIP_DTOR && zone->uz_dtor != NULL &&
3287 	    (!skipdbg || zone->uz_dtor != trash_dtor ||
3288 	    zone->uz_ctor != trash_ctor))
3289 #else
3290 	if (skip < SKIP_DTOR && zone->uz_dtor != NULL)
3291 #endif
3292 		zone->uz_dtor(item, zone->uz_size, udata);
3293 
3294 	if (skip < SKIP_FINI && zone->uz_fini)
3295 		zone->uz_fini(item, zone->uz_size);
3296 
3297 	zone->uz_release(zone->uz_arg, &item, 1);
3298 
3299 	if (skip & SKIP_CNT)
3300 		return;
3301 
3302 	counter_u64_add(zone->uz_frees, 1);
3303 
3304 	if (zone->uz_max_items > 0) {
3305 		ZONE_LOCK(zone);
3306 		zone->uz_items--;
3307 		if (zone->uz_sleepers > 0 &&
3308 		    zone->uz_items < zone->uz_max_items)
3309 			wakeup_one(zone);
3310 		ZONE_UNLOCK(zone);
3311 	}
3312 }
3313 
3314 /* See uma.h */
3315 int
3316 uma_zone_set_max(uma_zone_t zone, int nitems)
3317 {
3318 	struct uma_bucket_zone *ubz;
3319 
3320 	/*
3321 	 * If the limit is very low we may need to limit how
3322 	 * many items are allowed in the CPU caches.
3323 	 */
3324 	ubz = &bucket_zones[0];
3325 	for (; ubz->ubz_entries != 0; ubz++)
3326 		if (ubz->ubz_entries * 2 * mp_ncpus > nitems)
3327 			break;
3328 	if (ubz == &bucket_zones[0])
3329 		nitems = ubz->ubz_entries * 2 * mp_ncpus;
3330 	else
3331 		ubz--;
3332 
3333 	ZONE_LOCK(zone);
3334 	zone->uz_count_max = zone->uz_count = ubz->ubz_entries;
3335 	if (zone->uz_count_min > zone->uz_count_max)
3336 		zone->uz_count_min = zone->uz_count_max;
3337 	zone->uz_max_items = nitems;
3338 	ZONE_UNLOCK(zone);
3339 
3340 	return (nitems);
3341 }
3342 
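/*
 * Note that the limit actually installed may be higher than requested,
 * because very small limits are rounded up to leave room for the per-CPU
 * bucket caches; callers should use the return value.  A small
 * illustrative example (the zone and count are hypothetical):
 *
 *	real_max = uma_zone_set_max(foo_zone, 8);
 *	if (real_max != 8)
 *		printf("foo zone limit rounded up to %d items\n", real_max);
 */
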
3343 /* See uma.h */
3344 int
3345 uma_zone_set_maxcache(uma_zone_t zone, int nitems)
3346 {
3347 
3348 	ZONE_LOCK(zone);
3349 	zone->uz_bkt_max = nitems;
3350 	ZONE_UNLOCK(zone);
3351 
3352 	return (nitems);
3353 }
3354 
3355 /* See uma.h */
3356 int
3357 uma_zone_get_max(uma_zone_t zone)
3358 {
3359 	int nitems;
3360 
3361 	ZONE_LOCK(zone);
3362 	nitems = zone->uz_max_items;
3363 	ZONE_UNLOCK(zone);
3364 
3365 	return (nitems);
3366 }
3367 
3368 /* See uma.h */
3369 void
3370 uma_zone_set_warning(uma_zone_t zone, const char *warning)
3371 {
3372 
3373 	ZONE_LOCK(zone);
3374 	zone->uz_warning = warning;
3375 	ZONE_UNLOCK(zone);
3376 }
3377 
3378 /* See uma.h */
3379 void
3380 uma_zone_set_maxaction(uma_zone_t zone, uma_maxaction_t maxaction)
3381 {
3382 
3383 	ZONE_LOCK(zone);
3384 	TASK_INIT(&zone->uz_maxaction, 0, (task_fn_t *)maxaction, zone);
3385 	ZONE_UNLOCK(zone);
3386 }
3387 
3388 /* See uma.h */
3389 int
3390 uma_zone_get_cur(uma_zone_t zone)
3391 {
3392 	int64_t nitems;
3393 	u_int i;
3394 
3395 	ZONE_LOCK(zone);
3396 	nitems = counter_u64_fetch(zone->uz_allocs) -
3397 	    counter_u64_fetch(zone->uz_frees);
3398 	CPU_FOREACH(i) {
3399 		/*
3400 		 * See the comment in sysctl_vm_zone_stats() regarding the
3401 		 * safety of accessing the per-cpu caches. With the zone lock
3402 		 * held, it is safe, but can potentially result in stale data.
3403 		 */
3404 		nitems += zone->uz_cpu[i].uc_allocs -
3405 		    zone->uz_cpu[i].uc_frees;
3406 	}
3407 	ZONE_UNLOCK(zone);
3408 
3409 	return (nitems < 0 ? 0 : nitems);
3410 }
3411 
3412 /* See uma.h */
3413 void
3414 uma_zone_set_init(uma_zone_t zone, uma_init uminit)
3415 {
3416 	uma_keg_t keg;
3417 
3418 	KEG_GET(zone, keg);
3419 	KEG_LOCK(keg);
3420 	KASSERT(keg->uk_pages == 0,
3421 	    ("uma_zone_set_init on non-empty keg"));
3422 	keg->uk_init = uminit;
3423 	KEG_UNLOCK(keg);
3424 }
3425 
3426 /* See uma.h */
3427 void
3428 uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
3429 {
3430 	uma_keg_t keg;
3431 
3432 	KEG_GET(zone, keg);
3433 	KEG_LOCK(keg);
3434 	KASSERT(keg->uk_pages == 0,
3435 	    ("uma_zone_set_fini on non-empty keg"));
3436 	keg->uk_fini = fini;
3437 	KEG_UNLOCK(keg);
3438 }
3439 
3440 /* See uma.h */
3441 void
3442 uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
3443 {
3444 
3445 	ZONE_LOCK(zone);
3446 	KASSERT(zone->uz_keg->uk_pages == 0,
3447 	    ("uma_zone_set_zinit on non-empty keg"));
3448 	zone->uz_init = zinit;
3449 	ZONE_UNLOCK(zone);
3450 }
3451 
3452 /* See uma.h */
3453 void
3454 uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
3455 {
3456 
3457 	ZONE_LOCK(zone);
3458 	KASSERT(zone->uz_keg->uk_pages == 0,
3459 	    ("uma_zone_set_zfini on non-empty keg"));
3460 	zone->uz_fini = zfini;
3461 	ZONE_UNLOCK(zone);
3462 }
3463 
3464 /* See uma.h */
3465 /* XXX uk_freef is not actually used with the zone locked */
3466 void
3467 uma_zone_set_freef(uma_zone_t zone, uma_free freef)
3468 {
3469 	uma_keg_t keg;
3470 
3471 	KEG_GET(zone, keg);
3472 	KASSERT(keg != NULL, ("uma_zone_set_freef: Invalid zone type"));
3473 	KEG_LOCK(keg);
3474 	keg->uk_freef = freef;
3475 	KEG_UNLOCK(keg);
3476 }
3477 
3478 /* See uma.h */
3479 /* XXX uk_allocf is not actually used with the zone locked */
3480 void
3481 uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
3482 {
3483 	uma_keg_t keg;
3484 
3485 	KEG_GET(zone, keg);
3486 	KEG_LOCK(keg);
3487 	keg->uk_allocf = allocf;
3488 	KEG_UNLOCK(keg);
3489 }
3490 
3491 /* See uma.h */
3492 void
3493 uma_zone_reserve(uma_zone_t zone, int items)
3494 {
3495 	uma_keg_t keg;
3496 
3497 	KEG_GET(zone, keg);
3498 	KEG_LOCK(keg);
3499 	keg->uk_reserve = items;
3500 	KEG_UNLOCK(keg);
3501 }
3502 
3503 /* See uma.h */
3504 int
3505 uma_zone_reserve_kva(uma_zone_t zone, int count)
3506 {
3507 	uma_keg_t keg;
3508 	vm_offset_t kva;
3509 	u_int pages;
3510 
3511 	KEG_GET(zone, keg);
3512 
3513 	pages = count / keg->uk_ipers;
3514 	if (pages * keg->uk_ipers < count)
3515 		pages++;
3516 	pages *= keg->uk_ppera;
3517 
3518 #ifdef UMA_MD_SMALL_ALLOC
3519 	if (keg->uk_ppera > 1) {
3520 #else
3521 	if (1) {
3522 #endif
3523 		kva = kva_alloc((vm_size_t)pages * PAGE_SIZE);
3524 		if (kva == 0)
3525 			return (0);
3526 	} else
3527 		kva = 0;
3528 
3529 	ZONE_LOCK(zone);
3530 	MPASS(keg->uk_kva == 0);
3531 	keg->uk_kva = kva;
3532 	keg->uk_offset = 0;
3533 	zone->uz_max_items = pages * keg->uk_ipers;
3534 #ifdef UMA_MD_SMALL_ALLOC
3535 	keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc;
3536 #else
3537 	keg->uk_allocf = noobj_alloc;
3538 #endif
3539 	keg->uk_flags |= UMA_ZONE_NOFREE;
3540 	ZONE_UNLOCK(zone);
3541 
3542 	return (1);
3543 }
3544 
3545 /* See uma.h */
3546 void
3547 uma_prealloc(uma_zone_t zone, int items)
3548 {
3549 	struct vm_domainset_iter di;
3550 	uma_domain_t dom;
3551 	uma_slab_t slab;
3552 	uma_keg_t keg;
3553 	int aflags, domain, slabs;
3554 
3555 	KEG_GET(zone, keg);
3556 	KEG_LOCK(keg);
3557 	slabs = items / keg->uk_ipers;
3558 	if (slabs * keg->uk_ipers < items)
3559 		slabs++;
3560 	while (slabs-- > 0) {
3561 		aflags = M_NOWAIT;
3562 		vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain,
3563 		    &aflags);
3564 		for (;;) {
3565 			slab = keg_alloc_slab(keg, zone, domain, M_WAITOK,
3566 			    aflags);
3567 			if (slab != NULL) {
3568 				MPASS(slab->us_keg == keg);
3569 				dom = &keg->uk_domain[slab->us_domain];
3570 				LIST_INSERT_HEAD(&dom->ud_free_slab, slab,
3571 				    us_link);
3572 				break;
3573 			}
3574 			KEG_LOCK(keg);
3575 			if (vm_domainset_iter_policy(&di, &domain) != 0) {
3576 				KEG_UNLOCK(keg);
3577 				vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask);
3578 				KEG_LOCK(keg);
3579 			}
3580 		}
3581 	}
3582 	KEG_UNLOCK(keg);
3583 }
3584 
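/*
 * uma_zone_reserve() and uma_prealloc() are typically combined so that a
 * zone keeps items set aside for critical, non-sleepable consumers,
 * which then allocate with M_USE_RESERVE.  A hypothetical sketch (the
 * "foo" zone and the count are illustrative):
 *
 *	uma_zone_reserve(foo_zone, 32);
 *	uma_prealloc(foo_zone, 32);
 *	...
 *	fp = uma_zalloc(foo_zone, M_NOWAIT | M_USE_RESERVE);
 *
 * Ordinary allocations will fail (or sleep) before dipping below the
 * reserve; see keg_fetch_free_slab() above.
 */
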
3585 /* See uma.h */
3586 static void
3587 uma_reclaim_locked(bool kmem_danger)
3588 {
3589 
3590 	CTR0(KTR_UMA, "UMA: vm asked us to release pages!");
3591 	sx_assert(&uma_drain_lock, SA_XLOCKED);
3592 	bucket_enable();
3593 	zone_foreach(zone_drain);
3594 	if (vm_page_count_min() || kmem_danger) {
3595 		cache_drain_safe(NULL);
3596 		zone_foreach(zone_drain);
3597 	}
3598 
3599 	/*
3600 	 * Some slabs may have been freed but this zone was visited early in
3601 	 * the loop above, so visit it again to free pages that became empty
3602 	 * once the other zones were drained.  We do the same for buckets.
3603 	 */
3604 	zone_drain(slabzone);
3605 	bucket_zone_drain();
3606 }
3607 
3608 void
3609 uma_reclaim(void)
3610 {
3611 
3612 	sx_xlock(&uma_drain_lock);
3613 	uma_reclaim_locked(false);
3614 	sx_xunlock(&uma_drain_lock);
3615 }
3616 
3617 static volatile int uma_reclaim_needed;
3618 
3619 void
3620 uma_reclaim_wakeup(void)
3621 {
3622 
3623 	if (atomic_fetchadd_int(&uma_reclaim_needed, 1) == 0)
3624 		wakeup(uma_reclaim);
3625 }
3626 
3627 void
3628 uma_reclaim_worker(void *arg __unused)
3629 {
3630 
3631 	for (;;) {
3632 		sx_xlock(&uma_drain_lock);
3633 		while (atomic_load_int(&uma_reclaim_needed) == 0)
3634 			sx_sleep(uma_reclaim, &uma_drain_lock, PVM, "umarcl",
3635 			    hz);
3636 		sx_xunlock(&uma_drain_lock);
3637 		EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_KMEM);
3638 		sx_xlock(&uma_drain_lock);
3639 		uma_reclaim_locked(true);
3640 		atomic_store_int(&uma_reclaim_needed, 0);
3641 		sx_xunlock(&uma_drain_lock);
3642 		/* Don't fire more than once per-second. */
3643 		pause("umarclslp", hz);
3644 	}
3645 }
3646 
3647 /* See uma.h */
3648 int
3649 uma_zone_exhausted(uma_zone_t zone)
3650 {
3651 	int full;
3652 
3653 	ZONE_LOCK(zone);
3654 	full = zone->uz_sleepers > 0;
3655 	ZONE_UNLOCK(zone);
3656 	return (full);
3657 }
3658 
3659 int
3660 uma_zone_exhausted_nolock(uma_zone_t zone)
3661 {
3662 	return (zone->uz_sleepers > 0);
3663 }
3664 
3665 void *
3666 uma_large_malloc_domain(vm_size_t size, int domain, int wait)
3667 {
3668 	struct domainset *policy;
3669 	vm_offset_t addr;
3670 	uma_slab_t slab;
3671 
3672 	if (domain != UMA_ANYDOMAIN) {
3673 		/* avoid allocs targeting empty domains */
3674 		if (VM_DOMAIN_EMPTY(domain))
3675 			domain = UMA_ANYDOMAIN;
3676 	}
3677 	slab = zone_alloc_item(slabzone, NULL, domain, wait);
3678 	if (slab == NULL)
3679 		return (NULL);
3680 	policy = (domain == UMA_ANYDOMAIN) ? DOMAINSET_RR() :
3681 	    DOMAINSET_FIXED(domain);
3682 	addr = kmem_malloc_domainset(policy, size, wait);
3683 	if (addr != 0) {
3684 		vsetslab(addr, slab);
3685 		slab->us_data = (void *)addr;
3686 		slab->us_flags = UMA_SLAB_KERNEL | UMA_SLAB_MALLOC;
3687 		slab->us_size = size;
3688 		slab->us_domain = vm_phys_domain(PHYS_TO_VM_PAGE(
3689 		    pmap_kextract(addr)));
3690 		uma_total_inc(size);
3691 	} else {
3692 		zone_free_item(slabzone, slab, NULL, SKIP_NONE);
3693 	}
3694 
3695 	return ((void *)addr);
3696 }
3697 
3698 void *
3699 uma_large_malloc(vm_size_t size, int wait)
3700 {
3701 
3702 	return uma_large_malloc_domain(size, UMA_ANYDOMAIN, wait);
3703 }
3704 
3705 void
3706 uma_large_free(uma_slab_t slab)
3707 {
3708 
3709 	KASSERT((slab->us_flags & UMA_SLAB_KERNEL) != 0,
3710 	    ("uma_large_free:  Memory not allocated with uma_large_malloc."));
3711 	kmem_free((vm_offset_t)slab->us_data, slab->us_size);
3712 	uma_total_dec(slab->us_size);
3713 	zone_free_item(slabzone, slab, NULL, SKIP_NONE);
3714 }
3715 
3716 static void
3717 uma_zero_item(void *item, uma_zone_t zone)
3718 {
3719 
3720 	bzero(item, zone->uz_size);
3721 }
3722 
3723 unsigned long
3724 uma_limit(void)
3725 {
3726 
3727 	return (uma_kmem_limit);
3728 }
3729 
3730 void
3731 uma_set_limit(unsigned long limit)
3732 {
3733 
3734 	uma_kmem_limit = limit;
3735 }
3736 
3737 unsigned long
3738 uma_size(void)
3739 {
3740 
3741 	return (uma_kmem_total);
3742 }
3743 
3744 long
3745 uma_avail(void)
3746 {
3747 
3748 	return (uma_kmem_limit - uma_kmem_total);
3749 }
3750 
3751 void
3752 uma_print_stats(void)
3753 {
3754 	zone_foreach(uma_print_zone);
3755 }
3756 
3757 static void
3758 slab_print(uma_slab_t slab)
3759 {
3760 	printf("slab: keg %p, data %p, freecount %d\n",
3761 		slab->us_keg, slab->us_data, slab->us_freecount);
3762 }
3763 
3764 static void
3765 cache_print(uma_cache_t cache)
3766 {
3767 	printf("alloc: %p(%d), free: %p(%d)\n",
3768 		cache->uc_allocbucket,
3769 		cache->uc_allocbucket?cache->uc_allocbucket->ub_cnt:0,
3770 		cache->uc_freebucket,
3771 		cache->uc_freebucket?cache->uc_freebucket->ub_cnt:0);
3772 }
3773 
3774 static void
3775 uma_print_keg(uma_keg_t keg)
3776 {
3777 	uma_domain_t dom;
3778 	uma_slab_t slab;
3779 	int i;
3780 
3781 	printf("keg: %s(%p) size %d(%d) flags %#x ipers %d ppera %d "
3782 	    "out %d free %d\n",
3783 	    keg->uk_name, keg, keg->uk_size, keg->uk_rsize, keg->uk_flags,
3784 	    keg->uk_ipers, keg->uk_ppera,
3785 	    (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free,
3786 	    keg->uk_free);
3787 	for (i = 0; i < vm_ndomains; i++) {
3788 		dom = &keg->uk_domain[i];
3789 		printf("Part slabs:\n");
3790 		LIST_FOREACH(slab, &dom->ud_part_slab, us_link)
3791 			slab_print(slab);
3792 		printf("Free slabs:\n");
3793 		LIST_FOREACH(slab, &dom->ud_free_slab, us_link)
3794 			slab_print(slab);
3795 		printf("Full slabs:\n");
3796 		LIST_FOREACH(slab, &dom->ud_full_slab, us_link)
3797 			slab_print(slab);
3798 	}
3799 }
3800 
3801 void
3802 uma_print_zone(uma_zone_t zone)
3803 {
3804 	uma_cache_t cache;
3805 	int i;
3806 
3807 	printf("zone: %s(%p) size %d maxitems %ju flags %#x\n",
3808 	    zone->uz_name, zone, zone->uz_size, (uintmax_t)zone->uz_max_items,
3809 	    zone->uz_flags);
3810 	if (zone->uz_lockptr != &zone->uz_lock)
3811 		uma_print_keg(zone->uz_keg);
3812 	CPU_FOREACH(i) {
3813 		cache = &zone->uz_cpu[i];
3814 		printf("CPU %d Cache:\n", i);
3815 		cache_print(cache);
3816 	}
3817 }
3818 
3819 #ifdef DDB
3820 /*
3821  * Generate statistics across both the zone and its per-CPU caches.  Return
3822  * the desired statistics via any pointer argument that is non-NULL.
3823  *
3824  * Note: does not update the zone statistics, as it can't safely clear the
3825  * per-CPU cache statistic.
3826  *
3827  * XXXRW: Following the uc_allocbucket and uc_freebucket pointers here isn't
3828  * safe from off-CPU; we should modify the caches to track this information
3829  * directly so that we don't have to.
3830  */
3831 static void
3832 uma_zone_sumstat(uma_zone_t z, long *cachefreep, uint64_t *allocsp,
3833     uint64_t *freesp, uint64_t *sleepsp)
3834 {
3835 	uma_cache_t cache;
3836 	uint64_t allocs, frees, sleeps;
3837 	int cachefree, cpu;
3838 
3839 	allocs = frees = sleeps = 0;
3840 	cachefree = 0;
3841 	CPU_FOREACH(cpu) {
3842 		cache = &z->uz_cpu[cpu];
3843 		if (cache->uc_allocbucket != NULL)
3844 			cachefree += cache->uc_allocbucket->ub_cnt;
3845 		if (cache->uc_freebucket != NULL)
3846 			cachefree += cache->uc_freebucket->ub_cnt;
3847 		allocs += cache->uc_allocs;
3848 		frees += cache->uc_frees;
3849 	}
3850 	allocs += counter_u64_fetch(z->uz_allocs);
3851 	frees += counter_u64_fetch(z->uz_frees);
3852 	sleeps += z->uz_sleeps;
3853 	if (cachefreep != NULL)
3854 		*cachefreep = cachefree;
3855 	if (allocsp != NULL)
3856 		*allocsp = allocs;
3857 	if (freesp != NULL)
3858 		*freesp = frees;
3859 	if (sleepsp != NULL)
3860 		*sleepsp = sleeps;
3861 }
3862 #endif /* DDB */
3863 
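/*
 * Count every zone on every keg; userland monitoring tools can use this
 * to size their buffers before requesting the full statistics stream
 * produced by sysctl_vm_zone_stats() below.
 */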
3864 static int
3865 sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS)
3866 {
3867 	uma_keg_t kz;
3868 	uma_zone_t z;
3869 	int count;
3870 
3871 	count = 0;
3872 	rw_rlock(&uma_rwlock);
3873 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
3874 		LIST_FOREACH(z, &kz->uk_zones, uz_link)
3875 			count++;
3876 	}
3877 	rw_runlock(&uma_rwlock);
3878 	return (sysctl_handle_int(oidp, &count, 0, req));
3879 }
3880 
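/*
 * Export a binary statistics stream: one uma_stream_header, then, for
 * every zone in keg/zone list order, a uma_type_header followed by
 * (mp_maxid + 1) uma_percpu_stat records.  Consumers must match
 * UMA_STREAM_VERSION when decoding.
 */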
3881 static int
3882 sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
3883 {
3884 	struct uma_stream_header ush;
3885 	struct uma_type_header uth;
3886 	struct uma_percpu_stat *ups;
3887 	uma_zone_domain_t zdom;
3888 	struct sbuf sbuf;
3889 	uma_cache_t cache;
3890 	uma_keg_t kz;
3891 	uma_zone_t z;
3892 	int count, error, i;
3893 
3894 	error = sysctl_wire_old_buffer(req, 0);
3895 	if (error != 0)
3896 		return (error);
3897 	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
3898 	sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL);
3899 	ups = malloc((mp_maxid + 1) * sizeof(*ups), M_TEMP, M_WAITOK);
3900 
3901 	count = 0;
3902 	rw_rlock(&uma_rwlock);
3903 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
3904 		LIST_FOREACH(z, &kz->uk_zones, uz_link)
3905 			count++;
3906 	}
3907 
3908 	/*
3909 	 * Insert stream header.
3910 	 */
3911 	bzero(&ush, sizeof(ush));
3912 	ush.ush_version = UMA_STREAM_VERSION;
3913 	ush.ush_maxcpus = (mp_maxid + 1);
3914 	ush.ush_count = count;
3915 	(void)sbuf_bcat(&sbuf, &ush, sizeof(ush));
3916 
3917 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
3918 		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
3919 			bzero(&uth, sizeof(uth));
3920 			ZONE_LOCK(z);
3921 			strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
3922 			uth.uth_align = kz->uk_align;
3923 			uth.uth_size = kz->uk_size;
3924 			uth.uth_rsize = kz->uk_rsize;
3925 			if (z->uz_max_items > 0)
3926 				uth.uth_pages = (z->uz_items / kz->uk_ipers) *
3927 					kz->uk_ppera;
3928 			else
3929 				uth.uth_pages = kz->uk_pages;
3930 			uth.uth_maxpages = (z->uz_max_items / kz->uk_ipers) *
3931 			    kz->uk_ppera;
3932 			uth.uth_limit = z->uz_max_items;
3933 			uth.uth_keg_free = z->uz_keg->uk_free;
3934 
3935 			/*
3936 			 * A zone is secondary if it is not the first entry
3937 			 * on the keg's zone list.
3938 			 */
3939 			if ((z->uz_flags & UMA_ZONE_SECONDARY) &&
3940 			    (LIST_FIRST(&kz->uk_zones) != z))
3941 				uth.uth_zone_flags = UTH_ZONE_SECONDARY;
3942 
3943 			for (i = 0; i < vm_ndomains; i++) {
3944 				zdom = &z->uz_domain[i];
3945 				uth.uth_zone_free += zdom->uzd_nitems;
3946 			}
3947 			uth.uth_allocs = counter_u64_fetch(z->uz_allocs);
3948 			uth.uth_frees = counter_u64_fetch(z->uz_frees);
3949 			uth.uth_fails = counter_u64_fetch(z->uz_fails);
3950 			uth.uth_sleeps = z->uz_sleeps;
3951 			/*
3952 			 * While it is not normally safe to access the cache
3953 			 * bucket pointers while not on the CPU that owns the
3954 			 * cache, the pointers may only be exchanged, never
3955 			 * invalidated, without the zone lock held, so
3956 			 * accept the possible race associated with bucket
3957 			 * exchange during monitoring.
3958 			 */
3959 			for (i = 0; i < mp_maxid + 1; i++) {
3960 				bzero(&ups[i], sizeof(*ups));
3961 				if (kz->uk_flags & UMA_ZFLAG_INTERNAL ||
3962 				    CPU_ABSENT(i))
3963 					continue;
3964 				cache = &z->uz_cpu[i];
3965 				if (cache->uc_allocbucket != NULL)
3966 					ups[i].ups_cache_free +=
3967 					    cache->uc_allocbucket->ub_cnt;
3968 				if (cache->uc_freebucket != NULL)
3969 					ups[i].ups_cache_free +=
3970 					    cache->uc_freebucket->ub_cnt;
3971 				ups[i].ups_allocs = cache->uc_allocs;
3972 				ups[i].ups_frees = cache->uc_frees;
3973 			}
3974 			ZONE_UNLOCK(z);
3975 			(void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
3976 			for (i = 0; i < mp_maxid + 1; i++)
3977 				(void)sbuf_bcat(&sbuf, &ups[i], sizeof(ups[i]));
3978 		}
3979 	}
3980 	rw_runlock(&uma_rwlock);
3981 	error = sbuf_finish(&sbuf);
3982 	sbuf_delete(&sbuf);
3983 	free(ups, M_TEMP);
3984 	return (error);
3985 }
3986 
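/*
 * Generic sysctl glue for a zone's item limit; the read-only counterpart
 * for the current item count follows below.  Illustrative sketch (not
 * compiled; "ctx", "parent" and "foo_zone" are hypothetical): a subsystem
 * can point a read/write integer sysctl at the variable holding its zone:
 *
 *	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(parent), OID_AUTO,
 *	    "foo_zone_max", CTLTYPE_INT | CTLFLAG_RW, &foo_zone, 0,
 *	    sysctl_handle_uma_zone_max, "I", "Maximum items in foo_zone");
 *
 * Note that arg1 must be the address of the uma_zone_t variable, since
 * the handler dereferences it.
 */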
3987 int
3988 sysctl_handle_uma_zone_max(SYSCTL_HANDLER_ARGS)
3989 {
3990 	uma_zone_t zone = *(uma_zone_t *)arg1;
3991 	int error, max;
3992 
3993 	max = uma_zone_get_max(zone);
3994 	error = sysctl_handle_int(oidp, &max, 0, req);
3995 	if (error || !req->newptr)
3996 		return (error);
3997 
3998 	uma_zone_set_max(zone, max);
3999 
4000 	return (0);
4001 }
4002 
4003 int
4004 sysctl_handle_uma_zone_cur(SYSCTL_HANDLER_ARGS)
4005 {
4006 	uma_zone_t zone = *(uma_zone_t *)arg1;
4007 	int cur;
4008 
4009 	cur = uma_zone_get_cur(zone);
4010 	return (sysctl_handle_int(oidp, &cur, 0, req));
4011 }
4012 
4013 #ifdef INVARIANTS
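/*
 * Map an item back to its slab header for the checks below.  VTOSLAB
 * zones keep the slab pointer in the vm_page; otherwise the keg either
 * hashes slab addresses (UMA_ZONE_HASH) or embeds the header at a fixed
 * offset (uk_pgoff) within the slab's memory.  Cache-only zones have no
 * keg, so NULL is returned for them.
 */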
4014 static uma_slab_t
4015 uma_dbg_getslab(uma_zone_t zone, void *item)
4016 {
4017 	uma_slab_t slab;
4018 	uma_keg_t keg;
4019 	uint8_t *mem;
4020 
4021 	mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
4022 	if (zone->uz_flags & UMA_ZONE_VTOSLAB) {
4023 		slab = vtoslab((vm_offset_t)mem);
4024 	} else {
4025 		/*
4026 		 * It is safe to return the slab here even though the
4027 		 * zone is unlocked because the item's allocation state
4028 		 * essentially holds a reference.
4029 		 */
4030 		if (zone->uz_lockptr == &zone->uz_lock)
4031 			return (NULL);
4032 		ZONE_LOCK(zone);
4033 		keg = zone->uz_keg;
4034 		if (keg->uk_flags & UMA_ZONE_HASH)
4035 			slab = hash_sfind(&keg->uk_hash, mem);
4036 		else
4037 			slab = (uma_slab_t)(mem + keg->uk_pgoff);
4038 		ZONE_UNLOCK(zone);
4039 	}
4040 
4041 	return (slab);
4042 }
4043 
4044 static bool
4045 uma_dbg_zskip(uma_zone_t zone, void *mem)
4046 {
4047 
4048 	if (zone->uz_lockptr == &zone->uz_lock)
4049 		return (true);
4050 
4051 	return (uma_dbg_kskip(zone->uz_keg, mem));
4052 }
4053 
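/*
 * Debug checking is sampled rather than exhaustive: each item gets a
 * stable index (its page frame number, scaled by items per page, plus
 * its slot within the page) and only indices divisible by dbg_divisor
 * are checked.  A divisor of 0 skips everything; 1 checks everything.
 */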
4054 static bool
4055 uma_dbg_kskip(uma_keg_t keg, void *mem)
4056 {
4057 	uintptr_t idx;
4058 
4059 	if (dbg_divisor == 0)
4060 		return (true);
4061 
4062 	if (dbg_divisor == 1)
4063 		return (false);
4064 
4065 	idx = (uintptr_t)mem >> PAGE_SHIFT;
4066 	if (keg->uk_ipers > 1) {
4067 		idx *= keg->uk_ipers;
4068 		idx += ((uintptr_t)mem & PAGE_MASK) / keg->uk_rsize;
4069 	}
4070 
4071 	if ((idx / dbg_divisor) * dbg_divisor != idx) {
4072 		counter_u64_add(uma_skip_cnt, 1);
4073 		return (true);
4074 	}
4075 	counter_u64_add(uma_dbg_cnt, 1);
4076 
4077 	return (false);
4078 }
4079 
4080 /*
4081  * Set up the slab's freei data such that uma_dbg_free can function.
4082  *
4083  */
4084 static void
4085 uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item)
4086 {
4087 	uma_keg_t keg;
4088 	int freei;
4089 
4090 	if (slab == NULL) {
4091 		slab = uma_dbg_getslab(zone, item);
4092 		if (slab == NULL)
4093 			panic("uma: item %p did not belong to zone %s\n",
4094 			    item, zone->uz_name);
4095 	}
4096 	keg = slab->us_keg;
4097 	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
4098 
4099 	if (BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree))
4100 		panic("Duplicate alloc of %p from zone %p(%s) slab %p(%d)\n",
4101 		    item, zone, zone->uz_name, slab, freei);
4102 	BIT_SET_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree);
4103 
4104 	return;
4105 }
4106 
4107 /*
4108  * Verifies freed addresses.  Checks for alignment, valid slab membership
4109  * and duplicate frees.
4110  *
4111  */
4112 static void
4113 uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
4114 {
4115 	uma_keg_t keg;
4116 	int freei;
4117 
4118 	if (slab == NULL) {
4119 		slab = uma_dbg_getslab(zone, item);
4120 		if (slab == NULL)
4121 			panic("uma: Freed item %p did not belong to zone %s\n",
4122 			    item, zone->uz_name);
4123 	}
4124 	keg = slab->us_keg;
4125 	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
4126 
4127 	if (freei >= keg->uk_ipers)
4128 		panic("Invalid free of %p from zone %p(%s) slab %p(%d)\n",
4129 		    item, zone, zone->uz_name, slab, freei);
4130 
4131 	if (((freei * keg->uk_rsize) + slab->us_data) != item)
4132 		panic("Unaligned free of %p from zone %p(%s) slab %p(%d)\n",
4133 		    item, zone, zone->uz_name, slab, freei);
4134 
4135 	if (!BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree))
4136 		panic("Duplicate free of %p from zone %p(%s) slab %p(%d)\n",
4137 		    item, zone, zone->uz_name, slab, freei);
4138 
4139 	BIT_CLR_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree);
4140 }
4141 #endif /* INVARIANTS */
4142 
4143 #ifdef DDB
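/*
 * "show uma" prints a one-line summary for every zone; "show umacache"
 * below does the same for cache-only zones.  Totals come from
 * uma_zone_sumstat() (except for internal zones), so they may be
 * slightly stale or racy, which is acceptable in the debugger.
 */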
4144 DB_SHOW_COMMAND(uma, db_show_uma)
4145 {
4146 	uma_keg_t kz;
4147 	uma_zone_t z;
4148 	uint64_t allocs, frees, sleeps;
4149 	long cachefree;
4150 	int i;
4151 
4152 	db_printf("%18s %8s %8s %8s %12s %8s %8s\n", "Zone", "Size", "Used",
4153 	    "Free", "Requests", "Sleeps", "Bucket");
4154 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
4155 		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
4156 			if (kz->uk_flags & UMA_ZFLAG_INTERNAL) {
4157 				allocs = counter_u64_fetch(z->uz_allocs);
4158 				frees = counter_u64_fetch(z->uz_frees);
4159 				sleeps = z->uz_sleeps;
4160 				cachefree = 0;
4161 			} else
4162 				uma_zone_sumstat(z, &cachefree, &allocs,
4163 				    &frees, &sleeps);
4164 			if (!((z->uz_flags & UMA_ZONE_SECONDARY) &&
4165 			    (LIST_FIRST(&kz->uk_zones) != z)))
4166 				cachefree += kz->uk_free;
4167 			for (i = 0; i < vm_ndomains; i++)
4168 				cachefree += z->uz_domain[i].uzd_nitems;
4169 
4170 			db_printf("%18s %8ju %8jd %8ld %12ju %8ju %8u\n",
4171 			    z->uz_name, (uintmax_t)kz->uk_size,
4172 			    (intmax_t)(allocs - frees), cachefree,
4173 			    (uintmax_t)allocs, sleeps, z->uz_count);
4174 			if (db_pager_quit)
4175 				return;
4176 		}
4177 	}
4178 }
4179 
4180 DB_SHOW_COMMAND(umacache, db_show_umacache)
4181 {
4182 	uma_zone_t z;
4183 	uint64_t allocs, frees;
4184 	long cachefree;
4185 	int i;
4186 
4187 	db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free",
4188 	    "Requests", "Bucket");
4189 	LIST_FOREACH(z, &uma_cachezones, uz_link) {
4190 		uma_zone_sumstat(z, &cachefree, &allocs, &frees, NULL);
4191 		for (i = 0; i < vm_ndomains; i++)
4192 			cachefree += z->uz_domain[i].uzd_nitems;
4193 		db_printf("%18s %8ju %8jd %8ld %12ju %8u\n",
4194 		    z->uz_name, (uintmax_t)z->uz_size,
4195 		    (intmax_t)(allocs - frees), cachefree,
4196 		    (uintmax_t)allocs, z->uz_count);
4197 		if (db_pager_quit)
4198 			return;
4199 	}
4200 }
4201 #endif	/* DDB */
4202