xref: /freebsd/sys/vm/uma_core.c (revision 6683132d54bd6d589889e43dabdc53d35e38a028)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson <jeff@FreeBSD.org>
5  * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
6  * Copyright (c) 2004-2006 Robert N. M. Watson
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice unmodified, this list of conditions, and the following
14  *    disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /*
32  * uma_core.c  Implementation of the Universal Memory Allocator
33  *
34  * This allocator is intended to replace the multitude of similar object caches
35  * in the standard FreeBSD kernel.  The intent is to be flexible as well as
36  * efficient.  A primary design goal is to return unused memory to the rest of
37  * the system.  This will make the system as a whole more flexible due to the
38  * ability to move memory to subsystems which most need it instead of leaving
39  * pools of reserved memory unused.
40  *
41  * The basic ideas stem from similar slab/zone based allocators whose algorithms
42  * are well known.
43  *
44  */
45 
46 /*
47  * TODO:
48  *	- Improve memory usage for large allocations
49  *	- Investigate cache size adjustments
50  */
51 
52 #include <sys/cdefs.h>
53 __FBSDID("$FreeBSD$");
54 
55 #include "opt_ddb.h"
56 #include "opt_param.h"
57 #include "opt_vm.h"
58 
59 #include <sys/param.h>
60 #include <sys/systm.h>
61 #include <sys/bitset.h>
62 #include <sys/domainset.h>
63 #include <sys/eventhandler.h>
64 #include <sys/kernel.h>
65 #include <sys/types.h>
66 #include <sys/limits.h>
67 #include <sys/queue.h>
68 #include <sys/malloc.h>
69 #include <sys/ktr.h>
70 #include <sys/lock.h>
71 #include <sys/sysctl.h>
72 #include <sys/mutex.h>
73 #include <sys/proc.h>
74 #include <sys/random.h>
75 #include <sys/rwlock.h>
76 #include <sys/sbuf.h>
77 #include <sys/sched.h>
78 #include <sys/smp.h>
79 #include <sys/taskqueue.h>
80 #include <sys/vmmeter.h>
81 
82 #include <vm/vm.h>
83 #include <vm/vm_domainset.h>
84 #include <vm/vm_object.h>
85 #include <vm/vm_page.h>
86 #include <vm/vm_pageout.h>
87 #include <vm/vm_param.h>
88 #include <vm/vm_phys.h>
89 #include <vm/vm_pagequeue.h>
90 #include <vm/vm_map.h>
91 #include <vm/vm_kern.h>
92 #include <vm/vm_extern.h>
93 #include <vm/uma.h>
94 #include <vm/uma_int.h>
95 #include <vm/uma_dbg.h>
96 
97 #include <ddb/ddb.h>
98 
99 #ifdef DEBUG_MEMGUARD
100 #include <vm/memguard.h>
101 #endif
102 
103 /*
104  * This is the zone and keg from which all zones are spawned.
105  */
106 static uma_zone_t kegs;
107 static uma_zone_t zones;
108 
109 /* This is the zone from which all offpage uma_slab_ts are allocated. */
110 static uma_zone_t slabzone;
111 
112 /*
113  * The initial hash tables come out of this zone so they can be allocated
114  * prior to malloc coming up.
115  */
116 static uma_zone_t hashzone;
117 
118 /* The boot-time adjusted value for cache line alignment. */
119 int uma_align_cache = 64 - 1;
120 
121 static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");
122 
123 /*
124  * Are we allowed to allocate buckets?
125  */
126 static int bucketdisable = 1;
127 
128 /* Linked list of all kegs in the system */
129 static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);
130 
131 /* Linked list of all cache-only zones in the system */
132 static LIST_HEAD(,uma_zone) uma_cachezones =
133     LIST_HEAD_INITIALIZER(uma_cachezones);
134 
135 /* This RW lock protects the keg list */
136 static struct rwlock_padalign __exclusive_cache_line uma_rwlock;
137 
138 /*
139  * Pointer to, and counter for, the pool of pages that is preallocated at
140  * startup to bootstrap UMA.
141  */
142 static char *bootmem;
143 static int boot_pages;
144 
145 static struct sx uma_drain_lock;
146 
147 /*
148  * kmem soft limit, initialized by uma_set_limit().  Ensure that early
149  * allocations don't trigger a wakeup of the reclaim thread.
150  */
151 static unsigned long uma_kmem_limit = LONG_MAX;
152 SYSCTL_ULONG(_vm, OID_AUTO, uma_kmem_limit, CTLFLAG_RD, &uma_kmem_limit, 0,
153     "UMA kernel memory soft limit");
154 static unsigned long uma_kmem_total;
155 SYSCTL_ULONG(_vm, OID_AUTO, uma_kmem_total, CTLFLAG_RD, &uma_kmem_total, 0,
156     "UMA kernel memory usage");
157 
158 /* Is the VM done starting up? */
159 static enum { BOOT_COLD = 0, BOOT_STRAPPED, BOOT_PAGEALLOC, BOOT_BUCKETS,
160     BOOT_RUNNING } booted = BOOT_COLD;
161 
162 /*
163  * This is the handle used to schedule events that need to happen
164  * outside of the allocation fast path.
165  */
166 static struct callout uma_callout;
167 #define	UMA_TIMEOUT	20		/* Seconds for callout interval. */
168 
169 /*
170  * This structure is passed as the zone ctor arg so that I don't have to create
171  * a special allocation function just for zones.
172  */
173 struct uma_zctor_args {
174 	const char *name;
175 	size_t size;
176 	uma_ctor ctor;
177 	uma_dtor dtor;
178 	uma_init uminit;
179 	uma_fini fini;
180 	uma_import import;
181 	uma_release release;
182 	void *arg;
183 	uma_keg_t keg;
184 	int align;
185 	uint32_t flags;
186 };
187 
188 struct uma_kctor_args {
189 	uma_zone_t zone;
190 	size_t size;
191 	uma_init uminit;
192 	uma_fini fini;
193 	int align;
194 	uint32_t flags;
195 };
196 
197 struct uma_bucket_zone {
198 	uma_zone_t	ubz_zone;
199 	char		*ubz_name;
200 	int		ubz_entries;	/* Number of items it can hold. */
201 	int		ubz_maxsize;	/* Maximum allocation size per-item. */
202 };
203 
204 /*
205  * Compute the actual number of bucket entries so that buckets pack
206  * into power-of-two sizes for more efficient space utilization.
207  */
208 #define	BUCKET_SIZE(n)						\
209     (((sizeof(void *) * (n)) - sizeof(struct uma_bucket)) / sizeof(void *))
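/*
 * For illustration, assuming an LP64 system and a 16-byte struct uma_bucket
 * header (the real header size may differ): BUCKET_SIZE(32) evaluates to
 * (8 * 32 - 16) / 8 = 30 entries, so the header plus the pointer array fill
 * exactly 32 pointer-sized slots, i.e. a power-of-two-sized allocation.
 */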
210 
211 #define	BUCKET_MAX	BUCKET_SIZE(256)
212 
213 struct uma_bucket_zone bucket_zones[] = {
214 	{ NULL, "4 Bucket", BUCKET_SIZE(4), 4096 },
215 	{ NULL, "6 Bucket", BUCKET_SIZE(6), 3072 },
216 	{ NULL, "8 Bucket", BUCKET_SIZE(8), 2048 },
217 	{ NULL, "12 Bucket", BUCKET_SIZE(12), 1536 },
218 	{ NULL, "16 Bucket", BUCKET_SIZE(16), 1024 },
219 	{ NULL, "32 Bucket", BUCKET_SIZE(32), 512 },
220 	{ NULL, "64 Bucket", BUCKET_SIZE(64), 256 },
221 	{ NULL, "128 Bucket", BUCKET_SIZE(128), 128 },
222 	{ NULL, "256 Bucket", BUCKET_SIZE(256), 64 },
223 	{ NULL, NULL, 0}
224 };
225 
226 /*
227  * Flags and enumerations to be passed to internal functions.
228  */
229 enum zfreeskip {
230 	SKIP_NONE =	0,
231 	SKIP_CNT =	0x00000001,
232 	SKIP_DTOR =	0x00010000,
233 	SKIP_FINI =	0x00020000,
234 };
235 
236 #define	UMA_ANYDOMAIN	-1	/* Special value for domain search. */
237 
238 /* Prototypes.. */
239 
240 int	uma_startup_count(int);
241 void	uma_startup(void *, int);
242 void	uma_startup1(void);
243 void	uma_startup2(void);
244 
245 static void *noobj_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
246 static void *page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
247 static void *pcpu_page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
248 static void *startup_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
249 static void page_free(void *, vm_size_t, uint8_t);
250 static void pcpu_page_free(void *, vm_size_t, uint8_t);
251 static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int, int, int);
252 static void cache_drain(uma_zone_t);
253 static void bucket_drain(uma_zone_t, uma_bucket_t);
254 static void bucket_cache_drain(uma_zone_t zone);
255 static int keg_ctor(void *, int, void *, int);
256 static void keg_dtor(void *, int, void *);
257 static int zone_ctor(void *, int, void *, int);
258 static void zone_dtor(void *, int, void *);
259 static int zero_init(void *, int, int);
260 static void keg_small_init(uma_keg_t keg);
261 static void keg_large_init(uma_keg_t keg);
262 static void zone_foreach(void (*zfunc)(uma_zone_t));
263 static void zone_timeout(uma_zone_t zone);
264 static int hash_alloc(struct uma_hash *, u_int);
265 static int hash_expand(struct uma_hash *, struct uma_hash *);
266 static void hash_free(struct uma_hash *hash);
267 static void uma_timeout(void *);
268 static void uma_startup3(void);
269 static void *zone_alloc_item(uma_zone_t, void *, int, int);
270 static void *zone_alloc_item_locked(uma_zone_t, void *, int, int);
271 static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip);
272 static void bucket_enable(void);
273 static void bucket_init(void);
274 static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int);
275 static void bucket_free(uma_zone_t zone, uma_bucket_t, void *);
276 static void bucket_zone_drain(void);
277 static uma_bucket_t zone_alloc_bucket(uma_zone_t, void *, int, int, int);
278 static uma_slab_t zone_fetch_slab(uma_zone_t, uma_keg_t, int, int);
279 static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
280 static void slab_free_item(uma_zone_t zone, uma_slab_t slab, void *item);
281 static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
282     uma_fini fini, int align, uint32_t flags);
283 static int zone_import(uma_zone_t, void **, int, int, int);
284 static void zone_release(uma_zone_t, void **, int);
285 static void uma_zero_item(void *, uma_zone_t);
286 
287 void uma_print_zone(uma_zone_t);
288 void uma_print_stats(void);
289 static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
290 static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);
291 
292 #ifdef INVARIANTS
293 static bool uma_dbg_kskip(uma_keg_t keg, void *mem);
294 static bool uma_dbg_zskip(uma_zone_t zone, void *mem);
295 static void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item);
296 static void uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item);
297 
298 static SYSCTL_NODE(_vm, OID_AUTO, debug, CTLFLAG_RD, 0,
299     "Memory allocation debugging");
300 
301 static u_int dbg_divisor = 1;
302 SYSCTL_UINT(_vm_debug, OID_AUTO, divisor,
303     CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &dbg_divisor, 0,
304     "Debug & thrash every this item in memory allocator");
305 
306 static counter_u64_t uma_dbg_cnt = EARLY_COUNTER;
307 static counter_u64_t uma_skip_cnt = EARLY_COUNTER;
308 SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, trashed, CTLFLAG_RD,
309     &uma_dbg_cnt, "memory items debugged");
310 SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, skipped, CTLFLAG_RD,
311     &uma_skip_cnt, "memory items skipped, not debugged");
312 #endif
313 
314 SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);
315 
316 SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLTYPE_INT,
317     0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones");
318 
319 SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
320     0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");
321 
322 static int zone_warnings = 1;
323 SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RWTUN, &zone_warnings, 0,
324     "Warn when a UMA zone becomes full");
325 
326 /* Adjust bytes under management by UMA. */
327 static inline void
328 uma_total_dec(unsigned long size)
329 {
330 
331 	atomic_subtract_long(&uma_kmem_total, size);
332 }
333 
334 static inline void
335 uma_total_inc(unsigned long size)
336 {
337 
338 	if (atomic_fetchadd_long(&uma_kmem_total, size) > uma_kmem_limit)
339 		uma_reclaim_wakeup();
340 }
341 
342 /*
343  * This routine checks whether it is safe to enable buckets.
344  */
345 static void
346 bucket_enable(void)
347 {
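	/* Buckets remain disabled while the system is short of free pages. */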
348 	bucketdisable = vm_page_count_min();
349 }
350 
351 /*
352  * Initialize bucket_zones, the array of zones of buckets of various sizes.
353  *
354  * For each zone, calculate the memory required for each bucket, consisting
355  * of the header and an array of pointers.
356  */
357 static void
358 bucket_init(void)
359 {
360 	struct uma_bucket_zone *ubz;
361 	int size;
362 
363 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) {
364 		size = roundup(sizeof(struct uma_bucket), sizeof(void *));
365 		size += sizeof(void *) * ubz->ubz_entries;
366 		ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
367 		    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
368 		    UMA_ZONE_MTXCLASS | UMA_ZFLAG_BUCKET | UMA_ZONE_NUMA);
369 	}
370 }
371 
372 /*
373  * Given a desired number of entries for a bucket, return the zone from which
374  * to allocate the bucket.
375  */
376 static struct uma_bucket_zone *
377 bucket_zone_lookup(int entries)
378 {
379 	struct uma_bucket_zone *ubz;
380 
381 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
382 		if (ubz->ubz_entries >= entries)
383 			return (ubz);
384 	ubz--;
385 	return (ubz);
386 }
387 
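/*
 * Given an item size, return the number of entries a per-CPU bucket should
 * hold for that zone; larger items get proportionally smaller buckets so
 * that a full bucket does not pin an excessive amount of memory.
 */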
388 static int
389 bucket_select(int size)
390 {
391 	struct uma_bucket_zone *ubz;
392 
393 	ubz = &bucket_zones[0];
394 	if (size > ubz->ubz_maxsize)
395 		return MAX((ubz->ubz_maxsize * ubz->ubz_entries) / size, 1);
396 
397 	for (; ubz->ubz_entries != 0; ubz++)
398 		if (ubz->ubz_maxsize < size)
399 			break;
400 	ubz--;
401 	return (ubz->ubz_entries);
402 }
403 
404 static uma_bucket_t
405 bucket_alloc(uma_zone_t zone, void *udata, int flags)
406 {
407 	struct uma_bucket_zone *ubz;
408 	uma_bucket_t bucket;
409 
410 	/*
411 	 * This is to stop us from allocating per cpu buckets while we're
412 	 * running out of vm.boot_pages.  Otherwise, we would exhaust the
413 	 * boot pages.  This also prevents us from allocating buckets in
414 	 * low memory situations.
415 	 */
416 	if (bucketdisable)
417 		return (NULL);
418 	/*
419 	 * To limit bucket recursion we store the original zone flags
420 	 * in a cookie passed via zalloc_arg/zfree_arg.  This allows the
421 	 * NOVM flag to persist even through deep recursions.  We also
422 	 * store ZFLAG_BUCKET once we have recursed attempting to allocate
423 	 * a bucket for a bucket zone so we do not allow infinite bucket
424 	 * recursion.  This cookie will even persist to frees of unused
425 	 * buckets via the allocation path or bucket allocations in the
426 	 * free path.
427 	 */
428 	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
429 		udata = (void *)(uintptr_t)zone->uz_flags;
430 	else {
431 		if ((uintptr_t)udata & UMA_ZFLAG_BUCKET)
432 			return (NULL);
433 		udata = (void *)((uintptr_t)udata | UMA_ZFLAG_BUCKET);
434 	}
435 	if ((uintptr_t)udata & UMA_ZFLAG_CACHEONLY)
436 		flags |= M_NOVM;
437 	ubz = bucket_zone_lookup(zone->uz_count);
438 	if (ubz->ubz_zone == zone && (ubz + 1)->ubz_entries != 0)
439 		ubz++;
440 	bucket = uma_zalloc_arg(ubz->ubz_zone, udata, flags);
441 	if (bucket) {
442 #ifdef INVARIANTS
443 		bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
444 #endif
445 		bucket->ub_cnt = 0;
446 		bucket->ub_entries = ubz->ubz_entries;
447 	}
448 
449 	return (bucket);
450 }
451 
452 static void
453 bucket_free(uma_zone_t zone, uma_bucket_t bucket, void *udata)
454 {
455 	struct uma_bucket_zone *ubz;
456 
457 	KASSERT(bucket->ub_cnt == 0,
458 	    ("bucket_free: Freeing a non free bucket."));
459 	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
460 		udata = (void *)(uintptr_t)zone->uz_flags;
461 	ubz = bucket_zone_lookup(bucket->ub_entries);
462 	uma_zfree_arg(ubz->ubz_zone, bucket, udata);
463 }
464 
465 static void
466 bucket_zone_drain(void)
467 {
468 	struct uma_bucket_zone *ubz;
469 
470 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
471 		zone_drain(ubz->ubz_zone);
472 }
473 
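/*
 * Fetch a cached bucket from the per-domain bucket list, if one is present,
 * and deduct its items from the per-domain and per-zone cached item counts.
 * When 'ws' is true, the working-set minimum watermark is updated as well.
 */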
474 static uma_bucket_t
475 zone_try_fetch_bucket(uma_zone_t zone, uma_zone_domain_t zdom, const bool ws)
476 {
477 	uma_bucket_t bucket;
478 
479 	ZONE_LOCK_ASSERT(zone);
480 
481 	if ((bucket = LIST_FIRST(&zdom->uzd_buckets)) != NULL) {
482 		MPASS(zdom->uzd_nitems >= bucket->ub_cnt);
483 		LIST_REMOVE(bucket, ub_link);
484 		zdom->uzd_nitems -= bucket->ub_cnt;
485 		if (ws && zdom->uzd_imin > zdom->uzd_nitems)
486 			zdom->uzd_imin = zdom->uzd_nitems;
487 		zone->uz_bkt_count -= bucket->ub_cnt;
488 	}
489 	return (bucket);
490 }
491 
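/*
 * Return a bucket of items to the per-domain bucket list and account for
 * them.  When 'ws' is true, the working-set maximum watermark is updated as
 * well.  The zone must be locked and must have room below uz_bkt_max.
 */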
492 static void
493 zone_put_bucket(uma_zone_t zone, uma_zone_domain_t zdom, uma_bucket_t bucket,
494     const bool ws)
495 {
496 
497 	ZONE_LOCK_ASSERT(zone);
498 	KASSERT(zone->uz_bkt_count < zone->uz_bkt_max, ("%s: zone %p overflow",
499 	    __func__, zone));
500 
501 	LIST_INSERT_HEAD(&zdom->uzd_buckets, bucket, ub_link);
502 	zdom->uzd_nitems += bucket->ub_cnt;
503 	if (ws && zdom->uzd_imax < zdom->uzd_nitems)
504 		zdom->uzd_imax = zdom->uzd_nitems;
505 	zone->uz_bkt_count += bucket->ub_cnt;
506 }
507 
508 static void
509 zone_log_warning(uma_zone_t zone)
510 {
511 	static const struct timeval warninterval = { 300, 0 };
512 
513 	if (!zone_warnings || zone->uz_warning == NULL)
514 		return;
515 
516 	if (ratecheck(&zone->uz_ratecheck, &warninterval))
517 		printf("[zone: %s] %s\n", zone->uz_name, zone->uz_warning);
518 }
519 
520 static inline void
521 zone_maxaction(uma_zone_t zone)
522 {
523 
524 	if (zone->uz_maxaction.ta_func != NULL)
525 		taskqueue_enqueue(taskqueue_thread, &zone->uz_maxaction);
526 }
527 
528 /*
529  * Routine called by the callout subsystem to fire off some time-interval
530  * based calculations.  (stats, hash size, etc.)
531  *
532  * Arguments:
533  *	arg   Unused
534  *
535  * Returns:
536  *	Nothing
537  */
538 static void
539 uma_timeout(void *unused)
540 {
541 	bucket_enable();
542 	zone_foreach(zone_timeout);
543 
544 	/* Reschedule this event */
545 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
546 }
547 
548 /*
549  * Update the working set size estimate for the zone's bucket cache.
550  * The constants chosen here are somewhat arbitrary.  With an update period of
551  * 20s (UMA_TIMEOUT), this estimate is dominated by zone activity over the
552  * last 100s.
553  */
554 static void
555 zone_domain_update_wss(uma_zone_domain_t zdom)
556 {
557 	long wss;
558 
559 	MPASS(zdom->uzd_imax >= zdom->uzd_imin);
560 	wss = zdom->uzd_imax - zdom->uzd_imin;
561 	zdom->uzd_imax = zdom->uzd_imin = zdom->uzd_nitems;
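	/*
	 * Exponential decay: each update keeps 2/5 of the previous estimate,
	 * so after five 20-second periods (about 100s) an old sample
	 * contributes only (2/5)^5, roughly 1%, of the estimate.
	 */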
562 	zdom->uzd_wss = (3 * wss + 2 * zdom->uzd_wss) / 5;
563 }
564 
565 /*
566  * Routine to perform timeout driven calculations.  This expands the
567  * keg hash tables and updates the per-domain working-set estimates.
568  *
569  *  Returns nothing.
570  */
571 static void
572 zone_timeout(uma_zone_t zone)
573 {
574 	uma_keg_t keg = zone->uz_keg;
575 	u_int slabs;
576 
577 	KEG_LOCK(keg);
578 	/*
579 	 * Expand the keg hash table.
580 	 *
581 	 * This is done if the number of slabs is larger than the hash size.
582 	 * What I'm trying to do here is eliminate collisions entirely.  This
583 	 * may be a little aggressive.  Should I allow for two collisions max?
584 	 */
585 	if (keg->uk_flags & UMA_ZONE_HASH &&
586 	    (slabs = keg->uk_pages / keg->uk_ppera) >
587 	     keg->uk_hash.uh_hashsize) {
588 		struct uma_hash newhash;
589 		struct uma_hash oldhash;
590 		int ret;
591 
592 		/*
593 		 * This is so involved because allocating and freeing
594 		 * while the keg lock is held will lead to deadlock.
595 		 * I have to do everything in stages and check for
596 		 * races.
597 		 */
598 		KEG_UNLOCK(keg);
599 		ret = hash_alloc(&newhash, 1 << fls(slabs));
600 		KEG_LOCK(keg);
601 		if (ret) {
602 			if (hash_expand(&keg->uk_hash, &newhash)) {
603 				oldhash = keg->uk_hash;
604 				keg->uk_hash = newhash;
605 			} else
606 				oldhash = newhash;
607 
608 			KEG_UNLOCK(keg);
609 			hash_free(&oldhash);
610 			return;
611 		}
612 	}
613 
614 	for (int i = 0; i < vm_ndomains; i++)
615 		zone_domain_update_wss(&zone->uz_domain[i]);
616 
617 	KEG_UNLOCK(keg);
618 }
619 
620 /*
621  * Allocate and zero fill the next sized hash table from the appropriate
622  * backing store.
623  *
624  * Arguments:
625  *	hash  An uninitialized hash structure to fill in
626  *	size  The requested hash size, which must be a power of two
627  * Returns:
628  *	1 on success and 0 on failure.
629  */
630 static int
631 hash_alloc(struct uma_hash *hash, u_int size)
632 {
633 	size_t alloc;
634 
635 	KASSERT(powerof2(size), ("hash size must be power of 2"));
636 	if (size > UMA_HASH_SIZE_INIT)  {
637 		hash->uh_hashsize = size;
638 		alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
639 		hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
640 		    M_UMAHASH, M_NOWAIT);
641 	} else {
642 		alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
643 		hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
644 		    UMA_ANYDOMAIN, M_WAITOK);
645 		hash->uh_hashsize = UMA_HASH_SIZE_INIT;
646 	}
647 	if (hash->uh_slab_hash) {
648 		bzero(hash->uh_slab_hash, alloc);
649 		hash->uh_hashmask = hash->uh_hashsize - 1;
650 		return (1);
651 	}
652 
653 	return (0);
654 }
655 
656 /*
657  * Expands the hash table for HASH zones.  This is done from zone_timeout
658  * to reduce collisions.  This must not be done in the regular allocation
659  * path, otherwise, we can recurse on the vm while allocating pages.
660  *
661  * Arguments:
662  *	oldhash  The hash you want to expand
663  *	newhash  The hash structure for the new table
664  *
665  * Returns:
666  *	Nothing
667  *
668  * Discussion:
669  */
670 static int
671 hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
672 {
673 	uma_slab_t slab;
674 	u_int hval;
675 	u_int idx;
676 
677 	if (!newhash->uh_slab_hash)
678 		return (0);
679 
680 	if (oldhash->uh_hashsize >= newhash->uh_hashsize)
681 		return (0);
682 
683 	/*
684 	 * I need to investigate hash algorithms for resizing without a
685 	 * full rehash.
686 	 */
687 
688 	for (idx = 0; idx < oldhash->uh_hashsize; idx++)
689 		while (!SLIST_EMPTY(&oldhash->uh_slab_hash[idx])) {
690 			slab = SLIST_FIRST(&oldhash->uh_slab_hash[idx]);
691 			SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[idx], us_hlink);
692 			hval = UMA_HASH(newhash, slab->us_data);
693 			SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
694 			    slab, us_hlink);
695 		}
696 
697 	return (1);
698 }
699 
700 /*
701  * Free the hash table's storage to the appropriate backing store.
702  *
703  * Arguments:
704  *	hash  The hash structure whose backing storage we're freeing;
705  *	      its size determines which backing store it is returned to
706  *
707  * Returns:
708  *	Nothing
709  */
710 static void
711 hash_free(struct uma_hash *hash)
712 {
713 	if (hash->uh_slab_hash == NULL)
714 		return;
715 	if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
716 		zone_free_item(hashzone, hash->uh_slab_hash, NULL, SKIP_NONE);
717 	else
718 		free(hash->uh_slab_hash, M_UMAHASH);
719 }
720 
721 /*
722  * Frees all outstanding items in a bucket
723  *
724  * Arguments:
725  *	zone   The zone to free to, must be unlocked.
726  *	bucket The free/alloc bucket with items, cpu queue must be locked.
727  *
728  * Returns:
729  *	Nothing
730  */
731 
732 static void
733 bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
734 {
735 	int i;
736 
737 	if (bucket == NULL)
738 		return;
739 
740 	if (zone->uz_fini)
741 		for (i = 0; i < bucket->ub_cnt; i++)
742 			zone->uz_fini(bucket->ub_bucket[i], zone->uz_size);
743 	zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt);
744 	if (zone->uz_max_items > 0) {
745 		ZONE_LOCK(zone);
746 		zone->uz_items -= bucket->ub_cnt;
747 		if (zone->uz_sleepers && zone->uz_items < zone->uz_max_items)
748 			wakeup_one(zone);
749 		ZONE_UNLOCK(zone);
750 	}
751 	bucket->ub_cnt = 0;
752 }
753 
754 /*
755  * Drains the per cpu caches for a zone.
756  *
757  * NOTE: This may only be called while the zone is being torn down, and not
758  * during normal operation.  This is necessary so that we do not have
759  * to migrate CPUs to drain the per-CPU caches.
760  *
761  * Arguments:
762  *	zone     The zone to drain, must be unlocked.
763  *
764  * Returns:
765  *	Nothing
766  */
767 static void
768 cache_drain(uma_zone_t zone)
769 {
770 	uma_cache_t cache;
771 	int cpu;
772 
773 	/*
774 	 * XXX: It is safe to not lock the per-CPU caches, because we're
775 	 * tearing down the zone anyway.  I.e., there will be no further use
776 	 * of the caches at this point.
777 	 *
778 	 * XXX: It would be good to be able to assert that the zone is being
779 	 * torn down to prevent improper use of cache_drain().
780 	 *
781 	 * XXX: We lock the zone before passing into bucket_cache_drain() as
782 	 * it is used elsewhere.  Should the tear-down path be made special
783 	 * there in some form?
784 	 */
785 	CPU_FOREACH(cpu) {
786 		cache = &zone->uz_cpu[cpu];
787 		bucket_drain(zone, cache->uc_allocbucket);
788 		bucket_drain(zone, cache->uc_freebucket);
789 		if (cache->uc_allocbucket != NULL)
790 			bucket_free(zone, cache->uc_allocbucket, NULL);
791 		if (cache->uc_freebucket != NULL)
792 			bucket_free(zone, cache->uc_freebucket, NULL);
793 		cache->uc_allocbucket = cache->uc_freebucket = NULL;
794 	}
795 	ZONE_LOCK(zone);
796 	bucket_cache_drain(zone);
797 	ZONE_UNLOCK(zone);
798 }
799 
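/*
 * Politely shrink a zone's target bucket size by moving uz_count halfway
 * towards its minimum, so that newly allocated buckets cache fewer items.
 * Internal zones are skipped.
 */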
800 static void
801 cache_shrink(uma_zone_t zone)
802 {
803 
804 	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
805 		return;
806 
807 	ZONE_LOCK(zone);
808 	zone->uz_count = (zone->uz_count_min + zone->uz_count) / 2;
809 	ZONE_UNLOCK(zone);
810 }
811 
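/*
 * Drain the per-CPU cache buckets of the current CPU: buckets that still
 * hold items are moved to the per-domain bucket cache and empty ones are
 * freed.  The caller is expected to have bound the thread to the target CPU
 * (see cache_drain_safe()).
 */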
812 static void
813 cache_drain_safe_cpu(uma_zone_t zone)
814 {
815 	uma_cache_t cache;
816 	uma_bucket_t b1, b2;
817 	int domain;
818 
819 	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
820 		return;
821 
822 	b1 = b2 = NULL;
823 	ZONE_LOCK(zone);
824 	critical_enter();
825 	if (zone->uz_flags & UMA_ZONE_NUMA)
826 		domain = PCPU_GET(domain);
827 	else
828 		domain = 0;
829 	cache = &zone->uz_cpu[curcpu];
830 	if (cache->uc_allocbucket) {
831 		if (cache->uc_allocbucket->ub_cnt != 0)
832 			zone_put_bucket(zone, &zone->uz_domain[domain],
833 			    cache->uc_allocbucket, false);
834 		else
835 			b1 = cache->uc_allocbucket;
836 		cache->uc_allocbucket = NULL;
837 	}
838 	if (cache->uc_freebucket) {
839 		if (cache->uc_freebucket->ub_cnt != 0)
840 			zone_put_bucket(zone, &zone->uz_domain[domain],
841 			    cache->uc_freebucket, false);
842 		else
843 			b2 = cache->uc_freebucket;
844 		cache->uc_freebucket = NULL;
845 	}
846 	critical_exit();
847 	ZONE_UNLOCK(zone);
848 	if (b1)
849 		bucket_free(zone, b1, NULL);
850 	if (b2)
851 		bucket_free(zone, b2, NULL);
852 }
853 
854 /*
855  * Safely drain the per-CPU caches of a zone (or of all zones) into the
856  * per-domain bucket caches.  This is an expensive call because it needs to
857  * bind to all CPUs one by one and enter a critical section on each of them
858  * in order to safely access their cache buckets.
859  * The zone lock must not be held when calling this function.
860  */
861 static void
862 cache_drain_safe(uma_zone_t zone)
863 {
864 	int cpu;
865 
866 	/*
867 	 * Politely shrinking the bucket sizes was not enough, shrink aggressively.
868 	 */
869 	if (zone)
870 		cache_shrink(zone);
871 	else
872 		zone_foreach(cache_shrink);
873 
874 	CPU_FOREACH(cpu) {
875 		thread_lock(curthread);
876 		sched_bind(curthread, cpu);
877 		thread_unlock(curthread);
878 
879 		if (zone)
880 			cache_drain_safe_cpu(zone);
881 		else
882 			zone_foreach(cache_drain_safe_cpu);
883 	}
884 	thread_lock(curthread);
885 	sched_unbind(curthread);
886 	thread_unlock(curthread);
887 }
888 
889 /*
890  * Drain the cached buckets from a zone.  Expects a locked zone on entry.
891  */
892 static void
893 bucket_cache_drain(uma_zone_t zone)
894 {
895 	uma_zone_domain_t zdom;
896 	uma_bucket_t bucket;
897 	int i;
898 
899 	/*
900 	 * Drain the bucket queues and free the buckets.
901 	 */
902 	for (i = 0; i < vm_ndomains; i++) {
903 		zdom = &zone->uz_domain[i];
904 		while ((bucket = zone_try_fetch_bucket(zone, zdom, false)) !=
905 		    NULL) {
906 			ZONE_UNLOCK(zone);
907 			bucket_drain(zone, bucket);
908 			bucket_free(zone, bucket, NULL);
909 			ZONE_LOCK(zone);
910 		}
911 	}
912 
913 	/*
914 	 * Shrink bucket sizes further.  The price of a single zone lock collision
915 	 * is probably lower than the price of a global cache drain.
916 	 */
917 	if (zone->uz_count > zone->uz_count_min)
918 		zone->uz_count--;
919 }
920 
921 static void
922 keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
923 {
924 	uint8_t *mem;
925 	int i;
926 	uint8_t flags;
927 
928 	CTR4(KTR_UMA, "keg_free_slab keg %s(%p) slab %p, returning %d bytes",
929 	    keg->uk_name, keg, slab, PAGE_SIZE * keg->uk_ppera);
930 
931 	mem = slab->us_data;
932 	flags = slab->us_flags;
933 	i = start;
934 	if (keg->uk_fini != NULL) {
935 		for (i--; i > -1; i--)
936 #ifdef INVARIANTS
937 		/*
938 		 * trash_fini implies that dtor was trash_dtor. trash_fini
939 		 * would check that memory hasn't been modified since free,
940 		 * which executed trash_dtor.
941 		 * That's why we need to run the uma_dbg_kskip() check here,
942 		 * although we don't make the skip check for other init/fini
943 		 * invocations.
944 		 */
945 		if (!uma_dbg_kskip(keg, slab->us_data + (keg->uk_rsize * i)) ||
946 		    keg->uk_fini != trash_fini)
947 #endif
948 			keg->uk_fini(slab->us_data + (keg->uk_rsize * i),
949 			    keg->uk_size);
950 	}
951 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
952 		zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
953 	keg->uk_freef(mem, PAGE_SIZE * keg->uk_ppera, flags);
954 	uma_total_dec(PAGE_SIZE * keg->uk_ppera);
955 }
956 
957 /*
958  * Frees pages from a keg back to the system.  This is done on demand from
959  * the pageout daemon.
960  *
961  * Returns nothing.
962  */
963 static void
964 keg_drain(uma_keg_t keg)
965 {
966 	struct slabhead freeslabs = { 0 };
967 	uma_domain_t dom;
968 	uma_slab_t slab, tmp;
969 	int i;
970 
971 	/*
972 	 * We don't want to take pages from statically allocated kegs at this
973 	 * time
974 	 */
975 	if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
976 		return;
977 
978 	CTR3(KTR_UMA, "keg_drain %s(%p) free items: %u",
979 	    keg->uk_name, keg, keg->uk_free);
980 	KEG_LOCK(keg);
981 	if (keg->uk_free == 0)
982 		goto finished;
983 
984 	for (i = 0; i < vm_ndomains; i++) {
985 		dom = &keg->uk_domain[i];
986 		LIST_FOREACH_SAFE(slab, &dom->ud_free_slab, us_link, tmp) {
987 			/* We have nowhere to free these to. */
988 			if (slab->us_flags & UMA_SLAB_BOOT)
989 				continue;
990 
991 			LIST_REMOVE(slab, us_link);
992 			keg->uk_pages -= keg->uk_ppera;
993 			keg->uk_free -= keg->uk_ipers;
994 
995 			if (keg->uk_flags & UMA_ZONE_HASH)
996 				UMA_HASH_REMOVE(&keg->uk_hash, slab,
997 				    slab->us_data);
998 
999 			SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);
1000 		}
1001 	}
1002 
1003 finished:
1004 	KEG_UNLOCK(keg);
1005 
1006 	while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
1007 		SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
1008 		keg_free_slab(keg, slab, keg->uk_ipers);
1009 	}
1010 }
1011 
1012 static void
1013 zone_drain_wait(uma_zone_t zone, int waitok)
1014 {
1015 
1016 	/*
1017 	 * Set draining to interlock with zone_dtor() so we can release our
1018 	 * locks as we go.  Only dtor() should do a WAITOK call since it
1019 	 * is the only call that knows the structure will still be available
1020 	 * when it wakes up.
1021 	 */
1022 	ZONE_LOCK(zone);
1023 	while (zone->uz_flags & UMA_ZFLAG_DRAINING) {
1024 		if (waitok == M_NOWAIT)
1025 			goto out;
1026 		msleep(zone, zone->uz_lockptr, PVM, "zonedrain", 1);
1027 	}
1028 	zone->uz_flags |= UMA_ZFLAG_DRAINING;
1029 	bucket_cache_drain(zone);
1030 	ZONE_UNLOCK(zone);
1031 	/*
1032 	 * The DRAINING flag protects us from being freed while
1033 	 * we're running.  Normally the uma_rwlock would protect us but we
1034 	 * must be able to release and acquire the right lock for each keg.
1035 	 */
1036 	keg_drain(zone->uz_keg);
1037 	ZONE_LOCK(zone);
1038 	zone->uz_flags &= ~UMA_ZFLAG_DRAINING;
1039 	wakeup(zone);
1040 out:
1041 	ZONE_UNLOCK(zone);
1042 }
1043 
1044 void
1045 zone_drain(uma_zone_t zone)
1046 {
1047 
1048 	zone_drain_wait(zone, M_NOWAIT);
1049 }
1050 
1051 /*
1052  * Allocate a new slab for a keg.  This does not insert the slab onto a list.
1053  * If the allocation was successful, the keg lock will be held upon return,
1054  * otherwise the keg will be left unlocked.
1055  *
1056  * Arguments:
1057  *	flags   Wait flags for the item initialization routine
1058  *	aflags  Wait flags for the slab allocation
1059  *
1060  * Returns:
1061  *	The slab that was allocated or NULL if there is no memory and the
1062  *	caller specified M_NOWAIT.
1063  */
1064 static uma_slab_t
1065 keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int domain, int flags,
1066     int aflags)
1067 {
1068 	uma_alloc allocf;
1069 	uma_slab_t slab;
1070 	unsigned long size;
1071 	uint8_t *mem;
1072 	uint8_t sflags;
1073 	int i;
1074 
1075 	KASSERT(domain >= 0 && domain < vm_ndomains,
1076 	    ("keg_alloc_slab: domain %d out of range", domain));
1077 	KEG_LOCK_ASSERT(keg);
1078 	MPASS(zone->uz_lockptr == &keg->uk_lock);
1079 
1080 	allocf = keg->uk_allocf;
1081 	KEG_UNLOCK(keg);
1082 
1083 	slab = NULL;
1084 	mem = NULL;
1085 	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
1086 		slab = zone_alloc_item(keg->uk_slabzone, NULL, domain, aflags);
1087 		if (slab == NULL)
1088 			goto out;
1089 	}
1090 
1091 	/*
1092 	 * This reproduces the old vm_zone behavior of zero filling pages the
1093 	 * first time they are added to a zone.
1094 	 *
1095 	 * Malloced items are zeroed in uma_zalloc.
1096 	 */
1097 
1098 	if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
1099 		aflags |= M_ZERO;
1100 	else
1101 		aflags &= ~M_ZERO;
1102 
1103 	if (keg->uk_flags & UMA_ZONE_NODUMP)
1104 		aflags |= M_NODUMP;
1105 
1106 	/* zone is passed for legacy reasons. */
1107 	size = keg->uk_ppera * PAGE_SIZE;
1108 	mem = allocf(zone, size, domain, &sflags, aflags);
1109 	if (mem == NULL) {
1110 		if (keg->uk_flags & UMA_ZONE_OFFPAGE)
1111 			zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
1112 		slab = NULL;
1113 		goto out;
1114 	}
1115 	uma_total_inc(size);
1116 
1117 	/* Point the slab into the allocated memory */
1118 	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
1119 		slab = (uma_slab_t )(mem + keg->uk_pgoff);
1120 
1121 	if (keg->uk_flags & UMA_ZONE_VTOSLAB)
1122 		for (i = 0; i < keg->uk_ppera; i++)
1123 			vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);
1124 
1125 	slab->us_keg = keg;
1126 	slab->us_data = mem;
1127 	slab->us_freecount = keg->uk_ipers;
1128 	slab->us_flags = sflags;
1129 	slab->us_domain = domain;
1130 	BIT_FILL(SLAB_SETSIZE, &slab->us_free);
1131 #ifdef INVARIANTS
1132 	BIT_ZERO(SLAB_SETSIZE, &slab->us_debugfree);
1133 #endif
1134 
1135 	if (keg->uk_init != NULL) {
1136 		for (i = 0; i < keg->uk_ipers; i++)
1137 			if (keg->uk_init(slab->us_data + (keg->uk_rsize * i),
1138 			    keg->uk_size, flags) != 0)
1139 				break;
1140 		if (i != keg->uk_ipers) {
1141 			keg_free_slab(keg, slab, i);
1142 			slab = NULL;
1143 			goto out;
1144 		}
1145 	}
1146 	KEG_LOCK(keg);
1147 
1148 	CTR3(KTR_UMA, "keg_alloc_slab: allocated slab %p for %s(%p)",
1149 	    slab, keg->uk_name, keg);
1150 
1151 	if (keg->uk_flags & UMA_ZONE_HASH)
1152 		UMA_HASH_INSERT(&keg->uk_hash, slab, mem);
1153 
1154 	keg->uk_pages += keg->uk_ppera;
1155 	keg->uk_free += keg->uk_ipers;
1156 
1157 out:
1158 	return (slab);
1159 }
1160 
1161 /*
1162  * This function is intended to be used early on in place of page_alloc() so
1163  * that we may use the boot time page cache to satisfy allocations before
1164  * the VM is ready.
1165  */
1166 static void *
1167 startup_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
1168     int wait)
1169 {
1170 	uma_keg_t keg;
1171 	void *mem;
1172 	int pages;
1173 
1174 	keg = zone->uz_keg;
1175 	/*
1176 	 * If we are in BOOT_BUCKETS or higher, then switch to the real
1177 	 * allocator.  Zones with page-sized slabs switch at BOOT_PAGEALLOC.
1178 	 */
1179 	switch (booted) {
1180 		case BOOT_COLD:
1181 		case BOOT_STRAPPED:
1182 			break;
1183 		case BOOT_PAGEALLOC:
1184 			if (keg->uk_ppera > 1)
1185 				break;
1186 		case BOOT_BUCKETS:
1187 		case BOOT_RUNNING:
1188 #ifdef UMA_MD_SMALL_ALLOC
1189 			keg->uk_allocf = (keg->uk_ppera > 1) ?
1190 			    page_alloc : uma_small_alloc;
1191 #else
1192 			keg->uk_allocf = page_alloc;
1193 #endif
1194 			return keg->uk_allocf(zone, bytes, domain, pflag, wait);
1195 	}
1196 
1197 	/*
1198 	 * Check our small startup cache to see if it has pages remaining.
1199 	 */
1200 	pages = howmany(bytes, PAGE_SIZE);
1201 	KASSERT(pages > 0, ("%s can't reserve 0 pages", __func__));
1202 	if (pages > boot_pages)
1203 		panic("UMA zone \"%s\": Increase vm.boot_pages", zone->uz_name);
1204 #ifdef DIAGNOSTIC
1205 	printf("%s from \"%s\", %d boot pages left\n", __func__, zone->uz_name,
1206 	    boot_pages);
1207 #endif
1208 	mem = bootmem;
1209 	boot_pages -= pages;
1210 	bootmem += pages * PAGE_SIZE;
1211 	*pflag = UMA_SLAB_BOOT;
1212 
1213 	return (mem);
1214 }
1215 
1216 /*
1217  * Allocates a number of pages from the system
1218  *
1219  * Arguments:
1220  *	bytes  The number of bytes requested
1221  *	wait  Shall we wait?
1222  *
1223  * Returns:
1224  *	A pointer to the alloced memory or possibly
1225  *	NULL if M_NOWAIT is set.
1226  */
1227 static void *
1228 page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
1229     int wait)
1230 {
1231 	void *p;	/* Returned page */
1232 
1233 	*pflag = UMA_SLAB_KERNEL;
1234 	p = (void *)kmem_malloc_domainset(DOMAINSET_FIXED(domain), bytes, wait);
1235 
1236 	return (p);
1237 }
1238 
1239 static void *
1240 pcpu_page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
1241     int wait)
1242 {
1243 	struct pglist alloctail;
1244 	vm_offset_t addr, zkva;
1245 	int cpu, flags;
1246 	vm_page_t p, p_next;
1247 #ifdef NUMA
1248 	struct pcpu *pc;
1249 #endif
1250 
1251 	MPASS(bytes == (mp_maxid + 1) * PAGE_SIZE);
1252 
1253 	TAILQ_INIT(&alloctail);
1254 	flags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
1255 	    malloc2vm_flags(wait);
1256 	*pflag = UMA_SLAB_KERNEL;
1257 	for (cpu = 0; cpu <= mp_maxid; cpu++) {
1258 		if (CPU_ABSENT(cpu)) {
1259 			p = vm_page_alloc(NULL, 0, flags);
1260 		} else {
1261 #ifndef NUMA
1262 			p = vm_page_alloc(NULL, 0, flags);
1263 #else
1264 			pc = pcpu_find(cpu);
1265 			p = vm_page_alloc_domain(NULL, 0, pc->pc_domain, flags);
1266 			if (__predict_false(p == NULL))
1267 				p = vm_page_alloc(NULL, 0, flags);
1268 #endif
1269 		}
1270 		if (__predict_false(p == NULL))
1271 			goto fail;
1272 		TAILQ_INSERT_TAIL(&alloctail, p, listq);
1273 	}
1274 	if ((addr = kva_alloc(bytes)) == 0)
1275 		goto fail;
1276 	zkva = addr;
1277 	TAILQ_FOREACH(p, &alloctail, listq) {
1278 		pmap_qenter(zkva, &p, 1);
1279 		zkva += PAGE_SIZE;
1280 	}
1281 	return ((void*)addr);
1282 fail:
1283 	TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
1284 		vm_page_unwire_noq(p);
1285 		vm_page_free(p);
1286 	}
1287 	return (NULL);
1288 }
1289 
1290 /*
1291  * Allocates a number of pages not belonging to a VM object
1292  *
1293  * Arguments:
1294  *	bytes  The number of bytes requested
1295  *	wait   Shall we wait?
1296  *
1297  * Returns:
1298  *	A pointer to the alloced memory or possibly
1299  *	NULL if M_NOWAIT is set.
1300  */
1301 static void *
1302 noobj_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags,
1303     int wait)
1304 {
1305 	TAILQ_HEAD(, vm_page) alloctail;
1306 	u_long npages;
1307 	vm_offset_t retkva, zkva;
1308 	vm_page_t p, p_next;
1309 	uma_keg_t keg;
1310 
1311 	TAILQ_INIT(&alloctail);
1312 	keg = zone->uz_keg;
1313 
1314 	npages = howmany(bytes, PAGE_SIZE);
1315 	while (npages > 0) {
1316 		p = vm_page_alloc_domain(NULL, 0, domain, VM_ALLOC_INTERRUPT |
1317 		    VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
1318 		    ((wait & M_WAITOK) != 0 ? VM_ALLOC_WAITOK :
1319 		    VM_ALLOC_NOWAIT));
1320 		if (p != NULL) {
1321 			/*
1322 			 * Since the page does not belong to an object, its
1323 			 * listq is unused.
1324 			 */
1325 			TAILQ_INSERT_TAIL(&alloctail, p, listq);
1326 			npages--;
1327 			continue;
1328 		}
1329 		/*
1330 		 * Page allocation failed, free intermediate pages and
1331 		 * exit.
1332 		 */
1333 		TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
1334 			vm_page_unwire_noq(p);
1335 			vm_page_free(p);
1336 		}
1337 		return (NULL);
1338 	}
1339 	*flags = UMA_SLAB_PRIV;
1340 	zkva = keg->uk_kva +
1341 	    atomic_fetchadd_long(&keg->uk_offset, round_page(bytes));
1342 	retkva = zkva;
1343 	TAILQ_FOREACH(p, &alloctail, listq) {
1344 		pmap_qenter(zkva, &p, 1);
1345 		zkva += PAGE_SIZE;
1346 	}
1347 
1348 	return ((void *)retkva);
1349 }
1350 
1351 /*
1352  * Frees a number of pages to the system
1353  *
1354  * Arguments:
1355  *	mem   A pointer to the memory to be freed
1356  *	size  The size of the memory being freed
1357  *	flags The original p->us_flags field
1358  *
1359  * Returns:
1360  *	Nothing
1361  */
1362 static void
1363 page_free(void *mem, vm_size_t size, uint8_t flags)
1364 {
1365 
1366 	if ((flags & UMA_SLAB_KERNEL) == 0)
1367 		panic("UMA: page_free used with invalid flags %x", flags);
1368 
1369 	kmem_free((vm_offset_t)mem, size);
1370 }
1371 
1372 /*
1373  * Frees pcpu zone allocations
1374  *
1375  * Arguments:
1376  *	mem   A pointer to the memory to be freed
1377  *	size  The size of the memory being freed
1378  *	flags The original p->us_flags field
1379  *
1380  * Returns:
1381  *	Nothing
1382  */
1383 static void
1384 pcpu_page_free(void *mem, vm_size_t size, uint8_t flags)
1385 {
1386 	vm_offset_t sva, curva;
1387 	vm_paddr_t paddr;
1388 	vm_page_t m;
1389 
1390 	MPASS(size == (mp_maxid+1)*PAGE_SIZE);
1391 	sva = (vm_offset_t)mem;
1392 	for (curva = sva; curva < sva + size; curva += PAGE_SIZE) {
1393 		paddr = pmap_kextract(curva);
1394 		m = PHYS_TO_VM_PAGE(paddr);
1395 		vm_page_unwire_noq(m);
1396 		vm_page_free(m);
1397 	}
1398 	pmap_qremove(sva, size >> PAGE_SHIFT);
1399 	kva_free(sva, size);
1400 }
1401 
1402 
1403 /*
1404  * Zero fill initializer
1405  *
1406  * Arguments/Returns follow uma_init specifications
1407  */
1408 static int
1409 zero_init(void *mem, int size, int flags)
1410 {
1411 	bzero(mem, size);
1412 	return (0);
1413 }
1414 
1415 /*
1416  * Finish creating a small uma keg.  This calculates ipers, and the keg size.
1417  *
1418  * Arguments
1419  *	keg  The keg we should initialize
1420  *
1421  * Returns
1422  *	Nothing
1423  */
1424 static void
1425 keg_small_init(uma_keg_t keg)
1426 {
1427 	u_int rsize;
1428 	u_int memused;
1429 	u_int wastedspace;
1430 	u_int shsize;
1431 	u_int slabsize;
1432 
1433 	if (keg->uk_flags & UMA_ZONE_PCPU) {
1434 		u_int ncpus = (mp_maxid + 1) ? (mp_maxid + 1) : MAXCPU;
1435 
1436 		slabsize = UMA_PCPU_ALLOC_SIZE;
1437 		keg->uk_ppera = ncpus;
1438 	} else {
1439 		slabsize = UMA_SLAB_SIZE;
1440 		keg->uk_ppera = 1;
1441 	}
1442 
1443 	/*
1444 	 * Calculate the size of each allocation (rsize) according to
1445 	 * alignment.  If the requested size is smaller than we have
1446 	 * allocation bits for, we round it up.
1447 	 */
1448 	rsize = keg->uk_size;
1449 	if (rsize < slabsize / SLAB_SETSIZE)
1450 		rsize = slabsize / SLAB_SETSIZE;
1451 	if (rsize & keg->uk_align)
1452 		rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1);
1453 	keg->uk_rsize = rsize;
1454 
1455 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0 ||
1456 	    keg->uk_rsize < UMA_PCPU_ALLOC_SIZE,
1457 	    ("%s: size %u too large", __func__, keg->uk_rsize));
1458 
1459 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
1460 		shsize = 0;
1461 	else
1462 		shsize = SIZEOF_UMA_SLAB;
1463 
1464 	if (rsize <= slabsize - shsize)
1465 		keg->uk_ipers = (slabsize - shsize) / rsize;
1466 	else {
1467 		/* Handle special case when we have 1 item per slab, so
1468 		 * alignment requirement can be relaxed. */
1469 		KASSERT(keg->uk_size <= slabsize - shsize,
1470 		    ("%s: size %u greater than slab", __func__, keg->uk_size));
1471 		keg->uk_ipers = 1;
1472 	}
1473 	KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
1474 	    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
1475 
1476 	memused = keg->uk_ipers * rsize + shsize;
1477 	wastedspace = slabsize - memused;
1478 
1479 	/*
1480 	 * We can't do OFFPAGE if we're internal or if we've been
1481 	 * asked not to go to the VM for buckets.  If we do this, we
1482 	 * may end up going to the VM for slabs, which we do not
1483 	 * want to do if we're UMA_ZFLAG_CACHEONLY as a result
1484 	 * of UMA_ZONE_VM, which clearly forbids it.
1485 	 */
1486 	if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
1487 	    (keg->uk_flags & UMA_ZFLAG_CACHEONLY))
1488 		return;
1489 
1490 	/*
1491 	 * See if using an OFFPAGE slab will limit our waste.  Only do
1492 	 * this if it permits more items per-slab.
1493 	 *
1494 	 * XXX We could try growing slabsize to limit max waste as well.
1495 	 * Historically this was not done because the VM could not
1496 	 * efficiently handle contiguous allocations.
1497 	 */
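	/*
	 * Illustrative example with hypothetical numbers: if slabsize were
	 * 4096, shsize 96 and rsize 1024, then ipers would be
	 * (4096 - 96) / 1024 = 3 and wastedspace 4096 - (3 * 1024 + 96) =
	 * 928.  If UMA_MAX_WASTE were 10, 928 >= 4096 / 10 and an offpage
	 * slab would hold 4096 / 1024 = 4 items, so OFFPAGE would be chosen.
	 */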
1498 	if ((wastedspace >= slabsize / UMA_MAX_WASTE) &&
1499 	    (keg->uk_ipers < (slabsize / keg->uk_rsize))) {
1500 		keg->uk_ipers = slabsize / keg->uk_rsize;
1501 		KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
1502 		    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
1503 		CTR6(KTR_UMA, "UMA decided we need offpage slab headers for "
1504 		    "keg: %s(%p), calculated wastedspace = %d, "
1505 		    "maximum wasted space allowed = %d, "
1506 		    "calculated ipers = %d, "
1507 		    "new wasted space = %d\n", keg->uk_name, keg, wastedspace,
1508 		    slabsize / UMA_MAX_WASTE, keg->uk_ipers,
1509 		    slabsize - keg->uk_ipers * keg->uk_rsize);
1510 		keg->uk_flags |= UMA_ZONE_OFFPAGE;
1511 	}
1512 
1513 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
1514 	    (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1515 		keg->uk_flags |= UMA_ZONE_HASH;
1516 }
1517 
1518 /*
1519  * Finish creating a large (> UMA_SLAB_SIZE) uma keg.  Just give in and do
1520  * OFFPAGE for now.  When I can allow for more dynamic slab sizes this will be
1521  * more complicated.
1522  *
1523  * Arguments
1524  *	keg  The keg we should initialize
1525  *
1526  * Returns
1527  *	Nothing
1528  */
1529 static void
1530 keg_large_init(uma_keg_t keg)
1531 {
1532 
1533 	KASSERT(keg != NULL, ("Keg is null in keg_large_init"));
1534 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
1535 	    ("%s: Cannot large-init a UMA_ZONE_PCPU keg", __func__));
1536 
1537 	keg->uk_ppera = howmany(keg->uk_size, PAGE_SIZE);
1538 	keg->uk_ipers = 1;
1539 	keg->uk_rsize = keg->uk_size;
1540 
1541 	/* Check whether we have enough space to not do OFFPAGE. */
1542 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) == 0 &&
1543 	    PAGE_SIZE * keg->uk_ppera - keg->uk_rsize < SIZEOF_UMA_SLAB) {
1544 		/*
1545 		 * We can't do OFFPAGE if we're internal, in which case
1546 		 * we need an extra page per allocation to contain the
1547 		 * slab header.
1548 		 */
1549 		if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) == 0)
1550 			keg->uk_flags |= UMA_ZONE_OFFPAGE;
1551 		else
1552 			keg->uk_ppera++;
1553 	}
1554 
1555 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
1556 	    (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1557 		keg->uk_flags |= UMA_ZONE_HASH;
1558 }
1559 
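/*
 * Finish creating a CACHESPREAD keg.  The item size is padded so that
 * consecutive items start on different alignment boundaries within a page
 * instead of repeatedly landing on the same ones; slabs may span multiple
 * pages as a result.
 *
 * Arguments
 *	keg  The keg we should initialize
 *
 * Returns
 *	Nothing
 */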
1560 static void
1561 keg_cachespread_init(uma_keg_t keg)
1562 {
1563 	int alignsize;
1564 	int trailer;
1565 	int pages;
1566 	int rsize;
1567 
1568 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
1569 	    ("%s: Cannot cachespread-init a UMA_ZONE_PCPU keg", __func__));
1570 
1571 	alignsize = keg->uk_align + 1;
1572 	rsize = keg->uk_size;
1573 	/*
1574 	 * We want one item to start on every align boundary in a page.  To
1575 	 * do this we will span pages.  We will also extend the item by the
1576 	 * size of align if it is an even multiple of align.  Otherwise, it
1577 	 * would fall on the same boundary every time.
1578 	 */
1579 	if (rsize & keg->uk_align)
1580 		rsize = (rsize & ~keg->uk_align) + alignsize;
1581 	if ((rsize & alignsize) == 0)
1582 		rsize += alignsize;
1583 	trailer = rsize - keg->uk_size;
1584 	pages = (rsize * (PAGE_SIZE / alignsize)) / PAGE_SIZE;
1585 	pages = MIN(pages, (128 * 1024) / PAGE_SIZE);
1586 	keg->uk_rsize = rsize;
1587 	keg->uk_ppera = pages;
1588 	keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize;
1589 	keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
1590 	KASSERT(keg->uk_ipers <= SLAB_SETSIZE,
1591 	    ("%s: keg->uk_ipers too high(%d) increase max_ipers", __func__,
1592 	    keg->uk_ipers));
1593 }
1594 
1595 /*
1596  * Keg header ctor.  This initializes all fields, locks, etc.  And inserts
1597  * the keg onto the global keg list.
1598  *
1599  * Arguments/Returns follow uma_ctor specifications
1600  *	udata  Actually uma_kctor_args
1601  */
1602 static int
1603 keg_ctor(void *mem, int size, void *udata, int flags)
1604 {
1605 	struct uma_kctor_args *arg = udata;
1606 	uma_keg_t keg = mem;
1607 	uma_zone_t zone;
1608 
1609 	bzero(keg, size);
1610 	keg->uk_size = arg->size;
1611 	keg->uk_init = arg->uminit;
1612 	keg->uk_fini = arg->fini;
1613 	keg->uk_align = arg->align;
1614 	keg->uk_free = 0;
1615 	keg->uk_reserve = 0;
1616 	keg->uk_pages = 0;
1617 	keg->uk_flags = arg->flags;
1618 	keg->uk_slabzone = NULL;
1619 
1620 	/*
1621 	 * We use a global round-robin policy by default.  Zones with
1622 	 * UMA_ZONE_NUMA set will use first-touch instead, in which case the
1623 	 * iterator is never run.
1624 	 */
1625 	keg->uk_dr.dr_policy = DOMAINSET_RR();
1626 	keg->uk_dr.dr_iter = 0;
1627 
1628 	/*
1629 	 * The master zone is passed to us at keg-creation time.
1630 	 */
1631 	zone = arg->zone;
1632 	keg->uk_name = zone->uz_name;
1633 
1634 	if (arg->flags & UMA_ZONE_VM)
1635 		keg->uk_flags |= UMA_ZFLAG_CACHEONLY;
1636 
1637 	if (arg->flags & UMA_ZONE_ZINIT)
1638 		keg->uk_init = zero_init;
1639 
1640 	if (arg->flags & UMA_ZONE_MALLOC)
1641 		keg->uk_flags |= UMA_ZONE_VTOSLAB;
1642 
1643 	if (arg->flags & UMA_ZONE_PCPU)
1644 #ifdef SMP
1645 		keg->uk_flags |= UMA_ZONE_OFFPAGE;
1646 #else
1647 		keg->uk_flags &= ~UMA_ZONE_PCPU;
1648 #endif
1649 
1650 	if (keg->uk_flags & UMA_ZONE_CACHESPREAD) {
1651 		keg_cachespread_init(keg);
1652 	} else {
1653 		if (keg->uk_size > UMA_SLAB_SPACE)
1654 			keg_large_init(keg);
1655 		else
1656 			keg_small_init(keg);
1657 	}
1658 
1659 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
1660 		keg->uk_slabzone = slabzone;
1661 
1662 	/*
1663 	 * If we haven't booted yet we need allocations to go through the
1664 	 * startup cache until the vm is ready.
1665 	 */
1666 	if (booted < BOOT_PAGEALLOC)
1667 		keg->uk_allocf = startup_alloc;
1668 #ifdef UMA_MD_SMALL_ALLOC
1669 	else if (keg->uk_ppera == 1)
1670 		keg->uk_allocf = uma_small_alloc;
1671 #endif
1672 	else if (keg->uk_flags & UMA_ZONE_PCPU)
1673 		keg->uk_allocf = pcpu_page_alloc;
1674 	else
1675 		keg->uk_allocf = page_alloc;
1676 #ifdef UMA_MD_SMALL_ALLOC
1677 	if (keg->uk_ppera == 1)
1678 		keg->uk_freef = uma_small_free;
1679 	else
1680 #endif
1681 	if (keg->uk_flags & UMA_ZONE_PCPU)
1682 		keg->uk_freef = pcpu_page_free;
1683 	else
1684 		keg->uk_freef = page_free;
1685 
1686 	/*
1687 	 * Initialize keg's lock
1688 	 */
1689 	KEG_LOCK_INIT(keg, (arg->flags & UMA_ZONE_MTXCLASS));
1690 
1691 	/*
1692 	 * If we're putting the slab header in the actual page we need to
1693 	 * figure out where in each page it goes.  See SIZEOF_UMA_SLAB
1694 	 * macro definition.
1695 	 */
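	/*
	 * For example (hypothetical numbers): with a 4096-byte page, one
	 * page per slab and a 96-byte SIZEOF_UMA_SLAB, uk_pgoff would be
	 * 4096 - 96 = 4000, i.e. the slab header occupies the last 96 bytes
	 * of the page and the items sit before it.
	 */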
1696 	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) {
1697 		keg->uk_pgoff = (PAGE_SIZE * keg->uk_ppera) - SIZEOF_UMA_SLAB;
1698 		/*
1699 		 * The only way the following is possible is if our
1700 		 * UMA_ALIGN_PTR adjustments have made us bigger than
1701 		 * UMA_SLAB_SIZE.  I haven't checked whether this is
1702 		 * mathematically possible for all cases, so we make
1703 		 * sure here anyway.
1704 		 */
1705 		KASSERT(keg->uk_pgoff + sizeof(struct uma_slab) <=
1706 		    PAGE_SIZE * keg->uk_ppera,
1707 		    ("zone %s ipers %d rsize %d size %d slab won't fit",
1708 		    zone->uz_name, keg->uk_ipers, keg->uk_rsize, keg->uk_size));
1709 	}
1710 
1711 	if (keg->uk_flags & UMA_ZONE_HASH)
1712 		hash_alloc(&keg->uk_hash, 0);
1713 
1714 	CTR5(KTR_UMA, "keg_ctor %p zone %s(%p) out %d free %d\n",
1715 	    keg, zone->uz_name, zone,
1716 	    (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free,
1717 	    keg->uk_free);
1718 
1719 	LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);
1720 
1721 	rw_wlock(&uma_rwlock);
1722 	LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
1723 	rw_wunlock(&uma_rwlock);
1724 	return (0);
1725 }
1726 
1727 static void
1728 zone_alloc_counters(uma_zone_t zone)
1729 {
1730 
1731 	zone->uz_allocs = counter_u64_alloc(M_WAITOK);
1732 	zone->uz_frees = counter_u64_alloc(M_WAITOK);
1733 	zone->uz_fails = counter_u64_alloc(M_WAITOK);
1734 }
1735 
1736 /*
1737  * Zone header ctor.  This initializes all fields, locks, etc.
1738  *
1739  * Arguments/Returns follow uma_ctor specifications
1740  *	udata  Actually uma_zctor_args
1741  */
1742 static int
1743 zone_ctor(void *mem, int size, void *udata, int flags)
1744 {
1745 	struct uma_zctor_args *arg = udata;
1746 	uma_zone_t zone = mem;
1747 	uma_zone_t z;
1748 	uma_keg_t keg;
1749 
1750 	bzero(zone, size);
1751 	zone->uz_name = arg->name;
1752 	zone->uz_ctor = arg->ctor;
1753 	zone->uz_dtor = arg->dtor;
1754 	zone->uz_init = NULL;
1755 	zone->uz_fini = NULL;
1756 	zone->uz_sleeps = 0;
1757 	zone->uz_count = 0;
1758 	zone->uz_count_min = 0;
1759 	zone->uz_count_max = BUCKET_MAX;
1760 	zone->uz_flags = 0;
1761 	zone->uz_warning = NULL;
1762 	/* The domain structures follow the cpu structures. */
1763 	zone->uz_domain = (struct uma_zone_domain *)&zone->uz_cpu[mp_ncpus];
1764 	zone->uz_bkt_max = ULONG_MAX;
1765 	timevalclear(&zone->uz_ratecheck);
1766 
1767 	if (__predict_true(booted == BOOT_RUNNING))
1768 		zone_alloc_counters(zone);
1769 	else {
1770 		zone->uz_allocs = EARLY_COUNTER;
1771 		zone->uz_frees = EARLY_COUNTER;
1772 		zone->uz_fails = EARLY_COUNTER;
1773 	}
1774 
1775 	/*
1776 	 * This is a pure cache zone, no kegs.
1777 	 */
1778 	if (arg->import) {
1779 		if (arg->flags & UMA_ZONE_VM)
1780 			arg->flags |= UMA_ZFLAG_CACHEONLY;
1781 		zone->uz_flags = arg->flags;
1782 		zone->uz_size = arg->size;
1783 		zone->uz_import = arg->import;
1784 		zone->uz_release = arg->release;
1785 		zone->uz_arg = arg->arg;
1786 		zone->uz_lockptr = &zone->uz_lock;
1787 		ZONE_LOCK_INIT(zone, (arg->flags & UMA_ZONE_MTXCLASS));
1788 		rw_wlock(&uma_rwlock);
1789 		LIST_INSERT_HEAD(&uma_cachezones, zone, uz_link);
1790 		rw_wunlock(&uma_rwlock);
1791 		goto out;
1792 	}
1793 
1794 	/*
1795 	 * Use the regular zone/keg/slab allocator.
1796 	 */
1797 	zone->uz_import = (uma_import)zone_import;
1798 	zone->uz_release = (uma_release)zone_release;
1799 	zone->uz_arg = zone;
1800 	keg = arg->keg;
1801 
1802 	if (arg->flags & UMA_ZONE_SECONDARY) {
1803 		KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
1804 		zone->uz_init = arg->uminit;
1805 		zone->uz_fini = arg->fini;
1806 		zone->uz_lockptr = &keg->uk_lock;
1807 		zone->uz_flags |= UMA_ZONE_SECONDARY;
1808 		rw_wlock(&uma_rwlock);
1809 		ZONE_LOCK(zone);
1810 		LIST_FOREACH(z, &keg->uk_zones, uz_link) {
1811 			if (LIST_NEXT(z, uz_link) == NULL) {
1812 				LIST_INSERT_AFTER(z, zone, uz_link);
1813 				break;
1814 			}
1815 		}
1816 		ZONE_UNLOCK(zone);
1817 		rw_wunlock(&uma_rwlock);
1818 	} else if (keg == NULL) {
1819 		if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
1820 		    arg->align, arg->flags)) == NULL)
1821 			return (ENOMEM);
1822 	} else {
1823 		struct uma_kctor_args karg;
1824 		int error;
1825 
1826 		/* We should only be here from uma_startup() */
1827 		karg.size = arg->size;
1828 		karg.uminit = arg->uminit;
1829 		karg.fini = arg->fini;
1830 		karg.align = arg->align;
1831 		karg.flags = arg->flags;
1832 		karg.zone = zone;
1833 		error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
1834 		    flags);
1835 		if (error)
1836 			return (error);
1837 	}
1838 
1839 	zone->uz_keg = keg;
1840 	zone->uz_size = keg->uk_size;
1841 	zone->uz_flags |= (keg->uk_flags &
1842 	    (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT));
1843 
1844 	/*
1845 	 * Some internal zones don't have room allocated for the per cpu
1846 	 * caches.  If we're internal, bail out here.
1847 	 */
1848 	if (keg->uk_flags & UMA_ZFLAG_INTERNAL) {
1849 		KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0,
1850 		    ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
1851 		return (0);
1852 	}
1853 
1854 out:
1855 	KASSERT((arg->flags & (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET)) !=
1856 	    (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET),
1857 	    ("Invalid zone flag combination"));
1858 	if ((arg->flags & UMA_ZONE_MAXBUCKET) != 0)
1859 		zone->uz_count = BUCKET_MAX;
1860 	else if ((arg->flags & UMA_ZONE_NOBUCKET) != 0)
1861 		zone->uz_count = 0;
1862 	else
1863 		zone->uz_count = bucket_select(zone->uz_size);
1864 	zone->uz_count_min = zone->uz_count;
1865 
1866 	return (0);
1867 }
1868 
1869 /*
1870  * Keg header dtor.  This destroys the keg's lock and frees the hash
1871  * table; the keg was already removed from the global list by zone_dtor().
1872  *
1873  * Arguments/Returns follow uma_dtor specifications
1874  *	udata  unused
1875  */
1876 static void
1877 keg_dtor(void *arg, int size, void *udata)
1878 {
1879 	uma_keg_t keg;
1880 
1881 	keg = (uma_keg_t)arg;
1882 	KEG_LOCK(keg);
1883 	if (keg->uk_free != 0) {
1884 		printf("Freed UMA keg (%s) was not empty (%d items). "
1885 		    "Lost %d pages of memory.\n",
1886 		    keg->uk_name ? keg->uk_name : "",
1887 		    keg->uk_free, keg->uk_pages);
1888 	}
1889 	KEG_UNLOCK(keg);
1890 
1891 	hash_free(&keg->uk_hash);
1892 
1893 	KEG_LOCK_FINI(keg);
1894 }
1895 
1896 /*
1897  * Zone header dtor.
1898  *
1899  * Arguments/Returns follow uma_dtor specifications
1900  *	udata  unused
1901  */
1902 static void
1903 zone_dtor(void *arg, int size, void *udata)
1904 {
1905 	uma_zone_t zone;
1906 	uma_keg_t keg;
1907 
1908 	zone = (uma_zone_t)arg;
1909 
1910 	if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
1911 		cache_drain(zone);
1912 
1913 	rw_wlock(&uma_rwlock);
1914 	LIST_REMOVE(zone, uz_link);
1915 	rw_wunlock(&uma_rwlock);
1916 	/*
1917 	 * XXX there are some races here where the zone can be
1918 	 * drained but the zone lock released and then refilled
1919 	 * before we remove it... we don't care about that for
1920 	 * now.
1921 	 */
1922 	zone_drain_wait(zone, M_WAITOK);
1923 	/*
1924 	 * We only destroy kegs from non secondary/non cache zones.
1925 	 */
1926 	if ((zone->uz_flags & (UMA_ZONE_SECONDARY | UMA_ZFLAG_CACHE)) == 0) {
1927 		keg = zone->uz_keg;
1928 		rw_wlock(&uma_rwlock);
1929 		LIST_REMOVE(keg, uk_link);
1930 		rw_wunlock(&uma_rwlock);
1931 		zone_free_item(kegs, keg, NULL, SKIP_NONE);
1932 	}
1933 	counter_u64_free(zone->uz_allocs);
1934 	counter_u64_free(zone->uz_frees);
1935 	counter_u64_free(zone->uz_fails);
1936 	if (zone->uz_lockptr == &zone->uz_lock)
1937 		ZONE_LOCK_FINI(zone);
1938 }
1939 
1940 /*
1941  * Traverses every zone in the system and calls a callback
1942  *
1943  * Arguments:
1944  *	zfunc  A pointer to a function which accepts a zone
1945  *		as an argument.
1946  *
1947  * Returns:
1948  *	Nothing
1949  */
1950 static void
1951 zone_foreach(void (*zfunc)(uma_zone_t))
1952 {
1953 	uma_keg_t keg;
1954 	uma_zone_t zone;
1955 
1956 	/*
1957 	 * Before BOOT_RUNNING we are guaranteed to be single
1958 	 * threaded, so locking isn't needed. Startup functions
1959 	 * are allowed to use M_WAITOK.
1960 	 */
1961 	if (__predict_true(booted == BOOT_RUNNING))
1962 		rw_rlock(&uma_rwlock);
1963 	LIST_FOREACH(keg, &uma_kegs, uk_link) {
1964 		LIST_FOREACH(zone, &keg->uk_zones, uz_link)
1965 			zfunc(zone);
1966 	}
1967 	if (__predict_true(booted == BOOT_RUNNING))
1968 		rw_runlock(&uma_rwlock);
1969 }
1970 
1971 /*
1972  * Count how many pages we need to bootstrap.  VM supplies its need
1973  * for early zones in the argument; we add our own zones, which consist
1974  * of: UMA Slabs, UMA Hash and 9 Bucket zones.  The zone of zones and
1975  * the zone of kegs are accounted for separately.
1976  */
1977 #define	UMA_BOOT_ZONES	11
1978 /* Zone of zones and zone of kegs have arbitrary alignment. */
1979 #define	UMA_BOOT_ALIGN	32
1980 static int zsize, ksize;
1981 int
1982 uma_startup_count(int vm_zones)
1983 {
1984 	int zones, pages;
1985 
1986 	ksize = sizeof(struct uma_keg) +
1987 	    (sizeof(struct uma_domain) * vm_ndomains);
1988 	zsize = sizeof(struct uma_zone) +
1989 	    (sizeof(struct uma_cache) * (mp_maxid + 1)) +
1990 	    (sizeof(struct uma_zone_domain) * vm_ndomains);
1991 
1992 	/*
1993 	 * Memory for the zone of kegs and its keg,
1994 	 * and for zone of zones.
1995 	 */
1996 	pages = howmany(roundup(zsize, CACHE_LINE_SIZE) * 2 +
1997 	    roundup(ksize, CACHE_LINE_SIZE), PAGE_SIZE);
1998 
1999 #ifdef	UMA_MD_SMALL_ALLOC
2000 	zones = UMA_BOOT_ZONES;
2001 #else
2002 	zones = UMA_BOOT_ZONES + vm_zones;
2003 	vm_zones = 0;
2004 #endif
2005 
2006 	/* Memory for the rest of startup zones, UMA and VM, ... */
2007 	if (zsize > UMA_SLAB_SPACE) {
2008 		/* See keg_large_init(). */
2009 		u_int ppera;
2010 
2011 		ppera = howmany(roundup2(zsize, UMA_BOOT_ALIGN), PAGE_SIZE);
2012 		if (PAGE_SIZE * ppera - roundup2(zsize, UMA_BOOT_ALIGN) <
2013 		    SIZEOF_UMA_SLAB)
2014 			ppera++;
2015 		pages += (zones + vm_zones) * ppera;
2016 	} else if (roundup2(zsize, UMA_BOOT_ALIGN) > UMA_SLAB_SPACE)
2017 		/* See keg_small_init() special case for uk_ppera = 1. */
2018 		pages += zones;
2019 	else
2020 		pages += howmany(zones,
2021 		    UMA_SLAB_SPACE / roundup2(zsize, UMA_BOOT_ALIGN));
2022 
2023 	/* ... and their kegs. Note that zone of zones allocates a keg! */
2024 	pages += howmany(zones + 1,
2025 	    UMA_SLAB_SPACE / roundup2(ksize, UMA_BOOT_ALIGN));
2026 
2027 	/*
2028 	 * Most of the startup zones are not going to be offpage, which is
2029 	 * why we use UMA_SLAB_SPACE instead of UMA_SLAB_SIZE in all
2030 	 * calculations.  Some large bucket zones will be offpage, and
2031 	 * thus will allocate hashes.  We take a conservative approach
2032 	 * and assume that all zones may allocate a hash.  This may give
2033 	 * us some positive inaccuracy, usually an extra single page.
2034 	 */
2035 	pages += howmany(zones, UMA_SLAB_SPACE /
2036 	    (sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT));
2037 
2038 	return (pages);
2039 }
2040 
2041 void
2042 uma_startup(void *mem, int npages)
2043 {
2044 	struct uma_zctor_args args;
2045 	uma_keg_t masterkeg;
2046 	uintptr_t m;
2047 
2048 #ifdef DIAGNOSTIC
2049 	printf("Entering %s with %d boot pages configured\n", __func__, npages);
2050 #endif
2051 
2052 	rw_init(&uma_rwlock, "UMA lock");
2053 
2054 	/* Use bootpages memory for the zone of zones and zone of kegs. */
2055 	m = (uintptr_t)mem;
2056 	zones = (uma_zone_t)m;
2057 	m += roundup(zsize, CACHE_LINE_SIZE);
2058 	kegs = (uma_zone_t)m;
2059 	m += roundup(zsize, CACHE_LINE_SIZE);
2060 	masterkeg = (uma_keg_t)m;
2061 	m += roundup(ksize, CACHE_LINE_SIZE);
2062 	m = roundup(m, PAGE_SIZE);
2063 	npages -= (m - (uintptr_t)mem) / PAGE_SIZE;
2064 	mem = (void *)m;
2065 
2066 	/* "manually" create the initial zone */
2067 	memset(&args, 0, sizeof(args));
2068 	args.name = "UMA Kegs";
2069 	args.size = ksize;
2070 	args.ctor = keg_ctor;
2071 	args.dtor = keg_dtor;
2072 	args.uminit = zero_init;
2073 	args.fini = NULL;
2074 	args.keg = masterkeg;
2075 	args.align = UMA_BOOT_ALIGN - 1;
2076 	args.flags = UMA_ZFLAG_INTERNAL;
2077 	zone_ctor(kegs, zsize, &args, M_WAITOK);
2078 
2079 	bootmem = mem;
2080 	boot_pages = npages;
2081 
2082 	args.name = "UMA Zones";
2083 	args.size = zsize;
2084 	args.ctor = zone_ctor;
2085 	args.dtor = zone_dtor;
2086 	args.uminit = zero_init;
2087 	args.fini = NULL;
2088 	args.keg = NULL;
2089 	args.align = UMA_BOOT_ALIGN - 1;
2090 	args.flags = UMA_ZFLAG_INTERNAL;
2091 	zone_ctor(zones, zsize, &args, M_WAITOK);
2092 
2093 	/* Now make a zone for slab headers */
2094 	slabzone = uma_zcreate("UMA Slabs",
2095 				sizeof(struct uma_slab),
2096 				NULL, NULL, NULL, NULL,
2097 				UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
2098 
2099 	hashzone = uma_zcreate("UMA Hash",
2100 	    sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
2101 	    NULL, NULL, NULL, NULL,
2102 	    UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
2103 
2104 	bucket_init();
2105 
2106 	booted = BOOT_STRAPPED;
2107 }
2108 
2109 void
2110 uma_startup1(void)
2111 {
2112 
2113 #ifdef DIAGNOSTIC
2114 	printf("Entering %s with %d boot pages left\n", __func__, boot_pages);
2115 #endif
2116 	booted = BOOT_PAGEALLOC;
2117 }
2118 
2119 void
2120 uma_startup2(void)
2121 {
2122 
2123 #ifdef DIAGNOSTIC
2124 	printf("Entering %s with %d boot pages left\n", __func__, boot_pages);
2125 #endif
2126 	booted = BOOT_BUCKETS;
2127 	sx_init(&uma_drain_lock, "umadrain");
2128 	bucket_enable();
2129 }
2130 
2131 /*
2132  * Finish starting up: allocate the per-zone counters and initialize
2133  * our periodic callout handle.
2134  */
2135 static void
2136 uma_startup3(void)
2137 {
2138 
2139 #ifdef INVARIANTS
2140 	TUNABLE_INT_FETCH("vm.debug.divisor", &dbg_divisor);
2141 	uma_dbg_cnt = counter_u64_alloc(M_WAITOK);
2142 	uma_skip_cnt = counter_u64_alloc(M_WAITOK);
2143 #endif
2144 	zone_foreach(zone_alloc_counters);
2145 	callout_init(&uma_callout, 1);
2146 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
2147 	booted = BOOT_RUNNING;
2148 }
2149 
2150 static uma_keg_t
2151 uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
2152 		int align, uint32_t flags)
2153 {
2154 	struct uma_kctor_args args;
2155 
2156 	args.size = size;
2157 	args.uminit = uminit;
2158 	args.fini = fini;
2159 	args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align;
2160 	args.flags = flags;
2161 	args.zone = zone;
2162 	return (zone_alloc_item(kegs, &args, UMA_ANYDOMAIN, M_WAITOK));
2163 }
2164 
2165 /* Public functions */
2166 /* See uma.h */
2167 void
2168 uma_set_align(int align)
2169 {
2170 
2171 	if (align != UMA_ALIGN_CACHE)
2172 		uma_align_cache = align;
2173 }
2174 
2175 /* See uma.h */
2176 uma_zone_t
2177 uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
2178 		uma_init uminit, uma_fini fini, int align, uint32_t flags)
2179 
2180 {
2181 	struct uma_zctor_args args;
2182 	uma_zone_t res;
2183 	bool locked;
2184 
2185 	KASSERT(powerof2(align + 1), ("invalid zone alignment %d for \"%s\"",
2186 	    align, name));
2187 
2188 	/* This stuff is essential for the zone ctor */
2189 	memset(&args, 0, sizeof(args));
2190 	args.name = name;
2191 	args.size = size;
2192 	args.ctor = ctor;
2193 	args.dtor = dtor;
2194 	args.uminit = uminit;
2195 	args.fini = fini;
2196 #ifdef  INVARIANTS
2197 	/*
2198 	 * If a zone is being created with an empty constructor and
2199 	 * destructor, pass UMA constructor/destructor which checks for
2200 	 * memory use after free.
2201 	 */
2202 	if ((!(flags & (UMA_ZONE_ZINIT | UMA_ZONE_NOFREE))) &&
2203 	    ctor == NULL && dtor == NULL && uminit == NULL && fini == NULL) {
2204 		args.ctor = trash_ctor;
2205 		args.dtor = trash_dtor;
2206 		args.uminit = trash_init;
2207 		args.fini = trash_fini;
2208 	}
2209 #endif
2210 	args.align = align;
2211 	args.flags = flags;
2212 	args.keg = NULL;
2213 
2214 	if (booted < BOOT_BUCKETS) {
2215 		locked = false;
2216 	} else {
2217 		sx_slock(&uma_drain_lock);
2218 		locked = true;
2219 	}
2220 	res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK);
2221 	if (locked)
2222 		sx_sunlock(&uma_drain_lock);
2223 	return (res);
2224 }
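
/*
 * Illustrative example (not part of this file): a typical consumer creates
 * a zone once at initialization time and then allocates and frees items
 * through it.  The "foo" names below are hypothetical.
 *
 *	static uma_zone_t foo_zone;
 *
 *	foo_zone = uma_zcreate("foo", sizeof(struct foo),
 *	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
 *	...
 *	struct foo *fp = uma_zalloc(foo_zone, M_WAITOK | M_ZERO);
 *	...
 *	uma_zfree(foo_zone, fp);
 *	uma_zdestroy(foo_zone);
 */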
2225 
2226 /* See uma.h */
2227 uma_zone_t
2228 uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
2229 		    uma_init zinit, uma_fini zfini, uma_zone_t master)
2230 {
2231 	struct uma_zctor_args args;
2232 	uma_keg_t keg;
2233 	uma_zone_t res;
2234 	bool locked;
2235 
2236 	keg = master->uz_keg;
2237 	memset(&args, 0, sizeof(args));
2238 	args.name = name;
2239 	args.size = keg->uk_size;
2240 	args.ctor = ctor;
2241 	args.dtor = dtor;
2242 	args.uminit = zinit;
2243 	args.fini = zfini;
2244 	args.align = keg->uk_align;
2245 	args.flags = keg->uk_flags | UMA_ZONE_SECONDARY;
2246 	args.keg = keg;
2247 
2248 	if (booted < BOOT_BUCKETS) {
2249 		locked = false;
2250 	} else {
2251 		sx_slock(&uma_drain_lock);
2252 		locked = true;
2253 	}
2254 	/* XXX Attaches only one keg of potentially many. */
2255 	res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK);
2256 	if (locked)
2257 		sx_sunlock(&uma_drain_lock);
2258 	return (res);
2259 }
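
/*
 * Illustrative example (not part of this file): a secondary zone shares the
 * keg (and thus the slabs) of its master zone but can layer its own
 * ctor/dtor on top of the keg's init/fini.  The "foo" names below are
 * hypothetical.
 *
 *	foo_zone = uma_zcreate("foo", sizeof(struct foo),
 *	    NULL, NULL, foo_init, foo_fini, UMA_ALIGN_PTR, 0);
 *	foo_special_zone = uma_zsecond_create("foo special",
 *	    foo_special_ctor, foo_special_dtor, NULL, NULL, foo_zone);
 */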
2260 
2261 /* See uma.h */
2262 uma_zone_t
2263 uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor,
2264 		    uma_init zinit, uma_fini zfini, uma_import zimport,
2265 		    uma_release zrelease, void *arg, int flags)
2266 {
2267 	struct uma_zctor_args args;
2268 
2269 	memset(&args, 0, sizeof(args));
2270 	args.name = name;
2271 	args.size = size;
2272 	args.ctor = ctor;
2273 	args.dtor = dtor;
2274 	args.uminit = zinit;
2275 	args.fini = zfini;
2276 	args.import = zimport;
2277 	args.release = zrelease;
2278 	args.arg = arg;
2279 	args.align = 0;
2280 	args.flags = flags | UMA_ZFLAG_CACHE;
2281 
2282 	return (zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK));
2283 }
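
/*
 * Illustrative sketch (not part of this file): a cache-only zone has no keg.
 * The caller supplies import and release callbacks that move batches of
 * items between the zone's buckets and some backing store.  The "foo"
 * callbacks below are hypothetical and assume the uma_import/uma_release
 * prototypes matching zone_import()/zone_release() elsewhere in this file.
 *
 *	static int
 *	foo_import(void *arg, void **store, int count, int domain, int flags)
 *	{
 *		(fill store[0..count-1] from the backing store and return
 *		 the number of items actually obtained)
 *	}
 *
 *	static void
 *	foo_release(void *arg, void **store, int count)
 *	{
 *		(return store[0..count-1] to the backing store)
 *	}
 *
 *	foo_cache = uma_zcache_create("foo cache", sizeof(struct foo),
 *	    NULL, NULL, NULL, NULL, foo_import, foo_release, NULL, 0);
 */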
2284 
2285 /* See uma.h */
2286 void
2287 uma_zdestroy(uma_zone_t zone)
2288 {
2289 
2290 	sx_slock(&uma_drain_lock);
2291 	zone_free_item(zones, zone, NULL, SKIP_NONE);
2292 	sx_sunlock(&uma_drain_lock);
2293 }
2294 
2295 void
2296 uma_zwait(uma_zone_t zone)
2297 {
2298 	void *item;
2299 
2300 	item = uma_zalloc_arg(zone, NULL, M_WAITOK);
2301 	uma_zfree(zone, item);
2302 }
2303 
2304 void *
2305 uma_zalloc_pcpu_arg(uma_zone_t zone, void *udata, int flags)
2306 {
2307 	void *item;
2308 #ifdef SMP
2309 	int i;
2310 
2311 	MPASS(zone->uz_flags & UMA_ZONE_PCPU);
2312 #endif
2313 	item = uma_zalloc_arg(zone, udata, flags & ~M_ZERO);
2314 	if (item != NULL && (flags & M_ZERO)) {
2315 #ifdef SMP
2316 		for (i = 0; i <= mp_maxid; i++)
2317 			bzero(zpcpu_get_cpu(item, i), zone->uz_size);
2318 #else
2319 		bzero(item, zone->uz_size);
2320 #endif
2321 	}
2322 	return (item);
2323 }
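
/*
 * Illustrative example (not part of this file): a UMA_ZONE_PCPU zone hands
 * out one copy of the item per CPU, and zpcpu_get() selects the current
 * CPU's copy.  The names below are hypothetical.
 *
 *	pcpu_zone = uma_zcreate("foo pcpu", sizeof(uint64_t),
 *	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_PCPU);
 *	p = uma_zalloc_pcpu(pcpu_zone, M_WAITOK | M_ZERO);
 *	...
 *	critical_enter();
 *	(*(uint64_t *)zpcpu_get(p))++;
 *	critical_exit();
 *	...
 *	uma_zfree_pcpu(pcpu_zone, p);
 */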
2324 
2325 /*
2326  * A stub while both regular and pcpu cases are identical.
2327  */
2328 void
2329 uma_zfree_pcpu_arg(uma_zone_t zone, void *item, void *udata)
2330 {
2331 
2332 #ifdef SMP
2333 	MPASS(zone->uz_flags & UMA_ZONE_PCPU);
2334 #endif
2335 	uma_zfree_arg(zone, item, udata);
2336 }
2337 
2338 /* See uma.h */
2339 void *
2340 uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
2341 {
2342 	uma_zone_domain_t zdom;
2343 	uma_bucket_t bucket;
2344 	uma_cache_t cache;
2345 	void *item;
2346 	int cpu, domain, lockfail, maxbucket;
2347 #ifdef INVARIANTS
2348 	bool skipdbg;
2349 #endif
2350 
2351 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
2352 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
2353 
2354 	/* This is the fast path allocation */
2355 	CTR4(KTR_UMA, "uma_zalloc_arg thread %x zone %s(%p) flags %d",
2356 	    curthread, zone->uz_name, zone, flags);
2357 
2358 	if (flags & M_WAITOK) {
2359 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2360 		    "uma_zalloc_arg: zone \"%s\"", zone->uz_name);
2361 	}
2362 	KASSERT((flags & M_EXEC) == 0, ("uma_zalloc_arg: called with M_EXEC"));
2363 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
2364 	    ("uma_zalloc_arg: called with spinlock or critical section held"));
2365 	if (zone->uz_flags & UMA_ZONE_PCPU)
2366 		KASSERT((flags & M_ZERO) == 0, ("allocating from a pcpu zone "
2367 		    "with M_ZERO passed"));
2368 
2369 #ifdef DEBUG_MEMGUARD
2370 	if (memguard_cmp_zone(zone)) {
2371 		item = memguard_alloc(zone->uz_size, flags);
2372 		if (item != NULL) {
2373 			if (zone->uz_init != NULL &&
2374 			    zone->uz_init(item, zone->uz_size, flags) != 0)
2375 				return (NULL);
2376 			if (zone->uz_ctor != NULL &&
2377 			    zone->uz_ctor(item, zone->uz_size, udata,
2378 			    flags) != 0) {
2379 			    	zone->uz_fini(item, zone->uz_size);
2380 				return (NULL);
2381 			}
2382 			return (item);
2383 		}
2384 		/* This is unfortunate but should not be fatal. */
2385 	}
2386 #endif
2387 	/*
2388 	 * If possible, allocate from the per-CPU cache.  There are two
2389 	 * requirements for safe access to the per-CPU cache: (1) the thread
2390 	 * accessing the cache must not be preempted or yield during access,
2391 	 * and (2) the thread must not migrate CPUs without switching which
2392 	 * cache it accesses.  We rely on a critical section to prevent
2393 	 * preemption and migration.  We release the critical section in
2394 	 * order to acquire the zone mutex if we are unable to allocate from
2395 	 * the current cache; when we re-acquire the critical section, we
2396 	 * must detect and handle migration if it has occurred.
2397 	 */
2398 zalloc_restart:
2399 	critical_enter();
2400 	cpu = curcpu;
2401 	cache = &zone->uz_cpu[cpu];
2402 
2403 zalloc_start:
2404 	bucket = cache->uc_allocbucket;
2405 	if (bucket != NULL && bucket->ub_cnt > 0) {
2406 		bucket->ub_cnt--;
2407 		item = bucket->ub_bucket[bucket->ub_cnt];
2408 #ifdef INVARIANTS
2409 		bucket->ub_bucket[bucket->ub_cnt] = NULL;
2410 #endif
2411 		KASSERT(item != NULL, ("uma_zalloc: Bucket pointer mangled."));
2412 		cache->uc_allocs++;
2413 		critical_exit();
2414 #ifdef INVARIANTS
2415 		skipdbg = uma_dbg_zskip(zone, item);
2416 #endif
2417 		if (zone->uz_ctor != NULL &&
2418 #ifdef INVARIANTS
2419 		    (!skipdbg || zone->uz_ctor != trash_ctor ||
2420 		    zone->uz_dtor != trash_dtor) &&
2421 #endif
2422 		    zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
2423 			counter_u64_add(zone->uz_fails, 1);
2424 			zone_free_item(zone, item, udata, SKIP_DTOR | SKIP_CNT);
2425 			return (NULL);
2426 		}
2427 #ifdef INVARIANTS
2428 		if (!skipdbg)
2429 			uma_dbg_alloc(zone, NULL, item);
2430 #endif
2431 		if (flags & M_ZERO)
2432 			uma_zero_item(item, zone);
2433 		return (item);
2434 	}
2435 
2436 	/*
2437 	 * We have run out of items in our alloc bucket.
2438 	 * See if we can switch with our free bucket.
2439 	 */
2440 	bucket = cache->uc_freebucket;
2441 	if (bucket != NULL && bucket->ub_cnt > 0) {
2442 		CTR2(KTR_UMA,
2443 		    "uma_zalloc: zone %s(%p) swapping empty with alloc",
2444 		    zone->uz_name, zone);
2445 		cache->uc_freebucket = cache->uc_allocbucket;
2446 		cache->uc_allocbucket = bucket;
2447 		goto zalloc_start;
2448 	}
2449 
2450 	/*
2451 	 * Discard any empty allocation bucket while we hold no locks.
2452 	 */
2453 	bucket = cache->uc_allocbucket;
2454 	cache->uc_allocbucket = NULL;
2455 	critical_exit();
2456 	if (bucket != NULL)
2457 		bucket_free(zone, bucket, udata);
2458 
2459 	if (zone->uz_flags & UMA_ZONE_NUMA) {
2460 		domain = PCPU_GET(domain);
2461 		if (VM_DOMAIN_EMPTY(domain))
2462 			domain = UMA_ANYDOMAIN;
2463 	} else
2464 		domain = UMA_ANYDOMAIN;
2465 
2466 	/* Short-circuit for zones without buckets and low memory. */
2467 	if (zone->uz_count == 0 || bucketdisable) {
2468 		ZONE_LOCK(zone);
2469 		goto zalloc_item;
2470 	}
2471 
2472 	/*
2473 	 * The attempt to retrieve the item from the per-CPU cache failed, so
2474 	 * we must go back to the zone.  This requires the zone lock, so we
2475 	 * must drop the critical section, then re-acquire it when we go back
2476 	 * to the cache.  Since the critical section is released, we may be
2477 	 * preempted or migrate.  As such, make sure not to maintain any
2478 	 * thread-local state specific to the cache from prior to releasing
2479 	 * the critical section.
2480 	 */
2481 	lockfail = 0;
2482 	if (ZONE_TRYLOCK(zone) == 0) {
2483 		/* Record contention to size the buckets. */
2484 		ZONE_LOCK(zone);
2485 		lockfail = 1;
2486 	}
2487 	critical_enter();
2488 	cpu = curcpu;
2489 	cache = &zone->uz_cpu[cpu];
2490 
2491 	/* See if we lost the race to fill the cache. */
2492 	if (cache->uc_allocbucket != NULL) {
2493 		ZONE_UNLOCK(zone);
2494 		goto zalloc_start;
2495 	}
2496 
2497 	/*
2498 	 * Check the zone's cache of buckets.
2499 	 */
2500 	if (domain == UMA_ANYDOMAIN)
2501 		zdom = &zone->uz_domain[0];
2502 	else
2503 		zdom = &zone->uz_domain[domain];
2504 	if ((bucket = zone_try_fetch_bucket(zone, zdom, true)) != NULL) {
2505 		KASSERT(bucket->ub_cnt != 0,
2506 		    ("uma_zalloc_arg: Returning an empty bucket."));
2507 		cache->uc_allocbucket = bucket;
2508 		ZONE_UNLOCK(zone);
2509 		goto zalloc_start;
2510 	}
2511 	/* We are no longer associated with this CPU. */
2512 	critical_exit();
2513 
2514 	/*
2515 	 * We bump the uz count when the cache size is insufficient to
2516 	 * handle the working set.
2517 	 */
2518 	if (lockfail && zone->uz_count < zone->uz_count_max)
2519 		zone->uz_count++;
2520 
2521 	if (zone->uz_max_items > 0) {
2522 		if (zone->uz_items >= zone->uz_max_items)
2523 			goto zalloc_item;
2524 		maxbucket = MIN(zone->uz_count,
2525 		    zone->uz_max_items - zone->uz_items);
2526 		zone->uz_items += maxbucket;
2527 	} else
2528 		maxbucket = zone->uz_count;
2529 	ZONE_UNLOCK(zone);
2530 
2531 	/*
2532 	 * Now let's just fill a bucket and put it on the free list.  If that
2533 	 * works we'll restart the allocation from the beginning, and it
2534 	 * will use the just-filled bucket.
2535 	 */
2536 	bucket = zone_alloc_bucket(zone, udata, domain, flags, maxbucket);
2537 	CTR3(KTR_UMA, "uma_zalloc: zone %s(%p) bucket zone returned %p",
2538 	    zone->uz_name, zone, bucket);
2539 	ZONE_LOCK(zone);
2540 	if (bucket != NULL) {
2541 		if (zone->uz_max_items > 0 && bucket->ub_cnt < maxbucket) {
2542 			MPASS(zone->uz_items >= maxbucket - bucket->ub_cnt);
2543 			zone->uz_items -= maxbucket - bucket->ub_cnt;
2544 			if (zone->uz_sleepers > 0 &&
2545 			    zone->uz_items < zone->uz_max_items)
2546 				wakeup_one(zone);
2547 		}
2548 		critical_enter();
2549 		cpu = curcpu;
2550 		cache = &zone->uz_cpu[cpu];
2551 
2552 		/*
2553 		 * See if we lost the race or were migrated.  Cache the
2554 		 * initialized bucket to make this less likely or claim
2555 		 * the memory directly.
2556 		 */
2557 		if (cache->uc_allocbucket == NULL &&
2558 		    ((zone->uz_flags & UMA_ZONE_NUMA) == 0 ||
2559 		    domain == PCPU_GET(domain))) {
2560 			cache->uc_allocbucket = bucket;
2561 			zdom->uzd_imax += bucket->ub_cnt;
2562 		} else if (zone->uz_bkt_count >= zone->uz_bkt_max) {
2563 			critical_exit();
2564 			ZONE_UNLOCK(zone);
2565 			bucket_drain(zone, bucket);
2566 			bucket_free(zone, bucket, udata);
2567 			goto zalloc_restart;
2568 		} else
2569 			zone_put_bucket(zone, zdom, bucket, false);
2570 		ZONE_UNLOCK(zone);
2571 		goto zalloc_start;
2572 	} else if (zone->uz_max_items > 0) {
2573 		zone->uz_items -= maxbucket;
2574 		if (zone->uz_sleepers > 0 &&
2575 		    zone->uz_items + 1 < zone->uz_max_items)
2576 			wakeup_one(zone);
2577 	}
2578 
2579 	/*
2580 	 * We may not be able to get a bucket so return an actual item.
2581 	 */
2582 zalloc_item:
2583 	item = zone_alloc_item_locked(zone, udata, domain, flags);
2584 
2585 	return (item);
2586 }
2587 
2588 void *
2589 uma_zalloc_domain(uma_zone_t zone, void *udata, int domain, int flags)
2590 {
2591 
2592 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
2593 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
2594 
2595 	/* This is the fast path allocation */
2596 	CTR5(KTR_UMA,
2597 	    "uma_zalloc_domain thread %x zone %s(%p) domain %d flags %d",
2598 	    curthread, zone->uz_name, zone, domain, flags);
2599 
2600 	if (flags & M_WAITOK) {
2601 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2602 		    "uma_zalloc_domain: zone \"%s\"", zone->uz_name);
2603 	}
2604 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
2605 	    ("uma_zalloc_domain: called with spinlock or critical section held"));
2606 
2607 	return (zone_alloc_item(zone, udata, domain, flags));
2608 }
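
/*
 * Illustrative example (not part of this file): allocating an item backed
 * by memory from a specific NUMA domain, here the domain of the current
 * CPU.  "foo_zone" is hypothetical.
 *
 *	fp = uma_zalloc_domain(foo_zone, NULL, PCPU_GET(domain), M_WAITOK);
 */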
2609 
2610 /*
2611  * Find a slab with some space.  Prefer slabs that are partially used over
2612  * those that are completely free.  This helps to reduce fragmentation.
2613  *
2614  * If 'rr' is true, search all domains starting from 'domain'.  Otherwise
2615  * check only 'domain'.
2616  */
2617 static uma_slab_t
2618 keg_first_slab(uma_keg_t keg, int domain, bool rr)
2619 {
2620 	uma_domain_t dom;
2621 	uma_slab_t slab;
2622 	int start;
2623 
2624 	KASSERT(domain >= 0 && domain < vm_ndomains,
2625 	    ("keg_first_slab: domain %d out of range", domain));
2626 	KEG_LOCK_ASSERT(keg);
2627 
2628 	slab = NULL;
2629 	start = domain;
2630 	do {
2631 		dom = &keg->uk_domain[domain];
2632 		if (!LIST_EMPTY(&dom->ud_part_slab))
2633 			return (LIST_FIRST(&dom->ud_part_slab));
2634 		if (!LIST_EMPTY(&dom->ud_free_slab)) {
2635 			slab = LIST_FIRST(&dom->ud_free_slab);
2636 			LIST_REMOVE(slab, us_link);
2637 			LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
2638 			return (slab);
2639 		}
2640 		if (rr)
2641 			domain = (domain + 1) % vm_ndomains;
2642 	} while (domain != start);
2643 
2644 	return (NULL);
2645 }
2646 
2647 static uma_slab_t
2648 keg_fetch_free_slab(uma_keg_t keg, int domain, bool rr, int flags)
2649 {
2650 	uint32_t reserve;
2651 
2652 	KEG_LOCK_ASSERT(keg);
2653 
2654 	reserve = (flags & M_USE_RESERVE) != 0 ? 0 : keg->uk_reserve;
2655 	if (keg->uk_free <= reserve)
2656 		return (NULL);
2657 	return (keg_first_slab(keg, domain, rr));
2658 }
2659 
2660 static uma_slab_t
2661 keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int rdomain, const int flags)
2662 {
2663 	struct vm_domainset_iter di;
2664 	uma_domain_t dom;
2665 	uma_slab_t slab;
2666 	int aflags, domain;
2667 	bool rr;
2668 
2669 restart:
2670 	KEG_LOCK_ASSERT(keg);
2671 
2672 	/*
2673 	 * Use the keg's policy if upper layers haven't already specified a
2674 	 * domain (as happens with first-touch zones).
2675 	 *
2676 	 * To avoid races we run the iterator with the keg lock held, but that
2677 	 * means that we cannot allow the vm_domainset layer to sleep.  Thus,
2678 	 * clear M_WAITOK and handle low memory conditions locally.
2679 	 */
2680 	rr = rdomain == UMA_ANYDOMAIN;
2681 	if (rr) {
2682 		aflags = (flags & ~M_WAITOK) | M_NOWAIT;
2683 		vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain,
2684 		    &aflags);
2685 	} else {
2686 		aflags = flags;
2687 		domain = rdomain;
2688 	}
2689 
2690 	for (;;) {
2691 		slab = keg_fetch_free_slab(keg, domain, rr, flags);
2692 		if (slab != NULL) {
2693 			MPASS(slab->us_keg == keg);
2694 			return (slab);
2695 		}
2696 
2697 		/*
2698 		 * M_NOVM means don't ask at all!
2699 		 */
2700 		if (flags & M_NOVM)
2701 			break;
2702 
2703 		KASSERT(zone->uz_max_items == 0 ||
2704 		    zone->uz_items <= zone->uz_max_items,
2705 		    ("%s: zone %p overflow", __func__, zone));
2706 
2707 		slab = keg_alloc_slab(keg, zone, domain, flags, aflags);
2708 		/*
2709 		 * If we got a slab here it's safe to mark it partially used
2710 		 * and return.  We assume that the caller is going to remove
2711 		 * at least one item.
2712 		 */
2713 		if (slab) {
2714 			MPASS(slab->us_keg == keg);
2715 			dom = &keg->uk_domain[slab->us_domain];
2716 			LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
2717 			return (slab);
2718 		}
2719 		KEG_LOCK(keg);
2720 		if (rr && vm_domainset_iter_policy(&di, &domain) != 0) {
2721 			if ((flags & M_WAITOK) != 0) {
2722 				KEG_UNLOCK(keg);
2723 				vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask);
2724 				KEG_LOCK(keg);
2725 				goto restart;
2726 			}
2727 			break;
2728 		}
2729 	}
2730 
2731 	/*
2732 	 * We might not have been able to get a slab but another cpu
2733 	 * could have while we were unlocked.  Check again before we
2734 	 * fail.
2735 	 */
2736 	if ((slab = keg_fetch_free_slab(keg, domain, rr, flags)) != NULL) {
2737 		MPASS(slab->us_keg == keg);
2738 		return (slab);
2739 	}
2740 	return (NULL);
2741 }
2742 
2743 static uma_slab_t
2744 zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int domain, int flags)
2745 {
2746 	uma_slab_t slab;
2747 
2748 	if (keg == NULL) {
2749 		keg = zone->uz_keg;
2750 		KEG_LOCK(keg);
2751 	}
2752 
2753 	for (;;) {
2754 		slab = keg_fetch_slab(keg, zone, domain, flags);
2755 		if (slab)
2756 			return (slab);
2757 		if (flags & (M_NOWAIT | M_NOVM))
2758 			break;
2759 	}
2760 	KEG_UNLOCK(keg);
2761 	return (NULL);
2762 }
2763 
2764 static void *
2765 slab_alloc_item(uma_keg_t keg, uma_slab_t slab)
2766 {
2767 	uma_domain_t dom;
2768 	void *item;
2769 	uint8_t freei;
2770 
2771 	MPASS(keg == slab->us_keg);
2772 	KEG_LOCK_ASSERT(keg);
2773 
2774 	freei = BIT_FFS(SLAB_SETSIZE, &slab->us_free) - 1;
2775 	BIT_CLR(SLAB_SETSIZE, freei, &slab->us_free);
2776 	item = slab->us_data + (keg->uk_rsize * freei);
2777 	slab->us_freecount--;
2778 	keg->uk_free--;
2779 
2780 	/* Move this slab to the full list */
2781 	if (slab->us_freecount == 0) {
2782 		LIST_REMOVE(slab, us_link);
2783 		dom = &keg->uk_domain[slab->us_domain];
2784 		LIST_INSERT_HEAD(&dom->ud_full_slab, slab, us_link);
2785 	}
2786 
2787 	return (item);
2788 }
2789 
2790 static int
2791 zone_import(uma_zone_t zone, void **bucket, int max, int domain, int flags)
2792 {
2793 	uma_slab_t slab;
2794 	uma_keg_t keg;
2795 #ifdef NUMA
2796 	int stripe;
2797 #endif
2798 	int i;
2799 
2800 	slab = NULL;
2801 	keg = NULL;
2802 	/* Try to keep the buckets totally full */
2803 	for (i = 0; i < max; ) {
2804 		if ((slab = zone_fetch_slab(zone, keg, domain, flags)) == NULL)
2805 			break;
2806 		keg = slab->us_keg;
2807 #ifdef NUMA
2808 		stripe = howmany(max, vm_ndomains);
2809 #endif
2810 		while (slab->us_freecount && i < max) {
2811 			bucket[i++] = slab_alloc_item(keg, slab);
2812 			if (keg->uk_free <= keg->uk_reserve)
2813 				break;
2814 #ifdef NUMA
2815 			/*
2816 			 * If the zone is striped we pick a new slab for every
2817 			 * N allocations.  Eliminating this conditional will
2818 			 * instead pick a new domain for each bucket rather
2819 			 * than stripe within each bucket.  The current option
2820 			 * produces more fragmentation and requires more cpu
2821 			 * time but yields better distribution.
2822 			 */
2823 			if ((zone->uz_flags & UMA_ZONE_NUMA) == 0 &&
2824 			    vm_ndomains > 1 && --stripe == 0)
2825 				break;
2826 #endif
2827 		}
2828 		/* Don't block if we allocated any successfully. */
2829 		flags &= ~M_WAITOK;
2830 		flags |= M_NOWAIT;
2831 	}
2832 	if (slab != NULL)
2833 		KEG_UNLOCK(keg);
2834 
2835 	return (i);
2836 }
2837 
2838 static uma_bucket_t
2839 zone_alloc_bucket(uma_zone_t zone, void *udata, int domain, int flags, int max)
2840 {
2841 	uma_bucket_t bucket;
2842 
2843 	CTR1(KTR_UMA, "zone_alloc_bucket: domain %d", domain);
2844 
2845 	/* Don't wait for buckets, preserve caller's NOVM setting. */
2846 	bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM));
2847 	if (bucket == NULL)
2848 		return (NULL);
2849 
2850 	bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket,
2851 	    MIN(max, bucket->ub_entries), domain, flags);
2852 
2853 	/*
2854 	 * Initialize the memory if necessary.
2855 	 */
2856 	if (bucket->ub_cnt != 0 && zone->uz_init != NULL) {
2857 		int i;
2858 
2859 		for (i = 0; i < bucket->ub_cnt; i++)
2860 			if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size,
2861 			    flags) != 0)
2862 				break;
2863 		/*
2864 		 * If we couldn't initialize the whole bucket, put the
2865 		 * rest back onto the freelist.
2866 		 */
2867 		if (i != bucket->ub_cnt) {
2868 			zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i],
2869 			    bucket->ub_cnt - i);
2870 #ifdef INVARIANTS
2871 			bzero(&bucket->ub_bucket[i],
2872 			    sizeof(void *) * (bucket->ub_cnt - i));
2873 #endif
2874 			bucket->ub_cnt = i;
2875 		}
2876 	}
2877 
2878 	if (bucket->ub_cnt == 0) {
2879 		bucket_free(zone, bucket, udata);
2880 		counter_u64_add(zone->uz_fails, 1);
2881 		return (NULL);
2882 	}
2883 
2884 	return (bucket);
2885 }
2886 
2887 /*
2888  * Allocates a single item from a zone.
2889  *
2890  * Arguments
2891  *	zone   The zone to alloc for.
2892  *	udata  The data to be passed to the constructor.
2893  *	domain The domain to allocate from or UMA_ANYDOMAIN.
2894  *	flags  M_WAITOK, M_NOWAIT, M_ZERO.
2895  *
2896  * Returns
2897  *	NULL if there is no memory and M_NOWAIT is set
2898  *	An item if successful
2899  */
2900 
2901 static void *
2902 zone_alloc_item(uma_zone_t zone, void *udata, int domain, int flags)
2903 {
2904 
2905 	ZONE_LOCK(zone);
2906 	return (zone_alloc_item_locked(zone, udata, domain, flags));
2907 }
2908 
2909 /*
2910  * Returns with zone unlocked.
2911  */
2912 static void *
2913 zone_alloc_item_locked(uma_zone_t zone, void *udata, int domain, int flags)
2914 {
2915 	void *item;
2916 #ifdef INVARIANTS
2917 	bool skipdbg;
2918 #endif
2919 
2920 	ZONE_LOCK_ASSERT(zone);
2921 
2922 	if (zone->uz_max_items > 0) {
2923 		if (zone->uz_items >= zone->uz_max_items) {
2924 			zone_log_warning(zone);
2925 			zone_maxaction(zone);
2926 			if (flags & M_NOWAIT) {
2927 				ZONE_UNLOCK(zone);
2928 				return (NULL);
2929 			}
2930 			zone->uz_sleeps++;
2931 			zone->uz_sleepers++;
2932 			while (zone->uz_items >= zone->uz_max_items)
2933 				mtx_sleep(zone, zone->uz_lockptr, PVM,
2934 				    "zonelimit", 0);
2935 			zone->uz_sleepers--;
2936 			if (zone->uz_sleepers > 0 &&
2937 			    zone->uz_items + 1 < zone->uz_max_items)
2938 				wakeup_one(zone);
2939 		}
2940 		zone->uz_items++;
2941 	}
2942 	ZONE_UNLOCK(zone);
2943 
2944 	if (domain != UMA_ANYDOMAIN) {
2945 		/* avoid allocs targeting empty domains */
2946 		if (VM_DOMAIN_EMPTY(domain))
2947 			domain = UMA_ANYDOMAIN;
2948 	}
2949 	if (zone->uz_import(zone->uz_arg, &item, 1, domain, flags) != 1)
2950 		goto fail;
2951 
2952 #ifdef INVARIANTS
2953 	skipdbg = uma_dbg_zskip(zone, item);
2954 #endif
2955 	/*
2956 	 * We have to call both the zone's init (not the keg's init)
2957 	 * and the zone's ctor.  This is because the item is going from
2958 	 * a keg slab directly to the user, and the user is expecting it
2959 	 * to be both zone-init'd as well as zone-ctor'd.
2960 	 */
2961 	if (zone->uz_init != NULL) {
2962 		if (zone->uz_init(item, zone->uz_size, flags) != 0) {
2963 			zone_free_item(zone, item, udata, SKIP_FINI | SKIP_CNT);
2964 			goto fail;
2965 		}
2966 	}
2967 	if (zone->uz_ctor != NULL &&
2968 #ifdef INVARIANTS
2969 	    (!skipdbg || zone->uz_ctor != trash_ctor ||
2970 	    zone->uz_dtor != trash_dtor) &&
2971 #endif
2972 	    zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
2973 		zone_free_item(zone, item, udata, SKIP_DTOR | SKIP_CNT);
2974 		goto fail;
2975 	}
2976 #ifdef INVARIANTS
2977 	if (!skipdbg)
2978 		uma_dbg_alloc(zone, NULL, item);
2979 #endif
2980 	if (flags & M_ZERO)
2981 		uma_zero_item(item, zone);
2982 
2983 	counter_u64_add(zone->uz_allocs, 1);
2984 	CTR3(KTR_UMA, "zone_alloc_item item %p from %s(%p)", item,
2985 	    zone->uz_name, zone);
2986 
2987 	return (item);
2988 
2989 fail:
2990 	if (zone->uz_max_items > 0) {
2991 		ZONE_LOCK(zone);
2992 		zone->uz_items--;
2993 		ZONE_UNLOCK(zone);
2994 	}
2995 	counter_u64_add(zone->uz_fails, 1);
2996 	CTR2(KTR_UMA, "zone_alloc_item failed from %s(%p)",
2997 	    zone->uz_name, zone);
2998 	return (NULL);
2999 }
3000 
3001 /* See uma.h */
3002 void
3003 uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
3004 {
3005 	uma_cache_t cache;
3006 	uma_bucket_t bucket;
3007 	uma_zone_domain_t zdom;
3008 	int cpu, domain;
3009 	bool lockfail;
3010 #ifdef INVARIANTS
3011 	bool skipdbg;
3012 #endif
3013 
3014 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
3015 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
3016 
3017 	CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread,
3018 	    zone->uz_name);
3019 
3020 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
3021 	    ("uma_zfree_arg: called with spinlock or critical section held"));
3022 
3023 	/* uma_zfree(..., NULL) does nothing, to match free(9). */
3024 	if (item == NULL)
3025 		return;
3026 #ifdef DEBUG_MEMGUARD
3027 	if (is_memguard_addr(item)) {
3028 		if (zone->uz_dtor != NULL)
3029 			zone->uz_dtor(item, zone->uz_size, udata);
3030 		if (zone->uz_fini != NULL)
3031 			zone->uz_fini(item, zone->uz_size);
3032 		memguard_free(item);
3033 		return;
3034 	}
3035 #endif
3036 #ifdef INVARIANTS
3037 	skipdbg = uma_dbg_zskip(zone, item);
3038 	if (skipdbg == false) {
3039 		if (zone->uz_flags & UMA_ZONE_MALLOC)
3040 			uma_dbg_free(zone, udata, item);
3041 		else
3042 			uma_dbg_free(zone, NULL, item);
3043 	}
3044 	if (zone->uz_dtor != NULL && (!skipdbg ||
3045 	    zone->uz_dtor != trash_dtor || zone->uz_ctor != trash_ctor))
3046 #else
3047 	if (zone->uz_dtor != NULL)
3048 #endif
3049 		zone->uz_dtor(item, zone->uz_size, udata);
3050 
3051 	/*
3052 	 * The race here is acceptable.  If we miss it we'll just have to wait
3053 	 * a little longer for the limits to be reset.
3054 	 */
3055 	if (zone->uz_sleepers > 0)
3056 		goto zfree_item;
3057 
3058 	/*
3059 	 * If possible, free to the per-CPU cache.  There are two
3060 	 * requirements for safe access to the per-CPU cache: (1) the thread
3061 	 * accessing the cache must not be preempted or yield during access,
3062 	 * and (2) the thread must not migrate CPUs without switching which
3063 	 * cache it accesses.  We rely on a critical section to prevent
3064 	 * preemption and migration.  We release the critical section in
3065 	 * order to acquire the zone mutex if we are unable to free to the
3066 	 * current cache; when we re-acquire the critical section, we must
3067 	 * detect and handle migration if it has occurred.
3068 	 */
3069 zfree_restart:
3070 	critical_enter();
3071 	cpu = curcpu;
3072 	cache = &zone->uz_cpu[cpu];
3073 
3074 zfree_start:
3075 	/*
3076 	 * Try to free into the allocbucket first to give LIFO ordering
3077 	 * for cache-hot data structures.  Spill over into the freebucket
3078 	 * if necessary.  Alloc will swap them if one runs dry.
3079 	 */
3080 	bucket = cache->uc_allocbucket;
3081 	if (bucket == NULL || bucket->ub_cnt >= bucket->ub_entries)
3082 		bucket = cache->uc_freebucket;
3083 	if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
3084 		KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL,
3085 		    ("uma_zfree: Freeing to non free bucket index."));
3086 		bucket->ub_bucket[bucket->ub_cnt] = item;
3087 		bucket->ub_cnt++;
3088 		cache->uc_frees++;
3089 		critical_exit();
3090 		return;
3091 	}
3092 
3093 	/*
3094 	 * We must go back to the zone, which requires acquiring the zone lock,
3095 	 * which in turn means we must release and re-acquire the critical
3096 	 * section.  Since the critical section is released, we may be
3097 	 * preempted or migrate.  As such, make sure not to maintain any
3098 	 * thread-local state specific to the cache from prior to releasing
3099 	 * the critical section.
3100 	 */
3101 	critical_exit();
3102 	if (zone->uz_count == 0 || bucketdisable)
3103 		goto zfree_item;
3104 
3105 	lockfail = false;
3106 	if (ZONE_TRYLOCK(zone) == 0) {
3107 		/* Record contention to size the buckets. */
3108 		ZONE_LOCK(zone);
3109 		lockfail = true;
3110 	}
3111 	critical_enter();
3112 	cpu = curcpu;
3113 	cache = &zone->uz_cpu[cpu];
3114 
3115 	bucket = cache->uc_freebucket;
3116 	if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
3117 		ZONE_UNLOCK(zone);
3118 		goto zfree_start;
3119 	}
3120 	cache->uc_freebucket = NULL;
3121 	/* We are no longer associated with this CPU. */
3122 	critical_exit();
3123 
3124 	if ((zone->uz_flags & UMA_ZONE_NUMA) != 0) {
3125 		domain = PCPU_GET(domain);
3126 		if (VM_DOMAIN_EMPTY(domain))
3127 			domain = UMA_ANYDOMAIN;
3128 	} else
3129 		domain = 0;
3130 	zdom = &zone->uz_domain[0];
3131 
3132 	/* Can we throw this on the zone full list? */
3133 	if (bucket != NULL) {
3134 		CTR3(KTR_UMA,
3135 		    "uma_zfree: zone %s(%p) putting bucket %p on free list",
3136 		    zone->uz_name, zone, bucket);
3137 		/* ub_cnt is pointing to the last free item */
3138 		KASSERT(bucket->ub_cnt == bucket->ub_entries,
3139 		    ("uma_zfree: Attempting to insert a not-full bucket onto the full list.\n"));
3140 		if (zone->uz_bkt_count >= zone->uz_bkt_max) {
3141 			ZONE_UNLOCK(zone);
3142 			bucket_drain(zone, bucket);
3143 			bucket_free(zone, bucket, udata);
3144 			goto zfree_restart;
3145 		} else
3146 			zone_put_bucket(zone, zdom, bucket, true);
3147 	}
3148 
3149 	/*
3150 	 * We bump the uz count when the cache size is insufficient to
3151 	 * handle the working set.
3152 	 */
3153 	if (lockfail && zone->uz_count < zone->uz_count_max)
3154 		zone->uz_count++;
3155 	ZONE_UNLOCK(zone);
3156 
3157 	bucket = bucket_alloc(zone, udata, M_NOWAIT);
3158 	CTR3(KTR_UMA, "uma_zfree: zone %s(%p) allocated bucket %p",
3159 	    zone->uz_name, zone, bucket);
3160 	if (bucket) {
3161 		critical_enter();
3162 		cpu = curcpu;
3163 		cache = &zone->uz_cpu[cpu];
3164 		if (cache->uc_freebucket == NULL &&
3165 		    ((zone->uz_flags & UMA_ZONE_NUMA) == 0 ||
3166 		    domain == PCPU_GET(domain))) {
3167 			cache->uc_freebucket = bucket;
3168 			goto zfree_start;
3169 		}
3170 		/*
3171 		 * We lost the race, start over.  We have to drop our
3172 		 * critical section to free the bucket.
3173 		 */
3174 		critical_exit();
3175 		bucket_free(zone, bucket, udata);
3176 		goto zfree_restart;
3177 	}
3178 
3179 	/*
3180 	 * If nothing else caught this, we'll just do an internal free.
3181 	 */
3182 zfree_item:
3183 	zone_free_item(zone, item, udata, SKIP_DTOR);
3184 }
3185 
3186 void
3187 uma_zfree_domain(uma_zone_t zone, void *item, void *udata)
3188 {
3189 
3190 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
3191 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
3192 
3193 	CTR2(KTR_UMA, "uma_zfree_domain thread %x zone %s", curthread,
3194 	    zone->uz_name);
3195 
3196 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
3197 	    ("uma_zfree_domain: called with spinlock or critical section held"));
3198 
3199 	/* uma_zfree(..., NULL) does nothing, to match free(9). */
3200 	if (item == NULL)
3201 		return;
3202 	zone_free_item(zone, item, udata, SKIP_NONE);
3203 }
3204 
3205 static void
3206 slab_free_item(uma_zone_t zone, uma_slab_t slab, void *item)
3207 {
3208 	uma_keg_t keg;
3209 	uma_domain_t dom;
3210 	uint8_t freei;
3211 
3212 	keg = zone->uz_keg;
3213 	MPASS(zone->uz_lockptr == &keg->uk_lock);
3214 	KEG_LOCK_ASSERT(keg);
3215 	MPASS(keg == slab->us_keg);
3216 
3217 	dom = &keg->uk_domain[slab->us_domain];
3218 
3219 	/* Do we need to remove from any lists? */
3220 	if (slab->us_freecount+1 == keg->uk_ipers) {
3221 		LIST_REMOVE(slab, us_link);
3222 		LIST_INSERT_HEAD(&dom->ud_free_slab, slab, us_link);
3223 	} else if (slab->us_freecount == 0) {
3224 		LIST_REMOVE(slab, us_link);
3225 		LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
3226 	}
3227 
3228 	/* Slab management. */
3229 	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
3230 	BIT_SET(SLAB_SETSIZE, freei, &slab->us_free);
3231 	slab->us_freecount++;
3232 
3233 	/* Keg statistics. */
3234 	keg->uk_free++;
3235 }
3236 
3237 static void
3238 zone_release(uma_zone_t zone, void **bucket, int cnt)
3239 {
3240 	void *item;
3241 	uma_slab_t slab;
3242 	uma_keg_t keg;
3243 	uint8_t *mem;
3244 	int i;
3245 
3246 	keg = zone->uz_keg;
3247 	KEG_LOCK(keg);
3248 	for (i = 0; i < cnt; i++) {
3249 		item = bucket[i];
3250 		if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) {
3251 			mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
3252 			if (zone->uz_flags & UMA_ZONE_HASH) {
3253 				slab = hash_sfind(&keg->uk_hash, mem);
3254 			} else {
3255 				mem += keg->uk_pgoff;
3256 				slab = (uma_slab_t)mem;
3257 			}
3258 		} else {
3259 			slab = vtoslab((vm_offset_t)item);
3260 			MPASS(slab->us_keg == keg);
3261 		}
3262 		slab_free_item(zone, slab, item);
3263 	}
3264 	KEG_UNLOCK(keg);
3265 }
3266 
3267 /*
3268  * Frees a single item to any zone.
3269  *
3270  * Arguments:
3271  *	zone   The zone to free to
3272  *	item   The item we're freeing
3273  *	udata  User supplied data for the dtor
3274  *	skip   Skip dtors and finis
3275  */
3276 static void
3277 zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip)
3278 {
3279 #ifdef INVARIANTS
3280 	bool skipdbg;
3281 
3282 	skipdbg = uma_dbg_zskip(zone, item);
3283 	if (skip == SKIP_NONE && !skipdbg) {
3284 		if (zone->uz_flags & UMA_ZONE_MALLOC)
3285 			uma_dbg_free(zone, udata, item);
3286 		else
3287 			uma_dbg_free(zone, NULL, item);
3288 	}
3289 
3290 	if (skip < SKIP_DTOR && zone->uz_dtor != NULL &&
3291 	    (!skipdbg || zone->uz_dtor != trash_dtor ||
3292 	    zone->uz_ctor != trash_ctor))
3293 #else
3294 	if (skip < SKIP_DTOR && zone->uz_dtor != NULL)
3295 #endif
3296 		zone->uz_dtor(item, zone->uz_size, udata);
3297 
3298 	if (skip < SKIP_FINI && zone->uz_fini)
3299 		zone->uz_fini(item, zone->uz_size);
3300 
3301 	zone->uz_release(zone->uz_arg, &item, 1);
3302 
3303 	if (skip & SKIP_CNT)
3304 		return;
3305 
3306 	counter_u64_add(zone->uz_frees, 1);
3307 
3308 	if (zone->uz_max_items > 0) {
3309 		ZONE_LOCK(zone);
3310 		zone->uz_items--;
3311 		if (zone->uz_sleepers > 0 &&
3312 		    zone->uz_items < zone->uz_max_items)
3313 			wakeup_one(zone);
3314 		ZONE_UNLOCK(zone);
3315 	}
3316 }
3317 
3318 /* See uma.h */
3319 int
3320 uma_zone_set_max(uma_zone_t zone, int nitems)
3321 {
3322 	struct uma_bucket_zone *ubz;
3323 
3324 	/*
3325 	 * If the limit is very low we may need to limit how
3326 	 * many items are allowed in CPU caches.
3327 	 */
3328 	ubz = &bucket_zones[0];
3329 	for (; ubz->ubz_entries != 0; ubz++)
3330 		if (ubz->ubz_entries * 2 * mp_ncpus > nitems)
3331 			break;
3332 	if (ubz == &bucket_zones[0])
3333 		nitems = ubz->ubz_entries * 2 * mp_ncpus;
3334 	else
3335 		ubz--;
3336 
3337 	ZONE_LOCK(zone);
3338 	zone->uz_count_max = zone->uz_count = ubz->ubz_entries;
3339 	if (zone->uz_count_min > zone->uz_count_max)
3340 		zone->uz_count_min = zone->uz_count_max;
3341 	zone->uz_max_items = nitems;
3342 	ZONE_UNLOCK(zone);
3343 
3344 	return (nitems);
3345 }
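
/*
 * Illustrative example (not part of this file): capping a zone at a fixed
 * number of items.  Once the cap is reached, M_WAITOK allocations sleep in
 * zone_alloc_item_locked() and M_NOWAIT allocations fail.  "foo_zone" is
 * hypothetical; the value actually applied is the return value, which may
 * be raised for very small limits to accommodate the per-CPU buckets.
 *
 *	nitems = uma_zone_set_max(foo_zone, 1024);
 *	fp = uma_zalloc(foo_zone, M_NOWAIT);	(NULL once the cap is reached)
 */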
3346 
3347 /* See uma.h */
3348 int
3349 uma_zone_set_maxcache(uma_zone_t zone, int nitems)
3350 {
3351 
3352 	ZONE_LOCK(zone);
3353 	zone->uz_bkt_max = nitems;
3354 	ZONE_UNLOCK(zone);
3355 
3356 	return (nitems);
3357 }
3358 
3359 /* See uma.h */
3360 int
3361 uma_zone_get_max(uma_zone_t zone)
3362 {
3363 	int nitems;
3364 
3365 	ZONE_LOCK(zone);
3366 	nitems = zone->uz_max_items;
3367 	ZONE_UNLOCK(zone);
3368 
3369 	return (nitems);
3370 }
3371 
3372 /* See uma.h */
3373 void
3374 uma_zone_set_warning(uma_zone_t zone, const char *warning)
3375 {
3376 
3377 	ZONE_LOCK(zone);
3378 	zone->uz_warning = warning;
3379 	ZONE_UNLOCK(zone);
3380 }
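
/*
 * Illustrative example (not part of this file): attaching a rate-limited
 * warning that zone_log_warning() prints when the zone hits its item
 * limit.  "foo_zone" and the message are hypothetical.
 *
 *	uma_zone_set_max(foo_zone, maxfoo);
 *	uma_zone_set_warning(foo_zone, "consider tuning kern.maxfoo");
 */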
3381 
3382 /* See uma.h */
3383 void
3384 uma_zone_set_maxaction(uma_zone_t zone, uma_maxaction_t maxaction)
3385 {
3386 
3387 	ZONE_LOCK(zone);
3388 	TASK_INIT(&zone->uz_maxaction, 0, (task_fn_t *)maxaction, zone);
3389 	ZONE_UNLOCK(zone);
3390 }
3391 
3392 /* See uma.h */
3393 int
3394 uma_zone_get_cur(uma_zone_t zone)
3395 {
3396 	int64_t nitems;
3397 	u_int i;
3398 
3399 	ZONE_LOCK(zone);
3400 	nitems = counter_u64_fetch(zone->uz_allocs) -
3401 	    counter_u64_fetch(zone->uz_frees);
3402 	CPU_FOREACH(i) {
3403 		/*
3404 		 * See the comment in uma_vm_zone_stats() regarding the
3405 		 * safety of accessing the per-cpu caches. With the zone lock
3406 		 * held, it is safe, but can potentially result in stale data.
3407 		 */
3408 		nitems += zone->uz_cpu[i].uc_allocs -
3409 		    zone->uz_cpu[i].uc_frees;
3410 	}
3411 	ZONE_UNLOCK(zone);
3412 
3413 	return (nitems < 0 ? 0 : nitems);
3414 }
3415 
3416 /* See uma.h */
3417 void
3418 uma_zone_set_init(uma_zone_t zone, uma_init uminit)
3419 {
3420 	uma_keg_t keg;
3421 
3422 	KEG_GET(zone, keg);
3423 	KEG_LOCK(keg);
3424 	KASSERT(keg->uk_pages == 0,
3425 	    ("uma_zone_set_init on non-empty keg"));
3426 	keg->uk_init = uminit;
3427 	KEG_UNLOCK(keg);
3428 }
3429 
3430 /* See uma.h */
3431 void
3432 uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
3433 {
3434 	uma_keg_t keg;
3435 
3436 	KEG_GET(zone, keg);
3437 	KEG_LOCK(keg);
3438 	KASSERT(keg->uk_pages == 0,
3439 	    ("uma_zone_set_fini on non-empty keg"));
3440 	keg->uk_fini = fini;
3441 	KEG_UNLOCK(keg);
3442 }
3443 
3444 /* See uma.h */
3445 void
3446 uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
3447 {
3448 
3449 	ZONE_LOCK(zone);
3450 	KASSERT(zone->uz_keg->uk_pages == 0,
3451 	    ("uma_zone_set_zinit on non-empty keg"));
3452 	zone->uz_init = zinit;
3453 	ZONE_UNLOCK(zone);
3454 }
3455 
3456 /* See uma.h */
3457 void
3458 uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
3459 {
3460 
3461 	ZONE_LOCK(zone);
3462 	KASSERT(zone->uz_keg->uk_pages == 0,
3463 	    ("uma_zone_set_zfini on non-empty keg"));
3464 	zone->uz_fini = zfini;
3465 	ZONE_UNLOCK(zone);
3466 }
3467 
3468 /* See uma.h */
3469 /* XXX uk_freef is not actually used with the zone locked */
3470 void
3471 uma_zone_set_freef(uma_zone_t zone, uma_free freef)
3472 {
3473 	uma_keg_t keg;
3474 
3475 	KEG_GET(zone, keg);
3476 	KASSERT(keg != NULL, ("uma_zone_set_freef: Invalid zone type"));
3477 	KEG_LOCK(keg);
3478 	keg->uk_freef = freef;
3479 	KEG_UNLOCK(keg);
3480 }
3481 
3482 /* See uma.h */
3483 /* XXX uk_allocf is not actually used with the zone locked */
3484 void
3485 uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
3486 {
3487 	uma_keg_t keg;
3488 
3489 	KEG_GET(zone, keg);
3490 	KEG_LOCK(keg);
3491 	keg->uk_allocf = allocf;
3492 	KEG_UNLOCK(keg);
3493 }
3494 
3495 /* See uma.h */
3496 void
3497 uma_zone_reserve(uma_zone_t zone, int items)
3498 {
3499 	uma_keg_t keg;
3500 
3501 	KEG_GET(zone, keg);
3502 	KEG_LOCK(keg);
3503 	keg->uk_reserve = items;
3504 	KEG_UNLOCK(keg);
3505 }
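
/*
 * Illustrative example (not part of this file): keeping a small reserve of
 * items that only M_USE_RESERVE allocations may dip into, typically paired
 * with uma_prealloc() so the reserve is populated up front.  "foo_zone" is
 * hypothetical.
 *
 *	uma_zone_reserve(foo_zone, 32);
 *	uma_prealloc(foo_zone, 32);
 *	...
 *	fp = uma_zalloc(foo_zone, M_NOWAIT | M_USE_RESERVE);
 */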
3506 
3507 /* See uma.h */
3508 int
3509 uma_zone_reserve_kva(uma_zone_t zone, int count)
3510 {
3511 	uma_keg_t keg;
3512 	vm_offset_t kva;
3513 	u_int pages;
3514 
3515 	KEG_GET(zone, keg);
3516 
3517 	pages = count / keg->uk_ipers;
3518 	if (pages * keg->uk_ipers < count)
3519 		pages++;
3520 	pages *= keg->uk_ppera;
3521 
3522 #ifdef UMA_MD_SMALL_ALLOC
3523 	if (keg->uk_ppera > 1) {
3524 #else
3525 	if (1) {
3526 #endif
3527 		kva = kva_alloc((vm_size_t)pages * PAGE_SIZE);
3528 		if (kva == 0)
3529 			return (0);
3530 	} else
3531 		kva = 0;
3532 
3533 	ZONE_LOCK(zone);
3534 	MPASS(keg->uk_kva == 0);
3535 	keg->uk_kva = kva;
3536 	keg->uk_offset = 0;
3537 	zone->uz_max_items = pages * keg->uk_ipers;
3538 #ifdef UMA_MD_SMALL_ALLOC
3539 	keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc;
3540 #else
3541 	keg->uk_allocf = noobj_alloc;
3542 #endif
3543 	keg->uk_flags |= UMA_ZONE_NOFREE;
3544 	ZONE_UNLOCK(zone);
3545 
3546 	return (1);
3547 }
3548 
3549 /* See uma.h */
3550 void
3551 uma_prealloc(uma_zone_t zone, int items)
3552 {
3553 	struct vm_domainset_iter di;
3554 	uma_domain_t dom;
3555 	uma_slab_t slab;
3556 	uma_keg_t keg;
3557 	int aflags, domain, slabs;
3558 
3559 	KEG_GET(zone, keg);
3560 	KEG_LOCK(keg);
3561 	slabs = items / keg->uk_ipers;
3562 	if (slabs * keg->uk_ipers < items)
3563 		slabs++;
3564 	while (slabs-- > 0) {
3565 		aflags = M_NOWAIT;
3566 		vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain,
3567 		    &aflags);
3568 		for (;;) {
3569 			slab = keg_alloc_slab(keg, zone, domain, M_WAITOK,
3570 			    aflags);
3571 			if (slab != NULL) {
3572 				MPASS(slab->us_keg == keg);
3573 				dom = &keg->uk_domain[slab->us_domain];
3574 				LIST_INSERT_HEAD(&dom->ud_free_slab, slab,
3575 				    us_link);
3576 				break;
3577 			}
3578 			KEG_LOCK(keg);
3579 			if (vm_domainset_iter_policy(&di, &domain) != 0) {
3580 				KEG_UNLOCK(keg);
3581 				vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask);
3582 				KEG_LOCK(keg);
3583 			}
3584 		}
3585 	}
3586 	KEG_UNLOCK(keg);
3587 }
3588 
3589 /* See uma.h */
3590 static void
3591 uma_reclaim_locked(bool kmem_danger)
3592 {
3593 
3594 	CTR0(KTR_UMA, "UMA: vm asked us to release pages!");
3595 	sx_assert(&uma_drain_lock, SA_XLOCKED);
3596 	bucket_enable();
3597 	zone_foreach(zone_drain);
3598 	if (vm_page_count_min() || kmem_danger) {
3599 		cache_drain_safe(NULL);
3600 		zone_foreach(zone_drain);
3601 	}
3602 
3603 	/*
3604 	 * Some slabs may have been freed, but this zone was visited early in
3605 	 * the pass above; visit it again so that pages which became empty once
3606 	 * other zones were drained can be freed.  The same goes for buckets.
3607 	 */
3608 	zone_drain(slabzone);
3609 	bucket_zone_drain();
3610 }
3611 
3612 void
3613 uma_reclaim(void)
3614 {
3615 
3616 	sx_xlock(&uma_drain_lock);
3617 	uma_reclaim_locked(false);
3618 	sx_xunlock(&uma_drain_lock);
3619 }
3620 
3621 static volatile int uma_reclaim_needed;
3622 
3623 void
3624 uma_reclaim_wakeup(void)
3625 {
3626 
3627 	if (atomic_fetchadd_int(&uma_reclaim_needed, 1) == 0)
3628 		wakeup(uma_reclaim);
3629 }
3630 
3631 void
3632 uma_reclaim_worker(void *arg __unused)
3633 {
3634 
3635 	for (;;) {
3636 		sx_xlock(&uma_drain_lock);
3637 		while (atomic_load_int(&uma_reclaim_needed) == 0)
3638 			sx_sleep(uma_reclaim, &uma_drain_lock, PVM, "umarcl",
3639 			    hz);
3640 		sx_xunlock(&uma_drain_lock);
3641 		EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_KMEM);
3642 		sx_xlock(&uma_drain_lock);
3643 		uma_reclaim_locked(true);
3644 		atomic_store_int(&uma_reclaim_needed, 0);
3645 		sx_xunlock(&uma_drain_lock);
3646 		/* Don't fire more than once per second. */
3647 		pause("umarclslp", hz);
3648 	}
3649 }
3650 
3651 /* See uma.h */
3652 int
3653 uma_zone_exhausted(uma_zone_t zone)
3654 {
3655 	int full;
3656 
3657 	ZONE_LOCK(zone);
3658 	full = zone->uz_sleepers > 0;
3659 	ZONE_UNLOCK(zone);
3660 	return (full);
3661 }
3662 
3663 int
3664 uma_zone_exhausted_nolock(uma_zone_t zone)
3665 {
3666 	return (zone->uz_sleepers > 0);
3667 }
3668 
3669 void *
3670 uma_large_malloc_domain(vm_size_t size, int domain, int wait)
3671 {
3672 	struct domainset *policy;
3673 	vm_offset_t addr;
3674 	uma_slab_t slab;
3675 
3676 	if (domain != UMA_ANYDOMAIN) {
3677 		/* avoid allocs targeting empty domains */
3678 		if (VM_DOMAIN_EMPTY(domain))
3679 			domain = UMA_ANYDOMAIN;
3680 	}
3681 	slab = zone_alloc_item(slabzone, NULL, domain, wait);
3682 	if (slab == NULL)
3683 		return (NULL);
3684 	policy = (domain == UMA_ANYDOMAIN) ? DOMAINSET_RR() :
3685 	    DOMAINSET_FIXED(domain);
3686 	addr = kmem_malloc_domainset(policy, size, wait);
3687 	if (addr != 0) {
3688 		vsetslab(addr, slab);
3689 		slab->us_data = (void *)addr;
3690 		slab->us_flags = UMA_SLAB_KERNEL | UMA_SLAB_MALLOC;
3691 		slab->us_size = size;
3692 		slab->us_domain = vm_phys_domain(PHYS_TO_VM_PAGE(
3693 		    pmap_kextract(addr)));
3694 		uma_total_inc(size);
3695 	} else {
3696 		zone_free_item(slabzone, slab, NULL, SKIP_NONE);
3697 	}
3698 
3699 	return ((void *)addr);
3700 }
3701 
3702 void *
3703 uma_large_malloc(vm_size_t size, int wait)
3704 {
3705 
3706 	return uma_large_malloc_domain(size, UMA_ANYDOMAIN, wait);
3707 }
3708 
3709 void
3710 uma_large_free(uma_slab_t slab)
3711 {
3712 
3713 	KASSERT((slab->us_flags & UMA_SLAB_KERNEL) != 0,
3714 	    ("uma_large_free:  Memory not allocated with uma_large_malloc."));
3715 	kmem_free((vm_offset_t)slab->us_data, slab->us_size);
3716 	uma_total_dec(slab->us_size);
3717 	zone_free_item(slabzone, slab, NULL, SKIP_NONE);
3718 }
3719 
3720 static void
3721 uma_zero_item(void *item, uma_zone_t zone)
3722 {
3723 
3724 	bzero(item, zone->uz_size);
3725 }
3726 
3727 unsigned long
3728 uma_limit(void)
3729 {
3730 
3731 	return (uma_kmem_limit);
3732 }
3733 
3734 void
3735 uma_set_limit(unsigned long limit)
3736 {
3737 
3738 	uma_kmem_limit = limit;
3739 }
3740 
3741 unsigned long
3742 uma_size(void)
3743 {
3744 
3745 	return (atomic_load_long(&uma_kmem_total));
3746 }
3747 
3748 long
3749 uma_avail(void)
3750 {
3751 
3752 	return (uma_kmem_limit - uma_size());
3753 }
3754 
3755 void
3756 uma_print_stats(void)
3757 {
3758 	zone_foreach(uma_print_zone);
3759 }
3760 
3761 static void
3762 slab_print(uma_slab_t slab)
3763 {
3764 	printf("slab: keg %p, data %p, freecount %d\n",
3765 		slab->us_keg, slab->us_data, slab->us_freecount);
3766 }
3767 
3768 static void
3769 cache_print(uma_cache_t cache)
3770 {
3771 	printf("alloc: %p(%d), free: %p(%d)\n",
3772 		cache->uc_allocbucket,
3773 		cache->uc_allocbucket?cache->uc_allocbucket->ub_cnt:0,
3774 		cache->uc_freebucket,
3775 		cache->uc_freebucket?cache->uc_freebucket->ub_cnt:0);
3776 }
3777 
3778 static void
3779 uma_print_keg(uma_keg_t keg)
3780 {
3781 	uma_domain_t dom;
3782 	uma_slab_t slab;
3783 	int i;
3784 
3785 	printf("keg: %s(%p) size %d(%d) flags %#x ipers %d ppera %d "
3786 	    "out %d free %d\n",
3787 	    keg->uk_name, keg, keg->uk_size, keg->uk_rsize, keg->uk_flags,
3788 	    keg->uk_ipers, keg->uk_ppera,
3789 	    (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free,
3790 	    keg->uk_free);
3791 	for (i = 0; i < vm_ndomains; i++) {
3792 		dom = &keg->uk_domain[i];
3793 		printf("Part slabs:\n");
3794 		LIST_FOREACH(slab, &dom->ud_part_slab, us_link)
3795 			slab_print(slab);
3796 		printf("Free slabs:\n");
3797 		LIST_FOREACH(slab, &dom->ud_free_slab, us_link)
3798 			slab_print(slab);
3799 		printf("Full slabs:\n");
3800 		LIST_FOREACH(slab, &dom->ud_full_slab, us_link)
3801 			slab_print(slab);
3802 	}
3803 }
3804 
3805 void
3806 uma_print_zone(uma_zone_t zone)
3807 {
3808 	uma_cache_t cache;
3809 	int i;
3810 
3811 	printf("zone: %s(%p) size %d maxitems %ju flags %#x\n",
3812 	    zone->uz_name, zone, zone->uz_size, (uintmax_t)zone->uz_max_items,
3813 	    zone->uz_flags);
3814 	if (zone->uz_lockptr != &zone->uz_lock)
3815 		uma_print_keg(zone->uz_keg);
3816 	CPU_FOREACH(i) {
3817 		cache = &zone->uz_cpu[i];
3818 		printf("CPU %d Cache:\n", i);
3819 		cache_print(cache);
3820 	}
3821 }
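
/*
 * Note on the uz_lockptr test above (descriptive only): zones whose
 * uz_lockptr still points at their own uz_lock are cache zones with no
 * keg behind them, so uma_print_keg() is skipped for those; keg-backed
 * zones point uz_lockptr elsewhere and have their keg printed.
 */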
3822 
3823 #ifdef DDB
3824 /*
3825  * Generate statistics across both the zone and its per-CPU caches.  Return
3826  * each requested statistic through the corresponding non-NULL pointer argument.
3827  *
3828  * Note: does not update the zone statistics, as it can't safely clear the
3829  * per-CPU cache statistic.
3830  *
3831  * XXXRW: Following the uc_allocbucket and uc_freebucket pointers here isn't
3832  * safe from off-CPU; we should modify the caches to track this information
3833  * directly so that we don't have to.
3834  */
3835 static void
3836 uma_zone_sumstat(uma_zone_t z, long *cachefreep, uint64_t *allocsp,
3837     uint64_t *freesp, uint64_t *sleepsp)
3838 {
3839 	uma_cache_t cache;
3840 	uint64_t allocs, frees, sleeps;
3841 	int cachefree, cpu;
3842 
3843 	allocs = frees = sleeps = 0;
3844 	cachefree = 0;
3845 	CPU_FOREACH(cpu) {
3846 		cache = &z->uz_cpu[cpu];
3847 		if (cache->uc_allocbucket != NULL)
3848 			cachefree += cache->uc_allocbucket->ub_cnt;
3849 		if (cache->uc_freebucket != NULL)
3850 			cachefree += cache->uc_freebucket->ub_cnt;
3851 		allocs += cache->uc_allocs;
3852 		frees += cache->uc_frees;
3853 	}
3854 	allocs += counter_u64_fetch(z->uz_allocs);
3855 	frees += counter_u64_fetch(z->uz_frees);
3856 	sleeps += z->uz_sleeps;
3857 	if (cachefreep != NULL)
3858 		*cachefreep = cachefree;
3859 	if (allocsp != NULL)
3860 		*allocsp = allocs;
3861 	if (freesp != NULL)
3862 		*freesp = frees;
3863 	if (sleepsp != NULL)
3864 		*sleepsp = sleeps;
3865 }
3866 #endif /* DDB */
3867 
3868 static int
3869 sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS)
3870 {
3871 	uma_keg_t kz;
3872 	uma_zone_t z;
3873 	int count;
3874 
3875 	count = 0;
3876 	rw_rlock(&uma_rwlock);
3877 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
3878 		LIST_FOREACH(z, &kz->uk_zones, uz_link)
3879 			count++;
3880 	}
3881 	LIST_FOREACH(z, &uma_cachezones, uz_link)
3882 		count++;
3883 
3884 	rw_runlock(&uma_rwlock);
3885 	return (sysctl_handle_int(oidp, &count, 0, req));
3886 }
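
/*
 * Usage note: this handler backs a read-only integer sysctl (registered
 * elsewhere in this file) that reports the number of zones; a userland
 * consumer can read it first, for example via
 * sysctlbyname("vm.zone_count", ...), to size a buffer before fetching
 * the full statistics stream produced below.  The sysctl name shown is
 * the one libmemstat is commonly understood to use.
 */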
3887 
3888 static void
3889 uma_vm_zone_stats(struct uma_type_header *uth, uma_zone_t z, struct sbuf *sbuf,
3890     struct uma_percpu_stat *ups, bool internal)
3891 {
3892 	uma_zone_domain_t zdom;
3893 	uma_cache_t cache;
3894 	int i;
3895 
3896 
3897 	for (i = 0; i < vm_ndomains; i++) {
3898 		zdom = &z->uz_domain[i];
3899 		uth->uth_zone_free += zdom->uzd_nitems;
3900 	}
3901 	uth->uth_allocs = counter_u64_fetch(z->uz_allocs);
3902 	uth->uth_frees = counter_u64_fetch(z->uz_frees);
3903 	uth->uth_fails = counter_u64_fetch(z->uz_fails);
3904 	uth->uth_sleeps = z->uz_sleeps;
3905 	/*
3906 	 * While it is not normally safe to access a cache's bucket
3907 	 * pointers from off the CPU that owns the cache, the pointers
3908 	 * may only be exchanged, never invalidated, while the zone
3909 	 * lock is not held, so we accept the possible race associated
3910 	 * with a bucket exchange that occurs during this monitoring
3911 	 * pass.
3912 	 */
3913 	for (i = 0; i < mp_maxid + 1; i++) {
3914 		bzero(&ups[i], sizeof(*ups));
3915 		if (internal || CPU_ABSENT(i))
3916 			continue;
3917 		cache = &z->uz_cpu[i];
3918 		if (cache->uc_allocbucket != NULL)
3919 			ups[i].ups_cache_free +=
3920 			    cache->uc_allocbucket->ub_cnt;
3921 		if (cache->uc_freebucket != NULL)
3922 			ups[i].ups_cache_free +=
3923 			    cache->uc_freebucket->ub_cnt;
3924 		ups[i].ups_allocs = cache->uc_allocs;
3925 		ups[i].ups_frees = cache->uc_frees;
3926 	}
3927 }
3928 
3929 static int
3930 sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
3931 {
3932 	struct uma_stream_header ush;
3933 	struct uma_type_header uth;
3934 	struct uma_percpu_stat *ups;
3935 	struct sbuf sbuf;
3936 	uma_keg_t kz;
3937 	uma_zone_t z;
3938 	int count, error, i;
3939 
3940 	error = sysctl_wire_old_buffer(req, 0);
3941 	if (error != 0)
3942 		return (error);
3943 	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
3944 	sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL);
3945 	ups = malloc((mp_maxid + 1) * sizeof(*ups), M_TEMP, M_WAITOK);
3946 
3947 	count = 0;
3948 	rw_rlock(&uma_rwlock);
3949 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
3950 		LIST_FOREACH(z, &kz->uk_zones, uz_link)
3951 			count++;
3952 	}
3953 
3954 	LIST_FOREACH(z, &uma_cachezones, uz_link)
3955 		count++;
3956 
3957 	/*
3958 	 * Insert stream header.
3959 	 */
3960 	bzero(&ush, sizeof(ush));
3961 	ush.ush_version = UMA_STREAM_VERSION;
3962 	ush.ush_maxcpus = (mp_maxid + 1);
3963 	ush.ush_count = count;
3964 	(void)sbuf_bcat(&sbuf, &ush, sizeof(ush));
3965 
3966 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
3967 		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
3968 			bzero(&uth, sizeof(uth));
3969 			ZONE_LOCK(z);
3970 			strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
3971 			uth.uth_align = kz->uk_align;
3972 			uth.uth_size = kz->uk_size;
3973 			uth.uth_rsize = kz->uk_rsize;
3974 			if (z->uz_max_items > 0)
3975 				uth.uth_pages = (z->uz_items / kz->uk_ipers) *
3976 					kz->uk_ppera;
3977 			else
3978 				uth.uth_pages = kz->uk_pages;
3979 			uth.uth_maxpages = (z->uz_max_items / kz->uk_ipers) *
3980 			    kz->uk_ppera;
3981 			uth.uth_limit = z->uz_max_items;
3982 			uth.uth_keg_free = z->uz_keg->uk_free;
3983 
3984 			/*
3985 			 * A zone is secondary if it is not the first entry
3986 			 * on the keg's zone list.
3987 			 */
3988 			if ((z->uz_flags & UMA_ZONE_SECONDARY) &&
3989 			    (LIST_FIRST(&kz->uk_zones) != z))
3990 				uth.uth_zone_flags = UTH_ZONE_SECONDARY;
3991 			uma_vm_zone_stats(&uth, z, &sbuf, ups,
3992 			    kz->uk_flags & UMA_ZFLAG_INTERNAL);
3993 			ZONE_UNLOCK(z);
3994 			(void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
3995 			for (i = 0; i < mp_maxid + 1; i++)
3996 				(void)sbuf_bcat(&sbuf, &ups[i], sizeof(ups[i]));
3997 		}
3998 	}
3999 	LIST_FOREACH(z, &uma_cachezones, uz_link) {
4000 		bzero(&uth, sizeof(uth));
4001 		ZONE_LOCK(z);
4002 		strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
4003 		uth.uth_size = z->uz_size;
4004 		uma_vm_zone_stats(&uth, z, &sbuf, ups, false);
4005 		ZONE_UNLOCK(z);
4006 		(void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
4007 		for (i = 0; i < mp_maxid + 1; i++)
4008 			(void)sbuf_bcat(&sbuf, &ups[i], sizeof(ups[i]));
4009 	}
4010 
4011 	rw_runlock(&uma_rwlock);
4012 	error = sbuf_finish(&sbuf);
4013 	sbuf_delete(&sbuf);
4014 	free(ups, M_TEMP);
4015 	return (error);
4016 }
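
/*
 * Layout of the stream emitted above, as produced by the sbuf_bcat()
 * calls: one struct uma_stream_header, then ush_count records, each a
 * struct uma_type_header immediately followed by ush_maxcpus struct
 * uma_percpu_stat entries.  A consumer (libmemstat, for instance) would
 * walk it roughly as:
 *
 *	read header (ush);
 *	for each of ush.ush_count zones {
 *		read one uma_type_header;
 *		read ush.ush_maxcpus uma_percpu_stat records;
 *	}
 */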
4017 
4018 int
4019 sysctl_handle_uma_zone_max(SYSCTL_HANDLER_ARGS)
4020 {
4021 	uma_zone_t zone = *(uma_zone_t *)arg1;
4022 	int error, max;
4023 
4024 	max = uma_zone_get_max(zone);
4025 	error = sysctl_handle_int(oidp, &max, 0, req);
4026 	if (error || !req->newptr)
4027 		return (error);
4028 
4029 	uma_zone_set_max(zone, max);
4030 
4031 	return (0);
4032 }
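
/*
 * Illustrative hookup (all names below are hypothetical): a subsystem
 * that wants its zone's limit tunable through sysctl can point this
 * handler at a uma_zone_t pointer, e.g.:
 *
 *	static uma_zone_t foo_zone;
 *
 *	SYSCTL_PROC(_kern, OID_AUTO, foo_zone_max,
 *	    CTLTYPE_INT | CTLFLAG_RW, &foo_zone, 0,
 *	    sysctl_handle_uma_zone_max, "I", "Maximum foo_zone items");
 *
 * sysctl_handle_uma_zone_cur() below can be wired up the same way with
 * CTLFLAG_RD to report the zone's current item count.
 */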
4033 
4034 int
4035 sysctl_handle_uma_zone_cur(SYSCTL_HANDLER_ARGS)
4036 {
4037 	uma_zone_t zone = *(uma_zone_t *)arg1;
4038 	int cur;
4039 
4040 	cur = uma_zone_get_cur(zone);
4041 	return (sysctl_handle_int(oidp, &cur, 0, req));
4042 }
4043 
4044 #ifdef INVARIANTS
4045 static uma_slab_t
4046 uma_dbg_getslab(uma_zone_t zone, void *item)
4047 {
4048 	uma_slab_t slab;
4049 	uma_keg_t keg;
4050 	uint8_t *mem;
4051 
4052 	mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
4053 	if (zone->uz_flags & UMA_ZONE_VTOSLAB) {
4054 		slab = vtoslab((vm_offset_t)mem);
4055 	} else {
4056 		/*
4057 		 * It is safe to return the slab here even though the
4058 		 * zone is unlocked because the item's allocation state
4059 		 * essentially holds a reference.
4060 		 */
4061 		if (zone->uz_lockptr == &zone->uz_lock)
4062 			return (NULL);
4063 		ZONE_LOCK(zone);
4064 		keg = zone->uz_keg;
4065 		if (keg->uk_flags & UMA_ZONE_HASH)
4066 			slab = hash_sfind(&keg->uk_hash, mem);
4067 		else
4068 			slab = (uma_slab_t)(mem + keg->uk_pgoff);
4069 		ZONE_UNLOCK(zone);
4070 	}
4071 
4072 	return (slab);
4073 }
4074 
4075 static bool
4076 uma_dbg_zskip(uma_zone_t zone, void *mem)
4077 {
4078 
4079 	if (zone->uz_lockptr == &zone->uz_lock)
4080 		return (true);
4081 
4082 	return (uma_dbg_kskip(zone->uz_keg, mem));
4083 }
4084 
4085 static bool
4086 uma_dbg_kskip(uma_keg_t keg, void *mem)
4087 {
4088 	uintptr_t idx;
4089 
4090 	if (dbg_divisor == 0)
4091 		return (true);
4092 
4093 	if (dbg_divisor == 1)
4094 		return (false);
4095 
4096 	idx = (uintptr_t)mem >> PAGE_SHIFT;
4097 	if (keg->uk_ipers > 1) {
4098 		idx *= keg->uk_ipers;
4099 		idx += ((uintptr_t)mem & PAGE_MASK) / keg->uk_rsize;
4100 	}
4101 
4102 	if ((idx / dbg_divisor) * dbg_divisor != idx) {
4103 		counter_u64_add(uma_skip_cnt, 1);
4104 		return (true);
4105 	}
4106 	counter_u64_add(uma_dbg_cnt, 1);
4107 
4108 	return (false);
4109 }
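
/*
 * Worked example for the sampling above: with dbg_divisor == 3, an
 * item's index is derived from its page number and slot within the
 * slab, and only indices that are exact multiples of 3 take the
 * expensive debugging path, so roughly one item in three is checked.
 * A divisor of 1 checks every item and 0 disables the checks entirely.
 */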
4110 
4111 /*
4112  * Set up the slab's freei data such that uma_dbg_free can function.
4113  *
4114  */
4115 static void
4116 uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item)
4117 {
4118 	uma_keg_t keg;
4119 	int freei;
4120 
4121 	if (slab == NULL) {
4122 		slab = uma_dbg_getslab(zone, item);
4123 		if (slab == NULL)
4124 			panic("uma: item %p did not belong to zone %s\n",
4125 			    item, zone->uz_name);
4126 	}
4127 	keg = slab->us_keg;
4128 	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
4129 
4130 	if (BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree))
4131 		panic("Duplicate alloc of %p from zone %p(%s) slab %p(%d)\n",
4132 		    item, zone, zone->uz_name, slab, freei);
4133 	BIT_SET_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree);
4134 
4135 	return;
4136 }
4137 
4138 /*
4139  * Verifies freed addresses.  Checks for alignment, valid slab membership
4140  * and duplicate frees.
4141  *
4142  */
4143 static void
4144 uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
4145 {
4146 	uma_keg_t keg;
4147 	int freei;
4148 
4149 	if (slab == NULL) {
4150 		slab = uma_dbg_getslab(zone, item);
4151 		if (slab == NULL)
4152 			panic("uma: Freed item %p did not belong to zone %s\n",
4153 			    item, zone->uz_name);
4154 	}
4155 	keg = slab->us_keg;
4156 	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
4157 
4158 	if (freei >= keg->uk_ipers)
4159 		panic("Invalid free of %p from zone %p(%s) slab %p(%d)\n",
4160 		    item, zone, zone->uz_name, slab, freei);
4161 
4162 	if (((freei * keg->uk_rsize) + slab->us_data) != item)
4163 		panic("Unaligned free of %p from zone %p(%s) slab %p(%d)\n",
4164 		    item, zone, zone->uz_name, slab, freei);
4165 
4166 	if (!BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree))
4167 		panic("Duplicate free of %p from zone %p(%s) slab %p(%d)\n",
4168 		    item, zone, zone->uz_name, slab, freei);
4169 
4170 	BIT_CLR_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree);
4171 }
4172 #endif /* INVARIANTS */
4173 
4174 #ifdef DDB
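/*
 * The commands below are run from the in-kernel debugger as "show uma"
 * and "show umacache"; each walks the relevant zone lists and prints one
 * summary line per zone using the column headers in its db_printf()
 * call.
 */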
4175 DB_SHOW_COMMAND(uma, db_show_uma)
4176 {
4177 	uma_keg_t kz;
4178 	uma_zone_t z;
4179 	uint64_t allocs, frees, sleeps;
4180 	long cachefree;
4181 	int i;
4182 
4183 	db_printf("%18s %8s %8s %8s %12s %8s %8s\n", "Zone", "Size", "Used",
4184 	    "Free", "Requests", "Sleeps", "Bucket");
4185 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
4186 		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
4187 			if (kz->uk_flags & UMA_ZFLAG_INTERNAL) {
4188 				allocs = counter_u64_fetch(z->uz_allocs);
4189 				frees = counter_u64_fetch(z->uz_frees);
4190 				sleeps = z->uz_sleeps;
4191 				cachefree = 0;
4192 			} else
4193 				uma_zone_sumstat(z, &cachefree, &allocs,
4194 				    &frees, &sleeps);
4195 			if (!((z->uz_flags & UMA_ZONE_SECONDARY) &&
4196 			    (LIST_FIRST(&kz->uk_zones) != z)))
4197 				cachefree += kz->uk_free;
4198 			for (i = 0; i < vm_ndomains; i++)
4199 				cachefree += z->uz_domain[i].uzd_nitems;
4200 
4201 			db_printf("%18s %8ju %8jd %8ld %12ju %8ju %8u\n",
4202 			    z->uz_name, (uintmax_t)kz->uk_size,
4203 			    (intmax_t)(allocs - frees), cachefree,
4204 			    (uintmax_t)allocs, sleeps, z->uz_count);
4205 			if (db_pager_quit)
4206 				return;
4207 		}
4208 	}
4209 }
4210 
4211 DB_SHOW_COMMAND(umacache, db_show_umacache)
4212 {
4213 	uma_zone_t z;
4214 	uint64_t allocs, frees;
4215 	long cachefree;
4216 	int i;
4217 
4218 	db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free",
4219 	    "Requests", "Bucket");
4220 	LIST_FOREACH(z, &uma_cachezones, uz_link) {
4221 		uma_zone_sumstat(z, &cachefree, &allocs, &frees, NULL);
4222 		for (i = 0; i < vm_ndomains; i++)
4223 			cachefree += z->uz_domain[i].uzd_nitems;
4224 		db_printf("%18s %8ju %8jd %8ld %12ju %8u\n",
4225 		    z->uz_name, (uintmax_t)z->uz_size,
4226 		    (intmax_t)(allocs - frees), cachefree,
4227 		    (uintmax_t)allocs, z->uz_count);
4228 		if (db_pager_quit)
4229 			return;
4230 	}
4231 }
4232 #endif	/* DDB */
4233