xref: /freebsd/sys/vm/uma_core.c (revision 190cef3d52236565eb22e18b33e9e865ec634aa3)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson <jeff@FreeBSD.org>
5  * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
6  * Copyright (c) 2004-2006 Robert N. M. Watson
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice unmodified, this list of conditions, and the following
14  *    disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /*
32  * uma_core.c  Implementation of the Universal Memory allocator
33  *
34  * This allocator is intended to replace the multitude of similar object caches
35  * in the standard FreeBSD kernel.  The intent is to be flexible as well as
36  * efficient.  A primary design goal is to return unused memory to the rest of
37  * the system.  This will make the system as a whole more flexible due to the
38  * ability to move memory to subsystems which most need it instead of leaving
39  * pools of reserved memory unused.
40  *
41  * The basic ideas stem from similar slab/zone based allocators whose algorithms
42  * are well known.
43  *
44  */
45 
46 /*
47  * TODO:
48  *	- Improve memory usage for large allocations
49  *	- Investigate cache size adjustments
50  */
51 
52 #include <sys/cdefs.h>
53 __FBSDID("$FreeBSD$");
54 
55 #include "opt_ddb.h"
56 #include "opt_param.h"
57 #include "opt_vm.h"
58 
59 #include <sys/param.h>
60 #include <sys/systm.h>
61 #include <sys/bitset.h>
62 #include <sys/eventhandler.h>
63 #include <sys/kernel.h>
64 #include <sys/types.h>
65 #include <sys/limits.h>
66 #include <sys/queue.h>
67 #include <sys/malloc.h>
68 #include <sys/ktr.h>
69 #include <sys/lock.h>
70 #include <sys/sysctl.h>
71 #include <sys/mutex.h>
72 #include <sys/proc.h>
73 #include <sys/random.h>
74 #include <sys/rwlock.h>
75 #include <sys/sbuf.h>
76 #include <sys/sched.h>
77 #include <sys/smp.h>
78 #include <sys/taskqueue.h>
79 #include <sys/vmmeter.h>
80 
81 #include <vm/vm.h>
82 #include <vm/vm_object.h>
83 #include <vm/vm_page.h>
84 #include <vm/vm_pageout.h>
85 #include <vm/vm_param.h>
86 #include <vm/vm_phys.h>
87 #include <vm/vm_map.h>
88 #include <vm/vm_kern.h>
89 #include <vm/vm_extern.h>
90 #include <vm/uma.h>
91 #include <vm/uma_int.h>
92 #include <vm/uma_dbg.h>
93 
94 #include <ddb/ddb.h>
95 
96 #ifdef DEBUG_MEMGUARD
97 #include <vm/memguard.h>
98 #endif
99 
100 /*
101  * This is the zone and keg from which all zones are spawned.
102  */
103 static uma_zone_t kegs;
104 static uma_zone_t zones;
105 
106 /* This is the zone from which all offpage uma_slab_ts are allocated. */
107 static uma_zone_t slabzone;
108 
109 /*
110  * The initial hash tables come out of this zone so they can be allocated
111  * prior to malloc coming up.
112  */
113 static uma_zone_t hashzone;
114 
115 /* The boot-time adjusted value for cache line alignment. */
116 int uma_align_cache = 64 - 1;
117 
118 static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");
119 
120 /*
121  * Are we allowed to allocate buckets?
122  */
123 static int bucketdisable = 1;
124 
125 /* Linked list of all kegs in the system */
126 static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);
127 
128 /* Linked list of all cache-only zones in the system */
129 static LIST_HEAD(,uma_zone) uma_cachezones =
130     LIST_HEAD_INITIALIZER(uma_cachezones);
131 
132 /* This RW lock protects the keg list */
133 static struct rwlock_padalign __exclusive_cache_line uma_rwlock;
134 
135 /*
136  * Pointer and counter to pool of pages, that is preallocated at
137  * startup to bootstrap UMA.
138  */
139 static char *bootmem;
140 static int boot_pages;
141 
142 static struct sx uma_drain_lock;
143 
144 /* kmem soft limit. */
145 static unsigned long uma_kmem_limit = LONG_MAX;
146 static volatile unsigned long uma_kmem_total;
147 
148 /* Is the VM done starting up? */
149 static enum { BOOT_COLD = 0, BOOT_STRAPPED, BOOT_PAGEALLOC, BOOT_BUCKETS,
150     BOOT_RUNNING } booted = BOOT_COLD;
151 
152 /*
153  * This is the handle used to schedule events that need to happen
154  * outside of the allocation fast path.
155  */
156 static struct callout uma_callout;
157 #define	UMA_TIMEOUT	20		/* Seconds for callout interval. */
158 
159 /*
160  * This structure is passed as the zone ctor arg so that I don't have to create
161  * a special allocation function just for zones.
162  */
163 struct uma_zctor_args {
164 	const char *name;
165 	size_t size;
166 	uma_ctor ctor;
167 	uma_dtor dtor;
168 	uma_init uminit;
169 	uma_fini fini;
170 	uma_import import;
171 	uma_release release;
172 	void *arg;
173 	uma_keg_t keg;
174 	int align;
175 	uint32_t flags;
176 };
177 
178 struct uma_kctor_args {
179 	uma_zone_t zone;
180 	size_t size;
181 	uma_init uminit;
182 	uma_fini fini;
183 	int align;
184 	uint32_t flags;
185 };
186 
187 struct uma_bucket_zone {
188 	uma_zone_t	ubz_zone;
189 	char		*ubz_name;
190 	int		ubz_entries;	/* Number of items it can hold. */
191 	int		ubz_maxsize;	/* Maximum allocation size per-item. */
192 };
193 
194 /*
195  * Compute the actual number of bucket entries to pack them in power
196  * of two sizes for more efficient space utilization.
197  */
198 #define	BUCKET_SIZE(n)						\
199     (((sizeof(void *) * (n)) - sizeof(struct uma_bucket)) / sizeof(void *))
200 
201 #define	BUCKET_MAX	BUCKET_SIZE(256)
202 
203 struct uma_bucket_zone bucket_zones[] = {
204 	{ NULL, "4 Bucket", BUCKET_SIZE(4), 4096 },
205 	{ NULL, "6 Bucket", BUCKET_SIZE(6), 3072 },
206 	{ NULL, "8 Bucket", BUCKET_SIZE(8), 2048 },
207 	{ NULL, "12 Bucket", BUCKET_SIZE(12), 1536 },
208 	{ NULL, "16 Bucket", BUCKET_SIZE(16), 1024 },
209 	{ NULL, "32 Bucket", BUCKET_SIZE(32), 512 },
210 	{ NULL, "64 Bucket", BUCKET_SIZE(64), 256 },
211 	{ NULL, "128 Bucket", BUCKET_SIZE(128), 128 },
212 	{ NULL, "256 Bucket", BUCKET_SIZE(256), 64 },
213 	{ NULL, NULL, 0}
214 };
215 
216 /*
217  * Flags and enumerations to be passed to internal functions.
218  */
219 enum zfreeskip { SKIP_NONE = 0, SKIP_DTOR, SKIP_FINI };
220 
221 #define	UMA_ANYDOMAIN	-1	/* Special value for domain search. */
222 
223 /* Prototypes.. */
224 
225 int	uma_startup_count(int);
226 void	uma_startup(void *, int);
227 void	uma_startup1(void);
228 void	uma_startup2(void);
229 
230 static void *noobj_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
231 static void *page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
232 static void *pcpu_page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
233 static void *startup_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
234 static void page_free(void *, vm_size_t, uint8_t);
235 static void pcpu_page_free(void *, vm_size_t, uint8_t);
236 static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int, int);
237 static void cache_drain(uma_zone_t);
238 static void bucket_drain(uma_zone_t, uma_bucket_t);
239 static void bucket_cache_drain(uma_zone_t zone);
240 static int keg_ctor(void *, int, void *, int);
241 static void keg_dtor(void *, int, void *);
242 static int zone_ctor(void *, int, void *, int);
243 static void zone_dtor(void *, int, void *);
244 static int zero_init(void *, int, int);
245 static void keg_small_init(uma_keg_t keg);
246 static void keg_large_init(uma_keg_t keg);
247 static void zone_foreach(void (*zfunc)(uma_zone_t));
248 static void zone_timeout(uma_zone_t zone);
249 static int hash_alloc(struct uma_hash *);
250 static int hash_expand(struct uma_hash *, struct uma_hash *);
251 static void hash_free(struct uma_hash *hash);
252 static void uma_timeout(void *);
253 static void uma_startup3(void);
254 static void *zone_alloc_item(uma_zone_t, void *, int, int);
255 static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip);
256 static void bucket_enable(void);
257 static void bucket_init(void);
258 static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int);
259 static void bucket_free(uma_zone_t zone, uma_bucket_t, void *);
260 static void bucket_zone_drain(void);
261 static uma_bucket_t zone_alloc_bucket(uma_zone_t, void *, int, int);
262 static uma_slab_t zone_fetch_slab(uma_zone_t, uma_keg_t, int, int);
263 static uma_slab_t zone_fetch_slab_multi(uma_zone_t, uma_keg_t, int, int);
264 static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
265 static void slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item);
266 static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
267     uma_fini fini, int align, uint32_t flags);
268 static int zone_import(uma_zone_t, void **, int, int, int);
269 static void zone_release(uma_zone_t, void **, int);
270 static void uma_zero_item(void *, uma_zone_t);
271 
272 void uma_print_zone(uma_zone_t);
273 void uma_print_stats(void);
274 static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
275 static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);
276 
277 #ifdef INVARIANTS
278 static bool uma_dbg_kskip(uma_keg_t keg, void *mem);
279 static bool uma_dbg_zskip(uma_zone_t zone, void *mem);
280 static void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item);
281 static void uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item);
282 
283 static SYSCTL_NODE(_vm, OID_AUTO, debug, CTLFLAG_RD, 0,
284     "Memory allocation debugging");
285 
286 static u_int dbg_divisor = 1;
287 SYSCTL_UINT(_vm_debug, OID_AUTO, divisor,
288     CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &dbg_divisor, 0,
289     "Debug & thrash every this item in memory allocator");
290 
291 static counter_u64_t uma_dbg_cnt = EARLY_COUNTER;
292 static counter_u64_t uma_skip_cnt = EARLY_COUNTER;
293 SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, trashed, CTLFLAG_RD,
294     &uma_dbg_cnt, "memory items debugged");
295 SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, skipped, CTLFLAG_RD,
296     &uma_skip_cnt, "memory items skipped, not debugged");
297 #endif
298 
299 SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);
300 
301 SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLTYPE_INT,
302     0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones");
303 
304 SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
305     0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");
306 
307 static int zone_warnings = 1;
308 SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RWTUN, &zone_warnings, 0,
309     "Warn when UMA zones becomes full");
310 
311 /* Adjust bytes under management by UMA. */
312 static inline void
313 uma_total_dec(unsigned long size)
314 {
315 
316 	atomic_subtract_long(&uma_kmem_total, size);
317 }
318 
319 static inline void
320 uma_total_inc(unsigned long size)
321 {
322 
323 	if (atomic_fetchadd_long(&uma_kmem_total, size) > uma_kmem_limit)
324 		uma_reclaim_wakeup();
325 }
326 
327 /*
328  * This routine checks to see whether or not it's safe to enable buckets.
329  */
330 static void
331 bucket_enable(void)
332 {
333 	bucketdisable = vm_page_count_min();
334 }
335 
336 /*
337  * Initialize bucket_zones, the array of zones of buckets of various sizes.
338  *
339  * For each zone, calculate the memory required for each bucket, consisting
340  * of the header and an array of pointers.
341  */
342 static void
343 bucket_init(void)
344 {
345 	struct uma_bucket_zone *ubz;
346 	int size;
347 
348 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) {
349 		size = roundup(sizeof(struct uma_bucket), sizeof(void *));
350 		size += sizeof(void *) * ubz->ubz_entries;
351 		ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
352 		    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
353 		    UMA_ZONE_MTXCLASS | UMA_ZFLAG_BUCKET | UMA_ZONE_NUMA);
354 	}
355 }
356 
357 /*
358  * Given a desired number of entries for a bucket, return the zone from which
359  * to allocate the bucket.
360  */
361 static struct uma_bucket_zone *
362 bucket_zone_lookup(int entries)
363 {
364 	struct uma_bucket_zone *ubz;
365 
366 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
367 		if (ubz->ubz_entries >= entries)
368 			return (ubz);
369 	ubz--;
370 	return (ubz);
371 }
372 
373 static int
374 bucket_select(int size)
375 {
376 	struct uma_bucket_zone *ubz;
377 
378 	ubz = &bucket_zones[0];
379 	if (size > ubz->ubz_maxsize)
380 		return MAX((ubz->ubz_maxsize * ubz->ubz_entries) / size, 1);
381 
382 	for (; ubz->ubz_entries != 0; ubz++)
383 		if (ubz->ubz_maxsize < size)
384 			break;
385 	ubz--;
386 	return (ubz->ubz_entries);
387 }
388 
389 static uma_bucket_t
390 bucket_alloc(uma_zone_t zone, void *udata, int flags)
391 {
392 	struct uma_bucket_zone *ubz;
393 	uma_bucket_t bucket;
394 
395 	/*
396 	 * This is to stop us from allocating per cpu buckets while we're
397 	 * running out of vm.boot_pages.  Otherwise, we would exhaust the
398 	 * boot pages.  This also prevents us from allocating buckets in
399 	 * low memory situations.
400 	 */
401 	if (bucketdisable)
402 		return (NULL);
403 	/*
404 	 * To limit bucket recursion we store the original zone flags
405 	 * in a cookie passed via zalloc_arg/zfree_arg.  This allows the
406 	 * NOVM flag to persist even through deep recursions.  We also
407 	 * store ZFLAG_BUCKET once we have recursed attempting to allocate
408 	 * a bucket for a bucket zone so we do not allow infinite bucket
409 	 * recursion.  This cookie will even persist to frees of unused
410 	 * buckets via the allocation path or bucket allocations in the
411 	 * free path.
412 	 */
413 	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
414 		udata = (void *)(uintptr_t)zone->uz_flags;
415 	else {
416 		if ((uintptr_t)udata & UMA_ZFLAG_BUCKET)
417 			return (NULL);
418 		udata = (void *)((uintptr_t)udata | UMA_ZFLAG_BUCKET);
419 	}
420 	if ((uintptr_t)udata & UMA_ZFLAG_CACHEONLY)
421 		flags |= M_NOVM;
422 	ubz = bucket_zone_lookup(zone->uz_count);
423 	if (ubz->ubz_zone == zone && (ubz + 1)->ubz_entries != 0)
424 		ubz++;
425 	bucket = uma_zalloc_arg(ubz->ubz_zone, udata, flags);
426 	if (bucket) {
427 #ifdef INVARIANTS
428 		bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
429 #endif
430 		bucket->ub_cnt = 0;
431 		bucket->ub_entries = ubz->ubz_entries;
432 	}
433 
434 	return (bucket);
435 }
436 
437 static void
438 bucket_free(uma_zone_t zone, uma_bucket_t bucket, void *udata)
439 {
440 	struct uma_bucket_zone *ubz;
441 
442 	KASSERT(bucket->ub_cnt == 0,
443 	    ("bucket_free: Freeing a non free bucket."));
444 	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
445 		udata = (void *)(uintptr_t)zone->uz_flags;
446 	ubz = bucket_zone_lookup(bucket->ub_entries);
447 	uma_zfree_arg(ubz->ubz_zone, bucket, udata);
448 }
449 
450 static void
451 bucket_zone_drain(void)
452 {
453 	struct uma_bucket_zone *ubz;
454 
455 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
456 		zone_drain(ubz->ubz_zone);
457 }
458 
459 static void
460 zone_log_warning(uma_zone_t zone)
461 {
462 	static const struct timeval warninterval = { 300, 0 };
463 
464 	if (!zone_warnings || zone->uz_warning == NULL)
465 		return;
466 
467 	if (ratecheck(&zone->uz_ratecheck, &warninterval))
468 		printf("[zone: %s] %s\n", zone->uz_name, zone->uz_warning);
469 }
470 
471 static inline void
472 zone_maxaction(uma_zone_t zone)
473 {
474 
475 	if (zone->uz_maxaction.ta_func != NULL)
476 		taskqueue_enqueue(taskqueue_thread, &zone->uz_maxaction);
477 }
478 
479 static void
480 zone_foreach_keg(uma_zone_t zone, void (*kegfn)(uma_keg_t))
481 {
482 	uma_klink_t klink;
483 
484 	LIST_FOREACH(klink, &zone->uz_kegs, kl_link)
485 		kegfn(klink->kl_keg);
486 }
487 
488 /*
489  * Routine called by timeout which is used to fire off some time interval
490  * based calculations.  (stats, hash size, etc.)
491  *
492  * Arguments:
493  *	arg   Unused
494  *
495  * Returns:
496  *	Nothing
497  */
498 static void
499 uma_timeout(void *unused)
500 {
501 	bucket_enable();
502 	zone_foreach(zone_timeout);
503 
504 	/* Reschedule this event */
505 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
506 }
507 
508 /*
509  * Routine to perform timeout driven calculations.  This expands the
510  * hashes and does per cpu statistics aggregation.
511  *
512  *  Returns nothing.
513  */
514 static void
515 keg_timeout(uma_keg_t keg)
516 {
517 
518 	KEG_LOCK(keg);
519 	/*
520 	 * Expand the keg hash table.
521 	 *
522 	 * This is done if the number of slabs is larger than the hash size.
523 	 * What I'm trying to do here is completely reduce collisions.  This
524 	 * may be a little aggressive.  Should I allow for two collisions max?
525 	 */
526 	if (keg->uk_flags & UMA_ZONE_HASH &&
527 	    keg->uk_pages / keg->uk_ppera >= keg->uk_hash.uh_hashsize) {
528 		struct uma_hash newhash;
529 		struct uma_hash oldhash;
530 		int ret;
531 
532 		/*
533 		 * This is so involved because allocating and freeing
534 		 * while the keg lock is held will lead to deadlock.
535 		 * I have to do everything in stages and check for
536 		 * races.
537 		 */
538 		newhash = keg->uk_hash;
539 		KEG_UNLOCK(keg);
540 		ret = hash_alloc(&newhash);
541 		KEG_LOCK(keg);
542 		if (ret) {
543 			if (hash_expand(&keg->uk_hash, &newhash)) {
544 				oldhash = keg->uk_hash;
545 				keg->uk_hash = newhash;
546 			} else
547 				oldhash = newhash;
548 
549 			KEG_UNLOCK(keg);
550 			hash_free(&oldhash);
551 			return;
552 		}
553 	}
554 	KEG_UNLOCK(keg);
555 }
556 
557 static void
558 zone_timeout(uma_zone_t zone)
559 {
560 
561 	zone_foreach_keg(zone, &keg_timeout);
562 }
563 
564 /*
565  * Allocate and zero fill the next sized hash table from the appropriate
566  * backing store.
567  *
568  * Arguments:
569  *	hash  A new hash structure with the old hash size in uh_hashsize
570  *
571  * Returns:
572  *	1 on success and 0 on failure.
573  */
574 static int
575 hash_alloc(struct uma_hash *hash)
576 {
577 	int oldsize;
578 	int alloc;
579 
580 	oldsize = hash->uh_hashsize;
581 
582 	/* We're just going to go to a power of two greater */
583 	if (oldsize)  {
584 		hash->uh_hashsize = oldsize * 2;
585 		alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
586 		hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
587 		    M_UMAHASH, M_NOWAIT);
588 	} else {
589 		alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
590 		hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
591 		    UMA_ANYDOMAIN, M_WAITOK);
592 		hash->uh_hashsize = UMA_HASH_SIZE_INIT;
593 	}
594 	if (hash->uh_slab_hash) {
595 		bzero(hash->uh_slab_hash, alloc);
596 		hash->uh_hashmask = hash->uh_hashsize - 1;
597 		return (1);
598 	}
599 
600 	return (0);
601 }
602 
603 /*
604  * Expands the hash table for HASH zones.  This is done from zone_timeout
605  * to reduce collisions.  This must not be done in the regular allocation
606  * path, otherwise, we can recurse on the vm while allocating pages.
607  *
608  * Arguments:
609  *	oldhash  The hash you want to expand
610  *	newhash  The hash structure for the new table
611  *
612  * Returns:
613  *	Nothing
614  *
615  * Discussion:
616  */
617 static int
618 hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
619 {
620 	uma_slab_t slab;
621 	int hval;
622 	int i;
623 
624 	if (!newhash->uh_slab_hash)
625 		return (0);
626 
627 	if (oldhash->uh_hashsize >= newhash->uh_hashsize)
628 		return (0);
629 
630 	/*
631 	 * I need to investigate hash algorithms for resizing without a
632 	 * full rehash.
633 	 */
634 
635 	for (i = 0; i < oldhash->uh_hashsize; i++)
636 		while (!SLIST_EMPTY(&oldhash->uh_slab_hash[i])) {
637 			slab = SLIST_FIRST(&oldhash->uh_slab_hash[i]);
638 			SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[i], us_hlink);
639 			hval = UMA_HASH(newhash, slab->us_data);
640 			SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
641 			    slab, us_hlink);
642 		}
643 
644 	return (1);
645 }
646 
647 /*
648  * Free the hash bucket to the appropriate backing store.
649  *
650  * Arguments:
651  *	slab_hash  The hash bucket we're freeing
652  *	hashsize   The number of entries in that hash bucket
653  *
654  * Returns:
655  *	Nothing
656  */
657 static void
658 hash_free(struct uma_hash *hash)
659 {
660 	if (hash->uh_slab_hash == NULL)
661 		return;
662 	if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
663 		zone_free_item(hashzone, hash->uh_slab_hash, NULL, SKIP_NONE);
664 	else
665 		free(hash->uh_slab_hash, M_UMAHASH);
666 }
667 
668 /*
669  * Frees all outstanding items in a bucket
670  *
671  * Arguments:
672  *	zone   The zone to free to, must be unlocked.
673  *	bucket The free/alloc bucket with items, cpu queue must be locked.
674  *
675  * Returns:
676  *	Nothing
677  */
678 
679 static void
680 bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
681 {
682 	int i;
683 
684 	if (bucket == NULL)
685 		return;
686 
687 	if (zone->uz_fini)
688 		for (i = 0; i < bucket->ub_cnt; i++)
689 			zone->uz_fini(bucket->ub_bucket[i], zone->uz_size);
690 	zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt);
691 	bucket->ub_cnt = 0;
692 }
693 
694 /*
695  * Drains the per cpu caches for a zone.
696  *
697  * NOTE: This may only be called while the zone is being turn down, and not
698  * during normal operation.  This is necessary in order that we do not have
699  * to migrate CPUs to drain the per-CPU caches.
700  *
701  * Arguments:
702  *	zone     The zone to drain, must be unlocked.
703  *
704  * Returns:
705  *	Nothing
706  */
707 static void
708 cache_drain(uma_zone_t zone)
709 {
710 	uma_cache_t cache;
711 	int cpu;
712 
713 	/*
714 	 * XXX: It is safe to not lock the per-CPU caches, because we're
715 	 * tearing down the zone anyway.  I.e., there will be no further use
716 	 * of the caches at this point.
717 	 *
718 	 * XXX: It would good to be able to assert that the zone is being
719 	 * torn down to prevent improper use of cache_drain().
720 	 *
721 	 * XXX: We lock the zone before passing into bucket_cache_drain() as
722 	 * it is used elsewhere.  Should the tear-down path be made special
723 	 * there in some form?
724 	 */
725 	CPU_FOREACH(cpu) {
726 		cache = &zone->uz_cpu[cpu];
727 		bucket_drain(zone, cache->uc_allocbucket);
728 		bucket_drain(zone, cache->uc_freebucket);
729 		if (cache->uc_allocbucket != NULL)
730 			bucket_free(zone, cache->uc_allocbucket, NULL);
731 		if (cache->uc_freebucket != NULL)
732 			bucket_free(zone, cache->uc_freebucket, NULL);
733 		cache->uc_allocbucket = cache->uc_freebucket = NULL;
734 	}
735 	ZONE_LOCK(zone);
736 	bucket_cache_drain(zone);
737 	ZONE_UNLOCK(zone);
738 }
739 
740 static void
741 cache_shrink(uma_zone_t zone)
742 {
743 
744 	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
745 		return;
746 
747 	ZONE_LOCK(zone);
748 	zone->uz_count = (zone->uz_count_min + zone->uz_count) / 2;
749 	ZONE_UNLOCK(zone);
750 }
751 
752 static void
753 cache_drain_safe_cpu(uma_zone_t zone)
754 {
755 	uma_cache_t cache;
756 	uma_bucket_t b1, b2;
757 	int domain;
758 
759 	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
760 		return;
761 
762 	b1 = b2 = NULL;
763 	ZONE_LOCK(zone);
764 	critical_enter();
765 	if (zone->uz_flags & UMA_ZONE_NUMA)
766 		domain = PCPU_GET(domain);
767 	else
768 		domain = 0;
769 	cache = &zone->uz_cpu[curcpu];
770 	if (cache->uc_allocbucket) {
771 		if (cache->uc_allocbucket->ub_cnt != 0)
772 			LIST_INSERT_HEAD(&zone->uz_domain[domain].uzd_buckets,
773 			    cache->uc_allocbucket, ub_link);
774 		else
775 			b1 = cache->uc_allocbucket;
776 		cache->uc_allocbucket = NULL;
777 	}
778 	if (cache->uc_freebucket) {
779 		if (cache->uc_freebucket->ub_cnt != 0)
780 			LIST_INSERT_HEAD(&zone->uz_domain[domain].uzd_buckets,
781 			    cache->uc_freebucket, ub_link);
782 		else
783 			b2 = cache->uc_freebucket;
784 		cache->uc_freebucket = NULL;
785 	}
786 	critical_exit();
787 	ZONE_UNLOCK(zone);
788 	if (b1)
789 		bucket_free(zone, b1, NULL);
790 	if (b2)
791 		bucket_free(zone, b2, NULL);
792 }
793 
794 /*
795  * Safely drain per-CPU caches of a zone(s) to alloc bucket.
796  * This is an expensive call because it needs to bind to all CPUs
797  * one by one and enter a critical section on each of them in order
798  * to safely access their cache buckets.
799  * Zone lock must not be held on call this function.
800  */
801 static void
802 cache_drain_safe(uma_zone_t zone)
803 {
804 	int cpu;
805 
806 	/*
807 	 * Polite bucket sizes shrinking was not enouth, shrink aggressively.
808 	 */
809 	if (zone)
810 		cache_shrink(zone);
811 	else
812 		zone_foreach(cache_shrink);
813 
814 	CPU_FOREACH(cpu) {
815 		thread_lock(curthread);
816 		sched_bind(curthread, cpu);
817 		thread_unlock(curthread);
818 
819 		if (zone)
820 			cache_drain_safe_cpu(zone);
821 		else
822 			zone_foreach(cache_drain_safe_cpu);
823 	}
824 	thread_lock(curthread);
825 	sched_unbind(curthread);
826 	thread_unlock(curthread);
827 }
828 
829 /*
830  * Drain the cached buckets from a zone.  Expects a locked zone on entry.
831  */
832 static void
833 bucket_cache_drain(uma_zone_t zone)
834 {
835 	uma_zone_domain_t zdom;
836 	uma_bucket_t bucket;
837 	int i;
838 
839 	/*
840 	 * Drain the bucket queues and free the buckets.
841 	 */
842 	for (i = 0; i < vm_ndomains; i++) {
843 		zdom = &zone->uz_domain[i];
844 		while ((bucket = LIST_FIRST(&zdom->uzd_buckets)) != NULL) {
845 			LIST_REMOVE(bucket, ub_link);
846 			ZONE_UNLOCK(zone);
847 			bucket_drain(zone, bucket);
848 			bucket_free(zone, bucket, NULL);
849 			ZONE_LOCK(zone);
850 		}
851 	}
852 
853 	/*
854 	 * Shrink further bucket sizes.  Price of single zone lock collision
855 	 * is probably lower then price of global cache drain.
856 	 */
857 	if (zone->uz_count > zone->uz_count_min)
858 		zone->uz_count--;
859 }
860 
861 static void
862 keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
863 {
864 	uint8_t *mem;
865 	int i;
866 	uint8_t flags;
867 
868 	CTR4(KTR_UMA, "keg_free_slab keg %s(%p) slab %p, returning %d bytes",
869 	    keg->uk_name, keg, slab, PAGE_SIZE * keg->uk_ppera);
870 
871 	mem = slab->us_data;
872 	flags = slab->us_flags;
873 	i = start;
874 	if (keg->uk_fini != NULL) {
875 		for (i--; i > -1; i--)
876 #ifdef INVARIANTS
877 		/*
878 		 * trash_fini implies that dtor was trash_dtor. trash_fini
879 		 * would check that memory hasn't been modified since free,
880 		 * which executed trash_dtor.
881 		 * That's why we need to run uma_dbg_kskip() check here,
882 		 * albeit we don't make skip check for other init/fini
883 		 * invocations.
884 		 */
885 		if (!uma_dbg_kskip(keg, slab->us_data + (keg->uk_rsize * i)) ||
886 		    keg->uk_fini != trash_fini)
887 #endif
888 			keg->uk_fini(slab->us_data + (keg->uk_rsize * i),
889 			    keg->uk_size);
890 	}
891 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
892 		zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
893 	keg->uk_freef(mem, PAGE_SIZE * keg->uk_ppera, flags);
894 	uma_total_dec(PAGE_SIZE * keg->uk_ppera);
895 }
896 
897 /*
898  * Frees pages from a keg back to the system.  This is done on demand from
899  * the pageout daemon.
900  *
901  * Returns nothing.
902  */
903 static void
904 keg_drain(uma_keg_t keg)
905 {
906 	struct slabhead freeslabs = { 0 };
907 	uma_domain_t dom;
908 	uma_slab_t slab, tmp;
909 	int i;
910 
911 	/*
912 	 * We don't want to take pages from statically allocated kegs at this
913 	 * time
914 	 */
915 	if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
916 		return;
917 
918 	CTR3(KTR_UMA, "keg_drain %s(%p) free items: %u",
919 	    keg->uk_name, keg, keg->uk_free);
920 	KEG_LOCK(keg);
921 	if (keg->uk_free == 0)
922 		goto finished;
923 
924 	for (i = 0; i < vm_ndomains; i++) {
925 		dom = &keg->uk_domain[i];
926 		LIST_FOREACH_SAFE(slab, &dom->ud_free_slab, us_link, tmp) {
927 			/* We have nowhere to free these to. */
928 			if (slab->us_flags & UMA_SLAB_BOOT)
929 				continue;
930 
931 			LIST_REMOVE(slab, us_link);
932 			keg->uk_pages -= keg->uk_ppera;
933 			keg->uk_free -= keg->uk_ipers;
934 
935 			if (keg->uk_flags & UMA_ZONE_HASH)
936 				UMA_HASH_REMOVE(&keg->uk_hash, slab,
937 				    slab->us_data);
938 
939 			SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);
940 		}
941 	}
942 
943 finished:
944 	KEG_UNLOCK(keg);
945 
946 	while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
947 		SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
948 		keg_free_slab(keg, slab, keg->uk_ipers);
949 	}
950 }
951 
952 static void
953 zone_drain_wait(uma_zone_t zone, int waitok)
954 {
955 
956 	/*
957 	 * Set draining to interlock with zone_dtor() so we can release our
958 	 * locks as we go.  Only dtor() should do a WAITOK call since it
959 	 * is the only call that knows the structure will still be available
960 	 * when it wakes up.
961 	 */
962 	ZONE_LOCK(zone);
963 	while (zone->uz_flags & UMA_ZFLAG_DRAINING) {
964 		if (waitok == M_NOWAIT)
965 			goto out;
966 		msleep(zone, zone->uz_lockptr, PVM, "zonedrain", 1);
967 	}
968 	zone->uz_flags |= UMA_ZFLAG_DRAINING;
969 	bucket_cache_drain(zone);
970 	ZONE_UNLOCK(zone);
971 	/*
972 	 * The DRAINING flag protects us from being freed while
973 	 * we're running.  Normally the uma_rwlock would protect us but we
974 	 * must be able to release and acquire the right lock for each keg.
975 	 */
976 	zone_foreach_keg(zone, &keg_drain);
977 	ZONE_LOCK(zone);
978 	zone->uz_flags &= ~UMA_ZFLAG_DRAINING;
979 	wakeup(zone);
980 out:
981 	ZONE_UNLOCK(zone);
982 }
983 
984 void
985 zone_drain(uma_zone_t zone)
986 {
987 
988 	zone_drain_wait(zone, M_NOWAIT);
989 }
990 
991 /*
992  * Allocate a new slab for a keg.  This does not insert the slab onto a list.
993  *
994  * Arguments:
995  *	wait  Shall we wait?
996  *
997  * Returns:
998  *	The slab that was allocated or NULL if there is no memory and the
999  *	caller specified M_NOWAIT.
1000  */
1001 static uma_slab_t
1002 keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int domain, int wait)
1003 {
1004 	uma_alloc allocf;
1005 	uma_slab_t slab;
1006 	unsigned long size;
1007 	uint8_t *mem;
1008 	uint8_t flags;
1009 	int i;
1010 
1011 	KASSERT(domain >= 0 && domain < vm_ndomains,
1012 	    ("keg_alloc_slab: domain %d out of range", domain));
1013 	mtx_assert(&keg->uk_lock, MA_OWNED);
1014 	slab = NULL;
1015 	mem = NULL;
1016 
1017 	allocf = keg->uk_allocf;
1018 	KEG_UNLOCK(keg);
1019 	size = keg->uk_ppera * PAGE_SIZE;
1020 
1021 	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
1022 		slab = zone_alloc_item(keg->uk_slabzone, NULL, domain, wait);
1023 		if (slab == NULL)
1024 			goto out;
1025 	}
1026 
1027 	/*
1028 	 * This reproduces the old vm_zone behavior of zero filling pages the
1029 	 * first time they are added to a zone.
1030 	 *
1031 	 * Malloced items are zeroed in uma_zalloc.
1032 	 */
1033 
1034 	if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
1035 		wait |= M_ZERO;
1036 	else
1037 		wait &= ~M_ZERO;
1038 
1039 	if (keg->uk_flags & UMA_ZONE_NODUMP)
1040 		wait |= M_NODUMP;
1041 
1042 	/* zone is passed for legacy reasons. */
1043 	mem = allocf(zone, size, domain, &flags, wait);
1044 	if (mem == NULL) {
1045 		if (keg->uk_flags & UMA_ZONE_OFFPAGE)
1046 			zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
1047 		slab = NULL;
1048 		goto out;
1049 	}
1050 	uma_total_inc(size);
1051 
1052 	/* Point the slab into the allocated memory */
1053 	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
1054 		slab = (uma_slab_t )(mem + keg->uk_pgoff);
1055 
1056 	if (keg->uk_flags & UMA_ZONE_VTOSLAB)
1057 		for (i = 0; i < keg->uk_ppera; i++)
1058 			vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);
1059 
1060 	slab->us_keg = keg;
1061 	slab->us_data = mem;
1062 	slab->us_freecount = keg->uk_ipers;
1063 	slab->us_flags = flags;
1064 	slab->us_domain = domain;
1065 	BIT_FILL(SLAB_SETSIZE, &slab->us_free);
1066 #ifdef INVARIANTS
1067 	BIT_ZERO(SLAB_SETSIZE, &slab->us_debugfree);
1068 #endif
1069 
1070 	if (keg->uk_init != NULL) {
1071 		for (i = 0; i < keg->uk_ipers; i++)
1072 			if (keg->uk_init(slab->us_data + (keg->uk_rsize * i),
1073 			    keg->uk_size, wait) != 0)
1074 				break;
1075 		if (i != keg->uk_ipers) {
1076 			keg_free_slab(keg, slab, i);
1077 			slab = NULL;
1078 			goto out;
1079 		}
1080 	}
1081 out:
1082 	KEG_LOCK(keg);
1083 
1084 	CTR3(KTR_UMA, "keg_alloc_slab: allocated slab %p for %s(%p)",
1085 	    slab, keg->uk_name, keg);
1086 
1087 	if (slab != NULL) {
1088 		if (keg->uk_flags & UMA_ZONE_HASH)
1089 			UMA_HASH_INSERT(&keg->uk_hash, slab, mem);
1090 
1091 		keg->uk_pages += keg->uk_ppera;
1092 		keg->uk_free += keg->uk_ipers;
1093 	}
1094 
1095 	return (slab);
1096 }
1097 
1098 /*
1099  * This function is intended to be used early on in place of page_alloc() so
1100  * that we may use the boot time page cache to satisfy allocations before
1101  * the VM is ready.
1102  */
1103 static void *
1104 startup_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
1105     int wait)
1106 {
1107 	uma_keg_t keg;
1108 	void *mem;
1109 	int pages;
1110 
1111 	keg = zone_first_keg(zone);
1112 
1113 	/*
1114 	 * If we are in BOOT_BUCKETS or higher, than switch to real
1115 	 * allocator.  Zones with page sized slabs switch at BOOT_PAGEALLOC.
1116 	 */
1117 	switch (booted) {
1118 		case BOOT_COLD:
1119 		case BOOT_STRAPPED:
1120 			break;
1121 		case BOOT_PAGEALLOC:
1122 			if (keg->uk_ppera > 1)
1123 				break;
1124 		case BOOT_BUCKETS:
1125 		case BOOT_RUNNING:
1126 #ifdef UMA_MD_SMALL_ALLOC
1127 			keg->uk_allocf = (keg->uk_ppera > 1) ?
1128 			    page_alloc : uma_small_alloc;
1129 #else
1130 			keg->uk_allocf = page_alloc;
1131 #endif
1132 			return keg->uk_allocf(zone, bytes, domain, pflag, wait);
1133 	}
1134 
1135 	/*
1136 	 * Check our small startup cache to see if it has pages remaining.
1137 	 */
1138 	pages = howmany(bytes, PAGE_SIZE);
1139 	KASSERT(pages > 0, ("%s can't reserve 0 pages", __func__));
1140 	if (pages > boot_pages)
1141 		panic("UMA zone \"%s\": Increase vm.boot_pages", zone->uz_name);
1142 #ifdef DIAGNOSTIC
1143 	printf("%s from \"%s\", %d boot pages left\n", __func__, zone->uz_name,
1144 	    boot_pages);
1145 #endif
1146 	mem = bootmem;
1147 	boot_pages -= pages;
1148 	bootmem += pages * PAGE_SIZE;
1149 	*pflag = UMA_SLAB_BOOT;
1150 
1151 	return (mem);
1152 }
1153 
1154 /*
1155  * Allocates a number of pages from the system
1156  *
1157  * Arguments:
1158  *	bytes  The number of bytes requested
1159  *	wait  Shall we wait?
1160  *
1161  * Returns:
1162  *	A pointer to the alloced memory or possibly
1163  *	NULL if M_NOWAIT is set.
1164  */
1165 static void *
1166 page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
1167     int wait)
1168 {
1169 	void *p;	/* Returned page */
1170 
1171 	*pflag = UMA_SLAB_KERNEL;
1172 	p = (void *) kmem_malloc_domain(domain, bytes, wait);
1173 
1174 	return (p);
1175 }
1176 
1177 static void *
1178 pcpu_page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
1179     int wait)
1180 {
1181 	struct pglist alloctail;
1182 	vm_offset_t addr, zkva;
1183 	int cpu, flags;
1184 	vm_page_t p, p_next;
1185 #ifdef NUMA
1186 	struct pcpu *pc;
1187 #endif
1188 
1189 	MPASS(bytes == (mp_maxid + 1) * PAGE_SIZE);
1190 
1191 	TAILQ_INIT(&alloctail);
1192 	flags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
1193 	    malloc2vm_flags(wait);
1194 	*pflag = UMA_SLAB_KERNEL;
1195 	for (cpu = 0; cpu <= mp_maxid; cpu++) {
1196 		if (CPU_ABSENT(cpu)) {
1197 			p = vm_page_alloc(NULL, 0, flags);
1198 		} else {
1199 #ifndef NUMA
1200 			p = vm_page_alloc(NULL, 0, flags);
1201 #else
1202 			pc = pcpu_find(cpu);
1203 			p = vm_page_alloc_domain(NULL, 0, pc->pc_domain, flags);
1204 			if (__predict_false(p == NULL))
1205 				p = vm_page_alloc(NULL, 0, flags);
1206 #endif
1207 		}
1208 		if (__predict_false(p == NULL))
1209 			goto fail;
1210 		TAILQ_INSERT_TAIL(&alloctail, p, listq);
1211 	}
1212 	if ((addr = kva_alloc(bytes)) == 0)
1213 		goto fail;
1214 	zkva = addr;
1215 	TAILQ_FOREACH(p, &alloctail, listq) {
1216 		pmap_qenter(zkva, &p, 1);
1217 		zkva += PAGE_SIZE;
1218 	}
1219 	return ((void*)addr);
1220  fail:
1221 	TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
1222 		vm_page_unwire(p, PQ_NONE);
1223 		vm_page_free(p);
1224 	}
1225 	return (NULL);
1226 }
1227 
1228 /*
1229  * Allocates a number of pages from within an object
1230  *
1231  * Arguments:
1232  *	bytes  The number of bytes requested
1233  *	wait   Shall we wait?
1234  *
1235  * Returns:
1236  *	A pointer to the alloced memory or possibly
1237  *	NULL if M_NOWAIT is set.
1238  */
1239 static void *
1240 noobj_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags,
1241     int wait)
1242 {
1243 	TAILQ_HEAD(, vm_page) alloctail;
1244 	u_long npages;
1245 	vm_offset_t retkva, zkva;
1246 	vm_page_t p, p_next;
1247 	uma_keg_t keg;
1248 
1249 	TAILQ_INIT(&alloctail);
1250 	keg = zone_first_keg(zone);
1251 
1252 	npages = howmany(bytes, PAGE_SIZE);
1253 	while (npages > 0) {
1254 		p = vm_page_alloc_domain(NULL, 0, domain, VM_ALLOC_INTERRUPT |
1255 		    VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
1256 		    ((wait & M_WAITOK) != 0 ? VM_ALLOC_WAITOK :
1257 		    VM_ALLOC_NOWAIT));
1258 		if (p != NULL) {
1259 			/*
1260 			 * Since the page does not belong to an object, its
1261 			 * listq is unused.
1262 			 */
1263 			TAILQ_INSERT_TAIL(&alloctail, p, listq);
1264 			npages--;
1265 			continue;
1266 		}
1267 		/*
1268 		 * Page allocation failed, free intermediate pages and
1269 		 * exit.
1270 		 */
1271 		TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
1272 			vm_page_unwire(p, PQ_NONE);
1273 			vm_page_free(p);
1274 		}
1275 		return (NULL);
1276 	}
1277 	*flags = UMA_SLAB_PRIV;
1278 	zkva = keg->uk_kva +
1279 	    atomic_fetchadd_long(&keg->uk_offset, round_page(bytes));
1280 	retkva = zkva;
1281 	TAILQ_FOREACH(p, &alloctail, listq) {
1282 		pmap_qenter(zkva, &p, 1);
1283 		zkva += PAGE_SIZE;
1284 	}
1285 
1286 	return ((void *)retkva);
1287 }
1288 
1289 /*
1290  * Frees a number of pages to the system
1291  *
1292  * Arguments:
1293  *	mem   A pointer to the memory to be freed
1294  *	size  The size of the memory being freed
1295  *	flags The original p->us_flags field
1296  *
1297  * Returns:
1298  *	Nothing
1299  */
1300 static void
1301 page_free(void *mem, vm_size_t size, uint8_t flags)
1302 {
1303 
1304 	if ((flags & UMA_SLAB_KERNEL) == 0)
1305 		panic("UMA: page_free used with invalid flags %x", flags);
1306 
1307 	kmem_free((vm_offset_t)mem, size);
1308 }
1309 
1310 /*
1311  * Frees pcpu zone allocations
1312  *
1313  * Arguments:
1314  *	mem   A pointer to the memory to be freed
1315  *	size  The size of the memory being freed
1316  *	flags The original p->us_flags field
1317  *
1318  * Returns:
1319  *	Nothing
1320  */
1321 static void
1322 pcpu_page_free(void *mem, vm_size_t size, uint8_t flags)
1323 {
1324 	vm_offset_t sva, curva;
1325 	vm_paddr_t paddr;
1326 	vm_page_t m;
1327 
1328 	MPASS(size == (mp_maxid+1)*PAGE_SIZE);
1329 	sva = (vm_offset_t)mem;
1330 	for (curva = sva; curva < sva + size; curva += PAGE_SIZE) {
1331 		paddr = pmap_kextract(curva);
1332 		m = PHYS_TO_VM_PAGE(paddr);
1333 		vm_page_unwire(m, PQ_NONE);
1334 		vm_page_free(m);
1335 	}
1336 	pmap_qremove(sva, size >> PAGE_SHIFT);
1337 	kva_free(sva, size);
1338 }
1339 
1340 
1341 /*
1342  * Zero fill initializer
1343  *
1344  * Arguments/Returns follow uma_init specifications
1345  */
1346 static int
1347 zero_init(void *mem, int size, int flags)
1348 {
1349 	bzero(mem, size);
1350 	return (0);
1351 }
1352 
1353 /*
1354  * Finish creating a small uma keg.  This calculates ipers, and the keg size.
1355  *
1356  * Arguments
1357  *	keg  The zone we should initialize
1358  *
1359  * Returns
1360  *	Nothing
1361  */
1362 static void
1363 keg_small_init(uma_keg_t keg)
1364 {
1365 	u_int rsize;
1366 	u_int memused;
1367 	u_int wastedspace;
1368 	u_int shsize;
1369 	u_int slabsize;
1370 
1371 	if (keg->uk_flags & UMA_ZONE_PCPU) {
1372 		u_int ncpus = (mp_maxid + 1) ? (mp_maxid + 1) : MAXCPU;
1373 
1374 		slabsize = UMA_PCPU_ALLOC_SIZE;
1375 		keg->uk_ppera = ncpus;
1376 	} else {
1377 		slabsize = UMA_SLAB_SIZE;
1378 		keg->uk_ppera = 1;
1379 	}
1380 
1381 	/*
1382 	 * Calculate the size of each allocation (rsize) according to
1383 	 * alignment.  If the requested size is smaller than we have
1384 	 * allocation bits for we round it up.
1385 	 */
1386 	rsize = keg->uk_size;
1387 	if (rsize < slabsize / SLAB_SETSIZE)
1388 		rsize = slabsize / SLAB_SETSIZE;
1389 	if (rsize & keg->uk_align)
1390 		rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1);
1391 	keg->uk_rsize = rsize;
1392 
1393 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0 ||
1394 	    keg->uk_rsize < UMA_PCPU_ALLOC_SIZE,
1395 	    ("%s: size %u too large", __func__, keg->uk_rsize));
1396 
1397 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
1398 		shsize = 0;
1399 	else
1400 		shsize = sizeof(struct uma_slab);
1401 
1402 	if (rsize <= slabsize - shsize)
1403 		keg->uk_ipers = (slabsize - shsize) / rsize;
1404 	else {
1405 		/* Handle special case when we have 1 item per slab, so
1406 		 * alignment requirement can be relaxed. */
1407 		KASSERT(keg->uk_size <= slabsize - shsize,
1408 		    ("%s: size %u greater than slab", __func__, keg->uk_size));
1409 		keg->uk_ipers = 1;
1410 	}
1411 	KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
1412 	    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
1413 
1414 	memused = keg->uk_ipers * rsize + shsize;
1415 	wastedspace = slabsize - memused;
1416 
1417 	/*
1418 	 * We can't do OFFPAGE if we're internal or if we've been
1419 	 * asked to not go to the VM for buckets.  If we do this we
1420 	 * may end up going to the VM  for slabs which we do not
1421 	 * want to do if we're UMA_ZFLAG_CACHEONLY as a result
1422 	 * of UMA_ZONE_VM, which clearly forbids it.
1423 	 */
1424 	if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
1425 	    (keg->uk_flags & UMA_ZFLAG_CACHEONLY))
1426 		return;
1427 
1428 	/*
1429 	 * See if using an OFFPAGE slab will limit our waste.  Only do
1430 	 * this if it permits more items per-slab.
1431 	 *
1432 	 * XXX We could try growing slabsize to limit max waste as well.
1433 	 * Historically this was not done because the VM could not
1434 	 * efficiently handle contiguous allocations.
1435 	 */
1436 	if ((wastedspace >= slabsize / UMA_MAX_WASTE) &&
1437 	    (keg->uk_ipers < (slabsize / keg->uk_rsize))) {
1438 		keg->uk_ipers = slabsize / keg->uk_rsize;
1439 		KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
1440 		    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
1441 		CTR6(KTR_UMA, "UMA decided we need offpage slab headers for "
1442 		    "keg: %s(%p), calculated wastedspace = %d, "
1443 		    "maximum wasted space allowed = %d, "
1444 		    "calculated ipers = %d, "
1445 		    "new wasted space = %d\n", keg->uk_name, keg, wastedspace,
1446 		    slabsize / UMA_MAX_WASTE, keg->uk_ipers,
1447 		    slabsize - keg->uk_ipers * keg->uk_rsize);
1448 		keg->uk_flags |= UMA_ZONE_OFFPAGE;
1449 	}
1450 
1451 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
1452 	    (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1453 		keg->uk_flags |= UMA_ZONE_HASH;
1454 }
1455 
1456 /*
1457  * Finish creating a large (> UMA_SLAB_SIZE) uma kegs.  Just give in and do
1458  * OFFPAGE for now.  When I can allow for more dynamic slab sizes this will be
1459  * more complicated.
1460  *
1461  * Arguments
1462  *	keg  The keg we should initialize
1463  *
1464  * Returns
1465  *	Nothing
1466  */
1467 static void
1468 keg_large_init(uma_keg_t keg)
1469 {
1470 	u_int shsize;
1471 
1472 	KASSERT(keg != NULL, ("Keg is null in keg_large_init"));
1473 	KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0,
1474 	    ("keg_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY keg"));
1475 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
1476 	    ("%s: Cannot large-init a UMA_ZONE_PCPU keg", __func__));
1477 
1478 	keg->uk_ppera = howmany(keg->uk_size, PAGE_SIZE);
1479 	keg->uk_ipers = 1;
1480 	keg->uk_rsize = keg->uk_size;
1481 
1482 	/* Check whether we have enough space to not do OFFPAGE. */
1483 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) == 0) {
1484 		shsize = sizeof(struct uma_slab);
1485 		if (shsize & UMA_ALIGN_PTR)
1486 			shsize = (shsize & ~UMA_ALIGN_PTR) +
1487 			    (UMA_ALIGN_PTR + 1);
1488 
1489 		if (PAGE_SIZE * keg->uk_ppera - keg->uk_rsize < shsize) {
1490 			/*
1491 			 * We can't do OFFPAGE if we're internal, in which case
1492 			 * we need an extra page per allocation to contain the
1493 			 * slab header.
1494 			 */
1495 			if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) == 0)
1496 				keg->uk_flags |= UMA_ZONE_OFFPAGE;
1497 			else
1498 				keg->uk_ppera++;
1499 		}
1500 	}
1501 
1502 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
1503 	    (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1504 		keg->uk_flags |= UMA_ZONE_HASH;
1505 }
1506 
1507 static void
1508 keg_cachespread_init(uma_keg_t keg)
1509 {
1510 	int alignsize;
1511 	int trailer;
1512 	int pages;
1513 	int rsize;
1514 
1515 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
1516 	    ("%s: Cannot cachespread-init a UMA_ZONE_PCPU keg", __func__));
1517 
1518 	alignsize = keg->uk_align + 1;
1519 	rsize = keg->uk_size;
1520 	/*
1521 	 * We want one item to start on every align boundary in a page.  To
1522 	 * do this we will span pages.  We will also extend the item by the
1523 	 * size of align if it is an even multiple of align.  Otherwise, it
1524 	 * would fall on the same boundary every time.
1525 	 */
1526 	if (rsize & keg->uk_align)
1527 		rsize = (rsize & ~keg->uk_align) + alignsize;
1528 	if ((rsize & alignsize) == 0)
1529 		rsize += alignsize;
1530 	trailer = rsize - keg->uk_size;
1531 	pages = (rsize * (PAGE_SIZE / alignsize)) / PAGE_SIZE;
1532 	pages = MIN(pages, (128 * 1024) / PAGE_SIZE);
1533 	keg->uk_rsize = rsize;
1534 	keg->uk_ppera = pages;
1535 	keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize;
1536 	keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
1537 	KASSERT(keg->uk_ipers <= SLAB_SETSIZE,
1538 	    ("%s: keg->uk_ipers too high(%d) increase max_ipers", __func__,
1539 	    keg->uk_ipers));
1540 }
1541 
1542 /*
1543  * Keg header ctor.  This initializes all fields, locks, etc.  And inserts
1544  * the keg onto the global keg list.
1545  *
1546  * Arguments/Returns follow uma_ctor specifications
1547  *	udata  Actually uma_kctor_args
1548  */
1549 static int
1550 keg_ctor(void *mem, int size, void *udata, int flags)
1551 {
1552 	struct uma_kctor_args *arg = udata;
1553 	uma_keg_t keg = mem;
1554 	uma_zone_t zone;
1555 
1556 	bzero(keg, size);
1557 	keg->uk_size = arg->size;
1558 	keg->uk_init = arg->uminit;
1559 	keg->uk_fini = arg->fini;
1560 	keg->uk_align = arg->align;
1561 	keg->uk_cursor = 0;
1562 	keg->uk_free = 0;
1563 	keg->uk_reserve = 0;
1564 	keg->uk_pages = 0;
1565 	keg->uk_flags = arg->flags;
1566 	keg->uk_slabzone = NULL;
1567 
1568 	/*
1569 	 * The master zone is passed to us at keg-creation time.
1570 	 */
1571 	zone = arg->zone;
1572 	keg->uk_name = zone->uz_name;
1573 
1574 	if (arg->flags & UMA_ZONE_VM)
1575 		keg->uk_flags |= UMA_ZFLAG_CACHEONLY;
1576 
1577 	if (arg->flags & UMA_ZONE_ZINIT)
1578 		keg->uk_init = zero_init;
1579 
1580 	if (arg->flags & UMA_ZONE_MALLOC)
1581 		keg->uk_flags |= UMA_ZONE_VTOSLAB;
1582 
1583 	if (arg->flags & UMA_ZONE_PCPU)
1584 #ifdef SMP
1585 		keg->uk_flags |= UMA_ZONE_OFFPAGE;
1586 #else
1587 		keg->uk_flags &= ~UMA_ZONE_PCPU;
1588 #endif
1589 
1590 	if (keg->uk_flags & UMA_ZONE_CACHESPREAD) {
1591 		keg_cachespread_init(keg);
1592 	} else {
1593 		if (keg->uk_size > UMA_SLAB_SPACE)
1594 			keg_large_init(keg);
1595 		else
1596 			keg_small_init(keg);
1597 	}
1598 
1599 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
1600 		keg->uk_slabzone = slabzone;
1601 
1602 	/*
1603 	 * If we haven't booted yet we need allocations to go through the
1604 	 * startup cache until the vm is ready.
1605 	 */
1606 	if (booted < BOOT_PAGEALLOC)
1607 		keg->uk_allocf = startup_alloc;
1608 #ifdef UMA_MD_SMALL_ALLOC
1609 	else if (keg->uk_ppera == 1)
1610 		keg->uk_allocf = uma_small_alloc;
1611 #endif
1612 	else if (keg->uk_flags & UMA_ZONE_PCPU)
1613 		keg->uk_allocf = pcpu_page_alloc;
1614 	else
1615 		keg->uk_allocf = page_alloc;
1616 #ifdef UMA_MD_SMALL_ALLOC
1617 	if (keg->uk_ppera == 1)
1618 		keg->uk_freef = uma_small_free;
1619 	else
1620 #endif
1621 	if (keg->uk_flags & UMA_ZONE_PCPU)
1622 		keg->uk_freef = pcpu_page_free;
1623 	else
1624 		keg->uk_freef = page_free;
1625 
1626 	/*
1627 	 * Initialize keg's lock
1628 	 */
1629 	KEG_LOCK_INIT(keg, (arg->flags & UMA_ZONE_MTXCLASS));
1630 
1631 	/*
1632 	 * If we're putting the slab header in the actual page we need to
1633 	 * figure out where in each page it goes.  This calculates a right
1634 	 * justified offset into the memory on an ALIGN_PTR boundary.
1635 	 */
1636 	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) {
1637 		u_int totsize;
1638 
1639 		/* Size of the slab struct and free list */
1640 		totsize = sizeof(struct uma_slab);
1641 
1642 		if (totsize & UMA_ALIGN_PTR)
1643 			totsize = (totsize & ~UMA_ALIGN_PTR) +
1644 			    (UMA_ALIGN_PTR + 1);
1645 		keg->uk_pgoff = (PAGE_SIZE * keg->uk_ppera) - totsize;
1646 
1647 		/*
1648 		 * The only way the following is possible is if with our
1649 		 * UMA_ALIGN_PTR adjustments we are now bigger than
1650 		 * UMA_SLAB_SIZE.  I haven't checked whether this is
1651 		 * mathematically possible for all cases, so we make
1652 		 * sure here anyway.
1653 		 */
1654 		totsize = keg->uk_pgoff + sizeof(struct uma_slab);
1655 		if (totsize > PAGE_SIZE * keg->uk_ppera) {
1656 			printf("zone %s ipers %d rsize %d size %d\n",
1657 			    zone->uz_name, keg->uk_ipers, keg->uk_rsize,
1658 			    keg->uk_size);
1659 			panic("UMA slab won't fit.");
1660 		}
1661 	}
1662 
1663 	if (keg->uk_flags & UMA_ZONE_HASH)
1664 		hash_alloc(&keg->uk_hash);
1665 
1666 	CTR5(KTR_UMA, "keg_ctor %p zone %s(%p) out %d free %d\n",
1667 	    keg, zone->uz_name, zone,
1668 	    (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free,
1669 	    keg->uk_free);
1670 
1671 	LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);
1672 
1673 	rw_wlock(&uma_rwlock);
1674 	LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
1675 	rw_wunlock(&uma_rwlock);
1676 	return (0);
1677 }
1678 
1679 /*
1680  * Zone header ctor.  This initializes all fields, locks, etc.
1681  *
1682  * Arguments/Returns follow uma_ctor specifications
1683  *	udata  Actually uma_zctor_args
1684  */
1685 static int
1686 zone_ctor(void *mem, int size, void *udata, int flags)
1687 {
1688 	struct uma_zctor_args *arg = udata;
1689 	uma_zone_t zone = mem;
1690 	uma_zone_t z;
1691 	uma_keg_t keg;
1692 
1693 	bzero(zone, size);
1694 	zone->uz_name = arg->name;
1695 	zone->uz_ctor = arg->ctor;
1696 	zone->uz_dtor = arg->dtor;
1697 	zone->uz_slab = zone_fetch_slab;
1698 	zone->uz_init = NULL;
1699 	zone->uz_fini = NULL;
1700 	zone->uz_allocs = 0;
1701 	zone->uz_frees = 0;
1702 	zone->uz_fails = 0;
1703 	zone->uz_sleeps = 0;
1704 	zone->uz_count = 0;
1705 	zone->uz_count_min = 0;
1706 	zone->uz_flags = 0;
1707 	zone->uz_warning = NULL;
1708 	/* The domain structures follow the cpu structures. */
1709 	zone->uz_domain = (struct uma_zone_domain *)&zone->uz_cpu[mp_ncpus];
1710 	timevalclear(&zone->uz_ratecheck);
1711 	keg = arg->keg;
1712 
1713 	ZONE_LOCK_INIT(zone, (arg->flags & UMA_ZONE_MTXCLASS));
1714 
1715 	/*
1716 	 * This is a pure cache zone, no kegs.
1717 	 */
1718 	if (arg->import) {
1719 		if (arg->flags & UMA_ZONE_VM)
1720 			arg->flags |= UMA_ZFLAG_CACHEONLY;
1721 		zone->uz_flags = arg->flags;
1722 		zone->uz_size = arg->size;
1723 		zone->uz_import = arg->import;
1724 		zone->uz_release = arg->release;
1725 		zone->uz_arg = arg->arg;
1726 		zone->uz_lockptr = &zone->uz_lock;
1727 		rw_wlock(&uma_rwlock);
1728 		LIST_INSERT_HEAD(&uma_cachezones, zone, uz_link);
1729 		rw_wunlock(&uma_rwlock);
1730 		goto out;
1731 	}
1732 
1733 	/*
1734 	 * Use the regular zone/keg/slab allocator.
1735 	 */
1736 	zone->uz_import = (uma_import)zone_import;
1737 	zone->uz_release = (uma_release)zone_release;
1738 	zone->uz_arg = zone;
1739 
1740 	if (arg->flags & UMA_ZONE_SECONDARY) {
1741 		KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
1742 		zone->uz_init = arg->uminit;
1743 		zone->uz_fini = arg->fini;
1744 		zone->uz_lockptr = &keg->uk_lock;
1745 		zone->uz_flags |= UMA_ZONE_SECONDARY;
1746 		rw_wlock(&uma_rwlock);
1747 		ZONE_LOCK(zone);
1748 		LIST_FOREACH(z, &keg->uk_zones, uz_link) {
1749 			if (LIST_NEXT(z, uz_link) == NULL) {
1750 				LIST_INSERT_AFTER(z, zone, uz_link);
1751 				break;
1752 			}
1753 		}
1754 		ZONE_UNLOCK(zone);
1755 		rw_wunlock(&uma_rwlock);
1756 	} else if (keg == NULL) {
1757 		if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
1758 		    arg->align, arg->flags)) == NULL)
1759 			return (ENOMEM);
1760 	} else {
1761 		struct uma_kctor_args karg;
1762 		int error;
1763 
1764 		/* We should only be here from uma_startup() */
1765 		karg.size = arg->size;
1766 		karg.uminit = arg->uminit;
1767 		karg.fini = arg->fini;
1768 		karg.align = arg->align;
1769 		karg.flags = arg->flags;
1770 		karg.zone = zone;
1771 		error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
1772 		    flags);
1773 		if (error)
1774 			return (error);
1775 	}
1776 
1777 	/*
1778 	 * Link in the first keg.
1779 	 */
1780 	zone->uz_klink.kl_keg = keg;
1781 	LIST_INSERT_HEAD(&zone->uz_kegs, &zone->uz_klink, kl_link);
1782 	zone->uz_lockptr = &keg->uk_lock;
1783 	zone->uz_size = keg->uk_size;
1784 	zone->uz_flags |= (keg->uk_flags &
1785 	    (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT));
1786 
1787 	/*
1788 	 * Some internal zones don't have room allocated for the per cpu
1789 	 * caches.  If we're internal, bail out here.
1790 	 */
1791 	if (keg->uk_flags & UMA_ZFLAG_INTERNAL) {
1792 		KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0,
1793 		    ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
1794 		return (0);
1795 	}
1796 
1797 out:
1798 	KASSERT((arg->flags & (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET)) !=
1799 	    (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET),
1800 	    ("Invalid zone flag combination"));
1801 	if ((arg->flags & UMA_ZONE_MAXBUCKET) != 0)
1802 		zone->uz_count = BUCKET_MAX;
1803 	else if ((arg->flags & UMA_ZONE_NOBUCKET) != 0)
1804 		zone->uz_count = 0;
1805 	else
1806 		zone->uz_count = bucket_select(zone->uz_size);
1807 	zone->uz_count_min = zone->uz_count;
1808 
1809 	return (0);
1810 }
1811 
1812 /*
1813  * Keg header dtor.  This frees all data, destroys locks, frees the hash
1814  * table and removes the keg from the global list.
1815  *
1816  * Arguments/Returns follow uma_dtor specifications
1817  *	udata  unused
1818  */
1819 static void
1820 keg_dtor(void *arg, int size, void *udata)
1821 {
1822 	uma_keg_t keg;
1823 
1824 	keg = (uma_keg_t)arg;
1825 	KEG_LOCK(keg);
1826 	if (keg->uk_free != 0) {
1827 		printf("Freed UMA keg (%s) was not empty (%d items). "
1828 		    " Lost %d pages of memory.\n",
1829 		    keg->uk_name ? keg->uk_name : "",
1830 		    keg->uk_free, keg->uk_pages);
1831 	}
1832 	KEG_UNLOCK(keg);
1833 
1834 	hash_free(&keg->uk_hash);
1835 
1836 	KEG_LOCK_FINI(keg);
1837 }
1838 
1839 /*
1840  * Zone header dtor.
1841  *
1842  * Arguments/Returns follow uma_dtor specifications
1843  *	udata  unused
1844  */
1845 static void
1846 zone_dtor(void *arg, int size, void *udata)
1847 {
1848 	uma_klink_t klink;
1849 	uma_zone_t zone;
1850 	uma_keg_t keg;
1851 
1852 	zone = (uma_zone_t)arg;
1853 	keg = zone_first_keg(zone);
1854 
1855 	if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
1856 		cache_drain(zone);
1857 
1858 	rw_wlock(&uma_rwlock);
1859 	LIST_REMOVE(zone, uz_link);
1860 	rw_wunlock(&uma_rwlock);
1861 	/*
1862 	 * XXX there are some races here where
1863 	 * the zone can be drained but zone lock
1864 	 * released and then refilled before we
1865 	 * remove it... we dont care for now
1866 	 */
1867 	zone_drain_wait(zone, M_WAITOK);
1868 	/*
1869 	 * Unlink all of our kegs.
1870 	 */
1871 	while ((klink = LIST_FIRST(&zone->uz_kegs)) != NULL) {
1872 		klink->kl_keg = NULL;
1873 		LIST_REMOVE(klink, kl_link);
1874 		if (klink == &zone->uz_klink)
1875 			continue;
1876 		free(klink, M_TEMP);
1877 	}
1878 	/*
1879 	 * We only destroy kegs from non secondary zones.
1880 	 */
1881 	if (keg != NULL && (zone->uz_flags & UMA_ZONE_SECONDARY) == 0)  {
1882 		rw_wlock(&uma_rwlock);
1883 		LIST_REMOVE(keg, uk_link);
1884 		rw_wunlock(&uma_rwlock);
1885 		zone_free_item(kegs, keg, NULL, SKIP_NONE);
1886 	}
1887 	ZONE_LOCK_FINI(zone);
1888 }
1889 
1890 /*
1891  * Traverses every zone in the system and calls a callback
1892  *
1893  * Arguments:
1894  *	zfunc  A pointer to a function which accepts a zone
1895  *		as an argument.
1896  *
1897  * Returns:
1898  *	Nothing
1899  */
1900 static void
1901 zone_foreach(void (*zfunc)(uma_zone_t))
1902 {
1903 	uma_keg_t keg;
1904 	uma_zone_t zone;
1905 
1906 	rw_rlock(&uma_rwlock);
1907 	LIST_FOREACH(keg, &uma_kegs, uk_link) {
1908 		LIST_FOREACH(zone, &keg->uk_zones, uz_link)
1909 			zfunc(zone);
1910 	}
1911 	rw_runlock(&uma_rwlock);
1912 }
1913 
1914 /*
1915  * Count how many pages do we need to bootstrap.  VM supplies
1916  * its need in early zones in the argument, we add up our zones,
1917  * which consist of: UMA Slabs, UMA Hash and 9 Bucket zones. The
1918  * zone of zones and zone of kegs are accounted separately.
1919  */
1920 #define	UMA_BOOT_ZONES	11
1921 /* Zone of zones and zone of kegs have arbitrary alignment. */
1922 #define	UMA_BOOT_ALIGN	32
1923 static int zsize, ksize;
1924 int
1925 uma_startup_count(int vm_zones)
1926 {
1927 	int zones, pages;
1928 
1929 	ksize = sizeof(struct uma_keg) +
1930 	    (sizeof(struct uma_domain) * vm_ndomains);
1931 	zsize = sizeof(struct uma_zone) +
1932 	    (sizeof(struct uma_cache) * (mp_maxid + 1)) +
1933 	    (sizeof(struct uma_zone_domain) * vm_ndomains);
1934 
1935 	/*
1936 	 * Memory for the zone of kegs and its keg,
1937 	 * and for zone of zones.
1938 	 */
1939 	pages = howmany(roundup(zsize, CACHE_LINE_SIZE) * 2 +
1940 	    roundup(ksize, CACHE_LINE_SIZE), PAGE_SIZE);
1941 
1942 #ifdef	UMA_MD_SMALL_ALLOC
1943 	zones = UMA_BOOT_ZONES;
1944 #else
1945 	zones = UMA_BOOT_ZONES + vm_zones;
1946 	vm_zones = 0;
1947 #endif
1948 
1949 	/* Memory for the rest of startup zones, UMA and VM, ... */
1950 	if (zsize > UMA_SLAB_SPACE)
1951 		pages += (zones + vm_zones) *
1952 		    howmany(roundup2(zsize, UMA_BOOT_ALIGN), UMA_SLAB_SIZE);
1953 	else if (roundup2(zsize, UMA_BOOT_ALIGN) > UMA_SLAB_SPACE)
1954 		pages += zones;
1955 	else
1956 		pages += howmany(zones,
1957 		    UMA_SLAB_SPACE / roundup2(zsize, UMA_BOOT_ALIGN));
1958 
1959 	/* ... and their kegs. Note that zone of zones allocates a keg! */
1960 	pages += howmany(zones + 1,
1961 	    UMA_SLAB_SPACE / roundup2(ksize, UMA_BOOT_ALIGN));
1962 
1963 	/*
1964 	 * Most of startup zones are not going to be offpages, that's
1965 	 * why we use UMA_SLAB_SPACE instead of UMA_SLAB_SIZE in all
1966 	 * calculations.  Some large bucket zones will be offpage, and
1967 	 * thus will allocate hashes.  We take conservative approach
1968 	 * and assume that all zones may allocate hash.  This may give
1969 	 * us some positive inaccuracy, usually an extra single page.
1970 	 */
1971 	pages += howmany(zones, UMA_SLAB_SPACE /
1972 	    (sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT));
1973 
1974 	return (pages);
1975 }
1976 
1977 void
1978 uma_startup(void *mem, int npages)
1979 {
1980 	struct uma_zctor_args args;
1981 	uma_keg_t masterkeg;
1982 	uintptr_t m;
1983 
1984 #ifdef DIAGNOSTIC
1985 	printf("Entering %s with %d boot pages configured\n", __func__, npages);
1986 #endif
1987 
1988 	rw_init(&uma_rwlock, "UMA lock");
1989 
1990 	/* Use bootpages memory for the zone of zones and zone of kegs. */
1991 	m = (uintptr_t)mem;
1992 	zones = (uma_zone_t)m;
1993 	m += roundup(zsize, CACHE_LINE_SIZE);
1994 	kegs = (uma_zone_t)m;
1995 	m += roundup(zsize, CACHE_LINE_SIZE);
1996 	masterkeg = (uma_keg_t)m;
1997 	m += roundup(ksize, CACHE_LINE_SIZE);
1998 	m = roundup(m, PAGE_SIZE);
1999 	npages -= (m - (uintptr_t)mem) / PAGE_SIZE;
2000 	mem = (void *)m;
2001 
2002 	/* "manually" create the initial zone */
2003 	memset(&args, 0, sizeof(args));
2004 	args.name = "UMA Kegs";
2005 	args.size = ksize;
2006 	args.ctor = keg_ctor;
2007 	args.dtor = keg_dtor;
2008 	args.uminit = zero_init;
2009 	args.fini = NULL;
2010 	args.keg = masterkeg;
2011 	args.align = UMA_BOOT_ALIGN - 1;
2012 	args.flags = UMA_ZFLAG_INTERNAL;
2013 	zone_ctor(kegs, zsize, &args, M_WAITOK);
2014 
2015 	bootmem = mem;
2016 	boot_pages = npages;
2017 
2018 	args.name = "UMA Zones";
2019 	args.size = zsize;
2020 	args.ctor = zone_ctor;
2021 	args.dtor = zone_dtor;
2022 	args.uminit = zero_init;
2023 	args.fini = NULL;
2024 	args.keg = NULL;
2025 	args.align = UMA_BOOT_ALIGN - 1;
2026 	args.flags = UMA_ZFLAG_INTERNAL;
2027 	zone_ctor(zones, zsize, &args, M_WAITOK);
2028 
2029 	/* Now make a zone for slab headers */
2030 	slabzone = uma_zcreate("UMA Slabs",
2031 				sizeof(struct uma_slab),
2032 				NULL, NULL, NULL, NULL,
2033 				UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
2034 
2035 	hashzone = uma_zcreate("UMA Hash",
2036 	    sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
2037 	    NULL, NULL, NULL, NULL,
2038 	    UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
2039 
2040 	bucket_init();
2041 
2042 	booted = BOOT_STRAPPED;
2043 }
2044 
2045 void
2046 uma_startup1(void)
2047 {
2048 
2049 #ifdef DIAGNOSTIC
2050 	printf("Entering %s with %d boot pages left\n", __func__, boot_pages);
2051 #endif
2052 	booted = BOOT_PAGEALLOC;
2053 }
2054 
2055 void
2056 uma_startup2(void)
2057 {
2058 
2059 #ifdef DIAGNOSTIC
2060 	printf("Entering %s with %d boot pages left\n", __func__, boot_pages);
2061 #endif
2062 	booted = BOOT_BUCKETS;
2063 	sx_init(&uma_drain_lock, "umadrain");
2064 	bucket_enable();
2065 }
2066 
2067 /*
2068  * Initialize our callout handle
2069  *
2070  */
2071 static void
2072 uma_startup3(void)
2073 {
2074 
2075 #ifdef INVARIANTS
2076 	TUNABLE_INT_FETCH("vm.debug.divisor", &dbg_divisor);
2077 	uma_dbg_cnt = counter_u64_alloc(M_WAITOK);
2078 	uma_skip_cnt = counter_u64_alloc(M_WAITOK);
2079 #endif
2080 	callout_init(&uma_callout, 1);
2081 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
2082 	booted = BOOT_RUNNING;
2083 }
2084 
2085 static uma_keg_t
2086 uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
2087 		int align, uint32_t flags)
2088 {
2089 	struct uma_kctor_args args;
2090 
2091 	args.size = size;
2092 	args.uminit = uminit;
2093 	args.fini = fini;
2094 	args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align;
2095 	args.flags = flags;
2096 	args.zone = zone;
2097 	return (zone_alloc_item(kegs, &args, UMA_ANYDOMAIN, M_WAITOK));
2098 }
2099 
2100 /* Public functions */
2101 /* See uma.h */
2102 void
2103 uma_set_align(int align)
2104 {
2105 
2106 	if (align != UMA_ALIGN_CACHE)
2107 		uma_align_cache = align;
2108 }
2109 
2110 /* See uma.h */
2111 uma_zone_t
2112 uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
2113 		uma_init uminit, uma_fini fini, int align, uint32_t flags)
2114 
2115 {
2116 	struct uma_zctor_args args;
2117 	uma_zone_t res;
2118 	bool locked;
2119 
2120 	KASSERT(powerof2(align + 1), ("invalid zone alignment %d for \"%s\"",
2121 	    align, name));
2122 
2123 	/* This stuff is essential for the zone ctor */
2124 	memset(&args, 0, sizeof(args));
2125 	args.name = name;
2126 	args.size = size;
2127 	args.ctor = ctor;
2128 	args.dtor = dtor;
2129 	args.uminit = uminit;
2130 	args.fini = fini;
2131 #ifdef  INVARIANTS
2132 	/*
2133 	 * If a zone is being created with an empty constructor and
2134 	 * destructor, pass UMA constructor/destructor which checks for
2135 	 * memory use after free.
2136 	 */
2137 	if ((!(flags & (UMA_ZONE_ZINIT | UMA_ZONE_NOFREE))) &&
2138 	    ctor == NULL && dtor == NULL && uminit == NULL && fini == NULL) {
2139 		args.ctor = trash_ctor;
2140 		args.dtor = trash_dtor;
2141 		args.uminit = trash_init;
2142 		args.fini = trash_fini;
2143 	}
2144 #endif
2145 	args.align = align;
2146 	args.flags = flags;
2147 	args.keg = NULL;
2148 
2149 	if (booted < BOOT_BUCKETS) {
2150 		locked = false;
2151 	} else {
2152 		sx_slock(&uma_drain_lock);
2153 		locked = true;
2154 	}
2155 	res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK);
2156 	if (locked)
2157 		sx_sunlock(&uma_drain_lock);
2158 	return (res);
2159 }
2160 
2161 /* See uma.h */
2162 uma_zone_t
2163 uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
2164 		    uma_init zinit, uma_fini zfini, uma_zone_t master)
2165 {
2166 	struct uma_zctor_args args;
2167 	uma_keg_t keg;
2168 	uma_zone_t res;
2169 	bool locked;
2170 
2171 	keg = zone_first_keg(master);
2172 	memset(&args, 0, sizeof(args));
2173 	args.name = name;
2174 	args.size = keg->uk_size;
2175 	args.ctor = ctor;
2176 	args.dtor = dtor;
2177 	args.uminit = zinit;
2178 	args.fini = zfini;
2179 	args.align = keg->uk_align;
2180 	args.flags = keg->uk_flags | UMA_ZONE_SECONDARY;
2181 	args.keg = keg;
2182 
2183 	if (booted < BOOT_BUCKETS) {
2184 		locked = false;
2185 	} else {
2186 		sx_slock(&uma_drain_lock);
2187 		locked = true;
2188 	}
2189 	/* XXX Attaches only one keg of potentially many. */
2190 	res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK);
2191 	if (locked)
2192 		sx_sunlock(&uma_drain_lock);
2193 	return (res);
2194 }
2195 
2196 /* See uma.h */
2197 uma_zone_t
2198 uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor,
2199 		    uma_init zinit, uma_fini zfini, uma_import zimport,
2200 		    uma_release zrelease, void *arg, int flags)
2201 {
2202 	struct uma_zctor_args args;
2203 
2204 	memset(&args, 0, sizeof(args));
2205 	args.name = name;
2206 	args.size = size;
2207 	args.ctor = ctor;
2208 	args.dtor = dtor;
2209 	args.uminit = zinit;
2210 	args.fini = zfini;
2211 	args.import = zimport;
2212 	args.release = zrelease;
2213 	args.arg = arg;
2214 	args.align = 0;
2215 	args.flags = flags;
2216 
2217 	return (zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK));
2218 }
2219 
2220 static void
2221 zone_lock_pair(uma_zone_t a, uma_zone_t b)
2222 {
2223 	if (a < b) {
2224 		ZONE_LOCK(a);
2225 		mtx_lock_flags(b->uz_lockptr, MTX_DUPOK);
2226 	} else {
2227 		ZONE_LOCK(b);
2228 		mtx_lock_flags(a->uz_lockptr, MTX_DUPOK);
2229 	}
2230 }
2231 
2232 static void
2233 zone_unlock_pair(uma_zone_t a, uma_zone_t b)
2234 {
2235 
2236 	ZONE_UNLOCK(a);
2237 	ZONE_UNLOCK(b);
2238 }
2239 
2240 int
2241 uma_zsecond_add(uma_zone_t zone, uma_zone_t master)
2242 {
2243 	uma_klink_t klink;
2244 	uma_klink_t kl;
2245 	int error;
2246 
2247 	error = 0;
2248 	klink = malloc(sizeof(*klink), M_TEMP, M_WAITOK | M_ZERO);
2249 
2250 	zone_lock_pair(zone, master);
2251 	/*
2252 	 * zone must use vtoslab() to resolve objects and must already be
2253 	 * a secondary.
2254 	 */
2255 	if ((zone->uz_flags & (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY))
2256 	    != (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) {
2257 		error = EINVAL;
2258 		goto out;
2259 	}
2260 	/*
2261 	 * The new master must also use vtoslab().
2262 	 */
2263 	if ((zone->uz_flags & UMA_ZONE_VTOSLAB) != UMA_ZONE_VTOSLAB) {
2264 		error = EINVAL;
2265 		goto out;
2266 	}
2267 
2268 	/*
2269 	 * The underlying object must be the same size.  rsize
2270 	 * may be different.
2271 	 */
2272 	if (master->uz_size != zone->uz_size) {
2273 		error = E2BIG;
2274 		goto out;
2275 	}
2276 	/*
2277 	 * Put it at the end of the list.
2278 	 */
2279 	klink->kl_keg = zone_first_keg(master);
2280 	LIST_FOREACH(kl, &zone->uz_kegs, kl_link) {
2281 		if (LIST_NEXT(kl, kl_link) == NULL) {
2282 			LIST_INSERT_AFTER(kl, klink, kl_link);
2283 			break;
2284 		}
2285 	}
2286 	klink = NULL;
2287 	zone->uz_flags |= UMA_ZFLAG_MULTI;
2288 	zone->uz_slab = zone_fetch_slab_multi;
2289 
2290 out:
2291 	zone_unlock_pair(zone, master);
2292 	if (klink != NULL)
2293 		free(klink, M_TEMP);
2294 
2295 	return (error);
2296 }
2297 
2298 
2299 /* See uma.h */
2300 void
2301 uma_zdestroy(uma_zone_t zone)
2302 {
2303 
2304 	sx_slock(&uma_drain_lock);
2305 	zone_free_item(zones, zone, NULL, SKIP_NONE);
2306 	sx_sunlock(&uma_drain_lock);
2307 }
2308 
2309 void
2310 uma_zwait(uma_zone_t zone)
2311 {
2312 	void *item;
2313 
2314 	item = uma_zalloc_arg(zone, NULL, M_WAITOK);
2315 	uma_zfree(zone, item);
2316 }
2317 
2318 void *
2319 uma_zalloc_pcpu_arg(uma_zone_t zone, void *udata, int flags)
2320 {
2321 	void *item;
2322 #ifdef SMP
2323 	int i;
2324 
2325 	MPASS(zone->uz_flags & UMA_ZONE_PCPU);
2326 #endif
2327 	item = uma_zalloc_arg(zone, udata, flags & ~M_ZERO);
2328 	if (item != NULL && (flags & M_ZERO)) {
2329 #ifdef SMP
2330 		for (i = 0; i <= mp_maxid; i++)
2331 			bzero(zpcpu_get_cpu(item, i), zone->uz_size);
2332 #else
2333 		bzero(item, zone->uz_size);
2334 #endif
2335 	}
2336 	return (item);
2337 }
2338 
2339 /*
2340  * A stub while both regular and pcpu cases are identical.
2341  */
2342 void
2343 uma_zfree_pcpu_arg(uma_zone_t zone, void *item, void *udata)
2344 {
2345 
2346 #ifdef SMP
2347 	MPASS(zone->uz_flags & UMA_ZONE_PCPU);
2348 #endif
2349 	uma_zfree_arg(zone, item, udata);
2350 }
2351 
2352 /* See uma.h */
2353 void *
2354 uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
2355 {
2356 	uma_zone_domain_t zdom;
2357 	uma_bucket_t bucket;
2358 	uma_cache_t cache;
2359 	void *item;
2360 	int cpu, domain, lockfail;
2361 #ifdef INVARIANTS
2362 	bool skipdbg;
2363 #endif
2364 
2365 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
2366 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
2367 
2368 	/* This is the fast path allocation */
2369 	CTR4(KTR_UMA, "uma_zalloc_arg thread %x zone %s(%p) flags %d",
2370 	    curthread, zone->uz_name, zone, flags);
2371 
2372 	if (flags & M_WAITOK) {
2373 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2374 		    "uma_zalloc_arg: zone \"%s\"", zone->uz_name);
2375 	}
2376 	KASSERT((flags & M_EXEC) == 0, ("uma_zalloc_arg: called with M_EXEC"));
2377 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
2378 	    ("uma_zalloc_arg: called with spinlock or critical section held"));
2379 	if (zone->uz_flags & UMA_ZONE_PCPU)
2380 		KASSERT((flags & M_ZERO) == 0, ("allocating from a pcpu zone "
2381 		    "with M_ZERO passed"));
2382 
2383 #ifdef DEBUG_MEMGUARD
2384 	if (memguard_cmp_zone(zone)) {
2385 		item = memguard_alloc(zone->uz_size, flags);
2386 		if (item != NULL) {
2387 			if (zone->uz_init != NULL &&
2388 			    zone->uz_init(item, zone->uz_size, flags) != 0)
2389 				return (NULL);
2390 			if (zone->uz_ctor != NULL &&
2391 			    zone->uz_ctor(item, zone->uz_size, udata,
2392 			    flags) != 0) {
2393 			    	zone->uz_fini(item, zone->uz_size);
2394 				return (NULL);
2395 			}
2396 			return (item);
2397 		}
2398 		/* This is unfortunate but should not be fatal. */
2399 	}
2400 #endif
2401 	/*
2402 	 * If possible, allocate from the per-CPU cache.  There are two
2403 	 * requirements for safe access to the per-CPU cache: (1) the thread
2404 	 * accessing the cache must not be preempted or yield during access,
2405 	 * and (2) the thread must not migrate CPUs without switching which
2406 	 * cache it accesses.  We rely on a critical section to prevent
2407 	 * preemption and migration.  We release the critical section in
2408 	 * order to acquire the zone mutex if we are unable to allocate from
2409 	 * the current cache; when we re-acquire the critical section, we
2410 	 * must detect and handle migration if it has occurred.
2411 	 */
2412 	critical_enter();
2413 	cpu = curcpu;
2414 	cache = &zone->uz_cpu[cpu];
2415 
2416 zalloc_start:
2417 	bucket = cache->uc_allocbucket;
2418 	if (bucket != NULL && bucket->ub_cnt > 0) {
2419 		bucket->ub_cnt--;
2420 		item = bucket->ub_bucket[bucket->ub_cnt];
2421 #ifdef INVARIANTS
2422 		bucket->ub_bucket[bucket->ub_cnt] = NULL;
2423 #endif
2424 		KASSERT(item != NULL, ("uma_zalloc: Bucket pointer mangled."));
2425 		cache->uc_allocs++;
2426 		critical_exit();
2427 #ifdef INVARIANTS
2428 		skipdbg = uma_dbg_zskip(zone, item);
2429 #endif
2430 		if (zone->uz_ctor != NULL &&
2431 #ifdef INVARIANTS
2432 		    (!skipdbg || zone->uz_ctor != trash_ctor ||
2433 		    zone->uz_dtor != trash_dtor) &&
2434 #endif
2435 		    zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
2436 			atomic_add_long(&zone->uz_fails, 1);
2437 			zone_free_item(zone, item, udata, SKIP_DTOR);
2438 			return (NULL);
2439 		}
2440 #ifdef INVARIANTS
2441 		if (!skipdbg)
2442 			uma_dbg_alloc(zone, NULL, item);
2443 #endif
2444 		if (flags & M_ZERO)
2445 			uma_zero_item(item, zone);
2446 		return (item);
2447 	}
2448 
2449 	/*
2450 	 * We have run out of items in our alloc bucket.
2451 	 * See if we can switch with our free bucket.
2452 	 */
2453 	bucket = cache->uc_freebucket;
2454 	if (bucket != NULL && bucket->ub_cnt > 0) {
2455 		CTR2(KTR_UMA,
2456 		    "uma_zalloc: zone %s(%p) swapping empty with alloc",
2457 		    zone->uz_name, zone);
2458 		cache->uc_freebucket = cache->uc_allocbucket;
2459 		cache->uc_allocbucket = bucket;
2460 		goto zalloc_start;
2461 	}
2462 
2463 	/*
2464 	 * Discard any empty allocation bucket while we hold no locks.
2465 	 */
2466 	bucket = cache->uc_allocbucket;
2467 	cache->uc_allocbucket = NULL;
2468 	critical_exit();
2469 	if (bucket != NULL)
2470 		bucket_free(zone, bucket, udata);
2471 
2472 	if (zone->uz_flags & UMA_ZONE_NUMA)
2473 		domain = PCPU_GET(domain);
2474 	else
2475 		domain = UMA_ANYDOMAIN;
2476 
2477 	/* Short-circuit for zones without buckets and low memory. */
2478 	if (zone->uz_count == 0 || bucketdisable)
2479 		goto zalloc_item;
2480 
2481 	/*
2482 	 * Attempt to retrieve the item from the per-CPU cache has failed, so
2483 	 * we must go back to the zone.  This requires the zone lock, so we
2484 	 * must drop the critical section, then re-acquire it when we go back
2485 	 * to the cache.  Since the critical section is released, we may be
2486 	 * preempted or migrate.  As such, make sure not to maintain any
2487 	 * thread-local state specific to the cache from prior to releasing
2488 	 * the critical section.
2489 	 */
2490 	lockfail = 0;
2491 	if (ZONE_TRYLOCK(zone) == 0) {
2492 		/* Record contention to size the buckets. */
2493 		ZONE_LOCK(zone);
2494 		lockfail = 1;
2495 	}
2496 	critical_enter();
2497 	cpu = curcpu;
2498 	cache = &zone->uz_cpu[cpu];
2499 
2500 	/* See if we lost the race to fill the cache. */
2501 	if (cache->uc_allocbucket != NULL) {
2502 		ZONE_UNLOCK(zone);
2503 		goto zalloc_start;
2504 	}
2505 
2506 	/*
2507 	 * Check the zone's cache of buckets.
2508 	 */
2509 	if (domain == UMA_ANYDOMAIN)
2510 		zdom = &zone->uz_domain[0];
2511 	else
2512 		zdom = &zone->uz_domain[domain];
2513 	if ((bucket = LIST_FIRST(&zdom->uzd_buckets)) != NULL) {
2514 		KASSERT(bucket->ub_cnt != 0,
2515 		    ("uma_zalloc_arg: Returning an empty bucket."));
2516 
2517 		LIST_REMOVE(bucket, ub_link);
2518 		cache->uc_allocbucket = bucket;
2519 		ZONE_UNLOCK(zone);
2520 		goto zalloc_start;
2521 	}
2522 	/* We are no longer associated with this CPU. */
2523 	critical_exit();
2524 
2525 	/*
2526 	 * We bump the uz count when the cache size is insufficient to
2527 	 * handle the working set.
2528 	 */
2529 	if (lockfail && zone->uz_count < BUCKET_MAX)
2530 		zone->uz_count++;
2531 	ZONE_UNLOCK(zone);
2532 
2533 	/*
2534 	 * Now lets just fill a bucket and put it on the free list.  If that
2535 	 * works we'll restart the allocation from the beginning and it
2536 	 * will use the just filled bucket.
2537 	 */
2538 	bucket = zone_alloc_bucket(zone, udata, domain, flags);
2539 	CTR3(KTR_UMA, "uma_zalloc: zone %s(%p) bucket zone returned %p",
2540 	    zone->uz_name, zone, bucket);
2541 	if (bucket != NULL) {
2542 		ZONE_LOCK(zone);
2543 		critical_enter();
2544 		cpu = curcpu;
2545 		cache = &zone->uz_cpu[cpu];
2546 		/*
2547 		 * See if we lost the race or were migrated.  Cache the
2548 		 * initialized bucket to make this less likely or claim
2549 		 * the memory directly.
2550 		 */
2551 		if (cache->uc_allocbucket != NULL ||
2552 		    (zone->uz_flags & UMA_ZONE_NUMA &&
2553 		    domain != PCPU_GET(domain)))
2554 			LIST_INSERT_HEAD(&zdom->uzd_buckets, bucket, ub_link);
2555 		else
2556 			cache->uc_allocbucket = bucket;
2557 		ZONE_UNLOCK(zone);
2558 		goto zalloc_start;
2559 	}
2560 
2561 	/*
2562 	 * We may not be able to get a bucket so return an actual item.
2563 	 */
2564 zalloc_item:
2565 	item = zone_alloc_item(zone, udata, domain, flags);
2566 
2567 	return (item);
2568 }
2569 
2570 void *
2571 uma_zalloc_domain(uma_zone_t zone, void *udata, int domain, int flags)
2572 {
2573 
2574 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
2575 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
2576 
2577 	/* This is the fast path allocation */
2578 	CTR5(KTR_UMA,
2579 	    "uma_zalloc_domain thread %x zone %s(%p) domain %d flags %d",
2580 	    curthread, zone->uz_name, zone, domain, flags);
2581 
2582 	if (flags & M_WAITOK) {
2583 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2584 		    "uma_zalloc_domain: zone \"%s\"", zone->uz_name);
2585 	}
2586 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
2587 	    ("uma_zalloc_domain: called with spinlock or critical section held"));
2588 
2589 	return (zone_alloc_item(zone, udata, domain, flags));
2590 }
2591 
2592 /*
2593  * Find a slab with some space.  Prefer slabs that are partially used over those
2594  * that are totally full.  This helps to reduce fragmentation.
2595  *
2596  * If 'rr' is 1, search all domains starting from 'domain'.  Otherwise check
2597  * only 'domain'.
2598  */
2599 static uma_slab_t
2600 keg_first_slab(uma_keg_t keg, int domain, int rr)
2601 {
2602 	uma_domain_t dom;
2603 	uma_slab_t slab;
2604 	int start;
2605 
2606 	KASSERT(domain >= 0 && domain < vm_ndomains,
2607 	    ("keg_first_slab: domain %d out of range", domain));
2608 
2609 	slab = NULL;
2610 	start = domain;
2611 	do {
2612 		dom = &keg->uk_domain[domain];
2613 		if (!LIST_EMPTY(&dom->ud_part_slab))
2614 			return (LIST_FIRST(&dom->ud_part_slab));
2615 		if (!LIST_EMPTY(&dom->ud_free_slab)) {
2616 			slab = LIST_FIRST(&dom->ud_free_slab);
2617 			LIST_REMOVE(slab, us_link);
2618 			LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
2619 			return (slab);
2620 		}
2621 		if (rr)
2622 			domain = (domain + 1) % vm_ndomains;
2623 	} while (domain != start);
2624 
2625 	return (NULL);
2626 }
2627 
2628 static uma_slab_t
2629 keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int rdomain, int flags)
2630 {
2631 	uma_domain_t dom;
2632 	uma_slab_t slab;
2633 	int allocflags, domain, reserve, rr, start;
2634 
2635 	mtx_assert(&keg->uk_lock, MA_OWNED);
2636 	slab = NULL;
2637 	reserve = 0;
2638 	allocflags = flags;
2639 	if ((flags & M_USE_RESERVE) == 0)
2640 		reserve = keg->uk_reserve;
2641 
2642 	/*
2643 	 * Round-robin for non first-touch zones when there is more than one
2644 	 * domain.
2645 	 */
2646 	if (vm_ndomains == 1)
2647 		rdomain = 0;
2648 	rr = rdomain == UMA_ANYDOMAIN;
2649 	if (rr) {
2650 		keg->uk_cursor = (keg->uk_cursor + 1) % vm_ndomains;
2651 		domain = start = keg->uk_cursor;
2652 		/* Only block on the second pass. */
2653 		if ((flags & (M_WAITOK | M_NOVM)) == M_WAITOK)
2654 			allocflags = (allocflags & ~M_WAITOK) | M_NOWAIT;
2655 	} else
2656 		domain = start = rdomain;
2657 
2658 again:
2659 	do {
2660 		if (keg->uk_free > reserve &&
2661 		    (slab = keg_first_slab(keg, domain, rr)) != NULL) {
2662 			MPASS(slab->us_keg == keg);
2663 			return (slab);
2664 		}
2665 
2666 		/*
2667 		 * M_NOVM means don't ask at all!
2668 		 */
2669 		if (flags & M_NOVM)
2670 			break;
2671 
2672 		if (keg->uk_maxpages && keg->uk_pages >= keg->uk_maxpages) {
2673 			keg->uk_flags |= UMA_ZFLAG_FULL;
2674 			/*
2675 			 * If this is not a multi-zone, set the FULL bit.
2676 			 * Otherwise slab_multi() takes care of it.
2677 			 */
2678 			if ((zone->uz_flags & UMA_ZFLAG_MULTI) == 0) {
2679 				zone->uz_flags |= UMA_ZFLAG_FULL;
2680 				zone_log_warning(zone);
2681 				zone_maxaction(zone);
2682 			}
2683 			if (flags & M_NOWAIT)
2684 				return (NULL);
2685 			zone->uz_sleeps++;
2686 			msleep(keg, &keg->uk_lock, PVM, "keglimit", 0);
2687 			continue;
2688 		}
2689 		slab = keg_alloc_slab(keg, zone, domain, allocflags);
2690 		/*
2691 		 * If we got a slab here it's safe to mark it partially used
2692 		 * and return.  We assume that the caller is going to remove
2693 		 * at least one item.
2694 		 */
2695 		if (slab) {
2696 			MPASS(slab->us_keg == keg);
2697 			dom = &keg->uk_domain[slab->us_domain];
2698 			LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
2699 			return (slab);
2700 		}
2701 		if (rr) {
2702 			keg->uk_cursor = (keg->uk_cursor + 1) % vm_ndomains;
2703 			domain = keg->uk_cursor;
2704 		}
2705 	} while (domain != start);
2706 
2707 	/* Retry domain scan with blocking. */
2708 	if (allocflags != flags) {
2709 		allocflags = flags;
2710 		goto again;
2711 	}
2712 
2713 	/*
2714 	 * We might not have been able to get a slab but another cpu
2715 	 * could have while we were unlocked.  Check again before we
2716 	 * fail.
2717 	 */
2718 	if (keg->uk_free > reserve &&
2719 	    (slab = keg_first_slab(keg, domain, rr)) != NULL) {
2720 		MPASS(slab->us_keg == keg);
2721 		return (slab);
2722 	}
2723 	return (NULL);
2724 }
2725 
2726 static uma_slab_t
2727 zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int domain, int flags)
2728 {
2729 	uma_slab_t slab;
2730 
2731 	if (keg == NULL) {
2732 		keg = zone_first_keg(zone);
2733 		KEG_LOCK(keg);
2734 	}
2735 
2736 	for (;;) {
2737 		slab = keg_fetch_slab(keg, zone, domain, flags);
2738 		if (slab)
2739 			return (slab);
2740 		if (flags & (M_NOWAIT | M_NOVM))
2741 			break;
2742 	}
2743 	KEG_UNLOCK(keg);
2744 	return (NULL);
2745 }
2746 
2747 /*
2748  * uma_zone_fetch_slab_multi:  Fetches a slab from one available keg.  Returns
2749  * with the keg locked.  On NULL no lock is held.
2750  *
2751  * The last pointer is used to seed the search.  It is not required.
2752  */
2753 static uma_slab_t
2754 zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int domain, int rflags)
2755 {
2756 	uma_klink_t klink;
2757 	uma_slab_t slab;
2758 	uma_keg_t keg;
2759 	int flags;
2760 	int empty;
2761 	int full;
2762 
2763 	/*
2764 	 * Don't wait on the first pass.  This will skip limit tests
2765 	 * as well.  We don't want to block if we can find a provider
2766 	 * without blocking.
2767 	 */
2768 	flags = (rflags & ~M_WAITOK) | M_NOWAIT;
2769 	/*
2770 	 * Use the last slab allocated as a hint for where to start
2771 	 * the search.
2772 	 */
2773 	if (last != NULL) {
2774 		slab = keg_fetch_slab(last, zone, domain, flags);
2775 		if (slab)
2776 			return (slab);
2777 		KEG_UNLOCK(last);
2778 	}
2779 	/*
2780 	 * Loop until we have a slab incase of transient failures
2781 	 * while M_WAITOK is specified.  I'm not sure this is 100%
2782 	 * required but we've done it for so long now.
2783 	 */
2784 	for (;;) {
2785 		empty = 0;
2786 		full = 0;
2787 		/*
2788 		 * Search the available kegs for slabs.  Be careful to hold the
2789 		 * correct lock while calling into the keg layer.
2790 		 */
2791 		LIST_FOREACH(klink, &zone->uz_kegs, kl_link) {
2792 			keg = klink->kl_keg;
2793 			KEG_LOCK(keg);
2794 			if ((keg->uk_flags & UMA_ZFLAG_FULL) == 0) {
2795 				slab = keg_fetch_slab(keg, zone, domain, flags);
2796 				if (slab)
2797 					return (slab);
2798 			}
2799 			if (keg->uk_flags & UMA_ZFLAG_FULL)
2800 				full++;
2801 			else
2802 				empty++;
2803 			KEG_UNLOCK(keg);
2804 		}
2805 		if (rflags & (M_NOWAIT | M_NOVM))
2806 			break;
2807 		flags = rflags;
2808 		/*
2809 		 * All kegs are full.  XXX We can't atomically check all kegs
2810 		 * and sleep so just sleep for a short period and retry.
2811 		 */
2812 		if (full && !empty) {
2813 			ZONE_LOCK(zone);
2814 			zone->uz_flags |= UMA_ZFLAG_FULL;
2815 			zone->uz_sleeps++;
2816 			zone_log_warning(zone);
2817 			zone_maxaction(zone);
2818 			msleep(zone, zone->uz_lockptr, PVM,
2819 			    "zonelimit", hz/100);
2820 			zone->uz_flags &= ~UMA_ZFLAG_FULL;
2821 			ZONE_UNLOCK(zone);
2822 			continue;
2823 		}
2824 	}
2825 	return (NULL);
2826 }
2827 
2828 static void *
2829 slab_alloc_item(uma_keg_t keg, uma_slab_t slab)
2830 {
2831 	uma_domain_t dom;
2832 	void *item;
2833 	uint8_t freei;
2834 
2835 	MPASS(keg == slab->us_keg);
2836 	mtx_assert(&keg->uk_lock, MA_OWNED);
2837 
2838 	freei = BIT_FFS(SLAB_SETSIZE, &slab->us_free) - 1;
2839 	BIT_CLR(SLAB_SETSIZE, freei, &slab->us_free);
2840 	item = slab->us_data + (keg->uk_rsize * freei);
2841 	slab->us_freecount--;
2842 	keg->uk_free--;
2843 
2844 	/* Move this slab to the full list */
2845 	if (slab->us_freecount == 0) {
2846 		LIST_REMOVE(slab, us_link);
2847 		dom = &keg->uk_domain[slab->us_domain];
2848 		LIST_INSERT_HEAD(&dom->ud_full_slab, slab, us_link);
2849 	}
2850 
2851 	return (item);
2852 }
2853 
2854 static int
2855 zone_import(uma_zone_t zone, void **bucket, int max, int domain, int flags)
2856 {
2857 	uma_slab_t slab;
2858 	uma_keg_t keg;
2859 #ifdef NUMA
2860 	int stripe;
2861 #endif
2862 	int i;
2863 
2864 	slab = NULL;
2865 	keg = NULL;
2866 	/* Try to keep the buckets totally full */
2867 	for (i = 0; i < max; ) {
2868 		if ((slab = zone->uz_slab(zone, keg, domain, flags)) == NULL)
2869 			break;
2870 		keg = slab->us_keg;
2871 #ifdef NUMA
2872 		stripe = howmany(max, vm_ndomains);
2873 #endif
2874 		while (slab->us_freecount && i < max) {
2875 			bucket[i++] = slab_alloc_item(keg, slab);
2876 			if (keg->uk_free <= keg->uk_reserve)
2877 				break;
2878 #ifdef NUMA
2879 			/*
2880 			 * If the zone is striped we pick a new slab for every
2881 			 * N allocations.  Eliminating this conditional will
2882 			 * instead pick a new domain for each bucket rather
2883 			 * than stripe within each bucket.  The current option
2884 			 * produces more fragmentation and requires more cpu
2885 			 * time but yields better distribution.
2886 			 */
2887 			if ((zone->uz_flags & UMA_ZONE_NUMA) == 0 &&
2888 			    vm_ndomains > 1 && --stripe == 0)
2889 				break;
2890 #endif
2891 		}
2892 		/* Don't block if we allocated any successfully. */
2893 		flags &= ~M_WAITOK;
2894 		flags |= M_NOWAIT;
2895 	}
2896 	if (slab != NULL)
2897 		KEG_UNLOCK(keg);
2898 
2899 	return i;
2900 }
2901 
2902 static uma_bucket_t
2903 zone_alloc_bucket(uma_zone_t zone, void *udata, int domain, int flags)
2904 {
2905 	uma_bucket_t bucket;
2906 	int max;
2907 
2908 	/* Don't wait for buckets, preserve caller's NOVM setting. */
2909 	bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM));
2910 	if (bucket == NULL)
2911 		return (NULL);
2912 
2913 	max = MIN(bucket->ub_entries, zone->uz_count);
2914 	bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket,
2915 	    max, domain, flags);
2916 
2917 	/*
2918 	 * Initialize the memory if necessary.
2919 	 */
2920 	if (bucket->ub_cnt != 0 && zone->uz_init != NULL) {
2921 		int i;
2922 
2923 		for (i = 0; i < bucket->ub_cnt; i++)
2924 			if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size,
2925 			    flags) != 0)
2926 				break;
2927 		/*
2928 		 * If we couldn't initialize the whole bucket, put the
2929 		 * rest back onto the freelist.
2930 		 */
2931 		if (i != bucket->ub_cnt) {
2932 			zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i],
2933 			    bucket->ub_cnt - i);
2934 #ifdef INVARIANTS
2935 			bzero(&bucket->ub_bucket[i],
2936 			    sizeof(void *) * (bucket->ub_cnt - i));
2937 #endif
2938 			bucket->ub_cnt = i;
2939 		}
2940 	}
2941 
2942 	if (bucket->ub_cnt == 0) {
2943 		bucket_free(zone, bucket, udata);
2944 		atomic_add_long(&zone->uz_fails, 1);
2945 		return (NULL);
2946 	}
2947 
2948 	return (bucket);
2949 }
2950 
2951 /*
2952  * Allocates a single item from a zone.
2953  *
2954  * Arguments
2955  *	zone   The zone to alloc for.
2956  *	udata  The data to be passed to the constructor.
2957  *	domain The domain to allocate from or UMA_ANYDOMAIN.
2958  *	flags  M_WAITOK, M_NOWAIT, M_ZERO.
2959  *
2960  * Returns
2961  *	NULL if there is no memory and M_NOWAIT is set
2962  *	An item if successful
2963  */
2964 
2965 static void *
2966 zone_alloc_item(uma_zone_t zone, void *udata, int domain, int flags)
2967 {
2968 	void *item;
2969 #ifdef INVARIANTS
2970 	bool skipdbg;
2971 #endif
2972 
2973 	item = NULL;
2974 
2975 	if (zone->uz_import(zone->uz_arg, &item, 1, domain, flags) != 1)
2976 		goto fail;
2977 	atomic_add_long(&zone->uz_allocs, 1);
2978 
2979 #ifdef INVARIANTS
2980 	skipdbg = uma_dbg_zskip(zone, item);
2981 #endif
2982 	/*
2983 	 * We have to call both the zone's init (not the keg's init)
2984 	 * and the zone's ctor.  This is because the item is going from
2985 	 * a keg slab directly to the user, and the user is expecting it
2986 	 * to be both zone-init'd as well as zone-ctor'd.
2987 	 */
2988 	if (zone->uz_init != NULL) {
2989 		if (zone->uz_init(item, zone->uz_size, flags) != 0) {
2990 			zone_free_item(zone, item, udata, SKIP_FINI);
2991 			goto fail;
2992 		}
2993 	}
2994 	if (zone->uz_ctor != NULL &&
2995 #ifdef INVARIANTS
2996 	    (!skipdbg || zone->uz_ctor != trash_ctor ||
2997 	    zone->uz_dtor != trash_dtor) &&
2998 #endif
2999 	    zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
3000 		zone_free_item(zone, item, udata, SKIP_DTOR);
3001 		goto fail;
3002 	}
3003 #ifdef INVARIANTS
3004 	if (!skipdbg)
3005 		uma_dbg_alloc(zone, NULL, item);
3006 #endif
3007 	if (flags & M_ZERO)
3008 		uma_zero_item(item, zone);
3009 
3010 	CTR3(KTR_UMA, "zone_alloc_item item %p from %s(%p)", item,
3011 	    zone->uz_name, zone);
3012 
3013 	return (item);
3014 
3015 fail:
3016 	CTR2(KTR_UMA, "zone_alloc_item failed from %s(%p)",
3017 	    zone->uz_name, zone);
3018 	atomic_add_long(&zone->uz_fails, 1);
3019 	return (NULL);
3020 }
3021 
3022 /* See uma.h */
3023 void
3024 uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
3025 {
3026 	uma_cache_t cache;
3027 	uma_bucket_t bucket;
3028 	uma_zone_domain_t zdom;
3029 	int cpu, domain, lockfail;
3030 #ifdef INVARIANTS
3031 	bool skipdbg;
3032 #endif
3033 
3034 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
3035 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
3036 
3037 	CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread,
3038 	    zone->uz_name);
3039 
3040 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
3041 	    ("uma_zfree_arg: called with spinlock or critical section held"));
3042 
3043         /* uma_zfree(..., NULL) does nothing, to match free(9). */
3044         if (item == NULL)
3045                 return;
3046 #ifdef DEBUG_MEMGUARD
3047 	if (is_memguard_addr(item)) {
3048 		if (zone->uz_dtor != NULL)
3049 			zone->uz_dtor(item, zone->uz_size, udata);
3050 		if (zone->uz_fini != NULL)
3051 			zone->uz_fini(item, zone->uz_size);
3052 		memguard_free(item);
3053 		return;
3054 	}
3055 #endif
3056 #ifdef INVARIANTS
3057 	skipdbg = uma_dbg_zskip(zone, item);
3058 	if (skipdbg == false) {
3059 		if (zone->uz_flags & UMA_ZONE_MALLOC)
3060 			uma_dbg_free(zone, udata, item);
3061 		else
3062 			uma_dbg_free(zone, NULL, item);
3063 	}
3064 	if (zone->uz_dtor != NULL && (!skipdbg ||
3065 	    zone->uz_dtor != trash_dtor || zone->uz_ctor != trash_ctor))
3066 #else
3067 	if (zone->uz_dtor != NULL)
3068 #endif
3069 		zone->uz_dtor(item, zone->uz_size, udata);
3070 
3071 	/*
3072 	 * The race here is acceptable.  If we miss it we'll just have to wait
3073 	 * a little longer for the limits to be reset.
3074 	 */
3075 	if (zone->uz_flags & UMA_ZFLAG_FULL)
3076 		goto zfree_item;
3077 
3078 	/*
3079 	 * If possible, free to the per-CPU cache.  There are two
3080 	 * requirements for safe access to the per-CPU cache: (1) the thread
3081 	 * accessing the cache must not be preempted or yield during access,
3082 	 * and (2) the thread must not migrate CPUs without switching which
3083 	 * cache it accesses.  We rely on a critical section to prevent
3084 	 * preemption and migration.  We release the critical section in
3085 	 * order to acquire the zone mutex if we are unable to free to the
3086 	 * current cache; when we re-acquire the critical section, we must
3087 	 * detect and handle migration if it has occurred.
3088 	 */
3089 zfree_restart:
3090 	critical_enter();
3091 	cpu = curcpu;
3092 	cache = &zone->uz_cpu[cpu];
3093 
3094 zfree_start:
3095 	/*
3096 	 * Try to free into the allocbucket first to give LIFO ordering
3097 	 * for cache-hot datastructures.  Spill over into the freebucket
3098 	 * if necessary.  Alloc will swap them if one runs dry.
3099 	 */
3100 	bucket = cache->uc_allocbucket;
3101 	if (bucket == NULL || bucket->ub_cnt >= bucket->ub_entries)
3102 		bucket = cache->uc_freebucket;
3103 	if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
3104 		KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL,
3105 		    ("uma_zfree: Freeing to non free bucket index."));
3106 		bucket->ub_bucket[bucket->ub_cnt] = item;
3107 		bucket->ub_cnt++;
3108 		cache->uc_frees++;
3109 		critical_exit();
3110 		return;
3111 	}
3112 
3113 	/*
3114 	 * We must go back the zone, which requires acquiring the zone lock,
3115 	 * which in turn means we must release and re-acquire the critical
3116 	 * section.  Since the critical section is released, we may be
3117 	 * preempted or migrate.  As such, make sure not to maintain any
3118 	 * thread-local state specific to the cache from prior to releasing
3119 	 * the critical section.
3120 	 */
3121 	critical_exit();
3122 	if (zone->uz_count == 0 || bucketdisable)
3123 		goto zfree_item;
3124 
3125 	lockfail = 0;
3126 	if (ZONE_TRYLOCK(zone) == 0) {
3127 		/* Record contention to size the buckets. */
3128 		ZONE_LOCK(zone);
3129 		lockfail = 1;
3130 	}
3131 	critical_enter();
3132 	cpu = curcpu;
3133 	cache = &zone->uz_cpu[cpu];
3134 
3135 	bucket = cache->uc_freebucket;
3136 	if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
3137 		ZONE_UNLOCK(zone);
3138 		goto zfree_start;
3139 	}
3140 	cache->uc_freebucket = NULL;
3141 	/* We are no longer associated with this CPU. */
3142 	critical_exit();
3143 
3144 	if ((zone->uz_flags & UMA_ZONE_NUMA) != 0)
3145 		domain = PCPU_GET(domain);
3146 	else
3147 		domain = 0;
3148 	zdom = &zone->uz_domain[0];
3149 
3150 	/* Can we throw this on the zone full list? */
3151 	if (bucket != NULL) {
3152 		CTR3(KTR_UMA,
3153 		    "uma_zfree: zone %s(%p) putting bucket %p on free list",
3154 		    zone->uz_name, zone, bucket);
3155 		/* ub_cnt is pointing to the last free item */
3156 		KASSERT(bucket->ub_cnt != 0,
3157 		    ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n"));
3158 		if ((zone->uz_flags & UMA_ZONE_NOBUCKETCACHE) != 0) {
3159 			ZONE_UNLOCK(zone);
3160 			bucket_drain(zone, bucket);
3161 			bucket_free(zone, bucket, udata);
3162 			goto zfree_restart;
3163 		} else
3164 			LIST_INSERT_HEAD(&zdom->uzd_buckets, bucket, ub_link);
3165 	}
3166 
3167 	/*
3168 	 * We bump the uz count when the cache size is insufficient to
3169 	 * handle the working set.
3170 	 */
3171 	if (lockfail && zone->uz_count < BUCKET_MAX)
3172 		zone->uz_count++;
3173 	ZONE_UNLOCK(zone);
3174 
3175 	bucket = bucket_alloc(zone, udata, M_NOWAIT);
3176 	CTR3(KTR_UMA, "uma_zfree: zone %s(%p) allocated bucket %p",
3177 	    zone->uz_name, zone, bucket);
3178 	if (bucket) {
3179 		critical_enter();
3180 		cpu = curcpu;
3181 		cache = &zone->uz_cpu[cpu];
3182 		if (cache->uc_freebucket == NULL &&
3183 		    ((zone->uz_flags & UMA_ZONE_NUMA) == 0 ||
3184 		    domain == PCPU_GET(domain))) {
3185 			cache->uc_freebucket = bucket;
3186 			goto zfree_start;
3187 		}
3188 		/*
3189 		 * We lost the race, start over.  We have to drop our
3190 		 * critical section to free the bucket.
3191 		 */
3192 		critical_exit();
3193 		bucket_free(zone, bucket, udata);
3194 		goto zfree_restart;
3195 	}
3196 
3197 	/*
3198 	 * If nothing else caught this, we'll just do an internal free.
3199 	 */
3200 zfree_item:
3201 	zone_free_item(zone, item, udata, SKIP_DTOR);
3202 
3203 	return;
3204 }
3205 
3206 void
3207 uma_zfree_domain(uma_zone_t zone, void *item, void *udata)
3208 {
3209 
3210 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
3211 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
3212 
3213 	CTR2(KTR_UMA, "uma_zfree_domain thread %x zone %s", curthread,
3214 	    zone->uz_name);
3215 
3216 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
3217 	    ("uma_zfree_domain: called with spinlock or critical section held"));
3218 
3219         /* uma_zfree(..., NULL) does nothing, to match free(9). */
3220         if (item == NULL)
3221                 return;
3222 	zone_free_item(zone, item, udata, SKIP_NONE);
3223 }
3224 
3225 static void
3226 slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item)
3227 {
3228 	uma_domain_t dom;
3229 	uint8_t freei;
3230 
3231 	mtx_assert(&keg->uk_lock, MA_OWNED);
3232 	MPASS(keg == slab->us_keg);
3233 
3234 	dom = &keg->uk_domain[slab->us_domain];
3235 
3236 	/* Do we need to remove from any lists? */
3237 	if (slab->us_freecount+1 == keg->uk_ipers) {
3238 		LIST_REMOVE(slab, us_link);
3239 		LIST_INSERT_HEAD(&dom->ud_free_slab, slab, us_link);
3240 	} else if (slab->us_freecount == 0) {
3241 		LIST_REMOVE(slab, us_link);
3242 		LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
3243 	}
3244 
3245 	/* Slab management. */
3246 	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
3247 	BIT_SET(SLAB_SETSIZE, freei, &slab->us_free);
3248 	slab->us_freecount++;
3249 
3250 	/* Keg statistics. */
3251 	keg->uk_free++;
3252 }
3253 
3254 static void
3255 zone_release(uma_zone_t zone, void **bucket, int cnt)
3256 {
3257 	void *item;
3258 	uma_slab_t slab;
3259 	uma_keg_t keg;
3260 	uint8_t *mem;
3261 	int clearfull;
3262 	int i;
3263 
3264 	clearfull = 0;
3265 	keg = zone_first_keg(zone);
3266 	KEG_LOCK(keg);
3267 	for (i = 0; i < cnt; i++) {
3268 		item = bucket[i];
3269 		if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) {
3270 			mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
3271 			if (zone->uz_flags & UMA_ZONE_HASH) {
3272 				slab = hash_sfind(&keg->uk_hash, mem);
3273 			} else {
3274 				mem += keg->uk_pgoff;
3275 				slab = (uma_slab_t)mem;
3276 			}
3277 		} else {
3278 			slab = vtoslab((vm_offset_t)item);
3279 			if (slab->us_keg != keg) {
3280 				KEG_UNLOCK(keg);
3281 				keg = slab->us_keg;
3282 				KEG_LOCK(keg);
3283 			}
3284 		}
3285 		slab_free_item(keg, slab, item);
3286 		if (keg->uk_flags & UMA_ZFLAG_FULL) {
3287 			if (keg->uk_pages < keg->uk_maxpages) {
3288 				keg->uk_flags &= ~UMA_ZFLAG_FULL;
3289 				clearfull = 1;
3290 			}
3291 
3292 			/*
3293 			 * We can handle one more allocation. Since we're
3294 			 * clearing ZFLAG_FULL, wake up all procs blocked
3295 			 * on pages. This should be uncommon, so keeping this
3296 			 * simple for now (rather than adding count of blocked
3297 			 * threads etc).
3298 			 */
3299 			wakeup(keg);
3300 		}
3301 	}
3302 	KEG_UNLOCK(keg);
3303 	if (clearfull) {
3304 		ZONE_LOCK(zone);
3305 		zone->uz_flags &= ~UMA_ZFLAG_FULL;
3306 		wakeup(zone);
3307 		ZONE_UNLOCK(zone);
3308 	}
3309 
3310 }
3311 
3312 /*
3313  * Frees a single item to any zone.
3314  *
3315  * Arguments:
3316  *	zone   The zone to free to
3317  *	item   The item we're freeing
3318  *	udata  User supplied data for the dtor
3319  *	skip   Skip dtors and finis
3320  */
3321 static void
3322 zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip)
3323 {
3324 #ifdef INVARIANTS
3325 	bool skipdbg;
3326 
3327 	skipdbg = uma_dbg_zskip(zone, item);
3328 	if (skip == SKIP_NONE && !skipdbg) {
3329 		if (zone->uz_flags & UMA_ZONE_MALLOC)
3330 			uma_dbg_free(zone, udata, item);
3331 		else
3332 			uma_dbg_free(zone, NULL, item);
3333 	}
3334 
3335 	if (skip < SKIP_DTOR && zone->uz_dtor != NULL &&
3336 	    (!skipdbg || zone->uz_dtor != trash_dtor ||
3337 	    zone->uz_ctor != trash_ctor))
3338 #else
3339 	if (skip < SKIP_DTOR && zone->uz_dtor != NULL)
3340 #endif
3341 		zone->uz_dtor(item, zone->uz_size, udata);
3342 
3343 	if (skip < SKIP_FINI && zone->uz_fini)
3344 		zone->uz_fini(item, zone->uz_size);
3345 
3346 	atomic_add_long(&zone->uz_frees, 1);
3347 	zone->uz_release(zone->uz_arg, &item, 1);
3348 }
3349 
3350 /* See uma.h */
3351 int
3352 uma_zone_set_max(uma_zone_t zone, int nitems)
3353 {
3354 	uma_keg_t keg;
3355 
3356 	keg = zone_first_keg(zone);
3357 	if (keg == NULL)
3358 		return (0);
3359 	KEG_LOCK(keg);
3360 	keg->uk_maxpages = (nitems / keg->uk_ipers) * keg->uk_ppera;
3361 	if (keg->uk_maxpages * keg->uk_ipers < nitems)
3362 		keg->uk_maxpages += keg->uk_ppera;
3363 	nitems = (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers;
3364 	KEG_UNLOCK(keg);
3365 
3366 	return (nitems);
3367 }
3368 
3369 /* See uma.h */
3370 int
3371 uma_zone_get_max(uma_zone_t zone)
3372 {
3373 	int nitems;
3374 	uma_keg_t keg;
3375 
3376 	keg = zone_first_keg(zone);
3377 	if (keg == NULL)
3378 		return (0);
3379 	KEG_LOCK(keg);
3380 	nitems = (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers;
3381 	KEG_UNLOCK(keg);
3382 
3383 	return (nitems);
3384 }
3385 
3386 /* See uma.h */
3387 void
3388 uma_zone_set_warning(uma_zone_t zone, const char *warning)
3389 {
3390 
3391 	ZONE_LOCK(zone);
3392 	zone->uz_warning = warning;
3393 	ZONE_UNLOCK(zone);
3394 }
3395 
3396 /* See uma.h */
3397 void
3398 uma_zone_set_maxaction(uma_zone_t zone, uma_maxaction_t maxaction)
3399 {
3400 
3401 	ZONE_LOCK(zone);
3402 	TASK_INIT(&zone->uz_maxaction, 0, (task_fn_t *)maxaction, zone);
3403 	ZONE_UNLOCK(zone);
3404 }
3405 
3406 /* See uma.h */
3407 int
3408 uma_zone_get_cur(uma_zone_t zone)
3409 {
3410 	int64_t nitems;
3411 	u_int i;
3412 
3413 	ZONE_LOCK(zone);
3414 	nitems = zone->uz_allocs - zone->uz_frees;
3415 	CPU_FOREACH(i) {
3416 		/*
3417 		 * See the comment in sysctl_vm_zone_stats() regarding the
3418 		 * safety of accessing the per-cpu caches. With the zone lock
3419 		 * held, it is safe, but can potentially result in stale data.
3420 		 */
3421 		nitems += zone->uz_cpu[i].uc_allocs -
3422 		    zone->uz_cpu[i].uc_frees;
3423 	}
3424 	ZONE_UNLOCK(zone);
3425 
3426 	return (nitems < 0 ? 0 : nitems);
3427 }
3428 
3429 /* See uma.h */
3430 void
3431 uma_zone_set_init(uma_zone_t zone, uma_init uminit)
3432 {
3433 	uma_keg_t keg;
3434 
3435 	keg = zone_first_keg(zone);
3436 	KASSERT(keg != NULL, ("uma_zone_set_init: Invalid zone type"));
3437 	KEG_LOCK(keg);
3438 	KASSERT(keg->uk_pages == 0,
3439 	    ("uma_zone_set_init on non-empty keg"));
3440 	keg->uk_init = uminit;
3441 	KEG_UNLOCK(keg);
3442 }
3443 
3444 /* See uma.h */
3445 void
3446 uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
3447 {
3448 	uma_keg_t keg;
3449 
3450 	keg = zone_first_keg(zone);
3451 	KASSERT(keg != NULL, ("uma_zone_set_fini: Invalid zone type"));
3452 	KEG_LOCK(keg);
3453 	KASSERT(keg->uk_pages == 0,
3454 	    ("uma_zone_set_fini on non-empty keg"));
3455 	keg->uk_fini = fini;
3456 	KEG_UNLOCK(keg);
3457 }
3458 
3459 /* See uma.h */
3460 void
3461 uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
3462 {
3463 
3464 	ZONE_LOCK(zone);
3465 	KASSERT(zone_first_keg(zone)->uk_pages == 0,
3466 	    ("uma_zone_set_zinit on non-empty keg"));
3467 	zone->uz_init = zinit;
3468 	ZONE_UNLOCK(zone);
3469 }
3470 
3471 /* See uma.h */
3472 void
3473 uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
3474 {
3475 
3476 	ZONE_LOCK(zone);
3477 	KASSERT(zone_first_keg(zone)->uk_pages == 0,
3478 	    ("uma_zone_set_zfini on non-empty keg"));
3479 	zone->uz_fini = zfini;
3480 	ZONE_UNLOCK(zone);
3481 }
3482 
3483 /* See uma.h */
3484 /* XXX uk_freef is not actually used with the zone locked */
3485 void
3486 uma_zone_set_freef(uma_zone_t zone, uma_free freef)
3487 {
3488 	uma_keg_t keg;
3489 
3490 	keg = zone_first_keg(zone);
3491 	KASSERT(keg != NULL, ("uma_zone_set_freef: Invalid zone type"));
3492 	KEG_LOCK(keg);
3493 	keg->uk_freef = freef;
3494 	KEG_UNLOCK(keg);
3495 }
3496 
3497 /* See uma.h */
3498 /* XXX uk_allocf is not actually used with the zone locked */
3499 void
3500 uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
3501 {
3502 	uma_keg_t keg;
3503 
3504 	keg = zone_first_keg(zone);
3505 	KEG_LOCK(keg);
3506 	keg->uk_allocf = allocf;
3507 	KEG_UNLOCK(keg);
3508 }
3509 
3510 /* See uma.h */
3511 void
3512 uma_zone_reserve(uma_zone_t zone, int items)
3513 {
3514 	uma_keg_t keg;
3515 
3516 	keg = zone_first_keg(zone);
3517 	if (keg == NULL)
3518 		return;
3519 	KEG_LOCK(keg);
3520 	keg->uk_reserve = items;
3521 	KEG_UNLOCK(keg);
3522 
3523 	return;
3524 }
3525 
3526 /* See uma.h */
3527 int
3528 uma_zone_reserve_kva(uma_zone_t zone, int count)
3529 {
3530 	uma_keg_t keg;
3531 	vm_offset_t kva;
3532 	u_int pages;
3533 
3534 	keg = zone_first_keg(zone);
3535 	if (keg == NULL)
3536 		return (0);
3537 	pages = count / keg->uk_ipers;
3538 
3539 	if (pages * keg->uk_ipers < count)
3540 		pages++;
3541 	pages *= keg->uk_ppera;
3542 
3543 #ifdef UMA_MD_SMALL_ALLOC
3544 	if (keg->uk_ppera > 1) {
3545 #else
3546 	if (1) {
3547 #endif
3548 		kva = kva_alloc((vm_size_t)pages * PAGE_SIZE);
3549 		if (kva == 0)
3550 			return (0);
3551 	} else
3552 		kva = 0;
3553 	KEG_LOCK(keg);
3554 	keg->uk_kva = kva;
3555 	keg->uk_offset = 0;
3556 	keg->uk_maxpages = pages;
3557 #ifdef UMA_MD_SMALL_ALLOC
3558 	keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc;
3559 #else
3560 	keg->uk_allocf = noobj_alloc;
3561 #endif
3562 	keg->uk_flags |= UMA_ZONE_NOFREE;
3563 	KEG_UNLOCK(keg);
3564 
3565 	return (1);
3566 }
3567 
3568 /* See uma.h */
3569 void
3570 uma_prealloc(uma_zone_t zone, int items)
3571 {
3572 	uma_domain_t dom;
3573 	uma_slab_t slab;
3574 	uma_keg_t keg;
3575 	int domain, slabs;
3576 
3577 	keg = zone_first_keg(zone);
3578 	if (keg == NULL)
3579 		return;
3580 	KEG_LOCK(keg);
3581 	slabs = items / keg->uk_ipers;
3582 	domain = 0;
3583 	if (slabs * keg->uk_ipers < items)
3584 		slabs++;
3585 	while (slabs > 0) {
3586 		slab = keg_alloc_slab(keg, zone, domain, M_WAITOK);
3587 		if (slab == NULL)
3588 			break;
3589 		MPASS(slab->us_keg == keg);
3590 		dom = &keg->uk_domain[slab->us_domain];
3591 		LIST_INSERT_HEAD(&dom->ud_free_slab, slab, us_link);
3592 		slabs--;
3593 		domain = (domain + 1) % vm_ndomains;
3594 	}
3595 	KEG_UNLOCK(keg);
3596 }
3597 
3598 /* See uma.h */
3599 static void
3600 uma_reclaim_locked(bool kmem_danger)
3601 {
3602 
3603 	CTR0(KTR_UMA, "UMA: vm asked us to release pages!");
3604 	sx_assert(&uma_drain_lock, SA_XLOCKED);
3605 	bucket_enable();
3606 	zone_foreach(zone_drain);
3607 	if (vm_page_count_min() || kmem_danger) {
3608 		cache_drain_safe(NULL);
3609 		zone_foreach(zone_drain);
3610 	}
3611 	/*
3612 	 * Some slabs may have been freed but this zone will be visited early
3613 	 * we visit again so that we can free pages that are empty once other
3614 	 * zones are drained.  We have to do the same for buckets.
3615 	 */
3616 	zone_drain(slabzone);
3617 	bucket_zone_drain();
3618 }
3619 
3620 void
3621 uma_reclaim(void)
3622 {
3623 
3624 	sx_xlock(&uma_drain_lock);
3625 	uma_reclaim_locked(false);
3626 	sx_xunlock(&uma_drain_lock);
3627 }
3628 
3629 static volatile int uma_reclaim_needed;
3630 
3631 void
3632 uma_reclaim_wakeup(void)
3633 {
3634 
3635 	if (atomic_fetchadd_int(&uma_reclaim_needed, 1) == 0)
3636 		wakeup(uma_reclaim);
3637 }
3638 
3639 void
3640 uma_reclaim_worker(void *arg __unused)
3641 {
3642 
3643 	for (;;) {
3644 		sx_xlock(&uma_drain_lock);
3645 		while (atomic_load_int(&uma_reclaim_needed) == 0)
3646 			sx_sleep(uma_reclaim, &uma_drain_lock, PVM, "umarcl",
3647 			    hz);
3648 		sx_xunlock(&uma_drain_lock);
3649 		EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_KMEM);
3650 		sx_xlock(&uma_drain_lock);
3651 		uma_reclaim_locked(true);
3652 		atomic_store_int(&uma_reclaim_needed, 0);
3653 		sx_xunlock(&uma_drain_lock);
3654 		/* Don't fire more than once per-second. */
3655 		pause("umarclslp", hz);
3656 	}
3657 }
3658 
3659 /* See uma.h */
3660 int
3661 uma_zone_exhausted(uma_zone_t zone)
3662 {
3663 	int full;
3664 
3665 	ZONE_LOCK(zone);
3666 	full = (zone->uz_flags & UMA_ZFLAG_FULL);
3667 	ZONE_UNLOCK(zone);
3668 	return (full);
3669 }
3670 
3671 int
3672 uma_zone_exhausted_nolock(uma_zone_t zone)
3673 {
3674 	return (zone->uz_flags & UMA_ZFLAG_FULL);
3675 }
3676 
3677 void *
3678 uma_large_malloc_domain(vm_size_t size, int domain, int wait)
3679 {
3680 	vm_offset_t addr;
3681 	uma_slab_t slab;
3682 
3683 	slab = zone_alloc_item(slabzone, NULL, domain, wait);
3684 	if (slab == NULL)
3685 		return (NULL);
3686 	if (domain == UMA_ANYDOMAIN)
3687 		addr = kmem_malloc(size, wait);
3688 	else
3689 		addr = kmem_malloc_domain(domain, size, wait);
3690 	if (addr != 0) {
3691 		vsetslab(addr, slab);
3692 		slab->us_data = (void *)addr;
3693 		slab->us_flags = UMA_SLAB_KERNEL | UMA_SLAB_MALLOC;
3694 		slab->us_size = size;
3695 		slab->us_domain = vm_phys_domain(PHYS_TO_VM_PAGE(
3696 		    pmap_kextract(addr)));
3697 		uma_total_inc(size);
3698 	} else {
3699 		zone_free_item(slabzone, slab, NULL, SKIP_NONE);
3700 	}
3701 
3702 	return ((void *)addr);
3703 }
3704 
3705 void *
3706 uma_large_malloc(vm_size_t size, int wait)
3707 {
3708 
3709 	return uma_large_malloc_domain(size, UMA_ANYDOMAIN, wait);
3710 }
3711 
3712 void
3713 uma_large_free(uma_slab_t slab)
3714 {
3715 
3716 	KASSERT((slab->us_flags & UMA_SLAB_KERNEL) != 0,
3717 	    ("uma_large_free:  Memory not allocated with uma_large_malloc."));
3718 	kmem_free((vm_offset_t)slab->us_data, slab->us_size);
3719 	uma_total_dec(slab->us_size);
3720 	zone_free_item(slabzone, slab, NULL, SKIP_NONE);
3721 }
3722 
3723 static void
3724 uma_zero_item(void *item, uma_zone_t zone)
3725 {
3726 
3727 	bzero(item, zone->uz_size);
3728 }
3729 
3730 unsigned long
3731 uma_limit(void)
3732 {
3733 
3734 	return (uma_kmem_limit);
3735 }
3736 
3737 void
3738 uma_set_limit(unsigned long limit)
3739 {
3740 
3741 	uma_kmem_limit = limit;
3742 }
3743 
3744 unsigned long
3745 uma_size(void)
3746 {
3747 
3748 	return (uma_kmem_total);
3749 }
3750 
3751 long
3752 uma_avail(void)
3753 {
3754 
3755 	return (uma_kmem_limit - uma_kmem_total);
3756 }
3757 
3758 void
3759 uma_print_stats(void)
3760 {
3761 	zone_foreach(uma_print_zone);
3762 }
3763 
3764 static void
3765 slab_print(uma_slab_t slab)
3766 {
3767 	printf("slab: keg %p, data %p, freecount %d\n",
3768 		slab->us_keg, slab->us_data, slab->us_freecount);
3769 }
3770 
3771 static void
3772 cache_print(uma_cache_t cache)
3773 {
3774 	printf("alloc: %p(%d), free: %p(%d)\n",
3775 		cache->uc_allocbucket,
3776 		cache->uc_allocbucket?cache->uc_allocbucket->ub_cnt:0,
3777 		cache->uc_freebucket,
3778 		cache->uc_freebucket?cache->uc_freebucket->ub_cnt:0);
3779 }
3780 
3781 static void
3782 uma_print_keg(uma_keg_t keg)
3783 {
3784 	uma_domain_t dom;
3785 	uma_slab_t slab;
3786 	int i;
3787 
3788 	printf("keg: %s(%p) size %d(%d) flags %#x ipers %d ppera %d "
3789 	    "out %d free %d limit %d\n",
3790 	    keg->uk_name, keg, keg->uk_size, keg->uk_rsize, keg->uk_flags,
3791 	    keg->uk_ipers, keg->uk_ppera,
3792 	    (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free,
3793 	    keg->uk_free, (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers);
3794 	for (i = 0; i < vm_ndomains; i++) {
3795 		dom = &keg->uk_domain[i];
3796 		printf("Part slabs:\n");
3797 		LIST_FOREACH(slab, &dom->ud_part_slab, us_link)
3798 			slab_print(slab);
3799 		printf("Free slabs:\n");
3800 		LIST_FOREACH(slab, &dom->ud_free_slab, us_link)
3801 			slab_print(slab);
3802 		printf("Full slabs:\n");
3803 		LIST_FOREACH(slab, &dom->ud_full_slab, us_link)
3804 			slab_print(slab);
3805 	}
3806 }
3807 
3808 void
3809 uma_print_zone(uma_zone_t zone)
3810 {
3811 	uma_cache_t cache;
3812 	uma_klink_t kl;
3813 	int i;
3814 
3815 	printf("zone: %s(%p) size %d flags %#x\n",
3816 	    zone->uz_name, zone, zone->uz_size, zone->uz_flags);
3817 	LIST_FOREACH(kl, &zone->uz_kegs, kl_link)
3818 		uma_print_keg(kl->kl_keg);
3819 	CPU_FOREACH(i) {
3820 		cache = &zone->uz_cpu[i];
3821 		printf("CPU %d Cache:\n", i);
3822 		cache_print(cache);
3823 	}
3824 }
3825 
3826 #ifdef DDB
3827 /*
3828  * Generate statistics across both the zone and its per-cpu cache's.  Return
3829  * desired statistics if the pointer is non-NULL for that statistic.
3830  *
3831  * Note: does not update the zone statistics, as it can't safely clear the
3832  * per-CPU cache statistic.
3833  *
3834  * XXXRW: Following the uc_allocbucket and uc_freebucket pointers here isn't
3835  * safe from off-CPU; we should modify the caches to track this information
3836  * directly so that we don't have to.
3837  */
3838 static void
3839 uma_zone_sumstat(uma_zone_t z, int *cachefreep, uint64_t *allocsp,
3840     uint64_t *freesp, uint64_t *sleepsp)
3841 {
3842 	uma_cache_t cache;
3843 	uint64_t allocs, frees, sleeps;
3844 	int cachefree, cpu;
3845 
3846 	allocs = frees = sleeps = 0;
3847 	cachefree = 0;
3848 	CPU_FOREACH(cpu) {
3849 		cache = &z->uz_cpu[cpu];
3850 		if (cache->uc_allocbucket != NULL)
3851 			cachefree += cache->uc_allocbucket->ub_cnt;
3852 		if (cache->uc_freebucket != NULL)
3853 			cachefree += cache->uc_freebucket->ub_cnt;
3854 		allocs += cache->uc_allocs;
3855 		frees += cache->uc_frees;
3856 	}
3857 	allocs += z->uz_allocs;
3858 	frees += z->uz_frees;
3859 	sleeps += z->uz_sleeps;
3860 	if (cachefreep != NULL)
3861 		*cachefreep = cachefree;
3862 	if (allocsp != NULL)
3863 		*allocsp = allocs;
3864 	if (freesp != NULL)
3865 		*freesp = frees;
3866 	if (sleepsp != NULL)
3867 		*sleepsp = sleeps;
3868 }
3869 #endif /* DDB */
3870 
3871 static int
3872 sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS)
3873 {
3874 	uma_keg_t kz;
3875 	uma_zone_t z;
3876 	int count;
3877 
3878 	count = 0;
3879 	rw_rlock(&uma_rwlock);
3880 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
3881 		LIST_FOREACH(z, &kz->uk_zones, uz_link)
3882 			count++;
3883 	}
3884 	rw_runlock(&uma_rwlock);
3885 	return (sysctl_handle_int(oidp, &count, 0, req));
3886 }
3887 
3888 static int
3889 sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
3890 {
3891 	struct uma_stream_header ush;
3892 	struct uma_type_header uth;
3893 	struct uma_percpu_stat *ups;
3894 	uma_bucket_t bucket;
3895 	uma_zone_domain_t zdom;
3896 	struct sbuf sbuf;
3897 	uma_cache_t cache;
3898 	uma_klink_t kl;
3899 	uma_keg_t kz;
3900 	uma_zone_t z;
3901 	uma_keg_t k;
3902 	int count, error, i;
3903 
3904 	error = sysctl_wire_old_buffer(req, 0);
3905 	if (error != 0)
3906 		return (error);
3907 	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
3908 	sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL);
3909 	ups = malloc((mp_maxid + 1) * sizeof(*ups), M_TEMP, M_WAITOK);
3910 
3911 	count = 0;
3912 	rw_rlock(&uma_rwlock);
3913 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
3914 		LIST_FOREACH(z, &kz->uk_zones, uz_link)
3915 			count++;
3916 	}
3917 
3918 	/*
3919 	 * Insert stream header.
3920 	 */
3921 	bzero(&ush, sizeof(ush));
3922 	ush.ush_version = UMA_STREAM_VERSION;
3923 	ush.ush_maxcpus = (mp_maxid + 1);
3924 	ush.ush_count = count;
3925 	(void)sbuf_bcat(&sbuf, &ush, sizeof(ush));
3926 
3927 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
3928 		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
3929 			bzero(&uth, sizeof(uth));
3930 			ZONE_LOCK(z);
3931 			strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
3932 			uth.uth_align = kz->uk_align;
3933 			uth.uth_size = kz->uk_size;
3934 			uth.uth_rsize = kz->uk_rsize;
3935 			LIST_FOREACH(kl, &z->uz_kegs, kl_link) {
3936 				k = kl->kl_keg;
3937 				uth.uth_maxpages += k->uk_maxpages;
3938 				uth.uth_pages += k->uk_pages;
3939 				uth.uth_keg_free += k->uk_free;
3940 				uth.uth_limit = (k->uk_maxpages / k->uk_ppera)
3941 				    * k->uk_ipers;
3942 			}
3943 
3944 			/*
3945 			 * A zone is secondary is it is not the first entry
3946 			 * on the keg's zone list.
3947 			 */
3948 			if ((z->uz_flags & UMA_ZONE_SECONDARY) &&
3949 			    (LIST_FIRST(&kz->uk_zones) != z))
3950 				uth.uth_zone_flags = UTH_ZONE_SECONDARY;
3951 
3952 			for (i = 0; i < vm_ndomains; i++) {
3953 				zdom = &z->uz_domain[i];
3954 				LIST_FOREACH(bucket, &zdom->uzd_buckets,
3955 				    ub_link)
3956 					uth.uth_zone_free += bucket->ub_cnt;
3957 			}
3958 			uth.uth_allocs = z->uz_allocs;
3959 			uth.uth_frees = z->uz_frees;
3960 			uth.uth_fails = z->uz_fails;
3961 			uth.uth_sleeps = z->uz_sleeps;
3962 			/*
3963 			 * While it is not normally safe to access the cache
3964 			 * bucket pointers while not on the CPU that owns the
3965 			 * cache, we only allow the pointers to be exchanged
3966 			 * without the zone lock held, not invalidated, so
3967 			 * accept the possible race associated with bucket
3968 			 * exchange during monitoring.
3969 			 */
3970 			for (i = 0; i < mp_maxid + 1; i++) {
3971 				bzero(&ups[i], sizeof(*ups));
3972 				if (kz->uk_flags & UMA_ZFLAG_INTERNAL ||
3973 				    CPU_ABSENT(i))
3974 					continue;
3975 				cache = &z->uz_cpu[i];
3976 				if (cache->uc_allocbucket != NULL)
3977 					ups[i].ups_cache_free +=
3978 					    cache->uc_allocbucket->ub_cnt;
3979 				if (cache->uc_freebucket != NULL)
3980 					ups[i].ups_cache_free +=
3981 					    cache->uc_freebucket->ub_cnt;
3982 				ups[i].ups_allocs = cache->uc_allocs;
3983 				ups[i].ups_frees = cache->uc_frees;
3984 			}
3985 			ZONE_UNLOCK(z);
3986 			(void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
3987 			for (i = 0; i < mp_maxid + 1; i++)
3988 				(void)sbuf_bcat(&sbuf, &ups[i], sizeof(ups[i]));
3989 		}
3990 	}
3991 	rw_runlock(&uma_rwlock);
3992 	error = sbuf_finish(&sbuf);
3993 	sbuf_delete(&sbuf);
3994 	free(ups, M_TEMP);
3995 	return (error);
3996 }
3997 
3998 int
3999 sysctl_handle_uma_zone_max(SYSCTL_HANDLER_ARGS)
4000 {
4001 	uma_zone_t zone = *(uma_zone_t *)arg1;
4002 	int error, max;
4003 
4004 	max = uma_zone_get_max(zone);
4005 	error = sysctl_handle_int(oidp, &max, 0, req);
4006 	if (error || !req->newptr)
4007 		return (error);
4008 
4009 	uma_zone_set_max(zone, max);
4010 
4011 	return (0);
4012 }
4013 
4014 int
4015 sysctl_handle_uma_zone_cur(SYSCTL_HANDLER_ARGS)
4016 {
4017 	uma_zone_t zone = *(uma_zone_t *)arg1;
4018 	int cur;
4019 
4020 	cur = uma_zone_get_cur(zone);
4021 	return (sysctl_handle_int(oidp, &cur, 0, req));
4022 }
4023 
4024 #ifdef INVARIANTS
4025 static uma_slab_t
4026 uma_dbg_getslab(uma_zone_t zone, void *item)
4027 {
4028 	uma_slab_t slab;
4029 	uma_keg_t keg;
4030 	uint8_t *mem;
4031 
4032 	mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
4033 	if (zone->uz_flags & UMA_ZONE_VTOSLAB) {
4034 		slab = vtoslab((vm_offset_t)mem);
4035 	} else {
4036 		/*
4037 		 * It is safe to return the slab here even though the
4038 		 * zone is unlocked because the item's allocation state
4039 		 * essentially holds a reference.
4040 		 */
4041 		ZONE_LOCK(zone);
4042 		keg = LIST_FIRST(&zone->uz_kegs)->kl_keg;
4043 		if (keg->uk_flags & UMA_ZONE_HASH)
4044 			slab = hash_sfind(&keg->uk_hash, mem);
4045 		else
4046 			slab = (uma_slab_t)(mem + keg->uk_pgoff);
4047 		ZONE_UNLOCK(zone);
4048 	}
4049 
4050 	return (slab);
4051 }
4052 
4053 static bool
4054 uma_dbg_zskip(uma_zone_t zone, void *mem)
4055 {
4056 	uma_keg_t keg;
4057 
4058 	if ((keg = zone_first_keg(zone)) == NULL)
4059 		return (true);
4060 
4061 	return (uma_dbg_kskip(keg, mem));
4062 }
4063 
4064 static bool
4065 uma_dbg_kskip(uma_keg_t keg, void *mem)
4066 {
4067 	uintptr_t idx;
4068 
4069 	if (dbg_divisor == 0)
4070 		return (true);
4071 
4072 	if (dbg_divisor == 1)
4073 		return (false);
4074 
4075 	idx = (uintptr_t)mem >> PAGE_SHIFT;
4076 	if (keg->uk_ipers > 1) {
4077 		idx *= keg->uk_ipers;
4078 		idx += ((uintptr_t)mem & PAGE_MASK) / keg->uk_rsize;
4079 	}
4080 
4081 	if ((idx / dbg_divisor) * dbg_divisor != idx) {
4082 		counter_u64_add(uma_skip_cnt, 1);
4083 		return (true);
4084 	}
4085 	counter_u64_add(uma_dbg_cnt, 1);
4086 
4087 	return (false);
4088 }
4089 
4090 /*
4091  * Set up the slab's freei data such that uma_dbg_free can function.
4092  *
4093  */
4094 static void
4095 uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item)
4096 {
4097 	uma_keg_t keg;
4098 	int freei;
4099 
4100 	if (slab == NULL) {
4101 		slab = uma_dbg_getslab(zone, item);
4102 		if (slab == NULL)
4103 			panic("uma: item %p did not belong to zone %s\n",
4104 			    item, zone->uz_name);
4105 	}
4106 	keg = slab->us_keg;
4107 	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
4108 
4109 	if (BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree))
4110 		panic("Duplicate alloc of %p from zone %p(%s) slab %p(%d)\n",
4111 		    item, zone, zone->uz_name, slab, freei);
4112 	BIT_SET_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree);
4113 
4114 	return;
4115 }
4116 
4117 /*
4118  * Verifies freed addresses.  Checks for alignment, valid slab membership
4119  * and duplicate frees.
4120  *
4121  */
4122 static void
4123 uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
4124 {
4125 	uma_keg_t keg;
4126 	int freei;
4127 
4128 	if (slab == NULL) {
4129 		slab = uma_dbg_getslab(zone, item);
4130 		if (slab == NULL)
4131 			panic("uma: Freed item %p did not belong to zone %s\n",
4132 			    item, zone->uz_name);
4133 	}
4134 	keg = slab->us_keg;
4135 	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
4136 
4137 	if (freei >= keg->uk_ipers)
4138 		panic("Invalid free of %p from zone %p(%s) slab %p(%d)\n",
4139 		    item, zone, zone->uz_name, slab, freei);
4140 
4141 	if (((freei * keg->uk_rsize) + slab->us_data) != item)
4142 		panic("Unaligned free of %p from zone %p(%s) slab %p(%d)\n",
4143 		    item, zone, zone->uz_name, slab, freei);
4144 
4145 	if (!BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree))
4146 		panic("Duplicate free of %p from zone %p(%s) slab %p(%d)\n",
4147 		    item, zone, zone->uz_name, slab, freei);
4148 
4149 	BIT_CLR_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree);
4150 }
4151 #endif /* INVARIANTS */
4152 
4153 #ifdef DDB
4154 DB_SHOW_COMMAND(uma, db_show_uma)
4155 {
4156 	uma_bucket_t bucket;
4157 	uma_keg_t kz;
4158 	uma_zone_t z;
4159 	uma_zone_domain_t zdom;
4160 	uint64_t allocs, frees, sleeps;
4161 	int cachefree, i;
4162 
4163 	db_printf("%18s %8s %8s %8s %12s %8s %8s\n", "Zone", "Size", "Used",
4164 	    "Free", "Requests", "Sleeps", "Bucket");
4165 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
4166 		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
4167 			if (kz->uk_flags & UMA_ZFLAG_INTERNAL) {
4168 				allocs = z->uz_allocs;
4169 				frees = z->uz_frees;
4170 				sleeps = z->uz_sleeps;
4171 				cachefree = 0;
4172 			} else
4173 				uma_zone_sumstat(z, &cachefree, &allocs,
4174 				    &frees, &sleeps);
4175 			if (!((z->uz_flags & UMA_ZONE_SECONDARY) &&
4176 			    (LIST_FIRST(&kz->uk_zones) != z)))
4177 				cachefree += kz->uk_free;
4178 			for (i = 0; i < vm_ndomains; i++) {
4179 				zdom = &z->uz_domain[i];
4180 				LIST_FOREACH(bucket, &zdom->uzd_buckets,
4181 				    ub_link)
4182 					cachefree += bucket->ub_cnt;
4183 			}
4184 			db_printf("%18s %8ju %8jd %8d %12ju %8ju %8u\n",
4185 			    z->uz_name, (uintmax_t)kz->uk_size,
4186 			    (intmax_t)(allocs - frees), cachefree,
4187 			    (uintmax_t)allocs, sleeps, z->uz_count);
4188 			if (db_pager_quit)
4189 				return;
4190 		}
4191 	}
4192 }
4193 
4194 DB_SHOW_COMMAND(umacache, db_show_umacache)
4195 {
4196 	uma_bucket_t bucket;
4197 	uma_zone_t z;
4198 	uma_zone_domain_t zdom;
4199 	uint64_t allocs, frees;
4200 	int cachefree, i;
4201 
4202 	db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free",
4203 	    "Requests", "Bucket");
4204 	LIST_FOREACH(z, &uma_cachezones, uz_link) {
4205 		uma_zone_sumstat(z, &cachefree, &allocs, &frees, NULL);
4206 		for (i = 0; i < vm_ndomains; i++) {
4207 			zdom = &z->uz_domain[i];
4208 			LIST_FOREACH(bucket, &zdom->uzd_buckets, ub_link)
4209 				cachefree += bucket->ub_cnt;
4210 		}
4211 		db_printf("%18s %8ju %8jd %8d %12ju %8u\n",
4212 		    z->uz_name, (uintmax_t)z->uz_size,
4213 		    (intmax_t)(allocs - frees), cachefree,
4214 		    (uintmax_t)allocs, z->uz_count);
4215 		if (db_pager_quit)
4216 			return;
4217 	}
4218 }
4219 #endif	/* DDB */
4220