xref: /freebsd/sys/vm/uma_core.c (revision bd18fd57db1df29da1a3adf94d47924a977a29c2)
1 /*-
2  * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson <jeff@FreeBSD.org>
3  * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
4  * Copyright (c) 2004-2006 Robert N. M. Watson
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice unmodified, this list of conditions, and the following
12  *    disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 /*
30  * uma_core.c  Implementation of the Universal Memory allocator
31  *
32  * This allocator is intended to replace the multitude of similar object caches
33  * in the standard FreeBSD kernel.  The intent is to be flexible as well as
34  * efficient.  A primary design goal is to return unused memory to the rest of
35  * the system.  This will make the system as a whole more flexible due to the
36  * ability to move memory to subsystems which most need it instead of leaving
37  * pools of reserved memory unused.
38  *
39  * The basic ideas stem from similar slab/zone based allocators whose algorithms
40  * are well known.
41  *
42  */
43 
44 /*
45  * TODO:
46  *	- Improve memory usage for large allocations
47  *	- Investigate cache size adjustments
48  */
49 
50 #include <sys/cdefs.h>
51 __FBSDID("$FreeBSD$");
52 
53 /* I should really use ktr.. */
54 /*
55 #define UMA_DEBUG 1
56 #define UMA_DEBUG_ALLOC 1
57 #define UMA_DEBUG_ALLOC_1 1
58 */
59 
60 #include "opt_ddb.h"
61 #include "opt_param.h"
62 #include "opt_vm.h"
63 
64 #include <sys/param.h>
65 #include <sys/systm.h>
66 #include <sys/bitset.h>
67 #include <sys/kernel.h>
68 #include <sys/types.h>
69 #include <sys/queue.h>
70 #include <sys/malloc.h>
71 #include <sys/ktr.h>
72 #include <sys/lock.h>
73 #include <sys/sysctl.h>
74 #include <sys/mutex.h>
75 #include <sys/proc.h>
76 #include <sys/random.h>
77 #include <sys/rwlock.h>
78 #include <sys/sbuf.h>
79 #include <sys/sched.h>
80 #include <sys/smp.h>
81 #include <sys/taskqueue.h>
82 #include <sys/vmmeter.h>
83 
84 #include <vm/vm.h>
85 #include <vm/vm_object.h>
86 #include <vm/vm_page.h>
87 #include <vm/vm_pageout.h>
88 #include <vm/vm_param.h>
89 #include <vm/vm_map.h>
90 #include <vm/vm_kern.h>
91 #include <vm/vm_extern.h>
92 #include <vm/uma.h>
93 #include <vm/uma_int.h>
94 #include <vm/uma_dbg.h>
95 
96 #include <ddb/ddb.h>
97 
98 #ifdef DEBUG_MEMGUARD
99 #include <vm/memguard.h>
100 #endif
101 
102 /*
103  * This is the zone and keg from which all zones are spawned.  The idea is that
104  * even the zone & keg heads are allocated from the allocator, so we use the
105  * bss section to bootstrap us.
106  */
107 static struct uma_keg masterkeg;
108 static struct uma_zone masterzone_k;
109 static struct uma_zone masterzone_z;
110 static uma_zone_t kegs = &masterzone_k;
111 static uma_zone_t zones = &masterzone_z;
112 
113 /* This is the zone from which all of uma_slab_t's are allocated. */
114 static uma_zone_t slabzone;
115 static uma_zone_t slabrefzone;	/* With refcounters (for UMA_ZONE_REFCNT) */
116 
117 /*
118  * The initial hash tables come out of this zone so they can be allocated
119  * prior to malloc coming up.
120  */
121 static uma_zone_t hashzone;
122 
123 /* The boot-time adjusted value for cache line alignment. */
124 int uma_align_cache = 64 - 1;
125 
126 static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");
127 
128 /*
129  * Are we allowed to allocate buckets?
130  */
131 static int bucketdisable = 1;
132 
133 /* Linked list of all kegs in the system */
134 static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);
135 
136 /* Linked list of all cache-only zones in the system */
137 static LIST_HEAD(,uma_zone) uma_cachezones =
138     LIST_HEAD_INITIALIZER(uma_cachezones);
139 
140 /* This RW lock protects the keg list */
141 static struct rwlock_padalign uma_rwlock;
142 
143 /* Linked list of boot time pages */
144 static LIST_HEAD(,uma_slab) uma_boot_pages =
145     LIST_HEAD_INITIALIZER(uma_boot_pages);
146 
147 /* This mutex protects the boot time pages list */
148 static struct mtx_padalign uma_boot_pages_mtx;
149 
150 static struct sx uma_drain_lock;
151 
152 /* Is the VM done starting up? */
153 static int booted = 0;
154 #define	UMA_STARTUP	1
155 #define	UMA_STARTUP2	2
156 
157 /*
158  * Only mbuf clusters use ref zones.  Just provide enough references
159  * to support the one user.  New code should not use the ref facility.
160  */
161 static const u_int uma_max_ipers_ref = PAGE_SIZE / MCLBYTES;
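/*
 * Illustrative arithmetic (assumed values, not authoritative): with 4 KB
 * pages and the common 2 KB mbuf cluster size, uma_max_ipers_ref works out
 * to PAGE_SIZE / MCLBYTES = 4096 / 2048 = 2 items per slab.
 */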
162 
163 /*
164  * This is the handle used to schedule events that need to happen
165  * outside of the allocation fast path.
166  */
167 static struct callout uma_callout;
168 #define	UMA_TIMEOUT	20		/* Seconds for callout interval. */
169 
170 /*
171  * This structure is passed as the zone ctor arg so that I don't have to create
172  * a special allocation function just for zones.
173  */
174 struct uma_zctor_args {
175 	const char *name;
176 	size_t size;
177 	uma_ctor ctor;
178 	uma_dtor dtor;
179 	uma_init uminit;
180 	uma_fini fini;
181 	uma_import import;
182 	uma_release release;
183 	void *arg;
184 	uma_keg_t keg;
185 	int align;
186 	uint32_t flags;
187 };
188 
189 struct uma_kctor_args {
190 	uma_zone_t zone;
191 	size_t size;
192 	uma_init uminit;
193 	uma_fini fini;
194 	int align;
195 	uint32_t flags;
196 };
197 
198 struct uma_bucket_zone {
199 	uma_zone_t	ubz_zone;
200 	char		*ubz_name;
201 	int		ubz_entries;	/* Number of items it can hold. */
202 	int		ubz_maxsize;	/* Maximum allocation size per-item. */
203 };
204 
205 /*
206  * Compute the actual number of bucket entries to pack them in power
207  * of two sizes for more efficient space utilization.
208  */
209 #define	BUCKET_SIZE(n)						\
210     (((sizeof(void *) * (n)) - sizeof(struct uma_bucket)) / sizeof(void *))
211 
212 #define	BUCKET_MAX	BUCKET_SIZE(256)
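/*
 * Worked example (illustrative; the header size is an assumption): on LP64
 * with sizeof(void *) == 8 and a 24-byte struct uma_bucket header,
 * BUCKET_SIZE(128) == ((8 * 128) - 24) / 8 == 125, so 125 item pointers plus
 * the header fill a 1024-byte allocation exactly.
 */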
213 
214 struct uma_bucket_zone bucket_zones[] = {
215 	{ NULL, "4 Bucket", BUCKET_SIZE(4), 4096 },
216 	{ NULL, "6 Bucket", BUCKET_SIZE(6), 3072 },
217 	{ NULL, "8 Bucket", BUCKET_SIZE(8), 2048 },
218 	{ NULL, "12 Bucket", BUCKET_SIZE(12), 1536 },
219 	{ NULL, "16 Bucket", BUCKET_SIZE(16), 1024 },
220 	{ NULL, "32 Bucket", BUCKET_SIZE(32), 512 },
221 	{ NULL, "64 Bucket", BUCKET_SIZE(64), 256 },
222 	{ NULL, "128 Bucket", BUCKET_SIZE(128), 128 },
223 	{ NULL, "256 Bucket", BUCKET_SIZE(256), 64 },
224 	{ NULL, NULL, 0}
225 };
226 
227 /*
228  * Flags and enumerations to be passed to internal functions.
229  */
230 enum zfreeskip { SKIP_NONE = 0, SKIP_DTOR, SKIP_FINI };
231 
232 /* Prototypes.. */
233 
234 static void *noobj_alloc(uma_zone_t, vm_size_t, uint8_t *, int);
235 static void *page_alloc(uma_zone_t, vm_size_t, uint8_t *, int);
236 static void *startup_alloc(uma_zone_t, vm_size_t, uint8_t *, int);
237 static void page_free(void *, vm_size_t, uint8_t);
238 static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int);
239 static void cache_drain(uma_zone_t);
240 static void bucket_drain(uma_zone_t, uma_bucket_t);
241 static void bucket_cache_drain(uma_zone_t zone);
242 static int keg_ctor(void *, int, void *, int);
243 static void keg_dtor(void *, int, void *);
244 static int zone_ctor(void *, int, void *, int);
245 static void zone_dtor(void *, int, void *);
246 static int zero_init(void *, int, int);
247 static void keg_small_init(uma_keg_t keg);
248 static void keg_large_init(uma_keg_t keg);
249 static void zone_foreach(void (*zfunc)(uma_zone_t));
250 static void zone_timeout(uma_zone_t zone);
251 static int hash_alloc(struct uma_hash *);
252 static int hash_expand(struct uma_hash *, struct uma_hash *);
253 static void hash_free(struct uma_hash *hash);
254 static void uma_timeout(void *);
255 static void uma_startup3(void);
256 static void *zone_alloc_item(uma_zone_t, void *, int);
257 static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip);
258 static void bucket_enable(void);
259 static void bucket_init(void);
260 static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int);
261 static void bucket_free(uma_zone_t zone, uma_bucket_t, void *);
262 static void bucket_zone_drain(void);
263 static uma_bucket_t zone_alloc_bucket(uma_zone_t zone, void *, int flags);
264 static uma_slab_t zone_fetch_slab(uma_zone_t zone, uma_keg_t last, int flags);
265 static uma_slab_t zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int flags);
266 static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
267 static void slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item);
268 static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
269     uma_fini fini, int align, uint32_t flags);
270 static int zone_import(uma_zone_t zone, void **bucket, int max, int flags);
271 static void zone_release(uma_zone_t zone, void **bucket, int cnt);
272 static void uma_zero_item(void *item, uma_zone_t zone);
273 
274 void uma_print_zone(uma_zone_t);
275 void uma_print_stats(void);
276 static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
277 static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);
278 
279 #ifdef INVARIANTS
280 static void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item);
281 static void uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item);
282 #endif
283 
284 SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);
285 
286 SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLTYPE_INT,
287     0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones");
288 
289 SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
290     0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");
291 
292 static int zone_warnings = 1;
293 SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RWTUN, &zone_warnings, 0,
294     "Warn when a UMA zone becomes full");
295 
296 /*
297  * This routine checks to see whether or not it's safe to enable buckets.
298  */
299 static void
300 bucket_enable(void)
301 {
302 	bucketdisable = vm_page_count_min();
303 }
304 
305 /*
306  * Initialize bucket_zones, the array of zones of buckets of various sizes.
307  *
308  * For each zone, calculate the memory required for each bucket, consisting
309  * of the header and an array of pointers.
310  */
311 static void
312 bucket_init(void)
313 {
314 	struct uma_bucket_zone *ubz;
315 	int size;
316 
317 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) {
318 		size = roundup(sizeof(struct uma_bucket), sizeof(void *));
319 		size += sizeof(void *) * ubz->ubz_entries;
320 		ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
321 		    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
322 		    UMA_ZONE_MTXCLASS | UMA_ZFLAG_BUCKET);
323 	}
324 }
325 
326 /*
327  * Given a desired number of entries for a bucket, return the zone from which
328  * to allocate the bucket.
329  */
330 static struct uma_bucket_zone *
331 bucket_zone_lookup(int entries)
332 {
333 	struct uma_bucket_zone *ubz;
334 
335 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
336 		if (ubz->ubz_entries >= entries)
337 			return (ubz);
338 	ubz--;
339 	return (ubz);
340 }
341 
342 static int
343 bucket_select(int size)
344 {
345 	struct uma_bucket_zone *ubz;
346 
347 	ubz = &bucket_zones[0];
348 	if (size > ubz->ubz_maxsize)
349 		return MAX((ubz->ubz_maxsize * ubz->ubz_entries) / size, 1);
350 
351 	for (; ubz->ubz_entries != 0; ubz++)
352 		if (ubz->ubz_maxsize < size)
353 			break;
354 	ubz--;
355 	return (ubz->ubz_entries);
356 }
357 
358 static uma_bucket_t
359 bucket_alloc(uma_zone_t zone, void *udata, int flags)
360 {
361 	struct uma_bucket_zone *ubz;
362 	uma_bucket_t bucket;
363 
364 	/*
365 	 * This is to stop us from allocating per cpu buckets while we're
366 	 * running out of vm.boot_pages.  Otherwise, we would exhaust the
367 	 * boot pages.  This also prevents us from allocating buckets in
368 	 * low memory situations.
369 	 */
370 	if (bucketdisable)
371 		return (NULL);
372 	/*
373 	 * To limit bucket recursion we store the original zone flags
374 	 * in a cookie passed via zalloc_arg/zfree_arg.  This allows the
375 	 * NOVM flag to persist even through deep recursions.  We also
376 	 * store ZFLAG_BUCKET once we have recursed attempting to allocate
377 	 * a bucket for a bucket zone so we do not allow infinite bucket
378 	 * recursion.  This cookie will even persist to frees of unused
379 	 * buckets via the allocation path or bucket allocations in the
380 	 * free path.
381 	 */
382 	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
383 		udata = (void *)(uintptr_t)zone->uz_flags;
384 	else {
385 		if ((uintptr_t)udata & UMA_ZFLAG_BUCKET)
386 			return (NULL);
387 		udata = (void *)((uintptr_t)udata | UMA_ZFLAG_BUCKET);
388 	}
389 	if ((uintptr_t)udata & UMA_ZFLAG_CACHEONLY)
390 		flags |= M_NOVM;
391 	ubz = bucket_zone_lookup(zone->uz_count);
392 	if (ubz->ubz_zone == zone && (ubz + 1)->ubz_entries != 0)
393 		ubz++;
394 	bucket = uma_zalloc_arg(ubz->ubz_zone, udata, flags);
395 	if (bucket) {
396 #ifdef INVARIANTS
397 		bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
398 #endif
399 		bucket->ub_cnt = 0;
400 		bucket->ub_entries = ubz->ubz_entries;
401 	}
402 
403 	return (bucket);
404 }
405 
406 static void
407 bucket_free(uma_zone_t zone, uma_bucket_t bucket, void *udata)
408 {
409 	struct uma_bucket_zone *ubz;
410 
411 	KASSERT(bucket->ub_cnt == 0,
412 	    ("bucket_free: Freeing a non free bucket."));
413 	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
414 		udata = (void *)(uintptr_t)zone->uz_flags;
415 	ubz = bucket_zone_lookup(bucket->ub_entries);
416 	uma_zfree_arg(ubz->ubz_zone, bucket, udata);
417 }
418 
419 static void
420 bucket_zone_drain(void)
421 {
422 	struct uma_bucket_zone *ubz;
423 
424 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
425 		zone_drain(ubz->ubz_zone);
426 }
427 
428 static void
429 zone_log_warning(uma_zone_t zone)
430 {
431 	static const struct timeval warninterval = { 300, 0 };
432 
433 	if (!zone_warnings || zone->uz_warning == NULL)
434 		return;
435 
436 	if (ratecheck(&zone->uz_ratecheck, &warninterval))
437 		printf("[zone: %s] %s\n", zone->uz_name, zone->uz_warning);
438 }
439 
440 static inline void
441 zone_maxaction(uma_zone_t zone)
442 {
443 
444 	if (zone->uz_maxaction.ta_func != NULL)
445 		taskqueue_enqueue(taskqueue_thread, &zone->uz_maxaction);
446 }
447 
448 static void
449 zone_foreach_keg(uma_zone_t zone, void (*kegfn)(uma_keg_t))
450 {
451 	uma_klink_t klink;
452 
453 	LIST_FOREACH(klink, &zone->uz_kegs, kl_link)
454 		kegfn(klink->kl_keg);
455 }
456 
457 /*
458  * Routine called by the callout to fire off some time-interval-based
459  * calculations (stats, hash size, etc.).
460  *
461  * Arguments:
462  *	arg   Unused
463  *
464  * Returns:
465  *	Nothing
466  */
467 static void
468 uma_timeout(void *unused)
469 {
470 	bucket_enable();
471 	zone_foreach(zone_timeout);
472 
473 	/* Reschedule this event */
474 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
475 }
476 
477 /*
478  * Routine to perform timeout driven calculations.  This expands the
479  * hashes and does per cpu statistics aggregation.
480  *
481  *  Returns nothing.
482  */
483 static void
484 keg_timeout(uma_keg_t keg)
485 {
486 
487 	KEG_LOCK(keg);
488 	/*
489 	 * Expand the keg hash table.
490 	 *
491 	 * This is done if the number of slabs is larger than the hash size.
492  * What I'm trying to do here is eliminate collisions entirely.  This
493 	 * may be a little aggressive.  Should I allow for two collisions max?
494 	 */
495 	if (keg->uk_flags & UMA_ZONE_HASH &&
496 	    keg->uk_pages / keg->uk_ppera >= keg->uk_hash.uh_hashsize) {
497 		struct uma_hash newhash;
498 		struct uma_hash oldhash;
499 		int ret;
500 
501 		/*
502 		 * This is so involved because allocating and freeing
503 		 * while the keg lock is held will lead to deadlock.
504 		 * I have to do everything in stages and check for
505 		 * races.
506 		 */
507 		newhash = keg->uk_hash;
508 		KEG_UNLOCK(keg);
509 		ret = hash_alloc(&newhash);
510 		KEG_LOCK(keg);
511 		if (ret) {
512 			if (hash_expand(&keg->uk_hash, &newhash)) {
513 				oldhash = keg->uk_hash;
514 				keg->uk_hash = newhash;
515 			} else
516 				oldhash = newhash;
517 
518 			KEG_UNLOCK(keg);
519 			hash_free(&oldhash);
520 			return;
521 		}
522 	}
523 	KEG_UNLOCK(keg);
524 }
525 
526 static void
527 zone_timeout(uma_zone_t zone)
528 {
529 
530 	zone_foreach_keg(zone, &keg_timeout);
531 }
532 
533 /*
534  * Allocate and zero fill the next sized hash table from the appropriate
535  * backing store.
536  *
537  * Arguments:
538  *	hash  A new hash structure with the old hash size in uh_hashsize
539  *
540  * Returns:
541  *	1 on success and 0 on failure.
542  */
543 static int
544 hash_alloc(struct uma_hash *hash)
545 {
546 	int oldsize;
547 	int alloc;
548 
549 	oldsize = hash->uh_hashsize;
550 
551 	/* We're just going to double the current size. */
552 	if (oldsize)  {
553 		hash->uh_hashsize = oldsize * 2;
554 		alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
555 		hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
556 		    M_UMAHASH, M_NOWAIT);
557 	} else {
558 		alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
559 		hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
560 		    M_WAITOK);
561 		hash->uh_hashsize = UMA_HASH_SIZE_INIT;
562 	}
563 	if (hash->uh_slab_hash) {
564 		bzero(hash->uh_slab_hash, alloc);
565 		hash->uh_hashmask = hash->uh_hashsize - 1;
566 		return (1);
567 	}
568 
569 	return (0);
570 }
571 
572 /*
573  * Expands the hash table for HASH zones.  This is done from zone_timeout
574  * to reduce collisions.  This must not be done in the regular allocation
575  * path; otherwise, we can recurse on the vm while allocating pages.
576  *
577  * Arguments:
578  *	oldhash  The hash you want to expand
579  *	newhash  The hash structure for the new table
580  *
581  * Returns:
582  *	Nothing
583  *
584  * Discussion:
585  */
586 static int
587 hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
588 {
589 	uma_slab_t slab;
590 	int hval;
591 	int i;
592 
593 	if (!newhash->uh_slab_hash)
594 		return (0);
595 
596 	if (oldhash->uh_hashsize >= newhash->uh_hashsize)
597 		return (0);
598 
599 	/*
600 	 * I need to investigate hash algorithms for resizing without a
601 	 * full rehash.
602 	 */
603 
604 	for (i = 0; i < oldhash->uh_hashsize; i++)
605 		while (!SLIST_EMPTY(&oldhash->uh_slab_hash[i])) {
606 			slab = SLIST_FIRST(&oldhash->uh_slab_hash[i]);
607 			SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[i], us_hlink);
608 			hval = UMA_HASH(newhash, slab->us_data);
609 			SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
610 			    slab, us_hlink);
611 		}
612 
613 	return (1);
614 }
615 
616 /*
617  * Free the hash bucket to the appropriate backing store.
618  *
619  * Arguments:
620  *	hash  The hash structure whose uh_slab_hash array we're freeing;
621  *	      its uh_hashsize determines which backing store it came from
622  *
623  * Returns:
624  *	Nothing
625  */
626 static void
627 hash_free(struct uma_hash *hash)
628 {
629 	if (hash->uh_slab_hash == NULL)
630 		return;
631 	if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
632 		zone_free_item(hashzone, hash->uh_slab_hash, NULL, SKIP_NONE);
633 	else
634 		free(hash->uh_slab_hash, M_UMAHASH);
635 }
636 
637 /*
638  * Frees all outstanding items in a bucket
639  *
640  * Arguments:
641  *	zone   The zone to free to, must be unlocked.
642  *	bucket The free/alloc bucket with items, cpu queue must be locked.
643  *
644  * Returns:
645  *	Nothing
646  */
647 
648 static void
649 bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
650 {
651 	int i;
652 
653 	if (bucket == NULL)
654 		return;
655 
656 	if (zone->uz_fini)
657 		for (i = 0; i < bucket->ub_cnt; i++)
658 			zone->uz_fini(bucket->ub_bucket[i], zone->uz_size);
659 	zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt);
660 	bucket->ub_cnt = 0;
661 }
662 
663 /*
664  * Drains the per cpu caches for a zone.
665  *
666  * NOTE: This may only be called while the zone is being torn down, and not
667  * during normal operation.  This is necessary in order that we do not have
668  * to migrate CPUs to drain the per-CPU caches.
669  *
670  * Arguments:
671  *	zone     The zone to drain, must be unlocked.
672  *
673  * Returns:
674  *	Nothing
675  */
676 static void
677 cache_drain(uma_zone_t zone)
678 {
679 	uma_cache_t cache;
680 	int cpu;
681 
682 	/*
683 	 * XXX: It is safe to not lock the per-CPU caches, because we're
684 	 * tearing down the zone anyway.  I.e., there will be no further use
685 	 * of the caches at this point.
686 	 *
687  * XXX: It would be good to be able to assert that the zone is being
688 	 * torn down to prevent improper use of cache_drain().
689 	 *
690 	 * XXX: We lock the zone before passing into bucket_cache_drain() as
691 	 * it is used elsewhere.  Should the tear-down path be made special
692 	 * there in some form?
693 	 */
694 	CPU_FOREACH(cpu) {
695 		cache = &zone->uz_cpu[cpu];
696 		bucket_drain(zone, cache->uc_allocbucket);
697 		bucket_drain(zone, cache->uc_freebucket);
698 		if (cache->uc_allocbucket != NULL)
699 			bucket_free(zone, cache->uc_allocbucket, NULL);
700 		if (cache->uc_freebucket != NULL)
701 			bucket_free(zone, cache->uc_freebucket, NULL);
702 		cache->uc_allocbucket = cache->uc_freebucket = NULL;
703 	}
704 	ZONE_LOCK(zone);
705 	bucket_cache_drain(zone);
706 	ZONE_UNLOCK(zone);
707 }
708 
709 static void
710 cache_shrink(uma_zone_t zone)
711 {
712 
713 	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
714 		return;
715 
716 	ZONE_LOCK(zone);
717 	zone->uz_count = (zone->uz_count_min + zone->uz_count) / 2;
718 	ZONE_UNLOCK(zone);
719 }
720 
721 static void
722 cache_drain_safe_cpu(uma_zone_t zone)
723 {
724 	uma_cache_t cache;
725 	uma_bucket_t b1, b2;
726 
727 	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
728 		return;
729 
730 	b1 = b2 = NULL;
731 	ZONE_LOCK(zone);
732 	critical_enter();
733 	cache = &zone->uz_cpu[curcpu];
734 	if (cache->uc_allocbucket) {
735 		if (cache->uc_allocbucket->ub_cnt != 0)
736 			LIST_INSERT_HEAD(&zone->uz_buckets,
737 			    cache->uc_allocbucket, ub_link);
738 		else
739 			b1 = cache->uc_allocbucket;
740 		cache->uc_allocbucket = NULL;
741 	}
742 	if (cache->uc_freebucket) {
743 		if (cache->uc_freebucket->ub_cnt != 0)
744 			LIST_INSERT_HEAD(&zone->uz_buckets,
745 			    cache->uc_freebucket, ub_link);
746 		else
747 			b2 = cache->uc_freebucket;
748 		cache->uc_freebucket = NULL;
749 	}
750 	critical_exit();
751 	ZONE_UNLOCK(zone);
752 	if (b1)
753 		bucket_free(zone, b1, NULL);
754 	if (b2)
755 		bucket_free(zone, b2, NULL);
756 }
757 
758 /*
759  * Safely drain the per-CPU caches of a zone (or all zones) to the bucket cache.
760  * This is an expensive call because it needs to bind to all CPUs
761  * one by one and enter a critical section on each of them in order
762  * to safely access their cache buckets.
763  * Zone lock must not be held when calling this function.
764  */
765 static void
766 cache_drain_safe(uma_zone_t zone)
767 {
768 	int cpu;
769 
770 	/*
771 	 * Polite bucket size shrinking was not enough; shrink aggressively.
772 	 */
773 	if (zone)
774 		cache_shrink(zone);
775 	else
776 		zone_foreach(cache_shrink);
777 
778 	CPU_FOREACH(cpu) {
779 		thread_lock(curthread);
780 		sched_bind(curthread, cpu);
781 		thread_unlock(curthread);
782 
783 		if (zone)
784 			cache_drain_safe_cpu(zone);
785 		else
786 			zone_foreach(cache_drain_safe_cpu);
787 	}
788 	thread_lock(curthread);
789 	sched_unbind(curthread);
790 	thread_unlock(curthread);
791 }
792 
793 /*
794  * Drain the cached buckets from a zone.  Expects a locked zone on entry.
795  */
796 static void
797 bucket_cache_drain(uma_zone_t zone)
798 {
799 	uma_bucket_t bucket;
800 
801 	/*
802 	 * Drain the bucket queues and free the buckets; we just keep two per
803 	 * cpu (alloc/free).
804 	 */
805 	while ((bucket = LIST_FIRST(&zone->uz_buckets)) != NULL) {
806 		LIST_REMOVE(bucket, ub_link);
807 		ZONE_UNLOCK(zone);
808 		bucket_drain(zone, bucket);
809 		bucket_free(zone, bucket, NULL);
810 		ZONE_LOCK(zone);
811 	}
812 
813 	/*
814 	 * Shrink the bucket size further.  The price of a single zone lock collision
815 	 * is probably lower than the price of a global cache drain.
816 	 */
817 	if (zone->uz_count > zone->uz_count_min)
818 		zone->uz_count--;
819 }
820 
821 static void
822 keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
823 {
824 	uint8_t *mem;
825 	int i;
826 	uint8_t flags;
827 
828 	mem = slab->us_data;
829 	flags = slab->us_flags;
830 	i = start;
831 	if (keg->uk_fini != NULL) {
832 		for (i--; i > -1; i--)
833 			keg->uk_fini(slab->us_data + (keg->uk_rsize * i),
834 			    keg->uk_size);
835 	}
836 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
837 		zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
838 #ifdef UMA_DEBUG
839 	printf("%s: Returning %d bytes.\n", keg->uk_name,
840 	    PAGE_SIZE * keg->uk_ppera);
841 #endif
842 	keg->uk_freef(mem, PAGE_SIZE * keg->uk_ppera, flags);
843 }
844 
845 /*
846  * Frees pages from a keg back to the system.  This is done on demand from
847  * the pageout daemon.
848  *
849  * Returns nothing.
850  */
851 static void
852 keg_drain(uma_keg_t keg)
853 {
854 	struct slabhead freeslabs = { 0 };
855 	uma_slab_t slab;
856 	uma_slab_t n;
857 
858 	/*
859 	 * We don't want to take pages from statically allocated kegs at this
860 	 * time.
861 	 */
862 	if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
863 		return;
864 
865 #ifdef UMA_DEBUG
866 	printf("%s free items: %u\n", keg->uk_name, keg->uk_free);
867 #endif
868 	KEG_LOCK(keg);
869 	if (keg->uk_free == 0)
870 		goto finished;
871 
872 	slab = LIST_FIRST(&keg->uk_free_slab);
873 	while (slab) {
874 		n = LIST_NEXT(slab, us_link);
875 
876 		/* We have nowhere to free these to. */
877 		if (slab->us_flags & UMA_SLAB_BOOT) {
878 			slab = n;
879 			continue;
880 		}
881 
882 		LIST_REMOVE(slab, us_link);
883 		keg->uk_pages -= keg->uk_ppera;
884 		keg->uk_free -= keg->uk_ipers;
885 
886 		if (keg->uk_flags & UMA_ZONE_HASH)
887 			UMA_HASH_REMOVE(&keg->uk_hash, slab, slab->us_data);
888 
889 		SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);
890 
891 		slab = n;
892 	}
893 finished:
894 	KEG_UNLOCK(keg);
895 
896 	while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
897 		SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
898 		keg_free_slab(keg, slab, keg->uk_ipers);
899 	}
900 }
901 
902 static void
903 zone_drain_wait(uma_zone_t zone, int waitok)
904 {
905 
906 	/*
907 	 * Set draining to interlock with zone_dtor() so we can release our
908 	 * locks as we go.  Only dtor() should do a WAITOK call since it
909 	 * is the only call that knows the structure will still be available
910 	 * when it wakes up.
911 	 */
912 	ZONE_LOCK(zone);
913 	while (zone->uz_flags & UMA_ZFLAG_DRAINING) {
914 		if (waitok == M_NOWAIT)
915 			goto out;
916 		msleep(zone, zone->uz_lockptr, PVM, "zonedrain", 1);
917 	}
918 	zone->uz_flags |= UMA_ZFLAG_DRAINING;
919 	bucket_cache_drain(zone);
920 	ZONE_UNLOCK(zone);
921 	/*
922 	 * The DRAINING flag protects us from being freed while
923 	 * we're running.  Normally the uma_rwlock would protect us but we
924 	 * must be able to release and acquire the right lock for each keg.
925 	 */
926 	zone_foreach_keg(zone, &keg_drain);
927 	ZONE_LOCK(zone);
928 	zone->uz_flags &= ~UMA_ZFLAG_DRAINING;
929 	wakeup(zone);
930 out:
931 	ZONE_UNLOCK(zone);
932 }
933 
934 void
935 zone_drain(uma_zone_t zone)
936 {
937 
938 	zone_drain_wait(zone, M_NOWAIT);
939 }
940 
941 /*
942  * Allocate a new slab for a keg.  This does not insert the slab onto a list.
943  *
944  * Arguments:
945  *	wait  Shall we wait?
946  *
947  * Returns:
948  *	The slab that was allocated or NULL if there is no memory and the
949  *	caller specified M_NOWAIT.
950  */
951 static uma_slab_t
952 keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int wait)
953 {
954 	uma_slabrefcnt_t slabref;
955 	uma_alloc allocf;
956 	uma_slab_t slab;
957 	uint8_t *mem;
958 	uint8_t flags;
959 	int i;
960 
961 	mtx_assert(&keg->uk_lock, MA_OWNED);
962 	slab = NULL;
963 	mem = NULL;
964 
965 #ifdef UMA_DEBUG
966 	printf("alloc_slab:  Allocating a new slab for %s\n", keg->uk_name);
967 #endif
968 	allocf = keg->uk_allocf;
969 	KEG_UNLOCK(keg);
970 
971 	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
972 		slab = zone_alloc_item(keg->uk_slabzone, NULL, wait);
973 		if (slab == NULL)
974 			goto out;
975 	}
976 
977 	/*
978 	 * This reproduces the old vm_zone behavior of zero filling pages the
979 	 * first time they are added to a zone.
980 	 *
981 	 * Malloced items are zeroed in uma_zalloc.
982 	 */
983 
984 	if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
985 		wait |= M_ZERO;
986 	else
987 		wait &= ~M_ZERO;
988 
989 	if (keg->uk_flags & UMA_ZONE_NODUMP)
990 		wait |= M_NODUMP;
991 
992 	/* zone is passed for legacy reasons. */
993 	mem = allocf(zone, keg->uk_ppera * PAGE_SIZE, &flags, wait);
994 	if (mem == NULL) {
995 		if (keg->uk_flags & UMA_ZONE_OFFPAGE)
996 			zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
997 		slab = NULL;
998 		goto out;
999 	}
1000 
1001 	/* Point the slab into the allocated memory */
1002 	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
1003 		slab = (uma_slab_t )(mem + keg->uk_pgoff);
1004 
1005 	if (keg->uk_flags & UMA_ZONE_VTOSLAB)
1006 		for (i = 0; i < keg->uk_ppera; i++)
1007 			vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);
1008 
1009 	slab->us_keg = keg;
1010 	slab->us_data = mem;
1011 	slab->us_freecount = keg->uk_ipers;
1012 	slab->us_flags = flags;
1013 	BIT_FILL(SLAB_SETSIZE, &slab->us_free);
1014 #ifdef INVARIANTS
1015 	BIT_ZERO(SLAB_SETSIZE, &slab->us_debugfree);
1016 #endif
1017 	if (keg->uk_flags & UMA_ZONE_REFCNT) {
1018 		slabref = (uma_slabrefcnt_t)slab;
1019 		for (i = 0; i < keg->uk_ipers; i++)
1020 			slabref->us_refcnt[i] = 0;
1021 	}
1022 
1023 	if (keg->uk_init != NULL) {
1024 		for (i = 0; i < keg->uk_ipers; i++)
1025 			if (keg->uk_init(slab->us_data + (keg->uk_rsize * i),
1026 			    keg->uk_size, wait) != 0)
1027 				break;
1028 		if (i != keg->uk_ipers) {
1029 			keg_free_slab(keg, slab, i);
1030 			slab = NULL;
1031 			goto out;
1032 		}
1033 	}
1034 out:
1035 	KEG_LOCK(keg);
1036 
1037 	if (slab != NULL) {
1038 		if (keg->uk_flags & UMA_ZONE_HASH)
1039 			UMA_HASH_INSERT(&keg->uk_hash, slab, mem);
1040 
1041 		keg->uk_pages += keg->uk_ppera;
1042 		keg->uk_free += keg->uk_ipers;
1043 	}
1044 
1045 	return (slab);
1046 }
1047 
1048 /*
1049  * This function is intended to be used early on in place of page_alloc() so
1050  * that we may use the boot time page cache to satisfy allocations before
1051  * the VM is ready.
1052  */
1053 static void *
1054 startup_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *pflag, int wait)
1055 {
1056 	uma_keg_t keg;
1057 	uma_slab_t tmps;
1058 	int pages, check_pages;
1059 
1060 	keg = zone_first_keg(zone);
1061 	pages = howmany(bytes, PAGE_SIZE);
1062 	check_pages = pages - 1;
1063 	KASSERT(pages > 0, ("startup_alloc can't reserve 0 pages\n"));
1064 
1065 	/*
1066 	 * Check our small startup cache to see if it has pages remaining.
1067 	 */
1068 	mtx_lock(&uma_boot_pages_mtx);
1069 
1070 	/* First check if we have enough room. */
1071 	tmps = LIST_FIRST(&uma_boot_pages);
1072 	while (tmps != NULL && check_pages-- > 0)
1073 		tmps = LIST_NEXT(tmps, us_link);
1074 	if (tmps != NULL) {
1075 		/*
1076 		 * It's ok to lose tmps references.  The last one will
1077 		 * have tmps->us_data pointing to the start address of
1078 		 * "pages" contiguous pages of memory.
1079 		 */
1080 		while (pages-- > 0) {
1081 			tmps = LIST_FIRST(&uma_boot_pages);
1082 			LIST_REMOVE(tmps, us_link);
1083 		}
1084 		mtx_unlock(&uma_boot_pages_mtx);
1085 		*pflag = tmps->us_flags;
1086 		return (tmps->us_data);
1087 	}
1088 	mtx_unlock(&uma_boot_pages_mtx);
1089 	if (booted < UMA_STARTUP2)
1090 		panic("UMA: Increase vm.boot_pages");
1091 	/*
1092 	 * Now that we've booted, reset these users to their real allocator.
1093 	 */
1094 #ifdef UMA_MD_SMALL_ALLOC
1095 	keg->uk_allocf = (keg->uk_ppera > 1) ? page_alloc : uma_small_alloc;
1096 #else
1097 	keg->uk_allocf = page_alloc;
1098 #endif
1099 	return keg->uk_allocf(zone, bytes, pflag, wait);
1100 }
1101 
1102 /*
1103  * Allocates a number of pages from the system
1104  *
1105  * Arguments:
1106  *	bytes  The number of bytes requested
1107  *	wait  Shall we wait?
1108  *
1109  * Returns:
1110  *	A pointer to the alloced memory or possibly
1111  *	NULL if M_NOWAIT is set.
1112  */
1113 static void *
1114 page_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *pflag, int wait)
1115 {
1116 	void *p;	/* Returned page */
1117 
1118 	*pflag = UMA_SLAB_KMEM;
1119 	p = (void *) kmem_malloc(kmem_arena, bytes, wait);
1120 
1121 	return (p);
1122 }
1123 
1124 /*
1125  * Allocates a number of pages not belonging to a VM object
1126  *
1127  * Arguments:
1128  *	bytes  The number of bytes requested
1129  *	wait   Shall we wait?
1130  *
1131  * Returns:
1132  *	A pointer to the alloced memory or possibly
1133  *	NULL if M_NOWAIT is set.
1134  */
1135 static void *
1136 noobj_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *flags, int wait)
1137 {
1138 	TAILQ_HEAD(, vm_page) alloctail;
1139 	u_long npages;
1140 	vm_offset_t retkva, zkva;
1141 	vm_page_t p, p_next;
1142 	uma_keg_t keg;
1143 
1144 	TAILQ_INIT(&alloctail);
1145 	keg = zone_first_keg(zone);
1146 
1147 	npages = howmany(bytes, PAGE_SIZE);
1148 	while (npages > 0) {
1149 		p = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT |
1150 		    VM_ALLOC_WIRED | VM_ALLOC_NOOBJ);
1151 		if (p != NULL) {
1152 			/*
1153 			 * Since the page does not belong to an object, its
1154 			 * listq is unused.
1155 			 */
1156 			TAILQ_INSERT_TAIL(&alloctail, p, listq);
1157 			npages--;
1158 			continue;
1159 		}
1160 		if (wait & M_WAITOK) {
1161 			VM_WAIT;
1162 			continue;
1163 		}
1164 
1165 		/*
1166 		 * Page allocation failed, free intermediate pages and
1167 		 * exit.
1168 		 */
1169 		TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
1170 			vm_page_unwire(p, PQ_NONE);
1171 			vm_page_free(p);
1172 		}
1173 		return (NULL);
1174 	}
1175 	*flags = UMA_SLAB_PRIV;
1176 	zkva = keg->uk_kva +
1177 	    atomic_fetchadd_long(&keg->uk_offset, round_page(bytes));
1178 	retkva = zkva;
1179 	TAILQ_FOREACH(p, &alloctail, listq) {
1180 		pmap_qenter(zkva, &p, 1);
1181 		zkva += PAGE_SIZE;
1182 	}
1183 
1184 	return ((void *)retkva);
1185 }
1186 
1187 /*
1188  * Frees a number of pages to the system
1189  *
1190  * Arguments:
1191  *	mem   A pointer to the memory to be freed
1192  *	size  The size of the memory being freed
1193  *	flags The original p->us_flags field
1194  *
1195  * Returns:
1196  *	Nothing
1197  */
1198 static void
1199 page_free(void *mem, vm_size_t size, uint8_t flags)
1200 {
1201 	struct vmem *vmem;
1202 
1203 	if (flags & UMA_SLAB_KMEM)
1204 		vmem = kmem_arena;
1205 	else if (flags & UMA_SLAB_KERNEL)
1206 		vmem = kernel_arena;
1207 	else
1208 		panic("UMA: page_free used with invalid flags %d", flags);
1209 
1210 	kmem_free(vmem, (vm_offset_t)mem, size);
1211 }
1212 
1213 /*
1214  * Zero fill initializer
1215  *
1216  * Arguments/Returns follow uma_init specifications
1217  */
1218 static int
1219 zero_init(void *mem, int size, int flags)
1220 {
1221 	bzero(mem, size);
1222 	return (0);
1223 }
1224 
1225 /*
1226  * Finish creating a small uma keg.  This calculates ipers and the keg size.
1227  *
1228  * Arguments
1229  *	keg  The keg we should initialize
1230  *
1231  * Returns
1232  *	Nothing
1233  */
1234 static void
1235 keg_small_init(uma_keg_t keg)
1236 {
1237 	u_int rsize;
1238 	u_int memused;
1239 	u_int wastedspace;
1240 	u_int shsize;
1241 
1242 	if (keg->uk_flags & UMA_ZONE_PCPU) {
1243 		u_int ncpus = mp_ncpus ? mp_ncpus : MAXCPU;
1244 
1245 		keg->uk_slabsize = sizeof(struct pcpu);
1246 		keg->uk_ppera = howmany(ncpus * sizeof(struct pcpu),
1247 		    PAGE_SIZE);
1248 	} else {
1249 		keg->uk_slabsize = UMA_SLAB_SIZE;
1250 		keg->uk_ppera = 1;
1251 	}
1252 
1253 	/*
1254 	 * Calculate the size of each allocation (rsize) according to
1255 	 * alignment.  If the requested size is smaller than what the
1256 	 * per-slab allocation bitset can track, we round it up.
1257 	 */
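	/*
	 * Worked example (illustrative values): with uk_align == 7 (an 8-byte
	 * alignment mask) and a 20-byte item, 20 & 7 != 0, so rsize becomes
	 * (20 & ~7) + (7 + 1) == 24, the next multiple of the alignment.
	 */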
1258 	rsize = keg->uk_size;
1259 	if (rsize < keg->uk_slabsize / SLAB_SETSIZE)
1260 		rsize = keg->uk_slabsize / SLAB_SETSIZE;
1261 	if (rsize & keg->uk_align)
1262 		rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1);
1263 	keg->uk_rsize = rsize;
1264 
1265 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0 ||
1266 	    keg->uk_rsize < sizeof(struct pcpu),
1267 	    ("%s: size %u too large", __func__, keg->uk_rsize));
1268 
1269 	if (keg->uk_flags & UMA_ZONE_REFCNT)
1270 		rsize += sizeof(uint32_t);
1271 
1272 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
1273 		shsize = 0;
1274 	else
1275 		shsize = sizeof(struct uma_slab);
1276 
1277 	keg->uk_ipers = (keg->uk_slabsize - shsize) / rsize;
1278 	KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
1279 	    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
1280 
1281 	memused = keg->uk_ipers * rsize + shsize;
1282 	wastedspace = keg->uk_slabsize - memused;
1283 
1284 	/*
1285 	 * We can't do OFFPAGE if we're internal or if we've been
1286 	 * asked not to go to the VM for buckets.  If we do OFFPAGE we
1287 	 * may end up going to the VM for slab headers, which we do not
1288 	 * want to do if we're UMA_ZFLAG_CACHEONLY as a result
1289 	 * of UMA_ZONE_VM, which clearly forbids it.
1290 	 */
1291 	if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
1292 	    (keg->uk_flags & UMA_ZFLAG_CACHEONLY))
1293 		return;
1294 
1295 	/*
1296 	 * See if using an OFFPAGE slab will limit our waste.  Only do
1297 	 * this if it permits more items per-slab.
1298 	 *
1299 	 * XXX We could try growing slabsize to limit max waste as well.
1300 	 * Historically this was not done because the VM could not
1301 	 * efficiently handle contiguous allocations.
1302 	 */
1303 	if ((wastedspace >= keg->uk_slabsize / UMA_MAX_WASTE) &&
1304 	    (keg->uk_ipers < (keg->uk_slabsize / keg->uk_rsize))) {
1305 		keg->uk_ipers = keg->uk_slabsize / keg->uk_rsize;
1306 		KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
1307 		    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
1308 #ifdef UMA_DEBUG
1309 		printf("UMA decided we need offpage slab headers for "
1310 		    "keg: %s, calculated wastedspace = %d, "
1311 		    "maximum wasted space allowed = %d, "
1312 		    "calculated ipers = %d, "
1313 		    "new wasted space = %d\n", keg->uk_name, wastedspace,
1314 		    keg->uk_slabsize / UMA_MAX_WASTE, keg->uk_ipers,
1315 		    keg->uk_slabsize - keg->uk_ipers * keg->uk_rsize);
1316 #endif
1317 		keg->uk_flags |= UMA_ZONE_OFFPAGE;
1318 	}
1319 
1320 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
1321 	    (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1322 		keg->uk_flags |= UMA_ZONE_HASH;
1323 }
1324 
1325 /*
1326  * Finish creating a large (> UMA_SLAB_SIZE) uma keg.  Just give in and do
1327  * OFFPAGE for now.  When I can allow for more dynamic slab sizes this will be
1328  * more complicated.
1329  *
1330  * Arguments
1331  *	keg  The keg we should initialize
1332  *
1333  * Returns
1334  *	Nothing
1335  */
1336 static void
1337 keg_large_init(uma_keg_t keg)
1338 {
1339 	u_int shsize;
1340 
1341 	KASSERT(keg != NULL, ("Keg is null in keg_large_init"));
1342 	KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0,
1343 	    ("keg_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY keg"));
1344 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
1345 	    ("%s: Cannot large-init a UMA_ZONE_PCPU keg", __func__));
1346 
1347 	keg->uk_ppera = howmany(keg->uk_size, PAGE_SIZE);
1348 	keg->uk_slabsize = keg->uk_ppera * PAGE_SIZE;
1349 	keg->uk_ipers = 1;
1350 	keg->uk_rsize = keg->uk_size;
1351 
1352 	/* We can't do OFFPAGE if we're internal, bail out here. */
1353 	if (keg->uk_flags & UMA_ZFLAG_INTERNAL)
1354 		return;
1355 
1356 	/* Check whether we have enough space to not do OFFPAGE. */
1357 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) == 0) {
1358 		shsize = sizeof(struct uma_slab);
1359 		if (keg->uk_flags & UMA_ZONE_REFCNT)
1360 			shsize += keg->uk_ipers * sizeof(uint32_t);
1361 		if (shsize & UMA_ALIGN_PTR)
1362 			shsize = (shsize & ~UMA_ALIGN_PTR) +
1363 			    (UMA_ALIGN_PTR + 1);
1364 
1365 		if ((PAGE_SIZE * keg->uk_ppera) - keg->uk_rsize < shsize)
1366 			keg->uk_flags |= UMA_ZONE_OFFPAGE;
1367 	}
1368 
1369 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
1370 	    (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1371 		keg->uk_flags |= UMA_ZONE_HASH;
1372 }
1373 
1374 static void
1375 keg_cachespread_init(uma_keg_t keg)
1376 {
1377 	int alignsize;
1378 	int trailer;
1379 	int pages;
1380 	int rsize;
1381 
1382 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
1383 	    ("%s: Cannot cachespread-init a UMA_ZONE_PCPU keg", __func__));
1384 
1385 	alignsize = keg->uk_align + 1;
1386 	rsize = keg->uk_size;
1387 	/*
1388 	 * We want one item to start on every align boundary in a page.  To
1389 	 * do this we will span pages.  We will also extend the item by the
1390 	 * size of align if it is an even multiple of align.  Otherwise, it
1391 	 * would fall on the same boundary every time.
1392 	 */
1393 	if (rsize & keg->uk_align)
1394 		rsize = (rsize & ~keg->uk_align) + alignsize;
1395 	if ((rsize & alignsize) == 0)
1396 		rsize += alignsize;
1397 	trailer = rsize - keg->uk_size;
1398 	pages = (rsize * (PAGE_SIZE / alignsize)) / PAGE_SIZE;
1399 	pages = MIN(pages, (128 * 1024) / PAGE_SIZE);
1400 	keg->uk_rsize = rsize;
1401 	keg->uk_ppera = pages;
1402 	keg->uk_slabsize = UMA_SLAB_SIZE;
1403 	keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize;
1404 	keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
1405 	KASSERT(keg->uk_ipers <= SLAB_SETSIZE,
1406 	    ("%s: keg->uk_ipers too high(%d) increase max_ipers", __func__,
1407 	    keg->uk_ipers));
1408 }
1409 
1410 /*
1411  * Keg header ctor.  This initializes all fields, locks, etc., and inserts
1412  * the keg onto the global keg list.
1413  *
1414  * Arguments/Returns follow uma_ctor specifications
1415  *	udata  Actually uma_kctor_args
1416  */
1417 static int
1418 keg_ctor(void *mem, int size, void *udata, int flags)
1419 {
1420 	struct uma_kctor_args *arg = udata;
1421 	uma_keg_t keg = mem;
1422 	uma_zone_t zone;
1423 
1424 	bzero(keg, size);
1425 	keg->uk_size = arg->size;
1426 	keg->uk_init = arg->uminit;
1427 	keg->uk_fini = arg->fini;
1428 	keg->uk_align = arg->align;
1429 	keg->uk_free = 0;
1430 	keg->uk_reserve = 0;
1431 	keg->uk_pages = 0;
1432 	keg->uk_flags = arg->flags;
1433 	keg->uk_allocf = page_alloc;
1434 	keg->uk_freef = page_free;
1435 	keg->uk_slabzone = NULL;
1436 
1437 	/*
1438 	 * The master zone is passed to us at keg-creation time.
1439 	 */
1440 	zone = arg->zone;
1441 	keg->uk_name = zone->uz_name;
1442 
1443 	if (arg->flags & UMA_ZONE_VM)
1444 		keg->uk_flags |= UMA_ZFLAG_CACHEONLY;
1445 
1446 	if (arg->flags & UMA_ZONE_ZINIT)
1447 		keg->uk_init = zero_init;
1448 
1449 	if (arg->flags & UMA_ZONE_REFCNT || arg->flags & UMA_ZONE_MALLOC)
1450 		keg->uk_flags |= UMA_ZONE_VTOSLAB;
1451 
1452 	if (arg->flags & UMA_ZONE_PCPU)
1453 #ifdef SMP
1454 		keg->uk_flags |= UMA_ZONE_OFFPAGE;
1455 #else
1456 		keg->uk_flags &= ~UMA_ZONE_PCPU;
1457 #endif
1458 
1459 	if (keg->uk_flags & UMA_ZONE_CACHESPREAD) {
1460 		keg_cachespread_init(keg);
1461 	} else if (keg->uk_flags & UMA_ZONE_REFCNT) {
1462 		if (keg->uk_size >
1463 		    (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt) -
1464 		    sizeof(uint32_t)))
1465 			keg_large_init(keg);
1466 		else
1467 			keg_small_init(keg);
1468 	} else {
1469 		if (keg->uk_size > (UMA_SLAB_SIZE - sizeof(struct uma_slab)))
1470 			keg_large_init(keg);
1471 		else
1472 			keg_small_init(keg);
1473 	}
1474 
1475 	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
1476 		if (keg->uk_flags & UMA_ZONE_REFCNT) {
1477 			if (keg->uk_ipers > uma_max_ipers_ref)
1478 				panic("Too many ref items per zone: %d > %d\n",
1479 				    keg->uk_ipers, uma_max_ipers_ref);
1480 			keg->uk_slabzone = slabrefzone;
1481 		} else
1482 			keg->uk_slabzone = slabzone;
1483 	}
1484 
1485 	/*
1486 	 * If we haven't booted yet we need allocations to go through the
1487 	 * startup cache until the vm is ready.
1488 	 */
1489 	if (keg->uk_ppera == 1) {
1490 #ifdef UMA_MD_SMALL_ALLOC
1491 		keg->uk_allocf = uma_small_alloc;
1492 		keg->uk_freef = uma_small_free;
1493 
1494 		if (booted < UMA_STARTUP)
1495 			keg->uk_allocf = startup_alloc;
1496 #else
1497 		if (booted < UMA_STARTUP2)
1498 			keg->uk_allocf = startup_alloc;
1499 #endif
1500 	} else if (booted < UMA_STARTUP2 &&
1501 	    (keg->uk_flags & UMA_ZFLAG_INTERNAL))
1502 		keg->uk_allocf = startup_alloc;
1503 
1504 	/*
1505 	 * Initialize keg's lock
1506 	 */
1507 	KEG_LOCK_INIT(keg, (arg->flags & UMA_ZONE_MTXCLASS));
1508 
1509 	/*
1510 	 * If we're putting the slab header in the actual page we need to
1511 	 * figure out where in each page it goes.  This calculates a right
1512 	 * justified offset into the memory on an ALIGN_PTR boundary.
1513 	 */
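	/*
	 * Illustrative layout (the numbers are assumptions): with a 4096-byte
	 * page, uk_ppera == 1, and totsize rounded up to 48 bytes, uk_pgoff
	 * becomes 4096 - 48 == 4048, so the slab header sits in the last
	 * 48 bytes of the page.
	 */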
1514 	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) {
1515 		u_int totsize;
1516 
1517 		/* Size of the slab struct and free list */
1518 		totsize = sizeof(struct uma_slab);
1519 
1520 		/* Size of the reference counts. */
1521 		if (keg->uk_flags & UMA_ZONE_REFCNT)
1522 			totsize += keg->uk_ipers * sizeof(uint32_t);
1523 
1524 		if (totsize & UMA_ALIGN_PTR)
1525 			totsize = (totsize & ~UMA_ALIGN_PTR) +
1526 			    (UMA_ALIGN_PTR + 1);
1527 		keg->uk_pgoff = (PAGE_SIZE * keg->uk_ppera) - totsize;
1528 
1529 		/*
1530 		 * The only way the following is possible is if our
1531 		 * UMA_ALIGN_PTR adjustments have made us bigger than
1532 		 * UMA_SLAB_SIZE.  I haven't checked whether this is
1533 		 * mathematically possible for all cases, so we make
1534 		 * sure here anyway.
1535 		 */
1536 		totsize = keg->uk_pgoff + sizeof(struct uma_slab);
1537 		if (keg->uk_flags & UMA_ZONE_REFCNT)
1538 			totsize += keg->uk_ipers * sizeof(uint32_t);
1539 		if (totsize > PAGE_SIZE * keg->uk_ppera) {
1540 			printf("zone %s ipers %d rsize %d size %d\n",
1541 			    zone->uz_name, keg->uk_ipers, keg->uk_rsize,
1542 			    keg->uk_size);
1543 			panic("UMA slab won't fit.");
1544 		}
1545 	}
1546 
1547 	if (keg->uk_flags & UMA_ZONE_HASH)
1548 		hash_alloc(&keg->uk_hash);
1549 
1550 #ifdef UMA_DEBUG
1551 	printf("UMA: %s(%p) size %d(%d) flags %#x ipers %d ppera %d out %d free %d\n",
1552 	    zone->uz_name, zone, keg->uk_size, keg->uk_rsize, keg->uk_flags,
1553 	    keg->uk_ipers, keg->uk_ppera,
1554 	    (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free);
1555 #endif
1556 
1557 	LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);
1558 
1559 	rw_wlock(&uma_rwlock);
1560 	LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
1561 	rw_wunlock(&uma_rwlock);
1562 	return (0);
1563 }
1564 
1565 /*
1566  * Zone header ctor.  This initializes all fields, locks, etc.
1567  *
1568  * Arguments/Returns follow uma_ctor specifications
1569  *	udata  Actually uma_zctor_args
1570  */
1571 static int
1572 zone_ctor(void *mem, int size, void *udata, int flags)
1573 {
1574 	struct uma_zctor_args *arg = udata;
1575 	uma_zone_t zone = mem;
1576 	uma_zone_t z;
1577 	uma_keg_t keg;
1578 
1579 	bzero(zone, size);
1580 	zone->uz_name = arg->name;
1581 	zone->uz_ctor = arg->ctor;
1582 	zone->uz_dtor = arg->dtor;
1583 	zone->uz_slab = zone_fetch_slab;
1584 	zone->uz_init = NULL;
1585 	zone->uz_fini = NULL;
1586 	zone->uz_allocs = 0;
1587 	zone->uz_frees = 0;
1588 	zone->uz_fails = 0;
1589 	zone->uz_sleeps = 0;
1590 	zone->uz_count = 0;
1591 	zone->uz_count_min = 0;
1592 	zone->uz_flags = 0;
1593 	zone->uz_warning = NULL;
1594 	timevalclear(&zone->uz_ratecheck);
1595 	keg = arg->keg;
1596 
1597 	ZONE_LOCK_INIT(zone, (arg->flags & UMA_ZONE_MTXCLASS));
1598 
1599 	/*
1600 	 * This is a pure cache zone, no kegs.
1601 	 */
1602 	if (arg->import) {
1603 		if (arg->flags & UMA_ZONE_VM)
1604 			arg->flags |= UMA_ZFLAG_CACHEONLY;
1605 		zone->uz_flags = arg->flags;
1606 		zone->uz_size = arg->size;
1607 		zone->uz_import = arg->import;
1608 		zone->uz_release = arg->release;
1609 		zone->uz_arg = arg->arg;
1610 		zone->uz_lockptr = &zone->uz_lock;
1611 		rw_wlock(&uma_rwlock);
1612 		LIST_INSERT_HEAD(&uma_cachezones, zone, uz_link);
1613 		rw_wunlock(&uma_rwlock);
1614 		goto out;
1615 	}
1616 
1617 	/*
1618 	 * Use the regular zone/keg/slab allocator.
1619 	 */
1620 	zone->uz_import = (uma_import)zone_import;
1621 	zone->uz_release = (uma_release)zone_release;
1622 	zone->uz_arg = zone;
1623 
1624 	if (arg->flags & UMA_ZONE_SECONDARY) {
1625 		KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
1626 		zone->uz_init = arg->uminit;
1627 		zone->uz_fini = arg->fini;
1628 		zone->uz_lockptr = &keg->uk_lock;
1629 		zone->uz_flags |= UMA_ZONE_SECONDARY;
1630 		rw_wlock(&uma_rwlock);
1631 		ZONE_LOCK(zone);
1632 		LIST_FOREACH(z, &keg->uk_zones, uz_link) {
1633 			if (LIST_NEXT(z, uz_link) == NULL) {
1634 				LIST_INSERT_AFTER(z, zone, uz_link);
1635 				break;
1636 			}
1637 		}
1638 		ZONE_UNLOCK(zone);
1639 		rw_wunlock(&uma_rwlock);
1640 	} else if (keg == NULL) {
1641 		if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
1642 		    arg->align, arg->flags)) == NULL)
1643 			return (ENOMEM);
1644 	} else {
1645 		struct uma_kctor_args karg;
1646 		int error;
1647 
1648 		/* We should only be here from uma_startup() */
1649 		karg.size = arg->size;
1650 		karg.uminit = arg->uminit;
1651 		karg.fini = arg->fini;
1652 		karg.align = arg->align;
1653 		karg.flags = arg->flags;
1654 		karg.zone = zone;
1655 		error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
1656 		    flags);
1657 		if (error)
1658 			return (error);
1659 	}
1660 
1661 	/*
1662 	 * Link in the first keg.
1663 	 */
1664 	zone->uz_klink.kl_keg = keg;
1665 	LIST_INSERT_HEAD(&zone->uz_kegs, &zone->uz_klink, kl_link);
1666 	zone->uz_lockptr = &keg->uk_lock;
1667 	zone->uz_size = keg->uk_size;
1668 	zone->uz_flags |= (keg->uk_flags &
1669 	    (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT));
1670 
1671 	/*
1672 	 * Some internal zones don't have room allocated for the per cpu
1673 	 * caches.  If we're internal, bail out here.
1674 	 */
1675 	if (keg->uk_flags & UMA_ZFLAG_INTERNAL) {
1676 		KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0,
1677 		    ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
1678 		return (0);
1679 	}
1680 
1681 out:
1682 	if ((arg->flags & UMA_ZONE_MAXBUCKET) == 0)
1683 		zone->uz_count = bucket_select(zone->uz_size);
1684 	else
1685 		zone->uz_count = BUCKET_MAX;
1686 	zone->uz_count_min = zone->uz_count;
1687 
1688 	return (0);
1689 }
1690 
1691 /*
1692  * Keg header dtor.  This frees all data, destroys locks, frees the hash
1693  * table and removes the keg from the global list.
1694  *
1695  * Arguments/Returns follow uma_dtor specifications
1696  *	udata  unused
1697  */
1698 static void
1699 keg_dtor(void *arg, int size, void *udata)
1700 {
1701 	uma_keg_t keg;
1702 
1703 	keg = (uma_keg_t)arg;
1704 	KEG_LOCK(keg);
1705 	if (keg->uk_free != 0) {
1706 		printf("Freed UMA keg (%s) was not empty (%d items). "
1707 		    "Lost %d pages of memory.\n",
1708 		    keg->uk_name ? keg->uk_name : "",
1709 		    keg->uk_free, keg->uk_pages);
1710 	}
1711 	KEG_UNLOCK(keg);
1712 
1713 	hash_free(&keg->uk_hash);
1714 
1715 	KEG_LOCK_FINI(keg);
1716 }
1717 
1718 /*
1719  * Zone header dtor.
1720  *
1721  * Arguments/Returns follow uma_dtor specifications
1722  *	udata  unused
1723  */
1724 static void
1725 zone_dtor(void *arg, int size, void *udata)
1726 {
1727 	uma_klink_t klink;
1728 	uma_zone_t zone;
1729 	uma_keg_t keg;
1730 
1731 	zone = (uma_zone_t)arg;
1732 	keg = zone_first_keg(zone);
1733 
1734 	if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
1735 		cache_drain(zone);
1736 
1737 	rw_wlock(&uma_rwlock);
1738 	LIST_REMOVE(zone, uz_link);
1739 	rw_wunlock(&uma_rwlock);
1740 	/*
1741 	 * XXX there are some races here where
1742 	 * the zone can be drained but zone lock
1743 	 * released and then refilled before we
1744 	 * remove it... we don't care for now
1745 	 */
1746 	zone_drain_wait(zone, M_WAITOK);
1747 	/*
1748 	 * Unlink all of our kegs.
1749 	 */
1750 	while ((klink = LIST_FIRST(&zone->uz_kegs)) != NULL) {
1751 		klink->kl_keg = NULL;
1752 		LIST_REMOVE(klink, kl_link);
1753 		if (klink == &zone->uz_klink)
1754 			continue;
1755 		free(klink, M_TEMP);
1756 	}
1757 	/*
1758 	 * We only destroy kegs from non-secondary zones.
1759 	 */
1760 	if (keg != NULL && (zone->uz_flags & UMA_ZONE_SECONDARY) == 0)  {
1761 		rw_wlock(&uma_rwlock);
1762 		LIST_REMOVE(keg, uk_link);
1763 		rw_wunlock(&uma_rwlock);
1764 		zone_free_item(kegs, keg, NULL, SKIP_NONE);
1765 	}
1766 	ZONE_LOCK_FINI(zone);
1767 }
1768 
1769 /*
1770  * Traverses every zone in the system and calls a callback
1771  *
1772  * Arguments:
1773  *	zfunc  A pointer to a function which accepts a zone
1774  *		as an argument.
1775  *
1776  * Returns:
1777  *	Nothing
1778  */
1779 static void
1780 zone_foreach(void (*zfunc)(uma_zone_t))
1781 {
1782 	uma_keg_t keg;
1783 	uma_zone_t zone;
1784 
1785 	rw_rlock(&uma_rwlock);
1786 	LIST_FOREACH(keg, &uma_kegs, uk_link) {
1787 		LIST_FOREACH(zone, &keg->uk_zones, uz_link)
1788 			zfunc(zone);
1789 	}
1790 	rw_runlock(&uma_rwlock);
1791 }
1792 
1793 /* Public functions */
1794 /* See uma.h */
1795 void
1796 uma_startup(void *bootmem, int boot_pages)
1797 {
1798 	struct uma_zctor_args args;
1799 	uma_slab_t slab;
1800 	u_int slabsize;
1801 	int i;
1802 
1803 #ifdef UMA_DEBUG
1804 	printf("Creating uma keg headers zone and keg.\n");
1805 #endif
1806 	rw_init(&uma_rwlock, "UMA lock");
1807 
1808 	/* "manually" create the initial zone */
1809 	memset(&args, 0, sizeof(args));
1810 	args.name = "UMA Kegs";
1811 	args.size = sizeof(struct uma_keg);
1812 	args.ctor = keg_ctor;
1813 	args.dtor = keg_dtor;
1814 	args.uminit = zero_init;
1815 	args.fini = NULL;
1816 	args.keg = &masterkeg;
1817 	args.align = 32 - 1;
1818 	args.flags = UMA_ZFLAG_INTERNAL;
1819 	/* The initial zone has no per-CPU queues so it's smaller */
1820 	zone_ctor(kegs, sizeof(struct uma_zone), &args, M_WAITOK);
1821 
1822 #ifdef UMA_DEBUG
1823 	printf("Filling boot free list.\n");
1824 #endif
1825 	for (i = 0; i < boot_pages; i++) {
1826 		slab = (uma_slab_t)((uint8_t *)bootmem + (i * UMA_SLAB_SIZE));
1827 		slab->us_data = (uint8_t *)slab;
1828 		slab->us_flags = UMA_SLAB_BOOT;
1829 		LIST_INSERT_HEAD(&uma_boot_pages, slab, us_link);
1830 	}
1831 	mtx_init(&uma_boot_pages_mtx, "UMA boot pages", NULL, MTX_DEF);
1832 
1833 #ifdef UMA_DEBUG
1834 	printf("Creating uma zone headers zone and keg.\n");
1835 #endif
1836 	args.name = "UMA Zones";
1837 	args.size = sizeof(struct uma_zone) +
1838 	    (sizeof(struct uma_cache) * (mp_maxid + 1));
1839 	args.ctor = zone_ctor;
1840 	args.dtor = zone_dtor;
1841 	args.uminit = zero_init;
1842 	args.fini = NULL;
1843 	args.keg = NULL;
1844 	args.align = 32 - 1;
1845 	args.flags = UMA_ZFLAG_INTERNAL;
1846 	/* The initial zone has no per-CPU queues so it's smaller */
1847 	zone_ctor(zones, sizeof(struct uma_zone), &args, M_WAITOK);
1848 
1849 #ifdef UMA_DEBUG
1850 	printf("Creating slab and hash zones.\n");
1851 #endif
1852 
1853 	/* Now make a zone for slab headers */
1854 	slabzone = uma_zcreate("UMA Slabs",
1855 				sizeof(struct uma_slab),
1856 				NULL, NULL, NULL, NULL,
1857 				UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
1858 
1859 	/*
1860 	 * We also create a zone for the bigger slabs with reference
1861 	 * counts in them, to accommodate UMA_ZONE_REFCNT zones.
1862 	 */
1863 	slabsize = sizeof(struct uma_slab_refcnt);
1864 	slabsize += uma_max_ipers_ref * sizeof(uint32_t);
1865 	slabrefzone = uma_zcreate("UMA RCntSlabs",
1866 				  slabsize,
1867 				  NULL, NULL, NULL, NULL,
1868 				  UMA_ALIGN_PTR,
1869 				  UMA_ZFLAG_INTERNAL);
1870 
1871 	hashzone = uma_zcreate("UMA Hash",
1872 	    sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
1873 	    NULL, NULL, NULL, NULL,
1874 	    UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
1875 
1876 	bucket_init();
1877 
1878 	booted = UMA_STARTUP;
1879 
1880 #ifdef UMA_DEBUG
1881 	printf("UMA startup complete.\n");
1882 #endif
1883 }
1884 
1885 /* see uma.h */
1886 void
1887 uma_startup2(void)
1888 {
1889 	booted = UMA_STARTUP2;
1890 	bucket_enable();
1891 	sx_init(&uma_drain_lock, "umadrain");
1892 #ifdef UMA_DEBUG
1893 	printf("UMA startup2 complete.\n");
1894 #endif
1895 }
1896 
1897 /*
1898  * Initialize our callout handle
1899  *
1900  */
1901 
1902 static void
1903 uma_startup3(void)
1904 {
1905 #ifdef UMA_DEBUG
1906 	printf("Starting callout.\n");
1907 #endif
1908 	callout_init(&uma_callout, 1);
1909 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
1910 #ifdef UMA_DEBUG
1911 	printf("UMA startup3 complete.\n");
1912 #endif
1913 }
1914 
1915 static uma_keg_t
1916 uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
1917 		int align, uint32_t flags)
1918 {
1919 	struct uma_kctor_args args;
1920 
1921 	args.size = size;
1922 	args.uminit = uminit;
1923 	args.fini = fini;
1924 	args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align;
1925 	args.flags = flags;
1926 	args.zone = zone;
1927 	return (zone_alloc_item(kegs, &args, M_WAITOK));
1928 }
1929 
1930 /* See uma.h */
1931 void
1932 uma_set_align(int align)
1933 {
1934 
1935 	if (align != UMA_ALIGN_CACHE)
1936 		uma_align_cache = align;
1937 }
1938 
1939 /* See uma.h */
1940 uma_zone_t
1941 uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
1942 		uma_init uminit, uma_fini fini, int align, uint32_t flags)
1943 
1944 {
1945 	struct uma_zctor_args args;
1946 	uma_zone_t res;
1947 	bool locked;
1948 
1949 	/* This stuff is essential for the zone ctor */
1950 	memset(&args, 0, sizeof(args));
1951 	args.name = name;
1952 	args.size = size;
1953 	args.ctor = ctor;
1954 	args.dtor = dtor;
1955 	args.uminit = uminit;
1956 	args.fini = fini;
1957 #ifdef  INVARIANTS
1958 	/*
1959 	 * If a zone is being created with an empty constructor and
1960 	 * destructor, pass UMA constructor/destructor which checks for
1961 	 * memory use after free.
1962 	 */
1963 	if ((!(flags & (UMA_ZONE_ZINIT | UMA_ZONE_NOFREE))) &&
1964 	    ctor == NULL && dtor == NULL && uminit == NULL && fini == NULL) {
1965 		args.ctor = trash_ctor;
1966 		args.dtor = trash_dtor;
1967 		args.uminit = trash_init;
1968 		args.fini = trash_fini;
1969 	}
1970 #endif
1971 	args.align = align;
1972 	args.flags = flags;
1973 	args.keg = NULL;
1974 
1975 	if (booted < UMA_STARTUP2) {
1976 		locked = false;
1977 	} else {
1978 		sx_slock(&uma_drain_lock);
1979 		locked = true;
1980 	}
1981 	res = zone_alloc_item(zones, &args, M_WAITOK);
1982 	if (locked)
1983 		sx_sunlock(&uma_drain_lock);
1984 	return (res);
1985 }
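
/*
 * Illustrative sketch, not part of UMA itself: a typical consumer creates
 * its zone once at initialization time and allocates from it afterwards.
 * The structure, zone name, and constructor below are hypothetical; only
 * the uma_zcreate() signature and the uma_ctor prototype come from uma.h.
 *
 *	static uma_zone_t foo_zone;
 *
 *	static int
 *	foo_ctor(void *mem, int size, void *arg, int flags)
 *	{
 *		struct foo *fp = mem;
 *
 *		fp->foo_refs = 1;
 *		return (0);
 *	}
 *
 *	static void
 *	foo_sysinit(void *unused)
 *	{
 *		foo_zone = uma_zcreate("foo", sizeof(struct foo),
 *		    foo_ctor, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
 *	}
 */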
1986 
1987 /* See uma.h */
1988 uma_zone_t
1989 uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
1990 		    uma_init zinit, uma_fini zfini, uma_zone_t master)
1991 {
1992 	struct uma_zctor_args args;
1993 	uma_keg_t keg;
1994 	uma_zone_t res;
1995 	bool locked;
1996 
1997 	keg = zone_first_keg(master);
1998 	memset(&args, 0, sizeof(args));
1999 	args.name = name;
2000 	args.size = keg->uk_size;
2001 	args.ctor = ctor;
2002 	args.dtor = dtor;
2003 	args.uminit = zinit;
2004 	args.fini = zfini;
2005 	args.align = keg->uk_align;
2006 	args.flags = keg->uk_flags | UMA_ZONE_SECONDARY;
2007 	args.keg = keg;
2008 
2009 	if (booted < UMA_STARTUP2) {
2010 		locked = false;
2011 	} else {
2012 		sx_slock(&uma_drain_lock);
2013 		locked = true;
2014 	}
2015 	/* XXX Attaches only one keg of potentially many. */
2016 	res = zone_alloc_item(zones, &args, M_WAITOK);
2017 	if (locked)
2018 		sx_sunlock(&uma_drain_lock);
2019 	return (res);
2020 }
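
/*
 * Illustrative sketch with hypothetical names: a secondary zone shares the
 * master zone's keg, and therefore its item size and backing slabs, but
 * layers its own ctor/dtor on top.  The master zone must have been created
 * first, e.g. by uma_zcreate() as above.
 *
 *	foo_special_zone = uma_zsecond_create("foo special",
 *	    foo_special_ctor, foo_special_dtor, NULL, NULL, foo_zone);
 */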
2021 
2022 /* See uma.h */
2023 uma_zone_t
2024 uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor,
2025 		    uma_init zinit, uma_fini zfini, uma_import zimport,
2026 		    uma_release zrelease, void *arg, int flags)
2027 {
2028 	struct uma_zctor_args args;
2029 
2030 	memset(&args, 0, sizeof(args));
2031 	args.name = name;
2032 	args.size = size;
2033 	args.ctor = ctor;
2034 	args.dtor = dtor;
2035 	args.uminit = zinit;
2036 	args.fini = zfini;
2037 	args.import = zimport;
2038 	args.release = zrelease;
2039 	args.arg = arg;
2040 	args.align = 0;
2041 	args.flags = flags;
2042 
2043 	return (zone_alloc_item(zones, &args, M_WAITOK));
2044 }
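
/*
 * Illustrative sketch, assuming the import/release prototypes implied by
 * the zone_import()/zone_release() calls in this file; the backend_get()/
 * backend_put() helpers and all names are hypothetical.  A cache zone has
 * no keg of its own: it only provides the per-CPU bucket caches and calls
 * the import/release methods to move items to and from a backend.
 *
 *	static int
 *	foo_import(void *arg, void **store, int count, int flags)
 *	{
 *		int i;
 *
 *		for (i = 0; i < count; i++) {
 *			store[i] = backend_get(arg, flags);
 *			if (store[i] == NULL)
 *				break;
 *		}
 *		return (i);
 *	}
 *
 *	static void
 *	foo_release(void *arg, void **store, int count)
 *	{
 *		int i;
 *
 *		for (i = 0; i < count; i++)
 *			backend_put(arg, store[i]);
 *	}
 *
 *	foo_cache = uma_zcache_create("foo cache", sizeof(struct foo),
 *	    NULL, NULL, NULL, NULL, foo_import, foo_release, foo_backend, 0);
 */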
2045 
2046 static void
2047 zone_lock_pair(uma_zone_t a, uma_zone_t b)
2048 {
2049 	if (a < b) {
2050 		ZONE_LOCK(a);
2051 		mtx_lock_flags(b->uz_lockptr, MTX_DUPOK);
2052 	} else {
2053 		ZONE_LOCK(b);
2054 		mtx_lock_flags(a->uz_lockptr, MTX_DUPOK);
2055 	}
2056 }
2057 
2058 static void
2059 zone_unlock_pair(uma_zone_t a, uma_zone_t b)
2060 {
2061 
2062 	ZONE_UNLOCK(a);
2063 	ZONE_UNLOCK(b);
2064 }
2065 
2066 int
2067 uma_zsecond_add(uma_zone_t zone, uma_zone_t master)
2068 {
2069 	uma_klink_t klink;
2070 	uma_klink_t kl;
2071 	int error;
2072 
2073 	error = 0;
2074 	klink = malloc(sizeof(*klink), M_TEMP, M_WAITOK | M_ZERO);
2075 
2076 	zone_lock_pair(zone, master);
2077 	/*
2078 	 * zone must use vtoslab() to resolve objects and must already be
2079 	 * a secondary.
2080 	 */
2081 	if ((zone->uz_flags & (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY))
2082 	    != (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) {
2083 		error = EINVAL;
2084 		goto out;
2085 	}
2086 	/*
2087 	 * The new master must also use vtoslab().
2088 	 */
2089 	if ((master->uz_flags & UMA_ZONE_VTOSLAB) != UMA_ZONE_VTOSLAB) {
2090 		error = EINVAL;
2091 		goto out;
2092 	}
2093 	/*
2094 	 * Both must either be refcnt, or not be refcnt.
2095 	 */
2096 	if ((zone->uz_flags & UMA_ZONE_REFCNT) !=
2097 	    (master->uz_flags & UMA_ZONE_REFCNT)) {
2098 		error = EINVAL;
2099 		goto out;
2100 	}
2101 	/*
2102 	 * The underlying object must be the same size.  rsize
2103 	 * may be different.
2104 	 */
2105 	if (master->uz_size != zone->uz_size) {
2106 		error = E2BIG;
2107 		goto out;
2108 	}
2109 	/*
2110 	 * Put it at the end of the list.
2111 	 */
2112 	klink->kl_keg = zone_first_keg(master);
2113 	LIST_FOREACH(kl, &zone->uz_kegs, kl_link) {
2114 		if (LIST_NEXT(kl, kl_link) == NULL) {
2115 			LIST_INSERT_AFTER(kl, klink, kl_link);
2116 			break;
2117 		}
2118 	}
2119 	klink = NULL;
2120 	zone->uz_flags |= UMA_ZFLAG_MULTI;
2121 	zone->uz_slab = zone_fetch_slab_multi;
2122 
2123 out:
2124 	zone_unlock_pair(zone, master);
2125 	if (klink != NULL)
2126 		free(klink, M_TEMP);
2127 
2128 	return (error);
2129 }
2130 
2131 
2132 /* See uma.h */
2133 void
2134 uma_zdestroy(uma_zone_t zone)
2135 {
2136 
2137 	sx_slock(&uma_drain_lock);
2138 	zone_free_item(zones, zone, NULL, SKIP_NONE);
2139 	sx_sunlock(&uma_drain_lock);
2140 }
2141 
2142 /* See uma.h */
2143 void *
2144 uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
2145 {
2146 	void *item;
2147 	uma_cache_t cache;
2148 	uma_bucket_t bucket;
2149 	int lockfail;
2150 	int cpu;
2151 
2152 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
2153 	random_harvest_fast_uma(&zone, sizeof(zone), 1, RANDOM_UMA);
2154 
2155 	/* This is the fast path allocation */
2156 #ifdef UMA_DEBUG_ALLOC_1
2157 	printf("Allocating one item from %s(%p)\n", zone->uz_name, zone);
2158 #endif
2159 	CTR3(KTR_UMA, "uma_zalloc_arg thread %x zone %s flags %d", curthread,
2160 	    zone->uz_name, flags);
2161 
2162 	if (flags & M_WAITOK) {
2163 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2164 		    "uma_zalloc_arg: zone \"%s\"", zone->uz_name);
2165 	}
2166 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
2167 	    ("uma_zalloc_arg: called with spinlock or critical section held"));
2168 
2169 #ifdef DEBUG_MEMGUARD
2170 	if (memguard_cmp_zone(zone)) {
2171 		item = memguard_alloc(zone->uz_size, flags);
2172 		if (item != NULL) {
2173 			/*
2174 			 * Avoid conflict with the use-after-free
2175 			 * protecting infrastructure from INVARIANTS.
2176 			 */
2177 			if (zone->uz_init != NULL &&
2178 			    zone->uz_init != mtrash_init &&
2179 			    zone->uz_init(item, zone->uz_size, flags) != 0)
2180 				return (NULL);
2181 			if (zone->uz_ctor != NULL &&
2182 			    zone->uz_ctor != mtrash_ctor &&
2183 			    zone->uz_ctor(item, zone->uz_size, udata,
2184 			    flags) != 0) {
2185 			    	zone->uz_fini(item, zone->uz_size);
2186 				return (NULL);
2187 			}
2188 			return (item);
2189 		}
2190 		/* This is unfortunate but should not be fatal. */
2191 	}
2192 #endif
2193 	/*
2194 	 * If possible, allocate from the per-CPU cache.  There are two
2195 	 * requirements for safe access to the per-CPU cache: (1) the thread
2196 	 * accessing the cache must not be preempted or yield during access,
2197 	 * and (2) the thread must not migrate CPUs without switching which
2198 	 * cache it accesses.  We rely on a critical section to prevent
2199 	 * preemption and migration.  We release the critical section in
2200 	 * order to acquire the zone mutex if we are unable to allocate from
2201 	 * the current cache; when we re-acquire the critical section, we
2202 	 * must detect and handle migration if it has occurred.
2203 	 */
2204 	critical_enter();
2205 	cpu = curcpu;
2206 	cache = &zone->uz_cpu[cpu];
2207 
2208 zalloc_start:
2209 	bucket = cache->uc_allocbucket;
2210 	if (bucket != NULL && bucket->ub_cnt > 0) {
2211 		bucket->ub_cnt--;
2212 		item = bucket->ub_bucket[bucket->ub_cnt];
2213 #ifdef INVARIANTS
2214 		bucket->ub_bucket[bucket->ub_cnt] = NULL;
2215 #endif
2216 		KASSERT(item != NULL, ("uma_zalloc: Bucket pointer mangled."));
2217 		cache->uc_allocs++;
2218 		critical_exit();
2219 		if (zone->uz_ctor != NULL &&
2220 		    zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
2221 			atomic_add_long(&zone->uz_fails, 1);
2222 			zone_free_item(zone, item, udata, SKIP_DTOR);
2223 			return (NULL);
2224 		}
2225 #ifdef INVARIANTS
2226 		uma_dbg_alloc(zone, NULL, item);
2227 #endif
2228 		if (flags & M_ZERO)
2229 			uma_zero_item(item, zone);
2230 		return (item);
2231 	}
2232 
2233 	/*
2234 	 * We have run out of items in our alloc bucket.
2235 	 * See if we can switch with our free bucket.
2236 	 */
2237 	bucket = cache->uc_freebucket;
2238 	if (bucket != NULL && bucket->ub_cnt > 0) {
2239 #ifdef UMA_DEBUG_ALLOC
2240 		printf("uma_zalloc: Swapping empty with alloc.\n");
2241 #endif
2242 		cache->uc_freebucket = cache->uc_allocbucket;
2243 		cache->uc_allocbucket = bucket;
2244 		goto zalloc_start;
2245 	}
2246 
2247 	/*
2248 	 * Discard any empty allocation bucket while we hold no locks.
2249 	 */
2250 	bucket = cache->uc_allocbucket;
2251 	cache->uc_allocbucket = NULL;
2252 	critical_exit();
2253 	if (bucket != NULL)
2254 		bucket_free(zone, bucket, udata);
2255 
2256 	/* Short-circuit for zones without buckets and low memory. */
2257 	if (zone->uz_count == 0 || bucketdisable)
2258 		goto zalloc_item;
2259 
2260 	/*
2261 	 * The attempt to retrieve the item from the per-CPU cache has failed, so
2262 	 * we must go back to the zone.  This requires the zone lock, so we
2263 	 * must drop the critical section, then re-acquire it when we go back
2264 	 * to the cache.  Since the critical section is released, we may be
2265 	 * preempted or migrate.  As such, make sure not to maintain any
2266 	 * thread-local state specific to the cache from prior to releasing
2267 	 * the critical section.
2268 	 */
2269 	lockfail = 0;
2270 	if (ZONE_TRYLOCK(zone) == 0) {
2271 		/* Record contention to size the buckets. */
2272 		ZONE_LOCK(zone);
2273 		lockfail = 1;
2274 	}
2275 	critical_enter();
2276 	cpu = curcpu;
2277 	cache = &zone->uz_cpu[cpu];
2278 
2279 	/*
2280 	 * Since we have locked the zone we may as well send back our stats.
2281 	 */
2282 	atomic_add_long(&zone->uz_allocs, cache->uc_allocs);
2283 	atomic_add_long(&zone->uz_frees, cache->uc_frees);
2284 	cache->uc_allocs = 0;
2285 	cache->uc_frees = 0;
2286 
2287 	/* See if we lost the race to fill the cache. */
2288 	if (cache->uc_allocbucket != NULL) {
2289 		ZONE_UNLOCK(zone);
2290 		goto zalloc_start;
2291 	}
2292 
2293 	/*
2294 	 * Check the zone's cache of buckets.
2295 	 */
2296 	if ((bucket = LIST_FIRST(&zone->uz_buckets)) != NULL) {
2297 		KASSERT(bucket->ub_cnt != 0,
2298 		    ("uma_zalloc_arg: Returning an empty bucket."));
2299 
2300 		LIST_REMOVE(bucket, ub_link);
2301 		cache->uc_allocbucket = bucket;
2302 		ZONE_UNLOCK(zone);
2303 		goto zalloc_start;
2304 	}
2305 	/* We are no longer associated with this CPU. */
2306 	critical_exit();
2307 
2308 	/*
2309 	 * We bump the uz count when the cache size is insufficient to
2310 	 * handle the working set.
2311 	 */
2312 	if (lockfail && zone->uz_count < BUCKET_MAX)
2313 		zone->uz_count++;
2314 	ZONE_UNLOCK(zone);
2315 
2316 	/*
2317 	 * Now let's just fill a bucket and put it on the free list.  If that
2318 	 * works we'll restart the allocation from the beginning and it
2319 	 * will use the just filled bucket.
2320 	 */
2321 	bucket = zone_alloc_bucket(zone, udata, flags);
2322 	if (bucket != NULL) {
2323 		ZONE_LOCK(zone);
2324 		critical_enter();
2325 		cpu = curcpu;
2326 		cache = &zone->uz_cpu[cpu];
2327 		/*
2328 		 * See if we lost the race or were migrated.  Cache the
2329 		 * initialized bucket to make this less likely or claim
2330 		 * the memory directly.
2331 		 */
2332 		if (cache->uc_allocbucket == NULL)
2333 			cache->uc_allocbucket = bucket;
2334 		else
2335 			LIST_INSERT_HEAD(&zone->uz_buckets, bucket, ub_link);
2336 		ZONE_UNLOCK(zone);
2337 		goto zalloc_start;
2338 	}
2339 
2340 	/*
2341 	 * We may not be able to get a bucket so return an actual item.
2342 	 */
2343 #ifdef UMA_DEBUG
2344 	printf("uma_zalloc_arg: Bucketzone returned NULL\n");
2345 #endif
2346 
2347 zalloc_item:
2348 	item = zone_alloc_item(zone, udata, flags);
2349 
2350 	return (item);
2351 }
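
/*
 * Caller-side sketch with hypothetical names: most consumers go through the
 * uma_zalloc() wrapper from uma.h, which is uma_zalloc_arg(zone, NULL, flags).
 * M_WAITOK may sleep until memory is available; M_NOWAIT may return NULL.
 *
 *	struct foo *fp;
 *
 *	fp = uma_zalloc(foo_zone, M_WAITOK | M_ZERO);
 *
 *	fp = uma_zalloc(foo_zone, M_NOWAIT);
 *	if (fp == NULL)
 *		return (ENOMEM);
 */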
2352 
2353 static uma_slab_t
2354 keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int flags)
2355 {
2356 	uma_slab_t slab;
2357 	int reserve;
2358 
2359 	mtx_assert(&keg->uk_lock, MA_OWNED);
2360 	slab = NULL;
2361 	reserve = 0;
2362 	if ((flags & M_USE_RESERVE) == 0)
2363 		reserve = keg->uk_reserve;
2364 
2365 	for (;;) {
2366 		/*
2367 		 * Find a slab with some space.  Prefer slabs that are partially
2368 		 * used over those that are totally full.  This helps to reduce
2369 		 * fragmentation.
2370 		 */
2371 		if (keg->uk_free > reserve) {
2372 			if (!LIST_EMPTY(&keg->uk_part_slab)) {
2373 				slab = LIST_FIRST(&keg->uk_part_slab);
2374 			} else {
2375 				slab = LIST_FIRST(&keg->uk_free_slab);
2376 				LIST_REMOVE(slab, us_link);
2377 				LIST_INSERT_HEAD(&keg->uk_part_slab, slab,
2378 				    us_link);
2379 			}
2380 			MPASS(slab->us_keg == keg);
2381 			return (slab);
2382 		}
2383 
2384 		/*
2385 		 * M_NOVM means don't ask at all!
2386 		 */
2387 		if (flags & M_NOVM)
2388 			break;
2389 
2390 		if (keg->uk_maxpages && keg->uk_pages >= keg->uk_maxpages) {
2391 			keg->uk_flags |= UMA_ZFLAG_FULL;
2392 			/*
2393 			 * If this is not a multi-zone, set the FULL bit.
2394 			 * Otherwise slab_multi() takes care of it.
2395 			 */
2396 			if ((zone->uz_flags & UMA_ZFLAG_MULTI) == 0) {
2397 				zone->uz_flags |= UMA_ZFLAG_FULL;
2398 				zone_log_warning(zone);
2399 				zone_maxaction(zone);
2400 			}
2401 			if (flags & M_NOWAIT)
2402 				break;
2403 			zone->uz_sleeps++;
2404 			msleep(keg, &keg->uk_lock, PVM, "keglimit", 0);
2405 			continue;
2406 		}
2407 		slab = keg_alloc_slab(keg, zone, flags);
2408 		/*
2409 		 * If we got a slab here it's safe to mark it partially used
2410 		 * and return.  We assume that the caller is going to remove
2411 		 * at least one item.
2412 		 */
2413 		if (slab) {
2414 			MPASS(slab->us_keg == keg);
2415 			LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
2416 			return (slab);
2417 		}
2418 		/*
2419 		 * We might not have been able to get a slab but another cpu
2420 		 * could have while we were unlocked.  Check again before we
2421 		 * fail.
2422 		 */
2423 		flags |= M_NOVM;
2424 	}
2425 	return (slab);
2426 }
2427 
2428 static uma_slab_t
2429 zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int flags)
2430 {
2431 	uma_slab_t slab;
2432 
2433 	if (keg == NULL) {
2434 		keg = zone_first_keg(zone);
2435 		KEG_LOCK(keg);
2436 	}
2437 
2438 	for (;;) {
2439 		slab = keg_fetch_slab(keg, zone, flags);
2440 		if (slab)
2441 			return (slab);
2442 		if (flags & (M_NOWAIT | M_NOVM))
2443 			break;
2444 	}
2445 	KEG_UNLOCK(keg);
2446 	return (NULL);
2447 }
2448 
2449 /*
2450  * zone_fetch_slab_multi:  Fetches a slab from one available keg.  Returns
2451  * with the keg locked.  On NULL no lock is held.
2452  *
2453  * The last pointer is used to seed the search.  It is not required.
2454  */
2455 static uma_slab_t
2456 zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int rflags)
2457 {
2458 	uma_klink_t klink;
2459 	uma_slab_t slab;
2460 	uma_keg_t keg;
2461 	int flags;
2462 	int empty;
2463 	int full;
2464 
2465 	/*
2466 	 * Don't wait on the first pass.  This will skip limit tests
2467 	 * as well.  We don't want to block if we can find a provider
2468 	 * without blocking.
2469 	 */
2470 	flags = (rflags & ~M_WAITOK) | M_NOWAIT;
2471 	/*
2472 	 * Use the last slab allocated as a hint for where to start
2473 	 * the search.
2474 	 */
2475 	if (last != NULL) {
2476 		slab = keg_fetch_slab(last, zone, flags);
2477 		if (slab)
2478 			return (slab);
2479 		KEG_UNLOCK(last);
2480 	}
2481 	/*
2482 	 * Loop until we have a slab in case of transient failures
2483 	 * while M_WAITOK is specified.  I'm not sure this is 100%
2484 	 * required but we've done it for so long now.
2485 	 */
2486 	for (;;) {
2487 		empty = 0;
2488 		full = 0;
2489 		/*
2490 		 * Search the available kegs for slabs.  Be careful to hold the
2491 		 * correct lock while calling into the keg layer.
2492 		 */
2493 		LIST_FOREACH(klink, &zone->uz_kegs, kl_link) {
2494 			keg = klink->kl_keg;
2495 			KEG_LOCK(keg);
2496 			if ((keg->uk_flags & UMA_ZFLAG_FULL) == 0) {
2497 				slab = keg_fetch_slab(keg, zone, flags);
2498 				if (slab)
2499 					return (slab);
2500 			}
2501 			if (keg->uk_flags & UMA_ZFLAG_FULL)
2502 				full++;
2503 			else
2504 				empty++;
2505 			KEG_UNLOCK(keg);
2506 		}
2507 		if (rflags & (M_NOWAIT | M_NOVM))
2508 			break;
2509 		flags = rflags;
2510 		/*
2511 		 * All kegs are full.  XXX We can't atomically check all kegs
2512 		 * and sleep so just sleep for a short period and retry.
2513 		 */
2514 		if (full && !empty) {
2515 			ZONE_LOCK(zone);
2516 			zone->uz_flags |= UMA_ZFLAG_FULL;
2517 			zone->uz_sleeps++;
2518 			zone_log_warning(zone);
2519 			zone_maxaction(zone);
2520 			msleep(zone, zone->uz_lockptr, PVM,
2521 			    "zonelimit", hz/100);
2522 			zone->uz_flags &= ~UMA_ZFLAG_FULL;
2523 			ZONE_UNLOCK(zone);
2524 			continue;
2525 		}
2526 	}
2527 	return (NULL);
2528 }
2529 
2530 static void *
2531 slab_alloc_item(uma_keg_t keg, uma_slab_t slab)
2532 {
2533 	void *item;
2534 	uint8_t freei;
2535 
2536 	MPASS(keg == slab->us_keg);
2537 	mtx_assert(&keg->uk_lock, MA_OWNED);
2538 
2539 	freei = BIT_FFS(SLAB_SETSIZE, &slab->us_free) - 1;
2540 	BIT_CLR(SLAB_SETSIZE, freei, &slab->us_free);
2541 	item = slab->us_data + (keg->uk_rsize * freei);
2542 	slab->us_freecount--;
2543 	keg->uk_free--;
2544 
2545 	/* Move this slab to the full list */
2546 	if (slab->us_freecount == 0) {
2547 		LIST_REMOVE(slab, us_link);
2548 		LIST_INSERT_HEAD(&keg->uk_full_slab, slab, us_link);
2549 	}
2550 
2551 	return (item);
2552 }
2553 
2554 static int
2555 zone_import(uma_zone_t zone, void **bucket, int max, int flags)
2556 {
2557 	uma_slab_t slab;
2558 	uma_keg_t keg;
2559 	int i;
2560 
2561 	slab = NULL;
2562 	keg = NULL;
2563 	/* Try to keep the buckets totally full */
2564 	for (i = 0; i < max; ) {
2565 		if ((slab = zone->uz_slab(zone, keg, flags)) == NULL)
2566 			break;
2567 		keg = slab->us_keg;
2568 		while (slab->us_freecount && i < max) {
2569 			bucket[i++] = slab_alloc_item(keg, slab);
2570 			if (keg->uk_free <= keg->uk_reserve)
2571 				break;
2572 		}
2573 		/* Don't grab more than one slab at a time. */
2574 		flags &= ~M_WAITOK;
2575 		flags |= M_NOWAIT;
2576 	}
2577 	if (slab != NULL)
2578 		KEG_UNLOCK(keg);
2579 
2580 	return (i);
2581 }
2582 
2583 static uma_bucket_t
2584 zone_alloc_bucket(uma_zone_t zone, void *udata, int flags)
2585 {
2586 	uma_bucket_t bucket;
2587 	int max;
2588 
2589 	/* Don't wait for buckets, preserve caller's NOVM setting. */
2590 	bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM));
2591 	if (bucket == NULL)
2592 		return (NULL);
2593 
2594 	max = MIN(bucket->ub_entries, zone->uz_count);
2595 	bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket,
2596 	    max, flags);
2597 
2598 	/*
2599 	 * Initialize the memory if necessary.
2600 	 */
2601 	if (bucket->ub_cnt != 0 && zone->uz_init != NULL) {
2602 		int i;
2603 
2604 		for (i = 0; i < bucket->ub_cnt; i++)
2605 			if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size,
2606 			    flags) != 0)
2607 				break;
2608 		/*
2609 		 * If we couldn't initialize the whole bucket, put the
2610 		 * rest back onto the freelist.
2611 		 */
2612 		if (i != bucket->ub_cnt) {
2613 			zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i],
2614 			    bucket->ub_cnt - i);
2615 #ifdef INVARIANTS
2616 			bzero(&bucket->ub_bucket[i],
2617 			    sizeof(void *) * (bucket->ub_cnt - i));
2618 #endif
2619 			bucket->ub_cnt = i;
2620 		}
2621 	}
2622 
2623 	if (bucket->ub_cnt == 0) {
2624 		bucket_free(zone, bucket, udata);
2625 		atomic_add_long(&zone->uz_fails, 1);
2626 		return (NULL);
2627 	}
2628 
2629 	return (bucket);
2630 }
2631 
2632 /*
2633  * Allocates a single item from a zone.
2634  *
2635  * Arguments
2636  *	zone   The zone to alloc for.
2637  *	udata  The data to be passed to the constructor.
2638  *	flags  M_WAITOK, M_NOWAIT, M_ZERO.
2639  *
2640  * Returns
2641  *	NULL if there is no memory and M_NOWAIT is set
2642  *	An item if successful
2643  */
2644 
2645 static void *
2646 zone_alloc_item(uma_zone_t zone, void *udata, int flags)
2647 {
2648 	void *item;
2649 
2650 	item = NULL;
2651 
2652 #ifdef UMA_DEBUG_ALLOC
2653 	printf("INTERNAL: Allocating one item from %s(%p)\n", zone->uz_name, zone);
2654 #endif
2655 	if (zone->uz_import(zone->uz_arg, &item, 1, flags) != 1)
2656 		goto fail;
2657 	atomic_add_long(&zone->uz_allocs, 1);
2658 
2659 	/*
2660 	 * We have to call both the zone's init (not the keg's init)
2661 	 * and the zone's ctor.  This is because the item is going from
2662 	 * a keg slab directly to the user, and the user is expecting it
2663 	 * to be both zone-init'd as well as zone-ctor'd.
2664 	 */
2665 	if (zone->uz_init != NULL) {
2666 		if (zone->uz_init(item, zone->uz_size, flags) != 0) {
2667 			zone_free_item(zone, item, udata, SKIP_FINI);
2668 			goto fail;
2669 		}
2670 	}
2671 	if (zone->uz_ctor != NULL) {
2672 		if (zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
2673 			zone_free_item(zone, item, udata, SKIP_DTOR);
2674 			goto fail;
2675 		}
2676 	}
2677 #ifdef INVARIANTS
2678 	uma_dbg_alloc(zone, NULL, item);
2679 #endif
2680 	if (flags & M_ZERO)
2681 		uma_zero_item(item, zone);
2682 
2683 	return (item);
2684 
2685 fail:
2686 	atomic_add_long(&zone->uz_fails, 1);
2687 	return (NULL);
2688 }
2689 
2690 /* See uma.h */
2691 void
2692 uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
2693 {
2694 	uma_cache_t cache;
2695 	uma_bucket_t bucket;
2696 	int lockfail;
2697 	int cpu;
2698 
2699 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
2700 	random_harvest_fast_uma(&zone, sizeof(zone), 1, RANDOM_UMA);
2701 
2702 #ifdef UMA_DEBUG_ALLOC_1
2703 	printf("Freeing item %p to %s(%p)\n", item, zone->uz_name, zone);
2704 #endif
2705 	CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread,
2706 	    zone->uz_name);
2707 
2708 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
2709 	    ("uma_zfree_arg: called with spinlock or critical section held"));
2710 
2711         /* uma_zfree(..., NULL) does nothing, to match free(9). */
2712         if (item == NULL)
2713                 return;
2714 #ifdef DEBUG_MEMGUARD
2715 	if (is_memguard_addr(item)) {
2716 		if (zone->uz_dtor != NULL && zone->uz_dtor != mtrash_dtor)
2717 			zone->uz_dtor(item, zone->uz_size, udata);
2718 		if (zone->uz_fini != NULL && zone->uz_fini != mtrash_fini)
2719 			zone->uz_fini(item, zone->uz_size);
2720 		memguard_free(item);
2721 		return;
2722 	}
2723 #endif
2724 #ifdef INVARIANTS
2725 	if (zone->uz_flags & UMA_ZONE_MALLOC)
2726 		uma_dbg_free(zone, udata, item);
2727 	else
2728 		uma_dbg_free(zone, NULL, item);
2729 #endif
2730 	if (zone->uz_dtor != NULL)
2731 		zone->uz_dtor(item, zone->uz_size, udata);
2732 
2733 	/*
2734 	 * The race here is acceptable.  If we miss it we'll just have to wait
2735 	 * a little longer for the limits to be reset.
2736 	 */
2737 	if (zone->uz_flags & UMA_ZFLAG_FULL)
2738 		goto zfree_item;
2739 
2740 	/*
2741 	 * If possible, free to the per-CPU cache.  There are two
2742 	 * requirements for safe access to the per-CPU cache: (1) the thread
2743 	 * accessing the cache must not be preempted or yield during access,
2744 	 * and (2) the thread must not migrate CPUs without switching which
2745 	 * cache it accesses.  We rely on a critical section to prevent
2746 	 * preemption and migration.  We release the critical section in
2747 	 * order to acquire the zone mutex if we are unable to free to the
2748 	 * current cache; when we re-acquire the critical section, we must
2749 	 * detect and handle migration if it has occurred.
2750 	 */
2751 zfree_restart:
2752 	critical_enter();
2753 	cpu = curcpu;
2754 	cache = &zone->uz_cpu[cpu];
2755 
2756 zfree_start:
2757 	/*
2758 	 * Try to free into the allocbucket first to give LIFO ordering
2759 	 * for cache-hot data structures.  Spill over into the freebucket
2760 	 * if necessary.  Alloc will swap them if one runs dry.
2761 	 */
2762 	bucket = cache->uc_allocbucket;
2763 	if (bucket == NULL || bucket->ub_cnt >= bucket->ub_entries)
2764 		bucket = cache->uc_freebucket;
2765 	if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
2766 		KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL,
2767 		    ("uma_zfree: Freeing to non free bucket index."));
2768 		bucket->ub_bucket[bucket->ub_cnt] = item;
2769 		bucket->ub_cnt++;
2770 		cache->uc_frees++;
2771 		critical_exit();
2772 		return;
2773 	}
2774 
2775 	/*
2776 	 * We must go back to the zone, which requires acquiring the zone lock,
2777 	 * which in turn means we must release and re-acquire the critical
2778 	 * section.  Since the critical section is released, we may be
2779 	 * preempted or migrate.  As such, make sure not to maintain any
2780 	 * thread-local state specific to the cache from prior to releasing
2781 	 * the critical section.
2782 	 */
2783 	critical_exit();
2784 	if (zone->uz_count == 0 || bucketdisable)
2785 		goto zfree_item;
2786 
2787 	lockfail = 0;
2788 	if (ZONE_TRYLOCK(zone) == 0) {
2789 		/* Record contention to size the buckets. */
2790 		ZONE_LOCK(zone);
2791 		lockfail = 1;
2792 	}
2793 	critical_enter();
2794 	cpu = curcpu;
2795 	cache = &zone->uz_cpu[cpu];
2796 
2797 	/*
2798 	 * Since we have locked the zone we may as well send back our stats.
2799 	 */
2800 	atomic_add_long(&zone->uz_allocs, cache->uc_allocs);
2801 	atomic_add_long(&zone->uz_frees, cache->uc_frees);
2802 	cache->uc_allocs = 0;
2803 	cache->uc_frees = 0;
2804 
2805 	bucket = cache->uc_freebucket;
2806 	if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
2807 		ZONE_UNLOCK(zone);
2808 		goto zfree_start;
2809 	}
2810 	cache->uc_freebucket = NULL;
2811 
2812 	/* Can we throw this on the zone full list? */
2813 	if (bucket != NULL) {
2814 #ifdef UMA_DEBUG_ALLOC
2815 		printf("uma_zfree: Putting old bucket on the free list.\n");
2816 #endif
2817 		/* ub_cnt is pointing to the last free item */
2818 		KASSERT(bucket->ub_cnt != 0,
2819 		    ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n"));
2820 		LIST_INSERT_HEAD(&zone->uz_buckets, bucket, ub_link);
2821 	}
2822 
2823 	/* We are no longer associated with this CPU. */
2824 	critical_exit();
2825 
2826 	/*
2827 	 * We bump the uz count when the cache size is insufficient to
2828 	 * handle the working set.
2829 	 */
2830 	if (lockfail && zone->uz_count < BUCKET_MAX)
2831 		zone->uz_count++;
2832 	ZONE_UNLOCK(zone);
2833 
2834 #ifdef UMA_DEBUG_ALLOC
2835 	printf("uma_zfree: Allocating new free bucket.\n");
2836 #endif
2837 	bucket = bucket_alloc(zone, udata, M_NOWAIT);
2838 	if (bucket) {
2839 		critical_enter();
2840 		cpu = curcpu;
2841 		cache = &zone->uz_cpu[cpu];
2842 		if (cache->uc_freebucket == NULL) {
2843 			cache->uc_freebucket = bucket;
2844 			goto zfree_start;
2845 		}
2846 		/*
2847 		 * We lost the race, start over.  We have to drop our
2848 		 * critical section to free the bucket.
2849 		 */
2850 		critical_exit();
2851 		bucket_free(zone, bucket, udata);
2852 		goto zfree_restart;
2853 	}
2854 
2855 	/*
2856 	 * If nothing else caught this, we'll just do an internal free.
2857 	 */
2858 zfree_item:
2859 	zone_free_item(zone, item, udata, SKIP_DTOR);
2860 
2861 	return;
2862 }
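
/*
 * Caller-side sketch matching the allocation example above (hypothetical
 * names): uma_zfree() from uma.h is uma_zfree_arg(zone, item, NULL).  As
 * noted above, freeing NULL is a no-op, matching free(9).
 *
 *	uma_zfree(foo_zone, fp);
 *	fp = NULL;
 */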
2863 
2864 static void
2865 slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item)
2866 {
2867 	uint8_t freei;
2868 
2869 	mtx_assert(&keg->uk_lock, MA_OWNED);
2870 	MPASS(keg == slab->us_keg);
2871 
2872 	/* Do we need to remove from any lists? */
2873 	if (slab->us_freecount+1 == keg->uk_ipers) {
2874 		LIST_REMOVE(slab, us_link);
2875 		LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
2876 	} else if (slab->us_freecount == 0) {
2877 		LIST_REMOVE(slab, us_link);
2878 		LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
2879 	}
2880 
2881 	/* Slab management. */
2882 	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
2883 	BIT_SET(SLAB_SETSIZE, freei, &slab->us_free);
2884 	slab->us_freecount++;
2885 
2886 	/* Keg statistics. */
2887 	keg->uk_free++;
2888 }
2889 
2890 static void
2891 zone_release(uma_zone_t zone, void **bucket, int cnt)
2892 {
2893 	void *item;
2894 	uma_slab_t slab;
2895 	uma_keg_t keg;
2896 	uint8_t *mem;
2897 	int clearfull;
2898 	int i;
2899 
2900 	clearfull = 0;
2901 	keg = zone_first_keg(zone);
2902 	KEG_LOCK(keg);
2903 	for (i = 0; i < cnt; i++) {
2904 		item = bucket[i];
2905 		if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) {
2906 			mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
2907 			if (zone->uz_flags & UMA_ZONE_HASH) {
2908 				slab = hash_sfind(&keg->uk_hash, mem);
2909 			} else {
2910 				mem += keg->uk_pgoff;
2911 				slab = (uma_slab_t)mem;
2912 			}
2913 		} else {
2914 			slab = vtoslab((vm_offset_t)item);
2915 			if (slab->us_keg != keg) {
2916 				KEG_UNLOCK(keg);
2917 				keg = slab->us_keg;
2918 				KEG_LOCK(keg);
2919 			}
2920 		}
2921 		slab_free_item(keg, slab, item);
2922 		if (keg->uk_flags & UMA_ZFLAG_FULL) {
2923 			if (keg->uk_pages < keg->uk_maxpages) {
2924 				keg->uk_flags &= ~UMA_ZFLAG_FULL;
2925 				clearfull = 1;
2926 			}
2927 
2928 			/*
2929 			 * We can handle one more allocation. Since we're
2930 			 * clearing ZFLAG_FULL, wake up all procs blocked
2931 			 * on pages. This should be uncommon, so keeping this
2932 			 * simple for now (rather than adding count of blocked
2933 			 * threads etc).
2934 			 */
2935 			wakeup(keg);
2936 		}
2937 	}
2938 	KEG_UNLOCK(keg);
2939 	if (clearfull) {
2940 		ZONE_LOCK(zone);
2941 		zone->uz_flags &= ~UMA_ZFLAG_FULL;
2942 		wakeup(zone);
2943 		ZONE_UNLOCK(zone);
2944 	}
2945 
2946 }
2947 
2948 /*
2949  * Frees a single item to any zone.
2950  *
2951  * Arguments:
2952  *	zone   The zone to free to
2953  *	item   The item we're freeing
2954  *	udata  User supplied data for the dtor
2955  *	skip   Skip dtors and finis
2956  */
2957 static void
2958 zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip)
2959 {
2960 
2961 #ifdef INVARIANTS
2962 	if (skip == SKIP_NONE) {
2963 		if (zone->uz_flags & UMA_ZONE_MALLOC)
2964 			uma_dbg_free(zone, udata, item);
2965 		else
2966 			uma_dbg_free(zone, NULL, item);
2967 	}
2968 #endif
2969 	if (skip < SKIP_DTOR && zone->uz_dtor)
2970 		zone->uz_dtor(item, zone->uz_size, udata);
2971 
2972 	if (skip < SKIP_FINI && zone->uz_fini)
2973 		zone->uz_fini(item, zone->uz_size);
2974 
2975 	atomic_add_long(&zone->uz_frees, 1);
2976 	zone->uz_release(zone->uz_arg, &item, 1);
2977 }
2978 
2979 /* See uma.h */
2980 int
2981 uma_zone_set_max(uma_zone_t zone, int nitems)
2982 {
2983 	uma_keg_t keg;
2984 
2985 	keg = zone_first_keg(zone);
2986 	if (keg == NULL)
2987 		return (0);
2988 	KEG_LOCK(keg);
2989 	keg->uk_maxpages = (nitems / keg->uk_ipers) * keg->uk_ppera;
2990 	if (keg->uk_maxpages * keg->uk_ipers < nitems)
2991 		keg->uk_maxpages += keg->uk_ppera;
2992 	nitems = keg->uk_maxpages * keg->uk_ipers;
2993 	KEG_UNLOCK(keg);
2994 
2995 	return (nitems);
2996 }
2997 
2998 /* See uma.h */
2999 int
3000 uma_zone_get_max(uma_zone_t zone)
3001 {
3002 	int nitems;
3003 	uma_keg_t keg;
3004 
3005 	keg = zone_first_keg(zone);
3006 	if (keg == NULL)
3007 		return (0);
3008 	KEG_LOCK(keg);
3009 	nitems = keg->uk_maxpages * keg->uk_ipers;
3010 	KEG_UNLOCK(keg);
3011 
3012 	return (nitems);
3013 }
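
/*
 * Illustrative sketch with a hypothetical zone: the limit is requested in
 * items but stored as whole pages of slabs, so the effective value returned
 * by uma_zone_set_max()/uma_zone_get_max() may be rounded up.
 *
 *	int eff;
 *
 *	eff = uma_zone_set_max(foo_zone, 1000);
 *	KASSERT(eff == uma_zone_get_max(foo_zone), ("limit mismatch"));
 */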
3014 
3015 /* See uma.h */
3016 void
3017 uma_zone_set_warning(uma_zone_t zone, const char *warning)
3018 {
3019 
3020 	ZONE_LOCK(zone);
3021 	zone->uz_warning = warning;
3022 	ZONE_UNLOCK(zone);
3023 }
3024 
3025 /* See uma.h */
3026 void
3027 uma_zone_set_maxaction(uma_zone_t zone, uma_maxaction_t maxaction)
3028 {
3029 
3030 	ZONE_LOCK(zone);
3031 	TASK_INIT(&zone->uz_maxaction, 0, (task_fn_t *)maxaction, zone);
3032 	ZONE_UNLOCK(zone);
3033 }
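
/*
 * Illustrative sketch with hypothetical names: the warning string set here
 * is printed, rate-limited, by zone_log_warning() when the zone runs into
 * its limit, so it must remain valid for the lifetime of the zone.
 *
 *	uma_zone_set_max(foo_zone, maxfoo);
 *	uma_zone_set_warning(foo_zone, "kern.maxfoo limit reached");
 */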
3034 
3035 /* See uma.h */
3036 int
3037 uma_zone_get_cur(uma_zone_t zone)
3038 {
3039 	int64_t nitems;
3040 	u_int i;
3041 
3042 	ZONE_LOCK(zone);
3043 	nitems = zone->uz_allocs - zone->uz_frees;
3044 	CPU_FOREACH(i) {
3045 		/*
3046 		 * See the comment in sysctl_vm_zone_stats() regarding the
3047 		 * safety of accessing the per-cpu caches. With the zone lock
3048 		 * held, it is safe, but can potentially result in stale data.
3049 		 */
3050 		nitems += zone->uz_cpu[i].uc_allocs -
3051 		    zone->uz_cpu[i].uc_frees;
3052 	}
3053 	ZONE_UNLOCK(zone);
3054 
3055 	return (nitems < 0 ? 0 : nitems);
3056 }
3057 
3058 /* See uma.h */
3059 void
3060 uma_zone_set_init(uma_zone_t zone, uma_init uminit)
3061 {
3062 	uma_keg_t keg;
3063 
3064 	keg = zone_first_keg(zone);
3065 	KASSERT(keg != NULL, ("uma_zone_set_init: Invalid zone type"));
3066 	KEG_LOCK(keg);
3067 	KASSERT(keg->uk_pages == 0,
3068 	    ("uma_zone_set_init on non-empty keg"));
3069 	keg->uk_init = uminit;
3070 	KEG_UNLOCK(keg);
3071 }
3072 
3073 /* See uma.h */
3074 void
3075 uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
3076 {
3077 	uma_keg_t keg;
3078 
3079 	keg = zone_first_keg(zone);
3080 	KASSERT(keg != NULL, ("uma_zone_set_fini: Invalid zone type"));
3081 	KEG_LOCK(keg);
3082 	KASSERT(keg->uk_pages == 0,
3083 	    ("uma_zone_set_fini on non-empty keg"));
3084 	keg->uk_fini = fini;
3085 	KEG_UNLOCK(keg);
3086 }
3087 
3088 /* See uma.h */
3089 void
3090 uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
3091 {
3092 
3093 	ZONE_LOCK(zone);
3094 	KASSERT(zone_first_keg(zone)->uk_pages == 0,
3095 	    ("uma_zone_set_zinit on non-empty keg"));
3096 	zone->uz_init = zinit;
3097 	ZONE_UNLOCK(zone);
3098 }
3099 
3100 /* See uma.h */
3101 void
3102 uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
3103 {
3104 
3105 	ZONE_LOCK(zone);
3106 	KASSERT(zone_first_keg(zone)->uk_pages == 0,
3107 	    ("uma_zone_set_zfini on non-empty keg"));
3108 	zone->uz_fini = zfini;
3109 	ZONE_UNLOCK(zone);
3110 }
3111 
3112 /* See uma.h */
3113 /* XXX uk_freef is not actually used with the zone locked */
3114 void
3115 uma_zone_set_freef(uma_zone_t zone, uma_free freef)
3116 {
3117 	uma_keg_t keg;
3118 
3119 	keg = zone_first_keg(zone);
3120 	KASSERT(keg != NULL, ("uma_zone_set_freef: Invalid zone type"));
3121 	KEG_LOCK(keg);
3122 	keg->uk_freef = freef;
3123 	KEG_UNLOCK(keg);
3124 }
3125 
3126 /* See uma.h */
3127 /* XXX uk_allocf is not actually used with the zone locked */
3128 void
3129 uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
3130 {
3131 	uma_keg_t keg;
3132 
3133 	keg = zone_first_keg(zone);
3134 	KEG_LOCK(keg);
3135 	keg->uk_allocf = allocf;
3136 	KEG_UNLOCK(keg);
3137 }
3138 
3139 /* See uma.h */
3140 void
3141 uma_zone_reserve(uma_zone_t zone, int items)
3142 {
3143 	uma_keg_t keg;
3144 
3145 	keg = zone_first_keg(zone);
3146 	if (keg == NULL)
3147 		return;
3148 	KEG_LOCK(keg);
3149 	keg->uk_reserve = items;
3150 	KEG_UNLOCK(keg);
3151 
3152 	return;
3153 }
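
/*
 * Illustrative sketch with hypothetical names: a reserve keeps the last
 * few items of a keg for callers that pass M_USE_RESERVE, as enforced by
 * keg_fetch_slab() above; other allocations fail or sleep once only the
 * reserve remains.
 *
 *	uma_zone_reserve(foo_zone, 8);
 *	...
 *	fp = uma_zalloc(foo_zone, M_NOWAIT | M_USE_RESERVE);
 */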
3154 
3155 /* See uma.h */
3156 int
3157 uma_zone_reserve_kva(uma_zone_t zone, int count)
3158 {
3159 	uma_keg_t keg;
3160 	vm_offset_t kva;
3161 	u_int pages;
3162 
3163 	keg = zone_first_keg(zone);
3164 	if (keg == NULL)
3165 		return (0);
3166 	pages = count / keg->uk_ipers;
3167 
3168 	if (pages * keg->uk_ipers < count)
3169 		pages++;
3170 
3171 #ifdef UMA_MD_SMALL_ALLOC
3172 	if (keg->uk_ppera > 1) {
3173 #else
3174 	if (1) {
3175 #endif
3176 		kva = kva_alloc((vm_size_t)pages * UMA_SLAB_SIZE);
3177 		if (kva == 0)
3178 			return (0);
3179 	} else
3180 		kva = 0;
3181 	KEG_LOCK(keg);
3182 	keg->uk_kva = kva;
3183 	keg->uk_offset = 0;
3184 	keg->uk_maxpages = pages;
3185 #ifdef UMA_MD_SMALL_ALLOC
3186 	keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc;
3187 #else
3188 	keg->uk_allocf = noobj_alloc;
3189 #endif
3190 	keg->uk_flags |= UMA_ZONE_NOFREE;
3191 	KEG_UNLOCK(keg);
3192 
3193 	return (1);
3194 }
3195 
3196 /* See uma.h */
3197 void
3198 uma_prealloc(uma_zone_t zone, int items)
3199 {
3200 	int slabs;
3201 	uma_slab_t slab;
3202 	uma_keg_t keg;
3203 
3204 	keg = zone_first_keg(zone);
3205 	if (keg == NULL)
3206 		return;
3207 	KEG_LOCK(keg);
3208 	slabs = items / keg->uk_ipers;
3209 	if (slabs * keg->uk_ipers < items)
3210 		slabs++;
3211 	while (slabs > 0) {
3212 		slab = keg_alloc_slab(keg, zone, M_WAITOK);
3213 		if (slab == NULL)
3214 			break;
3215 		MPASS(slab->us_keg == keg);
3216 		LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
3217 		slabs--;
3218 	}
3219 	KEG_UNLOCK(keg);
3220 }
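
/*
 * Illustrative sketch with hypothetical names: preallocation is typically
 * done once at boot or module load so that later M_NOWAIT allocations in
 * critical paths are unlikely to fail, often together with a reserve.
 *
 *	foo_zone = uma_zcreate("foo", sizeof(struct foo), NULL, NULL,
 *	    NULL, NULL, UMA_ALIGN_PTR, 0);
 *	uma_zone_reserve(foo_zone, FOO_RESERVE);
 *	uma_prealloc(foo_zone, FOO_RESERVE);
 */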
3221 
3222 /* See uma.h */
3223 uint32_t *
3224 uma_find_refcnt(uma_zone_t zone, void *item)
3225 {
3226 	uma_slabrefcnt_t slabref;
3227 	uma_slab_t slab;
3228 	uma_keg_t keg;
3229 	uint32_t *refcnt;
3230 	int idx;
3231 
3232 	slab = vtoslab((vm_offset_t)item & (~UMA_SLAB_MASK));
3233 	slabref = (uma_slabrefcnt_t)slab;
3234 	keg = slab->us_keg;
3235 	KASSERT(keg->uk_flags & UMA_ZONE_REFCNT,
3236 	    ("uma_find_refcnt(): zone possibly not UMA_ZONE_REFCNT"));
3237 	idx = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
3238 	refcnt = &slabref->us_refcnt[idx];
3239 	return (refcnt);
3240 }
3241 
3242 /* See uma.h */
3243 static void
3244 uma_reclaim_locked(bool kmem_danger)
3245 {
3246 
3247 #ifdef UMA_DEBUG
3248 	printf("UMA: vm asked us to release pages!\n");
3249 #endif
3250 	sx_assert(&uma_drain_lock, SA_XLOCKED);
3251 	bucket_enable();
3252 	zone_foreach(zone_drain);
3253 	if (vm_page_count_min() || kmem_danger) {
3254 		cache_drain_safe(NULL);
3255 		zone_foreach(zone_drain);
3256 	}
3257 	/*
3258 	 * Some slabs may have been freed but this zone will be visited early;
3259 	 * we visit it again so that we can free pages that become empty once
3260 	 * other zones are drained.  We have to do the same for buckets.
3261 	 */
3262 	zone_drain(slabzone);
3263 	zone_drain(slabrefzone);
3264 	bucket_zone_drain();
3265 }
3266 
3267 void
3268 uma_reclaim(void)
3269 {
3270 
3271 	sx_xlock(&uma_drain_lock);
3272 	uma_reclaim_locked(false);
3273 	sx_xunlock(&uma_drain_lock);
3274 }
3275 
3276 static int uma_reclaim_needed;
3277 
3278 void
3279 uma_reclaim_wakeup(void)
3280 {
3281 
3282 	uma_reclaim_needed = 1;
3283 	wakeup(&uma_reclaim_needed);
3284 }
3285 
3286 void
3287 uma_reclaim_worker(void *arg __unused)
3288 {
3289 
3290 	sx_xlock(&uma_drain_lock);
3291 	for (;;) {
3292 		sx_sleep(&uma_reclaim_needed, &uma_drain_lock, PVM,
3293 		    "umarcl", 0);
3294 		if (uma_reclaim_needed) {
3295 			uma_reclaim_needed = 0;
3296 			uma_reclaim_locked(true);
3297 		}
3298 	}
3299 }
3300 
3301 /* See uma.h */
3302 int
3303 uma_zone_exhausted(uma_zone_t zone)
3304 {
3305 	int full;
3306 
3307 	ZONE_LOCK(zone);
3308 	full = (zone->uz_flags & UMA_ZFLAG_FULL);
3309 	ZONE_UNLOCK(zone);
3310 	return (full);
3311 }
3312 
3313 int
3314 uma_zone_exhausted_nolock(uma_zone_t zone)
3315 {
3316 	return (zone->uz_flags & UMA_ZFLAG_FULL);
3317 }
3318 
3319 void *
3320 uma_large_malloc(vm_size_t size, int wait)
3321 {
3322 	void *mem;
3323 	uma_slab_t slab;
3324 	uint8_t flags;
3325 
3326 	slab = zone_alloc_item(slabzone, NULL, wait);
3327 	if (slab == NULL)
3328 		return (NULL);
3329 	mem = page_alloc(NULL, size, &flags, wait);
3330 	if (mem) {
3331 		vsetslab((vm_offset_t)mem, slab);
3332 		slab->us_data = mem;
3333 		slab->us_flags = flags | UMA_SLAB_MALLOC;
3334 		slab->us_size = size;
3335 	} else {
3336 		zone_free_item(slabzone, slab, NULL, SKIP_NONE);
3337 	}
3338 
3339 	return (mem);
3340 }
3341 
3342 void
3343 uma_large_free(uma_slab_t slab)
3344 {
3345 
3346 	page_free(slab->us_data, slab->us_size, slab->us_flags);
3347 	zone_free_item(slabzone, slab, NULL, SKIP_NONE);
3348 }
3349 
3350 static void
3351 uma_zero_item(void *item, uma_zone_t zone)
3352 {
3353 
3354 	if (zone->uz_flags & UMA_ZONE_PCPU) {
3355 		for (int i = 0; i < mp_ncpus; i++)
3356 			bzero(zpcpu_get_cpu(item, i), zone->uz_size);
3357 	} else
3358 		bzero(item, zone->uz_size);
3359 }
3360 
3361 void
3362 uma_print_stats(void)
3363 {
3364 	zone_foreach(uma_print_zone);
3365 }
3366 
3367 static void
3368 slab_print(uma_slab_t slab)
3369 {
3370 	printf("slab: keg %p, data %p, freecount %d\n",
3371 		slab->us_keg, slab->us_data, slab->us_freecount);
3372 }
3373 
3374 static void
3375 cache_print(uma_cache_t cache)
3376 {
3377 	printf("alloc: %p(%d), free: %p(%d)\n",
3378 		cache->uc_allocbucket,
3379 		cache->uc_allocbucket?cache->uc_allocbucket->ub_cnt:0,
3380 		cache->uc_freebucket,
3381 		cache->uc_freebucket?cache->uc_freebucket->ub_cnt:0);
3382 }
3383 
3384 static void
3385 uma_print_keg(uma_keg_t keg)
3386 {
3387 	uma_slab_t slab;
3388 
3389 	printf("keg: %s(%p) size %d(%d) flags %#x ipers %d ppera %d "
3390 	    "out %d free %d limit %d\n",
3391 	    keg->uk_name, keg, keg->uk_size, keg->uk_rsize, keg->uk_flags,
3392 	    keg->uk_ipers, keg->uk_ppera,
3393 	    (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free,
3394 	    (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers);
3395 	printf("Part slabs:\n");
3396 	LIST_FOREACH(slab, &keg->uk_part_slab, us_link)
3397 		slab_print(slab);
3398 	printf("Free slabs:\n");
3399 	LIST_FOREACH(slab, &keg->uk_free_slab, us_link)
3400 		slab_print(slab);
3401 	printf("Full slabs:\n");
3402 	LIST_FOREACH(slab, &keg->uk_full_slab, us_link)
3403 		slab_print(slab);
3404 }
3405 
3406 void
3407 uma_print_zone(uma_zone_t zone)
3408 {
3409 	uma_cache_t cache;
3410 	uma_klink_t kl;
3411 	int i;
3412 
3413 	printf("zone: %s(%p) size %d flags %#x\n",
3414 	    zone->uz_name, zone, zone->uz_size, zone->uz_flags);
3415 	LIST_FOREACH(kl, &zone->uz_kegs, kl_link)
3416 		uma_print_keg(kl->kl_keg);
3417 	CPU_FOREACH(i) {
3418 		cache = &zone->uz_cpu[i];
3419 		printf("CPU %d Cache:\n", i);
3420 		cache_print(cache);
3421 	}
3422 }
3423 
3424 #ifdef DDB
3425 /*
3426  * Generate statistics across both the zone and its per-CPU caches.  Return
3427  * desired statistics if the pointer is non-NULL for that statistic.
3428  *
3429  * Note: does not update the zone statistics, as it can't safely clear the
3430  * per-CPU cache statistic.
3431  *
3432  * XXXRW: Following the uc_allocbucket and uc_freebucket pointers here isn't
3433  * safe from off-CPU; we should modify the caches to track this information
3434  * directly so that we don't have to.
3435  */
3436 static void
3437 uma_zone_sumstat(uma_zone_t z, int *cachefreep, uint64_t *allocsp,
3438     uint64_t *freesp, uint64_t *sleepsp)
3439 {
3440 	uma_cache_t cache;
3441 	uint64_t allocs, frees, sleeps;
3442 	int cachefree, cpu;
3443 
3444 	allocs = frees = sleeps = 0;
3445 	cachefree = 0;
3446 	CPU_FOREACH(cpu) {
3447 		cache = &z->uz_cpu[cpu];
3448 		if (cache->uc_allocbucket != NULL)
3449 			cachefree += cache->uc_allocbucket->ub_cnt;
3450 		if (cache->uc_freebucket != NULL)
3451 			cachefree += cache->uc_freebucket->ub_cnt;
3452 		allocs += cache->uc_allocs;
3453 		frees += cache->uc_frees;
3454 	}
3455 	allocs += z->uz_allocs;
3456 	frees += z->uz_frees;
3457 	sleeps += z->uz_sleeps;
3458 	if (cachefreep != NULL)
3459 		*cachefreep = cachefree;
3460 	if (allocsp != NULL)
3461 		*allocsp = allocs;
3462 	if (freesp != NULL)
3463 		*freesp = frees;
3464 	if (sleepsp != NULL)
3465 		*sleepsp = sleeps;
3466 }
3467 #endif /* DDB */
3468 
3469 static int
3470 sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS)
3471 {
3472 	uma_keg_t kz;
3473 	uma_zone_t z;
3474 	int count;
3475 
3476 	count = 0;
3477 	rw_rlock(&uma_rwlock);
3478 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
3479 		LIST_FOREACH(z, &kz->uk_zones, uz_link)
3480 			count++;
3481 	}
3482 	rw_runlock(&uma_rwlock);
3483 	return (sysctl_handle_int(oidp, &count, 0, req));
3484 }
3485 
3486 static int
3487 sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
3488 {
3489 	struct uma_stream_header ush;
3490 	struct uma_type_header uth;
3491 	struct uma_percpu_stat ups;
3492 	uma_bucket_t bucket;
3493 	struct sbuf sbuf;
3494 	uma_cache_t cache;
3495 	uma_klink_t kl;
3496 	uma_keg_t kz;
3497 	uma_zone_t z;
3498 	uma_keg_t k;
3499 	int count, error, i;
3500 
3501 	error = sysctl_wire_old_buffer(req, 0);
3502 	if (error != 0)
3503 		return (error);
3504 	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
3505 	sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL);
3506 
3507 	count = 0;
3508 	rw_rlock(&uma_rwlock);
3509 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
3510 		LIST_FOREACH(z, &kz->uk_zones, uz_link)
3511 			count++;
3512 	}
3513 
3514 	/*
3515 	 * Insert stream header.
3516 	 */
3517 	bzero(&ush, sizeof(ush));
3518 	ush.ush_version = UMA_STREAM_VERSION;
3519 	ush.ush_maxcpus = (mp_maxid + 1);
3520 	ush.ush_count = count;
3521 	(void)sbuf_bcat(&sbuf, &ush, sizeof(ush));
3522 
3523 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
3524 		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
3525 			bzero(&uth, sizeof(uth));
3526 			ZONE_LOCK(z);
3527 			strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
3528 			uth.uth_align = kz->uk_align;
3529 			uth.uth_size = kz->uk_size;
3530 			uth.uth_rsize = kz->uk_rsize;
3531 			LIST_FOREACH(kl, &z->uz_kegs, kl_link) {
3532 				k = kl->kl_keg;
3533 				uth.uth_maxpages += k->uk_maxpages;
3534 				uth.uth_pages += k->uk_pages;
3535 				uth.uth_keg_free += k->uk_free;
3536 				uth.uth_limit = (k->uk_maxpages / k->uk_ppera)
3537 				    * k->uk_ipers;
3538 			}
3539 
3540 			/*
3541 			 * A zone is secondary if it is not the first entry
3542 			 * on the keg's zone list.
3543 			 */
3544 			if ((z->uz_flags & UMA_ZONE_SECONDARY) &&
3545 			    (LIST_FIRST(&kz->uk_zones) != z))
3546 				uth.uth_zone_flags = UTH_ZONE_SECONDARY;
3547 
3548 			LIST_FOREACH(bucket, &z->uz_buckets, ub_link)
3549 				uth.uth_zone_free += bucket->ub_cnt;
3550 			uth.uth_allocs = z->uz_allocs;
3551 			uth.uth_frees = z->uz_frees;
3552 			uth.uth_fails = z->uz_fails;
3553 			uth.uth_sleeps = z->uz_sleeps;
3554 			(void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
3555 			/*
3556 			 * While it is not normally safe to access the cache
3557 			 * bucket pointers while not on the CPU that owns the
3558 			 * cache, we only allow the pointers to be exchanged
3559 			 * without the zone lock held, not invalidated, so
3560 			 * accept the possible race associated with bucket
3561 			 * exchange during monitoring.
3562 			 */
3563 			for (i = 0; i < (mp_maxid + 1); i++) {
3564 				bzero(&ups, sizeof(ups));
3565 				if (kz->uk_flags & UMA_ZFLAG_INTERNAL)
3566 					goto skip;
3567 				if (CPU_ABSENT(i))
3568 					goto skip;
3569 				cache = &z->uz_cpu[i];
3570 				if (cache->uc_allocbucket != NULL)
3571 					ups.ups_cache_free +=
3572 					    cache->uc_allocbucket->ub_cnt;
3573 				if (cache->uc_freebucket != NULL)
3574 					ups.ups_cache_free +=
3575 					    cache->uc_freebucket->ub_cnt;
3576 				ups.ups_allocs = cache->uc_allocs;
3577 				ups.ups_frees = cache->uc_frees;
3578 skip:
3579 				(void)sbuf_bcat(&sbuf, &ups, sizeof(ups));
3580 			}
3581 			ZONE_UNLOCK(z);
3582 		}
3583 	}
3584 	rw_runlock(&uma_rwlock);
3585 	error = sbuf_finish(&sbuf);
3586 	sbuf_delete(&sbuf);
3587 	return (error);
3588 }
3589 
3590 int
3591 sysctl_handle_uma_zone_max(SYSCTL_HANDLER_ARGS)
3592 {
3593 	uma_zone_t zone = *(uma_zone_t *)arg1;
3594 	int error, max;
3595 
3596 	max = uma_zone_get_max(zone);
3597 	error = sysctl_handle_int(oidp, &max, 0, req);
3598 	if (error || !req->newptr)
3599 		return (error);
3600 
3601 	uma_zone_set_max(zone, max);
3602 
3603 	return (0);
3604 }
3605 
3606 int
3607 sysctl_handle_uma_zone_cur(SYSCTL_HANDLER_ARGS)
3608 {
3609 	uma_zone_t zone = *(uma_zone_t *)arg1;
3610 	int cur;
3611 
3612 	cur = uma_zone_get_cur(zone);
3613 	return (sysctl_handle_int(oidp, &cur, 0, req));
3614 }
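
/*
 * Illustrative sketch with hypothetical OID and variable names: a subsystem
 * can expose its zone's limit and current usage through these handlers.
 * Note that arg1 must point to the uma_zone_t variable, since both handlers
 * dereference it at query time.
 *
 *	static uma_zone_t foo_zone;
 *
 *	SYSCTL_PROC(_kern, OID_AUTO, foo_max,
 *	    CTLTYPE_INT | CTLFLAG_RW, &foo_zone, 0,
 *	    sysctl_handle_uma_zone_max, "I", "Maximum number of foo items");
 *	SYSCTL_PROC(_kern, OID_AUTO, foo_cur,
 *	    CTLTYPE_INT | CTLFLAG_RD, &foo_zone, 0,
 *	    sysctl_handle_uma_zone_cur, "I", "Current number of foo items");
 */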
3615 
3616 #ifdef INVARIANTS
3617 static uma_slab_t
3618 uma_dbg_getslab(uma_zone_t zone, void *item)
3619 {
3620 	uma_slab_t slab;
3621 	uma_keg_t keg;
3622 	uint8_t *mem;
3623 
3624 	mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
3625 	if (zone->uz_flags & UMA_ZONE_VTOSLAB) {
3626 		slab = vtoslab((vm_offset_t)mem);
3627 	} else {
3628 		/*
3629 		 * It is safe to return the slab here even though the
3630 		 * zone is unlocked because the item's allocation state
3631 		 * essentially holds a reference.
3632 		 */
3633 		ZONE_LOCK(zone);
3634 		keg = LIST_FIRST(&zone->uz_kegs)->kl_keg;
3635 		if (keg->uk_flags & UMA_ZONE_HASH)
3636 			slab = hash_sfind(&keg->uk_hash, mem);
3637 		else
3638 			slab = (uma_slab_t)(mem + keg->uk_pgoff);
3639 		ZONE_UNLOCK(zone);
3640 	}
3641 
3642 	return (slab);
3643 }
3644 
3645 /*
3646  * Set up the slab's freei data such that uma_dbg_free can function.
3647  *
3648  */
3649 static void
3650 uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item)
3651 {
3652 	uma_keg_t keg;
3653 	int freei;
3654 
3655 	if (zone_first_keg(zone) == NULL)
3656 		return;
3657 	if (slab == NULL) {
3658 		slab = uma_dbg_getslab(zone, item);
3659 		if (slab == NULL)
3660 			panic("uma: item %p did not belong to zone %s\n",
3661 			    item, zone->uz_name);
3662 	}
3663 	keg = slab->us_keg;
3664 	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
3665 
3666 	if (BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree))
3667 		panic("Duplicate alloc of %p from zone %p(%s) slab %p(%d)\n",
3668 		    item, zone, zone->uz_name, slab, freei);
3669 	BIT_SET_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree);
3670 
3671 	return;
3672 }
3673 
3674 /*
3675  * Verifies freed addresses.  Checks for alignment, valid slab membership
3676  * and duplicate frees.
3677  *
3678  */
3679 static void
3680 uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
3681 {
3682 	uma_keg_t keg;
3683 	int freei;
3684 
3685 	if (zone_first_keg(zone) == NULL)
3686 		return;
3687 	if (slab == NULL) {
3688 		slab = uma_dbg_getslab(zone, item);
3689 		if (slab == NULL)
3690 			panic("uma: Freed item %p did not belong to zone %s\n",
3691 			    item, zone->uz_name);
3692 	}
3693 	keg = slab->us_keg;
3694 	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
3695 
3696 	if (freei >= keg->uk_ipers)
3697 		panic("Invalid free of %p from zone %p(%s) slab %p(%d)\n",
3698 		    item, zone, zone->uz_name, slab, freei);
3699 
3700 	if (((freei * keg->uk_rsize) + slab->us_data) != item)
3701 		panic("Unaligned free of %p from zone %p(%s) slab %p(%d)\n",
3702 		    item, zone, zone->uz_name, slab, freei);
3703 
3704 	if (!BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree))
3705 		panic("Duplicate free of %p from zone %p(%s) slab %p(%d)\n",
3706 		    item, zone, zone->uz_name, slab, freei);
3707 
3708 	BIT_CLR_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree);
3709 }
3710 #endif /* INVARIANTS */
3711 
3712 #ifdef DDB
3713 DB_SHOW_COMMAND(uma, db_show_uma)
3714 {
3715 	uint64_t allocs, frees, sleeps;
3716 	uma_bucket_t bucket;
3717 	uma_keg_t kz;
3718 	uma_zone_t z;
3719 	int cachefree;
3720 
3721 	db_printf("%18s %8s %8s %8s %12s %8s %8s\n", "Zone", "Size", "Used",
3722 	    "Free", "Requests", "Sleeps", "Bucket");
3723 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
3724 		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
3725 			if (kz->uk_flags & UMA_ZFLAG_INTERNAL) {
3726 				allocs = z->uz_allocs;
3727 				frees = z->uz_frees;
3728 				sleeps = z->uz_sleeps;
3729 				cachefree = 0;
3730 			} else
3731 				uma_zone_sumstat(z, &cachefree, &allocs,
3732 				    &frees, &sleeps);
3733 			if (!((z->uz_flags & UMA_ZONE_SECONDARY) &&
3734 			    (LIST_FIRST(&kz->uk_zones) != z)))
3735 				cachefree += kz->uk_free;
3736 			LIST_FOREACH(bucket, &z->uz_buckets, ub_link)
3737 				cachefree += bucket->ub_cnt;
3738 			db_printf("%18s %8ju %8jd %8d %12ju %8ju %8u\n",
3739 			    z->uz_name, (uintmax_t)kz->uk_size,
3740 			    (intmax_t)(allocs - frees), cachefree,
3741 			    (uintmax_t)allocs, sleeps, z->uz_count);
3742 			if (db_pager_quit)
3743 				return;
3744 		}
3745 	}
3746 }
3747 
3748 DB_SHOW_COMMAND(umacache, db_show_umacache)
3749 {
3750 	uint64_t allocs, frees;
3751 	uma_bucket_t bucket;
3752 	uma_zone_t z;
3753 	int cachefree;
3754 
3755 	db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free",
3756 	    "Requests", "Bucket");
3757 	LIST_FOREACH(z, &uma_cachezones, uz_link) {
3758 		uma_zone_sumstat(z, &cachefree, &allocs, &frees, NULL);
3759 		LIST_FOREACH(bucket, &z->uz_buckets, ub_link)
3760 			cachefree += bucket->ub_cnt;
3761 		db_printf("%18s %8ju %8jd %8d %12ju %8u\n",
3762 		    z->uz_name, (uintmax_t)z->uz_size,
3763 		    (intmax_t)(allocs - frees), cachefree,
3764 		    (uintmax_t)allocs, z->uz_count);
3765 		if (db_pager_quit)
3766 			return;
3767 	}
3768 }
3769 #endif	/* DDB */
3770