xref: /freebsd/sys/vm/uma_core.c (revision 66e576525d35c68fcb86f142ebaa5a448555c0c7)
1 /*-
2  * Copyright (c) 2002-2005, 2009 Jeffrey Roberson <jeff@FreeBSD.org>
3  * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
4  * Copyright (c) 2004-2006 Robert N. M. Watson
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice unmodified, this list of conditions, and the following
12  *    disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 /*
30  * uma_core.c  Implementation of the Universal Memory allocator
31  *
32  * This allocator is intended to replace the multitude of similar object caches
33  * in the standard FreeBSD kernel.  The intent is to be flexible as well as
34  * efficient.  A primary design goal is to return unused memory to the rest of
35  * the system.  This will make the system as a whole more flexible due to the
36  * ability to move memory to subsystems which most need it instead of leaving
37  * pools of reserved memory unused.
38  *
39  * The basic ideas stem from similar slab/zone based allocators whose algorithms
40  * are well known.
41  *
42  */
43 
44 /*
45  * TODO:
46  *	- Improve memory usage for large allocations
47  *	- Investigate cache size adjustments
48  */
49 
50 #include <sys/cdefs.h>
51 __FBSDID("$FreeBSD$");
52 
53 /* I should really use ktr.. */
54 /*
55 #define UMA_DEBUG 1
56 #define UMA_DEBUG_ALLOC 1
57 #define UMA_DEBUG_ALLOC_1 1
58 */
59 
60 #include "opt_ddb.h"
61 #include "opt_param.h"
62 
63 #include <sys/param.h>
64 #include <sys/systm.h>
65 #include <sys/kernel.h>
66 #include <sys/types.h>
67 #include <sys/queue.h>
68 #include <sys/malloc.h>
69 #include <sys/ktr.h>
70 #include <sys/lock.h>
71 #include <sys/sysctl.h>
72 #include <sys/mutex.h>
73 #include <sys/proc.h>
74 #include <sys/sbuf.h>
75 #include <sys/smp.h>
76 #include <sys/vmmeter.h>
77 
78 #include <vm/vm.h>
79 #include <vm/vm_object.h>
80 #include <vm/vm_page.h>
81 #include <vm/vm_param.h>
82 #include <vm/vm_map.h>
83 #include <vm/vm_kern.h>
84 #include <vm/vm_extern.h>
85 #include <vm/uma.h>
86 #include <vm/uma_int.h>
87 #include <vm/uma_dbg.h>
88 
89 #include <machine/vmparam.h>
90 
91 #include <ddb/ddb.h>
92 
93 /*
94  * This is the zone and keg from which all zones are spawned.  The idea is that
95  * even the zone & keg heads are allocated from the allocator, so we use the
96  * bss section to bootstrap us.
97  */
98 static struct uma_keg masterkeg;
99 static struct uma_zone masterzone_k;
100 static struct uma_zone masterzone_z;
101 static uma_zone_t kegs = &masterzone_k;
102 static uma_zone_t zones = &masterzone_z;
103 
104 /* This is the zone from which all of uma_slab_t's are allocated. */
105 static uma_zone_t slabzone;
106 static uma_zone_t slabrefzone;	/* With refcounters (for UMA_ZONE_REFCNT) */
107 
108 /*
109  * The initial hash tables come out of this zone so they can be allocated
110  * prior to malloc coming up.
111  */
112 static uma_zone_t hashzone;
113 
114 /* The boot-time adjusted value for cache line alignment. */
115 static int uma_align_cache = 64 - 1;
116 
117 static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");
118 
119 /*
120  * Are we allowed to allocate buckets?
121  */
122 static int bucketdisable = 1;
123 
124 /* Linked list of all kegs in the system */
125 static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);
126 
127 /* This mutex protects the keg list */
128 static struct mtx uma_mtx;
129 
130 /* Linked list of boot time pages */
131 static LIST_HEAD(,uma_slab) uma_boot_pages =
132     LIST_HEAD_INITIALIZER(uma_boot_pages);
133 
134 /* This mutex protects the boot time pages list */
135 static struct mtx uma_boot_pages_mtx;
136 
137 /* Is the VM done starting up? */
138 static int booted = 0;
139 
140 /* Maximum number of allowed items-per-slab if the slab header is OFFPAGE */
141 static u_int uma_max_ipers;
142 static u_int uma_max_ipers_ref;
143 
144 /*
145  * This is the handle used to schedule events that need to happen
146  * outside of the allocation fast path.
147  */
148 static struct callout uma_callout;
149 #define	UMA_TIMEOUT	20		/* Seconds for callout interval. */
150 
151 /*
152  * This structure is passed as the zone ctor arg so that I don't have to create
153  * a special allocation function just for zones.
154  */
155 struct uma_zctor_args {
156 	char *name;
157 	size_t size;
158 	uma_ctor ctor;
159 	uma_dtor dtor;
160 	uma_init uminit;
161 	uma_fini fini;
162 	uma_keg_t keg;
163 	int align;
164 	u_int32_t flags;
165 };
166 
167 struct uma_kctor_args {
168 	uma_zone_t zone;
169 	size_t size;
170 	uma_init uminit;
171 	uma_fini fini;
172 	int align;
173 	u_int32_t flags;
174 };
175 
176 struct uma_bucket_zone {
177 	uma_zone_t	ubz_zone;
178 	char		*ubz_name;
179 	int		ubz_entries;
180 };
181 
182 #define	BUCKET_MAX	128
183 
184 struct uma_bucket_zone bucket_zones[] = {
185 	{ NULL, "16 Bucket", 16 },
186 	{ NULL, "32 Bucket", 32 },
187 	{ NULL, "64 Bucket", 64 },
188 	{ NULL, "128 Bucket", 128 },
189 	{ NULL, NULL, 0}
190 };
191 
192 #define	BUCKET_SHIFT	4
193 #define	BUCKET_ZONES	((BUCKET_MAX >> BUCKET_SHIFT) + 1)
194 
195 /*
196  * bucket_size[] maps requested bucket sizes to zones that allocate a bucket
197  * of approximately the right size.
198  */
199 static uint8_t bucket_size[BUCKET_ZONES];
200 
201 /*
202  * Flags and enumerations to be passed to internal functions.
203  */
204 enum zfreeskip { SKIP_NONE, SKIP_DTOR, SKIP_FINI };
205 
206 #define	ZFREE_STATFAIL	0x00000001	/* Update zone failure statistic. */
207 #define	ZFREE_STATFREE	0x00000002	/* Update zone free statistic. */
208 
209 /* Prototypes.. */
210 
211 static void *obj_alloc(uma_zone_t, int, u_int8_t *, int);
212 static void *page_alloc(uma_zone_t, int, u_int8_t *, int);
213 static void *startup_alloc(uma_zone_t, int, u_int8_t *, int);
214 static void page_free(void *, int, u_int8_t);
215 static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int);
216 static void cache_drain(uma_zone_t);
217 static void bucket_drain(uma_zone_t, uma_bucket_t);
218 static void bucket_cache_drain(uma_zone_t zone);
219 static int keg_ctor(void *, int, void *, int);
220 static void keg_dtor(void *, int, void *);
221 static int zone_ctor(void *, int, void *, int);
222 static void zone_dtor(void *, int, void *);
223 static int zero_init(void *, int, int);
224 static void keg_small_init(uma_keg_t keg);
225 static void keg_large_init(uma_keg_t keg);
226 static void zone_foreach(void (*zfunc)(uma_zone_t));
227 static void zone_timeout(uma_zone_t zone);
228 static int hash_alloc(struct uma_hash *);
229 static int hash_expand(struct uma_hash *, struct uma_hash *);
230 static void hash_free(struct uma_hash *hash);
231 static void uma_timeout(void *);
232 static void uma_startup3(void);
233 static void *zone_alloc_item(uma_zone_t, void *, int);
234 static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip,
235     int);
236 static void bucket_enable(void);
237 static void bucket_init(void);
238 static uma_bucket_t bucket_alloc(int, int);
239 static void bucket_free(uma_bucket_t);
240 static void bucket_zone_drain(void);
241 static int zone_alloc_bucket(uma_zone_t zone, int flags);
242 static uma_slab_t zone_fetch_slab(uma_zone_t zone, uma_keg_t last, int flags);
243 static uma_slab_t zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int flags);
244 static void *slab_alloc_item(uma_zone_t zone, uma_slab_t slab);
245 static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
246     uma_fini fini, int align, u_int32_t flags);
247 static inline void zone_relock(uma_zone_t zone, uma_keg_t keg);
248 static inline void keg_relock(uma_keg_t keg, uma_zone_t zone);
249 
250 void uma_print_zone(uma_zone_t);
251 void uma_print_stats(void);
252 static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
253 static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);
254 
255 SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);
256 
257 SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLTYPE_INT,
258     0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones");
259 
260 SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
261     0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");
262 
263 /*
264  * This routine checks whether it is safe to enable buckets.
265  */
266 
267 static void
268 bucket_enable(void)
269 {
270 	if (cnt.v_free_count < cnt.v_free_min)
271 		bucketdisable = 1;
272 	else
273 		bucketdisable = 0;
274 }
275 
276 /*
277  * Initialize bucket_zones, the array of zones of buckets of various sizes.
278  *
279  * For each zone, calculate the memory required for each bucket, consisting
280  * of the header and an array of pointers.  Initialize bucket_size[] to point
281  * the range of appropriate bucket sizes at the zone.
282  */
283 static void
284 bucket_init(void)
285 {
286 	struct uma_bucket_zone *ubz;
287 	int i;
288 	int j;
289 
290 	for (i = 0, j = 0; bucket_zones[j].ubz_entries != 0; j++) {
291 		int size;
292 
293 		ubz = &bucket_zones[j];
294 		size = roundup(sizeof(struct uma_bucket), sizeof(void *));
295 		size += sizeof(void *) * ubz->ubz_entries;
296 		ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
297 		    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
298 		    UMA_ZFLAG_INTERNAL | UMA_ZFLAG_BUCKET);
299 		for (; i <= ubz->ubz_entries; i += (1 << BUCKET_SHIFT))
300 			bucket_size[i >> BUCKET_SHIFT] = j;
301 	}
302 }
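/*
 * Example (illustrative): with BUCKET_SHIFT = 4, the loop above fills
 * bucket_size[] in 16-entry steps, so a hypothetical request for 20
 * entries resolves as
 *
 *	idx = howmany(20, 1 << BUCKET_SHIFT);	  idx == 2
 *	ubz = &bucket_zones[bucket_size[idx]];	  the "32 Bucket" zone
 *
 * i.e. a request is rounded up to the smallest bucket zone that can
 * hold it.
 */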
303 
304 /*
305  * Given a desired number of entries for a bucket, return the zone from which
306  * to allocate the bucket.
307  */
308 static struct uma_bucket_zone *
309 bucket_zone_lookup(int entries)
310 {
311 	int idx;
312 
313 	idx = howmany(entries, 1 << BUCKET_SHIFT);
314 	return (&bucket_zones[bucket_size[idx]]);
315 }
316 
317 static uma_bucket_t
318 bucket_alloc(int entries, int bflags)
319 {
320 	struct uma_bucket_zone *ubz;
321 	uma_bucket_t bucket;
322 
323 	/*
324 	 * This is to stop us from allocating per cpu buckets while we're
325 	 * running out of vm.boot_pages.  Otherwise, we would exhaust the
326 	 * boot pages.  This also prevents us from allocating buckets in
327 	 * low memory situations.
328 	 */
329 	if (bucketdisable)
330 		return (NULL);
331 
332 	ubz = bucket_zone_lookup(entries);
333 	bucket = zone_alloc_item(ubz->ubz_zone, NULL, bflags);
334 	if (bucket) {
335 #ifdef INVARIANTS
336 		bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
337 #endif
338 		bucket->ub_cnt = 0;
339 		bucket->ub_entries = ubz->ubz_entries;
340 	}
341 
342 	return (bucket);
343 }
344 
345 static void
346 bucket_free(uma_bucket_t bucket)
347 {
348 	struct uma_bucket_zone *ubz;
349 
350 	ubz = bucket_zone_lookup(bucket->ub_entries);
351 	zone_free_item(ubz->ubz_zone, bucket, NULL, SKIP_NONE,
352 	    ZFREE_STATFREE);
353 }
354 
355 static void
356 bucket_zone_drain(void)
357 {
358 	struct uma_bucket_zone *ubz;
359 
360 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
361 		zone_drain(ubz->ubz_zone);
362 }
363 
364 static inline uma_keg_t
365 zone_first_keg(uma_zone_t zone)
366 {
367 
368 	return (LIST_FIRST(&zone->uz_kegs)->kl_keg);
369 }
370 
371 static void
372 zone_foreach_keg(uma_zone_t zone, void (*kegfn)(uma_keg_t))
373 {
374 	uma_klink_t klink;
375 
376 	LIST_FOREACH(klink, &zone->uz_kegs, kl_link)
377 		kegfn(klink->kl_keg);
378 }
379 
380 /*
381  * Routine called by the timeout handler to fire off time-interval-based
382  * calculations (stats, hash size, etc.).
383  *
384  * Arguments:
385  *	arg   Unused
386  *
387  * Returns:
388  *	Nothing
389  */
390 static void
391 uma_timeout(void *unused)
392 {
393 	bucket_enable();
394 	zone_foreach(zone_timeout);
395 
396 	/* Reschedule this event */
397 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
398 }
399 
400 /*
401  * Routine to perform timeout-driven calculations.  This expands the
402  * keg's hash table when the number of slabs outgrows it.
403  *
404  * Returns nothing.
405  */
406 static void
407 keg_timeout(uma_keg_t keg)
408 {
409 
410 	KEG_LOCK(keg);
411 	/*
412 	 * Expand the keg hash table.
413 	 *
414 	 * This is done if the number of slabs is larger than the hash size.
415 	 * What I'm trying to do here is eliminate collisions entirely.  This
416 	 * may be a little aggressive.  Should I allow for two collisions max?
417 	 */
418 	if (keg->uk_flags & UMA_ZONE_HASH &&
419 	    keg->uk_pages / keg->uk_ppera >= keg->uk_hash.uh_hashsize) {
420 		struct uma_hash newhash;
421 		struct uma_hash oldhash;
422 		int ret;
423 
424 		/*
425 		 * This is so involved because allocating and freeing
426 		 * while the keg lock is held will lead to deadlock.
427 		 * I have to do everything in stages and check for
428 		 * races.
429 		 */
430 		newhash = keg->uk_hash;
431 		KEG_UNLOCK(keg);
432 		ret = hash_alloc(&newhash);
433 		KEG_LOCK(keg);
434 		if (ret) {
435 			if (hash_expand(&keg->uk_hash, &newhash)) {
436 				oldhash = keg->uk_hash;
437 				keg->uk_hash = newhash;
438 			} else
439 				oldhash = newhash;
440 
441 			KEG_UNLOCK(keg);
442 			hash_free(&oldhash);
443 			KEG_LOCK(keg);
444 		}
445 	}
446 	KEG_UNLOCK(keg);
447 }
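/*
 * The staged pattern used above, sketched (illustrative):
 *
 *	copy the hash state under the keg lock;
 *	drop the lock and allocate the larger table;
 *	retake the lock and re-check for a race (hash_expand() fails
 *	if someone else already grew the table);
 *	install the winner, then free the loser with the lock dropped.
 *
 * The re-check after reacquiring the lock is what makes the unlocked
 * allocation safe against a concurrent expansion.
 */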
448 
449 static void
450 zone_timeout(uma_zone_t zone)
451 {
452 
453 	zone_foreach_keg(zone, &keg_timeout);
454 }
455 
456 /*
457  * Allocate and zero fill the next sized hash table from the appropriate
458  * backing store.
459  *
460  * Arguments:
461  *	hash  A new hash structure with the old hash size in uh_hashsize
462  *
463  * Returns:
464  *	1 on success and 0 on failure.
465  */
466 static int
467 hash_alloc(struct uma_hash *hash)
468 {
469 	int oldsize;
470 	int alloc;
471 
472 	oldsize = hash->uh_hashsize;
473 
474 	/* We're just going to go to the next greater power of two. */
475 	if (oldsize)  {
476 		hash->uh_hashsize = oldsize * 2;
477 		alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
478 		hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
479 		    M_UMAHASH, M_NOWAIT);
480 	} else {
481 		alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
482 		hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
483 		    M_WAITOK);
484 		hash->uh_hashsize = UMA_HASH_SIZE_INIT;
485 	}
486 	if (hash->uh_slab_hash) {
487 		bzero(hash->uh_slab_hash, alloc);
488 		hash->uh_hashmask = hash->uh_hashsize - 1;
489 		return (1);
490 	}
491 
492 	return (0);
493 }
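/*
 * Example growth sequence (illustrative; assumes UMA_HASH_SIZE_INIT is
 * 32, its historical value): the first hash_alloc() takes a 32-entry
 * table from hashzone; each later call doubles the table via
 * malloc(9), so uh_hashsize runs 32, 64, 128, ... and uh_hashmask
 * stays uh_hashsize - 1, keeping the hash lookup a cheap mask
 * operation.
 */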
494 
495 /*
496  * Expands the hash table for HASH zones.  This is done from zone_timeout
497  * to reduce collisions.  This must not be done in the regular allocation
498  * path; otherwise, we can recurse on the VM while allocating pages.
499  *
500  * Arguments:
501  *	oldhash  The hash you want to expand
502  *	newhash  The hash structure for the new table
503  *
504  * Returns:
505  *	1 on success and 0 on failure.
506  */
509 static int
510 hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
511 {
512 	uma_slab_t slab;
513 	int hval;
514 	int i;
515 
516 	if (!newhash->uh_slab_hash)
517 		return (0);
518 
519 	if (oldhash->uh_hashsize >= newhash->uh_hashsize)
520 		return (0);
521 
522 	/*
523 	 * I need to investigate hash algorithms for resizing without a
524 	 * full rehash.
525 	 */
526 
527 	for (i = 0; i < oldhash->uh_hashsize; i++)
528 		while (!SLIST_EMPTY(&oldhash->uh_slab_hash[i])) {
529 			slab = SLIST_FIRST(&oldhash->uh_slab_hash[i]);
530 			SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[i], us_hlink);
531 			hval = UMA_HASH(newhash, slab->us_data);
532 			SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
533 			    slab, us_hlink);
534 		}
535 
536 	return (1);
537 }
538 
539 /*
540  * Free the hash bucket to the appropriate backing store.
541  *
542  * Arguments:
543  *	hash  The hash structure whose slab table is being freed
545  *
546  * Returns:
547  *	Nothing
548  */
549 static void
550 hash_free(struct uma_hash *hash)
551 {
552 	if (hash->uh_slab_hash == NULL)
553 		return;
554 	if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
555 		zone_free_item(hashzone,
556 		    hash->uh_slab_hash, NULL, SKIP_NONE, ZFREE_STATFREE);
557 	else
558 		free(hash->uh_slab_hash, M_UMAHASH);
559 }
560 
561 /*
562  * Frees all outstanding items in a bucket
563  *
564  * Arguments:
565  *	zone   The zone to free to, must be unlocked.
566  *	bucket The free/alloc bucket with items, cpu queue must be locked.
567  *
568  * Returns:
569  *	Nothing
570  */
571 
572 static void
573 bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
574 {
575 	void *item;
576 
577 	if (bucket == NULL)
578 		return;
579 
580 	while (bucket->ub_cnt > 0)  {
581 		bucket->ub_cnt--;
582 		item = bucket->ub_bucket[bucket->ub_cnt];
583 #ifdef INVARIANTS
584 		bucket->ub_bucket[bucket->ub_cnt] = NULL;
585 		KASSERT(item != NULL,
586 		    ("bucket_drain: botched ptr, item is NULL"));
587 #endif
588 		zone_free_item(zone, item, NULL, SKIP_DTOR, 0);
589 	}
590 }
591 
592 /*
593  * Drains the per cpu caches for a zone.
594  *
595  * NOTE: This may only be called while the zone is being torn down, and not
596  * during normal operation.  This is necessary in order that we do not have
597  * to migrate CPUs to drain the per-CPU caches.
598  *
599  * Arguments:
600  *	zone     The zone to drain, must be unlocked.
601  *
602  * Returns:
603  *	Nothing
604  */
605 static void
606 cache_drain(uma_zone_t zone)
607 {
608 	uma_cache_t cache;
609 	int cpu;
610 
611 	/*
612 	 * XXX: It is safe to not lock the per-CPU caches, because we're
613 	 * tearing down the zone anyway.  I.e., there will be no further use
614 	 * of the caches at this point.
615 	 *
616 	 * XXX: It would be good to be able to assert that the zone is being
617 	 * torn down to prevent improper use of cache_drain().
618 	 *
619 	 * XXX: We lock the zone before passing into bucket_cache_drain() as
620 	 * it is used elsewhere.  Should the tear-down path be made special
621 	 * there in some form?
622 	 */
623 	CPU_FOREACH(cpu) {
624 		cache = &zone->uz_cpu[cpu];
625 		bucket_drain(zone, cache->uc_allocbucket);
626 		bucket_drain(zone, cache->uc_freebucket);
627 		if (cache->uc_allocbucket != NULL)
628 			bucket_free(cache->uc_allocbucket);
629 		if (cache->uc_freebucket != NULL)
630 			bucket_free(cache->uc_freebucket);
631 		cache->uc_allocbucket = cache->uc_freebucket = NULL;
632 	}
633 	ZONE_LOCK(zone);
634 	bucket_cache_drain(zone);
635 	ZONE_UNLOCK(zone);
636 }
637 
638 /*
639  * Drain the cached buckets from a zone.  Expects a locked zone on entry.
640  */
641 static void
642 bucket_cache_drain(uma_zone_t zone)
643 {
644 	uma_bucket_t bucket;
645 
646 	/*
647 	 * Drain the bucket queues and free the buckets, we just keep two per
648 	 * cpu (alloc/free).
649 	 */
650 	while ((bucket = LIST_FIRST(&zone->uz_full_bucket)) != NULL) {
651 		LIST_REMOVE(bucket, ub_link);
652 		ZONE_UNLOCK(zone);
653 		bucket_drain(zone, bucket);
654 		bucket_free(bucket);
655 		ZONE_LOCK(zone);
656 	}
657 
658 	/* Now we do the free queue.. */
659 	while ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
660 		LIST_REMOVE(bucket, ub_link);
661 		bucket_free(bucket);
662 	}
663 }
664 
665 /*
666  * Frees pages from a keg back to the system.  This is done on demand from
667  * the pageout daemon.
668  *
669  * Returns nothing.
670  */
671 static void
672 keg_drain(uma_keg_t keg)
673 {
674 	struct slabhead freeslabs = { 0 };
675 	uma_slab_t slab;
676 	uma_slab_t n;
677 	u_int8_t flags;
678 	u_int8_t *mem;
679 	int i;
680 
681 	/*
682 	 * We don't want to take pages from statically allocated kegs at this
683 	 * time
684 	 */
685 	if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
686 		return;
687 
688 #ifdef UMA_DEBUG
689 	printf("%s free items: %u\n", keg->uk_name, keg->uk_free);
690 #endif
691 	KEG_LOCK(keg);
692 	if (keg->uk_free == 0)
693 		goto finished;
694 
695 	slab = LIST_FIRST(&keg->uk_free_slab);
696 	while (slab) {
697 		n = LIST_NEXT(slab, us_link);
698 
699 		/* We have nowhere to free these to. */
700 		if (slab->us_flags & UMA_SLAB_BOOT) {
701 			slab = n;
702 			continue;
703 		}
704 
705 		LIST_REMOVE(slab, us_link);
706 		keg->uk_pages -= keg->uk_ppera;
707 		keg->uk_free -= keg->uk_ipers;
708 
709 		if (keg->uk_flags & UMA_ZONE_HASH)
710 			UMA_HASH_REMOVE(&keg->uk_hash, slab, slab->us_data);
711 
712 		SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);
713 
714 		slab = n;
715 	}
716 finished:
717 	KEG_UNLOCK(keg);
718 
719 	while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
720 		SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
721 		if (keg->uk_fini)
722 			for (i = 0; i < keg->uk_ipers; i++)
723 				keg->uk_fini(
724 				    slab->us_data + (keg->uk_rsize * i),
725 				    keg->uk_size);
726 		flags = slab->us_flags;
727 		mem = slab->us_data;
728 
729 		if (keg->uk_flags & UMA_ZONE_VTOSLAB) {
730 			vm_object_t obj;
731 
732 			if (flags & UMA_SLAB_KMEM)
733 				obj = kmem_object;
734 			else if (flags & UMA_SLAB_KERNEL)
735 				obj = kernel_object;
736 			else
737 				obj = NULL;
738 			for (i = 0; i < keg->uk_ppera; i++)
739 				vsetobj((vm_offset_t)mem + (i * PAGE_SIZE),
740 				    obj);
741 		}
742 		if (keg->uk_flags & UMA_ZONE_OFFPAGE)
743 			zone_free_item(keg->uk_slabzone, slab, NULL,
744 			    SKIP_NONE, ZFREE_STATFREE);
745 #ifdef UMA_DEBUG
746 		printf("%s: Returning %d bytes.\n",
747 		    keg->uk_name, UMA_SLAB_SIZE * keg->uk_ppera);
748 #endif
749 		keg->uk_freef(mem, UMA_SLAB_SIZE * keg->uk_ppera, flags);
750 	}
751 }
752 
753 static void
754 zone_drain_wait(uma_zone_t zone, int waitok)
755 {
756 
757 	/*
758 	 * Set draining to interlock with zone_dtor() so we can release our
759 	 * locks as we go.  Only dtor() should do a WAITOK call since it
760 	 * is the only call that knows the structure will still be available
761 	 * when it wakes up.
762 	 */
763 	ZONE_LOCK(zone);
764 	while (zone->uz_flags & UMA_ZFLAG_DRAINING) {
765 		if (waitok == M_NOWAIT)
766 			goto out;
767 		mtx_unlock(&uma_mtx);
768 		msleep(zone, zone->uz_lock, PVM, "zonedrain", 1);
769 		mtx_lock(&uma_mtx);
770 	}
771 	zone->uz_flags |= UMA_ZFLAG_DRAINING;
772 	bucket_cache_drain(zone);
773 	ZONE_UNLOCK(zone);
774 	/*
775 	 * The DRAINING flag protects us from being freed while
776 	 * we're running.  Normally the uma_mtx would protect us but we
777 	 * must be able to release and acquire the right lock for each keg.
778 	 */
779 	zone_foreach_keg(zone, &keg_drain);
780 	ZONE_LOCK(zone);
781 	zone->uz_flags &= ~UMA_ZFLAG_DRAINING;
782 	wakeup(zone);
783 out:
784 	ZONE_UNLOCK(zone);
785 }
786 
787 void
788 zone_drain(uma_zone_t zone)
789 {
790 
791 	zone_drain_wait(zone, M_NOWAIT);
792 }
793 
794 /*
795  * Allocate a new slab for a keg.  This does not insert the slab onto a list.
796  *
797  * Arguments:
798  *	wait  Shall we wait?
799  *
800  * Returns:
801  *	The slab that was allocated or NULL if there is no memory and the
802  *	caller specified M_NOWAIT.
803  */
804 static uma_slab_t
805 keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int wait)
806 {
807 	uma_slabrefcnt_t slabref;
808 	uma_alloc allocf;
809 	uma_slab_t slab;
810 	u_int8_t *mem;
811 	u_int8_t flags;
812 	int i;
813 
814 	mtx_assert(&keg->uk_lock, MA_OWNED);
815 	slab = NULL;
816 
817 #ifdef UMA_DEBUG
818 	printf("keg_alloc_slab: Allocating a new slab for %s\n", keg->uk_name);
819 #endif
820 	allocf = keg->uk_allocf;
821 	KEG_UNLOCK(keg);
822 
823 	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
824 		slab = zone_alloc_item(keg->uk_slabzone, NULL, wait);
825 		if (slab == NULL) {
826 			KEG_LOCK(keg);
827 			return (NULL);
828 		}
829 	}
830 
831 	/*
832 	 * This reproduces the old vm_zone behavior of zero filling pages the
833 	 * first time they are added to a zone.
834 	 *
835 	 * Malloced items are zeroed in uma_zalloc.
836 	 */
837 
838 	if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
839 		wait |= M_ZERO;
840 	else
841 		wait &= ~M_ZERO;
842 
843 	/* zone is passed for legacy reasons. */
844 	mem = allocf(zone, keg->uk_ppera * UMA_SLAB_SIZE, &flags, wait);
845 	if (mem == NULL) {
846 		if (keg->uk_flags & UMA_ZONE_OFFPAGE)
847 			zone_free_item(keg->uk_slabzone, slab, NULL,
848 			    SKIP_NONE, ZFREE_STATFREE);
849 		KEG_LOCK(keg);
850 		return (NULL);
851 	}
852 
853 	/* Point the slab into the allocated memory */
854 	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
855 		slab = (uma_slab_t)(mem + keg->uk_pgoff);
856 
857 	if (keg->uk_flags & UMA_ZONE_VTOSLAB)
858 		for (i = 0; i < keg->uk_ppera; i++)
859 			vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);
860 
861 	slab->us_keg = keg;
862 	slab->us_data = mem;
863 	slab->us_freecount = keg->uk_ipers;
864 	slab->us_firstfree = 0;
865 	slab->us_flags = flags;
866 
867 	if (keg->uk_flags & UMA_ZONE_REFCNT) {
868 		slabref = (uma_slabrefcnt_t)slab;
869 		for (i = 0; i < keg->uk_ipers; i++) {
870 			slabref->us_freelist[i].us_refcnt = 0;
871 			slabref->us_freelist[i].us_item = i+1;
872 		}
873 	} else {
874 		for (i = 0; i < keg->uk_ipers; i++)
875 			slab->us_freelist[i].us_item = i+1;
876 	}
877 
878 	if (keg->uk_init != NULL) {
879 		for (i = 0; i < keg->uk_ipers; i++)
880 			if (keg->uk_init(slab->us_data + (keg->uk_rsize * i),
881 			    keg->uk_size, wait) != 0)
882 				break;
883 		if (i != keg->uk_ipers) {
884 			if (keg->uk_fini != NULL) {
885 				for (i--; i > -1; i--)
886 					keg->uk_fini(slab->us_data +
887 					    (keg->uk_rsize * i),
888 					    keg->uk_size);
889 			}
890 			if (keg->uk_flags & UMA_ZONE_VTOSLAB) {
891 				vm_object_t obj;
892 
893 				if (flags & UMA_SLAB_KMEM)
894 					obj = kmem_object;
895 				else if (flags & UMA_SLAB_KERNEL)
896 					obj = kernel_object;
897 				else
898 					obj = NULL;
899 				for (i = 0; i < keg->uk_ppera; i++)
900 					vsetobj((vm_offset_t)mem +
901 					    (i * PAGE_SIZE), obj);
902 			}
903 			if (keg->uk_flags & UMA_ZONE_OFFPAGE)
904 				zone_free_item(keg->uk_slabzone, slab,
905 				    NULL, SKIP_NONE, ZFREE_STATFREE);
906 			keg->uk_freef(mem, UMA_SLAB_SIZE * keg->uk_ppera,
907 			    flags);
908 			KEG_LOCK(keg);
909 			return (NULL);
910 		}
911 	}
912 	KEG_LOCK(keg);
913 
914 	if (keg->uk_flags & UMA_ZONE_HASH)
915 		UMA_HASH_INSERT(&keg->uk_hash, slab, mem);
916 
917 	keg->uk_pages += keg->uk_ppera;
918 	keg->uk_free += keg->uk_ipers;
919 
920 	return (slab);
921 }
922 
923 /*
924  * This function is intended to be used early on in place of page_alloc() so
925  * that we may use the boot time page cache to satisfy allocations before
926  * the VM is ready.
927  */
928 static void *
929 startup_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
930 {
931 	uma_keg_t keg;
932 	uma_slab_t tmps;
933 
934 	keg = zone_first_keg(zone);
935 
936 	/*
937 	 * Check our small startup cache to see if it has pages remaining.
938 	 */
939 	mtx_lock(&uma_boot_pages_mtx);
940 	if ((tmps = LIST_FIRST(&uma_boot_pages)) != NULL) {
941 		LIST_REMOVE(tmps, us_link);
942 		mtx_unlock(&uma_boot_pages_mtx);
943 		*pflag = tmps->us_flags;
944 		return (tmps->us_data);
945 	}
946 	mtx_unlock(&uma_boot_pages_mtx);
947 	if (booted == 0)
948 		panic("UMA: Increase vm.boot_pages");
949 	/*
950 	 * Now that we've booted reset these users to their real allocator.
951 	 */
952 #ifdef UMA_MD_SMALL_ALLOC
953 	keg->uk_allocf = uma_small_alloc;
954 #else
955 	keg->uk_allocf = page_alloc;
956 #endif
957 	return (keg->uk_allocf(zone, bytes, pflag, wait));
958 }
959 
960 /*
961  * Allocates a number of pages from the system
962  *
963  * Arguments:
964  *	bytes  The number of bytes requested
965  *	wait  Shall we wait?
966  *
967  * Returns:
968  *	A pointer to the allocated memory or possibly
969  *	NULL if M_NOWAIT is set.
970  */
971 static void *
972 page_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
973 {
974 	void *p;	/* Returned page */
975 
976 	*pflag = UMA_SLAB_KMEM;
977 	p = (void *) kmem_malloc(kmem_map, bytes, wait);
978 
979 	return (p);
980 }
981 
982 /*
983  * Allocates a number of pages from within an object
984  *
985  * Arguments:
986  *	bytes  The number of bytes requested
987  *	wait   Shall we wait?
988  *
989  * Returns:
990  *	A pointer to the allocated memory or possibly
991  *	NULL if M_NOWAIT is set.
992  */
993 static void *
994 obj_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
995 {
996 	vm_object_t object;
997 	vm_offset_t retkva, zkva;
998 	vm_page_t p;
999 	int pages, startpages;
1000 	uma_keg_t keg;
1001 
1002 	keg = zone_first_keg(zone);
1003 	object = keg->uk_obj;
1004 	retkva = 0;
1005 
1006 	/*
1007 	 * This looks a little weird since we're getting one page at a time.
1008 	 */
1009 	VM_OBJECT_LOCK(object);
1010 	p = TAILQ_LAST(&object->memq, pglist);
1011 	pages = p != NULL ? p->pindex + 1 : 0;
1012 	startpages = pages;
1013 	zkva = keg->uk_kva + pages * PAGE_SIZE;
1014 	for (; bytes > 0; bytes -= PAGE_SIZE) {
1015 		p = vm_page_alloc(object, pages,
1016 		    VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED);
1017 		if (p == NULL) {
1018 			if (pages != startpages)
1019 				pmap_qremove(retkva, pages - startpages);
1020 			while (pages != startpages) {
1021 				pages--;
1022 				p = TAILQ_LAST(&object->memq, pglist);
1023 				vm_page_unwire(p, 0);
1024 				vm_page_free(p);
1025 			}
1026 			retkva = 0;
1027 			goto done;
1028 		}
1029 		pmap_qenter(zkva, &p, 1);
1030 		if (retkva == 0)
1031 			retkva = zkva;
1032 		zkva += PAGE_SIZE;
1033 		pages += 1;
1034 	}
1035 done:
1036 	VM_OBJECT_UNLOCK(object);
1037 	*flags = UMA_SLAB_PRIV;
1038 
1039 	return ((void *)retkva);
1040 }
1041 
1042 /*
1043  * Frees a number of pages to the system
1044  *
1045  * Arguments:
1046  *	mem   A pointer to the memory to be freed
1047  *	size  The size of the memory being freed
1048  *	flags The original p->us_flags field
1049  *
1050  * Returns:
1051  *	Nothing
1052  */
1053 static void
1054 page_free(void *mem, int size, u_int8_t flags)
1055 {
1056 	vm_map_t map;
1057 
1058 	if (flags & UMA_SLAB_KMEM)
1059 		map = kmem_map;
1060 	else if (flags & UMA_SLAB_KERNEL)
1061 		map = kernel_map;
1062 	else
1063 		panic("UMA: page_free used with invalid flags %d", flags);
1064 
1065 	kmem_free(map, (vm_offset_t)mem, size);
1066 }
1067 
1068 /*
1069  * Zero fill initializer
1070  *
1071  * Arguments/Returns follow uma_init specifications
1072  */
1073 static int
1074 zero_init(void *mem, int size, int flags)
1075 {
1076 	bzero(mem, size);
1077 	return (0);
1078 }
1079 
1080 /*
1081  * Finish creating a small uma keg.  This calculates ipers and the keg size.
1082  *
1083  * Arguments
1084  *	keg  The keg we should initialize
1085  *
1086  * Returns
1087  *	Nothing
1088  */
1089 static void
1090 keg_small_init(uma_keg_t keg)
1091 {
1092 	u_int rsize;
1093 	u_int memused;
1094 	u_int wastedspace;
1095 	u_int shsize;
1096 
1097 	KASSERT(keg != NULL, ("Keg is null in keg_small_init"));
1098 	rsize = keg->uk_size;
1099 
1100 	if (rsize < UMA_SMALLEST_UNIT)
1101 		rsize = UMA_SMALLEST_UNIT;
1102 	if (rsize & keg->uk_align)
1103 		rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1);
1104 
1105 	keg->uk_rsize = rsize;
1106 	keg->uk_ppera = 1;
1107 
1108 	if (keg->uk_flags & UMA_ZONE_REFCNT) {
1109 		rsize += UMA_FRITMREF_SZ;	/* linkage & refcnt */
1110 		shsize = sizeof(struct uma_slab_refcnt);
1111 	} else {
1112 		rsize += UMA_FRITM_SZ;	/* Account for linkage */
1113 		shsize = sizeof(struct uma_slab);
1114 	}
1115 
1116 	keg->uk_ipers = (UMA_SLAB_SIZE - shsize) / rsize;
1117 	KASSERT(keg->uk_ipers != 0, ("keg_small_init: ipers is 0"));
1118 	memused = keg->uk_ipers * rsize + shsize;
1119 	wastedspace = UMA_SLAB_SIZE - memused;
1120 
1121 	/*
1122 	 * We can't do OFFPAGE if we're internal or if we've been
1123 	 * asked to not go to the VM for buckets.  If we do this we
1124 	 * may end up going to the VM (kmem_map) for slabs which we
1125 	 * do not want to do if we're UMA_ZFLAG_CACHEONLY as a
1126 	 * result of UMA_ZONE_VM, which clearly forbids it.
1127 	 */
1128 	if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
1129 	    (keg->uk_flags & UMA_ZFLAG_CACHEONLY))
1130 		return;
1131 
1132 	if ((wastedspace >= UMA_MAX_WASTE) &&
1133 	    (keg->uk_ipers < (UMA_SLAB_SIZE / keg->uk_rsize))) {
1134 		keg->uk_ipers = UMA_SLAB_SIZE / keg->uk_rsize;
1135 		KASSERT(keg->uk_ipers <= 255,
1136 		    ("keg_small_init: keg->uk_ipers too high!"));
1137 #ifdef UMA_DEBUG
1138 		printf("UMA decided we need offpage slab headers for "
1139 		    "keg: %s, calculated wastedspace = %d, "
1140 		    "maximum wasted space allowed = %d, "
1141 		    "calculated ipers = %d, "
1142 		    "new wasted space = %d\n", keg->uk_name, wastedspace,
1143 		    UMA_MAX_WASTE, keg->uk_ipers,
1144 		    UMA_SLAB_SIZE - keg->uk_ipers * keg->uk_rsize);
1145 #endif
1146 		keg->uk_flags |= UMA_ZONE_OFFPAGE;
1147 		if ((keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1148 			keg->uk_flags |= UMA_ZONE_HASH;
1149 	}
1150 }
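/*
 * Worked example (illustrative, for a hypothetical pointer-aligned
 * 248-byte item on a single-page slab): rsize = 248 + UMA_FRITM_SZ for
 * the freelist linkage, and then
 *
 *	ipers       = (UMA_SLAB_SIZE - shsize) / rsize
 *	wastedspace = UMA_SLAB_SIZE - (ipers * rsize + shsize)
 *
 * Only if wastedspace reaches UMA_MAX_WASTE, and going OFFPAGE would
 * actually buy more items per slab, does the keg pay for an external
 * slab header to recover the tail of the page.
 */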
1151 
1152 /*
1153  * Finish creating a large (> UMA_SLAB_SIZE) uma keg.  Just give in and do
1154  * OFFPAGE for now.  When I can allow for more dynamic slab sizes this will be
1155  * more complicated.
1156  *
1157  * Arguments
1158  *	keg  The keg we should initialize
1159  *
1160  * Returns
1161  *	Nothing
1162  */
1163 static void
1164 keg_large_init(uma_keg_t keg)
1165 {
1166 	int pages;
1167 
1168 	KASSERT(keg != NULL, ("Keg is null in keg_large_init"));
1169 	KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0,
1170 	    ("keg_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY keg"));
1171 
1172 	pages = keg->uk_size / UMA_SLAB_SIZE;
1173 
1174 	/* Account for remainder */
1175 	if ((pages * UMA_SLAB_SIZE) < keg->uk_size)
1176 		pages++;
1177 
1178 	keg->uk_ppera = pages;
1179 	keg->uk_ipers = 1;
1180 
1181 	keg->uk_flags |= UMA_ZONE_OFFPAGE;
1182 	if ((keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1183 		keg->uk_flags |= UMA_ZONE_HASH;
1184 
1185 	keg->uk_rsize = keg->uk_size;
1186 }
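/*
 * Example (illustrative, assuming a 4kB UMA_SLAB_SIZE): a hypothetical
 * 10000-byte item gives pages = 2 with a remainder, bumped to
 * uk_ppera = 3, and uk_ipers = 1: one item per three-page slab, with
 * the slab header forced OFFPAGE since the item consumes the pages.
 */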
1187 
1188 static void
1189 keg_cachespread_init(uma_keg_t keg)
1190 {
1191 	int alignsize;
1192 	int trailer;
1193 	int pages;
1194 	int rsize;
1195 
1196 	alignsize = keg->uk_align + 1;
1197 	rsize = keg->uk_size;
1198 	/*
1199 	 * We want one item to start on every align boundary in a page.  To
1200 	 * do this we will span pages.  We will also extend the item by the
1201 	 * size of align if it is an even multiple of align.  Otherwise, it
1202 	 * would fall on the same boundary every time.
1203 	 */
1204 	if (rsize & keg->uk_align)
1205 		rsize = (rsize & ~keg->uk_align) + alignsize;
1206 	if ((rsize & alignsize) == 0)
1207 		rsize += alignsize;
1208 	trailer = rsize - keg->uk_size;
1209 	pages = (rsize * (PAGE_SIZE / alignsize)) / PAGE_SIZE;
1210 	pages = MIN(pages, (128 * 1024) / PAGE_SIZE);
1211 	keg->uk_rsize = rsize;
1212 	keg->uk_ppera = pages;
1213 	keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize;
1214 	keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
1215 	KASSERT(keg->uk_ipers <= uma_max_ipers,
1216 	    ("keg_cachespread_init: keg->uk_ipers too high(%d) increase max_ipers",
1217 	    keg->uk_ipers));
1218 }
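/*
 * Example (illustrative, assuming 64-byte cache lines, so
 * alignsize = 64): a hypothetical 256-byte item is already aligned but
 * is an even multiple of align, so rsize is extended to 320.
 * Successive items then start at offsets 0, 320, 640, ... and, since
 * 320/64 = 5 is coprime to the 64 line slots in a 4kB page, they
 * eventually start on every cache-line boundary in the page rather
 * than always the same few.
 */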
1219 
1220 /*
1221  * Keg header ctor.  This initializes all fields, locks, etc.  And inserts
1222  * the keg onto the global keg list.
1223  *
1224  * Arguments/Returns follow uma_ctor specifications
1225  *	udata  Actually uma_kctor_args
1226  */
1227 static int
1228 keg_ctor(void *mem, int size, void *udata, int flags)
1229 {
1230 	struct uma_kctor_args *arg = udata;
1231 	uma_keg_t keg = mem;
1232 	uma_zone_t zone;
1233 
1234 	bzero(keg, size);
1235 	keg->uk_size = arg->size;
1236 	keg->uk_init = arg->uminit;
1237 	keg->uk_fini = arg->fini;
1238 	keg->uk_align = arg->align;
1239 	keg->uk_free = 0;
1240 	keg->uk_pages = 0;
1241 	keg->uk_flags = arg->flags;
1242 	keg->uk_allocf = page_alloc;
1243 	keg->uk_freef = page_free;
1244 	keg->uk_recurse = 0;
1245 	keg->uk_slabzone = NULL;
1246 
1247 	/*
1248 	 * The master zone is passed to us at keg-creation time.
1249 	 */
1250 	zone = arg->zone;
1251 	keg->uk_name = zone->uz_name;
1252 
1253 	if (arg->flags & UMA_ZONE_VM)
1254 		keg->uk_flags |= UMA_ZFLAG_CACHEONLY;
1255 
1256 	if (arg->flags & UMA_ZONE_ZINIT)
1257 		keg->uk_init = zero_init;
1258 
1259 	if (arg->flags & UMA_ZONE_REFCNT || arg->flags & UMA_ZONE_MALLOC)
1260 		keg->uk_flags |= UMA_ZONE_VTOSLAB;
1261 
1262 	/*
1263 	 * The +UMA_FRITM_SZ added to uk_size is to account for the
1264 	 * linkage that is added to the size in keg_small_init().  If
1265 	 * we don't account for this here then we may end up in
1266 	 * keg_small_init() with a calculated 'ipers' of 0.
1267 	 */
1268 	if (keg->uk_flags & UMA_ZONE_REFCNT) {
1269 		if (keg->uk_flags & UMA_ZONE_CACHESPREAD)
1270 			keg_cachespread_init(keg);
1271 		else if ((keg->uk_size+UMA_FRITMREF_SZ) >
1272 		    (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt)))
1273 			keg_large_init(keg);
1274 		else
1275 			keg_small_init(keg);
1276 	} else {
1277 		if (keg->uk_flags & UMA_ZONE_CACHESPREAD)
1278 			keg_cachespread_init(keg);
1279 		else if ((keg->uk_size+UMA_FRITM_SZ) >
1280 		    (UMA_SLAB_SIZE - sizeof(struct uma_slab)))
1281 			keg_large_init(keg);
1282 		else
1283 			keg_small_init(keg);
1284 	}
1285 
1286 	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
1287 		if (keg->uk_flags & UMA_ZONE_REFCNT)
1288 			keg->uk_slabzone = slabrefzone;
1289 		else
1290 			keg->uk_slabzone = slabzone;
1291 	}
1292 
1293 	/*
1294 	 * If we haven't booted yet we need allocations to go through the
1295 	 * startup cache until the vm is ready.
1296 	 */
1297 	if (keg->uk_ppera == 1) {
1298 #ifdef UMA_MD_SMALL_ALLOC
1299 		keg->uk_allocf = uma_small_alloc;
1300 		keg->uk_freef = uma_small_free;
1301 #endif
1302 		if (booted == 0)
1303 			keg->uk_allocf = startup_alloc;
1304 	}
1305 
1306 	/*
1307 	 * Initialize keg's lock (shared among zones).
1308 	 */
1309 	if (arg->flags & UMA_ZONE_MTXCLASS)
1310 		KEG_LOCK_INIT(keg, 1);
1311 	else
1312 		KEG_LOCK_INIT(keg, 0);
1313 
1314 	/*
1315 	 * If we're putting the slab header in the actual page we need to
1316 	 * figure out where in each page it goes.  This calculates a right
1317 	 * justified offset into the memory on an ALIGN_PTR boundary.
1318 	 */
1319 	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) {
1320 		u_int totsize;
1321 
1322 		/* Size of the slab struct and free list */
1323 		if (keg->uk_flags & UMA_ZONE_REFCNT)
1324 			totsize = sizeof(struct uma_slab_refcnt) +
1325 			    keg->uk_ipers * UMA_FRITMREF_SZ;
1326 		else
1327 			totsize = sizeof(struct uma_slab) +
1328 			    keg->uk_ipers * UMA_FRITM_SZ;
1329 
1330 		if (totsize & UMA_ALIGN_PTR)
1331 			totsize = (totsize & ~UMA_ALIGN_PTR) +
1332 			    (UMA_ALIGN_PTR + 1);
1333 		keg->uk_pgoff = UMA_SLAB_SIZE - totsize;
1334 
1335 		if (keg->uk_flags & UMA_ZONE_REFCNT)
1336 			totsize = keg->uk_pgoff + sizeof(struct uma_slab_refcnt)
1337 			    + keg->uk_ipers * UMA_FRITMREF_SZ;
1338 		else
1339 			totsize = keg->uk_pgoff + sizeof(struct uma_slab)
1340 			    + keg->uk_ipers * UMA_FRITM_SZ;
1341 
1342 		/*
1343 		 * The only way the following is possible is if our
1344 		 * UMA_ALIGN_PTR adjustments have made us bigger than
1345 		 * UMA_SLAB_SIZE.  I haven't checked whether this is
1346 		 * mathematically possible for all cases, so we make
1347 		 * sure here anyway.
1348 		 */
1349 		if (totsize > UMA_SLAB_SIZE) {
1350 			printf("zone %s ipers %d rsize %d size %d\n",
1351 			    zone->uz_name, keg->uk_ipers, keg->uk_rsize,
1352 			    keg->uk_size);
1353 			panic("UMA slab won't fit.");
1354 		}
1355 	}
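	/*
	 * Example (illustrative): if the header plus freelist rounds up
	 * to 160 bytes under UMA_ALIGN_PTR and UMA_SLAB_SIZE is 4096,
	 * then uk_pgoff = 4096 - 160 = 3936 and the header sits at the
	 * very end of the slab, right-justified, with the items packed
	 * in front of it starting at offset 0.
	 */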
1356 
1357 	if (keg->uk_flags & UMA_ZONE_HASH)
1358 		hash_alloc(&keg->uk_hash);
1359 
1360 #ifdef UMA_DEBUG
1361 	printf("UMA: %s(%p) size %d(%d) flags %d ipers %d ppera %d out %d free %d\n",
1362 	    zone->uz_name, zone, keg->uk_size, keg->uk_rsize, keg->uk_flags,
1363 	    keg->uk_ipers, keg->uk_ppera,
1364 	    (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free);
1365 #endif
1366 
1367 	LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);
1368 
1369 	mtx_lock(&uma_mtx);
1370 	LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
1371 	mtx_unlock(&uma_mtx);
1372 	return (0);
1373 }
1374 
1375 /*
1376  * Zone header ctor.  This initializes all fields, locks, etc.
1377  *
1378  * Arguments/Returns follow uma_ctor specifications
1379  *	udata  Actually uma_zctor_args
1380  */
1381 static int
1382 zone_ctor(void *mem, int size, void *udata, int flags)
1383 {
1384 	struct uma_zctor_args *arg = udata;
1385 	uma_zone_t zone = mem;
1386 	uma_zone_t z;
1387 	uma_keg_t keg;
1388 
1389 	bzero(zone, size);
1390 	zone->uz_name = arg->name;
1391 	zone->uz_ctor = arg->ctor;
1392 	zone->uz_dtor = arg->dtor;
1393 	zone->uz_slab = zone_fetch_slab;
1394 	zone->uz_init = NULL;
1395 	zone->uz_fini = NULL;
1396 	zone->uz_allocs = 0;
1397 	zone->uz_frees = 0;
1398 	zone->uz_fails = 0;
1399 	zone->uz_sleeps = 0;
1400 	zone->uz_fills = zone->uz_count = 0;
1401 	zone->uz_flags = 0;
1402 	keg = arg->keg;
1403 
1404 	if (arg->flags & UMA_ZONE_SECONDARY) {
1405 		KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
1406 		zone->uz_init = arg->uminit;
1407 		zone->uz_fini = arg->fini;
1408 		zone->uz_lock = &keg->uk_lock;
1409 		zone->uz_flags |= UMA_ZONE_SECONDARY;
1410 		mtx_lock(&uma_mtx);
1411 		ZONE_LOCK(zone);
1412 		LIST_FOREACH(z, &keg->uk_zones, uz_link) {
1413 			if (LIST_NEXT(z, uz_link) == NULL) {
1414 				LIST_INSERT_AFTER(z, zone, uz_link);
1415 				break;
1416 			}
1417 		}
1418 		ZONE_UNLOCK(zone);
1419 		mtx_unlock(&uma_mtx);
1420 	} else if (keg == NULL) {
1421 		if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
1422 		    arg->align, arg->flags)) == NULL)
1423 			return (ENOMEM);
1424 	} else {
1425 		struct uma_kctor_args karg;
1426 		int error;
1427 
1428 		/* We should only be here from uma_startup() */
1429 		karg.size = arg->size;
1430 		karg.uminit = arg->uminit;
1431 		karg.fini = arg->fini;
1432 		karg.align = arg->align;
1433 		karg.flags = arg->flags;
1434 		karg.zone = zone;
1435 		error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
1436 		    flags);
1437 		if (error)
1438 			return (error);
1439 	}
1440 	/*
1441 	 * Link in the first keg.
1442 	 */
1443 	zone->uz_klink.kl_keg = keg;
1444 	LIST_INSERT_HEAD(&zone->uz_kegs, &zone->uz_klink, kl_link);
1445 	zone->uz_lock = &keg->uk_lock;
1446 	zone->uz_size = keg->uk_size;
1447 	zone->uz_flags |= (keg->uk_flags &
1448 	    (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT));
1449 
1450 	/*
1451 	 * Some internal zones don't have room allocated for the per cpu
1452 	 * caches.  If we're internal, bail out here.
1453 	 */
1454 	if (keg->uk_flags & UMA_ZFLAG_INTERNAL) {
1455 		KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0,
1456 		    ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
1457 		return (0);
1458 	}
1459 
1460 	if (keg->uk_flags & UMA_ZONE_MAXBUCKET)
1461 		zone->uz_count = BUCKET_MAX;
1462 	else if (keg->uk_ipers <= BUCKET_MAX)
1463 		zone->uz_count = keg->uk_ipers;
1464 	else
1465 		zone->uz_count = BUCKET_MAX;
1466 	return (0);
1467 }
1468 
1469 /*
1470  * Keg header dtor.  This frees all data, destroys locks, frees the hash
1471  * table and removes the keg from the global list.
1472  *
1473  * Arguments/Returns follow uma_dtor specifications
1474  *	udata  unused
1475  */
1476 static void
1477 keg_dtor(void *arg, int size, void *udata)
1478 {
1479 	uma_keg_t keg;
1480 
1481 	keg = (uma_keg_t)arg;
1482 	KEG_LOCK(keg);
1483 	if (keg->uk_free != 0) {
1484 		printf("Freed UMA keg was not empty (%d items). "
1485 		    " Lost %d pages of memory.\n",
1486 		    keg->uk_free, keg->uk_pages);
1487 	}
1488 	KEG_UNLOCK(keg);
1489 
1490 	hash_free(&keg->uk_hash);
1491 
1492 	KEG_LOCK_FINI(keg);
1493 }
1494 
1495 /*
1496  * Zone header dtor.
1497  *
1498  * Arguments/Returns follow uma_dtor specifications
1499  *	udata  unused
1500  */
1501 static void
1502 zone_dtor(void *arg, int size, void *udata)
1503 {
1504 	uma_klink_t klink;
1505 	uma_zone_t zone;
1506 	uma_keg_t keg;
1507 
1508 	zone = (uma_zone_t)arg;
1509 	keg = zone_first_keg(zone);
1510 
1511 	if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
1512 		cache_drain(zone);
1513 
1514 	mtx_lock(&uma_mtx);
1515 	LIST_REMOVE(zone, uz_link);
1516 	mtx_unlock(&uma_mtx);
1517 	/*
1518 	 * XXX there are some races here where the zone can be drained but
1519 	 * the zone lock released and then refilled before we remove it...
1520 	 * we don't care for now.
1522 	 */
1523 	zone_drain_wait(zone, M_WAITOK);
1524 	/*
1525 	 * Unlink all of our kegs.
1526 	 */
1527 	while ((klink = LIST_FIRST(&zone->uz_kegs)) != NULL) {
1528 		klink->kl_keg = NULL;
1529 		LIST_REMOVE(klink, kl_link);
1530 		if (klink == &zone->uz_klink)
1531 			continue;
1532 		free(klink, M_TEMP);
1533 	}
1534 	/*
1535 	 * We only destroy kegs from non secondary zones.
1536 	 */
1537 	if ((zone->uz_flags & UMA_ZONE_SECONDARY) == 0)  {
1538 		mtx_lock(&uma_mtx);
1539 		LIST_REMOVE(keg, uk_link);
1540 		mtx_unlock(&uma_mtx);
1541 		zone_free_item(kegs, keg, NULL, SKIP_NONE,
1542 		    ZFREE_STATFREE);
1543 	}
1544 }
1545 
1546 /*
1547  * Traverses every zone in the system and calls a callback
1548  *
1549  * Arguments:
1550  *	zfunc  A pointer to a function which accepts a zone
1551  *		as an argument.
1552  *
1553  * Returns:
1554  *	Nothing
1555  */
1556 static void
1557 zone_foreach(void (*zfunc)(uma_zone_t))
1558 {
1559 	uma_keg_t keg;
1560 	uma_zone_t zone;
1561 
1562 	mtx_lock(&uma_mtx);
1563 	LIST_FOREACH(keg, &uma_kegs, uk_link) {
1564 		LIST_FOREACH(zone, &keg->uk_zones, uz_link)
1565 			zfunc(zone);
1566 	}
1567 	mtx_unlock(&uma_mtx);
1568 }
1569 
1570 /* Public functions */
1571 /* See uma.h */
1572 void
1573 uma_startup(void *bootmem, int boot_pages)
1574 {
1575 	struct uma_zctor_args args;
1576 	uma_slab_t slab;
1577 	u_int slabsize;
1578 	u_int objsize, totsize, wsize;
1579 	int i;
1580 
1581 #ifdef UMA_DEBUG
1582 	printf("Creating uma keg headers zone and keg.\n");
1583 #endif
1584 	mtx_init(&uma_mtx, "UMA lock", NULL, MTX_DEF);
1585 
1586 	/*
1587 	 * Figure out the maximum number of items-per-slab we'll have if
1588 	 * we're using the OFFPAGE slab header to track free items, given
1589 	 * all possible object sizes and the maximum desired wastage
1590 	 * (UMA_MAX_WASTE).
1591 	 *
1592 	 * We iterate until we find an object size for
1593 	 * which the calculated wastage in keg_small_init() will be
1594 	 * enough to warrant OFFPAGE.  Since wastedspace versus objsize
1595 	 * is an overall increasing see-saw function, we find the smallest
1596 	 * objsize such that the wastage is always acceptable for objects
1597 	 * with that objsize or smaller.  Since a smaller objsize always
1598 	 * generates a larger possible uma_max_ipers, we use this computed
1599 	 * objsize to calculate the largest ipers possible.  Since the
1600 	 * ipers calculated for OFFPAGE slab headers is always larger than
1601 	 * the ipers initially calculated in keg_small_init(), we use
1602 	 * the former's equation (UMA_SLAB_SIZE / keg->uk_rsize) to
1603 	 * obtain the maximum ipers possible for offpage slab headers.
1604 	 *
1605 	 * It should be noted that ipers versus objsize is an inversely
1606 	 * proportional function which drops off rather quickly so as
1607 	 * long as our UMA_MAX_WASTE is such that the objsize we calculate
1608 	 * falls into the portion of the inverse relation AFTER the steep
1609 	 * falloff, then uma_max_ipers shouldn't be too high (~10 on i386).
1610 	 *
1611 	 * Note that we have 8-bits (1 byte) to use as a freelist index
1612 	 * inside the actual slab header itself and this is enough to
1613 	 * accommodate us.  In the worst case, a UMA_SMALLEST_UNIT sized
1614 	 * object with offpage slab header would have ipers =
1615 	 * UMA_SLAB_SIZE / UMA_SMALLEST_UNIT (currently = 256), which is
1616 	 * 1 greater than what our byte-integer freelist index can
1617 	 * accommodate, but we know that this situation never occurs as
1618 	 * for UMA_SMALLEST_UNIT-sized objects, we will never calculate
1619 	 * that we need to go to offpage slab headers.  Or, if we do,
1620 	 * then we trap that condition below and panic in the INVARIANTS case.
1621 	 */
1622 	wsize = UMA_SLAB_SIZE - sizeof(struct uma_slab) - UMA_MAX_WASTE;
1623 	totsize = wsize;
1624 	objsize = UMA_SMALLEST_UNIT;
1625 	while (totsize >= wsize) {
1626 		totsize = (UMA_SLAB_SIZE - sizeof(struct uma_slab)) /
1627 		    (objsize + UMA_FRITM_SZ);
1628 		totsize *= (UMA_FRITM_SZ + objsize);
1629 		objsize++;
1630 	}
1631 	if (objsize > UMA_SMALLEST_UNIT)
1632 		objsize--;
1633 	uma_max_ipers = MAX(UMA_SLAB_SIZE / objsize, 64);
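	/*
	 * Restating the loop above (illustrative): each pass computes
	 * how many (objsize + UMA_FRITM_SZ) items fit beside an in-page
	 * slab header and how many bytes they cover (totsize).  The
	 * loop exits once that coverage drops below wsize, i.e. once
	 * the waste exceeds UMA_MAX_WASTE, so after the objsize-- the
	 * variable names the boundary object size at which
	 * keg_small_init() starts preferring OFFPAGE; dividing
	 * UMA_SLAB_SIZE by it bounds the offpage freelist length.
	 */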
1634 
1635 	wsize = UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt) - UMA_MAX_WASTE;
1636 	totsize = wsize;
1637 	objsize = UMA_SMALLEST_UNIT;
1638 	while (totsize >= wsize) {
1639 		totsize = (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt)) /
1640 		    (objsize + UMA_FRITMREF_SZ);
1641 		totsize *= (UMA_FRITMREF_SZ + objsize);
1642 		objsize++;
1643 	}
1644 	if (objsize > UMA_SMALLEST_UNIT)
1645 		objsize--;
1646 	uma_max_ipers_ref = MAX(UMA_SLAB_SIZE / objsize, 64);
1647 
1648 	KASSERT((uma_max_ipers_ref <= 255) && (uma_max_ipers <= 255),
1649 	    ("uma_startup: calculated uma_max_ipers values too large!"));
1650 
1651 #ifdef UMA_DEBUG
1652 	printf("Calculated uma_max_ipers (for OFFPAGE) is %d\n", uma_max_ipers);
1653 	printf("Calculated uma_max_ipers_ref (for OFFPAGE) is %d\n",
1654 	    uma_max_ipers_ref);
1655 #endif
1656 
1657 	/* "manually" create the initial zone */
1658 	args.name = "UMA Kegs";
1659 	args.size = sizeof(struct uma_keg);
1660 	args.ctor = keg_ctor;
1661 	args.dtor = keg_dtor;
1662 	args.uminit = zero_init;
1663 	args.fini = NULL;
1664 	args.keg = &masterkeg;
1665 	args.align = 32 - 1;
1666 	args.flags = UMA_ZFLAG_INTERNAL;
1667 	/* The initial zone has no per-CPU queues so it's smaller. */
1668 	zone_ctor(kegs, sizeof(struct uma_zone), &args, M_WAITOK);
1669 
1670 #ifdef UMA_DEBUG
1671 	printf("Filling boot free list.\n");
1672 #endif
1673 	for (i = 0; i < boot_pages; i++) {
1674 		slab = (uma_slab_t)((u_int8_t *)bootmem + (i * UMA_SLAB_SIZE));
1675 		slab->us_data = (u_int8_t *)slab;
1676 		slab->us_flags = UMA_SLAB_BOOT;
1677 		LIST_INSERT_HEAD(&uma_boot_pages, slab, us_link);
1678 	}
1679 	mtx_init(&uma_boot_pages_mtx, "UMA boot pages", NULL, MTX_DEF);
1680 
1681 #ifdef UMA_DEBUG
1682 	printf("Creating uma zone headers zone and keg.\n");
1683 #endif
1684 	args.name = "UMA Zones";
1685 	args.size = sizeof(struct uma_zone) +
1686 	    (sizeof(struct uma_cache) * (mp_maxid + 1));
1687 	args.ctor = zone_ctor;
1688 	args.dtor = zone_dtor;
1689 	args.uminit = zero_init;
1690 	args.fini = NULL;
1691 	args.keg = NULL;
1692 	args.align = 32 - 1;
1693 	args.flags = UMA_ZFLAG_INTERNAL;
1694 	/* The initial zone has no per-CPU queues so it's smaller. */
1695 	zone_ctor(zones, sizeof(struct uma_zone), &args, M_WAITOK);
1696 
1700 #ifdef UMA_DEBUG
1701 	printf("Creating slab and hash zones.\n");
1702 #endif
1703 
1704 	/*
1705 	 * This is the max number of free list items we'll have with
1706 	 * offpage slabs.
1707 	 */
1708 	slabsize = uma_max_ipers * UMA_FRITM_SZ;
1709 	slabsize += sizeof(struct uma_slab);
1710 
1711 	/* Now make a zone for slab headers */
1712 	slabzone = uma_zcreate("UMA Slabs",
1713 				slabsize,
1714 				NULL, NULL, NULL, NULL,
1715 				UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
1716 
1717 	/*
1718 	 * We also create a zone for the bigger slabs with reference
1719 	 * counts in them, to accommodate UMA_ZONE_REFCNT zones.
1720 	 */
1721 	slabsize = uma_max_ipers_ref * UMA_FRITMREF_SZ;
1722 	slabsize += sizeof(struct uma_slab_refcnt);
1723 	slabrefzone = uma_zcreate("UMA RCntSlabs",
1724 				  slabsize,
1725 				  NULL, NULL, NULL, NULL,
1726 				  UMA_ALIGN_PTR,
1727 				  UMA_ZFLAG_INTERNAL);
1728 
1729 	hashzone = uma_zcreate("UMA Hash",
1730 	    sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
1731 	    NULL, NULL, NULL, NULL,
1732 	    UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
1733 
1734 	bucket_init();
1735 
1736 #if defined(UMA_MD_SMALL_ALLOC) && !defined(UMA_MD_SMALL_ALLOC_NEEDS_VM)
1737 	booted = 1;
1738 #endif
1739 
1740 #ifdef UMA_DEBUG
1741 	printf("UMA startup complete.\n");
1742 #endif
1743 }
1744 
1745 /* See uma.h */
1746 void
1747 uma_startup2(void)
1748 {
1749 	booted = 1;
1750 	bucket_enable();
1751 #ifdef UMA_DEBUG
1752 	printf("UMA startup2 complete.\n");
1753 #endif
1754 }
1755 
1756 /*
1757  * Initialize our callout handle.
1758  */
1760 
1761 static void
1762 uma_startup3(void)
1763 {
1764 #ifdef UMA_DEBUG
1765 	printf("Starting callout.\n");
1766 #endif
1767 	callout_init(&uma_callout, CALLOUT_MPSAFE);
1768 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
1769 #ifdef UMA_DEBUG
1770 	printf("UMA startup3 complete.\n");
1771 #endif
1772 }
1773 
1774 static uma_keg_t
1775 uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
1776 		int align, u_int32_t flags)
1777 {
1778 	struct uma_kctor_args args;
1779 
1780 	args.size = size;
1781 	args.uminit = uminit;
1782 	args.fini = fini;
1783 	args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align;
1784 	args.flags = flags;
1785 	args.zone = zone;
1786 	return (zone_alloc_item(kegs, &args, M_WAITOK));
1787 }
1788 
1789 /* See uma.h */
1790 void
1791 uma_set_align(int align)
1792 {
1793 
1794 	if (align != UMA_ALIGN_CACHE)
1795 		uma_align_cache = align;
1796 }
1797 
1798 /* See uma.h */
1799 uma_zone_t
1800 uma_zcreate(char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
1801 		uma_init uminit, uma_fini fini, int align, u_int32_t flags)
1802 
1803 {
1804 	struct uma_zctor_args args;
1805 
1806 	/* This stuff is essential for the zone ctor */
1807 	args.name = name;
1808 	args.size = size;
1809 	args.ctor = ctor;
1810 	args.dtor = dtor;
1811 	args.uminit = uminit;
1812 	args.fini = fini;
1813 	args.align = align;
1814 	args.flags = flags;
1815 	args.keg = NULL;
1816 
1817 	return (zone_alloc_item(zones, &args, M_WAITOK));
1818 }
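/*
 * Hypothetical usage sketch ("struct foo" and foo_zone are assumed
 * names, not part of this file):
 *
 *	static uma_zone_t foo_zone;
 *
 *	foo_zone = uma_zcreate("foo", sizeof(struct foo),
 *	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
 *	p = uma_zalloc(foo_zone, M_WAITOK);
 *	...
 *	uma_zfree(foo_zone, p);
 *
 * NULL ctor/dtor/init/fini slots are legal and simply skip those
 * hooks.
 */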
1819 
1820 /* See uma.h */
1821 uma_zone_t
1822 uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
1823 		    uma_init zinit, uma_fini zfini, uma_zone_t master)
1824 {
1825 	struct uma_zctor_args args;
1826 	uma_keg_t keg;
1827 
1828 	keg = zone_first_keg(master);
1829 	args.name = name;
1830 	args.size = keg->uk_size;
1831 	args.ctor = ctor;
1832 	args.dtor = dtor;
1833 	args.uminit = zinit;
1834 	args.fini = zfini;
1835 	args.align = keg->uk_align;
1836 	args.flags = keg->uk_flags | UMA_ZONE_SECONDARY;
1837 	args.keg = keg;
1838 
1839 	/* XXX Attaches only one keg of potentially many. */
1840 	return (zone_alloc_item(zones, &args, M_WAITOK));
1841 }
1842 
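/*
 * Lock a pair of zones, always taking the lower-addressed zone's lock
 * first.  Ordering the two acquisitions by address gives every caller
 * the same lock order, which avoids deadlock when two threads pair the
 * same zones with their arguments swapped; MTX_DUPOK tells WITNESS
 * that holding two locks of the same class here is intentional.
 */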
1843 static void
1844 zone_lock_pair(uma_zone_t a, uma_zone_t b)
1845 {
1846 	if (a < b) {
1847 		ZONE_LOCK(a);
1848 		mtx_lock_flags(b->uz_lock, MTX_DUPOK);
1849 	} else {
1850 		ZONE_LOCK(b);
1851 		mtx_lock_flags(a->uz_lock, MTX_DUPOK);
1852 	}
1853 }
1854 
1855 static void
1856 zone_unlock_pair(uma_zone_t a, uma_zone_t b)
1857 {
1858 
1859 	ZONE_UNLOCK(a);
1860 	ZONE_UNLOCK(b);
1861 }
1862 
1863 int
1864 uma_zsecond_add(uma_zone_t zone, uma_zone_t master)
1865 {
1866 	uma_klink_t klink;
1867 	uma_klink_t kl;
1868 	int error;
1869 
1870 	error = 0;
1871 	klink = malloc(sizeof(*klink), M_TEMP, M_WAITOK | M_ZERO);
1872 
1873 	zone_lock_pair(zone, master);
1874 	/*
1875 	 * zone must use vtoslab() to resolve objects and must already be
1876 	 * a secondary.
1877 	 */
1878 	if ((zone->uz_flags & (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY))
1879 	    != (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) {
1880 		error = EINVAL;
1881 		goto out;
1882 	}
1883 	/*
1884 	 * The new master must also use vtoslab().
1885 	 */
1886 	if ((master->uz_flags & UMA_ZONE_VTOSLAB) != UMA_ZONE_VTOSLAB) {
1887 		error = EINVAL;
1888 		goto out;
1889 	}
1890 	/*
1891 	 * Both must either be refcnt, or not be refcnt.
1892 	 */
1893 	if ((zone->uz_flags & UMA_ZONE_REFCNT) !=
1894 	    (master->uz_flags & UMA_ZONE_REFCNT)) {
1895 		error = EINVAL;
1896 		goto out;
1897 	}
1898 	/*
1899 	 * The underlying object must be the same size.  rsize
1900 	 * may be different.
1901 	 */
1902 	if (master->uz_size != zone->uz_size) {
1903 		error = E2BIG;
1904 		goto out;
1905 	}
1906 	/*
1907 	 * Put it at the end of the list.
1908 	 */
1909 	klink->kl_keg = zone_first_keg(master);
1910 	LIST_FOREACH(kl, &zone->uz_kegs, kl_link) {
1911 		if (LIST_NEXT(kl, kl_link) == NULL) {
1912 			LIST_INSERT_AFTER(kl, klink, kl_link);
1913 			break;
1914 		}
1915 	}
1916 	klink = NULL;
1917 	zone->uz_flags |= UMA_ZFLAG_MULTI;
1918 	zone->uz_slab = zone_fetch_slab_multi;
1919 
1920 out:
1921 	zone_unlock_pair(zone, master);
1922 	if (klink != NULL)
1923 		free(klink, M_TEMP);
1924 
1925 	return (error);
1926 }
1927 
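/*
 * Example (illustrative sketch; names are hypothetical): once a second keg
 * is attached, allocations from the zone may be satisfied from either
 * keg's slabs via zone_fetch_slab_multi().
 *
 *	error = uma_zsecond_add(foo_zone, foo_fallback_zone);
 *	if (error != 0)
 *		printf("uma_zsecond_add failed: %d\n", error);
 */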
1928 
1929 /* See uma.h */
1930 void
1931 uma_zdestroy(uma_zone_t zone)
1932 {
1933 
1934 	zone_free_item(zones, zone, NULL, SKIP_NONE, ZFREE_STATFREE);
1935 }
1936 
1937 /* See uma.h */
1938 void *
1939 uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
1940 {
1941 	void *item;
1942 	uma_cache_t cache;
1943 	uma_bucket_t bucket;
1944 	int cpu;
1945 
1946 	/* This is the fast path allocation */
1947 #ifdef UMA_DEBUG_ALLOC_1
1948 	printf("Allocating one item from %s(%p)\n", zone->uz_name, zone);
1949 #endif
1950 	CTR3(KTR_UMA, "uma_zalloc_arg thread %x zone %s flags %d", curthread,
1951 	    zone->uz_name, flags);
1952 
1953 	if (flags & M_WAITOK) {
1954 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1955 		    "uma_zalloc_arg: zone \"%s\"", zone->uz_name);
1956 	}
1957 
1958 	/*
1959 	 * If possible, allocate from the per-CPU cache.  There are two
1960 	 * requirements for safe access to the per-CPU cache: (1) the thread
1961 	 * accessing the cache must not be preempted or yield during access,
1962 	 * and (2) the thread must not migrate CPUs without switching which
1963 	 * cache it accesses.  We rely on a critical section to prevent
1964 	 * preemption and migration.  We release the critical section in
1965 	 * order to acquire the zone mutex if we are unable to allocate from
1966 	 * the current cache; when we re-acquire the critical section, we
1967 	 * must detect and handle migration if it has occurred.
1968 	 */
1969 zalloc_restart:
1970 	critical_enter();
1971 	cpu = curcpu;
1972 	cache = &zone->uz_cpu[cpu];
1973 
1974 zalloc_start:
1975 	bucket = cache->uc_allocbucket;
1976 
1977 	if (bucket) {
1978 		if (bucket->ub_cnt > 0) {
1979 			bucket->ub_cnt--;
1980 			item = bucket->ub_bucket[bucket->ub_cnt];
1981 #ifdef INVARIANTS
1982 			bucket->ub_bucket[bucket->ub_cnt] = NULL;
1983 #endif
1984 			KASSERT(item != NULL,
1985 			    ("uma_zalloc: Bucket pointer mangled."));
1986 			cache->uc_allocs++;
1987 			critical_exit();
1988 #ifdef INVARIANTS
1989 			ZONE_LOCK(zone);
1990 			uma_dbg_alloc(zone, NULL, item);
1991 			ZONE_UNLOCK(zone);
1992 #endif
1993 			if (zone->uz_ctor != NULL) {
1994 				if (zone->uz_ctor(item, zone->uz_size,
1995 				    udata, flags) != 0) {
1996 					zone_free_item(zone, item, udata,
1997 					    SKIP_DTOR, ZFREE_STATFAIL |
1998 					    ZFREE_STATFREE);
1999 					return (NULL);
2000 				}
2001 			}
2002 			if (flags & M_ZERO)
2003 				bzero(item, zone->uz_size);
2004 			return (item);
2005 		} else if (cache->uc_freebucket) {
2006 			/*
2007 			 * We have run out of items in our allocbucket.
2008 			 * See if we can switch with our free bucket.
2009 			 */
2010 			if (cache->uc_freebucket->ub_cnt > 0) {
2011 #ifdef UMA_DEBUG_ALLOC
2012 				printf("uma_zalloc: Swapping empty with"
2013 				    " alloc.\n");
2014 #endif
2015 				bucket = cache->uc_freebucket;
2016 				cache->uc_freebucket = cache->uc_allocbucket;
2017 				cache->uc_allocbucket = bucket;
2018 
2019 				goto zalloc_start;
2020 			}
2021 		}
2022 	}
2023 	/*
2024 	 * The attempt to retrieve the item from the per-CPU cache has failed,
2025 	 * so we must go back to the zone.  This requires the zone lock, so we
2026 	 * must drop the critical section, then re-acquire it when we go back
2027 	 * to the cache.  Since the critical section is released, we may be
2028 	 * preempted or migrate.  As such, make sure not to maintain any
2029 	 * thread-local state specific to the cache from prior to releasing
2030 	 * the critical section.
2031 	 */
2032 	critical_exit();
2033 	ZONE_LOCK(zone);
2034 	critical_enter();
2035 	cpu = curcpu;
2036 	cache = &zone->uz_cpu[cpu];
2037 	bucket = cache->uc_allocbucket;
2038 	if (bucket != NULL) {
2039 		if (bucket->ub_cnt > 0) {
2040 			ZONE_UNLOCK(zone);
2041 			goto zalloc_start;
2042 		}
2043 		bucket = cache->uc_freebucket;
2044 		if (bucket != NULL && bucket->ub_cnt > 0) {
2045 			ZONE_UNLOCK(zone);
2046 			goto zalloc_start;
2047 		}
2048 	}
2049 
2050 	/* Since we have locked the zone we may as well send back our stats */
2051 	zone->uz_allocs += cache->uc_allocs;
2052 	cache->uc_allocs = 0;
2053 	zone->uz_frees += cache->uc_frees;
2054 	cache->uc_frees = 0;
2055 
2056 	/* Our old one is now a free bucket */
2057 	if (cache->uc_allocbucket) {
2058 		KASSERT(cache->uc_allocbucket->ub_cnt == 0,
2059 		    ("uma_zalloc_arg: Freeing a non free bucket."));
2060 		LIST_INSERT_HEAD(&zone->uz_free_bucket,
2061 		    cache->uc_allocbucket, ub_link);
2062 		cache->uc_allocbucket = NULL;
2063 	}
2064 
2065 	/* Check the free list for a new alloc bucket */
2066 	if ((bucket = LIST_FIRST(&zone->uz_full_bucket)) != NULL) {
2067 		KASSERT(bucket->ub_cnt != 0,
2068 		    ("uma_zalloc_arg: Returning an empty bucket."));
2069 
2070 		LIST_REMOVE(bucket, ub_link);
2071 		cache->uc_allocbucket = bucket;
2072 		ZONE_UNLOCK(zone);
2073 		goto zalloc_start;
2074 	}
2075 	/* We are no longer associated with this CPU. */
2076 	critical_exit();
2077 
2078 	/* Bump up our uz_count so we get here less often */
2079 	if (zone->uz_count < BUCKET_MAX)
2080 		zone->uz_count++;
2081 
2082 	/*
2083 	 * Now let's just fill a bucket and put it on the free list.  If that
2084 	 * works we'll restart the allocation from the beginning.
2085 	 */
2086 	if (zone_alloc_bucket(zone, flags)) {
2087 		ZONE_UNLOCK(zone);
2088 		goto zalloc_restart;
2089 	}
2090 	ZONE_UNLOCK(zone);
2091 	/*
2092 	 * We may not be able to get a bucket so return an actual item.
2093 	 */
2094 #ifdef UMA_DEBUG
2095 	printf("uma_zalloc_arg: Bucketzone returned NULL\n");
2096 #endif
2097 
2098 	item = zone_alloc_item(zone, udata, flags);
2099 	return (item);
2100 }
2101 
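/*
 * Example (illustrative sketch; foo_zone is hypothetical): callers choose
 * blocking behaviour with the standard malloc(9) flags.  M_NOWAIT can
 * return NULL and is required in contexts that may not sleep; M_WAITOK may
 * sleep and is expected to succeed on unlimited zones.
 *
 *	item = uma_zalloc(foo_zone, M_NOWAIT);
 *	if (item == NULL)
 *		return (ENOMEM);
 */
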
2102 static uma_slab_t
2103 keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int flags)
2104 {
2105 	uma_slab_t slab;
2106 
2107 	mtx_assert(&keg->uk_lock, MA_OWNED);
2108 	slab = NULL;
2109 
2110 	for (;;) {
2111 		/*
2112 		 * Find a slab with some space.  Prefer slabs that are partially
2113 		 * used over those that are totally full.  This helps to reduce
2114 		 * fragmentation.
2115 		 */
2116 		if (keg->uk_free != 0) {
2117 			if (!LIST_EMPTY(&keg->uk_part_slab)) {
2118 				slab = LIST_FIRST(&keg->uk_part_slab);
2119 			} else {
2120 				slab = LIST_FIRST(&keg->uk_free_slab);
2121 				LIST_REMOVE(slab, us_link);
2122 				LIST_INSERT_HEAD(&keg->uk_part_slab, slab,
2123 				    us_link);
2124 			}
2125 			MPASS(slab->us_keg == keg);
2126 			return (slab);
2127 		}
2128 
2129 		/*
2130 		 * M_NOVM means don't ask at all!
2131 		 */
2132 		if (flags & M_NOVM)
2133 			break;
2134 
2135 		if (keg->uk_maxpages && keg->uk_pages >= keg->uk_maxpages) {
2136 			keg->uk_flags |= UMA_ZFLAG_FULL;
2137 			/*
2138 			 * If this is not a multi-zone, set the FULL bit.
2139 			 * Otherwise slab_multi() takes care of it.
2140 			 */
2141 			if ((zone->uz_flags & UMA_ZFLAG_MULTI) == 0)
2142 				zone->uz_flags |= UMA_ZFLAG_FULL;
2143 			if (flags & M_NOWAIT)
2144 				break;
2145 			msleep(keg, &keg->uk_lock, PVM, "keglimit", 0);
2146 			continue;
2147 		}
2148 		keg->uk_recurse++;
2149 		slab = keg_alloc_slab(keg, zone, flags);
2150 		keg->uk_recurse--;
2151 		/*
2152 		 * If we got a slab here it's safe to mark it partially used
2153 		 * and return.  We assume that the caller is going to remove
2154 		 * at least one item.
2155 		 */
2156 		if (slab) {
2157 			MPASS(slab->us_keg == keg);
2158 			LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
2159 			return (slab);
2160 		}
2161 		/*
2162 		 * We might not have been able to get a slab but another cpu
2163 		 * could have while we were unlocked.  Check again before we
2164 		 * fail.
2165 		 */
2166 		flags |= M_NOVM;
2167 	}
2168 	return (slab);
2169 }
2170 
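/*
 * For single-keg zones the zone lock aliases the keg lock, so these helpers
 * only exchange locks when the zone and keg mutexes are actually distinct
 * (i.e. for multi-keg zones).
 */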
2171 static inline void
2172 zone_relock(uma_zone_t zone, uma_keg_t keg)
2173 {
2174 	if (zone->uz_lock != &keg->uk_lock) {
2175 		KEG_UNLOCK(keg);
2176 		ZONE_LOCK(zone);
2177 	}
2178 }
2179 
2180 static inline void
2181 keg_relock(uma_keg_t keg, uma_zone_t zone)
2182 {
2183 	if (zone->uz_lock != &keg->uk_lock) {
2184 		ZONE_UNLOCK(zone);
2185 		KEG_LOCK(keg);
2186 	}
2187 }
2188 
2189 static uma_slab_t
2190 zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int flags)
2191 {
2192 	uma_slab_t slab;
2193 
2194 	if (keg == NULL)
2195 		keg = zone_first_keg(zone);
2196 	/*
2197 	 * This is to prevent us from recursively trying to allocate
2198 	 * buckets.  The problem is that if an allocation forces us to
2199 	 * grab a new bucket we will call page_alloc, which will go off
2200 	 * and cause the vm to allocate vm_map_entries.  If we need new
2201 	 * buckets there too we will recurse in kmem_alloc and bad
2202 	 * things happen.  So instead we return a NULL bucket, and make
2203 	 * the code that allocates buckets smart enough to deal with it.
2204 	 */
2205 	if ((keg->uk_flags & UMA_ZFLAG_BUCKET) && keg->uk_recurse != 0)
2206 		return (NULL);
2207 
2208 	for (;;) {
2209 		slab = keg_fetch_slab(keg, zone, flags);
2210 		if (slab)
2211 			return (slab);
2212 		if (flags & (M_NOWAIT | M_NOVM))
2213 			break;
2214 	}
2215 	return (NULL);
2216 }
2217 
2218 /*
2219  * zone_fetch_slab_multi:  Fetches a slab from one available keg.  Returns
2220  * with the keg locked.  The caller must call zone_relock() afterwards if
2221  * the zone lock is required.  If NULL is returned, the zone lock is held.
2222  *
2223  * The last pointer is used to seed the search.  It is not required.
2224  */
2225 static uma_slab_t
2226 zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int rflags)
2227 {
2228 	uma_klink_t klink;
2229 	uma_slab_t slab;
2230 	uma_keg_t keg;
2231 	int flags;
2232 	int empty;
2233 	int full;
2234 
2235 	/*
2236 	 * Don't wait on the first pass.  This will skip limit tests
2237 	 * as well.  We don't want to block if we can find a provider
2238 	 * without blocking.
2239 	 */
2240 	flags = (rflags & ~M_WAITOK) | M_NOWAIT;
2241 	/*
2242 	 * Use the last slab allocated as a hint for where to start
2243 	 * the search.
2244 	 */
2245 	if (last) {
2246 		slab = keg_fetch_slab(last, zone, flags);
2247 		if (slab)
2248 			return (slab);
2249 		zone_relock(zone, last);
2250 		last = NULL;
2251 	}
2252 	/*
2253 	 * Loop until we have a slab in case of transient failures
2254 	 * while M_WAITOK is specified.  I'm not sure this is 100%
2255 	 * required but we've done it for so long now.
2256 	 */
2257 	for (;;) {
2258 		empty = 0;
2259 		full = 0;
2260 		/*
2261 		 * Search the available kegs for slabs.  Be careful to hold the
2262 		 * correct lock while calling into the keg layer.
2263 		 */
2264 		LIST_FOREACH(klink, &zone->uz_kegs, kl_link) {
2265 			keg = klink->kl_keg;
2266 			keg_relock(keg, zone);
2267 			if ((keg->uk_flags & UMA_ZFLAG_FULL) == 0) {
2268 				slab = keg_fetch_slab(keg, zone, flags);
2269 				if (slab)
2270 					return (slab);
2271 			}
2272 			if (keg->uk_flags & UMA_ZFLAG_FULL)
2273 				full++;
2274 			else
2275 				empty++;
2276 			zone_relock(zone, keg);
2277 		}
2278 		if (rflags & (M_NOWAIT | M_NOVM))
2279 			break;
2280 		flags = rflags;
2281 		/*
2282 		 * All kegs are full.  XXX We can't atomically check all kegs
2283 		 * and sleep so just sleep for a short period and retry.
2284 		 */
2285 		if (full && !empty) {
2286 			zone->uz_flags |= UMA_ZFLAG_FULL;
2287 			zone->uz_sleeps++;
2288 			msleep(zone, zone->uz_lock, PVM, "zonelimit", hz/100);
2289 			zone->uz_flags &= ~UMA_ZFLAG_FULL;
2290 			continue;
2291 		}
2292 	}
2293 	return (NULL);
2294 }
2295 
2296 static void *
2297 slab_alloc_item(uma_zone_t zone, uma_slab_t slab)
2298 {
2299 	uma_keg_t keg;
2300 	uma_slabrefcnt_t slabref;
2301 	void *item;
2302 	u_int8_t freei;
2303 
2304 	keg = slab->us_keg;
2305 	mtx_assert(&keg->uk_lock, MA_OWNED);
2306 
2307 	freei = slab->us_firstfree;
2308 	if (keg->uk_flags & UMA_ZONE_REFCNT) {
2309 		slabref = (uma_slabrefcnt_t)slab;
2310 		slab->us_firstfree = slabref->us_freelist[freei].us_item;
2311 	} else {
2312 		slab->us_firstfree = slab->us_freelist[freei].us_item;
2313 	}
2314 	item = slab->us_data + (keg->uk_rsize * freei);
2315 
2316 	slab->us_freecount--;
2317 	keg->uk_free--;
2318 #ifdef INVARIANTS
2319 	uma_dbg_alloc(zone, slab, item);
2320 #endif
2321 	/* Move this slab to the full list */
2322 	if (slab->us_freecount == 0) {
2323 		LIST_REMOVE(slab, us_link);
2324 		LIST_INSERT_HEAD(&keg->uk_full_slab, slab, us_link);
2325 	}
2326 
2327 	return (item);
2328 }
2329 
2330 static int
2331 zone_alloc_bucket(uma_zone_t zone, int flags)
2332 {
2333 	uma_bucket_t bucket;
2334 	uma_slab_t slab;
2335 	uma_keg_t keg;
2336 	int16_t saved;
2337 	int max, origflags = flags;
2338 
2339 	/*
2340 	 * Try this zone's free list first so we don't allocate extra buckets.
2341 	 */
2342 	if ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
2343 		KASSERT(bucket->ub_cnt == 0,
2344 		    ("zone_alloc_bucket: Bucket on free list is not empty."));
2345 		LIST_REMOVE(bucket, ub_link);
2346 	} else {
2347 		int bflags;
2348 
2349 		bflags = (flags & ~M_ZERO);
2350 		if (zone->uz_flags & UMA_ZFLAG_CACHEONLY)
2351 			bflags |= M_NOVM;
2352 
2353 		ZONE_UNLOCK(zone);
2354 		bucket = bucket_alloc(zone->uz_count, bflags);
2355 		ZONE_LOCK(zone);
2356 	}
2357 
2358 	if (bucket == NULL) {
2359 		return (0);
2360 	}
2361 
2362 #ifdef SMP
2363 	/*
2364 	 * This code is here to limit the number of simultaneous bucket fills
2365 	 * for any given zone to the number of per-CPU caches in this zone. This
2366 	 * is done so that we don't allocate more memory than we really need.
2367 	 */
2368 	if (zone->uz_fills >= mp_ncpus)
2369 		goto done;
2370 
2371 #endif
2372 	zone->uz_fills++;
2373 
2374 	max = MIN(bucket->ub_entries, zone->uz_count);
2375 	/* Try to keep the buckets totally full */
2376 	saved = bucket->ub_cnt;
2377 	slab = NULL;
2378 	keg = NULL;
2379 	while (bucket->ub_cnt < max &&
2380 	    (slab = zone->uz_slab(zone, keg, flags)) != NULL) {
2381 		keg = slab->us_keg;
2382 		while (slab->us_freecount && bucket->ub_cnt < max) {
2383 			bucket->ub_bucket[bucket->ub_cnt++] =
2384 			    slab_alloc_item(zone, slab);
2385 		}
2386 
2387 		/* Don't block on the next fill */
2388 		flags |= M_NOWAIT;
2389 	}
2390 	if (slab)
2391 		zone_relock(zone, keg);
2392 
2393 	/*
2394 	 * We unlock here because we need to call the zone's init.
2395 	 * It should be safe to unlock because the slab dealt with
2396 	 * above is already on the appropriate list within the keg
2397 	 * and the bucket we filled is not yet on any list, so we
2398 	 * own it.
2399 	 */
2400 	if (zone->uz_init != NULL) {
2401 		int i;
2402 
2403 		ZONE_UNLOCK(zone);
2404 		for (i = saved; i < bucket->ub_cnt; i++)
2405 			if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size,
2406 			    origflags) != 0)
2407 				break;
2408 		/*
2409 		 * If we couldn't initialize the whole bucket, put the
2410 		 * rest back onto the freelist.
2411 		 */
2412 		if (i != bucket->ub_cnt) {
2413 			int j;
2414 
2415 			for (j = i; j < bucket->ub_cnt; j++) {
2416 				zone_free_item(zone, bucket->ub_bucket[j],
2417 				    NULL, SKIP_FINI, 0);
2418 #ifdef INVARIANTS
2419 				bucket->ub_bucket[j] = NULL;
2420 #endif
2421 			}
2422 			bucket->ub_cnt = i;
2423 		}
2424 		ZONE_LOCK(zone);
2425 	}
2426 
2427 	zone->uz_fills--;
2428 	if (bucket->ub_cnt != 0) {
2429 		LIST_INSERT_HEAD(&zone->uz_full_bucket,
2430 		    bucket, ub_link);
2431 		return (1);
2432 	}
2433 #ifdef SMP
2434 done:
2435 #endif
2436 	bucket_free(bucket);
2437 
2438 	return (0);
2439 }

2440 /*
2441  * Allocates an item for an internal zone
2442  *
2443  * Arguments
2444  *	zone   The zone to alloc for.
2445  *	udata  The data to be passed to the constructor.
2446  *	flags  M_WAITOK, M_NOWAIT, M_ZERO.
2447  *
2448  * Returns
2449  *	NULL if there is no memory and M_NOWAIT is set
2450  *	An item if successful
2451  */
2452 
2453 static void *
2454 zone_alloc_item(uma_zone_t zone, void *udata, int flags)
2455 {
2456 	uma_slab_t slab;
2457 	void *item;
2458 
2459 	item = NULL;
2460 
2461 #ifdef UMA_DEBUG_ALLOC
2462 	printf("INTERNAL: Allocating one item from %s(%p)\n", zone->uz_name, zone);
2463 #endif
2464 	ZONE_LOCK(zone);
2465 
2466 	slab = zone->uz_slab(zone, NULL, flags);
2467 	if (slab == NULL) {
2468 		zone->uz_fails++;
2469 		ZONE_UNLOCK(zone);
2470 		return (NULL);
2471 	}
2472 
2473 	item = slab_alloc_item(zone, slab);
2474 
2475 	zone_relock(zone, slab->us_keg);
2476 	zone->uz_allocs++;
2477 	ZONE_UNLOCK(zone);
2478 
2479 	/*
2480 	 * We have to call both the zone's init (not the keg's init)
2481 	 * and the zone's ctor.  This is because the item is going from
2482 	 * a keg slab directly to the user, and the user is expecting it
2483 	 * to be both zone-init'd as well as zone-ctor'd.
2484 	 */
2485 	if (zone->uz_init != NULL) {
2486 		if (zone->uz_init(item, zone->uz_size, flags) != 0) {
2487 			zone_free_item(zone, item, udata, SKIP_FINI,
2488 			    ZFREE_STATFAIL | ZFREE_STATFREE);
2489 			return (NULL);
2490 		}
2491 	}
2492 	if (zone->uz_ctor != NULL) {
2493 		if (zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
2494 			zone_free_item(zone, item, udata, SKIP_DTOR,
2495 			    ZFREE_STATFAIL | ZFREE_STATFREE);
2496 			return (NULL);
2497 		}
2498 	}
2499 	if (flags & M_ZERO)
2500 		bzero(item, zone->uz_size);
2501 
2502 	return (item);
2503 }
2504 
2505 /* See uma.h */
2506 void
2507 uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
2508 {
2509 	uma_cache_t cache;
2510 	uma_bucket_t bucket;
2511 	int bflags;
2512 	int cpu;
2513 
2514 #ifdef UMA_DEBUG_ALLOC_1
2515 	printf("Freeing item %p to %s(%p)\n", item, zone->uz_name, zone);
2516 #endif
2517 	CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread,
2518 	    zone->uz_name);
2519 
2520 	if (zone->uz_dtor)
2521 		zone->uz_dtor(item, zone->uz_size, udata);
2522 
2523 #ifdef INVARIANTS
2524 	ZONE_LOCK(zone);
2525 	if (zone->uz_flags & UMA_ZONE_MALLOC)
2526 		uma_dbg_free(zone, udata, item);
2527 	else
2528 		uma_dbg_free(zone, NULL, item);
2529 	ZONE_UNLOCK(zone);
2530 #endif
2531 	/*
2532 	 * The race here is acceptable.  If we miss it we'll just have to wait
2533 	 * a little longer for the limits to be reset.
2534 	 */
2535 	if (zone->uz_flags & UMA_ZFLAG_FULL)
2536 		goto zfree_internal;
2537 
2538 	/*
2539 	 * If possible, free to the per-CPU cache.  There are two
2540 	 * requirements for safe access to the per-CPU cache: (1) the thread
2541 	 * accessing the cache must not be preempted or yield during access,
2542 	 * and (2) the thread must not migrate CPUs without switching which
2543 	 * cache it accesses.  We rely on a critical section to prevent
2544 	 * preemption and migration.  We release the critical section in
2545 	 * order to acquire the zone mutex if we are unable to free to the
2546 	 * current cache; when we re-acquire the critical section, we must
2547 	 * detect and handle migration if it has occurred.
2548 	 */
2549 zfree_restart:
2550 	critical_enter();
2551 	cpu = curcpu;
2552 	cache = &zone->uz_cpu[cpu];
2553 
2554 zfree_start:
2555 	bucket = cache->uc_freebucket;
2556 
2557 	if (bucket) {
2558 		/*
2559 		 * Do we have room in our bucket? It is OK for this uz count
2560 		 * check to be slightly out of sync.
2561 		 */
2562 
2563 		if (bucket->ub_cnt < bucket->ub_entries) {
2564 			KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL,
2565 			    ("uma_zfree: Freeing to non free bucket index."));
2566 			bucket->ub_bucket[bucket->ub_cnt] = item;
2567 			bucket->ub_cnt++;
2568 			cache->uc_frees++;
2569 			critical_exit();
2570 			return;
2571 		} else if (cache->uc_allocbucket) {
2572 #ifdef UMA_DEBUG_ALLOC
2573 			printf("uma_zfree: Swapping buckets.\n");
2574 #endif
2575 			/*
2576 			 * We have run out of space in our freebucket.
2577 			 * See if we can switch with our alloc bucket.
2578 			 */
2579 			if (cache->uc_allocbucket->ub_cnt <
2580 			    cache->uc_freebucket->ub_cnt) {
2581 				bucket = cache->uc_freebucket;
2582 				cache->uc_freebucket = cache->uc_allocbucket;
2583 				cache->uc_allocbucket = bucket;
2584 				goto zfree_start;
2585 			}
2586 		}
2587 	}
2588 	/*
2589 	 * We can get here for two reasons:
2590 	 *
2591 	 * 1) The buckets are NULL
2592 	 * 2) The alloc and free buckets are both somewhat full.
2593 	 *
2594 	 * We must go back to the zone, which requires acquiring the zone lock,
2595 	 * which in turn means we must release and re-acquire the critical
2596 	 * section.  Since the critical section is released, we may be
2597 	 * preempted or migrate.  As such, make sure not to maintain any
2598 	 * thread-local state specific to the cache from prior to releasing
2599 	 * the critical section.
2600 	 */
2601 	critical_exit();
2602 	ZONE_LOCK(zone);
2603 	critical_enter();
2604 	cpu = curcpu;
2605 	cache = &zone->uz_cpu[cpu];
2606 	if (cache->uc_freebucket != NULL) {
2607 		if (cache->uc_freebucket->ub_cnt <
2608 		    cache->uc_freebucket->ub_entries) {
2609 			ZONE_UNLOCK(zone);
2610 			goto zfree_start;
2611 		}
2612 		if (cache->uc_allocbucket != NULL &&
2613 		    (cache->uc_allocbucket->ub_cnt <
2614 		    cache->uc_freebucket->ub_cnt)) {
2615 			ZONE_UNLOCK(zone);
2616 			goto zfree_start;
2617 		}
2618 	}
2619 
2620 	/* Since we have locked the zone we may as well send back our stats */
2621 	zone->uz_allocs += cache->uc_allocs;
2622 	cache->uc_allocs = 0;
2623 	zone->uz_frees += cache->uc_frees;
2624 	cache->uc_frees = 0;
2625 
2626 	bucket = cache->uc_freebucket;
2627 	cache->uc_freebucket = NULL;
2628 
2629 	/* Can we throw this on the zone full list? */
2630 	if (bucket != NULL) {
2631 #ifdef UMA_DEBUG_ALLOC
2632 		printf("uma_zfree: Putting old bucket on the free list.\n");
2633 #endif
2634 		/* ub_cnt is pointing to the last free item */
2635 		KASSERT(bucket->ub_cnt != 0,
2636 		    ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n"));
2637 		LIST_INSERT_HEAD(&zone->uz_full_bucket,
2638 		    bucket, ub_link);
2639 	}
2640 	if ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
2641 		LIST_REMOVE(bucket, ub_link);
2642 		ZONE_UNLOCK(zone);
2643 		cache->uc_freebucket = bucket;
2644 		goto zfree_start;
2645 	}
2646 	/* We are no longer associated with this CPU. */
2647 	critical_exit();
2648 
2649 	/* And the zone.. */
2650 	ZONE_UNLOCK(zone);
2651 
2652 #ifdef UMA_DEBUG_ALLOC
2653 	printf("uma_zfree: Allocating new free bucket.\n");
2654 #endif
2655 	bflags = M_NOWAIT;
2656 
2657 	if (zone->uz_flags & UMA_ZFLAG_CACHEONLY)
2658 		bflags |= M_NOVM;
2659 	bucket = bucket_alloc(zone->uz_count, bflags);
2660 	if (bucket) {
2661 		ZONE_LOCK(zone);
2662 		LIST_INSERT_HEAD(&zone->uz_free_bucket,
2663 		    bucket, ub_link);
2664 		ZONE_UNLOCK(zone);
2665 		goto zfree_restart;
2666 	}
2667 
2668 	/*
2669 	 * If nothing else caught this, we'll just do an internal free.
2670 	 */
2671 zfree_internal:
2672 	zone_free_item(zone, item, udata, SKIP_DTOR, ZFREE_STATFREE);
2673 
2674 	return;
2675 }
2676 
2677 /*
2678  * Frees an item to an INTERNAL zone or allocates a free bucket
2679  *
2680  * Arguments:
2681  *	zone   The zone to free to
2682  *	item   The item we're freeing
2683  *	udata  User supplied data for the dtor
2684  *	skip   Skip dtors and finis
2685  */
2686 static void
2687 zone_free_item(uma_zone_t zone, void *item, void *udata,
2688     enum zfreeskip skip, int flags)
2689 {
2690 	uma_slab_t slab;
2691 	uma_slabrefcnt_t slabref;
2692 	uma_keg_t keg;
2693 	u_int8_t *mem;
2694 	u_int8_t freei;
2695 	int clearfull;
2696 
2697 	if (skip < SKIP_DTOR && zone->uz_dtor)
2698 		zone->uz_dtor(item, zone->uz_size, udata);
2699 
2700 	if (skip < SKIP_FINI && zone->uz_fini)
2701 		zone->uz_fini(item, zone->uz_size);
2702 
2703 	ZONE_LOCK(zone);
2704 
2705 	if (flags & ZFREE_STATFAIL)
2706 		zone->uz_fails++;
2707 	if (flags & ZFREE_STATFREE)
2708 		zone->uz_frees++;
2709 
2710 	if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) {
2711 		mem = (u_int8_t *)((unsigned long)item & (~UMA_SLAB_MASK));
2712 		keg = zone_first_keg(zone); /* Must only be one. */
2713 		if (zone->uz_flags & UMA_ZONE_HASH) {
2714 			slab = hash_sfind(&keg->uk_hash, mem);
2715 		} else {
2716 			mem += keg->uk_pgoff;
2717 			slab = (uma_slab_t)mem;
2718 		}
2719 	} else {
2720 		/* This prevents redundant lookups via free(). */
2721 		if ((zone->uz_flags & UMA_ZONE_MALLOC) && udata != NULL)
2722 			slab = (uma_slab_t)udata;
2723 		else
2724 			slab = vtoslab((vm_offset_t)item);
2725 		keg = slab->us_keg;
2726 		keg_relock(keg, zone);
2727 	}
2728 	MPASS(keg == slab->us_keg);
2729 
2730 	/* Do we need to remove from any lists? */
2731 	if (slab->us_freecount+1 == keg->uk_ipers) {
2732 		LIST_REMOVE(slab, us_link);
2733 		LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
2734 	} else if (slab->us_freecount == 0) {
2735 		LIST_REMOVE(slab, us_link);
2736 		LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
2737 	}
2738 
2739 	/* Slab management stuff */
2740 	freei = ((unsigned long)item - (unsigned long)slab->us_data)
2741 		/ keg->uk_rsize;
2742 
2743 #ifdef INVARIANTS
2744 	if (!skip)
2745 		uma_dbg_free(zone, slab, item);
2746 #endif
2747 
2748 	if (keg->uk_flags & UMA_ZONE_REFCNT) {
2749 		slabref = (uma_slabrefcnt_t)slab;
2750 		slabref->us_freelist[freei].us_item = slab->us_firstfree;
2751 	} else {
2752 		slab->us_freelist[freei].us_item = slab->us_firstfree;
2753 	}
2754 	slab->us_firstfree = freei;
2755 	slab->us_freecount++;
2756 
2757 	/* Zone statistics */
2758 	keg->uk_free++;
2759 
2760 	clearfull = 0;
2761 	if (keg->uk_flags & UMA_ZFLAG_FULL) {
2762 		if (keg->uk_pages < keg->uk_maxpages) {
2763 			keg->uk_flags &= ~UMA_ZFLAG_FULL;
2764 			clearfull = 1;
2765 		}
2766 
2767 		/*
2768 		 * We can handle one more allocation. Since we're clearing ZFLAG_FULL,
2769 		 * wake up all procs blocked on pages. This should be uncommon, so
2770 		 * keeping this simple for now (rather than adding count of blocked
2771 		 * threads etc).
2772 		 */
2773 		wakeup(keg);
2774 	}
2775 	if (clearfull) {
2776 		zone_relock(zone, keg);
2777 		zone->uz_flags &= ~UMA_ZFLAG_FULL;
2778 		wakeup(zone);
2779 		ZONE_UNLOCK(zone);
2780 	} else
2781 		KEG_UNLOCK(keg);
2782 }
2783 
2784 /* See uma.h */
2785 void
2786 uma_zone_set_max(uma_zone_t zone, int nitems)
2787 {
2788 	uma_keg_t keg;
2789 
2790 	ZONE_LOCK(zone);
2791 	keg = zone_first_keg(zone);
2792 	keg->uk_maxpages = (nitems / keg->uk_ipers) * keg->uk_ppera;
2793 	if (keg->uk_maxpages * keg->uk_ipers < nitems)
2794 		keg->uk_maxpages += keg->uk_ppera;
2795 
2796 	ZONE_UNLOCK(zone);
2797 }
2798 
2799 /* See uma.h */
2800 int
2801 uma_zone_get_max(uma_zone_t zone)
2802 {
2803 	int nitems;
2804 	uma_keg_t keg;
2805 
2806 	ZONE_LOCK(zone);
2807 	keg = zone_first_keg(zone);
2808 	if (keg->uk_maxpages)
2809 		nitems = keg->uk_maxpages * keg->uk_ipers;
2810 	else
2811 		nitems = 0;
2812 	ZONE_UNLOCK(zone);
2813 
2814 	return (nitems);
2815 }
2816 
2817 /* See uma.h */
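/*
 * Example (illustrative sketch; foo_zone is hypothetical): the limit is
 * stored in pages, so the effective cap is rounded up to whole slabs and
 * uma_zone_get_max() may report more items than were requested.
 *
 *	uma_zone_set_max(foo_zone, 1000);
 *	printf("effective limit: %d items\n", uma_zone_get_max(foo_zone));
 */
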
2818 void
2819 uma_zone_set_init(uma_zone_t zone, uma_init uminit)
2820 {
2821 	uma_keg_t keg;
2822 
2823 	ZONE_LOCK(zone);
2824 	keg = zone_first_keg(zone);
2825 	KASSERT(keg->uk_pages == 0,
2826 	    ("uma_zone_set_init on non-empty keg"));
2827 	keg->uk_init = uminit;
2828 	ZONE_UNLOCK(zone);
2829 }
2830 
2831 /* See uma.h */
2832 void
2833 uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
2834 {
2835 	uma_keg_t keg;
2836 
2837 	ZONE_LOCK(zone);
2838 	keg = zone_first_keg(zone);
2839 	KASSERT(keg->uk_pages == 0,
2840 	    ("uma_zone_set_fini on non-empty keg"));
2841 	keg->uk_fini = fini;
2842 	ZONE_UNLOCK(zone);
2843 }
2844 
2845 /* See uma.h */
2846 void
2847 uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
2848 {
2849 	ZONE_LOCK(zone);
2850 	KASSERT(zone_first_keg(zone)->uk_pages == 0,
2851 	    ("uma_zone_set_zinit on non-empty keg"));
2852 	zone->uz_init = zinit;
2853 	ZONE_UNLOCK(zone);
2854 }
2855 
2856 /* See uma.h */
2857 void
2858 uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
2859 {
2860 	ZONE_LOCK(zone);
2861 	KASSERT(zone_first_keg(zone)->uk_pages == 0,
2862 	    ("uma_zone_set_zfini on non-empty keg"));
2863 	zone->uz_fini = zfini;
2864 	ZONE_UNLOCK(zone);
2865 }
2866 
2867 /* See uma.h */
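/*
 * Note (a sketch of the intended usage; foo_* names are hypothetical):
 * uk_init/uk_fini run when slabs are created and destroyed, so state set
 * up by the keg-level hooks survives ordinary alloc/free cycles, while
 * uz_init/uz_fini run as items move between the keg and the zone and are
 * what uma_zone_set_zinit()/uma_zone_set_zfini() install.
 *
 *	uma_zone_set_init(foo_zone, foo_slab_init);
 *	uma_zone_set_zinit(foo_zone, foo_item_init);
 */
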
2868 /* XXX uk_freef is not actually used with the zone locked */
2869 void
2870 uma_zone_set_freef(uma_zone_t zone, uma_free freef)
2871 {
2872 
2873 	ZONE_LOCK(zone);
2874 	zone_first_keg(zone)->uk_freef = freef;
2875 	ZONE_UNLOCK(zone);
2876 }
2877 
2878 /* See uma.h */
2879 /* XXX uk_allocf is not actually used with the zone locked */
2880 void
2881 uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
2882 {
2883 	uma_keg_t keg;
2884 
2885 	ZONE_LOCK(zone);
2886 	keg = zone_first_keg(zone);
2887 	keg->uk_flags |= UMA_ZFLAG_PRIVALLOC;
2888 	keg->uk_allocf = allocf;
2889 	ZONE_UNLOCK(zone);
2890 }
2891 
2892 /* See uma.h */
2893 int
2894 uma_zone_set_obj(uma_zone_t zone, struct vm_object *obj, int count)
2895 {
2896 	uma_keg_t keg;
2897 	vm_offset_t kva;
2898 	int pages;
2899 
2900 	keg = zone_first_keg(zone);
2901 	pages = count / keg->uk_ipers;
2902 
2903 	if (pages * keg->uk_ipers < count)
2904 		pages++;
2905 
2906 	kva = kmem_alloc_nofault(kernel_map, pages * UMA_SLAB_SIZE);
2907 
2908 	if (kva == 0)
2909 		return (0);
2910 	if (obj == NULL)
2911 		obj = vm_object_allocate(OBJT_PHYS, pages);
2912 	else {
2913 		VM_OBJECT_LOCK_INIT(obj, "uma object");
2914 		_vm_object_allocate(OBJT_PHYS, pages, obj);
2915 	}
2916 	ZONE_LOCK(zone);
2917 	keg->uk_kva = kva;
2918 	keg->uk_obj = obj;
2919 	keg->uk_maxpages = pages;
2920 	keg->uk_allocf = obj_alloc;
2921 	keg->uk_flags |= UMA_ZONE_NOFREE | UMA_ZFLAG_PRIVALLOC;
2922 	ZONE_UNLOCK(zone);
2923 	return (1);
2924 }
2925 
2926 /* See uma.h */
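/*
 * Example (illustrative sketch; foo_zone and nitems are hypothetical):
 * backing a zone with a VM object pre-reserves kernel VA for at most
 * "count" items; passing a NULL obj makes UMA allocate an OBJT_PHYS
 * object itself.  Returns 1 on success and 0 on failure.
 *
 *	if (uma_zone_set_obj(foo_zone, NULL, nitems) == 0)
 *		panic("cannot reserve VA for foo_zone");
 */
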
2927 void
2928 uma_prealloc(uma_zone_t zone, int items)
2929 {
2930 	int slabs;
2931 	uma_slab_t slab;
2932 	uma_keg_t keg;
2933 
2934 	keg = zone_first_keg(zone);
2935 	ZONE_LOCK(zone);
2936 	slabs = items / keg->uk_ipers;
2937 	if (slabs * keg->uk_ipers < items)
2938 		slabs++;
2939 	while (slabs > 0) {
2940 		slab = keg_alloc_slab(keg, zone, M_WAITOK);
2941 		if (slab == NULL)
2942 			break;
2943 		MPASS(slab->us_keg == keg);
2944 		LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
2945 		slabs--;
2946 	}
2947 	ZONE_UNLOCK(zone);
2948 }
2949 
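/*
 * Example (illustrative sketch; foo_zone is hypothetical): subsystems that
 * allocate from contexts which cannot sleep can warm a zone up front so
 * that later M_NOWAIT allocations are likely to find free slabs.
 *
 *	uma_prealloc(foo_zone, 256);
 */
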
2950 /* See uma.h */
2951 u_int32_t *
2952 uma_find_refcnt(uma_zone_t zone, void *item)
2953 {
2954 	uma_slabrefcnt_t slabref;
2955 	uma_keg_t keg;
2956 	u_int32_t *refcnt;
2957 	int idx;
2958 
2959 	slabref = (uma_slabrefcnt_t)vtoslab((vm_offset_t)item &
2960 	    (~UMA_SLAB_MASK));
2961 	KASSERT(slabref != NULL && slabref->us_keg->uk_flags & UMA_ZONE_REFCNT,
2962 	    ("uma_find_refcnt(): zone possibly not UMA_ZONE_REFCNT"));
2963 	keg = slabref->us_keg;
2964 	idx = ((unsigned long)item - (unsigned long)slabref->us_data)
2965 	    / keg->uk_rsize;
2966 	refcnt = &slabref->us_freelist[idx].us_refcnt;
2967 	return (refcnt);
2968 }
2969 
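/*
 * Example (illustrative sketch): for UMA_ZONE_REFCNT zones the reference
 * count is kept in the slab's free-list entry for the item; the mbuf
 * cluster code uses this to share external storage among mbufs.
 *
 *	u_int32_t *refcnt;
 *
 *	refcnt = uma_find_refcnt(zone, item);
 *	*refcnt = 1;
 */
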
2970 /* See uma.h */
2971 void
2972 uma_reclaim(void)
2973 {
2974 #ifdef UMA_DEBUG
2975 	printf("UMA: vm asked us to release pages!\n");
2976 #endif
2977 	bucket_enable();
2978 	zone_foreach(zone_drain);
2979 	/*
2980 	 * Some slabs may have been freed since the slab zones were visited
2981 	 * early in the pass above; drain them again to free pages that become
2982 	 * empty once the other zones are drained, and do the same for buckets.
2983 	 */
2984 	zone_drain(slabzone);
2985 	zone_drain(slabrefzone);
2986 	bucket_zone_drain();
2987 }
2988 
2989 /* See uma.h */
2990 int
2991 uma_zone_exhausted(uma_zone_t zone)
2992 {
2993 	int full;
2994 
2995 	ZONE_LOCK(zone);
2996 	full = (zone->uz_flags & UMA_ZFLAG_FULL);
2997 	ZONE_UNLOCK(zone);
2998 	return (full);
2999 }
3000 
3001 int
3002 uma_zone_exhausted_nolock(uma_zone_t zone)
3003 {
3004 	return (zone->uz_flags & UMA_ZFLAG_FULL);
3005 }
3006 
3007 void *
3008 uma_large_malloc(int size, int wait)
3009 {
3010 	void *mem;
3011 	uma_slab_t slab;
3012 	u_int8_t flags;
3013 
3014 	slab = zone_alloc_item(slabzone, NULL, wait);
3015 	if (slab == NULL)
3016 		return (NULL);
3017 	mem = page_alloc(NULL, size, &flags, wait);
3018 	if (mem) {
3019 		vsetslab((vm_offset_t)mem, slab);
3020 		slab->us_data = mem;
3021 		slab->us_flags = flags | UMA_SLAB_MALLOC;
3022 		slab->us_size = size;
3023 	} else {
3024 		zone_free_item(slabzone, slab, NULL, SKIP_NONE,
3025 		    ZFREE_STATFAIL | ZFREE_STATFREE);
3026 	}
3027 
3028 	return (mem);
3029 }
3030 
3031 void
3032 uma_large_free(uma_slab_t slab)
3033 {
3034 	vsetobj((vm_offset_t)slab->us_data, kmem_object);
3035 	page_free(slab->us_data, slab->us_size, slab->us_flags);
3036 	zone_free_item(slabzone, slab, NULL, SKIP_NONE, ZFREE_STATFREE);
3037 }
3038 
3039 void
3040 uma_print_stats(void)
3041 {
3042 	zone_foreach(uma_print_zone);
3043 }
3044 
3045 static void
3046 slab_print(uma_slab_t slab)
3047 {
3048 	printf("slab: keg %p, data %p, freecount %d, firstfree %d\n",
3049 		slab->us_keg, slab->us_data, slab->us_freecount,
3050 		slab->us_firstfree);
3051 }
3052 
3053 static void
3054 cache_print(uma_cache_t cache)
3055 {
3056 	printf("alloc: %p(%d), free: %p(%d)\n",
3057 		cache->uc_allocbucket,
3058 		cache->uc_allocbucket?cache->uc_allocbucket->ub_cnt:0,
3059 		cache->uc_freebucket,
3060 		cache->uc_freebucket?cache->uc_freebucket->ub_cnt:0);
3061 }
3062 
3063 static void
3064 uma_print_keg(uma_keg_t keg)
3065 {
3066 	uma_slab_t slab;
3067 
3068 	printf("keg: %s(%p) size %d(%d) flags %d ipers %d ppera %d "
3069 	    "out %d free %d limit %d\n",
3070 	    keg->uk_name, keg, keg->uk_size, keg->uk_rsize, keg->uk_flags,
3071 	    keg->uk_ipers, keg->uk_ppera,
3072 	    (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free,
3073 	    (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers);
3074 	printf("Part slabs:\n");
3075 	LIST_FOREACH(slab, &keg->uk_part_slab, us_link)
3076 		slab_print(slab);
3077 	printf("Free slabs:\n");
3078 	LIST_FOREACH(slab, &keg->uk_free_slab, us_link)
3079 		slab_print(slab);
3080 	printf("Full slabs:\n");
3081 	LIST_FOREACH(slab, &keg->uk_full_slab, us_link)
3082 		slab_print(slab);
3083 }
3084 
3085 void
3086 uma_print_zone(uma_zone_t zone)
3087 {
3088 	uma_cache_t cache;
3089 	uma_klink_t kl;
3090 	int i;
3091 
3092 	printf("zone: %s(%p) size %d flags %d\n",
3093 	    zone->uz_name, zone, zone->uz_size, zone->uz_flags);
3094 	LIST_FOREACH(kl, &zone->uz_kegs, kl_link)
3095 		uma_print_keg(kl->kl_keg);
3096 	CPU_FOREACH(i) {
3097 		cache = &zone->uz_cpu[i];
3098 		printf("CPU %d Cache:\n", i);
3099 		cache_print(cache);
3100 	}
3101 }
3102 
3103 #ifdef DDB
3104 /*
3105  * Generate statistics across both the zone and its per-CPU caches.  Return
3106  * each desired statistic through its pointer if that pointer is non-NULL.
3107  *
3108  * Note: does not update the zone statistics, as it can't safely clear the
3109  * per-CPU cache statistic.
3110  *
3111  * XXXRW: Following the uc_allocbucket and uc_freebucket pointers here isn't
3112  * safe from off-CPU; we should modify the caches to track this information
3113  * directly so that we don't have to.
3114  */
3115 static void
3116 uma_zone_sumstat(uma_zone_t z, int *cachefreep, u_int64_t *allocsp,
3117     u_int64_t *freesp, u_int64_t *sleepsp)
3118 {
3119 	uma_cache_t cache;
3120 	u_int64_t allocs, frees, sleeps;
3121 	int cachefree, cpu;
3122 
3123 	allocs = frees = sleeps = 0;
3124 	cachefree = 0;
3125 	CPU_FOREACH(cpu) {
3126 		cache = &z->uz_cpu[cpu];
3127 		if (cache->uc_allocbucket != NULL)
3128 			cachefree += cache->uc_allocbucket->ub_cnt;
3129 		if (cache->uc_freebucket != NULL)
3130 			cachefree += cache->uc_freebucket->ub_cnt;
3131 		allocs += cache->uc_allocs;
3132 		frees += cache->uc_frees;
3133 	}
3134 	allocs += z->uz_allocs;
3135 	frees += z->uz_frees;
3136 	sleeps += z->uz_sleeps;
3137 	if (cachefreep != NULL)
3138 		*cachefreep = cachefree;
3139 	if (allocsp != NULL)
3140 		*allocsp = allocs;
3141 	if (freesp != NULL)
3142 		*freesp = frees;
3143 	if (sleepsp != NULL)
3144 		*sleepsp = sleeps;
3145 }
3146 #endif /* DDB */
3147 
3148 static int
3149 sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS)
3150 {
3151 	uma_keg_t kz;
3152 	uma_zone_t z;
3153 	int count;
3154 
3155 	count = 0;
3156 	mtx_lock(&uma_mtx);
3157 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
3158 		LIST_FOREACH(z, &kz->uk_zones, uz_link)
3159 			count++;
3160 	}
3161 	mtx_unlock(&uma_mtx);
3162 	return (sysctl_handle_int(oidp, &count, 0, req));
3163 }
3164 
3165 static int
3166 sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
3167 {
3168 	struct uma_stream_header ush;
3169 	struct uma_type_header uth;
3170 	struct uma_percpu_stat ups;
3171 	uma_bucket_t bucket;
3172 	struct sbuf sbuf;
3173 	uma_cache_t cache;
3174 	uma_klink_t kl;
3175 	uma_keg_t kz;
3176 	uma_zone_t z;
3177 	uma_keg_t k;
3178 	int count, error, i;
3179 
3180 	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
3181 
3182 	count = 0;
3183 	mtx_lock(&uma_mtx);
3184 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
3185 		LIST_FOREACH(z, &kz->uk_zones, uz_link)
3186 			count++;
3187 	}
3188 
3189 	/*
3190 	 * Insert stream header.
3191 	 */
3192 	bzero(&ush, sizeof(ush));
3193 	ush.ush_version = UMA_STREAM_VERSION;
3194 	ush.ush_maxcpus = (mp_maxid + 1);
3195 	ush.ush_count = count;
3196 	(void)sbuf_bcat(&sbuf, &ush, sizeof(ush));
3197 
3198 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
3199 		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
3200 			bzero(&uth, sizeof(uth));
3201 			ZONE_LOCK(z);
3202 			strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
3203 			uth.uth_align = kz->uk_align;
3204 			uth.uth_size = kz->uk_size;
3205 			uth.uth_rsize = kz->uk_rsize;
3206 			LIST_FOREACH(kl, &z->uz_kegs, kl_link) {
3207 				k = kl->kl_keg;
3208 				uth.uth_maxpages += k->uk_maxpages;
3209 				uth.uth_pages += k->uk_pages;
3210 				uth.uth_keg_free += k->uk_free;
3211 				uth.uth_limit = (k->uk_maxpages / k->uk_ppera)
3212 				    * k->uk_ipers;
3213 			}
3214 
3215 			/*
3216 			 * A zone is secondary if it is not the first entry
3217 			 * on the keg's zone list.
3218 			 */
3219 			if ((z->uz_flags & UMA_ZONE_SECONDARY) &&
3220 			    (LIST_FIRST(&kz->uk_zones) != z))
3221 				uth.uth_zone_flags = UTH_ZONE_SECONDARY;
3222 
3223 			LIST_FOREACH(bucket, &z->uz_full_bucket, ub_link)
3224 				uth.uth_zone_free += bucket->ub_cnt;
3225 			uth.uth_allocs = z->uz_allocs;
3226 			uth.uth_frees = z->uz_frees;
3227 			uth.uth_fails = z->uz_fails;
3228 			uth.uth_sleeps = z->uz_sleeps;
3229 			(void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
3230 			/*
3231 			 * While it is not normally safe to access the cache
3232 			 * bucket pointers while not on the CPU that owns the
3233 			 * cache, we only allow the pointers to be exchanged
3234 			 * without the zone lock held, not invalidated, so
3235 			 * accept the possible race associated with bucket
3236 			 * exchange during monitoring.
3237 			 */
3238 			for (i = 0; i < (mp_maxid + 1); i++) {
3239 				bzero(&ups, sizeof(ups));
3240 				if (kz->uk_flags & UMA_ZFLAG_INTERNAL)
3241 					goto skip;
3242 				if (CPU_ABSENT(i))
3243 					goto skip;
3244 				cache = &z->uz_cpu[i];
3245 				if (cache->uc_allocbucket != NULL)
3246 					ups.ups_cache_free +=
3247 					    cache->uc_allocbucket->ub_cnt;
3248 				if (cache->uc_freebucket != NULL)
3249 					ups.ups_cache_free +=
3250 					    cache->uc_freebucket->ub_cnt;
3251 				ups.ups_allocs = cache->uc_allocs;
3252 				ups.ups_frees = cache->uc_frees;
3253 skip:
3254 				(void)sbuf_bcat(&sbuf, &ups, sizeof(ups));
3255 			}
3256 			ZONE_UNLOCK(z);
3257 		}
3258 	}
3259 	mtx_unlock(&uma_mtx);
3260 	error = sbuf_finish(&sbuf);
3261 	sbuf_delete(&sbuf);
3262 	return (error);
3263 }
3264 
3265 #ifdef DDB
3266 DB_SHOW_COMMAND(uma, db_show_uma)
3267 {
3268 	u_int64_t allocs, frees, sleeps;
3269 	uma_bucket_t bucket;
3270 	uma_keg_t kz;
3271 	uma_zone_t z;
3272 	int cachefree;
3273 
3274 	db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free",
3275 	    "Requests", "Sleeps");
3276 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
3277 		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
3278 			if (kz->uk_flags & UMA_ZFLAG_INTERNAL) {
3279 				allocs = z->uz_allocs;
3280 				frees = z->uz_frees;
3281 				sleeps = z->uz_sleeps;
3282 				cachefree = 0;
3283 			} else
3284 				uma_zone_sumstat(z, &cachefree, &allocs,
3285 				    &frees, &sleeps);
3286 			if (!((z->uz_flags & UMA_ZONE_SECONDARY) &&
3287 			    (LIST_FIRST(&kz->uk_zones) != z)))
3288 				cachefree += kz->uk_free;
3289 			LIST_FOREACH(bucket, &z->uz_full_bucket, ub_link)
3290 				cachefree += bucket->ub_cnt;
3291 			db_printf("%18s %8ju %8jd %8d %12ju %8ju\n", z->uz_name,
3292 			    (uintmax_t)kz->uk_size,
3293 			    (intmax_t)(allocs - frees), cachefree,
3294 			    (uintmax_t)allocs, sleeps);
3295 		}
3296 	}
3297 }
3298 #endif
3299