xref: /freebsd/sys/vm/uma_core.c (revision 262e143bd46171a6415a5b28af260a5efa2a3db8)
1 /*-
2  * Copyright (c) 2002, 2003, 2004, 2005 Jeffrey Roberson <jeff@FreeBSD.org>
3  * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
4  * Copyright (c) 2004-2005 Robert N. M. Watson
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice unmodified, this list of conditions, and the following
12  *    disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 /*
30  * uma_core.c  Implementation of the Universal Memory allocator
31  *
32  * This allocator is intended to replace the multitude of similar object caches
33  * in the standard FreeBSD kernel.  The intent is to be flexible as well as
34  * efficient.  A primary design goal is to return unused memory to the rest of
35  * the system.  This will make the system as a whole more flexible due to the
36  * ability to move memory to subsystems which most need it instead of leaving
37  * pools of reserved memory unused.
38  *
39  * The basic ideas stem from similar slab/zone based allocators whose algorithms
40  * are well known.
41  *
42  */
43 
44 /*
45  * TODO:
46  *	- Improve memory usage for large allocations
47  *	- Investigate cache size adjustments
48  */
49 
50 #include <sys/cdefs.h>
51 __FBSDID("$FreeBSD$");
52 
53 /* I should really use ktr.. */
54 /*
55 #define UMA_DEBUG 1
56 #define UMA_DEBUG_ALLOC 1
57 #define UMA_DEBUG_ALLOC_1 1
58 */
59 
60 #include "opt_ddb.h"
61 #include "opt_param.h"
62 
63 #include <sys/param.h>
64 #include <sys/systm.h>
65 #include <sys/kernel.h>
66 #include <sys/types.h>
67 #include <sys/queue.h>
68 #include <sys/malloc.h>
69 #include <sys/ktr.h>
70 #include <sys/lock.h>
71 #include <sys/sysctl.h>
72 #include <sys/mutex.h>
73 #include <sys/proc.h>
74 #include <sys/sbuf.h>
75 #include <sys/smp.h>
76 #include <sys/vmmeter.h>
77 
78 #include <vm/vm.h>
79 #include <vm/vm_object.h>
80 #include <vm/vm_page.h>
81 #include <vm/vm_param.h>
82 #include <vm/vm_map.h>
83 #include <vm/vm_kern.h>
84 #include <vm/vm_extern.h>
85 #include <vm/uma.h>
86 #include <vm/uma_int.h>
87 #include <vm/uma_dbg.h>
88 
89 #include <machine/vmparam.h>
90 
91 #include <ddb/ddb.h>
92 
93 /*
94  * This is the zone and keg from which all zones are spawned.  The idea is that
95  * even the zone & keg heads are allocated from the allocator, so we use the
96  * bss section to bootstrap us.
97  */
98 static struct uma_keg masterkeg;
99 static struct uma_zone masterzone_k;
100 static struct uma_zone masterzone_z;
101 static uma_zone_t kegs = &masterzone_k;
102 static uma_zone_t zones = &masterzone_z;
103 
104 /* This is the zone from which all of uma_slab_t's are allocated. */
105 static uma_zone_t slabzone;
106 static uma_zone_t slabrefzone;	/* With refcounters (for UMA_ZONE_REFCNT) */
107 
108 /*
109  * The initial hash tables come out of this zone so they can be allocated
110  * prior to malloc coming up.
111  */
112 static uma_zone_t hashzone;
113 
114 static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");
115 
116 /*
117  * Are we allowed to allocate buckets?
118  */
119 static int bucketdisable = 1;
120 
121 /* Linked list of all kegs in the system */
122 static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(&uma_kegs);
123 
124 /* This mutex protects the keg list */
125 static struct mtx uma_mtx;
126 
127 /* Linked list of boot time pages */
128 static LIST_HEAD(,uma_slab) uma_boot_pages =
129     LIST_HEAD_INITIALIZER(&uma_boot_pages);
130 
131 /* This mutex protects the boot time pages list */
132 static struct mtx uma_boot_pages_mtx;
133 
134 /* Is the VM done starting up? */
135 static int booted = 0;
136 
137 /* Maximum number of allowed items-per-slab if the slab header is OFFPAGE */
138 static u_int uma_max_ipers;
139 static u_int uma_max_ipers_ref;
140 
141 /*
142  * This is the handle used to schedule events that need to happen
143  * outside of the allocation fast path.
144  */
145 static struct callout uma_callout;
146 #define	UMA_TIMEOUT	20		/* Seconds for callout interval. */
147 
148 /*
149  * This structure is passed as the zone ctor arg so that I don't have to create
150  * a special allocation function just for zones.
151  */
152 struct uma_zctor_args {
153 	char *name;
154 	size_t size;
155 	uma_ctor ctor;
156 	uma_dtor dtor;
157 	uma_init uminit;
158 	uma_fini fini;
159 	uma_keg_t keg;
160 	int align;
161 	u_int32_t flags;
162 };
163 
164 struct uma_kctor_args {
165 	uma_zone_t zone;
166 	size_t size;
167 	uma_init uminit;
168 	uma_fini fini;
169 	int align;
170 	u_int32_t flags;
171 };
172 
173 struct uma_bucket_zone {
174 	uma_zone_t	ubz_zone;
175 	char		*ubz_name;
176 	int		ubz_entries;
177 };
178 
179 #define	BUCKET_MAX	128
180 
181 struct uma_bucket_zone bucket_zones[] = {
182 	{ NULL, "16 Bucket", 16 },
183 	{ NULL, "32 Bucket", 32 },
184 	{ NULL, "64 Bucket", 64 },
185 	{ NULL, "128 Bucket", 128 },
186 	{ NULL, NULL, 0}
187 };
188 
189 #define	BUCKET_SHIFT	4
190 #define	BUCKET_ZONES	((BUCKET_MAX >> BUCKET_SHIFT) + 1)
191 
192 /*
193  * bucket_size[] maps requested bucket sizes to zones that allocate a bucket
194  * of approximately the right size.
195  */
196 static uint8_t bucket_size[BUCKET_ZONES];
197 
198 /*
199  * Flags and enumerations to be passed to internal functions.
200  */
201 enum zfreeskip { SKIP_NONE, SKIP_DTOR, SKIP_FINI };
202 
203 #define	ZFREE_STATFAIL	0x00000001	/* Update zone failure statistic. */
204 #define	ZFREE_STATFREE	0x00000002	/* Update zone free statistic. */
205 
206 /* Prototypes.. */
207 
208 static void *obj_alloc(uma_zone_t, int, u_int8_t *, int);
209 static void *page_alloc(uma_zone_t, int, u_int8_t *, int);
210 static void *startup_alloc(uma_zone_t, int, u_int8_t *, int);
211 static void page_free(void *, int, u_int8_t);
212 static uma_slab_t slab_zalloc(uma_zone_t, int);
213 static void cache_drain(uma_zone_t);
214 static void bucket_drain(uma_zone_t, uma_bucket_t);
215 static void bucket_cache_drain(uma_zone_t zone);
216 static int keg_ctor(void *, int, void *, int);
217 static void keg_dtor(void *, int, void *);
218 static int zone_ctor(void *, int, void *, int);
219 static void zone_dtor(void *, int, void *);
220 static int zero_init(void *, int, int);
221 static void zone_small_init(uma_zone_t zone);
222 static void zone_large_init(uma_zone_t zone);
223 static void zone_foreach(void (*zfunc)(uma_zone_t));
224 static void zone_timeout(uma_zone_t zone);
225 static int hash_alloc(struct uma_hash *);
226 static int hash_expand(struct uma_hash *, struct uma_hash *);
227 static void hash_free(struct uma_hash *hash);
228 static void uma_timeout(void *);
229 static void uma_startup3(void);
230 static void *uma_zalloc_internal(uma_zone_t, void *, int);
231 static void uma_zfree_internal(uma_zone_t, void *, void *, enum zfreeskip,
232     int);
233 static void bucket_enable(void);
234 static void bucket_init(void);
235 static uma_bucket_t bucket_alloc(int, int);
236 static void bucket_free(uma_bucket_t);
237 static void bucket_zone_drain(void);
238 static int uma_zalloc_bucket(uma_zone_t zone, int flags);
239 static uma_slab_t uma_zone_slab(uma_zone_t zone, int flags);
240 static void *uma_slab_alloc(uma_zone_t zone, uma_slab_t slab);
241 static void zone_drain(uma_zone_t);
242 static uma_zone_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
243     uma_fini fini, int align, u_int32_t flags);
244 
245 void uma_print_zone(uma_zone_t);
246 void uma_print_stats(void);
247 static int sysctl_vm_zone(SYSCTL_HANDLER_ARGS);
248 static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
249 static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);
250 
251 #ifdef WITNESS
252 static int nosleepwithlocks = 1;
253 SYSCTL_INT(_debug, OID_AUTO, nosleepwithlocks, CTLFLAG_RW, &nosleepwithlocks,
254     0, "Convert M_WAITOK to M_NOWAIT to avoid lock-held-across-sleep paths");
255 #else
256 static int nosleepwithlocks = 0;
257 SYSCTL_INT(_debug, OID_AUTO, nosleepwithlocks, CTLFLAG_RW, &nosleepwithlocks,
258     0, "Convert M_WAITOK to M_NOWAIT to avoid lock-held-across-sleep paths");
259 #endif
260 SYSCTL_OID(_vm, OID_AUTO, zone, CTLTYPE_STRING|CTLFLAG_RD,
261     NULL, 0, sysctl_vm_zone, "A", "Zone Info");
262 SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);
263 
264 SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLTYPE_INT,
265     0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones");
266 
267 SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
268     0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");
269 
270 /*
271  * This routine checks to see whether or not it's safe to enable buckets.
272  */
273 
274 static void
275 bucket_enable(void)
276 {
277 	if (cnt.v_free_count < cnt.v_free_min)
278 		bucketdisable = 1;
279 	else
280 		bucketdisable = 0;
281 }
282 
283 /*
284  * Initialize bucket_zones, the array of zones of buckets of various sizes.
285  *
286  * For each zone, calculate the memory required for each bucket, consisting
287  * of the header and an array of pointers.  Initialize bucket_size[] so that
288  * each range of requested entry counts maps to the appropriate zone.
289  */
290 static void
291 bucket_init(void)
292 {
293 	struct uma_bucket_zone *ubz;
294 	int i;
295 	int j;
296 
297 	for (i = 0, j = 0; bucket_zones[j].ubz_entries != 0; j++) {
298 		int size;
299 
300 		ubz = &bucket_zones[j];
301 		size = roundup(sizeof(struct uma_bucket), sizeof(void *));
302 		size += sizeof(void *) * ubz->ubz_entries;
303 		ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
304 		    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
305 		for (; i <= ubz->ubz_entries; i += (1 << BUCKET_SHIFT))
306 			bucket_size[i >> BUCKET_SHIFT] = j;
307 	}
308 }
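/*
 * With the table and BUCKET_SHIFT above, bucket_init() leaves bucket_size[]
 * mapping requests for 1-16 entries to the "16 Bucket" zone, 17-32 to
 * "32 Bucket", 33-64 to "64 Bucket" and 65-128 to "128 Bucket";
 * bucket_zone_lookup() below simply indexes that table.
 */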
309 
310 /*
311  * Given a desired number of entries for a bucket, return the zone from which
312  * to allocate the bucket.
313  */
314 static struct uma_bucket_zone *
315 bucket_zone_lookup(int entries)
316 {
317 	int idx;
318 
319 	idx = howmany(entries, 1 << BUCKET_SHIFT);
320 	return (&bucket_zones[bucket_size[idx]]);
321 }
322 
323 static uma_bucket_t
324 bucket_alloc(int entries, int bflags)
325 {
326 	struct uma_bucket_zone *ubz;
327 	uma_bucket_t bucket;
328 
329 	/*
330 	 * This is to stop us from allocating per cpu buckets while we're
331 	 * running out of vm.boot_pages.  Otherwise, we would exhaust the
332 	 * boot pages.  This also prevents us from allocating buckets in
333 	 * low memory situations.
334 	 */
335 	if (bucketdisable)
336 		return (NULL);
337 
338 	ubz = bucket_zone_lookup(entries);
339 	bucket = uma_zalloc_internal(ubz->ubz_zone, NULL, bflags);
340 	if (bucket) {
341 #ifdef INVARIANTS
342 		bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
343 #endif
344 		bucket->ub_cnt = 0;
345 		bucket->ub_entries = ubz->ubz_entries;
346 	}
347 
348 	return (bucket);
349 }
350 
351 static void
352 bucket_free(uma_bucket_t bucket)
353 {
354 	struct uma_bucket_zone *ubz;
355 
356 	ubz = bucket_zone_lookup(bucket->ub_entries);
357 	uma_zfree_internal(ubz->ubz_zone, bucket, NULL, SKIP_NONE,
358 	    ZFREE_STATFREE);
359 }
360 
361 static void
362 bucket_zone_drain(void)
363 {
364 	struct uma_bucket_zone *ubz;
365 
366 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
367 		zone_drain(ubz->ubz_zone);
368 }
369 
370 
371 /*
372  * Routine called by the callout to fire off time-interval based
373  * calculations.  (stats, hash size, etc.)
374  *
375  * Arguments:
376  *	arg   Unused
377  *
378  * Returns:
379  *	Nothing
380  */
381 static void
382 uma_timeout(void *unused)
383 {
384 	bucket_enable();
385 	zone_foreach(zone_timeout);
386 
387 	/* Reschedule this event */
388 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
389 }
390 
391 /*
392  * Routine to perform timeout driven calculations.  This expands the
393  * hashes and does per cpu statistics aggregation.
394  *
395  *  Arguments:
396  *	zone  The zone to operate on
397  *
398  *  Returns:
399  *	Nothing
400  */
401 static void
402 zone_timeout(uma_zone_t zone)
403 {
404 	uma_keg_t keg;
405 	u_int64_t alloc;
406 
407 	keg = zone->uz_keg;
408 	alloc = 0;
409 
410 	/*
411 	 * Expand the zone hash table.
412 	 *
413 	 * This is done if the number of slabs is larger than the hash size.
414  * The goal here is to eliminate collisions entirely.  This
415 	 * may be a little aggressive.  Should I allow for two collisions max?
416 	 */
417 	ZONE_LOCK(zone);
418 	if (keg->uk_flags & UMA_ZONE_HASH &&
419 	    keg->uk_pages / keg->uk_ppera >= keg->uk_hash.uh_hashsize) {
420 		struct uma_hash newhash;
421 		struct uma_hash oldhash;
422 		int ret;
423 
424 		/*
425 		 * This is so involved because allocating and freeing
426 		 * while the zone lock is held will lead to deadlock.
427 		 * I have to do everything in stages and check for
428 		 * races.
429 		 */
430 		newhash = keg->uk_hash;
431 		ZONE_UNLOCK(zone);
432 		ret = hash_alloc(&newhash);
433 		ZONE_LOCK(zone);
434 		if (ret) {
435 			if (hash_expand(&keg->uk_hash, &newhash)) {
436 				oldhash = keg->uk_hash;
437 				keg->uk_hash = newhash;
438 			} else
439 				oldhash = newhash;
440 
441 			ZONE_UNLOCK(zone);
442 			hash_free(&oldhash);
443 			ZONE_LOCK(zone);
444 		}
445 	}
446 	ZONE_UNLOCK(zone);
447 }
448 
449 /*
450  * Allocate and zero-fill the next larger hash table from the appropriate
451  * backing store.
452  *
453  * Arguments:
454  *	hash  A new hash structure with the old hash size in uh_hashsize
455  *
456  * Returns:
457  *	1 on success and 0 on failure.
458  */
459 static int
460 hash_alloc(struct uma_hash *hash)
461 {
462 	int oldsize;
463 	int alloc;
464 
465 	oldsize = hash->uh_hashsize;
466 
467 	/* We're just going to go to a power of two greater */
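	/*
	 * (Illustrative: assuming UMA_HASH_SIZE_INIT is 32, successive
	 * expansions yield 64-, 128-, 256-entry tables and so on.  The very
	 * first table instead comes from hashzone, below, because malloc()
	 * may not be available yet when the first kegs are created.)
	 */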
468 	if (oldsize)  {
469 		hash->uh_hashsize = oldsize * 2;
470 		alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
471 		hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
472 		    M_UMAHASH, M_NOWAIT);
473 	} else {
474 		alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
475 		hash->uh_slab_hash = uma_zalloc_internal(hashzone, NULL,
476 		    M_WAITOK);
477 		hash->uh_hashsize = UMA_HASH_SIZE_INIT;
478 	}
479 	if (hash->uh_slab_hash) {
480 		bzero(hash->uh_slab_hash, alloc);
481 		hash->uh_hashmask = hash->uh_hashsize - 1;
482 		return (1);
483 	}
484 
485 	return (0);
486 }
487 
488 /*
489  * Expands the hash table for HASH zones.  This is done from zone_timeout
490  * to reduce collisions.  This must not be done in the regular allocation
491  * path, otherwise, we can recurse on the vm while allocating pages.
492  *
493  * Arguments:
494  *	oldhash  The hash you want to expand
495  *	newhash  The hash structure for the new table
496  *
497  * Returns:
498  *	1 if the table was expanded and the slabs rehashed into newhash,
499  *	0 if newhash has no backing store or is not larger than the old
500  *	hash.
501  */
502 static int
503 hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
504 {
505 	uma_slab_t slab;
506 	int hval;
507 	int i;
508 
509 	if (!newhash->uh_slab_hash)
510 		return (0);
511 
512 	if (oldhash->uh_hashsize >= newhash->uh_hashsize)
513 		return (0);
514 
515 	/*
516 	 * I need to investigate hash algorithms for resizing without a
517 	 * full rehash.
518 	 */
519 
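	/*
	 * Every slab is rehashed.  Since the hash is masked with uh_hashmask
	 * (as set up in hash_alloc()), doubling the table splits each old
	 * chain between its old index and old index + oldsize; e.g. chain 5
	 * of a 32-entry table splits into chains 5 and 37 of a 64-entry one.
	 */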
520 	for (i = 0; i < oldhash->uh_hashsize; i++)
521 		while (!SLIST_EMPTY(&oldhash->uh_slab_hash[i])) {
522 			slab = SLIST_FIRST(&oldhash->uh_slab_hash[i]);
523 			SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[i], us_hlink);
524 			hval = UMA_HASH(newhash, slab->us_data);
525 			SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
526 			    slab, us_hlink);
527 		}
528 
529 	return (1);
530 }
531 
532 /*
533  * Free the hash bucket to the appropriate backing store.
534  *
535  * Arguments:
536  *	hash  The hash structure whose uh_slab_hash backing store is
537  *	      freed to either hashzone or M_UMAHASH, depending on its size
538  *
539  * Returns:
540  *	Nothing
541  */
542 static void
543 hash_free(struct uma_hash *hash)
544 {
545 	if (hash->uh_slab_hash == NULL)
546 		return;
547 	if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
548 		uma_zfree_internal(hashzone,
549 		    hash->uh_slab_hash, NULL, SKIP_NONE, ZFREE_STATFREE);
550 	else
551 		free(hash->uh_slab_hash, M_UMAHASH);
552 }
553 
554 /*
555  * Frees all outstanding items in a bucket
556  *
557  * Arguments:
558  *	zone   The zone to free to, must be unlocked.
559  *	bucket The free/alloc bucket with items, cpu queue must be locked.
560  *
561  * Returns:
562  *	Nothing
563  */
564 
565 static void
566 bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
567 {
568 	uma_slab_t slab;
569 	int mzone;
570 	void *item;
571 
572 	if (bucket == NULL)
573 		return;
574 
575 	slab = NULL;
576 	mzone = 0;
577 
578 	/* We have to lookup the slab again for malloc.. */
579 	if (zone->uz_keg->uk_flags & UMA_ZONE_MALLOC)
580 		mzone = 1;
581 
582 	while (bucket->ub_cnt > 0)  {
583 		bucket->ub_cnt--;
584 		item = bucket->ub_bucket[bucket->ub_cnt];
585 #ifdef INVARIANTS
586 		bucket->ub_bucket[bucket->ub_cnt] = NULL;
587 		KASSERT(item != NULL,
588 		    ("bucket_drain: botched ptr, item is NULL"));
589 #endif
590 		/*
591 		 * This is extremely inefficient.  The slab pointer was passed
592 		 * to uma_zfree_arg, but we lost it because the buckets don't
593 		 * hold them.  This will go away when free() gets a size passed
594 		 * to it.
595 		 */
596 		if (mzone)
597 			slab = vtoslab((vm_offset_t)item & (~UMA_SLAB_MASK));
598 		uma_zfree_internal(zone, item, slab, SKIP_DTOR, 0);
599 	}
600 }
601 
602 /*
603  * Drains the per cpu caches for a zone.
604  *
605  * NOTE: This may only be called while the zone is being torn down, and not
606  * during normal operation.  This is necessary in order that we do not have
607  * to migrate CPUs to drain the per-CPU caches.
608  *
609  * Arguments:
610  *	zone     The zone to drain, must be unlocked.
611  *
612  * Returns:
613  *	Nothing
614  */
615 static void
616 cache_drain(uma_zone_t zone)
617 {
618 	uma_cache_t cache;
619 	int cpu;
620 
621 	/*
622 	 * XXX: It is safe to not lock the per-CPU caches, because we're
623 	 * tearing down the zone anyway.  I.e., there will be no further use
624 	 * of the caches at this point.
625 	 *
626 	 * XXX: It would be good to be able to assert that the zone is being
627 	 * torn down to prevent improper use of cache_drain().
628 	 *
629 	 * XXX: We lock the zone before passing into bucket_cache_drain() as
630 	 * it is used elsewhere.  Should the tear-down path be made special
631 	 * there in some form?
632 	 */
633 	for (cpu = 0; cpu <= mp_maxid; cpu++) {
634 		if (CPU_ABSENT(cpu))
635 			continue;
636 		cache = &zone->uz_cpu[cpu];
637 		bucket_drain(zone, cache->uc_allocbucket);
638 		bucket_drain(zone, cache->uc_freebucket);
639 		if (cache->uc_allocbucket != NULL)
640 			bucket_free(cache->uc_allocbucket);
641 		if (cache->uc_freebucket != NULL)
642 			bucket_free(cache->uc_freebucket);
643 		cache->uc_allocbucket = cache->uc_freebucket = NULL;
644 	}
645 	ZONE_LOCK(zone);
646 	bucket_cache_drain(zone);
647 	ZONE_UNLOCK(zone);
648 }
649 
650 /*
651  * Drain the cached buckets from a zone.  Expects a locked zone on entry.
652  */
653 static void
654 bucket_cache_drain(uma_zone_t zone)
655 {
656 	uma_bucket_t bucket;
657 
658 	/*
659 	 * Drain the bucket queues and free the buckets; we keep just the two
660 	 * per-CPU buckets (alloc/free).
661 	 */
662 	while ((bucket = LIST_FIRST(&zone->uz_full_bucket)) != NULL) {
663 		LIST_REMOVE(bucket, ub_link);
664 		ZONE_UNLOCK(zone);
665 		bucket_drain(zone, bucket);
666 		bucket_free(bucket);
667 		ZONE_LOCK(zone);
668 	}
669 
670 	/* Now we do the free queue.. */
671 	while ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
672 		LIST_REMOVE(bucket, ub_link);
673 		bucket_free(bucket);
674 	}
675 }
676 
677 /*
678  * Frees pages from a zone back to the system.  This is done on demand from
679  * the pageout daemon.
680  *
681  * Arguments:
682  *	zone  The zone to free pages from
684  *
685  * Returns:
686  *	Nothing.
687  */
688 static void
689 zone_drain(uma_zone_t zone)
690 {
691 	struct slabhead freeslabs = { 0 };
692 	uma_keg_t keg;
693 	uma_slab_t slab;
694 	uma_slab_t n;
695 	u_int8_t flags;
696 	u_int8_t *mem;
697 	int i;
698 
699 	keg = zone->uz_keg;
700 
701 	/*
702 	 * We don't want to take pages from statically allocated zones at this
703 	 * time
704 	 */
705 	if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
706 		return;
707 
708 	ZONE_LOCK(zone);
709 
710 #ifdef UMA_DEBUG
711 	printf("%s free items: %u\n", zone->uz_name, keg->uk_free);
712 #endif
713 	bucket_cache_drain(zone);
714 	if (keg->uk_free == 0)
715 		goto finished;
716 
717 	slab = LIST_FIRST(&keg->uk_free_slab);
718 	while (slab) {
719 		n = LIST_NEXT(slab, us_link);
720 
721 		/* We have nowhere to free these to. */
722 		if (slab->us_flags & UMA_SLAB_BOOT) {
723 			slab = n;
724 			continue;
725 		}
726 
727 		LIST_REMOVE(slab, us_link);
728 		keg->uk_pages -= keg->uk_ppera;
729 		keg->uk_free -= keg->uk_ipers;
730 
731 		if (keg->uk_flags & UMA_ZONE_HASH)
732 			UMA_HASH_REMOVE(&keg->uk_hash, slab, slab->us_data);
733 
734 		SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);
735 
736 		slab = n;
737 	}
738 finished:
739 	ZONE_UNLOCK(zone);
740 
741 	while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
742 		SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
743 		if (keg->uk_fini)
744 			for (i = 0; i < keg->uk_ipers; i++)
745 				keg->uk_fini(
746 				    slab->us_data + (keg->uk_rsize * i),
747 				    keg->uk_size);
748 		flags = slab->us_flags;
749 		mem = slab->us_data;
750 
751 		if ((keg->uk_flags & UMA_ZONE_MALLOC) ||
752 		    (keg->uk_flags & UMA_ZONE_REFCNT)) {
753 			vm_object_t obj;
754 
755 			if (flags & UMA_SLAB_KMEM)
756 				obj = kmem_object;
757 			else
758 				obj = NULL;
759 			for (i = 0; i < keg->uk_ppera; i++)
760 				vsetobj((vm_offset_t)mem + (i * PAGE_SIZE),
761 				    obj);
762 		}
763 		if (keg->uk_flags & UMA_ZONE_OFFPAGE)
764 			uma_zfree_internal(keg->uk_slabzone, slab, NULL,
765 			    SKIP_NONE, ZFREE_STATFREE);
766 #ifdef UMA_DEBUG
767 		printf("%s: Returning %d bytes.\n",
768 		    zone->uz_name, UMA_SLAB_SIZE * keg->uk_ppera);
769 #endif
770 		keg->uk_freef(mem, UMA_SLAB_SIZE * keg->uk_ppera, flags);
771 	}
772 }
773 
774 /*
775  * Allocate a new slab for a zone.  This does not insert the slab onto a list.
776  *
777  * Arguments:
778  *	zone  The zone to allocate slabs for
779  *	wait  Shall we wait?
780  *
781  * Returns:
782  *	The slab that was allocated or NULL if there is no memory and the
783  *	caller specified M_NOWAIT.
784  */
785 static uma_slab_t
786 slab_zalloc(uma_zone_t zone, int wait)
787 {
788 	uma_slabrefcnt_t slabref;
789 	uma_slab_t slab;
790 	uma_keg_t keg;
791 	u_int8_t *mem;
792 	u_int8_t flags;
793 	int i;
794 
795 	slab = NULL;
796 	keg = zone->uz_keg;
797 
798 #ifdef UMA_DEBUG
799 	printf("slab_zalloc:  Allocating a new slab for %s\n", zone->uz_name);
800 #endif
801 	ZONE_UNLOCK(zone);
802 
803 	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
804 		slab = uma_zalloc_internal(keg->uk_slabzone, NULL, wait);
805 		if (slab == NULL) {
806 			ZONE_LOCK(zone);
807 			return NULL;
808 		}
809 	}
810 
811 	/*
812 	 * This reproduces the old vm_zone behavior of zero filling pages the
813 	 * first time they are added to a zone.
814 	 *
815 	 * Malloced items are zeroed in uma_zalloc.
816 	 */
817 
818 	if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
819 		wait |= M_ZERO;
820 	else
821 		wait &= ~M_ZERO;
822 
823 	mem = keg->uk_allocf(zone, keg->uk_ppera * UMA_SLAB_SIZE,
824 	    &flags, wait);
825 	if (mem == NULL) {
826 		if (keg->uk_flags & UMA_ZONE_OFFPAGE)
827 			uma_zfree_internal(keg->uk_slabzone, slab, NULL,
828 			    SKIP_NONE, ZFREE_STATFREE);
829 		ZONE_LOCK(zone);
830 		return (NULL);
831 	}
832 
833 	/* Point the slab into the allocated memory */
834 	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
835 		slab = (uma_slab_t )(mem + keg->uk_pgoff);
836 
837 	if ((keg->uk_flags & UMA_ZONE_MALLOC) ||
838 	    (keg->uk_flags & UMA_ZONE_REFCNT))
839 		for (i = 0; i < keg->uk_ppera; i++)
840 			vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);
841 
842 	slab->us_keg = keg;
843 	slab->us_data = mem;
844 	slab->us_freecount = keg->uk_ipers;
845 	slab->us_firstfree = 0;
846 	slab->us_flags = flags;
847 
848 	if (keg->uk_flags & UMA_ZONE_REFCNT) {
849 		slabref = (uma_slabrefcnt_t)slab;
850 		for (i = 0; i < keg->uk_ipers; i++) {
851 			slabref->us_freelist[i].us_refcnt = 0;
852 			slabref->us_freelist[i].us_item = i+1;
853 		}
854 	} else {
855 		for (i = 0; i < keg->uk_ipers; i++)
856 			slab->us_freelist[i].us_item = i+1;
857 	}
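	/*
	 * (The us_item fields above form an implicit singly linked freelist:
	 * entry i points at entry i + 1 and us_firstfree is its head.  The
	 * last entry points one past the end, but it is never followed
	 * because us_freecount reaches zero first.)
	 */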
858 
859 	if (keg->uk_init != NULL) {
860 		for (i = 0; i < keg->uk_ipers; i++)
861 			if (keg->uk_init(slab->us_data + (keg->uk_rsize * i),
862 			    keg->uk_size, wait) != 0)
863 				break;
864 		if (i != keg->uk_ipers) {
865 			if (keg->uk_fini != NULL) {
866 				for (i--; i > -1; i--)
867 					keg->uk_fini(slab->us_data +
868 					    (keg->uk_rsize * i),
869 					    keg->uk_size);
870 			}
871 			if ((keg->uk_flags & UMA_ZONE_MALLOC) ||
872 			    (keg->uk_flags & UMA_ZONE_REFCNT)) {
873 				vm_object_t obj;
874 
875 				if (flags & UMA_SLAB_KMEM)
876 					obj = kmem_object;
877 				else
878 					obj = NULL;
879 				for (i = 0; i < keg->uk_ppera; i++)
880 					vsetobj((vm_offset_t)mem +
881 					    (i * PAGE_SIZE), obj);
882 			}
883 			if (keg->uk_flags & UMA_ZONE_OFFPAGE)
884 				uma_zfree_internal(keg->uk_slabzone, slab,
885 				    NULL, SKIP_NONE, ZFREE_STATFREE);
886 			keg->uk_freef(mem, UMA_SLAB_SIZE * keg->uk_ppera,
887 			    flags);
888 			ZONE_LOCK(zone);
889 			return (NULL);
890 		}
891 	}
892 	ZONE_LOCK(zone);
893 
894 	if (keg->uk_flags & UMA_ZONE_HASH)
895 		UMA_HASH_INSERT(&keg->uk_hash, slab, mem);
896 
897 	keg->uk_pages += keg->uk_ppera;
898 	keg->uk_free += keg->uk_ipers;
899 
900 	return (slab);
901 }
902 
903 /*
904  * This function is intended to be used early on in place of page_alloc() so
905  * that we may use the boot time page cache to satisfy allocations before
906  * the VM is ready.
907  */
908 static void *
909 startup_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
910 {
911 	uma_keg_t keg;
912 	uma_slab_t tmps;
913 
914 	keg = zone->uz_keg;
915 
916 	/*
917 	 * Check our small startup cache to see if it has pages remaining.
918 	 */
919 	mtx_lock(&uma_boot_pages_mtx);
920 	if ((tmps = LIST_FIRST(&uma_boot_pages)) != NULL) {
921 		LIST_REMOVE(tmps, us_link);
922 		mtx_unlock(&uma_boot_pages_mtx);
923 		*pflag = tmps->us_flags;
924 		return (tmps->us_data);
925 	}
926 	mtx_unlock(&uma_boot_pages_mtx);
927 	if (booted == 0)
928 		panic("UMA: Increase vm.boot_pages");
929 	/*
930 	 * Now that we've booted, reset these users to their real allocator.
931 	 */
932 #ifdef UMA_MD_SMALL_ALLOC
933 	keg->uk_allocf = uma_small_alloc;
934 #else
935 	keg->uk_allocf = page_alloc;
936 #endif
937 	return keg->uk_allocf(zone, bytes, pflag, wait);
938 }
939 
940 /*
941  * Allocates a number of pages from the system
942  *
943  * Arguments:
944  *	zone  Unused
945  *	bytes  The number of bytes requested
946  *	wait  Shall we wait?
947  *
948  * Returns:
949  *	A pointer to the allocated memory or possibly
950  *	NULL if M_NOWAIT is set.
951  */
952 static void *
953 page_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
954 {
955 	void *p;	/* Returned page */
956 
957 	*pflag = UMA_SLAB_KMEM;
958 	p = (void *) kmem_malloc(kmem_map, bytes, wait);
959 
960 	return (p);
961 }
962 
963 /*
964  * Allocates a number of pages from within an object
965  *
966  * Arguments:
967  *	zone   Unused
968  *	bytes  The number of bytes requested
969  *	wait   Shall we wait?
970  *
971  * Returns:
972  *	A pointer to the allocated memory or possibly
973  *	NULL if M_NOWAIT is set.
974  */
975 static void *
976 obj_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
977 {
978 	vm_object_t object;
979 	vm_offset_t retkva, zkva;
980 	vm_page_t p;
981 	int pages, startpages;
982 
983 	object = zone->uz_keg->uk_obj;
984 	retkva = 0;
985 
986 	/*
987 	 * This looks a little weird since we're getting one page at a time.
988 	 */
989 	VM_OBJECT_LOCK(object);
990 	p = TAILQ_LAST(&object->memq, pglist);
991 	pages = p != NULL ? p->pindex + 1 : 0;
992 	startpages = pages;
993 	zkva = zone->uz_keg->uk_kva + pages * PAGE_SIZE;
994 	for (; bytes > 0; bytes -= PAGE_SIZE) {
995 		p = vm_page_alloc(object, pages,
996 		    VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED);
997 		if (p == NULL) {
998 			if (pages != startpages)
999 				pmap_qremove(retkva, pages - startpages);
1000 			while (pages != startpages) {
1001 				pages--;
1002 				p = TAILQ_LAST(&object->memq, pglist);
1003 				vm_page_lock_queues();
1004 				vm_page_unwire(p, 0);
1005 				vm_page_free(p);
1006 				vm_page_unlock_queues();
1007 			}
1008 			retkva = 0;
1009 			goto done;
1010 		}
1011 		pmap_qenter(zkva, &p, 1);
1012 		if (retkva == 0)
1013 			retkva = zkva;
1014 		zkva += PAGE_SIZE;
1015 		pages += 1;
1016 	}
1017 done:
1018 	VM_OBJECT_UNLOCK(object);
1019 	*flags = UMA_SLAB_PRIV;
1020 
1021 	return ((void *)retkva);
1022 }
1023 
1024 /*
1025  * Frees a number of pages to the system
1026  *
1027  * Arguments:
1028  *	mem   A pointer to the memory to be freed
1029  *	size  The size of the memory being freed
1030  *	flags The original p->us_flags field
1031  *
1032  * Returns:
1033  *	Nothing
1034  */
1035 static void
1036 page_free(void *mem, int size, u_int8_t flags)
1037 {
1038 	vm_map_t map;
1039 
1040 	if (flags & UMA_SLAB_KMEM)
1041 		map = kmem_map;
1042 	else
1043 		panic("UMA: page_free used with invalid flags %d\n", flags);
1044 
1045 	kmem_free(map, (vm_offset_t)mem, size);
1046 }
1047 
1048 /*
1049  * Zero fill initializer
1050  *
1051  * Arguments/Returns follow uma_init specifications
1052  */
1053 static int
1054 zero_init(void *mem, int size, int flags)
1055 {
1056 	bzero(mem, size);
1057 	return (0);
1058 }
1059 
1060 /*
1061  * Finish creating a small uma zone.  This calculates ipers and the zone size.
1062  *
1063  * Arguments
1064  *	zone  The zone we should initialize
1065  *
1066  * Returns
1067  *	Nothing
1068  */
1069 static void
1070 zone_small_init(uma_zone_t zone)
1071 {
1072 	uma_keg_t keg;
1073 	u_int rsize;
1074 	u_int memused;
1075 	u_int wastedspace;
1076 	u_int shsize;
1077 
1078 	keg = zone->uz_keg;
1079 	KASSERT(keg != NULL, ("Keg is null in zone_small_init"));
1080 	rsize = keg->uk_size;
1081 
1082 	if (rsize < UMA_SMALLEST_UNIT)
1083 		rsize = UMA_SMALLEST_UNIT;
1084 	if (rsize & keg->uk_align)
1085 		rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1);
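	/*
	 * (Illustrative: with uk_align == 7, i.e. 8-byte alignment, a
	 * 20-byte item rounds up to (20 & ~7) + 8 == 24 bytes.)
	 */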
1086 
1087 	keg->uk_rsize = rsize;
1088 	keg->uk_ppera = 1;
1089 
1090 	if (keg->uk_flags & UMA_ZONE_REFCNT) {
1091 		rsize += UMA_FRITMREF_SZ;	/* linkage & refcnt */
1092 		shsize = sizeof(struct uma_slab_refcnt);
1093 	} else {
1094 		rsize += UMA_FRITM_SZ;	/* Account for linkage */
1095 		shsize = sizeof(struct uma_slab);
1096 	}
1097 
1098 	keg->uk_ipers = (UMA_SLAB_SIZE - shsize) / rsize;
1099 	KASSERT(keg->uk_ipers != 0, ("zone_small_init: ipers is 0"));
1100 	memused = keg->uk_ipers * rsize + shsize;
1101 	wastedspace = UMA_SLAB_SIZE - memused;
1102 
1103 	/*
1104 	 * We can't do OFFPAGE if we're internal or if we've been
1105 	 * asked to not go to the VM for buckets.  If we do this we
1106 	 * may end up going to the VM (kmem_map) for slabs, which we
1107 	 * must not do if we're UMA_ZFLAG_CACHEONLY as a result of
1108 	 * UMA_ZONE_VM, which clearly forbids it.
1109 	 */
1110 	if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
1111 	    (keg->uk_flags & UMA_ZFLAG_CACHEONLY))
1112 		return;
1113 
1114 	if ((wastedspace >= UMA_MAX_WASTE) &&
1115 	    (keg->uk_ipers < (UMA_SLAB_SIZE / keg->uk_rsize))) {
1116 		keg->uk_ipers = UMA_SLAB_SIZE / keg->uk_rsize;
1117 		KASSERT(keg->uk_ipers <= 255,
1118 		    ("zone_small_init: keg->uk_ipers too high!"));
1119 #ifdef UMA_DEBUG
1120 		printf("UMA decided we need offpage slab headers for "
1121 		    "zone: %s, calculated wastedspace = %d, "
1122 		    "maximum wasted space allowed = %d, "
1123 		    "calculated ipers = %d, "
1124 		    "new wasted space = %d\n", zone->uz_name, wastedspace,
1125 		    UMA_MAX_WASTE, keg->uk_ipers,
1126 		    UMA_SLAB_SIZE - keg->uk_ipers * keg->uk_rsize);
1127 #endif
1128 		keg->uk_flags |= UMA_ZONE_OFFPAGE;
1129 		if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
1130 			keg->uk_flags |= UMA_ZONE_HASH;
1131 	}
1132 }
1133 
1134 /*
1135  * Finish creating a large (> UMA_SLAB_SIZE) uma zone.  Just give in and do
1136  * OFFPAGE for now.  When I can allow for more dynamic slab sizes this will be
1137  * more complicated.
1138  *
1139  * Arguments
1140  *	zone  The zone we should initialize
1141  *
1142  * Returns
1143  *	Nothing
1144  */
1145 static void
1146 zone_large_init(uma_zone_t zone)
1147 {
1148 	uma_keg_t keg;
1149 	int pages;
1150 
1151 	keg = zone->uz_keg;
1152 
1153 	KASSERT(keg != NULL, ("Keg is null in zone_large_init"));
1154 	KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0,
1155 	    ("zone_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY zone"));
1156 
1157 	pages = keg->uk_size / UMA_SLAB_SIZE;
1158 
1159 	/* Account for remainder */
1160 	if ((pages * UMA_SLAB_SIZE) < keg->uk_size)
1161 		pages++;
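	/*
	 * (For example, assuming UMA_SLAB_SIZE is one 4 KB page, a 10 KB
	 * item yields pages == 3 here and a single item per slab below.)
	 */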
1162 
1163 	keg->uk_ppera = pages;
1164 	keg->uk_ipers = 1;
1165 
1166 	keg->uk_flags |= UMA_ZONE_OFFPAGE;
1167 	if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
1168 		keg->uk_flags |= UMA_ZONE_HASH;
1169 
1170 	keg->uk_rsize = keg->uk_size;
1171 }
1172 
1173 /*
1174  * Keg header ctor.  This initializes all fields, locks, etc.  And inserts
1175  * the keg onto the global keg list.
1176  *
1177  * Arguments/Returns follow uma_ctor specifications
1178  *	udata  Actually uma_kctor_args
1179  */
1180 static int
1181 keg_ctor(void *mem, int size, void *udata, int flags)
1182 {
1183 	struct uma_kctor_args *arg = udata;
1184 	uma_keg_t keg = mem;
1185 	uma_zone_t zone;
1186 
1187 	bzero(keg, size);
1188 	keg->uk_size = arg->size;
1189 	keg->uk_init = arg->uminit;
1190 	keg->uk_fini = arg->fini;
1191 	keg->uk_align = arg->align;
1192 	keg->uk_free = 0;
1193 	keg->uk_pages = 0;
1194 	keg->uk_flags = arg->flags;
1195 	keg->uk_allocf = page_alloc;
1196 	keg->uk_freef = page_free;
1197 	keg->uk_recurse = 0;
1198 	keg->uk_slabzone = NULL;
1199 
1200 	/*
1201 	 * The master zone is passed to us at keg-creation time.
1202 	 */
1203 	zone = arg->zone;
1204 	zone->uz_keg = keg;
1205 
1206 	if (arg->flags & UMA_ZONE_VM)
1207 		keg->uk_flags |= UMA_ZFLAG_CACHEONLY;
1208 
1209 	if (arg->flags & UMA_ZONE_ZINIT)
1210 		keg->uk_init = zero_init;
1211 
1212 	/*
1213 	 * The +UMA_FRITM_SZ added to uk_size is to account for the
1214 	 * linkage that is added to the size in zone_small_init().  If
1215 	 * we don't account for this here then we may end up in
1216 	 * zone_small_init() with a calculated 'ipers' of 0.
1217 	 */
1218 	if (keg->uk_flags & UMA_ZONE_REFCNT) {
1219 		if ((keg->uk_size+UMA_FRITMREF_SZ) >
1220 		    (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt)))
1221 			zone_large_init(zone);
1222 		else
1223 			zone_small_init(zone);
1224 	} else {
1225 		if ((keg->uk_size+UMA_FRITM_SZ) >
1226 		    (UMA_SLAB_SIZE - sizeof(struct uma_slab)))
1227 			zone_large_init(zone);
1228 		else
1229 			zone_small_init(zone);
1230 	}
1231 
1232 	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
1233 		if (keg->uk_flags & UMA_ZONE_REFCNT)
1234 			keg->uk_slabzone = slabrefzone;
1235 		else
1236 			keg->uk_slabzone = slabzone;
1237 	}
1238 
1239 	/*
1240 	 * If we haven't booted yet we need allocations to go through the
1241 	 * startup cache until the vm is ready.
1242 	 */
1243 	if (keg->uk_ppera == 1) {
1244 #ifdef UMA_MD_SMALL_ALLOC
1245 		keg->uk_allocf = uma_small_alloc;
1246 		keg->uk_freef = uma_small_free;
1247 #endif
1248 		if (booted == 0)
1249 			keg->uk_allocf = startup_alloc;
1250 	}
1251 
1252 	/*
1253 	 * Initialize the keg's lock (shared among zones) through the
1254 	 * master zone.
1255 	 */
1256 	zone->uz_lock = &keg->uk_lock;
1257 	if (arg->flags & UMA_ZONE_MTXCLASS)
1258 		ZONE_LOCK_INIT(zone, 1);
1259 	else
1260 		ZONE_LOCK_INIT(zone, 0);
1261 
1262 	/*
1263 	 * If we're putting the slab header in the actual page we need to
1264 	 * figure out where in each page it goes.  This calculates a right
1265 	 * justified offset into the memory on an ALIGN_PTR boundary.
1266 	 */
1267 	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) {
1268 		u_int totsize;
1269 
1270 		/* Size of the slab struct and free list */
1271 		if (keg->uk_flags & UMA_ZONE_REFCNT)
1272 			totsize = sizeof(struct uma_slab_refcnt) +
1273 			    keg->uk_ipers * UMA_FRITMREF_SZ;
1274 		else
1275 			totsize = sizeof(struct uma_slab) +
1276 			    keg->uk_ipers * UMA_FRITM_SZ;
1277 
1278 		if (totsize & UMA_ALIGN_PTR)
1279 			totsize = (totsize & ~UMA_ALIGN_PTR) +
1280 			    (UMA_ALIGN_PTR + 1);
1281 		keg->uk_pgoff = UMA_SLAB_SIZE - totsize;
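		/*
		 * (Illustrative: assuming UMA_SLAB_SIZE is 4096 and the
		 * rounded header size works out to 104 bytes, uk_pgoff is
		 * 3992; the slab header occupies the last 104 bytes of the
		 * page and items fill the space in front of it.)
		 */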
1282 
1283 		if (keg->uk_flags & UMA_ZONE_REFCNT)
1284 			totsize = keg->uk_pgoff + sizeof(struct uma_slab_refcnt)
1285 			    + keg->uk_ipers * UMA_FRITMREF_SZ;
1286 		else
1287 			totsize = keg->uk_pgoff + sizeof(struct uma_slab)
1288 			    + keg->uk_ipers * UMA_FRITM_SZ;
1289 
1290 		/*
1291 		 * The only way the following is possible is if, with our
1292 		 * UMA_ALIGN_PTR adjustments, we are now bigger than
1293 		 * UMA_SLAB_SIZE.  I haven't checked whether this is
1294 		 * mathematically possible for all cases, so we make
1295 		 * sure here anyway.
1296 		 */
1297 		if (totsize > UMA_SLAB_SIZE) {
1298 			printf("zone %s ipers %d rsize %d size %d\n",
1299 			    zone->uz_name, keg->uk_ipers, keg->uk_rsize,
1300 			    keg->uk_size);
1301 			panic("UMA slab won't fit.\n");
1302 		}
1303 	}
1304 
1305 	if (keg->uk_flags & UMA_ZONE_HASH)
1306 		hash_alloc(&keg->uk_hash);
1307 
1308 #ifdef UMA_DEBUG
1309 	printf("%s(%p) size = %d ipers = %d ppera = %d pgoff = %d\n",
1310 	    zone->uz_name, zone,
1311 	    keg->uk_size, keg->uk_ipers,
1312 	    keg->uk_ppera, keg->uk_pgoff);
1313 #endif
1314 
1315 	LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);
1316 
1317 	mtx_lock(&uma_mtx);
1318 	LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
1319 	mtx_unlock(&uma_mtx);
1320 	return (0);
1321 }
1322 
1323 /*
1324  * Zone header ctor.  This initializes all fields, locks, etc.
1325  *
1326  * Arguments/Returns follow uma_ctor specifications
1327  *	udata  Actually uma_zctor_args
1328  */
1329 
1330 static int
1331 zone_ctor(void *mem, int size, void *udata, int flags)
1332 {
1333 	struct uma_zctor_args *arg = udata;
1334 	uma_zone_t zone = mem;
1335 	uma_zone_t z;
1336 	uma_keg_t keg;
1337 
1338 	bzero(zone, size);
1339 	zone->uz_name = arg->name;
1340 	zone->uz_ctor = arg->ctor;
1341 	zone->uz_dtor = arg->dtor;
1342 	zone->uz_init = NULL;
1343 	zone->uz_fini = NULL;
1344 	zone->uz_allocs = 0;
1345 	zone->uz_frees = 0;
1346 	zone->uz_fails = 0;
1347 	zone->uz_fills = zone->uz_count = 0;
1348 
1349 	if (arg->flags & UMA_ZONE_SECONDARY) {
1350 		KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
1351 		keg = arg->keg;
1352 		zone->uz_keg = keg;
1353 		zone->uz_init = arg->uminit;
1354 		zone->uz_fini = arg->fini;
1355 		zone->uz_lock = &keg->uk_lock;
1356 		mtx_lock(&uma_mtx);
1357 		ZONE_LOCK(zone);
1358 		keg->uk_flags |= UMA_ZONE_SECONDARY;
1359 		LIST_FOREACH(z, &keg->uk_zones, uz_link) {
1360 			if (LIST_NEXT(z, uz_link) == NULL) {
1361 				LIST_INSERT_AFTER(z, zone, uz_link);
1362 				break;
1363 			}
1364 		}
1365 		ZONE_UNLOCK(zone);
1366 		mtx_unlock(&uma_mtx);
1367 	} else if (arg->keg == NULL) {
1368 		if (uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
1369 		    arg->align, arg->flags) == NULL)
1370 			return (ENOMEM);
1371 	} else {
1372 		struct uma_kctor_args karg;
1373 		int error;
1374 
1375 		/* We should only be here from uma_startup() */
1376 		karg.size = arg->size;
1377 		karg.uminit = arg->uminit;
1378 		karg.fini = arg->fini;
1379 		karg.align = arg->align;
1380 		karg.flags = arg->flags;
1381 		karg.zone = zone;
1382 		error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
1383 		    flags);
1384 		if (error)
1385 			return (error);
1386 	}
1387 	keg = zone->uz_keg;
1388 	zone->uz_lock = &keg->uk_lock;
1389 
1390 	/*
1391 	 * Some internal zones don't have room allocated for the per cpu
1392 	 * caches.  If we're internal, bail out here.
1393 	 */
1394 	if (keg->uk_flags & UMA_ZFLAG_INTERNAL) {
1395 		KASSERT((keg->uk_flags & UMA_ZONE_SECONDARY) == 0,
1396 		    ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
1397 		return (0);
1398 	}
1399 
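	/*
	 * Pick the initial per-CPU bucket size: e.g. a keg with 30 items
	 * per slab starts with uz_count == 30; uma_zalloc_arg() grows it
	 * toward BUCKET_MAX each time it fails to find a full bucket.
	 */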
1400 	if (keg->uk_flags & UMA_ZONE_MAXBUCKET)
1401 		zone->uz_count = BUCKET_MAX;
1402 	else if (keg->uk_ipers <= BUCKET_MAX)
1403 		zone->uz_count = keg->uk_ipers;
1404 	else
1405 		zone->uz_count = BUCKET_MAX;
1406 	return (0);
1407 }
1408 
1409 /*
1410  * Keg header dtor.  This frees all data, destroys locks, frees the hash
1411  * table and removes the keg from the global list.
1412  *
1413  * Arguments/Returns follow uma_dtor specifications
1414  *	udata  unused
1415  */
1416 static void
1417 keg_dtor(void *arg, int size, void *udata)
1418 {
1419 	uma_keg_t keg;
1420 
1421 	keg = (uma_keg_t)arg;
1422 	mtx_lock(&keg->uk_lock);
1423 	if (keg->uk_free != 0) {
1424 		printf("Freed UMA keg was not empty (%d items). "
1425 		    " Lost %d pages of memory.\n",
1426 		    keg->uk_free, keg->uk_pages);
1427 	}
1428 	mtx_unlock(&keg->uk_lock);
1429 
1430 	if (keg->uk_flags & UMA_ZONE_HASH)
1431 		hash_free(&keg->uk_hash);
1432 
1433 	mtx_destroy(&keg->uk_lock);
1434 }
1435 
1436 /*
1437  * Zone header dtor.
1438  *
1439  * Arguments/Returns follow uma_dtor specifications
1440  *	udata  unused
1441  */
1442 static void
1443 zone_dtor(void *arg, int size, void *udata)
1444 {
1445 	uma_zone_t zone;
1446 	uma_keg_t keg;
1447 
1448 	zone = (uma_zone_t)arg;
1449 	keg = zone->uz_keg;
1450 
1451 	if (!(keg->uk_flags & UMA_ZFLAG_INTERNAL))
1452 		cache_drain(zone);
1453 
1454 	mtx_lock(&uma_mtx);
1455 	zone_drain(zone);
1456 	if (keg->uk_flags & UMA_ZONE_SECONDARY) {
1457 		LIST_REMOVE(zone, uz_link);
1458 		/*
1459 		 * XXX there are some races here where
1460 		 * the zone can be drained but zone lock
1461 		 * released and then refilled before we
1462 		 * remove it... we don't care for now.
1463 		 */
1464 		ZONE_LOCK(zone);
1465 		if (LIST_EMPTY(&keg->uk_zones))
1466 			keg->uk_flags &= ~UMA_ZONE_SECONDARY;
1467 		ZONE_UNLOCK(zone);
1468 		mtx_unlock(&uma_mtx);
1469 	} else {
1470 		LIST_REMOVE(keg, uk_link);
1471 		LIST_REMOVE(zone, uz_link);
1472 		mtx_unlock(&uma_mtx);
1473 		uma_zfree_internal(kegs, keg, NULL, SKIP_NONE,
1474 		    ZFREE_STATFREE);
1475 	}
1476 	zone->uz_keg = NULL;
1477 }
1478 
1479 /*
1480  * Traverses every zone in the system and calls a callback
1481  *
1482  * Arguments:
1483  *	zfunc  A pointer to a function which accepts a zone
1484  *		as an argument.
1485  *
1486  * Returns:
1487  *	Nothing
1488  */
1489 static void
1490 zone_foreach(void (*zfunc)(uma_zone_t))
1491 {
1492 	uma_keg_t keg;
1493 	uma_zone_t zone;
1494 
1495 	mtx_lock(&uma_mtx);
1496 	LIST_FOREACH(keg, &uma_kegs, uk_link) {
1497 		LIST_FOREACH(zone, &keg->uk_zones, uz_link)
1498 			zfunc(zone);
1499 	}
1500 	mtx_unlock(&uma_mtx);
1501 }
1502 
1503 /* Public functions */
1504 /* See uma.h */
1505 void
1506 uma_startup(void *bootmem, int boot_pages)
1507 {
1508 	struct uma_zctor_args args;
1509 	uma_slab_t slab;
1510 	u_int slabsize;
1511 	u_int objsize, totsize, wsize;
1512 	int i;
1513 
1514 #ifdef UMA_DEBUG
1515 	printf("Creating uma keg headers zone and keg.\n");
1516 #endif
1517 	mtx_init(&uma_mtx, "UMA lock", NULL, MTX_DEF);
1518 
1519 	/*
1520 	 * Figure out the maximum number of items-per-slab we'll have if
1521 	 * we're using the OFFPAGE slab header to track free items, given
1522 	 * all possible object sizes and the maximum desired wastage
1523 	 * (UMA_MAX_WASTE).
1524 	 *
1525 	 * We iterate until we find an object size for
1526 	 * which the calculated wastage in zone_small_init() will be
1527 	 * enough to warrant OFFPAGE.  Since wastedspace versus objsize
1528 	 * is an overall increasing see-saw function, we find the smallest
1529 	 * objsize such that the wastage is always acceptable for objects
1530 	 * with that objsize or smaller.  Since a smaller objsize always
1531 	 * generates a larger possible uma_max_ipers, we use this computed
1532 	 * objsize to calculate the largest ipers possible.  Since the
1533 	 * ipers calculated for OFFPAGE slab headers is always larger than
1534 	 * the ipers initially calculated in zone_small_init(), we use
1535 	 * the former's equation (UMA_SLAB_SIZE / keg->uk_rsize) to
1536 	 * obtain the maximum ipers possible for offpage slab headers.
1537 	 *
1538 	 * It should be noted that ipers versus objsize is an inversely
1539 	 * proportional function which drops off rather quickly so as
1540 	 * long as our UMA_MAX_WASTE is such that the objsize we calculate
1541 	 * falls into the portion of the inverse relation AFTER the steep
1542 	 * falloff, then uma_max_ipers shouldn't be too high (~10 on i386).
1543 	 *
1544 	 * Note that we have 8 bits (1 byte) to use as a freelist index
1545 	 * inside the actual slab header itself and this is enough to
1546 	 * accommodate us.  In the worst case, a UMA_SMALLEST_UNIT-sized
1547 	 * object with offpage slab header would have ipers =
1548 	 * UMA_SLAB_SIZE / UMA_SMALLEST_UNIT (currently = 256), which is
1549 	 * 1 greater than what our byte-integer freelist index can
1550 	 * accommodate, but we know that this situation never occurs as
1551 	 * for UMA_SMALLEST_UNIT-sized objects, we will never calculate
1552 	 * that we need to go to offpage slab headers.  Or, if we do,
1553 	 * then we trap that condition below and panic in the INVARIANTS case.
1554 	 */
1555 	wsize = UMA_SLAB_SIZE - sizeof(struct uma_slab) - UMA_MAX_WASTE;
1556 	totsize = wsize;
1557 	objsize = UMA_SMALLEST_UNIT;
1558 	while (totsize >= wsize) {
1559 		totsize = (UMA_SLAB_SIZE - sizeof(struct uma_slab)) /
1560 		    (objsize + UMA_FRITM_SZ);
1561 		totsize *= (UMA_FRITM_SZ + objsize);
1562 		objsize++;
1563 	}
1564 	if (objsize > UMA_SMALLEST_UNIT)
1565 		objsize--;
1566 	uma_max_ipers = UMA_SLAB_SIZE / objsize;
1567 
1568 	wsize = UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt) - UMA_MAX_WASTE;
1569 	totsize = wsize;
1570 	objsize = UMA_SMALLEST_UNIT;
1571 	while (totsize >= wsize) {
1572 		totsize = (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt)) /
1573 		    (objsize + UMA_FRITMREF_SZ);
1574 		totsize *= (UMA_FRITMREF_SZ + objsize);
1575 		objsize++;
1576 	}
1577 	if (objsize > UMA_SMALLEST_UNIT)
1578 		objsize--;
1579 	uma_max_ipers_ref = UMA_SLAB_SIZE / objsize;
1580 
1581 	KASSERT((uma_max_ipers_ref <= 255) && (uma_max_ipers <= 255),
1582 	    ("uma_startup: calculated uma_max_ipers values too large!"));
1583 
1584 #ifdef UMA_DEBUG
1585 	printf("Calculated uma_max_ipers (for OFFPAGE) is %d\n", uma_max_ipers);
1586 	printf("Calculated uma_max_ipers_ref (for OFFPAGE) is %d\n",
1587 	    uma_max_ipers_ref);
1588 #endif
1589 
1590 	/* "manually" create the initial zone */
1591 	args.name = "UMA Kegs";
1592 	args.size = sizeof(struct uma_keg);
1593 	args.ctor = keg_ctor;
1594 	args.dtor = keg_dtor;
1595 	args.uminit = zero_init;
1596 	args.fini = NULL;
1597 	args.keg = &masterkeg;
1598 	args.align = 32 - 1;
1599 	args.flags = UMA_ZFLAG_INTERNAL;
1600 	/* The initial zone has no per-CPU queues, so it's smaller. */
1601 	zone_ctor(kegs, sizeof(struct uma_zone), &args, M_WAITOK);
1602 
1603 #ifdef UMA_DEBUG
1604 	printf("Filling boot free list.\n");
1605 #endif
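	/*
	 * Each boot page doubles as its own slab header: us_data points
	 * back at the page itself, and startup_alloc() simply pops pages
	 * off this list until the VM is ready.
	 */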
1606 	for (i = 0; i < boot_pages; i++) {
1607 		slab = (uma_slab_t)((u_int8_t *)bootmem + (i * UMA_SLAB_SIZE));
1608 		slab->us_data = (u_int8_t *)slab;
1609 		slab->us_flags = UMA_SLAB_BOOT;
1610 		LIST_INSERT_HEAD(&uma_boot_pages, slab, us_link);
1611 	}
1612 	mtx_init(&uma_boot_pages_mtx, "UMA boot pages", NULL, MTX_DEF);
1613 
1614 #ifdef UMA_DEBUG
1615 	printf("Creating uma zone headers zone and keg.\n");
1616 #endif
1617 	args.name = "UMA Zones";
1618 	args.size = sizeof(struct uma_zone) +
1619 	    (sizeof(struct uma_cache) * (mp_maxid + 1));
1620 	args.ctor = zone_ctor;
1621 	args.dtor = zone_dtor;
1622 	args.uminit = zero_init;
1623 	args.fini = NULL;
1624 	args.keg = NULL;
1625 	args.align = 32 - 1;
1626 	args.flags = UMA_ZFLAG_INTERNAL;
1627 	/* The initial zone has no per-CPU queues, so it's smaller. */
1628 	zone_ctor(zones, sizeof(struct uma_zone), &args, M_WAITOK);
1629 
1630 #ifdef UMA_DEBUG
1631 	printf("Initializing pcpu cache locks.\n");
1632 #endif
1633 #ifdef UMA_DEBUG
1634 	printf("Creating slab and hash zones.\n");
1635 #endif
1636 
1637 	/*
1638 	 * This is the max number of free list items we'll have with
1639 	 * offpage slabs.
1640 	 */
1641 	slabsize = uma_max_ipers * UMA_FRITM_SZ;
1642 	slabsize += sizeof(struct uma_slab);
1643 
1644 	/* Now make a zone for slab headers */
1645 	slabzone = uma_zcreate("UMA Slabs",
1646 				slabsize,
1647 				NULL, NULL, NULL, NULL,
1648 				UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
1649 
1650 	/*
1651 	 * We also create a zone for the bigger slabs with reference
1652 	 * counts in them, to accommodate UMA_ZONE_REFCNT zones.
1653 	 */
1654 	slabsize = uma_max_ipers_ref * UMA_FRITMREF_SZ;
1655 	slabsize += sizeof(struct uma_slab_refcnt);
1656 	slabrefzone = uma_zcreate("UMA RCntSlabs",
1657 				  slabsize,
1658 				  NULL, NULL, NULL, NULL,
1659 				  UMA_ALIGN_PTR,
1660 				  UMA_ZFLAG_INTERNAL);
1661 
1662 	hashzone = uma_zcreate("UMA Hash",
1663 	    sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
1664 	    NULL, NULL, NULL, NULL,
1665 	    UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
1666 
1667 	bucket_init();
1668 
1669 #ifdef UMA_MD_SMALL_ALLOC
1670 	booted = 1;
1671 #endif
1672 
1673 #ifdef UMA_DEBUG
1674 	printf("UMA startup complete.\n");
1675 #endif
1676 }
1677 
1678 /* see uma.h */
1679 void
1680 uma_startup2(void)
1681 {
1682 	booted = 1;
1683 	bucket_enable();
1684 #ifdef UMA_DEBUG
1685 	printf("UMA startup2 complete.\n");
1686 #endif
1687 }
1688 
1689 /*
1690  * Initialize our callout handle and schedule the first periodic
1691  * timeout.
1692  */
1693 
1694 static void
1695 uma_startup3(void)
1696 {
1697 #ifdef UMA_DEBUG
1698 	printf("Starting callout.\n");
1699 #endif
1700 	callout_init(&uma_callout, CALLOUT_MPSAFE);
1701 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
1702 #ifdef UMA_DEBUG
1703 	printf("UMA startup3 complete.\n");
1704 #endif
1705 }
1706 
1707 static uma_zone_t
1708 uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
1709 		int align, u_int32_t flags)
1710 {
1711 	struct uma_kctor_args args;
1712 
1713 	args.size = size;
1714 	args.uminit = uminit;
1715 	args.fini = fini;
1716 	args.align = align;
1717 	args.flags = flags;
1718 	args.zone = zone;
1719 	return (uma_zalloc_internal(kegs, &args, M_WAITOK));
1720 }
1721 
1722 /* See uma.h */
1723 uma_zone_t
1724 uma_zcreate(char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
1725 		uma_init uminit, uma_fini fini, int align, u_int32_t flags)
1726 
1727 {
1728 	struct uma_zctor_args args;
1729 
1730 	/* This stuff is essential for the zone ctor */
1731 	args.name = name;
1732 	args.size = size;
1733 	args.ctor = ctor;
1734 	args.dtor = dtor;
1735 	args.uminit = uminit;
1736 	args.fini = fini;
1737 	args.align = align;
1738 	args.flags = flags;
1739 	args.keg = NULL;
1740 
1741 	return (uma_zalloc_internal(zones, &args, M_WAITOK));
1742 }
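/*
 * Example usage (illustrative only; "foo zone" and struct foo are
 * placeholders, not part of this file):
 *
 *	static uma_zone_t foo_zone;
 *
 *	foo_zone = uma_zcreate("foo zone", sizeof(struct foo),
 *	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
 *	item = uma_zalloc(foo_zone, M_WAITOK | M_ZERO);
 *	...
 *	uma_zfree(foo_zone, item);
 */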
1743 
1744 /* See uma.h */
1745 uma_zone_t
1746 uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
1747 		    uma_init zinit, uma_fini zfini, uma_zone_t master)
1748 {
1749 	struct uma_zctor_args args;
1750 
1751 	args.name = name;
1752 	args.size = master->uz_keg->uk_size;
1753 	args.ctor = ctor;
1754 	args.dtor = dtor;
1755 	args.uminit = zinit;
1756 	args.fini = zfini;
1757 	args.align = master->uz_keg->uk_align;
1758 	args.flags = master->uz_keg->uk_flags | UMA_ZONE_SECONDARY;
1759 	args.keg = master->uz_keg;
1760 
1761 	return (uma_zalloc_internal(zones, &args, M_WAITOK));
1762 }
1763 
1764 /* See uma.h */
1765 void
1766 uma_zdestroy(uma_zone_t zone)
1767 {
1768 
1769 	uma_zfree_internal(zones, zone, NULL, SKIP_NONE, ZFREE_STATFREE);
1770 }
1771 
1772 /* See uma.h */
1773 void *
1774 uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
1775 {
1776 	void *item;
1777 	uma_cache_t cache;
1778 	uma_bucket_t bucket;
1779 	int cpu;
1780 	int badness;
1781 
1782 	/* This is the fast path allocation */
1783 #ifdef UMA_DEBUG_ALLOC_1
1784 	printf("Allocating one item from %s(%p)\n", zone->uz_name, zone);
1785 #endif
1786 	CTR3(KTR_UMA, "uma_zalloc_arg thread %x zone %s flags %d", curthread,
1787 	    zone->uz_name, flags);
1788 
1789 	if (!(flags & M_NOWAIT)) {
1790 		KASSERT(curthread->td_intr_nesting_level == 0,
1791 		   ("malloc(M_WAITOK) in interrupt context"));
1792 		if (nosleepwithlocks) {
1793 #ifdef WITNESS
1794 			badness = WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
1795 			    NULL,
1796 			    "malloc(M_WAITOK) of \"%s\", forcing M_NOWAIT",
1797 			    zone->uz_name);
1798 #else
1799 			badness = 1;
1800 #endif
1801 		} else {
1802 			badness = 0;
1803 #ifdef WITNESS
1804 			WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1805 			    "malloc(M_WAITOK) of \"%s\"", zone->uz_name);
1806 #endif
1807 		}
1808 		if (badness) {
1809 			flags &= ~M_WAITOK;
1810 			flags |= M_NOWAIT;
1811 		}
1812 	}
1813 
1814 	/*
1815 	 * If possible, allocate from the per-CPU cache.  There are two
1816 	 * requirements for safe access to the per-CPU cache: (1) the thread
1817 	 * accessing the cache must not be preempted or yield during access,
1818 	 * and (2) the thread must not migrate CPUs without switching which
1819 	 * cache it accesses.  We rely on a critical section to prevent
1820 	 * preemption and migration.  We release the critical section in
1821 	 * order to acquire the zone mutex if we are unable to allocate from
1822 	 * the current cache; when we re-acquire the critical section, we
1823 	 * must detect and handle migration if it has occurred.
1824 	 */
1825 zalloc_restart:
1826 	critical_enter();
1827 	cpu = curcpu;
1828 	cache = &zone->uz_cpu[cpu];
1829 
1830 zalloc_start:
1831 	bucket = cache->uc_allocbucket;
1832 
1833 	if (bucket) {
1834 		if (bucket->ub_cnt > 0) {
1835 			bucket->ub_cnt--;
1836 			item = bucket->ub_bucket[bucket->ub_cnt];
1837 #ifdef INVARIANTS
1838 			bucket->ub_bucket[bucket->ub_cnt] = NULL;
1839 #endif
1840 			KASSERT(item != NULL,
1841 			    ("uma_zalloc: Bucket pointer mangled."));
1842 			cache->uc_allocs++;
1843 			critical_exit();
1844 #ifdef INVARIANTS
1845 			ZONE_LOCK(zone);
1846 			uma_dbg_alloc(zone, NULL, item);
1847 			ZONE_UNLOCK(zone);
1848 #endif
1849 			if (zone->uz_ctor != NULL) {
1850 				if (zone->uz_ctor(item, zone->uz_keg->uk_size,
1851 				    udata, flags) != 0) {
1852 					uma_zfree_internal(zone, item, udata,
1853 					    SKIP_DTOR, ZFREE_STATFAIL |
1854 					    ZFREE_STATFREE);
1855 					return (NULL);
1856 				}
1857 			}
1858 			if (flags & M_ZERO)
1859 				bzero(item, zone->uz_keg->uk_size);
1860 			return (item);
1861 		} else if (cache->uc_freebucket) {
1862 			/*
1863 			 * We have run out of items in our allocbucket.
1864 			 * See if we can switch with our free bucket.
1865 			 */
1866 			if (cache->uc_freebucket->ub_cnt > 0) {
1867 #ifdef UMA_DEBUG_ALLOC
1868 				printf("uma_zalloc: Swapping empty with"
1869 				    " alloc.\n");
1870 #endif
1871 				bucket = cache->uc_freebucket;
1872 				cache->uc_freebucket = cache->uc_allocbucket;
1873 				cache->uc_allocbucket = bucket;
1874 
1875 				goto zalloc_start;
1876 			}
1877 		}
1878 	}
1879 	/*
1880 	 * The attempt to retrieve the item from the per-CPU cache has failed, so
1881 	 * we must go back to the zone.  This requires the zone lock, so we
1882 	 * must drop the critical section, then re-acquire it when we go back
1883 	 * to the cache.  Since the critical section is released, we may be
1884 	 * preempted or migrate.  As such, make sure not to maintain any
1885 	 * thread-local state specific to the cache from prior to releasing
1886 	 * the critical section.
1887 	 */
1888 	critical_exit();
1889 	ZONE_LOCK(zone);
1890 	critical_enter();
1891 	cpu = curcpu;
1892 	cache = &zone->uz_cpu[cpu];
1893 	bucket = cache->uc_allocbucket;
1894 	if (bucket != NULL) {
1895 		if (bucket->ub_cnt > 0) {
1896 			ZONE_UNLOCK(zone);
1897 			goto zalloc_start;
1898 		}
1899 		bucket = cache->uc_freebucket;
1900 		if (bucket != NULL && bucket->ub_cnt > 0) {
1901 			ZONE_UNLOCK(zone);
1902 			goto zalloc_start;
1903 		}
1904 	}
1905 
1906 	/* Since we have locked the zone we may as well send back our stats */
1907 	zone->uz_allocs += cache->uc_allocs;
1908 	cache->uc_allocs = 0;
1909 	zone->uz_frees += cache->uc_frees;
1910 	cache->uc_frees = 0;
1911 
1912 	/* Our old one is now a free bucket */
1913 	if (cache->uc_allocbucket) {
1914 		KASSERT(cache->uc_allocbucket->ub_cnt == 0,
1915 		    ("uma_zalloc_arg: Freeing a non free bucket."));
1916 		LIST_INSERT_HEAD(&zone->uz_free_bucket,
1917 		    cache->uc_allocbucket, ub_link);
1918 		cache->uc_allocbucket = NULL;
1919 	}
1920 
1921 	/* Check the free list for a new alloc bucket */
1922 	if ((bucket = LIST_FIRST(&zone->uz_full_bucket)) != NULL) {
1923 		KASSERT(bucket->ub_cnt != 0,
1924 		    ("uma_zalloc_arg: Returning an empty bucket."));
1925 
1926 		LIST_REMOVE(bucket, ub_link);
1927 		cache->uc_allocbucket = bucket;
1928 		ZONE_UNLOCK(zone);
1929 		goto zalloc_start;
1930 	}
1931 	/* We are no longer associated with this CPU. */
1932 	critical_exit();
1933 
1934 	/* Bump up our uz_count so we get here less often */
1935 	if (zone->uz_count < BUCKET_MAX)
1936 		zone->uz_count++;
1937 
1938 	/*
1939 	 * Now let's just fill a bucket and put it on the free list.  If that
1940 	 * works we'll restart the allocation from the beginning.
1941 	 */
1942 	if (uma_zalloc_bucket(zone, flags)) {
1943 		ZONE_UNLOCK(zone);
1944 		goto zalloc_restart;
1945 	}
1946 	ZONE_UNLOCK(zone);
1947 	/*
1948 	 * We may not be able to get a bucket so return an actual item.
1949 	 */
1950 #ifdef UMA_DEBUG
1951 	printf("uma_zalloc_arg: Bucketzone returned NULL\n");
1952 #endif
1953 
1954 	return (uma_zalloc_internal(zone, udata, flags));
1955 }
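
/*
 * Illustrative sketch (not part of the original code): the typical caller
 * pattern for the fast path above.  "my_zone" and "struct my_obj" are
 * hypothetical; uma_zalloc() and uma_zfree() are the udata-less wrappers
 * around uma_zalloc_arg() and uma_zfree_arg() from <vm/uma.h>.
 */
#if 0
	struct my_obj *obj;

	/* Non-sleeping allocation; the caller must be able to handle NULL. */
	obj = uma_zalloc(my_zone, M_NOWAIT | M_ZERO);
	if (obj == NULL)
		return (ENOMEM);
	/* ... use obj ... */

	/* Hand the item back to the zone's per-CPU cache. */
	uma_zfree(my_zone, obj);
#endif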
1956 
1957 static uma_slab_t
1958 uma_zone_slab(uma_zone_t zone, int flags)
1959 {
1960 	uma_slab_t slab;
1961 	uma_keg_t keg;
1962 
1963 	keg = zone->uz_keg;
1964 
1965 	/*
1966 	 * This is to prevent us from recursively trying to allocate
1967 	 * buckets.  The problem is that if an allocation forces us to
1968 	 * grab a new bucket we will call page_alloc, which will go off
1969 	 * and cause the vm to allocate vm_map_entries.  If we need new
1970 	 * buckets there too we will recurse in kmem_alloc and bad
1971 	 * things happen.  So instead we return a NULL bucket, and make
1972 	 * the code that allocates buckets smart enough to deal with it
1973 	 * the code that allocates buckets smart enough to deal with it.
1974 	 * XXX: While we want this protection for the bucket zones so that
1975 	 * recursion from the VM is handled (and the calling code that
1976 	 * allocates buckets knows how to deal with it), we do not want
1977 	 * to prevent allocation from the slab header zones (slabzone
1978 	 * and slabrefzone) if uk_recurse is not zero for them.  The
1979 	 * reason is that it could lead to NULL being returned for
1980 	 * slab header allocations even in the M_WAITOK case, and the
1981 	 * caller can't handle that.
1982 	 */
1983 	if (keg->uk_flags & UMA_ZFLAG_INTERNAL && keg->uk_recurse != 0)
1984 		if ((zone != slabzone) && (zone != slabrefzone))
1985 			return (NULL);
1986 
1987 	slab = NULL;
1988 
1989 	for (;;) {
1990 		/*
1991 		 * Find a slab with some space.  Prefer slabs that are partially
1992 		 * used over those that are totally full.  This helps to reduce
1993 		 * fragmentation.
1994 		 */
1995 		if (keg->uk_free != 0) {
1996 			if (!LIST_EMPTY(&keg->uk_part_slab)) {
1997 				slab = LIST_FIRST(&keg->uk_part_slab);
1998 			} else {
1999 				slab = LIST_FIRST(&keg->uk_free_slab);
2000 				LIST_REMOVE(slab, us_link);
2001 				LIST_INSERT_HEAD(&keg->uk_part_slab, slab,
2002 				    us_link);
2003 			}
2004 			return (slab);
2005 		}
2006 
2007 		/*
2008 		 * M_NOVM means don't ask at all!
2009 		 */
2010 		if (flags & M_NOVM)
2011 			break;
2012 
2013 		if (keg->uk_maxpages &&
2014 		    keg->uk_pages >= keg->uk_maxpages) {
2015 			keg->uk_flags |= UMA_ZFLAG_FULL;
2016 
2017 			if (flags & M_NOWAIT)
2018 				break;
2019 			else
2020 				msleep(keg, &keg->uk_lock, PVM,
2021 				    "zonelimit", 0);
2022 			continue;
2023 		}
2024 		keg->uk_recurse++;
2025 		slab = slab_zalloc(zone, flags);
2026 		keg->uk_recurse--;
2027 
2028 		/*
2029 		 * If we got a slab here it's safe to mark it partially used
2030 		 * and return.  We assume that the caller is going to remove
2031 		 * at least one item.
2032 		 */
2033 		if (slab) {
2034 			LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
2035 			return (slab);
2036 		}
2037 		/*
2038 		 * We might not have been able to get a slab but another CPU
2039 		 * could have while we were unlocked.  Check again before we
2040 		 * fail.
2041 		 */
2042 		if (flags & M_NOWAIT)
2043 			flags |= M_NOVM;
2044 	}
2045 	return (slab);
2046 }
2047 
2048 static void *
2049 uma_slab_alloc(uma_zone_t zone, uma_slab_t slab)
2050 {
2051 	uma_keg_t keg;
2052 	uma_slabrefcnt_t slabref;
2053 	void *item;
2054 	u_int8_t freei;
2055 
2056 	keg = zone->uz_keg;
2057 
2058 	freei = slab->us_firstfree;
2059 	if (keg->uk_flags & UMA_ZONE_REFCNT) {
2060 		slabref = (uma_slabrefcnt_t)slab;
2061 		slab->us_firstfree = slabref->us_freelist[freei].us_item;
2062 	} else {
2063 		slab->us_firstfree = slab->us_freelist[freei].us_item;
2064 	}
2065 	item = slab->us_data + (keg->uk_rsize * freei);
2066 
2067 	slab->us_freecount--;
2068 	keg->uk_free--;
2069 #ifdef INVARIANTS
2070 	uma_dbg_alloc(zone, slab, item);
2071 #endif
2072 	/* Move this slab to the full list */
2073 	if (slab->us_freecount == 0) {
2074 		LIST_REMOVE(slab, us_link);
2075 		LIST_INSERT_HEAD(&keg->uk_full_slab, slab, us_link);
2076 	}
2077 
2078 	return (item);
2079 }
2080 
2081 static int
2082 uma_zalloc_bucket(uma_zone_t zone, int flags)
2083 {
2084 	uma_bucket_t bucket;
2085 	uma_slab_t slab;
2086 	int16_t saved;
2087 	int max, origflags = flags;
2088 
2089 	/*
2090 	 * Try this zone's free list first so we don't allocate extra buckets.
2091 	 */
2092 	if ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
2093 		KASSERT(bucket->ub_cnt == 0,
2094 		    ("uma_zalloc_bucket: Bucket on free list is not empty."));
2095 		LIST_REMOVE(bucket, ub_link);
2096 	} else {
2097 		int bflags;
2098 
2099 		bflags = (flags & ~M_ZERO);
2100 		if (zone->uz_keg->uk_flags & UMA_ZFLAG_CACHEONLY)
2101 			bflags |= M_NOVM;
2102 
2103 		ZONE_UNLOCK(zone);
2104 		bucket = bucket_alloc(zone->uz_count, bflags);
2105 		ZONE_LOCK(zone);
2106 	}
2107 
2108 	if (bucket == NULL)
2109 		return (0);
2110 
2111 #ifdef SMP
2112 	/*
2113 	 * This code is here to limit the number of simultaneous bucket fills
2114 	 * for any given zone to the number of per-CPU caches in this zone.  This
2115 	 * is done so that we don't allocate more memory than we really need.
2116 	 */
2117 	if (zone->uz_fills >= mp_ncpus)
2118 		goto done;
2119 
2120 #endif
2121 	zone->uz_fills++;
2122 
2123 	max = MIN(bucket->ub_entries, zone->uz_count);
2124 	/* Try to keep the buckets totally full */
2125 	saved = bucket->ub_cnt;
2126 	while (bucket->ub_cnt < max &&
2127 	    (slab = uma_zone_slab(zone, flags)) != NULL) {
2128 		while (slab->us_freecount && bucket->ub_cnt < max) {
2129 			bucket->ub_bucket[bucket->ub_cnt++] =
2130 			    uma_slab_alloc(zone, slab);
2131 		}
2132 
2133 		/* Don't block on the next fill */
2134 		flags |= M_NOWAIT;
2135 	}
2136 
2137 	/*
2138 	 * We unlock here because we need to call the zone's init.
2139 	 * It should be safe to unlock because the slab dealt with
2140 	 * above is already on the appropriate list within the keg
2141 	 * and the bucket we filled is not yet on any list, so we
2142 	 * own it.
2143 	 */
2144 	if (zone->uz_init != NULL) {
2145 		int i;
2146 
2147 		ZONE_UNLOCK(zone);
2148 		for (i = saved; i < bucket->ub_cnt; i++)
2149 			if (zone->uz_init(bucket->ub_bucket[i],
2150 			    zone->uz_keg->uk_size, origflags) != 0)
2151 				break;
2152 		/*
2153 		 * If we couldn't initialize the whole bucket, put the
2154 		 * rest back onto the freelist.
2155 		 */
2156 		if (i != bucket->ub_cnt) {
2157 			int j;
2158 
2159 			for (j = i; j < bucket->ub_cnt; j++) {
2160 				uma_zfree_internal(zone, bucket->ub_bucket[j],
2161 				    NULL, SKIP_FINI, 0);
2162 #ifdef INVARIANTS
2163 				bucket->ub_bucket[j] = NULL;
2164 #endif
2165 			}
2166 			bucket->ub_cnt = i;
2167 		}
2168 		ZONE_LOCK(zone);
2169 	}
2170 
2171 	zone->uz_fills--;
2172 	if (bucket->ub_cnt != 0) {
2173 		LIST_INSERT_HEAD(&zone->uz_full_bucket,
2174 		    bucket, ub_link);
2175 		return (1);
2176 	}
2177 #ifdef SMP
2178 done:
2179 #endif
2180 	bucket_free(bucket);
2181 
2182 	return (0);
2183 }
2184 /*
2185  * Allocates an item for an internal zone
2186  *
2187  * Arguments
2188  *	zone   The zone to alloc for.
2189  *	udata  The data to be passed to the constructor.
2190  *	flags  M_WAITOK, M_NOWAIT, M_ZERO.
2191  *
2192  * Returns
2193  *	NULL if there is no memory and M_NOWAIT is set
2194  *	An item if successful
2195  */
2196 
2197 static void *
2198 uma_zalloc_internal(uma_zone_t zone, void *udata, int flags)
2199 {
2200 	uma_keg_t keg;
2201 	uma_slab_t slab;
2202 	void *item;
2203 
2204 	item = NULL;
2205 	keg = zone->uz_keg;
2206 
2207 #ifdef UMA_DEBUG_ALLOC
2208 	printf("INTERNAL: Allocating one item from %s(%p)\n", zone->uz_name, zone);
2209 #endif
2210 	ZONE_LOCK(zone);
2211 
2212 	slab = uma_zone_slab(zone, flags);
2213 	if (slab == NULL) {
2214 		zone->uz_fails++;
2215 		ZONE_UNLOCK(zone);
2216 		return (NULL);
2217 	}
2218 
2219 	item = uma_slab_alloc(zone, slab);
2220 
2221 	zone->uz_allocs++;
2222 
2223 	ZONE_UNLOCK(zone);
2224 
2225 	/*
2226 	 * We have to call both the zone's init (not the keg's init)
2227 	 * and the zone's ctor.  This is because the item is going from
2228 	 * a keg slab directly to the user, and the user is expecting it
2229 	 * to be both zone-init'd as well as zone-ctor'd.
2230 	 */
2231 	if (zone->uz_init != NULL) {
2232 		if (zone->uz_init(item, keg->uk_size, flags) != 0) {
2233 			uma_zfree_internal(zone, item, udata, SKIP_FINI,
2234 			    ZFREE_STATFAIL | ZFREE_STATFREE);
2235 			return (NULL);
2236 		}
2237 	}
2238 	if (zone->uz_ctor != NULL) {
2239 		if (zone->uz_ctor(item, keg->uk_size, udata, flags) != 0) {
2240 			uma_zfree_internal(zone, item, udata, SKIP_DTOR,
2241 			    ZFREE_STATFAIL | ZFREE_STATFREE);
2242 			return (NULL);
2243 		}
2244 	}
2245 	if (flags & M_ZERO)
2246 		bzero(item, keg->uk_size);
2247 
2248 	return (item);
2249 }
2250 
2251 /* See uma.h */
2252 void
2253 uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
2254 {
2255 	uma_keg_t keg;
2256 	uma_cache_t cache;
2257 	uma_bucket_t bucket;
2258 	int bflags;
2259 	int cpu;
2260 
2261 	keg = zone->uz_keg;
2262 
2263 #ifdef UMA_DEBUG_ALLOC_1
2264 	printf("Freeing item %p to %s(%p)\n", item, zone->uz_name, zone);
2265 #endif
2266 	CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread,
2267 	    zone->uz_name);
2268 
2269 	if (zone->uz_dtor)
2270 		zone->uz_dtor(item, keg->uk_size, udata);
2271 #ifdef INVARIANTS
2272 	ZONE_LOCK(zone);
2273 	if (keg->uk_flags & UMA_ZONE_MALLOC)
2274 		uma_dbg_free(zone, udata, item);
2275 	else
2276 		uma_dbg_free(zone, NULL, item);
2277 	ZONE_UNLOCK(zone);
2278 #endif
2279 	/*
2280 	 * The race here is acceptable.  If we miss it we'll just have to wait
2281 	 * a little longer for the limits to be reset.
2282 	 */
2283 	if (keg->uk_flags & UMA_ZFLAG_FULL)
2284 		goto zfree_internal;
2285 
2286 	/*
2287 	 * If possible, free to the per-CPU cache.  There are two
2288 	 * requirements for safe access to the per-CPU cache: (1) the thread
2289 	 * accessing the cache must not be preempted or yield during access,
2290 	 * and (2) the thread must not migrate CPUs without switching which
2291 	 * cache it accesses.  We rely on a critical section to prevent
2292 	 * preemption and migration.  We release the critical section in
2293 	 * order to acquire the zone mutex if we are unable to free to the
2294 	 * current cache; when we re-acquire the critical section, we must
2295 	 * detect and handle migration if it has occurred.
2296 	 */
2297 zfree_restart:
2298 	critical_enter();
2299 	cpu = curcpu;
2300 	cache = &zone->uz_cpu[cpu];
2301 
2302 zfree_start:
2303 	bucket = cache->uc_freebucket;
2304 
2305 	if (bucket) {
2306 		/*
2307 		 * Do we have room in our bucket? It is OK for this uz count
2308 		 * check to be slightly out of sync.
2309 		 */
2310 
2311 		if (bucket->ub_cnt < bucket->ub_entries) {
2312 			KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL,
2313 			    ("uma_zfree: Freeing to non free bucket index."));
2314 			bucket->ub_bucket[bucket->ub_cnt] = item;
2315 			bucket->ub_cnt++;
2316 			cache->uc_frees++;
2317 			critical_exit();
2318 			return;
2319 		} else if (cache->uc_allocbucket) {
2320 #ifdef UMA_DEBUG_ALLOC
2321 			printf("uma_zfree: Swapping buckets.\n");
2322 #endif
2323 			/*
2324 			 * We have run out of space in our freebucket.
2325 			 * See if we can switch with our alloc bucket.
2326 			 */
2327 			if (cache->uc_allocbucket->ub_cnt <
2328 			    cache->uc_freebucket->ub_cnt) {
2329 				bucket = cache->uc_freebucket;
2330 				cache->uc_freebucket = cache->uc_allocbucket;
2331 				cache->uc_allocbucket = bucket;
2332 				goto zfree_start;
2333 			}
2334 		}
2335 	}
2336 	/*
2337 	 * We can get here for two reasons:
2338 	 *
2339 	 * 1) The buckets are NULL
2340 	 * 2) The alloc and free buckets are both somewhat full.
2341 	 *
2342 	 * We must go back to the zone, which requires acquiring the zone lock,
2343 	 * which in turn means we must release and re-acquire the critical
2344 	 * section.  Since the critical section is released, we may be
2345 	 * preempted or migrate.  As such, make sure not to maintain any
2346 	 * thread-local state specific to the cache from prior to releasing
2347 	 * the critical section.
2348 	 */
2349 	critical_exit();
2350 	ZONE_LOCK(zone);
2351 	critical_enter();
2352 	cpu = curcpu;
2353 	cache = &zone->uz_cpu[cpu];
2354 	if (cache->uc_freebucket != NULL) {
2355 		if (cache->uc_freebucket->ub_cnt <
2356 		    cache->uc_freebucket->ub_entries) {
2357 			ZONE_UNLOCK(zone);
2358 			goto zfree_start;
2359 		}
2360 		if (cache->uc_allocbucket != NULL &&
2361 		    (cache->uc_allocbucket->ub_cnt <
2362 		    cache->uc_freebucket->ub_cnt)) {
2363 			ZONE_UNLOCK(zone);
2364 			goto zfree_start;
2365 		}
2366 	}
2367 
2368 	/* Since we have locked the zone we may as well send back our stats */
2369 	zone->uz_allocs += cache->uc_allocs;
2370 	cache->uc_allocs = 0;
2371 	zone->uz_frees += cache->uc_frees;
2372 	cache->uc_frees = 0;
2373 
2374 	bucket = cache->uc_freebucket;
2375 	cache->uc_freebucket = NULL;
2376 
2377 	/* Can we throw this on the zone full list? */
2378 	if (bucket != NULL) {
2379 #ifdef UMA_DEBUG_ALLOC
2380 		printf("uma_zfree: Putting old bucket on the free list.\n");
2381 #endif
2382 		/* ub_cnt is pointing to the last free item */
2383 		KASSERT(bucket->ub_cnt != 0,
2384 		    ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n"));
2385 		LIST_INSERT_HEAD(&zone->uz_full_bucket,
2386 		    bucket, ub_link);
2387 	}
2388 	if ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
2389 		LIST_REMOVE(bucket, ub_link);
2390 		ZONE_UNLOCK(zone);
2391 		cache->uc_freebucket = bucket;
2392 		goto zfree_start;
2393 	}
2394 	/* We are no longer associated with this CPU. */
2395 	critical_exit();
2396 
2397 	/* And release the zone lock */
2398 	ZONE_UNLOCK(zone);
2399 
2400 #ifdef UMA_DEBUG_ALLOC
2401 	printf("uma_zfree: Allocating new free bucket.\n");
2402 #endif
2403 	bflags = M_NOWAIT;
2404 
2405 	if (keg->uk_flags & UMA_ZFLAG_CACHEONLY)
2406 		bflags |= M_NOVM;
2407 	bucket = bucket_alloc(zone->uz_count, bflags);
2408 	if (bucket) {
2409 		ZONE_LOCK(zone);
2410 		LIST_INSERT_HEAD(&zone->uz_free_bucket,
2411 		    bucket, ub_link);
2412 		ZONE_UNLOCK(zone);
2413 		goto zfree_restart;
2414 	}
2415 
2416 	/*
2417 	 * If nothing else caught this, we'll just do an internal free.
2418 	 */
2419 zfree_internal:
2420 	uma_zfree_internal(zone, item, udata, SKIP_DTOR, ZFREE_STATFAIL |
2421 	    ZFREE_STATFREE);
2422 
2423 	return;
2424 }
2425 
2426 /*
2427  * Frees an item to an INTERNAL zone or allocates a free bucket
2428  *
2429  * Arguments:
2430  *	zone   The zone to free to
2431  *	item   The item we're freeing
2432  *	udata  User supplied data for the dtor
2433  *	skip   Skip dtors and finis
 *	flags  ZFREE_STATFAIL and/or ZFREE_STATFREE statistics flags
2434  */
2435 static void
2436 uma_zfree_internal(uma_zone_t zone, void *item, void *udata,
2437     enum zfreeskip skip, int flags)
2438 {
2439 	uma_slab_t slab;
2440 	uma_slabrefcnt_t slabref;
2441 	uma_keg_t keg;
2442 	u_int8_t *mem;
2443 	u_int8_t freei;
2444 
2445 	keg = zone->uz_keg;
2446 
2447 	if (skip < SKIP_DTOR && zone->uz_dtor)
2448 		zone->uz_dtor(item, keg->uk_size, udata);
2449 	if (skip < SKIP_FINI && zone->uz_fini)
2450 		zone->uz_fini(item, keg->uk_size);
2451 
2452 	ZONE_LOCK(zone);
2453 
2454 	if (flags & ZFREE_STATFAIL)
2455 		zone->uz_fails++;
2456 	if (flags & ZFREE_STATFREE)
2457 		zone->uz_frees++;
2458 
2459 	if (!(keg->uk_flags & UMA_ZONE_MALLOC)) {
2460 		mem = (u_int8_t *)((unsigned long)item & (~UMA_SLAB_MASK));
2461 		if (keg->uk_flags & UMA_ZONE_HASH)
2462 			slab = hash_sfind(&keg->uk_hash, mem);
2463 		else {
2464 			mem += keg->uk_pgoff;
2465 			slab = (uma_slab_t)mem;
2466 		}
2467 	} else {
2468 		slab = (uma_slab_t)udata;
2469 	}
2470 
2471 	/* Do we need to remove from any lists? */
2472 	if (slab->us_freecount+1 == keg->uk_ipers) {
2473 		LIST_REMOVE(slab, us_link);
2474 		LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
2475 	} else if (slab->us_freecount == 0) {
2476 		LIST_REMOVE(slab, us_link);
2477 		LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
2478 	}
2479 
2480 	/* Slab management stuff */
2481 	freei = ((unsigned long)item - (unsigned long)slab->us_data)
2482 		/ keg->uk_rsize;
2483 
2484 #ifdef INVARIANTS
2485 	if (!skip)
2486 		uma_dbg_free(zone, slab, item);
2487 #endif
2488 
2489 	if (keg->uk_flags & UMA_ZONE_REFCNT) {
2490 		slabref = (uma_slabrefcnt_t)slab;
2491 		slabref->us_freelist[freei].us_item = slab->us_firstfree;
2492 	} else {
2493 		slab->us_freelist[freei].us_item = slab->us_firstfree;
2494 	}
2495 	slab->us_firstfree = freei;
2496 	slab->us_freecount++;
2497 
2498 	/* Zone statistics */
2499 	keg->uk_free++;
2500 
2501 	if (keg->uk_flags & UMA_ZFLAG_FULL) {
2502 		if (keg->uk_pages < keg->uk_maxpages)
2503 			keg->uk_flags &= ~UMA_ZFLAG_FULL;
2504 
2505 		/* We can handle one more allocation */
2506 		wakeup_one(keg);
2507 	}
2508 
2509 	ZONE_UNLOCK(zone);
2510 }
2511 
2512 /* See uma.h */
2513 void
2514 uma_zone_set_max(uma_zone_t zone, int nitems)
2515 {
2516 	uma_keg_t keg;
2517 
2518 	keg = zone->uz_keg;
2519 	ZONE_LOCK(zone);
2520 	if (keg->uk_ppera > 1)
2521 		keg->uk_maxpages = nitems * keg->uk_ppera;
2522 	else
2523 		keg->uk_maxpages = nitems / keg->uk_ipers;
2524 
2525 	if (keg->uk_maxpages * keg->uk_ipers < nitems)
2526 		keg->uk_maxpages++;
2527 
2528 	ZONE_UNLOCK(zone);
2529 }
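
/*
 * Illustrative arithmetic (not from the original source): for a
 * single-page-per-slab keg with uk_ipers == 10, uma_zone_set_max(zone, 25)
 * first computes uk_maxpages = 25 / 10 = 2, then bumps it to 3 because
 * 2 * 10 < 25, so the effective limit becomes 3 * 10 = 30 items rather
 * than fewer than the requested 25.
 */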
2530 
2531 /* See uma.h */
2532 void
2533 uma_zone_set_init(uma_zone_t zone, uma_init uminit)
2534 {
2535 	ZONE_LOCK(zone);
2536 	KASSERT(zone->uz_keg->uk_pages == 0,
2537 	    ("uma_zone_set_init on non-empty keg"));
2538 	zone->uz_keg->uk_init = uminit;
2539 	ZONE_UNLOCK(zone);
2540 }
2541 
2542 /* See uma.h */
2543 void
2544 uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
2545 {
2546 	ZONE_LOCK(zone);
2547 	KASSERT(zone->uz_keg->uk_pages == 0,
2548 	    ("uma_zone_set_fini on non-empty keg"));
2549 	zone->uz_keg->uk_fini = fini;
2550 	ZONE_UNLOCK(zone);
2551 }
2552 
2553 /* See uma.h */
2554 void
2555 uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
2556 {
2557 	ZONE_LOCK(zone);
2558 	KASSERT(zone->uz_keg->uk_pages == 0,
2559 	    ("uma_zone_set_zinit on non-empty keg"));
2560 	zone->uz_init = zinit;
2561 	ZONE_UNLOCK(zone);
2562 }
2563 
2564 /* See uma.h */
2565 void
2566 uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
2567 {
2568 	ZONE_LOCK(zone);
2569 	KASSERT(zone->uz_keg->uk_pages == 0,
2570 	    ("uma_zone_set_zfini on non-empty keg"));
2571 	zone->uz_fini = zfini;
2572 	ZONE_UNLOCK(zone);
2573 }
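
/*
 * Illustrative sketch (not part of the original code): attaching a
 * zone-level init/fini pair right after zone creation, before any pages
 * have been allocated for the keg.  "my_zone", "struct my_obj", my_zinit()
 * and my_zfini() are hypothetical.
 */
#if 0
	my_zone = uma_zcreate("my_zone", sizeof(struct my_obj),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	uma_zone_set_zinit(my_zone, my_zinit);
	uma_zone_set_zfini(my_zone, my_zfini);
#endif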
2574 
2575 /* See uma.h */
2576 /* XXX uk_freef is not actually used with the zone locked */
2577 void
2578 uma_zone_set_freef(uma_zone_t zone, uma_free freef)
2579 {
2580 	ZONE_LOCK(zone);
2581 	zone->uz_keg->uk_freef = freef;
2582 	ZONE_UNLOCK(zone);
2583 }
2584 
2585 /* See uma.h */
2586 /* XXX uk_allocf is not actually used with the zone locked */
2587 void
2588 uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
2589 {
2590 	ZONE_LOCK(zone);
2591 	zone->uz_keg->uk_flags |= UMA_ZFLAG_PRIVALLOC;
2592 	zone->uz_keg->uk_allocf = allocf;
2593 	ZONE_UNLOCK(zone);
2594 }
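
/*
 * Illustrative sketch (not part of the original code): installing a custom
 * backend page allocator/free pair on a zone, as subsystems with special
 * virtual address requirements do.  "my_zone", my_page_alloc() and
 * my_page_free() are hypothetical functions matching the uma_alloc and
 * uma_free typedefs from <vm/uma.h>.
 */
#if 0
	uma_zone_set_allocf(my_zone, my_page_alloc);
	uma_zone_set_freef(my_zone, my_page_free);
#endif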
2595 
2596 /* See uma.h */
2597 int
2598 uma_zone_set_obj(uma_zone_t zone, struct vm_object *obj, int count)
2599 {
2600 	uma_keg_t keg;
2601 	vm_offset_t kva;
2602 	int pages;
2603 
2604 	keg = zone->uz_keg;
2605 	pages = count / keg->uk_ipers;
2606 
2607 	if (pages * keg->uk_ipers < count)
2608 		pages++;
2609 
2610 	kva = kmem_alloc_nofault(kernel_map, pages * UMA_SLAB_SIZE);
2611 
2612 	if (kva == 0)
2613 		return (0);
2614 	if (obj == NULL) {
2615 		obj = vm_object_allocate(OBJT_DEFAULT,
2616 		    pages);
2617 	} else {
2618 		VM_OBJECT_LOCK_INIT(obj, "uma object");
2619 		_vm_object_allocate(OBJT_DEFAULT,
2620 		    pages, obj);
2621 	}
2622 	ZONE_LOCK(zone);
2623 	keg->uk_kva = kva;
2624 	keg->uk_obj = obj;
2625 	keg->uk_maxpages = pages;
2626 	keg->uk_allocf = obj_alloc;
2627 	keg->uk_flags |= UMA_ZONE_NOFREE | UMA_ZFLAG_PRIVALLOC;
2628 	ZONE_UNLOCK(zone);
2629 	return (1);
2630 }
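
/*
 * Illustrative sketch (not part of the original code): backing a zone with
 * a pre-sized VM object so it can hold up to "count" items from a reserved
 * kernel virtual address range.  "my_zone" and "count" are hypothetical;
 * passing obj == NULL lets UMA allocate the object itself.
 */
#if 0
	if (uma_zone_set_obj(my_zone, NULL, count) == 0)
		panic("could not reserve backing object for my_zone");
#endif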
2631 
2632 /* See uma.h */
2633 void
2634 uma_prealloc(uma_zone_t zone, int items)
2635 {
2636 	int slabs;
2637 	uma_slab_t slab;
2638 	uma_keg_t keg;
2639 
2640 	keg = zone->uz_keg;
2641 	ZONE_LOCK(zone);
2642 	slabs = items / keg->uk_ipers;
2643 	if (slabs * keg->uk_ipers < items)
2644 		slabs++;
2645 	while (slabs > 0) {
2646 		slab = slab_zalloc(zone, M_WAITOK);
2647 		LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
2648 		slabs--;
2649 	}
2650 	ZONE_UNLOCK(zone);
2651 }
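
/*
 * Illustrative sketch (not part of the original code): pre-populating a
 * zone's keg with enough slabs for a known worst-case demand, typically
 * done once at boot so that later M_NOWAIT allocations are less likely to
 * fail.  "my_zone" is hypothetical.
 */
#if 0
	uma_prealloc(my_zone, 256);
#endif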
2652 
2653 /* See uma.h */
2654 u_int32_t *
2655 uma_find_refcnt(uma_zone_t zone, void *item)
2656 {
2657 	uma_slabrefcnt_t slabref;
2658 	uma_keg_t keg;
2659 	u_int32_t *refcnt;
2660 	int idx;
2661 
2662 	keg = zone->uz_keg;
2663 	slabref = (uma_slabrefcnt_t)vtoslab((vm_offset_t)item &
2664 	    (~UMA_SLAB_MASK));
2665 	KASSERT(slabref != NULL && slabref->us_keg->uk_flags & UMA_ZONE_REFCNT,
2666 	    ("uma_find_refcnt(): zone possibly not UMA_ZONE_REFCNT"));
2667 	idx = ((unsigned long)item - (unsigned long)slabref->us_data)
2668 	    / keg->uk_rsize;
2669 	refcnt = &slabref->us_freelist[idx].us_refcnt;
2670 	return refcnt;
2671 }
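
/*
 * Illustrative sketch (not part of the original code): retrieving the
 * reference counter that UMA maintains for an item allocated from a
 * UMA_ZONE_REFCNT zone, in the style of the mbuf cluster code.
 * "my_refcnt_zone" is hypothetical.
 */
#if 0
	u_int32_t *refcnt;
	void *item;

	item = uma_zalloc(my_refcnt_zone, M_NOWAIT);
	if (item != NULL) {
		refcnt = uma_find_refcnt(my_refcnt_zone, item);
		*refcnt = 1;
	}
#endif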
2672 
2673 /* See uma.h */
2674 void
2675 uma_reclaim(void)
2676 {
2677 #ifdef UMA_DEBUG
2678 	printf("UMA: vm asked us to release pages!\n");
2679 #endif
2680 	bucket_enable();
2681 	zone_foreach(zone_drain);
2682 	/*
2683 	 * Some slabs may have been freed but this zone will be visited early,
2684 	 * so we visit it again here in order to free pages that became empty
2685 	 * once the other zones were drained.  We have to do the same for buckets.
2686 	 */
2687 	zone_drain(slabzone);
2688 	zone_drain(slabrefzone);
2689 	bucket_zone_drain();
2690 }
2691 
2692 void *
2693 uma_large_malloc(int size, int wait)
2694 {
2695 	void *mem;
2696 	uma_slab_t slab;
2697 	u_int8_t flags;
2698 
2699 	slab = uma_zalloc_internal(slabzone, NULL, wait);
2700 	if (slab == NULL)
2701 		return (NULL);
2702 	mem = page_alloc(NULL, size, &flags, wait);
2703 	if (mem) {
2704 		vsetslab((vm_offset_t)mem, slab);
2705 		slab->us_data = mem;
2706 		slab->us_flags = flags | UMA_SLAB_MALLOC;
2707 		slab->us_size = size;
2708 	} else {
2709 		uma_zfree_internal(slabzone, slab, NULL, SKIP_NONE,
2710 		    ZFREE_STATFAIL | ZFREE_STATFREE);
2711 	}
2712 
2713 	return (mem);
2714 }
2715 
2716 void
2717 uma_large_free(uma_slab_t slab)
2718 {
2719 	vsetobj((vm_offset_t)slab->us_data, kmem_object);
2720 	page_free(slab->us_data, slab->us_size, slab->us_flags);
2721 	uma_zfree_internal(slabzone, slab, NULL, SKIP_NONE, ZFREE_STATFREE);
2722 }
2723 
2724 void
2725 uma_print_stats(void)
2726 {
2727 	zone_foreach(uma_print_zone);
2728 }
2729 
2730 static void
2731 slab_print(uma_slab_t slab)
2732 {
2733 	printf("slab: keg %p, data %p, freecount %d, firstfree %d\n",
2734 		slab->us_keg, slab->us_data, slab->us_freecount,
2735 		slab->us_firstfree);
2736 }
2737 
2738 static void
2739 cache_print(uma_cache_t cache)
2740 {
2741 	printf("alloc: %p(%d), free: %p(%d)\n",
2742 		cache->uc_allocbucket,
2743 		cache->uc_allocbucket?cache->uc_allocbucket->ub_cnt:0,
2744 		cache->uc_freebucket,
2745 		cache->uc_freebucket?cache->uc_freebucket->ub_cnt:0);
2746 }
2747 
2748 void
2749 uma_print_zone(uma_zone_t zone)
2750 {
2751 	uma_cache_t cache;
2752 	uma_keg_t keg;
2753 	uma_slab_t slab;
2754 	int i;
2755 
2756 	keg = zone->uz_keg;
2757 	printf("%s(%p) size %d(%d) flags %d ipers %d ppera %d out %d free %d\n",
2758 	    zone->uz_name, zone, keg->uk_size, keg->uk_rsize, keg->uk_flags,
2759 	    keg->uk_ipers, keg->uk_ppera,
2760 	    (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free);
2761 	printf("Part slabs:\n");
2762 	LIST_FOREACH(slab, &keg->uk_part_slab, us_link)
2763 		slab_print(slab);
2764 	printf("Free slabs:\n");
2765 	LIST_FOREACH(slab, &keg->uk_free_slab, us_link)
2766 		slab_print(slab);
2767 	printf("Full slabs:\n");
2768 	LIST_FOREACH(slab, &keg->uk_full_slab, us_link)
2769 		slab_print(slab);
2770 	for (i = 0; i <= mp_maxid; i++) {
2771 		if (CPU_ABSENT(i))
2772 			continue;
2773 		cache = &zone->uz_cpu[i];
2774 		printf("CPU %d Cache:\n", i);
2775 		cache_print(cache);
2776 	}
2777 }
2778 
2779 /*
2780  * Generate statistics across both the zone and its per-CPU caches.  Return
2781  * desired statistics if the pointer is non-NULL for that statistic.
2782  *
2783  * Note: does not update the zone statistics, as it can't safely clear the
2784  * per-CPU cache statistic.
2785  *
2786  * XXXRW: Following the uc_allocbucket and uc_freebucket pointers here isn't
2787  * safe from off-CPU; we should modify the caches to track this information
2788  * directly so that we don't have to.
2789  */
2790 static void
2791 uma_zone_sumstat(uma_zone_t z, int *cachefreep, u_int64_t *allocsp,
2792     u_int64_t *freesp)
2793 {
2794 	uma_cache_t cache;
2795 	u_int64_t allocs, frees;
2796 	int cachefree, cpu;
2797 
2798 	allocs = frees = 0;
2799 	cachefree = 0;
2800 	for (cpu = 0; cpu <= mp_maxid; cpu++) {
2801 		if (CPU_ABSENT(cpu))
2802 			continue;
2803 		cache = &z->uz_cpu[cpu];
2804 		if (cache->uc_allocbucket != NULL)
2805 			cachefree += cache->uc_allocbucket->ub_cnt;
2806 		if (cache->uc_freebucket != NULL)
2807 			cachefree += cache->uc_freebucket->ub_cnt;
2808 		allocs += cache->uc_allocs;
2809 		frees += cache->uc_frees;
2810 	}
2811 	allocs += z->uz_allocs;
2812 	frees += z->uz_frees;
2813 	if (cachefreep != NULL)
2814 		*cachefreep = cachefree;
2815 	if (allocsp != NULL)
2816 		*allocsp = allocs;
2817 	if (freesp != NULL)
2818 		*freesp = frees;
2819 }
2820 
2821 /*
2822  * Sysctl handler for vm.zone
2823  *
2824  * stolen from vm_zone.c
2825  */
2826 static int
2827 sysctl_vm_zone(SYSCTL_HANDLER_ARGS)
2828 {
2829 	int error, len, cnt;
2830 	const int linesize = 128;	/* conservative */
2831 	int totalfree;
2832 	char *tmpbuf, *offset;
2833 	uma_zone_t z;
2834 	uma_keg_t zk;
2835 	char *p;
2836 	int cachefree;
2837 	uma_bucket_t bucket;
2838 	u_int64_t allocs, frees;
2839 
2840 	cnt = 0;
2841 	mtx_lock(&uma_mtx);
2842 	LIST_FOREACH(zk, &uma_kegs, uk_link) {
2843 		LIST_FOREACH(z, &zk->uk_zones, uz_link)
2844 			cnt++;
2845 	}
2846 	mtx_unlock(&uma_mtx);
2847 	MALLOC(tmpbuf, char *, (cnt == 0 ? 1 : cnt) * linesize,
2848 			M_TEMP, M_WAITOK);
2849 	len = snprintf(tmpbuf, linesize,
2850 	    "\nITEM            SIZE     LIMIT     USED    FREE  REQUESTS\n\n");
2851 	if (cnt == 0)
2852 		tmpbuf[len - 1] = '\0';
2853 	error = SYSCTL_OUT(req, tmpbuf, cnt == 0 ? len-1 : len);
2854 	if (error || cnt == 0)
2855 		goto out;
2856 	offset = tmpbuf;
2857 	mtx_lock(&uma_mtx);
2858 	LIST_FOREACH(zk, &uma_kegs, uk_link) {
2859 	  LIST_FOREACH(z, &zk->uk_zones, uz_link) {
2860 		if (cnt == 0)	/* list may have changed size */
2861 			break;
2862 		ZONE_LOCK(z);
2863 		cachefree = 0;
2864 		if (!(zk->uk_flags & UMA_ZFLAG_INTERNAL)) {
2865 			uma_zone_sumstat(z, &cachefree, &allocs, &frees);
2866 		} else {
2867 			allocs = z->uz_allocs;
2868 			frees = z->uz_frees;
2869 		}
2870 
2871 		LIST_FOREACH(bucket, &z->uz_full_bucket, ub_link) {
2872 			cachefree += bucket->ub_cnt;
2873 		}
2874 		totalfree = zk->uk_free + cachefree;
2875 		len = snprintf(offset, linesize,
2876 		    "%-12.12s  %6.6u, %8.8u, %6.6u, %6.6u, %8.8llu\n",
2877 		    z->uz_name, zk->uk_size,
2878 		    zk->uk_maxpages * zk->uk_ipers,
2879 		    (zk->uk_ipers * (zk->uk_pages / zk->uk_ppera)) - totalfree,
2880 		    totalfree,
2881 		    (unsigned long long)allocs);
2882 		ZONE_UNLOCK(z);
2883 		for (p = offset + 12; p > offset && *p == ' '; --p)
2884 			/* nothing */ ;
2885 		p[1] = ':';
2886 		cnt--;
2887 		offset += len;
2888 	  }
2889 	}
2890 	mtx_unlock(&uma_mtx);
2891 	*offset++ = '\0';
2892 	error = SYSCTL_OUT(req, tmpbuf, offset - tmpbuf);
2893 out:
2894 	FREE(tmpbuf, M_TEMP);
2895 	return (error);
2896 }
2897 
2898 static int
2899 sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS)
2900 {
2901 	uma_keg_t kz;
2902 	uma_zone_t z;
2903 	int count;
2904 
2905 	count = 0;
2906 	mtx_lock(&uma_mtx);
2907 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
2908 		LIST_FOREACH(z, &kz->uk_zones, uz_link)
2909 			count++;
2910 	}
2911 	mtx_unlock(&uma_mtx);
2912 	return (sysctl_handle_int(oidp, &count, 0, req));
2913 }
2914 
2915 static int
2916 sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
2917 {
2918 	struct uma_stream_header ush;
2919 	struct uma_type_header uth;
2920 	struct uma_percpu_stat ups;
2921 	uma_bucket_t bucket;
2922 	struct sbuf sbuf;
2923 	uma_cache_t cache;
2924 	uma_keg_t kz;
2925 	uma_zone_t z;
2926 	char *buffer;
2927 	int buflen, count, error, i;
2928 
2929 	mtx_lock(&uma_mtx);
2930 restart:
2931 	mtx_assert(&uma_mtx, MA_OWNED);
2932 	count = 0;
2933 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
2934 		LIST_FOREACH(z, &kz->uk_zones, uz_link)
2935 			count++;
2936 	}
2937 	mtx_unlock(&uma_mtx);
2938 
2939 	buflen = sizeof(ush) + count * (sizeof(uth) + sizeof(ups) *
2940 	    (mp_maxid + 1)) + 1;
2941 	buffer = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);
2942 
2943 	mtx_lock(&uma_mtx);
2944 	i = 0;
2945 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
2946 		LIST_FOREACH(z, &kz->uk_zones, uz_link)
2947 			i++;
2948 	}
2949 	if (i > count) {
2950 		free(buffer, M_TEMP);
2951 		goto restart;
2952 	}
2953 	count =  i;
2954 
2955 	sbuf_new(&sbuf, buffer, buflen, SBUF_FIXEDLEN);
2956 
2957 	/*
2958 	 * Insert stream header.
2959 	 */
2960 	bzero(&ush, sizeof(ush));
2961 	ush.ush_version = UMA_STREAM_VERSION;
2962 	ush.ush_maxcpus = (mp_maxid + 1);
2963 	ush.ush_count = count;
2964 	if (sbuf_bcat(&sbuf, &ush, sizeof(ush)) < 0) {
2965 		mtx_unlock(&uma_mtx);
2966 		error = ENOMEM;
2967 		goto out;
2968 	}
2969 
2970 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
2971 		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
2972 			bzero(&uth, sizeof(uth));
2973 			ZONE_LOCK(z);
2974 			strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
2975 			uth.uth_align = kz->uk_align;
2976 			uth.uth_pages = kz->uk_pages;
2977 			uth.uth_keg_free = kz->uk_free;
2978 			uth.uth_size = kz->uk_size;
2979 			uth.uth_rsize = kz->uk_rsize;
2980 			uth.uth_maxpages = kz->uk_maxpages;
2981 			if (kz->uk_ppera > 1)
2982 				uth.uth_limit = kz->uk_maxpages /
2983 				    kz->uk_ppera;
2984 			else
2985 				uth.uth_limit = kz->uk_maxpages *
2986 				    kz->uk_ipers;
2987 
2988 			/*
2989 			 * A zone is secondary if it is not the first entry
2990 			 * on the keg's zone list.
2991 			 */
2992 			if ((kz->uk_flags & UMA_ZONE_SECONDARY) &&
2993 			    (LIST_FIRST(&kz->uk_zones) != z))
2994 				uth.uth_zone_flags = UTH_ZONE_SECONDARY;
2995 
2996 			LIST_FOREACH(bucket, &z->uz_full_bucket, ub_link)
2997 				uth.uth_zone_free += bucket->ub_cnt;
2998 			uth.uth_allocs = z->uz_allocs;
2999 			uth.uth_frees = z->uz_frees;
3000 			uth.uth_fails = z->uz_fails;
3001 			if (sbuf_bcat(&sbuf, &uth, sizeof(uth)) < 0) {
3002 				ZONE_UNLOCK(z);
3003 				mtx_unlock(&uma_mtx);
3004 				error = ENOMEM;
3005 				goto out;
3006 			}
3007 			/*
3008 			 * While it is not normally safe to access the cache
3009 			 * bucket pointers while not on the CPU that owns the
3010 			 * cache, we only allow the pointers to be exchanged
3011 			 * without the zone lock held, not invalidated, so
3012 			 * accept the possible race associated with bucket
3013 			 * exchange during monitoring.
3014 			 */
3015 			for (i = 0; i < (mp_maxid + 1); i++) {
3016 				bzero(&ups, sizeof(ups));
3017 				if (kz->uk_flags & UMA_ZFLAG_INTERNAL)
3018 					goto skip;
3019 				cache = &z->uz_cpu[i];
3020 				if (cache->uc_allocbucket != NULL)
3021 					ups.ups_cache_free +=
3022 					    cache->uc_allocbucket->ub_cnt;
3023 				if (cache->uc_freebucket != NULL)
3024 					ups.ups_cache_free +=
3025 					    cache->uc_freebucket->ub_cnt;
3026 				ups.ups_allocs = cache->uc_allocs;
3027 				ups.ups_frees = cache->uc_frees;
3028 skip:
3029 				if (sbuf_bcat(&sbuf, &ups, sizeof(ups)) < 0) {
3030 					ZONE_UNLOCK(z);
3031 					mtx_unlock(&uma_mtx);
3032 					error = ENOMEM;
3033 					goto out;
3034 				}
3035 			}
3036 			ZONE_UNLOCK(z);
3037 		}
3038 	}
3039 	mtx_unlock(&uma_mtx);
3040 	sbuf_finish(&sbuf);
3041 	error = SYSCTL_OUT(req, sbuf_data(&sbuf), sbuf_len(&sbuf));
3042 out:
3043 	free(buffer, M_TEMP);
3044 	return (error);
3045 }
3046 
3047 #ifdef DDB
3048 DB_SHOW_COMMAND(uma, db_show_uma)
3049 {
3050 	u_int64_t allocs, frees;
3051 	uma_bucket_t bucket;
3052 	uma_keg_t kz;
3053 	uma_zone_t z;
3054 	int cachefree;
3055 
3056 	db_printf("%18s %12s %12s %12s %8s\n", "Zone", "Allocs", "Frees",
3057 	    "Used", "Cache");
3058 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
3059 		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
3060 			if (kz->uk_flags & UMA_ZFLAG_INTERNAL) {
3061 				allocs = z->uz_allocs;
3062 				frees = z->uz_frees;
3063 				cachefree = 0;
3064 			} else
3065 				uma_zone_sumstat(z, &cachefree, &allocs,
3066 				    &frees);
3067 			if (!((kz->uk_flags & UMA_ZONE_SECONDARY) &&
3068 			    (LIST_FIRST(&kz->uk_zones) != z)))
3069 				cachefree += kz->uk_free;
3070 			LIST_FOREACH(bucket, &z->uz_full_bucket, ub_link)
3071 				cachefree += bucket->ub_cnt;
3072 			db_printf("%18s %12ju %12ju %12ju %8d\n", z->uz_name,
3073 			    allocs, frees, allocs - frees, cachefree);
3074 		}
3075 	}
3076 }
3077 #endif
3078