xref: /freebsd/sys/vm/uma_core.c (revision 6b3455a7665208c366849f0b2b3bc916fb97516e)
1 /*
2  * Copyright (c) 2002, Jeffrey Roberson <jeff@freebsd.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 /*
28  * uma_core.c  Implementation of the Universal Memory allocator
29  *
30  * This allocator is intended to replace the multitude of similar object caches
31  * in the standard FreeBSD kernel.  The intent is to be flexible as well as
32  * efficient.  A primary design goal is to return unused memory to the rest of
33  * the system.  This will make the system as a whole more flexible due to the
34  * ability to move memory to subsystems which most need it instead of leaving
35  * pools of reserved memory unused.
36  *
37  * The basic ideas stem from similar slab/zone based allocators whose algorithms
38  * are well known.
39  *
40  */
41 
42 /*
43  * TODO:
44  *	- Improve memory usage for large allocations
45  *	- Investigate cache size adjustments
46  */
47 
48 #include <sys/cdefs.h>
49 __FBSDID("$FreeBSD$");
50 
51 /* I should really use ktr(4) for this debug output. */
52 /*
53 #define UMA_DEBUG 1
54 #define UMA_DEBUG_ALLOC 1
55 #define UMA_DEBUG_ALLOC_1 1
56 */
57 
58 #include "opt_param.h"
59 #include <sys/param.h>
60 #include <sys/systm.h>
61 #include <sys/kernel.h>
62 #include <sys/types.h>
63 #include <sys/queue.h>
64 #include <sys/malloc.h>
65 #include <sys/lock.h>
66 #include <sys/sysctl.h>
67 #include <sys/mutex.h>
68 #include <sys/proc.h>
69 #include <sys/smp.h>
70 #include <sys/vmmeter.h>
71 
72 #include <vm/vm.h>
73 #include <vm/vm_object.h>
74 #include <vm/vm_page.h>
75 #include <vm/vm_param.h>
76 #include <vm/vm_map.h>
77 #include <vm/vm_kern.h>
78 #include <vm/vm_extern.h>
79 #include <vm/uma.h>
80 #include <vm/uma_int.h>
81 #include <vm/uma_dbg.h>
82 
83 #include <machine/vmparam.h>
84 
85 /*
86  * This is the zone and keg from which all zones are spawned.  The idea is that
87  * even the zone & keg heads are allocated from the allocator, so we use the
88  * bss section to bootstrap us.
89  */
90 static struct uma_keg masterkeg;
91 static struct uma_zone masterzone_k;
92 static struct uma_zone masterzone_z;
93 static uma_zone_t kegs = &masterzone_k;
94 static uma_zone_t zones = &masterzone_z;
95 
96 /* This is the zone from which all uma_slab_t's are allocated. */
97 static uma_zone_t slabzone;
98 static uma_zone_t slabrefzone;	/* With refcounters (for UMA_ZONE_REFCNT) */
99 
100 /*
101  * The initial hash tables come out of this zone so they can be allocated
102  * prior to malloc coming up.
103  */
104 static uma_zone_t hashzone;
105 
106 static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");
107 
108 /*
109  * Are we allowed to allocate buckets?
110  */
111 static int bucketdisable = 1;
112 
113 /* Linked list of all kegs in the system */
114 static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(&uma_kegs);
115 
116 /* This mutex protects the keg list */
117 static struct mtx uma_mtx;
118 
119 /* These are the pcpu cache locks */
120 static struct mtx uma_pcpu_mtx[MAXCPU];
121 
122 /* Linked list of boot time pages */
123 static LIST_HEAD(,uma_slab) uma_boot_pages =
124     LIST_HEAD_INITIALIZER(&uma_boot_pages);
125 
126 /* Count of free boottime pages */
127 static int uma_boot_free = 0;
128 
129 /* Is the VM done starting up? */
130 static int booted = 0;
131 
132 /* Maximum number of allowed items-per-slab if the slab header is OFFPAGE */
133 static u_int uma_max_ipers;
134 static u_int uma_max_ipers_ref;
135 
136 /*
137  * This is the handle used to schedule events that need to happen
138  * outside of the allocation fast path.
139  */
140 static struct callout uma_callout;
141 #define	UMA_TIMEOUT	20		/* Seconds for callout interval. */
142 
143 /*
144  * This structure is passed as the zone ctor arg so that I don't have to create
145  * a special allocation function just for zones.
146  */
147 struct uma_zctor_args {
148 	char *name;
149 	size_t size;
150 	uma_ctor ctor;
151 	uma_dtor dtor;
152 	uma_init uminit;
153 	uma_fini fini;
154 	uma_keg_t keg;
155 	int align;
156 	u_int16_t flags;
157 };
158 
159 struct uma_kctor_args {
160 	uma_zone_t zone;
161 	size_t size;
162 	uma_init uminit;
163 	uma_fini fini;
164 	int align;
165 	u_int16_t flags;
166 };
167 
168 struct uma_bucket_zone {
169 	uma_zone_t	ubz_zone;
170 	char		*ubz_name;
171 	int		ubz_entries;
172 };
173 
174 #define	BUCKET_MAX	128
175 
176 struct uma_bucket_zone bucket_zones[] = {
177 	{ NULL, "16 Bucket", 16 },
178 	{ NULL, "32 Bucket", 32 },
179 	{ NULL, "64 Bucket", 64 },
180 	{ NULL, "128 Bucket", 128 },
181 	{ NULL, NULL, 0}
182 };
183 
184 #define	BUCKET_SHIFT	4
185 #define	BUCKET_ZONES	((BUCKET_MAX >> BUCKET_SHIFT) + 1)
186 
187 uint8_t bucket_size[BUCKET_ZONES];
188 
189 /* Prototypes */
190 
191 static void *obj_alloc(uma_zone_t, int, u_int8_t *, int);
192 static void *page_alloc(uma_zone_t, int, u_int8_t *, int);
193 static void *startup_alloc(uma_zone_t, int, u_int8_t *, int);
194 static void page_free(void *, int, u_int8_t);
195 static uma_slab_t slab_zalloc(uma_zone_t, int);
196 static void cache_drain(uma_zone_t);
197 static void bucket_drain(uma_zone_t, uma_bucket_t);
198 static void bucket_cache_drain(uma_zone_t zone);
199 static void keg_ctor(void *, int, void *);
200 static void keg_dtor(void *, int, void *);
201 static void zone_ctor(void *, int, void *);
202 static void zone_dtor(void *, int, void *);
203 static void zero_init(void *, int);
204 static void zone_small_init(uma_zone_t zone);
205 static void zone_large_init(uma_zone_t zone);
206 static void zone_foreach(void (*zfunc)(uma_zone_t));
207 static void zone_timeout(uma_zone_t zone);
208 static int hash_alloc(struct uma_hash *);
209 static int hash_expand(struct uma_hash *, struct uma_hash *);
210 static void hash_free(struct uma_hash *hash);
211 static void uma_timeout(void *);
212 static void uma_startup3(void);
213 static void *uma_zalloc_internal(uma_zone_t, void *, int);
214 static void uma_zfree_internal(uma_zone_t, void *, void *, int);
215 static void bucket_enable(void);
216 static void bucket_init(void);
217 static uma_bucket_t bucket_alloc(int, int);
218 static void bucket_free(uma_bucket_t);
219 static void bucket_zone_drain(void);
220 static int uma_zalloc_bucket(uma_zone_t zone, int flags);
221 static uma_slab_t uma_zone_slab(uma_zone_t zone, int flags);
222 static void *uma_slab_alloc(uma_zone_t zone, uma_slab_t slab);
223 static void zone_drain(uma_zone_t);
224 static void uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
225     uma_fini fini, int align, u_int16_t flags);
226 
227 void uma_print_zone(uma_zone_t);
228 void uma_print_stats(void);
229 static int sysctl_vm_zone(SYSCTL_HANDLER_ARGS);
230 
231 #ifdef WITNESS
232 static int nosleepwithlocks = 1;
233 SYSCTL_INT(_debug, OID_AUTO, nosleepwithlocks, CTLFLAG_RD, &nosleepwithlocks,
234     0, "Convert M_WAITOK to M_NOWAIT to avoid lock-held-across-sleep paths");
235 #else
236 static int nosleepwithlocks = 0;
237 SYSCTL_INT(_debug, OID_AUTO, nosleepwithlocks, CTLFLAG_RW, &nosleepwithlocks,
238     0, "Convert M_WAITOK to M_NOWAIT to avoid lock-held-across-sleep paths");
239 #endif
240 SYSCTL_OID(_vm, OID_AUTO, zone, CTLTYPE_STRING|CTLFLAG_RD,
241     NULL, 0, sysctl_vm_zone, "A", "Zone Info");
242 SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);
243 
244 /*
245  * This routine checks whether it is safe to enable bucket allocation.
246  */
247 
248 static void
249 bucket_enable(void)
250 {
251 	if (cnt.v_free_count < cnt.v_free_min)
252 		bucketdisable = 1;
253 	else
254 		bucketdisable = 0;
255 }
256 
257 static void
258 bucket_init(void)
259 {
260 	struct uma_bucket_zone *ubz;
261 	int i;
262 	int j;
263 
264 	for (i = 0, j = 0; bucket_zones[j].ubz_entries != 0; j++) {
265 		int size;
266 
267 		ubz = &bucket_zones[j];
268 		size = roundup(sizeof(struct uma_bucket), sizeof(void *));
269 		size += sizeof(void *) * ubz->ubz_entries;
270 		ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
271 		    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
272 		for (; i <= ubz->ubz_entries; i += (1 << BUCKET_SHIFT))
273 			bucket_size[i >> BUCKET_SHIFT] = j;
274 	}
275 }
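/*
 * Worked example of the mapping built above (derived from the code, with
 * BUCKET_SHIFT = 4, i.e. one bucket_size[] entry per 16 requested slots):
 * bucket_init() leaves bucket_size[0..1] = 0 ("16 Bucket"),
 * bucket_size[2] = 1 ("32 Bucket"), bucket_size[3..4] = 2 ("64 Bucket")
 * and bucket_size[5..8] = 3 ("128 Bucket").  bucket_alloc() below then
 * turns a request for, say, 48 entries into
 * idx = howmany(48, 1 << BUCKET_SHIFT) = 3 and hands back a "64 Bucket".
 */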
276 
277 static uma_bucket_t
278 bucket_alloc(int entries, int bflags)
279 {
280 	struct uma_bucket_zone *ubz;
281 	uma_bucket_t bucket;
282 	int idx;
283 
284 	/*
285 	 * This is to stop us from allocating per-cpu buckets while we're
286 	 * still drawing from UMA_BOOT_PAGES; otherwise we would exhaust
287 	 * the boot pages.  This also prevents us from allocating buckets
288 	 * in low-memory situations.
289 	 */
290 
291 	if (bucketdisable)
292 		return (NULL);
293 	idx = howmany(entries, 1 << BUCKET_SHIFT);
294 	ubz = &bucket_zones[bucket_size[idx]];
295 	bucket = uma_zalloc_internal(ubz->ubz_zone, NULL, bflags);
296 	if (bucket) {
297 #ifdef INVARIANTS
298 		bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
299 #endif
300 		bucket->ub_cnt = 0;
301 		bucket->ub_entries = ubz->ubz_entries;
302 	}
303 
304 	return (bucket);
305 }
306 
307 static void
308 bucket_free(uma_bucket_t bucket)
309 {
310 	struct uma_bucket_zone *ubz;
311 	int idx;
312 
313 	idx = howmany(bucket->ub_entries, 1 << BUCKET_SHIFT);
314 	ubz = &bucket_zones[bucket_size[idx]];
315 	uma_zfree_internal(ubz->ubz_zone, bucket, NULL, 0);
316 }
317 
318 static void
319 bucket_zone_drain(void)
320 {
321 	struct uma_bucket_zone *ubz;
322 
323 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
324 		zone_drain(ubz->ubz_zone);
325 }
326
328 /*
329  * Routine called by the callout(9) timer, used to fire off some
330  * time-interval based calculations (stats, hash size, etc.).
331  *
332  * Arguments:
333  *	arg   Unused
334  *
335  * Returns:
336  *	Nothing
337  */
338 static void
339 uma_timeout(void *unused)
340 {
341 	bucket_enable();
342 	zone_foreach(zone_timeout);
343 
344 	/* Reschedule this event */
345 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
346 }
347 
348 /*
349  * Routine to perform timeout driven calculations.  This expands the
350  * hashes and does per cpu statistics aggregation.
351  *
352  *  Arguments:
353  *	zone  The zone to operate on
354  *
355  *  Returns:
356  *	Nothing
357  */
358 static void
359 zone_timeout(uma_zone_t zone)
360 {
361 	uma_keg_t keg;
362 	uma_cache_t cache;
363 	u_int64_t alloc;
364 	int cpu;
365 
366 	keg = zone->uz_keg;
367 	alloc = 0;
368 
369 	/*
370 	 * Aggregate per cpu cache statistics back to the zone.
371 	 *
372 	 * XXX This should be done in the sysctl handler.
373 	 *
374 	 * I may rewrite this to set a flag in the per cpu cache instead of
375 	 * locking.  If the flag is not cleared on the next round I will have
376 	 * to lock and do it here instead so that the statistics don't get too
377 	 * far out of sync.
378 	 */
379 	if (!(keg->uk_flags & UMA_ZFLAG_INTERNAL)) {
380 		for (cpu = 0; cpu <= mp_maxid; cpu++) {
381 			if (CPU_ABSENT(cpu))
382 				continue;
383 			CPU_LOCK(cpu);
384 			cache = &zone->uz_cpu[cpu];
385 			/* Add them up, and reset */
386 			alloc += cache->uc_allocs;
387 			cache->uc_allocs = 0;
388 			CPU_UNLOCK(cpu);
389 		}
390 	}
391 
392 	/* Now push these stats back into the zone. */
393 	ZONE_LOCK(zone);
394 	zone->uz_allocs += alloc;
395 
396 	/*
397 	 * Expand the zone hash table.
398 	 *
399 	 * This is done if the number of slabs is larger than the hash size.
400 	 * What I'm trying to do here is eliminate collisions entirely.  This
401 	 * may be a little aggressive.  Should I allow for two collisions max?
402 	 */
403 
404 	if (keg->uk_flags & UMA_ZONE_HASH &&
405 	    keg->uk_pages / keg->uk_ppera >= keg->uk_hash.uh_hashsize) {
406 		struct uma_hash newhash;
407 		struct uma_hash oldhash;
408 		int ret;
409 
410 		/*
411 		 * This is so involved because allocating and freeing
412 		 * while the zone lock is held will lead to deadlock.
413 		 * I have to do everything in stages and check for
414 		 * races.
415 		 */
416 		newhash = keg->uk_hash;
417 		ZONE_UNLOCK(zone);
418 		ret = hash_alloc(&newhash);
419 		ZONE_LOCK(zone);
420 		if (ret) {
421 			if (hash_expand(&keg->uk_hash, &newhash)) {
422 				oldhash = keg->uk_hash;
423 				keg->uk_hash = newhash;
424 			} else
425 				oldhash = newhash;
426 
427 			ZONE_UNLOCK(zone);
428 			hash_free(&oldhash);
429 			ZONE_LOCK(zone);
430 		}
431 	}
432 	ZONE_UNLOCK(zone);
433 }
434 
435 /*
436  * Allocate and zero-fill the next-sized hash table from the appropriate
437  * backing store.
438  *
439  * Arguments:
440  *	hash  A new hash structure with the old hash size in uh_hashsize
441  *
442  * Returns:
443  *	1 on success and 0 on failure.
444  */
445 static int
446 hash_alloc(struct uma_hash *hash)
447 {
448 	int oldsize;
449 	int alloc;
450 
451 	oldsize = hash->uh_hashsize;
452 
453 	/* We're just going to go to a power of two greater */
454 	if (oldsize)  {
455 		hash->uh_hashsize = oldsize * 2;
456 		alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
457 		hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
458 		    M_UMAHASH, M_NOWAIT);
459 	} else {
460 		alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
461 		hash->uh_slab_hash = uma_zalloc_internal(hashzone, NULL,
462 		    M_WAITOK);
463 		hash->uh_hashsize = UMA_HASH_SIZE_INIT;
464 	}
465 	if (hash->uh_slab_hash) {
466 		bzero(hash->uh_slab_hash, alloc);
467 		hash->uh_hashmask = hash->uh_hashsize - 1;
468 		return (1);
469 	}
470 
471 	return (0);
472 }
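/*
 * An illustration of the growth policy above: because uh_hashsize is
 * always a power of two, uh_hashmask = uh_hashsize - 1 lets UMA_HASH()
 * reduce a slab address to a bucket index with a shift and a mask
 * instead of a modulus.  Assuming UMA_HASH_SIZE_INIT is 32, successive
 * expansions give hash sizes 32 -> 64 -> 128 with masks
 * 0x1f -> 0x3f -> 0x7f.
 */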
473 
474 /*
475  * Expands the hash table for HASH zones.  This is done from zone_timeout
476  * to reduce collisions.  This must not be done in the regular allocation
477  * path; otherwise we can recurse on the vm while allocating pages.
478  *
479  * Arguments:
480  *	oldhash  The hash you want to expand
481  *	newhash  The hash structure for the new table
482  *
483  * Returns:
484  *	1 on success and 0 on failure.
487  */
488 static int
489 hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
490 {
491 	uma_slab_t slab;
492 	int hval;
493 	int i;
494 
495 	if (!newhash->uh_slab_hash)
496 		return (0);
497 
498 	if (oldhash->uh_hashsize >= newhash->uh_hashsize)
499 		return (0);
500 
501 	/*
502 	 * I need to investigate hash algorithms for resizing without a
503 	 * full rehash.
504 	 */
505 
506 	for (i = 0; i < oldhash->uh_hashsize; i++)
507 		while (!SLIST_EMPTY(&oldhash->uh_slab_hash[i])) {
508 			slab = SLIST_FIRST(&oldhash->uh_slab_hash[i]);
509 			SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[i], us_hlink);
510 			hval = UMA_HASH(newhash, slab->us_data);
511 			SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
512 			    slab, us_hlink);
513 		}
514 
515 	return (1);
516 }
517 
518 /*
519  * Free the hash bucket to the appropriate backing store.
520  *
521  * Arguments:
522  *	hash  The hash structure whose bucket storage we're freeing
524  *
525  * Returns:
526  *	Nothing
527  */
528 static void
529 hash_free(struct uma_hash *hash)
530 {
531 	if (hash->uh_slab_hash == NULL)
532 		return;
533 	if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
534 		uma_zfree_internal(hashzone,
535 		    hash->uh_slab_hash, NULL, 0);
536 	else
537 		free(hash->uh_slab_hash, M_UMAHASH);
538 }
539 
540 /*
541  * Frees all outstanding items in a bucket
542  *
543  * Arguments:
544  *	zone   The zone to free to, must be unlocked.
545  *	bucket The free/alloc bucket with items, cpu queue must be locked.
546  *
547  * Returns:
548  *	Nothing
549  */
550 
551 static void
552 bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
553 {
554 	uma_slab_t slab;
555 	int mzone;
556 	void *item;
557 
558 	if (bucket == NULL)
559 		return;
560 
561 	slab = NULL;
562 	mzone = 0;
563 
564 	/* We have to look up the slab again for malloc-backed zones. */
565 	if (zone->uz_keg->uk_flags & UMA_ZONE_MALLOC)
566 		mzone = 1;
567 
568 	while (bucket->ub_cnt > 0)  {
569 		bucket->ub_cnt--;
570 		item = bucket->ub_bucket[bucket->ub_cnt];
571 #ifdef INVARIANTS
572 		bucket->ub_bucket[bucket->ub_cnt] = NULL;
573 		KASSERT(item != NULL,
574 		    ("bucket_drain: botched ptr, item is NULL"));
575 #endif
576 		/*
577 		 * This is extremely inefficient.  The slab pointer was passed
578 		 * to uma_zfree_arg, but we lost it because the buckets don't
579 		 * hold them.  This will go away when free() gets a size passed
580 		 * to it.
581 		 */
582 		if (mzone)
583 			slab = vtoslab((vm_offset_t)item & (~UMA_SLAB_MASK));
584 		uma_zfree_internal(zone, item, slab, 1);
585 	}
586 }
587 
588 /*
589  * Drains the per cpu caches for a zone.
590  *
591  * Arguments:
592  *	zone     The zone to drain, must be unlocked.
593  *
594  * Returns:
595  *	Nothing
596  */
597 static void
598 cache_drain(uma_zone_t zone)
599 {
600 	uma_cache_t cache;
601 	int cpu;
602 
603 	/*
604 	 * We have to lock each cpu cache before locking the zone
605 	 */
606 	for (cpu = 0; cpu <= mp_maxid; cpu++) {
607 		if (CPU_ABSENT(cpu))
608 			continue;
609 		CPU_LOCK(cpu);
610 		cache = &zone->uz_cpu[cpu];
611 		bucket_drain(zone, cache->uc_allocbucket);
612 		bucket_drain(zone, cache->uc_freebucket);
613 		if (cache->uc_allocbucket != NULL)
614 			bucket_free(cache->uc_allocbucket);
615 		if (cache->uc_freebucket != NULL)
616 			bucket_free(cache->uc_freebucket);
617 		cache->uc_allocbucket = cache->uc_freebucket = NULL;
618 	}
619 	ZONE_LOCK(zone);
620 	bucket_cache_drain(zone);
621 	ZONE_UNLOCK(zone);
622 	for (cpu = 0; cpu <= mp_maxid; cpu++) {
623 		if (CPU_ABSENT(cpu))
624 			continue;
625 		CPU_UNLOCK(cpu);
626 	}
627 }
628 
629 /*
630  * Drain the cached buckets from a zone.  Expects a locked zone on entry.
631  */
632 static void
633 bucket_cache_drain(uma_zone_t zone)
634 {
635 	uma_bucket_t bucket;
636 
637 	/*
638 	 * Drain the bucket queues and free the buckets; we keep just two
639 	 * per cpu (alloc/free).
640 	 */
641 	while ((bucket = LIST_FIRST(&zone->uz_full_bucket)) != NULL) {
642 		LIST_REMOVE(bucket, ub_link);
643 		ZONE_UNLOCK(zone);
644 		bucket_drain(zone, bucket);
645 		bucket_free(bucket);
646 		ZONE_LOCK(zone);
647 	}
648 
649 	/* Now do the free bucket queue. */
650 	while ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
651 		LIST_REMOVE(bucket, ub_link);
652 		bucket_free(bucket);
653 	}
654 }
655 
656 /*
657  * Frees pages from a zone back to the system.  This is done on demand from
658  * the pageout daemon.
659  *
660  * Arguments:
661  *	zone  The zone to free pages from
663  *
664  * Returns:
665  *	Nothing.
666  */
667 static void
668 zone_drain(uma_zone_t zone)
669 {
670 	struct slabhead freeslabs = {};
671 	uma_keg_t keg;
672 	uma_slab_t slab;
673 	uma_slab_t n;
674 	u_int8_t flags;
675 	u_int8_t *mem;
676 	int i;
677 
678 	keg = zone->uz_keg;
679 
680 	/*
681 	 * We don't want to take pages from statically allocated zones at this
682 	 * time
683 	 */
684 	if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
685 		return;
686 
687 	ZONE_LOCK(zone);
688 
689 #ifdef UMA_DEBUG
690 	printf("%s free items: %u\n", zone->uz_name, keg->uk_free);
691 #endif
692 	bucket_cache_drain(zone);
693 	if (keg->uk_free == 0)
694 		goto finished;
695 
696 	slab = LIST_FIRST(&keg->uk_free_slab);
697 	while (slab) {
698 		n = LIST_NEXT(slab, us_link);
699 
700 		/* We have nowhere to free these to. */
701 		if (slab->us_flags & UMA_SLAB_BOOT) {
702 			slab = n;
703 			continue;
704 		}
705 
706 		LIST_REMOVE(slab, us_link);
707 		keg->uk_pages -= keg->uk_ppera;
708 		keg->uk_free -= keg->uk_ipers;
709 
710 		if (keg->uk_flags & UMA_ZONE_HASH)
711 			UMA_HASH_REMOVE(&keg->uk_hash, slab, slab->us_data);
712 
713 		SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);
714 
715 		slab = n;
716 	}
717 finished:
718 	ZONE_UNLOCK(zone);
719 
720 	while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
721 		SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
722 		if (keg->uk_fini)
723 			for (i = 0; i < keg->uk_ipers; i++)
724 				keg->uk_fini(
725 				    slab->us_data + (keg->uk_rsize * i),
726 				    keg->uk_size);
727 		flags = slab->us_flags;
728 		mem = slab->us_data;
729 
730 		if ((keg->uk_flags & UMA_ZONE_MALLOC) ||
731 		    (keg->uk_flags & UMA_ZONE_REFCNT)) {
732 			vm_object_t obj;
733 
734 			if (flags & UMA_SLAB_KMEM)
735 				obj = kmem_object;
736 			else
737 				obj = NULL;
738 			for (i = 0; i < keg->uk_ppera; i++)
739 				vsetobj((vm_offset_t)mem + (i * PAGE_SIZE),
740 				    obj);
741 		}
742 		if (keg->uk_flags & UMA_ZONE_OFFPAGE)
743 			uma_zfree_internal(keg->uk_slabzone, slab, NULL, 0);
744 #ifdef UMA_DEBUG
745 		printf("%s: Returning %d bytes.\n",
746 		    zone->uz_name, UMA_SLAB_SIZE * keg->uk_ppera);
747 #endif
748 		keg->uk_freef(mem, UMA_SLAB_SIZE * keg->uk_ppera, flags);
749 	}
750 }
751 
752 /*
753  * Allocate a new slab for a zone.  This does not insert the slab onto a list.
754  *
755  * Arguments:
756  *	zone  The zone to allocate slabs for
757  *	wait  Shall we wait?
758  *
759  * Returns:
760  *	The slab that was allocated or NULL if there is no memory and the
761  *	caller specified M_NOWAIT.
762  */
763 static uma_slab_t
764 slab_zalloc(uma_zone_t zone, int wait)
765 {
766 	uma_slabrefcnt_t slabref;
767 	uma_slab_t slab;
768 	uma_keg_t keg;
769 	u_int8_t *mem;
770 	u_int8_t flags;
771 	int i;
772 
773 	slab = NULL;
774 	keg = zone->uz_keg;
775 
776 #ifdef UMA_DEBUG
777 	printf("slab_zalloc:  Allocating a new slab for %s\n", zone->uz_name);
778 #endif
779 	ZONE_UNLOCK(zone);
780 
781 	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
782 		slab = uma_zalloc_internal(keg->uk_slabzone, NULL, wait);
783 		if (slab == NULL) {
784 			ZONE_LOCK(zone);
785 			return (NULL);
786 		}
787 	}
788 
789 	/*
790 	 * This reproduces the old vm_zone behavior of zero filling pages the
791 	 * first time they are added to a zone.
792 	 *
793 	 * Malloced items are zeroed in uma_zalloc.
794 	 */
795 
796 	if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
797 		wait |= M_ZERO;
798 	else
799 		wait &= ~M_ZERO;
800 
801 	mem = keg->uk_allocf(zone, keg->uk_ppera * UMA_SLAB_SIZE,
802 	    &flags, wait);
803 	if (mem == NULL) {
804 		ZONE_LOCK(zone);
805 		return (NULL);
806 	}
807 
808 	/* Point the slab into the allocated memory */
809 	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
810 		slab = (uma_slab_t)(mem + keg->uk_pgoff);
811 
812 	if ((keg->uk_flags & UMA_ZONE_MALLOC) ||
813 	    (keg->uk_flags & UMA_ZONE_REFCNT))
814 		for (i = 0; i < keg->uk_ppera; i++)
815 			vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);
816 
817 	slab->us_keg = keg;
818 	slab->us_data = mem;
819 	slab->us_freecount = keg->uk_ipers;
820 	slab->us_firstfree = 0;
821 	slab->us_flags = flags;
822 	for (i = 0; i < keg->uk_ipers; i++)
823 		slab->us_freelist[i].us_item = i+1;
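	/*
	 * The loop above threads an implicit singly-linked free list
	 * through the slab: us_firstfree starts at item 0, item i links
	 * to item i + 1, and the final link (== uk_ipers) is never
	 * followed because us_freecount hits zero first.
	 * uma_slab_alloc() pops the head of this list in O(1).
	 */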
824 
825 	if (keg->uk_flags & UMA_ZONE_REFCNT) {
826 		slabref = (uma_slabrefcnt_t)slab;
827 		for (i = 0; i < keg->uk_ipers; i++)
828 			slabref->us_freelist[i].us_refcnt = 0;
829 	}
830 
831 	if (keg->uk_init)
832 		for (i = 0; i < keg->uk_ipers; i++)
833 			keg->uk_init(slab->us_data + (keg->uk_rsize * i),
834 			    keg->uk_size);
835 	ZONE_LOCK(zone);
836 
837 	if (keg->uk_flags & UMA_ZONE_HASH)
838 		UMA_HASH_INSERT(&keg->uk_hash, slab, mem);
839 
840 	keg->uk_pages += keg->uk_ppera;
841 	keg->uk_free += keg->uk_ipers;
842 
843 	return (slab);
844 }
845 
846 /*
847  * This function is intended to be used early on in place of page_alloc() so
848  * that we may use the boot time page cache to satisfy allocations before
849  * the VM is ready.
850  */
851 static void *
852 startup_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
853 {
854 	uma_keg_t keg;
855 
856 	keg = zone->uz_keg;
857 
858 	/*
859 	 * Check our small startup cache to see if it has pages remaining.
860 	 */
861 	mtx_lock(&uma_mtx);
862 	if (uma_boot_free != 0) {
863 		uma_slab_t tmps;
864 
865 		tmps = LIST_FIRST(&uma_boot_pages);
866 		LIST_REMOVE(tmps, us_link);
867 		uma_boot_free--;
868 		mtx_unlock(&uma_mtx);
869 		*pflag = tmps->us_flags;
870 		return (tmps->us_data);
871 	}
872 	mtx_unlock(&uma_mtx);
873 	if (booted == 0)
874 		panic("UMA: Increase UMA_BOOT_PAGES");
875 	/*
876 	 * Now that we've booted, reset these users to their real allocator.
877 	 */
878 #ifdef UMA_MD_SMALL_ALLOC
879 	keg->uk_allocf = uma_small_alloc;
880 #else
881 	keg->uk_allocf = page_alloc;
882 #endif
883 	return (keg->uk_allocf(zone, bytes, pflag, wait));
884 }
885 
886 /*
887  * Allocates a number of pages from the system
888  *
889  * Arguments:
890  *	zone  Unused
891  *	bytes  The number of bytes requested
892  *	wait  Shall we wait?
893  *
894  * Returns:
895  *	A pointer to the allocated memory or possibly
896  *	NULL if M_NOWAIT is set.
897  */
898 static void *
899 page_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
900 {
901 	void *p;	/* Returned page */
902 
903 	*pflag = UMA_SLAB_KMEM;
904 	p = (void *) kmem_malloc(kmem_map, bytes, wait);
905 
906 	return (p);
907 }
908 
909 /*
910  * Allocates a number of pages from within an object
911  *
912  * Arguments:
913  *	zone   Unused
914  *	bytes  The number of bytes requested
915  *	wait   Shall we wait?
916  *
917  * Returns:
918  *	A pointer to the allocated memory or possibly
919  *	NULL if M_NOWAIT is set.
920  */
921 static void *
922 obj_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
923 {
924 	vm_object_t object;
925 	vm_offset_t retkva, zkva;
926 	vm_page_t p;
927 	int pages, startpages;
928 
929 	object = zone->uz_keg->uk_obj;
930 	retkva = 0;
931 
932 	/*
933 	 * This looks a little weird since we're getting one page at a time.
934 	 */
935 	VM_OBJECT_LOCK(object);
936 	p = TAILQ_LAST(&object->memq, pglist);
937 	pages = p != NULL ? p->pindex + 1 : 0;
938 	startpages = pages;
939 	zkva = zone->uz_keg->uk_kva + pages * PAGE_SIZE;
940 	for (; bytes > 0; bytes -= PAGE_SIZE) {
941 		p = vm_page_alloc(object, pages,
942 		    VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED);
943 		if (p == NULL) {
944 			if (pages != startpages)
945 				pmap_qremove(retkva, pages - startpages);
946 			while (pages != startpages) {
947 				pages--;
948 				p = TAILQ_LAST(&object->memq, pglist);
949 				vm_page_lock_queues();
950 				vm_page_unwire(p, 0);
951 				vm_page_free(p);
952 				vm_page_unlock_queues();
953 			}
954 			retkva = 0;
955 			goto done;
956 		}
957 		pmap_qenter(zkva, &p, 1);
958 		if (retkva == 0)
959 			retkva = zkva;
960 		zkva += PAGE_SIZE;
961 		pages += 1;
962 	}
963 done:
964 	VM_OBJECT_UNLOCK(object);
965 	*flags = UMA_SLAB_PRIV;
966 
967 	return ((void *)retkva);
968 }
969 
970 /*
971  * Frees a number of pages to the system
972  *
973  * Arguments:
974  *	mem   A pointer to the memory to be freed
975  *	size  The size of the memory being freed
976  *	flags The original p->us_flags field
977  *
978  * Returns:
979  *	Nothing
980  */
981 static void
982 page_free(void *mem, int size, u_int8_t flags)
983 {
984 	vm_map_t map;
985 
986 	if (flags & UMA_SLAB_KMEM)
987 		map = kmem_map;
988 	else
989 		panic("UMA: page_free used with invalid flags %d\n", flags);
990 
991 	kmem_free(map, (vm_offset_t)mem, size);
992 }
993 
994 /*
995  * Zero fill initializer
996  *
997  * Arguments/Returns follow uma_init specifications
998  */
999 static void
1000 zero_init(void *mem, int size)
1001 {
1002 	bzero(mem, size);
1003 }
1004 
1005 /*
1006  * Finish creating a small uma zone.  This calculates ipers and the zone size.
1007  *
1008  * Arguments
1009  *	zone  The zone we should initialize
1010  *
1011  * Returns
1012  *	Nothing
1013  */
1014 static void
1015 zone_small_init(uma_zone_t zone)
1016 {
1017 	uma_keg_t keg;
1018 	u_int rsize;
1019 	u_int memused;
1020 	u_int wastedspace;
1021 	u_int shsize;
1022 
1023 	keg = zone->uz_keg;
1024 	KASSERT(keg != NULL, ("Keg is null in zone_small_init"));
1025 	rsize = keg->uk_size;
1026 
1027 	if (rsize < UMA_SMALLEST_UNIT)
1028 		rsize = UMA_SMALLEST_UNIT;
1029 	if (rsize & keg->uk_align)
1030 		rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1);
1031 
1032 	keg->uk_rsize = rsize;
1033 	keg->uk_ppera = 1;
1034 
1035 	if (keg->uk_flags & UMA_ZONE_REFCNT) {
1036 		rsize += UMA_FRITMREF_SZ;	/* linkage & refcnt */
1037 		shsize = sizeof(struct uma_slab_refcnt);
1038 	} else {
1039 		rsize += UMA_FRITM_SZ;	/* Account for linkage */
1040 		shsize = sizeof(struct uma_slab);
1041 	}
1042 
1043 	keg->uk_ipers = (UMA_SLAB_SIZE - shsize) / rsize;
1044 	KASSERT(keg->uk_ipers != 0, ("zone_small_init: ipers is 0"));
1045 	memused = keg->uk_ipers * rsize + shsize;
1046 	wastedspace = UMA_SLAB_SIZE - memused;
1047 
1048 	/*
1049 	 * We can't do OFFPAGE if we're internal or if we've been
1050 	 * asked to not go to the VM for buckets.  If we do this we
1051 	 * may end up going to the VM (kmem_map) for slabs which we
1052 	 * do not want to do if we're UMA_ZFLAG_CACHEONLY as a
1053 	 * result of UMA_ZONE_VM, which clearly forbids it.
1054 	 */
1055 	if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
1056 	    (keg->uk_flags & UMA_ZFLAG_CACHEONLY))
1057 		return;
1058 
1059 	if ((wastedspace >= UMA_MAX_WASTE) &&
1060 	    (keg->uk_ipers < (UMA_SLAB_SIZE / keg->uk_rsize))) {
1061 		keg->uk_ipers = UMA_SLAB_SIZE / keg->uk_rsize;
1062 		KASSERT(keg->uk_ipers <= 255,
1063 		    ("zone_small_init: keg->uk_ipers too high!"));
1064 #ifdef UMA_DEBUG
1065 		printf("UMA decided we need offpage slab headers for "
1066 		    "zone: %s, calculated wastedspace = %d, "
1067 		    "maximum wasted space allowed = %d, "
1068 		    "calculated ipers = %d, "
1069 		    "new wasted space = %d\n", zone->uz_name, wastedspace,
1070 		    UMA_MAX_WASTE, keg->uk_ipers,
1071 		    UMA_SLAB_SIZE - keg->uk_ipers * keg->uk_rsize);
1072 #endif
1073 		keg->uk_flags |= UMA_ZONE_OFFPAGE;
1074 		if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
1075 			keg->uk_flags |= UMA_ZONE_HASH;
1076 	}
1077 }
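/*
 * Illustrative arithmetic for zone_small_init() (the header and linkage
 * sizes below are assumptions for the example; the real values are
 * machine- and option-dependent):  with UMA_SLAB_SIZE = 4096,
 * sizeof(struct uma_slab) = 32 and UMA_FRITM_SZ = 4, a 256-byte
 * pointer-aligned item gives rsize = 256 and rsize + linkage = 260, so
 * ipers = (4096 - 32) / 260 = 15 and
 * wastedspace = 4096 - (15 * 260 + 32) = 164.  Were wastedspace to
 * reach UMA_MAX_WASTE, ipers would be recomputed as
 * UMA_SLAB_SIZE / uk_rsize = 16 and the slab header pushed OFFPAGE.
 */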
1078 
1079 /*
1080  * Finish creating a large (> UMA_SLAB_SIZE) uma zone.  Just give in and do
1081  * OFFPAGE for now.  When I can allow for more dynamic slab sizes this will be
1082  * more complicated.
1083  *
1084  * Arguments
1085  *	zone  The zone we should initialize
1086  *
1087  * Returns
1088  *	Nothing
1089  */
1090 static void
1091 zone_large_init(uma_zone_t zone)
1092 {
1093 	uma_keg_t keg;
1094 	int pages;
1095 
1096 	keg = zone->uz_keg;
1097 
1098 	KASSERT(keg != NULL, ("Keg is null in zone_large_init"));
1099 	KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0,
1100 	    ("zone_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY zone"));
1101 
1102 	pages = keg->uk_size / UMA_SLAB_SIZE;
1103 
1104 	/* Account for remainder */
1105 	if ((pages * UMA_SLAB_SIZE) < keg->uk_size)
1106 		pages++;
1107 
1108 	keg->uk_ppera = pages;
1109 	keg->uk_ipers = 1;
1110 
1111 	keg->uk_flags |= UMA_ZONE_OFFPAGE;
1112 	if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
1113 		keg->uk_flags |= UMA_ZONE_HASH;
1114 
1115 	keg->uk_rsize = keg->uk_size;
1116 }
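/*
 * Example (assuming UMA_SLAB_SIZE = PAGE_SIZE = 4096 for illustration):
 * a 9216-byte item gives pages = 9216 / 4096 = 2, and since
 * 2 * 4096 < 9216 the remainder bumps it to uk_ppera = 3; each
 * three-page slab then holds exactly one item (uk_ipers = 1), with its
 * header kept OFFPAGE.
 */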
1117 
1118 /*
1119  * Keg header ctor.  This initializes all fields, locks, etc.  And inserts
1120  * the keg onto the global keg list.
1121  *
1122  * Arguments/Returns follow uma_ctor specifications
1123  *	udata  Actually uma_kctor_args
1124  */
1125 static void
1126 keg_ctor(void *mem, int size, void *udata)
1127 {
1128 	struct uma_kctor_args *arg = udata;
1129 	uma_keg_t keg = mem;
1130 	uma_zone_t zone;
1131 
1132 	bzero(keg, size);
1133 	keg->uk_size = arg->size;
1134 	keg->uk_init = arg->uminit;
1135 	keg->uk_fini = arg->fini;
1136 	keg->uk_align = arg->align;
1137 	keg->uk_free = 0;
1138 	keg->uk_pages = 0;
1139 	keg->uk_flags = arg->flags;
1140 	keg->uk_allocf = page_alloc;
1141 	keg->uk_freef = page_free;
1142 	keg->uk_recurse = 0;
1143 	keg->uk_slabzone = NULL;
1144 
1145 	/*
1146 	 * The master zone is passed to us at keg-creation time.
1147 	 */
1148 	zone = arg->zone;
1149 	zone->uz_keg = keg;
1150 
1151 	if (arg->flags & UMA_ZONE_VM)
1152 		keg->uk_flags |= UMA_ZFLAG_CACHEONLY;
1153 
1154 	if (arg->flags & UMA_ZONE_ZINIT)
1155 		keg->uk_init = zero_init;
1156 
1157 	/*
1158 	 * The +UMA_FRITM_SZ added to uk_size is to account for the
1159 	 * linkage that is added to the size in zone_small_init().  If
1160 	 * we don't account for this here then we may end up in
1161 	 * zone_small_init() with a calculated 'ipers' of 0.
1162 	 */
1163 	if (keg->uk_flags & UMA_ZONE_REFCNT) {
1164 		if ((keg->uk_size+UMA_FRITMREF_SZ) >
1165 		    (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt)))
1166 			zone_large_init(zone);
1167 		else
1168 			zone_small_init(zone);
1169 	} else {
1170 		if ((keg->uk_size+UMA_FRITM_SZ) >
1171 		    (UMA_SLAB_SIZE - sizeof(struct uma_slab)))
1172 			zone_large_init(zone);
1173 		else
1174 			zone_small_init(zone);
1175 	}
1176 
1177 	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
1178 		if (keg->uk_flags & UMA_ZONE_REFCNT)
1179 			keg->uk_slabzone = slabrefzone;
1180 		else
1181 			keg->uk_slabzone = slabzone;
1182 	}
1183 
1184 	/*
1185 	 * If we haven't booted yet we need allocations to go through the
1186 	 * startup cache until the vm is ready.
1187 	 */
1188 	if (keg->uk_ppera == 1) {
1189 #ifdef UMA_MD_SMALL_ALLOC
1190 		keg->uk_allocf = uma_small_alloc;
1191 		keg->uk_freef = uma_small_free;
1192 #endif
1193 		if (booted == 0)
1194 			keg->uk_allocf = startup_alloc;
1195 	}
1196 
1197 	/*
1198 	 * Initialize the keg's lock (shared among zones) through the
1199 	 * master zone.
1200 	 */
1201 	zone->uz_lock = &keg->uk_lock;
1202 	if (arg->flags & UMA_ZONE_MTXCLASS)
1203 		ZONE_LOCK_INIT(zone, 1);
1204 	else
1205 		ZONE_LOCK_INIT(zone, 0);
1206 
1207 	/*
1208 	 * If we're putting the slab header in the actual page we need to
1209 	 * figure out where in each page it goes.  This calculates a right
1210 	 * justified offset into the memory on an ALIGN_PTR boundary.
1211 	 */
1212 	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) {
1213 		u_int totsize;
1214 
1215 		/* Size of the slab struct and free list */
1216 		if (keg->uk_flags & UMA_ZONE_REFCNT)
1217 			totsize = sizeof(struct uma_slab_refcnt) +
1218 			    keg->uk_ipers * UMA_FRITMREF_SZ;
1219 		else
1220 			totsize = sizeof(struct uma_slab) +
1221 			    keg->uk_ipers * UMA_FRITM_SZ;
1222 
1223 		if (totsize & UMA_ALIGN_PTR)
1224 			totsize = (totsize & ~UMA_ALIGN_PTR) +
1225 			    (UMA_ALIGN_PTR + 1);
1226 		keg->uk_pgoff = UMA_SLAB_SIZE - totsize;
1227 
1228 		if (keg->uk_flags & UMA_ZONE_REFCNT)
1229 			totsize = keg->uk_pgoff + sizeof(struct uma_slab_refcnt)
1230 			    + keg->uk_ipers * UMA_FRITMREF_SZ;
1231 		else
1232 			totsize = keg->uk_pgoff + sizeof(struct uma_slab)
1233 			    + keg->uk_ipers * UMA_FRITM_SZ;
1234 
1235 		/*
1236 		 * The only way the following is possible is if our
1237 		 * UMA_ALIGN_PTR adjustments have pushed us past
1238 		 * UMA_SLAB_SIZE.  I haven't checked whether this is
1239 		 * mathematically possible for all cases, so we make
1240 		 * sure here anyway.
1241 		 */
1242 		if (totsize > UMA_SLAB_SIZE) {
1243 			printf("zone %s ipers %d rsize %d size %d\n",
1244 			    zone->uz_name, keg->uk_ipers, keg->uk_rsize,
1245 			    keg->uk_size);
1246 			panic("UMA slab won't fit.\n");
1247 		}
1248 	}
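	/*
	 * Worked example of the pgoff math above (sizes assumed purely
	 * for illustration): with sizeof(struct uma_slab) = 32,
	 * UMA_FRITM_SZ = 4, uk_ipers = 15 and 4-byte pointers,
	 * totsize = 32 + 15 * 4 = 92, which is already
	 * UMA_ALIGN_PTR-aligned, so uk_pgoff = 4096 - 92 = 4004 and the
	 * slab header sits right-justified at the end of each page.
	 */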
1249 
1250 	if (keg->uk_flags & UMA_ZONE_HASH)
1251 		hash_alloc(&keg->uk_hash);
1252 
1253 #ifdef UMA_DEBUG
1254 	printf("%s(%p) size = %d ipers = %d ppera = %d pgoff = %d\n",
1255 	    zone->uz_name, zone,
1256 	    keg->uk_size, keg->uk_ipers,
1257 	    keg->uk_ppera, keg->uk_pgoff);
1258 #endif
1259 
1260 	LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);
1261 
1262 	mtx_lock(&uma_mtx);
1263 	LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
1264 	mtx_unlock(&uma_mtx);
1265 }
1266 
1267 /*
1268  * Zone header ctor.  This initializes all fields, locks, etc.
1269  *
1270  * Arguments/Returns follow uma_ctor specifications
1271  *	udata  Actually uma_zctor_args
1272  */
1273 
1274 static void
1275 zone_ctor(void *mem, int size, void *udata)
1276 {
1277 	struct uma_zctor_args *arg = udata;
1278 	uma_zone_t zone = mem;
1279 	uma_zone_t z;
1280 	uma_keg_t keg;
1281 
1282 	bzero(zone, size);
1283 	zone->uz_name = arg->name;
1284 	zone->uz_ctor = arg->ctor;
1285 	zone->uz_dtor = arg->dtor;
1286 	zone->uz_init = NULL;
1287 	zone->uz_fini = NULL;
1288 	zone->uz_allocs = 0;
1289 	zone->uz_fills = zone->uz_count = 0;
1290 
1291 	if (arg->flags & UMA_ZONE_SECONDARY) {
1292 		KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
1293 		keg = arg->keg;
1294 		zone->uz_keg = keg;
1295 		zone->uz_init = arg->uminit;
1296 		zone->uz_fini = arg->fini;
1297 		zone->uz_lock = &keg->uk_lock;
1298 		mtx_lock(&uma_mtx);
1299 		ZONE_LOCK(zone);
1300 		keg->uk_flags |= UMA_ZONE_SECONDARY;
1301 		LIST_FOREACH(z, &keg->uk_zones, uz_link) {
1302 			if (LIST_NEXT(z, uz_link) == NULL) {
1303 				LIST_INSERT_AFTER(z, zone, uz_link);
1304 				break;
1305 			}
1306 		}
1307 		ZONE_UNLOCK(zone);
1308 		mtx_unlock(&uma_mtx);
1309 	} else if (arg->keg == NULL) {
1310 		uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
1311 		    arg->align, arg->flags);
1312 	} else {
1313 		struct uma_kctor_args karg;
1314 
1315 		/* We should only be here from uma_startup() */
1316 		karg.size = arg->size;
1317 		karg.uminit = arg->uminit;
1318 		karg.fini = arg->fini;
1319 		karg.align = arg->align;
1320 		karg.flags = arg->flags;
1321 		karg.zone = zone;
1322 		keg_ctor(arg->keg, sizeof(struct uma_keg), &karg);
1323 	}
1324 	keg = zone->uz_keg;
1325 	zone->uz_lock = &keg->uk_lock;
1326 
1327 	/*
1328 	 * Some internal zones don't have room allocated for the per cpu
1329 	 * caches.  If we're internal, bail out here.
1330 	 */
1331 	if (keg->uk_flags & UMA_ZFLAG_INTERNAL) {
1332 		KASSERT((keg->uk_flags & UMA_ZONE_SECONDARY) == 0,
1333 		    ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
1334 		return;
1335 	}
1336 
1337 	if (keg->uk_flags & UMA_ZONE_MAXBUCKET)
1338 		zone->uz_count = BUCKET_MAX;
1339 	else if (keg->uk_ipers <= BUCKET_MAX)
1340 		zone->uz_count = keg->uk_ipers;
1341 	else
1342 		zone->uz_count = BUCKET_MAX;
1343 }
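/*
 * For example (continuing the sketch after zone_small_init() above): a
 * zone whose keg packs 15 items per slab starts with uz_count = 15;
 * uma_zalloc_arg() then bumps uz_count toward BUCKET_MAX each time the
 * per-cpu caches run dry.
 */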
1344 
1345 /*
1346  * Keg header dtor.  This frees all data, destroys locks, frees the hash
1347  * table and removes the keg from the global list.
1348  *
1349  * Arguments/Returns follow uma_dtor specifications
1350  *	udata  unused
1351  */
1352 static void
1353 keg_dtor(void *arg, int size, void *udata)
1354 {
1355 	uma_keg_t keg;
1356 
1357 	keg = (uma_keg_t)arg;
1358 	mtx_lock(&keg->uk_lock);
1359 	if (keg->uk_free != 0) {
1360 		printf("Freed UMA keg was not empty (%d items). "
1361 		    " Lost %d pages of memory.\n",
1362 		    keg->uk_free, keg->uk_pages);
1363 	}
1364 	mtx_unlock(&keg->uk_lock);
1365 
1366 	if (keg->uk_flags & UMA_ZONE_HASH)
1367 		hash_free(&keg->uk_hash);
1368 
1369 	mtx_destroy(&keg->uk_lock);
1370 }
1371 
1372 /*
1373  * Zone header dtor.
1374  *
1375  * Arguments/Returns follow uma_dtor specifications
1376  *	udata  unused
1377  */
1378 static void
1379 zone_dtor(void *arg, int size, void *udata)
1380 {
1381 	uma_zone_t zone;
1382 	uma_keg_t keg;
1383 
1384 	zone = (uma_zone_t)arg;
1385 	keg = zone->uz_keg;
1386 
1387 	if (!(keg->uk_flags & UMA_ZFLAG_INTERNAL))
1388 		cache_drain(zone);
1389 
1390 	mtx_lock(&uma_mtx);
1391 	zone_drain(zone);
1392 	if (keg->uk_flags & UMA_ZONE_SECONDARY) {
1393 		LIST_REMOVE(zone, uz_link);
1394 		/*
1395 		 * XXX there are some races here where
1396 		 * the zone can be drained but zone lock
1397 		 * released and then refilled before we
1398 		 * remove it... we don't care for now.
1399 		 */
1400 		ZONE_LOCK(zone);
1401 		if (LIST_EMPTY(&keg->uk_zones))
1402 			keg->uk_flags &= ~UMA_ZONE_SECONDARY;
1403 		ZONE_UNLOCK(zone);
1404 		mtx_unlock(&uma_mtx);
1405 	} else {
1406 		LIST_REMOVE(keg, uk_link);
1407 		LIST_REMOVE(zone, uz_link);
1408 		mtx_unlock(&uma_mtx);
1409 		uma_zfree_internal(kegs, keg, NULL, 0);
1410 	}
1411 	zone->uz_keg = NULL;
1412 }
1413 
1414 /*
1415  * Traverses every zone in the system and calls a callback
1416  *
1417  * Arguments:
1418  *	zfunc  A pointer to a function which accepts a zone
1419  *		as an argument.
1420  *
1421  * Returns:
1422  *	Nothing
1423  */
1424 static void
1425 zone_foreach(void (*zfunc)(uma_zone_t))
1426 {
1427 	uma_keg_t keg;
1428 	uma_zone_t zone;
1429 
1430 	mtx_lock(&uma_mtx);
1431 	LIST_FOREACH(keg, &uma_kegs, uk_link) {
1432 		LIST_FOREACH(zone, &keg->uk_zones, uz_link)
1433 			zfunc(zone);
1434 	}
1435 	mtx_unlock(&uma_mtx);
1436 }
1437 
1438 /* Public functions */
1439 /* See uma.h */
1440 void
1441 uma_startup(void *bootmem)
1442 {
1443 	struct uma_zctor_args args;
1444 	uma_slab_t slab;
1445 	u_int slabsize;
1446 	u_int objsize, totsize, wsize;
1447 	int i;
1448 
1449 #ifdef UMA_DEBUG
1450 	printf("Creating uma keg headers zone and keg.\n");
1451 #endif
1452 	/*
1453 	 * The general UMA lock is a recursion-allowed lock because
1454 	 * there is a code path where, while we're still configured
1455 	 * to use startup_alloc() for backend page allocations, we
1456 	 * may end up in uma_reclaim() which calls zone_foreach(zone_drain),
1457 	 * which grabs uma_mtx, only to later call into startup_alloc()
1458 	 * because while freeing we needed to allocate a bucket.  Since
1459 	 * startup_alloc() also takes uma_mtx, we need to be able to
1460 	 * recurse on it.
1461 	 */
1462 	mtx_init(&uma_mtx, "UMA lock", NULL, MTX_DEF | MTX_RECURSE);
1463 
1464 	/*
1465 	 * Figure out the maximum number of items-per-slab we'll have if
1466 	 * we're using the OFFPAGE slab header to track free items, given
1467 	 * all possible object sizes and the maximum desired wastage
1468 	 * (UMA_MAX_WASTE).
1469 	 *
1470 	 * We iterate until we find an object size for
1471 	 * which the calculated wastage in zone_small_init() will be
1472 	 * enough to warrant OFFPAGE.  Since wastedspace versus objsize
1473 	 * is an overall increasing see-saw function, we find the smallest
1474 	 * objsize such that the wastage is always acceptable for objects
1475 	 * with that objsize or smaller.  Since a smaller objsize always
1476 	 * generates a larger possible uma_max_ipers, we use this computed
1477 	 * objsize to calculate the largest ipers possible.  Since the
1478 	 * ipers calculated for OFFPAGE slab headers is always larger than
1479 	 * the ipers initially calculated in zone_small_init(), we use
1480 	 * the former's equation (UMA_SLAB_SIZE / keg->uk_rsize) to
1481 	 * obtain the maximum ipers possible for offpage slab headers.
1482 	 *
1483 	 * It should be noted that ipers versus objsize is an inversely
1484 	 * proportional function which drops off rather quickly so as
1485 	 * long as our UMA_MAX_WASTE is such that the objsize we calculate
1486 	 * falls into the portion of the inverse relation AFTER the steep
1487 	 * falloff, then uma_max_ipers shouldn't be too high (~10 on i386).
1488 	 *
1489 	 * Note that we have 8-bits (1 byte) to use as a freelist index
1490 	 * inside the actual slab header itself and this is enough to
1491 	 * accommodate us.  In the worst case, a UMA_SMALLEST_UNIT sized
1492 	 * object with offpage slab header would have ipers =
1493 	 * UMA_SLAB_SIZE / UMA_SMALLEST_UNIT (currently = 256), which is
1494 	 * 1 greater than what our byte-integer freelist index can
1495 	 * accommodate, but we know that this situation never occurs as
1496 	 * for UMA_SMALLEST_UNIT-sized objects, we will never calculate
1497 	 * that we need to go to offpage slab headers.  Or, if we do,
1498 	 * then we trap that condition below and panic in the INVARIANTS case.
1499 	 */
1500 	wsize = UMA_SLAB_SIZE - sizeof(struct uma_slab) - UMA_MAX_WASTE;
1501 	totsize = wsize;
1502 	objsize = UMA_SMALLEST_UNIT;
1503 	while (totsize >= wsize) {
1504 		totsize = (UMA_SLAB_SIZE - sizeof(struct uma_slab)) /
1505 		    (objsize + UMA_FRITM_SZ);
1506 		totsize *= (UMA_FRITM_SZ + objsize);
1507 		objsize++;
1508 	}
1509 	if (objsize > UMA_SMALLEST_UNIT)
1510 		objsize--;
1511 	uma_max_ipers = UMA_SLAB_SIZE / objsize;
1512 
1513 	wsize = UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt) - UMA_MAX_WASTE;
1514 	totsize = wsize;
1515 	objsize = UMA_SMALLEST_UNIT;
1516 	while (totsize >= wsize) {
1517 		totsize = (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt)) /
1518 		    (objsize + UMA_FRITMREF_SZ);
1519 		totsize *= (UMA_FRITMREF_SZ + objsize);
1520 		objsize++;
1521 	}
1522 	if (objsize > UMA_SMALLEST_UNIT)
1523 		objsize--;
1524 	uma_max_ipers_ref = UMA_SLAB_SIZE / objsize;
1525 
1526 	KASSERT((uma_max_ipers_ref <= 255) && (uma_max_ipers <= 255),
1527 	    ("uma_startup: calculated uma_max_ipers values too large!"));
1528 
1529 #ifdef UMA_DEBUG
1530 	printf("Calculated uma_max_ipers (for OFFPAGE) is %d\n", uma_max_ipers);
1531 	printf("Calculated uma_max_ipers_slab (for OFFPAGE) is %d\n",
1532 	    uma_max_ipers_ref);
1533 #endif
1534 
1535 	/* "manually" create the initial zone */
1536 	args.name = "UMA Kegs";
1537 	args.size = sizeof(struct uma_keg);
1538 	args.ctor = keg_ctor;
1539 	args.dtor = keg_dtor;
1540 	args.uminit = zero_init;
1541 	args.fini = NULL;
1542 	args.keg = &masterkeg;
1543 	args.align = 32 - 1;
1544 	args.flags = UMA_ZFLAG_INTERNAL;
1545 	/* The initial zone has no per-cpu queues so it's smaller. */
1546 	zone_ctor(kegs, sizeof(struct uma_zone), &args);
1547 
1548 #ifdef UMA_DEBUG
1549 	printf("Filling boot free list.\n");
1550 #endif
1551 	for (i = 0; i < UMA_BOOT_PAGES; i++) {
1552 		slab = (uma_slab_t)((u_int8_t *)bootmem + (i * UMA_SLAB_SIZE));
1553 		slab->us_data = (u_int8_t *)slab;
1554 		slab->us_flags = UMA_SLAB_BOOT;
1555 		LIST_INSERT_HEAD(&uma_boot_pages, slab, us_link);
1556 		uma_boot_free++;
1557 	}
1558 
1559 #ifdef UMA_DEBUG
1560 	printf("Creating uma zone headers zone and keg.\n");
1561 #endif
1562 	args.name = "UMA Zones";
1563 	args.size = sizeof(struct uma_zone) +
1564 	    (sizeof(struct uma_cache) * (mp_maxid + 1));
1565 	args.ctor = zone_ctor;
1566 	args.dtor = zone_dtor;
1567 	args.uminit = zero_init;
1568 	args.fini = NULL;
1569 	args.keg = NULL;
1570 	args.align = 32 - 1;
1571 	args.flags = UMA_ZFLAG_INTERNAL;
1572 	/* The initial zone has no per-cpu queues so it's smaller. */
1573 	zone_ctor(zones, sizeof(struct uma_zone), &args);
1574 
1575 #ifdef UMA_DEBUG
1576 	printf("Initializing pcpu cache locks.\n");
1577 #endif
1578 	/* Initialize the pcpu cache lock set once and for all */
1579 	for (i = 0; i <= mp_maxid; i++)
1580 		CPU_LOCK_INIT(i);
1581 
1582 #ifdef UMA_DEBUG
1583 	printf("Creating slab and hash zones.\n");
1584 #endif
1585 
1586 	/*
1587 	 * This is the max number of free list items we'll have with
1588 	 * offpage slabs.
1589 	 */
1590 	slabsize = uma_max_ipers * UMA_FRITM_SZ;
1591 	slabsize += sizeof(struct uma_slab);
1592 
1593 	/* Now make a zone for slab headers */
1594 	slabzone = uma_zcreate("UMA Slabs",
1595 				slabsize,
1596 				NULL, NULL, NULL, NULL,
1597 				UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
1598 
1599 	/*
1600 	 * We also create a zone for the bigger slabs with reference
1601 	 * counts in them, to accommodate UMA_ZONE_REFCNT zones.
1602 	 */
1603 	slabsize = uma_max_ipers_ref * UMA_FRITMREF_SZ;
1604 	slabsize += sizeof(struct uma_slab_refcnt);
1605 	slabrefzone = uma_zcreate("UMA RCntSlabs",
1606 				  slabsize,
1607 				  NULL, NULL, NULL, NULL,
1608 				  UMA_ALIGN_PTR,
1609 				  UMA_ZFLAG_INTERNAL);
1610 
1611 	hashzone = uma_zcreate("UMA Hash",
1612 	    sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
1613 	    NULL, NULL, NULL, NULL,
1614 	    UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
1615 
1616 	bucket_init();
1617 
1618 #ifdef UMA_MD_SMALL_ALLOC
1619 	booted = 1;
1620 #endif
1621 
1622 #ifdef UMA_DEBUG
1623 	printf("UMA startup complete.\n");
1624 #endif
1625 }
1626 
1627 /* see uma.h */
1628 void
1629 uma_startup2(void)
1630 {
1631 	booted = 1;
1632 	bucket_enable();
1633 #ifdef UMA_DEBUG
1634 	printf("UMA startup2 complete.\n");
1635 #endif
1636 }
1637 
1638 /*
1639  * Initialize our callout handle
1640  *
1641  */
1642 
1643 static void
1644 uma_startup3(void)
1645 {
1646 #ifdef UMA_DEBUG
1647 	printf("Starting callout.\n");
1648 #endif
1649 	callout_init(&uma_callout, CALLOUT_MPSAFE);
1650 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
1651 #ifdef UMA_DEBUG
1652 	printf("UMA startup3 complete.\n");
1653 #endif
1654 }
1655 
1656 static void
1657 uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
1658 		int align, u_int16_t flags)
1659 {
1660 	struct uma_kctor_args args;
1661 
1662 	args.size = size;
1663 	args.uminit = uminit;
1664 	args.fini = fini;
1665 	args.align = align;
1666 	args.flags = flags;
1667 	args.zone = zone;
1668 	zone = uma_zalloc_internal(kegs, &args, M_WAITOK);
1669 }
1670 
1671 /* See uma.h */
1672 uma_zone_t
1673 uma_zcreate(char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
1674 		uma_init uminit, uma_fini fini, int align, u_int16_t flags)
1675 
1676 {
1677 	struct uma_zctor_args args;
1678 
1679 	/* This stuff is essential for the zone ctor */
1680 	args.name = name;
1681 	args.size = size;
1682 	args.ctor = ctor;
1683 	args.dtor = dtor;
1684 	args.uminit = uminit;
1685 	args.fini = fini;
1686 	args.align = align;
1687 	args.flags = flags;
1688 	args.keg = NULL;
1689 
1690 	return (uma_zalloc_internal(zones, &args, M_WAITOK));
1691 }
1692 
1693 /* See uma.h */
1694 uma_zone_t
1695 uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
1696 		    uma_init zinit, uma_fini zfini, uma_zone_t master)
1697 {
1698 	struct uma_zctor_args args;
1699 
1700 	args.name = name;
1701 	args.size = master->uz_keg->uk_size;
1702 	args.ctor = ctor;
1703 	args.dtor = dtor;
1704 	args.uminit = zinit;
1705 	args.fini = zfini;
1706 	args.align = master->uz_keg->uk_align;
1707 	args.flags = master->uz_keg->uk_flags | UMA_ZONE_SECONDARY;
1708 	args.keg = master->uz_keg;
1709 
1710 	return (uma_zalloc_internal(zones, &args, M_WAITOK));
1711 }
1712 
1713 /* See uma.h */
1714 void
1715 uma_zdestroy(uma_zone_t zone)
1716 {
1717 	uma_zfree_internal(zones, zone, NULL, 0);
1718 }
1719 
1720 /* See uma.h */
1721 void *
1722 uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
1723 {
1724 	void *item;
1725 	uma_cache_t cache;
1726 	uma_bucket_t bucket;
1727 	int cpu;
1728 	int badness;
1729 
1730 	/* This is the fast path allocation */
1731 #ifdef UMA_DEBUG_ALLOC_1
1732 	printf("Allocating one item from %s(%p)\n", zone->uz_name, zone);
1733 #endif
1734 
1735 	if (!(flags & M_NOWAIT)) {
1736 		KASSERT(curthread->td_intr_nesting_level == 0,
1737 		   ("malloc(M_WAITOK) in interrupt context"));
1738 		badness = nosleepwithlocks;
1739 #ifdef WITNESS
1740 		badness = WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
1741 		    NULL,
1742 		    "malloc(M_WAITOK) of \"%s\", forcing M_NOWAIT",
1743 		    zone->uz_name);
1744 #endif
1745 		if (badness) {
1746 			flags &= ~M_WAITOK;
1747 			flags |= M_NOWAIT;
1748 		}
1749 	}
1750 
1751 zalloc_restart:
1752 	cpu = PCPU_GET(cpuid);
1753 	CPU_LOCK(cpu);
1754 	cache = &zone->uz_cpu[cpu];
1755 
1756 zalloc_start:
1757 	bucket = cache->uc_allocbucket;
1758 
1759 	if (bucket) {
1760 		if (bucket->ub_cnt > 0) {
1761 			bucket->ub_cnt--;
1762 			item = bucket->ub_bucket[bucket->ub_cnt];
1763 #ifdef INVARIANTS
1764 			bucket->ub_bucket[bucket->ub_cnt] = NULL;
1765 #endif
1766 			KASSERT(item != NULL,
1767 			    ("uma_zalloc: Bucket pointer mangled."));
1768 			cache->uc_allocs++;
1769 #ifdef INVARIANTS
1770 			ZONE_LOCK(zone);
1771 			uma_dbg_alloc(zone, NULL, item);
1772 			ZONE_UNLOCK(zone);
1773 #endif
1774 			CPU_UNLOCK(cpu);
1775 			if (zone->uz_ctor)
1776 				zone->uz_ctor(item,zone->uz_keg->uk_size,udata);
1777 			if (flags & M_ZERO)
1778 				bzero(item, zone->uz_keg->uk_size);
1779 			return (item);
1780 		} else if (cache->uc_freebucket) {
1781 			/*
1782 			 * We have run out of items in our allocbucket.
1783 			 * See if we can switch with our free bucket.
1784 			 */
1785 			if (cache->uc_freebucket->ub_cnt > 0) {
1786 #ifdef UMA_DEBUG_ALLOC
1787 				printf("uma_zalloc: Swapping empty with"
1788 				    " alloc.\n");
1789 #endif
1790 				bucket = cache->uc_freebucket;
1791 				cache->uc_freebucket = cache->uc_allocbucket;
1792 				cache->uc_allocbucket = bucket;
1793 
1794 				goto zalloc_start;
1795 			}
1796 		}
1797 	}
1798 	ZONE_LOCK(zone);
1799 	/* Since we have locked the zone we may as well send back our stats */
1800 	zone->uz_allocs += cache->uc_allocs;
1801 	cache->uc_allocs = 0;
1802 
1803 	/* Our old one is now a free bucket */
1804 	if (cache->uc_allocbucket) {
1805 		KASSERT(cache->uc_allocbucket->ub_cnt == 0,
1806 		    ("uma_zalloc_arg: Freeing a non free bucket."));
1807 		LIST_INSERT_HEAD(&zone->uz_free_bucket,
1808 		    cache->uc_allocbucket, ub_link);
1809 		cache->uc_allocbucket = NULL;
1810 	}
1811 
1812 	/* Check the free list for a new alloc bucket */
1813 	if ((bucket = LIST_FIRST(&zone->uz_full_bucket)) != NULL) {
1814 		KASSERT(bucket->ub_cnt != 0,
1815 		    ("uma_zalloc_arg: Returning an empty bucket."));
1816 
1817 		LIST_REMOVE(bucket, ub_link);
1818 		cache->uc_allocbucket = bucket;
1819 		ZONE_UNLOCK(zone);
1820 		goto zalloc_start;
1821 	}
1822 	/* We are no longer associated with this cpu!!! */
1823 	CPU_UNLOCK(cpu);
1824 
1825 	/* Bump up our uz_count so we get here less */
1826 	if (zone->uz_count < BUCKET_MAX)
1827 		zone->uz_count++;
1828 
1829 	/*
1830 	 * Now let's just fill a bucket and put it on the free list.  If that
1831 	 * works we'll restart the allocation from the beginning.
1832 	 */
1833 	if (uma_zalloc_bucket(zone, flags)) {
1834 		ZONE_UNLOCK(zone);
1835 		goto zalloc_restart;
1836 	}
1837 	ZONE_UNLOCK(zone);
1838 	/*
1839 	 * We may not be able to get a bucket so return an actual item.
1840 	 */
1841 #ifdef UMA_DEBUG
1842 	printf("uma_zalloc_arg: Bucketzone returned NULL\n");
1843 #endif
1844 
1845 	return (uma_zalloc_internal(zone, udata, flags));
1846 }
1847 
1848 static uma_slab_t
1849 uma_zone_slab(uma_zone_t zone, int flags)
1850 {
1851 	uma_slab_t slab;
1852 	uma_keg_t keg;
1853 
1854 	keg = zone->uz_keg;
1855 
1856 	/*
1857 	 * This is to prevent us from recursively trying to allocate
1858 	 * buckets.  The problem is that if an allocation forces us to
1859 	 * grab a new bucket we will call page_alloc, which will go off
1860 	 * and cause the vm to allocate vm_map_entries.  If we need new
1861 	 * buckets there too we will recurse in kmem_alloc and bad
1862 	 * things happen.  So instead we return a NULL bucket, and make
1863 	 * the code that allocates buckets smart enough to deal with it.
1864 	 */
1865 	if (keg->uk_flags & UMA_ZFLAG_INTERNAL && keg->uk_recurse != 0)
1866 		return (NULL);
1867 
1868 	slab = NULL;
1869 
1870 	for (;;) {
1871 		/*
1872 		 * Find a slab with some space.  Prefer slabs that are partially
1873 		 * used over those that are totally full.  This helps to reduce
1874 		 * fragmentation.
1875 		 */
1876 		if (keg->uk_free != 0) {
1877 			if (!LIST_EMPTY(&keg->uk_part_slab)) {
1878 				slab = LIST_FIRST(&keg->uk_part_slab);
1879 			} else {
1880 				slab = LIST_FIRST(&keg->uk_free_slab);
1881 				LIST_REMOVE(slab, us_link);
1882 				LIST_INSERT_HEAD(&keg->uk_part_slab, slab,
1883 				    us_link);
1884 			}
1885 			return (slab);
1886 		}
1887 
1888 		/*
1889 		 * M_NOVM means don't ask at all!
1890 		 */
1891 		if (flags & M_NOVM)
1892 			break;
1893 
1894 		if (keg->uk_maxpages &&
1895 		    keg->uk_pages >= keg->uk_maxpages) {
1896 			keg->uk_flags |= UMA_ZFLAG_FULL;
1897 
1898 			if (flags & M_NOWAIT)
1899 				break;
1900 			else
1901 				msleep(keg, &keg->uk_lock, PVM,
1902 				    "zonelimit", 0);
1903 			continue;
1904 		}
1905 		keg->uk_recurse++;
1906 		slab = slab_zalloc(zone, flags);
1907 		keg->uk_recurse--;
1908 
1909 		/*
1910 		 * If we got a slab here it's safe to mark it partially used
1911 		 * and return.  We assume that the caller is going to remove
1912 		 * at least one item.
1913 		 */
1914 		if (slab) {
1915 			LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
1916 			return (slab);
1917 		}
1918 		/*
1919 		 * We might not have been able to get a slab but another CPU
1920 		 * could have while we were unlocked.  Check again before we
1921 		 * fail.
1922 		 */
1923 		if (flags & M_NOWAIT)
1924 			flags |= M_NOVM;
1925 	}
1926 	return (slab);
1927 }
1928 
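/*
 * A self-contained sketch of the slab selection policy above: prefer
 * partially used slabs so that fully free slabs stay intact and can
 * later be returned to the VM.  The toy_* names are illustrative only.
 */
#if 0
#include <sys/queue.h>
#include <stddef.h>

struct toy_slab {
	LIST_ENTRY(toy_slab)	ts_link;
	int			ts_freecount;
};

LIST_HEAD(toy_slabhead, toy_slab);

struct toy_keg {
	struct toy_slabhead	tk_part;	/* partially used */
	struct toy_slabhead	tk_free;	/* completely free */
	int			tk_nfree;	/* free items, all slabs */
};

static struct toy_slab *
toy_keg_slab(struct toy_keg *keg)
{
	struct toy_slab *slab;

	if (keg->tk_nfree == 0)
		return (NULL);
	if (!LIST_EMPTY(&keg->tk_part))
		return (LIST_FIRST(&keg->tk_part));
	/* Promote a free slab to the partial list before handing it out. */
	slab = LIST_FIRST(&keg->tk_free);
	LIST_REMOVE(slab, ts_link);
	LIST_INSERT_HEAD(&keg->tk_part, slab, ts_link);
	return (slab);
}

int
main(void)
{
	struct toy_keg keg;
	struct toy_slab s;

	LIST_INIT(&keg.tk_part);
	LIST_INIT(&keg.tk_free);
	s.ts_freecount = 4;
	LIST_INSERT_HEAD(&keg.tk_free, &s, ts_link);
	keg.tk_nfree = 4;
	return (toy_keg_slab(&keg) == &s ? 0 : 1);
}
#endif
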
1929 static void *
1930 uma_slab_alloc(uma_zone_t zone, uma_slab_t slab)
1931 {
1932 	uma_keg_t keg;
1933 	void *item;
1934 	u_int8_t freei;
1935 
1936 	keg = zone->uz_keg;
1937 
1938 	freei = slab->us_firstfree;
1939 	slab->us_firstfree = slab->us_freelist[freei].us_item;
1940 	item = slab->us_data + (keg->uk_rsize * freei);
1941 
1942 	slab->us_freecount--;
1943 	keg->uk_free--;
1944 #ifdef INVARIANTS
1945 	uma_dbg_alloc(zone, slab, item);
1946 #endif
1947 	/* Move this slab to the full list */
1948 	if (slab->us_freecount == 0) {
1949 		LIST_REMOVE(slab, us_link);
1950 		LIST_INSERT_HEAD(&keg->uk_full_slab, slab, us_link);
1951 	}
1952 
1953 	return (item);
1954 }
1955 
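/*
 * A compilable userland model of the intra-slab free list used above:
 * free items are chained by array index (us_firstfree/us_freelist)
 * rather than by pointer, keeping the bookkeeping to one byte per
 * item.  The toy_* names and sizes are illustrative, not UMA's.
 */
#if 0
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_IPERS	8	/* items per slab (uk_ipers) */
#define TOY_RSIZE	32	/* rounded item size (uk_rsize) */

struct toy_slab {
	uint8_t	ts_data[TOY_IPERS * TOY_RSIZE];
	uint8_t	ts_next[TOY_IPERS];	/* us_freelist[i].us_item */
	uint8_t	ts_firstfree;		/* us_firstfree */
	uint8_t	ts_freecount;
};

static void
toy_slab_init(struct toy_slab *s)
{
	int i;

	/* Chain every index; the terminal link is never followed. */
	for (i = 0; i < TOY_IPERS; i++)
		s->ts_next[i] = i + 1;
	s->ts_firstfree = 0;
	s->ts_freecount = TOY_IPERS;
}

static void *
toy_slab_alloc(struct toy_slab *s)
{
	uint8_t freei;

	assert(s->ts_freecount > 0);
	freei = s->ts_firstfree;
	s->ts_firstfree = s->ts_next[freei];	/* pop the head index */
	s->ts_freecount--;
	return (s->ts_data + TOY_RSIZE * freei);
}

static void
toy_slab_free(struct toy_slab *s, void *item)
{
	uint8_t freei;

	freei = ((uint8_t *)item - s->ts_data) / TOY_RSIZE;
	s->ts_next[freei] = s->ts_firstfree;	/* push the index */
	s->ts_firstfree = freei;
	s->ts_freecount++;
}

int
main(void)
{
	struct toy_slab s;
	void *a;

	toy_slab_init(&s);
	a = toy_slab_alloc(&s);
	(void)toy_slab_alloc(&s);
	toy_slab_free(&s, a);
	printf("freecount %u firstfree %u\n", (unsigned)s.ts_freecount,
	    (unsigned)s.ts_firstfree);	/* freecount 7 firstfree 0 */
	return (0);
}
#endif
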
1956 static int
1957 uma_zalloc_bucket(uma_zone_t zone, int flags)
1958 {
1959 	uma_bucket_t bucket;
1960 	uma_slab_t slab;
1961 	int16_t saved;
1962 	int max;
1963 
1964 	/*
1965 	 * Try this zone's free list first so we don't allocate extra buckets.
1966 	 */
1967 	if ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
1968 		KASSERT(bucket->ub_cnt == 0,
1969 		    ("uma_zalloc_bucket: Bucket on free list is not empty."));
1970 		LIST_REMOVE(bucket, ub_link);
1971 	} else {
1972 		int bflags;
1973 
1974 		bflags = (flags & ~M_ZERO);
1975 		if (zone->uz_keg->uk_flags & UMA_ZFLAG_CACHEONLY)
1976 			bflags |= M_NOVM;
1977 
1978 		ZONE_UNLOCK(zone);
1979 		bucket = bucket_alloc(zone->uz_count, bflags);
1980 		ZONE_LOCK(zone);
1981 	}
1982 
1983 	if (bucket == NULL)
1984 		return (0);
1985 
1986 #ifdef SMP
1987 	/*
1988 	 * This code is here to limit the number of simultaneous bucket fills
1989 	 * for any given zone to the number of per cpu caches in this zone. This
1990 	 * is done so that we don't allocate more memory than we really need.
1991 	 */
1992 	if (zone->uz_fills >= mp_ncpus)
1993 		goto done;
1994 
1995 #endif
1996 	zone->uz_fills++;
1997 
1998 	max = MIN(bucket->ub_entries, zone->uz_count);
1999 	/* Try to keep the buckets totally full */
2000 	saved = bucket->ub_cnt;
2001 	while (bucket->ub_cnt < max &&
2002 	    (slab = uma_zone_slab(zone, flags)) != NULL) {
2003 		while (slab->us_freecount && bucket->ub_cnt < max) {
2004 			bucket->ub_bucket[bucket->ub_cnt++] =
2005 			    uma_slab_alloc(zone, slab);
2006 		}
2007 
2008 		/* Don't block on the next fill */
2009 		flags |= M_NOWAIT;
2010 	}
2011 
2012 	/*
2013 	 * We unlock here because we need to call the zone's init.
2014 	 * It should be safe to unlock because the slab dealt with
2015 	 * above is already on the appropriate list within the keg
2016 	 * and the bucket we filled is not yet on any list, so we
2017 	 * own it.
2018 	 */
2019 	if (zone->uz_init != NULL) {
2020 		int i;
2021 
2022 		ZONE_UNLOCK(zone);
2023 		for (i = saved; i < bucket->ub_cnt; i++)
2024 			zone->uz_init(bucket->ub_bucket[i],
2025 			    zone->uz_keg->uk_size);
2026 		ZONE_LOCK(zone);
2027 	}
2028 
2029 	zone->uz_fills--;
2030 	if (bucket->ub_cnt != 0) {
2031 		LIST_INSERT_HEAD(&zone->uz_full_bucket,
2032 		    bucket, ub_link);
2033 		return (1);
2034 	}
2035 #ifdef SMP
2036 done:
2037 #endif
2038 	bucket_free(bucket);
2039 
2040 	return (0);
2041 }
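
/*
 * A sketch of two details of the fill loop above, with illustrative
 * toy_* names: cap the fill at MIN(bucket capacity, the zone's tuned
 * count), and remember the starting count so initializers run only on
 * the items this call added.
 */
#if 0
#include <stddef.h>

static int
toy_fill_bucket(void **slots, int cnt, int entries, int tuned,
    void *(*item_alloc)(void), void (*item_init)(void *))
{
	int i, max, saved;

	max = entries < tuned ? entries : tuned;
	saved = cnt;
	while (cnt < max && (slots[cnt] = item_alloc()) != NULL)
		cnt++;
	/* Initialize only the newly filled range [saved, cnt). */
	if (item_init != NULL)
		for (i = saved; i < cnt; i++)
			item_init(slots[i]);
	return (cnt);
}
#endif
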
2042 /*
2043  * Allocates an item for an internal zone
2044  *
2045  * Arguments:
2046  *	zone   The zone to alloc for.
2047  *	udata  The data to be passed to the constructor.
2048  *	flags  M_WAITOK, M_NOWAIT, M_ZERO.
2049  *
2050  * Returns:
2051  *	NULL if there is no memory and M_NOWAIT is set
2052  *	An item if successful
2053  */
2054 
2055 static void *
2056 uma_zalloc_internal(uma_zone_t zone, void *udata, int flags)
2057 {
2058 	uma_keg_t keg;
2059 	uma_slab_t slab;
2060 	void *item;
2061 
2062 	item = NULL;
2063 	keg = zone->uz_keg;
2064 
2065 #ifdef UMA_DEBUG_ALLOC
2066 	printf("INTERNAL: Allocating one item from %s(%p)\n", zone->uz_name, zone);
2067 #endif
2068 	ZONE_LOCK(zone);
2069 
2070 	slab = uma_zone_slab(zone, flags);
2071 	if (slab == NULL) {
2072 		ZONE_UNLOCK(zone);
2073 		return (NULL);
2074 	}
2075 
2076 	item = uma_slab_alloc(zone, slab);
2077 
2078 	ZONE_UNLOCK(zone);
2079 
2080 	/*
2081 	 * We have to call both the zone's init (not the keg's init)
2082 	 * and the zone's ctor.  This is because the item is going from
2083 	 * a keg slab directly to the user, and the user is expecting it
2084 	 * to be both zone-init'd as well as zone-ctor'd.
2085 	 */
2086 	if (zone->uz_init != NULL)
2087 		zone->uz_init(item, keg->uk_size);
2088 	if (zone->uz_ctor != NULL)
2089 		zone->uz_ctor(item, keg->uk_size, udata);
2090 	if (flags & M_ZERO)
2091 		bzero(item, keg->uk_size);
2092 
2093 	return (item);
2094 }
2095 
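/*
 * A sketch of the finishing order above, with illustrative toy_*
 * names: the zone's init runs first, then the zone's ctor, and only
 * then is M_ZERO honored, mirroring the code.
 */
#if 0
#include <stddef.h>
#include <string.h>

typedef void (*toy_init_fn)(void *, int);
typedef void (*toy_ctor_fn)(void *, int, void *);

static void *
toy_finish_item(void *item, int size, toy_init_fn init, toy_ctor_fn ctor,
    void *udata, int zero)
{
	if (init != NULL)
		init(item, size);
	if (ctor != NULL)
		ctor(item, size, udata);
	if (zero)
		memset(item, 0, size);	/* M_ZERO, after init and ctor */
	return (item);
}
#endif
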
2096 /* See uma.h */
2097 void
2098 uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
2099 {
2100 	uma_keg_t keg;
2101 	uma_cache_t cache;
2102 	uma_bucket_t bucket;
2103 	int bflags;
2104 	int cpu;
2105 	int skip;
2106 
2107 	/* This is the fast path free */
2108 	skip = 0;
2109 	keg = zone->uz_keg;
2110 
2111 #ifdef UMA_DEBUG_ALLOC_1
2112 	printf("Freeing item %p to %s(%p)\n", item, zone->uz_name, zone);
2113 #endif
2114 	/*
2115 	 * The race here is acceptable.  If we miss it we'll just have to wait
2116 	 * a little longer for the limits to be reset.
2117 	 */
2118 
2119 	if (keg->uk_flags & UMA_ZFLAG_FULL)
2120 		goto zfree_internal;
2121 
2122 	if (zone->uz_dtor) {
2123 		zone->uz_dtor(item, keg->uk_size, udata);
2124 		skip = 1;
2125 	}
2126 
2127 zfree_restart:
2128 	cpu = PCPU_GET(cpuid);
2129 	CPU_LOCK(cpu);
2130 	cache = &zone->uz_cpu[cpu];
2131 
2132 zfree_start:
2133 	bucket = cache->uc_freebucket;
2134 
2135 	if (bucket) {
2136 		/*
2137 		 * Do we have room in our bucket?  It is OK for this check
2138 		 * to be slightly out of sync.
2139 		 */
2140 
2141 		if (bucket->ub_cnt < bucket->ub_entries) {
2142 			KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL,
2143 			    ("uma_zfree: Freeing to non free bucket index."));
2144 			bucket->ub_bucket[bucket->ub_cnt] = item;
2145 			bucket->ub_cnt++;
2146 #ifdef INVARIANTS
2147 			ZONE_LOCK(zone);
2148 			if (keg->uk_flags & UMA_ZONE_MALLOC)
2149 				uma_dbg_free(zone, udata, item);
2150 			else
2151 				uma_dbg_free(zone, NULL, item);
2152 			ZONE_UNLOCK(zone);
2153 #endif
2154 			CPU_UNLOCK(cpu);
2155 			return;
2156 		} else if (cache->uc_allocbucket) {
2157 #ifdef UMA_DEBUG_ALLOC
2158 			printf("uma_zfree: Swapping buckets.\n");
2159 #endif
2160 			/*
2161 			 * We have run out of space in our freebucket.
2162 			 * See if we can switch with our alloc bucket.
2163 			 */
2164 			if (cache->uc_allocbucket->ub_cnt <
2165 			    cache->uc_freebucket->ub_cnt) {
2166 				bucket = cache->uc_freebucket;
2167 				cache->uc_freebucket = cache->uc_allocbucket;
2168 				cache->uc_allocbucket = bucket;
2169 				goto zfree_start;
2170 			}
2171 		}
2172 	}
2173 	/*
2174 	 * We can get here for two reasons:
2175 	 *
2176 	 * 1) The buckets are NULL
2177 	 * 2) The alloc and free buckets are both somewhat full.
2178 	 */
2179 
2180 	ZONE_LOCK(zone);
2181 
2182 	bucket = cache->uc_freebucket;
2183 	cache->uc_freebucket = NULL;
2184 
2185 	/* Can we throw this on the zone's full-bucket list? */
2186 	if (bucket != NULL) {
2187 #ifdef UMA_DEBUG_ALLOC
2188 		printf("uma_zfree: Putting old bucket on the free list.\n");
2189 #endif
2190 		/* ub_cnt is the number of items in the bucket; it can't be 0 */
2191 		KASSERT(bucket->ub_cnt != 0,
2192 		    ("uma_zfree: Attempting to insert an empty bucket onto the full list."));
2193 		LIST_INSERT_HEAD(&zone->uz_full_bucket,
2194 		    bucket, ub_link);
2195 	}
2196 	if ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
2197 		LIST_REMOVE(bucket, ub_link);
2198 		ZONE_UNLOCK(zone);
2199 		cache->uc_freebucket = bucket;
2200 		goto zfree_start;
2201 	}
2202 	/* We're done with this CPU now */
2203 	CPU_UNLOCK(cpu);
2204 
2205 	/* And with the zone. */
2206 	ZONE_UNLOCK(zone);
2207 
2208 #ifdef UMA_DEBUG_ALLOC
2209 	printf("uma_zfree: Allocating new free bucket.\n");
2210 #endif
2211 	bflags = M_NOWAIT;
2212 
2213 	if (keg->uk_flags & UMA_ZFLAG_CACHEONLY)
2214 		bflags |= M_NOVM;
2215 	bucket = bucket_alloc(zone->uz_count, bflags);
2216 	if (bucket) {
2217 		ZONE_LOCK(zone);
2218 		LIST_INSERT_HEAD(&zone->uz_free_bucket,
2219 		    bucket, ub_link);
2220 		ZONE_UNLOCK(zone);
2221 		goto zfree_restart;
2222 	}
2223 
2224 	/*
2225 	 * If nothing else caught this, we'll just do an internal free.
2226 	 */
2227 
2228 zfree_internal:
2229 
2230 #ifdef INVARIANTS
2231 	/*
2232 	 * If we have already called the dtor above (skip is set),
2233 	 * uma_zfree_internal will skip both the dtor and its
2234 	 * uma_dbg_free call, so we must take care of the uma_dbg_free
2235 	 * accounting here ourselves.
2236 	 */
2237 	if (skip) {
2238 		ZONE_LOCK(zone);
2239 		if (keg->uk_flags & UMA_ZONE_MALLOC)
2240 			uma_dbg_free(zone, udata, item);
2241 		else
2242 			uma_dbg_free(zone, NULL, item);
2243 		ZONE_UNLOCK(zone);
2244 	}
2245 #endif
2246 	uma_zfree_internal(zone, item, udata, skip);
2247 
2248 	return;
2249 }
2250 
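/*
 * A sketch of the free-path swap heuristic above (toy_* names are
 * illustrative): when the free bucket is full, trade it for the alloc
 * bucket if the latter has more room, and keep freeing into that.
 */
#if 0
#include <stddef.h>

struct toy_bucket {
	int	tb_cnt;		/* filled slots */
	int	tb_entries;	/* capacity */
};

struct toy_cache {
	struct toy_bucket *tc_allocbucket;
	struct toy_bucket *tc_freebucket;
};

static struct toy_bucket *
toy_pick_freebucket(struct toy_cache *c)
{
	struct toy_bucket *tmp;

	if (c->tc_freebucket != NULL &&
	    c->tc_freebucket->tb_cnt >= c->tc_freebucket->tb_entries &&
	    c->tc_allocbucket != NULL &&
	    c->tc_allocbucket->tb_cnt < c->tc_freebucket->tb_cnt) {
		/* Swap: the emptier bucket becomes the free bucket. */
		tmp = c->tc_freebucket;
		c->tc_freebucket = c->tc_allocbucket;
		c->tc_allocbucket = tmp;
	}
	return (c->tc_freebucket);
}
#endif
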
2251 /*
2252  * Frees an item to an INTERNAL zone or allocates a free bucket
2253  *
2254  * Arguments:
2255  *	zone   The zone to free to
2256  *	item   The item we're freeing
2257  *	udata  User supplied data for the dtor
2258  *	skip   Skip the dtor; it was already done in uma_zfree_arg
2259  */
2260 static void
2261 uma_zfree_internal(uma_zone_t zone, void *item, void *udata, int skip)
2262 {
2263 	uma_slab_t slab;
2264 	uma_keg_t keg;
2265 	u_int8_t *mem;
2266 	u_int8_t freei;
2267 
2268 	keg = zone->uz_keg;
2269 
2270 	if (!skip && zone->uz_dtor)
2271 		zone->uz_dtor(item, keg->uk_size, udata);
2272 	if (zone->uz_fini)
2273 		zone->uz_fini(item, keg->uk_size);
2274 
2275 	ZONE_LOCK(zone);
2276 
2277 	if (!(keg->uk_flags & UMA_ZONE_MALLOC)) {
2278 		mem = (u_int8_t *)((unsigned long)item & (~UMA_SLAB_MASK));
2279 		if (keg->uk_flags & UMA_ZONE_HASH)
2280 			slab = hash_sfind(&keg->uk_hash, mem);
2281 		else {
2282 			mem += keg->uk_pgoff;
2283 			slab = (uma_slab_t)mem;
2284 		}
2285 	} else {
2286 		slab = (uma_slab_t)udata;
2287 	}
2288 
2289 	/* Do we need to move the slab to a different list? */
2290 	if (slab->us_freecount+1 == keg->uk_ipers) {
2291 		LIST_REMOVE(slab, us_link);
2292 		LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
2293 	} else if (slab->us_freecount == 0) {
2294 		LIST_REMOVE(slab, us_link);
2295 		LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
2296 	}
2297 
2298 	/* Compute the item's index and chain it back onto the free list */
2299 	freei = ((unsigned long)item - (unsigned long)slab->us_data)
2300 		/ keg->uk_rsize;
2301 
2302 #ifdef INVARIANTS
2303 	if (!skip)
2304 		uma_dbg_free(zone, slab, item);
2305 #endif
2306 
2307 	slab->us_freelist[freei].us_item = slab->us_firstfree;
2308 	slab->us_firstfree = freei;
2309 	slab->us_freecount++;
2310 
2311 	/* Keg statistics */
2312 	keg->uk_free++;
2313 
2314 	if (keg->uk_flags & UMA_ZFLAG_FULL) {
2315 		if (keg->uk_pages < keg->uk_maxpages)
2316 			keg->uk_flags &= ~UMA_ZFLAG_FULL;
2317 
2318 		/* We can handle one more allocation */
2319 		wakeup_one(keg);
2320 	}
2321 
2322 	ZONE_UNLOCK(zone);
2323 }
2324 
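/*
 * A compilable demonstration of the address arithmetic above: for
 * non-UMA_ZONE_MALLOC zones the slab is found by masking the item
 * address down to its slab boundary, and the item's index by dividing
 * the offset by the rounded size.  The TOY_* constants are illustrative.
 */
#if 0
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TOY_SLAB_SIZE	4096			/* one page per slab */
#define TOY_SLAB_MASK	(TOY_SLAB_SIZE - 1)
#define TOY_RSIZE	64			/* rounded item size */

int
main(void)
{
	uint8_t *page, *item;
	unsigned freei;

	/* A page-aligned slab, as page_alloc would hand back. */
	page = aligned_alloc(TOY_SLAB_SIZE, TOY_SLAB_SIZE);
	if (page == NULL)
		return (1);
	item = page + 3 * TOY_RSIZE;

	/* Mask the low bits off to recover the slab base... */
	assert((uint8_t *)((uintptr_t)item & ~(uintptr_t)TOY_SLAB_MASK)
	    == page);
	/* ...and divide the offset to recover the item index. */
	freei = (unsigned)(item - page) / TOY_RSIZE;
	printf("item index %u\n", freei);	/* prints 3 */
	free(page);
	return (0);
}
#endif
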
2325 /* See uma.h */
2326 void
2327 uma_zone_set_max(uma_zone_t zone, int nitems)
2328 {
2329 	uma_keg_t keg;
2330 
2331 	keg = zone->uz_keg;
2332 	ZONE_LOCK(zone);
2333 	if (keg->uk_ppera > 1)
2334 		keg->uk_maxpages = nitems * keg->uk_ppera;
2335 	else
2336 		keg->uk_maxpages = nitems / keg->uk_ipers;
2337 
2338 	if (keg->uk_maxpages * keg->uk_ipers < nitems)
2339 		keg->uk_maxpages++;
2340 
2341 	ZONE_UNLOCK(zone);
2342 }
2343 
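/*
 * A worked example of the rounding above, assuming the common
 * single-page case (uk_ppera == 1); toy_maxpages is illustrative.
 * With 10 items per page, a 25-item limit yields 25 / 10 = 2 pages,
 * then 3 after the round-up, so the effective cap is 30 items: the
 * limit is rounded up to whole slabs.
 */
#if 0
static int
toy_maxpages(int nitems, int ipers)
{
	int pages;

	pages = nitems / ipers;
	if (pages * ipers < nitems)
		pages++;		/* round up to a whole slab */
	return (pages);
}
#endif
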
2344 /* See uma.h */
2345 void
2346 uma_zone_set_init(uma_zone_t zone, uma_init uminit)
2347 {
2348 	ZONE_LOCK(zone);
2349 	KASSERT(zone->uz_keg->uk_pages == 0,
2350 	    ("uma_zone_set_init on non-empty keg"));
2351 	zone->uz_keg->uk_init = uminit;
2352 	ZONE_UNLOCK(zone);
2353 }
2354 
2355 /* See uma.h */
2356 void
2357 uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
2358 {
2359 	ZONE_LOCK(zone);
2360 	KASSERT(zone->uz_keg->uk_pages == 0,
2361 	    ("uma_zone_set_fini on non-empty keg"));
2362 	zone->uz_keg->uk_fini = fini;
2363 	ZONE_UNLOCK(zone);
2364 }
2365 
2366 /* See uma.h */
2367 void
2368 uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
2369 {
2370 	ZONE_LOCK(zone);
2371 	KASSERT(zone->uz_keg->uk_pages == 0,
2372 	    ("uma_zone_set_zinit on non-empty keg"));
2373 	zone->uz_init = zinit;
2374 	ZONE_UNLOCK(zone);
2375 }
2376 
2377 /* See uma.h */
2378 void
2379 uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
2380 {
2381 	ZONE_LOCK(zone);
2382 	KASSERT(zone->uz_keg->uk_pages == 0,
2383 	    ("uma_zone_set_zfini on non-empty keg"));
2384 	zone->uz_fini = zfini;
2385 	ZONE_UNLOCK(zone);
2386 }
2387 
2388 /* See uma.h */
2389 void
2390 uma_zone_set_freef(uma_zone_t zone, uma_free freef)
2391 {
2392 	ZONE_LOCK(zone);
2393 	zone->uz_keg->uk_freef = freef;
2394 	ZONE_UNLOCK(zone);
2395 }
2396 
2397 /* See uma.h */
2398 void
2399 uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
2400 {
2401 	ZONE_LOCK(zone);
2402 	zone->uz_keg->uk_flags |= UMA_ZFLAG_PRIVALLOC;
2403 	zone->uz_keg->uk_allocf = allocf;
2404 	ZONE_UNLOCK(zone);
2405 }
2406 
2407 /* See uma.h */
2408 int
2409 uma_zone_set_obj(uma_zone_t zone, struct vm_object *obj, int count)
2410 {
2411 	uma_keg_t keg;
2412 	vm_offset_t kva;
2413 	int pages;
2414 
2415 	keg = zone->uz_keg;
2416 	pages = count / keg->uk_ipers;
2417 
2418 	if (pages * keg->uk_ipers < count)
2419 		pages++;
2420 
2421 	kva = kmem_alloc_nofault(kernel_map, pages * UMA_SLAB_SIZE);
2422 
2423 	if (kva == 0)
2424 		return (0);
2425 	if (obj == NULL) {
2426 		obj = vm_object_allocate(OBJT_DEFAULT,
2427 		    pages);
2428 	} else {
2429 		VM_OBJECT_LOCK_INIT(obj, "uma object");
2430 		_vm_object_allocate(OBJT_DEFAULT,
2431 		    pages, obj);
2432 	}
2433 	ZONE_LOCK(zone);
2434 	keg->uk_kva = kva;
2435 	keg->uk_obj = obj;
2436 	keg->uk_maxpages = pages;
2437 	keg->uk_allocf = obj_alloc;
2438 	keg->uk_flags |= UMA_ZONE_NOFREE | UMA_ZFLAG_PRIVALLOC;
2439 	ZONE_UNLOCK(zone);
2440 	return (1);
2441 }
2442 
2443 /* See uma.h */
2444 void
2445 uma_prealloc(uma_zone_t zone, int items)
2446 {
2447 	int slabs;
2448 	uma_slab_t slab;
2449 	uma_keg_t keg;
2450 
2451 	keg = zone->uz_keg;
2452 	ZONE_LOCK(zone);
2453 	slabs = items / keg->uk_ipers;
2454 	if (slabs * keg->uk_ipers < items)
2455 		slabs++;
2456 	while (slabs > 0) {
2457 		slab = slab_zalloc(zone, M_WAITOK);
2458 		LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
2459 		slabs--;
2460 	}
2461 	ZONE_UNLOCK(zone);
2462 }
2463 
2464 /* See uma.h */
2465 u_int32_t *
2466 uma_find_refcnt(uma_zone_t zone, void *item)
2467 {
2468 	uma_slabrefcnt_t slab;
2469 	uma_keg_t keg;
2470 	u_int32_t *refcnt;
2471 	int idx;
2472 
2473 	keg = zone->uz_keg;
2474 	slab = (uma_slabrefcnt_t)vtoslab((vm_offset_t)item & (~UMA_SLAB_MASK));
2475 	KASSERT(slab != NULL,
2476 	    ("uma_find_refcnt(): zone possibly not UMA_ZONE_REFCNT"));
2477 	idx = ((unsigned long)item - (unsigned long)slab->us_data)
2478 	    / keg->uk_rsize;
2479 	refcnt = &(slab->us_freelist[idx].us_refcnt);
2480 	return (refcnt);
2481 }
2482 
2483 /* See uma.h */
2484 void
2485 uma_reclaim(void)
2486 {
2487 #ifdef UMA_DEBUG
2488 	printf("UMA: vm asked us to release pages!\n");
2489 #endif
2490 	bucket_enable();
2491 	zone_foreach(zone_drain);
2492 	/*
2493 	 * Some slabs may have been freed, but slabzone is visited early in
2494 	 * the pass above; drain it again so we can free pages that became
2495 	 * empty once the other zones were drained.  Likewise for buckets.
2496 	 */
2497 	zone_drain(slabzone);
2498 	zone_drain(slabrefzone);
2499 	bucket_zone_drain();
2500 }
2501 
2502 void *
2503 uma_large_malloc(int size, int wait)
2504 {
2505 	void *mem;
2506 	uma_slab_t slab;
2507 	u_int8_t flags;
2508 
2509 	slab = uma_zalloc_internal(slabzone, NULL, wait);
2510 	if (slab == NULL)
2511 		return (NULL);
2512 	mem = page_alloc(NULL, size, &flags, wait);
2513 	if (mem) {
2514 		vsetslab((vm_offset_t)mem, slab);
2515 		slab->us_data = mem;
2516 		slab->us_flags = flags | UMA_SLAB_MALLOC;
2517 		slab->us_size = size;
2518 	} else {
2519 		uma_zfree_internal(slabzone, slab, NULL, 0);
2520 	}
2521 
2522 	return (mem);
2523 }
2524 
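/*
 * The back-out pattern above, sketched in userland with illustrative
 * toy_* names: grab the metadata first, then the memory, and release
 * the metadata again if the memory allocation fails.
 */
#if 0
#include <stddef.h>
#include <stdlib.h>

struct toy_hdr {
	void	*th_data;
	size_t	 th_size;
};

static struct toy_hdr *
toy_large_malloc(size_t size)
{
	struct toy_hdr *hdr;

	hdr = malloc(sizeof(*hdr));
	if (hdr == NULL)
		return (NULL);
	hdr->th_data = malloc(size);
	if (hdr->th_data == NULL) {
		free(hdr);		/* back out the header */
		return (NULL);
	}
	hdr->th_size = size;
	return (hdr);
}
#endif
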
2525 void
2526 uma_large_free(uma_slab_t slab)
2527 {
2528 	vsetobj((vm_offset_t)slab->us_data, kmem_object);
2529 	page_free(slab->us_data, slab->us_size, slab->us_flags);
2530 	uma_zfree_internal(slabzone, slab, NULL, 0);
2531 }
2532 
2533 void
2534 uma_print_stats(void)
2535 {
2536 	zone_foreach(uma_print_zone);
2537 }
2538 
2539 static void
2540 slab_print(uma_slab_t slab)
2541 {
2542 	printf("slab: keg %p, data %p, freecount %d, firstfree %d\n",
2543 		slab->us_keg, slab->us_data, slab->us_freecount,
2544 		slab->us_firstfree);
2545 }
2546 
2547 static void
2548 cache_print(uma_cache_t cache)
2549 {
2550 	printf("alloc: %p(%d), free: %p(%d)\n",
2551 		cache->uc_allocbucket,
2552 		cache->uc_allocbucket ? cache->uc_allocbucket->ub_cnt : 0,
2553 		cache->uc_freebucket,
2554 		cache->uc_freebucket ? cache->uc_freebucket->ub_cnt : 0);
2555 }
2556 
2557 void
2558 uma_print_zone(uma_zone_t zone)
2559 {
2560 	uma_cache_t cache;
2561 	uma_keg_t keg;
2562 	uma_slab_t slab;
2563 	int i;
2564 
2565 	keg = zone->uz_keg;
2566 	printf("%s(%p) size %d(%d) flags %d ipers %d ppera %d out %d free %d\n",
2567 	    zone->uz_name, zone, keg->uk_size, keg->uk_rsize, keg->uk_flags,
2568 	    keg->uk_ipers, keg->uk_ppera,
2569 	    (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free);
2570 	printf("Part slabs:\n");
2571 	LIST_FOREACH(slab, &keg->uk_part_slab, us_link)
2572 		slab_print(slab);
2573 	printf("Free slabs:\n");
2574 	LIST_FOREACH(slab, &keg->uk_free_slab, us_link)
2575 		slab_print(slab);
2576 	printf("Full slabs:\n");
2577 	LIST_FOREACH(slab, &keg->uk_full_slab, us_link)
2578 		slab_print(slab);
2579 	for (i = 0; i <= mp_maxid; i++) {
2580 		if (CPU_ABSENT(i))
2581 			continue;
2582 		cache = &zone->uz_cpu[i];
2583 		printf("CPU %d Cache:\n", i);
2584 		cache_print(cache);
2585 	}
2586 }
2587 
2588 /*
2589  * Sysctl handler for vm.zone
2590  *
2591  * stolen from vm_zone.c
2592  */
2593 static int
2594 sysctl_vm_zone(SYSCTL_HANDLER_ARGS)
2595 {
2596 	int error, len, cnt;
2597 	const int linesize = 128;	/* conservative */
2598 	int totalfree;
2599 	char *tmpbuf, *offset;
2600 	uma_zone_t z;
2601 	uma_keg_t zk;
2602 	char *p;
2603 	int cpu;
2604 	int cachefree;
2605 	uma_bucket_t bucket;
2606 	uma_cache_t cache;
2607 
2608 	cnt = 0;
2609 	mtx_lock(&uma_mtx);
2610 	LIST_FOREACH(zk, &uma_kegs, uk_link) {
2611 		LIST_FOREACH(z, &zk->uk_zones, uz_link)
2612 			cnt++;
2613 	}
2614 	mtx_unlock(&uma_mtx);
2615 	MALLOC(tmpbuf, char *, (cnt == 0 ? 1 : cnt) * linesize,
2616 			M_TEMP, M_WAITOK);
2617 	len = snprintf(tmpbuf, linesize,
2618 	    "\nITEM            SIZE     LIMIT     USED    FREE  REQUESTS\n\n");
2619 	if (cnt == 0)
2620 		tmpbuf[len - 1] = '\0';
2621 	error = SYSCTL_OUT(req, tmpbuf, cnt == 0 ? len-1 : len);
2622 	if (error || cnt == 0)
2623 		goto out;
2624 	offset = tmpbuf;
2625 	mtx_lock(&uma_mtx);
2626 	LIST_FOREACH(zk, &uma_kegs, uk_link) {
2627 	  LIST_FOREACH(z, &zk->uk_zones, uz_link) {
2628 		if (cnt == 0)	/* list may have changed size */
2629 			break;
2630 		if (!(zk->uk_flags & UMA_ZFLAG_INTERNAL)) {
2631 			for (cpu = 0; cpu <= mp_maxid; cpu++) {
2632 				if (CPU_ABSENT(cpu))
2633 					continue;
2634 				CPU_LOCK(cpu);
2635 			}
2636 		}
2637 		ZONE_LOCK(z);
2638 		cachefree = 0;
2639 		if (!(zk->uk_flags & UMA_ZFLAG_INTERNAL)) {
2640 			for (cpu = 0; cpu <= mp_maxid; cpu++) {
2641 				if (CPU_ABSENT(cpu))
2642 					continue;
2643 				cache = &z->uz_cpu[cpu];
2644 				if (cache->uc_allocbucket != NULL)
2645 					cachefree += cache->uc_allocbucket->ub_cnt;
2646 				if (cache->uc_freebucket != NULL)
2647 					cachefree += cache->uc_freebucket->ub_cnt;
2648 				CPU_UNLOCK(cpu);
2649 			}
2650 		}
2651 		LIST_FOREACH(bucket, &z->uz_full_bucket, ub_link) {
2652 			cachefree += bucket->ub_cnt;
2653 		}
2654 		totalfree = zk->uk_free + cachefree;
2655 		len = snprintf(offset, linesize,
2656 		    "%-12.12s  %6.6u, %8.8u, %6.6u, %6.6u, %8.8llu\n",
2657 		    z->uz_name, zk->uk_size,
2658 		    zk->uk_maxpages * zk->uk_ipers,
2659 		    (zk->uk_ipers * (zk->uk_pages / zk->uk_ppera)) - totalfree,
2660 		    totalfree,
2661 		    (unsigned long long)z->uz_allocs);
2662 		ZONE_UNLOCK(z);
2663 		for (p = offset + 12; p > offset && *p == ' '; --p)
2664 			/* nothing */ ;
2665 		p[1] = ':';
2666 		cnt--;
2667 		offset += len;
2668 	  }
2669 	}
2670 	mtx_unlock(&uma_mtx);
2671 	*offset++ = '\0';
2672 	error = SYSCTL_OUT(req, tmpbuf, offset - tmpbuf);
2673 out:
2674 	FREE(tmpbuf, M_TEMP);
2675 	return (error);
2676 }
2677