xref: /freebsd/sys/vm/uma_core.c (revision 7660b554bc59a07be0431c17e0e33815818baa69)
1 /*
2  * Copyright (c) 2002, Jeffrey Roberson <jeff@freebsd.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 /*
28  * uma_core.c  Implementation of the Universal Memory Allocator
29  *
30  * This allocator is intended to replace the multitude of similar object caches
31  * in the standard FreeBSD kernel.  The intent is to be flexible as well as
32  * efficient.  A primary design goal is to return unused memory to the rest of
33  * the system.  This will make the system as a whole more flexible due to the
34  * ability to move memory to subsystems which most need it instead of leaving
35  * pools of reserved memory unused.
36  *
37  * The basic ideas stem from similar slab/zone based allocators whose algorithms
38  * are well known.
39  *
40  */
41 
42 /*
43  * TODO:
44  *	- Improve memory usage for large allocations
45  *	- Investigate cache size adjustments
46  */
47 
48 #include <sys/cdefs.h>
49 __FBSDID("$FreeBSD$");
50 
51 /* I should really use ktr.. */
52 /*
53 #define UMA_DEBUG 1
54 #define UMA_DEBUG_ALLOC 1
55 #define UMA_DEBUG_ALLOC_1 1
56 */
57 
58 #include "opt_param.h"
59 #include <sys/param.h>
60 #include <sys/systm.h>
61 #include <sys/kernel.h>
62 #include <sys/types.h>
63 #include <sys/queue.h>
64 #include <sys/malloc.h>
65 #include <sys/lock.h>
66 #include <sys/sysctl.h>
67 #include <sys/mutex.h>
68 #include <sys/proc.h>
69 #include <sys/smp.h>
70 #include <sys/vmmeter.h>
71 #include <sys/mbuf.h>
72 
73 #include <vm/vm.h>
74 #include <vm/vm_object.h>
75 #include <vm/vm_page.h>
76 #include <vm/vm_param.h>
77 #include <vm/vm_map.h>
78 #include <vm/vm_kern.h>
79 #include <vm/vm_extern.h>
80 #include <vm/uma.h>
81 #include <vm/uma_int.h>
82 #include <vm/uma_dbg.h>
83 
84 #include <machine/vmparam.h>
85 
86 /*
87  * This is the zone from which all zones are spawned.  The idea is that even
88  * the zone heads are allocated from the allocator, so we use the bss section
89  * to bootstrap us.
90  */
91 static struct uma_zone masterzone;
92 static uma_zone_t zones = &masterzone;
93 
94 /* This is the zone from which all of uma_slab_t's are allocated. */
95 static uma_zone_t slabzone;
96 
97 /*
98  * The initial hash tables come out of this zone so they can be allocated
99  * prior to malloc coming up.
100  */
101 static uma_zone_t hashzone;
102 
103 static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");
104 
105 /*
106  * Are we allowed to allocate buckets?
107  */
108 static int bucketdisable = 1;
109 
110 /* Linked list of all zones in the system */
111 static LIST_HEAD(,uma_zone) uma_zones = LIST_HEAD_INITIALIZER(&uma_zones);
112 
113 /* This mutex protects the zone list */
114 static struct mtx uma_mtx;
115 
116 /* These are the pcpu cache locks */
117 static struct mtx uma_pcpu_mtx[MAXCPU];
118 
119 /* Linked list of boot time pages */
120 static LIST_HEAD(,uma_slab) uma_boot_pages =
121     LIST_HEAD_INITIALIZER(&uma_boot_pages);
122 
123 /* Count of free boottime pages */
124 static int uma_boot_free = 0;
125 
126 /* Is the VM done starting up? */
127 static int booted = 0;
128 
129 /* This is the handle used to schedule our working set calculator */
130 static struct callout uma_callout;
131 
132 /* This is mp_maxid + 1, for use while looping over each cpu */
133 static int maxcpu;
134 
135 /*
136  * This structure is passed as the zone ctor arg so that I don't have to create
137  * a special allocation function just for zones.
138  */
139 struct uma_zctor_args {
140 	char *name;
141 	size_t size;
142 	uma_ctor ctor;
143 	uma_dtor dtor;
144 	uma_init uminit;
145 	uma_fini fini;
146 	int align;
147 	u_int16_t flags;
148 };
149 
150 struct uma_bucket_zone {
151 	uma_zone_t	ubz_zone;
152 	char		*ubz_name;
153 	int		ubz_entries;
154 };
155 
156 #define	BUCKET_MAX	128
157 
158 struct uma_bucket_zone bucket_zones[] = {
159 	{ NULL, "16 Bucket", 16 },
160 	{ NULL, "32 Bucket", 32 },
161 	{ NULL, "64 Bucket", 64 },
162 	{ NULL, "128 Bucket", 128 },
163 	{ NULL, NULL, 0}
164 };
165 
166 #define	BUCKET_SHIFT	4
167 #define	BUCKET_ZONES	((BUCKET_MAX >> BUCKET_SHIFT) + 1)
168 
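/*
 * bucket_size[] maps a requested entry count, rounded up to the next
 * multiple of (1 << BUCKET_SHIFT), to an index into bucket_zones[].  It is
 * filled in by bucket_init() and consulted by bucket_alloc() and
 * bucket_free(); e.g. a request for 20 entries maps to the "32 Bucket" zone.
 */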
169 uint8_t bucket_size[BUCKET_ZONES];
170 
171 /* Prototypes.. */
172 
173 static void *obj_alloc(uma_zone_t, int, u_int8_t *, int);
174 static void *page_alloc(uma_zone_t, int, u_int8_t *, int);
175 static void page_free(void *, int, u_int8_t);
176 static uma_slab_t slab_zalloc(uma_zone_t, int);
177 static void cache_drain(uma_zone_t, int);
178 static void bucket_drain(uma_zone_t, uma_bucket_t);
179 static void zone_drain_common(uma_zone_t, int);
180 static void zone_ctor(void *, int, void *);
181 static void zone_dtor(void *, int, void *);
182 static void zero_init(void *, int);
183 static void zone_small_init(uma_zone_t zone);
184 static void zone_large_init(uma_zone_t zone);
185 static void zone_foreach(void (*zfunc)(uma_zone_t));
186 static void zone_timeout(uma_zone_t zone);
187 static int hash_alloc(struct uma_hash *);
188 static int hash_expand(struct uma_hash *, struct uma_hash *);
189 static void hash_free(struct uma_hash *hash);
190 static void uma_timeout(void *);
191 static void uma_startup3(void);
192 static void *uma_zalloc_internal(uma_zone_t, void *, int);
193 static void uma_zfree_internal(uma_zone_t, void *, void *, int);
194 static void bucket_enable(void);
195 static void bucket_init(void);
196 static uma_bucket_t bucket_alloc(int, int);
197 static void bucket_free(uma_bucket_t);
198 static void bucket_zone_drain(void);
199 static int uma_zalloc_bucket(uma_zone_t zone, int flags);
200 static uma_slab_t uma_zone_slab(uma_zone_t zone, int flags);
201 static void *uma_slab_alloc(uma_zone_t zone, uma_slab_t slab);
202 static __inline void zone_drain(uma_zone_t);
203 
204 void uma_print_zone(uma_zone_t);
205 void uma_print_stats(void);
206 static int sysctl_vm_zone(SYSCTL_HANDLER_ARGS);
207 
208 SYSCTL_OID(_vm, OID_AUTO, zone, CTLTYPE_STRING|CTLFLAG_RD,
209     NULL, 0, sysctl_vm_zone, "A", "Zone Info");
210 SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);
211 
212 /*
213  * This routine checks whether it's safe to enable buckets.
214  */
215 
216 static void
217 bucket_enable(void)
218 {
219 	if (cnt.v_free_count < cnt.v_free_min)
220 		bucketdisable = 1;
221 	else
222 		bucketdisable = 0;
223 }
224 
225 static void
226 bucket_init(void)
227 {
228 	struct uma_bucket_zone *ubz;
229 	int i;
230 	int j;
231 
232 	for (i = 0, j = 0; bucket_zones[j].ubz_entries != 0; j++) {
233 		int size;
234 
235 		ubz = &bucket_zones[j];
236 		size = roundup(sizeof(struct uma_bucket), sizeof(void *));
237 		size += sizeof(void *) * ubz->ubz_entries;
238 		ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
239 	    	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
240 		for (; i <= ubz->ubz_entries; i += (1 << BUCKET_SHIFT))
241 			bucket_size[i >> BUCKET_SHIFT] = j;
242 	}
243 }
244 
245 static uma_bucket_t
246 bucket_alloc(int entries, int bflags)
247 {
248 	struct uma_bucket_zone *ubz;
249 	uma_bucket_t bucket;
250 	int idx;
251 
252 	/*
253 	 * This is to stop us from allocating per cpu buckets while we're
254 	 * running out of UMA_BOOT_PAGES.  Otherwise, we would exhaust the
255 	 * boot pages.  This also prevents us from allocating buckets in
256 	 * low memory situations.
257 	 */
258 
259 	if (bucketdisable)
260 		return (NULL);
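	/*
	 * Round the requested entry count up to the next multiple of
	 * (1 << BUCKET_SHIFT) and use bucket_size[] to pick the smallest
	 * bucket zone that can hold it.
	 */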
261 	idx = howmany(entries, 1 << BUCKET_SHIFT);
262 	ubz = &bucket_zones[bucket_size[idx]];
263 	bucket = uma_zalloc_internal(ubz->ubz_zone, NULL, bflags);
264 	if (bucket) {
265 #ifdef INVARIANTS
266 		bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
267 #endif
268 		bucket->ub_cnt = 0;
269 		bucket->ub_entries = ubz->ubz_entries;
270 	}
271 
272 	return (bucket);
273 }
274 
275 static void
276 bucket_free(uma_bucket_t bucket)
277 {
278 	struct uma_bucket_zone *ubz;
279 	int idx;
280 
281 	idx = howmany(bucket->ub_entries, 1 << BUCKET_SHIFT);
282 	ubz = &bucket_zones[bucket_size[idx]];
283 	uma_zfree_internal(ubz->ubz_zone, bucket, NULL, 0);
284 }
285 
286 static void
287 bucket_zone_drain(void)
288 {
289 	struct uma_bucket_zone *ubz;
290 
291 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
292 		zone_drain(ubz->ubz_zone);
293 }
294 
296 /*
297  * Routine called from our callout to fire off time interval based
298  * calculations.  (working set, stats, etc.)
299  *
300  * Arguments:
301  *	arg   Unused
302  *
303  * Returns:
304  *	Nothing
305  */
306 static void
307 uma_timeout(void *unused)
308 {
309 	bucket_enable();
310 	zone_foreach(zone_timeout);
311 
312 	/* Reschedule this event */
313 	callout_reset(&uma_callout, UMA_WORKING_TIME * hz, uma_timeout, NULL);
314 }
315 
316 /*
317  * Routine to perform timeout driven calculations.  This computes the working
318  * set size, expands the hash table, and aggregates per cpu statistics.
319  *
320  *  Arguments:
321  *	zone  The zone to operate on
322  *
323  *  Returns:
324  *	Nothing
325  */
326 static void
327 zone_timeout(uma_zone_t zone)
328 {
329 	uma_cache_t cache;
330 	u_int64_t alloc;
331 	int cpu;
332 
333 	alloc = 0;
334 
335 	/*
336 	 * Aggregate per cpu cache statistics back to the zone.
337 	 *
338 	 * I may rewrite this to set a flag in the per cpu cache instead of
339 	 * locking.  If the flag is not cleared on the next round I will have
340 	 * to lock and do it here instead so that the statistics don't get too
341 	 * far out of sync.
342 	 */
343 	if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL)) {
344 		for (cpu = 0; cpu < maxcpu; cpu++) {
345 			if (CPU_ABSENT(cpu))
346 				continue;
347 			CPU_LOCK(cpu);
348 			cache = &zone->uz_cpu[cpu];
349 			/* Add them up, and reset */
350 			alloc += cache->uc_allocs;
351 			cache->uc_allocs = 0;
352 			CPU_UNLOCK(cpu);
353 		}
354 	}
355 
356 	/* Now push these stats back into the zone.. */
357 	ZONE_LOCK(zone);
358 	zone->uz_allocs += alloc;
359 
360 	/*
361 	 * Expand the zone hash table.
362 	 *
363 	 * This is done if the number of slabs is larger than the hash size.
364  * What I'm trying to do here is eliminate collisions entirely.  This
365 	 * may be a little aggressive.  Should I allow for two collisions max?
366 	 */
367 
368 	if (zone->uz_flags & UMA_ZONE_HASH &&
369 	    zone->uz_pages / zone->uz_ppera >= zone->uz_hash.uh_hashsize) {
370 		struct uma_hash newhash;
371 		struct uma_hash oldhash;
372 		int ret;
373 
374 		/*
375 		 * This is so involved because allocating and freeing
376 		 * while the zone lock is held will lead to deadlock.
377 		 * I have to do everything in stages and check for
378 		 * races.
379 		 */
380 		newhash = zone->uz_hash;
381 		ZONE_UNLOCK(zone);
382 		ret = hash_alloc(&newhash);
383 		ZONE_LOCK(zone);
384 		if (ret) {
385 			if (hash_expand(&zone->uz_hash, &newhash)) {
386 				oldhash = zone->uz_hash;
387 				zone->uz_hash = newhash;
388 			} else
389 				oldhash = newhash;
390 
391 			ZONE_UNLOCK(zone);
392 			hash_free(&oldhash);
393 			ZONE_LOCK(zone);
394 		}
395 	}
396 
397 	/*
398 	 * Here we compute the working set size as the total number of items
399 	 * left outstanding since the last time interval.  This is slightly
400 	 * suboptimal. What we really want is the highest number of outstanding
401 	 * items during the last time quantum.  This should be close enough.
402 	 *
403 	 * The working set size is used to throttle the zone_drain function.
404 	 * We don't want to return memory that we may need again immediately.
405 	 */
406 	alloc = zone->uz_allocs - zone->uz_oallocs;
407 	zone->uz_oallocs = zone->uz_allocs;
408 	zone->uz_wssize = alloc;
409 
410 	ZONE_UNLOCK(zone);
411 }
412 
413 /*
414  * Allocate and zero fill the next sized hash table from the appropriate
415  * backing store.
416  *
417  * Arguments:
418  *	hash  A new hash structure with the old hash size in uh_hashsize
419  *
420  * Returns:
421  *	1 on success and 0 on failure.
422  */
423 static int
424 hash_alloc(struct uma_hash *hash)
425 {
426 	int oldsize;
427 	int alloc;
428 
429 	oldsize = hash->uh_hashsize;
430 
431 	/* We're just going to go to a power of two greater */
432 	if (oldsize)  {
433 		hash->uh_hashsize = oldsize * 2;
434 		alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
435 		hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
436 		    M_UMAHASH, M_NOWAIT);
437 	} else {
438 		alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
439 		hash->uh_slab_hash = uma_zalloc_internal(hashzone, NULL,
440 		    M_WAITOK);
441 		hash->uh_hashsize = UMA_HASH_SIZE_INIT;
442 	}
443 	if (hash->uh_slab_hash) {
444 		bzero(hash->uh_slab_hash, alloc);
445 		hash->uh_hashmask = hash->uh_hashsize - 1;
446 		return (1);
447 	}
448 
449 	return (0);
450 }
451 
452 /*
453  * Expands the hash table for HASH zones.  This is done from zone_timeout
454  * to reduce collisions.  This must not be done in the regular allocation
455  * path, otherwise, we can recurse on the vm while allocating pages.
456  *
457  * Arguments:
458  *	oldhash  The hash you want to expand
459  *	newhash  The hash structure for the new table
460  *
461  * Returns:
462  * 	1 on success and 0 on failure.
465  */
466 static int
467 hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
468 {
469 	uma_slab_t slab;
470 	int hval;
471 	int i;
472 
473 	if (!newhash->uh_slab_hash)
474 		return (0);
475 
476 	if (oldhash->uh_hashsize >= newhash->uh_hashsize)
477 		return (0);
478 
479 	/*
480 	 * I need to investigate hash algorithms for resizing without a
481 	 * full rehash.
482 	 */
483 
484 	for (i = 0; i < oldhash->uh_hashsize; i++)
485 		while (!SLIST_EMPTY(&oldhash->uh_slab_hash[i])) {
486 			slab = SLIST_FIRST(&oldhash->uh_slab_hash[i]);
487 			SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[i], us_hlink);
488 			hval = UMA_HASH(newhash, slab->us_data);
489 			SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
490 			    slab, us_hlink);
491 		}
492 
493 	return (1);
494 }
495 
496 /*
497  * Free the hash bucket to the appropriate backing store.
498  *
499  * Arguments:
500  *	hash  The hash table whose bucket array we're freeing
502  *
503  * Returns:
504  *	Nothing
505  */
506 static void
507 hash_free(struct uma_hash *hash)
508 {
509 	if (hash->uh_slab_hash == NULL)
510 		return;
511 	if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
512 		uma_zfree_internal(hashzone,
513 		    hash->uh_slab_hash, NULL, 0);
514 	else
515 		free(hash->uh_slab_hash, M_UMAHASH);
516 }
517 
518 /*
519  * Frees all outstanding items in a bucket
520  *
521  * Arguments:
522  *	zone   The zone to free to, must be unlocked.
523  *	bucket The free/alloc bucket with items, cpu queue must be locked.
524  *
525  * Returns:
526  *	Nothing
527  */
528 
529 static void
530 bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
531 {
532 	uma_slab_t slab;
533 	int mzone;
534 	void *item;
535 
536 	if (bucket == NULL)
537 		return;
538 
539 	slab = NULL;
540 	mzone = 0;
541 
542 	/* We have to lookup the slab again for malloc.. */
543 	if (zone->uz_flags & UMA_ZONE_MALLOC)
544 		mzone = 1;
545 
546 	while (bucket->ub_cnt > 0)  {
547 		bucket->ub_cnt--;
548 		item = bucket->ub_bucket[bucket->ub_cnt];
549 #ifdef INVARIANTS
550 		bucket->ub_bucket[bucket->ub_cnt] = NULL;
551 		KASSERT(item != NULL,
552 		    ("bucket_drain: botched ptr, item is NULL"));
553 #endif
554 		/*
555 		 * This is extremely inefficient.  The slab pointer was passed
556 		 * to uma_zfree_arg, but we lost it because the buckets don't
557 		 * hold them.  This will go away when free() gets a size passed
558 		 * to it.
559 		 */
560 		if (mzone)
561 			slab = vtoslab((vm_offset_t)item & (~UMA_SLAB_MASK));
562 		uma_zfree_internal(zone, item, slab, 1);
563 	}
564 }
565 
566 /*
567  * Drains the per cpu caches for a zone.
568  *
569  * Arguments:
570  *	zone     The zone to drain, must be unlocked.
571  *	destroy  Whether or not to destroy the pcpu buckets (from zone_dtor)
572  *
573  * Returns:
574  *	Nothing
575  *
576  * This function returns with the zone locked so that the per cpu queues can
577  * not be filled until zone_drain is finished.
578  */
579 static void
580 cache_drain(uma_zone_t zone, int destroy)
581 {
582 	uma_bucket_t bucket;
583 	uma_cache_t cache;
584 	int cpu;
585 
586 	/*
587 	 * Flush out the per cpu queues.
588 	 *
589 	 * XXX This causes unnecessary thrashing due to immediately having
590 	 * empty per cpu queues.  I need to improve this.
591 	 */
592 
593 	/*
594 	 * We have to lock each cpu cache before locking the zone
595 	 */
596 	ZONE_UNLOCK(zone);
597 
598 	for (cpu = 0; cpu < maxcpu; cpu++) {
599 		if (CPU_ABSENT(cpu))
600 			continue;
601 		CPU_LOCK(cpu);
602 		cache = &zone->uz_cpu[cpu];
603 		bucket_drain(zone, cache->uc_allocbucket);
604 		bucket_drain(zone, cache->uc_freebucket);
605 		if (destroy) {
606 			if (cache->uc_allocbucket != NULL)
607 				bucket_free(cache->uc_allocbucket);
608 			if (cache->uc_freebucket != NULL)
609 				bucket_free(cache->uc_freebucket);
610 			cache->uc_allocbucket = cache->uc_freebucket = NULL;
611 		}
612 	}
613 
614 	/*
615 	 * Drain the bucket queues and free the buckets, we just keep two per
616 	 * cpu (alloc/free).
617 	 */
618 	ZONE_LOCK(zone);
619 	while ((bucket = LIST_FIRST(&zone->uz_full_bucket)) != NULL) {
620 		LIST_REMOVE(bucket, ub_link);
621 		ZONE_UNLOCK(zone);
622 		bucket_drain(zone, bucket);
623 		bucket_free(bucket);
624 		ZONE_LOCK(zone);
625 	}
626 
627 	/* Now we do the free queue.. */
628 	while ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
629 		LIST_REMOVE(bucket, ub_link);
630 		bucket_free(bucket);
631 	}
632 
633 	/* We unlock here, but they will all block until the zone is unlocked */
634 	for (cpu = 0; cpu < maxcpu; cpu++) {
635 		if (CPU_ABSENT(cpu))
636 			continue;
637 		CPU_UNLOCK(cpu);
638 	}
639 }
640 
641 /*
642  * Frees pages from a zone back to the system.  This is done on demand from
643  * the pageout daemon.
644  *
645  * Arguments:
646  *	zone  The zone to free pages from
647  *	destroy  Whether to destroy the zone and pcpu buckets (from zone_dtor)
649  *
650  * Returns:
651  *	Nothing.
652  */
653 static void
654 zone_drain_common(uma_zone_t zone, int destroy)
655 {
656 	struct slabhead freeslabs = {};
657 	uma_slab_t slab;
658 	uma_slab_t n;
659 	u_int64_t extra;
660 	u_int8_t flags;
661 	u_int8_t *mem;
662 	int i;
663 
664 	/*
665 	 * We don't want to take pages from statically allocated zones at
666 	 * this time.
667 	 */
668 	if (zone->uz_flags & UMA_ZONE_NOFREE || zone->uz_freef == NULL)
669 		return;
670 
671 	ZONE_LOCK(zone);
672 
673 	if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
674 		cache_drain(zone, destroy);
675 
676 	if (destroy)
677 		zone->uz_wssize = 0;
678 
679 	if (zone->uz_free < zone->uz_wssize)
680 		goto finished;
681 #ifdef UMA_DEBUG
682 	printf("%s working set size: %llu free items: %u\n",
683 	    zone->uz_name, (unsigned long long)zone->uz_wssize, zone->uz_free);
684 #endif
685 	extra = zone->uz_free - zone->uz_wssize;
686 	extra /= zone->uz_ipers;
687 
688 	/* extra is now the number of extra slabs that we can free */
689 
690 	if (extra == 0)
691 		goto finished;
692 
693 	slab = LIST_FIRST(&zone->uz_free_slab);
694 	while (slab && extra) {
695 		n = LIST_NEXT(slab, us_link);
696 
697 		/* We have nowhere to free these to */
698 		if (slab->us_flags & UMA_SLAB_BOOT) {
699 			slab = n;
700 			continue;
701 		}
702 
703 		LIST_REMOVE(slab, us_link);
704 		zone->uz_pages -= zone->uz_ppera;
705 		zone->uz_free -= zone->uz_ipers;
706 
707 		if (zone->uz_flags & UMA_ZONE_HASH)
708 			UMA_HASH_REMOVE(&zone->uz_hash, slab, slab->us_data);
709 
710 		SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);
711 
712 		slab = n;
713 		extra--;
714 	}
715 finished:
716 	ZONE_UNLOCK(zone);
717 
718 	while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
719 		SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
720 		if (zone->uz_fini)
721 			for (i = 0; i < zone->uz_ipers; i++)
722 				zone->uz_fini(
723 				    slab->us_data + (zone->uz_rsize * i),
724 				    zone->uz_size);
725 		flags = slab->us_flags;
726 		mem = slab->us_data;
727 
728 		if (zone->uz_flags & UMA_ZONE_OFFPAGE)
729 			uma_zfree_internal(slabzone, slab, NULL, 0);
730 		if (zone->uz_flags & UMA_ZONE_MALLOC) {
731 			vm_object_t obj;
732 
733 			if (flags & UMA_SLAB_KMEM)
734 				obj = kmem_object;
735 			else
736 				obj = NULL;
737 			for (i = 0; i < zone->uz_ppera; i++)
738 				vsetobj((vm_offset_t)mem + (i * PAGE_SIZE),
739 				    obj);
740 		}
741 #ifdef UMA_DEBUG
742 		printf("%s: Returning %d bytes.\n",
743 		    zone->uz_name, UMA_SLAB_SIZE * zone->uz_ppera);
744 #endif
745 		zone->uz_freef(mem, UMA_SLAB_SIZE * zone->uz_ppera, flags);
746 	}
747 
748 }
749 
750 static __inline void
751 zone_drain(uma_zone_t zone)
752 {
753 	zone_drain_common(zone, 0);
754 }
755 
756 /*
757  * Allocate a new slab for a zone.  This does not insert the slab onto a list.
758  *
759  * Arguments:
760  *	zone  The zone to allocate slabs for
761  *	wait  Shall we wait?
762  *
763  * Returns:
764  *	The slab that was allocated or NULL if there is no memory and the
765  *	caller specified M_NOWAIT.
766  */
767 static uma_slab_t
768 slab_zalloc(uma_zone_t zone, int wait)
769 {
770 	uma_slab_t slab;	/* Starting slab */
771 	u_int8_t *mem;
772 	u_int8_t flags;
773 	int i;
774 
775 	slab = NULL;
776 
777 #ifdef UMA_DEBUG
778 	printf("slab_zalloc:  Allocating a new slab for %s\n", zone->uz_name);
779 #endif
780 	ZONE_UNLOCK(zone);
781 
782 	if (zone->uz_flags & UMA_ZONE_OFFPAGE) {
783 		slab = uma_zalloc_internal(slabzone, NULL, wait);
784 		if (slab == NULL) {
785 			ZONE_LOCK(zone);
786 			return NULL;
787 		}
788 	}
789 
790 	/*
791 	 * This reproduces the old vm_zone behavior of zero filling pages the
792 	 * first time they are added to a zone.
793 	 *
794 	 * Malloced items are zeroed in uma_zalloc.
795 	 */
796 
797 	if ((zone->uz_flags & UMA_ZONE_MALLOC) == 0)
798 		wait |= M_ZERO;
799 	else
800 		wait &= ~M_ZERO;
801 
802 	mem = zone->uz_allocf(zone, zone->uz_ppera * UMA_SLAB_SIZE,
803 	    &flags, wait);
804 	if (mem == NULL) {
805 		ZONE_LOCK(zone);
806 		return (NULL);
807 	}
808 
809 	/* Point the slab into the allocated memory */
810 	if (!(zone->uz_flags & UMA_ZONE_OFFPAGE))
811 		slab = (uma_slab_t )(mem + zone->uz_pgoff);
812 
813 	if (zone->uz_flags & UMA_ZONE_MALLOC)
814 		for (i = 0; i < zone->uz_ppera; i++)
815 			vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);
816 
817 	slab->us_zone = zone;
818 	slab->us_data = mem;
819 	slab->us_freecount = zone->uz_ipers;
820 	slab->us_firstfree = 0;
821 	slab->us_flags = flags;
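	/*
	 * Thread the embedded free list through the items: each byte of
	 * us_freelist[] holds the index of the next free item, so item i
	 * initially points at item i + 1.
	 */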
822 	for (i = 0; i < zone->uz_ipers; i++)
823 		slab->us_freelist[i] = i+1;
824 
825 	if (zone->uz_init)
826 		for (i = 0; i < zone->uz_ipers; i++)
827 			zone->uz_init(slab->us_data + (zone->uz_rsize * i),
828 			    zone->uz_size);
829 	ZONE_LOCK(zone);
830 
831 	if (zone->uz_flags & UMA_ZONE_HASH)
832 		UMA_HASH_INSERT(&zone->uz_hash, slab, mem);
833 
834 	zone->uz_pages += zone->uz_ppera;
835 	zone->uz_free += zone->uz_ipers;
836 
837 	return (slab);
838 }
839 
840 /*
841  * Allocates a number of pages from the system
842  *
843  * Arguments:
844  *	zone  Unused
845  *	bytes  The number of bytes requested
846  *	wait  Shall we wait?
847  *
848  * Returns:
849  *	A pointer to the allocated memory or possibly
850  *	NULL if M_NOWAIT is set.
851  */
852 static void *
853 page_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
854 {
855 	void *p;	/* Returned page */
856 
857 	/*
858 	 * Check our small startup cache to see if it has pages remaining.
859 	 */
860 	if (uma_boot_free != 0 && bytes <= PAGE_SIZE) {
861 		uma_slab_t tmps;
862 
863 		tmps = LIST_FIRST(&uma_boot_pages);
864 		LIST_REMOVE(tmps, us_link);
865 		uma_boot_free--;
866 		*pflag = tmps->us_flags;
867 		return (tmps->us_data);
868 	} else if (booted == 0) {
869 		if (bytes > PAGE_SIZE)
870 			panic("UMA: Can't allocate multiple pages before vm "
871 			    "has started.\n");
872 		panic("UMA: Increase UMA_BOOT_PAGES");
873 	}
874 	*pflag = UMA_SLAB_KMEM;
875 	p = (void *) kmem_malloc(kmem_map, bytes, wait);
876 
877 	return (p);
878 }
879 
880 /*
881  * Allocates a number of pages from within an object
882  *
883  * Arguments:
884  *	zone   Unused
885  *	bytes  The number of bytes requested
886  *	wait   Shall we wait?
887  *
888  * Returns:
889  *	A pointer to the allocated memory or possibly
890  *	NULL if M_NOWAIT is set.
891  */
892 static void *
893 obj_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
894 {
895 	vm_object_t object;
896 	vm_offset_t retkva, zkva;
897 	vm_page_t p;
898 	int pages, startpages;
899 
900 	object = zone->uz_obj;
901 	retkva = 0;
902 
903 	/*
904 	 * This looks a little weird since we're getting one page at a time.
905 	 */
906 	VM_OBJECT_LOCK(object);
907 	p = TAILQ_LAST(&object->memq, pglist);
908 	pages = p != NULL ? p->pindex + 1 : 0;
909 	startpages = pages;
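	/*
	 * Pick up where the last allocation left off: the next free page
	 * index in the backing object also gives the offset into the zone's
	 * reserved KVA at which the new pages will be mapped.
	 */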
910 	zkva = zone->uz_kva + pages * PAGE_SIZE;
911 	for (; bytes > 0; bytes -= PAGE_SIZE) {
912 		p = vm_page_alloc(object, pages,
913 		    VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED);
914 		if (p == NULL) {
915 			if (pages != startpages)
916 				pmap_qremove(retkva, pages - startpages);
917 			while (pages != startpages) {
918 				pages--;
919 				p = TAILQ_LAST(&object->memq, pglist);
920 				vm_page_lock_queues();
921 				vm_page_unwire(p, 0);
922 				vm_page_free(p);
923 				vm_page_unlock_queues();
924 			}
925 			retkva = 0;
926 			goto done;
927 		}
928 		pmap_qenter(zkva, &p, 1);
929 		if (retkva == 0)
930 			retkva = zkva;
931 		zkva += PAGE_SIZE;
932 		pages += 1;
933 	}
934 done:
935 	VM_OBJECT_UNLOCK(object);
936 	*flags = UMA_SLAB_PRIV;
937 
938 	return ((void *)retkva);
939 }
940 
941 /*
942  * Frees a number of pages to the system
943  *
944  * Arguments:
945  *	mem   A pointer to the memory to be freed
946  *	size  The size of the memory being freed
947  *	flags The original p->us_flags field
948  *
949  * Returns:
950  *	Nothing
951  */
952 static void
953 page_free(void *mem, int size, u_int8_t flags)
954 {
955 	vm_map_t map;
956 
957 	if (flags & UMA_SLAB_KMEM)
958 		map = kmem_map;
959 	else
960 		panic("UMA: page_free used with invalid flags %d\n", flags);
961 
962 	kmem_free(map, (vm_offset_t)mem, size);
963 }
964 
965 /*
966  * Zero fill initializer
967  *
968  * Arguments/Returns follow uma_init specifications
969  */
970 static void
971 zero_init(void *mem, int size)
972 {
973 	bzero(mem, size);
974 }
975 
976 /*
977  * Finish creating a small uma zone.  This calculates ipers, and the zone size.
978  *
979  * Arguments
980  *	zone  The zone we should initialize
981  *
982  * Returns
983  *	Nothing
984  */
985 static void
986 zone_small_init(uma_zone_t zone)
987 {
988 	int rsize;
989 	int memused;
990 	int ipers;
991 
992 	rsize = zone->uz_size;
993 
994 	if (rsize < UMA_SMALLEST_UNIT)
995 		rsize = UMA_SMALLEST_UNIT;
996 
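	/*
	 * Round the size up to the zone's alignment boundary.  uz_align is a
	 * mask (alignment - 1), so e.g. a 20 byte item with 8 byte alignment
	 * (uz_align == 7) gets a real size of 24 bytes.
	 */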
997 	if (rsize & zone->uz_align)
998 		rsize = (rsize & ~zone->uz_align) + (zone->uz_align + 1);
999 
1000 	zone->uz_rsize = rsize;
1001 
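	/*
	 * Each item carries one byte of free list linkage, so the number of
	 * items per slab is the space left after the in-page slab header
	 * divided by the real size plus one.
	 */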
1002 	rsize += 1;	/* Account for the byte of linkage */
1003 	zone->uz_ipers = (UMA_SLAB_SIZE - sizeof(struct uma_slab)) / rsize;
1004 	zone->uz_ppera = 1;
1005 
1006 	KASSERT(zone->uz_ipers != 0, ("zone_small_init: ipers is 0, uh-oh!"));
1007 	memused = zone->uz_ipers * zone->uz_rsize;
1008 
1009 	/* Can we do any better? */
1010 	if ((UMA_SLAB_SIZE - memused) >= UMA_MAX_WASTE) {
1011 		/*
1012 		 * We can't do this if we're internal or if we've been
1013 		 * asked to not go to the VM for buckets.  If we do this we
1014 		 * may end up going to the VM (kmem_map) for slabs which we
1015 		 * do not want to do if we're UMA_ZFLAG_CACHEONLY as a
1016 		 * result of UMA_ZONE_VM, which clearly forbids it.
1017 		 */
1018 		if ((zone->uz_flags & UMA_ZFLAG_INTERNAL) ||
1019 		    (zone->uz_flags & UMA_ZFLAG_CACHEONLY))
1020 			return;
1021 		ipers = UMA_SLAB_SIZE / zone->uz_rsize;
1022 		if (ipers > zone->uz_ipers) {
1023 			zone->uz_flags |= UMA_ZONE_OFFPAGE;
1024 			if ((zone->uz_flags & UMA_ZONE_MALLOC) == 0)
1025 				zone->uz_flags |= UMA_ZONE_HASH;
1026 			zone->uz_ipers = ipers;
1027 		}
1028 	}
1029 }
1030 
1031 /*
1032  * Finish creating a large (> UMA_SLAB_SIZE) uma zone.  Just give in and do
1033  * OFFPAGE for now.  When I can allow for more dynamic slab sizes this will be
1034  * more complicated.
1035  *
1036  * Arguments
1037  *	zone  The zone we should initialize
1038  *
1039  * Returns
1040  *	Nothing
1041  */
1042 static void
1043 zone_large_init(uma_zone_t zone)
1044 {
1045 	int pages;
1046 
1047 	KASSERT((zone->uz_flags & UMA_ZFLAG_CACHEONLY) == 0,
1048 	    ("zone_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY zone"));
1049 
1050 	pages = zone->uz_size / UMA_SLAB_SIZE;
1051 
1052 	/* Account for remainder */
1053 	if ((pages * UMA_SLAB_SIZE) < zone->uz_size)
1054 		pages++;
1055 
1056 	zone->uz_ppera = pages;
1057 	zone->uz_ipers = 1;
1058 
1059 	zone->uz_flags |= UMA_ZONE_OFFPAGE;
1060 	if ((zone->uz_flags & UMA_ZONE_MALLOC) == 0)
1061 		zone->uz_flags |= UMA_ZONE_HASH;
1062 
1063 	zone->uz_rsize = zone->uz_size;
1064 }
1065 
1066 /*
1067  * Zone header ctor.  This initializes all fields, locks, etc.  And inserts
1068  * the zone onto the global zone list.
1069  *
1070  * Arguments/Returns follow uma_ctor specifications
1071  *	udata  Actually uma_zctor_args
1072  */
1073 
1074 static void
1075 zone_ctor(void *mem, int size, void *udata)
1076 {
1077 	struct uma_zctor_args *arg = udata;
1078 	uma_zone_t zone = mem;
1079 	int privlc;
1080 
1081 	bzero(zone, size);
1082 	zone->uz_name = arg->name;
1083 	zone->uz_size = arg->size;
1084 	zone->uz_ctor = arg->ctor;
1085 	zone->uz_dtor = arg->dtor;
1086 	zone->uz_init = arg->uminit;
1087 	zone->uz_fini = arg->fini;
1088 	zone->uz_align = arg->align;
1089 	zone->uz_free = 0;
1090 	zone->uz_pages = 0;
1091 	zone->uz_flags = arg->flags;
1092 	zone->uz_allocf = page_alloc;
1093 	zone->uz_freef = page_free;
1094 
1095 	if (arg->flags & UMA_ZONE_ZINIT)
1096 		zone->uz_init = zero_init;
1097 
1098 	if (arg->flags & UMA_ZONE_VM)
1099 		zone->uz_flags |= UMA_ZFLAG_CACHEONLY;
1100 
1101 	/*
1102 	 * XXX:
1103 	 * The +1 byte added to uz_size is to account for the byte of
1104 	 * linkage that is added to the size in zone_small_init().  If
1105 	 * we don't account for this here then we may end up in
1106 	 * zone_small_init() with a calculated 'ipers' of 0.
1107 	 */
1108 	if ((zone->uz_size+1) > (UMA_SLAB_SIZE - sizeof(struct uma_slab)))
1109 		zone_large_init(zone);
1110 	else
1111 		zone_small_init(zone);
1112 #ifdef UMA_MD_SMALL_ALLOC
1113 	if (zone->uz_ppera == 1) {
1114 		zone->uz_allocf = uma_small_alloc;
1115 		zone->uz_freef = uma_small_free;
1116 	}
1117 #endif	/* UMA_MD_SMALL_ALLOC */
1118 
1119 	if (arg->flags & UMA_ZONE_MTXCLASS)
1120 		privlc = 1;
1121 	else
1122 		privlc = 0;
1123 
1124 	/*
1125 	 * If we're putting the slab header in the actual page we need to
1126 	 * figure out where in each page it goes.  This calculates a right
1127 	 * justified offset into the memory on an ALIGN_PTR boundary.
1128 	 */
1129 	if (!(zone->uz_flags & UMA_ZONE_OFFPAGE)) {
1130 		int totsize;
1131 
1132 		/* Size of the slab struct and free list */
1133 		totsize = sizeof(struct uma_slab) + zone->uz_ipers;
1134 		if (totsize & UMA_ALIGN_PTR)
1135 			totsize = (totsize & ~UMA_ALIGN_PTR) +
1136 			    (UMA_ALIGN_PTR + 1);
1137 		zone->uz_pgoff = UMA_SLAB_SIZE - totsize;
1138 		totsize = zone->uz_pgoff + sizeof(struct uma_slab)
1139 		    + zone->uz_ipers;
1140 		/* I don't think it's possible, but I'll make sure anyway */
1141 		if (totsize > UMA_SLAB_SIZE) {
1142 			printf("zone %s ipers %d rsize %d size %d\n",
1143 			    zone->uz_name, zone->uz_ipers, zone->uz_rsize,
1144 			    zone->uz_size);
1145 			panic("UMA slab won't fit.\n");
1146 		}
1147 	}
1148 
1149 	if (zone->uz_flags & UMA_ZONE_HASH)
1150 		hash_alloc(&zone->uz_hash);
1151 
1152 #ifdef UMA_DEBUG
1153 	printf("%s(%p) size = %d ipers = %d ppera = %d pgoff = %d\n",
1154 	    zone->uz_name, zone,
1155 	    zone->uz_size, zone->uz_ipers,
1156 	    zone->uz_ppera, zone->uz_pgoff);
1157 #endif
1158 	ZONE_LOCK_INIT(zone, privlc);
1159 
1160 	mtx_lock(&uma_mtx);
1161 	LIST_INSERT_HEAD(&uma_zones, zone, uz_link);
1162 	mtx_unlock(&uma_mtx);
1163 
1164 	/*
1165 	 * Some internal zones don't have room allocated for the per cpu
1166 	 * caches.  If we're internal, bail out here.
1167 	 */
1168 	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
1169 		return;
1170 
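	/*
	 * Start the per cpu bucket size at one slab's worth of items, capped
	 * at BUCKET_MAX.  uma_zalloc_arg() bumps uz_count toward BUCKET_MAX
	 * as the zone sees more allocation pressure.
	 */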
1171 	if (zone->uz_ipers <= BUCKET_MAX)
1172 		zone->uz_count = zone->uz_ipers;
1173 	else
1174 		zone->uz_count = BUCKET_MAX;
1175 }
1176 
1177 /*
1178  * Zone header dtor.  This frees all data, destroys locks, frees the hash table
1179  * and removes the zone from the global list.
1180  *
1181  * Arguments/Returns follow uma_dtor specifications
1182  *	udata  unused
1183  */
1184 
1185 static void
1186 zone_dtor(void *arg, int size, void *udata)
1187 {
1188 	uma_zone_t zone;
1189 
1190 	zone = (uma_zone_t)arg;
1191 	mtx_lock(&uma_mtx);
1192 	LIST_REMOVE(zone, uz_link);
1193 	zone_drain_common(zone, 1);
1194 	mtx_unlock(&uma_mtx);
1195 
1196 	ZONE_LOCK(zone);
1197 	if (zone->uz_free != 0)
1198 		printf("Zone %s was not empty (%d items). "
1199 		    " Lost %d pages of memory.\n",
1200 		    zone->uz_name, zone->uz_free, zone->uz_pages);
1201 
1202 	ZONE_UNLOCK(zone);
1203 	if (zone->uz_flags & UMA_ZONE_HASH)
1204 		hash_free(&zone->uz_hash);
1205 
1206 	ZONE_LOCK_FINI(zone);
1207 }
1208 /*
1209  * Traverses every zone in the system and calls a callback
1210  *
1211  * Arguments:
1212  *	zfunc  A pointer to a function which accepts a zone
1213  *		as an argument.
1214  *
1215  * Returns:
1216  *	Nothing
1217  */
1218 static void
1219 zone_foreach(void (*zfunc)(uma_zone_t))
1220 {
1221 	uma_zone_t zone;
1222 
1223 	mtx_lock(&uma_mtx);
1224 	LIST_FOREACH(zone, &uma_zones, uz_link)
1225 		zfunc(zone);
1226 	mtx_unlock(&uma_mtx);
1227 }
1228 
1229 /* Public functions */
1230 /* See uma.h */
1231 void
1232 uma_startup(void *bootmem)
1233 {
1234 	struct uma_zctor_args args;
1235 	uma_slab_t slab;
1236 	int slabsize;
1237 	int i;
1238 
1239 #ifdef UMA_DEBUG
1240 	printf("Creating uma zone headers zone.\n");
1241 #endif
1242 #ifdef SMP
1243 	maxcpu = mp_maxid + 1;
1244 #else
1245 	maxcpu = 1;
1246 #endif
1247 #ifdef UMA_DEBUG
1248 	printf("Max cpu = %d, mp_maxid = %d\n", maxcpu, mp_maxid);
1249 #endif
1250 	mtx_init(&uma_mtx, "UMA lock", NULL, MTX_DEF);
1251 	/* "Manually" create the initial zone */
1252 	args.name = "UMA Zones";
1253 	args.size = sizeof(struct uma_zone) +
1254 	    (sizeof(struct uma_cache) * (maxcpu - 1));
1255 	args.ctor = zone_ctor;
1256 	args.dtor = zone_dtor;
1257 	args.uminit = zero_init;
1258 	args.fini = NULL;
1259 	args.align = 32 - 1;
1260 	args.flags = UMA_ZFLAG_INTERNAL;
1261 	/* The initial zone has no per cpu queues so it's smaller */
1262 	zone_ctor(zones, sizeof(struct uma_zone), &args);
1263 
1264 	/* Initialize the pcpu cache lock set once and for all */
1265 	for (i = 0; i < maxcpu; i++)
1266 		CPU_LOCK_INIT(i);
1267 
1268 #ifdef UMA_DEBUG
1269 	printf("Filling boot free list.\n");
1270 #endif
1271 	for (i = 0; i < UMA_BOOT_PAGES; i++) {
1272 		slab = (uma_slab_t)((u_int8_t *)bootmem + (i * UMA_SLAB_SIZE));
1273 		slab->us_data = (u_int8_t *)slab;
1274 		slab->us_flags = UMA_SLAB_BOOT;
1275 		LIST_INSERT_HEAD(&uma_boot_pages, slab, us_link);
1276 		uma_boot_free++;
1277 	}
1278 
1279 #ifdef UMA_DEBUG
1280 	printf("Creating slab zone.\n");
1281 #endif
1282 
1283 	/*
1284 	 * This is the max number of free list items we'll have with
1285 	 * offpage slabs.
1286 	 */
1287 	slabsize = UMA_SLAB_SIZE - sizeof(struct uma_slab);
1288 	slabsize /= UMA_MAX_WASTE;
1289 	slabsize++;			/* In case the division rounded down */
1290 	slabsize += sizeof(struct uma_slab);
1291 
1292 	/* Now make a zone for slab headers */
1293 	slabzone = uma_zcreate("UMA Slabs",
1294 				slabsize,
1295 				NULL, NULL, NULL, NULL,
1296 				UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
1297 
1298 	hashzone = uma_zcreate("UMA Hash",
1299 	    sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
1300 	    NULL, NULL, NULL, NULL,
1301 	    UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
1302 
1303 	bucket_init();
1304 
1305 #ifdef UMA_MD_SMALL_ALLOC
1306 	booted = 1;
1307 #endif
1308 
1309 #ifdef UMA_DEBUG
1310 	printf("UMA startup complete.\n");
1311 #endif
1312 }
1313 
1314 /* see uma.h */
1315 void
1316 uma_startup2(void)
1317 {
1318 	booted = 1;
1319 	bucket_enable();
1320 #ifdef UMA_DEBUG
1321 	printf("UMA startup2 complete.\n");
1322 #endif
1323 }
1324 
1325 /*
1326  * Initialize our callout handle
1327  *
1328  */
1329 
1330 static void
1331 uma_startup3(void)
1332 {
1333 #ifdef UMA_DEBUG
1334 	printf("Starting callout.\n");
1335 #endif
1336 	callout_init(&uma_callout, 0);
1337 	callout_reset(&uma_callout, UMA_WORKING_TIME * hz, uma_timeout, NULL);
1338 #ifdef UMA_DEBUG
1339 	printf("UMA startup3 complete.\n");
1340 #endif
1341 }
1342 
1343 /* See uma.h */
1344 uma_zone_t
1345 uma_zcreate(char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
1346 		uma_init uminit, uma_fini fini, int align, u_int16_t flags)
1348 {
1349 	struct uma_zctor_args args;
1350 
1351 	/* This stuff is essential for the zone ctor */
1352 	args.name = name;
1353 	args.size = size;
1354 	args.ctor = ctor;
1355 	args.dtor = dtor;
1356 	args.uminit = uminit;
1357 	args.fini = fini;
1358 	args.align = align;
1359 	args.flags = flags;
1360 
1361 	return (uma_zalloc_internal(zones, &args, M_WAITOK));
1362 }
1363 
1364 /* See uma.h */
1365 void
1366 uma_zdestroy(uma_zone_t zone)
1367 {
1368 	uma_zfree_internal(zones, zone, NULL, 0);
1369 }
1370 
1371 /* See uma.h */
1372 void *
1373 uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
1374 {
1375 	void *item;
1376 	uma_cache_t cache;
1377 	uma_bucket_t bucket;
1378 	int cpu;
1379 
1380 	/* This is the fast path allocation */
1381 #ifdef UMA_DEBUG_ALLOC_1
1382 	printf("Allocating one item from %s(%p)\n", zone->uz_name, zone);
1383 #endif
1384 
1385 #ifdef INVARIANTS
1386 	/*
1387 	 * Make sure that exactly one of M_WAITOK and M_NOWAIT is set, and
1388 	 * check against the API botches that are common.
1389 	 * The uma code implies M_WAITOK if M_NOWAIT is not set, so
1390 	 * we default to waiting if none of the flags is set.
1391 	 */
1392 	cpu = flags & (M_WAITOK | M_NOWAIT | M_DONTWAIT | M_TRYWAIT);
1393 	if (cpu != M_NOWAIT && cpu != M_WAITOK) {
1394 		static	struct timeval lasterr;
1395 		static	int curerr, once;
1396 		if (once == 0 && ppsratecheck(&lasterr, &curerr, 1)) {
1397 			printf("Bad uma_zalloc flags: %x\n", cpu);
1398 			backtrace();
1399 			once++;
1400 		}
1401 	}
1402 #endif
1403 	if (!(flags & M_NOWAIT)) {
1404 		KASSERT(curthread->td_intr_nesting_level == 0,
1405 		   ("malloc(M_WAITOK) in interrupt context"));
1406 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1407 		    "malloc() of \"%s\"", zone->uz_name);
1408 	}
1409 
1410 zalloc_restart:
1411 	cpu = PCPU_GET(cpuid);
1412 	CPU_LOCK(cpu);
1413 	cache = &zone->uz_cpu[cpu];
1414 
1415 zalloc_start:
1416 	bucket = cache->uc_allocbucket;
1417 
1418 	if (bucket) {
1419 		if (bucket->ub_cnt > 0) {
1420 			bucket->ub_cnt--;
1421 			item = bucket->ub_bucket[bucket->ub_cnt];
1422 #ifdef INVARIANTS
1423 			bucket->ub_bucket[bucket->ub_cnt] = NULL;
1424 #endif
1425 			KASSERT(item != NULL,
1426 			    ("uma_zalloc: Bucket pointer mangled."));
1427 			cache->uc_allocs++;
1428 #ifdef INVARIANTS
1429 			ZONE_LOCK(zone);
1430 			uma_dbg_alloc(zone, NULL, item);
1431 			ZONE_UNLOCK(zone);
1432 #endif
1433 			CPU_UNLOCK(cpu);
1434 			if (zone->uz_ctor)
1435 				zone->uz_ctor(item, zone->uz_size, udata);
1436 			if (flags & M_ZERO)
1437 				bzero(item, zone->uz_size);
1438 			return (item);
1439 		} else if (cache->uc_freebucket) {
1440 			/*
1441 			 * We have run out of items in our allocbucket.
1442 			 * See if we can switch with our free bucket.
1443 			 */
1444 			if (cache->uc_freebucket->ub_cnt > 0) {
1445 #ifdef UMA_DEBUG_ALLOC
1446 				printf("uma_zalloc: Swapping empty with"
1447 				    " alloc.\n");
1448 #endif
1449 				bucket = cache->uc_freebucket;
1450 				cache->uc_freebucket = cache->uc_allocbucket;
1451 				cache->uc_allocbucket = bucket;
1452 
1453 				goto zalloc_start;
1454 			}
1455 		}
1456 	}
1457 	ZONE_LOCK(zone);
1458 	/* Since we have locked the zone we may as well send back our stats */
1459 	zone->uz_allocs += cache->uc_allocs;
1460 	cache->uc_allocs = 0;
1461 
1462 	/* Our old one is now a free bucket */
1463 	if (cache->uc_allocbucket) {
1464 		KASSERT(cache->uc_allocbucket->ub_cnt == 0,
1465 		    ("uma_zalloc_arg: Freeing a non free bucket."));
1466 		LIST_INSERT_HEAD(&zone->uz_free_bucket,
1467 		    cache->uc_allocbucket, ub_link);
1468 		cache->uc_allocbucket = NULL;
1469 	}
1470 
1471 	/* Check the free list for a new alloc bucket */
1472 	if ((bucket = LIST_FIRST(&zone->uz_full_bucket)) != NULL) {
1473 		KASSERT(bucket->ub_cnt != 0,
1474 		    ("uma_zalloc_arg: Returning an empty bucket."));
1475 
1476 		LIST_REMOVE(bucket, ub_link);
1477 		cache->uc_allocbucket = bucket;
1478 		ZONE_UNLOCK(zone);
1479 		goto zalloc_start;
1480 	}
1481 	/* We are no longer associated with this cpu!!! */
1482 	CPU_UNLOCK(cpu);
1483 
1484 	/* Bump up our uz_count so we get here less */
1485 	if (zone->uz_count < BUCKET_MAX)
1486 		zone->uz_count++;
1487 	/*
1488 	 * Now let's just fill a bucket and put it on the free list.  If that
1489 	 * works we'll restart the allocation from the beginning.
1490 	 */
1491 	if (uma_zalloc_bucket(zone, flags)) {
1492 		ZONE_UNLOCK(zone);
1493 		goto zalloc_restart;
1494 	}
1495 	ZONE_UNLOCK(zone);
1496 	/*
1497 	 * We may not be able to get a bucket so return an actual item.
1498 	 */
1499 #ifdef UMA_DEBUG
1500 	printf("uma_zalloc_arg: Bucketzone returned NULL\n");
1501 #endif
1502 
1503 	return (uma_zalloc_internal(zone, udata, flags));
1504 }
1505 
1506 static uma_slab_t
1507 uma_zone_slab(uma_zone_t zone, int flags)
1508 {
1509 	uma_slab_t slab;
1510 
1511 	/*
1512 	 * This is to prevent us from recursively trying to allocate
1513 	 * buckets.  The problem is that if an allocation forces us to
1514 	 * grab a new bucket we will call page_alloc, which will go off
1515 	 * and cause the vm to allocate vm_map_entries.  If we need new
1516 	 * buckets there too we will recurse in kmem_alloc and bad
1517 	 * things happen.  So instead we return a NULL bucket, and make
1518 	 * the code that allocates buckets smart enough to deal with it
1519 	 */
1520 	if (zone->uz_flags & UMA_ZFLAG_INTERNAL && zone->uz_recurse != 0)
1521 		return (NULL);
1522 
1523 	slab = NULL;
1524 
1525 	for (;;) {
1526 		/*
1527 		 * Find a slab with some space.  Prefer slabs that are partially
1528 		 * used over those that are totally full.  This helps to reduce
1529 		 * fragmentation.
1530 		 */
1531 		if (zone->uz_free != 0) {
1532 			if (!LIST_EMPTY(&zone->uz_part_slab)) {
1533 				slab = LIST_FIRST(&zone->uz_part_slab);
1534 			} else {
1535 				slab = LIST_FIRST(&zone->uz_free_slab);
1536 				LIST_REMOVE(slab, us_link);
1537 				LIST_INSERT_HEAD(&zone->uz_part_slab, slab,
1538 				us_link);
1539 			}
1540 			return (slab);
1541 		}
1542 
1543 		/*
1544 		 * M_NOVM means don't ask at all!
1545 		 */
1546 		if (flags & M_NOVM)
1547 			break;
1548 
1549 		if (zone->uz_maxpages &&
1550 		    zone->uz_pages >= zone->uz_maxpages) {
1551 			zone->uz_flags |= UMA_ZFLAG_FULL;
1552 
1553 			if (flags & M_NOWAIT)
1554 				break;
1555 			else
1556 				msleep(zone, &zone->uz_lock, PVM,
1557 				    "zonelimit", 0);
1558 			continue;
1559 		}
1560 		zone->uz_recurse++;
1561 		slab = slab_zalloc(zone, flags);
1562 		zone->uz_recurse--;
1563 		/*
1564 		 * If we got a slab here it's safe to mark it partially used
1565 		 * and return.  We assume that the caller is going to remove
1566 		 * at least one item.
1567 		 */
1568 		if (slab) {
1569 			LIST_INSERT_HEAD(&zone->uz_part_slab, slab, us_link);
1570 			return (slab);
1571 		}
1572 		/*
1573 		 * We might not have been able to get a slab but another cpu
1574 		 * could have while we were unlocked.  Check again before we
1575 		 * fail.
1576 		 */
1577 		if (flags & M_NOWAIT)
1578 			flags |= M_NOVM;
1579 	}
1580 	return (slab);
1581 }
1582 
1583 static void *
1584 uma_slab_alloc(uma_zone_t zone, uma_slab_t slab)
1585 {
1586 	void *item;
1587 	u_int8_t freei;
1588 
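	/*
	 * Pop the first free item off of the slab's embedded free list.
	 * us_firstfree is the index of the first free item and each
	 * us_freelist[] byte holds the index of the next one.
	 */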
1589 	freei = slab->us_firstfree;
1590 	slab->us_firstfree = slab->us_freelist[freei];
1591 	item = slab->us_data + (zone->uz_rsize * freei);
1592 
1593 	slab->us_freecount--;
1594 	zone->uz_free--;
1595 #ifdef INVARIANTS
1596 	uma_dbg_alloc(zone, slab, item);
1597 #endif
1598 	/* Move this slab to the full list */
1599 	if (slab->us_freecount == 0) {
1600 		LIST_REMOVE(slab, us_link);
1601 		LIST_INSERT_HEAD(&zone->uz_full_slab, slab, us_link);
1602 	}
1603 
1604 	return (item);
1605 }
1606 
1607 static int
1608 uma_zalloc_bucket(uma_zone_t zone, int flags)
1609 {
1610 	uma_bucket_t bucket;
1611 	uma_slab_t slab;
1612 	int max;
1613 
1614 	/*
1615 	 * Try this zone's free list first so we don't allocate extra buckets.
1616 	 */
1617 	if ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
1618 		KASSERT(bucket->ub_cnt == 0,
1619 		    ("uma_zalloc_bucket: Bucket on free list is not empty."));
1620 		LIST_REMOVE(bucket, ub_link);
1621 	} else {
1622 		int bflags;
1623 
1624 		bflags = (flags & ~M_ZERO);
1625 		if (zone->uz_flags & UMA_ZFLAG_CACHEONLY)
1626 			bflags |= M_NOVM;
1627 
1628 		ZONE_UNLOCK(zone);
1629 		bucket = bucket_alloc(zone->uz_count, bflags);
1630 		ZONE_LOCK(zone);
1631 	}
1632 
1633 	if (bucket == NULL)
1634 		return (0);
1635 
1636 #ifdef SMP
1637 	/*
1638 	 * This code is here to limit the number of simultaneous bucket fills
1639 	 * for any given zone to the number of per cpu caches in this zone. This
1640 	 * is done so that we don't allocate more memory than we really need.
1641 	 */
1642 	if (zone->uz_fills >= mp_ncpus)
1643 		goto done;
1644 
1645 #endif
1646 	zone->uz_fills++;
1647 
1648 	max = MIN(bucket->ub_entries, zone->uz_count);
1649 	/* Try to keep the buckets totally full */
1650 	while (bucket->ub_cnt < max &&
1651 	    (slab = uma_zone_slab(zone, flags)) != NULL) {
1652 		while (slab->us_freecount && bucket->ub_cnt < max) {
1653 			bucket->ub_bucket[bucket->ub_cnt++] =
1654 			    uma_slab_alloc(zone, slab);
1655 		}
1656 		/* Don't block on the next fill */
1657 		flags |= M_NOWAIT;
1658 	}
1659 
1660 	zone->uz_fills--;
1661 
1662 	if (bucket->ub_cnt != 0) {
1663 		LIST_INSERT_HEAD(&zone->uz_full_bucket,
1664 		    bucket, ub_link);
1665 		return (1);
1666 	}
1667 #ifdef SMP
1668 done:
1669 #endif
1670 	bucket_free(bucket);
1671 
1672 	return (0);
1673 }
1674 /*
1675  * Allocates an item for an internal zone
1676  *
1677  * Arguments
1678  *	zone   The zone to alloc for.
1679  *	udata  The data to be passed to the constructor.
1680  *	flags  M_WAITOK, M_NOWAIT, M_ZERO.
1681  *
1682  * Returns
1683  *	NULL if there is no memory and M_NOWAIT is set
1684  *	An item if successful
1685  */
1686 
1687 static void *
1688 uma_zalloc_internal(uma_zone_t zone, void *udata, int flags)
1689 {
1690 	uma_slab_t slab;
1691 	void *item;
1692 
1693 	item = NULL;
1694 
1695 #ifdef UMA_DEBUG_ALLOC
1696 	printf("INTERNAL: Allocating one item from %s(%p)\n", zone->uz_name, zone);
1697 #endif
1698 	ZONE_LOCK(zone);
1699 
1700 	slab = uma_zone_slab(zone, flags);
1701 	if (slab == NULL) {
1702 		ZONE_UNLOCK(zone);
1703 		return (NULL);
1704 	}
1705 
1706 	item = uma_slab_alloc(zone, slab);
1707 
1708 	ZONE_UNLOCK(zone);
1709 
1710 	if (zone->uz_ctor != NULL)
1711 		zone->uz_ctor(item, zone->uz_size, udata);
1712 	if (flags & M_ZERO)
1713 		bzero(item, zone->uz_size);
1714 
1715 	return (item);
1716 }
1717 
1718 /* See uma.h */
1719 void
1720 uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
1721 {
1722 	uma_cache_t cache;
1723 	uma_bucket_t bucket;
1724 	int bflags;
1725 	int cpu;
1726 	int skip;
1727 
1728 	/* This is the fast path free */
1729 	skip = 0;
1730 #ifdef UMA_DEBUG_ALLOC_1
1731 	printf("Freeing item %p to %s(%p)\n", item, zone->uz_name, zone);
1732 #endif
1733 	/*
1734 	 * The race here is acceptable.  If we miss it we'll just have to wait
1735 	 * a little longer for the limits to be reset.
1736 	 */
1737 
1738 	if (zone->uz_flags & UMA_ZFLAG_FULL)
1739 		goto zfree_internal;
1740 
1741 	if (zone->uz_dtor) {
1742 		zone->uz_dtor(item, zone->uz_size, udata);
1743 		skip = 1;
1744 	}
1745 
1746 zfree_restart:
1747 	cpu = PCPU_GET(cpuid);
1748 	CPU_LOCK(cpu);
1749 	cache = &zone->uz_cpu[cpu];
1750 
1751 zfree_start:
1752 	bucket = cache->uc_freebucket;
1753 
1754 	if (bucket) {
1755 		/*
1756 		 * Do we have room in our bucket? It is OK for this uz count
1757 		 * check to be slightly out of sync.
1758 		 */
1759 
1760 		if (bucket->ub_cnt < bucket->ub_entries) {
1761 			KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL,
1762 			    ("uma_zfree: Freeing to non free bucket index."));
1763 			bucket->ub_bucket[bucket->ub_cnt] = item;
1764 			bucket->ub_cnt++;
1765 #ifdef INVARIANTS
1766 			ZONE_LOCK(zone);
1767 			if (zone->uz_flags & UMA_ZONE_MALLOC)
1768 				uma_dbg_free(zone, udata, item);
1769 			else
1770 				uma_dbg_free(zone, NULL, item);
1771 			ZONE_UNLOCK(zone);
1772 #endif
1773 			CPU_UNLOCK(cpu);
1774 			return;
1775 		} else if (cache->uc_allocbucket) {
1776 #ifdef UMA_DEBUG_ALLOC
1777 			printf("uma_zfree: Swapping buckets.\n");
1778 #endif
1779 			/*
1780 			 * We have run out of space in our freebucket.
1781 			 * See if we can switch with our alloc bucket.
1782 			 */
1783 			if (cache->uc_allocbucket->ub_cnt <
1784 			    cache->uc_freebucket->ub_cnt) {
1785 				bucket = cache->uc_freebucket;
1786 				cache->uc_freebucket = cache->uc_allocbucket;
1787 				cache->uc_allocbucket = bucket;
1788 				goto zfree_start;
1789 			}
1790 		}
1791 	}
1792 	/*
1793 	 * We can get here for two reasons:
1794 	 *
1795 	 * 1) The buckets are NULL
1796 	 * 2) The alloc and free buckets are both somewhat full.
1797 	 */
1798 
1799 	ZONE_LOCK(zone);
1800 
1801 	bucket = cache->uc_freebucket;
1802 	cache->uc_freebucket = NULL;
1803 
1804 	/* Can we throw this on the zone full list? */
1805 	if (bucket != NULL) {
1806 #ifdef UMA_DEBUG_ALLOC
1807 		printf("uma_zfree: Putting old bucket on the free list.\n");
1808 #endif
1809 		/* ub_cnt is pointing to the last free item */
1810 		KASSERT(bucket->ub_cnt != 0,
1811 		    ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n"));
1812 		LIST_INSERT_HEAD(&zone->uz_full_bucket,
1813 		    bucket, ub_link);
1814 	}
1815 	if ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
1816 		LIST_REMOVE(bucket, ub_link);
1817 		ZONE_UNLOCK(zone);
1818 		cache->uc_freebucket = bucket;
1819 		goto zfree_start;
1820 	}
1821 	/* We're done with this CPU now */
1822 	CPU_UNLOCK(cpu);
1823 
1824 	/* And the zone.. */
1825 	ZONE_UNLOCK(zone);
1826 
1827 #ifdef UMA_DEBUG_ALLOC
1828 	printf("uma_zfree: Allocating new free bucket.\n");
1829 #endif
1830 	bflags = M_NOWAIT;
1831 
1832 	if (zone->uz_flags & UMA_ZFLAG_CACHEONLY)
1833 		bflags |= M_NOVM;
1834 	bucket = bucket_alloc(zone->uz_count, bflags);
1835 	if (bucket) {
1836 		ZONE_LOCK(zone);
1837 		LIST_INSERT_HEAD(&zone->uz_free_bucket,
1838 		    bucket, ub_link);
1839 		ZONE_UNLOCK(zone);
1840 		goto zfree_restart;
1841 	}
1842 
1843 	/*
1844 	 * If nothing else caught this, we'll just do an internal free.
1845 	 */
1846 
1847 zfree_internal:
1848 
1849 #ifdef INVARIANTS
1850 	/*
1851 	 * If we need to skip the dtor and the uma_dbg_free in
1852 	 * uma_zfree_internal because we've already called the dtor
1853 	 * above, but we ended up here, then we need to make sure
1854 	 * that we take care of the uma_dbg_free immediately.
1855 	 */
1856 	if (skip) {
1857 		ZONE_LOCK(zone);
1858 		if (zone->uz_flags & UMA_ZONE_MALLOC)
1859 			uma_dbg_free(zone, udata, item);
1860 		else
1861 			uma_dbg_free(zone, NULL, item);
1862 		ZONE_UNLOCK(zone);
1863 	}
1864 #endif
1865 	uma_zfree_internal(zone, item, udata, skip);
1866 
1867 	return;
1868 
1869 }
1870 
1871 /*
1872  * Frees an item to an INTERNAL zone or allocates a free bucket
1873  *
1874  * Arguments:
1875  *	zone   The zone to free to
1876  *	item   The item we're freeing
1877  *	udata  User supplied data for the dtor
1878  *	skip   Skip the dtor, it was done in uma_zfree_arg
1879  */
1880 static void
1881 uma_zfree_internal(uma_zone_t zone, void *item, void *udata, int skip)
1882 {
1883 	uma_slab_t slab;
1884 	u_int8_t *mem;
1885 	u_int8_t freei;
1886 
1887 	if (!skip && zone->uz_dtor)
1888 		zone->uz_dtor(item, zone->uz_size, udata);
1889 
1890 	ZONE_LOCK(zone);
1891 
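	/*
	 * Find the slab header for this item.  Rounding the item address
	 * down to the slab boundary gives the slab's memory; HASH zones then
	 * look the header up in the hash table while in-page zones keep it
	 * at uz_pgoff.  MALLOC zones pass the slab in through udata instead.
	 */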
1892 	if (!(zone->uz_flags & UMA_ZONE_MALLOC)) {
1893 		mem = (u_int8_t *)((unsigned long)item & (~UMA_SLAB_MASK));
1894 		if (zone->uz_flags & UMA_ZONE_HASH)
1895 			slab = hash_sfind(&zone->uz_hash, mem);
1896 		else {
1897 			mem += zone->uz_pgoff;
1898 			slab = (uma_slab_t)mem;
1899 		}
1900 	} else {
1901 		slab = (uma_slab_t)udata;
1902 	}
1903 
1904 	/* Do we need to remove from any lists? */
1905 	if (slab->us_freecount+1 == zone->uz_ipers) {
1906 		LIST_REMOVE(slab, us_link);
1907 		LIST_INSERT_HEAD(&zone->uz_free_slab, slab, us_link);
1908 	} else if (slab->us_freecount == 0) {
1909 		LIST_REMOVE(slab, us_link);
1910 		LIST_INSERT_HEAD(&zone->uz_part_slab, slab, us_link);
1911 	}
1912 
1913 	/* Slab management stuff */
1914 	freei = ((unsigned long)item - (unsigned long)slab->us_data)
1915 		/ zone->uz_rsize;
1916 
1917 #ifdef INVARIANTS
1918 	if (!skip)
1919 		uma_dbg_free(zone, slab, item);
1920 #endif
1921 
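	/* Push the item back onto the head of the slab's free list. */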
1922 	slab->us_freelist[freei] = slab->us_firstfree;
1923 	slab->us_firstfree = freei;
1924 	slab->us_freecount++;
1925 
1926 	/* Zone statistics */
1927 	zone->uz_free++;
1928 
1929 	if (zone->uz_flags & UMA_ZFLAG_FULL) {
1930 		if (zone->uz_pages < zone->uz_maxpages)
1931 			zone->uz_flags &= ~UMA_ZFLAG_FULL;
1932 
1933 		/* We can handle one more allocation */
1934 		wakeup_one(zone);
1935 	}
1936 
1937 	ZONE_UNLOCK(zone);
1938 }
1939 
1940 /* See uma.h */
1941 void
1942 uma_zone_set_max(uma_zone_t zone, int nitems)
1943 {
1944 	ZONE_LOCK(zone);
1945 	if (zone->uz_ppera > 1)
1946 		zone->uz_maxpages = nitems * zone->uz_ppera;
1947 	else
1948 		zone->uz_maxpages = nitems / zone->uz_ipers;
1949 
1950 	if (zone->uz_maxpages * zone->uz_ipers < nitems)
1951 		zone->uz_maxpages++;
1952 
1953 	ZONE_UNLOCK(zone);
1954 }
1955 
1956 /* See uma.h */
1957 void
1958 uma_zone_set_freef(uma_zone_t zone, uma_free freef)
1959 {
1960 	ZONE_LOCK(zone);
1961 	zone->uz_freef = freef;
1962 	ZONE_UNLOCK(zone);
1963 }
1964 
1965 /* See uma.h */
1966 void
1967 uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
1968 {
1969 	ZONE_LOCK(zone);
1970 	zone->uz_flags |= UMA_ZFLAG_PRIVALLOC;
1971 	zone->uz_allocf = allocf;
1972 	ZONE_UNLOCK(zone);
1973 }
1974 
1975 /* See uma.h */
1976 int
1977 uma_zone_set_obj(uma_zone_t zone, struct vm_object *obj, int count)
1978 {
1979 	int pages;
1980 	vm_offset_t kva;
1981 
1982 	mtx_lock(&Giant);
1983 
1984 	pages = count / zone->uz_ipers;
1985 
1986 	if (pages * zone->uz_ipers < count)
1987 		pages++;
1988 
1989 	kva = kmem_alloc_pageable(kernel_map, pages * UMA_SLAB_SIZE);
1990 
1991 	if (kva == 0) {
1992 		mtx_unlock(&Giant);
1993 		return (0);
1994 	}
1995 	if (obj == NULL) {
1996 		obj = vm_object_allocate(OBJT_DEFAULT,
1997 		    pages);
1998 	} else {
1999 		VM_OBJECT_LOCK_INIT(obj);
2000 		_vm_object_allocate(OBJT_DEFAULT,
2001 		    pages, obj);
2002 	}
2003 	ZONE_LOCK(zone);
2004 	zone->uz_kva = kva;
2005 	zone->uz_obj = obj;
2006 	zone->uz_maxpages = pages;
2007 	zone->uz_allocf = obj_alloc;
2008 	zone->uz_flags |= UMA_ZONE_NOFREE | UMA_ZFLAG_PRIVALLOC;
2009 	ZONE_UNLOCK(zone);
2010 	mtx_unlock(&Giant);
2011 
2012 	return (1);
2013 }
2014 
2015 /* See uma.h */
2016 void
2017 uma_prealloc(uma_zone_t zone, int items)
2018 {
2019 	int slabs;
2020 	uma_slab_t slab;
2021 
2022 	ZONE_LOCK(zone);
2023 	slabs = items / zone->uz_ipers;
2024 	if (slabs * zone->uz_ipers < items)
2025 		slabs++;
2026 	while (slabs > 0) {
2027 		slab = slab_zalloc(zone, M_WAITOK);
2028 		LIST_INSERT_HEAD(&zone->uz_free_slab, slab, us_link);
2029 		slabs--;
2030 	}
2031 	ZONE_UNLOCK(zone);
2032 }
2033 
2034 /* See uma.h */
2035 void
2036 uma_reclaim(void)
2037 {
2038 #ifdef UMA_DEBUG
2039 	printf("UMA: vm asked us to release pages!\n");
2040 #endif
2041 	bucket_enable();
2042 	zone_foreach(zone_drain);
2043 	/*
2044 	 * Some slabs may only become free once other zones are drained, and
2045 	 * the slab zone was visited early in the zone list, so we drain it
2046 	 * again here.  We have to do the same for buckets.
2047 	 */
2048 	zone_drain_common(slabzone, 0);
2049 	bucket_zone_drain();
2050 }
2051 
2052 void *
2053 uma_large_malloc(int size, int wait)
2054 {
2055 	void *mem;
2056 	uma_slab_t slab;
2057 	u_int8_t flags;
2058 
2059 	slab = uma_zalloc_internal(slabzone, NULL, wait);
2060 	if (slab == NULL)
2061 		return (NULL);
2062 	mem = page_alloc(NULL, size, &flags, wait);
2063 	if (mem) {
2064 		vsetslab((vm_offset_t)mem, slab);
2065 		slab->us_data = mem;
2066 		slab->us_flags = flags | UMA_SLAB_MALLOC;
2067 		slab->us_size = size;
2068 	} else {
2069 		uma_zfree_internal(slabzone, slab, NULL, 0);
2070 	}
2071 
2073 	return (mem);
2074 }
2075 
2076 void
2077 uma_large_free(uma_slab_t slab)
2078 {
2079 	vsetobj((vm_offset_t)slab->us_data, kmem_object);
2080 	/*
2081 	 * XXX: We get a lock order reversal if we don't have Giant:
2082 	 * vm_map_remove (locks system map) -> vm_map_delete ->
2083 	 *    vm_map_entry_unwire -> vm_fault_unwire -> mtx_lock(&Giant)
2084 	 */
2085 	if (!mtx_owned(&Giant)) {
2086 		mtx_lock(&Giant);
2087 		page_free(slab->us_data, slab->us_size, slab->us_flags);
2088 		mtx_unlock(&Giant);
2089 	} else
2090 		page_free(slab->us_data, slab->us_size, slab->us_flags);
2091 	uma_zfree_internal(slabzone, slab, NULL, 0);
2092 }
2093 
2094 void
2095 uma_print_stats(void)
2096 {
2097 	zone_foreach(uma_print_zone);
2098 }
2099 
2100 void
2101 uma_print_zone(uma_zone_t zone)
2102 {
2103 	printf("%s(%p) size %d(%d) flags %d ipers %d ppera %d out %d free %d\n",
2104 	    zone->uz_name, zone, zone->uz_size, zone->uz_rsize, zone->uz_flags,
2105 	    zone->uz_ipers, zone->uz_ppera,
2106 	    (zone->uz_ipers * zone->uz_pages) - zone->uz_free, zone->uz_free);
2107 }
2108 
2109 /*
2110  * Sysctl handler for vm.zone
2111  *
2112  * stolen from vm_zone.c
2113  */
2114 static int
2115 sysctl_vm_zone(SYSCTL_HANDLER_ARGS)
2116 {
2117 	int error, len, cnt;
2118 	const int linesize = 128;	/* conservative */
2119 	int totalfree;
2120 	char *tmpbuf, *offset;
2121 	uma_zone_t z;
2122 	char *p;
2123 	int cpu;
2124 	int cachefree;
2125 	uma_bucket_t bucket;
2126 	uma_cache_t cache;
2127 
2128 	cnt = 0;
2129 	mtx_lock(&uma_mtx);
2130 	LIST_FOREACH(z, &uma_zones, uz_link)
2131 		cnt++;
2132 	mtx_unlock(&uma_mtx);
2133 	MALLOC(tmpbuf, char *, (cnt == 0 ? 1 : cnt) * linesize,
2134 			M_TEMP, M_WAITOK);
2135 	len = snprintf(tmpbuf, linesize,
2136 	    "\nITEM            SIZE     LIMIT     USED    FREE  REQUESTS\n\n");
2137 	if (cnt == 0)
2138 		tmpbuf[len - 1] = '\0';
2139 	error = SYSCTL_OUT(req, tmpbuf, cnt == 0 ? len-1 : len);
2140 	if (error || cnt == 0)
2141 		goto out;
2142 	offset = tmpbuf;
2143 	mtx_lock(&uma_mtx);
2144 	LIST_FOREACH(z, &uma_zones, uz_link) {
2145 		if (cnt == 0)	/* list may have changed size */
2146 			break;
2147 		if (!(z->uz_flags & UMA_ZFLAG_INTERNAL)) {
2148 			for (cpu = 0; cpu < maxcpu; cpu++) {
2149 				if (CPU_ABSENT(cpu))
2150 					continue;
2151 				CPU_LOCK(cpu);
2152 			}
2153 		}
2154 		ZONE_LOCK(z);
2155 		cachefree = 0;
2156 		if (!(z->uz_flags & UMA_ZFLAG_INTERNAL)) {
2157 			for (cpu = 0; cpu < maxcpu; cpu++) {
2158 				if (CPU_ABSENT(cpu))
2159 					continue;
2160 				cache = &z->uz_cpu[cpu];
2161 				if (cache->uc_allocbucket != NULL)
2162 					cachefree += cache->uc_allocbucket->ub_cnt;
2163 				if (cache->uc_freebucket != NULL)
2164 					cachefree += cache->uc_freebucket->ub_cnt;
2165 				CPU_UNLOCK(cpu);
2166 			}
2167 		}
2168 		LIST_FOREACH(bucket, &z->uz_full_bucket, ub_link) {
2169 			cachefree += bucket->ub_cnt;
2170 		}
2171 		totalfree = z->uz_free + cachefree;
2172 		len = snprintf(offset, linesize,
2173 		    "%-12.12s  %6.6u, %8.8u, %6.6u, %6.6u, %8.8llu\n",
2174 		    z->uz_name, z->uz_size,
2175 		    z->uz_maxpages * z->uz_ipers,
2176 		    (z->uz_ipers * (z->uz_pages / z->uz_ppera)) - totalfree,
2177 		    totalfree,
2178 		    (unsigned long long)z->uz_allocs);
2179 		ZONE_UNLOCK(z);
2180 		for (p = offset + 12; p > offset && *p == ' '; --p)
2181 			/* nothing */ ;
2182 		p[1] = ':';
2183 		cnt--;
2184 		offset += len;
2185 	}
2186 	mtx_unlock(&uma_mtx);
2187 	*offset++ = '\0';
2188 	error = SYSCTL_OUT(req, tmpbuf, offset - tmpbuf);
2189 out:
2190 	FREE(tmpbuf, M_TEMP);
2191 	return (error);
2192 }
2193