xref: /freebsd/sys/vm/uma_core.c (revision 7562eaabc01a48e6b11d5b558c41e3b92dae5c2d)
1 /*
2  * Copyright (c) 2002, Jeffrey Roberson <jeff@freebsd.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 /*
28  * uma_core.c  Implementation of the Universal Memory Allocator (UMA)
29  *
30  * This allocator is intended to replace the multitude of similar object caches
31  * in the standard FreeBSD kernel.  The intent is to be flexible as well as
32  * efficient.  A primary design goal is to return unused memory to the rest of
33  * the system.  This will make the system as a whole more flexible due to the
34  * ability to move memory to subsystems which most need it instead of leaving
35  * pools of reserved memory unused.
36  *
37  * The basic ideas stem from similar slab/zone based allocators whose algorithms
38  * are well known.
39  *
40  */
41 
42 /*
43  * TODO:
44  *	- Improve memory usage for large allocations
45  *	- Investigate cache size adjustments
46  */
47 
48 #include <sys/cdefs.h>
49 __FBSDID("$FreeBSD$");
50 
51 /* I should really use ktr.. */
52 /*
53 #define UMA_DEBUG 1
54 #define UMA_DEBUG_ALLOC 1
55 #define UMA_DEBUG_ALLOC_1 1
56 */
57 
58 #include "opt_param.h"
59 #include <sys/param.h>
60 #include <sys/systm.h>
61 #include <sys/kernel.h>
62 #include <sys/types.h>
63 #include <sys/queue.h>
64 #include <sys/malloc.h>
65 #include <sys/ktr.h>
66 #include <sys/lock.h>
67 #include <sys/sysctl.h>
68 #include <sys/mutex.h>
69 #include <sys/proc.h>
70 #include <sys/smp.h>
71 #include <sys/vmmeter.h>
72 
73 #include <vm/vm.h>
74 #include <vm/vm_object.h>
75 #include <vm/vm_page.h>
76 #include <vm/vm_param.h>
77 #include <vm/vm_map.h>
78 #include <vm/vm_kern.h>
79 #include <vm/vm_extern.h>
80 #include <vm/uma.h>
81 #include <vm/uma_int.h>
82 #include <vm/uma_dbg.h>
83 
84 #include <machine/vmparam.h>
85 
86 /*
87  * This is the zone and keg from which all zones are spawned.  The idea is that
88  * even the zone & keg heads are allocated from the allocator, so we use the
89  * bss section to bootstrap us.
90  */
91 static struct uma_keg masterkeg;
92 static struct uma_zone masterzone_k;
93 static struct uma_zone masterzone_z;
94 static uma_zone_t kegs = &masterzone_k;
95 static uma_zone_t zones = &masterzone_z;
96 
97 /* This is the zone from which all of uma_slab_t's are allocated. */
98 static uma_zone_t slabzone;
99 static uma_zone_t slabrefzone;	/* With refcounters (for UMA_ZONE_REFCNT) */
100 
101 /*
102  * The initial hash tables come out of this zone so they can be allocated
103  * prior to malloc coming up.
104  */
105 static uma_zone_t hashzone;
106 
107 static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");
108 
109 /*
110  * Are we allowed to allocate buckets?
111  */
112 static int bucketdisable = 1;
113 
114 /* Linked list of all kegs in the system */
115 static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(&uma_kegs);
116 
117 /* This mutex protects the keg list */
118 static struct mtx uma_mtx;
119 
120 /* These are the pcpu cache locks */
121 static struct mtx uma_pcpu_mtx[MAXCPU];
122 
123 /* Linked list of boot time pages */
124 static LIST_HEAD(,uma_slab) uma_boot_pages =
125     LIST_HEAD_INITIALIZER(&uma_boot_pages);
126 
127 /* Count of free boottime pages */
128 static int uma_boot_free = 0;
129 
130 /* Is the VM done starting up? */
131 static int booted = 0;
132 
133 /* Maximum number of allowed items-per-slab if the slab header is OFFPAGE */
134 static u_int uma_max_ipers;
135 static u_int uma_max_ipers_ref;
136 
137 /*
138  * This is the handle used to schedule events that need to happen
139  * outside of the allocation fast path.
140  */
141 static struct callout uma_callout;
142 #define	UMA_TIMEOUT	20		/* Seconds for callout interval. */
143 
144 /*
145  * This structure is passed as the zone ctor arg so that I don't have to create
146  * a special allocation function just for zones.
147  */
148 struct uma_zctor_args {
149 	char *name;
150 	size_t size;
151 	uma_ctor ctor;
152 	uma_dtor dtor;
153 	uma_init uminit;
154 	uma_fini fini;
155 	uma_keg_t keg;
156 	int align;
157 	u_int16_t flags;
158 };
159 
160 struct uma_kctor_args {
161 	uma_zone_t zone;
162 	size_t size;
163 	uma_init uminit;
164 	uma_fini fini;
165 	int align;
166 	u_int16_t flags;
167 };
168 
169 struct uma_bucket_zone {
170 	uma_zone_t	ubz_zone;
171 	char		*ubz_name;
172 	int		ubz_entries;
173 };
174 
175 #define	BUCKET_MAX	128
176 
177 struct uma_bucket_zone bucket_zones[] = {
178 	{ NULL, "16 Bucket", 16 },
179 	{ NULL, "32 Bucket", 32 },
180 	{ NULL, "64 Bucket", 64 },
181 	{ NULL, "128 Bucket", 128 },
182 	{ NULL, NULL, 0}
183 };
184 
185 #define	BUCKET_SHIFT	4
186 #define	BUCKET_ZONES	((BUCKET_MAX >> BUCKET_SHIFT) + 1)
187 
188 uint8_t bucket_size[BUCKET_ZONES];
189 
190 enum zfreeskip { SKIP_NONE, SKIP_DTOR, SKIP_FINI };
191 
192 /* Prototypes.. */
193 
194 static void *obj_alloc(uma_zone_t, int, u_int8_t *, int);
195 static void *page_alloc(uma_zone_t, int, u_int8_t *, int);
196 static void *startup_alloc(uma_zone_t, int, u_int8_t *, int);
197 static void page_free(void *, int, u_int8_t);
198 static uma_slab_t slab_zalloc(uma_zone_t, int);
199 static void cache_drain(uma_zone_t);
200 static void bucket_drain(uma_zone_t, uma_bucket_t);
201 static void bucket_cache_drain(uma_zone_t zone);
202 static int keg_ctor(void *, int, void *, int);
203 static void keg_dtor(void *, int, void *);
204 static int zone_ctor(void *, int, void *, int);
205 static void zone_dtor(void *, int, void *);
206 static int zero_init(void *, int, int);
207 static void zone_small_init(uma_zone_t zone);
208 static void zone_large_init(uma_zone_t zone);
209 static void zone_foreach(void (*zfunc)(uma_zone_t));
210 static void zone_timeout(uma_zone_t zone);
211 static int hash_alloc(struct uma_hash *);
212 static int hash_expand(struct uma_hash *, struct uma_hash *);
213 static void hash_free(struct uma_hash *hash);
214 static void uma_timeout(void *);
215 static void uma_startup3(void);
216 static void *uma_zalloc_internal(uma_zone_t, void *, int);
217 static void uma_zfree_internal(uma_zone_t, void *, void *, enum zfreeskip);
218 static void bucket_enable(void);
219 static void bucket_init(void);
220 static uma_bucket_t bucket_alloc(int, int);
221 static void bucket_free(uma_bucket_t);
222 static void bucket_zone_drain(void);
223 static int uma_zalloc_bucket(uma_zone_t zone, int flags);
224 static uma_slab_t uma_zone_slab(uma_zone_t zone, int flags);
225 static void *uma_slab_alloc(uma_zone_t zone, uma_slab_t slab);
226 static void zone_drain(uma_zone_t);
227 static uma_zone_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
228     uma_fini fini, int align, u_int16_t flags);
229 
230 void uma_print_zone(uma_zone_t);
231 void uma_print_stats(void);
232 static int sysctl_vm_zone(SYSCTL_HANDLER_ARGS);
233 
234 #ifdef WITNESS
235 static int nosleepwithlocks = 1;
236 SYSCTL_INT(_debug, OID_AUTO, nosleepwithlocks, CTLFLAG_RW, &nosleepwithlocks,
237     0, "Convert M_WAITOK to M_NOWAIT to avoid lock-held-across-sleep paths");
238 #else
239 static int nosleepwithlocks = 0;
240 SYSCTL_INT(_debug, OID_AUTO, nosleepwithlocks, CTLFLAG_RW, &nosleepwithlocks,
241     0, "Convert M_WAITOK to M_NOWAIT to avoid lock-held-across-sleep paths");
242 #endif
243 SYSCTL_OID(_vm, OID_AUTO, zone, CTLTYPE_STRING|CTLFLAG_RD,
244     NULL, 0, sysctl_vm_zone, "A", "Zone Info");
245 SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);
246 
247 /*
248  * This routine checks to see whether or not it's safe to enable buckets.
249  */
250 
251 static void
252 bucket_enable(void)
253 {
254 	if (cnt.v_free_count < cnt.v_free_min)
255 		bucketdisable = 1;
256 	else
257 		bucketdisable = 0;
258 }
259 
260 static void
261 bucket_init(void)
262 {
263 	struct uma_bucket_zone *ubz;
264 	int i;
265 	int j;
266 
267 	for (i = 0, j = 0; bucket_zones[j].ubz_entries != 0; j++) {
268 		int size;
269 
270 		ubz = &bucket_zones[j];
271 		size = roundup(sizeof(struct uma_bucket), sizeof(void *));
272 		size += sizeof(void *) * ubz->ubz_entries;
273 		ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
274 		    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
275 		for (; i <= ubz->ubz_entries; i += (1 << BUCKET_SHIFT))
276 			bucket_size[i >> BUCKET_SHIFT] = j;
277 	}
278 }
279 
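/*
 * Illustration (not executed anywhere): how a requested bucket size maps
 * through the bucket_size[] table built by bucket_init() above, with
 * BUCKET_SHIFT == 4:
 *
 *	entries = 48
 *	idx = howmany(48, 1 << BUCKET_SHIFT) = 3
 *	bucket_size[3] == 2, so bucket_zones[2] ("64 Bucket") is used and
 *	the returned bucket has ub_entries == 64.
 *
 * Every request up to BUCKET_MAX is rounded up to the smallest bucket
 * zone that can hold it.
 */
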
280 static uma_bucket_t
281 bucket_alloc(int entries, int bflags)
282 {
283 	struct uma_bucket_zone *ubz;
284 	uma_bucket_t bucket;
285 	int idx;
286 
287 	/*
288 	 * This is to stop us from allocating per cpu buckets while we're
289 	 * running out of UMA_BOOT_PAGES.  Otherwise, we would exhaust the
290 	 * boot pages.  This also prevents us from allocating buckets in
291 	 * low memory situations.
292 	 */
293 
294 	if (bucketdisable)
295 		return (NULL);
296 	idx = howmany(entries, 1 << BUCKET_SHIFT);
297 	ubz = &bucket_zones[bucket_size[idx]];
298 	bucket = uma_zalloc_internal(ubz->ubz_zone, NULL, bflags);
299 	if (bucket) {
300 #ifdef INVARIANTS
301 		bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
302 #endif
303 		bucket->ub_cnt = 0;
304 		bucket->ub_entries = ubz->ubz_entries;
305 	}
306 
307 	return (bucket);
308 }
309 
310 static void
311 bucket_free(uma_bucket_t bucket)
312 {
313 	struct uma_bucket_zone *ubz;
314 	int idx;
315 
316 	idx = howmany(bucket->ub_entries, 1 << BUCKET_SHIFT);
317 	ubz = &bucket_zones[bucket_size[idx]];
318 	uma_zfree_internal(ubz->ubz_zone, bucket, NULL, SKIP_NONE);
319 }
320 
321 static void
322 bucket_zone_drain(void)
323 {
324 	struct uma_bucket_zone *ubz;
325 
326 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
327 		zone_drain(ubz->ubz_zone);
328 }
329 
330 
331 /*
332  * Routine called by the callout subsystem to fire off time-interval based
333  * calculations.  (stats, hash size, etc.)
334  *
335  * Arguments:
336  *	arg   Unused
337  *
338  * Returns:
339  *	Nothing
340  */
341 static void
342 uma_timeout(void *unused)
343 {
344 	bucket_enable();
345 	zone_foreach(zone_timeout);
346 
347 	/* Reschedule this event */
348 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
349 }
350 
351 /*
352  * Routine to perform timeout driven calculations.  This expands the
353  * hashes and does per cpu statistics aggregation.
354  *
355  *  Arguments:
356  *	zone  The zone to operate on
357  *
358  *  Returns:
359  *	Nothing
360  */
361 static void
362 zone_timeout(uma_zone_t zone)
363 {
364 	uma_keg_t keg;
365 	uma_cache_t cache;
366 	u_int64_t alloc;
367 	int cpu;
368 
369 	keg = zone->uz_keg;
370 	alloc = 0;
371 
372 	/*
373 	 * Aggregate per cpu cache statistics back to the zone.
374 	 *
375 	 * XXX This should be done in the sysctl handler.
376 	 *
377 	 * I may rewrite this to set a flag in the per cpu cache instead of
378 	 * locking.  If the flag is not cleared on the next round I will have
379 	 * to lock and do it here instead so that the statistics don't get too
380 	 * far out of sync.
381 	 */
382 	if (!(keg->uk_flags & UMA_ZFLAG_INTERNAL)) {
383 		for (cpu = 0; cpu <= mp_maxid; cpu++) {
384 			if (CPU_ABSENT(cpu))
385 				continue;
386 			CPU_LOCK(cpu);
387 			cache = &zone->uz_cpu[cpu];
388 			/* Add them up, and reset */
389 			alloc += cache->uc_allocs;
390 			cache->uc_allocs = 0;
391 			CPU_UNLOCK(cpu);
392 		}
393 	}
394 
395 	/* Now push these stats back into the zone.. */
396 	ZONE_LOCK(zone);
397 	zone->uz_allocs += alloc;
398 
399 	/*
400 	 * Expand the zone hash table.
401 	 *
402 	 * This is done if the number of slabs is larger than the hash size.
403  * What I'm trying to do here is eliminate collisions entirely.  This
404 	 * may be a little aggressive.  Should I allow for two collisions max?
405 	 */
406 
407 	if (keg->uk_flags & UMA_ZONE_HASH &&
408 	    keg->uk_pages / keg->uk_ppera >= keg->uk_hash.uh_hashsize) {
409 		struct uma_hash newhash;
410 		struct uma_hash oldhash;
411 		int ret;
412 
413 		/*
414 		 * This is so involved because allocating and freeing
415 		 * while the zone lock is held will lead to deadlock.
416 		 * I have to do everything in stages and check for
417 		 * races.
418 		 */
419 		newhash = keg->uk_hash;
420 		ZONE_UNLOCK(zone);
421 		ret = hash_alloc(&newhash);
422 		ZONE_LOCK(zone);
423 		if (ret) {
424 			if (hash_expand(&keg->uk_hash, &newhash)) {
425 				oldhash = keg->uk_hash;
426 				keg->uk_hash = newhash;
427 			} else
428 				oldhash = newhash;
429 
430 			ZONE_UNLOCK(zone);
431 			hash_free(&oldhash);
432 			ZONE_LOCK(zone);
433 		}
434 	}
435 	ZONE_UNLOCK(zone);
436 }
437 
438 /*
439  * Allocate and zero fill the next sized hash table from the appropriate
440  * backing store.
441  *
442  * Arguments:
443  *	hash  A new hash structure with the old hash size in uh_hashsize
444  *
445  * Returns:
446  *	1 on success and 0 on failure.
447  */
448 static int
449 hash_alloc(struct uma_hash *hash)
450 {
451 	int oldsize;
452 	int alloc;
453 
454 	oldsize = hash->uh_hashsize;
455 
456 	/* We're just going to go to a power of two greater */
457 	if (oldsize)  {
458 		hash->uh_hashsize = oldsize * 2;
459 		alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
460 		hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
461 		    M_UMAHASH, M_NOWAIT);
462 	} else {
463 		alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
464 		hash->uh_slab_hash = uma_zalloc_internal(hashzone, NULL,
465 		    M_WAITOK);
466 		hash->uh_hashsize = UMA_HASH_SIZE_INIT;
467 	}
468 	if (hash->uh_slab_hash) {
469 		bzero(hash->uh_slab_hash, alloc);
470 		hash->uh_hashmask = hash->uh_hashsize - 1;
471 		return (1);
472 	}
473 
474 	return (0);
475 }
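
/*
 * Worked example (illustration only): a keg whose hash has grown to
 * uh_hashsize == 64 passes a copy of it to hash_alloc().  Since oldsize
 * is non-zero, the new table gets 128 heads, or
 * 128 * sizeof(struct slabhead) bytes of M_UMAHASH memory (M_NOWAIT).
 * A first-time caller (oldsize == 0) instead gets a
 * UMA_HASH_SIZE_INIT-entry table from hashzone.  Either way the table is
 * zeroed and uh_hashmask becomes uh_hashsize - 1, so bucket selection is
 * a simple mask of the hashed slab address.
 */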
476 
477 /*
478  * Expands the hash table for HASH zones.  This is done from zone_timeout
479  * to reduce collisions.  This must not be done in the regular allocation
480  * path; otherwise we can recurse on the VM while allocating pages.
481  *
482  * Arguments:
483  *	oldhash  The hash you want to expand
484  *	newhash  The hash structure for the new table
485  *
486  * Returns:
487  *	Nothing
488  *
489  * Discussion:
490  */
491 static int
492 hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
493 {
494 	uma_slab_t slab;
495 	int hval;
496 	int i;
497 
498 	if (!newhash->uh_slab_hash)
499 		return (0);
500 
501 	if (oldhash->uh_hashsize >= newhash->uh_hashsize)
502 		return (0);
503 
504 	/*
505 	 * I need to investigate hash algorithms for resizing without a
506 	 * full rehash.
507 	 */
508 
509 	for (i = 0; i < oldhash->uh_hashsize; i++)
510 		while (!SLIST_EMPTY(&oldhash->uh_slab_hash[i])) {
511 			slab = SLIST_FIRST(&oldhash->uh_slab_hash[i]);
512 			SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[i], us_hlink);
513 			hval = UMA_HASH(newhash, slab->us_data);
514 			SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
515 			    slab, us_hlink);
516 		}
517 
518 	return (1);
519 }
520 
521 /*
522  * Free the hash bucket to the appropriate backing store.
523  *
524  * Arguments:
525  *	hash  The hash structure whose bucket storage (uh_slab_hash) is
526  *	      being freed; uh_hashsize selects the backing store.
527  *
528  * Returns:
529  *	Nothing
530  */
531 static void
532 hash_free(struct uma_hash *hash)
533 {
534 	if (hash->uh_slab_hash == NULL)
535 		return;
536 	if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
537 		uma_zfree_internal(hashzone,
538 		    hash->uh_slab_hash, NULL, SKIP_NONE);
539 	else
540 		free(hash->uh_slab_hash, M_UMAHASH);
541 }
542 
543 /*
544  * Frees all outstanding items in a bucket
545  *
546  * Arguments:
547  *	zone   The zone to free to, must be unlocked.
548  *	bucket The free/alloc bucket with items, cpu queue must be locked.
549  *
550  * Returns:
551  *	Nothing
552  */
553 
554 static void
555 bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
556 {
557 	uma_slab_t slab;
558 	int mzone;
559 	void *item;
560 
561 	if (bucket == NULL)
562 		return;
563 
564 	slab = NULL;
565 	mzone = 0;
566 
567 	/* We have to lookup the slab again for malloc.. */
568 	if (zone->uz_keg->uk_flags & UMA_ZONE_MALLOC)
569 		mzone = 1;
570 
571 	while (bucket->ub_cnt > 0)  {
572 		bucket->ub_cnt--;
573 		item = bucket->ub_bucket[bucket->ub_cnt];
574 #ifdef INVARIANTS
575 		bucket->ub_bucket[bucket->ub_cnt] = NULL;
576 		KASSERT(item != NULL,
577 		    ("bucket_drain: botched ptr, item is NULL"));
578 #endif
579 		/*
580 		 * This is extremely inefficient.  The slab pointer was passed
581 		 * to uma_zfree_arg, but we lost it because the buckets don't
582 		 * hold them.  This will go away when free() gets a size passed
583 		 * to it.
584 		 */
585 		if (mzone)
586 			slab = vtoslab((vm_offset_t)item & (~UMA_SLAB_MASK));
587 		uma_zfree_internal(zone, item, slab, SKIP_DTOR);
588 	}
589 }
590 
591 /*
592  * Drains the per cpu caches for a zone.
593  *
594  * Arguments:
595  *	zone     The zone to drain, must be unlocked.
596  *
597  * Returns:
598  *	Nothing
599  */
600 static void
601 cache_drain(uma_zone_t zone)
602 {
603 	uma_cache_t cache;
604 	int cpu;
605 
606 	/*
607 	 * We have to lock each cpu cache before locking the zone
608 	 */
609 	for (cpu = 0; cpu <= mp_maxid; cpu++) {
610 		if (CPU_ABSENT(cpu))
611 			continue;
612 		CPU_LOCK(cpu);
613 		cache = &zone->uz_cpu[cpu];
614 		bucket_drain(zone, cache->uc_allocbucket);
615 		bucket_drain(zone, cache->uc_freebucket);
616 		if (cache->uc_allocbucket != NULL)
617 			bucket_free(cache->uc_allocbucket);
618 		if (cache->uc_freebucket != NULL)
619 			bucket_free(cache->uc_freebucket);
620 		cache->uc_allocbucket = cache->uc_freebucket = NULL;
621 	}
622 	ZONE_LOCK(zone);
623 	bucket_cache_drain(zone);
624 	ZONE_UNLOCK(zone);
625 	for (cpu = 0; cpu <= mp_maxid; cpu++) {
626 		if (CPU_ABSENT(cpu))
627 			continue;
628 		CPU_UNLOCK(cpu);
629 	}
630 }
631 
632 /*
633  * Drain the cached buckets from a zone.  Expects a locked zone on entry.
634  */
635 static void
636 bucket_cache_drain(uma_zone_t zone)
637 {
638 	uma_bucket_t bucket;
639 
640 	/*
641 	 * Drain the bucket queues and free the buckets, we just keep two per
642 	 * cpu (alloc/free).
643 	 */
644 	while ((bucket = LIST_FIRST(&zone->uz_full_bucket)) != NULL) {
645 		LIST_REMOVE(bucket, ub_link);
646 		ZONE_UNLOCK(zone);
647 		bucket_drain(zone, bucket);
648 		bucket_free(bucket);
649 		ZONE_LOCK(zone);
650 	}
651 
652 	/* Now we do the free queue.. */
653 	while ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
654 		LIST_REMOVE(bucket, ub_link);
655 		bucket_free(bucket);
656 	}
657 }
658 
659 /*
660  * Frees pages from a zone back to the system.  This is done on demand from
661  * the pageout daemon.
662  *
663  * Arguments:
664  *	zone  The zone to free pages from
666  *
667  * Returns:
668  *	Nothing.
669  */
670 static void
671 zone_drain(uma_zone_t zone)
672 {
673 	struct slabhead freeslabs = {};
674 	uma_keg_t keg;
675 	uma_slab_t slab;
676 	uma_slab_t n;
677 	u_int8_t flags;
678 	u_int8_t *mem;
679 	int i;
680 
681 	keg = zone->uz_keg;
682 
683 	/*
684 	 * We don't want to take pages from statically allocated zones at this
685 	 * time
686 	 */
687 	if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
688 		return;
689 
690 	ZONE_LOCK(zone);
691 
692 #ifdef UMA_DEBUG
693 	printf("%s free items: %u\n", zone->uz_name, keg->uk_free);
694 #endif
695 	bucket_cache_drain(zone);
696 	if (keg->uk_free == 0)
697 		goto finished;
698 
699 	slab = LIST_FIRST(&keg->uk_free_slab);
700 	while (slab) {
701 		n = LIST_NEXT(slab, us_link);
702 
703 		/* We have nowhere to free these to. */
704 		if (slab->us_flags & UMA_SLAB_BOOT) {
705 			slab = n;
706 			continue;
707 		}
708 
709 		LIST_REMOVE(slab, us_link);
710 		keg->uk_pages -= keg->uk_ppera;
711 		keg->uk_free -= keg->uk_ipers;
712 
713 		if (keg->uk_flags & UMA_ZONE_HASH)
714 			UMA_HASH_REMOVE(&keg->uk_hash, slab, slab->us_data);
715 
716 		SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);
717 
718 		slab = n;
719 	}
720 finished:
721 	ZONE_UNLOCK(zone);
722 
723 	while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
724 		SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
725 		if (keg->uk_fini)
726 			for (i = 0; i < keg->uk_ipers; i++)
727 				keg->uk_fini(
728 				    slab->us_data + (keg->uk_rsize * i),
729 				    keg->uk_size);
730 		flags = slab->us_flags;
731 		mem = slab->us_data;
732 
733 		if ((keg->uk_flags & UMA_ZONE_MALLOC) ||
734 		    (keg->uk_flags & UMA_ZONE_REFCNT)) {
735 			vm_object_t obj;
736 
737 			if (flags & UMA_SLAB_KMEM)
738 				obj = kmem_object;
739 			else
740 				obj = NULL;
741 			for (i = 0; i < keg->uk_ppera; i++)
742 				vsetobj((vm_offset_t)mem + (i * PAGE_SIZE),
743 				    obj);
744 		}
745 		if (keg->uk_flags & UMA_ZONE_OFFPAGE)
746 			uma_zfree_internal(keg->uk_slabzone, slab, NULL,
747 			    SKIP_NONE);
748 #ifdef UMA_DEBUG
749 		printf("%s: Returning %d bytes.\n",
750 		    zone->uz_name, UMA_SLAB_SIZE * keg->uk_ppera);
751 #endif
752 		keg->uk_freef(mem, UMA_SLAB_SIZE * keg->uk_ppera, flags);
753 	}
754 }
755 
756 /*
757  * Allocate a new slab for a zone.  This does not insert the slab onto a list.
758  *
759  * Arguments:
760  *	zone  The zone to allocate slabs for
761  *	wait  Shall we wait?
762  *
763  * Returns:
764  *	The slab that was allocated or NULL if there is no memory and the
765  *	caller specified M_NOWAIT.
766  */
767 static uma_slab_t
768 slab_zalloc(uma_zone_t zone, int wait)
769 {
770 	uma_slabrefcnt_t slabref;
771 	uma_slab_t slab;
772 	uma_keg_t keg;
773 	u_int8_t *mem;
774 	u_int8_t flags;
775 	int i;
776 
777 	slab = NULL;
778 	keg = zone->uz_keg;
779 
780 #ifdef UMA_DEBUG
781 	printf("slab_zalloc:  Allocating a new slab for %s\n", zone->uz_name);
782 #endif
783 	ZONE_UNLOCK(zone);
784 
785 	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
786 		slab = uma_zalloc_internal(keg->uk_slabzone, NULL, wait);
787 		if (slab == NULL) {
788 			ZONE_LOCK(zone);
789 			return (NULL);
790 		}
791 	}
792 
793 	/*
794 	 * This reproduces the old vm_zone behavior of zero filling pages the
795 	 * first time they are added to a zone.
796 	 *
797 	 * Malloced items are zeroed in uma_zalloc.
798 	 */
799 
800 	if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
801 		wait |= M_ZERO;
802 	else
803 		wait &= ~M_ZERO;
804 
805 	mem = keg->uk_allocf(zone, keg->uk_ppera * UMA_SLAB_SIZE,
806 	    &flags, wait);
807 	if (mem == NULL) {
808 		if (keg->uk_flags & UMA_ZONE_OFFPAGE)
809 			uma_zfree_internal(keg->uk_slabzone, slab, NULL, SKIP_NONE);
810 		ZONE_LOCK(zone);
811 		return (NULL);
812 	}
813 
814 	/* Point the slab into the allocated memory */
815 	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
816 		slab = (uma_slab_t)(mem + keg->uk_pgoff);
817 
818 	if ((keg->uk_flags & UMA_ZONE_MALLOC) ||
819 	    (keg->uk_flags & UMA_ZONE_REFCNT))
820 		for (i = 0; i < keg->uk_ppera; i++)
821 			vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);
822 
823 	slab->us_keg = keg;
824 	slab->us_data = mem;
825 	slab->us_freecount = keg->uk_ipers;
826 	slab->us_firstfree = 0;
827 	slab->us_flags = flags;
828 
829 	if (keg->uk_flags & UMA_ZONE_REFCNT) {
830 		slabref = (uma_slabrefcnt_t)slab;
831 		for (i = 0; i < keg->uk_ipers; i++) {
832 			slabref->us_freelist[i].us_refcnt = 0;
833 			slabref->us_freelist[i].us_item = i+1;
834 		}
835 	} else {
836 		for (i = 0; i < keg->uk_ipers; i++)
837 			slab->us_freelist[i].us_item = i+1;
838 	}
839 
840 	if (keg->uk_init != NULL) {
841 		for (i = 0; i < keg->uk_ipers; i++)
842 			if (keg->uk_init(slab->us_data + (keg->uk_rsize * i),
843 			    keg->uk_size, wait) != 0)
844 				break;
845 		if (i != keg->uk_ipers) {
846 			if (keg->uk_fini != NULL) {
847 				for (i--; i > -1; i--)
848 					keg->uk_fini(slab->us_data +
849 					    (keg->uk_rsize * i),
850 					    keg->uk_size);
851 			}
852 			if ((keg->uk_flags & UMA_ZONE_MALLOC) ||
853 			    (keg->uk_flags & UMA_ZONE_REFCNT))
854 				for (i = 0; i < keg->uk_ppera; i++)
855 					vsetobj((vm_offset_t)mem +
856 					    (i * PAGE_SIZE), NULL);
857 			if (keg->uk_flags & UMA_ZONE_OFFPAGE)
858 				uma_zfree_internal(keg->uk_slabzone, slab,
859 				    NULL, SKIP_NONE);
860 			keg->uk_freef(mem, UMA_SLAB_SIZE * keg->uk_ppera,
861 			    flags);
862 			ZONE_LOCK(zone);
863 			return (NULL);
864 		}
865 	}
866 	ZONE_LOCK(zone);
867 
868 	if (keg->uk_flags & UMA_ZONE_HASH)
869 		UMA_HASH_INSERT(&keg->uk_hash, slab, mem);
870 
871 	keg->uk_pages += keg->uk_ppera;
872 	keg->uk_free += keg->uk_ipers;
873 
874 	return (slab);
875 }
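
/*
 * Note on the free-list encoding initialized above (illustration only):
 * us_firstfree and us_freelist[i].us_item thread an implicit singly
 * linked list of free item indices through the slab header.  A freshly
 * built slab with uk_ipers == 4 looks like:
 *
 *	us_firstfree = 0
 *	us_freelist[0].us_item = 1
 *	us_freelist[1].us_item = 2
 *	us_freelist[2].us_item = 3
 *	us_freelist[3].us_item = 4
 *
 * uma_slab_alloc() pops index 0 first (us_firstfree becomes 1, and so
 * on); the item's address is us_data + uk_rsize * index.
 */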
876 
877 /*
878  * This function is intended to be used early on in place of page_alloc() so
879  * that we may use the boot time page cache to satisfy allocations before
880  * the VM is ready.
881  */
882 static void *
883 startup_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
884 {
885 	uma_keg_t keg;
886 
887 	keg = zone->uz_keg;
888 
889 	/*
890 	 * Check our small startup cache to see if it has pages remaining.
891 	 */
892 	mtx_lock(&uma_mtx);
893 	if (uma_boot_free != 0) {
894 		uma_slab_t tmps;
895 
896 		tmps = LIST_FIRST(&uma_boot_pages);
897 		LIST_REMOVE(tmps, us_link);
898 		uma_boot_free--;
899 		mtx_unlock(&uma_mtx);
900 		*pflag = tmps->us_flags;
901 		return (tmps->us_data);
902 	}
903 	mtx_unlock(&uma_mtx);
904 	if (booted == 0)
905 		panic("UMA: Increase UMA_BOOT_PAGES");
906 	/*
907 	 * Now that we've booted, reset these users to their real allocator.
908 	 */
909 #ifdef UMA_MD_SMALL_ALLOC
910 	keg->uk_allocf = uma_small_alloc;
911 #else
912 	keg->uk_allocf = page_alloc;
913 #endif
914 	return (keg->uk_allocf(zone, bytes, pflag, wait));
915 }
916 
917 /*
918  * Allocates a number of pages from the system
919  *
920  * Arguments:
921  *	zone  Unused
922  *	bytes  The number of bytes requested
923  *	wait  Shall we wait?
924  *
925  * Returns:
926  *	A pointer to the alloced memory or possibly
927  *	NULL if M_NOWAIT is set.
928  */
929 static void *
930 page_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
931 {
932 	void *p;	/* Returned page */
933 
934 	*pflag = UMA_SLAB_KMEM;
935 	p = (void *) kmem_malloc(kmem_map, bytes, wait);
936 
937 	return (p);
938 }
939 
940 /*
941  * Allocates a number of pages from within an object
942  *
943  * Arguments:
944  *	zone   Unused
945  *	bytes  The number of bytes requested
946  *	wait   Shall we wait?
947  *
948  * Returns:
949  *	A pointer to the alloced memory or possibly
950  *	NULL if M_NOWAIT is set.
951  */
952 static void *
953 obj_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
954 {
955 	vm_object_t object;
956 	vm_offset_t retkva, zkva;
957 	vm_page_t p;
958 	int pages, startpages;
959 
960 	object = zone->uz_keg->uk_obj;
961 	retkva = 0;
962 
963 	/*
964 	 * This looks a little weird since we're getting one page at a time.
965 	 */
966 	VM_OBJECT_LOCK(object);
967 	p = TAILQ_LAST(&object->memq, pglist);
968 	pages = p != NULL ? p->pindex + 1 : 0;
969 	startpages = pages;
970 	zkva = zone->uz_keg->uk_kva + pages * PAGE_SIZE;
971 	for (; bytes > 0; bytes -= PAGE_SIZE) {
972 		p = vm_page_alloc(object, pages,
973 		    VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED);
974 		if (p == NULL) {
975 			if (pages != startpages)
976 				pmap_qremove(retkva, pages - startpages);
977 			while (pages != startpages) {
978 				pages--;
979 				p = TAILQ_LAST(&object->memq, pglist);
980 				vm_page_lock_queues();
981 				vm_page_unwire(p, 0);
982 				vm_page_free(p);
983 				vm_page_unlock_queues();
984 			}
985 			retkva = 0;
986 			goto done;
987 		}
988 		pmap_qenter(zkva, &p, 1);
989 		if (retkva == 0)
990 			retkva = zkva;
991 		zkva += PAGE_SIZE;
992 		pages += 1;
993 	}
994 done:
995 	VM_OBJECT_UNLOCK(object);
996 	*flags = UMA_SLAB_PRIV;
997 
998 	return ((void *)retkva);
999 }
1000 
1001 /*
1002  * Frees a number of pages to the system
1003  *
1004  * Arguments:
1005  *	mem   A pointer to the memory to be freed
1006  *	size  The size of the memory being freed
1007  *	flags The original p->us_flags field
1008  *
1009  * Returns:
1010  *	Nothing
1011  */
1012 static void
1013 page_free(void *mem, int size, u_int8_t flags)
1014 {
1015 	vm_map_t map;
1016 
1017 	if (flags & UMA_SLAB_KMEM)
1018 		map = kmem_map;
1019 	else
1020 		panic("UMA: page_free used with invalid flags %d\n", flags);
1021 
1022 	kmem_free(map, (vm_offset_t)mem, size);
1023 }
1024 
1025 /*
1026  * Zero fill initializer
1027  *
1028  * Arguments/Returns follow uma_init specifications
1029  */
1030 static int
1031 zero_init(void *mem, int size, int flags)
1032 {
1033 	bzero(mem, size);
1034 	return (0);
1035 }
1036 
1037 /*
1038  * Finish creating a small uma zone.  This calculates ipers, and the zone size.
1039  *
1040  * Arguments
1041  *	zone  The zone we should initialize
1042  *
1043  * Returns
1044  *	Nothing
1045  */
1046 static void
1047 zone_small_init(uma_zone_t zone)
1048 {
1049 	uma_keg_t keg;
1050 	u_int rsize;
1051 	u_int memused;
1052 	u_int wastedspace;
1053 	u_int shsize;
1054 
1055 	keg = zone->uz_keg;
1056 	KASSERT(keg != NULL, ("Keg is null in zone_small_init"));
1057 	rsize = keg->uk_size;
1058 
1059 	if (rsize < UMA_SMALLEST_UNIT)
1060 		rsize = UMA_SMALLEST_UNIT;
1061 	if (rsize & keg->uk_align)
1062 		rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1);
1063 
1064 	keg->uk_rsize = rsize;
1065 	keg->uk_ppera = 1;
1066 
1067 	if (keg->uk_flags & UMA_ZONE_REFCNT) {
1068 		rsize += UMA_FRITMREF_SZ;	/* linkage & refcnt */
1069 		shsize = sizeof(struct uma_slab_refcnt);
1070 	} else {
1071 		rsize += UMA_FRITM_SZ;	/* Account for linkage */
1072 		shsize = sizeof(struct uma_slab);
1073 	}
1074 
1075 	keg->uk_ipers = (UMA_SLAB_SIZE - shsize) / rsize;
1076 	KASSERT(keg->uk_ipers != 0, ("zone_small_init: ipers is 0"));
1077 	memused = keg->uk_ipers * rsize + shsize;
1078 	wastedspace = UMA_SLAB_SIZE - memused;
1079 
1080 	/*
1081 	 * We can't do OFFPAGE if we're internal or if we've been asked
1082 	 * not to go to the VM for buckets.  Doing so could send us to
1083 	 * the VM (kmem_map) for the slabs themselves, which is exactly
1084 	 * what UMA_ZFLAG_CACHEONLY (set as a result of UMA_ZONE_VM)
1085 	 * forbids.
1086 	 */
1087 	if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
1088 	    (keg->uk_flags & UMA_ZFLAG_CACHEONLY))
1089 		return;
1090 
1091 	if ((wastedspace >= UMA_MAX_WASTE) &&
1092 	    (keg->uk_ipers < (UMA_SLAB_SIZE / keg->uk_rsize))) {
1093 		keg->uk_ipers = UMA_SLAB_SIZE / keg->uk_rsize;
1094 		KASSERT(keg->uk_ipers <= 255,
1095 		    ("zone_small_init: keg->uk_ipers too high!"));
1096 #ifdef UMA_DEBUG
1097 		printf("UMA decided we need offpage slab headers for "
1098 		    "zone: %s, calculated wastedspace = %d, "
1099 		    "maximum wasted space allowed = %d, "
1100 		    "calculated ipers = %d, "
1101 		    "new wasted space = %d\n", zone->uz_name, wastedspace,
1102 		    UMA_MAX_WASTE, keg->uk_ipers,
1103 		    UMA_SLAB_SIZE - keg->uk_ipers * keg->uk_rsize);
1104 #endif
1105 		keg->uk_flags |= UMA_ZONE_OFFPAGE;
1106 		if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
1107 			keg->uk_flags |= UMA_ZONE_HASH;
1108 	}
1109 }
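
/*
 * Worked example for zone_small_init() (all numbers illustrative,
 * assuming UMA_SLAB_SIZE == 4096, an in-page header plus free list of
 * roughly 64 bytes, and UMA_MAX_WASTE of about a tenth of the slab):
 *
 *	A 2048-byte item packs ipers = (4096 - 64) / 2048 = 1 and wastes
 *	almost 2000 bytes.  That exceeds UMA_MAX_WASTE, and
 *	UMA_SLAB_SIZE / uk_rsize = 2 > 1, so the keg is switched to
 *	UMA_ZONE_OFFPAGE (plus UMA_ZONE_HASH unless it is a malloc zone)
 *	and packs two items per slab with the header kept elsewhere.
 *
 *	A 1000-byte item packs four items with little waste and keeps its
 *	slab header in the page.
 */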
1110 
1111 /*
1112  * Finish creating a large (> UMA_SLAB_SIZE) uma zone.  Just give in and do
1113  * OFFPAGE for now.  When I can allow for more dynamic slab sizes this will be
1114  * more complicated.
1115  *
1116  * Arguments
1117  *	zone  The zone we should initialize
1118  *
1119  * Returns
1120  *	Nothing
1121  */
1122 static void
1123 zone_large_init(uma_zone_t zone)
1124 {
1125 	uma_keg_t keg;
1126 	int pages;
1127 
1128 	keg = zone->uz_keg;
1129 
1130 	KASSERT(keg != NULL, ("Keg is null in zone_large_init"));
1131 	KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0,
1132 	    ("zone_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY zone"));
1133 
1134 	pages = keg->uk_size / UMA_SLAB_SIZE;
1135 
1136 	/* Account for remainder */
1137 	if ((pages * UMA_SLAB_SIZE) < keg->uk_size)
1138 		pages++;
1139 
1140 	keg->uk_ppera = pages;
1141 	keg->uk_ipers = 1;
1142 
1143 	keg->uk_flags |= UMA_ZONE_OFFPAGE;
1144 	if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
1145 		keg->uk_flags |= UMA_ZONE_HASH;
1146 
1147 	keg->uk_rsize = keg->uk_size;
1148 }
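
/*
 * Worked example for zone_large_init() (illustration only): a keg with
 * uk_size == 3 * UMA_SLAB_SIZE + 100 computes pages = 3, the remainder
 * check bumps it to 4, so uk_ppera = 4 and uk_ipers = 1: every slab is a
 * single four-page allocation holding one item, with the slab header
 * (and hash linkage, unless this is a malloc zone) kept off-page.
 */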
1149 
1150 /*
1151  * Keg header ctor.  This initializes all fields, locks, etc.  And inserts
1152  * the keg onto the global keg list.
1153  *
1154  * Arguments/Returns follow uma_ctor specifications
1155  *	udata  Actually uma_kctor_args
1156  */
1157 static int
1158 keg_ctor(void *mem, int size, void *udata, int flags)
1159 {
1160 	struct uma_kctor_args *arg = udata;
1161 	uma_keg_t keg = mem;
1162 	uma_zone_t zone;
1163 
1164 	bzero(keg, size);
1165 	keg->uk_size = arg->size;
1166 	keg->uk_init = arg->uminit;
1167 	keg->uk_fini = arg->fini;
1168 	keg->uk_align = arg->align;
1169 	keg->uk_free = 0;
1170 	keg->uk_pages = 0;
1171 	keg->uk_flags = arg->flags;
1172 	keg->uk_allocf = page_alloc;
1173 	keg->uk_freef = page_free;
1174 	keg->uk_recurse = 0;
1175 	keg->uk_slabzone = NULL;
1176 
1177 	/*
1178 	 * The master zone is passed to us at keg-creation time.
1179 	 */
1180 	zone = arg->zone;
1181 	zone->uz_keg = keg;
1182 
1183 	if (arg->flags & UMA_ZONE_VM)
1184 		keg->uk_flags |= UMA_ZFLAG_CACHEONLY;
1185 
1186 	if (arg->flags & UMA_ZONE_ZINIT)
1187 		keg->uk_init = zero_init;
1188 
1189 	/*
1190 	 * The +UMA_FRITM_SZ added to uk_size is to account for the
1191 	 * linkage that is added to the size in zone_small_init().  If
1192 	 * we don't account for this here then we may end up in
1193 	 * zone_small_init() with a calculated 'ipers' of 0.
1194 	 */
1195 	if (keg->uk_flags & UMA_ZONE_REFCNT) {
1196 		if ((keg->uk_size+UMA_FRITMREF_SZ) >
1197 		    (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt)))
1198 			zone_large_init(zone);
1199 		else
1200 			zone_small_init(zone);
1201 	} else {
1202 		if ((keg->uk_size+UMA_FRITM_SZ) >
1203 		    (UMA_SLAB_SIZE - sizeof(struct uma_slab)))
1204 			zone_large_init(zone);
1205 		else
1206 			zone_small_init(zone);
1207 	}
1208 
1209 	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
1210 		if (keg->uk_flags & UMA_ZONE_REFCNT)
1211 			keg->uk_slabzone = slabrefzone;
1212 		else
1213 			keg->uk_slabzone = slabzone;
1214 	}
1215 
1216 	/*
1217 	 * If we haven't booted yet we need allocations to go through the
1218 	 * startup cache until the vm is ready.
1219 	 */
1220 	if (keg->uk_ppera == 1) {
1221 #ifdef UMA_MD_SMALL_ALLOC
1222 		keg->uk_allocf = uma_small_alloc;
1223 		keg->uk_freef = uma_small_free;
1224 #endif
1225 		if (booted == 0)
1226 			keg->uk_allocf = startup_alloc;
1227 	}
1228 
1229 	/*
1230 	 * Initialize the keg's lock (shared among zones) through the
1231 	 * master zone.
1232 	 */
1233 	zone->uz_lock = &keg->uk_lock;
1234 	if (arg->flags & UMA_ZONE_MTXCLASS)
1235 		ZONE_LOCK_INIT(zone, 1);
1236 	else
1237 		ZONE_LOCK_INIT(zone, 0);
1238 
1239 	/*
1240 	 * If we're putting the slab header in the actual page we need to
1241 	 * figure out where in each page it goes.  This calculates a right
1242 	 * justified offset into the memory on an ALIGN_PTR boundary.
1243 	 */
1244 	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) {
1245 		u_int totsize;
1246 
1247 		/* Size of the slab struct and free list */
1248 		if (keg->uk_flags & UMA_ZONE_REFCNT)
1249 			totsize = sizeof(struct uma_slab_refcnt) +
1250 			    keg->uk_ipers * UMA_FRITMREF_SZ;
1251 		else
1252 			totsize = sizeof(struct uma_slab) +
1253 			    keg->uk_ipers * UMA_FRITM_SZ;
1254 
1255 		if (totsize & UMA_ALIGN_PTR)
1256 			totsize = (totsize & ~UMA_ALIGN_PTR) +
1257 			    (UMA_ALIGN_PTR + 1);
1258 		keg->uk_pgoff = UMA_SLAB_SIZE - totsize;
1259 
1260 		if (keg->uk_flags & UMA_ZONE_REFCNT)
1261 			totsize = keg->uk_pgoff + sizeof(struct uma_slab_refcnt)
1262 			    + keg->uk_ipers * UMA_FRITMREF_SZ;
1263 		else
1264 			totsize = keg->uk_pgoff + sizeof(struct uma_slab)
1265 			    + keg->uk_ipers * UMA_FRITM_SZ;
1266 
1267 		/*
1268 		 * The only way the following is possible is if our
1269 		 * UMA_ALIGN_PTR adjustments have pushed the total past
1270 		 * UMA_SLAB_SIZE.  I haven't checked whether this is
1271 		 * mathematically possible for all cases, so we make
1272 		 * sure here anyway.
1273 		 */
1274 		if (totsize > UMA_SLAB_SIZE) {
1275 			printf("zone %s ipers %d rsize %d size %d\n",
1276 			    zone->uz_name, keg->uk_ipers, keg->uk_rsize,
1277 			    keg->uk_size);
1278 			panic("UMA slab won't fit.\n");
1279 		}
1280 	}
1281 
1282 	if (keg->uk_flags & UMA_ZONE_HASH)
1283 		hash_alloc(&keg->uk_hash);
1284 
1285 #ifdef UMA_DEBUG
1286 	printf("%s(%p) size = %d ipers = %d ppera = %d pgoff = %d\n",
1287 	    zone->uz_name, zone,
1288 	    keg->uk_size, keg->uk_ipers,
1289 	    keg->uk_ppera, keg->uk_pgoff);
1290 #endif
1291 
1292 	LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);
1293 
1294 	mtx_lock(&uma_mtx);
1295 	LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
1296 	mtx_unlock(&uma_mtx);
1297 	return (0);
1298 }
1299 
1300 /*
1301  * Zone header ctor.  This initializes all fields, locks, etc.
1302  *
1303  * Arguments/Returns follow uma_ctor specifications
1304  *	udata  Actually uma_zctor_args
1305  */
1306 
1307 static int
1308 zone_ctor(void *mem, int size, void *udata, int flags)
1309 {
1310 	struct uma_zctor_args *arg = udata;
1311 	uma_zone_t zone = mem;
1312 	uma_zone_t z;
1313 	uma_keg_t keg;
1314 
1315 	bzero(zone, size);
1316 	zone->uz_name = arg->name;
1317 	zone->uz_ctor = arg->ctor;
1318 	zone->uz_dtor = arg->dtor;
1319 	zone->uz_init = NULL;
1320 	zone->uz_fini = NULL;
1321 	zone->uz_allocs = 0;
1322 	zone->uz_fills = zone->uz_count = 0;
1323 
1324 	if (arg->flags & UMA_ZONE_SECONDARY) {
1325 		KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
1326 		keg = arg->keg;
1327 		zone->uz_keg = keg;
1328 		zone->uz_init = arg->uminit;
1329 		zone->uz_fini = arg->fini;
1330 		zone->uz_lock = &keg->uk_lock;
1331 		mtx_lock(&uma_mtx);
1332 		ZONE_LOCK(zone);
1333 		keg->uk_flags |= UMA_ZONE_SECONDARY;
1334 		LIST_FOREACH(z, &keg->uk_zones, uz_link) {
1335 			if (LIST_NEXT(z, uz_link) == NULL) {
1336 				LIST_INSERT_AFTER(z, zone, uz_link);
1337 				break;
1338 			}
1339 		}
1340 		ZONE_UNLOCK(zone);
1341 		mtx_unlock(&uma_mtx);
1342 	} else if (arg->keg == NULL) {
1343 		if (uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
1344 		    arg->align, arg->flags) == NULL)
1345 			return (ENOMEM);
1346 	} else {
1347 		struct uma_kctor_args karg;
1348 		int error;
1349 
1350 		/* We should only be here from uma_startup() */
1351 		karg.size = arg->size;
1352 		karg.uminit = arg->uminit;
1353 		karg.fini = arg->fini;
1354 		karg.align = arg->align;
1355 		karg.flags = arg->flags;
1356 		karg.zone = zone;
1357 		error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
1358 		    flags);
1359 		if (error)
1360 			return (error);
1361 	}
1362 	keg = zone->uz_keg;
1363 	zone->uz_lock = &keg->uk_lock;
1364 
1365 	/*
1366 	 * Some internal zones don't have room allocated for the per cpu
1367 	 * caches.  If we're internal, bail out here.
1368 	 */
1369 	if (keg->uk_flags & UMA_ZFLAG_INTERNAL) {
1370 		KASSERT((keg->uk_flags & UMA_ZONE_SECONDARY) == 0,
1371 		    ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
1372 		return (0);
1373 	}
1374 
1375 	if (keg->uk_flags & UMA_ZONE_MAXBUCKET)
1376 		zone->uz_count = BUCKET_MAX;
1377 	else if (keg->uk_ipers <= BUCKET_MAX)
1378 		zone->uz_count = keg->uk_ipers;
1379 	else
1380 		zone->uz_count = BUCKET_MAX;
1381 	return (0);
1382 }
1383 
1384 /*
1385  * Keg header dtor.  This frees all data, destroys locks, frees the hash
1386  * table and removes the keg from the global list.
1387  *
1388  * Arguments/Returns follow uma_dtor specifications
1389  *	udata  unused
1390  */
1391 static void
1392 keg_dtor(void *arg, int size, void *udata)
1393 {
1394 	uma_keg_t keg;
1395 
1396 	keg = (uma_keg_t)arg;
1397 	mtx_lock(&keg->uk_lock);
1398 	if (keg->uk_free != 0) {
1399 		printf("Freed UMA keg was not empty (%d items). "
1400 		    " Lost %d pages of memory.\n",
1401 		    keg->uk_free, keg->uk_pages);
1402 	}
1403 	mtx_unlock(&keg->uk_lock);
1404 
1405 	if (keg->uk_flags & UMA_ZONE_HASH)
1406 		hash_free(&keg->uk_hash);
1407 
1408 	mtx_destroy(&keg->uk_lock);
1409 }
1410 
1411 /*
1412  * Zone header dtor.
1413  *
1414  * Arguments/Returns follow uma_dtor specifications
1415  *	udata  unused
1416  */
1417 static void
1418 zone_dtor(void *arg, int size, void *udata)
1419 {
1420 	uma_zone_t zone;
1421 	uma_keg_t keg;
1422 
1423 	zone = (uma_zone_t)arg;
1424 	keg = zone->uz_keg;
1425 
1426 	if (!(keg->uk_flags & UMA_ZFLAG_INTERNAL))
1427 		cache_drain(zone);
1428 
1429 	mtx_lock(&uma_mtx);
1430 	zone_drain(zone);
1431 	if (keg->uk_flags & UMA_ZONE_SECONDARY) {
1432 		LIST_REMOVE(zone, uz_link);
1433 		/*
1434 		 * XXX there are some races here where
1435 		 * the zone can be drained but zone lock
1436 		 * released and then refilled before we
1437 		 * remove it... we don't care for now.
1438 		 */
1439 		ZONE_LOCK(zone);
1440 		if (LIST_EMPTY(&keg->uk_zones))
1441 			keg->uk_flags &= ~UMA_ZONE_SECONDARY;
1442 		ZONE_UNLOCK(zone);
1443 		mtx_unlock(&uma_mtx);
1444 	} else {
1445 		LIST_REMOVE(keg, uk_link);
1446 		LIST_REMOVE(zone, uz_link);
1447 		mtx_unlock(&uma_mtx);
1448 		uma_zfree_internal(kegs, keg, NULL, SKIP_NONE);
1449 	}
1450 	zone->uz_keg = NULL;
1451 }
1452 
1453 /*
1454  * Traverses every zone in the system and calls a callback
1455  *
1456  * Arguments:
1457  *	zfunc  A pointer to a function which accepts a zone
1458  *		as an argument.
1459  *
1460  * Returns:
1461  *	Nothing
1462  */
1463 static void
1464 zone_foreach(void (*zfunc)(uma_zone_t))
1465 {
1466 	uma_keg_t keg;
1467 	uma_zone_t zone;
1468 
1469 	mtx_lock(&uma_mtx);
1470 	LIST_FOREACH(keg, &uma_kegs, uk_link) {
1471 		LIST_FOREACH(zone, &keg->uk_zones, uz_link)
1472 			zfunc(zone);
1473 	}
1474 	mtx_unlock(&uma_mtx);
1475 }
1476 
1477 /* Public functions */
1478 /* See uma.h */
1479 void
1480 uma_startup(void *bootmem)
1481 {
1482 	struct uma_zctor_args args;
1483 	uma_slab_t slab;
1484 	u_int slabsize;
1485 	u_int objsize, totsize, wsize;
1486 	int i;
1487 
1488 #ifdef UMA_DEBUG
1489 	printf("Creating uma keg headers zone and keg.\n");
1490 #endif
1491 	/*
1492 	 * The general UMA lock is a recursion-allowed lock because
1493 	 * there is a code path where, while we're still configured
1494 	 * to use startup_alloc() for backend page allocations, we
1495 	 * may end up in uma_reclaim() which calls zone_foreach(zone_drain),
1496 	 * which grabs uma_mtx, only to later call into startup_alloc()
1497 	 * because while freeing we needed to allocate a bucket.  Since
1498 	 * startup_alloc() also takes uma_mtx, we need to be able to
1499 	 * recurse on it.
1500 	 */
1501 	mtx_init(&uma_mtx, "UMA lock", NULL, MTX_DEF | MTX_RECURSE);
1502 
1503 	/*
1504 	 * Figure out the maximum number of items-per-slab we'll have if
1505 	 * we're using the OFFPAGE slab header to track free items, given
1506 	 * all possible object sizes and the maximum desired wastage
1507 	 * (UMA_MAX_WASTE).
1508 	 *
1509 	 * We iterate until we find an object size for
1510 	 * which the calculated wastage in zone_small_init() will be
1511 	 * enough to warrant OFFPAGE.  Since wastedspace versus objsize
1512 	 * is an overall increasing see-saw function, we find the smallest
1513 	 * objsize such that the wastage is always acceptable for objects
1514 	 * with that objsize or smaller.  Since a smaller objsize always
1515 	 * generates a larger possible uma_max_ipers, we use this computed
1516 	 * objsize to calculate the largest ipers possible.  Since the
1517 	 * ipers calculated for OFFPAGE slab headers is always larger than
1518 	 * the ipers initially calculated in zone_small_init(), we use
1519 	 * the former's equation (UMA_SLAB_SIZE / keg->uk_rsize) to
1520 	 * obtain the maximum ipers possible for offpage slab headers.
1521 	 *
1522 	 * It should be noted that ipers versus objsize is an inversely
1523 	 * proportional function which drops off rather quickly so as
1524 	 * long as our UMA_MAX_WASTE is such that the objsize we calculate
1525 	 * falls into the portion of the inverse relation AFTER the steep
1526 	 * falloff, then uma_max_ipers shouldn't be too high (~10 on i386).
1527 	 *
1528 	 * Note that we have 8-bits (1 byte) to use as a freelist index
1529 	 * inside the actual slab header itself and this is enough to
1530 	 * accommodate us.  In the worst case, a UMA_SMALLEST_UNIT sized
1531 	 * object with offpage slab header would have ipers =
1532 	 * UMA_SLAB_SIZE / UMA_SMALLEST_UNIT (currently = 256), which is
1533 	 * 1 greater than what our byte-integer freelist index can
1534 	 * accommodate, but we know that this situation never occurs as
1535 	 * for UMA_SMALLEST_UNIT-sized objects, we will never calculate
1536 	 * that we need to go to offpage slab headers.  Or, if we do,
1537 	 * then we trap that condition below and panic in the INVARIANTS case.
1538 	 */
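	/*
	 * Illustration of the search below (numbers assumed, not taken
	 * from uma_int.h): with a 4K UMA_SLAB_SIZE, an in-page
	 * struct uma_slab of a few dozen bytes and UMA_MAX_WASTE of
	 * roughly a tenth of the slab, the loop walks objsize up from
	 * UMA_SMALLEST_UNIT, each time computing how many bytes a fully
	 * packed in-page slab of (objsize + UMA_FRITM_SZ)-sized items
	 * would actually use.  It stops at the first objsize whose
	 * leftover space exceeds UMA_MAX_WASTE, i.e. the smallest objsize
	 * that zone_small_init() could push off-page, and
	 * uma_max_ipers = UMA_SLAB_SIZE / objsize is then the longest
	 * free list an off-page slab header will ever need.
	 */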
1539 	wsize = UMA_SLAB_SIZE - sizeof(struct uma_slab) - UMA_MAX_WASTE;
1540 	totsize = wsize;
1541 	objsize = UMA_SMALLEST_UNIT;
1542 	while (totsize >= wsize) {
1543 		totsize = (UMA_SLAB_SIZE - sizeof(struct uma_slab)) /
1544 		    (objsize + UMA_FRITM_SZ);
1545 		totsize *= (UMA_FRITM_SZ + objsize);
1546 		objsize++;
1547 	}
1548 	if (objsize > UMA_SMALLEST_UNIT)
1549 		objsize--;
1550 	uma_max_ipers = UMA_SLAB_SIZE / objsize;
1551 
1552 	wsize = UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt) - UMA_MAX_WASTE;
1553 	totsize = wsize;
1554 	objsize = UMA_SMALLEST_UNIT;
1555 	while (totsize >= wsize) {
1556 		totsize = (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt)) /
1557 		    (objsize + UMA_FRITMREF_SZ);
1558 		totsize *= (UMA_FRITMREF_SZ + objsize);
1559 		objsize++;
1560 	}
1561 	if (objsize > UMA_SMALLEST_UNIT)
1562 		objsize--;
1563 	uma_max_ipers_ref = UMA_SLAB_SIZE / objsize;
1564 
1565 	KASSERT((uma_max_ipers_ref <= 255) && (uma_max_ipers <= 255),
1566 	    ("uma_startup: calculated uma_max_ipers values too large!"));
1567 
1568 #ifdef UMA_DEBUG
1569 	printf("Calculated uma_max_ipers (for OFFPAGE) is %d\n", uma_max_ipers);
1570 	printf("Calculated uma_max_ipers_slab (for OFFPAGE) is %d\n",
1571 	    uma_max_ipers_ref);
1572 #endif
1573 
1574 	/* "manually" create the initial zone */
1575 	args.name = "UMA Kegs";
1576 	args.size = sizeof(struct uma_keg);
1577 	args.ctor = keg_ctor;
1578 	args.dtor = keg_dtor;
1579 	args.uminit = zero_init;
1580 	args.fini = NULL;
1581 	args.keg = &masterkeg;
1582 	args.align = 32 - 1;
1583 	args.flags = UMA_ZFLAG_INTERNAL;
1584 	/* The initial zone has no per-cpu queues so it's smaller. */
1585 	zone_ctor(kegs, sizeof(struct uma_zone), &args, M_WAITOK);
1586 
1587 #ifdef UMA_DEBUG
1588 	printf("Filling boot free list.\n");
1589 #endif
1590 	for (i = 0; i < UMA_BOOT_PAGES; i++) {
1591 		slab = (uma_slab_t)((u_int8_t *)bootmem + (i * UMA_SLAB_SIZE));
1592 		slab->us_data = (u_int8_t *)slab;
1593 		slab->us_flags = UMA_SLAB_BOOT;
1594 		LIST_INSERT_HEAD(&uma_boot_pages, slab, us_link);
1595 		uma_boot_free++;
1596 	}
1597 
1598 #ifdef UMA_DEBUG
1599 	printf("Creating uma zone headers zone and keg.\n");
1600 #endif
1601 	args.name = "UMA Zones";
1602 	args.size = sizeof(struct uma_zone) +
1603 	    (sizeof(struct uma_cache) * (mp_maxid + 1));
1604 	args.ctor = zone_ctor;
1605 	args.dtor = zone_dtor;
1606 	args.uminit = zero_init;
1607 	args.fini = NULL;
1608 	args.keg = NULL;
1609 	args.align = 32 - 1;
1610 	args.flags = UMA_ZFLAG_INTERNAL;
1611 	/* The initial zone has no per-cpu queues so it's smaller. */
1612 	zone_ctor(zones, sizeof(struct uma_zone), &args, M_WAITOK);
1613 
1614 #ifdef UMA_DEBUG
1615 	printf("Initializing pcpu cache locks.\n");
1616 #endif
1617 	/* Initialize the pcpu cache lock set once and for all */
1618 	for (i = 0; i <= mp_maxid; i++)
1619 		CPU_LOCK_INIT(i);
1620 
1621 #ifdef UMA_DEBUG
1622 	printf("Creating slab and hash zones.\n");
1623 #endif
1624 
1625 	/*
1626 	 * This is the max number of free list items we'll have with
1627 	 * offpage slabs.
1628 	 */
1629 	slabsize = uma_max_ipers * UMA_FRITM_SZ;
1630 	slabsize += sizeof(struct uma_slab);
1631 
1632 	/* Now make a zone for slab headers */
1633 	slabzone = uma_zcreate("UMA Slabs",
1634 				slabsize,
1635 				NULL, NULL, NULL, NULL,
1636 				UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
1637 
1638 	/*
1639 	 * We also create a zone for the bigger slabs with reference
1640 	 * counts in them, to accommodate UMA_ZONE_REFCNT zones.
1641 	 */
1642 	slabsize = uma_max_ipers_ref * UMA_FRITMREF_SZ;
1643 	slabsize += sizeof(struct uma_slab_refcnt);
1644 	slabrefzone = uma_zcreate("UMA RCntSlabs",
1645 				  slabsize,
1646 				  NULL, NULL, NULL, NULL,
1647 				  UMA_ALIGN_PTR,
1648 				  UMA_ZFLAG_INTERNAL);
1649 
1650 	hashzone = uma_zcreate("UMA Hash",
1651 	    sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
1652 	    NULL, NULL, NULL, NULL,
1653 	    UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
1654 
1655 	bucket_init();
1656 
1657 #ifdef UMA_MD_SMALL_ALLOC
1658 	booted = 1;
1659 #endif
1660 
1661 #ifdef UMA_DEBUG
1662 	printf("UMA startup complete.\n");
1663 #endif
1664 }
1665 
1666 /* see uma.h */
1667 void
1668 uma_startup2(void)
1669 {
1670 	booted = 1;
1671 	bucket_enable();
1672 #ifdef UMA_DEBUG
1673 	printf("UMA startup2 complete.\n");
1674 #endif
1675 }
1676 
1677 /*
1678  * Initialize our callout handle
1679  *
1680  */
1681 
1682 static void
1683 uma_startup3(void)
1684 {
1685 #ifdef UMA_DEBUG
1686 	printf("Starting callout.\n");
1687 #endif
1688 	callout_init(&uma_callout, CALLOUT_MPSAFE);
1689 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
1690 #ifdef UMA_DEBUG
1691 	printf("UMA startup3 complete.\n");
1692 #endif
1693 }
1694 
1695 static uma_zone_t
1696 uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
1697 		int align, u_int16_t flags)
1698 {
1699 	struct uma_kctor_args args;
1700 
1701 	args.size = size;
1702 	args.uminit = uminit;
1703 	args.fini = fini;
1704 	args.align = align;
1705 	args.flags = flags;
1706 	args.zone = zone;
1707 	return (uma_zalloc_internal(kegs, &args, M_WAITOK));
1708 }
1709 
1710 /* See uma.h */
1711 uma_zone_t
1712 uma_zcreate(char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
1713 		uma_init uminit, uma_fini fini, int align, u_int16_t flags)
1714 
1715 {
1716 	struct uma_zctor_args args;
1717 
1718 	/* This stuff is essential for the zone ctor */
1719 	args.name = name;
1720 	args.size = size;
1721 	args.ctor = ctor;
1722 	args.dtor = dtor;
1723 	args.uminit = uminit;
1724 	args.fini = fini;
1725 	args.align = align;
1726 	args.flags = flags;
1727 	args.keg = NULL;
1728 
1729 	return (uma_zalloc_internal(zones, &args, M_WAITOK));
1730 }
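
/*
 * Usage sketch (hypothetical consumer, not part of this file): the
 * common pattern is to create a zone once at subsystem initialization
 * and then allocate/free through the per-cpu fast path.  "foo" and
 * struct foo are placeholders.
 *
 *	static uma_zone_t foo_zone;
 *
 *	foo_zone = uma_zcreate("foo", sizeof(struct foo),
 *	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
 *
 *	struct foo *fp = uma_zalloc(foo_zone, M_WAITOK | M_ZERO);
 *	...
 *	uma_zfree(foo_zone, fp);
 *
 * Leaving ctor/dtor and uminit/fini NULL is the typical case; M_ZERO asks
 * uma_zalloc_arg() to bzero the item after any constructor runs.
 */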
1731 
1732 /* See uma.h */
1733 uma_zone_t
1734 uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
1735 		    uma_init zinit, uma_fini zfini, uma_zone_t master)
1736 {
1737 	struct uma_zctor_args args;
1738 
1739 	args.name = name;
1740 	args.size = master->uz_keg->uk_size;
1741 	args.ctor = ctor;
1742 	args.dtor = dtor;
1743 	args.uminit = zinit;
1744 	args.fini = zfini;
1745 	args.align = master->uz_keg->uk_align;
1746 	args.flags = master->uz_keg->uk_flags | UMA_ZONE_SECONDARY;
1747 	args.keg = master->uz_keg;
1748 
1749 	return (uma_zalloc_internal(zones, &args, M_WAITOK));
1750 }
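
/*
 * Usage sketch (hypothetical, placeholder names): a secondary zone
 * shares its master's keg, and therefore its slabs, item size and
 * alignment, but layers its own ctor/dtor and zinit/zfini on top:
 *
 *	pkt_zone = uma_zsecond_create("packets",
 *	    pkt_ctor, pkt_dtor, pkt_zinit, pkt_zfini, buf_zone);
 *
 * Note that args.size and args.align above come from the master keg and
 * UMA_ZONE_SECONDARY is set, so zone_ctor() links the new zone onto the
 * keg's uk_zones list instead of creating a keg of its own.
 */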
1751 
1752 /* See uma.h */
1753 void
1754 uma_zdestroy(uma_zone_t zone)
1755 {
1756 	uma_zfree_internal(zones, zone, NULL, SKIP_NONE);
1757 }
1758 
1759 /* See uma.h */
1760 void *
1761 uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
1762 {
1763 	void *item;
1764 	uma_cache_t cache;
1765 	uma_bucket_t bucket;
1766 	int cpu;
1767 	int badness;
1768 
1769 	/* This is the fast path allocation */
1770 #ifdef UMA_DEBUG_ALLOC_1
1771 	printf("Allocating one item from %s(%p)\n", zone->uz_name, zone);
1772 #endif
1773 	CTR3(KTR_UMA, "uma_zalloc_arg thread %x zone %s flags %d", curthread,
1774 	    zone->uz_name, flags);
1775 
1776 	if (!(flags & M_NOWAIT)) {
1777 		KASSERT(curthread->td_intr_nesting_level == 0,
1778 		   ("malloc(M_WAITOK) in interrupt context"));
1779 		if (nosleepwithlocks) {
1780 #ifdef WITNESS
1781 			badness = WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
1782 			    NULL,
1783 			    "malloc(M_WAITOK) of \"%s\", forcing M_NOWAIT",
1784 			    zone->uz_name);
1785 #else
1786 			badness = 1;
1787 #endif
1788 		} else {
1789 			badness = 0;
1790 #ifdef WITNESS
1791 			WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1792 			    "malloc(M_WAITOK) of \"%s\"", zone->uz_name);
1793 #endif
1794 		}
1795 		if (badness) {
1796 			flags &= ~M_WAITOK;
1797 			flags |= M_NOWAIT;
1798 		}
1799 	}
1800 
1801 zalloc_restart:
1802 	cpu = PCPU_GET(cpuid);
1803 	CPU_LOCK(cpu);
1804 	cache = &zone->uz_cpu[cpu];
1805 
1806 zalloc_start:
1807 	bucket = cache->uc_allocbucket;
1808 
1809 	if (bucket) {
1810 		if (bucket->ub_cnt > 0) {
1811 			bucket->ub_cnt--;
1812 			item = bucket->ub_bucket[bucket->ub_cnt];
1813 #ifdef INVARIANTS
1814 			bucket->ub_bucket[bucket->ub_cnt] = NULL;
1815 #endif
1816 			KASSERT(item != NULL,
1817 			    ("uma_zalloc: Bucket pointer mangled."));
1818 			cache->uc_allocs++;
1819 #ifdef INVARIANTS
1820 			ZONE_LOCK(zone);
1821 			uma_dbg_alloc(zone, NULL, item);
1822 			ZONE_UNLOCK(zone);
1823 #endif
1824 			CPU_UNLOCK(cpu);
1825 			if (zone->uz_ctor != NULL) {
1826 				if (zone->uz_ctor(item, zone->uz_keg->uk_size,
1827 				    udata, flags) != 0) {
1828 					uma_zfree_internal(zone, item, udata,
1829 					    SKIP_DTOR);
1830 					return (NULL);
1831 				}
1832 			}
1833 			if (flags & M_ZERO)
1834 				bzero(item, zone->uz_keg->uk_size);
1835 			return (item);
1836 		} else if (cache->uc_freebucket) {
1837 			/*
1838 			 * We have run out of items in our allocbucket.
1839 			 * See if we can switch with our free bucket.
1840 			 */
1841 			if (cache->uc_freebucket->ub_cnt > 0) {
1842 #ifdef UMA_DEBUG_ALLOC
1843 				printf("uma_zalloc: Swapping empty with"
1844 				    " alloc.\n");
1845 #endif
1846 				bucket = cache->uc_freebucket;
1847 				cache->uc_freebucket = cache->uc_allocbucket;
1848 				cache->uc_allocbucket = bucket;
1849 
1850 				goto zalloc_start;
1851 			}
1852 		}
1853 	}
1854 	ZONE_LOCK(zone);
1855 	/* Since we have locked the zone we may as well send back our stats */
1856 	zone->uz_allocs += cache->uc_allocs;
1857 	cache->uc_allocs = 0;
1858 
1859 	/* Our old one is now a free bucket */
1860 	if (cache->uc_allocbucket) {
1861 		KASSERT(cache->uc_allocbucket->ub_cnt == 0,
1862 		    ("uma_zalloc_arg: Freeing a non free bucket."));
1863 		LIST_INSERT_HEAD(&zone->uz_free_bucket,
1864 		    cache->uc_allocbucket, ub_link);
1865 		cache->uc_allocbucket = NULL;
1866 	}
1867 
1868 	/* Check the free list for a new alloc bucket */
1869 	if ((bucket = LIST_FIRST(&zone->uz_full_bucket)) != NULL) {
1870 		KASSERT(bucket->ub_cnt != 0,
1871 		    ("uma_zalloc_arg: Returning an empty bucket."));
1872 
1873 		LIST_REMOVE(bucket, ub_link);
1874 		cache->uc_allocbucket = bucket;
1875 		ZONE_UNLOCK(zone);
1876 		goto zalloc_start;
1877 	}
1878 	/* We are no longer associated with this cpu!!! */
1879 	CPU_UNLOCK(cpu);
1880 
1881 	/* Bump up our uz_count so we get here less */
1882 	if (zone->uz_count < BUCKET_MAX)
1883 		zone->uz_count++;
1884 
1885 	/*
1886 	 * Now lets just fill a bucket and put it on the free list.  If that
1887 	 * works we'll restart the allocation from the beginning.
1888 	 */
1889 	if (uma_zalloc_bucket(zone, flags)) {
1890 		ZONE_UNLOCK(zone);
1891 		goto zalloc_restart;
1892 	}
1893 	ZONE_UNLOCK(zone);
1894 	/*
1895 	 * We may not be able to get a bucket so return an actual item.
1896 	 */
1897 #ifdef UMA_DEBUG
1898 	printf("uma_zalloc_arg: Bucketzone returned NULL\n");
1899 #endif
1900 
1901 	return (uma_zalloc_internal(zone, udata, flags));
1902 }
1903 
1904 static uma_slab_t
1905 uma_zone_slab(uma_zone_t zone, int flags)
1906 {
1907 	uma_slab_t slab;
1908 	uma_keg_t keg;
1909 
1910 	keg = zone->uz_keg;
1911 
1912 	/*
1913 	 * This is to prevent us from recursively trying to allocate
1914 	 * buckets.  The problem is that if an allocation forces us to
1915 	 * grab a new bucket we will call page_alloc, which will go off
1916 	 * and cause the vm to allocate vm_map_entries.  If we need new
1917 	 * buckets there too we will recurse in kmem_alloc and bad
1918 	 * things happen.  So instead we return a NULL bucket, and make
1919 	 * the code that allocates buckets smart enough to deal with it
1920 	 */
1921 	if (keg->uk_flags & UMA_ZFLAG_INTERNAL && keg->uk_recurse != 0)
1922 		return (NULL);
1923 
1924 	slab = NULL;
1925 
1926 	for (;;) {
1927 		/*
1928 		 * Find a slab with some space.  Prefer slabs that are partially
1929 		 * used over those that are totally full.  This helps to reduce
1930 		 * fragmentation.
1931 		 */
1932 		if (keg->uk_free != 0) {
1933 			if (!LIST_EMPTY(&keg->uk_part_slab)) {
1934 				slab = LIST_FIRST(&keg->uk_part_slab);
1935 			} else {
1936 				slab = LIST_FIRST(&keg->uk_free_slab);
1937 				LIST_REMOVE(slab, us_link);
1938 				LIST_INSERT_HEAD(&keg->uk_part_slab, slab,
1939 				    us_link);
1940 			}
1941 			return (slab);
1942 		}
1943 
1944 		/*
1945 		 * M_NOVM means don't ask the VM for more pages at all!
1946 		 */
1947 		if (flags & M_NOVM)
1948 			break;
1949 
1950 		if (keg->uk_maxpages &&
1951 		    keg->uk_pages >= keg->uk_maxpages) {
1952 			keg->uk_flags |= UMA_ZFLAG_FULL;
1953 
1954 			if (flags & M_NOWAIT)
1955 				break;
1956 			else
1957 				msleep(keg, &keg->uk_lock, PVM,
1958 				    "zonelimit", 0);
1959 			continue;
1960 		}
1961 		keg->uk_recurse++;
1962 		slab = slab_zalloc(zone, flags);
1963 		keg->uk_recurse--;
1964 
1965 		/*
1966 		 * If we got a slab here it's safe to mark it partially used
1967 		 * and return.  We assume that the caller is going to remove
1968 		 * at least one item.
1969 		 */
1970 		if (slab) {
1971 			LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
1972 			return (slab);
1973 		}
1974 		/*
1975 		 * We might not have been able to get a slab but another cpu
1976 		 * could have while we were unlocked.  Check again before we
1977 		 * fail.
1978 		 */
1979 		if (flags & M_NOWAIT)
1980 			flags |= M_NOVM;
1981 	}
1982 	return (slab);
1983 }
1984 
1985 static void *
1986 uma_slab_alloc(uma_zone_t zone, uma_slab_t slab)
1987 {
1988 	uma_keg_t keg;
1989 	uma_slabrefcnt_t slabref;
1990 	void *item;
1991 	u_int8_t freei;
1992 
1993 	keg = zone->uz_keg;
1994 
1995 	freei = slab->us_firstfree;
1996 	if (keg->uk_flags & UMA_ZONE_REFCNT) {
1997 		slabref = (uma_slabrefcnt_t)slab;
1998 		slab->us_firstfree = slabref->us_freelist[freei].us_item;
1999 	} else {
2000 		slab->us_firstfree = slab->us_freelist[freei].us_item;
2001 	}
2002 	item = slab->us_data + (keg->uk_rsize * freei);
2003 
2004 	slab->us_freecount--;
2005 	keg->uk_free--;
2006 #ifdef INVARIANTS
2007 	uma_dbg_alloc(zone, slab, item);
2008 #endif
2009 	/* Move this slab to the full list */
2010 	if (slab->us_freecount == 0) {
2011 		LIST_REMOVE(slab, us_link);
2012 		LIST_INSERT_HEAD(&keg->uk_full_slab, slab, us_link);
2013 	}
2014 
2015 	return (item);
2016 }
2017 
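/*
 * Note on the intra-slab free list consumed above: each slab embeds a singly
 * linked list of free item indices.  us_firstfree is the index of the first
 * free slot and us_freelist[i].us_item is the index that follows slot i, so
 * popping one item amounts to:
 *
 *	freei = slab->us_firstfree;
 *	slab->us_firstfree = slab->us_freelist[freei].us_item;
 *	item = slab->us_data + keg->uk_rsize * freei;
 */
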
2018 static int
2019 uma_zalloc_bucket(uma_zone_t zone, int flags)
2020 {
2021 	uma_bucket_t bucket;
2022 	uma_slab_t slab;
2023 	int16_t saved;
2024 	int max, origflags = flags;
2025 
2026 	/*
2027 	 * Try this zone's free list first so we don't allocate extra buckets.
2028 	 */
2029 	if ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
2030 		KASSERT(bucket->ub_cnt == 0,
2031 		    ("uma_zalloc_bucket: Bucket on free list is not empty."));
2032 		LIST_REMOVE(bucket, ub_link);
2033 	} else {
2034 		int bflags;
2035 
2036 		bflags = (flags & ~M_ZERO);
2037 		if (zone->uz_keg->uk_flags & UMA_ZFLAG_CACHEONLY)
2038 			bflags |= M_NOVM;
2039 
2040 		ZONE_UNLOCK(zone);
2041 		bucket = bucket_alloc(zone->uz_count, bflags);
2042 		ZONE_LOCK(zone);
2043 	}
2044 
2045 	if (bucket == NULL)
2046 		return (0);
2047 
2048 #ifdef SMP
2049 	/*
2050 	 * This code limits the number of simultaneous bucket fills for any
2051 	 * given zone to the number of per-cpu caches in the zone.  This is
2052 	 * done so that we don't allocate more memory than we really need.
2053 	 */
2054 	if (zone->uz_fills >= mp_ncpus)
2055 		goto done;
2056 
2057 #endif
2058 	zone->uz_fills++;
2059 
2060 	max = MIN(bucket->ub_entries, zone->uz_count);
2061 	/* Try to keep the buckets totally full */
2062 	saved = bucket->ub_cnt;
2063 	while (bucket->ub_cnt < max &&
2064 	    (slab = uma_zone_slab(zone, flags)) != NULL) {
2065 		while (slab->us_freecount && bucket->ub_cnt < max) {
2066 			bucket->ub_bucket[bucket->ub_cnt++] =
2067 			    uma_slab_alloc(zone, slab);
2068 		}
2069 
2070 		/* Don't block on the next fill */
2071 		flags |= M_NOWAIT;
2072 	}
2073 
2074 	/*
2075 	 * We unlock here because we need to call the zone's init.
2076 	 * It should be safe to unlock because the slab dealt with
2077 	 * above is already on the appropriate list within the keg
2078 	 * and the bucket we filled is not yet on any list, so we
2079 	 * own it.
2080 	 */
2081 	if (zone->uz_init != NULL) {
2082 		int i;
2083 
2084 		ZONE_UNLOCK(zone);
2085 		for (i = saved; i < bucket->ub_cnt; i++)
2086 			if (zone->uz_init(bucket->ub_bucket[i],
2087 			    zone->uz_keg->uk_size, origflags) != 0)
2088 				break;
2089 		/*
2090 		 * If we couldn't initialize the whole bucket, put the
2091 		 * rest back onto the freelist.
2092 		 */
2093 		if (i != bucket->ub_cnt) {
2094 			int j;
2095 
2096 			for (j = i; j < bucket->ub_cnt; j++) {
2097 				uma_zfree_internal(zone, bucket->ub_bucket[j],
2098 				    NULL, SKIP_FINI);
2099 #ifdef INVARIANTS
2100 				bucket->ub_bucket[j] = NULL;
2101 #endif
2102 			}
2103 			bucket->ub_cnt = i;
2104 		}
2105 		ZONE_LOCK(zone);
2106 	}
2107 
2108 	zone->uz_fills--;
2109 	if (bucket->ub_cnt != 0) {
2110 		LIST_INSERT_HEAD(&zone->uz_full_bucket,
2111 		    bucket, ub_link);
2112 		return (1);
2113 	}
2114 #ifdef SMP
2115 done:
2116 #endif
2117 	bucket_free(bucket);
2118 
2119 	return (0);
2120 }
2121 /*
2122  * Allocates an item for an internal zone
2123  *
2124  * Arguments
2125  *	zone   The zone to alloc for.
2126  *	udata  The data to be passed to the constructor.
2127  *	flags  M_WAITOK, M_NOWAIT, M_ZERO.
2128  *
2129  * Returns
2130  *	NULL if there is no memory and M_NOWAIT is set
2131  *	An item if successful
2132  */
2133 
2134 static void *
2135 uma_zalloc_internal(uma_zone_t zone, void *udata, int flags)
2136 {
2137 	uma_keg_t keg;
2138 	uma_slab_t slab;
2139 	void *item;
2140 
2141 	item = NULL;
2142 	keg = zone->uz_keg;
2143 
2144 #ifdef UMA_DEBUG_ALLOC
2145 	printf("INTERNAL: Allocating one item from %s(%p)\n", zone->uz_name, zone);
2146 #endif
2147 	ZONE_LOCK(zone);
2148 
2149 	slab = uma_zone_slab(zone, flags);
2150 	if (slab == NULL) {
2151 		ZONE_UNLOCK(zone);
2152 		return (NULL);
2153 	}
2154 
2155 	item = uma_slab_alloc(zone, slab);
2156 
2157 	ZONE_UNLOCK(zone);
2158 
2159 	/*
2160 	 * We have to call both the zone's init (not the keg's init)
2161 	 * and the zone's ctor.  This is because the item is going from
2162 	 * a keg slab directly to the user, and the user is expecting it
2163 	 * to be both zone-init'd as well as zone-ctor'd.
2164 	 */
2165 	if (zone->uz_init != NULL) {
2166 		if (zone->uz_init(item, keg->uk_size, flags) != 0) {
2167 			uma_zfree_internal(zone, item, udata, SKIP_FINI);
2168 			return (NULL);
2169 		}
2170 	}
2171 	if (zone->uz_ctor != NULL) {
2172 		if (zone->uz_ctor(item, keg->uk_size, udata, flags) != 0) {
2173 			uma_zfree_internal(zone, item, udata, SKIP_DTOR);
2174 			return (NULL);
2175 		}
2176 	}
2177 	if (flags & M_ZERO)
2178 		bzero(item, keg->uk_size);
2179 
2180 	return (item);
2181 }
2182 
2183 /* See uma.h */
2184 void
2185 uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
2186 {
2187 	uma_keg_t keg;
2188 	uma_cache_t cache;
2189 	uma_bucket_t bucket;
2190 	int bflags;
2191 	int cpu;
2192 	enum zfreeskip skip;
2193 
2194 	/* This is the fast path free */
2195 	skip = SKIP_NONE;
2196 	keg = zone->uz_keg;
2197 
2198 #ifdef UMA_DEBUG_ALLOC_1
2199 	printf("Freeing item %p to %s(%p)\n", item, zone->uz_name, zone);
2200 #endif
2201 	CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread,
2202 	    zone->uz_name);
2203 
2204 	/*
2205 	 * The race here is acceptable.  If we miss it we'll just have to wait
2206 	 * a little longer for the limits to be reset.
2207 	 */
2208 
2209 	if (keg->uk_flags & UMA_ZFLAG_FULL)
2210 		goto zfree_internal;
2211 
2212 	if (zone->uz_dtor) {
2213 		zone->uz_dtor(item, keg->uk_size, udata);
2214 		skip = SKIP_DTOR;
2215 	}
2216 
2217 zfree_restart:
2218 	cpu = PCPU_GET(cpuid);
2219 	CPU_LOCK(cpu);
2220 	cache = &zone->uz_cpu[cpu];
2221 
2222 zfree_start:
2223 	bucket = cache->uc_freebucket;
2224 
2225 	if (bucket) {
2226 		/*
2227 		 * Do we have room in our bucket? It is OK for this uz count
2228 		 * check to be slightly out of sync.
2229 		 */
2230 
2231 		if (bucket->ub_cnt < bucket->ub_entries) {
2232 			KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL,
2233 			    ("uma_zfree: Freeing to non free bucket index."));
2234 			bucket->ub_bucket[bucket->ub_cnt] = item;
2235 			bucket->ub_cnt++;
2236 #ifdef INVARIANTS
2237 			ZONE_LOCK(zone);
2238 			if (keg->uk_flags & UMA_ZONE_MALLOC)
2239 				uma_dbg_free(zone, udata, item);
2240 			else
2241 				uma_dbg_free(zone, NULL, item);
2242 			ZONE_UNLOCK(zone);
2243 #endif
2244 			CPU_UNLOCK(cpu);
2245 			return;
2246 		} else if (cache->uc_allocbucket) {
2247 #ifdef UMA_DEBUG_ALLOC
2248 			printf("uma_zfree: Swapping buckets.\n");
2249 #endif
2250 			/*
2251 			 * We have run out of space in our freebucket.
2252 			 * See if we can switch with our alloc bucket.
2253 			 */
2254 			if (cache->uc_allocbucket->ub_cnt <
2255 			    cache->uc_freebucket->ub_cnt) {
2256 				bucket = cache->uc_freebucket;
2257 				cache->uc_freebucket = cache->uc_allocbucket;
2258 				cache->uc_allocbucket = bucket;
2259 				goto zfree_start;
2260 			}
2261 		}
2262 	}
2263 	/*
2264 	 * We can get here for two reasons:
2265 	 *
2266 	 * 1) The buckets are NULL
2267 	 * 2) The alloc and free buckets are both somewhat full.
2268 	 */
2269 
2270 	ZONE_LOCK(zone);
2271 
2272 	bucket = cache->uc_freebucket;
2273 	cache->uc_freebucket = NULL;
2274 
2275 	/* Can we throw this on the zone full list? */
2276 	if (bucket != NULL) {
2277 #ifdef UMA_DEBUG_ALLOC
2278 		printf("uma_zfree: Putting old bucket on the free list.\n");
2279 #endif
2280 		/* ub_cnt is pointing to the last free item */
2281 		KASSERT(bucket->ub_cnt != 0,
2282 		    ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n"));
2283 		LIST_INSERT_HEAD(&zone->uz_full_bucket,
2284 		    bucket, ub_link);
2285 	}
2286 	if ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
2287 		LIST_REMOVE(bucket, ub_link);
2288 		ZONE_UNLOCK(zone);
2289 		cache->uc_freebucket = bucket;
2290 		goto zfree_start;
2291 	}
2292 	/* We're done with this CPU now */
2293 	CPU_UNLOCK(cpu);
2294 
2295 	/* And the zone. */
2296 	ZONE_UNLOCK(zone);
2297 
2298 #ifdef UMA_DEBUG_ALLOC
2299 	printf("uma_zfree: Allocating new free bucket.\n");
2300 #endif
2301 	bflags = M_NOWAIT;
2302 
2303 	if (keg->uk_flags & UMA_ZFLAG_CACHEONLY)
2304 		bflags |= M_NOVM;
2305 	bucket = bucket_alloc(zone->uz_count, bflags);
2306 	if (bucket) {
2307 		ZONE_LOCK(zone);
2308 		LIST_INSERT_HEAD(&zone->uz_free_bucket,
2309 		    bucket, ub_link);
2310 		ZONE_UNLOCK(zone);
2311 		goto zfree_restart;
2312 	}
2313 
2314 	/*
2315 	 * If nothing else caught this, we'll just do an internal free.
2316 	 */
2317 
2318 zfree_internal:
2319 
2320 #ifdef INVARIANTS
2321 	/*
2322 	 * If we need to skip the dtor and the uma_dbg_free in
2323 	 * uma_zfree_internal because we've already called the dtor
2324 	 * above, but we ended up here, then we need to make sure
2325 	 * that we take care of the uma_dbg_free immediately.
2326 	 */
2327 	if (skip) {
2328 		ZONE_LOCK(zone);
2329 		if (keg->uk_flags & UMA_ZONE_MALLOC)
2330 			uma_dbg_free(zone, udata, item);
2331 		else
2332 			uma_dbg_free(zone, NULL, item);
2333 		ZONE_UNLOCK(zone);
2334 	}
2335 #endif
2336 	uma_zfree_internal(zone, item, udata, skip);
2337 
2338 	return;
2339 }
2340 
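/*
 * Illustrative usage sketch: most callers reach uma_zfree_arg() through the
 * uma_zfree() wrapper in <vm/uma.h>, which passes NULL udata.  The item must
 * have come from the same zone it is freed to:
 *
 *	item = uma_zalloc(zone, M_NOWAIT);
 *	if (item != NULL)
 *		uma_zfree(zone, item);
 */
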
2341 /*
2342  * Frees an item to an INTERNAL zone or allocates a free bucket
2343  *
2344  * Arguments:
2345  *	zone   The zone to free to
2346  *	item   The item we're freeing
2347  *	udata  User supplied data for the dtor
2348  *	skip   Skip dtors and finis
2349  */
2350 static void
2351 uma_zfree_internal(uma_zone_t zone, void *item, void *udata,
2352     enum zfreeskip skip)
2353 {
2354 	uma_slab_t slab;
2355 	uma_slabrefcnt_t slabref;
2356 	uma_keg_t keg;
2357 	u_int8_t *mem;
2358 	u_int8_t freei;
2359 
2360 	keg = zone->uz_keg;
2361 
2362 	if (skip < SKIP_DTOR && zone->uz_dtor)
2363 		zone->uz_dtor(item, keg->uk_size, udata);
2364 	if (skip < SKIP_FINI && zone->uz_fini)
2365 		zone->uz_fini(item, keg->uk_size);
2366 
2367 	ZONE_LOCK(zone);
2368 
2369 	if (!(keg->uk_flags & UMA_ZONE_MALLOC)) {
2370 		mem = (u_int8_t *)((unsigned long)item & (~UMA_SLAB_MASK));
2371 		if (keg->uk_flags & UMA_ZONE_HASH)
2372 			slab = hash_sfind(&keg->uk_hash, mem);
2373 		else {
2374 			mem += keg->uk_pgoff;
2375 			slab = (uma_slab_t)mem;
2376 		}
2377 	} else {
2378 		slab = (uma_slab_t)udata;
2379 	}
2380 
2381 	/* Do we need to remove from any lists? */
2382 	if (slab->us_freecount+1 == keg->uk_ipers) {
2383 		LIST_REMOVE(slab, us_link);
2384 		LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
2385 	} else if (slab->us_freecount == 0) {
2386 		LIST_REMOVE(slab, us_link);
2387 		LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
2388 	}
2389 
2390 	/* Slab management: return the item's slot to the slab's free list. */
2391 	freei = ((unsigned long)item - (unsigned long)slab->us_data)
2392 		/ keg->uk_rsize;
2393 
2394 #ifdef INVARIANTS
2395 	if (!skip)
2396 		uma_dbg_free(zone, slab, item);
2397 #endif
2398 
2399 	if (keg->uk_flags & UMA_ZONE_REFCNT) {
2400 		slabref = (uma_slabrefcnt_t)slab;
2401 		slabref->us_freelist[freei].us_item = slab->us_firstfree;
2402 	} else {
2403 		slab->us_freelist[freei].us_item = slab->us_firstfree;
2404 	}
2405 	slab->us_firstfree = freei;
2406 	slab->us_freecount++;
2407 
2408 	/* Zone statistics */
2409 	keg->uk_free++;
2410 
2411 	if (keg->uk_flags & UMA_ZFLAG_FULL) {
2412 		if (keg->uk_pages < keg->uk_maxpages)
2413 			keg->uk_flags &= ~UMA_ZFLAG_FULL;
2414 
2415 		/* We can handle one more allocation */
2416 		wakeup_one(keg);
2417 	}
2418 
2419 	ZONE_UNLOCK(zone);
2420 }
2421 
2422 /* See uma.h */
2423 void
2424 uma_zone_set_max(uma_zone_t zone, int nitems)
2425 {
2426 	uma_keg_t keg;
2427 
2428 	keg = zone->uz_keg;
2429 	ZONE_LOCK(zone);
2430 	if (keg->uk_ppera > 1)
2431 		keg->uk_maxpages = nitems * keg->uk_ppera;
2432 	else
2433 		keg->uk_maxpages = nitems / keg->uk_ipers;
2434 
2435 	if (keg->uk_maxpages * keg->uk_ipers < nitems)
2436 		keg->uk_maxpages++;
2437 
2438 	ZONE_UNLOCK(zone);
2439 }
2440 
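/*
 * Illustrative usage sketch (foo_zone is hypothetical): cap a zone at roughly
 * 10000 items.  Because the limit above is kept in whole pages (uk_maxpages),
 * the effective item limit is rounded up to slab granularity.
 *
 *	uma_zone_set_max(foo_zone, 10000);
 */
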
2441 /* See uma.h */
2442 void
2443 uma_zone_set_init(uma_zone_t zone, uma_init uminit)
2444 {
2445 	ZONE_LOCK(zone);
2446 	KASSERT(zone->uz_keg->uk_pages == 0,
2447 	    ("uma_zone_set_init on non-empty keg"));
2448 	zone->uz_keg->uk_init = uminit;
2449 	ZONE_UNLOCK(zone);
2450 }
2451 
2452 /* See uma.h */
2453 void
2454 uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
2455 {
2456 	ZONE_LOCK(zone);
2457 	KASSERT(zone->uz_keg->uk_pages == 0,
2458 	    ("uma_zone_set_fini on non-empty keg"));
2459 	zone->uz_keg->uk_fini = fini;
2460 	ZONE_UNLOCK(zone);
2461 }
2462 
2463 /* See uma.h */
2464 void
2465 uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
2466 {
2467 	ZONE_LOCK(zone);
2468 	KASSERT(zone->uz_keg->uk_pages == 0,
2469 	    ("uma_zone_set_zinit on non-empty keg"));
2470 	zone->uz_init = zinit;
2471 	ZONE_UNLOCK(zone);
2472 }
2473 
2474 /* See uma.h */
2475 void
2476 uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
2477 {
2478 	ZONE_LOCK(zone);
2479 	KASSERT(zone->uz_keg->uk_pages == 0,
2480 	    ("uma_zone_set_zfini on non-empty keg"));
2481 	zone->uz_fini = zfini;
2482 	ZONE_UNLOCK(zone);
2483 }
2484 
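/*
 * Illustrative usage sketch (foo_zone and the foo_* callbacks are
 * hypothetical): the four setters above distinguish keg-level from zone-level
 * hooks.  Roughly, uk_init/uk_fini are applied once per item when a slab is
 * created or destroyed, while uz_init/uz_fini are applied each time an item
 * passes between the keg and this particular zone.
 *
 *	uma_zone_set_init(foo_zone, foo_keg_init);
 *	uma_zone_set_fini(foo_zone, foo_keg_fini);
 *	uma_zone_set_zinit(foo_zone, foo_zone_init);
 *	uma_zone_set_zfini(foo_zone, foo_zone_fini);
 */
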
2485 /* See uma.h */
2486 /* XXX uk_freef is not actually used with the zone locked */
2487 void
2488 uma_zone_set_freef(uma_zone_t zone, uma_free freef)
2489 {
2490 	ZONE_LOCK(zone);
2491 	zone->uz_keg->uk_freef = freef;
2492 	ZONE_UNLOCK(zone);
2493 }
2494 
2495 /* See uma.h */
2496 /* XXX uk_allocf is not actually used with the zone locked */
2497 void
2498 uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
2499 {
2500 	ZONE_LOCK(zone);
2501 	zone->uz_keg->uk_flags |= UMA_ZFLAG_PRIVALLOC;
2502 	zone->uz_keg->uk_allocf = allocf;
2503 	ZONE_UNLOCK(zone);
2504 }
2505 
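/*
 * Illustrative sketch of a custom page-level backend, modeled on the stock
 * page_alloc allocator and the uma_alloc typedef in <vm/uma.h>; foo_zone and
 * foo_pages_alloc are hypothetical names.
 *
 *	static void *
 *	foo_pages_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
 *	{
 *		*pflag = UMA_SLAB_KMEM;
 *		return ((void *)kmem_malloc(kmem_map, bytes, wait));
 *	}
 *
 *	uma_zone_set_allocf(foo_zone, foo_pages_alloc);
 */
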
2506 /* See uma.h */
2507 int
2508 uma_zone_set_obj(uma_zone_t zone, struct vm_object *obj, int count)
2509 {
2510 	uma_keg_t keg;
2511 	vm_offset_t kva;
2512 	int pages;
2513 
2514 	keg = zone->uz_keg;
2515 	pages = count / keg->uk_ipers;
2516 
2517 	if (pages * keg->uk_ipers < count)
2518 		pages++;
2519 
2520 	kva = kmem_alloc_nofault(kernel_map, pages * UMA_SLAB_SIZE);
2521 
2522 	if (kva == 0)
2523 		return (0);
2524 	if (obj == NULL) {
2525 		obj = vm_object_allocate(OBJT_DEFAULT,
2526 		    pages);
2527 	} else {
2528 		VM_OBJECT_LOCK_INIT(obj, "uma object");
2529 		_vm_object_allocate(OBJT_DEFAULT,
2530 		    pages, obj);
2531 	}
2532 	ZONE_LOCK(zone);
2533 	keg->uk_kva = kva;
2534 	keg->uk_obj = obj;
2535 	keg->uk_maxpages = pages;
2536 	keg->uk_allocf = obj_alloc;
2537 	keg->uk_flags |= UMA_ZONE_NOFREE | UMA_ZFLAG_PRIVALLOC;
2538 	ZONE_UNLOCK(zone);
2539 	return (1);
2540 }
2541 
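/*
 * Illustrative usage sketch (foo_zone is hypothetical): back a zone with its
 * own kva range and VM object so that at most "count" items can ever be
 * allocated from it.  Passing a NULL obj lets UMA allocate the object itself;
 * a return value of 0 means the kva reservation failed.
 *
 *	if (uma_zone_set_obj(foo_zone, NULL, 10000) == 0)
 *		panic("could not reserve kva for foo_zone");
 */
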
2542 /* See uma.h */
2543 void
2544 uma_prealloc(uma_zone_t zone, int items)
2545 {
2546 	int slabs;
2547 	uma_slab_t slab;
2548 	uma_keg_t keg;
2549 
2550 	keg = zone->uz_keg;
2551 	ZONE_LOCK(zone);
2552 	slabs = items / keg->uk_ipers;
2553 	if (slabs * keg->uk_ipers < items)
2554 		slabs++;
2555 	while (slabs > 0) {
2556 		slab = slab_zalloc(zone, M_WAITOK);
2557 		LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
2558 		slabs--;
2559 	}
2560 	ZONE_UNLOCK(zone);
2561 }
2562 
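/*
 * Illustrative usage sketch (foo_zone is hypothetical): pre-populate a zone
 * with enough slabs for a known worst-case demand, typically at boot time
 * while sleeping allocations are cheap.
 *
 *	uma_prealloc(foo_zone, 256);
 */
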
2563 /* See uma.h */
2564 u_int32_t *
2565 uma_find_refcnt(uma_zone_t zone, void *item)
2566 {
2567 	uma_slabrefcnt_t slabref;
2568 	uma_keg_t keg;
2569 	u_int32_t *refcnt;
2570 	int idx;
2571 
2572 	keg = zone->uz_keg;
2573 	slabref = (uma_slabrefcnt_t)vtoslab((vm_offset_t)item &
2574 	    (~UMA_SLAB_MASK));
2575 	KASSERT(slabref != NULL && slabref->us_keg->uk_flags & UMA_ZONE_REFCNT,
2576 	    ("uma_find_refcnt(): zone possibly not UMA_ZONE_REFCNT"));
2577 	idx = ((unsigned long)item - (unsigned long)slabref->us_data)
2578 	    / keg->uk_rsize;
2579 	refcnt = &slabref->us_freelist[idx].us_refcnt;
2580 	return refcnt;
2581 }
2582 
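/*
 * Illustrative usage sketch (foo_zone is hypothetical): for a zone created
 * with UMA_ZONE_REFCNT, the per-item reference counter lives in the slab
 * header and is looked up from the item address.
 *
 *	u_int32_t *refcnt;
 *
 *	refcnt = uma_find_refcnt(foo_zone, item);
 *	*refcnt = 1;
 */
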
2583 /* See uma.h */
2584 void
2585 uma_reclaim(void)
2586 {
2587 #ifdef UMA_DEBUG
2588 	printf("UMA: vm asked us to release pages!\n");
2589 #endif
2590 	bucket_enable();
2591 	zone_foreach(zone_drain);
2592 	/*
2593 	 * Some slabs may have been freed but this zone will be visited early
2594 	 * in the pass above, so visit it again to free pages that become empty
2595 	 * once other zones are drained.  We have to do the same for buckets.
2596 	 */
2597 	zone_drain(slabzone);
2598 	zone_drain(slabrefzone);
2599 	bucket_zone_drain();
2600 }
2601 
2602 void *
2603 uma_large_malloc(int size, int wait)
2604 {
2605 	void *mem;
2606 	uma_slab_t slab;
2607 	u_int8_t flags;
2608 
2609 	slab = uma_zalloc_internal(slabzone, NULL, wait);
2610 	if (slab == NULL)
2611 		return (NULL);
2612 	mem = page_alloc(NULL, size, &flags, wait);
2613 	if (mem) {
2614 		vsetslab((vm_offset_t)mem, slab);
2615 		slab->us_data = mem;
2616 		slab->us_flags = flags | UMA_SLAB_MALLOC;
2617 		slab->us_size = size;
2618 	} else {
2619 		uma_zfree_internal(slabzone, slab, NULL, 0);
2620 	}
2621 
2622 	return (mem);
2623 }
2624 
2625 void
2626 uma_large_free(uma_slab_t slab)
2627 {
2628 	vsetobj((vm_offset_t)slab->us_data, kmem_object);
2629 	page_free(slab->us_data, slab->us_size, slab->us_flags);
2630 	uma_zfree_internal(slabzone, slab, NULL, 0);
2631 }
2632 
2633 void
2634 uma_print_stats(void)
2635 {
2636 	zone_foreach(uma_print_zone);
2637 }
2638 
2639 static void
2640 slab_print(uma_slab_t slab)
2641 {
2642 	printf("slab: keg %p, data %p, freecount %d, firstfree %d\n",
2643 		slab->us_keg, slab->us_data, slab->us_freecount,
2644 		slab->us_firstfree);
2645 }
2646 
2647 static void
2648 cache_print(uma_cache_t cache)
2649 {
2650 	printf("alloc: %p(%d), free: %p(%d)\n",
2651 		cache->uc_allocbucket,
2652 		cache->uc_allocbucket ? cache->uc_allocbucket->ub_cnt : 0,
2653 		cache->uc_freebucket,
2654 		cache->uc_freebucket ? cache->uc_freebucket->ub_cnt : 0);
2655 }
2656 
2657 void
2658 uma_print_zone(uma_zone_t zone)
2659 {
2660 	uma_cache_t cache;
2661 	uma_keg_t keg;
2662 	uma_slab_t slab;
2663 	int i;
2664 
2665 	keg = zone->uz_keg;
2666 	printf("%s(%p) size %d(%d) flags %d ipers %d ppera %d out %d free %d\n",
2667 	    zone->uz_name, zone, keg->uk_size, keg->uk_rsize, keg->uk_flags,
2668 	    keg->uk_ipers, keg->uk_ppera,
2669 	    (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free);
2670 	printf("Part slabs:\n");
2671 	LIST_FOREACH(slab, &keg->uk_part_slab, us_link)
2672 		slab_print(slab);
2673 	printf("Free slabs:\n");
2674 	LIST_FOREACH(slab, &keg->uk_free_slab, us_link)
2675 		slab_print(slab);
2676 	printf("Full slabs:\n");
2677 	LIST_FOREACH(slab, &keg->uk_full_slab, us_link)
2678 		slab_print(slab);
2679 	for (i = 0; i <= mp_maxid; i++) {
2680 		if (CPU_ABSENT(i))
2681 			continue;
2682 		cache = &zone->uz_cpu[i];
2683 		printf("CPU %d Cache:\n", i);
2684 		cache_print(cache);
2685 	}
2686 }
2687 
2688 /*
2689  * Sysctl handler for vm.zone
2690  *
2691  * stolen from vm_zone.c
2692  */
2693 static int
2694 sysctl_vm_zone(SYSCTL_HANDLER_ARGS)
2695 {
2696 	int error, len, cnt;
2697 	const int linesize = 128;	/* conservative */
2698 	int totalfree;
2699 	char *tmpbuf, *offset;
2700 	uma_zone_t z;
2701 	uma_keg_t zk;
2702 	char *p;
2703 	int cpu;
2704 	int cachefree;
2705 	uma_bucket_t bucket;
2706 	uma_cache_t cache;
2707 
2708 	cnt = 0;
2709 	mtx_lock(&uma_mtx);
2710 	LIST_FOREACH(zk, &uma_kegs, uk_link) {
2711 		LIST_FOREACH(z, &zk->uk_zones, uz_link)
2712 			cnt++;
2713 	}
2714 	mtx_unlock(&uma_mtx);
2715 	MALLOC(tmpbuf, char *, (cnt == 0 ? 1 : cnt) * linesize,
2716 			M_TEMP, M_WAITOK);
2717 	len = snprintf(tmpbuf, linesize,
2718 	    "\nITEM            SIZE     LIMIT     USED    FREE  REQUESTS\n\n");
2719 	if (cnt == 0)
2720 		tmpbuf[len - 1] = '\0';
2721 	error = SYSCTL_OUT(req, tmpbuf, cnt == 0 ? len-1 : len);
2722 	if (error || cnt == 0)
2723 		goto out;
2724 	offset = tmpbuf;
2725 	mtx_lock(&uma_mtx);
2726 	LIST_FOREACH(zk, &uma_kegs, uk_link) {
2727 	  LIST_FOREACH(z, &zk->uk_zones, uz_link) {
2728 		if (cnt == 0)	/* list may have changed size */
2729 			break;
2730 		if (!(zk->uk_flags & UMA_ZFLAG_INTERNAL)) {
2731 			for (cpu = 0; cpu <= mp_maxid; cpu++) {
2732 				if (CPU_ABSENT(cpu))
2733 					continue;
2734 				CPU_LOCK(cpu);
2735 			}
2736 		}
2737 		ZONE_LOCK(z);
2738 		cachefree = 0;
2739 		if (!(zk->uk_flags & UMA_ZFLAG_INTERNAL)) {
2740 			for (cpu = 0; cpu <= mp_maxid; cpu++) {
2741 				if (CPU_ABSENT(cpu))
2742 					continue;
2743 				cache = &z->uz_cpu[cpu];
2744 				if (cache->uc_allocbucket != NULL)
2745 					cachefree += cache->uc_allocbucket->ub_cnt;
2746 				if (cache->uc_freebucket != NULL)
2747 					cachefree += cache->uc_freebucket->ub_cnt;
2748 				CPU_UNLOCK(cpu);
2749 			}
2750 		}
2751 		LIST_FOREACH(bucket, &z->uz_full_bucket, ub_link) {
2752 			cachefree += bucket->ub_cnt;
2753 		}
2754 		totalfree = zk->uk_free + cachefree;
2755 		len = snprintf(offset, linesize,
2756 		    "%-12.12s  %6.6u, %8.8u, %6.6u, %6.6u, %8.8llu\n",
2757 		    z->uz_name, zk->uk_size,
2758 		    zk->uk_maxpages * zk->uk_ipers,
2759 		    (zk->uk_ipers * (zk->uk_pages / zk->uk_ppera)) - totalfree,
2760 		    totalfree,
2761 		    (unsigned long long)z->uz_allocs);
2762 		ZONE_UNLOCK(z);
2763 		for (p = offset + 12; p > offset && *p == ' '; --p)
2764 			/* nothing */ ;
2765 		p[1] = ':';
2766 		cnt--;
2767 		offset += len;
2768 	  }
2769 	}
2770 	mtx_unlock(&uma_mtx);
2771 	*offset++ = '\0';
2772 	error = SYSCTL_OUT(req, tmpbuf, offset - tmpbuf);
2773 out:
2774 	FREE(tmpbuf, M_TEMP);
2775 	return (error);
2776 }
2777