xref: /freebsd/sys/vm/uma_core.c (revision f18d3c411697ff46d85e579a72be54ca0cc67dd0)
1 /*-
2  * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson <jeff@FreeBSD.org>
3  * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
4  * Copyright (c) 2004-2006 Robert N. M. Watson
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice unmodified, this list of conditions, and the following
12  *    disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 /*
30  * uma_core.c  Implementation of the Universal Memory allocator
31  *
32  * This allocator is intended to replace the multitude of similar object caches
33  * in the standard FreeBSD kernel.  The intent is to be flexible as well as
34  * effecient.  A primary design goal is to return unused memory to the rest of
35  * the system.  This will make the system as a whole more flexible due to the
36  * ability to move memory to subsystems which most need it instead of leaving
37  * pools of reserved memory unused.
38  *
39  * The basic ideas stem from similar slab/zone based allocators whose algorithms
40  * are well known.
41  *
42  */
43 
44 /*
45  * TODO:
46  *	- Improve memory usage for large allocations
47  *	- Investigate cache size adjustments
48  */
49 
50 #include <sys/cdefs.h>
51 __FBSDID("$FreeBSD$");
52 
53 /* I should really use ktr.. */
54 /*
55 #define UMA_DEBUG 1
56 #define UMA_DEBUG_ALLOC 1
57 #define UMA_DEBUG_ALLOC_1 1
58 */
59 
60 #include "opt_ddb.h"
61 #include "opt_param.h"
62 #include "opt_vm.h"
63 
64 #include <sys/param.h>
65 #include <sys/systm.h>
66 #include <sys/bitset.h>
67 #include <sys/kernel.h>
68 #include <sys/types.h>
69 #include <sys/queue.h>
70 #include <sys/malloc.h>
71 #include <sys/ktr.h>
72 #include <sys/lock.h>
73 #include <sys/sysctl.h>
74 #include <sys/mutex.h>
75 #include <sys/proc.h>
76 #include <sys/rwlock.h>
77 #include <sys/sbuf.h>
78 #include <sys/sched.h>
79 #include <sys/smp.h>
80 #include <sys/vmmeter.h>
81 
82 #include <vm/vm.h>
83 #include <vm/vm_object.h>
84 #include <vm/vm_page.h>
85 #include <vm/vm_pageout.h>
86 #include <vm/vm_param.h>
87 #include <vm/vm_map.h>
88 #include <vm/vm_kern.h>
89 #include <vm/vm_extern.h>
90 #include <vm/uma.h>
91 #include <vm/uma_int.h>
92 #include <vm/uma_dbg.h>
93 
94 #include <ddb/ddb.h>
95 
96 #ifdef DEBUG_MEMGUARD
97 #include <vm/memguard.h>
98 #endif
99 
100 /*
101  * This is the zone and keg from which all zones are spawned.  The idea is that
102  * even the zone & keg heads are allocated from the allocator, so we use the
103  * bss section to bootstrap us.
104  */
105 static struct uma_keg masterkeg;
106 static struct uma_zone masterzone_k;
107 static struct uma_zone masterzone_z;
108 static uma_zone_t kegs = &masterzone_k;
109 static uma_zone_t zones = &masterzone_z;
110 
111 /* This is the zone from which all of uma_slab_t's are allocated. */
112 static uma_zone_t slabzone;
113 static uma_zone_t slabrefzone;	/* With refcounters (for UMA_ZONE_REFCNT) */
114 
115 /*
116  * The initial hash tables come out of this zone so they can be allocated
117  * prior to malloc coming up.
118  */
119 static uma_zone_t hashzone;
120 
121 /* The boot-time adjusted value for cache line alignment. */
122 int uma_align_cache = 64 - 1;
123 
124 static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");
125 
126 /*
127  * Are we allowed to allocate buckets?
128  */
129 static int bucketdisable = 1;
130 
131 /* Linked list of all kegs in the system */
132 static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);
133 
134 /* This mutex protects the keg list */
135 static struct mtx_padalign uma_mtx;
136 
137 /* Linked list of boot time pages */
138 static LIST_HEAD(,uma_slab) uma_boot_pages =
139     LIST_HEAD_INITIALIZER(uma_boot_pages);
140 
141 /* This mutex protects the boot time pages list */
142 static struct mtx_padalign uma_boot_pages_mtx;
143 
144 /* Is the VM done starting up? */
145 static int booted = 0;
146 #define	UMA_STARTUP	1
147 #define	UMA_STARTUP2	2
148 
149 /* Maximum number of allowed items-per-slab if the slab header is OFFPAGE */
150 static const u_int uma_max_ipers = SLAB_SETSIZE;
151 
152 /*
153  * Only mbuf clusters use ref zones.  Just provide enough references
154  * to support the one user.  New code should not use the ref facility.
155  */
156 static const u_int uma_max_ipers_ref = PAGE_SIZE / MCLBYTES;
157 
158 /*
159  * This is the handle used to schedule events that need to happen
160  * outside of the allocation fast path.
161  */
162 static struct callout uma_callout;
163 #define	UMA_TIMEOUT	20		/* Seconds for callout interval. */
164 
165 /*
166  * This structure is passed as the zone ctor arg so that I don't have to create
167  * a special allocation function just for zones.
168  */
169 struct uma_zctor_args {
170 	const char *name;
171 	size_t size;
172 	uma_ctor ctor;
173 	uma_dtor dtor;
174 	uma_init uminit;
175 	uma_fini fini;
176 	uma_import import;
177 	uma_release release;
178 	void *arg;
179 	uma_keg_t keg;
180 	int align;
181 	uint32_t flags;
182 };
183 
184 struct uma_kctor_args {
185 	uma_zone_t zone;
186 	size_t size;
187 	uma_init uminit;
188 	uma_fini fini;
189 	int align;
190 	uint32_t flags;
191 };
192 
193 struct uma_bucket_zone {
194 	uma_zone_t	ubz_zone;
195 	char		*ubz_name;
196 	int		ubz_entries;	/* Number of items it can hold. */
197 	int		ubz_maxsize;	/* Maximum allocation size per-item. */
198 };
199 
200 /*
201  * Compute the actual number of bucket entries to pack them in power
202  * of two sizes for more efficient space utilization.
203  */
204 #define	BUCKET_SIZE(n)						\
205     (((sizeof(void *) * (n)) - sizeof(struct uma_bucket)) / sizeof(void *))
206 
207 #define	BUCKET_MAX	BUCKET_SIZE(128)
208 
209 struct uma_bucket_zone bucket_zones[] = {
210 	{ NULL, "4 Bucket", BUCKET_SIZE(4), 4096 },
211 	{ NULL, "6 Bucket", BUCKET_SIZE(6), 3072 },
212 	{ NULL, "8 Bucket", BUCKET_SIZE(8), 2048 },
213 	{ NULL, "12 Bucket", BUCKET_SIZE(12), 1536 },
214 	{ NULL, "16 Bucket", BUCKET_SIZE(16), 1024 },
215 	{ NULL, "32 Bucket", BUCKET_SIZE(32), 512 },
216 	{ NULL, "64 Bucket", BUCKET_SIZE(64), 256 },
217 	{ NULL, "128 Bucket", BUCKET_SIZE(128), 128 },
218 	{ NULL, NULL, 0}
219 };
220 
221 /*
222  * Flags and enumerations to be passed to internal functions.
223  */
224 enum zfreeskip { SKIP_NONE = 0, SKIP_DTOR, SKIP_FINI };
225 
226 /* Prototypes.. */
227 
228 static void *noobj_alloc(uma_zone_t, int, uint8_t *, int);
229 static void *page_alloc(uma_zone_t, int, uint8_t *, int);
230 static void *startup_alloc(uma_zone_t, int, uint8_t *, int);
231 static void page_free(void *, int, uint8_t);
232 static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int);
233 static void cache_drain(uma_zone_t);
234 static void bucket_drain(uma_zone_t, uma_bucket_t);
235 static void bucket_cache_drain(uma_zone_t zone);
236 static int keg_ctor(void *, int, void *, int);
237 static void keg_dtor(void *, int, void *);
238 static int zone_ctor(void *, int, void *, int);
239 static void zone_dtor(void *, int, void *);
240 static int zero_init(void *, int, int);
241 static void keg_small_init(uma_keg_t keg);
242 static void keg_large_init(uma_keg_t keg);
243 static void zone_foreach(void (*zfunc)(uma_zone_t));
244 static void zone_timeout(uma_zone_t zone);
245 static int hash_alloc(struct uma_hash *);
246 static int hash_expand(struct uma_hash *, struct uma_hash *);
247 static void hash_free(struct uma_hash *hash);
248 static void uma_timeout(void *);
249 static void uma_startup3(void);
250 static void *zone_alloc_item(uma_zone_t, void *, int);
251 static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip);
252 static void bucket_enable(void);
253 static void bucket_init(void);
254 static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int);
255 static void bucket_free(uma_zone_t zone, uma_bucket_t, void *);
256 static void bucket_zone_drain(void);
257 static uma_bucket_t zone_alloc_bucket(uma_zone_t zone, void *, int flags);
258 static uma_slab_t zone_fetch_slab(uma_zone_t zone, uma_keg_t last, int flags);
259 static uma_slab_t zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int flags);
260 static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
261 static void slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item);
262 static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
263     uma_fini fini, int align, uint32_t flags);
264 static int zone_import(uma_zone_t zone, void **bucket, int max, int flags);
265 static void zone_release(uma_zone_t zone, void **bucket, int cnt);
266 
267 void uma_print_zone(uma_zone_t);
268 void uma_print_stats(void);
269 static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
270 static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);
271 
272 SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);
273 
274 SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLTYPE_INT,
275     0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones");
276 
277 SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
278     0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");
279 
280 static int zone_warnings = 1;
281 TUNABLE_INT("vm.zone_warnings", &zone_warnings);
282 SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RW, &zone_warnings, 0,
283     "Warn when UMA zones becomes full");
284 
285 /*
286  * This routine checks to see whether or not it's safe to enable buckets.
287  */
288 static void
289 bucket_enable(void)
290 {
291 	bucketdisable = vm_page_count_min();
292 }
293 
294 /*
295  * Initialize bucket_zones, the array of zones of buckets of various sizes.
296  *
297  * For each zone, calculate the memory required for each bucket, consisting
298  * of the header and an array of pointers.
299  */
300 static void
301 bucket_init(void)
302 {
303 	struct uma_bucket_zone *ubz;
304 	int size;
305 	int i;
306 
307 	for (i = 0, ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) {
308 		size = roundup(sizeof(struct uma_bucket), sizeof(void *));
309 		size += sizeof(void *) * ubz->ubz_entries;
310 		ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
311 		    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
312 		    UMA_ZONE_MTXCLASS | UMA_ZFLAG_BUCKET);
313 	}
314 }
315 
316 /*
317  * Given a desired number of entries for a bucket, return the zone from which
318  * to allocate the bucket.
319  */
320 static struct uma_bucket_zone *
321 bucket_zone_lookup(int entries)
322 {
323 	struct uma_bucket_zone *ubz;
324 
325 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
326 		if (ubz->ubz_entries >= entries)
327 			return (ubz);
328 	ubz--;
329 	return (ubz);
330 }
331 
332 static int
333 bucket_select(int size)
334 {
335 	struct uma_bucket_zone *ubz;
336 
337 	ubz = &bucket_zones[0];
338 	if (size > ubz->ubz_maxsize)
339 		return MAX((ubz->ubz_maxsize * ubz->ubz_entries) / size, 1);
340 
341 	for (; ubz->ubz_entries != 0; ubz++)
342 		if (ubz->ubz_maxsize < size)
343 			break;
344 	ubz--;
345 	return (ubz->ubz_entries);
346 }
347 
348 static uma_bucket_t
349 bucket_alloc(uma_zone_t zone, void *udata, int flags)
350 {
351 	struct uma_bucket_zone *ubz;
352 	uma_bucket_t bucket;
353 
354 	/*
355 	 * This is to stop us from allocating per cpu buckets while we're
356 	 * running out of vm.boot_pages.  Otherwise, we would exhaust the
357 	 * boot pages.  This also prevents us from allocating buckets in
358 	 * low memory situations.
359 	 */
360 	if (bucketdisable)
361 		return (NULL);
362 	/*
363 	 * To limit bucket recursion we store the original zone flags
364 	 * in a cookie passed via zalloc_arg/zfree_arg.  This allows the
365 	 * NOVM flag to persist even through deep recursions.  We also
366 	 * store ZFLAG_BUCKET once we have recursed attempting to allocate
367 	 * a bucket for a bucket zone so we do not allow infinite bucket
368 	 * recursion.  This cookie will even persist to frees of unused
369 	 * buckets via the allocation path or bucket allocations in the
370 	 * free path.
371 	 */
372 	if ((uintptr_t)udata & UMA_ZFLAG_BUCKET)
373 		return (NULL);
374 	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
375 		udata = (void *)(uintptr_t)zone->uz_flags;
376 	else
377 		udata = (void *)((uintptr_t)udata | UMA_ZFLAG_BUCKET);
378 	if ((uintptr_t)udata & UMA_ZFLAG_CACHEONLY)
379 		flags |= M_NOVM;
380 	ubz = bucket_zone_lookup(zone->uz_count);
381 	bucket = uma_zalloc_arg(ubz->ubz_zone, udata, flags);
382 	if (bucket) {
383 #ifdef INVARIANTS
384 		bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
385 #endif
386 		bucket->ub_cnt = 0;
387 		bucket->ub_entries = ubz->ubz_entries;
388 	}
389 
390 	return (bucket);
391 }
392 
393 static void
394 bucket_free(uma_zone_t zone, uma_bucket_t bucket, void *udata)
395 {
396 	struct uma_bucket_zone *ubz;
397 
398 	KASSERT(bucket->ub_cnt == 0,
399 	    ("bucket_free: Freeing a non free bucket."));
400 	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
401 		udata = (void *)(uintptr_t)zone->uz_flags;
402 	ubz = bucket_zone_lookup(bucket->ub_entries);
403 	uma_zfree_arg(ubz->ubz_zone, bucket, udata);
404 }
405 
406 static void
407 bucket_zone_drain(void)
408 {
409 	struct uma_bucket_zone *ubz;
410 
411 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
412 		zone_drain(ubz->ubz_zone);
413 }
414 
415 static void
416 zone_log_warning(uma_zone_t zone)
417 {
418 	static const struct timeval warninterval = { 300, 0 };
419 
420 	if (!zone_warnings || zone->uz_warning == NULL)
421 		return;
422 
423 	if (ratecheck(&zone->uz_ratecheck, &warninterval))
424 		printf("[zone: %s] %s\n", zone->uz_name, zone->uz_warning);
425 }
426 
427 static void
428 zone_foreach_keg(uma_zone_t zone, void (*kegfn)(uma_keg_t))
429 {
430 	uma_klink_t klink;
431 
432 	LIST_FOREACH(klink, &zone->uz_kegs, kl_link)
433 		kegfn(klink->kl_keg);
434 }
435 
436 /*
437  * Routine called by timeout which is used to fire off some time interval
438  * based calculations.  (stats, hash size, etc.)
439  *
440  * Arguments:
441  *	arg   Unused
442  *
443  * Returns:
444  *	Nothing
445  */
446 static void
447 uma_timeout(void *unused)
448 {
449 	bucket_enable();
450 	zone_foreach(zone_timeout);
451 
452 	/* Reschedule this event */
453 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
454 }
455 
456 /*
457  * Routine to perform timeout driven calculations.  This expands the
458  * hashes and does per cpu statistics aggregation.
459  *
460  *  Returns nothing.
461  */
462 static void
463 keg_timeout(uma_keg_t keg)
464 {
465 
466 	KEG_LOCK(keg);
467 	/*
468 	 * Expand the keg hash table.
469 	 *
470 	 * This is done if the number of slabs is larger than the hash size.
471 	 * What I'm trying to do here is completely reduce collisions.  This
472 	 * may be a little aggressive.  Should I allow for two collisions max?
473 	 */
474 	if (keg->uk_flags & UMA_ZONE_HASH &&
475 	    keg->uk_pages / keg->uk_ppera >= keg->uk_hash.uh_hashsize) {
476 		struct uma_hash newhash;
477 		struct uma_hash oldhash;
478 		int ret;
479 
480 		/*
481 		 * This is so involved because allocating and freeing
482 		 * while the keg lock is held will lead to deadlock.
483 		 * I have to do everything in stages and check for
484 		 * races.
485 		 */
486 		newhash = keg->uk_hash;
487 		KEG_UNLOCK(keg);
488 		ret = hash_alloc(&newhash);
489 		KEG_LOCK(keg);
490 		if (ret) {
491 			if (hash_expand(&keg->uk_hash, &newhash)) {
492 				oldhash = keg->uk_hash;
493 				keg->uk_hash = newhash;
494 			} else
495 				oldhash = newhash;
496 
497 			KEG_UNLOCK(keg);
498 			hash_free(&oldhash);
499 			return;
500 		}
501 	}
502 	KEG_UNLOCK(keg);
503 }
504 
505 static void
506 zone_timeout(uma_zone_t zone)
507 {
508 
509 	zone_foreach_keg(zone, &keg_timeout);
510 }
511 
512 /*
513  * Allocate and zero fill the next sized hash table from the appropriate
514  * backing store.
515  *
516  * Arguments:
517  *	hash  A new hash structure with the old hash size in uh_hashsize
518  *
519  * Returns:
520  *	1 on sucess and 0 on failure.
521  */
522 static int
523 hash_alloc(struct uma_hash *hash)
524 {
525 	int oldsize;
526 	int alloc;
527 
528 	oldsize = hash->uh_hashsize;
529 
530 	/* We're just going to go to a power of two greater */
531 	if (oldsize)  {
532 		hash->uh_hashsize = oldsize * 2;
533 		alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
534 		hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
535 		    M_UMAHASH, M_NOWAIT);
536 	} else {
537 		alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
538 		hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
539 		    M_WAITOK);
540 		hash->uh_hashsize = UMA_HASH_SIZE_INIT;
541 	}
542 	if (hash->uh_slab_hash) {
543 		bzero(hash->uh_slab_hash, alloc);
544 		hash->uh_hashmask = hash->uh_hashsize - 1;
545 		return (1);
546 	}
547 
548 	return (0);
549 }
550 
551 /*
552  * Expands the hash table for HASH zones.  This is done from zone_timeout
553  * to reduce collisions.  This must not be done in the regular allocation
554  * path, otherwise, we can recurse on the vm while allocating pages.
555  *
556  * Arguments:
557  *	oldhash  The hash you want to expand
558  *	newhash  The hash structure for the new table
559  *
560  * Returns:
561  *	Nothing
562  *
563  * Discussion:
564  */
565 static int
566 hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
567 {
568 	uma_slab_t slab;
569 	int hval;
570 	int i;
571 
572 	if (!newhash->uh_slab_hash)
573 		return (0);
574 
575 	if (oldhash->uh_hashsize >= newhash->uh_hashsize)
576 		return (0);
577 
578 	/*
579 	 * I need to investigate hash algorithms for resizing without a
580 	 * full rehash.
581 	 */
582 
583 	for (i = 0; i < oldhash->uh_hashsize; i++)
584 		while (!SLIST_EMPTY(&oldhash->uh_slab_hash[i])) {
585 			slab = SLIST_FIRST(&oldhash->uh_slab_hash[i]);
586 			SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[i], us_hlink);
587 			hval = UMA_HASH(newhash, slab->us_data);
588 			SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
589 			    slab, us_hlink);
590 		}
591 
592 	return (1);
593 }
594 
595 /*
596  * Free the hash bucket to the appropriate backing store.
597  *
598  * Arguments:
599  *	slab_hash  The hash bucket we're freeing
600  *	hashsize   The number of entries in that hash bucket
601  *
602  * Returns:
603  *	Nothing
604  */
605 static void
606 hash_free(struct uma_hash *hash)
607 {
608 	if (hash->uh_slab_hash == NULL)
609 		return;
610 	if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
611 		zone_free_item(hashzone, hash->uh_slab_hash, NULL, SKIP_NONE);
612 	else
613 		free(hash->uh_slab_hash, M_UMAHASH);
614 }
615 
616 /*
617  * Frees all outstanding items in a bucket
618  *
619  * Arguments:
620  *	zone   The zone to free to, must be unlocked.
621  *	bucket The free/alloc bucket with items, cpu queue must be locked.
622  *
623  * Returns:
624  *	Nothing
625  */
626 
627 static void
628 bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
629 {
630 	int i;
631 
632 	if (bucket == NULL)
633 		return;
634 
635 	if (zone->uz_fini)
636 		for (i = 0; i < bucket->ub_cnt; i++)
637 			zone->uz_fini(bucket->ub_bucket[i], zone->uz_size);
638 	zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt);
639 	bucket->ub_cnt = 0;
640 }
641 
642 /*
643  * Drains the per cpu caches for a zone.
644  *
645  * NOTE: This may only be called while the zone is being turn down, and not
646  * during normal operation.  This is necessary in order that we do not have
647  * to migrate CPUs to drain the per-CPU caches.
648  *
649  * Arguments:
650  *	zone     The zone to drain, must be unlocked.
651  *
652  * Returns:
653  *	Nothing
654  */
655 static void
656 cache_drain(uma_zone_t zone)
657 {
658 	uma_cache_t cache;
659 	int cpu;
660 
661 	/*
662 	 * XXX: It is safe to not lock the per-CPU caches, because we're
663 	 * tearing down the zone anyway.  I.e., there will be no further use
664 	 * of the caches at this point.
665 	 *
666 	 * XXX: It would good to be able to assert that the zone is being
667 	 * torn down to prevent improper use of cache_drain().
668 	 *
669 	 * XXX: We lock the zone before passing into bucket_cache_drain() as
670 	 * it is used elsewhere.  Should the tear-down path be made special
671 	 * there in some form?
672 	 */
673 	CPU_FOREACH(cpu) {
674 		cache = &zone->uz_cpu[cpu];
675 		bucket_drain(zone, cache->uc_allocbucket);
676 		bucket_drain(zone, cache->uc_freebucket);
677 		if (cache->uc_allocbucket != NULL)
678 			bucket_free(zone, cache->uc_allocbucket, NULL);
679 		if (cache->uc_freebucket != NULL)
680 			bucket_free(zone, cache->uc_freebucket, NULL);
681 		cache->uc_allocbucket = cache->uc_freebucket = NULL;
682 	}
683 	ZONE_LOCK(zone);
684 	bucket_cache_drain(zone);
685 	ZONE_UNLOCK(zone);
686 }
687 
688 static void
689 cache_shrink(uma_zone_t zone)
690 {
691 
692 	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
693 		return;
694 
695 	ZONE_LOCK(zone);
696 	zone->uz_count = (zone->uz_count_min + zone->uz_count) / 2;
697 	ZONE_UNLOCK(zone);
698 }
699 
700 static void
701 cache_drain_safe_cpu(uma_zone_t zone)
702 {
703 	uma_cache_t cache;
704 	uma_bucket_t b1, b2;
705 
706 	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
707 		return;
708 
709 	b1 = b2 = NULL;
710 	ZONE_LOCK(zone);
711 	critical_enter();
712 	cache = &zone->uz_cpu[curcpu];
713 	if (cache->uc_allocbucket) {
714 		if (cache->uc_allocbucket->ub_cnt != 0)
715 			LIST_INSERT_HEAD(&zone->uz_buckets,
716 			    cache->uc_allocbucket, ub_link);
717 		else
718 			b1 = cache->uc_allocbucket;
719 		cache->uc_allocbucket = NULL;
720 	}
721 	if (cache->uc_freebucket) {
722 		if (cache->uc_freebucket->ub_cnt != 0)
723 			LIST_INSERT_HEAD(&zone->uz_buckets,
724 			    cache->uc_freebucket, ub_link);
725 		else
726 			b2 = cache->uc_freebucket;
727 		cache->uc_freebucket = NULL;
728 	}
729 	critical_exit();
730 	ZONE_UNLOCK(zone);
731 	if (b1)
732 		bucket_free(zone, b1, NULL);
733 	if (b2)
734 		bucket_free(zone, b2, NULL);
735 }
736 
737 /*
738  * Safely drain per-CPU caches of a zone(s) to alloc bucket.
739  * This is an expensive call because it needs to bind to all CPUs
740  * one by one and enter a critical section on each of them in order
741  * to safely access their cache buckets.
742  * Zone lock must not be held on call this function.
743  */
744 static void
745 cache_drain_safe(uma_zone_t zone)
746 {
747 	int cpu;
748 
749 	/*
750 	 * Polite bucket sizes shrinking was not enouth, shrink aggressively.
751 	 */
752 	if (zone)
753 		cache_shrink(zone);
754 	else
755 		zone_foreach(cache_shrink);
756 
757 	CPU_FOREACH(cpu) {
758 		thread_lock(curthread);
759 		sched_bind(curthread, cpu);
760 		thread_unlock(curthread);
761 
762 		if (zone)
763 			cache_drain_safe_cpu(zone);
764 		else
765 			zone_foreach(cache_drain_safe_cpu);
766 	}
767 	thread_lock(curthread);
768 	sched_unbind(curthread);
769 	thread_unlock(curthread);
770 }
771 
772 /*
773  * Drain the cached buckets from a zone.  Expects a locked zone on entry.
774  */
775 static void
776 bucket_cache_drain(uma_zone_t zone)
777 {
778 	uma_bucket_t bucket;
779 
780 	/*
781 	 * Drain the bucket queues and free the buckets, we just keep two per
782 	 * cpu (alloc/free).
783 	 */
784 	while ((bucket = LIST_FIRST(&zone->uz_buckets)) != NULL) {
785 		LIST_REMOVE(bucket, ub_link);
786 		ZONE_UNLOCK(zone);
787 		bucket_drain(zone, bucket);
788 		bucket_free(zone, bucket, NULL);
789 		ZONE_LOCK(zone);
790 	}
791 
792 	/*
793 	 * Shrink further bucket sizes.  Price of single zone lock collision
794 	 * is probably lower then price of global cache drain.
795 	 */
796 	if (zone->uz_count > zone->uz_count_min)
797 		zone->uz_count--;
798 }
799 
800 static void
801 keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
802 {
803 	uint8_t *mem;
804 	int i;
805 	uint8_t flags;
806 
807 	mem = slab->us_data;
808 	flags = slab->us_flags;
809 	i = start;
810 	if (keg->uk_fini != NULL) {
811 		for (i--; i > -1; i--)
812 			keg->uk_fini(slab->us_data + (keg->uk_rsize * i),
813 			    keg->uk_size);
814 	}
815 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
816 		zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
817 #ifdef UMA_DEBUG
818 	printf("%s: Returning %d bytes.\n", keg->uk_name,
819 	    PAGE_SIZE * keg->uk_ppera);
820 #endif
821 	keg->uk_freef(mem, PAGE_SIZE * keg->uk_ppera, flags);
822 }
823 
824 /*
825  * Frees pages from a keg back to the system.  This is done on demand from
826  * the pageout daemon.
827  *
828  * Returns nothing.
829  */
830 static void
831 keg_drain(uma_keg_t keg)
832 {
833 	struct slabhead freeslabs = { 0 };
834 	uma_slab_t slab;
835 	uma_slab_t n;
836 
837 	/*
838 	 * We don't want to take pages from statically allocated kegs at this
839 	 * time
840 	 */
841 	if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
842 		return;
843 
844 #ifdef UMA_DEBUG
845 	printf("%s free items: %u\n", keg->uk_name, keg->uk_free);
846 #endif
847 	KEG_LOCK(keg);
848 	if (keg->uk_free == 0)
849 		goto finished;
850 
851 	slab = LIST_FIRST(&keg->uk_free_slab);
852 	while (slab) {
853 		n = LIST_NEXT(slab, us_link);
854 
855 		/* We have no where to free these to */
856 		if (slab->us_flags & UMA_SLAB_BOOT) {
857 			slab = n;
858 			continue;
859 		}
860 
861 		LIST_REMOVE(slab, us_link);
862 		keg->uk_pages -= keg->uk_ppera;
863 		keg->uk_free -= keg->uk_ipers;
864 
865 		if (keg->uk_flags & UMA_ZONE_HASH)
866 			UMA_HASH_REMOVE(&keg->uk_hash, slab, slab->us_data);
867 
868 		SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);
869 
870 		slab = n;
871 	}
872 finished:
873 	KEG_UNLOCK(keg);
874 
875 	while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
876 		SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
877 		keg_free_slab(keg, slab, keg->uk_ipers);
878 	}
879 }
880 
881 static void
882 zone_drain_wait(uma_zone_t zone, int waitok)
883 {
884 
885 	/*
886 	 * Set draining to interlock with zone_dtor() so we can release our
887 	 * locks as we go.  Only dtor() should do a WAITOK call since it
888 	 * is the only call that knows the structure will still be available
889 	 * when it wakes up.
890 	 */
891 	ZONE_LOCK(zone);
892 	while (zone->uz_flags & UMA_ZFLAG_DRAINING) {
893 		if (waitok == M_NOWAIT)
894 			goto out;
895 		mtx_unlock(&uma_mtx);
896 		msleep(zone, zone->uz_lockptr, PVM, "zonedrain", 1);
897 		mtx_lock(&uma_mtx);
898 	}
899 	zone->uz_flags |= UMA_ZFLAG_DRAINING;
900 	bucket_cache_drain(zone);
901 	ZONE_UNLOCK(zone);
902 	/*
903 	 * The DRAINING flag protects us from being freed while
904 	 * we're running.  Normally the uma_mtx would protect us but we
905 	 * must be able to release and acquire the right lock for each keg.
906 	 */
907 	zone_foreach_keg(zone, &keg_drain);
908 	ZONE_LOCK(zone);
909 	zone->uz_flags &= ~UMA_ZFLAG_DRAINING;
910 	wakeup(zone);
911 out:
912 	ZONE_UNLOCK(zone);
913 }
914 
915 void
916 zone_drain(uma_zone_t zone)
917 {
918 
919 	zone_drain_wait(zone, M_NOWAIT);
920 }
921 
922 /*
923  * Allocate a new slab for a keg.  This does not insert the slab onto a list.
924  *
925  * Arguments:
926  *	wait  Shall we wait?
927  *
928  * Returns:
929  *	The slab that was allocated or NULL if there is no memory and the
930  *	caller specified M_NOWAIT.
931  */
932 static uma_slab_t
933 keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int wait)
934 {
935 	uma_slabrefcnt_t slabref;
936 	uma_alloc allocf;
937 	uma_slab_t slab;
938 	uint8_t *mem;
939 	uint8_t flags;
940 	int i;
941 
942 	mtx_assert(&keg->uk_lock, MA_OWNED);
943 	slab = NULL;
944 	mem = NULL;
945 
946 #ifdef UMA_DEBUG
947 	printf("alloc_slab:  Allocating a new slab for %s\n", keg->uk_name);
948 #endif
949 	allocf = keg->uk_allocf;
950 	KEG_UNLOCK(keg);
951 
952 	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
953 		slab = zone_alloc_item(keg->uk_slabzone, NULL, wait);
954 		if (slab == NULL)
955 			goto out;
956 	}
957 
958 	/*
959 	 * This reproduces the old vm_zone behavior of zero filling pages the
960 	 * first time they are added to a zone.
961 	 *
962 	 * Malloced items are zeroed in uma_zalloc.
963 	 */
964 
965 	if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
966 		wait |= M_ZERO;
967 	else
968 		wait &= ~M_ZERO;
969 
970 	if (keg->uk_flags & UMA_ZONE_NODUMP)
971 		wait |= M_NODUMP;
972 
973 	/* zone is passed for legacy reasons. */
974 	mem = allocf(zone, keg->uk_ppera * PAGE_SIZE, &flags, wait);
975 	if (mem == NULL) {
976 		if (keg->uk_flags & UMA_ZONE_OFFPAGE)
977 			zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
978 		slab = NULL;
979 		goto out;
980 	}
981 
982 	/* Point the slab into the allocated memory */
983 	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
984 		slab = (uma_slab_t )(mem + keg->uk_pgoff);
985 
986 	if (keg->uk_flags & UMA_ZONE_VTOSLAB)
987 		for (i = 0; i < keg->uk_ppera; i++)
988 			vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);
989 
990 	slab->us_keg = keg;
991 	slab->us_data = mem;
992 	slab->us_freecount = keg->uk_ipers;
993 	slab->us_flags = flags;
994 	BIT_FILL(SLAB_SETSIZE, &slab->us_free);
995 #ifdef INVARIANTS
996 	BIT_ZERO(SLAB_SETSIZE, &slab->us_debugfree);
997 #endif
998 	if (keg->uk_flags & UMA_ZONE_REFCNT) {
999 		slabref = (uma_slabrefcnt_t)slab;
1000 		for (i = 0; i < keg->uk_ipers; i++)
1001 			slabref->us_refcnt[i] = 0;
1002 	}
1003 
1004 	if (keg->uk_init != NULL) {
1005 		for (i = 0; i < keg->uk_ipers; i++)
1006 			if (keg->uk_init(slab->us_data + (keg->uk_rsize * i),
1007 			    keg->uk_size, wait) != 0)
1008 				break;
1009 		if (i != keg->uk_ipers) {
1010 			keg_free_slab(keg, slab, i);
1011 			slab = NULL;
1012 			goto out;
1013 		}
1014 	}
1015 out:
1016 	KEG_LOCK(keg);
1017 
1018 	if (slab != NULL) {
1019 		if (keg->uk_flags & UMA_ZONE_HASH)
1020 			UMA_HASH_INSERT(&keg->uk_hash, slab, mem);
1021 
1022 		keg->uk_pages += keg->uk_ppera;
1023 		keg->uk_free += keg->uk_ipers;
1024 	}
1025 
1026 	return (slab);
1027 }
1028 
1029 /*
1030  * This function is intended to be used early on in place of page_alloc() so
1031  * that we may use the boot time page cache to satisfy allocations before
1032  * the VM is ready.
1033  */
1034 static void *
1035 startup_alloc(uma_zone_t zone, int bytes, uint8_t *pflag, int wait)
1036 {
1037 	uma_keg_t keg;
1038 	uma_slab_t tmps;
1039 	int pages, check_pages;
1040 
1041 	keg = zone_first_keg(zone);
1042 	pages = howmany(bytes, PAGE_SIZE);
1043 	check_pages = pages - 1;
1044 	KASSERT(pages > 0, ("startup_alloc can't reserve 0 pages\n"));
1045 
1046 	/*
1047 	 * Check our small startup cache to see if it has pages remaining.
1048 	 */
1049 	mtx_lock(&uma_boot_pages_mtx);
1050 
1051 	/* First check if we have enough room. */
1052 	tmps = LIST_FIRST(&uma_boot_pages);
1053 	while (tmps != NULL && check_pages-- > 0)
1054 		tmps = LIST_NEXT(tmps, us_link);
1055 	if (tmps != NULL) {
1056 		/*
1057 		 * It's ok to lose tmps references.  The last one will
1058 		 * have tmps->us_data pointing to the start address of
1059 		 * "pages" contiguous pages of memory.
1060 		 */
1061 		while (pages-- > 0) {
1062 			tmps = LIST_FIRST(&uma_boot_pages);
1063 			LIST_REMOVE(tmps, us_link);
1064 		}
1065 		mtx_unlock(&uma_boot_pages_mtx);
1066 		*pflag = tmps->us_flags;
1067 		return (tmps->us_data);
1068 	}
1069 	mtx_unlock(&uma_boot_pages_mtx);
1070 	if (booted < UMA_STARTUP2)
1071 		panic("UMA: Increase vm.boot_pages");
1072 	/*
1073 	 * Now that we've booted reset these users to their real allocator.
1074 	 */
1075 #ifdef UMA_MD_SMALL_ALLOC
1076 	keg->uk_allocf = (keg->uk_ppera > 1) ? page_alloc : uma_small_alloc;
1077 #else
1078 	keg->uk_allocf = page_alloc;
1079 #endif
1080 	return keg->uk_allocf(zone, bytes, pflag, wait);
1081 }
1082 
1083 /*
1084  * Allocates a number of pages from the system
1085  *
1086  * Arguments:
1087  *	bytes  The number of bytes requested
1088  *	wait  Shall we wait?
1089  *
1090  * Returns:
1091  *	A pointer to the alloced memory or possibly
1092  *	NULL if M_NOWAIT is set.
1093  */
1094 static void *
1095 page_alloc(uma_zone_t zone, int bytes, uint8_t *pflag, int wait)
1096 {
1097 	void *p;	/* Returned page */
1098 
1099 	*pflag = UMA_SLAB_KMEM;
1100 	p = (void *) kmem_malloc(kmem_arena, bytes, wait);
1101 
1102 	return (p);
1103 }
1104 
1105 /*
1106  * Allocates a number of pages from within an object
1107  *
1108  * Arguments:
1109  *	bytes  The number of bytes requested
1110  *	wait   Shall we wait?
1111  *
1112  * Returns:
1113  *	A pointer to the alloced memory or possibly
1114  *	NULL if M_NOWAIT is set.
1115  */
1116 static void *
1117 noobj_alloc(uma_zone_t zone, int bytes, uint8_t *flags, int wait)
1118 {
1119 	TAILQ_HEAD(, vm_page) alloctail;
1120 	u_long npages;
1121 	vm_offset_t retkva, zkva;
1122 	vm_page_t p, p_next;
1123 	uma_keg_t keg;
1124 
1125 	TAILQ_INIT(&alloctail);
1126 	keg = zone_first_keg(zone);
1127 
1128 	npages = howmany(bytes, PAGE_SIZE);
1129 	while (npages > 0) {
1130 		p = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT |
1131 		    VM_ALLOC_WIRED | VM_ALLOC_NOOBJ);
1132 		if (p != NULL) {
1133 			/*
1134 			 * Since the page does not belong to an object, its
1135 			 * listq is unused.
1136 			 */
1137 			TAILQ_INSERT_TAIL(&alloctail, p, listq);
1138 			npages--;
1139 			continue;
1140 		}
1141 		if (wait & M_WAITOK) {
1142 			VM_WAIT;
1143 			continue;
1144 		}
1145 
1146 		/*
1147 		 * Page allocation failed, free intermediate pages and
1148 		 * exit.
1149 		 */
1150 		TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
1151 			vm_page_unwire(p, 0);
1152 			vm_page_free(p);
1153 		}
1154 		return (NULL);
1155 	}
1156 	*flags = UMA_SLAB_PRIV;
1157 	zkva = keg->uk_kva +
1158 	    atomic_fetchadd_long(&keg->uk_offset, round_page(bytes));
1159 	retkva = zkva;
1160 	TAILQ_FOREACH(p, &alloctail, listq) {
1161 		pmap_qenter(zkva, &p, 1);
1162 		zkva += PAGE_SIZE;
1163 	}
1164 
1165 	return ((void *)retkva);
1166 }
1167 
1168 /*
1169  * Frees a number of pages to the system
1170  *
1171  * Arguments:
1172  *	mem   A pointer to the memory to be freed
1173  *	size  The size of the memory being freed
1174  *	flags The original p->us_flags field
1175  *
1176  * Returns:
1177  *	Nothing
1178  */
1179 static void
1180 page_free(void *mem, int size, uint8_t flags)
1181 {
1182 	struct vmem *vmem;
1183 
1184 	if (flags & UMA_SLAB_KMEM)
1185 		vmem = kmem_arena;
1186 	else if (flags & UMA_SLAB_KERNEL)
1187 		vmem = kernel_arena;
1188 	else
1189 		panic("UMA: page_free used with invalid flags %d", flags);
1190 
1191 	kmem_free(vmem, (vm_offset_t)mem, size);
1192 }
1193 
1194 /*
1195  * Zero fill initializer
1196  *
1197  * Arguments/Returns follow uma_init specifications
1198  */
1199 static int
1200 zero_init(void *mem, int size, int flags)
1201 {
1202 	bzero(mem, size);
1203 	return (0);
1204 }
1205 
1206 /*
1207  * Finish creating a small uma keg.  This calculates ipers, and the keg size.
1208  *
1209  * Arguments
1210  *	keg  The zone we should initialize
1211  *
1212  * Returns
1213  *	Nothing
1214  */
1215 static void
1216 keg_small_init(uma_keg_t keg)
1217 {
1218 	u_int rsize;
1219 	u_int memused;
1220 	u_int wastedspace;
1221 	u_int shsize;
1222 
1223 	if (keg->uk_flags & UMA_ZONE_PCPU) {
1224 		u_int ncpus = mp_ncpus ? mp_ncpus : MAXCPU;
1225 
1226 		keg->uk_slabsize = sizeof(struct pcpu);
1227 		keg->uk_ppera = howmany(ncpus * sizeof(struct pcpu),
1228 		    PAGE_SIZE);
1229 	} else {
1230 		keg->uk_slabsize = UMA_SLAB_SIZE;
1231 		keg->uk_ppera = 1;
1232 	}
1233 
1234 	/*
1235 	 * Calculate the size of each allocation (rsize) according to
1236 	 * alignment.  If the requested size is smaller than we have
1237 	 * allocation bits for we round it up.
1238 	 */
1239 	rsize = keg->uk_size;
1240 	if (rsize < keg->uk_slabsize / SLAB_SETSIZE)
1241 		rsize = keg->uk_slabsize / SLAB_SETSIZE;
1242 	if (rsize & keg->uk_align)
1243 		rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1);
1244 	keg->uk_rsize = rsize;
1245 
1246 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0 ||
1247 	    keg->uk_rsize < sizeof(struct pcpu),
1248 	    ("%s: size %u too large", __func__, keg->uk_rsize));
1249 
1250 	if (keg->uk_flags & UMA_ZONE_REFCNT)
1251 		rsize += sizeof(uint32_t);
1252 
1253 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
1254 		shsize = 0;
1255 	else
1256 		shsize = sizeof(struct uma_slab);
1257 
1258 	keg->uk_ipers = (keg->uk_slabsize - shsize) / rsize;
1259 	KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
1260 	    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
1261 
1262 	memused = keg->uk_ipers * rsize + shsize;
1263 	wastedspace = keg->uk_slabsize - memused;
1264 
1265 	/*
1266 	 * We can't do OFFPAGE if we're internal or if we've been
1267 	 * asked to not go to the VM for buckets.  If we do this we
1268 	 * may end up going to the VM  for slabs which we do not
1269 	 * want to do if we're UMA_ZFLAG_CACHEONLY as a result
1270 	 * of UMA_ZONE_VM, which clearly forbids it.
1271 	 */
1272 	if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
1273 	    (keg->uk_flags & UMA_ZFLAG_CACHEONLY))
1274 		return;
1275 
1276 	/*
1277 	 * See if using an OFFPAGE slab will limit our waste.  Only do
1278 	 * this if it permits more items per-slab.
1279 	 *
1280 	 * XXX We could try growing slabsize to limit max waste as well.
1281 	 * Historically this was not done because the VM could not
1282 	 * efficiently handle contiguous allocations.
1283 	 */
1284 	if ((wastedspace >= keg->uk_slabsize / UMA_MAX_WASTE) &&
1285 	    (keg->uk_ipers < (keg->uk_slabsize / keg->uk_rsize))) {
1286 		keg->uk_ipers = keg->uk_slabsize / keg->uk_rsize;
1287 		KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
1288 		    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
1289 #ifdef UMA_DEBUG
1290 		printf("UMA decided we need offpage slab headers for "
1291 		    "keg: %s, calculated wastedspace = %d, "
1292 		    "maximum wasted space allowed = %d, "
1293 		    "calculated ipers = %d, "
1294 		    "new wasted space = %d\n", keg->uk_name, wastedspace,
1295 		    keg->uk_slabsize / UMA_MAX_WASTE, keg->uk_ipers,
1296 		    keg->uk_slabsize - keg->uk_ipers * keg->uk_rsize);
1297 #endif
1298 		keg->uk_flags |= UMA_ZONE_OFFPAGE;
1299 	}
1300 
1301 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
1302 	    (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1303 		keg->uk_flags |= UMA_ZONE_HASH;
1304 }
1305 
1306 /*
1307  * Finish creating a large (> UMA_SLAB_SIZE) uma kegs.  Just give in and do
1308  * OFFPAGE for now.  When I can allow for more dynamic slab sizes this will be
1309  * more complicated.
1310  *
1311  * Arguments
1312  *	keg  The keg we should initialize
1313  *
1314  * Returns
1315  *	Nothing
1316  */
1317 static void
1318 keg_large_init(uma_keg_t keg)
1319 {
1320 
1321 	KASSERT(keg != NULL, ("Keg is null in keg_large_init"));
1322 	KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0,
1323 	    ("keg_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY keg"));
1324 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
1325 	    ("%s: Cannot large-init a UMA_ZONE_PCPU keg", __func__));
1326 
1327 	keg->uk_ppera = howmany(keg->uk_size, PAGE_SIZE);
1328 	keg->uk_slabsize = keg->uk_ppera * PAGE_SIZE;
1329 	keg->uk_ipers = 1;
1330 	keg->uk_rsize = keg->uk_size;
1331 
1332 	/* We can't do OFFPAGE if we're internal, bail out here. */
1333 	if (keg->uk_flags & UMA_ZFLAG_INTERNAL)
1334 		return;
1335 
1336 	keg->uk_flags |= UMA_ZONE_OFFPAGE;
1337 	if ((keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1338 		keg->uk_flags |= UMA_ZONE_HASH;
1339 }
1340 
1341 static void
1342 keg_cachespread_init(uma_keg_t keg)
1343 {
1344 	int alignsize;
1345 	int trailer;
1346 	int pages;
1347 	int rsize;
1348 
1349 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
1350 	    ("%s: Cannot cachespread-init a UMA_ZONE_PCPU keg", __func__));
1351 
1352 	alignsize = keg->uk_align + 1;
1353 	rsize = keg->uk_size;
1354 	/*
1355 	 * We want one item to start on every align boundary in a page.  To
1356 	 * do this we will span pages.  We will also extend the item by the
1357 	 * size of align if it is an even multiple of align.  Otherwise, it
1358 	 * would fall on the same boundary every time.
1359 	 */
1360 	if (rsize & keg->uk_align)
1361 		rsize = (rsize & ~keg->uk_align) + alignsize;
1362 	if ((rsize & alignsize) == 0)
1363 		rsize += alignsize;
1364 	trailer = rsize - keg->uk_size;
1365 	pages = (rsize * (PAGE_SIZE / alignsize)) / PAGE_SIZE;
1366 	pages = MIN(pages, (128 * 1024) / PAGE_SIZE);
1367 	keg->uk_rsize = rsize;
1368 	keg->uk_ppera = pages;
1369 	keg->uk_slabsize = UMA_SLAB_SIZE;
1370 	keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize;
1371 	keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
1372 	KASSERT(keg->uk_ipers <= uma_max_ipers,
1373 	    ("%s: keg->uk_ipers too high(%d) increase max_ipers", __func__,
1374 	    keg->uk_ipers));
1375 }
1376 
1377 /*
1378  * Keg header ctor.  This initializes all fields, locks, etc.  And inserts
1379  * the keg onto the global keg list.
1380  *
1381  * Arguments/Returns follow uma_ctor specifications
1382  *	udata  Actually uma_kctor_args
1383  */
1384 static int
1385 keg_ctor(void *mem, int size, void *udata, int flags)
1386 {
1387 	struct uma_kctor_args *arg = udata;
1388 	uma_keg_t keg = mem;
1389 	uma_zone_t zone;
1390 
1391 	bzero(keg, size);
1392 	keg->uk_size = arg->size;
1393 	keg->uk_init = arg->uminit;
1394 	keg->uk_fini = arg->fini;
1395 	keg->uk_align = arg->align;
1396 	keg->uk_free = 0;
1397 	keg->uk_reserve = 0;
1398 	keg->uk_pages = 0;
1399 	keg->uk_flags = arg->flags;
1400 	keg->uk_allocf = page_alloc;
1401 	keg->uk_freef = page_free;
1402 	keg->uk_slabzone = NULL;
1403 
1404 	/*
1405 	 * The master zone is passed to us at keg-creation time.
1406 	 */
1407 	zone = arg->zone;
1408 	keg->uk_name = zone->uz_name;
1409 
1410 	if (arg->flags & UMA_ZONE_VM)
1411 		keg->uk_flags |= UMA_ZFLAG_CACHEONLY;
1412 
1413 	if (arg->flags & UMA_ZONE_ZINIT)
1414 		keg->uk_init = zero_init;
1415 
1416 	if (arg->flags & UMA_ZONE_REFCNT || arg->flags & UMA_ZONE_MALLOC)
1417 		keg->uk_flags |= UMA_ZONE_VTOSLAB;
1418 
1419 	if (arg->flags & UMA_ZONE_PCPU)
1420 #ifdef SMP
1421 		keg->uk_flags |= UMA_ZONE_OFFPAGE;
1422 #else
1423 		keg->uk_flags &= ~UMA_ZONE_PCPU;
1424 #endif
1425 
1426 	if (keg->uk_flags & UMA_ZONE_CACHESPREAD) {
1427 		keg_cachespread_init(keg);
1428 	} else if (keg->uk_flags & UMA_ZONE_REFCNT) {
1429 		if (keg->uk_size >
1430 		    (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt) -
1431 		    sizeof(uint32_t)))
1432 			keg_large_init(keg);
1433 		else
1434 			keg_small_init(keg);
1435 	} else {
1436 		if (keg->uk_size > (UMA_SLAB_SIZE - sizeof(struct uma_slab)))
1437 			keg_large_init(keg);
1438 		else
1439 			keg_small_init(keg);
1440 	}
1441 
1442 	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
1443 		if (keg->uk_flags & UMA_ZONE_REFCNT) {
1444 			if (keg->uk_ipers > uma_max_ipers_ref)
1445 				panic("Too many ref items per zone: %d > %d\n",
1446 				    keg->uk_ipers, uma_max_ipers_ref);
1447 			keg->uk_slabzone = slabrefzone;
1448 		} else
1449 			keg->uk_slabzone = slabzone;
1450 	}
1451 
1452 	/*
1453 	 * If we haven't booted yet we need allocations to go through the
1454 	 * startup cache until the vm is ready.
1455 	 */
1456 	if (keg->uk_ppera == 1) {
1457 #ifdef UMA_MD_SMALL_ALLOC
1458 		keg->uk_allocf = uma_small_alloc;
1459 		keg->uk_freef = uma_small_free;
1460 
1461 		if (booted < UMA_STARTUP)
1462 			keg->uk_allocf = startup_alloc;
1463 #else
1464 		if (booted < UMA_STARTUP2)
1465 			keg->uk_allocf = startup_alloc;
1466 #endif
1467 	} else if (booted < UMA_STARTUP2 &&
1468 	    (keg->uk_flags & UMA_ZFLAG_INTERNAL))
1469 		keg->uk_allocf = startup_alloc;
1470 
1471 	/*
1472 	 * Initialize keg's lock
1473 	 */
1474 	KEG_LOCK_INIT(keg, (arg->flags & UMA_ZONE_MTXCLASS));
1475 
1476 	/*
1477 	 * If we're putting the slab header in the actual page we need to
1478 	 * figure out where in each page it goes.  This calculates a right
1479 	 * justified offset into the memory on an ALIGN_PTR boundary.
1480 	 */
1481 	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) {
1482 		u_int totsize;
1483 
1484 		/* Size of the slab struct and free list */
1485 		totsize = sizeof(struct uma_slab);
1486 
1487 		/* Size of the reference counts. */
1488 		if (keg->uk_flags & UMA_ZONE_REFCNT)
1489 			totsize += keg->uk_ipers * sizeof(uint32_t);
1490 
1491 		if (totsize & UMA_ALIGN_PTR)
1492 			totsize = (totsize & ~UMA_ALIGN_PTR) +
1493 			    (UMA_ALIGN_PTR + 1);
1494 		keg->uk_pgoff = (PAGE_SIZE * keg->uk_ppera) - totsize;
1495 
1496 		/*
1497 		 * The only way the following is possible is if with our
1498 		 * UMA_ALIGN_PTR adjustments we are now bigger than
1499 		 * UMA_SLAB_SIZE.  I haven't checked whether this is
1500 		 * mathematically possible for all cases, so we make
1501 		 * sure here anyway.
1502 		 */
1503 		totsize = keg->uk_pgoff + sizeof(struct uma_slab);
1504 		if (keg->uk_flags & UMA_ZONE_REFCNT)
1505 			totsize += keg->uk_ipers * sizeof(uint32_t);
1506 		if (totsize > PAGE_SIZE * keg->uk_ppera) {
1507 			printf("zone %s ipers %d rsize %d size %d\n",
1508 			    zone->uz_name, keg->uk_ipers, keg->uk_rsize,
1509 			    keg->uk_size);
1510 			panic("UMA slab won't fit.");
1511 		}
1512 	}
1513 
1514 	if (keg->uk_flags & UMA_ZONE_HASH)
1515 		hash_alloc(&keg->uk_hash);
1516 
1517 #ifdef UMA_DEBUG
1518 	printf("UMA: %s(%p) size %d(%d) flags %#x ipers %d ppera %d out %d free %d\n",
1519 	    zone->uz_name, zone, keg->uk_size, keg->uk_rsize, keg->uk_flags,
1520 	    keg->uk_ipers, keg->uk_ppera,
1521 	    (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free);
1522 #endif
1523 
1524 	LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);
1525 
1526 	mtx_lock(&uma_mtx);
1527 	LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
1528 	mtx_unlock(&uma_mtx);
1529 	return (0);
1530 }
1531 
1532 /*
1533  * Zone header ctor.  This initializes all fields, locks, etc.
1534  *
1535  * Arguments/Returns follow uma_ctor specifications
1536  *	udata  Actually uma_zctor_args
1537  */
1538 static int
1539 zone_ctor(void *mem, int size, void *udata, int flags)
1540 {
1541 	struct uma_zctor_args *arg = udata;
1542 	uma_zone_t zone = mem;
1543 	uma_zone_t z;
1544 	uma_keg_t keg;
1545 
1546 	bzero(zone, size);
1547 	zone->uz_name = arg->name;
1548 	zone->uz_ctor = arg->ctor;
1549 	zone->uz_dtor = arg->dtor;
1550 	zone->uz_slab = zone_fetch_slab;
1551 	zone->uz_init = NULL;
1552 	zone->uz_fini = NULL;
1553 	zone->uz_allocs = 0;
1554 	zone->uz_frees = 0;
1555 	zone->uz_fails = 0;
1556 	zone->uz_sleeps = 0;
1557 	zone->uz_count = 0;
1558 	zone->uz_count_min = 0;
1559 	zone->uz_flags = 0;
1560 	zone->uz_warning = NULL;
1561 	timevalclear(&zone->uz_ratecheck);
1562 	keg = arg->keg;
1563 
1564 	ZONE_LOCK_INIT(zone, (arg->flags & UMA_ZONE_MTXCLASS));
1565 
1566 	/*
1567 	 * This is a pure cache zone, no kegs.
1568 	 */
1569 	if (arg->import) {
1570 		if (arg->flags & UMA_ZONE_VM)
1571 			arg->flags |= UMA_ZFLAG_CACHEONLY;
1572 		zone->uz_flags = arg->flags;
1573 		zone->uz_size = arg->size;
1574 		zone->uz_import = arg->import;
1575 		zone->uz_release = arg->release;
1576 		zone->uz_arg = arg->arg;
1577 		zone->uz_lockptr = &zone->uz_lock;
1578 		goto out;
1579 	}
1580 
1581 	/*
1582 	 * Use the regular zone/keg/slab allocator.
1583 	 */
1584 	zone->uz_import = (uma_import)zone_import;
1585 	zone->uz_release = (uma_release)zone_release;
1586 	zone->uz_arg = zone;
1587 
1588 	if (arg->flags & UMA_ZONE_SECONDARY) {
1589 		KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
1590 		zone->uz_init = arg->uminit;
1591 		zone->uz_fini = arg->fini;
1592 		zone->uz_lockptr = &keg->uk_lock;
1593 		zone->uz_flags |= UMA_ZONE_SECONDARY;
1594 		mtx_lock(&uma_mtx);
1595 		ZONE_LOCK(zone);
1596 		LIST_FOREACH(z, &keg->uk_zones, uz_link) {
1597 			if (LIST_NEXT(z, uz_link) == NULL) {
1598 				LIST_INSERT_AFTER(z, zone, uz_link);
1599 				break;
1600 			}
1601 		}
1602 		ZONE_UNLOCK(zone);
1603 		mtx_unlock(&uma_mtx);
1604 	} else if (keg == NULL) {
1605 		if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
1606 		    arg->align, arg->flags)) == NULL)
1607 			return (ENOMEM);
1608 	} else {
1609 		struct uma_kctor_args karg;
1610 		int error;
1611 
1612 		/* We should only be here from uma_startup() */
1613 		karg.size = arg->size;
1614 		karg.uminit = arg->uminit;
1615 		karg.fini = arg->fini;
1616 		karg.align = arg->align;
1617 		karg.flags = arg->flags;
1618 		karg.zone = zone;
1619 		error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
1620 		    flags);
1621 		if (error)
1622 			return (error);
1623 	}
1624 
1625 	/*
1626 	 * Link in the first keg.
1627 	 */
1628 	zone->uz_klink.kl_keg = keg;
1629 	LIST_INSERT_HEAD(&zone->uz_kegs, &zone->uz_klink, kl_link);
1630 	zone->uz_lockptr = &keg->uk_lock;
1631 	zone->uz_size = keg->uk_size;
1632 	zone->uz_flags |= (keg->uk_flags &
1633 	    (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT));
1634 
1635 	/*
1636 	 * Some internal zones don't have room allocated for the per cpu
1637 	 * caches.  If we're internal, bail out here.
1638 	 */
1639 	if (keg->uk_flags & UMA_ZFLAG_INTERNAL) {
1640 		KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0,
1641 		    ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
1642 		return (0);
1643 	}
1644 
1645 out:
1646 	if ((arg->flags & UMA_ZONE_MAXBUCKET) == 0)
1647 		zone->uz_count = bucket_select(zone->uz_size);
1648 	else
1649 		zone->uz_count = BUCKET_MAX;
1650 	zone->uz_count_min = zone->uz_count;
1651 
1652 	return (0);
1653 }
1654 
1655 /*
1656  * Keg header dtor.  This frees all data, destroys locks, frees the hash
1657  * table and removes the keg from the global list.
1658  *
1659  * Arguments/Returns follow uma_dtor specifications
1660  *	udata  unused
1661  */
1662 static void
1663 keg_dtor(void *arg, int size, void *udata)
1664 {
1665 	uma_keg_t keg;
1666 
1667 	keg = (uma_keg_t)arg;
1668 	KEG_LOCK(keg);
1669 	if (keg->uk_free != 0) {
1670 		printf("Freed UMA keg was not empty (%d items). "
1671 		    " Lost %d pages of memory.\n",
1672 		    keg->uk_free, keg->uk_pages);
1673 	}
1674 	KEG_UNLOCK(keg);
1675 
1676 	hash_free(&keg->uk_hash);
1677 
1678 	KEG_LOCK_FINI(keg);
1679 }
1680 
1681 /*
1682  * Zone header dtor.
1683  *
1684  * Arguments/Returns follow uma_dtor specifications
1685  *	udata  unused
1686  */
1687 static void
1688 zone_dtor(void *arg, int size, void *udata)
1689 {
1690 	uma_klink_t klink;
1691 	uma_zone_t zone;
1692 	uma_keg_t keg;
1693 
1694 	zone = (uma_zone_t)arg;
1695 	keg = zone_first_keg(zone);
1696 
1697 	if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
1698 		cache_drain(zone);
1699 
1700 	mtx_lock(&uma_mtx);
1701 	LIST_REMOVE(zone, uz_link);
1702 	mtx_unlock(&uma_mtx);
1703 	/*
1704 	 * XXX there are some races here where
1705 	 * the zone can be drained but zone lock
1706 	 * released and then refilled before we
1707 	 * remove it... we dont care for now
1708 	 */
1709 	zone_drain_wait(zone, M_WAITOK);
1710 	/*
1711 	 * Unlink all of our kegs.
1712 	 */
1713 	while ((klink = LIST_FIRST(&zone->uz_kegs)) != NULL) {
1714 		klink->kl_keg = NULL;
1715 		LIST_REMOVE(klink, kl_link);
1716 		if (klink == &zone->uz_klink)
1717 			continue;
1718 		free(klink, M_TEMP);
1719 	}
1720 	/*
1721 	 * We only destroy kegs from non secondary zones.
1722 	 */
1723 	if (keg != NULL && (zone->uz_flags & UMA_ZONE_SECONDARY) == 0)  {
1724 		mtx_lock(&uma_mtx);
1725 		LIST_REMOVE(keg, uk_link);
1726 		mtx_unlock(&uma_mtx);
1727 		zone_free_item(kegs, keg, NULL, SKIP_NONE);
1728 	}
1729 	ZONE_LOCK_FINI(zone);
1730 }
1731 
1732 /*
1733  * Traverses every zone in the system and calls a callback
1734  *
1735  * Arguments:
1736  *	zfunc  A pointer to a function which accepts a zone
1737  *		as an argument.
1738  *
1739  * Returns:
1740  *	Nothing
1741  */
1742 static void
1743 zone_foreach(void (*zfunc)(uma_zone_t))
1744 {
1745 	uma_keg_t keg;
1746 	uma_zone_t zone;
1747 
1748 	mtx_lock(&uma_mtx);
1749 	LIST_FOREACH(keg, &uma_kegs, uk_link) {
1750 		LIST_FOREACH(zone, &keg->uk_zones, uz_link)
1751 			zfunc(zone);
1752 	}
1753 	mtx_unlock(&uma_mtx);
1754 }
1755 
1756 /* Public functions */
1757 /* See uma.h */
1758 void
1759 uma_startup(void *bootmem, int boot_pages)
1760 {
1761 	struct uma_zctor_args args;
1762 	uma_slab_t slab;
1763 	u_int slabsize;
1764 	int i;
1765 
1766 #ifdef UMA_DEBUG
1767 	printf("Creating uma keg headers zone and keg.\n");
1768 #endif
1769 	mtx_init(&uma_mtx, "UMA lock", NULL, MTX_DEF);
1770 
1771 	/* "manually" create the initial zone */
1772 	memset(&args, 0, sizeof(args));
1773 	args.name = "UMA Kegs";
1774 	args.size = sizeof(struct uma_keg);
1775 	args.ctor = keg_ctor;
1776 	args.dtor = keg_dtor;
1777 	args.uminit = zero_init;
1778 	args.fini = NULL;
1779 	args.keg = &masterkeg;
1780 	args.align = 32 - 1;
1781 	args.flags = UMA_ZFLAG_INTERNAL;
1782 	/* The initial zone has no Per cpu queues so it's smaller */
1783 	zone_ctor(kegs, sizeof(struct uma_zone), &args, M_WAITOK);
1784 
1785 #ifdef UMA_DEBUG
1786 	printf("Filling boot free list.\n");
1787 #endif
1788 	for (i = 0; i < boot_pages; i++) {
1789 		slab = (uma_slab_t)((uint8_t *)bootmem + (i * UMA_SLAB_SIZE));
1790 		slab->us_data = (uint8_t *)slab;
1791 		slab->us_flags = UMA_SLAB_BOOT;
1792 		LIST_INSERT_HEAD(&uma_boot_pages, slab, us_link);
1793 	}
1794 	mtx_init(&uma_boot_pages_mtx, "UMA boot pages", NULL, MTX_DEF);
1795 
1796 #ifdef UMA_DEBUG
1797 	printf("Creating uma zone headers zone and keg.\n");
1798 #endif
1799 	args.name = "UMA Zones";
1800 	args.size = sizeof(struct uma_zone) +
1801 	    (sizeof(struct uma_cache) * (mp_maxid + 1));
1802 	args.ctor = zone_ctor;
1803 	args.dtor = zone_dtor;
1804 	args.uminit = zero_init;
1805 	args.fini = NULL;
1806 	args.keg = NULL;
1807 	args.align = 32 - 1;
1808 	args.flags = UMA_ZFLAG_INTERNAL;
1809 	/* The initial zone has no Per cpu queues so it's smaller */
1810 	zone_ctor(zones, sizeof(struct uma_zone), &args, M_WAITOK);
1811 
1812 #ifdef UMA_DEBUG
1813 	printf("Initializing pcpu cache locks.\n");
1814 #endif
1815 #ifdef UMA_DEBUG
1816 	printf("Creating slab and hash zones.\n");
1817 #endif
1818 
1819 	/* Now make a zone for slab headers */
1820 	slabzone = uma_zcreate("UMA Slabs",
1821 				sizeof(struct uma_slab),
1822 				NULL, NULL, NULL, NULL,
1823 				UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
1824 
1825 	/*
1826 	 * We also create a zone for the bigger slabs with reference
1827 	 * counts in them, to accomodate UMA_ZONE_REFCNT zones.
1828 	 */
1829 	slabsize = sizeof(struct uma_slab_refcnt);
1830 	slabsize += uma_max_ipers_ref * sizeof(uint32_t);
1831 	slabrefzone = uma_zcreate("UMA RCntSlabs",
1832 				  slabsize,
1833 				  NULL, NULL, NULL, NULL,
1834 				  UMA_ALIGN_PTR,
1835 				  UMA_ZFLAG_INTERNAL);
1836 
1837 	hashzone = uma_zcreate("UMA Hash",
1838 	    sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
1839 	    NULL, NULL, NULL, NULL,
1840 	    UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
1841 
1842 	bucket_init();
1843 
1844 	booted = UMA_STARTUP;
1845 
1846 #ifdef UMA_DEBUG
1847 	printf("UMA startup complete.\n");
1848 #endif
1849 }
1850 
1851 /* see uma.h */
1852 void
1853 uma_startup2(void)
1854 {
1855 	booted = UMA_STARTUP2;
1856 	bucket_enable();
1857 #ifdef UMA_DEBUG
1858 	printf("UMA startup2 complete.\n");
1859 #endif
1860 }
1861 
1862 /*
1863  * Initialize our callout handle
1864  *
1865  */
1866 
1867 static void
1868 uma_startup3(void)
1869 {
1870 #ifdef UMA_DEBUG
1871 	printf("Starting callout.\n");
1872 #endif
1873 	callout_init(&uma_callout, CALLOUT_MPSAFE);
1874 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
1875 #ifdef UMA_DEBUG
1876 	printf("UMA startup3 complete.\n");
1877 #endif
1878 }
1879 
1880 static uma_keg_t
1881 uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
1882 		int align, uint32_t flags)
1883 {
1884 	struct uma_kctor_args args;
1885 
1886 	args.size = size;
1887 	args.uminit = uminit;
1888 	args.fini = fini;
1889 	args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align;
1890 	args.flags = flags;
1891 	args.zone = zone;
1892 	return (zone_alloc_item(kegs, &args, M_WAITOK));
1893 }
1894 
1895 /* See uma.h */
1896 void
1897 uma_set_align(int align)
1898 {
1899 
1900 	if (align != UMA_ALIGN_CACHE)
1901 		uma_align_cache = align;
1902 }
1903 
1904 /* See uma.h */
1905 uma_zone_t
1906 uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
1907 		uma_init uminit, uma_fini fini, int align, uint32_t flags)
1908 
1909 {
1910 	struct uma_zctor_args args;
1911 
1912 	/* This stuff is essential for the zone ctor */
1913 	memset(&args, 0, sizeof(args));
1914 	args.name = name;
1915 	args.size = size;
1916 	args.ctor = ctor;
1917 	args.dtor = dtor;
1918 	args.uminit = uminit;
1919 	args.fini = fini;
1920 	args.align = align;
1921 	args.flags = flags;
1922 	args.keg = NULL;
1923 
1924 	return (zone_alloc_item(zones, &args, M_WAITOK));
1925 }
1926 
1927 /* See uma.h */
1928 uma_zone_t
1929 uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
1930 		    uma_init zinit, uma_fini zfini, uma_zone_t master)
1931 {
1932 	struct uma_zctor_args args;
1933 	uma_keg_t keg;
1934 
1935 	keg = zone_first_keg(master);
1936 	memset(&args, 0, sizeof(args));
1937 	args.name = name;
1938 	args.size = keg->uk_size;
1939 	args.ctor = ctor;
1940 	args.dtor = dtor;
1941 	args.uminit = zinit;
1942 	args.fini = zfini;
1943 	args.align = keg->uk_align;
1944 	args.flags = keg->uk_flags | UMA_ZONE_SECONDARY;
1945 	args.keg = keg;
1946 
1947 	/* XXX Attaches only one keg of potentially many. */
1948 	return (zone_alloc_item(zones, &args, M_WAITOK));
1949 }
1950 
1951 /* See uma.h */
1952 uma_zone_t
1953 uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor,
1954 		    uma_init zinit, uma_fini zfini, uma_import zimport,
1955 		    uma_release zrelease, void *arg, int flags)
1956 {
1957 	struct uma_zctor_args args;
1958 
1959 	memset(&args, 0, sizeof(args));
1960 	args.name = name;
1961 	args.size = size;
1962 	args.ctor = ctor;
1963 	args.dtor = dtor;
1964 	args.uminit = zinit;
1965 	args.fini = zfini;
1966 	args.import = zimport;
1967 	args.release = zrelease;
1968 	args.arg = arg;
1969 	args.align = 0;
1970 	args.flags = flags;
1971 
1972 	return (zone_alloc_item(zones, &args, M_WAITOK));
1973 }
1974 
1975 static void
1976 zone_lock_pair(uma_zone_t a, uma_zone_t b)
1977 {
1978 	if (a < b) {
1979 		ZONE_LOCK(a);
1980 		mtx_lock_flags(b->uz_lockptr, MTX_DUPOK);
1981 	} else {
1982 		ZONE_LOCK(b);
1983 		mtx_lock_flags(a->uz_lockptr, MTX_DUPOK);
1984 	}
1985 }
1986 
1987 static void
1988 zone_unlock_pair(uma_zone_t a, uma_zone_t b)
1989 {
1990 
1991 	ZONE_UNLOCK(a);
1992 	ZONE_UNLOCK(b);
1993 }
1994 
1995 int
1996 uma_zsecond_add(uma_zone_t zone, uma_zone_t master)
1997 {
1998 	uma_klink_t klink;
1999 	uma_klink_t kl;
2000 	int error;
2001 
2002 	error = 0;
2003 	klink = malloc(sizeof(*klink), M_TEMP, M_WAITOK | M_ZERO);
2004 
2005 	zone_lock_pair(zone, master);
2006 	/*
2007 	 * zone must use vtoslab() to resolve objects and must already be
2008 	 * a secondary.
2009 	 */
2010 	if ((zone->uz_flags & (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY))
2011 	    != (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) {
2012 		error = EINVAL;
2013 		goto out;
2014 	}
2015 	/*
2016 	 * The new master must also use vtoslab().
2017 	 */
2018 	if ((zone->uz_flags & UMA_ZONE_VTOSLAB) != UMA_ZONE_VTOSLAB) {
2019 		error = EINVAL;
2020 		goto out;
2021 	}
2022 	/*
2023 	 * Both must either be refcnt, or not be refcnt.
2024 	 */
2025 	if ((zone->uz_flags & UMA_ZONE_REFCNT) !=
2026 	    (master->uz_flags & UMA_ZONE_REFCNT)) {
2027 		error = EINVAL;
2028 		goto out;
2029 	}
2030 	/*
2031 	 * The underlying object must be the same size.  rsize
2032 	 * may be different.
2033 	 */
2034 	if (master->uz_size != zone->uz_size) {
2035 		error = E2BIG;
2036 		goto out;
2037 	}
2038 	/*
2039 	 * Put it at the end of the list.
2040 	 */
2041 	klink->kl_keg = zone_first_keg(master);
2042 	LIST_FOREACH(kl, &zone->uz_kegs, kl_link) {
2043 		if (LIST_NEXT(kl, kl_link) == NULL) {
2044 			LIST_INSERT_AFTER(kl, klink, kl_link);
2045 			break;
2046 		}
2047 	}
2048 	klink = NULL;
2049 	zone->uz_flags |= UMA_ZFLAG_MULTI;
2050 	zone->uz_slab = zone_fetch_slab_multi;
2051 
2052 out:
2053 	zone_unlock_pair(zone, master);
2054 	if (klink != NULL)
2055 		free(klink, M_TEMP);
2056 
2057 	return (error);
2058 }
2059 
2060 
2061 /* See uma.h */
2062 void
2063 uma_zdestroy(uma_zone_t zone)
2064 {
2065 
2066 	zone_free_item(zones, zone, NULL, SKIP_NONE);
2067 }
2068 
2069 /* See uma.h */
2070 void *
2071 uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
2072 {
2073 	void *item;
2074 	uma_cache_t cache;
2075 	uma_bucket_t bucket;
2076 	int lockfail;
2077 	int cpu;
2078 
2079 	/* This is the fast path allocation */
2080 #ifdef UMA_DEBUG_ALLOC_1
2081 	printf("Allocating one item from %s(%p)\n", zone->uz_name, zone);
2082 #endif
2083 	CTR3(KTR_UMA, "uma_zalloc_arg thread %x zone %s flags %d", curthread,
2084 	    zone->uz_name, flags);
2085 
2086 	if (flags & M_WAITOK) {
2087 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2088 		    "uma_zalloc_arg: zone \"%s\"", zone->uz_name);
2089 	}
2090 #ifdef DEBUG_MEMGUARD
2091 	if (memguard_cmp_zone(zone)) {
2092 		item = memguard_alloc(zone->uz_size, flags);
2093 		if (item != NULL) {
2094 			/*
2095 			 * Avoid conflict with the use-after-free
2096 			 * protecting infrastructure from INVARIANTS.
2097 			 */
2098 			if (zone->uz_init != NULL &&
2099 			    zone->uz_init != mtrash_init &&
2100 			    zone->uz_init(item, zone->uz_size, flags) != 0)
2101 				return (NULL);
2102 			if (zone->uz_ctor != NULL &&
2103 			    zone->uz_ctor != mtrash_ctor &&
2104 			    zone->uz_ctor(item, zone->uz_size, udata,
2105 			    flags) != 0) {
2106 			    	zone->uz_fini(item, zone->uz_size);
2107 				return (NULL);
2108 			}
2109 			return (item);
2110 		}
2111 		/* This is unfortunate but should not be fatal. */
2112 	}
2113 #endif
2114 	/*
2115 	 * If possible, allocate from the per-CPU cache.  There are two
2116 	 * requirements for safe access to the per-CPU cache: (1) the thread
2117 	 * accessing the cache must not be preempted or yield during access,
2118 	 * and (2) the thread must not migrate CPUs without switching which
2119 	 * cache it accesses.  We rely on a critical section to prevent
2120 	 * preemption and migration.  We release the critical section in
2121 	 * order to acquire the zone mutex if we are unable to allocate from
2122 	 * the current cache; when we re-acquire the critical section, we
2123 	 * must detect and handle migration if it has occurred.
2124 	 */
2125 	critical_enter();
2126 	cpu = curcpu;
2127 	cache = &zone->uz_cpu[cpu];
2128 
2129 zalloc_start:
2130 	bucket = cache->uc_allocbucket;
2131 	if (bucket != NULL && bucket->ub_cnt > 0) {
2132 		bucket->ub_cnt--;
2133 		item = bucket->ub_bucket[bucket->ub_cnt];
2134 #ifdef INVARIANTS
2135 		bucket->ub_bucket[bucket->ub_cnt] = NULL;
2136 #endif
2137 		KASSERT(item != NULL, ("uma_zalloc: Bucket pointer mangled."));
2138 		cache->uc_allocs++;
2139 		critical_exit();
2140 		if (zone->uz_ctor != NULL &&
2141 		    zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
2142 			atomic_add_long(&zone->uz_fails, 1);
2143 			zone_free_item(zone, item, udata, SKIP_DTOR);
2144 			return (NULL);
2145 		}
2146 #ifdef INVARIANTS
2147 		uma_dbg_alloc(zone, NULL, item);
2148 #endif
2149 		if (flags & M_ZERO)
2150 			bzero(item, zone->uz_size);
2151 		return (item);
2152 	}
2153 
2154 	/*
2155 	 * We have run out of items in our alloc bucket.
2156 	 * See if we can switch with our free bucket.
2157 	 */
2158 	bucket = cache->uc_freebucket;
2159 	if (bucket != NULL && bucket->ub_cnt > 0) {
2160 #ifdef UMA_DEBUG_ALLOC
2161 		printf("uma_zalloc: Swapping empty with alloc.\n");
2162 #endif
2163 		cache->uc_freebucket = cache->uc_allocbucket;
2164 		cache->uc_allocbucket = bucket;
2165 		goto zalloc_start;
2166 	}
2167 
2168 	/*
2169 	 * Discard any empty allocation bucket while we hold no locks.
2170 	 */
2171 	bucket = cache->uc_allocbucket;
2172 	cache->uc_allocbucket = NULL;
2173 	critical_exit();
2174 	if (bucket != NULL)
2175 		bucket_free(zone, bucket, udata);
2176 
2177 	/* Short-circuit for zones without buckets and low memory. */
2178 	if (zone->uz_count == 0 || bucketdisable)
2179 		goto zalloc_item;
2180 
2181 	/*
2182 	 * Attempt to retrieve the item from the per-CPU cache has failed, so
2183 	 * we must go back to the zone.  This requires the zone lock, so we
2184 	 * must drop the critical section, then re-acquire it when we go back
2185 	 * to the cache.  Since the critical section is released, we may be
2186 	 * preempted or migrate.  As such, make sure not to maintain any
2187 	 * thread-local state specific to the cache from prior to releasing
2188 	 * the critical section.
2189 	 */
2190 	lockfail = 0;
2191 	if (ZONE_TRYLOCK(zone) == 0) {
2192 		/* Record contention to size the buckets. */
2193 		ZONE_LOCK(zone);
2194 		lockfail = 1;
2195 	}
2196 	critical_enter();
2197 	cpu = curcpu;
2198 	cache = &zone->uz_cpu[cpu];
2199 
2200 	/*
2201 	 * Since we have locked the zone we may as well send back our stats.
2202 	 */
2203 	atomic_add_long(&zone->uz_allocs, cache->uc_allocs);
2204 	atomic_add_long(&zone->uz_frees, cache->uc_frees);
2205 	cache->uc_allocs = 0;
2206 	cache->uc_frees = 0;
2207 
2208 	/* See if we lost the race to fill the cache. */
2209 	if (cache->uc_allocbucket != NULL) {
2210 		ZONE_UNLOCK(zone);
2211 		goto zalloc_start;
2212 	}
2213 
2214 	/*
2215 	 * Check the zone's cache of buckets.
2216 	 */
2217 	if ((bucket = LIST_FIRST(&zone->uz_buckets)) != NULL) {
2218 		KASSERT(bucket->ub_cnt != 0,
2219 		    ("uma_zalloc_arg: Returning an empty bucket."));
2220 
2221 		LIST_REMOVE(bucket, ub_link);
2222 		cache->uc_allocbucket = bucket;
2223 		ZONE_UNLOCK(zone);
2224 		goto zalloc_start;
2225 	}
2226 	/* We are no longer associated with this CPU. */
2227 	critical_exit();
2228 
2229 	/*
2230 	 * We bump the uz count when the cache size is insufficient to
2231 	 * handle the working set.
2232 	 */
2233 	if (lockfail && zone->uz_count < BUCKET_MAX)
2234 		zone->uz_count++;
2235 	ZONE_UNLOCK(zone);
2236 
2237 	/*
2238 	 * Now lets just fill a bucket and put it on the free list.  If that
2239 	 * works we'll restart the allocation from the begining and it
2240 	 * will use the just filled bucket.
2241 	 */
2242 	bucket = zone_alloc_bucket(zone, udata, flags);
2243 	if (bucket != NULL) {
2244 		ZONE_LOCK(zone);
2245 		critical_enter();
2246 		cpu = curcpu;
2247 		cache = &zone->uz_cpu[cpu];
2248 		/*
2249 		 * See if we lost the race or were migrated.  Cache the
2250 		 * initialized bucket to make this less likely or claim
2251 		 * the memory directly.
2252 		 */
2253 		if (cache->uc_allocbucket == NULL)
2254 			cache->uc_allocbucket = bucket;
2255 		else
2256 			LIST_INSERT_HEAD(&zone->uz_buckets, bucket, ub_link);
2257 		ZONE_UNLOCK(zone);
2258 		goto zalloc_start;
2259 	}
2260 
2261 	/*
2262 	 * We may not be able to get a bucket so return an actual item.
2263 	 */
2264 #ifdef UMA_DEBUG
2265 	printf("uma_zalloc_arg: Bucketzone returned NULL\n");
2266 #endif
2267 
2268 zalloc_item:
2269 	item = zone_alloc_item(zone, udata, flags);
2270 
2271 	return (item);
2272 }
2273 
2274 static uma_slab_t
2275 keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int flags)
2276 {
2277 	uma_slab_t slab;
2278 	int reserve;
2279 
2280 	mtx_assert(&keg->uk_lock, MA_OWNED);
2281 	slab = NULL;
2282 	reserve = 0;
2283 	if ((flags & M_USE_RESERVE) == 0)
2284 		reserve = keg->uk_reserve;
2285 
2286 	for (;;) {
2287 		/*
2288 		 * Find a slab with some space.  Prefer slabs that are partially
2289 		 * used over those that are totally full.  This helps to reduce
2290 		 * fragmentation.
2291 		 */
2292 		if (keg->uk_free > reserve) {
2293 			if (!LIST_EMPTY(&keg->uk_part_slab)) {
2294 				slab = LIST_FIRST(&keg->uk_part_slab);
2295 			} else {
2296 				slab = LIST_FIRST(&keg->uk_free_slab);
2297 				LIST_REMOVE(slab, us_link);
2298 				LIST_INSERT_HEAD(&keg->uk_part_slab, slab,
2299 				    us_link);
2300 			}
2301 			MPASS(slab->us_keg == keg);
2302 			return (slab);
2303 		}
2304 
2305 		/*
2306 		 * M_NOVM means don't ask at all!
2307 		 */
2308 		if (flags & M_NOVM)
2309 			break;
2310 
2311 		if (keg->uk_maxpages && keg->uk_pages >= keg->uk_maxpages) {
2312 			keg->uk_flags |= UMA_ZFLAG_FULL;
2313 			/*
2314 			 * If this is not a multi-zone, set the FULL bit.
2315 			 * Otherwise slab_multi() takes care of it.
2316 			 */
2317 			if ((zone->uz_flags & UMA_ZFLAG_MULTI) == 0) {
2318 				zone->uz_flags |= UMA_ZFLAG_FULL;
2319 				zone_log_warning(zone);
2320 			}
2321 			if (flags & M_NOWAIT)
2322 				break;
2323 			zone->uz_sleeps++;
2324 			msleep(keg, &keg->uk_lock, PVM, "keglimit", 0);
2325 			continue;
2326 		}
2327 		slab = keg_alloc_slab(keg, zone, flags);
2328 		/*
2329 		 * If we got a slab here it's safe to mark it partially used
2330 		 * and return.  We assume that the caller is going to remove
2331 		 * at least one item.
2332 		 */
2333 		if (slab) {
2334 			MPASS(slab->us_keg == keg);
2335 			LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
2336 			return (slab);
2337 		}
2338 		/*
2339 		 * We might not have been able to get a slab but another cpu
2340 		 * could have while we were unlocked.  Check again before we
2341 		 * fail.
2342 		 */
2343 		flags |= M_NOVM;
2344 	}
2345 	return (slab);
2346 }
2347 
2348 static uma_slab_t
2349 zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int flags)
2350 {
2351 	uma_slab_t slab;
2352 
2353 	if (keg == NULL) {
2354 		keg = zone_first_keg(zone);
2355 		KEG_LOCK(keg);
2356 	}
2357 
2358 	for (;;) {
2359 		slab = keg_fetch_slab(keg, zone, flags);
2360 		if (slab)
2361 			return (slab);
2362 		if (flags & (M_NOWAIT | M_NOVM))
2363 			break;
2364 	}
2365 	KEG_UNLOCK(keg);
2366 	return (NULL);
2367 }
2368 
2369 /*
2370  * uma_zone_fetch_slab_multi:  Fetches a slab from one available keg.  Returns
2371  * with the keg locked.  On NULL no lock is held.
2372  *
2373  * The last pointer is used to seed the search.  It is not required.
2374  */
2375 static uma_slab_t
2376 zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int rflags)
2377 {
2378 	uma_klink_t klink;
2379 	uma_slab_t slab;
2380 	uma_keg_t keg;
2381 	int flags;
2382 	int empty;
2383 	int full;
2384 
2385 	/*
2386 	 * Don't wait on the first pass.  This will skip limit tests
2387 	 * as well.  We don't want to block if we can find a provider
2388 	 * without blocking.
2389 	 */
2390 	flags = (rflags & ~M_WAITOK) | M_NOWAIT;
2391 	/*
2392 	 * Use the last slab allocated as a hint for where to start
2393 	 * the search.
2394 	 */
2395 	if (last != NULL) {
2396 		slab = keg_fetch_slab(last, zone, flags);
2397 		if (slab)
2398 			return (slab);
2399 		KEG_UNLOCK(last);
2400 	}
2401 	/*
2402 	 * Loop until we have a slab incase of transient failures
2403 	 * while M_WAITOK is specified.  I'm not sure this is 100%
2404 	 * required but we've done it for so long now.
2405 	 */
2406 	for (;;) {
2407 		empty = 0;
2408 		full = 0;
2409 		/*
2410 		 * Search the available kegs for slabs.  Be careful to hold the
2411 		 * correct lock while calling into the keg layer.
2412 		 */
2413 		LIST_FOREACH(klink, &zone->uz_kegs, kl_link) {
2414 			keg = klink->kl_keg;
2415 			KEG_LOCK(keg);
2416 			if ((keg->uk_flags & UMA_ZFLAG_FULL) == 0) {
2417 				slab = keg_fetch_slab(keg, zone, flags);
2418 				if (slab)
2419 					return (slab);
2420 			}
2421 			if (keg->uk_flags & UMA_ZFLAG_FULL)
2422 				full++;
2423 			else
2424 				empty++;
2425 			KEG_UNLOCK(keg);
2426 		}
2427 		if (rflags & (M_NOWAIT | M_NOVM))
2428 			break;
2429 		flags = rflags;
2430 		/*
2431 		 * All kegs are full.  XXX We can't atomically check all kegs
2432 		 * and sleep so just sleep for a short period and retry.
2433 		 */
2434 		if (full && !empty) {
2435 			ZONE_LOCK(zone);
2436 			zone->uz_flags |= UMA_ZFLAG_FULL;
2437 			zone->uz_sleeps++;
2438 			zone_log_warning(zone);
2439 			msleep(zone, zone->uz_lockptr, PVM,
2440 			    "zonelimit", hz/100);
2441 			zone->uz_flags &= ~UMA_ZFLAG_FULL;
2442 			ZONE_UNLOCK(zone);
2443 			continue;
2444 		}
2445 	}
2446 	return (NULL);
2447 }
2448 
2449 static void *
2450 slab_alloc_item(uma_keg_t keg, uma_slab_t slab)
2451 {
2452 	void *item;
2453 	uint8_t freei;
2454 
2455 	MPASS(keg == slab->us_keg);
2456 	mtx_assert(&keg->uk_lock, MA_OWNED);
2457 
2458 	freei = BIT_FFS(SLAB_SETSIZE, &slab->us_free) - 1;
2459 	BIT_CLR(SLAB_SETSIZE, freei, &slab->us_free);
2460 	item = slab->us_data + (keg->uk_rsize * freei);
2461 	slab->us_freecount--;
2462 	keg->uk_free--;
2463 
2464 	/* Move this slab to the full list */
2465 	if (slab->us_freecount == 0) {
2466 		LIST_REMOVE(slab, us_link);
2467 		LIST_INSERT_HEAD(&keg->uk_full_slab, slab, us_link);
2468 	}
2469 
2470 	return (item);
2471 }
2472 
2473 static int
2474 zone_import(uma_zone_t zone, void **bucket, int max, int flags)
2475 {
2476 	uma_slab_t slab;
2477 	uma_keg_t keg;
2478 	int i;
2479 
2480 	slab = NULL;
2481 	keg = NULL;
2482 	/* Try to keep the buckets totally full */
2483 	for (i = 0; i < max; ) {
2484 		if ((slab = zone->uz_slab(zone, keg, flags)) == NULL)
2485 			break;
2486 		keg = slab->us_keg;
2487 		while (slab->us_freecount && i < max) {
2488 			bucket[i++] = slab_alloc_item(keg, slab);
2489 			if (keg->uk_free <= keg->uk_reserve)
2490 				break;
2491 		}
2492 		/* Don't grab more than one slab at a time. */
2493 		flags &= ~M_WAITOK;
2494 		flags |= M_NOWAIT;
2495 	}
2496 	if (slab != NULL)
2497 		KEG_UNLOCK(keg);
2498 
2499 	return i;
2500 }
2501 
2502 static uma_bucket_t
2503 zone_alloc_bucket(uma_zone_t zone, void *udata, int flags)
2504 {
2505 	uma_bucket_t bucket;
2506 	int max;
2507 
2508 	/* Don't wait for buckets, preserve caller's NOVM setting. */
2509 	bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM));
2510 	if (bucket == NULL)
2511 		goto out;
2512 
2513 	max = MIN(bucket->ub_entries, zone->uz_count);
2514 	bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket,
2515 	    max, flags);
2516 
2517 	/*
2518 	 * Initialize the memory if necessary.
2519 	 */
2520 	if (bucket->ub_cnt != 0 && zone->uz_init != NULL) {
2521 		int i;
2522 
2523 		for (i = 0; i < bucket->ub_cnt; i++)
2524 			if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size,
2525 			    flags) != 0)
2526 				break;
2527 		/*
2528 		 * If we couldn't initialize the whole bucket, put the
2529 		 * rest back onto the freelist.
2530 		 */
2531 		if (i != bucket->ub_cnt) {
2532 			zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i],
2533 			    bucket->ub_cnt - i);
2534 #ifdef INVARIANTS
2535 			bzero(&bucket->ub_bucket[i],
2536 			    sizeof(void *) * (bucket->ub_cnt - i));
2537 #endif
2538 			bucket->ub_cnt = i;
2539 		}
2540 	}
2541 
2542 out:
2543 	if (bucket == NULL || bucket->ub_cnt == 0) {
2544 		if (bucket != NULL)
2545 			bucket_free(zone, bucket, udata);
2546 		atomic_add_long(&zone->uz_fails, 1);
2547 		return (NULL);
2548 	}
2549 
2550 	return (bucket);
2551 }
2552 
2553 /*
2554  * Allocates a single item from a zone.
2555  *
2556  * Arguments
2557  *	zone   The zone to alloc for.
2558  *	udata  The data to be passed to the constructor.
2559  *	flags  M_WAITOK, M_NOWAIT, M_ZERO.
2560  *
2561  * Returns
2562  *	NULL if there is no memory and M_NOWAIT is set
2563  *	An item if successful
2564  */
2565 
2566 static void *
2567 zone_alloc_item(uma_zone_t zone, void *udata, int flags)
2568 {
2569 	void *item;
2570 
2571 	item = NULL;
2572 
2573 #ifdef UMA_DEBUG_ALLOC
2574 	printf("INTERNAL: Allocating one item from %s(%p)\n", zone->uz_name, zone);
2575 #endif
2576 	if (zone->uz_import(zone->uz_arg, &item, 1, flags) != 1)
2577 		goto fail;
2578 	atomic_add_long(&zone->uz_allocs, 1);
2579 
2580 	/*
2581 	 * We have to call both the zone's init (not the keg's init)
2582 	 * and the zone's ctor.  This is because the item is going from
2583 	 * a keg slab directly to the user, and the user is expecting it
2584 	 * to be both zone-init'd as well as zone-ctor'd.
2585 	 */
2586 	if (zone->uz_init != NULL) {
2587 		if (zone->uz_init(item, zone->uz_size, flags) != 0) {
2588 			zone_free_item(zone, item, udata, SKIP_FINI);
2589 			goto fail;
2590 		}
2591 	}
2592 	if (zone->uz_ctor != NULL) {
2593 		if (zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
2594 			zone_free_item(zone, item, udata, SKIP_DTOR);
2595 			goto fail;
2596 		}
2597 	}
2598 #ifdef INVARIANTS
2599 	uma_dbg_alloc(zone, NULL, item);
2600 #endif
2601 	if (flags & M_ZERO)
2602 		bzero(item, zone->uz_size);
2603 
2604 	return (item);
2605 
2606 fail:
2607 	atomic_add_long(&zone->uz_fails, 1);
2608 	return (NULL);
2609 }
2610 
2611 /* See uma.h */
2612 void
2613 uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
2614 {
2615 	uma_cache_t cache;
2616 	uma_bucket_t bucket;
2617 	int lockfail;
2618 	int cpu;
2619 
2620 #ifdef UMA_DEBUG_ALLOC_1
2621 	printf("Freeing item %p to %s(%p)\n", item, zone->uz_name, zone);
2622 #endif
2623 	CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread,
2624 	    zone->uz_name);
2625 
2626         /* uma_zfree(..., NULL) does nothing, to match free(9). */
2627         if (item == NULL)
2628                 return;
2629 #ifdef DEBUG_MEMGUARD
2630 	if (is_memguard_addr(item)) {
2631 		if (zone->uz_dtor != NULL && zone->uz_dtor != mtrash_dtor)
2632 			zone->uz_dtor(item, zone->uz_size, udata);
2633 		if (zone->uz_fini != NULL && zone->uz_fini != mtrash_fini)
2634 			zone->uz_fini(item, zone->uz_size);
2635 		memguard_free(item);
2636 		return;
2637 	}
2638 #endif
2639 #ifdef INVARIANTS
2640 	if (zone->uz_flags & UMA_ZONE_MALLOC)
2641 		uma_dbg_free(zone, udata, item);
2642 	else
2643 		uma_dbg_free(zone, NULL, item);
2644 #endif
2645 	if (zone->uz_dtor != NULL)
2646 		zone->uz_dtor(item, zone->uz_size, udata);
2647 
2648 	/*
2649 	 * The race here is acceptable.  If we miss it we'll just have to wait
2650 	 * a little longer for the limits to be reset.
2651 	 */
2652 	if (zone->uz_flags & UMA_ZFLAG_FULL)
2653 		goto zfree_item;
2654 
2655 	/*
2656 	 * If possible, free to the per-CPU cache.  There are two
2657 	 * requirements for safe access to the per-CPU cache: (1) the thread
2658 	 * accessing the cache must not be preempted or yield during access,
2659 	 * and (2) the thread must not migrate CPUs without switching which
2660 	 * cache it accesses.  We rely on a critical section to prevent
2661 	 * preemption and migration.  We release the critical section in
2662 	 * order to acquire the zone mutex if we are unable to free to the
2663 	 * current cache; when we re-acquire the critical section, we must
2664 	 * detect and handle migration if it has occurred.
2665 	 */
2666 zfree_restart:
2667 	critical_enter();
2668 	cpu = curcpu;
2669 	cache = &zone->uz_cpu[cpu];
2670 
2671 zfree_start:
2672 	/*
2673 	 * Try to free into the allocbucket first to give LIFO ordering
2674 	 * for cache-hot datastructures.  Spill over into the freebucket
2675 	 * if necessary.  Alloc will swap them if one runs dry.
2676 	 */
2677 	bucket = cache->uc_allocbucket;
2678 	if (bucket == NULL || bucket->ub_cnt >= bucket->ub_entries)
2679 		bucket = cache->uc_freebucket;
2680 	if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
2681 		KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL,
2682 		    ("uma_zfree: Freeing to non free bucket index."));
2683 		bucket->ub_bucket[bucket->ub_cnt] = item;
2684 		bucket->ub_cnt++;
2685 		cache->uc_frees++;
2686 		critical_exit();
2687 		return;
2688 	}
2689 
2690 	/*
2691 	 * We must go back the zone, which requires acquiring the zone lock,
2692 	 * which in turn means we must release and re-acquire the critical
2693 	 * section.  Since the critical section is released, we may be
2694 	 * preempted or migrate.  As such, make sure not to maintain any
2695 	 * thread-local state specific to the cache from prior to releasing
2696 	 * the critical section.
2697 	 */
2698 	critical_exit();
2699 	if (zone->uz_count == 0 || bucketdisable)
2700 		goto zfree_item;
2701 
2702 	lockfail = 0;
2703 	if (ZONE_TRYLOCK(zone) == 0) {
2704 		/* Record contention to size the buckets. */
2705 		ZONE_LOCK(zone);
2706 		lockfail = 1;
2707 	}
2708 	critical_enter();
2709 	cpu = curcpu;
2710 	cache = &zone->uz_cpu[cpu];
2711 
2712 	/*
2713 	 * Since we have locked the zone we may as well send back our stats.
2714 	 */
2715 	atomic_add_long(&zone->uz_allocs, cache->uc_allocs);
2716 	atomic_add_long(&zone->uz_frees, cache->uc_frees);
2717 	cache->uc_allocs = 0;
2718 	cache->uc_frees = 0;
2719 
2720 	bucket = cache->uc_freebucket;
2721 	if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
2722 		ZONE_UNLOCK(zone);
2723 		goto zfree_start;
2724 	}
2725 	cache->uc_freebucket = NULL;
2726 
2727 	/* Can we throw this on the zone full list? */
2728 	if (bucket != NULL) {
2729 #ifdef UMA_DEBUG_ALLOC
2730 		printf("uma_zfree: Putting old bucket on the free list.\n");
2731 #endif
2732 		/* ub_cnt is pointing to the last free item */
2733 		KASSERT(bucket->ub_cnt != 0,
2734 		    ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n"));
2735 		LIST_INSERT_HEAD(&zone->uz_buckets, bucket, ub_link);
2736 	}
2737 
2738 	/* We are no longer associated with this CPU. */
2739 	critical_exit();
2740 
2741 	/*
2742 	 * We bump the uz count when the cache size is insufficient to
2743 	 * handle the working set.
2744 	 */
2745 	if (lockfail && zone->uz_count < BUCKET_MAX)
2746 		zone->uz_count++;
2747 	ZONE_UNLOCK(zone);
2748 
2749 #ifdef UMA_DEBUG_ALLOC
2750 	printf("uma_zfree: Allocating new free bucket.\n");
2751 #endif
2752 	bucket = bucket_alloc(zone, udata, M_NOWAIT);
2753 	if (bucket) {
2754 		critical_enter();
2755 		cpu = curcpu;
2756 		cache = &zone->uz_cpu[cpu];
2757 		if (cache->uc_freebucket == NULL) {
2758 			cache->uc_freebucket = bucket;
2759 			goto zfree_start;
2760 		}
2761 		/*
2762 		 * We lost the race, start over.  We have to drop our
2763 		 * critical section to free the bucket.
2764 		 */
2765 		critical_exit();
2766 		bucket_free(zone, bucket, udata);
2767 		goto zfree_restart;
2768 	}
2769 
2770 	/*
2771 	 * If nothing else caught this, we'll just do an internal free.
2772 	 */
2773 zfree_item:
2774 	zone_free_item(zone, item, udata, SKIP_DTOR);
2775 
2776 	return;
2777 }
2778 
2779 static void
2780 slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item)
2781 {
2782 	uint8_t freei;
2783 
2784 	mtx_assert(&keg->uk_lock, MA_OWNED);
2785 	MPASS(keg == slab->us_keg);
2786 
2787 	/* Do we need to remove from any lists? */
2788 	if (slab->us_freecount+1 == keg->uk_ipers) {
2789 		LIST_REMOVE(slab, us_link);
2790 		LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
2791 	} else if (slab->us_freecount == 0) {
2792 		LIST_REMOVE(slab, us_link);
2793 		LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
2794 	}
2795 
2796 	/* Slab management. */
2797 	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
2798 	BIT_SET(SLAB_SETSIZE, freei, &slab->us_free);
2799 	slab->us_freecount++;
2800 
2801 	/* Keg statistics. */
2802 	keg->uk_free++;
2803 }
2804 
2805 static void
2806 zone_release(uma_zone_t zone, void **bucket, int cnt)
2807 {
2808 	void *item;
2809 	uma_slab_t slab;
2810 	uma_keg_t keg;
2811 	uint8_t *mem;
2812 	int clearfull;
2813 	int i;
2814 
2815 	clearfull = 0;
2816 	keg = zone_first_keg(zone);
2817 	KEG_LOCK(keg);
2818 	for (i = 0; i < cnt; i++) {
2819 		item = bucket[i];
2820 		if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) {
2821 			mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
2822 			if (zone->uz_flags & UMA_ZONE_HASH) {
2823 				slab = hash_sfind(&keg->uk_hash, mem);
2824 			} else {
2825 				mem += keg->uk_pgoff;
2826 				slab = (uma_slab_t)mem;
2827 			}
2828 		} else {
2829 			slab = vtoslab((vm_offset_t)item);
2830 			if (slab->us_keg != keg) {
2831 				KEG_UNLOCK(keg);
2832 				keg = slab->us_keg;
2833 				KEG_LOCK(keg);
2834 			}
2835 		}
2836 		slab_free_item(keg, slab, item);
2837 		if (keg->uk_flags & UMA_ZFLAG_FULL) {
2838 			if (keg->uk_pages < keg->uk_maxpages) {
2839 				keg->uk_flags &= ~UMA_ZFLAG_FULL;
2840 				clearfull = 1;
2841 			}
2842 
2843 			/*
2844 			 * We can handle one more allocation. Since we're
2845 			 * clearing ZFLAG_FULL, wake up all procs blocked
2846 			 * on pages. This should be uncommon, so keeping this
2847 			 * simple for now (rather than adding count of blocked
2848 			 * threads etc).
2849 			 */
2850 			wakeup(keg);
2851 		}
2852 	}
2853 	KEG_UNLOCK(keg);
2854 	if (clearfull) {
2855 		ZONE_LOCK(zone);
2856 		zone->uz_flags &= ~UMA_ZFLAG_FULL;
2857 		wakeup(zone);
2858 		ZONE_UNLOCK(zone);
2859 	}
2860 
2861 }
2862 
2863 /*
2864  * Frees a single item to any zone.
2865  *
2866  * Arguments:
2867  *	zone   The zone to free to
2868  *	item   The item we're freeing
2869  *	udata  User supplied data for the dtor
2870  *	skip   Skip dtors and finis
2871  */
2872 static void
2873 zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip)
2874 {
2875 
2876 #ifdef INVARIANTS
2877 	if (skip == SKIP_NONE) {
2878 		if (zone->uz_flags & UMA_ZONE_MALLOC)
2879 			uma_dbg_free(zone, udata, item);
2880 		else
2881 			uma_dbg_free(zone, NULL, item);
2882 	}
2883 #endif
2884 	if (skip < SKIP_DTOR && zone->uz_dtor)
2885 		zone->uz_dtor(item, zone->uz_size, udata);
2886 
2887 	if (skip < SKIP_FINI && zone->uz_fini)
2888 		zone->uz_fini(item, zone->uz_size);
2889 
2890 	atomic_add_long(&zone->uz_frees, 1);
2891 	zone->uz_release(zone->uz_arg, &item, 1);
2892 }
2893 
2894 /* See uma.h */
2895 int
2896 uma_zone_set_max(uma_zone_t zone, int nitems)
2897 {
2898 	uma_keg_t keg;
2899 
2900 	keg = zone_first_keg(zone);
2901 	if (keg == NULL)
2902 		return (0);
2903 	KEG_LOCK(keg);
2904 	keg->uk_maxpages = (nitems / keg->uk_ipers) * keg->uk_ppera;
2905 	if (keg->uk_maxpages * keg->uk_ipers < nitems)
2906 		keg->uk_maxpages += keg->uk_ppera;
2907 	nitems = keg->uk_maxpages * keg->uk_ipers;
2908 	KEG_UNLOCK(keg);
2909 
2910 	return (nitems);
2911 }
2912 
2913 /* See uma.h */
2914 int
2915 uma_zone_get_max(uma_zone_t zone)
2916 {
2917 	int nitems;
2918 	uma_keg_t keg;
2919 
2920 	keg = zone_first_keg(zone);
2921 	if (keg == NULL)
2922 		return (0);
2923 	KEG_LOCK(keg);
2924 	nitems = keg->uk_maxpages * keg->uk_ipers;
2925 	KEG_UNLOCK(keg);
2926 
2927 	return (nitems);
2928 }
2929 
2930 /* See uma.h */
2931 void
2932 uma_zone_set_warning(uma_zone_t zone, const char *warning)
2933 {
2934 
2935 	ZONE_LOCK(zone);
2936 	zone->uz_warning = warning;
2937 	ZONE_UNLOCK(zone);
2938 }
2939 
2940 /* See uma.h */
2941 int
2942 uma_zone_get_cur(uma_zone_t zone)
2943 {
2944 	int64_t nitems;
2945 	u_int i;
2946 
2947 	ZONE_LOCK(zone);
2948 	nitems = zone->uz_allocs - zone->uz_frees;
2949 	CPU_FOREACH(i) {
2950 		/*
2951 		 * See the comment in sysctl_vm_zone_stats() regarding the
2952 		 * safety of accessing the per-cpu caches. With the zone lock
2953 		 * held, it is safe, but can potentially result in stale data.
2954 		 */
2955 		nitems += zone->uz_cpu[i].uc_allocs -
2956 		    zone->uz_cpu[i].uc_frees;
2957 	}
2958 	ZONE_UNLOCK(zone);
2959 
2960 	return (nitems < 0 ? 0 : nitems);
2961 }
2962 
2963 /* See uma.h */
2964 void
2965 uma_zone_set_init(uma_zone_t zone, uma_init uminit)
2966 {
2967 	uma_keg_t keg;
2968 
2969 	keg = zone_first_keg(zone);
2970 	KASSERT(keg != NULL, ("uma_zone_set_init: Invalid zone type"));
2971 	KEG_LOCK(keg);
2972 	KASSERT(keg->uk_pages == 0,
2973 	    ("uma_zone_set_init on non-empty keg"));
2974 	keg->uk_init = uminit;
2975 	KEG_UNLOCK(keg);
2976 }
2977 
2978 /* See uma.h */
2979 void
2980 uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
2981 {
2982 	uma_keg_t keg;
2983 
2984 	keg = zone_first_keg(zone);
2985 	KASSERT(keg != NULL, ("uma_zone_set_init: Invalid zone type"));
2986 	KEG_LOCK(keg);
2987 	KASSERT(keg->uk_pages == 0,
2988 	    ("uma_zone_set_fini on non-empty keg"));
2989 	keg->uk_fini = fini;
2990 	KEG_UNLOCK(keg);
2991 }
2992 
2993 /* See uma.h */
2994 void
2995 uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
2996 {
2997 
2998 	ZONE_LOCK(zone);
2999 	KASSERT(zone_first_keg(zone)->uk_pages == 0,
3000 	    ("uma_zone_set_zinit on non-empty keg"));
3001 	zone->uz_init = zinit;
3002 	ZONE_UNLOCK(zone);
3003 }
3004 
3005 /* See uma.h */
3006 void
3007 uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
3008 {
3009 
3010 	ZONE_LOCK(zone);
3011 	KASSERT(zone_first_keg(zone)->uk_pages == 0,
3012 	    ("uma_zone_set_zfini on non-empty keg"));
3013 	zone->uz_fini = zfini;
3014 	ZONE_UNLOCK(zone);
3015 }
3016 
3017 /* See uma.h */
3018 /* XXX uk_freef is not actually used with the zone locked */
3019 void
3020 uma_zone_set_freef(uma_zone_t zone, uma_free freef)
3021 {
3022 	uma_keg_t keg;
3023 
3024 	keg = zone_first_keg(zone);
3025 	KASSERT(keg != NULL, ("uma_zone_set_init: Invalid zone type"));
3026 	KEG_LOCK(keg);
3027 	keg->uk_freef = freef;
3028 	KEG_UNLOCK(keg);
3029 }
3030 
3031 /* See uma.h */
3032 /* XXX uk_allocf is not actually used with the zone locked */
3033 void
3034 uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
3035 {
3036 	uma_keg_t keg;
3037 
3038 	keg = zone_first_keg(zone);
3039 	KEG_LOCK(keg);
3040 	keg->uk_allocf = allocf;
3041 	KEG_UNLOCK(keg);
3042 }
3043 
3044 /* See uma.h */
3045 void
3046 uma_zone_reserve(uma_zone_t zone, int items)
3047 {
3048 	uma_keg_t keg;
3049 
3050 	keg = zone_first_keg(zone);
3051 	if (keg == NULL)
3052 		return;
3053 	KEG_LOCK(keg);
3054 	keg->uk_reserve = items;
3055 	KEG_UNLOCK(keg);
3056 
3057 	return;
3058 }
3059 
3060 /* See uma.h */
3061 int
3062 uma_zone_reserve_kva(uma_zone_t zone, int count)
3063 {
3064 	uma_keg_t keg;
3065 	vm_offset_t kva;
3066 	int pages;
3067 
3068 	keg = zone_first_keg(zone);
3069 	if (keg == NULL)
3070 		return (0);
3071 	pages = count / keg->uk_ipers;
3072 
3073 	if (pages * keg->uk_ipers < count)
3074 		pages++;
3075 
3076 #ifdef UMA_MD_SMALL_ALLOC
3077 	if (keg->uk_ppera > 1) {
3078 #else
3079 	if (1) {
3080 #endif
3081 		kva = kva_alloc(pages * UMA_SLAB_SIZE);
3082 		if (kva == 0)
3083 			return (0);
3084 	} else
3085 		kva = 0;
3086 	KEG_LOCK(keg);
3087 	keg->uk_kva = kva;
3088 	keg->uk_offset = 0;
3089 	keg->uk_maxpages = pages;
3090 #ifdef UMA_MD_SMALL_ALLOC
3091 	keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc;
3092 #else
3093 	keg->uk_allocf = noobj_alloc;
3094 #endif
3095 	keg->uk_flags |= UMA_ZONE_NOFREE;
3096 	KEG_UNLOCK(keg);
3097 
3098 	return (1);
3099 }
3100 
3101 /* See uma.h */
3102 void
3103 uma_prealloc(uma_zone_t zone, int items)
3104 {
3105 	int slabs;
3106 	uma_slab_t slab;
3107 	uma_keg_t keg;
3108 
3109 	keg = zone_first_keg(zone);
3110 	if (keg == NULL)
3111 		return;
3112 	KEG_LOCK(keg);
3113 	slabs = items / keg->uk_ipers;
3114 	if (slabs * keg->uk_ipers < items)
3115 		slabs++;
3116 	while (slabs > 0) {
3117 		slab = keg_alloc_slab(keg, zone, M_WAITOK);
3118 		if (slab == NULL)
3119 			break;
3120 		MPASS(slab->us_keg == keg);
3121 		LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
3122 		slabs--;
3123 	}
3124 	KEG_UNLOCK(keg);
3125 }
3126 
3127 /* See uma.h */
3128 uint32_t *
3129 uma_find_refcnt(uma_zone_t zone, void *item)
3130 {
3131 	uma_slabrefcnt_t slabref;
3132 	uma_slab_t slab;
3133 	uma_keg_t keg;
3134 	uint32_t *refcnt;
3135 	int idx;
3136 
3137 	slab = vtoslab((vm_offset_t)item & (~UMA_SLAB_MASK));
3138 	slabref = (uma_slabrefcnt_t)slab;
3139 	keg = slab->us_keg;
3140 	KASSERT(keg->uk_flags & UMA_ZONE_REFCNT,
3141 	    ("uma_find_refcnt(): zone possibly not UMA_ZONE_REFCNT"));
3142 	idx = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
3143 	refcnt = &slabref->us_refcnt[idx];
3144 	return refcnt;
3145 }
3146 
3147 /* See uma.h */
3148 void
3149 uma_reclaim(void)
3150 {
3151 #ifdef UMA_DEBUG
3152 	printf("UMA: vm asked us to release pages!\n");
3153 #endif
3154 	bucket_enable();
3155 	zone_foreach(zone_drain);
3156 	if (vm_page_count_min()) {
3157 		cache_drain_safe(NULL);
3158 		zone_foreach(zone_drain);
3159 	}
3160 	/*
3161 	 * Some slabs may have been freed but this zone will be visited early
3162 	 * we visit again so that we can free pages that are empty once other
3163 	 * zones are drained.  We have to do the same for buckets.
3164 	 */
3165 	zone_drain(slabzone);
3166 	zone_drain(slabrefzone);
3167 	bucket_zone_drain();
3168 }
3169 
3170 /* See uma.h */
3171 int
3172 uma_zone_exhausted(uma_zone_t zone)
3173 {
3174 	int full;
3175 
3176 	ZONE_LOCK(zone);
3177 	full = (zone->uz_flags & UMA_ZFLAG_FULL);
3178 	ZONE_UNLOCK(zone);
3179 	return (full);
3180 }
3181 
3182 int
3183 uma_zone_exhausted_nolock(uma_zone_t zone)
3184 {
3185 	return (zone->uz_flags & UMA_ZFLAG_FULL);
3186 }
3187 
3188 void *
3189 uma_large_malloc(int size, int wait)
3190 {
3191 	void *mem;
3192 	uma_slab_t slab;
3193 	uint8_t flags;
3194 
3195 	slab = zone_alloc_item(slabzone, NULL, wait);
3196 	if (slab == NULL)
3197 		return (NULL);
3198 	mem = page_alloc(NULL, size, &flags, wait);
3199 	if (mem) {
3200 		vsetslab((vm_offset_t)mem, slab);
3201 		slab->us_data = mem;
3202 		slab->us_flags = flags | UMA_SLAB_MALLOC;
3203 		slab->us_size = size;
3204 	} else {
3205 		zone_free_item(slabzone, slab, NULL, SKIP_NONE);
3206 	}
3207 
3208 	return (mem);
3209 }
3210 
3211 void
3212 uma_large_free(uma_slab_t slab)
3213 {
3214 
3215 	page_free(slab->us_data, slab->us_size, slab->us_flags);
3216 	zone_free_item(slabzone, slab, NULL, SKIP_NONE);
3217 }
3218 
3219 void
3220 uma_print_stats(void)
3221 {
3222 	zone_foreach(uma_print_zone);
3223 }
3224 
3225 static void
3226 slab_print(uma_slab_t slab)
3227 {
3228 	printf("slab: keg %p, data %p, freecount %d\n",
3229 		slab->us_keg, slab->us_data, slab->us_freecount);
3230 }
3231 
3232 static void
3233 cache_print(uma_cache_t cache)
3234 {
3235 	printf("alloc: %p(%d), free: %p(%d)\n",
3236 		cache->uc_allocbucket,
3237 		cache->uc_allocbucket?cache->uc_allocbucket->ub_cnt:0,
3238 		cache->uc_freebucket,
3239 		cache->uc_freebucket?cache->uc_freebucket->ub_cnt:0);
3240 }
3241 
3242 static void
3243 uma_print_keg(uma_keg_t keg)
3244 {
3245 	uma_slab_t slab;
3246 
3247 	printf("keg: %s(%p) size %d(%d) flags %#x ipers %d ppera %d "
3248 	    "out %d free %d limit %d\n",
3249 	    keg->uk_name, keg, keg->uk_size, keg->uk_rsize, keg->uk_flags,
3250 	    keg->uk_ipers, keg->uk_ppera,
3251 	    (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free,
3252 	    (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers);
3253 	printf("Part slabs:\n");
3254 	LIST_FOREACH(slab, &keg->uk_part_slab, us_link)
3255 		slab_print(slab);
3256 	printf("Free slabs:\n");
3257 	LIST_FOREACH(slab, &keg->uk_free_slab, us_link)
3258 		slab_print(slab);
3259 	printf("Full slabs:\n");
3260 	LIST_FOREACH(slab, &keg->uk_full_slab, us_link)
3261 		slab_print(slab);
3262 }
3263 
3264 void
3265 uma_print_zone(uma_zone_t zone)
3266 {
3267 	uma_cache_t cache;
3268 	uma_klink_t kl;
3269 	int i;
3270 
3271 	printf("zone: %s(%p) size %d flags %#x\n",
3272 	    zone->uz_name, zone, zone->uz_size, zone->uz_flags);
3273 	LIST_FOREACH(kl, &zone->uz_kegs, kl_link)
3274 		uma_print_keg(kl->kl_keg);
3275 	CPU_FOREACH(i) {
3276 		cache = &zone->uz_cpu[i];
3277 		printf("CPU %d Cache:\n", i);
3278 		cache_print(cache);
3279 	}
3280 }
3281 
3282 #ifdef DDB
3283 /*
3284  * Generate statistics across both the zone and its per-cpu cache's.  Return
3285  * desired statistics if the pointer is non-NULL for that statistic.
3286  *
3287  * Note: does not update the zone statistics, as it can't safely clear the
3288  * per-CPU cache statistic.
3289  *
3290  * XXXRW: Following the uc_allocbucket and uc_freebucket pointers here isn't
3291  * safe from off-CPU; we should modify the caches to track this information
3292  * directly so that we don't have to.
3293  */
3294 static void
3295 uma_zone_sumstat(uma_zone_t z, int *cachefreep, uint64_t *allocsp,
3296     uint64_t *freesp, uint64_t *sleepsp)
3297 {
3298 	uma_cache_t cache;
3299 	uint64_t allocs, frees, sleeps;
3300 	int cachefree, cpu;
3301 
3302 	allocs = frees = sleeps = 0;
3303 	cachefree = 0;
3304 	CPU_FOREACH(cpu) {
3305 		cache = &z->uz_cpu[cpu];
3306 		if (cache->uc_allocbucket != NULL)
3307 			cachefree += cache->uc_allocbucket->ub_cnt;
3308 		if (cache->uc_freebucket != NULL)
3309 			cachefree += cache->uc_freebucket->ub_cnt;
3310 		allocs += cache->uc_allocs;
3311 		frees += cache->uc_frees;
3312 	}
3313 	allocs += z->uz_allocs;
3314 	frees += z->uz_frees;
3315 	sleeps += z->uz_sleeps;
3316 	if (cachefreep != NULL)
3317 		*cachefreep = cachefree;
3318 	if (allocsp != NULL)
3319 		*allocsp = allocs;
3320 	if (freesp != NULL)
3321 		*freesp = frees;
3322 	if (sleepsp != NULL)
3323 		*sleepsp = sleeps;
3324 }
3325 #endif /* DDB */
3326 
3327 static int
3328 sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS)
3329 {
3330 	uma_keg_t kz;
3331 	uma_zone_t z;
3332 	int count;
3333 
3334 	count = 0;
3335 	mtx_lock(&uma_mtx);
3336 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
3337 		LIST_FOREACH(z, &kz->uk_zones, uz_link)
3338 			count++;
3339 	}
3340 	mtx_unlock(&uma_mtx);
3341 	return (sysctl_handle_int(oidp, &count, 0, req));
3342 }
3343 
3344 static int
3345 sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
3346 {
3347 	struct uma_stream_header ush;
3348 	struct uma_type_header uth;
3349 	struct uma_percpu_stat ups;
3350 	uma_bucket_t bucket;
3351 	struct sbuf sbuf;
3352 	uma_cache_t cache;
3353 	uma_klink_t kl;
3354 	uma_keg_t kz;
3355 	uma_zone_t z;
3356 	uma_keg_t k;
3357 	int count, error, i;
3358 
3359 	error = sysctl_wire_old_buffer(req, 0);
3360 	if (error != 0)
3361 		return (error);
3362 	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
3363 
3364 	count = 0;
3365 	mtx_lock(&uma_mtx);
3366 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
3367 		LIST_FOREACH(z, &kz->uk_zones, uz_link)
3368 			count++;
3369 	}
3370 
3371 	/*
3372 	 * Insert stream header.
3373 	 */
3374 	bzero(&ush, sizeof(ush));
3375 	ush.ush_version = UMA_STREAM_VERSION;
3376 	ush.ush_maxcpus = (mp_maxid + 1);
3377 	ush.ush_count = count;
3378 	(void)sbuf_bcat(&sbuf, &ush, sizeof(ush));
3379 
3380 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
3381 		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
3382 			bzero(&uth, sizeof(uth));
3383 			ZONE_LOCK(z);
3384 			strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
3385 			uth.uth_align = kz->uk_align;
3386 			uth.uth_size = kz->uk_size;
3387 			uth.uth_rsize = kz->uk_rsize;
3388 			LIST_FOREACH(kl, &z->uz_kegs, kl_link) {
3389 				k = kl->kl_keg;
3390 				uth.uth_maxpages += k->uk_maxpages;
3391 				uth.uth_pages += k->uk_pages;
3392 				uth.uth_keg_free += k->uk_free;
3393 				uth.uth_limit = (k->uk_maxpages / k->uk_ppera)
3394 				    * k->uk_ipers;
3395 			}
3396 
3397 			/*
3398 			 * A zone is secondary is it is not the first entry
3399 			 * on the keg's zone list.
3400 			 */
3401 			if ((z->uz_flags & UMA_ZONE_SECONDARY) &&
3402 			    (LIST_FIRST(&kz->uk_zones) != z))
3403 				uth.uth_zone_flags = UTH_ZONE_SECONDARY;
3404 
3405 			LIST_FOREACH(bucket, &z->uz_buckets, ub_link)
3406 				uth.uth_zone_free += bucket->ub_cnt;
3407 			uth.uth_allocs = z->uz_allocs;
3408 			uth.uth_frees = z->uz_frees;
3409 			uth.uth_fails = z->uz_fails;
3410 			uth.uth_sleeps = z->uz_sleeps;
3411 			(void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
3412 			/*
3413 			 * While it is not normally safe to access the cache
3414 			 * bucket pointers while not on the CPU that owns the
3415 			 * cache, we only allow the pointers to be exchanged
3416 			 * without the zone lock held, not invalidated, so
3417 			 * accept the possible race associated with bucket
3418 			 * exchange during monitoring.
3419 			 */
3420 			for (i = 0; i < (mp_maxid + 1); i++) {
3421 				bzero(&ups, sizeof(ups));
3422 				if (kz->uk_flags & UMA_ZFLAG_INTERNAL)
3423 					goto skip;
3424 				if (CPU_ABSENT(i))
3425 					goto skip;
3426 				cache = &z->uz_cpu[i];
3427 				if (cache->uc_allocbucket != NULL)
3428 					ups.ups_cache_free +=
3429 					    cache->uc_allocbucket->ub_cnt;
3430 				if (cache->uc_freebucket != NULL)
3431 					ups.ups_cache_free +=
3432 					    cache->uc_freebucket->ub_cnt;
3433 				ups.ups_allocs = cache->uc_allocs;
3434 				ups.ups_frees = cache->uc_frees;
3435 skip:
3436 				(void)sbuf_bcat(&sbuf, &ups, sizeof(ups));
3437 			}
3438 			ZONE_UNLOCK(z);
3439 		}
3440 	}
3441 	mtx_unlock(&uma_mtx);
3442 	error = sbuf_finish(&sbuf);
3443 	sbuf_delete(&sbuf);
3444 	return (error);
3445 }
3446 
3447 #ifdef DDB
3448 DB_SHOW_COMMAND(uma, db_show_uma)
3449 {
3450 	uint64_t allocs, frees, sleeps;
3451 	uma_bucket_t bucket;
3452 	uma_keg_t kz;
3453 	uma_zone_t z;
3454 	int cachefree;
3455 
3456 	db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free",
3457 	    "Requests", "Sleeps");
3458 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
3459 		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
3460 			if (kz->uk_flags & UMA_ZFLAG_INTERNAL) {
3461 				allocs = z->uz_allocs;
3462 				frees = z->uz_frees;
3463 				sleeps = z->uz_sleeps;
3464 				cachefree = 0;
3465 			} else
3466 				uma_zone_sumstat(z, &cachefree, &allocs,
3467 				    &frees, &sleeps);
3468 			if (!((z->uz_flags & UMA_ZONE_SECONDARY) &&
3469 			    (LIST_FIRST(&kz->uk_zones) != z)))
3470 				cachefree += kz->uk_free;
3471 			LIST_FOREACH(bucket, &z->uz_buckets, ub_link)
3472 				cachefree += bucket->ub_cnt;
3473 			db_printf("%18s %8ju %8jd %8d %12ju %8ju\n", z->uz_name,
3474 			    (uintmax_t)kz->uk_size,
3475 			    (intmax_t)(allocs - frees), cachefree,
3476 			    (uintmax_t)allocs, sleeps);
3477 			if (db_pager_quit)
3478 				return;
3479 		}
3480 	}
3481 }
3482 #endif
3483