xref: /freebsd/sys/vm/uma_core.c (revision 3a92d97ff0f22d21608e1c19b83104c4937523b6)
1 /*-
2  * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson <jeff@FreeBSD.org>
3  * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
4  * Copyright (c) 2004-2006 Robert N. M. Watson
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice unmodified, this list of conditions, and the following
12  *    disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 /*
30  * uma_core.c  Implementation of the Universal Memory allocator
31  *
32  * This allocator is intended to replace the multitude of similar object caches
33  * in the standard FreeBSD kernel.  The intent is to be flexible as well as
34  * efficient.  A primary design goal is to return unused memory to the rest of
35  * the system.  This will make the system as a whole more flexible due to the
36  * ability to move memory to subsystems which most need it instead of leaving
37  * pools of reserved memory unused.
38  *
39  * The basic ideas stem from similar slab/zone based allocators whose algorithms
40  * are well known.
41  *
42  */
43 
44 /*
45  * TODO:
46  *	- Improve memory usage for large allocations
47  *	- Investigate cache size adjustments
48  */
49 
50 #include <sys/cdefs.h>
51 __FBSDID("$FreeBSD$");
52 
53 /* I should really use ktr.. */
54 /*
55 #define UMA_DEBUG 1
56 #define UMA_DEBUG_ALLOC 1
57 #define UMA_DEBUG_ALLOC_1 1
58 */
59 
60 #include "opt_ddb.h"
61 #include "opt_param.h"
62 #include "opt_vm.h"
63 
64 #include <sys/param.h>
65 #include <sys/systm.h>
66 #include <sys/bitset.h>
67 #include <sys/kernel.h>
68 #include <sys/types.h>
69 #include <sys/queue.h>
70 #include <sys/malloc.h>
71 #include <sys/ktr.h>
72 #include <sys/lock.h>
73 #include <sys/sysctl.h>
74 #include <sys/mutex.h>
75 #include <sys/proc.h>
76 #include <sys/rwlock.h>
77 #include <sys/sbuf.h>
78 #include <sys/sched.h>
79 #include <sys/smp.h>
80 #include <sys/vmmeter.h>
81 
82 #include <vm/vm.h>
83 #include <vm/vm_object.h>
84 #include <vm/vm_page.h>
85 #include <vm/vm_pageout.h>
86 #include <vm/vm_param.h>
87 #include <vm/vm_map.h>
88 #include <vm/vm_kern.h>
89 #include <vm/vm_extern.h>
90 #include <vm/uma.h>
91 #include <vm/uma_int.h>
92 #include <vm/uma_dbg.h>
93 
94 #include <ddb/ddb.h>
95 
96 #ifdef DEBUG_MEMGUARD
97 #include <vm/memguard.h>
98 #endif
99 
100 /*
101  * This is the zone and keg from which all zones are spawned.  The idea is that
102  * even the zone & keg heads are allocated from the allocator, so we use the
103  * bss section to bootstrap us.
104  */
105 static struct uma_keg masterkeg;
106 static struct uma_zone masterzone_k;
107 static struct uma_zone masterzone_z;
108 static uma_zone_t kegs = &masterzone_k;
109 static uma_zone_t zones = &masterzone_z;
110 
111 /* This is the zone from which all of uma_slab_t's are allocated. */
112 static uma_zone_t slabzone;
113 static uma_zone_t slabrefzone;	/* With refcounters (for UMA_ZONE_REFCNT) */
114 
115 /*
116  * The initial hash tables come out of this zone so they can be allocated
117  * prior to malloc coming up.
118  */
119 static uma_zone_t hashzone;
120 
121 /* The boot-time adjusted value for cache line alignment. */
122 int uma_align_cache = 64 - 1;
123 
124 static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");
125 
126 /*
127  * Are we allowed to allocate buckets?
128  */
129 static int bucketdisable = 1;
130 
131 /* Linked list of all kegs in the system */
132 static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);
133 
134 /* Linked list of all cache-only zones in the system */
135 static LIST_HEAD(,uma_zone) uma_cachezones =
136     LIST_HEAD_INITIALIZER(uma_cachezones);
137 
138 /* This mutex protects the keg list */
139 static struct mtx_padalign uma_mtx;
140 
141 /* Linked list of boot time pages */
142 static LIST_HEAD(,uma_slab) uma_boot_pages =
143     LIST_HEAD_INITIALIZER(uma_boot_pages);
144 
145 /* This mutex protects the boot time pages list */
146 static struct mtx_padalign uma_boot_pages_mtx;
147 
148 /* Is the VM done starting up? */
149 static int booted = 0;
150 #define	UMA_STARTUP	1
151 #define	UMA_STARTUP2	2
152 
153 /*
154  * Only mbuf clusters use ref zones.  Just provide enough references
155  * to support the one user.  New code should not use the ref facility.
156  */
157 static const u_int uma_max_ipers_ref = PAGE_SIZE / MCLBYTES;
158 
159 /*
160  * This is the handle used to schedule events that need to happen
161  * outside of the allocation fast path.
162  */
163 static struct callout uma_callout;
164 #define	UMA_TIMEOUT	20		/* Seconds for callout interval. */
165 
166 /*
167  * This structure is passed as the zone ctor arg so that I don't have to create
168  * a special allocation function just for zones.
169  */
170 struct uma_zctor_args {
171 	const char *name;
172 	size_t size;
173 	uma_ctor ctor;
174 	uma_dtor dtor;
175 	uma_init uminit;
176 	uma_fini fini;
177 	uma_import import;
178 	uma_release release;
179 	void *arg;
180 	uma_keg_t keg;
181 	int align;
182 	uint32_t flags;
183 };
184 
185 struct uma_kctor_args {
186 	uma_zone_t zone;
187 	size_t size;
188 	uma_init uminit;
189 	uma_fini fini;
190 	int align;
191 	uint32_t flags;
192 };
193 
194 struct uma_bucket_zone {
195 	uma_zone_t	ubz_zone;
196 	char		*ubz_name;
197 	int		ubz_entries;	/* Number of items it can hold. */
198 	int		ubz_maxsize;	/* Maximum allocation size per-item. */
199 };
200 
201 /*
202  * Compute the actual number of bucket entries so that buckets pack into
203  * power-of-two-sized allocations for more efficient space utilization.
204  */
205 #define	BUCKET_SIZE(n)						\
206     (((sizeof(void *) * (n)) - sizeof(struct uma_bucket)) / sizeof(void *))
207 
208 #define	BUCKET_MAX	BUCKET_SIZE(128)
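/*
 * Illustrative example (hypothetical numbers, not taken from uma_int.h):
 * with 8-byte pointers and a hypothetical 24-byte bucket header,
 * BUCKET_SIZE(8) evaluates to (8 * 8 - 24) / 8 = 5, so the "8 Bucket" zone
 * would actually hold 5 items while its allocation stays within eight
 * pointer-sized slots.
 */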
209 
210 struct uma_bucket_zone bucket_zones[] = {
211 	{ NULL, "4 Bucket", BUCKET_SIZE(4), 4096 },
212 	{ NULL, "6 Bucket", BUCKET_SIZE(6), 3072 },
213 	{ NULL, "8 Bucket", BUCKET_SIZE(8), 2048 },
214 	{ NULL, "12 Bucket", BUCKET_SIZE(12), 1536 },
215 	{ NULL, "16 Bucket", BUCKET_SIZE(16), 1024 },
216 	{ NULL, "32 Bucket", BUCKET_SIZE(32), 512 },
217 	{ NULL, "64 Bucket", BUCKET_SIZE(64), 256 },
218 	{ NULL, "128 Bucket", BUCKET_SIZE(128), 128 },
219 	{ NULL, NULL, 0}
220 };
221 
222 /*
223  * Flags and enumerations to be passed to internal functions.
224  */
225 enum zfreeskip { SKIP_NONE = 0, SKIP_DTOR, SKIP_FINI };
226 
227 /* Prototypes.. */
228 
229 static void *noobj_alloc(uma_zone_t, int, uint8_t *, int);
230 static void *page_alloc(uma_zone_t, int, uint8_t *, int);
231 static void *startup_alloc(uma_zone_t, int, uint8_t *, int);
232 static void page_free(void *, int, uint8_t);
233 static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int);
234 static void cache_drain(uma_zone_t);
235 static void bucket_drain(uma_zone_t, uma_bucket_t);
236 static void bucket_cache_drain(uma_zone_t zone);
237 static int keg_ctor(void *, int, void *, int);
238 static void keg_dtor(void *, int, void *);
239 static int zone_ctor(void *, int, void *, int);
240 static void zone_dtor(void *, int, void *);
241 static int zero_init(void *, int, int);
242 static void keg_small_init(uma_keg_t keg);
243 static void keg_large_init(uma_keg_t keg);
244 static void zone_foreach(void (*zfunc)(uma_zone_t));
245 static void zone_timeout(uma_zone_t zone);
246 static int hash_alloc(struct uma_hash *);
247 static int hash_expand(struct uma_hash *, struct uma_hash *);
248 static void hash_free(struct uma_hash *hash);
249 static void uma_timeout(void *);
250 static void uma_startup3(void);
251 static void *zone_alloc_item(uma_zone_t, void *, int);
252 static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip);
253 static void bucket_enable(void);
254 static void bucket_init(void);
255 static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int);
256 static void bucket_free(uma_zone_t zone, uma_bucket_t, void *);
257 static void bucket_zone_drain(void);
258 static uma_bucket_t zone_alloc_bucket(uma_zone_t zone, void *, int flags);
259 static uma_slab_t zone_fetch_slab(uma_zone_t zone, uma_keg_t last, int flags);
260 static uma_slab_t zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int flags);
261 static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
262 static void slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item);
263 static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
264     uma_fini fini, int align, uint32_t flags);
265 static int zone_import(uma_zone_t zone, void **bucket, int max, int flags);
266 static void zone_release(uma_zone_t zone, void **bucket, int cnt);
267 static void uma_zero_item(void *item, uma_zone_t zone);
268 
269 void uma_print_zone(uma_zone_t);
270 void uma_print_stats(void);
271 static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
272 static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);
273 
274 SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);
275 
276 SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLTYPE_INT,
277     0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones");
278 
279 SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
280     0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");
281 
282 static int zone_warnings = 1;
283 TUNABLE_INT("vm.zone_warnings", &zone_warnings);
284 SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RW, &zone_warnings, 0,
285     "Warn when UMA zones become full");
286 
287 /*
288  * This routine checks to see whether or not it's safe to enable buckets.
289  */
290 static void
291 bucket_enable(void)
292 {
293 	bucketdisable = vm_page_count_min();
294 }
295 
296 /*
297  * Initialize bucket_zones, the array of zones of buckets of various sizes.
298  *
299  * For each zone, calculate the memory required for each bucket, consisting
300  * of the header and an array of pointers.
301  */
302 static void
303 bucket_init(void)
304 {
305 	struct uma_bucket_zone *ubz;
306 	int size;
307 	int i;
308 
309 	for (i = 0, ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) {
310 		size = roundup(sizeof(struct uma_bucket), sizeof(void *));
311 		size += sizeof(void *) * ubz->ubz_entries;
312 		ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
313 		    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
314 		    UMA_ZONE_MTXCLASS | UMA_ZFLAG_BUCKET);
315 	}
316 }
317 
318 /*
319  * Given a desired number of entries for a bucket, return the zone from which
320  * to allocate the bucket.
321  */
322 static struct uma_bucket_zone *
323 bucket_zone_lookup(int entries)
324 {
325 	struct uma_bucket_zone *ubz;
326 
327 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
328 		if (ubz->ubz_entries >= entries)
329 			return (ubz);
330 	ubz--;
331 	return (ubz);
332 }
333 
334 static int
335 bucket_select(int size)
336 {
337 	struct uma_bucket_zone *ubz;
338 
339 	ubz = &bucket_zones[0];
340 	if (size > ubz->ubz_maxsize)
341 		return MAX((ubz->ubz_maxsize * ubz->ubz_entries) / size, 1);
342 
343 	for (; ubz->ubz_entries != 0; ubz++)
344 		if (ubz->ubz_maxsize < size)
345 			break;
346 	ubz--;
347 	return (ubz->ubz_entries);
348 }
349 
350 static uma_bucket_t
351 bucket_alloc(uma_zone_t zone, void *udata, int flags)
352 {
353 	struct uma_bucket_zone *ubz;
354 	uma_bucket_t bucket;
355 
356 	/*
357 	 * This is to stop us from allocating per-CPU buckets while we're
358 	 * still allocating out of vm.boot_pages.  Otherwise, we would exhaust
359 	 * the boot pages.  This also prevents us from allocating buckets in
360 	 * low memory situations.
361 	 */
362 	if (bucketdisable)
363 		return (NULL);
364 	/*
365 	 * To limit bucket recursion we store the original zone flags
366 	 * in a cookie passed via zalloc_arg/zfree_arg.  This allows the
367 	 * NOVM flag to persist even through deep recursions.  We also
368 	 * store ZFLAG_BUCKET once we have recursed attempting to allocate
369 	 * a bucket for a bucket zone so we do not allow infinite bucket
370 	 * recursion.  This cookie will even persist to frees of unused
371 	 * buckets via the allocation path or bucket allocations in the
372 	 * free path.
373 	 */
374 	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
375 		udata = (void *)(uintptr_t)zone->uz_flags;
376 	else {
377 		if ((uintptr_t)udata & UMA_ZFLAG_BUCKET)
378 			return (NULL);
379 		udata = (void *)((uintptr_t)udata | UMA_ZFLAG_BUCKET);
380 	}
381 	if ((uintptr_t)udata & UMA_ZFLAG_CACHEONLY)
382 		flags |= M_NOVM;
383 	ubz = bucket_zone_lookup(zone->uz_count);
384 	bucket = uma_zalloc_arg(ubz->ubz_zone, udata, flags);
385 	if (bucket) {
386 #ifdef INVARIANTS
387 		bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
388 #endif
389 		bucket->ub_cnt = 0;
390 		bucket->ub_entries = ubz->ubz_entries;
391 	}
392 
393 	return (bucket);
394 }
395 
396 static void
397 bucket_free(uma_zone_t zone, uma_bucket_t bucket, void *udata)
398 {
399 	struct uma_bucket_zone *ubz;
400 
401 	KASSERT(bucket->ub_cnt == 0,
402 	    ("bucket_free: Freeing a non free bucket."));
403 	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
404 		udata = (void *)(uintptr_t)zone->uz_flags;
405 	ubz = bucket_zone_lookup(bucket->ub_entries);
406 	uma_zfree_arg(ubz->ubz_zone, bucket, udata);
407 }
408 
409 static void
410 bucket_zone_drain(void)
411 {
412 	struct uma_bucket_zone *ubz;
413 
414 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
415 		zone_drain(ubz->ubz_zone);
416 }
417 
418 static void
419 zone_log_warning(uma_zone_t zone)
420 {
421 	static const struct timeval warninterval = { 300, 0 };
422 
423 	if (!zone_warnings || zone->uz_warning == NULL)
424 		return;
425 
426 	if (ratecheck(&zone->uz_ratecheck, &warninterval))
427 		printf("[zone: %s] %s\n", zone->uz_name, zone->uz_warning);
428 }
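/*
 * Illustrative sketch (not part of this file): a zone consumer opts into the
 * rate-limited warning above by registering a message, typically next to a
 * zone limit.  "struct foo" and maxfoo are hypothetical.
 *
 *	zone = uma_zcreate("foo", sizeof(struct foo), NULL, NULL, NULL,
 *	    NULL, UMA_ALIGN_PTR, 0);
 *	uma_zone_set_max(zone, maxfoo);
 *	uma_zone_set_warning(zone, "kern.maxfoo limit reached");
 */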
429 
430 static void
431 zone_foreach_keg(uma_zone_t zone, void (*kegfn)(uma_keg_t))
432 {
433 	uma_klink_t klink;
434 
435 	LIST_FOREACH(klink, &zone->uz_kegs, kl_link)
436 		kegfn(klink->kl_keg);
437 }
438 
439 /*
440  * Routine called from the callout to fire off time-interval based
441  * calculations.  (stats, hash size, etc.)
442  *
443  * Arguments:
444  *	arg   Unused
445  *
446  * Returns:
447  *	Nothing
448  */
449 static void
450 uma_timeout(void *unused)
451 {
452 	bucket_enable();
453 	zone_foreach(zone_timeout);
454 
455 	/* Reschedule this event */
456 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
457 }
458 
459 /*
460  * Routine to perform timeout-driven calculations.  This expands the
461  * hashes and does per-CPU statistics aggregation.
462  *
463  *  Returns nothing.
464  */
465 static void
466 keg_timeout(uma_keg_t keg)
467 {
468 
469 	KEG_LOCK(keg);
470 	/*
471 	 * Expand the keg hash table.
472 	 *
473 	 * This is done if the number of slabs is larger than the hash size.
474 	 * What I'm trying to do here is eliminate collisions entirely.  This
475 	 * may be a little aggressive.  Should I allow for two collisions max?
476 	 */
477 	if (keg->uk_flags & UMA_ZONE_HASH &&
478 	    keg->uk_pages / keg->uk_ppera >= keg->uk_hash.uh_hashsize) {
479 		struct uma_hash newhash;
480 		struct uma_hash oldhash;
481 		int ret;
482 
483 		/*
484 		 * This is so involved because allocating and freeing
485 		 * while the keg lock is held will lead to deadlock.
486 		 * I have to do everything in stages and check for
487 		 * races.
488 		 */
489 		newhash = keg->uk_hash;
490 		KEG_UNLOCK(keg);
491 		ret = hash_alloc(&newhash);
492 		KEG_LOCK(keg);
493 		if (ret) {
494 			if (hash_expand(&keg->uk_hash, &newhash)) {
495 				oldhash = keg->uk_hash;
496 				keg->uk_hash = newhash;
497 			} else
498 				oldhash = newhash;
499 
500 			KEG_UNLOCK(keg);
501 			hash_free(&oldhash);
502 			return;
503 		}
504 	}
505 	KEG_UNLOCK(keg);
506 }
507 
508 static void
509 zone_timeout(uma_zone_t zone)
510 {
511 
512 	zone_foreach_keg(zone, &keg_timeout);
513 }
514 
515 /*
516  * Allocate and zero fill the next sized hash table from the appropriate
517  * backing store.
518  *
519  * Arguments:
520  *	hash  A new hash structure with the old hash size in uh_hashsize
521  *
522  * Returns:
523  *	1 on success and 0 on failure.
524  */
525 static int
526 hash_alloc(struct uma_hash *hash)
527 {
528 	int oldsize;
529 	int alloc;
530 
531 	oldsize = hash->uh_hashsize;
532 
533 	/* We just grow to the next power of two. */
534 	if (oldsize)  {
535 		hash->uh_hashsize = oldsize * 2;
536 		alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
537 		hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
538 		    M_UMAHASH, M_NOWAIT);
539 	} else {
540 		alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
541 		hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
542 		    M_WAITOK);
543 		hash->uh_hashsize = UMA_HASH_SIZE_INIT;
544 	}
545 	if (hash->uh_slab_hash) {
546 		bzero(hash->uh_slab_hash, alloc);
547 		hash->uh_hashmask = hash->uh_hashsize - 1;
548 		return (1);
549 	}
550 
551 	return (0);
552 }
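/*
 * Illustrative note (hedged; exact constants live in uma_int.h): the table
 * starts at UMA_HASH_SIZE_INIT buckets and doubles on every expansion, so a
 * keg whose slab count keeps pace with its hash size walks through sizes
 * UMA_HASH_SIZE_INIT, 2x, 4x, ... as keg_timeout() observes
 * uk_pages / uk_ppera >= uh_hashsize.
 */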
553 
554 /*
555  * Expands the hash table for HASH zones.  This is done from zone_timeout
556  * to reduce collisions.  This must not be done in the regular allocation
557  * path; otherwise we can recurse on the vm while allocating pages.
558  *
559  * Arguments:
560  *	oldhash  The hash you want to expand
561  *	newhash  The hash structure for the new table
562  *
563  * Returns:
564  *	1 if the items were rehashed into the new table, 0 otherwise.
567  */
568 static int
569 hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
570 {
571 	uma_slab_t slab;
572 	int hval;
573 	int i;
574 
575 	if (!newhash->uh_slab_hash)
576 		return (0);
577 
578 	if (oldhash->uh_hashsize >= newhash->uh_hashsize)
579 		return (0);
580 
581 	/*
582 	 * I need to investigate hash algorithms for resizing without a
583 	 * full rehash.
584 	 */
585 
586 	for (i = 0; i < oldhash->uh_hashsize; i++)
587 		while (!SLIST_EMPTY(&oldhash->uh_slab_hash[i])) {
588 			slab = SLIST_FIRST(&oldhash->uh_slab_hash[i]);
589 			SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[i], us_hlink);
590 			hval = UMA_HASH(newhash, slab->us_data);
591 			SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
592 			    slab, us_hlink);
593 		}
594 
595 	return (1);
596 }
597 
598 /*
599  * Free the hash bucket to the appropriate backing store.
600  *
601  * Arguments:
602  *	hash  The uma_hash structure whose slab hash table we're freeing
604  *
605  * Returns:
606  *	Nothing
607  */
608 static void
609 hash_free(struct uma_hash *hash)
610 {
611 	if (hash->uh_slab_hash == NULL)
612 		return;
613 	if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
614 		zone_free_item(hashzone, hash->uh_slab_hash, NULL, SKIP_NONE);
615 	else
616 		free(hash->uh_slab_hash, M_UMAHASH);
617 }
618 
619 /*
620  * Frees all outstanding items in a bucket
621  *
622  * Arguments:
623  *	zone   The zone to free to, must be unlocked.
624  *	bucket The free/alloc bucket with items, cpu queue must be locked.
625  *
626  * Returns:
627  *	Nothing
628  */
629 
630 static void
631 bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
632 {
633 	int i;
634 
635 	if (bucket == NULL)
636 		return;
637 
638 	if (zone->uz_fini)
639 		for (i = 0; i < bucket->ub_cnt; i++)
640 			zone->uz_fini(bucket->ub_bucket[i], zone->uz_size);
641 	zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt);
642 	bucket->ub_cnt = 0;
643 }
644 
645 /*
646  * Drains the per cpu caches for a zone.
647  *
648  * NOTE: This may only be called while the zone is being torn down, and not
649  * during normal operation.  This is necessary in order that we do not have
650  * to migrate CPUs to drain the per-CPU caches.
651  *
652  * Arguments:
653  *	zone     The zone to drain, must be unlocked.
654  *
655  * Returns:
656  *	Nothing
657  */
658 static void
659 cache_drain(uma_zone_t zone)
660 {
661 	uma_cache_t cache;
662 	int cpu;
663 
664 	/*
665 	 * XXX: It is safe to not lock the per-CPU caches, because we're
666 	 * tearing down the zone anyway.  I.e., there will be no further use
667 	 * of the caches at this point.
668 	 *
669 	 * XXX: It would be good to be able to assert that the zone is being
670 	 * torn down to prevent improper use of cache_drain().
671 	 *
672 	 * XXX: We lock the zone before passing into bucket_cache_drain() as
673 	 * it is used elsewhere.  Should the tear-down path be made special
674 	 * there in some form?
675 	 */
676 	CPU_FOREACH(cpu) {
677 		cache = &zone->uz_cpu[cpu];
678 		bucket_drain(zone, cache->uc_allocbucket);
679 		bucket_drain(zone, cache->uc_freebucket);
680 		if (cache->uc_allocbucket != NULL)
681 			bucket_free(zone, cache->uc_allocbucket, NULL);
682 		if (cache->uc_freebucket != NULL)
683 			bucket_free(zone, cache->uc_freebucket, NULL);
684 		cache->uc_allocbucket = cache->uc_freebucket = NULL;
685 	}
686 	ZONE_LOCK(zone);
687 	bucket_cache_drain(zone);
688 	ZONE_UNLOCK(zone);
689 }
690 
691 static void
692 cache_shrink(uma_zone_t zone)
693 {
694 
695 	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
696 		return;
697 
698 	ZONE_LOCK(zone);
699 	zone->uz_count = (zone->uz_count_min + zone->uz_count) / 2;
700 	ZONE_UNLOCK(zone);
701 }
702 
703 static void
704 cache_drain_safe_cpu(uma_zone_t zone)
705 {
706 	uma_cache_t cache;
707 	uma_bucket_t b1, b2;
708 
709 	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
710 		return;
711 
712 	b1 = b2 = NULL;
713 	ZONE_LOCK(zone);
714 	critical_enter();
715 	cache = &zone->uz_cpu[curcpu];
716 	if (cache->uc_allocbucket) {
717 		if (cache->uc_allocbucket->ub_cnt != 0)
718 			LIST_INSERT_HEAD(&zone->uz_buckets,
719 			    cache->uc_allocbucket, ub_link);
720 		else
721 			b1 = cache->uc_allocbucket;
722 		cache->uc_allocbucket = NULL;
723 	}
724 	if (cache->uc_freebucket) {
725 		if (cache->uc_freebucket->ub_cnt != 0)
726 			LIST_INSERT_HEAD(&zone->uz_buckets,
727 			    cache->uc_freebucket, ub_link);
728 		else
729 			b2 = cache->uc_freebucket;
730 		cache->uc_freebucket = NULL;
731 	}
732 	critical_exit();
733 	ZONE_UNLOCK(zone);
734 	if (b1)
735 		bucket_free(zone, b1, NULL);
736 	if (b2)
737 		bucket_free(zone, b2, NULL);
738 }
739 
740 /*
741  * Safely drain the per-CPU caches of a zone (or of all zones) into each
742  * zone's bucket cache.  This is an expensive call because it needs to
743  * bind to all CPUs one by one and enter a critical section on each of
744  * them in order to safely access their cache buckets.
745  * The zone lock must not be held when calling this function.
746  */
747 static void
748 cache_drain_safe(uma_zone_t zone)
749 {
750 	int cpu;
751 
752 	/*
753 	 * Politely shrinking the bucket sizes was not enough; shrink aggressively.
754 	 */
755 	if (zone)
756 		cache_shrink(zone);
757 	else
758 		zone_foreach(cache_shrink);
759 
760 	CPU_FOREACH(cpu) {
761 		thread_lock(curthread);
762 		sched_bind(curthread, cpu);
763 		thread_unlock(curthread);
764 
765 		if (zone)
766 			cache_drain_safe_cpu(zone);
767 		else
768 			zone_foreach(cache_drain_safe_cpu);
769 	}
770 	thread_lock(curthread);
771 	sched_unbind(curthread);
772 	thread_unlock(curthread);
773 }
774 
775 /*
776  * Drain the cached buckets from a zone.  Expects a locked zone on entry.
777  */
778 static void
779 bucket_cache_drain(uma_zone_t zone)
780 {
781 	uma_bucket_t bucket;
782 
783 	/*
784 	 * Drain the bucket queues and free the buckets; we only keep the two
785 	 * per-CPU buckets (alloc/free).
786 	 */
787 	while ((bucket = LIST_FIRST(&zone->uz_buckets)) != NULL) {
788 		LIST_REMOVE(bucket, ub_link);
789 		ZONE_UNLOCK(zone);
790 		bucket_drain(zone, bucket);
791 		bucket_free(zone, bucket, NULL);
792 		ZONE_LOCK(zone);
793 	}
794 
795 	/*
796 	 * Shrink the bucket size further.  The price of a single zone lock
797 	 * collision is probably lower than the price of a global cache drain.
798 	 */
799 	if (zone->uz_count > zone->uz_count_min)
800 		zone->uz_count--;
801 }
802 
803 static void
804 keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
805 {
806 	uint8_t *mem;
807 	int i;
808 	uint8_t flags;
809 
810 	mem = slab->us_data;
811 	flags = slab->us_flags;
812 	i = start;
813 	if (keg->uk_fini != NULL) {
814 		for (i--; i > -1; i--)
815 			keg->uk_fini(slab->us_data + (keg->uk_rsize * i),
816 			    keg->uk_size);
817 	}
818 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
819 		zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
820 #ifdef UMA_DEBUG
821 	printf("%s: Returning %d bytes.\n", keg->uk_name,
822 	    PAGE_SIZE * keg->uk_ppera);
823 #endif
824 	keg->uk_freef(mem, PAGE_SIZE * keg->uk_ppera, flags);
825 }
826 
827 /*
828  * Frees pages from a keg back to the system.  This is done on demand from
829  * the pageout daemon.
830  *
831  * Returns nothing.
832  */
833 static void
834 keg_drain(uma_keg_t keg)
835 {
836 	struct slabhead freeslabs = { 0 };
837 	uma_slab_t slab;
838 	uma_slab_t n;
839 
840 	/*
841 	 * We don't want to take pages from statically allocated kegs at this
842 	 * time.
843 	 */
844 	if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
845 		return;
846 
847 #ifdef UMA_DEBUG
848 	printf("%s free items: %u\n", keg->uk_name, keg->uk_free);
849 #endif
850 	KEG_LOCK(keg);
851 	if (keg->uk_free == 0)
852 		goto finished;
853 
854 	slab = LIST_FIRST(&keg->uk_free_slab);
855 	while (slab) {
856 		n = LIST_NEXT(slab, us_link);
857 
858 		/* We have nowhere to free these to. */
859 		if (slab->us_flags & UMA_SLAB_BOOT) {
860 			slab = n;
861 			continue;
862 		}
863 
864 		LIST_REMOVE(slab, us_link);
865 		keg->uk_pages -= keg->uk_ppera;
866 		keg->uk_free -= keg->uk_ipers;
867 
868 		if (keg->uk_flags & UMA_ZONE_HASH)
869 			UMA_HASH_REMOVE(&keg->uk_hash, slab, slab->us_data);
870 
871 		SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);
872 
873 		slab = n;
874 	}
875 finished:
876 	KEG_UNLOCK(keg);
877 
878 	while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
879 		SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
880 		keg_free_slab(keg, slab, keg->uk_ipers);
881 	}
882 }
883 
884 static void
885 zone_drain_wait(uma_zone_t zone, int waitok)
886 {
887 
888 	/*
889 	 * Set draining to interlock with zone_dtor() so we can release our
890 	 * locks as we go.  Only dtor() should do a WAITOK call since it
891 	 * is the only call that knows the structure will still be available
892 	 * when it wakes up.
893 	 */
894 	ZONE_LOCK(zone);
895 	while (zone->uz_flags & UMA_ZFLAG_DRAINING) {
896 		if (waitok == M_NOWAIT)
897 			goto out;
898 		mtx_unlock(&uma_mtx);
899 		msleep(zone, zone->uz_lockptr, PVM, "zonedrain", 1);
900 		mtx_lock(&uma_mtx);
901 	}
902 	zone->uz_flags |= UMA_ZFLAG_DRAINING;
903 	bucket_cache_drain(zone);
904 	ZONE_UNLOCK(zone);
905 	/*
906 	 * The DRAINING flag protects us from being freed while
907 	 * we're running.  Normally the uma_mtx would protect us but we
908 	 * must be able to release and acquire the right lock for each keg.
909 	 */
910 	zone_foreach_keg(zone, &keg_drain);
911 	ZONE_LOCK(zone);
912 	zone->uz_flags &= ~UMA_ZFLAG_DRAINING;
913 	wakeup(zone);
914 out:
915 	ZONE_UNLOCK(zone);
916 }
917 
918 void
919 zone_drain(uma_zone_t zone)
920 {
921 
922 	zone_drain_wait(zone, M_NOWAIT);
923 }
924 
925 /*
926  * Allocate a new slab for a keg.  This does not insert the slab onto a list.
927  *
928  * Arguments:
929  *	wait  Shall we wait?
930  *
931  * Returns:
932  *	The slab that was allocated or NULL if there is no memory and the
933  *	caller specified M_NOWAIT.
934  */
935 static uma_slab_t
936 keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int wait)
937 {
938 	uma_slabrefcnt_t slabref;
939 	uma_alloc allocf;
940 	uma_slab_t slab;
941 	uint8_t *mem;
942 	uint8_t flags;
943 	int i;
944 
945 	mtx_assert(&keg->uk_lock, MA_OWNED);
946 	slab = NULL;
947 	mem = NULL;
948 
949 #ifdef UMA_DEBUG
950 	printf("alloc_slab:  Allocating a new slab for %s\n", keg->uk_name);
951 #endif
952 	allocf = keg->uk_allocf;
953 	KEG_UNLOCK(keg);
954 
955 	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
956 		slab = zone_alloc_item(keg->uk_slabzone, NULL, wait);
957 		if (slab == NULL)
958 			goto out;
959 	}
960 
961 	/*
962 	 * This reproduces the old vm_zone behavior of zero filling pages the
963 	 * first time they are added to a zone.
964 	 *
965 	 * Malloced items are zeroed in uma_zalloc.
966 	 */
967 
968 	if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
969 		wait |= M_ZERO;
970 	else
971 		wait &= ~M_ZERO;
972 
973 	if (keg->uk_flags & UMA_ZONE_NODUMP)
974 		wait |= M_NODUMP;
975 
976 	/* zone is passed for legacy reasons. */
977 	mem = allocf(zone, keg->uk_ppera * PAGE_SIZE, &flags, wait);
978 	if (mem == NULL) {
979 		if (keg->uk_flags & UMA_ZONE_OFFPAGE)
980 			zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
981 		slab = NULL;
982 		goto out;
983 	}
984 
985 	/* Point the slab into the allocated memory */
986 	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
987 		slab = (uma_slab_t )(mem + keg->uk_pgoff);
988 
989 	if (keg->uk_flags & UMA_ZONE_VTOSLAB)
990 		for (i = 0; i < keg->uk_ppera; i++)
991 			vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);
992 
993 	slab->us_keg = keg;
994 	slab->us_data = mem;
995 	slab->us_freecount = keg->uk_ipers;
996 	slab->us_flags = flags;
997 	BIT_FILL(SLAB_SETSIZE, &slab->us_free);
998 #ifdef INVARIANTS
999 	BIT_ZERO(SLAB_SETSIZE, &slab->us_debugfree);
1000 #endif
1001 	if (keg->uk_flags & UMA_ZONE_REFCNT) {
1002 		slabref = (uma_slabrefcnt_t)slab;
1003 		for (i = 0; i < keg->uk_ipers; i++)
1004 			slabref->us_refcnt[i] = 0;
1005 	}
1006 
1007 	if (keg->uk_init != NULL) {
1008 		for (i = 0; i < keg->uk_ipers; i++)
1009 			if (keg->uk_init(slab->us_data + (keg->uk_rsize * i),
1010 			    keg->uk_size, wait) != 0)
1011 				break;
1012 		if (i != keg->uk_ipers) {
1013 			keg_free_slab(keg, slab, i);
1014 			slab = NULL;
1015 			goto out;
1016 		}
1017 	}
1018 out:
1019 	KEG_LOCK(keg);
1020 
1021 	if (slab != NULL) {
1022 		if (keg->uk_flags & UMA_ZONE_HASH)
1023 			UMA_HASH_INSERT(&keg->uk_hash, slab, mem);
1024 
1025 		keg->uk_pages += keg->uk_ppera;
1026 		keg->uk_free += keg->uk_ipers;
1027 	}
1028 
1029 	return (slab);
1030 }
1031 
1032 /*
1033  * This function is intended to be used early on in place of page_alloc() so
1034  * that we may use the boot time page cache to satisfy allocations before
1035  * the VM is ready.
1036  */
1037 static void *
1038 startup_alloc(uma_zone_t zone, int bytes, uint8_t *pflag, int wait)
1039 {
1040 	uma_keg_t keg;
1041 	uma_slab_t tmps;
1042 	int pages, check_pages;
1043 
1044 	keg = zone_first_keg(zone);
1045 	pages = howmany(bytes, PAGE_SIZE);
1046 	check_pages = pages - 1;
1047 	KASSERT(pages > 0, ("startup_alloc can't reserve 0 pages\n"));
1048 
1049 	/*
1050 	 * Check our small startup cache to see if it has pages remaining.
1051 	 */
1052 	mtx_lock(&uma_boot_pages_mtx);
1053 
1054 	/* First check if we have enough room. */
1055 	tmps = LIST_FIRST(&uma_boot_pages);
1056 	while (tmps != NULL && check_pages-- > 0)
1057 		tmps = LIST_NEXT(tmps, us_link);
1058 	if (tmps != NULL) {
1059 		/*
1060 		 * It's ok to lose tmps references.  The last one will
1061 		 * have tmps->us_data pointing to the start address of
1062 		 * "pages" contiguous pages of memory.
1063 		 */
1064 		while (pages-- > 0) {
1065 			tmps = LIST_FIRST(&uma_boot_pages);
1066 			LIST_REMOVE(tmps, us_link);
1067 		}
1068 		mtx_unlock(&uma_boot_pages_mtx);
1069 		*pflag = tmps->us_flags;
1070 		return (tmps->us_data);
1071 	}
1072 	mtx_unlock(&uma_boot_pages_mtx);
1073 	if (booted < UMA_STARTUP2)
1074 		panic("UMA: Increase vm.boot_pages");
1075 	/*
1076 	 * Now that we've booted, reset these users to their real allocator.
1077 	 */
1078 #ifdef UMA_MD_SMALL_ALLOC
1079 	keg->uk_allocf = (keg->uk_ppera > 1) ? page_alloc : uma_small_alloc;
1080 #else
1081 	keg->uk_allocf = page_alloc;
1082 #endif
1083 	return keg->uk_allocf(zone, bytes, pflag, wait);
1084 }
1085 
1086 /*
1087  * Allocates a number of pages from the system
1088  *
1089  * Arguments:
1090  *	bytes  The number of bytes requested
1091  *	wait  Shall we wait?
1092  *
1093  * Returns:
1094  *	A pointer to the allocated memory or possibly
1095  *	NULL if M_NOWAIT is set.
1096  */
1097 static void *
1098 page_alloc(uma_zone_t zone, int bytes, uint8_t *pflag, int wait)
1099 {
1100 	void *p;	/* Returned page */
1101 
1102 	*pflag = UMA_SLAB_KMEM;
1103 	p = (void *) kmem_malloc(kmem_arena, bytes, wait);
1104 
1105 	return (p);
1106 }
1107 
1108 /*
1109  * Allocates a number of pages not belonging to a VM object
1110  *
1111  * Arguments:
1112  *	bytes  The number of bytes requested
1113  *	wait   Shall we wait?
1114  *
1115  * Returns:
1116  *	A pointer to the allocated memory or possibly
1117  *	NULL if M_NOWAIT is set.
1118  */
1119 static void *
1120 noobj_alloc(uma_zone_t zone, int bytes, uint8_t *flags, int wait)
1121 {
1122 	TAILQ_HEAD(, vm_page) alloctail;
1123 	u_long npages;
1124 	vm_offset_t retkva, zkva;
1125 	vm_page_t p, p_next;
1126 	uma_keg_t keg;
1127 
1128 	TAILQ_INIT(&alloctail);
1129 	keg = zone_first_keg(zone);
1130 
1131 	npages = howmany(bytes, PAGE_SIZE);
1132 	while (npages > 0) {
1133 		p = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT |
1134 		    VM_ALLOC_WIRED | VM_ALLOC_NOOBJ);
1135 		if (p != NULL) {
1136 			/*
1137 			 * Since the page does not belong to an object, its
1138 			 * listq is unused.
1139 			 */
1140 			TAILQ_INSERT_TAIL(&alloctail, p, listq);
1141 			npages--;
1142 			continue;
1143 		}
1144 		if (wait & M_WAITOK) {
1145 			VM_WAIT;
1146 			continue;
1147 		}
1148 
1149 		/*
1150 		 * Page allocation failed, free intermediate pages and
1151 		 * exit.
1152 		 */
1153 		TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
1154 			vm_page_unwire(p, 0);
1155 			vm_page_free(p);
1156 		}
1157 		return (NULL);
1158 	}
1159 	*flags = UMA_SLAB_PRIV;
1160 	zkva = keg->uk_kva +
1161 	    atomic_fetchadd_long(&keg->uk_offset, round_page(bytes));
1162 	retkva = zkva;
1163 	TAILQ_FOREACH(p, &alloctail, listq) {
1164 		pmap_qenter(zkva, &p, 1);
1165 		zkva += PAGE_SIZE;
1166 	}
1167 
1168 	return ((void *)retkva);
1169 }
1170 
1171 /*
1172  * Frees a number of pages to the system
1173  *
1174  * Arguments:
1175  *	mem   A pointer to the memory to be freed
1176  *	size  The size of the memory being freed
1177  *	flags The original p->us_flags field
1178  *
1179  * Returns:
1180  *	Nothing
1181  */
1182 static void
1183 page_free(void *mem, int size, uint8_t flags)
1184 {
1185 	struct vmem *vmem;
1186 
1187 	if (flags & UMA_SLAB_KMEM)
1188 		vmem = kmem_arena;
1189 	else if (flags & UMA_SLAB_KERNEL)
1190 		vmem = kernel_arena;
1191 	else
1192 		panic("UMA: page_free used with invalid flags %d", flags);
1193 
1194 	kmem_free(vmem, (vm_offset_t)mem, size);
1195 }
1196 
1197 /*
1198  * Zero fill initializer
1199  *
1200  * Arguments/Returns follow uma_init specifications
1201  */
1202 static int
1203 zero_init(void *mem, int size, int flags)
1204 {
1205 	bzero(mem, size);
1206 	return (0);
1207 }
1208 
1209 /*
1210  * Finish creating a small uma keg.  This calculates ipers, and the keg size.
1211  *
1212  * Arguments
1213  *	keg  The keg we should initialize
1214  *
1215  * Returns
1216  *	Nothing
1217  */
1218 static void
1219 keg_small_init(uma_keg_t keg)
1220 {
1221 	u_int rsize;
1222 	u_int memused;
1223 	u_int wastedspace;
1224 	u_int shsize;
1225 
1226 	if (keg->uk_flags & UMA_ZONE_PCPU) {
1227 		u_int ncpus = mp_ncpus ? mp_ncpus : MAXCPU;
1228 
1229 		keg->uk_slabsize = sizeof(struct pcpu);
1230 		keg->uk_ppera = howmany(ncpus * sizeof(struct pcpu),
1231 		    PAGE_SIZE);
1232 	} else {
1233 		keg->uk_slabsize = UMA_SLAB_SIZE;
1234 		keg->uk_ppera = 1;
1235 	}
1236 
1237 	/*
1238 	 * Calculate the size of each allocation (rsize) according to
1239 	 * alignment.  If the requested size is smaller than the smallest
1240 	 * unit we can track with our allocation bits, we round it up.
1241 	 */
1242 	rsize = keg->uk_size;
1243 	if (rsize < keg->uk_slabsize / SLAB_SETSIZE)
1244 		rsize = keg->uk_slabsize / SLAB_SETSIZE;
1245 	if (rsize & keg->uk_align)
1246 		rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1);
1247 	keg->uk_rsize = rsize;
1248 
1249 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0 ||
1250 	    keg->uk_rsize < sizeof(struct pcpu),
1251 	    ("%s: size %u too large", __func__, keg->uk_rsize));
1252 
1253 	if (keg->uk_flags & UMA_ZONE_REFCNT)
1254 		rsize += sizeof(uint32_t);
1255 
1256 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
1257 		shsize = 0;
1258 	else
1259 		shsize = sizeof(struct uma_slab);
1260 
1261 	keg->uk_ipers = (keg->uk_slabsize - shsize) / rsize;
1262 	KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
1263 	    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
1264 
1265 	memused = keg->uk_ipers * rsize + shsize;
1266 	wastedspace = keg->uk_slabsize - memused;
1267 
1268 	/*
1269 	 * We can't do OFFPAGE if we're internal or if we've been
1270 	 * asked not to go to the VM for buckets.  If we do this we may
1271 	 * end up going to the VM for slabs, which we do not want to do
1272 	 * if we're UMA_ZFLAG_CACHEONLY as a result of UMA_ZONE_VM, which
1273 	 * clearly forbids it.
1274 	 */
1275 	if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
1276 	    (keg->uk_flags & UMA_ZFLAG_CACHEONLY))
1277 		return;
1278 
1279 	/*
1280 	 * See if using an OFFPAGE slab will limit our waste.  Only do
1281 	 * this if it permits more items per-slab.
1282 	 *
1283 	 * XXX We could try growing slabsize to limit max waste as well.
1284 	 * Historically this was not done because the VM could not
1285 	 * efficiently handle contiguous allocations.
1286 	 */
1287 	if ((wastedspace >= keg->uk_slabsize / UMA_MAX_WASTE) &&
1288 	    (keg->uk_ipers < (keg->uk_slabsize / keg->uk_rsize))) {
1289 		keg->uk_ipers = keg->uk_slabsize / keg->uk_rsize;
1290 		KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
1291 		    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
1292 #ifdef UMA_DEBUG
1293 		printf("UMA decided we need offpage slab headers for "
1294 		    "keg: %s, calculated wastedspace = %d, "
1295 		    "maximum wasted space allowed = %d, "
1296 		    "calculated ipers = %d, "
1297 		    "new wasted space = %d\n", keg->uk_name, wastedspace,
1298 		    keg->uk_slabsize / UMA_MAX_WASTE, keg->uk_ipers,
1299 		    keg->uk_slabsize - keg->uk_ipers * keg->uk_rsize);
1300 #endif
1301 		keg->uk_flags |= UMA_ZONE_OFFPAGE;
1302 	}
1303 
1304 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
1305 	    (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1306 		keg->uk_flags |= UMA_ZONE_HASH;
1307 }
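/*
 * Worked example for the OFFPAGE decision above (hypothetical sizes, not
 * derived from uma_int.h): with a 4 KB slab, a hypothetical 32-byte inline
 * slab header, and a 1024-byte item, only 3 items fit on-page and roughly
 * 992 bytes are wasted.  That waste exceeds slabsize / UMA_MAX_WASTE for
 * typical values of UMA_MAX_WASTE, and an off-page header would raise
 * uk_ipers from 3 to 4, so the keg is switched to UMA_ZONE_OFFPAGE.
 */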
1308 
1309 /*
1310  * Finish creating a large (> UMA_SLAB_SIZE) uma keg.  Just give in and do
1311  * OFFPAGE for now.  When I can allow for more dynamic slab sizes this will be
1312  * more complicated.
1313  *
1314  * Arguments
1315  *	keg  The keg we should initialize
1316  *
1317  * Returns
1318  *	Nothing
1319  */
1320 static void
1321 keg_large_init(uma_keg_t keg)
1322 {
1323 	u_int shsize;
1324 
1325 	KASSERT(keg != NULL, ("Keg is null in keg_large_init"));
1326 	KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0,
1327 	    ("keg_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY keg"));
1328 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
1329 	    ("%s: Cannot large-init a UMA_ZONE_PCPU keg", __func__));
1330 
1331 	keg->uk_ppera = howmany(keg->uk_size, PAGE_SIZE);
1332 	keg->uk_slabsize = keg->uk_ppera * PAGE_SIZE;
1333 	keg->uk_ipers = 1;
1334 	keg->uk_rsize = keg->uk_size;
1335 
1336 	/* We can't do OFFPAGE if we're internal, bail out here. */
1337 	if (keg->uk_flags & UMA_ZFLAG_INTERNAL)
1338 		return;
1339 
1340 	/* Check whether we have enough space to not do OFFPAGE. */
1341 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) == 0) {
1342 		shsize = sizeof(struct uma_slab);
1343 		if (keg->uk_flags & UMA_ZONE_REFCNT)
1344 			shsize += keg->uk_ipers * sizeof(uint32_t);
1345 		if (shsize & UMA_ALIGN_PTR)
1346 			shsize = (shsize & ~UMA_ALIGN_PTR) +
1347 			    (UMA_ALIGN_PTR + 1);
1348 
1349 		if ((PAGE_SIZE * keg->uk_ppera) - keg->uk_rsize < shsize)
1350 			keg->uk_flags |= UMA_ZONE_OFFPAGE;
1351 	}
1352 
1353 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
1354 	    (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1355 		keg->uk_flags |= UMA_ZONE_HASH;
1356 }
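/*
 * Illustrative example (hypothetical sizes): a 9 KB item on a machine with
 * 4 KB pages gets uk_ppera = 3, uk_ipers = 1 and uk_rsize = 9 KB.  The
 * remaining 3 KB at the end of the 12 KB slab easily holds the slab header,
 * so such a keg stays on-page; OFFPAGE is only chosen above when the
 * leftover space cannot hold the (aligned) header.
 */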
1357 
1358 static void
1359 keg_cachespread_init(uma_keg_t keg)
1360 {
1361 	int alignsize;
1362 	int trailer;
1363 	int pages;
1364 	int rsize;
1365 
1366 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
1367 	    ("%s: Cannot cachespread-init a UMA_ZONE_PCPU keg", __func__));
1368 
1369 	alignsize = keg->uk_align + 1;
1370 	rsize = keg->uk_size;
1371 	/*
1372 	 * We want one item to start on every align boundary in a page.  To
1373 	 * do this we will span pages.  We will also extend the item by the
1374 	 * size of align if it is an even multiple of align.  Otherwise, it
1375 	 * would fall on the same boundary every time.
1376 	 */
1377 	if (rsize & keg->uk_align)
1378 		rsize = (rsize & ~keg->uk_align) + alignsize;
1379 	if ((rsize & alignsize) == 0)
1380 		rsize += alignsize;
1381 	trailer = rsize - keg->uk_size;
1382 	pages = (rsize * (PAGE_SIZE / alignsize)) / PAGE_SIZE;
1383 	pages = MIN(pages, (128 * 1024) / PAGE_SIZE);
1384 	keg->uk_rsize = rsize;
1385 	keg->uk_ppera = pages;
1386 	keg->uk_slabsize = UMA_SLAB_SIZE;
1387 	keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize;
1388 	keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
1389 	KASSERT(keg->uk_ipers <= SLAB_SETSIZE,
1390 	    ("%s: keg->uk_ipers too high(%d) increase max_ipers", __func__,
1391 	    keg->uk_ipers));
1392 }
1393 
1394 /*
1395  * Keg header ctor.  This initializes all fields, locks, etc.  And inserts
1396  * the keg onto the global keg list.
1397  *
1398  * Arguments/Returns follow uma_ctor specifications
1399  *	udata  Actually uma_kctor_args
1400  */
1401 static int
1402 keg_ctor(void *mem, int size, void *udata, int flags)
1403 {
1404 	struct uma_kctor_args *arg = udata;
1405 	uma_keg_t keg = mem;
1406 	uma_zone_t zone;
1407 
1408 	bzero(keg, size);
1409 	keg->uk_size = arg->size;
1410 	keg->uk_init = arg->uminit;
1411 	keg->uk_fini = arg->fini;
1412 	keg->uk_align = arg->align;
1413 	keg->uk_free = 0;
1414 	keg->uk_reserve = 0;
1415 	keg->uk_pages = 0;
1416 	keg->uk_flags = arg->flags;
1417 	keg->uk_allocf = page_alloc;
1418 	keg->uk_freef = page_free;
1419 	keg->uk_slabzone = NULL;
1420 
1421 	/*
1422 	 * The master zone is passed to us at keg-creation time.
1423 	 */
1424 	zone = arg->zone;
1425 	keg->uk_name = zone->uz_name;
1426 
1427 	if (arg->flags & UMA_ZONE_VM)
1428 		keg->uk_flags |= UMA_ZFLAG_CACHEONLY;
1429 
1430 	if (arg->flags & UMA_ZONE_ZINIT)
1431 		keg->uk_init = zero_init;
1432 
1433 	if (arg->flags & UMA_ZONE_REFCNT || arg->flags & UMA_ZONE_MALLOC)
1434 		keg->uk_flags |= UMA_ZONE_VTOSLAB;
1435 
1436 	if (arg->flags & UMA_ZONE_PCPU)
1437 #ifdef SMP
1438 		keg->uk_flags |= UMA_ZONE_OFFPAGE;
1439 #else
1440 		keg->uk_flags &= ~UMA_ZONE_PCPU;
1441 #endif
1442 
1443 	if (keg->uk_flags & UMA_ZONE_CACHESPREAD) {
1444 		keg_cachespread_init(keg);
1445 	} else if (keg->uk_flags & UMA_ZONE_REFCNT) {
1446 		if (keg->uk_size >
1447 		    (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt) -
1448 		    sizeof(uint32_t)))
1449 			keg_large_init(keg);
1450 		else
1451 			keg_small_init(keg);
1452 	} else {
1453 		if (keg->uk_size > (UMA_SLAB_SIZE - sizeof(struct uma_slab)))
1454 			keg_large_init(keg);
1455 		else
1456 			keg_small_init(keg);
1457 	}
1458 
1459 	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
1460 		if (keg->uk_flags & UMA_ZONE_REFCNT) {
1461 			if (keg->uk_ipers > uma_max_ipers_ref)
1462 				panic("Too many ref items per zone: %d > %d\n",
1463 				    keg->uk_ipers, uma_max_ipers_ref);
1464 			keg->uk_slabzone = slabrefzone;
1465 		} else
1466 			keg->uk_slabzone = slabzone;
1467 	}
1468 
1469 	/*
1470 	 * If we haven't booted yet we need allocations to go through the
1471 	 * startup cache until the vm is ready.
1472 	 */
1473 	if (keg->uk_ppera == 1) {
1474 #ifdef UMA_MD_SMALL_ALLOC
1475 		keg->uk_allocf = uma_small_alloc;
1476 		keg->uk_freef = uma_small_free;
1477 
1478 		if (booted < UMA_STARTUP)
1479 			keg->uk_allocf = startup_alloc;
1480 #else
1481 		if (booted < UMA_STARTUP2)
1482 			keg->uk_allocf = startup_alloc;
1483 #endif
1484 	} else if (booted < UMA_STARTUP2 &&
1485 	    (keg->uk_flags & UMA_ZFLAG_INTERNAL))
1486 		keg->uk_allocf = startup_alloc;
1487 
1488 	/*
1489 	 * Initialize keg's lock
1490 	 */
1491 	KEG_LOCK_INIT(keg, (arg->flags & UMA_ZONE_MTXCLASS));
1492 
1493 	/*
1494 	 * If we're putting the slab header in the actual page we need to
1495 	 * figure out where in each page it goes.  This calculates a right
1496 	 * justified offset into the memory on an ALIGN_PTR boundary.
1497 	 */
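	/*
	 * Illustrative layout of a single-page, non-OFFPAGE slab: items are
	 * laid out from the start of the page and the slab header is
	 * right-justified at uk_pgoff, computed just below.
	 *
	 *	+--------+--------+-----+----------+-----------------+
	 *	| item 0 | item 1 | ... | (unused) | struct uma_slab |
	 *	+--------+--------+-----+----------+-----------------+
	 *	0                            uk_pgoff          PAGE_SIZE
	 */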
1498 	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) {
1499 		u_int totsize;
1500 
1501 		/* Size of the slab struct and free list */
1502 		totsize = sizeof(struct uma_slab);
1503 
1504 		/* Size of the reference counts. */
1505 		if (keg->uk_flags & UMA_ZONE_REFCNT)
1506 			totsize += keg->uk_ipers * sizeof(uint32_t);
1507 
1508 		if (totsize & UMA_ALIGN_PTR)
1509 			totsize = (totsize & ~UMA_ALIGN_PTR) +
1510 			    (UMA_ALIGN_PTR + 1);
1511 		keg->uk_pgoff = (PAGE_SIZE * keg->uk_ppera) - totsize;
1512 
1513 		/*
1514 		 * The only way the following is possible is if, with our
1515 		 * UMA_ALIGN_PTR adjustments, we are now bigger than
1516 		 * UMA_SLAB_SIZE.  I haven't checked whether this is
1517 		 * mathematically possible for all cases, so we make
1518 		 * sure here anyway.
1519 		 */
1520 		totsize = keg->uk_pgoff + sizeof(struct uma_slab);
1521 		if (keg->uk_flags & UMA_ZONE_REFCNT)
1522 			totsize += keg->uk_ipers * sizeof(uint32_t);
1523 		if (totsize > PAGE_SIZE * keg->uk_ppera) {
1524 			printf("zone %s ipers %d rsize %d size %d\n",
1525 			    zone->uz_name, keg->uk_ipers, keg->uk_rsize,
1526 			    keg->uk_size);
1527 			panic("UMA slab won't fit.");
1528 		}
1529 	}
1530 
1531 	if (keg->uk_flags & UMA_ZONE_HASH)
1532 		hash_alloc(&keg->uk_hash);
1533 
1534 #ifdef UMA_DEBUG
1535 	printf("UMA: %s(%p) size %d(%d) flags %#x ipers %d ppera %d out %d free %d\n",
1536 	    zone->uz_name, zone, keg->uk_size, keg->uk_rsize, keg->uk_flags,
1537 	    keg->uk_ipers, keg->uk_ppera,
1538 	    (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free);
1539 #endif
1540 
1541 	LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);
1542 
1543 	mtx_lock(&uma_mtx);
1544 	LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
1545 	mtx_unlock(&uma_mtx);
1546 	return (0);
1547 }
1548 
1549 /*
1550  * Zone header ctor.  This initializes all fields, locks, etc.
1551  *
1552  * Arguments/Returns follow uma_ctor specifications
1553  *	udata  Actually uma_zctor_args
1554  */
1555 static int
1556 zone_ctor(void *mem, int size, void *udata, int flags)
1557 {
1558 	struct uma_zctor_args *arg = udata;
1559 	uma_zone_t zone = mem;
1560 	uma_zone_t z;
1561 	uma_keg_t keg;
1562 
1563 	bzero(zone, size);
1564 	zone->uz_name = arg->name;
1565 	zone->uz_ctor = arg->ctor;
1566 	zone->uz_dtor = arg->dtor;
1567 	zone->uz_slab = zone_fetch_slab;
1568 	zone->uz_init = NULL;
1569 	zone->uz_fini = NULL;
1570 	zone->uz_allocs = 0;
1571 	zone->uz_frees = 0;
1572 	zone->uz_fails = 0;
1573 	zone->uz_sleeps = 0;
1574 	zone->uz_count = 0;
1575 	zone->uz_count_min = 0;
1576 	zone->uz_flags = 0;
1577 	zone->uz_warning = NULL;
1578 	timevalclear(&zone->uz_ratecheck);
1579 	keg = arg->keg;
1580 
1581 	ZONE_LOCK_INIT(zone, (arg->flags & UMA_ZONE_MTXCLASS));
1582 
1583 	/*
1584 	 * This is a pure cache zone, no kegs.
1585 	 */
1586 	if (arg->import) {
1587 		if (arg->flags & UMA_ZONE_VM)
1588 			arg->flags |= UMA_ZFLAG_CACHEONLY;
1589 		zone->uz_flags = arg->flags;
1590 		zone->uz_size = arg->size;
1591 		zone->uz_import = arg->import;
1592 		zone->uz_release = arg->release;
1593 		zone->uz_arg = arg->arg;
1594 		zone->uz_lockptr = &zone->uz_lock;
1595 		mtx_lock(&uma_mtx);
1596 		LIST_INSERT_HEAD(&uma_cachezones, zone, uz_link);
1597 		mtx_unlock(&uma_mtx);
1598 		goto out;
1599 	}
1600 
1601 	/*
1602 	 * Use the regular zone/keg/slab allocator.
1603 	 */
1604 	zone->uz_import = (uma_import)zone_import;
1605 	zone->uz_release = (uma_release)zone_release;
1606 	zone->uz_arg = zone;
1607 
1608 	if (arg->flags & UMA_ZONE_SECONDARY) {
1609 		KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
1610 		zone->uz_init = arg->uminit;
1611 		zone->uz_fini = arg->fini;
1612 		zone->uz_lockptr = &keg->uk_lock;
1613 		zone->uz_flags |= UMA_ZONE_SECONDARY;
1614 		mtx_lock(&uma_mtx);
1615 		ZONE_LOCK(zone);
1616 		LIST_FOREACH(z, &keg->uk_zones, uz_link) {
1617 			if (LIST_NEXT(z, uz_link) == NULL) {
1618 				LIST_INSERT_AFTER(z, zone, uz_link);
1619 				break;
1620 			}
1621 		}
1622 		ZONE_UNLOCK(zone);
1623 		mtx_unlock(&uma_mtx);
1624 	} else if (keg == NULL) {
1625 		if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
1626 		    arg->align, arg->flags)) == NULL)
1627 			return (ENOMEM);
1628 	} else {
1629 		struct uma_kctor_args karg;
1630 		int error;
1631 
1632 		/* We should only be here from uma_startup() */
1633 		karg.size = arg->size;
1634 		karg.uminit = arg->uminit;
1635 		karg.fini = arg->fini;
1636 		karg.align = arg->align;
1637 		karg.flags = arg->flags;
1638 		karg.zone = zone;
1639 		error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
1640 		    flags);
1641 		if (error)
1642 			return (error);
1643 	}
1644 
1645 	/*
1646 	 * Link in the first keg.
1647 	 */
1648 	zone->uz_klink.kl_keg = keg;
1649 	LIST_INSERT_HEAD(&zone->uz_kegs, &zone->uz_klink, kl_link);
1650 	zone->uz_lockptr = &keg->uk_lock;
1651 	zone->uz_size = keg->uk_size;
1652 	zone->uz_flags |= (keg->uk_flags &
1653 	    (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT));
1654 
1655 	/*
1656 	 * Some internal zones don't have room allocated for the per cpu
1657 	 * caches.  If we're internal, bail out here.
1658 	 */
1659 	if (keg->uk_flags & UMA_ZFLAG_INTERNAL) {
1660 		KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0,
1661 		    ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
1662 		return (0);
1663 	}
1664 
1665 out:
1666 	if ((arg->flags & UMA_ZONE_MAXBUCKET) == 0)
1667 		zone->uz_count = bucket_select(zone->uz_size);
1668 	else
1669 		zone->uz_count = BUCKET_MAX;
1670 	zone->uz_count_min = zone->uz_count;
1671 
1672 	return (0);
1673 }
1674 
1675 /*
1676  * Keg header dtor.  This frees all data, destroys locks, frees the hash
1677  * table and removes the keg from the global list.
1678  *
1679  * Arguments/Returns follow uma_dtor specifications
1680  *	udata  unused
1681  */
1682 static void
1683 keg_dtor(void *arg, int size, void *udata)
1684 {
1685 	uma_keg_t keg;
1686 
1687 	keg = (uma_keg_t)arg;
1688 	KEG_LOCK(keg);
1689 	if (keg->uk_free != 0) {
1690 		printf("Freed UMA keg (%s) was not empty (%d items). "
1691 		    "Lost %d pages of memory.\n",
1692 		    keg->uk_name ? keg->uk_name : "",
1693 		    keg->uk_free, keg->uk_pages);
1694 	}
1695 	KEG_UNLOCK(keg);
1696 
1697 	hash_free(&keg->uk_hash);
1698 
1699 	KEG_LOCK_FINI(keg);
1700 }
1701 
1702 /*
1703  * Zone header dtor.
1704  *
1705  * Arguments/Returns follow uma_dtor specifications
1706  *	udata  unused
1707  */
1708 static void
1709 zone_dtor(void *arg, int size, void *udata)
1710 {
1711 	uma_klink_t klink;
1712 	uma_zone_t zone;
1713 	uma_keg_t keg;
1714 
1715 	zone = (uma_zone_t)arg;
1716 	keg = zone_first_keg(zone);
1717 
1718 	if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
1719 		cache_drain(zone);
1720 
1721 	mtx_lock(&uma_mtx);
1722 	LIST_REMOVE(zone, uz_link);
1723 	mtx_unlock(&uma_mtx);
1724 	/*
1725 	 * XXX there are some races here where
1726 	 * the zone can be drained but zone lock
1727 	 * released and then refilled before we
1728 	 * remove it... we don't care for now
1729 	 */
1730 	zone_drain_wait(zone, M_WAITOK);
1731 	/*
1732 	 * Unlink all of our kegs.
1733 	 */
1734 	while ((klink = LIST_FIRST(&zone->uz_kegs)) != NULL) {
1735 		klink->kl_keg = NULL;
1736 		LIST_REMOVE(klink, kl_link);
1737 		if (klink == &zone->uz_klink)
1738 			continue;
1739 		free(klink, M_TEMP);
1740 	}
1741 	/*
1742 	 * We only destroy kegs from non secondary zones.
1743 	 */
1744 	if (keg != NULL && (zone->uz_flags & UMA_ZONE_SECONDARY) == 0)  {
1745 		mtx_lock(&uma_mtx);
1746 		LIST_REMOVE(keg, uk_link);
1747 		mtx_unlock(&uma_mtx);
1748 		zone_free_item(kegs, keg, NULL, SKIP_NONE);
1749 	}
1750 	ZONE_LOCK_FINI(zone);
1751 }
1752 
1753 /*
1754  * Traverses every zone in the system and calls a callback
1755  *
1756  * Arguments:
1757  *	zfunc  A pointer to a function which accepts a zone
1758  *		as an argument.
1759  *
1760  * Returns:
1761  *	Nothing
1762  */
1763 static void
1764 zone_foreach(void (*zfunc)(uma_zone_t))
1765 {
1766 	uma_keg_t keg;
1767 	uma_zone_t zone;
1768 
1769 	mtx_lock(&uma_mtx);
1770 	LIST_FOREACH(keg, &uma_kegs, uk_link) {
1771 		LIST_FOREACH(zone, &keg->uk_zones, uz_link)
1772 			zfunc(zone);
1773 	}
1774 	mtx_unlock(&uma_mtx);
1775 }
1776 
1777 /* Public functions */
1778 /* See uma.h */
1779 void
1780 uma_startup(void *bootmem, int boot_pages)
1781 {
1782 	struct uma_zctor_args args;
1783 	uma_slab_t slab;
1784 	u_int slabsize;
1785 	int i;
1786 
1787 #ifdef UMA_DEBUG
1788 	printf("Creating uma keg headers zone and keg.\n");
1789 #endif
1790 	mtx_init(&uma_mtx, "UMA lock", NULL, MTX_DEF);
1791 
1792 	/* "manually" create the initial zone */
1793 	memset(&args, 0, sizeof(args));
1794 	args.name = "UMA Kegs";
1795 	args.size = sizeof(struct uma_keg);
1796 	args.ctor = keg_ctor;
1797 	args.dtor = keg_dtor;
1798 	args.uminit = zero_init;
1799 	args.fini = NULL;
1800 	args.keg = &masterkeg;
1801 	args.align = 32 - 1;
1802 	args.flags = UMA_ZFLAG_INTERNAL;
1803 	/* The initial zone has no per-CPU queues so it's smaller. */
1804 	zone_ctor(kegs, sizeof(struct uma_zone), &args, M_WAITOK);
1805 
1806 #ifdef UMA_DEBUG
1807 	printf("Filling boot free list.\n");
1808 #endif
1809 	for (i = 0; i < boot_pages; i++) {
1810 		slab = (uma_slab_t)((uint8_t *)bootmem + (i * UMA_SLAB_SIZE));
1811 		slab->us_data = (uint8_t *)slab;
1812 		slab->us_flags = UMA_SLAB_BOOT;
1813 		LIST_INSERT_HEAD(&uma_boot_pages, slab, us_link);
1814 	}
1815 	mtx_init(&uma_boot_pages_mtx, "UMA boot pages", NULL, MTX_DEF);
1816 
1817 #ifdef UMA_DEBUG
1818 	printf("Creating uma zone headers zone and keg.\n");
1819 #endif
1820 	args.name = "UMA Zones";
1821 	args.size = sizeof(struct uma_zone) +
1822 	    (sizeof(struct uma_cache) * (mp_maxid + 1));
1823 	args.ctor = zone_ctor;
1824 	args.dtor = zone_dtor;
1825 	args.uminit = zero_init;
1826 	args.fini = NULL;
1827 	args.keg = NULL;
1828 	args.align = 32 - 1;
1829 	args.flags = UMA_ZFLAG_INTERNAL;
1830 	/* The initial zone has no per-CPU queues so it's smaller. */
1831 	zone_ctor(zones, sizeof(struct uma_zone), &args, M_WAITOK);
1832 
1833 #ifdef UMA_DEBUG
1834 	printf("Initializing pcpu cache locks.\n");
1835 #endif
1836 #ifdef UMA_DEBUG
1837 	printf("Creating slab and hash zones.\n");
1838 #endif
1839 
1840 	/* Now make a zone for slab headers */
1841 	slabzone = uma_zcreate("UMA Slabs",
1842 				sizeof(struct uma_slab),
1843 				NULL, NULL, NULL, NULL,
1844 				UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
1845 
1846 	/*
1847 	 * We also create a zone for the bigger slabs with reference
1848 	 * counts in them, to accommodate UMA_ZONE_REFCNT zones.
1849 	 */
1850 	slabsize = sizeof(struct uma_slab_refcnt);
1851 	slabsize += uma_max_ipers_ref * sizeof(uint32_t);
1852 	slabrefzone = uma_zcreate("UMA RCntSlabs",
1853 				  slabsize,
1854 				  NULL, NULL, NULL, NULL,
1855 				  UMA_ALIGN_PTR,
1856 				  UMA_ZFLAG_INTERNAL);
1857 
1858 	hashzone = uma_zcreate("UMA Hash",
1859 	    sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
1860 	    NULL, NULL, NULL, NULL,
1861 	    UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
1862 
1863 	bucket_init();
1864 
1865 	booted = UMA_STARTUP;
1866 
1867 #ifdef UMA_DEBUG
1868 	printf("UMA startup complete.\n");
1869 #endif
1870 }
1871 
1872 /* see uma.h */
1873 void
1874 uma_startup2(void)
1875 {
1876 	booted = UMA_STARTUP2;
1877 	bucket_enable();
1878 #ifdef UMA_DEBUG
1879 	printf("UMA startup2 complete.\n");
1880 #endif
1881 }
1882 
1883 /*
1884  * Initialize our callout handle.
1885  */
1887 
1888 static void
1889 uma_startup3(void)
1890 {
1891 #ifdef UMA_DEBUG
1892 	printf("Starting callout.\n");
1893 #endif
1894 	callout_init(&uma_callout, CALLOUT_MPSAFE);
1895 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
1896 #ifdef UMA_DEBUG
1897 	printf("UMA startup3 complete.\n");
1898 #endif
1899 }
1900 
1901 static uma_keg_t
1902 uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
1903 		int align, uint32_t flags)
1904 {
1905 	struct uma_kctor_args args;
1906 
1907 	args.size = size;
1908 	args.uminit = uminit;
1909 	args.fini = fini;
1910 	args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align;
1911 	args.flags = flags;
1912 	args.zone = zone;
1913 	return (zone_alloc_item(kegs, &args, M_WAITOK));
1914 }
1915 
1916 /* See uma.h */
1917 void
1918 uma_set_align(int align)
1919 {
1920 
1921 	if (align != UMA_ALIGN_CACHE)
1922 		uma_align_cache = align;
1923 }
1924 
1925 /* See uma.h */
1926 uma_zone_t
1927 uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
1928 		uma_init uminit, uma_fini fini, int align, uint32_t flags)
1929 
1930 {
1931 	struct uma_zctor_args args;
1932 
1933 	/* This stuff is essential for the zone ctor */
1934 	memset(&args, 0, sizeof(args));
1935 	args.name = name;
1936 	args.size = size;
1937 	args.ctor = ctor;
1938 	args.dtor = dtor;
1939 	args.uminit = uminit;
1940 	args.fini = fini;
1941 	args.align = align;
1942 	args.flags = flags;
1943 	args.keg = NULL;
1944 
1945 	return (zone_alloc_item(zones, &args, M_WAITOK));
1946 }
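/*
 * Illustrative usage sketch (editor's note, not compiled): a typical
 * consumer creates a zone once at initialization time and then allocates
 * and frees items from it.  The "foo" names below are hypothetical.
 *
 *	static uma_zone_t foo_zone;
 *
 *	foo_zone = uma_zcreate("foo", sizeof(struct foo),
 *	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
 *	struct foo *fp = uma_zalloc(foo_zone, M_WAITOK | M_ZERO);
 *	...
 *	uma_zfree(foo_zone, fp);
 *	uma_zdestroy(foo_zone);
 */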
1947 
1948 /* See uma.h */
1949 uma_zone_t
1950 uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
1951 		    uma_init zinit, uma_fini zfini, uma_zone_t master)
1952 {
1953 	struct uma_zctor_args args;
1954 	uma_keg_t keg;
1955 
1956 	keg = zone_first_keg(master);
1957 	memset(&args, 0, sizeof(args));
1958 	args.name = name;
1959 	args.size = keg->uk_size;
1960 	args.ctor = ctor;
1961 	args.dtor = dtor;
1962 	args.uminit = zinit;
1963 	args.fini = zfini;
1964 	args.align = keg->uk_align;
1965 	args.flags = keg->uk_flags | UMA_ZONE_SECONDARY;
1966 	args.keg = keg;
1967 
1968 	/* XXX Attaches only one keg of potentially many. */
1969 	return (zone_alloc_item(zones, &args, M_WAITOK));
1970 }
1971 
1972 /* See uma.h */
1973 uma_zone_t
1974 uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor,
1975 		    uma_init zinit, uma_fini zfini, uma_import zimport,
1976 		    uma_release zrelease, void *arg, int flags)
1977 {
1978 	struct uma_zctor_args args;
1979 
1980 	memset(&args, 0, sizeof(args));
1981 	args.name = name;
1982 	args.size = size;
1983 	args.ctor = ctor;
1984 	args.dtor = dtor;
1985 	args.uminit = zinit;
1986 	args.fini = zfini;
1987 	args.import = zimport;
1988 	args.release = zrelease;
1989 	args.arg = arg;
1990 	args.align = 0;
1991 	args.flags = flags;
1992 
1993 	return (zone_alloc_item(zones, &args, M_WAITOK));
1994 }
1995 
1996 static void
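/*
 * Lock a pair of zones in a consistent order (ascending zone address) so
 * that concurrent callers cannot deadlock against one another.  MTX_DUPOK
 * is required because both locks belong to the same lock class.
 */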
1997 zone_lock_pair(uma_zone_t a, uma_zone_t b)
1998 {
1999 	if (a < b) {
2000 		ZONE_LOCK(a);
2001 		mtx_lock_flags(b->uz_lockptr, MTX_DUPOK);
2002 	} else {
2003 		ZONE_LOCK(b);
2004 		mtx_lock_flags(a->uz_lockptr, MTX_DUPOK);
2005 	}
2006 }
2007 
2008 static void
2009 zone_unlock_pair(uma_zone_t a, uma_zone_t b)
2010 {
2011 
2012 	ZONE_UNLOCK(a);
2013 	ZONE_UNLOCK(b);
2014 }
2015 
2016 int
2017 uma_zsecond_add(uma_zone_t zone, uma_zone_t master)
2018 {
2019 	uma_klink_t klink;
2020 	uma_klink_t kl;
2021 	int error;
2022 
2023 	error = 0;
2024 	klink = malloc(sizeof(*klink), M_TEMP, M_WAITOK | M_ZERO);
2025 
2026 	zone_lock_pair(zone, master);
2027 	/*
2028 	 * zone must use vtoslab() to resolve objects and must already be
2029 	 * a secondary.
2030 	 */
2031 	if ((zone->uz_flags & (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY))
2032 	    != (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) {
2033 		error = EINVAL;
2034 		goto out;
2035 	}
2036 	/*
2037 	 * The new master must also use vtoslab().
2038 	 */
2039 	if ((master->uz_flags & UMA_ZONE_VTOSLAB) != UMA_ZONE_VTOSLAB) {
2040 		error = EINVAL;
2041 		goto out;
2042 	}
2043 	/*
2044 	 * Both must either be refcnt, or not be refcnt.
2045 	 */
2046 	if ((zone->uz_flags & UMA_ZONE_REFCNT) !=
2047 	    (master->uz_flags & UMA_ZONE_REFCNT)) {
2048 		error = EINVAL;
2049 		goto out;
2050 	}
2051 	/*
2052 	 * The underlying object must be the same size.  rsize
2053 	 * may be different.
2054 	 */
2055 	if (master->uz_size != zone->uz_size) {
2056 		error = E2BIG;
2057 		goto out;
2058 	}
2059 	/*
2060 	 * Put it at the end of the list.
2061 	 */
2062 	klink->kl_keg = zone_first_keg(master);
2063 	LIST_FOREACH(kl, &zone->uz_kegs, kl_link) {
2064 		if (LIST_NEXT(kl, kl_link) == NULL) {
2065 			LIST_INSERT_AFTER(kl, klink, kl_link);
2066 			break;
2067 		}
2068 	}
2069 	klink = NULL;
2070 	zone->uz_flags |= UMA_ZFLAG_MULTI;
2071 	zone->uz_slab = zone_fetch_slab_multi;
2072 
2073 out:
2074 	zone_unlock_pair(zone, master);
2075 	if (klink != NULL)
2076 		free(klink, M_TEMP);
2077 
2078 	return (error);
2079 }
2080 
2081 
2082 /* See uma.h */
2083 void
2084 uma_zdestroy(uma_zone_t zone)
2085 {
2086 
2087 	zone_free_item(zones, zone, NULL, SKIP_NONE);
2088 }
2089 
2090 /* See uma.h */
2091 void *
2092 uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
2093 {
2094 	void *item;
2095 	uma_cache_t cache;
2096 	uma_bucket_t bucket;
2097 	int lockfail;
2098 	int cpu;
2099 
2100 	/* This is the fast path allocation */
2101 #ifdef UMA_DEBUG_ALLOC_1
2102 	printf("Allocating one item from %s(%p)\n", zone->uz_name, zone);
2103 #endif
2104 	CTR3(KTR_UMA, "uma_zalloc_arg thread %x zone %s flags %d", curthread,
2105 	    zone->uz_name, flags);
2106 
2107 	if (flags & M_WAITOK) {
2108 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2109 		    "uma_zalloc_arg: zone \"%s\"", zone->uz_name);
2110 	}
2111 #ifdef DEBUG_MEMGUARD
2112 	if (memguard_cmp_zone(zone)) {
2113 		item = memguard_alloc(zone->uz_size, flags);
2114 		if (item != NULL) {
2115 			/*
2116 			 * Avoid conflict with the use-after-free
2117 			 * protecting infrastructure from INVARIANTS.
2118 			 */
2119 			if (zone->uz_init != NULL &&
2120 			    zone->uz_init != mtrash_init &&
2121 			    zone->uz_init(item, zone->uz_size, flags) != 0)
2122 				return (NULL);
2123 			if (zone->uz_ctor != NULL &&
2124 			    zone->uz_ctor != mtrash_ctor &&
2125 			    zone->uz_ctor(item, zone->uz_size, udata,
2126 			    flags) != 0) {
2127 			    	zone->uz_fini(item, zone->uz_size);
2128 				return (NULL);
2129 			}
2130 			return (item);
2131 		}
2132 		/* This is unfortunate but should not be fatal. */
2133 	}
2134 #endif
2135 	/*
2136 	 * If possible, allocate from the per-CPU cache.  There are two
2137 	 * requirements for safe access to the per-CPU cache: (1) the thread
2138 	 * accessing the cache must not be preempted or yield during access,
2139 	 * and (2) the thread must not migrate CPUs without switching which
2140 	 * cache it accesses.  We rely on a critical section to prevent
2141 	 * preemption and migration.  We release the critical section in
2142 	 * order to acquire the zone mutex if we are unable to allocate from
2143 	 * the current cache; when we re-acquire the critical section, we
2144 	 * must detect and handle migration if it has occurred.
2145 	 */
2146 	critical_enter();
2147 	cpu = curcpu;
2148 	cache = &zone->uz_cpu[cpu];
2149 
2150 zalloc_start:
2151 	bucket = cache->uc_allocbucket;
2152 	if (bucket != NULL && bucket->ub_cnt > 0) {
2153 		bucket->ub_cnt--;
2154 		item = bucket->ub_bucket[bucket->ub_cnt];
2155 #ifdef INVARIANTS
2156 		bucket->ub_bucket[bucket->ub_cnt] = NULL;
2157 #endif
2158 		KASSERT(item != NULL, ("uma_zalloc: Bucket pointer mangled."));
2159 		cache->uc_allocs++;
2160 		critical_exit();
2161 		if (zone->uz_ctor != NULL &&
2162 		    zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
2163 			atomic_add_long(&zone->uz_fails, 1);
2164 			zone_free_item(zone, item, udata, SKIP_DTOR);
2165 			return (NULL);
2166 		}
2167 #ifdef INVARIANTS
2168 		uma_dbg_alloc(zone, NULL, item);
2169 #endif
2170 		if (flags & M_ZERO)
2171 			uma_zero_item(item, zone);
2172 		return (item);
2173 	}
2174 
2175 	/*
2176 	 * We have run out of items in our alloc bucket.
2177 	 * See if we can switch with our free bucket.
2178 	 */
2179 	bucket = cache->uc_freebucket;
2180 	if (bucket != NULL && bucket->ub_cnt > 0) {
2181 #ifdef UMA_DEBUG_ALLOC
2182 		printf("uma_zalloc: Swapping empty with alloc.\n");
2183 #endif
2184 		cache->uc_freebucket = cache->uc_allocbucket;
2185 		cache->uc_allocbucket = bucket;
2186 		goto zalloc_start;
2187 	}
2188 
2189 	/*
2190 	 * Discard any empty allocation bucket while we hold no locks.
2191 	 */
2192 	bucket = cache->uc_allocbucket;
2193 	cache->uc_allocbucket = NULL;
2194 	critical_exit();
2195 	if (bucket != NULL)
2196 		bucket_free(zone, bucket, udata);
2197 
2198 	/* Short-circuit for zones without buckets or when memory is low. */
2199 	if (zone->uz_count == 0 || bucketdisable)
2200 		goto zalloc_item;
2201 
2202 	/*
2203 	 * The attempt to retrieve the item from the per-CPU cache has failed,
2204 	 * so we must go back to the zone.  This requires the zone lock, so we
2205 	 * must drop the critical section, then re-acquire it when we go back
2206 	 * to the cache.  Since the critical section is released, we may be
2207 	 * preempted or migrate.  As such, make sure not to maintain any
2208 	 * thread-local state specific to the cache from prior to releasing
2209 	 * the critical section.
2210 	 */
2211 	lockfail = 0;
2212 	if (ZONE_TRYLOCK(zone) == 0) {
2213 		/* Record contention to size the buckets. */
2214 		ZONE_LOCK(zone);
2215 		lockfail = 1;
2216 	}
2217 	critical_enter();
2218 	cpu = curcpu;
2219 	cache = &zone->uz_cpu[cpu];
2220 
2221 	/*
2222 	 * Since we have locked the zone we may as well send back our stats.
2223 	 */
2224 	atomic_add_long(&zone->uz_allocs, cache->uc_allocs);
2225 	atomic_add_long(&zone->uz_frees, cache->uc_frees);
2226 	cache->uc_allocs = 0;
2227 	cache->uc_frees = 0;
2228 
2229 	/* See if we lost the race to fill the cache. */
2230 	if (cache->uc_allocbucket != NULL) {
2231 		ZONE_UNLOCK(zone);
2232 		goto zalloc_start;
2233 	}
2234 
2235 	/*
2236 	 * Check the zone's cache of buckets.
2237 	 */
2238 	if ((bucket = LIST_FIRST(&zone->uz_buckets)) != NULL) {
2239 		KASSERT(bucket->ub_cnt != 0,
2240 		    ("uma_zalloc_arg: Returning an empty bucket."));
2241 
2242 		LIST_REMOVE(bucket, ub_link);
2243 		cache->uc_allocbucket = bucket;
2244 		ZONE_UNLOCK(zone);
2245 		goto zalloc_start;
2246 	}
2247 	/* We are no longer associated with this CPU. */
2248 	critical_exit();
2249 
2250 	/*
2251 	 * We bump the uz count when the cache size is insufficient to
2252 	 * handle the working set.
2253 	 */
2254 	if (lockfail && zone->uz_count < BUCKET_MAX)
2255 		zone->uz_count++;
2256 	ZONE_UNLOCK(zone);
2257 
2258 	/*
2259 	 * Now let's just fill a bucket and put it on the free list.  If that
2260 	 * works we'll restart the allocation from the beginning and it
2261 	 * will use the just-filled bucket.
2262 	 */
2263 	bucket = zone_alloc_bucket(zone, udata, flags);
2264 	if (bucket != NULL) {
2265 		ZONE_LOCK(zone);
2266 		critical_enter();
2267 		cpu = curcpu;
2268 		cache = &zone->uz_cpu[cpu];
2269 		/*
2270 		 * See if we lost the race or were migrated.  Cache the
2271 		 * initialized bucket to make this less likely or claim
2272 		 * the memory directly.
2273 		 */
2274 		if (cache->uc_allocbucket == NULL)
2275 			cache->uc_allocbucket = bucket;
2276 		else
2277 			LIST_INSERT_HEAD(&zone->uz_buckets, bucket, ub_link);
2278 		ZONE_UNLOCK(zone);
2279 		goto zalloc_start;
2280 	}
2281 
2282 	/*
2283 	 * We may not be able to get a bucket so return an actual item.
2284 	 */
2285 #ifdef UMA_DEBUG
2286 	printf("uma_zalloc_arg: Bucketzone returned NULL\n");
2287 #endif
2288 
2289 zalloc_item:
2290 	item = zone_alloc_item(zone, udata, flags);
2291 
2292 	return (item);
2293 }
2294 
2295 static uma_slab_t
2296 keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int flags)
2297 {
2298 	uma_slab_t slab;
2299 	int reserve;
2300 
2301 	mtx_assert(&keg->uk_lock, MA_OWNED);
2302 	slab = NULL;
2303 	reserve = 0;
2304 	if ((flags & M_USE_RESERVE) == 0)
2305 		reserve = keg->uk_reserve;
2306 
2307 	for (;;) {
2308 		/*
2309 		 * Find a slab with some space.  Prefer slabs that are partially
2310 		 * used over those that are totally free.  This helps to reduce
2311 		 * fragmentation.
2312 		 */
2313 		if (keg->uk_free > reserve) {
2314 			if (!LIST_EMPTY(&keg->uk_part_slab)) {
2315 				slab = LIST_FIRST(&keg->uk_part_slab);
2316 			} else {
2317 				slab = LIST_FIRST(&keg->uk_free_slab);
2318 				LIST_REMOVE(slab, us_link);
2319 				LIST_INSERT_HEAD(&keg->uk_part_slab, slab,
2320 				    us_link);
2321 			}
2322 			MPASS(slab->us_keg == keg);
2323 			return (slab);
2324 		}
2325 
2326 		/*
2327 		 * M_NOVM means don't ask at all!
2328 		 */
2329 		if (flags & M_NOVM)
2330 			break;
2331 
2332 		if (keg->uk_maxpages && keg->uk_pages >= keg->uk_maxpages) {
2333 			keg->uk_flags |= UMA_ZFLAG_FULL;
2334 			/*
2335 			 * If this is not a multi-zone, set the FULL bit.
2336 			 * Otherwise zone_fetch_slab_multi() takes care of it.
2337 			 */
2338 			if ((zone->uz_flags & UMA_ZFLAG_MULTI) == 0) {
2339 				zone->uz_flags |= UMA_ZFLAG_FULL;
2340 				zone_log_warning(zone);
2341 			}
2342 			if (flags & M_NOWAIT)
2343 				break;
2344 			zone->uz_sleeps++;
2345 			msleep(keg, &keg->uk_lock, PVM, "keglimit", 0);
2346 			continue;
2347 		}
2348 		slab = keg_alloc_slab(keg, zone, flags);
2349 		/*
2350 		 * If we got a slab here it's safe to mark it partially used
2351 		 * and return.  We assume that the caller is going to remove
2352 		 * at least one item.
2353 		 */
2354 		if (slab) {
2355 			MPASS(slab->us_keg == keg);
2356 			LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
2357 			return (slab);
2358 		}
2359 		/*
2360 		 * We might not have been able to get a slab but another cpu
2361 		 * could have while we were unlocked.  Check again before we
2362 		 * fail.
2363 		 */
2364 		flags |= M_NOVM;
2365 	}
2366 	return (slab);
2367 }
2368 
2369 static uma_slab_t
2370 zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int flags)
2371 {
2372 	uma_slab_t slab;
2373 
2374 	if (keg == NULL) {
2375 		keg = zone_first_keg(zone);
2376 		KEG_LOCK(keg);
2377 	}
2378 
2379 	for (;;) {
2380 		slab = keg_fetch_slab(keg, zone, flags);
2381 		if (slab)
2382 			return (slab);
2383 		if (flags & (M_NOWAIT | M_NOVM))
2384 			break;
2385 	}
2386 	KEG_UNLOCK(keg);
2387 	return (NULL);
2388 }
2389 
2390 /*
2391  * zone_fetch_slab_multi:  Fetches a slab from one available keg.  Returns
2392  * with the keg locked.  On NULL no lock is held.
2393  *
2394  * The last pointer is used to seed the search.  It is not required.
2395  */
2396 static uma_slab_t
2397 zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int rflags)
2398 {
2399 	uma_klink_t klink;
2400 	uma_slab_t slab;
2401 	uma_keg_t keg;
2402 	int flags;
2403 	int empty;
2404 	int full;
2405 
2406 	/*
2407 	 * Don't wait on the first pass.  This will skip limit tests
2408 	 * as well.  We don't want to block if we can find a provider
2409 	 * without blocking.
2410 	 */
2411 	flags = (rflags & ~M_WAITOK) | M_NOWAIT;
2412 	/*
2413 	 * Use the keg we last allocated from as a hint for where to start
2414 	 * the search.
2415 	 */
2416 	if (last != NULL) {
2417 		slab = keg_fetch_slab(last, zone, flags);
2418 		if (slab)
2419 			return (slab);
2420 		KEG_UNLOCK(last);
2421 	}
2422 	/*
2423 	 * Loop until we have a slab in case of transient failures
2424 	 * while M_WAITOK is specified.  I'm not sure this is 100%
2425 	 * required but we've done it for so long now.
2426 	 */
2427 	for (;;) {
2428 		empty = 0;
2429 		full = 0;
2430 		/*
2431 		 * Search the available kegs for slabs.  Be careful to hold the
2432 		 * correct lock while calling into the keg layer.
2433 		 */
2434 		LIST_FOREACH(klink, &zone->uz_kegs, kl_link) {
2435 			keg = klink->kl_keg;
2436 			KEG_LOCK(keg);
2437 			if ((keg->uk_flags & UMA_ZFLAG_FULL) == 0) {
2438 				slab = keg_fetch_slab(keg, zone, flags);
2439 				if (slab)
2440 					return (slab);
2441 			}
2442 			if (keg->uk_flags & UMA_ZFLAG_FULL)
2443 				full++;
2444 			else
2445 				empty++;
2446 			KEG_UNLOCK(keg);
2447 		}
2448 		if (rflags & (M_NOWAIT | M_NOVM))
2449 			break;
2450 		flags = rflags;
2451 		/*
2452 		 * All kegs are full.  XXX We can't atomically check all kegs
2453 		 * and sleep so just sleep for a short period and retry.
2454 		 */
2455 		if (full && !empty) {
2456 			ZONE_LOCK(zone);
2457 			zone->uz_flags |= UMA_ZFLAG_FULL;
2458 			zone->uz_sleeps++;
2459 			zone_log_warning(zone);
2460 			msleep(zone, zone->uz_lockptr, PVM,
2461 			    "zonelimit", hz/100);
2462 			zone->uz_flags &= ~UMA_ZFLAG_FULL;
2463 			ZONE_UNLOCK(zone);
2464 			continue;
2465 		}
2466 	}
2467 	return (NULL);
2468 }
2469 
2470 static void *
2471 slab_alloc_item(uma_keg_t keg, uma_slab_t slab)
2472 {
2473 	void *item;
2474 	uint8_t freei;
2475 
2476 	MPASS(keg == slab->us_keg);
2477 	mtx_assert(&keg->uk_lock, MA_OWNED);
2478 
2479 	freei = BIT_FFS(SLAB_SETSIZE, &slab->us_free) - 1;
2480 	BIT_CLR(SLAB_SETSIZE, freei, &slab->us_free);
2481 	item = slab->us_data + (keg->uk_rsize * freei);
2482 	slab->us_freecount--;
2483 	keg->uk_free--;
2484 
2485 	/* Move this slab to the full list */
2486 	if (slab->us_freecount == 0) {
2487 		LIST_REMOVE(slab, us_link);
2488 		LIST_INSERT_HEAD(&keg->uk_full_slab, slab, us_link);
2489 	}
2490 
2491 	return (item);
2492 }
2493 
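/*
 * Fill the supplied array with up to 'max' items taken directly from the
 * zone's keg(s).  Returns the number of items actually imported; any keg
 * lock acquired while fetching slabs is dropped before returning.
 */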
2494 static int
2495 zone_import(uma_zone_t zone, void **bucket, int max, int flags)
2496 {
2497 	uma_slab_t slab;
2498 	uma_keg_t keg;
2499 	int i;
2500 
2501 	slab = NULL;
2502 	keg = NULL;
2503 	/* Try to keep the buckets totally full */
2504 	for (i = 0; i < max; ) {
2505 		if ((slab = zone->uz_slab(zone, keg, flags)) == NULL)
2506 			break;
2507 		keg = slab->us_keg;
2508 		while (slab->us_freecount && i < max) {
2509 			bucket[i++] = slab_alloc_item(keg, slab);
2510 			if (keg->uk_free <= keg->uk_reserve)
2511 				break;
2512 		}
2513 		/* Don't grab more than one slab at a time. */
2514 		flags &= ~M_WAITOK;
2515 		flags |= M_NOWAIT;
2516 	}
2517 	if (slab != NULL)
2518 		KEG_UNLOCK(keg);
2519 
2520 	return (i);
2521 }
2522 
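/*
 * Allocate a bucket and fill it with as many items as the zone's import
 * routine will provide, bounded by uz_count, running the zone's init on
 * each item.  Returns NULL if no items could be obtained.
 */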
2523 static uma_bucket_t
2524 zone_alloc_bucket(uma_zone_t zone, void *udata, int flags)
2525 {
2526 	uma_bucket_t bucket;
2527 	int max;
2528 
2529 	/* Don't wait for buckets, preserve caller's NOVM setting. */
2530 	bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM));
2531 	if (bucket == NULL)
2532 		return (NULL);
2533 
2534 	max = MIN(bucket->ub_entries, zone->uz_count);
2535 	bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket,
2536 	    max, flags);
2537 
2538 	/*
2539 	 * Initialize the memory if necessary.
2540 	 */
2541 	if (bucket->ub_cnt != 0 && zone->uz_init != NULL) {
2542 		int i;
2543 
2544 		for (i = 0; i < bucket->ub_cnt; i++)
2545 			if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size,
2546 			    flags) != 0)
2547 				break;
2548 		/*
2549 		 * If we couldn't initialize the whole bucket, put the
2550 		 * rest back onto the freelist.
2551 		 */
2552 		if (i != bucket->ub_cnt) {
2553 			zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i],
2554 			    bucket->ub_cnt - i);
2555 #ifdef INVARIANTS
2556 			bzero(&bucket->ub_bucket[i],
2557 			    sizeof(void *) * (bucket->ub_cnt - i));
2558 #endif
2559 			bucket->ub_cnt = i;
2560 		}
2561 	}
2562 
2563 	if (bucket->ub_cnt == 0) {
2564 		bucket_free(zone, bucket, udata);
2565 		atomic_add_long(&zone->uz_fails, 1);
2566 		return (NULL);
2567 	}
2568 
2569 	return (bucket);
2570 }
2571 
2572 /*
2573  * Allocates a single item from a zone.
2574  *
2575  * Arguments:
2576  *	zone   The zone to allocate from.
2577  *	udata  The data to be passed to the constructor.
2578  *	flags  M_WAITOK, M_NOWAIT, M_ZERO.
2579  *
2580  * Returns:
2581  *	NULL if there is no memory and M_NOWAIT is set
2582  *	An item if successful
2583  */
2584 
2585 static void *
2586 zone_alloc_item(uma_zone_t zone, void *udata, int flags)
2587 {
2588 	void *item;
2589 
2590 	item = NULL;
2591 
2592 #ifdef UMA_DEBUG_ALLOC
2593 	printf("INTERNAL: Allocating one item from %s(%p)\n", zone->uz_name, zone);
2594 #endif
2595 	if (zone->uz_import(zone->uz_arg, &item, 1, flags) != 1)
2596 		goto fail;
2597 	atomic_add_long(&zone->uz_allocs, 1);
2598 
2599 	/*
2600 	 * We have to call both the zone's init (not the keg's init)
2601 	 * and the zone's ctor.  This is because the item is going from
2602 	 * a keg slab directly to the user, and the user is expecting it
2603 	 * to be both zone-init'd as well as zone-ctor'd.
2604 	 */
2605 	if (zone->uz_init != NULL) {
2606 		if (zone->uz_init(item, zone->uz_size, flags) != 0) {
2607 			zone_free_item(zone, item, udata, SKIP_FINI);
2608 			goto fail;
2609 		}
2610 	}
2611 	if (zone->uz_ctor != NULL) {
2612 		if (zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
2613 			zone_free_item(zone, item, udata, SKIP_DTOR);
2614 			goto fail;
2615 		}
2616 	}
2617 #ifdef INVARIANTS
2618 	uma_dbg_alloc(zone, NULL, item);
2619 #endif
2620 	if (flags & M_ZERO)
2621 		uma_zero_item(item, zone);
2622 
2623 	return (item);
2624 
2625 fail:
2626 	atomic_add_long(&zone->uz_fails, 1);
2627 	return (NULL);
2628 }
2629 
2630 /* See uma.h */
2631 void
2632 uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
2633 {
2634 	uma_cache_t cache;
2635 	uma_bucket_t bucket;
2636 	int lockfail;
2637 	int cpu;
2638 
2639 #ifdef UMA_DEBUG_ALLOC_1
2640 	printf("Freeing item %p to %s(%p)\n", item, zone->uz_name, zone);
2641 #endif
2642 	CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread,
2643 	    zone->uz_name);
2644 
2645 	/* uma_zfree(..., NULL) does nothing, to match free(9). */
2646 	if (item == NULL)
2647 		return;
2648 #ifdef DEBUG_MEMGUARD
2649 	if (is_memguard_addr(item)) {
2650 		if (zone->uz_dtor != NULL && zone->uz_dtor != mtrash_dtor)
2651 			zone->uz_dtor(item, zone->uz_size, udata);
2652 		if (zone->uz_fini != NULL && zone->uz_fini != mtrash_fini)
2653 			zone->uz_fini(item, zone->uz_size);
2654 		memguard_free(item);
2655 		return;
2656 	}
2657 #endif
2658 #ifdef INVARIANTS
2659 	if (zone->uz_flags & UMA_ZONE_MALLOC)
2660 		uma_dbg_free(zone, udata, item);
2661 	else
2662 		uma_dbg_free(zone, NULL, item);
2663 #endif
2664 	if (zone->uz_dtor != NULL)
2665 		zone->uz_dtor(item, zone->uz_size, udata);
2666 
2667 	/*
2668 	 * The race here is acceptable.  If we miss it we'll just have to wait
2669 	 * a little longer for the limits to be reset.
2670 	 */
2671 	if (zone->uz_flags & UMA_ZFLAG_FULL)
2672 		goto zfree_item;
2673 
2674 	/*
2675 	 * If possible, free to the per-CPU cache.  There are two
2676 	 * requirements for safe access to the per-CPU cache: (1) the thread
2677 	 * accessing the cache must not be preempted or yield during access,
2678 	 * and (2) the thread must not migrate CPUs without switching which
2679 	 * cache it accesses.  We rely on a critical section to prevent
2680 	 * preemption and migration.  We release the critical section in
2681 	 * order to acquire the zone mutex if we are unable to free to the
2682 	 * current cache; when we re-acquire the critical section, we must
2683 	 * detect and handle migration if it has occurred.
2684 	 */
2685 zfree_restart:
2686 	critical_enter();
2687 	cpu = curcpu;
2688 	cache = &zone->uz_cpu[cpu];
2689 
2690 zfree_start:
2691 	/*
2692 	 * Try to free into the allocbucket first to give LIFO ordering
2693 	 * for cache-hot data structures.  Spill over into the freebucket
2694 	 * if necessary.  Alloc will swap them if one runs dry.
2695 	 */
2696 	bucket = cache->uc_allocbucket;
2697 	if (bucket == NULL || bucket->ub_cnt >= bucket->ub_entries)
2698 		bucket = cache->uc_freebucket;
2699 	if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
2700 		KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL,
2701 		    ("uma_zfree: Freeing to non free bucket index."));
2702 		bucket->ub_bucket[bucket->ub_cnt] = item;
2703 		bucket->ub_cnt++;
2704 		cache->uc_frees++;
2705 		critical_exit();
2706 		return;
2707 	}
2708 
2709 	/*
2710 	 * We must go back to the zone, which requires acquiring the zone lock,
2711 	 * which in turn means we must release and re-acquire the critical
2712 	 * section.  Since the critical section is released, we may be
2713 	 * preempted or migrate.  As such, make sure not to maintain any
2714 	 * thread-local state specific to the cache from prior to releasing
2715 	 * the critical section.
2716 	 */
2717 	critical_exit();
2718 	if (zone->uz_count == 0 || bucketdisable)
2719 		goto zfree_item;
2720 
2721 	lockfail = 0;
2722 	if (ZONE_TRYLOCK(zone) == 0) {
2723 		/* Record contention to size the buckets. */
2724 		ZONE_LOCK(zone);
2725 		lockfail = 1;
2726 	}
2727 	critical_enter();
2728 	cpu = curcpu;
2729 	cache = &zone->uz_cpu[cpu];
2730 
2731 	/*
2732 	 * Since we have locked the zone we may as well send back our stats.
2733 	 */
2734 	atomic_add_long(&zone->uz_allocs, cache->uc_allocs);
2735 	atomic_add_long(&zone->uz_frees, cache->uc_frees);
2736 	cache->uc_allocs = 0;
2737 	cache->uc_frees = 0;
2738 
2739 	bucket = cache->uc_freebucket;
2740 	if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
2741 		ZONE_UNLOCK(zone);
2742 		goto zfree_start;
2743 	}
2744 	cache->uc_freebucket = NULL;
2745 
2746 	/* Can we throw this on the zone full list? */
2747 	if (bucket != NULL) {
2748 #ifdef UMA_DEBUG_ALLOC
2749 		printf("uma_zfree: Putting old bucket on the free list.\n");
2750 #endif
2751 		/* ub_cnt is pointing to the last free item */
2752 		KASSERT(bucket->ub_cnt != 0,
2753 		    ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n"));
2754 		LIST_INSERT_HEAD(&zone->uz_buckets, bucket, ub_link);
2755 	}
2756 
2757 	/* We are no longer associated with this CPU. */
2758 	critical_exit();
2759 
2760 	/*
2761 	 * We bump the uz count when the cache size is insufficient to
2762 	 * handle the working set.
2763 	 */
2764 	if (lockfail && zone->uz_count < BUCKET_MAX)
2765 		zone->uz_count++;
2766 	ZONE_UNLOCK(zone);
2767 
2768 #ifdef UMA_DEBUG_ALLOC
2769 	printf("uma_zfree: Allocating new free bucket.\n");
2770 #endif
2771 	bucket = bucket_alloc(zone, udata, M_NOWAIT);
2772 	if (bucket) {
2773 		critical_enter();
2774 		cpu = curcpu;
2775 		cache = &zone->uz_cpu[cpu];
2776 		if (cache->uc_freebucket == NULL) {
2777 			cache->uc_freebucket = bucket;
2778 			goto zfree_start;
2779 		}
2780 		/*
2781 		 * We lost the race, start over.  We have to drop our
2782 		 * critical section to free the bucket.
2783 		 */
2784 		critical_exit();
2785 		bucket_free(zone, bucket, udata);
2786 		goto zfree_restart;
2787 	}
2788 
2789 	/*
2790 	 * If nothing else caught this, we'll just do an internal free.
2791 	 */
2792 zfree_item:
2793 	zone_free_item(zone, item, udata, SKIP_DTOR);
2794 
2795 	return;
2796 }
2797 
2798 static void
2799 slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item)
2800 {
2801 	uint8_t freei;
2802 
2803 	mtx_assert(&keg->uk_lock, MA_OWNED);
2804 	MPASS(keg == slab->us_keg);
2805 
2806 	/* Do we need to remove from any lists? */
2807 	if (slab->us_freecount+1 == keg->uk_ipers) {
2808 		LIST_REMOVE(slab, us_link);
2809 		LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
2810 	} else if (slab->us_freecount == 0) {
2811 		LIST_REMOVE(slab, us_link);
2812 		LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
2813 	}
2814 
2815 	/* Slab management. */
2816 	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
2817 	BIT_SET(SLAB_SETSIZE, freei, &slab->us_free);
2818 	slab->us_freecount++;
2819 
2820 	/* Keg statistics. */
2821 	keg->uk_free++;
2822 }
2823 
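/*
 * Return 'cnt' items to their backing slabs, batching the work under the
 * keg lock.  If a keg or the zone had been marked full and can accept
 * allocations again, clear the flag and wake up any sleepers.
 */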
2824 static void
2825 zone_release(uma_zone_t zone, void **bucket, int cnt)
2826 {
2827 	void *item;
2828 	uma_slab_t slab;
2829 	uma_keg_t keg;
2830 	uint8_t *mem;
2831 	int clearfull;
2832 	int i;
2833 
2834 	clearfull = 0;
2835 	keg = zone_first_keg(zone);
2836 	KEG_LOCK(keg);
2837 	for (i = 0; i < cnt; i++) {
2838 		item = bucket[i];
2839 		if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) {
2840 			mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
2841 			if (zone->uz_flags & UMA_ZONE_HASH) {
2842 				slab = hash_sfind(&keg->uk_hash, mem);
2843 			} else {
2844 				mem += keg->uk_pgoff;
2845 				slab = (uma_slab_t)mem;
2846 			}
2847 		} else {
2848 			slab = vtoslab((vm_offset_t)item);
2849 			if (slab->us_keg != keg) {
2850 				KEG_UNLOCK(keg);
2851 				keg = slab->us_keg;
2852 				KEG_LOCK(keg);
2853 			}
2854 		}
2855 		slab_free_item(keg, slab, item);
2856 		if (keg->uk_flags & UMA_ZFLAG_FULL) {
2857 			if (keg->uk_pages < keg->uk_maxpages) {
2858 				keg->uk_flags &= ~UMA_ZFLAG_FULL;
2859 				clearfull = 1;
2860 			}
2861 
2862 			/*
2863 			 * We can handle one more allocation. Since we're
2864 			 * clearing ZFLAG_FULL, wake up all procs blocked
2865 			 * on pages. This should be uncommon, so keeping this
2866 			 * simple for now (rather than adding count of blocked
2867 			 * threads etc).
2868 			 */
2869 			wakeup(keg);
2870 		}
2871 	}
2872 	KEG_UNLOCK(keg);
2873 	if (clearfull) {
2874 		ZONE_LOCK(zone);
2875 		zone->uz_flags &= ~UMA_ZFLAG_FULL;
2876 		wakeup(zone);
2877 		ZONE_UNLOCK(zone);
2878 	}
2879 
2880 }
2881 
2882 /*
2883  * Frees a single item to any zone.
2884  *
2885  * Arguments:
2886  *	zone   The zone to free to
2887  *	item   The item we're freeing
2888  *	udata  User supplied data for the dtor
2889  *	skip   Skip dtors and finis
2890  */
2891 static void
2892 zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip)
2893 {
2894 
2895 #ifdef INVARIANTS
2896 	if (skip == SKIP_NONE) {
2897 		if (zone->uz_flags & UMA_ZONE_MALLOC)
2898 			uma_dbg_free(zone, udata, item);
2899 		else
2900 			uma_dbg_free(zone, NULL, item);
2901 	}
2902 #endif
2903 	if (skip < SKIP_DTOR && zone->uz_dtor)
2904 		zone->uz_dtor(item, zone->uz_size, udata);
2905 
2906 	if (skip < SKIP_FINI && zone->uz_fini)
2907 		zone->uz_fini(item, zone->uz_size);
2908 
2909 	atomic_add_long(&zone->uz_frees, 1);
2910 	zone->uz_release(zone->uz_arg, &item, 1);
2911 }
2912 
2913 /* See uma.h */
2914 int
2915 uma_zone_set_max(uma_zone_t zone, int nitems)
2916 {
2917 	uma_keg_t keg;
2918 
2919 	keg = zone_first_keg(zone);
2920 	if (keg == NULL)
2921 		return (0);
2922 	KEG_LOCK(keg);
2923 	keg->uk_maxpages = (nitems / keg->uk_ipers) * keg->uk_ppera;
2924 	if (keg->uk_maxpages * keg->uk_ipers < nitems)
2925 		keg->uk_maxpages += keg->uk_ppera;
2926 	nitems = keg->uk_maxpages * keg->uk_ipers;
2927 	KEG_UNLOCK(keg);
2928 
2929 	return (nitems);
2930 }
2931 
2932 /* See uma.h */
2933 int
2934 uma_zone_get_max(uma_zone_t zone)
2935 {
2936 	int nitems;
2937 	uma_keg_t keg;
2938 
2939 	keg = zone_first_keg(zone);
2940 	if (keg == NULL)
2941 		return (0);
2942 	KEG_LOCK(keg);
2943 	nitems = keg->uk_maxpages * keg->uk_ipers;
2944 	KEG_UNLOCK(keg);
2945 
2946 	return (nitems);
2947 }
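/*
 * Illustrative usage sketch (editor's note, not compiled): a caller caps a
 * zone and reads back the effective limit, which may exceed the requested
 * value because it is rounded to whole slabs.  "foo_zone" is hypothetical.
 *
 *	int eff;
 *
 *	eff = uma_zone_set_max(foo_zone, 1024);
 *	eff = uma_zone_get_max(foo_zone);
 */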
2948 
2949 /* See uma.h */
2950 void
2951 uma_zone_set_warning(uma_zone_t zone, const char *warning)
2952 {
2953 
2954 	ZONE_LOCK(zone);
2955 	zone->uz_warning = warning;
2956 	ZONE_UNLOCK(zone);
2957 }
2958 
2959 /* See uma.h */
2960 int
2961 uma_zone_get_cur(uma_zone_t zone)
2962 {
2963 	int64_t nitems;
2964 	u_int i;
2965 
2966 	ZONE_LOCK(zone);
2967 	nitems = zone->uz_allocs - zone->uz_frees;
2968 	CPU_FOREACH(i) {
2969 		/*
2970 		 * See the comment in sysctl_vm_zone_stats() regarding the
2971 		 * safety of accessing the per-cpu caches. With the zone lock
2972 		 * held, it is safe, but can potentially result in stale data.
2973 		 */
2974 		nitems += zone->uz_cpu[i].uc_allocs -
2975 		    zone->uz_cpu[i].uc_frees;
2976 	}
2977 	ZONE_UNLOCK(zone);
2978 
2979 	return (nitems < 0 ? 0 : nitems);
2980 }
2981 
2982 /* See uma.h */
2983 void
2984 uma_zone_set_init(uma_zone_t zone, uma_init uminit)
2985 {
2986 	uma_keg_t keg;
2987 
2988 	keg = zone_first_keg(zone);
2989 	KASSERT(keg != NULL, ("uma_zone_set_init: Invalid zone type"));
2990 	KEG_LOCK(keg);
2991 	KASSERT(keg->uk_pages == 0,
2992 	    ("uma_zone_set_init on non-empty keg"));
2993 	keg->uk_init = uminit;
2994 	KEG_UNLOCK(keg);
2995 }
2996 
2997 /* See uma.h */
2998 void
2999 uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
3000 {
3001 	uma_keg_t keg;
3002 
3003 	keg = zone_first_keg(zone);
3004 	KASSERT(keg != NULL, ("uma_zone_set_fini: Invalid zone type"));
3005 	KEG_LOCK(keg);
3006 	KASSERT(keg->uk_pages == 0,
3007 	    ("uma_zone_set_fini on non-empty keg"));
3008 	keg->uk_fini = fini;
3009 	KEG_UNLOCK(keg);
3010 }
3011 
3012 /* See uma.h */
3013 void
3014 uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
3015 {
3016 
3017 	ZONE_LOCK(zone);
3018 	KASSERT(zone_first_keg(zone)->uk_pages == 0,
3019 	    ("uma_zone_set_zinit on non-empty keg"));
3020 	zone->uz_init = zinit;
3021 	ZONE_UNLOCK(zone);
3022 }
3023 
3024 /* See uma.h */
3025 void
3026 uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
3027 {
3028 
3029 	ZONE_LOCK(zone);
3030 	KASSERT(zone_first_keg(zone)->uk_pages == 0,
3031 	    ("uma_zone_set_zfini on non-empty keg"));
3032 	zone->uz_fini = zfini;
3033 	ZONE_UNLOCK(zone);
3034 }
3035 
3036 /* See uma.h */
3037 /* XXX uk_freef is not actually used with the zone locked */
3038 void
3039 uma_zone_set_freef(uma_zone_t zone, uma_free freef)
3040 {
3041 	uma_keg_t keg;
3042 
3043 	keg = zone_first_keg(zone);
3044 	KASSERT(keg != NULL, ("uma_zone_set_freef: Invalid zone type"));
3045 	KEG_LOCK(keg);
3046 	keg->uk_freef = freef;
3047 	KEG_UNLOCK(keg);
3048 }
3049 
3050 /* See uma.h */
3051 /* XXX uk_allocf is not actually used with the zone locked */
3052 void
3053 uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
3054 {
3055 	uma_keg_t keg;
3056 
3057 	keg = zone_first_keg(zone);
3058 	KEG_LOCK(keg);
3059 	keg->uk_allocf = allocf;
3060 	KEG_UNLOCK(keg);
3061 }
3062 
3063 /* See uma.h */
3064 void
3065 uma_zone_reserve(uma_zone_t zone, int items)
3066 {
3067 	uma_keg_t keg;
3068 
3069 	keg = zone_first_keg(zone);
3070 	if (keg == NULL)
3071 		return;
3072 	KEG_LOCK(keg);
3073 	keg->uk_reserve = items;
3074 	KEG_UNLOCK(keg);
3077 }
3078 
3079 /* See uma.h */
3080 int
3081 uma_zone_reserve_kva(uma_zone_t zone, int count)
3082 {
3083 	uma_keg_t keg;
3084 	vm_offset_t kva;
3085 	int pages;
3086 
3087 	keg = zone_first_keg(zone);
3088 	if (keg == NULL)
3089 		return (0);
3090 	pages = count / keg->uk_ipers;
3091 
3092 	if (pages * keg->uk_ipers < count)
3093 		pages++;
3094 
3095 #ifdef UMA_MD_SMALL_ALLOC
3096 	if (keg->uk_ppera > 1) {
3097 #else
3098 	if (1) {
3099 #endif
3100 		kva = kva_alloc(pages * UMA_SLAB_SIZE);
3101 		if (kva == 0)
3102 			return (0);
3103 	} else
3104 		kva = 0;
3105 	KEG_LOCK(keg);
3106 	keg->uk_kva = kva;
3107 	keg->uk_offset = 0;
3108 	keg->uk_maxpages = pages;
3109 #ifdef UMA_MD_SMALL_ALLOC
3110 	keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc;
3111 #else
3112 	keg->uk_allocf = noobj_alloc;
3113 #endif
3114 	keg->uk_flags |= UMA_ZONE_NOFREE;
3115 	KEG_UNLOCK(keg);
3116 
3117 	return (1);
3118 }
3119 
3120 /* See uma.h */
3121 void
3122 uma_prealloc(uma_zone_t zone, int items)
3123 {
3124 	int slabs;
3125 	uma_slab_t slab;
3126 	uma_keg_t keg;
3127 
3128 	keg = zone_first_keg(zone);
3129 	if (keg == NULL)
3130 		return;
3131 	KEG_LOCK(keg);
3132 	slabs = items / keg->uk_ipers;
3133 	if (slabs * keg->uk_ipers < items)
3134 		slabs++;
3135 	while (slabs > 0) {
3136 		slab = keg_alloc_slab(keg, zone, M_WAITOK);
3137 		if (slab == NULL)
3138 			break;
3139 		MPASS(slab->us_keg == keg);
3140 		LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
3141 		slabs--;
3142 	}
3143 	KEG_UNLOCK(keg);
3144 }
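/*
 * Illustrative usage sketch (editor's note, not compiled): a subsystem that
 * cannot tolerate early allocation failures may pre-populate its zone at
 * boot time, e.g.
 *
 *	uma_prealloc(foo_zone, 256);
 *
 * which backs the zone with enough free slabs for at least 256 items.
 */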
3145 
3146 /* See uma.h */
3147 uint32_t *
3148 uma_find_refcnt(uma_zone_t zone, void *item)
3149 {
3150 	uma_slabrefcnt_t slabref;
3151 	uma_slab_t slab;
3152 	uma_keg_t keg;
3153 	uint32_t *refcnt;
3154 	int idx;
3155 
3156 	slab = vtoslab((vm_offset_t)item & (~UMA_SLAB_MASK));
3157 	slabref = (uma_slabrefcnt_t)slab;
3158 	keg = slab->us_keg;
3159 	KASSERT(keg->uk_flags & UMA_ZONE_REFCNT,
3160 	    ("uma_find_refcnt(): zone possibly not UMA_ZONE_REFCNT"));
3161 	idx = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
3162 	refcnt = &slabref->us_refcnt[idx];
3163 	return (refcnt);
3164 }
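/*
 * Illustrative usage sketch (editor's note, not compiled): for a zone
 * created with the UMA_ZONE_REFCNT flag, a consumer can locate the per-item
 * reference counter for an allocated item ("foo_zone" and "item" are
 * hypothetical):
 *
 *	uint32_t *cnt;
 *
 *	cnt = uma_find_refcnt(foo_zone, item);
 *	*cnt = 1;
 */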
3165 
3166 /* See uma.h */
3167 void
3168 uma_reclaim(void)
3169 {
3170 #ifdef UMA_DEBUG
3171 	printf("UMA: vm asked us to release pages!\n");
3172 #endif
3173 	bucket_enable();
3174 	zone_foreach(zone_drain);
3175 	if (vm_page_count_min()) {
3176 		cache_drain_safe(NULL);
3177 		zone_foreach(zone_drain);
3178 	}
3179 	/*
3180 	 * Some slabs may have been freed but this zone is visited early in the
3181 	 * pass, so visit it again to free pages that become empty once other
3182 	 * zones are drained.  We have to do the same for buckets.
3183 	 */
3184 	zone_drain(slabzone);
3185 	zone_drain(slabrefzone);
3186 	bucket_zone_drain();
3187 }
3188 
3189 /* See uma.h */
3190 int
3191 uma_zone_exhausted(uma_zone_t zone)
3192 {
3193 	int full;
3194 
3195 	ZONE_LOCK(zone);
3196 	full = (zone->uz_flags & UMA_ZFLAG_FULL);
3197 	ZONE_UNLOCK(zone);
3198 	return (full);
3199 }
3200 
3201 int
3202 uma_zone_exhausted_nolock(uma_zone_t zone)
3203 {
3204 	return (zone->uz_flags & UMA_ZFLAG_FULL);
3205 }
3206 
3207 void *
3208 uma_large_malloc(int size, int wait)
3209 {
3210 	void *mem;
3211 	uma_slab_t slab;
3212 	uint8_t flags;
3213 
3214 	slab = zone_alloc_item(slabzone, NULL, wait);
3215 	if (slab == NULL)
3216 		return (NULL);
3217 	mem = page_alloc(NULL, size, &flags, wait);
3218 	if (mem) {
3219 		vsetslab((vm_offset_t)mem, slab);
3220 		slab->us_data = mem;
3221 		slab->us_flags = flags | UMA_SLAB_MALLOC;
3222 		slab->us_size = size;
3223 	} else {
3224 		zone_free_item(slabzone, slab, NULL, SKIP_NONE);
3225 	}
3226 
3227 	return (mem);
3228 }
3229 
3230 void
3231 uma_large_free(uma_slab_t slab)
3232 {
3233 
3234 	page_free(slab->us_data, slab->us_size, slab->us_flags);
3235 	zone_free_item(slabzone, slab, NULL, SKIP_NONE);
3236 }
3237 
3238 static void
3239 uma_zero_item(void *item, uma_zone_t zone)
3240 {
3241 
3242 	if (zone->uz_flags & UMA_ZONE_PCPU) {
3243 		for (int i = 0; i < mp_ncpus; i++)
3244 			bzero(zpcpu_get_cpu(item, i), zone->uz_size);
3245 	} else
3246 		bzero(item, zone->uz_size);
3247 }
3248 
3249 void
3250 uma_print_stats(void)
3251 {
3252 	zone_foreach(uma_print_zone);
3253 }
3254 
3255 static void
3256 slab_print(uma_slab_t slab)
3257 {
3258 	printf("slab: keg %p, data %p, freecount %d\n",
3259 		slab->us_keg, slab->us_data, slab->us_freecount);
3260 }
3261 
3262 static void
3263 cache_print(uma_cache_t cache)
3264 {
3265 	printf("alloc: %p(%d), free: %p(%d)\n",
3266 		cache->uc_allocbucket,
3267 		cache->uc_allocbucket?cache->uc_allocbucket->ub_cnt:0,
3268 		cache->uc_freebucket,
3269 		cache->uc_freebucket?cache->uc_freebucket->ub_cnt:0);
3270 }
3271 
3272 static void
3273 uma_print_keg(uma_keg_t keg)
3274 {
3275 	uma_slab_t slab;
3276 
3277 	printf("keg: %s(%p) size %d(%d) flags %#x ipers %d ppera %d "
3278 	    "out %d free %d limit %d\n",
3279 	    keg->uk_name, keg, keg->uk_size, keg->uk_rsize, keg->uk_flags,
3280 	    keg->uk_ipers, keg->uk_ppera,
3281 	    (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free,
3282 	    (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers);
3283 	printf("Part slabs:\n");
3284 	LIST_FOREACH(slab, &keg->uk_part_slab, us_link)
3285 		slab_print(slab);
3286 	printf("Free slabs:\n");
3287 	LIST_FOREACH(slab, &keg->uk_free_slab, us_link)
3288 		slab_print(slab);
3289 	printf("Full slabs:\n");
3290 	LIST_FOREACH(slab, &keg->uk_full_slab, us_link)
3291 		slab_print(slab);
3292 }
3293 
3294 void
3295 uma_print_zone(uma_zone_t zone)
3296 {
3297 	uma_cache_t cache;
3298 	uma_klink_t kl;
3299 	int i;
3300 
3301 	printf("zone: %s(%p) size %d flags %#x\n",
3302 	    zone->uz_name, zone, zone->uz_size, zone->uz_flags);
3303 	LIST_FOREACH(kl, &zone->uz_kegs, kl_link)
3304 		uma_print_keg(kl->kl_keg);
3305 	CPU_FOREACH(i) {
3306 		cache = &zone->uz_cpu[i];
3307 		printf("CPU %d Cache:\n", i);
3308 		cache_print(cache);
3309 	}
3310 }
3311 
3312 #ifdef DDB
3313 /*
3314  * Generate statistics across both the zone and its per-cpu caches.  Return
3315  * the desired statistics if the corresponding pointer is non-NULL.
3316  *
3317  * Note: does not update the zone statistics, as it can't safely clear the
3318  * per-CPU cache statistic.
3319  *
3320  * XXXRW: Following the uc_allocbucket and uc_freebucket pointers here isn't
3321  * safe from off-CPU; we should modify the caches to track this information
3322  * directly so that we don't have to.
3323  */
3324 static void
3325 uma_zone_sumstat(uma_zone_t z, int *cachefreep, uint64_t *allocsp,
3326     uint64_t *freesp, uint64_t *sleepsp)
3327 {
3328 	uma_cache_t cache;
3329 	uint64_t allocs, frees, sleeps;
3330 	int cachefree, cpu;
3331 
3332 	allocs = frees = sleeps = 0;
3333 	cachefree = 0;
3334 	CPU_FOREACH(cpu) {
3335 		cache = &z->uz_cpu[cpu];
3336 		if (cache->uc_allocbucket != NULL)
3337 			cachefree += cache->uc_allocbucket->ub_cnt;
3338 		if (cache->uc_freebucket != NULL)
3339 			cachefree += cache->uc_freebucket->ub_cnt;
3340 		allocs += cache->uc_allocs;
3341 		frees += cache->uc_frees;
3342 	}
3343 	allocs += z->uz_allocs;
3344 	frees += z->uz_frees;
3345 	sleeps += z->uz_sleeps;
3346 	if (cachefreep != NULL)
3347 		*cachefreep = cachefree;
3348 	if (allocsp != NULL)
3349 		*allocsp = allocs;
3350 	if (freesp != NULL)
3351 		*freesp = frees;
3352 	if (sleepsp != NULL)
3353 		*sleepsp = sleeps;
3354 }
3355 #endif /* DDB */
3356 
3357 static int
3358 sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS)
3359 {
3360 	uma_keg_t kz;
3361 	uma_zone_t z;
3362 	int count;
3363 
3364 	count = 0;
3365 	mtx_lock(&uma_mtx);
3366 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
3367 		LIST_FOREACH(z, &kz->uk_zones, uz_link)
3368 			count++;
3369 	}
3370 	mtx_unlock(&uma_mtx);
3371 	return (sysctl_handle_int(oidp, &count, 0, req));
3372 }
3373 
3374 static int
3375 sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
3376 {
3377 	struct uma_stream_header ush;
3378 	struct uma_type_header uth;
3379 	struct uma_percpu_stat ups;
3380 	uma_bucket_t bucket;
3381 	struct sbuf sbuf;
3382 	uma_cache_t cache;
3383 	uma_klink_t kl;
3384 	uma_keg_t kz;
3385 	uma_zone_t z;
3386 	uma_keg_t k;
3387 	int count, error, i;
3388 
3389 	error = sysctl_wire_old_buffer(req, 0);
3390 	if (error != 0)
3391 		return (error);
3392 	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
3393 
3394 	count = 0;
3395 	mtx_lock(&uma_mtx);
3396 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
3397 		LIST_FOREACH(z, &kz->uk_zones, uz_link)
3398 			count++;
3399 	}
3400 
3401 	/*
3402 	 * Insert stream header.
3403 	 */
3404 	bzero(&ush, sizeof(ush));
3405 	ush.ush_version = UMA_STREAM_VERSION;
3406 	ush.ush_maxcpus = (mp_maxid + 1);
3407 	ush.ush_count = count;
3408 	(void)sbuf_bcat(&sbuf, &ush, sizeof(ush));
3409 
3410 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
3411 		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
3412 			bzero(&uth, sizeof(uth));
3413 			ZONE_LOCK(z);
3414 			strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
3415 			uth.uth_align = kz->uk_align;
3416 			uth.uth_size = kz->uk_size;
3417 			uth.uth_rsize = kz->uk_rsize;
3418 			LIST_FOREACH(kl, &z->uz_kegs, kl_link) {
3419 				k = kl->kl_keg;
3420 				uth.uth_maxpages += k->uk_maxpages;
3421 				uth.uth_pages += k->uk_pages;
3422 				uth.uth_keg_free += k->uk_free;
3423 				uth.uth_limit = (k->uk_maxpages / k->uk_ppera)
3424 				    * k->uk_ipers;
3425 			}
3426 
3427 			/*
3428 			 * A zone is secondary if it is not the first entry
3429 			 * on the keg's zone list.
3430 			 */
3431 			if ((z->uz_flags & UMA_ZONE_SECONDARY) &&
3432 			    (LIST_FIRST(&kz->uk_zones) != z))
3433 				uth.uth_zone_flags = UTH_ZONE_SECONDARY;
3434 
3435 			LIST_FOREACH(bucket, &z->uz_buckets, ub_link)
3436 				uth.uth_zone_free += bucket->ub_cnt;
3437 			uth.uth_allocs = z->uz_allocs;
3438 			uth.uth_frees = z->uz_frees;
3439 			uth.uth_fails = z->uz_fails;
3440 			uth.uth_sleeps = z->uz_sleeps;
3441 			(void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
3442 			/*
3443 			 * While it is not normally safe to access the cache
3444 			 * bucket pointers while not on the CPU that owns the
3445 			 * cache, we only allow the pointers to be exchanged
3446 			 * without the zone lock held, not invalidated, so
3447 			 * accept the possible race associated with bucket
3448 			 * exchange during monitoring.
3449 			 */
3450 			for (i = 0; i < (mp_maxid + 1); i++) {
3451 				bzero(&ups, sizeof(ups));
3452 				if (kz->uk_flags & UMA_ZFLAG_INTERNAL)
3453 					goto skip;
3454 				if (CPU_ABSENT(i))
3455 					goto skip;
3456 				cache = &z->uz_cpu[i];
3457 				if (cache->uc_allocbucket != NULL)
3458 					ups.ups_cache_free +=
3459 					    cache->uc_allocbucket->ub_cnt;
3460 				if (cache->uc_freebucket != NULL)
3461 					ups.ups_cache_free +=
3462 					    cache->uc_freebucket->ub_cnt;
3463 				ups.ups_allocs = cache->uc_allocs;
3464 				ups.ups_frees = cache->uc_frees;
3465 skip:
3466 				(void)sbuf_bcat(&sbuf, &ups, sizeof(ups));
3467 			}
3468 			ZONE_UNLOCK(z);
3469 		}
3470 	}
3471 	mtx_unlock(&uma_mtx);
3472 	error = sbuf_finish(&sbuf);
3473 	sbuf_delete(&sbuf);
3474 	return (error);
3475 }
3476 
3477 int
3478 sysctl_handle_uma_zone_max(SYSCTL_HANDLER_ARGS)
3479 {
3480 	uma_zone_t zone = *(uma_zone_t *)arg1;
3481 	int error, max, old;
3482 
3483 	old = max = uma_zone_get_max(zone);
3484 	error = sysctl_handle_int(oidp, &max, 0, req);
3485 	if (error || !req->newptr)
3486 		return (error);
3487 
3488 	if (max < old)
3489 		return (EINVAL);
3490 
3491 	uma_zone_set_max(zone, max);
3492 
3493 	return (0);
3494 }
3495 
3496 int
3497 sysctl_handle_uma_zone_cur(SYSCTL_HANDLER_ARGS)
3498 {
3499 	uma_zone_t zone = *(uma_zone_t *)arg1;
3500 	int cur;
3501 
3502 	cur = uma_zone_get_cur(zone);
3503 	return (sysctl_handle_int(oidp, &cur, 0, req));
3504 }
3505 
3506 #ifdef DDB
3507 DB_SHOW_COMMAND(uma, db_show_uma)
3508 {
3509 	uint64_t allocs, frees, sleeps;
3510 	uma_bucket_t bucket;
3511 	uma_keg_t kz;
3512 	uma_zone_t z;
3513 	int cachefree;
3514 
3515 	db_printf("%18s %8s %8s %8s %12s %8s %8s\n", "Zone", "Size", "Used",
3516 	    "Free", "Requests", "Sleeps", "Bucket");
3517 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
3518 		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
3519 			if (kz->uk_flags & UMA_ZFLAG_INTERNAL) {
3520 				allocs = z->uz_allocs;
3521 				frees = z->uz_frees;
3522 				sleeps = z->uz_sleeps;
3523 				cachefree = 0;
3524 			} else
3525 				uma_zone_sumstat(z, &cachefree, &allocs,
3526 				    &frees, &sleeps);
3527 			if (!((z->uz_flags & UMA_ZONE_SECONDARY) &&
3528 			    (LIST_FIRST(&kz->uk_zones) != z)))
3529 				cachefree += kz->uk_free;
3530 			LIST_FOREACH(bucket, &z->uz_buckets, ub_link)
3531 				cachefree += bucket->ub_cnt;
3532 			db_printf("%18s %8ju %8jd %8d %12ju %8ju %8u\n",
3533 			    z->uz_name, (uintmax_t)kz->uk_size,
3534 			    (intmax_t)(allocs - frees), cachefree,
3535 			    (uintmax_t)allocs, sleeps, z->uz_count);
3536 			if (db_pager_quit)
3537 				return;
3538 		}
3539 	}
3540 }
3541 
3542 DB_SHOW_COMMAND(umacache, db_show_umacache)
3543 {
3544 	uint64_t allocs, frees;
3545 	uma_bucket_t bucket;
3546 	uma_zone_t z;
3547 	int cachefree;
3548 
3549 	db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free",
3550 	    "Requests", "Bucket");
3551 	LIST_FOREACH(z, &uma_cachezones, uz_link) {
3552 		uma_zone_sumstat(z, &cachefree, &allocs, &frees, NULL);
3553 		LIST_FOREACH(bucket, &z->uz_buckets, ub_link)
3554 			cachefree += bucket->ub_cnt;
3555 		db_printf("%18s %8ju %8jd %8d %12ju %8u\n",
3556 		    z->uz_name, (uintmax_t)z->uz_size,
3557 		    (intmax_t)(allocs - frees), cachefree,
3558 		    (uintmax_t)allocs, z->uz_count);
3559 		if (db_pager_quit)
3560 			return;
3561 	}
3562 }
3563 #endif
3564