xref: /freebsd/sys/vm/uma_core.c (revision 1de7b4b805ddbf2429da511c053686ac4591ed89)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson <jeff@FreeBSD.org>
5  * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
6  * Copyright (c) 2004-2006 Robert N. M. Watson
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice unmodified, this list of conditions, and the following
14  *    disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /*
32  * uma_core.c  Implementation of the Universal Memory allocator
33  *
34  * This allocator is intended to replace the multitude of similar object caches
35  * in the standard FreeBSD kernel.  The intent is to be flexible as well as
36  * efficient.  A primary design goal is to return unused memory to the rest of
37  * the system.  This will make the system as a whole more flexible due to the
38  * ability to move memory to subsystems which most need it instead of leaving
39  * pools of reserved memory unused.
40  *
41  * The basic ideas stem from similar slab/zone based allocators whose algorithms
42  * are well known.
43  *
44  */
45 
46 /*
47  * TODO:
48  *	- Improve memory usage for large allocations
49  *	- Investigate cache size adjustments
50  */
51 
52 #include <sys/cdefs.h>
53 __FBSDID("$FreeBSD$");
54 
55 #include "opt_ddb.h"
56 #include "opt_param.h"
57 #include "opt_vm.h"
58 
59 #include <sys/param.h>
60 #include <sys/systm.h>
61 #include <sys/bitset.h>
62 #include <sys/eventhandler.h>
63 #include <sys/kernel.h>
64 #include <sys/types.h>
65 #include <sys/queue.h>
66 #include <sys/malloc.h>
67 #include <sys/ktr.h>
68 #include <sys/lock.h>
69 #include <sys/sysctl.h>
70 #include <sys/mutex.h>
71 #include <sys/proc.h>
72 #include <sys/random.h>
73 #include <sys/rwlock.h>
74 #include <sys/sbuf.h>
75 #include <sys/sched.h>
76 #include <sys/smp.h>
77 #include <sys/taskqueue.h>
78 #include <sys/vmmeter.h>
79 
80 #include <vm/vm.h>
81 #include <vm/vm_object.h>
82 #include <vm/vm_page.h>
83 #include <vm/vm_pageout.h>
84 #include <vm/vm_param.h>
85 #include <vm/vm_map.h>
86 #include <vm/vm_kern.h>
87 #include <vm/vm_extern.h>
88 #include <vm/uma.h>
89 #include <vm/uma_int.h>
90 #include <vm/uma_dbg.h>
91 
92 #include <ddb/ddb.h>
93 
94 #ifdef DEBUG_MEMGUARD
95 #include <vm/memguard.h>
96 #endif
97 
98 /*
99  * This is the zone and keg from which all zones are spawned.  The idea is that
100  * even the zone & keg heads are allocated from the allocator, so we use the
101  * bss section to bootstrap us.
102  */
103 static struct uma_keg masterkeg;
104 static struct uma_zone masterzone_k;
105 static struct uma_zone masterzone_z;
106 static uma_zone_t kegs = &masterzone_k;
107 static uma_zone_t zones = &masterzone_z;
108 
109 /* This is the zone from which all offpage uma_slab_t's are allocated. */
110 static uma_zone_t slabzone;
111 
112 /*
113  * The initial hash tables come out of this zone so they can be allocated
114  * prior to malloc coming up.
115  */
116 static uma_zone_t hashzone;
117 
118 /* The boot-time adjusted value for cache line alignment. */
119 int uma_align_cache = 64 - 1;
120 
121 static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");
122 
123 /*
124  * Are we allowed to allocate buckets?
125  */
126 static int bucketdisable = 1;
127 
128 /* Linked list of all kegs in the system */
129 static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);
130 
131 /* Linked list of all cache-only zones in the system */
132 static LIST_HEAD(,uma_zone) uma_cachezones =
133     LIST_HEAD_INITIALIZER(uma_cachezones);
134 
135 /* This RW lock protects the keg list */
136 static struct rwlock_padalign __exclusive_cache_line uma_rwlock;
137 
138 /*
139  * Pointer to and count of the pool of pages preallocated at startup
140  * to bootstrap UMA.  Early zones continue to use the pool until it is
141  * depleted, so allocations may happen after boot; thus we need a mutex
142  * to protect it.
143  */
144 static char *bootmem;
145 static int boot_pages;
146 static struct mtx uma_boot_pages_mtx;
147 
148 static struct sx uma_drain_lock;
149 
150 /* Is the VM done starting up? */
151 static int booted = 0;
152 #define	UMA_STARTUP	1
153 #define	UMA_STARTUP2	2
154 
155 /*
156  * This is the handle used to schedule events that need to happen
157  * outside of the allocation fast path.
158  */
159 static struct callout uma_callout;
160 #define	UMA_TIMEOUT	20		/* Seconds for callout interval. */
161 
162 /*
163  * This structure is passed as the zone ctor arg so that I don't have to create
164  * a special allocation function just for zones.
165  */
166 struct uma_zctor_args {
167 	const char *name;
168 	size_t size;
169 	uma_ctor ctor;
170 	uma_dtor dtor;
171 	uma_init uminit;
172 	uma_fini fini;
173 	uma_import import;
174 	uma_release release;
175 	void *arg;
176 	uma_keg_t keg;
177 	int align;
178 	uint32_t flags;
179 };
180 
181 struct uma_kctor_args {
182 	uma_zone_t zone;
183 	size_t size;
184 	uma_init uminit;
185 	uma_fini fini;
186 	int align;
187 	uint32_t flags;
188 };
189 
190 struct uma_bucket_zone {
191 	uma_zone_t	ubz_zone;
192 	char		*ubz_name;
193 	int		ubz_entries;	/* Number of items it can hold. */
194 	int		ubz_maxsize;	/* Maximum allocation size per-item. */
195 };
196 
197 /*
198  * Compute the actual number of bucket entries so that buckets pack
199  * into power-of-two sizes for more efficient space utilization.
200  */
201 #define	BUCKET_SIZE(n)						\
202     (((sizeof(void *) * (n)) - sizeof(struct uma_bucket)) / sizeof(void *))
203 
204 #define	BUCKET_MAX	BUCKET_SIZE(256)
205 
206 struct uma_bucket_zone bucket_zones[] = {
207 	{ NULL, "4 Bucket", BUCKET_SIZE(4), 4096 },
208 	{ NULL, "6 Bucket", BUCKET_SIZE(6), 3072 },
209 	{ NULL, "8 Bucket", BUCKET_SIZE(8), 2048 },
210 	{ NULL, "12 Bucket", BUCKET_SIZE(12), 1536 },
211 	{ NULL, "16 Bucket", BUCKET_SIZE(16), 1024 },
212 	{ NULL, "32 Bucket", BUCKET_SIZE(32), 512 },
213 	{ NULL, "64 Bucket", BUCKET_SIZE(64), 256 },
214 	{ NULL, "128 Bucket", BUCKET_SIZE(128), 128 },
215 	{ NULL, "256 Bucket", BUCKET_SIZE(256), 64 },
216 	{ NULL, NULL, 0}
217 };
218 
219 /*
220  * Flags and enumerations to be passed to internal functions.
221  */
222 enum zfreeskip { SKIP_NONE = 0, SKIP_DTOR, SKIP_FINI };
223 
224 /* Prototypes */
225 
226 static void *noobj_alloc(uma_zone_t, vm_size_t, uint8_t *, int);
227 static void *page_alloc(uma_zone_t, vm_size_t, uint8_t *, int);
228 static void *startup_alloc(uma_zone_t, vm_size_t, uint8_t *, int);
229 static void page_free(void *, vm_size_t, uint8_t);
230 static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int);
231 static void cache_drain(uma_zone_t);
232 static void bucket_drain(uma_zone_t, uma_bucket_t);
233 static void bucket_cache_drain(uma_zone_t zone);
234 static int keg_ctor(void *, int, void *, int);
235 static void keg_dtor(void *, int, void *);
236 static int zone_ctor(void *, int, void *, int);
237 static void zone_dtor(void *, int, void *);
238 static int zero_init(void *, int, int);
239 static void keg_small_init(uma_keg_t keg);
240 static void keg_large_init(uma_keg_t keg);
241 static void zone_foreach(void (*zfunc)(uma_zone_t));
242 static void zone_timeout(uma_zone_t zone);
243 static int hash_alloc(struct uma_hash *);
244 static int hash_expand(struct uma_hash *, struct uma_hash *);
245 static void hash_free(struct uma_hash *hash);
246 static void uma_timeout(void *);
247 static void uma_startup3(void);
248 static void *zone_alloc_item(uma_zone_t, void *, int);
249 static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip);
250 static void bucket_enable(void);
251 static void bucket_init(void);
252 static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int);
253 static void bucket_free(uma_zone_t zone, uma_bucket_t, void *);
254 static void bucket_zone_drain(void);
255 static uma_bucket_t zone_alloc_bucket(uma_zone_t zone, void *, int flags);
256 static uma_slab_t zone_fetch_slab(uma_zone_t zone, uma_keg_t last, int flags);
257 static uma_slab_t zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int flags);
258 static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
259 static void slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item);
260 static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
261     uma_fini fini, int align, uint32_t flags);
262 static int zone_import(uma_zone_t zone, void **bucket, int max, int flags);
263 static void zone_release(uma_zone_t zone, void **bucket, int cnt);
264 static void uma_zero_item(void *item, uma_zone_t zone);
265 
266 void uma_print_zone(uma_zone_t);
267 void uma_print_stats(void);
268 static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
269 static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);
270 
271 #ifdef INVARIANTS
272 static void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item);
273 static void uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item);
274 #endif
275 
276 SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);
277 
278 SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLTYPE_INT,
279     0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones");
280 
281 SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
282     0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");
283 
284 static int zone_warnings = 1;
285 SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RWTUN, &zone_warnings, 0,
286     "Warn when a UMA zone becomes full");
287 
288 /*
289  * This routine checks to see whether or not it's safe to enable buckets.
290  */
291 static void
292 bucket_enable(void)
293 {
294 	bucketdisable = vm_page_count_min();
295 }
296 
297 /*
298  * Initialize bucket_zones, the array of zones of buckets of various sizes.
299  *
300  * For each zone, calculate the memory required for each bucket, consisting
301  * of the header and an array of pointers.
302  */
303 static void
304 bucket_init(void)
305 {
306 	struct uma_bucket_zone *ubz;
307 	int size;
308 
309 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) {
310 		size = roundup(sizeof(struct uma_bucket), sizeof(void *));
311 		size += sizeof(void *) * ubz->ubz_entries;
312 		ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
313 		    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
314 		    UMA_ZONE_MTXCLASS | UMA_ZFLAG_BUCKET);
315 	}
316 }
317 
318 /*
319  * Given a desired number of entries for a bucket, return the zone from which
320  * to allocate the bucket.
321  */
322 static struct uma_bucket_zone *
323 bucket_zone_lookup(int entries)
324 {
325 	struct uma_bucket_zone *ubz;
326 
327 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
328 		if (ubz->ubz_entries >= entries)
329 			return (ubz);
330 	ubz--;
331 	return (ubz);
332 }
333 
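/*
 * Given an item size, pick a default per-CPU bucket entry count: the entry
 * count of the largest bucket zone whose per-item size limit still
 * accommodates the item, scaled down for items larger than any limit.
 */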
334 static int
335 bucket_select(int size)
336 {
337 	struct uma_bucket_zone *ubz;
338 
339 	ubz = &bucket_zones[0];
340 	if (size > ubz->ubz_maxsize)
341 		return MAX((ubz->ubz_maxsize * ubz->ubz_entries) / size, 1);
342 
343 	for (; ubz->ubz_entries != 0; ubz++)
344 		if (ubz->ubz_maxsize < size)
345 			break;
346 	ubz--;
347 	return (ubz->ubz_entries);
348 }
349 
350 static uma_bucket_t
351 bucket_alloc(uma_zone_t zone, void *udata, int flags)
352 {
353 	struct uma_bucket_zone *ubz;
354 	uma_bucket_t bucket;
355 
356 	/*
357 	 * This is to stop us from allocating per cpu buckets while we're
358 	 * running out of vm.boot_pages.  Otherwise, we would exhaust the
359 	 * boot pages.  This also prevents us from allocating buckets in
360 	 * low memory situations.
361 	 */
362 	if (bucketdisable)
363 		return (NULL);
364 	/*
365 	 * To limit bucket recursion we store the original zone flags
366 	 * in a cookie passed via zalloc_arg/zfree_arg.  This allows the
367 	 * NOVM flag to persist even through deep recursions.  We also
368 	 * store ZFLAG_BUCKET once we have recursed attempting to allocate
369 	 * a bucket for a bucket zone so we do not allow infinite bucket
370 	 * recursion.  This cookie will even persist to frees of unused
371 	 * buckets via the allocation path or bucket allocations in the
372 	 * free path.
373 	 */
374 	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
375 		udata = (void *)(uintptr_t)zone->uz_flags;
376 	else {
377 		if ((uintptr_t)udata & UMA_ZFLAG_BUCKET)
378 			return (NULL);
379 		udata = (void *)((uintptr_t)udata | UMA_ZFLAG_BUCKET);
380 	}
381 	if ((uintptr_t)udata & UMA_ZFLAG_CACHEONLY)
382 		flags |= M_NOVM;
383 	ubz = bucket_zone_lookup(zone->uz_count);
384 	if (ubz->ubz_zone == zone && (ubz + 1)->ubz_entries != 0)
385 		ubz++;
386 	bucket = uma_zalloc_arg(ubz->ubz_zone, udata, flags);
387 	if (bucket) {
388 #ifdef INVARIANTS
389 		bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
390 #endif
391 		bucket->ub_cnt = 0;
392 		bucket->ub_entries = ubz->ubz_entries;
393 	}
394 
395 	return (bucket);
396 }
397 
398 static void
399 bucket_free(uma_zone_t zone, uma_bucket_t bucket, void *udata)
400 {
401 	struct uma_bucket_zone *ubz;
402 
403 	KASSERT(bucket->ub_cnt == 0,
404 	    ("bucket_free: Freeing a non free bucket."));
405 	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
406 		udata = (void *)(uintptr_t)zone->uz_flags;
407 	ubz = bucket_zone_lookup(bucket->ub_entries);
408 	uma_zfree_arg(ubz->ubz_zone, bucket, udata);
409 }
410 
411 static void
412 bucket_zone_drain(void)
413 {
414 	struct uma_bucket_zone *ubz;
415 
416 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
417 		zone_drain(ubz->ubz_zone);
418 }
419 
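/*
 * Print the zone's configured warning message, rate-limited to at most one
 * message per warninterval.
 */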
420 static void
421 zone_log_warning(uma_zone_t zone)
422 {
423 	static const struct timeval warninterval = { 300, 0 };
424 
425 	if (!zone_warnings || zone->uz_warning == NULL)
426 		return;
427 
428 	if (ratecheck(&zone->uz_ratecheck, &warninterval))
429 		printf("[zone: %s] %s\n", zone->uz_name, zone->uz_warning);
430 }
431 
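/*
 * Enqueue the zone's uz_maxaction task, if one is configured, on the thread
 * taskqueue.
 */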
432 static inline void
433 zone_maxaction(uma_zone_t zone)
434 {
435 
436 	if (zone->uz_maxaction.ta_func != NULL)
437 		taskqueue_enqueue(taskqueue_thread, &zone->uz_maxaction);
438 }
439 
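/*
 * Apply kegfn to every keg attached to the zone.
 */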
440 static void
441 zone_foreach_keg(uma_zone_t zone, void (*kegfn)(uma_keg_t))
442 {
443 	uma_klink_t klink;
444 
445 	LIST_FOREACH(klink, &zone->uz_kegs, kl_link)
446 		kegfn(klink->kl_keg);
447 }
448 
449 /*
450  * Routine called by timeout which is used to fire off some time interval
451  * based calculations.  (stats, hash size, etc.)
452  *
453  * Arguments:
454  *	arg   Unused
455  *
456  * Returns:
457  *	Nothing
458  */
459 static void
460 uma_timeout(void *unused)
461 {
462 	bucket_enable();
463 	zone_foreach(zone_timeout);
464 
465 	/* Reschedule this event */
466 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
467 }
468 
469 /*
470  * Routine to perform timeout driven calculations.  This expands the
471  * hashes and does per cpu statistics aggregation.
472  *
473  *  Returns nothing.
474  */
475 static void
476 keg_timeout(uma_keg_t keg)
477 {
478 
479 	KEG_LOCK(keg);
480 	/*
481 	 * Expand the keg hash table.
482 	 *
483 	 * This is done if the number of slabs is larger than the hash size.
484 	 * What I'm trying to do here is avoid collisions entirely.  This
485 	 * may be a little aggressive.  Should I allow for two collisions max?
486 	 */
487 	if (keg->uk_flags & UMA_ZONE_HASH &&
488 	    keg->uk_pages / keg->uk_ppera >= keg->uk_hash.uh_hashsize) {
489 		struct uma_hash newhash;
490 		struct uma_hash oldhash;
491 		int ret;
492 
493 		/*
494 		 * This is so involved because allocating and freeing
495 		 * while the keg lock is held will lead to deadlock.
496 		 * I have to do everything in stages and check for
497 		 * races.
498 		 */
499 		newhash = keg->uk_hash;
500 		KEG_UNLOCK(keg);
501 		ret = hash_alloc(&newhash);
502 		KEG_LOCK(keg);
503 		if (ret) {
504 			if (hash_expand(&keg->uk_hash, &newhash)) {
505 				oldhash = keg->uk_hash;
506 				keg->uk_hash = newhash;
507 			} else
508 				oldhash = newhash;
509 
510 			KEG_UNLOCK(keg);
511 			hash_free(&oldhash);
512 			return;
513 		}
514 	}
515 	KEG_UNLOCK(keg);
516 }
517 
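/*
 * Per-zone timeout work: run keg_timeout() on each of the zone's kegs.
 */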
518 static void
519 zone_timeout(uma_zone_t zone)
520 {
521 
522 	zone_foreach_keg(zone, &keg_timeout);
523 }
524 
525 /*
526  * Allocate and zero fill the next sized hash table from the appropriate
527  * backing store.
528  *
529  * Arguments:
530  *	hash  A new hash structure with the old hash size in uh_hashsize
531  *
532  * Returns:
533  *	1 on success and 0 on failure.
534  */
535 static int
536 hash_alloc(struct uma_hash *hash)
537 {
538 	int oldsize;
539 	int alloc;
540 
541 	oldsize = hash->uh_hashsize;
542 
543 	/* We're just going to grow to the next greater power of two. */
544 	if (oldsize)  {
545 		hash->uh_hashsize = oldsize * 2;
546 		alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
547 		hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
548 		    M_UMAHASH, M_NOWAIT);
549 	} else {
550 		alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
551 		hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
552 		    M_WAITOK);
553 		hash->uh_hashsize = UMA_HASH_SIZE_INIT;
554 	}
555 	if (hash->uh_slab_hash) {
556 		bzero(hash->uh_slab_hash, alloc);
557 		hash->uh_hashmask = hash->uh_hashsize - 1;
558 		return (1);
559 	}
560 
561 	return (0);
562 }
563 
564 /*
565  * Expands the hash table for HASH zones.  This is done from zone_timeout
566  * to reduce collisions.  This must not be done in the regular allocation
567  * path, otherwise, we can recurse on the vm while allocating pages.
568  *
569  * Arguments:
570  *	oldhash  The hash you want to expand
571  *	newhash  The hash structure for the new table
572  *
573  * Returns:
574  *	1 if the entries were rehashed into newhash, 0 if nothing was done.
575  */
578 static int
579 hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
580 {
581 	uma_slab_t slab;
582 	int hval;
583 	int i;
584 
585 	if (!newhash->uh_slab_hash)
586 		return (0);
587 
588 	if (oldhash->uh_hashsize >= newhash->uh_hashsize)
589 		return (0);
590 
591 	/*
592 	 * I need to investigate hash algorithms for resizing without a
593 	 * full rehash.
594 	 */
595 
596 	for (i = 0; i < oldhash->uh_hashsize; i++)
597 		while (!SLIST_EMPTY(&oldhash->uh_slab_hash[i])) {
598 			slab = SLIST_FIRST(&oldhash->uh_slab_hash[i]);
599 			SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[i], us_hlink);
600 			hval = UMA_HASH(newhash, slab->us_data);
601 			SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
602 			    slab, us_hlink);
603 		}
604 
605 	return (1);
606 }
607 
608 /*
609  * Free the hash bucket to the appropriate backing store.
610  *
611  * Arguments:
612  *	hash  The hash structure whose backing storage we're freeing
614  *
615  * Returns:
616  *	Nothing
617  */
618 static void
619 hash_free(struct uma_hash *hash)
620 {
621 	if (hash->uh_slab_hash == NULL)
622 		return;
623 	if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
624 		zone_free_item(hashzone, hash->uh_slab_hash, NULL, SKIP_NONE);
625 	else
626 		free(hash->uh_slab_hash, M_UMAHASH);
627 }
628 
629 /*
630  * Frees all outstanding items in a bucket
631  *
632  * Arguments:
633  *	zone   The zone to free to, must be unlocked.
634  *	bucket The free/alloc bucket with items, cpu queue must be locked.
635  *
636  * Returns:
637  *	Nothing
638  */
639 
640 static void
641 bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
642 {
643 	int i;
644 
645 	if (bucket == NULL)
646 		return;
647 
648 	if (zone->uz_fini)
649 		for (i = 0; i < bucket->ub_cnt; i++)
650 			zone->uz_fini(bucket->ub_bucket[i], zone->uz_size);
651 	zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt);
652 	bucket->ub_cnt = 0;
653 }
654 
655 /*
656  * Drains the per cpu caches for a zone.
657  *
658  * NOTE: This may only be called while the zone is being torn down, and not
659  * during normal operation.  This is necessary in order that we do not have
660  * to migrate CPUs to drain the per-CPU caches.
661  *
662  * Arguments:
663  *	zone     The zone to drain, must be unlocked.
664  *
665  * Returns:
666  *	Nothing
667  */
668 static void
669 cache_drain(uma_zone_t zone)
670 {
671 	uma_cache_t cache;
672 	int cpu;
673 
674 	/*
675 	 * XXX: It is safe to not lock the per-CPU caches, because we're
676 	 * tearing down the zone anyway.  I.e., there will be no further use
677 	 * of the caches at this point.
678 	 *
679 	 * XXX: It would be good to be able to assert that the zone is being
680 	 * torn down to prevent improper use of cache_drain().
681 	 *
682 	 * XXX: We lock the zone before passing into bucket_cache_drain() as
683 	 * it is used elsewhere.  Should the tear-down path be made special
684 	 * there in some form?
685 	 */
686 	CPU_FOREACH(cpu) {
687 		cache = &zone->uz_cpu[cpu];
688 		bucket_drain(zone, cache->uc_allocbucket);
689 		bucket_drain(zone, cache->uc_freebucket);
690 		if (cache->uc_allocbucket != NULL)
691 			bucket_free(zone, cache->uc_allocbucket, NULL);
692 		if (cache->uc_freebucket != NULL)
693 			bucket_free(zone, cache->uc_freebucket, NULL);
694 		cache->uc_allocbucket = cache->uc_freebucket = NULL;
695 	}
696 	ZONE_LOCK(zone);
697 	bucket_cache_drain(zone);
698 	ZONE_UNLOCK(zone);
699 }
700 
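/*
 * Move the zone's target bucket size halfway back toward its minimum so that
 * per-CPU caches hold less memory.
 */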
701 static void
702 cache_shrink(uma_zone_t zone)
703 {
704 
705 	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
706 		return;
707 
708 	ZONE_LOCK(zone);
709 	zone->uz_count = (zone->uz_count_min + zone->uz_count) / 2;
710 	ZONE_UNLOCK(zone);
711 }
712 
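/*
 * Drain the caches of the current CPU only: buckets that still contain items
 * are moved to the zone's bucket list, empty ones are freed.  The caller
 * binds the thread to each CPU in turn (see cache_drain_safe()).
 */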
713 static void
714 cache_drain_safe_cpu(uma_zone_t zone)
715 {
716 	uma_cache_t cache;
717 	uma_bucket_t b1, b2;
718 
719 	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
720 		return;
721 
722 	b1 = b2 = NULL;
723 	ZONE_LOCK(zone);
724 	critical_enter();
725 	cache = &zone->uz_cpu[curcpu];
726 	if (cache->uc_allocbucket) {
727 		if (cache->uc_allocbucket->ub_cnt != 0)
728 			LIST_INSERT_HEAD(&zone->uz_buckets,
729 			    cache->uc_allocbucket, ub_link);
730 		else
731 			b1 = cache->uc_allocbucket;
732 		cache->uc_allocbucket = NULL;
733 	}
734 	if (cache->uc_freebucket) {
735 		if (cache->uc_freebucket->ub_cnt != 0)
736 			LIST_INSERT_HEAD(&zone->uz_buckets,
737 			    cache->uc_freebucket, ub_link);
738 		else
739 			b2 = cache->uc_freebucket;
740 		cache->uc_freebucket = NULL;
741 	}
742 	critical_exit();
743 	ZONE_UNLOCK(zone);
744 	if (b1)
745 		bucket_free(zone, b1, NULL);
746 	if (b2)
747 		bucket_free(zone, b2, NULL);
748 }
749 
750 /*
751  * Safely drain the per-CPU caches of a zone (or of all zones) into the
752  * zone's bucket cache.  This is an expensive call because it needs to
753  * bind to each CPU in turn and enter a critical section on it in order
754  * to safely access its cache buckets.  The zone lock must not be held
755  * when calling this function.
756  */
757 static void
758 cache_drain_safe(uma_zone_t zone)
759 {
760 	int cpu;
761 
762 	/*
763 	 * Polite bucket size shrinking was not enough; shrink aggressively.
764 	 */
765 	if (zone)
766 		cache_shrink(zone);
767 	else
768 		zone_foreach(cache_shrink);
769 
770 	CPU_FOREACH(cpu) {
771 		thread_lock(curthread);
772 		sched_bind(curthread, cpu);
773 		thread_unlock(curthread);
774 
775 		if (zone)
776 			cache_drain_safe_cpu(zone);
777 		else
778 			zone_foreach(cache_drain_safe_cpu);
779 	}
780 	thread_lock(curthread);
781 	sched_unbind(curthread);
782 	thread_unlock(curthread);
783 }
784 
785 /*
786  * Drain the cached buckets from a zone.  Expects a locked zone on entry.
787  */
788 static void
789 bucket_cache_drain(uma_zone_t zone)
790 {
791 	uma_bucket_t bucket;
792 
793 	/*
794 	 * Drain the bucket queues and free the buckets; we just keep two per
795 	 * CPU (alloc/free).
796 	 */
797 	while ((bucket = LIST_FIRST(&zone->uz_buckets)) != NULL) {
798 		LIST_REMOVE(bucket, ub_link);
799 		ZONE_UNLOCK(zone);
800 		bucket_drain(zone, bucket);
801 		bucket_free(zone, bucket, NULL);
802 		ZONE_LOCK(zone);
803 	}
804 
805 	/*
806 	 * Shrink the bucket size further.  The price of a single zone lock
807 	 * collision is probably lower than the price of a global cache drain.
808 	 */
809 	if (zone->uz_count > zone->uz_count_min)
810 		zone->uz_count--;
811 }
812 
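/*
 * Tear down a slab: run the keg's fini on the first "start" items, release
 * an offpage slab header if one is in use, and hand the slab's pages back
 * to the keg's page-free routine.
 */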
813 static void
814 keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
815 {
816 	uint8_t *mem;
817 	int i;
818 	uint8_t flags;
819 
820 	CTR4(KTR_UMA, "keg_free_slab keg %s(%p) slab %p, returning %d bytes",
821 	    keg->uk_name, keg, slab, PAGE_SIZE * keg->uk_ppera);
822 
823 	mem = slab->us_data;
824 	flags = slab->us_flags;
825 	i = start;
826 	if (keg->uk_fini != NULL) {
827 		for (i--; i > -1; i--)
828 			keg->uk_fini(slab->us_data + (keg->uk_rsize * i),
829 			    keg->uk_size);
830 	}
831 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
832 		zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
833 	keg->uk_freef(mem, PAGE_SIZE * keg->uk_ppera, flags);
834 }
835 
836 /*
837  * Frees pages from a keg back to the system.  This is done on demand from
838  * the pageout daemon.
839  *
840  * Returns nothing.
841  */
842 static void
843 keg_drain(uma_keg_t keg)
844 {
845 	struct slabhead freeslabs = { 0 };
846 	uma_slab_t slab, tmp;
847 
848 	/*
849 	 * We don't want to take pages from statically allocated kegs at this
850 	 * time.
851 	 */
852 	if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
853 		return;
854 
855 	CTR3(KTR_UMA, "keg_drain %s(%p) free items: %u",
856 	    keg->uk_name, keg, keg->uk_free);
857 	KEG_LOCK(keg);
858 	if (keg->uk_free == 0)
859 		goto finished;
860 
861 	LIST_FOREACH_SAFE(slab, &keg->uk_free_slab, us_link, tmp) {
862 		/* We have nowhere to free these to. */
863 		if (slab->us_flags & UMA_SLAB_BOOT)
864 			continue;
865 
866 		LIST_REMOVE(slab, us_link);
867 		keg->uk_pages -= keg->uk_ppera;
868 		keg->uk_free -= keg->uk_ipers;
869 
870 		if (keg->uk_flags & UMA_ZONE_HASH)
871 			UMA_HASH_REMOVE(&keg->uk_hash, slab, slab->us_data);
872 
873 		SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);
874 	}
875 finished:
876 	KEG_UNLOCK(keg);
877 
878 	while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
879 		SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
880 		keg_free_slab(keg, slab, keg->uk_ipers);
881 	}
882 }
883 
884 static void
885 zone_drain_wait(uma_zone_t zone, int waitok)
886 {
887 
888 	/*
889 	 * Set draining to interlock with zone_dtor() so we can release our
890 	 * locks as we go.  Only dtor() should do a WAITOK call since it
891 	 * is the only call that knows the structure will still be available
892 	 * when it wakes up.
893 	 */
894 	ZONE_LOCK(zone);
895 	while (zone->uz_flags & UMA_ZFLAG_DRAINING) {
896 		if (waitok == M_NOWAIT)
897 			goto out;
898 		msleep(zone, zone->uz_lockptr, PVM, "zonedrain", 1);
899 	}
900 	zone->uz_flags |= UMA_ZFLAG_DRAINING;
901 	bucket_cache_drain(zone);
902 	ZONE_UNLOCK(zone);
903 	/*
904 	 * The DRAINING flag protects us from being freed while
905 	 * we're running.  Normally the uma_rwlock would protect us but we
906 	 * must be able to release and acquire the right lock for each keg.
907 	 */
908 	zone_foreach_keg(zone, &keg_drain);
909 	ZONE_LOCK(zone);
910 	zone->uz_flags &= ~UMA_ZFLAG_DRAINING;
911 	wakeup(zone);
912 out:
913 	ZONE_UNLOCK(zone);
914 }
915 
916 void
917 zone_drain(uma_zone_t zone)
918 {
919 
920 	zone_drain_wait(zone, M_NOWAIT);
921 }
922 
923 /*
924  * Allocate a new slab for a keg.  This does not insert the slab onto a list.
925  *
926  * Arguments:
927  *	wait  Shall we wait?
928  *
929  * Returns:
930  *	The slab that was allocated or NULL if there is no memory and the
931  *	caller specified M_NOWAIT.
932  */
933 static uma_slab_t
934 keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int wait)
935 {
936 	uma_alloc allocf;
937 	uma_slab_t slab;
938 	uint8_t *mem;
939 	uint8_t flags;
940 	int i;
941 
942 	mtx_assert(&keg->uk_lock, MA_OWNED);
943 	slab = NULL;
944 	mem = NULL;
945 
946 	allocf = keg->uk_allocf;
947 	KEG_UNLOCK(keg);
948 
949 	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
950 		slab = zone_alloc_item(keg->uk_slabzone, NULL, wait);
951 		if (slab == NULL)
952 			goto out;
953 	}
954 
955 	/*
956 	 * This reproduces the old vm_zone behavior of zero filling pages the
957 	 * first time they are added to a zone.
958 	 *
959 	 * Malloced items are zeroed in uma_zalloc.
960 	 */
961 
962 	if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
963 		wait |= M_ZERO;
964 	else
965 		wait &= ~M_ZERO;
966 
967 	if (keg->uk_flags & UMA_ZONE_NODUMP)
968 		wait |= M_NODUMP;
969 
970 	/* zone is passed for legacy reasons. */
971 	mem = allocf(zone, keg->uk_ppera * PAGE_SIZE, &flags, wait);
972 	if (mem == NULL) {
973 		if (keg->uk_flags & UMA_ZONE_OFFPAGE)
974 			zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
975 		slab = NULL;
976 		goto out;
977 	}
978 
979 	/* Point the slab into the allocated memory */
980 	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
981 		slab = (uma_slab_t )(mem + keg->uk_pgoff);
982 
983 	if (keg->uk_flags & UMA_ZONE_VTOSLAB)
984 		for (i = 0; i < keg->uk_ppera; i++)
985 			vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);
986 
987 	slab->us_keg = keg;
988 	slab->us_data = mem;
989 	slab->us_freecount = keg->uk_ipers;
990 	slab->us_flags = flags;
991 	BIT_FILL(SLAB_SETSIZE, &slab->us_free);
992 #ifdef INVARIANTS
993 	BIT_ZERO(SLAB_SETSIZE, &slab->us_debugfree);
994 #endif
995 
996 	if (keg->uk_init != NULL) {
997 		for (i = 0; i < keg->uk_ipers; i++)
998 			if (keg->uk_init(slab->us_data + (keg->uk_rsize * i),
999 			    keg->uk_size, wait) != 0)
1000 				break;
1001 		if (i != keg->uk_ipers) {
1002 			keg_free_slab(keg, slab, i);
1003 			slab = NULL;
1004 			goto out;
1005 		}
1006 	}
1007 out:
1008 	KEG_LOCK(keg);
1009 
1010 	CTR3(KTR_UMA, "keg_alloc_slab: allocated slab %p for %s(%p)",
1011 	    slab, keg->uk_name, keg);
1012 
1013 	if (slab != NULL) {
1014 		if (keg->uk_flags & UMA_ZONE_HASH)
1015 			UMA_HASH_INSERT(&keg->uk_hash, slab, mem);
1016 
1017 		keg->uk_pages += keg->uk_ppera;
1018 		keg->uk_free += keg->uk_ipers;
1019 	}
1020 
1021 	return (slab);
1022 }
1023 
1024 /*
1025  * This function is intended to be used early on in place of page_alloc() so
1026  * that we may use the boot time page cache to satisfy allocations before
1027  * the VM is ready.
1028  */
1029 static void *
1030 startup_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *pflag, int wait)
1031 {
1032 	uma_keg_t keg;
1033 	void *mem;
1034 	int pages;
1035 
1036 	keg = zone_first_keg(zone);
1037 	pages = howmany(bytes, PAGE_SIZE);
1038 	KASSERT(pages > 0, ("startup_alloc can't reserve 0 pages\n"));
1039 
1040 	/*
1041 	 * Check our small startup cache to see if it has pages remaining.
1042 	 */
1043 	mtx_lock(&uma_boot_pages_mtx);
1044 	if (pages <= boot_pages) {
1045 		mem = bootmem;
1046 		boot_pages -= pages;
1047 		bootmem += pages * PAGE_SIZE;
1048 		mtx_unlock(&uma_boot_pages_mtx);
1049 		*pflag = UMA_SLAB_BOOT;
1050 		return (mem);
1051 	}
1052 	mtx_unlock(&uma_boot_pages_mtx);
1053 	if (booted < UMA_STARTUP2)
1054 		panic("UMA: Increase vm.boot_pages");
1055 	/*
1056 	 * Now that we've booted reset these users to their real allocator.
1057 	 */
1058 #ifdef UMA_MD_SMALL_ALLOC
1059 	keg->uk_allocf = (keg->uk_ppera > 1) ? page_alloc : uma_small_alloc;
1060 #else
1061 	keg->uk_allocf = page_alloc;
1062 #endif
1063 	return keg->uk_allocf(zone, bytes, pflag, wait);
1064 }
1065 
1066 /*
1067  * Allocates a number of pages from the system
1068  *
1069  * Arguments:
1070  *	bytes  The number of bytes requested
1071  *	wait  Shall we wait?
1072  *
1073  * Returns:
1074  *	A pointer to the allocated memory or possibly
1075  *	NULL if M_NOWAIT is set.
1076  */
1077 static void *
1078 page_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *pflag, int wait)
1079 {
1080 	void *p;	/* Returned page */
1081 
1082 	*pflag = UMA_SLAB_KMEM;
1083 	p = (void *) kmem_malloc(kmem_arena, bytes, wait);
1084 
1085 	return (p);
1086 }
1087 
1088 /*
1089  * Allocates a number of pages from within an object
1090  *
1091  * Arguments:
1092  *	bytes  The number of bytes requested
1093  *	wait   Shall we wait?
1094  *
1095  * Returns:
1096  *	A pointer to the allocated memory or possibly
1097  *	NULL if M_NOWAIT is set.
1098  */
1099 static void *
1100 noobj_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *flags, int wait)
1101 {
1102 	TAILQ_HEAD(, vm_page) alloctail;
1103 	u_long npages;
1104 	vm_offset_t retkva, zkva;
1105 	vm_page_t p, p_next;
1106 	uma_keg_t keg;
1107 
1108 	TAILQ_INIT(&alloctail);
1109 	keg = zone_first_keg(zone);
1110 
1111 	npages = howmany(bytes, PAGE_SIZE);
1112 	while (npages > 0) {
1113 		p = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT |
1114 		    VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
1115 		    ((wait & M_WAITOK) != 0 ? VM_ALLOC_WAITOK :
1116 		    VM_ALLOC_NOWAIT));
1117 		if (p != NULL) {
1118 			/*
1119 			 * Since the page does not belong to an object, its
1120 			 * listq is unused.
1121 			 */
1122 			TAILQ_INSERT_TAIL(&alloctail, p, listq);
1123 			npages--;
1124 			continue;
1125 		}
1126 		/*
1127 		 * Page allocation failed, free intermediate pages and
1128 		 * exit.
1129 		 */
1130 		TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
1131 			vm_page_unwire(p, PQ_NONE);
1132 			vm_page_free(p);
1133 		}
1134 		return (NULL);
1135 	}
1136 	*flags = UMA_SLAB_PRIV;
1137 	zkva = keg->uk_kva +
1138 	    atomic_fetchadd_long(&keg->uk_offset, round_page(bytes));
1139 	retkva = zkva;
1140 	TAILQ_FOREACH(p, &alloctail, listq) {
1141 		pmap_qenter(zkva, &p, 1);
1142 		zkva += PAGE_SIZE;
1143 	}
1144 
1145 	return ((void *)retkva);
1146 }
1147 
1148 /*
1149  * Frees a number of pages to the system
1150  *
1151  * Arguments:
1152  *	mem   A pointer to the memory to be freed
1153  *	size  The size of the memory being freed
1154  *	flags The original p->us_flags field
1155  *
1156  * Returns:
1157  *	Nothing
1158  */
1159 static void
1160 page_free(void *mem, vm_size_t size, uint8_t flags)
1161 {
1162 	struct vmem *vmem;
1163 
1164 	if (flags & UMA_SLAB_KMEM)
1165 		vmem = kmem_arena;
1166 	else if (flags & UMA_SLAB_KERNEL)
1167 		vmem = kernel_arena;
1168 	else
1169 		panic("UMA: page_free used with invalid flags %x", flags);
1170 
1171 	kmem_free(vmem, (vm_offset_t)mem, size);
1172 }
1173 
1174 /*
1175  * Zero fill initializer
1176  *
1177  * Arguments/Returns follow uma_init specifications
1178  */
1179 static int
1180 zero_init(void *mem, int size, int flags)
1181 {
1182 	bzero(mem, size);
1183 	return (0);
1184 }
1185 
1186 /*
1187  * Finish creating a small uma keg.  This calculates ipers, and the keg size.
1188  *
1189  * Arguments
1190  *	keg  The zone we should initialize
1191  *
1192  * Returns
1193  *	Nothing
1194  */
1195 static void
1196 keg_small_init(uma_keg_t keg)
1197 {
1198 	u_int rsize;
1199 	u_int memused;
1200 	u_int wastedspace;
1201 	u_int shsize;
1202 	u_int slabsize;
1203 
1204 	if (keg->uk_flags & UMA_ZONE_PCPU) {
1205 		u_int ncpus = (mp_maxid + 1) ? (mp_maxid + 1) : MAXCPU;
1206 
1207 		slabsize = sizeof(struct pcpu);
1208 		keg->uk_ppera = howmany(ncpus * sizeof(struct pcpu),
1209 		    PAGE_SIZE);
1210 	} else {
1211 		slabsize = UMA_SLAB_SIZE;
1212 		keg->uk_ppera = 1;
1213 	}
1214 
1215 	/*
1216 	 * Calculate the size of each allocation (rsize) according to
1217 	 * alignment.  If the requested size is smaller than we have
1218 	 * allocation bits for we round it up.
1219 	 */
1220 	rsize = keg->uk_size;
1221 	if (rsize < slabsize / SLAB_SETSIZE)
1222 		rsize = slabsize / SLAB_SETSIZE;
1223 	if (rsize & keg->uk_align)
1224 		rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1);
1225 	keg->uk_rsize = rsize;
1226 
1227 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0 ||
1228 	    keg->uk_rsize < sizeof(struct pcpu),
1229 	    ("%s: size %u too large", __func__, keg->uk_rsize));
1230 
1231 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
1232 		shsize = 0;
1233 	else
1234 		shsize = sizeof(struct uma_slab);
1235 
1236 	keg->uk_ipers = (slabsize - shsize) / rsize;
1237 	KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
1238 	    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
1239 
1240 	memused = keg->uk_ipers * rsize + shsize;
1241 	wastedspace = slabsize - memused;
1242 
1243 	/*
1244 	 * We can't do OFFPAGE if we're internal or if we've been
1245 	 * asked to not go to the VM for buckets.  If we do this we
1246 	 * may end up going to the VM for slabs, which we do not
1247 	 * want to do if we're UMA_ZFLAG_CACHEONLY as a result
1248 	 * of UMA_ZONE_VM, which clearly forbids it.
1249 	 */
1250 	if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
1251 	    (keg->uk_flags & UMA_ZFLAG_CACHEONLY))
1252 		return;
1253 
1254 	/*
1255 	 * See if using an OFFPAGE slab will limit our waste.  Only do
1256 	 * this if it permits more items per-slab.
1257 	 *
1258 	 * XXX We could try growing slabsize to limit max waste as well.
1259 	 * Historically this was not done because the VM could not
1260 	 * efficiently handle contiguous allocations.
1261 	 */
1262 	if ((wastedspace >= slabsize / UMA_MAX_WASTE) &&
1263 	    (keg->uk_ipers < (slabsize / keg->uk_rsize))) {
1264 		keg->uk_ipers = slabsize / keg->uk_rsize;
1265 		KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
1266 		    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
1267 		CTR6(KTR_UMA, "UMA decided we need offpage slab headers for "
1268 		    "keg: %s(%p), calculated wastedspace = %d, "
1269 		    "maximum wasted space allowed = %d, "
1270 		    "calculated ipers = %d, "
1271 		    "new wasted space = %d\n", keg->uk_name, keg, wastedspace,
1272 		    slabsize / UMA_MAX_WASTE, keg->uk_ipers,
1273 		    slabsize - keg->uk_ipers * keg->uk_rsize);
1274 		keg->uk_flags |= UMA_ZONE_OFFPAGE;
1275 	}
1276 
1277 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
1278 	    (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1279 		keg->uk_flags |= UMA_ZONE_HASH;
1280 }
1281 
1282 /*
1283  * Finish creating a large (> UMA_SLAB_SIZE) uma kegs.  Just give in and do
1284  * Finish creating a large (> UMA_SLAB_SIZE) uma keg.  Just give in and do
1285  * more complicated.
1286  *
1287  * Arguments
1288  *	keg  The keg we should initialize
1289  *
1290  * Returns
1291  *	Nothing
1292  */
1293 static void
1294 keg_large_init(uma_keg_t keg)
1295 {
1296 	u_int shsize;
1297 
1298 	KASSERT(keg != NULL, ("Keg is null in keg_large_init"));
1299 	KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0,
1300 	    ("keg_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY keg"));
1301 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
1302 	    ("%s: Cannot large-init a UMA_ZONE_PCPU keg", __func__));
1303 
1304 	keg->uk_ppera = howmany(keg->uk_size, PAGE_SIZE);
1305 	keg->uk_ipers = 1;
1306 	keg->uk_rsize = keg->uk_size;
1307 
1308 	/* Check whether we have enough space to not do OFFPAGE. */
1309 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) == 0) {
1310 		shsize = sizeof(struct uma_slab);
1311 		if (shsize & UMA_ALIGN_PTR)
1312 			shsize = (shsize & ~UMA_ALIGN_PTR) +
1313 			    (UMA_ALIGN_PTR + 1);
1314 
1315 		if (PAGE_SIZE * keg->uk_ppera - keg->uk_rsize < shsize) {
1316 			/*
1317 			 * We can't do OFFPAGE if we're internal, in which case
1318 			 * we need an extra page per allocation to contain the
1319 			 * slab header.
1320 			 */
1321 			if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) == 0)
1322 				keg->uk_flags |= UMA_ZONE_OFFPAGE;
1323 			else
1324 				keg->uk_ppera++;
1325 		}
1326 	}
1327 
1328 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
1329 	    (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1330 		keg->uk_flags |= UMA_ZONE_HASH;
1331 }
1332 
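/*
 * Finish creating a UMA_ZONE_CACHESPREAD keg.  Items are padded and spread
 * across pages so that consecutive items begin on different alignment
 * boundaries rather than always colliding on the same one.
 */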
1333 static void
1334 keg_cachespread_init(uma_keg_t keg)
1335 {
1336 	int alignsize;
1337 	int trailer;
1338 	int pages;
1339 	int rsize;
1340 
1341 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
1342 	    ("%s: Cannot cachespread-init a UMA_ZONE_PCPU keg", __func__));
1343 
1344 	alignsize = keg->uk_align + 1;
1345 	rsize = keg->uk_size;
1346 	/*
1347 	 * We want one item to start on every align boundary in a page.  To
1348 	 * do this we will span pages.  We will also extend the item by the
1349 	 * size of align if it is an even multiple of align.  Otherwise, it
1350 	 * would fall on the same boundary every time.
1351 	 */
1352 	if (rsize & keg->uk_align)
1353 		rsize = (rsize & ~keg->uk_align) + alignsize;
1354 	if ((rsize & alignsize) == 0)
1355 		rsize += alignsize;
1356 	trailer = rsize - keg->uk_size;
1357 	pages = (rsize * (PAGE_SIZE / alignsize)) / PAGE_SIZE;
1358 	pages = MIN(pages, (128 * 1024) / PAGE_SIZE);
1359 	keg->uk_rsize = rsize;
1360 	keg->uk_ppera = pages;
1361 	keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize;
1362 	keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
1363 	KASSERT(keg->uk_ipers <= SLAB_SETSIZE,
1364 	    ("%s: keg->uk_ipers too high(%d) increase max_ipers", __func__,
1365 	    keg->uk_ipers));
1366 }
1367 
1368 /*
1369  * Keg header ctor.  This initializes all fields, locks, etc.  And inserts
1370  * the keg onto the global keg list.
1371  *
1372  * Arguments/Returns follow uma_ctor specifications
1373  *	udata  Actually uma_kctor_args
1374  */
1375 static int
1376 keg_ctor(void *mem, int size, void *udata, int flags)
1377 {
1378 	struct uma_kctor_args *arg = udata;
1379 	uma_keg_t keg = mem;
1380 	uma_zone_t zone;
1381 
1382 	bzero(keg, size);
1383 	keg->uk_size = arg->size;
1384 	keg->uk_init = arg->uminit;
1385 	keg->uk_fini = arg->fini;
1386 	keg->uk_align = arg->align;
1387 	keg->uk_free = 0;
1388 	keg->uk_reserve = 0;
1389 	keg->uk_pages = 0;
1390 	keg->uk_flags = arg->flags;
1391 	keg->uk_slabzone = NULL;
1392 
1393 	/*
1394 	 * The master zone is passed to us at keg-creation time.
1395 	 */
1396 	zone = arg->zone;
1397 	keg->uk_name = zone->uz_name;
1398 
1399 	if (arg->flags & UMA_ZONE_VM)
1400 		keg->uk_flags |= UMA_ZFLAG_CACHEONLY;
1401 
1402 	if (arg->flags & UMA_ZONE_ZINIT)
1403 		keg->uk_init = zero_init;
1404 
1405 	if (arg->flags & UMA_ZONE_MALLOC)
1406 		keg->uk_flags |= UMA_ZONE_VTOSLAB;
1407 
1408 	if (arg->flags & UMA_ZONE_PCPU)
1409 #ifdef SMP
1410 		keg->uk_flags |= UMA_ZONE_OFFPAGE;
1411 #else
1412 		keg->uk_flags &= ~UMA_ZONE_PCPU;
1413 #endif
1414 
1415 	if (keg->uk_flags & UMA_ZONE_CACHESPREAD) {
1416 		keg_cachespread_init(keg);
1417 	} else {
1418 		if (keg->uk_size > (UMA_SLAB_SIZE - sizeof(struct uma_slab)))
1419 			keg_large_init(keg);
1420 		else
1421 			keg_small_init(keg);
1422 	}
1423 
1424 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
1425 		keg->uk_slabzone = slabzone;
1426 
1427 	/*
1428 	 * If we haven't booted yet we need allocations to go through the
1429 	 * startup cache until the vm is ready.
1430 	 */
1431 	if (booted < UMA_STARTUP2)
1432 		keg->uk_allocf = startup_alloc;
1433 #ifdef UMA_MD_SMALL_ALLOC
1434 	else if (keg->uk_ppera == 1)
1435 		keg->uk_allocf = uma_small_alloc;
1436 #endif
1437 	else
1438 		keg->uk_allocf = page_alloc;
1439 #ifdef UMA_MD_SMALL_ALLOC
1440 	if (keg->uk_ppera == 1)
1441 		keg->uk_freef = uma_small_free;
1442 	else
1443 #endif
1444 		keg->uk_freef = page_free;
1445 
1446 	/*
1447 	 * Initialize keg's lock
1448 	 */
1449 	KEG_LOCK_INIT(keg, (arg->flags & UMA_ZONE_MTXCLASS));
1450 
1451 	/*
1452 	 * If we're putting the slab header in the actual page we need to
1453 	 * figure out where in each page it goes.  This calculates a right
1454 	 * justified offset into the memory on an ALIGN_PTR boundary.
1455 	 */
1456 	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) {
1457 		u_int totsize;
1458 
1459 		/* Size of the slab struct and free list */
1460 		totsize = sizeof(struct uma_slab);
1461 
1462 		if (totsize & UMA_ALIGN_PTR)
1463 			totsize = (totsize & ~UMA_ALIGN_PTR) +
1464 			    (UMA_ALIGN_PTR + 1);
1465 		keg->uk_pgoff = (PAGE_SIZE * keg->uk_ppera) - totsize;
1466 
1467 		/*
1468 		 * The only way the following is possible is if with our
1469 		 * UMA_ALIGN_PTR adjustments we are now bigger than
1470 		 * UMA_SLAB_SIZE.  I haven't checked whether this is
1471 		 * mathematically possible for all cases, so we make
1472 		 * sure here anyway.
1473 		 */
1474 		totsize = keg->uk_pgoff + sizeof(struct uma_slab);
1475 		if (totsize > PAGE_SIZE * keg->uk_ppera) {
1476 			printf("zone %s ipers %d rsize %d size %d\n",
1477 			    zone->uz_name, keg->uk_ipers, keg->uk_rsize,
1478 			    keg->uk_size);
1479 			panic("UMA slab won't fit.");
1480 		}
1481 	}
1482 
1483 	if (keg->uk_flags & UMA_ZONE_HASH)
1484 		hash_alloc(&keg->uk_hash);
1485 
1486 	CTR5(KTR_UMA, "keg_ctor %p zone %s(%p) out %d free %d\n",
1487 	    keg, zone->uz_name, zone,
1488 	    (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free,
1489 	    keg->uk_free);
1490 
1491 	LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);
1492 
1493 	rw_wlock(&uma_rwlock);
1494 	LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
1495 	rw_wunlock(&uma_rwlock);
1496 	return (0);
1497 }
1498 
1499 /*
1500  * Zone header ctor.  This initializes all fields, locks, etc.
1501  *
1502  * Arguments/Returns follow uma_ctor specifications
1503  *	udata  Actually uma_zctor_args
1504  */
1505 static int
1506 zone_ctor(void *mem, int size, void *udata, int flags)
1507 {
1508 	struct uma_zctor_args *arg = udata;
1509 	uma_zone_t zone = mem;
1510 	uma_zone_t z;
1511 	uma_keg_t keg;
1512 
1513 	bzero(zone, size);
1514 	zone->uz_name = arg->name;
1515 	zone->uz_ctor = arg->ctor;
1516 	zone->uz_dtor = arg->dtor;
1517 	zone->uz_slab = zone_fetch_slab;
1518 	zone->uz_init = NULL;
1519 	zone->uz_fini = NULL;
1520 	zone->uz_allocs = 0;
1521 	zone->uz_frees = 0;
1522 	zone->uz_fails = 0;
1523 	zone->uz_sleeps = 0;
1524 	zone->uz_count = 0;
1525 	zone->uz_count_min = 0;
1526 	zone->uz_flags = 0;
1527 	zone->uz_warning = NULL;
1528 	timevalclear(&zone->uz_ratecheck);
1529 	keg = arg->keg;
1530 
1531 	ZONE_LOCK_INIT(zone, (arg->flags & UMA_ZONE_MTXCLASS));
1532 
1533 	/*
1534 	 * This is a pure cache zone, no kegs.
1535 	 */
1536 	if (arg->import) {
1537 		if (arg->flags & UMA_ZONE_VM)
1538 			arg->flags |= UMA_ZFLAG_CACHEONLY;
1539 		zone->uz_flags = arg->flags;
1540 		zone->uz_size = arg->size;
1541 		zone->uz_import = arg->import;
1542 		zone->uz_release = arg->release;
1543 		zone->uz_arg = arg->arg;
1544 		zone->uz_lockptr = &zone->uz_lock;
1545 		rw_wlock(&uma_rwlock);
1546 		LIST_INSERT_HEAD(&uma_cachezones, zone, uz_link);
1547 		rw_wunlock(&uma_rwlock);
1548 		goto out;
1549 	}
1550 
1551 	/*
1552 	 * Use the regular zone/keg/slab allocator.
1553 	 */
1554 	zone->uz_import = (uma_import)zone_import;
1555 	zone->uz_release = (uma_release)zone_release;
1556 	zone->uz_arg = zone;
1557 
1558 	if (arg->flags & UMA_ZONE_SECONDARY) {
1559 		KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
1560 		zone->uz_init = arg->uminit;
1561 		zone->uz_fini = arg->fini;
1562 		zone->uz_lockptr = &keg->uk_lock;
1563 		zone->uz_flags |= UMA_ZONE_SECONDARY;
1564 		rw_wlock(&uma_rwlock);
1565 		ZONE_LOCK(zone);
1566 		LIST_FOREACH(z, &keg->uk_zones, uz_link) {
1567 			if (LIST_NEXT(z, uz_link) == NULL) {
1568 				LIST_INSERT_AFTER(z, zone, uz_link);
1569 				break;
1570 			}
1571 		}
1572 		ZONE_UNLOCK(zone);
1573 		rw_wunlock(&uma_rwlock);
1574 	} else if (keg == NULL) {
1575 		if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
1576 		    arg->align, arg->flags)) == NULL)
1577 			return (ENOMEM);
1578 	} else {
1579 		struct uma_kctor_args karg;
1580 		int error;
1581 
1582 		/* We should only be here from uma_startup() */
1583 		karg.size = arg->size;
1584 		karg.uminit = arg->uminit;
1585 		karg.fini = arg->fini;
1586 		karg.align = arg->align;
1587 		karg.flags = arg->flags;
1588 		karg.zone = zone;
1589 		error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
1590 		    flags);
1591 		if (error)
1592 			return (error);
1593 	}
1594 
1595 	/*
1596 	 * Link in the first keg.
1597 	 */
1598 	zone->uz_klink.kl_keg = keg;
1599 	LIST_INSERT_HEAD(&zone->uz_kegs, &zone->uz_klink, kl_link);
1600 	zone->uz_lockptr = &keg->uk_lock;
1601 	zone->uz_size = keg->uk_size;
1602 	zone->uz_flags |= (keg->uk_flags &
1603 	    (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT));
1604 
1605 	/*
1606 	 * Some internal zones don't have room allocated for the per cpu
1607 	 * caches.  If we're internal, bail out here.
1608 	 */
1609 	if (keg->uk_flags & UMA_ZFLAG_INTERNAL) {
1610 		KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0,
1611 		    ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
1612 		return (0);
1613 	}
1614 
1615 out:
1616 	if ((arg->flags & UMA_ZONE_MAXBUCKET) == 0)
1617 		zone->uz_count = bucket_select(zone->uz_size);
1618 	else
1619 		zone->uz_count = BUCKET_MAX;
1620 	zone->uz_count_min = zone->uz_count;
1621 
1622 	return (0);
1623 }
1624 
1625 /*
1626  * Keg header dtor.  This frees all data, destroys locks, frees the hash
1627  * table and removes the keg from the global list.
1628  *
1629  * Arguments/Returns follow uma_dtor specifications
1630  *	udata  unused
1631  */
1632 static void
1633 keg_dtor(void *arg, int size, void *udata)
1634 {
1635 	uma_keg_t keg;
1636 
1637 	keg = (uma_keg_t)arg;
1638 	KEG_LOCK(keg);
1639 	if (keg->uk_free != 0) {
1640 		printf("Freed UMA keg (%s) was not empty (%d items). "
1641 		    "Lost %d pages of memory.\n",
1642 		    keg->uk_name ? keg->uk_name : "",
1643 		    keg->uk_free, keg->uk_pages);
1644 	}
1645 	KEG_UNLOCK(keg);
1646 
1647 	hash_free(&keg->uk_hash);
1648 
1649 	KEG_LOCK_FINI(keg);
1650 }
1651 
1652 /*
1653  * Zone header dtor.
1654  *
1655  * Arguments/Returns follow uma_dtor specifications
1656  *	udata  unused
1657  */
1658 static void
1659 zone_dtor(void *arg, int size, void *udata)
1660 {
1661 	uma_klink_t klink;
1662 	uma_zone_t zone;
1663 	uma_keg_t keg;
1664 
1665 	zone = (uma_zone_t)arg;
1666 	keg = zone_first_keg(zone);
1667 
1668 	if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
1669 		cache_drain(zone);
1670 
1671 	rw_wlock(&uma_rwlock);
1672 	LIST_REMOVE(zone, uz_link);
1673 	rw_wunlock(&uma_rwlock);
1674 	/*
1675 	 * XXX there are some races here where the zone can be drained but
1676 	 * the zone lock released and then refilled before we remove it...
1677 	 * we don't care for now.
1679 	 */
1680 	zone_drain_wait(zone, M_WAITOK);
1681 	/*
1682 	 * Unlink all of our kegs.
1683 	 */
1684 	while ((klink = LIST_FIRST(&zone->uz_kegs)) != NULL) {
1685 		klink->kl_keg = NULL;
1686 		LIST_REMOVE(klink, kl_link);
1687 		if (klink == &zone->uz_klink)
1688 			continue;
1689 		free(klink, M_TEMP);
1690 	}
1691 	/*
1692 	 * We only destroy kegs from non secondary zones.
1693 	 */
1694 	if (keg != NULL && (zone->uz_flags & UMA_ZONE_SECONDARY) == 0)  {
1695 		rw_wlock(&uma_rwlock);
1696 		LIST_REMOVE(keg, uk_link);
1697 		rw_wunlock(&uma_rwlock);
1698 		zone_free_item(kegs, keg, NULL, SKIP_NONE);
1699 	}
1700 	ZONE_LOCK_FINI(zone);
1701 }
1702 
1703 /*
1704  * Traverses every zone in the system and calls a callback
1705  *
1706  * Arguments:
1707  *	zfunc  A pointer to a function which accepts a zone
1708  *		as an argument.
1709  *
1710  * Returns:
1711  *	Nothing
1712  */
1713 static void
1714 zone_foreach(void (*zfunc)(uma_zone_t))
1715 {
1716 	uma_keg_t keg;
1717 	uma_zone_t zone;
1718 
1719 	rw_rlock(&uma_rwlock);
1720 	LIST_FOREACH(keg, &uma_kegs, uk_link) {
1721 		LIST_FOREACH(zone, &keg->uk_zones, uz_link)
1722 			zfunc(zone);
1723 	}
1724 	rw_runlock(&uma_rwlock);
1725 }
1726 
1727 /* Public functions */
1728 /* See uma.h */
1729 void
1730 uma_startup(void *mem, int npages)
1731 {
1732 	struct uma_zctor_args args;
1733 
1734 	rw_init(&uma_rwlock, "UMA lock");
1735 
1736 	/* "manually" create the initial zone */
1737 	memset(&args, 0, sizeof(args));
1738 	args.name = "UMA Kegs";
1739 	args.size = sizeof(struct uma_keg);
1740 	args.ctor = keg_ctor;
1741 	args.dtor = keg_dtor;
1742 	args.uminit = zero_init;
1743 	args.fini = NULL;
1744 	args.keg = &masterkeg;
1745 	args.align = 32 - 1;
1746 	args.flags = UMA_ZFLAG_INTERNAL;
1747 	/* The initial zone has no per-CPU queues so it's smaller */
1748 	zone_ctor(kegs, sizeof(struct uma_zone), &args, M_WAITOK);
1749 
1750 	mtx_init(&uma_boot_pages_mtx, "UMA boot pages", NULL, MTX_DEF);
1751 	bootmem = mem;
1752 	boot_pages = npages;
1753 
1754 	args.name = "UMA Zones";
1755 	args.size = sizeof(struct uma_zone) +
1756 	    (sizeof(struct uma_cache) * (mp_maxid + 1));
1757 	args.ctor = zone_ctor;
1758 	args.dtor = zone_dtor;
1759 	args.uminit = zero_init;
1760 	args.fini = NULL;
1761 	args.keg = NULL;
1762 	args.align = 32 - 1;
1763 	args.flags = UMA_ZFLAG_INTERNAL;
1764 	/* The initial zone has no per-CPU queues so it's smaller */
1765 	zone_ctor(zones, sizeof(struct uma_zone), &args, M_WAITOK);
1766 
1767 	/* Now make a zone for slab headers */
1768 	slabzone = uma_zcreate("UMA Slabs",
1769 				sizeof(struct uma_slab),
1770 				NULL, NULL, NULL, NULL,
1771 				UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
1772 
1773 	hashzone = uma_zcreate("UMA Hash",
1774 	    sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
1775 	    NULL, NULL, NULL, NULL,
1776 	    UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
1777 
1778 	bucket_init();
1779 
1780 	booted = UMA_STARTUP;
1781 }
1782 
1783 /* see uma.h */
1784 void
1785 uma_startup2(void)
1786 {
1787 	booted = UMA_STARTUP2;
1788 	bucket_enable();
1789 	sx_init(&uma_drain_lock, "umadrain");
1790 }
1791 
1792 /*
1793  * Initialize our callout handle.
1794  */
1796 
1797 static void
1798 uma_startup3(void)
1799 {
1800 
1801 	callout_init(&uma_callout, 1);
1802 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
1803 }
1804 
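/*
 * Allocate and construct a keg from the kegs zone with the supplied
 * parameters; used by zone_ctor() when a new zone needs a backing keg.
 */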
1805 static uma_keg_t
1806 uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
1807 		int align, uint32_t flags)
1808 {
1809 	struct uma_kctor_args args;
1810 
1811 	args.size = size;
1812 	args.uminit = uminit;
1813 	args.fini = fini;
1814 	args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align;
1815 	args.flags = flags;
1816 	args.zone = zone;
1817 	return (zone_alloc_item(kegs, &args, M_WAITOK));
1818 }
1819 
1820 /* See uma.h */
1821 void
1822 uma_set_align(int align)
1823 {
1824 
1825 	if (align != UMA_ALIGN_CACHE)
1826 		uma_align_cache = align;
1827 }
1828 
1829 /* See uma.h */
1830 uma_zone_t
1831 uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
1832 		uma_init uminit, uma_fini fini, int align, uint32_t flags)
1833 
1834 {
1835 	struct uma_zctor_args args;
1836 	uma_zone_t res;
1837 	bool locked;
1838 
1839 	KASSERT(powerof2(align + 1), ("invalid zone alignment %d for \"%s\"",
1840 	    align, name));
1841 
1842 	/* This stuff is essential for the zone ctor */
1843 	memset(&args, 0, sizeof(args));
1844 	args.name = name;
1845 	args.size = size;
1846 	args.ctor = ctor;
1847 	args.dtor = dtor;
1848 	args.uminit = uminit;
1849 	args.fini = fini;
1850 #ifdef  INVARIANTS
1851 	/*
1852 	 * If a zone is being created with an empty constructor and
1853 	 * destructor, pass UMA constructor/destructor which checks for
1854 	 * memory use after free.
1855 	 */
1856 	if ((!(flags & (UMA_ZONE_ZINIT | UMA_ZONE_NOFREE))) &&
1857 	    ctor == NULL && dtor == NULL && uminit == NULL && fini == NULL) {
1858 		args.ctor = trash_ctor;
1859 		args.dtor = trash_dtor;
1860 		args.uminit = trash_init;
1861 		args.fini = trash_fini;
1862 	}
1863 #endif
1864 	args.align = align;
1865 	args.flags = flags;
1866 	args.keg = NULL;
1867 
1868 	if (booted < UMA_STARTUP2) {
1869 		locked = false;
1870 	} else {
1871 		sx_slock(&uma_drain_lock);
1872 		locked = true;
1873 	}
1874 	res = zone_alloc_item(zones, &args, M_WAITOK);
1875 	if (locked)
1876 		sx_sunlock(&uma_drain_lock);
1877 	return (res);
1878 }
1879 
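/*
 * Typical consumer usage (a minimal sketch, not taken from this file; the
 * "foo" names are hypothetical): a subsystem creates its zone once and then
 * allocates and frees items from it:
 *
 *	foo_zone = uma_zcreate("foo", sizeof(struct foo),
 *	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
 *	p = uma_zalloc(foo_zone, M_WAITOK);
 *	...
 *	uma_zfree(foo_zone, p);
 *	uma_zdestroy(foo_zone);
 */
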
1880 /* See uma.h */
1881 uma_zone_t
1882 uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
1883 		    uma_init zinit, uma_fini zfini, uma_zone_t master)
1884 {
1885 	struct uma_zctor_args args;
1886 	uma_keg_t keg;
1887 	uma_zone_t res;
1888 	bool locked;
1889 
1890 	keg = zone_first_keg(master);
1891 	memset(&args, 0, sizeof(args));
1892 	args.name = name;
1893 	args.size = keg->uk_size;
1894 	args.ctor = ctor;
1895 	args.dtor = dtor;
1896 	args.uminit = zinit;
1897 	args.fini = zfini;
1898 	args.align = keg->uk_align;
1899 	args.flags = keg->uk_flags | UMA_ZONE_SECONDARY;
1900 	args.keg = keg;
1901 
1902 	if (booted < UMA_STARTUP2) {
1903 		locked = false;
1904 	} else {
1905 		sx_slock(&uma_drain_lock);
1906 		locked = true;
1907 	}
1908 	/* XXX Attaches only one keg of potentially many. */
1909 	res = zone_alloc_item(zones, &args, M_WAITOK);
1910 	if (locked)
1911 		sx_sunlock(&uma_drain_lock);
1912 	return (res);
1913 }
1914 
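/*
 * Illustrative sketch (hypothetical names, never compiled): a secondary zone
 * created with uma_zsecond_create() shares the master zone's keg, and thus
 * its backing slabs and item size, but layers its own ctor/dtor on top.
 * This is similar in spirit to how the mbuf packet zone is layered on the
 * plain mbuf keg.
 */
#if 0
struct thing {
	uint64_t	t_id;
};

static int	thing_tag_ctor(void *, int, void *, int);
static void	thing_tag_dtor(void *, int, void *);
static uma_zone_t thing_zone, thing_tagged_zone;

static void
thing_zones_setup(void)
{

	thing_zone = uma_zcreate("thing", sizeof(struct thing),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	/* Same keg as thing_zone; only the per-allocation hooks differ. */
	thing_tagged_zone = uma_zsecond_create("thing tagged",
	    thing_tag_ctor, thing_tag_dtor, NULL, NULL, thing_zone);
}
#endif
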
1915 /* See uma.h */
1916 uma_zone_t
1917 uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor,
1918 		    uma_init zinit, uma_fini zfini, uma_import zimport,
1919 		    uma_release zrelease, void *arg, int flags)
1920 {
1921 	struct uma_zctor_args args;
1922 
1923 	memset(&args, 0, sizeof(args));
1924 	args.name = name;
1925 	args.size = size;
1926 	args.ctor = ctor;
1927 	args.dtor = dtor;
1928 	args.uminit = zinit;
1929 	args.fini = zfini;
1930 	args.import = zimport;
1931 	args.release = zrelease;
1932 	args.arg = arg;
1933 	args.align = 0;
1934 	args.flags = flags;
1935 
1936 	return (zone_alloc_item(zones, &args, M_WAITOK));
1937 }
1938 
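/*
 * Illustrative sketch (hypothetical, never compiled): a cache-only zone made
 * with uma_zcache_create() has no keg; the caller supplies import and release
 * callbacks that move items between the zone's buckets and some external
 * backing store.  The callback signatures follow the uma_import/uma_release
 * typedefs in <vm/uma.h> as of this revision; backing_store_get()/put() and
 * POOL_ITEM_SIZE are assumptions standing in for that external store.
 */
#if 0
/* Fill "store" with up to "count" items taken from the backing store. */
static int
pool_import(void *arg, void **store, int count, int flags)
{
	int i;

	for (i = 0; i < count; i++) {
		store[i] = backing_store_get(arg, flags);
		if (store[i] == NULL)
			break;
	}
	return (i);
}

/* Hand "count" items back to the backing store. */
static void
pool_release(void *arg, void **store, int count)
{
	int i;

	for (i = 0; i < count; i++)
		backing_store_put(arg, store[i]);
}

static uma_zone_t pool_zone;

static void
pool_zone_setup(void *pool)
{

	pool_zone = uma_zcache_create("pool cache", POOL_ITEM_SIZE,
	    NULL, NULL, NULL, NULL, pool_import, pool_release, pool, 0);
}
#endif
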
1939 static void
1940 zone_lock_pair(uma_zone_t a, uma_zone_t b)
1941 {
1942 	if (a < b) {
1943 		ZONE_LOCK(a);
1944 		mtx_lock_flags(b->uz_lockptr, MTX_DUPOK);
1945 	} else {
1946 		ZONE_LOCK(b);
1947 		mtx_lock_flags(a->uz_lockptr, MTX_DUPOK);
1948 	}
1949 }
1950 
1951 static void
1952 zone_unlock_pair(uma_zone_t a, uma_zone_t b)
1953 {
1954 
1955 	ZONE_UNLOCK(a);
1956 	ZONE_UNLOCK(b);
1957 }
1958 
1959 int
1960 uma_zsecond_add(uma_zone_t zone, uma_zone_t master)
1961 {
1962 	uma_klink_t klink;
1963 	uma_klink_t kl;
1964 	int error;
1965 
1966 	error = 0;
1967 	klink = malloc(sizeof(*klink), M_TEMP, M_WAITOK | M_ZERO);
1968 
1969 	zone_lock_pair(zone, master);
1970 	/*
1971 	 * zone must use vtoslab() to resolve objects and must already be
1972 	 * a secondary.
1973 	 */
1974 	if ((zone->uz_flags & (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY))
1975 	    != (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) {
1976 		error = EINVAL;
1977 		goto out;
1978 	}
1979 	/*
1980 	 * The new master must also use vtoslab().
1981 	 */
1982 	if ((master->uz_flags & UMA_ZONE_VTOSLAB) != UMA_ZONE_VTOSLAB) {
1983 		error = EINVAL;
1984 		goto out;
1985 	}
1986 
1987 	/*
1988 	 * The underlying object must be the same size.  rsize
1989 	 * may be different.
1990 	 */
1991 	if (master->uz_size != zone->uz_size) {
1992 		error = E2BIG;
1993 		goto out;
1994 	}
1995 	/*
1996 	 * Put it at the end of the list.
1997 	 */
1998 	klink->kl_keg = zone_first_keg(master);
1999 	LIST_FOREACH(kl, &zone->uz_kegs, kl_link) {
2000 		if (LIST_NEXT(kl, kl_link) == NULL) {
2001 			LIST_INSERT_AFTER(kl, klink, kl_link);
2002 			break;
2003 		}
2004 	}
2005 	klink = NULL;
2006 	zone->uz_flags |= UMA_ZFLAG_MULTI;
2007 	zone->uz_slab = zone_fetch_slab_multi;
2008 
2009 out:
2010 	zone_unlock_pair(zone, master);
2011 	if (klink != NULL)
2012 		free(klink, M_TEMP);
2013 
2014 	return (error);
2015 }
2016 
2017 
2018 /* See uma.h */
2019 void
2020 uma_zdestroy(uma_zone_t zone)
2021 {
2022 
2023 	sx_slock(&uma_drain_lock);
2024 	zone_free_item(zones, zone, NULL, SKIP_NONE);
2025 	sx_sunlock(&uma_drain_lock);
2026 }
2027 
2028 void
2029 uma_zwait(uma_zone_t zone)
2030 {
2031 	void *item;
2032 
2033 	item = uma_zalloc_arg(zone, NULL, M_WAITOK);
2034 	uma_zfree(zone, item);
2035 }
2036 
2037 /* See uma.h */
2038 void *
2039 uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
2040 {
2041 	void *item;
2042 	uma_cache_t cache;
2043 	uma_bucket_t bucket;
2044 	int lockfail;
2045 	int cpu;
2046 
2047 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
2048 	random_harvest_fast_uma(&zone, sizeof(zone), 1, RANDOM_UMA);
2049 
2050 	/* This is the fast path allocation */
2051 	CTR4(KTR_UMA, "uma_zalloc_arg thread %x zone %s(%p) flags %d",
2052 	    curthread, zone->uz_name, zone, flags);
2053 
2054 	if (flags & M_WAITOK) {
2055 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2056 		    "uma_zalloc_arg: zone \"%s\"", zone->uz_name);
2057 	}
2058 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
2059 	    ("uma_zalloc_arg: called with spinlock or critical section held"));
2060 
2061 #ifdef DEBUG_MEMGUARD
2062 	if (memguard_cmp_zone(zone)) {
2063 		item = memguard_alloc(zone->uz_size, flags);
2064 		if (item != NULL) {
2065 			if (zone->uz_init != NULL &&
2066 			    zone->uz_init(item, zone->uz_size, flags) != 0)
2067 				return (NULL);
2068 			if (zone->uz_ctor != NULL &&
2069 			    zone->uz_ctor(item, zone->uz_size, udata,
2070 			    flags) != 0) {
2071 			    	zone->uz_fini(item, zone->uz_size);
2072 				return (NULL);
2073 			}
2074 			return (item);
2075 		}
2076 		/* This is unfortunate but should not be fatal. */
2077 	}
2078 #endif
2079 	/*
2080 	 * If possible, allocate from the per-CPU cache.  There are two
2081 	 * requirements for safe access to the per-CPU cache: (1) the thread
2082 	 * accessing the cache must not be preempted or yield during access,
2083 	 * and (2) the thread must not migrate CPUs without switching which
2084 	 * cache it accesses.  We rely on a critical section to prevent
2085 	 * preemption and migration.  We release the critical section in
2086 	 * order to acquire the zone mutex if we are unable to allocate from
2087 	 * the current cache; when we re-acquire the critical section, we
2088 	 * must detect and handle migration if it has occurred.
2089 	 */
2090 	critical_enter();
2091 	cpu = curcpu;
2092 	cache = &zone->uz_cpu[cpu];
2093 
2094 zalloc_start:
2095 	bucket = cache->uc_allocbucket;
2096 	if (bucket != NULL && bucket->ub_cnt > 0) {
2097 		bucket->ub_cnt--;
2098 		item = bucket->ub_bucket[bucket->ub_cnt];
2099 #ifdef INVARIANTS
2100 		bucket->ub_bucket[bucket->ub_cnt] = NULL;
2101 #endif
2102 		KASSERT(item != NULL, ("uma_zalloc: Bucket pointer mangled."));
2103 		cache->uc_allocs++;
2104 		critical_exit();
2105 		if (zone->uz_ctor != NULL &&
2106 		    zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
2107 			atomic_add_long(&zone->uz_fails, 1);
2108 			zone_free_item(zone, item, udata, SKIP_DTOR);
2109 			return (NULL);
2110 		}
2111 #ifdef INVARIANTS
2112 		uma_dbg_alloc(zone, NULL, item);
2113 #endif
2114 		if (flags & M_ZERO)
2115 			uma_zero_item(item, zone);
2116 		return (item);
2117 	}
2118 
2119 	/*
2120 	 * We have run out of items in our alloc bucket.
2121 	 * See if we can switch with our free bucket.
2122 	 */
2123 	bucket = cache->uc_freebucket;
2124 	if (bucket != NULL && bucket->ub_cnt > 0) {
2125 		CTR2(KTR_UMA,
2126 		    "uma_zalloc: zone %s(%p) swapping empty with alloc",
2127 		    zone->uz_name, zone);
2128 		cache->uc_freebucket = cache->uc_allocbucket;
2129 		cache->uc_allocbucket = bucket;
2130 		goto zalloc_start;
2131 	}
2132 
2133 	/*
2134 	 * Discard any empty allocation bucket while we hold no locks.
2135 	 */
2136 	bucket = cache->uc_allocbucket;
2137 	cache->uc_allocbucket = NULL;
2138 	critical_exit();
2139 	if (bucket != NULL)
2140 		bucket_free(zone, bucket, udata);
2141 
2142 	/* Short-circuit for zones without buckets or when bucket use is disabled. */
2143 	if (zone->uz_count == 0 || bucketdisable)
2144 		goto zalloc_item;
2145 
2146 	/*
2147 	 * Our attempt to retrieve an item from the per-CPU cache has failed, so
2148 	 * we must go back to the zone.  This requires the zone lock, so we
2149 	 * must drop the critical section, then re-acquire it when we go back
2150 	 * to the cache.  Since the critical section is released, we may be
2151 	 * preempted or migrate.  As such, make sure not to maintain any
2152 	 * thread-local state specific to the cache from prior to releasing
2153 	 * the critical section.
2154 	 */
2155 	lockfail = 0;
2156 	if (ZONE_TRYLOCK(zone) == 0) {
2157 		/* Record contention to size the buckets. */
2158 		ZONE_LOCK(zone);
2159 		lockfail = 1;
2160 	}
2161 	critical_enter();
2162 	cpu = curcpu;
2163 	cache = &zone->uz_cpu[cpu];
2164 
2165 	/*
2166 	 * Since we have locked the zone we may as well send back our stats.
2167 	 */
2168 	atomic_add_long(&zone->uz_allocs, cache->uc_allocs);
2169 	atomic_add_long(&zone->uz_frees, cache->uc_frees);
2170 	cache->uc_allocs = 0;
2171 	cache->uc_frees = 0;
2172 
2173 	/* See if we lost the race to fill the cache. */
2174 	if (cache->uc_allocbucket != NULL) {
2175 		ZONE_UNLOCK(zone);
2176 		goto zalloc_start;
2177 	}
2178 
2179 	/*
2180 	 * Check the zone's cache of buckets.
2181 	 */
2182 	if ((bucket = LIST_FIRST(&zone->uz_buckets)) != NULL) {
2183 		KASSERT(bucket->ub_cnt != 0,
2184 		    ("uma_zalloc_arg: Returning an empty bucket."));
2185 
2186 		LIST_REMOVE(bucket, ub_link);
2187 		cache->uc_allocbucket = bucket;
2188 		ZONE_UNLOCK(zone);
2189 		goto zalloc_start;
2190 	}
2191 	/* We are no longer associated with this CPU. */
2192 	critical_exit();
2193 
2194 	/*
2195 	 * We bump the uz count when the cache size is insufficient to
2196 	 * handle the working set.
2197 	 */
2198 	if (lockfail && zone->uz_count < BUCKET_MAX)
2199 		zone->uz_count++;
2200 	ZONE_UNLOCK(zone);
2201 
2202 	/*
2203 	 * Now let's just fill a bucket and put it on the free list.  If that
2204 	 * works we'll restart the allocation from the beginning and it
2205 	 * will use the just filled bucket.
2206 	 */
2207 	bucket = zone_alloc_bucket(zone, udata, flags);
2208 	CTR3(KTR_UMA, "uma_zalloc: zone %s(%p) bucket zone returned %p",
2209 	    zone->uz_name, zone, bucket);
2210 	if (bucket != NULL) {
2211 		ZONE_LOCK(zone);
2212 		critical_enter();
2213 		cpu = curcpu;
2214 		cache = &zone->uz_cpu[cpu];
2215 		/*
2216 		 * See if we lost the race or were migrated.  Cache the
2217 		 * initialized bucket to make this less likely or claim
2218 		 * the memory directly.
2219 		 */
2220 		if (cache->uc_allocbucket == NULL)
2221 			cache->uc_allocbucket = bucket;
2222 		else
2223 			LIST_INSERT_HEAD(&zone->uz_buckets, bucket, ub_link);
2224 		ZONE_UNLOCK(zone);
2225 		goto zalloc_start;
2226 	}
2227 
2228 	/*
2229 	 * We may not be able to get a bucket so return an actual item.
2230 	 */
2231 zalloc_item:
2232 	item = zone_alloc_item(zone, udata, flags);
2233 
2234 	return (item);
2235 }
2236 
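/*
 * Illustrative sketch of the allocation-side contract (hypothetical names,
 * never compiled): M_WAITOK allocations may sleep and only fail if an init
 * or ctor fails, while M_NOWAIT allocations can return NULL and must always
 * be checked.  M_ZERO asks uma_zalloc_arg() to clear the item.
 */
#if 0
static struct widget *
widget_alloc(bool cansleep)
{
	struct widget *w;

	if (cansleep) {
		/* May sleep; do not call with locks held that forbid it. */
		w = uma_zalloc(widget_zone, M_WAITOK | M_ZERO);
	} else {
		/* For contexts that cannot sleep. */
		w = uma_zalloc(widget_zone, M_NOWAIT | M_ZERO);
		if (w == NULL)
			return (NULL);
	}
	return (w);
}
#endif
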
2237 static uma_slab_t
2238 keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int flags)
2239 {
2240 	uma_slab_t slab;
2241 	int reserve;
2242 
2243 	mtx_assert(&keg->uk_lock, MA_OWNED);
2244 	slab = NULL;
2245 	reserve = 0;
2246 	if ((flags & M_USE_RESERVE) == 0)
2247 		reserve = keg->uk_reserve;
2248 
2249 	for (;;) {
2250 		/*
2251 		 * Find a slab with some space.  Prefer slabs that are partially
2252 		 * used over those that are completely free.  This helps to reduce
2253 		 * fragmentation.
2254 		 */
2255 		if (keg->uk_free > reserve) {
2256 			if (!LIST_EMPTY(&keg->uk_part_slab)) {
2257 				slab = LIST_FIRST(&keg->uk_part_slab);
2258 			} else {
2259 				slab = LIST_FIRST(&keg->uk_free_slab);
2260 				LIST_REMOVE(slab, us_link);
2261 				LIST_INSERT_HEAD(&keg->uk_part_slab, slab,
2262 				    us_link);
2263 			}
2264 			MPASS(slab->us_keg == keg);
2265 			return (slab);
2266 		}
2267 
2268 		/*
2269 		 * M_NOVM means don't ask at all!
2270 		 */
2271 		if (flags & M_NOVM)
2272 			break;
2273 
2274 		if (keg->uk_maxpages && keg->uk_pages >= keg->uk_maxpages) {
2275 			keg->uk_flags |= UMA_ZFLAG_FULL;
2276 			/*
2277 			 * If this is not a multi-zone, set the FULL bit.
2278 			 * Otherwise zone_fetch_slab_multi() takes care of it.
2279 			 */
2280 			if ((zone->uz_flags & UMA_ZFLAG_MULTI) == 0) {
2281 				zone->uz_flags |= UMA_ZFLAG_FULL;
2282 				zone_log_warning(zone);
2283 				zone_maxaction(zone);
2284 			}
2285 			if (flags & M_NOWAIT)
2286 				break;
2287 			zone->uz_sleeps++;
2288 			msleep(keg, &keg->uk_lock, PVM, "keglimit", 0);
2289 			continue;
2290 		}
2291 		slab = keg_alloc_slab(keg, zone, flags);
2292 		/*
2293 		 * If we got a slab here it's safe to mark it partially used
2294 		 * and return.  We assume that the caller is going to remove
2295 		 * at least one item.
2296 		 */
2297 		if (slab) {
2298 			MPASS(slab->us_keg == keg);
2299 			LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
2300 			return (slab);
2301 		}
2302 		/*
2303 		 * We might not have been able to get a slab, but another CPU
2304 		 * could have done so while we were unlocked.  Check again
2305 		 * before we fail.
2306 		 */
2307 		flags |= M_NOVM;
2308 	}
2309 	return (slab);
2310 }
2311 
2312 static uma_slab_t
2313 zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int flags)
2314 {
2315 	uma_slab_t slab;
2316 
2317 	if (keg == NULL) {
2318 		keg = zone_first_keg(zone);
2319 		KEG_LOCK(keg);
2320 	}
2321 
2322 	for (;;) {
2323 		slab = keg_fetch_slab(keg, zone, flags);
2324 		if (slab)
2325 			return (slab);
2326 		if (flags & (M_NOWAIT | M_NOVM))
2327 			break;
2328 	}
2329 	KEG_UNLOCK(keg);
2330 	return (NULL);
2331 }
2332 
2333 /*
2334  * zone_fetch_slab_multi:  Fetches a slab from one available keg.  Returns
2335  * with the keg locked.  If NULL is returned, no lock is held.
2336  *
2337  * The last pointer is used to seed the search.  It is not required.
2338  */
2339 static uma_slab_t
2340 zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int rflags)
2341 {
2342 	uma_klink_t klink;
2343 	uma_slab_t slab;
2344 	uma_keg_t keg;
2345 	int flags;
2346 	int empty;
2347 	int full;
2348 
2349 	/*
2350 	 * Don't wait on the first pass.  This will skip limit tests
2351 	 * as well.  We don't want to block if we can find a provider
2352 	 * without blocking.
2353 	 */
2354 	flags = (rflags & ~M_WAITOK) | M_NOWAIT;
2355 	/*
2356 	 * Use the last slab allocated as a hint for where to start
2357 	 * the search.
2358 	 */
2359 	if (last != NULL) {
2360 		slab = keg_fetch_slab(last, zone, flags);
2361 		if (slab)
2362 			return (slab);
2363 		KEG_UNLOCK(last);
2364 	}
2365 	/*
2366 	 * Loop until we have a slab in case of transient failures
2367 	 * while M_WAITOK is specified.  I'm not sure this is 100%
2368 	 * required but we've done it for so long now.
2369 	 */
2370 	for (;;) {
2371 		empty = 0;
2372 		full = 0;
2373 		/*
2374 		 * Search the available kegs for slabs.  Be careful to hold the
2375 		 * correct lock while calling into the keg layer.
2376 		 */
2377 		LIST_FOREACH(klink, &zone->uz_kegs, kl_link) {
2378 			keg = klink->kl_keg;
2379 			KEG_LOCK(keg);
2380 			if ((keg->uk_flags & UMA_ZFLAG_FULL) == 0) {
2381 				slab = keg_fetch_slab(keg, zone, flags);
2382 				if (slab)
2383 					return (slab);
2384 			}
2385 			if (keg->uk_flags & UMA_ZFLAG_FULL)
2386 				full++;
2387 			else
2388 				empty++;
2389 			KEG_UNLOCK(keg);
2390 		}
2391 		if (rflags & (M_NOWAIT | M_NOVM))
2392 			break;
2393 		flags = rflags;
2394 		/*
2395 		 * All kegs are full.  XXX We can't atomically check all kegs
2396 		 * and sleep so just sleep for a short period and retry.
2397 		 */
2398 		if (full && !empty) {
2399 			ZONE_LOCK(zone);
2400 			zone->uz_flags |= UMA_ZFLAG_FULL;
2401 			zone->uz_sleeps++;
2402 			zone_log_warning(zone);
2403 			zone_maxaction(zone);
2404 			msleep(zone, zone->uz_lockptr, PVM,
2405 			    "zonelimit", hz/100);
2406 			zone->uz_flags &= ~UMA_ZFLAG_FULL;
2407 			ZONE_UNLOCK(zone);
2408 			continue;
2409 		}
2410 	}
2411 	return (NULL);
2412 }
2413 
2414 static void *
2415 slab_alloc_item(uma_keg_t keg, uma_slab_t slab)
2416 {
2417 	void *item;
2418 	uint8_t freei;
2419 
2420 	MPASS(keg == slab->us_keg);
2421 	mtx_assert(&keg->uk_lock, MA_OWNED);
2422 
2423 	freei = BIT_FFS(SLAB_SETSIZE, &slab->us_free) - 1;
2424 	BIT_CLR(SLAB_SETSIZE, freei, &slab->us_free);
2425 	item = slab->us_data + (keg->uk_rsize * freei);
2426 	slab->us_freecount--;
2427 	keg->uk_free--;
2428 
2429 	/* Move this slab to the full list */
2430 	if (slab->us_freecount == 0) {
2431 		LIST_REMOVE(slab, us_link);
2432 		LIST_INSERT_HEAD(&keg->uk_full_slab, slab, us_link);
2433 	}
2434 
2435 	return (item);
2436 }
2437 
2438 static int
2439 zone_import(uma_zone_t zone, void **bucket, int max, int flags)
2440 {
2441 	uma_slab_t slab;
2442 	uma_keg_t keg;
2443 	int i;
2444 
2445 	slab = NULL;
2446 	keg = NULL;
2447 	/* Try to keep the buckets totally full */
2448 	for (i = 0; i < max; ) {
2449 		if ((slab = zone->uz_slab(zone, keg, flags)) == NULL)
2450 			break;
2451 		keg = slab->us_keg;
2452 		while (slab->us_freecount && i < max) {
2453 			bucket[i++] = slab_alloc_item(keg, slab);
2454 			if (keg->uk_free <= keg->uk_reserve)
2455 				break;
2456 		}
2457 		/* Don't grab more than one slab at a time. */
2458 		flags &= ~M_WAITOK;
2459 		flags |= M_NOWAIT;
2460 	}
2461 	if (slab != NULL)
2462 		KEG_UNLOCK(keg);
2463 
2464 	return (i);
2465 }
2466 
2467 static uma_bucket_t
2468 zone_alloc_bucket(uma_zone_t zone, void *udata, int flags)
2469 {
2470 	uma_bucket_t bucket;
2471 	int max;
2472 
2473 	/* Don't wait for buckets, preserve caller's NOVM setting. */
2474 	bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM));
2475 	if (bucket == NULL)
2476 		return (NULL);
2477 
2478 	max = MIN(bucket->ub_entries, zone->uz_count);
2479 	bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket,
2480 	    max, flags);
2481 
2482 	/*
2483 	 * Initialize the memory if necessary.
2484 	 */
2485 	if (bucket->ub_cnt != 0 && zone->uz_init != NULL) {
2486 		int i;
2487 
2488 		for (i = 0; i < bucket->ub_cnt; i++)
2489 			if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size,
2490 			    flags) != 0)
2491 				break;
2492 		/*
2493 		 * If we couldn't initialize the whole bucket, put the
2494 		 * rest back onto the freelist.
2495 		 */
2496 		if (i != bucket->ub_cnt) {
2497 			zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i],
2498 			    bucket->ub_cnt - i);
2499 #ifdef INVARIANTS
2500 			bzero(&bucket->ub_bucket[i],
2501 			    sizeof(void *) * (bucket->ub_cnt - i));
2502 #endif
2503 			bucket->ub_cnt = i;
2504 		}
2505 	}
2506 
2507 	if (bucket->ub_cnt == 0) {
2508 		bucket_free(zone, bucket, udata);
2509 		atomic_add_long(&zone->uz_fails, 1);
2510 		return (NULL);
2511 	}
2512 
2513 	return (bucket);
2514 }
2515 
2516 /*
2517  * Allocates a single item from a zone.
2518  *
2519  * Arguments
2520  *	zone   The zone to alloc for.
2521  *	udata  The data to be passed to the constructor.
2522  *	flags  M_WAITOK, M_NOWAIT, M_ZERO.
2523  *
2524  * Returns
2525  *	NULL if there is no memory and M_NOWAIT is set
2526  *	An item if successful
2527  */
2528 
2529 static void *
2530 zone_alloc_item(uma_zone_t zone, void *udata, int flags)
2531 {
2532 	void *item;
2533 
2534 	item = NULL;
2535 
2536 	if (zone->uz_import(zone->uz_arg, &item, 1, flags) != 1)
2537 		goto fail;
2538 	atomic_add_long(&zone->uz_allocs, 1);
2539 
2540 	/*
2541 	 * We have to call both the zone's init (not the keg's init)
2542 	 * and the zone's ctor.  This is because the item is going from
2543 	 * a keg slab directly to the user, and the user is expecting it
2544 	 * to be both zone-init'd as well as zone-ctor'd.
2545 	 */
2546 	if (zone->uz_init != NULL) {
2547 		if (zone->uz_init(item, zone->uz_size, flags) != 0) {
2548 			zone_free_item(zone, item, udata, SKIP_FINI);
2549 			goto fail;
2550 		}
2551 	}
2552 	if (zone->uz_ctor != NULL) {
2553 		if (zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
2554 			zone_free_item(zone, item, udata, SKIP_DTOR);
2555 			goto fail;
2556 		}
2557 	}
2558 #ifdef INVARIANTS
2559 	uma_dbg_alloc(zone, NULL, item);
2560 #endif
2561 	if (flags & M_ZERO)
2562 		uma_zero_item(item, zone);
2563 
2564 	CTR3(KTR_UMA, "zone_alloc_item item %p from %s(%p)", item,
2565 	    zone->uz_name, zone);
2566 
2567 	return (item);
2568 
2569 fail:
2570 	CTR2(KTR_UMA, "zone_alloc_item failed from %s(%p)",
2571 	    zone->uz_name, zone);
2572 	atomic_add_long(&zone->uz_fails, 1);
2573 	return (NULL);
2574 }
2575 
2576 /* See uma.h */
2577 void
2578 uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
2579 {
2580 	uma_cache_t cache;
2581 	uma_bucket_t bucket;
2582 	int lockfail;
2583 	int cpu;
2584 
2585 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
2586 	random_harvest_fast_uma(&zone, sizeof(zone), 1, RANDOM_UMA);
2587 
2588 	CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread,
2589 	    zone->uz_name);
2590 
2591 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
2592 	    ("uma_zfree_arg: called with spinlock or critical section held"));
2593 
2594 	/* uma_zfree(..., NULL) does nothing, to match free(9). */
2595 	if (item == NULL)
2596 		return;
2597 #ifdef DEBUG_MEMGUARD
2598 	if (is_memguard_addr(item)) {
2599 		if (zone->uz_dtor != NULL)
2600 			zone->uz_dtor(item, zone->uz_size, udata);
2601 		if (zone->uz_fini != NULL)
2602 			zone->uz_fini(item, zone->uz_size);
2603 		memguard_free(item);
2604 		return;
2605 	}
2606 #endif
2607 #ifdef INVARIANTS
2608 	if (zone->uz_flags & UMA_ZONE_MALLOC)
2609 		uma_dbg_free(zone, udata, item);
2610 	else
2611 		uma_dbg_free(zone, NULL, item);
2612 #endif
2613 	if (zone->uz_dtor != NULL)
2614 		zone->uz_dtor(item, zone->uz_size, udata);
2615 
2616 	/*
2617 	 * The race here is acceptable.  If we miss it we'll just have to wait
2618 	 * a little longer for the limits to be reset.
2619 	 */
2620 	if (zone->uz_flags & UMA_ZFLAG_FULL)
2621 		goto zfree_item;
2622 
2623 	/*
2624 	 * If possible, free to the per-CPU cache.  There are two
2625 	 * requirements for safe access to the per-CPU cache: (1) the thread
2626 	 * accessing the cache must not be preempted or yield during access,
2627 	 * and (2) the thread must not migrate CPUs without switching which
2628 	 * cache it accesses.  We rely on a critical section to prevent
2629 	 * preemption and migration.  We release the critical section in
2630 	 * order to acquire the zone mutex if we are unable to free to the
2631 	 * current cache; when we re-acquire the critical section, we must
2632 	 * detect and handle migration if it has occurred.
2633 	 */
2634 zfree_restart:
2635 	critical_enter();
2636 	cpu = curcpu;
2637 	cache = &zone->uz_cpu[cpu];
2638 
2639 zfree_start:
2640 	/*
2641 	 * Try to free into the allocbucket first to give LIFO ordering
2642 	 * for cache-hot data structures.  Spill over into the freebucket
2643 	 * if necessary.  Alloc will swap them if one runs dry.
2644 	 */
2645 	bucket = cache->uc_allocbucket;
2646 	if (bucket == NULL || bucket->ub_cnt >= bucket->ub_entries)
2647 		bucket = cache->uc_freebucket;
2648 	if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
2649 		KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL,
2650 		    ("uma_zfree: Freeing to non free bucket index."));
2651 		bucket->ub_bucket[bucket->ub_cnt] = item;
2652 		bucket->ub_cnt++;
2653 		cache->uc_frees++;
2654 		critical_exit();
2655 		return;
2656 	}
2657 
2658 	/*
2659 	 * We must go back to the zone, which requires acquiring the zone lock,
2660 	 * which in turn means we must release and re-acquire the critical
2661 	 * section.  Since the critical section is released, we may be
2662 	 * preempted or migrate.  As such, make sure not to maintain any
2663 	 * thread-local state specific to the cache from prior to releasing
2664 	 * the critical section.
2665 	 */
2666 	critical_exit();
2667 	if (zone->uz_count == 0 || bucketdisable)
2668 		goto zfree_item;
2669 
2670 	lockfail = 0;
2671 	if (ZONE_TRYLOCK(zone) == 0) {
2672 		/* Record contention to size the buckets. */
2673 		ZONE_LOCK(zone);
2674 		lockfail = 1;
2675 	}
2676 	critical_enter();
2677 	cpu = curcpu;
2678 	cache = &zone->uz_cpu[cpu];
2679 
2680 	/*
2681 	 * Since we have locked the zone we may as well send back our stats.
2682 	 */
2683 	atomic_add_long(&zone->uz_allocs, cache->uc_allocs);
2684 	atomic_add_long(&zone->uz_frees, cache->uc_frees);
2685 	cache->uc_allocs = 0;
2686 	cache->uc_frees = 0;
2687 
2688 	bucket = cache->uc_freebucket;
2689 	if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
2690 		ZONE_UNLOCK(zone);
2691 		goto zfree_start;
2692 	}
2693 	cache->uc_freebucket = NULL;
2694 	/* We are no longer associated with this CPU. */
2695 	critical_exit();
2696 
2697 	/* Can we throw this on the zone full list? */
2698 	if (bucket != NULL) {
2699 		CTR3(KTR_UMA,
2700 		    "uma_zfree: zone %s(%p) putting bucket %p on free list",
2701 		    zone->uz_name, zone, bucket);
2702 		/* ub_cnt is pointing to the last free item */
2703 		KASSERT(bucket->ub_cnt != 0,
2704 		    ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n"));
2705 		LIST_INSERT_HEAD(&zone->uz_buckets, bucket, ub_link);
2706 	}
2707 
2708 	/*
2709 	 * We bump the uz count when the cache size is insufficient to
2710 	 * handle the working set.
2711 	 */
2712 	if (lockfail && zone->uz_count < BUCKET_MAX)
2713 		zone->uz_count++;
2714 	ZONE_UNLOCK(zone);
2715 
2716 	bucket = bucket_alloc(zone, udata, M_NOWAIT);
2717 	CTR3(KTR_UMA, "uma_zfree: zone %s(%p) allocated bucket %p",
2718 	    zone->uz_name, zone, bucket);
2719 	if (bucket) {
2720 		critical_enter();
2721 		cpu = curcpu;
2722 		cache = &zone->uz_cpu[cpu];
2723 		if (cache->uc_freebucket == NULL) {
2724 			cache->uc_freebucket = bucket;
2725 			goto zfree_start;
2726 		}
2727 		/*
2728 		 * We lost the race, start over.  We have to drop our
2729 		 * critical section to free the bucket.
2730 		 */
2731 		critical_exit();
2732 		bucket_free(zone, bucket, udata);
2733 		goto zfree_restart;
2734 	}
2735 
2736 	/*
2737 	 * If nothing else caught this, we'll just do an internal free.
2738 	 */
2739 zfree_item:
2740 	zone_free_item(zone, item, udata, SKIP_DTOR);
2741 
2742 	return;
2743 }
2744 
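/*
 * Illustrative sketch (hypothetical, never compiled): the free side mirrors
 * the allocation side.  uma_zfree() passes NULL as udata; uma_zfree_arg()
 * lets the caller hand a cookie through to the zone's dtor, and freeing a
 * NULL item is a no-op, matching free(9).
 */
#if 0
struct widget_stats {
	uint64_t	ws_frees;
};

static void
widget_free(struct widget *w, struct widget_stats *ws)
{

	ws->ws_frees++;		/* hypothetical caller-side bookkeeping */
	/* "ws" also reaches widget_dtor() as its "arg" parameter. */
	uma_zfree_arg(widget_zone, w, ws);
}
#endif
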
2745 static void
2746 slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item)
2747 {
2748 	uint8_t freei;
2749 
2750 	mtx_assert(&keg->uk_lock, MA_OWNED);
2751 	MPASS(keg == slab->us_keg);
2752 
2753 	/* Do we need to remove from any lists? */
2754 	if (slab->us_freecount + 1 == keg->uk_ipers) {
2755 		LIST_REMOVE(slab, us_link);
2756 		LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
2757 	} else if (slab->us_freecount == 0) {
2758 		LIST_REMOVE(slab, us_link);
2759 		LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
2760 	}
2761 
2762 	/* Slab management. */
2763 	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
2764 	BIT_SET(SLAB_SETSIZE, freei, &slab->us_free);
2765 	slab->us_freecount++;
2766 
2767 	/* Keg statistics. */
2768 	keg->uk_free++;
2769 }
2770 
2771 static void
2772 zone_release(uma_zone_t zone, void **bucket, int cnt)
2773 {
2774 	void *item;
2775 	uma_slab_t slab;
2776 	uma_keg_t keg;
2777 	uint8_t *mem;
2778 	int clearfull;
2779 	int i;
2780 
2781 	clearfull = 0;
2782 	keg = zone_first_keg(zone);
2783 	KEG_LOCK(keg);
2784 	for (i = 0; i < cnt; i++) {
2785 		item = bucket[i];
2786 		if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) {
2787 			mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
2788 			if (zone->uz_flags & UMA_ZONE_HASH) {
2789 				slab = hash_sfind(&keg->uk_hash, mem);
2790 			} else {
2791 				mem += keg->uk_pgoff;
2792 				slab = (uma_slab_t)mem;
2793 			}
2794 		} else {
2795 			slab = vtoslab((vm_offset_t)item);
2796 			if (slab->us_keg != keg) {
2797 				KEG_UNLOCK(keg);
2798 				keg = slab->us_keg;
2799 				KEG_LOCK(keg);
2800 			}
2801 		}
2802 		slab_free_item(keg, slab, item);
2803 		if (keg->uk_flags & UMA_ZFLAG_FULL) {
2804 			if (keg->uk_pages < keg->uk_maxpages) {
2805 				keg->uk_flags &= ~UMA_ZFLAG_FULL;
2806 				clearfull = 1;
2807 			}
2808 
2809 			/*
2810 			 * We can handle one more allocation. Since we're
2811 			 * clearing ZFLAG_FULL, wake up all procs blocked
2812 			 * on pages. This should be uncommon, so keeping this
2813 			 * simple for now (rather than adding count of blocked
2814 			 * threads etc).
2815 			 */
2816 			wakeup(keg);
2817 		}
2818 	}
2819 	KEG_UNLOCK(keg);
2820 	if (clearfull) {
2821 		ZONE_LOCK(zone);
2822 		zone->uz_flags &= ~UMA_ZFLAG_FULL;
2823 		wakeup(zone);
2824 		ZONE_UNLOCK(zone);
2825 	}
2826 
2827 }
2828 
2829 /*
2830  * Frees a single item to any zone.
2831  *
2832  * Arguments:
2833  *	zone   The zone to free to
2834  *	item   The item we're freeing
2835  *	udata  User supplied data for the dtor
2836  *	skip   Skip dtors and finis
2837  */
2838 static void
2839 zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip)
2840 {
2841 
2842 #ifdef INVARIANTS
2843 	if (skip == SKIP_NONE) {
2844 		if (zone->uz_flags & UMA_ZONE_MALLOC)
2845 			uma_dbg_free(zone, udata, item);
2846 		else
2847 			uma_dbg_free(zone, NULL, item);
2848 	}
2849 #endif
2850 	if (skip < SKIP_DTOR && zone->uz_dtor)
2851 		zone->uz_dtor(item, zone->uz_size, udata);
2852 
2853 	if (skip < SKIP_FINI && zone->uz_fini)
2854 		zone->uz_fini(item, zone->uz_size);
2855 
2856 	atomic_add_long(&zone->uz_frees, 1);
2857 	zone->uz_release(zone->uz_arg, &item, 1);
2858 }
2859 
2860 /* See uma.h */
2861 int
2862 uma_zone_set_max(uma_zone_t zone, int nitems)
2863 {
2864 	uma_keg_t keg;
2865 
2866 	keg = zone_first_keg(zone);
2867 	if (keg == NULL)
2868 		return (0);
2869 	KEG_LOCK(keg);
2870 	keg->uk_maxpages = (nitems / keg->uk_ipers) * keg->uk_ppera;
2871 	if ((keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers < nitems)
2872 		keg->uk_maxpages += keg->uk_ppera;
2873 	nitems = (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers;
2874 	KEG_UNLOCK(keg);
2875 
2876 	return (nitems);
2877 }
2878 
2879 /* See uma.h */
2880 int
2881 uma_zone_get_max(uma_zone_t zone)
2882 {
2883 	int nitems;
2884 	uma_keg_t keg;
2885 
2886 	keg = zone_first_keg(zone);
2887 	if (keg == NULL)
2888 		return (0);
2889 	KEG_LOCK(keg);
2890 	nitems = (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers;
2891 	KEG_UNLOCK(keg);
2892 
2893 	return (nitems);
2894 }
2895 
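/*
 * Illustrative sketch (hypothetical names, never compiled): uma_zone_set_max()
 * rounds the requested limit up to a whole number of slabs, so the effective
 * cap it returns (and that uma_zone_get_max() later reports) can be larger
 * than what was asked for.
 */
#if 0
static void
widget_zone_limit(int nitems)
{
	int effective;

	effective = uma_zone_set_max(widget_zone, nitems);
	if (effective != nitems)
		printf("widget zone limit rounded from %d to %d items\n",
		    nitems, effective);
	uma_zone_set_warning(widget_zone, "widget zone limit reached");
}
#endif
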
2896 /* See uma.h */
2897 void
2898 uma_zone_set_warning(uma_zone_t zone, const char *warning)
2899 {
2900 
2901 	ZONE_LOCK(zone);
2902 	zone->uz_warning = warning;
2903 	ZONE_UNLOCK(zone);
2904 }
2905 
2906 /* See uma.h */
2907 void
2908 uma_zone_set_maxaction(uma_zone_t zone, uma_maxaction_t maxaction)
2909 {
2910 
2911 	ZONE_LOCK(zone);
2912 	TASK_INIT(&zone->uz_maxaction, 0, (task_fn_t *)maxaction, zone);
2913 	ZONE_UNLOCK(zone);
2914 }
2915 
2916 /* See uma.h */
2917 int
2918 uma_zone_get_cur(uma_zone_t zone)
2919 {
2920 	int64_t nitems;
2921 	u_int i;
2922 
2923 	ZONE_LOCK(zone);
2924 	nitems = zone->uz_allocs - zone->uz_frees;
2925 	CPU_FOREACH(i) {
2926 		/*
2927 		 * See the comment in sysctl_vm_zone_stats() regarding the
2928 		 * safety of accessing the per-cpu caches. With the zone lock
2929 		 * held, it is safe, but can potentially result in stale data.
2930 		 */
2931 		nitems += zone->uz_cpu[i].uc_allocs -
2932 		    zone->uz_cpu[i].uc_frees;
2933 	}
2934 	ZONE_UNLOCK(zone);
2935 
2936 	return (nitems < 0 ? 0 : nitems);
2937 }
2938 
2939 /* See uma.h */
2940 void
2941 uma_zone_set_init(uma_zone_t zone, uma_init uminit)
2942 {
2943 	uma_keg_t keg;
2944 
2945 	keg = zone_first_keg(zone);
2946 	KASSERT(keg != NULL, ("uma_zone_set_init: Invalid zone type"));
2947 	KEG_LOCK(keg);
2948 	KASSERT(keg->uk_pages == 0,
2949 	    ("uma_zone_set_init on non-empty keg"));
2950 	keg->uk_init = uminit;
2951 	KEG_UNLOCK(keg);
2952 }
2953 
2954 /* See uma.h */
2955 void
2956 uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
2957 {
2958 	uma_keg_t keg;
2959 
2960 	keg = zone_first_keg(zone);
2961 	KASSERT(keg != NULL, ("uma_zone_set_fini: Invalid zone type"));
2962 	KEG_LOCK(keg);
2963 	KASSERT(keg->uk_pages == 0,
2964 	    ("uma_zone_set_fini on non-empty keg"));
2965 	keg->uk_fini = fini;
2966 	KEG_UNLOCK(keg);
2967 }
2968 
2969 /* See uma.h */
2970 void
2971 uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
2972 {
2973 
2974 	ZONE_LOCK(zone);
2975 	KASSERT(zone_first_keg(zone)->uk_pages == 0,
2976 	    ("uma_zone_set_zinit on non-empty keg"));
2977 	zone->uz_init = zinit;
2978 	ZONE_UNLOCK(zone);
2979 }
2980 
2981 /* See uma.h */
2982 void
2983 uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
2984 {
2985 
2986 	ZONE_LOCK(zone);
2987 	KASSERT(zone_first_keg(zone)->uk_pages == 0,
2988 	    ("uma_zone_set_zfini on non-empty keg"));
2989 	zone->uz_fini = zfini;
2990 	ZONE_UNLOCK(zone);
2991 }
2992 
2993 /* See uma.h */
2994 /* XXX uk_freef is not actually used with the zone locked */
2995 void
2996 uma_zone_set_freef(uma_zone_t zone, uma_free freef)
2997 {
2998 	uma_keg_t keg;
2999 
3000 	keg = zone_first_keg(zone);
3001 	KASSERT(keg != NULL, ("uma_zone_set_freef: Invalid zone type"));
3002 	KEG_LOCK(keg);
3003 	keg->uk_freef = freef;
3004 	KEG_UNLOCK(keg);
3005 }
3006 
3007 /* See uma.h */
3008 /* XXX uk_allocf is not actually used with the zone locked */
3009 void
3010 uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
3011 {
3012 	uma_keg_t keg;
3013 
3014 	keg = zone_first_keg(zone);
3015 	KEG_LOCK(keg);
3016 	keg->uk_allocf = allocf;
3017 	KEG_UNLOCK(keg);
3018 }
3019 
3020 /* See uma.h */
3021 void
3022 uma_zone_reserve(uma_zone_t zone, int items)
3023 {
3024 	uma_keg_t keg;
3025 
3026 	keg = zone_first_keg(zone);
3027 	if (keg == NULL)
3028 		return;
3029 	KEG_LOCK(keg);
3030 	keg->uk_reserve = items;
3031 	KEG_UNLOCK(keg);
3032 
3033 	return;
3034 }
3035 
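/*
 * Illustrative sketch (hypothetical, never compiled): uma_zone_reserve()
 * keeps the last "items" free items in the keg for callers that pass
 * M_USE_RESERVE, and uma_prealloc() populates slabs up front so the reserve
 * is actually backed by memory.  This pattern suits consumers that must make
 * progress under memory pressure.
 */
#if 0
static void
widget_zone_reserve_setup(void)
{

	uma_zone_reserve(widget_zone, 32);
	uma_prealloc(widget_zone, 32);
}

static struct widget *
widget_alloc_critical(void)
{

	/* Allowed to dip into the reserved items; still may not sleep. */
	return (uma_zalloc(widget_zone, M_NOWAIT | M_USE_RESERVE));
}
#endif
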
3036 /* See uma.h */
3037 int
3038 uma_zone_reserve_kva(uma_zone_t zone, int count)
3039 {
3040 	uma_keg_t keg;
3041 	vm_offset_t kva;
3042 	u_int pages;
3043 
3044 	keg = zone_first_keg(zone);
3045 	if (keg == NULL)
3046 		return (0);
3047 	pages = count / keg->uk_ipers;
3048 
3049 	if (pages * keg->uk_ipers < count)
3050 		pages++;
3051 	pages *= keg->uk_ppera;
3052 
3053 #ifdef UMA_MD_SMALL_ALLOC
3054 	if (keg->uk_ppera > 1) {
3055 #else
3056 	if (1) {
3057 #endif
3058 		kva = kva_alloc((vm_size_t)pages * PAGE_SIZE);
3059 		if (kva == 0)
3060 			return (0);
3061 	} else
3062 		kva = 0;
3063 	KEG_LOCK(keg);
3064 	keg->uk_kva = kva;
3065 	keg->uk_offset = 0;
3066 	keg->uk_maxpages = pages;
3067 #ifdef UMA_MD_SMALL_ALLOC
3068 	keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc;
3069 #else
3070 	keg->uk_allocf = noobj_alloc;
3071 #endif
3072 	keg->uk_flags |= UMA_ZONE_NOFREE;
3073 	KEG_UNLOCK(keg);
3074 
3075 	return (1);
3076 }
3077 
3078 /* See uma.h */
3079 void
3080 uma_prealloc(uma_zone_t zone, int items)
3081 {
3082 	int slabs;
3083 	uma_slab_t slab;
3084 	uma_keg_t keg;
3085 
3086 	keg = zone_first_keg(zone);
3087 	if (keg == NULL)
3088 		return;
3089 	KEG_LOCK(keg);
3090 	slabs = items / keg->uk_ipers;
3091 	if (slabs * keg->uk_ipers < items)
3092 		slabs++;
3093 	while (slabs > 0) {
3094 		slab = keg_alloc_slab(keg, zone, M_WAITOK);
3095 		if (slab == NULL)
3096 			break;
3097 		MPASS(slab->us_keg == keg);
3098 		LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
3099 		slabs--;
3100 	}
3101 	KEG_UNLOCK(keg);
3102 }
3103 
3104 /* See uma.h */
3105 static void
3106 uma_reclaim_locked(bool kmem_danger)
3107 {
3108 
3109 	CTR0(KTR_UMA, "UMA: vm asked us to release pages!");
3110 	sx_assert(&uma_drain_lock, SA_XLOCKED);
3111 	bucket_enable();
3112 	zone_foreach(zone_drain);
3113 	if (vm_page_count_min() || kmem_danger) {
3114 		cache_drain_safe(NULL);
3115 		zone_foreach(zone_drain);
3116 	}
3117 	/*
3118 	 * Some slabs may have been freed, but this zone was visited early in
3119 	 * the pass, so visit it again to free pages that become empty only once
3120 	 * the other zones have been drained.  We have to do the same for buckets.
3121 	 */
3122 	zone_drain(slabzone);
3123 	bucket_zone_drain();
3124 }
3125 
3126 void
3127 uma_reclaim(void)
3128 {
3129 
3130 	sx_xlock(&uma_drain_lock);
3131 	uma_reclaim_locked(false);
3132 	sx_xunlock(&uma_drain_lock);
3133 }
3134 
3135 static int uma_reclaim_needed;
3136 
3137 void
3138 uma_reclaim_wakeup(void)
3139 {
3140 
3141 	uma_reclaim_needed = 1;
3142 	wakeup(&uma_reclaim_needed);
3143 }
3144 
3145 void
3146 uma_reclaim_worker(void *arg __unused)
3147 {
3148 
3149 	sx_xlock(&uma_drain_lock);
3150 	for (;;) {
3151 		sx_sleep(&uma_reclaim_needed, &uma_drain_lock, PVM,
3152 		    "umarcl", 0);
3153 		if (uma_reclaim_needed) {
3154 			uma_reclaim_needed = 0;
3155 			sx_xunlock(&uma_drain_lock);
3156 			EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_KMEM);
3157 			sx_xlock(&uma_drain_lock);
3158 			uma_reclaim_locked(true);
3159 		}
3160 	}
3161 }
3162 
3163 /* See uma.h */
3164 int
3165 uma_zone_exhausted(uma_zone_t zone)
3166 {
3167 	int full;
3168 
3169 	ZONE_LOCK(zone);
3170 	full = (zone->uz_flags & UMA_ZFLAG_FULL);
3171 	ZONE_UNLOCK(zone);
3172 	return (full);
3173 }
3174 
3175 int
3176 uma_zone_exhausted_nolock(uma_zone_t zone)
3177 {
3178 	return (zone->uz_flags & UMA_ZFLAG_FULL);
3179 }
3180 
3181 void *
3182 uma_large_malloc(vm_size_t size, int wait)
3183 {
3184 	void *mem;
3185 	uma_slab_t slab;
3186 	uint8_t flags;
3187 
3188 	slab = zone_alloc_item(slabzone, NULL, wait);
3189 	if (slab == NULL)
3190 		return (NULL);
3191 	mem = page_alloc(NULL, size, &flags, wait);
3192 	if (mem) {
3193 		vsetslab((vm_offset_t)mem, slab);
3194 		slab->us_data = mem;
3195 		slab->us_flags = flags | UMA_SLAB_MALLOC;
3196 		slab->us_size = size;
3197 	} else {
3198 		zone_free_item(slabzone, slab, NULL, SKIP_NONE);
3199 	}
3200 
3201 	return (mem);
3202 }
3203 
3204 void
3205 uma_large_free(uma_slab_t slab)
3206 {
3207 
3208 	page_free(slab->us_data, slab->us_size, slab->us_flags);
3209 	zone_free_item(slabzone, slab, NULL, SKIP_NONE);
3210 }
3211 
3212 static void
3213 uma_zero_item(void *item, uma_zone_t zone)
3214 {
3215 	int i;
3216 
3217 	if (zone->uz_flags & UMA_ZONE_PCPU) {
3218 		CPU_FOREACH(i)
3219 			bzero(zpcpu_get_cpu(item, i), zone->uz_size);
3220 	} else
3221 		bzero(item, zone->uz_size);
3222 }
3223 
3224 void
3225 uma_print_stats(void)
3226 {
3227 	zone_foreach(uma_print_zone);
3228 }
3229 
3230 static void
3231 slab_print(uma_slab_t slab)
3232 {
3233 	printf("slab: keg %p, data %p, freecount %d\n",
3234 		slab->us_keg, slab->us_data, slab->us_freecount);
3235 }
3236 
3237 static void
3238 cache_print(uma_cache_t cache)
3239 {
3240 	printf("alloc: %p(%d), free: %p(%d)\n",
3241 		cache->uc_allocbucket,
3242 		cache->uc_allocbucket?cache->uc_allocbucket->ub_cnt:0,
3243 		cache->uc_freebucket,
3244 		cache->uc_freebucket?cache->uc_freebucket->ub_cnt:0);
3245 }
3246 
3247 static void
3248 uma_print_keg(uma_keg_t keg)
3249 {
3250 	uma_slab_t slab;
3251 
3252 	printf("keg: %s(%p) size %d(%d) flags %#x ipers %d ppera %d "
3253 	    "out %d free %d limit %d\n",
3254 	    keg->uk_name, keg, keg->uk_size, keg->uk_rsize, keg->uk_flags,
3255 	    keg->uk_ipers, keg->uk_ppera,
3256 	    (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free,
3257 	    keg->uk_free, (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers);
3258 	printf("Part slabs:\n");
3259 	LIST_FOREACH(slab, &keg->uk_part_slab, us_link)
3260 		slab_print(slab);
3261 	printf("Free slabs:\n");
3262 	LIST_FOREACH(slab, &keg->uk_free_slab, us_link)
3263 		slab_print(slab);
3264 	printf("Full slabs:\n");
3265 	LIST_FOREACH(slab, &keg->uk_full_slab, us_link)
3266 		slab_print(slab);
3267 }
3268 
3269 void
3270 uma_print_zone(uma_zone_t zone)
3271 {
3272 	uma_cache_t cache;
3273 	uma_klink_t kl;
3274 	int i;
3275 
3276 	printf("zone: %s(%p) size %d flags %#x\n",
3277 	    zone->uz_name, zone, zone->uz_size, zone->uz_flags);
3278 	LIST_FOREACH(kl, &zone->uz_kegs, kl_link)
3279 		uma_print_keg(kl->kl_keg);
3280 	CPU_FOREACH(i) {
3281 		cache = &zone->uz_cpu[i];
3282 		printf("CPU %d Cache:\n", i);
3283 		cache_print(cache);
3284 	}
3285 }
3286 
3287 #ifdef DDB
3288 /*
3289  * Generate statistics across both the zone and its per-CPU caches.  Return
3290  * each requested statistic through the corresponding non-NULL pointer.
3291  *
3292  * Note: does not update the zone statistics, as it can't safely clear the
3293  * per-CPU cache statistic.
3294  *
3295  * XXXRW: Following the uc_allocbucket and uc_freebucket pointers here isn't
3296  * safe from off-CPU; we should modify the caches to track this information
3297  * directly so that we don't have to.
3298  */
3299 static void
3300 uma_zone_sumstat(uma_zone_t z, int *cachefreep, uint64_t *allocsp,
3301     uint64_t *freesp, uint64_t *sleepsp)
3302 {
3303 	uma_cache_t cache;
3304 	uint64_t allocs, frees, sleeps;
3305 	int cachefree, cpu;
3306 
3307 	allocs = frees = sleeps = 0;
3308 	cachefree = 0;
3309 	CPU_FOREACH(cpu) {
3310 		cache = &z->uz_cpu[cpu];
3311 		if (cache->uc_allocbucket != NULL)
3312 			cachefree += cache->uc_allocbucket->ub_cnt;
3313 		if (cache->uc_freebucket != NULL)
3314 			cachefree += cache->uc_freebucket->ub_cnt;
3315 		allocs += cache->uc_allocs;
3316 		frees += cache->uc_frees;
3317 	}
3318 	allocs += z->uz_allocs;
3319 	frees += z->uz_frees;
3320 	sleeps += z->uz_sleeps;
3321 	if (cachefreep != NULL)
3322 		*cachefreep = cachefree;
3323 	if (allocsp != NULL)
3324 		*allocsp = allocs;
3325 	if (freesp != NULL)
3326 		*freesp = frees;
3327 	if (sleepsp != NULL)
3328 		*sleepsp = sleeps;
3329 }
3330 #endif /* DDB */
3331 
3332 static int
3333 sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS)
3334 {
3335 	uma_keg_t kz;
3336 	uma_zone_t z;
3337 	int count;
3338 
3339 	count = 0;
3340 	rw_rlock(&uma_rwlock);
3341 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
3342 		LIST_FOREACH(z, &kz->uk_zones, uz_link)
3343 			count++;
3344 	}
3345 	rw_runlock(&uma_rwlock);
3346 	return (sysctl_handle_int(oidp, &count, 0, req));
3347 }
3348 
3349 static int
3350 sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
3351 {
3352 	struct uma_stream_header ush;
3353 	struct uma_type_header uth;
3354 	struct uma_percpu_stat ups;
3355 	uma_bucket_t bucket;
3356 	struct sbuf sbuf;
3357 	uma_cache_t cache;
3358 	uma_klink_t kl;
3359 	uma_keg_t kz;
3360 	uma_zone_t z;
3361 	uma_keg_t k;
3362 	int count, error, i;
3363 
3364 	error = sysctl_wire_old_buffer(req, 0);
3365 	if (error != 0)
3366 		return (error);
3367 	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
3368 	sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL);
3369 
3370 	count = 0;
3371 	rw_rlock(&uma_rwlock);
3372 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
3373 		LIST_FOREACH(z, &kz->uk_zones, uz_link)
3374 			count++;
3375 	}
3376 
3377 	/*
3378 	 * Insert stream header.
3379 	 */
3380 	bzero(&ush, sizeof(ush));
3381 	ush.ush_version = UMA_STREAM_VERSION;
3382 	ush.ush_maxcpus = (mp_maxid + 1);
3383 	ush.ush_count = count;
3384 	(void)sbuf_bcat(&sbuf, &ush, sizeof(ush));
3385 
3386 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
3387 		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
3388 			bzero(&uth, sizeof(uth));
3389 			ZONE_LOCK(z);
3390 			strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
3391 			uth.uth_align = kz->uk_align;
3392 			uth.uth_size = kz->uk_size;
3393 			uth.uth_rsize = kz->uk_rsize;
3394 			LIST_FOREACH(kl, &z->uz_kegs, kl_link) {
3395 				k = kl->kl_keg;
3396 				uth.uth_maxpages += k->uk_maxpages;
3397 				uth.uth_pages += k->uk_pages;
3398 				uth.uth_keg_free += k->uk_free;
3399 				uth.uth_limit = (k->uk_maxpages / k->uk_ppera)
3400 				    * k->uk_ipers;
3401 			}
3402 
3403 			/*
3404 			 * A zone is secondary if it is not the first entry
3405 			 * on the keg's zone list.
3406 			 */
3407 			if ((z->uz_flags & UMA_ZONE_SECONDARY) &&
3408 			    (LIST_FIRST(&kz->uk_zones) != z))
3409 				uth.uth_zone_flags = UTH_ZONE_SECONDARY;
3410 
3411 			LIST_FOREACH(bucket, &z->uz_buckets, ub_link)
3412 				uth.uth_zone_free += bucket->ub_cnt;
3413 			uth.uth_allocs = z->uz_allocs;
3414 			uth.uth_frees = z->uz_frees;
3415 			uth.uth_fails = z->uz_fails;
3416 			uth.uth_sleeps = z->uz_sleeps;
3417 			(void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
3418 			/*
3419 			 * While it is not normally safe to access the cache
3420 			 * bucket pointers while not on the CPU that owns the
3421 			 * cache, we only allow the pointers to be exchanged
3422 			 * without the zone lock held, not invalidated, so
3423 			 * accept the possible race associated with bucket
3424 			 * exchange during monitoring.
3425 			 */
3426 			for (i = 0; i < (mp_maxid + 1); i++) {
3427 				bzero(&ups, sizeof(ups));
3428 				if (kz->uk_flags & UMA_ZFLAG_INTERNAL)
3429 					goto skip;
3430 				if (CPU_ABSENT(i))
3431 					goto skip;
3432 				cache = &z->uz_cpu[i];
3433 				if (cache->uc_allocbucket != NULL)
3434 					ups.ups_cache_free +=
3435 					    cache->uc_allocbucket->ub_cnt;
3436 				if (cache->uc_freebucket != NULL)
3437 					ups.ups_cache_free +=
3438 					    cache->uc_freebucket->ub_cnt;
3439 				ups.ups_allocs = cache->uc_allocs;
3440 				ups.ups_frees = cache->uc_frees;
3441 skip:
3442 				(void)sbuf_bcat(&sbuf, &ups, sizeof(ups));
3443 			}
3444 			ZONE_UNLOCK(z);
3445 		}
3446 	}
3447 	rw_runlock(&uma_rwlock);
3448 	error = sbuf_finish(&sbuf);
3449 	sbuf_delete(&sbuf);
3450 	return (error);
3451 }
3452 
3453 int
3454 sysctl_handle_uma_zone_max(SYSCTL_HANDLER_ARGS)
3455 {
3456 	uma_zone_t zone = *(uma_zone_t *)arg1;
3457 	int error, max;
3458 
3459 	max = uma_zone_get_max(zone);
3460 	error = sysctl_handle_int(oidp, &max, 0, req);
3461 	if (error || !req->newptr)
3462 		return (error);
3463 
3464 	uma_zone_set_max(zone, max);
3465 
3466 	return (0);
3467 }
3468 
3469 int
3470 sysctl_handle_uma_zone_cur(SYSCTL_HANDLER_ARGS)
3471 {
3472 	uma_zone_t zone = *(uma_zone_t *)arg1;
3473 	int cur;
3474 
3475 	cur = uma_zone_get_cur(zone);
3476 	return (sysctl_handle_int(oidp, &cur, 0, req));
3477 }
3478 
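/*
 * Illustrative sketch (hypothetical names, never compiled): the two handlers
 * above expect arg1 to point at a uma_zone_t variable, which is what they
 * dereference.  The oid placement and descriptions here are assumptions.
 */
#if 0
static void
widget_sysctl_setup(struct sysctl_ctx_list *ctx, struct sysctl_oid *parent)
{

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(parent), OID_AUTO, "widget_max",
	    CTLTYPE_INT | CTLFLAG_RW, &widget_zone, 0,
	    sysctl_handle_uma_zone_max, "I", "Maximum widget items");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(parent), OID_AUTO, "widget_cur",
	    CTLTYPE_INT | CTLFLAG_RD, &widget_zone, 0,
	    sysctl_handle_uma_zone_cur, "I", "Current widget items");
}
#endif
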
3479 #ifdef INVARIANTS
3480 static uma_slab_t
3481 uma_dbg_getslab(uma_zone_t zone, void *item)
3482 {
3483 	uma_slab_t slab;
3484 	uma_keg_t keg;
3485 	uint8_t *mem;
3486 
3487 	mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
3488 	if (zone->uz_flags & UMA_ZONE_VTOSLAB) {
3489 		slab = vtoslab((vm_offset_t)mem);
3490 	} else {
3491 		/*
3492 		 * It is safe to return the slab here even though the
3493 		 * zone is unlocked because the item's allocation state
3494 		 * essentially holds a reference.
3495 		 */
3496 		ZONE_LOCK(zone);
3497 		keg = LIST_FIRST(&zone->uz_kegs)->kl_keg;
3498 		if (keg->uk_flags & UMA_ZONE_HASH)
3499 			slab = hash_sfind(&keg->uk_hash, mem);
3500 		else
3501 			slab = (uma_slab_t)(mem + keg->uk_pgoff);
3502 		ZONE_UNLOCK(zone);
3503 	}
3504 
3505 	return (slab);
3506 }
3507 
3508 /*
3509  * Set up the slab's freei data such that uma_dbg_free can function.
3510  *
3511  */
3512 static void
3513 uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item)
3514 {
3515 	uma_keg_t keg;
3516 	int freei;
3517 
3518 	if (zone_first_keg(zone) == NULL)
3519 		return;
3520 	if (slab == NULL) {
3521 		slab = uma_dbg_getslab(zone, item);
3522 		if (slab == NULL)
3523 			panic("uma: item %p did not belong to zone %s\n",
3524 			    item, zone->uz_name);
3525 	}
3526 	keg = slab->us_keg;
3527 	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
3528 
3529 	if (BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree))
3530 		panic("Duplicate alloc of %p from zone %p(%s) slab %p(%d)\n",
3531 		    item, zone, zone->uz_name, slab, freei);
3532 	BIT_SET_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree);
3533 
3534 	return;
3535 }
3536 
3537 /*
3538  * Verifies freed addresses.  Checks for alignment, valid slab membership
3539  * and duplicate frees.
3540  *
3541  */
3542 static void
3543 uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
3544 {
3545 	uma_keg_t keg;
3546 	int freei;
3547 
3548 	if (zone_first_keg(zone) == NULL)
3549 		return;
3550 	if (slab == NULL) {
3551 		slab = uma_dbg_getslab(zone, item);
3552 		if (slab == NULL)
3553 			panic("uma: Freed item %p did not belong to zone %s\n",
3554 			    item, zone->uz_name);
3555 	}
3556 	keg = slab->us_keg;
3557 	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
3558 
3559 	if (freei >= keg->uk_ipers)
3560 		panic("Invalid free of %p from zone %p(%s) slab %p(%d)\n",
3561 		    item, zone, zone->uz_name, slab, freei);
3562 
3563 	if (((freei * keg->uk_rsize) + slab->us_data) != item)
3564 		panic("Unaligned free of %p from zone %p(%s) slab %p(%d)\n",
3565 		    item, zone, zone->uz_name, slab, freei);
3566 
3567 	if (!BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree))
3568 		panic("Duplicate free of %p from zone %p(%s) slab %p(%d)\n",
3569 		    item, zone, zone->uz_name, slab, freei);
3570 
3571 	BIT_CLR_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree);
3572 }
3573 #endif /* INVARIANTS */
3574 
3575 #ifdef DDB
3576 DB_SHOW_COMMAND(uma, db_show_uma)
3577 {
3578 	uint64_t allocs, frees, sleeps;
3579 	uma_bucket_t bucket;
3580 	uma_keg_t kz;
3581 	uma_zone_t z;
3582 	int cachefree;
3583 
3584 	db_printf("%18s %8s %8s %8s %12s %8s %8s\n", "Zone", "Size", "Used",
3585 	    "Free", "Requests", "Sleeps", "Bucket");
3586 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
3587 		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
3588 			if (kz->uk_flags & UMA_ZFLAG_INTERNAL) {
3589 				allocs = z->uz_allocs;
3590 				frees = z->uz_frees;
3591 				sleeps = z->uz_sleeps;
3592 				cachefree = 0;
3593 			} else
3594 				uma_zone_sumstat(z, &cachefree, &allocs,
3595 				    &frees, &sleeps);
3596 			if (!((z->uz_flags & UMA_ZONE_SECONDARY) &&
3597 			    (LIST_FIRST(&kz->uk_zones) != z)))
3598 				cachefree += kz->uk_free;
3599 			LIST_FOREACH(bucket, &z->uz_buckets, ub_link)
3600 				cachefree += bucket->ub_cnt;
3601 			db_printf("%18s %8ju %8jd %8d %12ju %8ju %8u\n",
3602 			    z->uz_name, (uintmax_t)kz->uk_size,
3603 			    (intmax_t)(allocs - frees), cachefree,
3604 			    (uintmax_t)allocs, sleeps, z->uz_count);
3605 			if (db_pager_quit)
3606 				return;
3607 		}
3608 	}
3609 }
3610 
3611 DB_SHOW_COMMAND(umacache, db_show_umacache)
3612 {
3613 	uint64_t allocs, frees;
3614 	uma_bucket_t bucket;
3615 	uma_zone_t z;
3616 	int cachefree;
3617 
3618 	db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free",
3619 	    "Requests", "Bucket");
3620 	LIST_FOREACH(z, &uma_cachezones, uz_link) {
3621 		uma_zone_sumstat(z, &cachefree, &allocs, &frees, NULL);
3622 		LIST_FOREACH(bucket, &z->uz_buckets, ub_link)
3623 			cachefree += bucket->ub_cnt;
3624 		db_printf("%18s %8ju %8jd %8d %12ju %8u\n",
3625 		    z->uz_name, (uintmax_t)z->uz_size,
3626 		    (intmax_t)(allocs - frees), cachefree,
3627 		    (uintmax_t)allocs, z->uz_count);
3628 		if (db_pager_quit)
3629 			return;
3630 	}
3631 }
3632 #endif	/* DDB */
3633