xref: /freebsd/sys/vm/uma_core.c (revision 9af6c78cd43b18e169f10802142c61638bd62bed)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson <jeff@FreeBSD.org>
5  * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
6  * Copyright (c) 2004-2006 Robert N. M. Watson
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice unmodified, this list of conditions, and the following
14  *    disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /*
32  * uma_core.c  Implementation of the Universal Memory Allocator
33  *
34  * This allocator is intended to replace the multitude of similar object caches
35  * in the standard FreeBSD kernel.  The intent is to be flexible as well as
36  * efficient.  A primary design goal is to return unused memory to the rest of
37  * the system.  This will make the system as a whole more flexible due to the
38  * ability to move memory to subsystems which most need it instead of leaving
39  * pools of reserved memory unused.
40  *
41  * The basic ideas stem from similar slab/zone based allocators whose algorithms
42  * are well known.
43  *
44  */
45 
46 /*
47  * TODO:
48  *	- Improve memory usage for large allocations
49  *	- Investigate cache size adjustments
50  */
51 
52 #include <sys/cdefs.h>
53 __FBSDID("$FreeBSD$");
54 
55 #include "opt_ddb.h"
56 #include "opt_param.h"
57 #include "opt_vm.h"
58 
59 #include <sys/param.h>
60 #include <sys/systm.h>
61 #include <sys/bitset.h>
62 #include <sys/domainset.h>
63 #include <sys/eventhandler.h>
64 #include <sys/kernel.h>
65 #include <sys/types.h>
66 #include <sys/limits.h>
67 #include <sys/queue.h>
68 #include <sys/malloc.h>
69 #include <sys/ktr.h>
70 #include <sys/lock.h>
71 #include <sys/sysctl.h>
72 #include <sys/mutex.h>
73 #include <sys/proc.h>
74 #include <sys/random.h>
75 #include <sys/rwlock.h>
76 #include <sys/sbuf.h>
77 #include <sys/sched.h>
78 #include <sys/smp.h>
79 #include <sys/taskqueue.h>
80 #include <sys/vmmeter.h>
81 
82 #include <vm/vm.h>
83 #include <vm/vm_domainset.h>
84 #include <vm/vm_object.h>
85 #include <vm/vm_page.h>
86 #include <vm/vm_pageout.h>
87 #include <vm/vm_param.h>
88 #include <vm/vm_phys.h>
89 #include <vm/vm_pagequeue.h>
90 #include <vm/vm_map.h>
91 #include <vm/vm_kern.h>
92 #include <vm/vm_extern.h>
93 #include <vm/uma.h>
94 #include <vm/uma_int.h>
95 #include <vm/uma_dbg.h>
96 
97 #include <ddb/ddb.h>
98 
99 #ifdef DEBUG_MEMGUARD
100 #include <vm/memguard.h>
101 #endif
102 
103 /*
104  * This is the zone and keg from which all zones are spawned.
105  */
106 static uma_zone_t kegs;
107 static uma_zone_t zones;
108 
109 /* This is the zone from which all offpage uma_slab_ts are allocated. */
110 static uma_zone_t slabzone;
111 
112 /*
113  * The initial hash tables come out of this zone so they can be allocated
114  * prior to malloc coming up.
115  */
116 static uma_zone_t hashzone;
117 
118 /* The boot-time adjusted value for cache line alignment. */
119 int uma_align_cache = 64 - 1;
120 
121 static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");
122 
123 /*
124  * Are we allowed to allocate buckets?
125  */
126 static int bucketdisable = 1;
127 
128 /* Linked list of all kegs in the system */
129 static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);
130 
131 /* Linked list of all cache-only zones in the system */
132 static LIST_HEAD(,uma_zone) uma_cachezones =
133     LIST_HEAD_INITIALIZER(uma_cachezones);
134 
135 /* This RW lock protects the keg list */
136 static struct rwlock_padalign __exclusive_cache_line uma_rwlock;
137 
138 /*
139  * Pointer and counter to pool of pages, that is preallocated at
140  * startup to bootstrap UMA.
141  */
142 static char *bootmem;
143 static int boot_pages;
144 
145 static struct sx uma_reclaim_lock;
146 
147 /*
148  * kmem soft limit, initialized by uma_set_limit().  Ensure that early
149  * allocations don't trigger a wakeup of the reclaim thread.
150  */
151 static unsigned long uma_kmem_limit = LONG_MAX;
152 SYSCTL_ULONG(_vm, OID_AUTO, uma_kmem_limit, CTLFLAG_RD, &uma_kmem_limit, 0,
153     "UMA kernel memory soft limit");
154 static unsigned long uma_kmem_total;
155 SYSCTL_ULONG(_vm, OID_AUTO, uma_kmem_total, CTLFLAG_RD, &uma_kmem_total, 0,
156     "UMA kernel memory usage");
157 
158 /* Is the VM done starting up? */
159 static enum { BOOT_COLD = 0, BOOT_STRAPPED, BOOT_PAGEALLOC, BOOT_BUCKETS,
160     BOOT_RUNNING } booted = BOOT_COLD;
161 
162 /*
163  * This is the handle used to schedule events that need to happen
164  * outside of the allocation fast path.
165  */
166 static struct callout uma_callout;
167 #define	UMA_TIMEOUT	20		/* Seconds for callout interval. */
168 
169 /*
170  * This structure is passed as the zone ctor arg so that I don't have to create
171  * a special allocation function just for zones.
172  */
173 struct uma_zctor_args {
174 	const char *name;
175 	size_t size;
176 	uma_ctor ctor;
177 	uma_dtor dtor;
178 	uma_init uminit;
179 	uma_fini fini;
180 	uma_import import;
181 	uma_release release;
182 	void *arg;
183 	uma_keg_t keg;
184 	int align;
185 	uint32_t flags;
186 };
187 
188 struct uma_kctor_args {
189 	uma_zone_t zone;
190 	size_t size;
191 	uma_init uminit;
192 	uma_fini fini;
193 	int align;
194 	uint32_t flags;
195 };
196 
197 struct uma_bucket_zone {
198 	uma_zone_t	ubz_zone;
199 	char		*ubz_name;
200 	int		ubz_entries;	/* Number of items it can hold. */
201 	int		ubz_maxsize;	/* Maximum allocation size per-item. */
202 };
203 
204 /*
205  * Compute the actual number of bucket entries to pack them into
206  * power-of-two sizes for more efficient space utilization.
207  */
208 #define	BUCKET_SIZE(n)						\
209     (((sizeof(void *) * (n)) - sizeof(struct uma_bucket)) / sizeof(void *))
210 
211 #define	BUCKET_MAX	BUCKET_SIZE(256)
212 #define	BUCKET_MIN	BUCKET_SIZE(4)
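
/*
 * A worked example of BUCKET_SIZE(), assuming 8-byte pointers and, purely
 * for illustration, a 24-byte struct uma_bucket header: BUCKET_SIZE(4) =
 * (4 * 8 - 24) / 8 = 1, so the "4 Bucket" zone holds a single item pointer
 * and its header plus pointer array fill exactly 4 * 8 = 32 bytes.  In
 * general BUCKET_SIZE(n) picks the entry count so that a bucket occupies
 * exactly n pointer-sized words.
 */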
213 
214 struct uma_bucket_zone bucket_zones[] = {
215 	{ NULL, "4 Bucket", BUCKET_SIZE(4), 4096 },
216 	{ NULL, "6 Bucket", BUCKET_SIZE(6), 3072 },
217 	{ NULL, "8 Bucket", BUCKET_SIZE(8), 2048 },
218 	{ NULL, "12 Bucket", BUCKET_SIZE(12), 1536 },
219 	{ NULL, "16 Bucket", BUCKET_SIZE(16), 1024 },
220 	{ NULL, "32 Bucket", BUCKET_SIZE(32), 512 },
221 	{ NULL, "64 Bucket", BUCKET_SIZE(64), 256 },
222 	{ NULL, "128 Bucket", BUCKET_SIZE(128), 128 },
223 	{ NULL, "256 Bucket", BUCKET_SIZE(256), 64 },
224 	{ NULL, NULL, 0}
225 };
226 
227 /*
228  * Flags and enumerations to be passed to internal functions.
229  */
230 enum zfreeskip {
231 	SKIP_NONE =	0,
232 	SKIP_CNT =	0x00000001,
233 	SKIP_DTOR =	0x00010000,
234 	SKIP_FINI =	0x00020000,
235 };
236 
237 /* Prototypes. */
238 
239 int	uma_startup_count(int);
240 void	uma_startup(void *, int);
241 void	uma_startup1(void);
242 void	uma_startup2(void);
243 
244 static void *noobj_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
245 static void *page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
246 static void *pcpu_page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
247 static void *startup_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
248 static void page_free(void *, vm_size_t, uint8_t);
249 static void pcpu_page_free(void *, vm_size_t, uint8_t);
250 static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int, int, int);
251 static void cache_drain(uma_zone_t);
252 static void bucket_drain(uma_zone_t, uma_bucket_t);
253 static void bucket_cache_reclaim(uma_zone_t zone, bool);
254 static int keg_ctor(void *, int, void *, int);
255 static void keg_dtor(void *, int, void *);
256 static int zone_ctor(void *, int, void *, int);
257 static void zone_dtor(void *, int, void *);
258 static int zero_init(void *, int, int);
259 static void keg_small_init(uma_keg_t keg);
260 static void keg_large_init(uma_keg_t keg);
261 static void zone_foreach(void (*zfunc)(uma_zone_t));
262 static void zone_timeout(uma_zone_t zone);
263 static int hash_alloc(struct uma_hash *, u_int);
264 static int hash_expand(struct uma_hash *, struct uma_hash *);
265 static void hash_free(struct uma_hash *hash);
266 static void uma_timeout(void *);
267 static void uma_startup3(void);
268 static void *zone_alloc_item(uma_zone_t, void *, int, int);
269 static void *zone_alloc_item_locked(uma_zone_t, void *, int, int);
270 static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip);
271 static void bucket_enable(void);
272 static void bucket_init(void);
273 static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int);
274 static void bucket_free(uma_zone_t zone, uma_bucket_t, void *);
275 static void bucket_zone_drain(void);
276 static uma_bucket_t zone_alloc_bucket(uma_zone_t, void *, int, int, int);
277 static uma_slab_t zone_fetch_slab(uma_zone_t, uma_keg_t, int, int);
278 static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
279 static void slab_free_item(uma_zone_t zone, uma_slab_t slab, void *item);
280 static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
281     uma_fini fini, int align, uint32_t flags);
282 static int zone_import(uma_zone_t, void **, int, int, int);
283 static void zone_release(uma_zone_t, void **, int);
284 static void uma_zero_item(void *, uma_zone_t);
285 
286 void uma_print_zone(uma_zone_t);
287 void uma_print_stats(void);
288 static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
289 static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);
290 
291 #ifdef INVARIANTS
292 static bool uma_dbg_kskip(uma_keg_t keg, void *mem);
293 static bool uma_dbg_zskip(uma_zone_t zone, void *mem);
294 static void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item);
295 static void uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item);
296 
297 static SYSCTL_NODE(_vm, OID_AUTO, debug, CTLFLAG_RD, 0,
298     "Memory allocation debugging");
299 
300 static u_int dbg_divisor = 1;
301 SYSCTL_UINT(_vm_debug, OID_AUTO, divisor,
302     CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &dbg_divisor, 0,
303     "Debug & thrash every nth item in the memory allocator");
304 
305 static counter_u64_t uma_dbg_cnt = EARLY_COUNTER;
306 static counter_u64_t uma_skip_cnt = EARLY_COUNTER;
307 SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, trashed, CTLFLAG_RD,
308     &uma_dbg_cnt, "memory items debugged");
309 SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, skipped, CTLFLAG_RD,
310     &uma_skip_cnt, "memory items skipped, not debugged");
311 #endif
312 
313 SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);
314 
315 SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLTYPE_INT,
316     0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones");
317 
318 SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
319     0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");
320 
321 static int zone_warnings = 1;
322 SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RWTUN, &zone_warnings, 0,
323     "Warn when a UMA zone becomes full");
324 
325 /* Adjust bytes under management by UMA. */
326 static inline void
327 uma_total_dec(unsigned long size)
328 {
329 
330 	atomic_subtract_long(&uma_kmem_total, size);
331 }
332 
333 static inline void
334 uma_total_inc(unsigned long size)
335 {
336 
337 	if (atomic_fetchadd_long(&uma_kmem_total, size) > uma_kmem_limit)
338 		uma_reclaim_wakeup();
339 }
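
/*
 * Note that atomic_fetchadd_long() returns the value of uma_kmem_total from
 * before the addition, so the reclaim thread is woken only once the running
 * total already exceeded the soft limit prior to this allocation being
 * counted.
 */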
340 
341 /*
342  * This routine checks to see whether or not it's safe to enable buckets.
343  */
344 static void
345 bucket_enable(void)
346 {
347 	bucketdisable = vm_page_count_min();
348 }
349 
350 /*
351  * Initialize bucket_zones, the array of zones of buckets of various sizes.
352  *
353  * For each zone, calculate the memory required for each bucket, consisting
354  * of the header and an array of pointers.
355  */
356 static void
357 bucket_init(void)
358 {
359 	struct uma_bucket_zone *ubz;
360 	int size;
361 
362 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) {
363 		size = roundup(sizeof(struct uma_bucket), sizeof(void *));
364 		size += sizeof(void *) * ubz->ubz_entries;
365 		ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
366 		    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
367 		    UMA_ZONE_MTXCLASS | UMA_ZFLAG_BUCKET | UMA_ZONE_NUMA);
368 	}
369 }
370 
371 /*
372  * Given a desired number of entries for a bucket, return the zone from which
373  * to allocate the bucket.
374  */
375 static struct uma_bucket_zone *
376 bucket_zone_lookup(int entries)
377 {
378 	struct uma_bucket_zone *ubz;
379 
380 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
381 		if (ubz->ubz_entries >= entries)
382 			return (ubz);
383 	ubz--;
384 	return (ubz);
385 }
386 
387 static int
388 bucket_select(int size)
389 {
390 	struct uma_bucket_zone *ubz;
391 
392 	ubz = &bucket_zones[0];
393 	if (size > ubz->ubz_maxsize)
394 		return MAX((ubz->ubz_maxsize * ubz->ubz_entries) / size, 1);
395 
396 	for (; ubz->ubz_entries != 0; ubz++)
397 		if (ubz->ubz_maxsize < size)
398 			break;
399 	ubz--;
400 	return (ubz->ubz_entries);
401 }
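
/*
 * For example, with the bucket_zones table above a 600-byte item walks the
 * table until "32 Bucket" (ubz_maxsize 512) is too small, backs up one
 * entry, and returns the "16 Bucket" entry count: the largest default
 * bucket whose per-item size limit (1024) still covers the item.  Items
 * larger than the 4096-byte limit of the smallest zone take the first
 * branch and get a proportionally scaled-down count instead.
 */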
402 
403 static uma_bucket_t
404 bucket_alloc(uma_zone_t zone, void *udata, int flags)
405 {
406 	struct uma_bucket_zone *ubz;
407 	uma_bucket_t bucket;
408 
409 	/*
410 	 * This is to stop us from allocating per cpu buckets while we're
411 	 * running out of vm.boot_pages.  Otherwise, we would exhaust the
412 	 * boot pages.  This also prevents us from allocating buckets in
413 	 * low memory situations.
414 	 */
415 	if (bucketdisable)
416 		return (NULL);
417 	/*
418 	 * To limit bucket recursion we store the original zone flags
419 	 * in a cookie passed via zalloc_arg/zfree_arg.  This allows the
420 	 * NOVM flag to persist even through deep recursions.  We also
421 	 * store ZFLAG_BUCKET once we have recursed attempting to allocate
422 	 * a bucket for a bucket zone so we do not allow infinite bucket
423 	 * recursion.  This cookie will even persist to frees of unused
424 	 * buckets via the allocation path or bucket allocations in the
425 	 * free path.
426 	 */
427 	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
428 		udata = (void *)(uintptr_t)zone->uz_flags;
429 	else {
430 		if ((uintptr_t)udata & UMA_ZFLAG_BUCKET)
431 			return (NULL);
432 		udata = (void *)((uintptr_t)udata | UMA_ZFLAG_BUCKET);
433 	}
434 	if ((uintptr_t)udata & UMA_ZFLAG_CACHEONLY)
435 		flags |= M_NOVM;
436 	ubz = bucket_zone_lookup(zone->uz_count);
437 	if (ubz->ubz_zone == zone && (ubz + 1)->ubz_entries != 0)
438 		ubz++;
439 	bucket = uma_zalloc_arg(ubz->ubz_zone, udata, flags);
440 	if (bucket) {
441 #ifdef INVARIANTS
442 		bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
443 #endif
444 		bucket->ub_cnt = 0;
445 		bucket->ub_entries = ubz->ubz_entries;
446 	}
447 
448 	return (bucket);
449 }
450 
451 static void
452 bucket_free(uma_zone_t zone, uma_bucket_t bucket, void *udata)
453 {
454 	struct uma_bucket_zone *ubz;
455 
456 	KASSERT(bucket->ub_cnt == 0,
457 	    ("bucket_free: Freeing a non free bucket."));
458 	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
459 		udata = (void *)(uintptr_t)zone->uz_flags;
460 	ubz = bucket_zone_lookup(bucket->ub_entries);
461 	uma_zfree_arg(ubz->ubz_zone, bucket, udata);
462 }
463 
464 static void
465 bucket_zone_drain(void)
466 {
467 	struct uma_bucket_zone *ubz;
468 
469 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
470 		uma_zone_reclaim(ubz->ubz_zone, UMA_RECLAIM_DRAIN);
471 }
472 
473 /*
474  * Attempt to satisfy an allocation by retrieving a full bucket from one of the
475  * zone's caches.
476  */
477 static uma_bucket_t
478 zone_fetch_bucket(uma_zone_t zone, uma_zone_domain_t zdom)
479 {
480 	uma_bucket_t bucket;
481 
482 	ZONE_LOCK_ASSERT(zone);
483 
484 	if ((bucket = TAILQ_FIRST(&zdom->uzd_buckets)) != NULL) {
485 		MPASS(zdom->uzd_nitems >= bucket->ub_cnt);
486 		TAILQ_REMOVE(&zdom->uzd_buckets, bucket, ub_link);
487 		zdom->uzd_nitems -= bucket->ub_cnt;
488 		if (zdom->uzd_imin > zdom->uzd_nitems)
489 			zdom->uzd_imin = zdom->uzd_nitems;
490 		zone->uz_bkt_count -= bucket->ub_cnt;
491 	}
492 	return (bucket);
493 }
494 
495 /*
496  * Insert a full bucket into the specified cache.  The "ws" parameter indicates
497  * whether the bucket's contents should be counted as part of the zone's working
498  * set.
499  */
500 static void
501 zone_put_bucket(uma_zone_t zone, uma_zone_domain_t zdom, uma_bucket_t bucket,
502     const bool ws)
503 {
504 
505 	ZONE_LOCK_ASSERT(zone);
506 	KASSERT(!ws || zone->uz_bkt_count < zone->uz_bkt_max,
507 	    ("%s: zone %p overflow", __func__, zone));
508 
509 	if (ws)
510 		TAILQ_INSERT_HEAD(&zdom->uzd_buckets, bucket, ub_link);
511 	else
512 		TAILQ_INSERT_TAIL(&zdom->uzd_buckets, bucket, ub_link);
513 	zdom->uzd_nitems += bucket->ub_cnt;
514 	if (ws && zdom->uzd_imax < zdom->uzd_nitems)
515 		zdom->uzd_imax = zdom->uzd_nitems;
516 	zone->uz_bkt_count += bucket->ub_cnt;
517 }
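
/*
 * The head/tail choice above pairs with the cache's consumers:
 * zone_fetch_bucket() always takes from the head of the queue, so
 * working-set buckets inserted there are reused first, while
 * bucket_cache_reclaim() frees from the tail, so buckets excluded from the
 * working set are the first to be reclaimed.
 */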
518 
519 static void
520 zone_log_warning(uma_zone_t zone)
521 {
522 	static const struct timeval warninterval = { 300, 0 };
523 
524 	if (!zone_warnings || zone->uz_warning == NULL)
525 		return;
526 
527 	if (ratecheck(&zone->uz_ratecheck, &warninterval))
528 		printf("[zone: %s] %s\n", zone->uz_name, zone->uz_warning);
529 }
530 
531 static inline void
532 zone_maxaction(uma_zone_t zone)
533 {
534 
535 	if (zone->uz_maxaction.ta_func != NULL)
536 		taskqueue_enqueue(taskqueue_thread, &zone->uz_maxaction);
537 }
538 
539 /*
540  * Routine called from a callout to fire off some time interval based
541  * calculations.  (stats, hash size, etc.)
542  *
543  * Arguments:
544  *	arg   Unused
545  *
546  * Returns:
547  *	Nothing
548  */
549 static void
550 uma_timeout(void *unused)
551 {
552 	bucket_enable();
553 	zone_foreach(zone_timeout);
554 
555 	/* Reschedule this event */
556 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
557 }
558 
559 /*
560  * Update the working set size estimate for the zone's bucket cache.
561  * The constants chosen here are somewhat arbitrary.  With an update period of
562  * 20s (UMA_TIMEOUT), this estimate is dominated by zone activity over the
563  * last 100s.
564  */
565 static void
566 zone_domain_update_wss(uma_zone_domain_t zdom)
567 {
568 	long wss;
569 
570 	MPASS(zdom->uzd_imax >= zdom->uzd_imin);
571 	wss = zdom->uzd_imax - zdom->uzd_imin;
572 	zdom->uzd_imax = zdom->uzd_imin = zdom->uzd_nitems;
573 	zdom->uzd_wss = (4 * wss + zdom->uzd_wss) / 5;
574 }
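
/*
 * A quick check of the decay rate above: the previous estimate keeps a 1/5
 * weight at each update, so after three 20-second periods a past spike
 * contributes only (1/5)^3, or 0.8%, of its original magnitude, which is
 * why the estimate is effectively driven by the last few intervals of
 * activity.
 */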
575 
576 /*
577  * Routine to perform timeout driven calculations.  This expands the
578  * hashes and does per cpu statistics aggregation.
579  *
580  *  Returns nothing.
581  */
582 static void
583 zone_timeout(uma_zone_t zone)
584 {
585 	uma_keg_t keg;
586 	u_int slabs;
587 
588 	if ((zone->uz_flags & UMA_ZONE_HASH) == 0)
589 		goto update_wss;
590 
591 	keg = zone->uz_keg;
592 	KEG_LOCK(keg);
593 	/*
594 	 * Expand the keg hash table.
595 	 *
596 	 * This is done if the number of slabs is larger than the hash size.
597 	 * What I'm trying to do here is eliminate collisions entirely.  This
598 	 * may be a little aggressive.  Should I allow for two collisions max?
599 	 */
600 	if (keg->uk_flags & UMA_ZONE_HASH &&
601 	    (slabs = keg->uk_pages / keg->uk_ppera) >
602 	     keg->uk_hash.uh_hashsize) {
603 		struct uma_hash newhash;
604 		struct uma_hash oldhash;
605 		int ret;
606 
607 		/*
608 		 * This is so involved because allocating and freeing
609 		 * while the keg lock is held will lead to deadlock.
610 		 * I have to do everything in stages and check for
611 		 * races.
612 		 */
613 		KEG_UNLOCK(keg);
614 		ret = hash_alloc(&newhash, 1 << fls(slabs));
615 		KEG_LOCK(keg);
616 		if (ret) {
617 			if (hash_expand(&keg->uk_hash, &newhash)) {
618 				oldhash = keg->uk_hash;
619 				keg->uk_hash = newhash;
620 			} else
621 				oldhash = newhash;
622 
623 			KEG_UNLOCK(keg);
624 			hash_free(&oldhash);
625 			return;
626 		}
627 	}
628 	KEG_UNLOCK(keg);
629 
630 update_wss:
631 	ZONE_LOCK(zone);
632 	for (int i = 0; i < vm_ndomains; i++)
633 		zone_domain_update_wss(&zone->uz_domain[i]);
634 	ZONE_UNLOCK(zone);
635 }
636 
637 /*
638  * Allocate and zero fill the next sized hash table from the appropriate
639  * backing store.
640  *
641  * Arguments:
642  *	hash  A new hash structure to fill in for a table of the given size
643  *
644  * Returns:
645  *	1 on success and 0 on failure.
646  */
647 static int
648 hash_alloc(struct uma_hash *hash, u_int size)
649 {
650 	size_t alloc;
651 
652 	KASSERT(powerof2(size), ("hash size must be power of 2"));
653 	if (size > UMA_HASH_SIZE_INIT)  {
654 		hash->uh_hashsize = size;
655 		alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
656 		hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
657 		    M_UMAHASH, M_NOWAIT);
658 	} else {
659 		alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
660 		hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
661 		    UMA_ANYDOMAIN, M_WAITOK);
662 		hash->uh_hashsize = UMA_HASH_SIZE_INIT;
663 	}
664 	if (hash->uh_slab_hash) {
665 		bzero(hash->uh_slab_hash, alloc);
666 		hash->uh_hashmask = hash->uh_hashsize - 1;
667 		return (1);
668 	}
669 
670 	return (0);
671 }
672 
673 /*
674  * Expands the hash table for HASH zones.  This is done from zone_timeout
675  * to reduce collisions.  This must not be done in the regular allocation
676  * path, otherwise, we can recurse on the vm while allocating pages.
677  *
678  * Arguments:
679  *	oldhash  The hash you want to expand
680  *	newhash  The hash structure for the new table
681  *
682  * Returns:
683  *	1 if the slabs were rehashed into the new table, 0 if the new
684  *	table was missing or was not larger than the old one.
685  *
686  */
687 static int
688 hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
689 {
690 	uma_slab_t slab;
691 	u_int hval;
692 	u_int idx;
693 
694 	if (!newhash->uh_slab_hash)
695 		return (0);
696 
697 	if (oldhash->uh_hashsize >= newhash->uh_hashsize)
698 		return (0);
699 
700 	/*
701 	 * I need to investigate hash algorithms for resizing without a
702 	 * full rehash.
703 	 */
704 
705 	for (idx = 0; idx < oldhash->uh_hashsize; idx++)
706 		while (!SLIST_EMPTY(&oldhash->uh_slab_hash[idx])) {
707 			slab = SLIST_FIRST(&oldhash->uh_slab_hash[idx]);
708 			SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[idx], us_hlink);
709 			hval = UMA_HASH(newhash, slab->us_data);
710 			SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
711 			    slab, us_hlink);
712 		}
713 
714 	return (1);
715 }
716 
717 /*
718  * Free the hash bucket to the appropriate backing store.
719  *
720  * Arguments:
721  *	hash  The hash structure whose slab hash table we're freeing;
722  *	      uh_hashsize selects the backing store it is returned to
723  *
724  * Returns:
725  *	Nothing
726  */
727 static void
728 hash_free(struct uma_hash *hash)
729 {
730 	if (hash->uh_slab_hash == NULL)
731 		return;
732 	if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
733 		zone_free_item(hashzone, hash->uh_slab_hash, NULL, SKIP_NONE);
734 	else
735 		free(hash->uh_slab_hash, M_UMAHASH);
736 }
737 
738 /*
739  * Frees all outstanding items in a bucket
740  *
741  * Arguments:
742  *	zone   The zone to free to, must be unlocked.
743  *	bucket The free/alloc bucket with items, cpu queue must be locked.
744  *
745  * Returns:
746  *	Nothing
747  */
748 
749 static void
750 bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
751 {
752 	int i;
753 
754 	if (bucket == NULL)
755 		return;
756 
757 	if (zone->uz_fini)
758 		for (i = 0; i < bucket->ub_cnt; i++)
759 			zone->uz_fini(bucket->ub_bucket[i], zone->uz_size);
760 	zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt);
761 	if (zone->uz_max_items > 0) {
762 		ZONE_LOCK(zone);
763 		zone->uz_items -= bucket->ub_cnt;
764 		if (zone->uz_sleepers && zone->uz_items < zone->uz_max_items)
765 			wakeup_one(zone);
766 		ZONE_UNLOCK(zone);
767 	}
768 	bucket->ub_cnt = 0;
769 }
770 
771 /*
772  * Drains the per cpu caches for a zone.
773  *
774  * NOTE: This may only be called while the zone is being torn down, and not
775  * during normal operation.  This is necessary in order that we do not have
776  * to migrate CPUs to drain the per-CPU caches.
777  *
778  * Arguments:
779  *	zone     The zone to drain, must be unlocked.
780  *
781  * Returns:
782  *	Nothing
783  */
784 static void
785 cache_drain(uma_zone_t zone)
786 {
787 	uma_cache_t cache;
788 	int cpu;
789 
790 	/*
791 	 * XXX: It is safe to not lock the per-CPU caches, because we're
792 	 * tearing down the zone anyway.  I.e., there will be no further use
793 	 * of the caches at this point.
794 	 *
795 	 * XXX: It would be good to be able to assert that the zone is being
796 	 * torn down to prevent improper use of cache_drain().
797 	 *
798 	 * XXX: We lock the zone before passing into bucket_cache_reclaim() as
799 	 * it is used elsewhere.  Should the tear-down path be made special
800 	 * there in some form?
801 	 */
802 	CPU_FOREACH(cpu) {
803 		cache = &zone->uz_cpu[cpu];
804 		bucket_drain(zone, cache->uc_allocbucket);
805 		if (cache->uc_allocbucket != NULL)
806 			bucket_free(zone, cache->uc_allocbucket, NULL);
807 		cache->uc_allocbucket = NULL;
808 		bucket_drain(zone, cache->uc_freebucket);
809 		if (cache->uc_freebucket != NULL)
810 			bucket_free(zone, cache->uc_freebucket, NULL);
811 		cache->uc_freebucket = NULL;
812 		bucket_drain(zone, cache->uc_crossbucket);
813 		if (cache->uc_crossbucket != NULL)
814 			bucket_free(zone, cache->uc_crossbucket, NULL);
815 		cache->uc_crossbucket = NULL;
816 	}
817 	ZONE_LOCK(zone);
818 	bucket_cache_reclaim(zone, true);
819 	ZONE_UNLOCK(zone);
820 }
821 
822 static void
823 cache_shrink(uma_zone_t zone)
824 {
825 
826 	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
827 		return;
828 
829 	ZONE_LOCK(zone);
830 	zone->uz_count = (zone->uz_count_min + zone->uz_count) / 2;
831 	ZONE_UNLOCK(zone);
832 }
833 
834 static void
835 cache_drain_safe_cpu(uma_zone_t zone)
836 {
837 	uma_cache_t cache;
838 	uma_bucket_t b1, b2, b3;
839 	int domain;
840 
841 	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
842 		return;
843 
844 	b1 = b2 = b3 = NULL;
845 	ZONE_LOCK(zone);
846 	critical_enter();
847 	if (zone->uz_flags & UMA_ZONE_NUMA)
848 		domain = PCPU_GET(domain);
849 	else
850 		domain = 0;
851 	cache = &zone->uz_cpu[curcpu];
852 	if (cache->uc_allocbucket) {
853 		if (cache->uc_allocbucket->ub_cnt != 0)
854 			zone_put_bucket(zone, &zone->uz_domain[domain],
855 			    cache->uc_allocbucket, false);
856 		else
857 			b1 = cache->uc_allocbucket;
858 		cache->uc_allocbucket = NULL;
859 	}
860 	if (cache->uc_freebucket) {
861 		if (cache->uc_freebucket->ub_cnt != 0)
862 			zone_put_bucket(zone, &zone->uz_domain[domain],
863 			    cache->uc_freebucket, false);
864 		else
865 			b2 = cache->uc_freebucket;
866 		cache->uc_freebucket = NULL;
867 	}
868 	b3 = cache->uc_crossbucket;
869 	cache->uc_crossbucket = NULL;
870 	critical_exit();
871 	ZONE_UNLOCK(zone);
872 	if (b1)
873 		bucket_free(zone, b1, NULL);
874 	if (b2)
875 		bucket_free(zone, b2, NULL);
876 	if (b3) {
877 		bucket_drain(zone, b3);
878 		bucket_free(zone, b3, NULL);
879 	}
880 }
881 
882 /*
883  * Safely drain the per-CPU caches of a zone (or of all zones) into the
884  * zone bucket caches.  This is an expensive call because it needs to bind
885  * to each CPU one by one and enter a critical section on each of them in
886  * order to safely access their cache buckets.
887  * The zone lock must not be held when calling this function.
888  */
889 static void
890 pcpu_cache_drain_safe(uma_zone_t zone)
891 {
892 	int cpu;
893 
894 	/*
895 	 * Politely shrinking the bucket sizes was not enough; shrink aggressively.
896 	 */
897 	if (zone)
898 		cache_shrink(zone);
899 	else
900 		zone_foreach(cache_shrink);
901 
902 	CPU_FOREACH(cpu) {
903 		thread_lock(curthread);
904 		sched_bind(curthread, cpu);
905 		thread_unlock(curthread);
906 
907 		if (zone)
908 			cache_drain_safe_cpu(zone);
909 		else
910 			zone_foreach(cache_drain_safe_cpu);
911 	}
912 	thread_lock(curthread);
913 	sched_unbind(curthread);
914 	thread_unlock(curthread);
915 }
916 
917 /*
918  * Reclaim cached buckets from a zone.  All buckets are reclaimed if the caller
919  * requested a drain, otherwise the per-domain caches are trimmed to their
920  * estimated working set size.
921  */
922 static void
923 bucket_cache_reclaim(uma_zone_t zone, bool drain)
924 {
925 	uma_zone_domain_t zdom;
926 	uma_bucket_t bucket;
927 	long target, tofree;
928 	int i;
929 
930 	for (i = 0; i < vm_ndomains; i++) {
931 		zdom = &zone->uz_domain[i];
932 
933 		/*
934 		 * If we were asked to drain the zone, we are done only once
935 		 * this bucket cache is empty.  Otherwise, we reclaim items in
936 		 * excess of the zone's estimated working set size.  If the
937 		 * difference nitems - imin is larger than the WSS estimate,
938 		 * then the estimate will grow at the end of this interval and
939 		 * we ignore the historical average.
940 		 */
941 		target = drain ? 0 : lmax(zdom->uzd_wss, zdom->uzd_nitems -
942 		    zdom->uzd_imin);
943 		while (zdom->uzd_nitems > target) {
944 			bucket = TAILQ_LAST(&zdom->uzd_buckets, uma_bucketlist);
945 			if (bucket == NULL)
946 				break;
947 			tofree = bucket->ub_cnt;
948 			TAILQ_REMOVE(&zdom->uzd_buckets, bucket, ub_link);
949 			zdom->uzd_nitems -= tofree;
950 
951 			/*
952 			 * Shift the bounds of the current WSS interval to avoid
953 			 * perturbing the estimate.
954 			 */
955 			zdom->uzd_imax -= lmin(zdom->uzd_imax, tofree);
956 			zdom->uzd_imin -= lmin(zdom->uzd_imin, tofree);
957 
958 			ZONE_UNLOCK(zone);
959 			bucket_drain(zone, bucket);
960 			bucket_free(zone, bucket, NULL);
961 			ZONE_LOCK(zone);
962 		}
963 	}
964 
965 	/*
966 	 * Shrink the zone bucket size to ensure that the per-CPU caches
967 	 * don't grow too large.
968 	 */
969 	if (zone->uz_count > zone->uz_count_min)
970 		zone->uz_count--;
971 }
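
/*
 * An example of the trim target above: if a domain caches nitems = 1000
 * items, the interval minimum imin is 600, and the working-set estimate
 * uzd_wss is 300, then the target is lmax(300, 1000 - 600) = 400 and
 * roughly 600 items' worth of buckets are freed.  Since whole buckets are
 * freed at a time, the cache may end up slightly below the target.
 */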
972 
973 static void
974 keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
975 {
976 	uint8_t *mem;
977 	int i;
978 	uint8_t flags;
979 
980 	CTR4(KTR_UMA, "keg_free_slab keg %s(%p) slab %p, returning %d bytes",
981 	    keg->uk_name, keg, slab, PAGE_SIZE * keg->uk_ppera);
982 
983 	mem = slab->us_data;
984 	flags = slab->us_flags;
985 	i = start;
986 	if (keg->uk_fini != NULL) {
987 		for (i--; i > -1; i--)
988 #ifdef INVARIANTS
989 		/*
990 		 * trash_fini implies that dtor was trash_dtor. trash_fini
991 		 * would check that memory hasn't been modified since free,
992 		 * which executed trash_dtor.
993 		 * That's why we need to run uma_dbg_kskip() check here,
994 		 * albeit we don't make skip check for other init/fini
995 		 * invocations.
996 		 */
997 		if (!uma_dbg_kskip(keg, slab->us_data + (keg->uk_rsize * i)) ||
998 		    keg->uk_fini != trash_fini)
999 #endif
1000 			keg->uk_fini(slab->us_data + (keg->uk_rsize * i),
1001 			    keg->uk_size);
1002 	}
1003 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
1004 		zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
1005 	keg->uk_freef(mem, PAGE_SIZE * keg->uk_ppera, flags);
1006 	uma_total_dec(PAGE_SIZE * keg->uk_ppera);
1007 }
1008 
1009 /*
1010  * Frees pages from a keg back to the system.  This is done on demand from
1011  * the pageout daemon.
1012  *
1013  * Returns nothing.
1014  */
1015 static void
1016 keg_drain(uma_keg_t keg)
1017 {
1018 	struct slabhead freeslabs = { 0 };
1019 	uma_domain_t dom;
1020 	uma_slab_t slab, tmp;
1021 	int i;
1022 
1023 	/*
1024 	 * We don't want to take pages from statically allocated kegs at this
1025 	 * time
1026 	 */
1027 	if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
1028 		return;
1029 
1030 	CTR3(KTR_UMA, "keg_drain %s(%p) free items: %u",
1031 	    keg->uk_name, keg, keg->uk_free);
1032 	KEG_LOCK(keg);
1033 	if (keg->uk_free == 0)
1034 		goto finished;
1035 
1036 	for (i = 0; i < vm_ndomains; i++) {
1037 		dom = &keg->uk_domain[i];
1038 		LIST_FOREACH_SAFE(slab, &dom->ud_free_slab, us_link, tmp) {
1039 			/* We have nowhere to free these to. */
1040 			if (slab->us_flags & UMA_SLAB_BOOT)
1041 				continue;
1042 
1043 			LIST_REMOVE(slab, us_link);
1044 			keg->uk_pages -= keg->uk_ppera;
1045 			keg->uk_free -= keg->uk_ipers;
1046 
1047 			if (keg->uk_flags & UMA_ZONE_HASH)
1048 				UMA_HASH_REMOVE(&keg->uk_hash, slab,
1049 				    slab->us_data);
1050 
1051 			SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);
1052 		}
1053 	}
1054 
1055 finished:
1056 	KEG_UNLOCK(keg);
1057 
1058 	while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
1059 		SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
1060 		keg_free_slab(keg, slab, keg->uk_ipers);
1061 	}
1062 }
1063 
1064 static void
1065 zone_reclaim(uma_zone_t zone, int waitok, bool drain)
1066 {
1067 
1068 	/*
1069 	 * Set draining to interlock with zone_dtor() so we can release our
1070 	 * locks as we go.  Only dtor() should do a WAITOK call since it
1071 	 * is the only call that knows the structure will still be available
1072 	 * when it wakes up.
1073 	 */
1074 	ZONE_LOCK(zone);
1075 	while (zone->uz_flags & UMA_ZFLAG_RECLAIMING) {
1076 		if (waitok == M_NOWAIT)
1077 			goto out;
1078 		msleep(zone, zone->uz_lockptr, PVM, "zonedrain", 1);
1079 	}
1080 	zone->uz_flags |= UMA_ZFLAG_RECLAIMING;
1081 	bucket_cache_reclaim(zone, drain);
1082 	ZONE_UNLOCK(zone);
1083 
1084 	/*
1085 	 * The RECLAIMING flag protects us from being freed while
1086 	 * we're running.  Normally the uma_rwlock would protect us but we
1087 	 * must be able to release and acquire the right lock for each keg.
1088 	 */
1089 	if ((zone->uz_flags & UMA_ZFLAG_CACHE) == 0)
1090 		keg_drain(zone->uz_keg);
1091 	ZONE_LOCK(zone);
1092 	zone->uz_flags &= ~UMA_ZFLAG_RECLAIMING;
1093 	wakeup(zone);
1094 out:
1095 	ZONE_UNLOCK(zone);
1096 }
1097 
1098 static void
1099 zone_drain(uma_zone_t zone)
1100 {
1101 
1102 	zone_reclaim(zone, M_NOWAIT, true);
1103 }
1104 
1105 static void
1106 zone_trim(uma_zone_t zone)
1107 {
1108 
1109 	zone_reclaim(zone, M_NOWAIT, false);
1110 }
1111 
1112 /*
1113  * Allocate a new slab for a keg.  This does not insert the slab onto a list.
1114  * If the allocation was successful, the keg lock will be held upon return,
1115  * otherwise the keg will be left unlocked.
1116  *
1117  * Arguments:
1118  *	flags   Wait flags for the item initialization routine
1119  *	aflags  Wait flags for the slab allocation
1120  *
1121  * Returns:
1122  *	The slab that was allocated or NULL if there is no memory and the
1123  *	caller specified M_NOWAIT.
1124  */
1125 static uma_slab_t
1126 keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int domain, int flags,
1127     int aflags)
1128 {
1129 	uma_alloc allocf;
1130 	uma_slab_t slab;
1131 	unsigned long size;
1132 	uint8_t *mem;
1133 	uint8_t sflags;
1134 	int i;
1135 
1136 	KASSERT(domain >= 0 && domain < vm_ndomains,
1137 	    ("keg_alloc_slab: domain %d out of range", domain));
1138 	KEG_LOCK_ASSERT(keg);
1139 	MPASS(zone->uz_lockptr == &keg->uk_lock);
1140 
1141 	allocf = keg->uk_allocf;
1142 	KEG_UNLOCK(keg);
1143 
1144 	slab = NULL;
1145 	mem = NULL;
1146 	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
1147 		slab = zone_alloc_item(keg->uk_slabzone, NULL, domain, aflags);
1148 		if (slab == NULL)
1149 			goto out;
1150 	}
1151 
1152 	/*
1153 	 * This reproduces the old vm_zone behavior of zero filling pages the
1154 	 * first time they are added to a zone.
1155 	 *
1156 	 * Malloced items are zeroed in uma_zalloc.
1157 	 */
1158 
1159 	if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
1160 		aflags |= M_ZERO;
1161 	else
1162 		aflags &= ~M_ZERO;
1163 
1164 	if (keg->uk_flags & UMA_ZONE_NODUMP)
1165 		aflags |= M_NODUMP;
1166 
1167 	/* zone is passed for legacy reasons. */
1168 	size = keg->uk_ppera * PAGE_SIZE;
1169 	mem = allocf(zone, size, domain, &sflags, aflags);
1170 	if (mem == NULL) {
1171 		if (keg->uk_flags & UMA_ZONE_OFFPAGE)
1172 			zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
1173 		slab = NULL;
1174 		goto out;
1175 	}
1176 	uma_total_inc(size);
1177 
1178 	/* Point the slab into the allocated memory */
1179 	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
1180 		slab = (uma_slab_t )(mem + keg->uk_pgoff);
1181 
1182 	if (keg->uk_flags & UMA_ZONE_VTOSLAB)
1183 		for (i = 0; i < keg->uk_ppera; i++)
1184 			vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);
1185 
1186 	slab->us_keg = keg;
1187 	slab->us_data = mem;
1188 	slab->us_freecount = keg->uk_ipers;
1189 	slab->us_flags = sflags;
1190 	slab->us_domain = domain;
1191 	BIT_FILL(SLAB_SETSIZE, &slab->us_free);
1192 #ifdef INVARIANTS
1193 	BIT_ZERO(SLAB_SETSIZE, &slab->us_debugfree);
1194 #endif
1195 
1196 	if (keg->uk_init != NULL) {
1197 		for (i = 0; i < keg->uk_ipers; i++)
1198 			if (keg->uk_init(slab->us_data + (keg->uk_rsize * i),
1199 			    keg->uk_size, flags) != 0)
1200 				break;
1201 		if (i != keg->uk_ipers) {
1202 			keg_free_slab(keg, slab, i);
1203 			slab = NULL;
1204 			goto out;
1205 		}
1206 	}
1207 	KEG_LOCK(keg);
1208 
1209 	CTR3(KTR_UMA, "keg_alloc_slab: allocated slab %p for %s(%p)",
1210 	    slab, keg->uk_name, keg);
1211 
1212 	if (keg->uk_flags & UMA_ZONE_HASH)
1213 		UMA_HASH_INSERT(&keg->uk_hash, slab, mem);
1214 
1215 	keg->uk_pages += keg->uk_ppera;
1216 	keg->uk_free += keg->uk_ipers;
1217 
1218 out:
1219 	return (slab);
1220 }
1221 
1222 /*
1223  * This function is intended to be used early on in place of page_alloc() so
1224  * that we may use the boot time page cache to satisfy allocations before
1225  * the VM is ready.
1226  */
1227 static void *
1228 startup_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
1229     int wait)
1230 {
1231 	uma_keg_t keg;
1232 	void *mem;
1233 	int pages;
1234 
1235 	keg = zone->uz_keg;
1236 	/*
1237 	 * If we are in BOOT_BUCKETS or higher, then switch to the real
1238 	 * allocator.  Zones with page sized slabs switch at BOOT_PAGEALLOC.
1239 	 */
1240 	switch (booted) {
1241 		case BOOT_COLD:
1242 		case BOOT_STRAPPED:
1243 			break;
1244 		case BOOT_PAGEALLOC:
1245 			if (keg->uk_ppera > 1)
1246 				break;
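			/*
			 * FALLTHROUGH: single-page kegs start using the real
			 * allocator once the page allocator is ready.
			 */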
1247 		case BOOT_BUCKETS:
1248 		case BOOT_RUNNING:
1249 #ifdef UMA_MD_SMALL_ALLOC
1250 			keg->uk_allocf = (keg->uk_ppera > 1) ?
1251 			    page_alloc : uma_small_alloc;
1252 #else
1253 			keg->uk_allocf = page_alloc;
1254 #endif
1255 			return keg->uk_allocf(zone, bytes, domain, pflag, wait);
1256 	}
1257 
1258 	/*
1259 	 * Check our small startup cache to see if it has pages remaining.
1260 	 */
1261 	pages = howmany(bytes, PAGE_SIZE);
1262 	KASSERT(pages > 0, ("%s can't reserve 0 pages", __func__));
1263 	if (pages > boot_pages)
1264 		panic("UMA zone \"%s\": Increase vm.boot_pages", zone->uz_name);
1265 #ifdef DIAGNOSTIC
1266 	printf("%s from \"%s\", %d boot pages left\n", __func__, zone->uz_name,
1267 	    boot_pages);
1268 #endif
1269 	mem = bootmem;
1270 	boot_pages -= pages;
1271 	bootmem += pages * PAGE_SIZE;
1272 	*pflag = UMA_SLAB_BOOT;
1273 
1274 	return (mem);
1275 }
1276 
1277 /*
1278  * Allocates a number of pages from the system
1279  *
1280  * Arguments:
1281  *	bytes  The number of bytes requested
1282  *	wait  Shall we wait?
1283  *
1284  * Returns:
1285  *	A pointer to the alloced memory or possibly
1286  *	NULL if M_NOWAIT is set.
1287  */
1288 static void *
1289 page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
1290     int wait)
1291 {
1292 	void *p;	/* Returned page */
1293 
1294 	*pflag = UMA_SLAB_KERNEL;
1295 	p = (void *)kmem_malloc_domainset(DOMAINSET_FIXED(domain), bytes, wait);
1296 
1297 	return (p);
1298 }
1299 
1300 static void *
1301 pcpu_page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
1302     int wait)
1303 {
1304 	struct pglist alloctail;
1305 	vm_offset_t addr, zkva;
1306 	int cpu, flags;
1307 	vm_page_t p, p_next;
1308 #ifdef NUMA
1309 	struct pcpu *pc;
1310 #endif
1311 
1312 	MPASS(bytes == (mp_maxid + 1) * PAGE_SIZE);
1313 
1314 	TAILQ_INIT(&alloctail);
1315 	flags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
1316 	    malloc2vm_flags(wait);
1317 	*pflag = UMA_SLAB_KERNEL;
1318 	for (cpu = 0; cpu <= mp_maxid; cpu++) {
1319 		if (CPU_ABSENT(cpu)) {
1320 			p = vm_page_alloc(NULL, 0, flags);
1321 		} else {
1322 #ifndef NUMA
1323 			p = vm_page_alloc(NULL, 0, flags);
1324 #else
1325 			pc = pcpu_find(cpu);
1326 			p = vm_page_alloc_domain(NULL, 0, pc->pc_domain, flags);
1327 			if (__predict_false(p == NULL))
1328 				p = vm_page_alloc(NULL, 0, flags);
1329 #endif
1330 		}
1331 		if (__predict_false(p == NULL))
1332 			goto fail;
1333 		TAILQ_INSERT_TAIL(&alloctail, p, listq);
1334 	}
1335 	if ((addr = kva_alloc(bytes)) == 0)
1336 		goto fail;
1337 	zkva = addr;
1338 	TAILQ_FOREACH(p, &alloctail, listq) {
1339 		pmap_qenter(zkva, &p, 1);
1340 		zkva += PAGE_SIZE;
1341 	}
1342 	return ((void*)addr);
1343 fail:
1344 	TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
1345 		vm_page_unwire_noq(p);
1346 		vm_page_free(p);
1347 	}
1348 	return (NULL);
1349 }
1350 
1351 /*
1352  * Allocates a number of pages that are not associated with a VM object
1353  *
1354  * Arguments:
1355  *	bytes  The number of bytes requested
1356  *	wait   Shall we wait?
1357  *
1358  * Returns:
1359  *	A pointer to the alloced memory or possibly
1360  *	NULL if M_NOWAIT is set.
1361  */
1362 static void *
1363 noobj_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags,
1364     int wait)
1365 {
1366 	TAILQ_HEAD(, vm_page) alloctail;
1367 	u_long npages;
1368 	vm_offset_t retkva, zkva;
1369 	vm_page_t p, p_next;
1370 	uma_keg_t keg;
1371 
1372 	TAILQ_INIT(&alloctail);
1373 	keg = zone->uz_keg;
1374 
1375 	npages = howmany(bytes, PAGE_SIZE);
1376 	while (npages > 0) {
1377 		p = vm_page_alloc_domain(NULL, 0, domain, VM_ALLOC_INTERRUPT |
1378 		    VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
1379 		    ((wait & M_WAITOK) != 0 ? VM_ALLOC_WAITOK :
1380 		    VM_ALLOC_NOWAIT));
1381 		if (p != NULL) {
1382 			/*
1383 			 * Since the page does not belong to an object, its
1384 			 * listq is unused.
1385 			 */
1386 			TAILQ_INSERT_TAIL(&alloctail, p, listq);
1387 			npages--;
1388 			continue;
1389 		}
1390 		/*
1391 		 * Page allocation failed, free intermediate pages and
1392 		 * exit.
1393 		 */
1394 		TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
1395 			vm_page_unwire_noq(p);
1396 			vm_page_free(p);
1397 		}
1398 		return (NULL);
1399 	}
1400 	*flags = UMA_SLAB_PRIV;
1401 	zkva = keg->uk_kva +
1402 	    atomic_fetchadd_long(&keg->uk_offset, round_page(bytes));
1403 	retkva = zkva;
1404 	TAILQ_FOREACH(p, &alloctail, listq) {
1405 		pmap_qenter(zkva, &p, 1);
1406 		zkva += PAGE_SIZE;
1407 	}
1408 
1409 	return ((void *)retkva);
1410 }
1411 
1412 /*
1413  * Frees a number of pages to the system
1414  *
1415  * Arguments:
1416  *	mem   A pointer to the memory to be freed
1417  *	size  The size of the memory being freed
1418  *	flags The original p->us_flags field
1419  *
1420  * Returns:
1421  *	Nothing
1422  */
1423 static void
1424 page_free(void *mem, vm_size_t size, uint8_t flags)
1425 {
1426 
1427 	if ((flags & UMA_SLAB_KERNEL) == 0)
1428 		panic("UMA: page_free used with invalid flags %x", flags);
1429 
1430 	kmem_free((vm_offset_t)mem, size);
1431 }
1432 
1433 /*
1434  * Frees pcpu zone allocations
1435  *
1436  * Arguments:
1437  *	mem   A pointer to the memory to be freed
1438  *	size  The size of the memory being freed
1439  *	flags The original p->us_flags field
1440  *
1441  * Returns:
1442  *	Nothing
1443  */
1444 static void
1445 pcpu_page_free(void *mem, vm_size_t size, uint8_t flags)
1446 {
1447 	vm_offset_t sva, curva;
1448 	vm_paddr_t paddr;
1449 	vm_page_t m;
1450 
1451 	MPASS(size == (mp_maxid+1)*PAGE_SIZE);
1452 	sva = (vm_offset_t)mem;
1453 	for (curva = sva; curva < sva + size; curva += PAGE_SIZE) {
1454 		paddr = pmap_kextract(curva);
1455 		m = PHYS_TO_VM_PAGE(paddr);
1456 		vm_page_unwire_noq(m);
1457 		vm_page_free(m);
1458 	}
1459 	pmap_qremove(sva, size >> PAGE_SHIFT);
1460 	kva_free(sva, size);
1461 }
1462 
1463 
1464 /*
1465  * Zero fill initializer
1466  *
1467  * Arguments/Returns follow uma_init specifications
1468  */
1469 static int
1470 zero_init(void *mem, int size, int flags)
1471 {
1472 	bzero(mem, size);
1473 	return (0);
1474 }
1475 
1476 /*
1477  * Finish creating a small uma keg.  This calculates ipers, and the keg size.
1478  *
1479  * Arguments
1480  *	keg  The keg we should initialize
1481  *
1482  * Returns
1483  *	Nothing
1484  */
1485 static void
1486 keg_small_init(uma_keg_t keg)
1487 {
1488 	u_int rsize;
1489 	u_int memused;
1490 	u_int wastedspace;
1491 	u_int shsize;
1492 	u_int slabsize;
1493 
1494 	if (keg->uk_flags & UMA_ZONE_PCPU) {
1495 		u_int ncpus = (mp_maxid + 1) ? (mp_maxid + 1) : MAXCPU;
1496 
1497 		slabsize = UMA_PCPU_ALLOC_SIZE;
1498 		keg->uk_ppera = ncpus;
1499 	} else {
1500 		slabsize = UMA_SLAB_SIZE;
1501 		keg->uk_ppera = 1;
1502 	}
1503 
1504 	/*
1505 	 * Calculate the size of each allocation (rsize) according to
1506 	 * alignment.  If the requested size is so small that a slab would hold
1507 	 * more items than the free-item bitset can track, we round it up.
1508 	 */
1509 	rsize = keg->uk_size;
1510 	if (rsize < slabsize / SLAB_SETSIZE)
1511 		rsize = slabsize / SLAB_SETSIZE;
1512 	if (rsize & keg->uk_align)
1513 		rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1);
1514 	keg->uk_rsize = rsize;
1515 
1516 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0 ||
1517 	    keg->uk_rsize < UMA_PCPU_ALLOC_SIZE,
1518 	    ("%s: size %u too large", __func__, keg->uk_rsize));
1519 
1520 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
1521 		shsize = 0;
1522 	else
1523 		shsize = SIZEOF_UMA_SLAB;
1524 
1525 	if (rsize <= slabsize - shsize)
1526 		keg->uk_ipers = (slabsize - shsize) / rsize;
1527 	else {
1528 		/* Handle special case when we have 1 item per slab, so
1529 		 * alignment requirement can be relaxed. */
1530 		KASSERT(keg->uk_size <= slabsize - shsize,
1531 		    ("%s: size %u greater than slab", __func__, keg->uk_size));
1532 		keg->uk_ipers = 1;
1533 	}
1534 	KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
1535 	    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
1536 
1537 	memused = keg->uk_ipers * rsize + shsize;
1538 	wastedspace = slabsize - memused;
1539 
1540 	/*
1541 	 * We can't do OFFPAGE if we're internal or if we've been
1542 	 * asked to not go to the VM for buckets.  If we did, we might
1543 	 * end up going to the VM for slabs as well, which we do not
1544 	 * want to do when we're UMA_ZFLAG_CACHEONLY as a result
1545 	 * of UMA_ZONE_VM, which clearly forbids it.
1546 	 */
1547 	if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
1548 	    (keg->uk_flags & UMA_ZFLAG_CACHEONLY))
1549 		return;
1550 
1551 	/*
1552 	 * See if using an OFFPAGE slab will limit our waste.  Only do
1553 	 * this if it permits more items per-slab.
1554 	 *
1555 	 * XXX We could try growing slabsize to limit max waste as well.
1556 	 * Historically this was not done because the VM could not
1557 	 * efficiently handle contiguous allocations.
1558 	 */
1559 	if ((wastedspace >= slabsize / UMA_MAX_WASTE) &&
1560 	    (keg->uk_ipers < (slabsize / keg->uk_rsize))) {
1561 		keg->uk_ipers = slabsize / keg->uk_rsize;
1562 		KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
1563 		    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
1564 		CTR6(KTR_UMA, "UMA decided we need offpage slab headers for "
1565 		    "keg: %s(%p), calculated wastedspace = %d, "
1566 		    "maximum wasted space allowed = %d, "
1567 		    "calculated ipers = %d, "
1568 		    "new wasted space = %d\n", keg->uk_name, keg, wastedspace,
1569 		    slabsize / UMA_MAX_WASTE, keg->uk_ipers,
1570 		    slabsize - keg->uk_ipers * keg->uk_rsize);
1571 		/*
1572 		 * If we had access to memory to embed a slab header we
1573 		 * also have a page structure to use vtoslab() instead of
1574 		 * hash to find slabs.  If the zone was explicitly created
1575 		 * OFFPAGE we can't necessarily touch the memory.
1576 		 */
1577 		if ((keg->uk_flags & UMA_ZONE_OFFPAGE) == 0)
1578 			keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
1579 	}
1580 
1581 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
1582 	    (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1583 		keg->uk_flags |= UMA_ZONE_HASH;
1584 }
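
/*
 * A worked example of the sizing above, using illustrative numbers (a
 * 4096-byte slab, a hypothetical 96-byte inline slab header, and 256-byte
 * items): ipers = (4096 - 96) / 256 = 15 and wastedspace = 4096 -
 * (15 * 256 + 96) = 160 bytes.  The keg is switched to OFFPAGE only when
 * the waste exceeds slabsize / UMA_MAX_WASTE and dropping the inline
 * header would actually fit another item (here 4096 / 256 = 16 > 15).
 */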
1585 
1586 /*
1587  * Finish creating a large (> UMA_SLAB_SIZE) uma kegs.  Just give in and do
1588  * OFFPAGE for now.  When I can allow for more dynamic slab sizes this will be
1589  * more complicated.
1590  *
1591  * Arguments
1592  *	keg  The keg we should initialize
1593  *
1594  * Returns
1595  *	Nothing
1596  */
1597 static void
1598 keg_large_init(uma_keg_t keg)
1599 {
1600 
1601 	KASSERT(keg != NULL, ("Keg is null in keg_large_init"));
1602 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
1603 	    ("%s: Cannot large-init a UMA_ZONE_PCPU keg", __func__));
1604 
1605 	keg->uk_ppera = howmany(keg->uk_size, PAGE_SIZE);
1606 	keg->uk_ipers = 1;
1607 	keg->uk_rsize = keg->uk_size;
1608 
1609 	/* Check whether we have enough space to not do OFFPAGE. */
1610 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) == 0 &&
1611 	    PAGE_SIZE * keg->uk_ppera - keg->uk_rsize < SIZEOF_UMA_SLAB) {
1612 		/*
1613 		 * We can't do OFFPAGE if we're internal, in which case
1614 		 * we need an extra page per allocation to contain the
1615 		 * slab header.
1616 		 */
1617 		if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) == 0)
1618 			keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
1619 		else
1620 			keg->uk_ppera++;
1621 	}
1622 
1623 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
1624 	    (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1625 		keg->uk_flags |= UMA_ZONE_HASH;
1626 }
1627 
1628 static void
1629 keg_cachespread_init(uma_keg_t keg)
1630 {
1631 	int alignsize;
1632 	int trailer;
1633 	int pages;
1634 	int rsize;
1635 
1636 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
1637 	    ("%s: Cannot cachespread-init a UMA_ZONE_PCPU keg", __func__));
1638 
1639 	alignsize = keg->uk_align + 1;
1640 	rsize = keg->uk_size;
1641 	/*
1642 	 * We want one item to start on every align boundary in a page.  To
1643 	 * do this we will span pages.  We will also extend the item by the
1644 	 * size of align if it is an even multiple of align.  Otherwise, it
1645 	 * would fall on the same boundary every time.
1646 	 */
1647 	if (rsize & keg->uk_align)
1648 		rsize = (rsize & ~keg->uk_align) + alignsize;
1649 	if ((rsize & alignsize) == 0)
1650 		rsize += alignsize;
1651 	trailer = rsize - keg->uk_size;
1652 	pages = (rsize * (PAGE_SIZE / alignsize)) / PAGE_SIZE;
1653 	pages = MIN(pages, (128 * 1024) / PAGE_SIZE);
1654 	keg->uk_rsize = rsize;
1655 	keg->uk_ppera = pages;
1656 	keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize;
1657 	keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
1658 	KASSERT(keg->uk_ipers <= SLAB_SETSIZE,
1659 	    ("%s: keg->uk_ipers too high(%d) increase max_ipers", __func__,
1660 	    keg->uk_ipers));
1661 }
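
/*
 * A worked example of the spreading above, assuming 4096-byte pages and
 * 64-byte cache-line alignment (uk_align = 63): a 128-byte item is an even
 * multiple of the alignment, so rsize grows to 192.  Then pages =
 * (192 * 64) / 4096 = 3 and ipers = (3 * 4096 + 64) / 192 = 64, and since
 * 192 bytes is 3 cache lines and gcd(3, 64) = 1, the 64 items start on 64
 * distinct line offsets within a page instead of piling onto the same
 * cache sets.
 */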
1662 
1663 /*
1664  * Keg header ctor.  This initializes all fields, locks, etc.  And inserts
1665  * the keg onto the global keg list.
1666  *
1667  * Arguments/Returns follow uma_ctor specifications
1668  *	udata  Actually uma_kctor_args
1669  */
1670 static int
1671 keg_ctor(void *mem, int size, void *udata, int flags)
1672 {
1673 	struct uma_kctor_args *arg = udata;
1674 	uma_keg_t keg = mem;
1675 	uma_zone_t zone;
1676 
1677 	bzero(keg, size);
1678 	keg->uk_size = arg->size;
1679 	keg->uk_init = arg->uminit;
1680 	keg->uk_fini = arg->fini;
1681 	keg->uk_align = arg->align;
1682 	keg->uk_free = 0;
1683 	keg->uk_reserve = 0;
1684 	keg->uk_pages = 0;
1685 	keg->uk_flags = arg->flags;
1686 	keg->uk_slabzone = NULL;
1687 
1688 	/*
1689 	 * We use a global round-robin policy by default.  Zones with
1690 	 * UMA_ZONE_NUMA set will use first-touch instead, in which case the
1691 	 * iterator is never run.
1692 	 */
1693 	keg->uk_dr.dr_policy = DOMAINSET_RR();
1694 	keg->uk_dr.dr_iter = 0;
1695 
1696 	/*
1697 	 * The master zone is passed to us at keg-creation time.
1698 	 */
1699 	zone = arg->zone;
1700 	keg->uk_name = zone->uz_name;
1701 
1702 	if (arg->flags & UMA_ZONE_VM)
1703 		keg->uk_flags |= UMA_ZFLAG_CACHEONLY;
1704 
1705 	if (arg->flags & UMA_ZONE_ZINIT)
1706 		keg->uk_init = zero_init;
1707 
1708 	if (arg->flags & UMA_ZONE_MALLOC)
1709 		keg->uk_flags |= UMA_ZONE_VTOSLAB;
1710 
1711 	if (arg->flags & UMA_ZONE_PCPU)
1712 #ifdef SMP
1713 		keg->uk_flags |= UMA_ZONE_OFFPAGE;
1714 #else
1715 		keg->uk_flags &= ~UMA_ZONE_PCPU;
1716 #endif
1717 
1718 	if (keg->uk_flags & UMA_ZONE_CACHESPREAD) {
1719 		keg_cachespread_init(keg);
1720 	} else {
1721 		if (keg->uk_size > UMA_SLAB_SPACE)
1722 			keg_large_init(keg);
1723 		else
1724 			keg_small_init(keg);
1725 	}
1726 
1727 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
1728 		keg->uk_slabzone = slabzone;
1729 
1730 	/*
1731 	 * If we haven't booted yet we need allocations to go through the
1732 	 * startup cache until the vm is ready.
1733 	 */
1734 	if (booted < BOOT_PAGEALLOC)
1735 		keg->uk_allocf = startup_alloc;
1736 #ifdef UMA_MD_SMALL_ALLOC
1737 	else if (keg->uk_ppera == 1)
1738 		keg->uk_allocf = uma_small_alloc;
1739 #endif
1740 	else if (keg->uk_flags & UMA_ZONE_PCPU)
1741 		keg->uk_allocf = pcpu_page_alloc;
1742 	else
1743 		keg->uk_allocf = page_alloc;
1744 #ifdef UMA_MD_SMALL_ALLOC
1745 	if (keg->uk_ppera == 1)
1746 		keg->uk_freef = uma_small_free;
1747 	else
1748 #endif
1749 	if (keg->uk_flags & UMA_ZONE_PCPU)
1750 		keg->uk_freef = pcpu_page_free;
1751 	else
1752 		keg->uk_freef = page_free;
1753 
1754 	/*
1755 	 * Initialize keg's lock
1756 	 */
1757 	KEG_LOCK_INIT(keg, (arg->flags & UMA_ZONE_MTXCLASS));
1758 
1759 	/*
1760 	 * If we're putting the slab header in the actual page we need to
1761 	 * figure out where in each page it goes.  See SIZEOF_UMA_SLAB
1762 	 * macro definition.
1763 	 */
1764 	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) {
1765 		keg->uk_pgoff = (PAGE_SIZE * keg->uk_ppera) - SIZEOF_UMA_SLAB;
1766 		/*
1767 		 * The only way the following is possible is if, with our
1768 		 * UMA_ALIGN_PTR adjustments, we are now bigger than
1769 		 * UMA_SLAB_SIZE.  I haven't checked whether this is
1770 		 * mathematically possible for all cases, so we make
1771 		 * sure here anyway.
1772 		 */
1773 		KASSERT(keg->uk_pgoff + sizeof(struct uma_slab) <=
1774 		    PAGE_SIZE * keg->uk_ppera,
1775 		    ("zone %s ipers %d rsize %d size %d slab won't fit",
1776 		    zone->uz_name, keg->uk_ipers, keg->uk_rsize, keg->uk_size));
1777 	}
1778 
1779 	if (keg->uk_flags & UMA_ZONE_HASH)
1780 		hash_alloc(&keg->uk_hash, 0);
1781 
1782 	CTR5(KTR_UMA, "keg_ctor %p zone %s(%p) out %d free %d\n",
1783 	    keg, zone->uz_name, zone,
1784 	    (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free,
1785 	    keg->uk_free);
1786 
1787 	LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);
1788 
1789 	rw_wlock(&uma_rwlock);
1790 	LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
1791 	rw_wunlock(&uma_rwlock);
1792 	return (0);
1793 }
1794 
1795 static void
1796 zone_alloc_counters(uma_zone_t zone)
1797 {
1798 
1799 	zone->uz_allocs = counter_u64_alloc(M_WAITOK);
1800 	zone->uz_frees = counter_u64_alloc(M_WAITOK);
1801 	zone->uz_fails = counter_u64_alloc(M_WAITOK);
1802 }
1803 
1804 /*
1805  * Zone header ctor.  This initializes all fields, locks, etc.
1806  *
1807  * Arguments/Returns follow uma_ctor specifications
1808  *	udata  Actually uma_zctor_args
1809  */
1810 static int
1811 zone_ctor(void *mem, int size, void *udata, int flags)
1812 {
1813 	struct uma_zctor_args *arg = udata;
1814 	uma_zone_t zone = mem;
1815 	uma_zone_t z;
1816 	uma_keg_t keg;
1817 	int i;
1818 
1819 	bzero(zone, size);
1820 	zone->uz_name = arg->name;
1821 	zone->uz_ctor = arg->ctor;
1822 	zone->uz_dtor = arg->dtor;
1823 	zone->uz_init = NULL;
1824 	zone->uz_fini = NULL;
1825 	zone->uz_sleeps = 0;
1826 	zone->uz_xdomain = 0;
1827 	zone->uz_count = 0;
1828 	zone->uz_count_min = 0;
1829 	zone->uz_count_max = BUCKET_MAX;
1830 	zone->uz_flags = 0;
1831 	zone->uz_warning = NULL;
1832 	/* The domain structures follow the cpu structures. */
1833 	zone->uz_domain = (struct uma_zone_domain *)&zone->uz_cpu[mp_ncpus];
1834 	zone->uz_bkt_max = ULONG_MAX;
1835 	timevalclear(&zone->uz_ratecheck);
1836 
1837 	if (__predict_true(booted == BOOT_RUNNING))
1838 		zone_alloc_counters(zone);
1839 	else {
1840 		zone->uz_allocs = EARLY_COUNTER;
1841 		zone->uz_frees = EARLY_COUNTER;
1842 		zone->uz_fails = EARLY_COUNTER;
1843 	}
1844 
1845 	for (i = 0; i < vm_ndomains; i++)
1846 		TAILQ_INIT(&zone->uz_domain[i].uzd_buckets);
1847 
1848 	/*
1849 	 * This is a pure cache zone, no kegs.
1850 	 */
1851 	if (arg->import) {
1852 		if (arg->flags & UMA_ZONE_VM)
1853 			arg->flags |= UMA_ZFLAG_CACHEONLY;
1854 		zone->uz_flags = arg->flags;
1855 		zone->uz_size = arg->size;
1856 		zone->uz_import = arg->import;
1857 		zone->uz_release = arg->release;
1858 		zone->uz_arg = arg->arg;
1859 		zone->uz_lockptr = &zone->uz_lock;
1860 		ZONE_LOCK_INIT(zone, (arg->flags & UMA_ZONE_MTXCLASS));
1861 		rw_wlock(&uma_rwlock);
1862 		LIST_INSERT_HEAD(&uma_cachezones, zone, uz_link);
1863 		rw_wunlock(&uma_rwlock);
1864 		goto out;
1865 	}
1866 
1867 	/*
1868 	 * Use the regular zone/keg/slab allocator.
1869 	 */
1870 	zone->uz_import = (uma_import)zone_import;
1871 	zone->uz_release = (uma_release)zone_release;
1872 	zone->uz_arg = zone;
1873 	keg = arg->keg;
1874 
1875 	if (arg->flags & UMA_ZONE_SECONDARY) {
1876 		KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
1877 		zone->uz_init = arg->uminit;
1878 		zone->uz_fini = arg->fini;
1879 		zone->uz_lockptr = &keg->uk_lock;
1880 		zone->uz_flags |= UMA_ZONE_SECONDARY;
1881 		rw_wlock(&uma_rwlock);
1882 		ZONE_LOCK(zone);
1883 		LIST_FOREACH(z, &keg->uk_zones, uz_link) {
1884 			if (LIST_NEXT(z, uz_link) == NULL) {
1885 				LIST_INSERT_AFTER(z, zone, uz_link);
1886 				break;
1887 			}
1888 		}
1889 		ZONE_UNLOCK(zone);
1890 		rw_wunlock(&uma_rwlock);
1891 	} else if (keg == NULL) {
1892 		if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
1893 		    arg->align, arg->flags)) == NULL)
1894 			return (ENOMEM);
1895 	} else {
1896 		struct uma_kctor_args karg;
1897 		int error;
1898 
1899 		/* We should only be here from uma_startup() */
1900 		karg.size = arg->size;
1901 		karg.uminit = arg->uminit;
1902 		karg.fini = arg->fini;
1903 		karg.align = arg->align;
1904 		karg.flags = arg->flags;
1905 		karg.zone = zone;
1906 		error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
1907 		    flags);
1908 		if (error)
1909 			return (error);
1910 	}
1911 
1912 	zone->uz_keg = keg;
1913 	zone->uz_size = keg->uk_size;
1914 	zone->uz_flags |= (keg->uk_flags &
1915 	    (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT));
1916 
1917 	/*
1918 	 * Some internal zones don't have room allocated for the per-CPU
1919 	 * caches.  If we're internal, bail out here.
1920 	 */
1921 	if (keg->uk_flags & UMA_ZFLAG_INTERNAL) {
1922 		KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0,
1923 		    ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
1924 		return (0);
1925 	}
1926 
1927 out:
1928 	KASSERT((arg->flags & (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET)) !=
1929 	    (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET),
1930 	    ("Invalid zone flag combination"));
1931 	if ((arg->flags & UMA_ZONE_MAXBUCKET) != 0) {
1932 		zone->uz_count = BUCKET_MAX;
1933 	} else if ((arg->flags & UMA_ZONE_MINBUCKET) != 0) {
1934 		zone->uz_count = BUCKET_MIN;
1935 		zone->uz_count_max = BUCKET_MIN;
1936 	} else if ((arg->flags & UMA_ZONE_NOBUCKET) != 0)
1937 		zone->uz_count = 0;
1938 	else
1939 		zone->uz_count = bucket_select(zone->uz_size);
1940 	zone->uz_count_min = zone->uz_count;
1941 
1942 	return (0);
1943 }
1944 
1945 /*
1946  * Keg header dtor.  This frees all data, destroys locks, frees the hash
1947  * table and removes the keg from the global list.
1948  *
1949  * Arguments/Returns follow uma_dtor specifications
1950  *	udata  unused
1951  */
1952 static void
1953 keg_dtor(void *arg, int size, void *udata)
1954 {
1955 	uma_keg_t keg;
1956 
1957 	keg = (uma_keg_t)arg;
1958 	KEG_LOCK(keg);
1959 	if (keg->uk_free != 0) {
1960 		printf("Freed UMA keg (%s) was not empty (%d items). "
1961 		    " Lost %d pages of memory.\n",
1962 		    keg->uk_name ? keg->uk_name : "",
1963 		    keg->uk_free, keg->uk_pages);
1964 	}
1965 	KEG_UNLOCK(keg);
1966 
1967 	hash_free(&keg->uk_hash);
1968 
1969 	KEG_LOCK_FINI(keg);
1970 }
1971 
1972 /*
1973  * Zone header dtor.
1974  *
1975  * Arguments/Returns follow uma_dtor specifications
1976  *	udata  unused
1977  */
1978 static void
1979 zone_dtor(void *arg, int size, void *udata)
1980 {
1981 	uma_zone_t zone;
1982 	uma_keg_t keg;
1983 
1984 	zone = (uma_zone_t)arg;
1985 
1986 	if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
1987 		cache_drain(zone);
1988 
1989 	rw_wlock(&uma_rwlock);
1990 	LIST_REMOVE(zone, uz_link);
1991 	rw_wunlock(&uma_rwlock);
1992 	/*
1993 	 * XXX there are some races here where
1994 	 * the zone can be drained but zone lock
1995 	 * released and then refilled before we
1996 	 * remove it... we don't care for now
1997 	 */
1998 	zone_reclaim(zone, M_WAITOK, true);
1999 	/*
2000 	 * We only destroy kegs from non-secondary/non-cache zones.
2001 	 */
2002 	if ((zone->uz_flags & (UMA_ZONE_SECONDARY | UMA_ZFLAG_CACHE)) == 0) {
2003 		keg = zone->uz_keg;
2004 		rw_wlock(&uma_rwlock);
2005 		LIST_REMOVE(keg, uk_link);
2006 		rw_wunlock(&uma_rwlock);
2007 		zone_free_item(kegs, keg, NULL, SKIP_NONE);
2008 	}
2009 	counter_u64_free(zone->uz_allocs);
2010 	counter_u64_free(zone->uz_frees);
2011 	counter_u64_free(zone->uz_fails);
2012 	if (zone->uz_lockptr == &zone->uz_lock)
2013 		ZONE_LOCK_FINI(zone);
2014 }
2015 
2016 /*
2017  * Traverses every zone in the system and calls a callback
2018  *
2019  * Arguments:
2020  *	zfunc  A pointer to a function which accepts a zone
2021  *		as an argument.
2022  *
2023  * Returns:
2024  *	Nothing
2025  */
2026 static void
2027 zone_foreach(void (*zfunc)(uma_zone_t))
2028 {
2029 	uma_keg_t keg;
2030 	uma_zone_t zone;
2031 
2032 	/*
2033 	 * Before BOOT_RUNNING we are guaranteed to be single
2034 	 * threaded, so locking isn't needed. Startup functions
2035 	 * are allowed to use M_WAITOK.
2036 	 */
2037 	if (__predict_true(booted == BOOT_RUNNING))
2038 		rw_rlock(&uma_rwlock);
2039 	LIST_FOREACH(keg, &uma_kegs, uk_link) {
2040 		LIST_FOREACH(zone, &keg->uk_zones, uz_link)
2041 			zfunc(zone);
2042 	}
2043 	LIST_FOREACH(zone, &uma_cachezones, uz_link)
2044 		zfunc(zone);
2045 	if (__predict_true(booted == BOOT_RUNNING))
2046 		rw_runlock(&uma_rwlock);
2047 }
2048 
2049 /*
2050  * Count how many pages we need to bootstrap.  VM supplies
2051  * its need in early zones in the argument; we add up our zones,
2052  * which consist of: UMA Slabs, UMA Hash and 9 Bucket zones. The
2053  * zone of zones and zone of kegs are accounted separately.
2054  */
2055 #define	UMA_BOOT_ZONES	11
2056 /* Zone of zones and zone of kegs have arbitrary alignment. */
2057 #define	UMA_BOOT_ALIGN	32
2058 static int zsize, ksize;
2059 int
2060 uma_startup_count(int vm_zones)
2061 {
2062 	int zones, pages;
2063 
2064 	ksize = sizeof(struct uma_keg) +
2065 	    (sizeof(struct uma_domain) * vm_ndomains);
2066 	zsize = sizeof(struct uma_zone) +
2067 	    (sizeof(struct uma_cache) * (mp_maxid + 1)) +
2068 	    (sizeof(struct uma_zone_domain) * vm_ndomains);
2069 
2070 	/*
2071 	 * Memory for the zone of kegs and its keg,
2072 	 * and for zone of zones.
2073 	 */
2074 	pages = howmany(roundup(zsize, CACHE_LINE_SIZE) * 2 +
2075 	    roundup(ksize, CACHE_LINE_SIZE), PAGE_SIZE);
2076 
2077 #ifdef	UMA_MD_SMALL_ALLOC
2078 	zones = UMA_BOOT_ZONES;
2079 #else
2080 	zones = UMA_BOOT_ZONES + vm_zones;
2081 	vm_zones = 0;
2082 #endif
2083 
2084 	/* Memory for the rest of startup zones, UMA and VM, ... */
2085 	if (zsize > UMA_SLAB_SPACE) {
2086 		/* See keg_large_init(). */
2087 		u_int ppera;
2088 
2089 		ppera = howmany(roundup2(zsize, UMA_BOOT_ALIGN), PAGE_SIZE);
2090 		if (PAGE_SIZE * ppera - roundup2(zsize, UMA_BOOT_ALIGN) <
2091 		    SIZEOF_UMA_SLAB)
2092 			ppera++;
2093 		pages += (zones + vm_zones) * ppera;
2094 	} else if (roundup2(zsize, UMA_BOOT_ALIGN) > UMA_SLAB_SPACE)
2095 		/* See keg_small_init() special case for uk_ppera = 1. */
2096 		pages += zones;
2097 	else
2098 		pages += howmany(zones,
2099 		    UMA_SLAB_SPACE / roundup2(zsize, UMA_BOOT_ALIGN));
2100 
2101 	/* ... and their kegs. Note that zone of zones allocates a keg! */
2102 	pages += howmany(zones + 1,
2103 	    UMA_SLAB_SPACE / roundup2(ksize, UMA_BOOT_ALIGN));
2104 
2105 	/*
2106 	 * Most of the startup zones are not going to be offpage; that's
2107 	 * why we use UMA_SLAB_SPACE instead of UMA_SLAB_SIZE in all
2108 	 * calculations.  Some large bucket zones will be offpage, and
2109 	 * thus will allocate hashes.  We take a conservative approach
2110 	 * and assume that all zones may allocate a hash.  This may give
2111 	 * us some positive inaccuracy, usually an extra single page.
2112 	 */
2113 	pages += howmany(zones, UMA_SLAB_SPACE /
2114 	    (sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT));
2115 
2116 	return (pages);
2117 }
2118 
2119 void
2120 uma_startup(void *mem, int npages)
2121 {
2122 	struct uma_zctor_args args;
2123 	uma_keg_t masterkeg;
2124 	uintptr_t m;
2125 
2126 #ifdef DIAGNOSTIC
2127 	printf("Entering %s with %d boot pages configured\n", __func__, npages);
2128 #endif
2129 
2130 	rw_init(&uma_rwlock, "UMA lock");
2131 
2132 	/* Use bootpages memory for the zone of zones and zone of kegs. */
2133 	m = (uintptr_t)mem;
2134 	zones = (uma_zone_t)m;
2135 	m += roundup(zsize, CACHE_LINE_SIZE);
2136 	kegs = (uma_zone_t)m;
2137 	m += roundup(zsize, CACHE_LINE_SIZE);
2138 	masterkeg = (uma_keg_t)m;
2139 	m += roundup(ksize, CACHE_LINE_SIZE);
2140 	m = roundup(m, PAGE_SIZE);
2141 	npages -= (m - (uintptr_t)mem) / PAGE_SIZE;
2142 	mem = (void *)m;
2143 
2144 	/* "manually" create the initial zone */
2145 	memset(&args, 0, sizeof(args));
2146 	args.name = "UMA Kegs";
2147 	args.size = ksize;
2148 	args.ctor = keg_ctor;
2149 	args.dtor = keg_dtor;
2150 	args.uminit = zero_init;
2151 	args.fini = NULL;
2152 	args.keg = masterkeg;
2153 	args.align = UMA_BOOT_ALIGN - 1;
2154 	args.flags = UMA_ZFLAG_INTERNAL;
2155 	zone_ctor(kegs, zsize, &args, M_WAITOK);
2156 
2157 	bootmem = mem;
2158 	boot_pages = npages;
2159 
2160 	args.name = "UMA Zones";
2161 	args.size = zsize;
2162 	args.ctor = zone_ctor;
2163 	args.dtor = zone_dtor;
2164 	args.uminit = zero_init;
2165 	args.fini = NULL;
2166 	args.keg = NULL;
2167 	args.align = UMA_BOOT_ALIGN - 1;
2168 	args.flags = UMA_ZFLAG_INTERNAL;
2169 	zone_ctor(zones, zsize, &args, M_WAITOK);
2170 
2171 	/* Now make a zone for slab headers */
2172 	slabzone = uma_zcreate("UMA Slabs",
2173 				sizeof(struct uma_slab),
2174 				NULL, NULL, NULL, NULL,
2175 				UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
2176 
2177 	hashzone = uma_zcreate("UMA Hash",
2178 	    sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
2179 	    NULL, NULL, NULL, NULL,
2180 	    UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
2181 
2182 	bucket_init();
2183 
2184 	booted = BOOT_STRAPPED;
2185 }
2186 
2187 void
2188 uma_startup1(void)
2189 {
2190 
2191 #ifdef DIAGNOSTIC
2192 	printf("Entering %s with %d boot pages left\n", __func__, boot_pages);
2193 #endif
2194 	booted = BOOT_PAGEALLOC;
2195 }
2196 
2197 void
2198 uma_startup2(void)
2199 {
2200 
2201 #ifdef DIAGNOSTIC
2202 	printf("Entering %s with %d boot pages left\n", __func__, boot_pages);
2203 #endif
2204 	booted = BOOT_BUCKETS;
2205 	sx_init(&uma_reclaim_lock, "umareclaim");
2206 	bucket_enable();
2207 }
2208 
2209 /*
2210  * Finish starting UMA: allocate the zone counters and initialize our
2211  * periodic callout handle, then mark booting as complete.
2212  */
2213 static void
2214 uma_startup3(void)
2215 {
2216 
2217 #ifdef INVARIANTS
2218 	TUNABLE_INT_FETCH("vm.debug.divisor", &dbg_divisor);
2219 	uma_dbg_cnt = counter_u64_alloc(M_WAITOK);
2220 	uma_skip_cnt = counter_u64_alloc(M_WAITOK);
2221 #endif
2222 	zone_foreach(zone_alloc_counters);
2223 	callout_init(&uma_callout, 1);
2224 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
2225 	booted = BOOT_RUNNING;
2226 }
2227 
2228 static uma_keg_t
2229 uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
2230 		int align, uint32_t flags)
2231 {
2232 	struct uma_kctor_args args;
2233 
2234 	args.size = size;
2235 	args.uminit = uminit;
2236 	args.fini = fini;
2237 	args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align;
2238 	args.flags = flags;
2239 	args.zone = zone;
2240 	return (zone_alloc_item(kegs, &args, UMA_ANYDOMAIN, M_WAITOK));
2241 }
2242 
2243 /* Public functions */
2244 /* See uma.h */
2245 void
2246 uma_set_align(int align)
2247 {
2248 
2249 	if (align != UMA_ALIGN_CACHE)
2250 		uma_align_cache = align;
2251 }
2252 
2253 /* See uma.h */
2254 uma_zone_t
2255 uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
2256 		uma_init uminit, uma_fini fini, int align, uint32_t flags)
2257 
2258 {
2259 	struct uma_zctor_args args;
2260 	uma_zone_t res;
2261 	bool locked;
2262 
2263 	KASSERT(powerof2(align + 1), ("invalid zone alignment %d for \"%s\"",
2264 	    align, name));
2265 
2266 	/* Sets all zones to a first-touch domain policy. */
2267 #ifdef UMA_FIRSTTOUCH
2268 	flags |= UMA_ZONE_NUMA;
2269 #endif
2270 
2271 	/* This stuff is essential for the zone ctor */
2272 	memset(&args, 0, sizeof(args));
2273 	args.name = name;
2274 	args.size = size;
2275 	args.ctor = ctor;
2276 	args.dtor = dtor;
2277 	args.uminit = uminit;
2278 	args.fini = fini;
2279 #ifdef  INVARIANTS
2280 	/*
2281 	 * If a zone is being created with an empty constructor and
2282 	 * destructor, pass UMA constructor/destructor which checks for
2283 	 * memory use after free.
2284 	 */
2285 	if ((!(flags & (UMA_ZONE_ZINIT | UMA_ZONE_NOFREE))) &&
2286 	    ctor == NULL && dtor == NULL && uminit == NULL && fini == NULL) {
2287 		args.ctor = trash_ctor;
2288 		args.dtor = trash_dtor;
2289 		args.uminit = trash_init;
2290 		args.fini = trash_fini;
2291 	}
2292 #endif
2293 	args.align = align;
2294 	args.flags = flags;
2295 	args.keg = NULL;
2296 
2297 	if (booted < BOOT_BUCKETS) {
2298 		locked = false;
2299 	} else {
2300 		sx_slock(&uma_reclaim_lock);
2301 		locked = true;
2302 	}
2303 	res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK);
2304 	if (locked)
2305 		sx_sunlock(&uma_reclaim_lock);
2306 	return (res);
2307 }
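
/*
 * Illustrative usage sketch (comment only, not compiled): a consumer
 * typically creates its zone once at initialization time and destroys it
 * at teardown.  The "foo" names below are hypothetical.
 *
 *	static uma_zone_t foo_zone;
 *
 *	foo_zone = uma_zcreate("foo", sizeof(struct foo),
 *	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
 *	...
 *	uma_zdestroy(foo_zone);
 */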
2308 
2309 /* See uma.h */
2310 uma_zone_t
2311 uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
2312 		    uma_init zinit, uma_fini zfini, uma_zone_t master)
2313 {
2314 	struct uma_zctor_args args;
2315 	uma_keg_t keg;
2316 	uma_zone_t res;
2317 	bool locked;
2318 
2319 	keg = master->uz_keg;
2320 	memset(&args, 0, sizeof(args));
2321 	args.name = name;
2322 	args.size = keg->uk_size;
2323 	args.ctor = ctor;
2324 	args.dtor = dtor;
2325 	args.uminit = zinit;
2326 	args.fini = zfini;
2327 	args.align = keg->uk_align;
2328 	args.flags = keg->uk_flags | UMA_ZONE_SECONDARY;
2329 	args.keg = keg;
2330 
2331 	if (booted < BOOT_BUCKETS) {
2332 		locked = false;
2333 	} else {
2334 		sx_slock(&uma_reclaim_lock);
2335 		locked = true;
2336 	}
2337 	/* XXX Attaches only one keg of potentially many. */
2338 	res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK);
2339 	if (locked)
2340 		sx_sunlock(&uma_reclaim_lock);
2341 	return (res);
2342 }
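
/*
 * Illustrative usage sketch (comment only, not compiled): a secondary zone
 * shares the master zone's keg but layers its own zone-level init/fini on
 * top, so the same backing slabs can be handed out in a differently
 * initialized form.  The "foo" names are hypothetical.
 *
 *	foo_special_zone = uma_zsecond_create("foo special", NULL, NULL,
 *	    foo_special_zinit, foo_special_zfini, foo_zone);
 */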
2343 
2344 /* See uma.h */
2345 uma_zone_t
2346 uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor,
2347 		    uma_init zinit, uma_fini zfini, uma_import zimport,
2348 		    uma_release zrelease, void *arg, int flags)
2349 {
2350 	struct uma_zctor_args args;
2351 
2352 	memset(&args, 0, sizeof(args));
2353 	args.name = name;
2354 	args.size = size;
2355 	args.ctor = ctor;
2356 	args.dtor = dtor;
2357 	args.uminit = zinit;
2358 	args.fini = zfini;
2359 	args.import = zimport;
2360 	args.release = zrelease;
2361 	args.arg = arg;
2362 	args.align = 0;
2363 	args.flags = flags | UMA_ZFLAG_CACHE;
2364 
2365 	return (zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK));
2366 }
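
/*
 * Illustrative usage sketch (comment only, not compiled): a cache zone has
 * no keg; the caller supplies import/release methods that move batches of
 * items between the zone's bucket caches and some external backing store.
 * The callback shapes follow the uma_import/uma_release typedefs in uma.h;
 * the "foo" names are hypothetical.
 *
 *	static int
 *	foo_import(void *arg, void **store, int cnt, int domain, int flags)
 *	{
 *		... fill store[0 .. cnt-1] from the backing store ...
 *		return (number of items actually imported);
 *	}
 *
 *	static void
 *	foo_release(void *arg, void **store, int cnt)
 *	{
 *		... return store[0 .. cnt-1] to the backing store ...
 *	}
 *
 *	foo_cache = uma_zcache_create("foo cache", sizeof(struct foo),
 *	    NULL, NULL, NULL, NULL, foo_import, foo_release, NULL, 0);
 */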
2367 
2368 /* See uma.h */
2369 void
2370 uma_zdestroy(uma_zone_t zone)
2371 {
2372 
2373 	sx_slock(&uma_reclaim_lock);
2374 	zone_free_item(zones, zone, NULL, SKIP_NONE);
2375 	sx_sunlock(&uma_reclaim_lock);
2376 }
2377 
2378 void
2379 uma_zwait(uma_zone_t zone)
2380 {
2381 	void *item;
2382 
2383 	item = uma_zalloc_arg(zone, NULL, M_WAITOK);
2384 	uma_zfree(zone, item);
2385 }
2386 
2387 void *
2388 uma_zalloc_pcpu_arg(uma_zone_t zone, void *udata, int flags)
2389 {
2390 	void *item;
2391 #ifdef SMP
2392 	int i;
2393 
2394 	MPASS(zone->uz_flags & UMA_ZONE_PCPU);
2395 #endif
2396 	item = uma_zalloc_arg(zone, udata, flags & ~M_ZERO);
2397 	if (item != NULL && (flags & M_ZERO)) {
2398 #ifdef SMP
2399 		for (i = 0; i <= mp_maxid; i++)
2400 			bzero(zpcpu_get_cpu(item, i), zone->uz_size);
2401 #else
2402 		bzero(item, zone->uz_size);
2403 #endif
2404 	}
2405 	return (item);
2406 }
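
/*
 * Illustrative usage sketch (comment only, not compiled): a UMA_ZONE_PCPU
 * zone returns one object per CPU; callers address the individual copies
 * with the zpcpu_get_cpu() accessor used above.  The "foo" names are
 * hypothetical.
 *
 *	counters = uma_zalloc_pcpu_arg(foo_pcpu_zone, NULL, M_WAITOK | M_ZERO);
 *	*(uint64_t *)zpcpu_get_cpu(counters, curcpu) += 1;
 *	...
 *	uma_zfree_pcpu_arg(foo_pcpu_zone, counters, NULL);
 */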
2407 
2408 /*
2409  * A stub while both regular and pcpu cases are identical.
2410  */
2411 void
2412 uma_zfree_pcpu_arg(uma_zone_t zone, void *item, void *udata)
2413 {
2414 
2415 #ifdef SMP
2416 	MPASS(zone->uz_flags & UMA_ZONE_PCPU);
2417 #endif
2418 	uma_zfree_arg(zone, item, udata);
2419 }
2420 
2421 /* See uma.h */
2422 void *
2423 uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
2424 {
2425 	uma_zone_domain_t zdom;
2426 	uma_bucket_t bucket;
2427 	uma_cache_t cache;
2428 	void *item;
2429 	int cpu, domain, lockfail, maxbucket;
2430 #ifdef INVARIANTS
2431 	bool skipdbg;
2432 #endif
2433 
2434 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
2435 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
2436 
2437 	/* This is the fast path allocation */
2438 	CTR4(KTR_UMA, "uma_zalloc_arg thread %x zone %s(%p) flags %d",
2439 	    curthread, zone->uz_name, zone, flags);
2440 
2441 	if (flags & M_WAITOK) {
2442 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2443 		    "uma_zalloc_arg: zone \"%s\"", zone->uz_name);
2444 	}
2445 	KASSERT((flags & M_EXEC) == 0, ("uma_zalloc_arg: called with M_EXEC"));
2446 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
2447 	    ("uma_zalloc_arg: called with spinlock or critical section held"));
2448 	if (zone->uz_flags & UMA_ZONE_PCPU)
2449 		KASSERT((flags & M_ZERO) == 0, ("allocating from a pcpu zone "
2450 		    "with M_ZERO passed"));
2451 
2452 #ifdef DEBUG_MEMGUARD
2453 	if (memguard_cmp_zone(zone)) {
2454 		item = memguard_alloc(zone->uz_size, flags);
2455 		if (item != NULL) {
2456 			if (zone->uz_init != NULL &&
2457 			    zone->uz_init(item, zone->uz_size, flags) != 0)
2458 				return (NULL);
2459 			if (zone->uz_ctor != NULL &&
2460 			    zone->uz_ctor(item, zone->uz_size, udata,
2461 			    flags) != 0) {
2462 			    	zone->uz_fini(item, zone->uz_size);
2463 				return (NULL);
2464 			}
2465 			return (item);
2466 		}
2467 		/* This is unfortunate but should not be fatal. */
2468 	}
2469 #endif
2470 	/*
2471 	 * If possible, allocate from the per-CPU cache.  There are two
2472 	 * requirements for safe access to the per-CPU cache: (1) the thread
2473 	 * accessing the cache must not be preempted or yield during access,
2474 	 * and (2) the thread must not migrate CPUs without switching which
2475 	 * cache it accesses.  We rely on a critical section to prevent
2476 	 * preemption and migration.  We release the critical section in
2477 	 * order to acquire the zone mutex if we are unable to allocate from
2478 	 * the current cache; when we re-acquire the critical section, we
2479 	 * must detect and handle migration if it has occurred.
2480 	 */
2481 zalloc_restart:
2482 	critical_enter();
2483 	cpu = curcpu;
2484 	cache = &zone->uz_cpu[cpu];
2485 
2486 zalloc_start:
2487 	bucket = cache->uc_allocbucket;
2488 	if (bucket != NULL && bucket->ub_cnt > 0) {
2489 		bucket->ub_cnt--;
2490 		item = bucket->ub_bucket[bucket->ub_cnt];
2491 #ifdef INVARIANTS
2492 		bucket->ub_bucket[bucket->ub_cnt] = NULL;
2493 #endif
2494 		KASSERT(item != NULL, ("uma_zalloc: Bucket pointer mangled."));
2495 		cache->uc_allocs++;
2496 		critical_exit();
2497 #ifdef INVARIANTS
2498 		skipdbg = uma_dbg_zskip(zone, item);
2499 #endif
2500 		if (zone->uz_ctor != NULL &&
2501 #ifdef INVARIANTS
2502 		    (!skipdbg || zone->uz_ctor != trash_ctor ||
2503 		    zone->uz_dtor != trash_dtor) &&
2504 #endif
2505 		    zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
2506 			counter_u64_add(zone->uz_fails, 1);
2507 			zone_free_item(zone, item, udata, SKIP_DTOR | SKIP_CNT);
2508 			return (NULL);
2509 		}
2510 #ifdef INVARIANTS
2511 		if (!skipdbg)
2512 			uma_dbg_alloc(zone, NULL, item);
2513 #endif
2514 		if (flags & M_ZERO)
2515 			uma_zero_item(item, zone);
2516 		return (item);
2517 	}
2518 
2519 	/*
2520 	 * We have run out of items in our alloc bucket.
2521 	 * See if we can switch with our free bucket.
2522 	 */
2523 	bucket = cache->uc_freebucket;
2524 	if (bucket != NULL && bucket->ub_cnt > 0) {
2525 		CTR2(KTR_UMA,
2526 		    "uma_zalloc: zone %s(%p) swapping empty with alloc",
2527 		    zone->uz_name, zone);
2528 		cache->uc_freebucket = cache->uc_allocbucket;
2529 		cache->uc_allocbucket = bucket;
2530 		goto zalloc_start;
2531 	}
2532 
2533 	/*
2534 	 * Discard any empty allocation bucket while we hold no locks.
2535 	 */
2536 	bucket = cache->uc_allocbucket;
2537 	cache->uc_allocbucket = NULL;
2538 	critical_exit();
2539 	if (bucket != NULL)
2540 		bucket_free(zone, bucket, udata);
2541 
2542 	/* Short-circuit for zones without buckets and low memory. */
2543 	if (zone->uz_count == 0 || bucketdisable) {
2544 		ZONE_LOCK(zone);
2545 		if (zone->uz_flags & UMA_ZONE_NUMA)
2546 			domain = PCPU_GET(domain);
2547 		else
2548 			domain = UMA_ANYDOMAIN;
2549 		goto zalloc_item;
2550 	}
2551 
2552 	/*
2553 	 * The attempt to retrieve an item from the per-CPU cache has failed, so
2554 	 * we must go back to the zone.  This requires the zone lock, so we
2555 	 * must drop the critical section, then re-acquire it when we go back
2556 	 * to the cache.  Since the critical section is released, we may be
2557 	 * preempted or migrate.  As such, make sure not to maintain any
2558 	 * thread-local state specific to the cache from prior to releasing
2559 	 * the critical section.
2560 	 */
2561 	lockfail = 0;
2562 	if (ZONE_TRYLOCK(zone) == 0) {
2563 		/* Record contention to size the buckets. */
2564 		ZONE_LOCK(zone);
2565 		lockfail = 1;
2566 	}
2567 	critical_enter();
2568 	cpu = curcpu;
2569 	cache = &zone->uz_cpu[cpu];
2570 
2571 	/* See if we lost the race to fill the cache. */
2572 	if (cache->uc_allocbucket != NULL) {
2573 		ZONE_UNLOCK(zone);
2574 		goto zalloc_start;
2575 	}
2576 
2577 	/*
2578 	 * Check the zone's cache of buckets.
2579 	 */
2580 	if (zone->uz_flags & UMA_ZONE_NUMA) {
2581 		domain = PCPU_GET(domain);
2582 		zdom = &zone->uz_domain[domain];
2583 	} else {
2584 		domain = UMA_ANYDOMAIN;
2585 		zdom = &zone->uz_domain[0];
2586 	}
2587 
2588 	if ((bucket = zone_fetch_bucket(zone, zdom)) != NULL) {
2589 		KASSERT(bucket->ub_cnt != 0,
2590 		    ("uma_zalloc_arg: Returning an empty bucket."));
2591 		cache->uc_allocbucket = bucket;
2592 		ZONE_UNLOCK(zone);
2593 		goto zalloc_start;
2594 	}
2595 	/* We are no longer associated with this CPU. */
2596 	critical_exit();
2597 
2598 	/*
2599 	 * We bump the uz count when the cache size is insufficient to
2600 	 * handle the working set.
2601 	 */
2602 	if (lockfail && zone->uz_count < zone->uz_count_max)
2603 		zone->uz_count++;
2604 
2605 	if (zone->uz_max_items > 0) {
2606 		if (zone->uz_items >= zone->uz_max_items)
2607 			goto zalloc_item;
2608 		maxbucket = MIN(zone->uz_count,
2609 		    zone->uz_max_items - zone->uz_items);
2610 		zone->uz_items += maxbucket;
2611 	} else
2612 		maxbucket = zone->uz_count;
2613 	ZONE_UNLOCK(zone);
2614 
2615 	/*
2616 	 * Now let's just fill a bucket and put it on the free list.  If that
2617 	 * works we'll restart the allocation from the beginning and it
2618 	 * will use the just-filled bucket.
2619 	 */
2620 	bucket = zone_alloc_bucket(zone, udata, domain, flags, maxbucket);
2621 	CTR3(KTR_UMA, "uma_zalloc: zone %s(%p) bucket zone returned %p",
2622 	    zone->uz_name, zone, bucket);
2623 	ZONE_LOCK(zone);
2624 	if (bucket != NULL) {
2625 		if (zone->uz_max_items > 0 && bucket->ub_cnt < maxbucket) {
2626 			MPASS(zone->uz_items >= maxbucket - bucket->ub_cnt);
2627 			zone->uz_items -= maxbucket - bucket->ub_cnt;
2628 			if (zone->uz_sleepers > 0 &&
2629 			    zone->uz_items < zone->uz_max_items)
2630 				wakeup_one(zone);
2631 		}
2632 		critical_enter();
2633 		cpu = curcpu;
2634 		cache = &zone->uz_cpu[cpu];
2635 
2636 		/*
2637 		 * See if we lost the race or were migrated.  Cache the
2638 		 * initialized bucket to make this less likely or claim
2639 		 * the memory directly.
2640 		 */
2641 		if (cache->uc_allocbucket == NULL &&
2642 		    ((zone->uz_flags & UMA_ZONE_NUMA) == 0 ||
2643 		    domain == PCPU_GET(domain))) {
2644 			cache->uc_allocbucket = bucket;
2645 			zdom->uzd_imax += bucket->ub_cnt;
2646 		} else if (zone->uz_bkt_count >= zone->uz_bkt_max) {
2647 			critical_exit();
2648 			ZONE_UNLOCK(zone);
2649 			bucket_drain(zone, bucket);
2650 			bucket_free(zone, bucket, udata);
2651 			goto zalloc_restart;
2652 		} else
2653 			zone_put_bucket(zone, zdom, bucket, false);
2654 		ZONE_UNLOCK(zone);
2655 		goto zalloc_start;
2656 	} else if (zone->uz_max_items > 0) {
2657 		zone->uz_items -= maxbucket;
2658 		if (zone->uz_sleepers > 0 &&
2659 		    zone->uz_items + 1 < zone->uz_max_items)
2660 			wakeup_one(zone);
2661 	}
2662 
2663 	/*
2664 	 * We may not be able to get a bucket so return an actual item.
2665 	 */
2666 zalloc_item:
2667 	item = zone_alloc_item_locked(zone, udata, domain, flags);
2668 
2669 	return (item);
2670 }
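
/*
 * Illustrative usage sketch (comment only, not compiled): consumers normally
 * reach uma_zalloc_arg()/uma_zfree_arg() through the uma_zalloc()/uma_zfree()
 * wrappers in uma.h.  M_NOWAIT callers must tolerate NULL; M_WAITOK callers
 * may sleep.  In either case the assertions above forbid calling with a spin
 * lock held or from within a critical section.  The "foo" names are
 * hypothetical.
 *
 *	fp = uma_zalloc(foo_zone, M_NOWAIT | M_ZERO);
 *	if (fp == NULL)
 *		return (ENOMEM);
 *	...
 *	uma_zfree(foo_zone, fp);
 */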
2671 
2672 void *
2673 uma_zalloc_domain(uma_zone_t zone, void *udata, int domain, int flags)
2674 {
2675 
2676 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
2677 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
2678 
2679 	/* This allocation bypasses the per-CPU caches. */
2680 	CTR5(KTR_UMA,
2681 	    "uma_zalloc_domain thread %x zone %s(%p) domain %d flags %d",
2682 	    curthread, zone->uz_name, zone, domain, flags);
2683 
2684 	if (flags & M_WAITOK) {
2685 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2686 		    "uma_zalloc_domain: zone \"%s\"", zone->uz_name);
2687 	}
2688 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
2689 	    ("uma_zalloc_domain: called with spinlock or critical section held"));
2690 
2691 	return (zone_alloc_item(zone, udata, domain, flags));
2692 }
2693 
2694 /*
2695  * Find a slab with some space.  Prefer slabs that are partially used over
2696  * those that are completely free.  This helps to reduce fragmentation.
2697  *
2698  * If 'rr' is 1, search all domains starting from 'domain'.  Otherwise check
2699  * only 'domain'.
2700  */
2701 static uma_slab_t
2702 keg_first_slab(uma_keg_t keg, int domain, bool rr)
2703 {
2704 	uma_domain_t dom;
2705 	uma_slab_t slab;
2706 	int start;
2707 
2708 	KASSERT(domain >= 0 && domain < vm_ndomains,
2709 	    ("keg_first_slab: domain %d out of range", domain));
2710 	KEG_LOCK_ASSERT(keg);
2711 
2712 	slab = NULL;
2713 	start = domain;
2714 	do {
2715 		dom = &keg->uk_domain[domain];
2716 		if (!LIST_EMPTY(&dom->ud_part_slab))
2717 			return (LIST_FIRST(&dom->ud_part_slab));
2718 		if (!LIST_EMPTY(&dom->ud_free_slab)) {
2719 			slab = LIST_FIRST(&dom->ud_free_slab);
2720 			LIST_REMOVE(slab, us_link);
2721 			LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
2722 			return (slab);
2723 		}
2724 		if (rr)
2725 			domain = (domain + 1) % vm_ndomains;
2726 	} while (domain != start);
2727 
2728 	return (NULL);
2729 }
2730 
2731 static uma_slab_t
2732 keg_fetch_free_slab(uma_keg_t keg, int domain, bool rr, int flags)
2733 {
2734 	uint32_t reserve;
2735 
2736 	KEG_LOCK_ASSERT(keg);
2737 
2738 	reserve = (flags & M_USE_RESERVE) != 0 ? 0 : keg->uk_reserve;
2739 	if (keg->uk_free <= reserve)
2740 		return (NULL);
2741 	return (keg_first_slab(keg, domain, rr));
2742 }
2743 
2744 static uma_slab_t
2745 keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int rdomain, const int flags)
2746 {
2747 	struct vm_domainset_iter di;
2748 	uma_domain_t dom;
2749 	uma_slab_t slab;
2750 	int aflags, domain;
2751 	bool rr;
2752 
2753 restart:
2754 	KEG_LOCK_ASSERT(keg);
2755 
2756 	/*
2757 	 * Use the keg's policy if upper layers haven't already specified a
2758 	 * domain (as happens with first-touch zones).
2759 	 *
2760 	 * To avoid races we run the iterator with the keg lock held, but that
2761 	 * means that we cannot allow the vm_domainset layer to sleep.  Thus,
2762 	 * clear M_WAITOK and handle low memory conditions locally.
2763 	 */
2764 	rr = rdomain == UMA_ANYDOMAIN;
2765 	if (rr) {
2766 		aflags = (flags & ~M_WAITOK) | M_NOWAIT;
2767 		vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain,
2768 		    &aflags);
2769 	} else {
2770 		aflags = flags;
2771 		domain = rdomain;
2772 	}
2773 
2774 	for (;;) {
2775 		slab = keg_fetch_free_slab(keg, domain, rr, flags);
2776 		if (slab != NULL) {
2777 			MPASS(slab->us_keg == keg);
2778 			return (slab);
2779 		}
2780 
2781 		/*
2782 		 * M_NOVM means don't ask at all!
2783 		 */
2784 		if (flags & M_NOVM)
2785 			break;
2786 
2787 		KASSERT(zone->uz_max_items == 0 ||
2788 		    zone->uz_items <= zone->uz_max_items,
2789 		    ("%s: zone %p overflow", __func__, zone));
2790 
2791 		slab = keg_alloc_slab(keg, zone, domain, flags, aflags);
2792 		/*
2793 		 * If we got a slab here it's safe to mark it partially used
2794 		 * and return.  We assume that the caller is going to remove
2795 		 * at least one item.
2796 		 */
2797 		if (slab) {
2798 			MPASS(slab->us_keg == keg);
2799 			dom = &keg->uk_domain[slab->us_domain];
2800 			LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
2801 			return (slab);
2802 		}
2803 		KEG_LOCK(keg);
2804 		if (rr && vm_domainset_iter_policy(&di, &domain) != 0) {
2805 			if ((flags & M_WAITOK) != 0) {
2806 				KEG_UNLOCK(keg);
2807 				vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask);
2808 				KEG_LOCK(keg);
2809 				goto restart;
2810 			}
2811 			break;
2812 		}
2813 	}
2814 
2815 	/*
2816 	 * We might not have been able to get a slab but another cpu
2817 	 * could have while we were unlocked.  Check again before we
2818 	 * fail.
2819 	 */
2820 	if ((slab = keg_fetch_free_slab(keg, domain, rr, flags)) != NULL) {
2821 		MPASS(slab->us_keg == keg);
2822 		return (slab);
2823 	}
2824 	return (NULL);
2825 }
2826 
2827 static uma_slab_t
2828 zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int domain, int flags)
2829 {
2830 	uma_slab_t slab;
2831 
2832 	if (keg == NULL) {
2833 		keg = zone->uz_keg;
2834 		KEG_LOCK(keg);
2835 	}
2836 
2837 	for (;;) {
2838 		slab = keg_fetch_slab(keg, zone, domain, flags);
2839 		if (slab)
2840 			return (slab);
2841 		if (flags & (M_NOWAIT | M_NOVM))
2842 			break;
2843 	}
2844 	KEG_UNLOCK(keg);
2845 	return (NULL);
2846 }
2847 
2848 static void *
2849 slab_alloc_item(uma_keg_t keg, uma_slab_t slab)
2850 {
2851 	uma_domain_t dom;
2852 	void *item;
2853 	uint8_t freei;
2854 
2855 	MPASS(keg == slab->us_keg);
2856 	KEG_LOCK_ASSERT(keg);
2857 
2858 	freei = BIT_FFS(SLAB_SETSIZE, &slab->us_free) - 1;
2859 	BIT_CLR(SLAB_SETSIZE, freei, &slab->us_free);
2860 	item = slab->us_data + (keg->uk_rsize * freei);
2861 	slab->us_freecount--;
2862 	keg->uk_free--;
2863 
2864 	/* Move this slab to the full list */
2865 	if (slab->us_freecount == 0) {
2866 		LIST_REMOVE(slab, us_link);
2867 		dom = &keg->uk_domain[slab->us_domain];
2868 		LIST_INSERT_HEAD(&dom->ud_full_slab, slab, us_link);
2869 	}
2870 
2871 	return (item);
2872 }
2873 
2874 static int
2875 zone_import(uma_zone_t zone, void **bucket, int max, int domain, int flags)
2876 {
2877 	uma_slab_t slab;
2878 	uma_keg_t keg;
2879 #ifdef NUMA
2880 	int stripe;
2881 #endif
2882 	int i;
2883 
2884 	slab = NULL;
2885 	keg = NULL;
2886 	/* Try to keep the buckets totally full */
2887 	for (i = 0; i < max; ) {
2888 		if ((slab = zone_fetch_slab(zone, keg, domain, flags)) == NULL)
2889 			break;
2890 		keg = slab->us_keg;
2891 #ifdef NUMA
2892 		stripe = howmany(max, vm_ndomains);
2893 #endif
2894 		while (slab->us_freecount && i < max) {
2895 			bucket[i++] = slab_alloc_item(keg, slab);
2896 			if (keg->uk_free <= keg->uk_reserve)
2897 				break;
2898 #ifdef NUMA
2899 			/*
2900 			 * If the zone is striped we pick a new slab for every
2901 			 * N allocations.  Eliminating this conditional will
2902 			 * instead pick a new domain for each bucket rather
2903 			 * than stripe within each bucket.  The current option
2904 			 * produces more fragmentation and requires more cpu
2905 			 * time but yields better distribution.
2906 			 */
2907 			if ((zone->uz_flags & UMA_ZONE_NUMA) == 0 &&
2908 			    vm_ndomains > 1 && --stripe == 0)
2909 				break;
2910 #endif
2911 		}
2912 		/* Don't block if we allocated any successfully. */
2913 		flags &= ~M_WAITOK;
2914 		flags |= M_NOWAIT;
2915 	}
2916 	if (slab != NULL)
2917 		KEG_UNLOCK(keg);
2918 
2919 	return (i);
2920 }
2921 
2922 static uma_bucket_t
2923 zone_alloc_bucket(uma_zone_t zone, void *udata, int domain, int flags, int max)
2924 {
2925 	uma_bucket_t bucket;
2926 
2927 	CTR1(KTR_UMA, "zone_alloc_bucket: domain %d", domain);
2928 
2929 	/* Avoid allocs targeting empty domains. */
2930 	if (domain != UMA_ANYDOMAIN && VM_DOMAIN_EMPTY(domain))
2931 		domain = UMA_ANYDOMAIN;
2932 
2933 	/* Don't wait for buckets, preserve caller's NOVM setting. */
2934 	bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM));
2935 	if (bucket == NULL)
2936 		return (NULL);
2937 
2938 	bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket,
2939 	    MIN(max, bucket->ub_entries), domain, flags);
2940 
2941 	/*
2942 	 * Initialize the memory if necessary.
2943 	 */
2944 	if (bucket->ub_cnt != 0 && zone->uz_init != NULL) {
2945 		int i;
2946 
2947 		for (i = 0; i < bucket->ub_cnt; i++)
2948 			if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size,
2949 			    flags) != 0)
2950 				break;
2951 		/*
2952 		 * If we couldn't initialize the whole bucket, put the
2953 		 * rest back onto the freelist.
2954 		 */
2955 		if (i != bucket->ub_cnt) {
2956 			zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i],
2957 			    bucket->ub_cnt - i);
2958 #ifdef INVARIANTS
2959 			bzero(&bucket->ub_bucket[i],
2960 			    sizeof(void *) * (bucket->ub_cnt - i));
2961 #endif
2962 			bucket->ub_cnt = i;
2963 		}
2964 	}
2965 
2966 	if (bucket->ub_cnt == 0) {
2967 		bucket_free(zone, bucket, udata);
2968 		counter_u64_add(zone->uz_fails, 1);
2969 		return (NULL);
2970 	}
2971 
2972 	return (bucket);
2973 }
2974 
2975 /*
2976  * Allocates a single item from a zone.
2977  *
2978  * Arguments
2979  *	zone   The zone to alloc for.
2980  *	udata  The data to be passed to the constructor.
2981  *	domain The domain to allocate from or UMA_ANYDOMAIN.
2982  *	flags  M_WAITOK, M_NOWAIT, M_ZERO.
2983  *
2984  * Returns
2985  *	NULL if there is no memory and M_NOWAIT is set
2986  *	An item if successful
2987  */
2988 
2989 static void *
2990 zone_alloc_item(uma_zone_t zone, void *udata, int domain, int flags)
2991 {
2992 
2993 	ZONE_LOCK(zone);
2994 	return (zone_alloc_item_locked(zone, udata, domain, flags));
2995 }
2996 
2997 /*
2998  * Returns with zone unlocked.
2999  */
3000 static void *
3001 zone_alloc_item_locked(uma_zone_t zone, void *udata, int domain, int flags)
3002 {
3003 	void *item;
3004 #ifdef INVARIANTS
3005 	bool skipdbg;
3006 #endif
3007 
3008 	ZONE_LOCK_ASSERT(zone);
3009 
3010 	if (zone->uz_max_items > 0) {
3011 		if (zone->uz_items >= zone->uz_max_items) {
3012 			zone_log_warning(zone);
3013 			zone_maxaction(zone);
3014 			if (flags & M_NOWAIT) {
3015 				ZONE_UNLOCK(zone);
3016 				return (NULL);
3017 			}
3018 			zone->uz_sleeps++;
3019 			zone->uz_sleepers++;
3020 			while (zone->uz_items >= zone->uz_max_items)
3021 				mtx_sleep(zone, zone->uz_lockptr, PVM,
3022 				    "zonelimit", 0);
3023 			zone->uz_sleepers--;
3024 			if (zone->uz_sleepers > 0 &&
3025 			    zone->uz_items + 1 < zone->uz_max_items)
3026 				wakeup_one(zone);
3027 		}
3028 		zone->uz_items++;
3029 	}
3030 	ZONE_UNLOCK(zone);
3031 
3032 	/* Avoid allocs targeting empty domains. */
3033 	if (domain != UMA_ANYDOMAIN && VM_DOMAIN_EMPTY(domain))
3034 		domain = UMA_ANYDOMAIN;
3035 
3036 	if (zone->uz_import(zone->uz_arg, &item, 1, domain, flags) != 1)
3037 		goto fail;
3038 
3039 #ifdef INVARIANTS
3040 	skipdbg = uma_dbg_zskip(zone, item);
3041 #endif
3042 	/*
3043 	 * We have to call both the zone's init (not the keg's init)
3044 	 * and the zone's ctor.  This is because the item is going from
3045 	 * a keg slab directly to the user, and the user is expecting it
3046 	 * to be both zone-init'd as well as zone-ctor'd.
3047 	 */
3048 	if (zone->uz_init != NULL) {
3049 		if (zone->uz_init(item, zone->uz_size, flags) != 0) {
3050 			zone_free_item(zone, item, udata, SKIP_FINI | SKIP_CNT);
3051 			goto fail;
3052 		}
3053 	}
3054 	if (zone->uz_ctor != NULL &&
3055 #ifdef INVARIANTS
3056 	    (!skipdbg || zone->uz_ctor != trash_ctor ||
3057 	    zone->uz_dtor != trash_dtor) &&
3058 #endif
3059 	    zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
3060 		zone_free_item(zone, item, udata, SKIP_DTOR | SKIP_CNT);
3061 		goto fail;
3062 	}
3063 #ifdef INVARIANTS
3064 	if (!skipdbg)
3065 		uma_dbg_alloc(zone, NULL, item);
3066 #endif
3067 	if (flags & M_ZERO)
3068 		uma_zero_item(item, zone);
3069 
3070 	counter_u64_add(zone->uz_allocs, 1);
3071 	CTR3(KTR_UMA, "zone_alloc_item item %p from %s(%p)", item,
3072 	    zone->uz_name, zone);
3073 
3074 	return (item);
3075 
3076 fail:
3077 	if (zone->uz_max_items > 0) {
3078 		ZONE_LOCK(zone);
3079 		zone->uz_items--;
3080 		ZONE_UNLOCK(zone);
3081 	}
3082 	counter_u64_add(zone->uz_fails, 1);
3083 	CTR2(KTR_UMA, "zone_alloc_item failed from %s(%p)",
3084 	    zone->uz_name, zone);
3085 	return (NULL);
3086 }
3087 
3088 /* See uma.h */
3089 void
3090 uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
3091 {
3092 	uma_cache_t cache;
3093 	uma_bucket_t bucket;
3094 	uma_zone_domain_t zdom;
3095 	int cpu, domain;
3096 #ifdef UMA_XDOMAIN
3097 	int itemdomain;
3098 #endif
3099 	bool lockfail;
3100 #ifdef INVARIANTS
3101 	bool skipdbg;
3102 #endif
3103 
3104 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
3105 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
3106 
3107 	CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread,
3108 	    zone->uz_name);
3109 
3110 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
3111 	    ("uma_zfree_arg: called with spinlock or critical section held"));
3112 
3113 	/* uma_zfree(..., NULL) does nothing, to match free(9). */
3114 	if (item == NULL)
3115 		return;
3116 #ifdef DEBUG_MEMGUARD
3117 	if (is_memguard_addr(item)) {
3118 		if (zone->uz_dtor != NULL)
3119 			zone->uz_dtor(item, zone->uz_size, udata);
3120 		if (zone->uz_fini != NULL)
3121 			zone->uz_fini(item, zone->uz_size);
3122 		memguard_free(item);
3123 		return;
3124 	}
3125 #endif
3126 #ifdef INVARIANTS
3127 	skipdbg = uma_dbg_zskip(zone, item);
3128 	if (skipdbg == false) {
3129 		if (zone->uz_flags & UMA_ZONE_MALLOC)
3130 			uma_dbg_free(zone, udata, item);
3131 		else
3132 			uma_dbg_free(zone, NULL, item);
3133 	}
3134 	if (zone->uz_dtor != NULL && (!skipdbg ||
3135 	    zone->uz_dtor != trash_dtor || zone->uz_ctor != trash_ctor))
3136 #else
3137 	if (zone->uz_dtor != NULL)
3138 #endif
3139 		zone->uz_dtor(item, zone->uz_size, udata);
3140 
3141 	/*
3142 	 * The race here is acceptable.  If we miss it we'll just have to wait
3143 	 * a little longer for the limits to be reset.
3144 	 */
3145 	if (zone->uz_sleepers > 0)
3146 		goto zfree_item;
3147 
3148 #ifdef UMA_XDOMAIN
3149 	if ((zone->uz_flags & UMA_ZONE_NUMA) != 0)
3150 		itemdomain = _vm_phys_domain(pmap_kextract((vm_offset_t)item));
3151 #endif
3152 
3153 	/*
3154 	 * If possible, free to the per-CPU cache.  There are two
3155 	 * requirements for safe access to the per-CPU cache: (1) the thread
3156 	 * accessing the cache must not be preempted or yield during access,
3157 	 * and (2) the thread must not migrate CPUs without switching which
3158 	 * cache it accesses.  We rely on a critical section to prevent
3159 	 * preemption and migration.  We release the critical section in
3160 	 * order to acquire the zone mutex if we are unable to free to the
3161 	 * current cache; when we re-acquire the critical section, we must
3162 	 * detect and handle migration if it has occurred.
3163 	 */
3164 zfree_restart:
3165 	critical_enter();
3166 	cpu = curcpu;
3167 	cache = &zone->uz_cpu[cpu];
3168 
3169 zfree_start:
3170 	domain = PCPU_GET(domain);
3171 #ifdef UMA_XDOMAIN
3172 	if ((zone->uz_flags & UMA_ZONE_NUMA) == 0)
3173 		itemdomain = domain;
3174 #endif
3175 	/*
3176 	 * Try to free into the allocbucket first to give LIFO ordering
3177 	 * for cache-hot data structures.  Spill over into the freebucket
3178 	 * if necessary.  Alloc will swap them if one runs dry.
3179 	 */
3180 #ifdef UMA_XDOMAIN
3181 	if (domain != itemdomain) {
3182 		bucket = cache->uc_crossbucket;
3183 	} else
3184 #endif
3185 	{
3186 		bucket = cache->uc_allocbucket;
3187 		if (bucket == NULL || bucket->ub_cnt >= bucket->ub_entries)
3188 			bucket = cache->uc_freebucket;
3189 	}
3190 	if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
3191 		KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL,
3192 		    ("uma_zfree: Freeing to non free bucket index."));
3193 		bucket->ub_bucket[bucket->ub_cnt] = item;
3194 		bucket->ub_cnt++;
3195 		cache->uc_frees++;
3196 		critical_exit();
3197 		return;
3198 	}
3199 
3200 	/*
3201 	 * We must go back to the zone, which requires acquiring the zone lock,
3202 	 * which in turn means we must release and re-acquire the critical
3203 	 * section.  Since the critical section is released, we may be
3204 	 * preempted or migrate.  As such, make sure not to maintain any
3205 	 * thread-local state specific to the cache from prior to releasing
3206 	 * the critical section.
3207 	 */
3208 	critical_exit();
3209 	if (zone->uz_count == 0 || bucketdisable)
3210 		goto zfree_item;
3211 
3212 	lockfail = false;
3213 	if (ZONE_TRYLOCK(zone) == 0) {
3214 		/* Record contention to size the buckets. */
3215 		ZONE_LOCK(zone);
3216 		lockfail = true;
3217 	}
3218 	critical_enter();
3219 	cpu = curcpu;
3220 	domain = PCPU_GET(domain);
3221 	cache = &zone->uz_cpu[cpu];
3222 
3223 #ifdef UMA_XDOMAIN
3224 	if (domain != itemdomain)
3225 		bucket = cache->uc_crossbucket;
3226 	else
3227 #endif
3228 		bucket = cache->uc_freebucket;
3229 	if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
3230 		ZONE_UNLOCK(zone);
3231 		goto zfree_start;
3232 	}
3233 #ifdef UMA_XDOMAIN
3234 	if (domain != itemdomain)
3235 		cache->uc_crossbucket = NULL;
3236 	else
3237 #endif
3238 		cache->uc_freebucket = NULL;
3239 	/* We are no longer associated with this CPU. */
3240 	critical_exit();
3241 
3242 #ifdef UMA_XDOMAIN
3243 	if (domain != itemdomain) {
3244 		if (bucket != NULL) {
3245 			zone->uz_xdomain += bucket->ub_cnt;
3246 			if (vm_ndomains > 2 ||
3247 			    zone->uz_bkt_count >= zone->uz_bkt_max) {
3248 				ZONE_UNLOCK(zone);
3249 				bucket_drain(zone, bucket);
3250 				bucket_free(zone, bucket, udata);
3251 			} else {
3252 				zdom = &zone->uz_domain[itemdomain];
3253 				zone_put_bucket(zone, zdom, bucket, true);
3254 				ZONE_UNLOCK(zone);
3255 			}
3256 		} else
3257 			ZONE_UNLOCK(zone);
3258 		bucket = bucket_alloc(zone, udata, M_NOWAIT);
3259 		if (bucket == NULL)
3260 			goto zfree_item;
3261 		critical_enter();
3262 		cpu = curcpu;
3263 		cache = &zone->uz_cpu[cpu];
3264 		if (cache->uc_crossbucket == NULL) {
3265 			cache->uc_crossbucket = bucket;
3266 			goto zfree_start;
3267 		}
3268 		critical_exit();
3269 		bucket_free(zone, bucket, udata);
3270 		goto zfree_restart;
3271 	}
3272 #endif
3273 
3274 	if ((zone->uz_flags & UMA_ZONE_NUMA) != 0) {
3275 		zdom = &zone->uz_domain[domain];
3276 	} else {
3277 		domain = 0;
3278 		zdom = &zone->uz_domain[0];
3279 	}
3280 
3281 	/* Can we throw this on the zone full list? */
3282 	if (bucket != NULL) {
3283 		CTR3(KTR_UMA,
3284 		    "uma_zfree: zone %s(%p) putting bucket %p on free list",
3285 		    zone->uz_name, zone, bucket);
3286 		/* ub_cnt is pointing to the last free item */
3287 		KASSERT(bucket->ub_cnt == bucket->ub_entries,
3288 		    ("uma_zfree: Attempting to insert a non-full bucket onto the full list.\n"));
3289 		if (zone->uz_bkt_count >= zone->uz_bkt_max) {
3290 			ZONE_UNLOCK(zone);
3291 			bucket_drain(zone, bucket);
3292 			bucket_free(zone, bucket, udata);
3293 			goto zfree_restart;
3294 		} else
3295 			zone_put_bucket(zone, zdom, bucket, true);
3296 	}
3297 
3298 	/*
3299 	 * We bump the uz count when the cache size is insufficient to
3300 	 * handle the working set.
3301 	 */
3302 	if (lockfail && zone->uz_count < zone->uz_count_max)
3303 		zone->uz_count++;
3304 	ZONE_UNLOCK(zone);
3305 
3306 	bucket = bucket_alloc(zone, udata, M_NOWAIT);
3307 	CTR3(KTR_UMA, "uma_zfree: zone %s(%p) allocated bucket %p",
3308 	    zone->uz_name, zone, bucket);
3309 	if (bucket) {
3310 		critical_enter();
3311 		cpu = curcpu;
3312 		cache = &zone->uz_cpu[cpu];
3313 		if (cache->uc_freebucket == NULL &&
3314 		    ((zone->uz_flags & UMA_ZONE_NUMA) == 0 ||
3315 		    domain == PCPU_GET(domain))) {
3316 			cache->uc_freebucket = bucket;
3317 			goto zfree_start;
3318 		}
3319 		/*
3320 		 * We lost the race, start over.  We have to drop our
3321 		 * critical section to free the bucket.
3322 		 */
3323 		critical_exit();
3324 		bucket_free(zone, bucket, udata);
3325 		goto zfree_restart;
3326 	}
3327 
3328 	/*
3329 	 * If nothing else caught this, we'll just do an internal free.
3330 	 */
3331 zfree_item:
3332 	zone_free_item(zone, item, udata, SKIP_DTOR);
3333 }
3334 
3335 void
3336 uma_zfree_domain(uma_zone_t zone, void *item, void *udata)
3337 {
3338 
3339 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
3340 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
3341 
3342 	CTR2(KTR_UMA, "uma_zfree_domain thread %x zone %s", curthread,
3343 	    zone->uz_name);
3344 
3345 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
3346 	    ("uma_zfree_domain: called with spinlock or critical section held"));
3347 
3348 	/* uma_zfree(..., NULL) does nothing, to match free(9). */
3349 	if (item == NULL)
3350 		return;
3351 	zone_free_item(zone, item, udata, SKIP_NONE);
3352 }
3353 
3354 static void
3355 slab_free_item(uma_zone_t zone, uma_slab_t slab, void *item)
3356 {
3357 	uma_keg_t keg;
3358 	uma_domain_t dom;
3359 	uint8_t freei;
3360 
3361 	keg = zone->uz_keg;
3362 	MPASS(zone->uz_lockptr == &keg->uk_lock);
3363 	KEG_LOCK_ASSERT(keg);
3364 	MPASS(keg == slab->us_keg);
3365 
3366 	dom = &keg->uk_domain[slab->us_domain];
3367 
3368 	/* Do we need to remove from any lists? */
3369 	if (slab->us_freecount+1 == keg->uk_ipers) {
3370 		LIST_REMOVE(slab, us_link);
3371 		LIST_INSERT_HEAD(&dom->ud_free_slab, slab, us_link);
3372 	} else if (slab->us_freecount == 0) {
3373 		LIST_REMOVE(slab, us_link);
3374 		LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
3375 	}
3376 
3377 	/* Slab management. */
3378 	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
3379 	BIT_SET(SLAB_SETSIZE, freei, &slab->us_free);
3380 	slab->us_freecount++;
3381 
3382 	/* Keg statistics. */
3383 	keg->uk_free++;
3384 }
3385 
3386 static void
3387 zone_release(uma_zone_t zone, void **bucket, int cnt)
3388 {
3389 	void *item;
3390 	uma_slab_t slab;
3391 	uma_keg_t keg;
3392 	uint8_t *mem;
3393 	int i;
3394 
3395 	keg = zone->uz_keg;
3396 	KEG_LOCK(keg);
3397 	for (i = 0; i < cnt; i++) {
3398 		item = bucket[i];
3399 		if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) {
3400 			mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
3401 			if (zone->uz_flags & UMA_ZONE_HASH) {
3402 				slab = hash_sfind(&keg->uk_hash, mem);
3403 			} else {
3404 				mem += keg->uk_pgoff;
3405 				slab = (uma_slab_t)mem;
3406 			}
3407 		} else {
3408 			slab = vtoslab((vm_offset_t)item);
3409 			MPASS(slab->us_keg == keg);
3410 		}
3411 		slab_free_item(zone, slab, item);
3412 	}
3413 	KEG_UNLOCK(keg);
3414 }
3415 
3416 /*
3417  * Frees a single item to any zone.
3418  *
3419  * Arguments:
3420  *	zone   The zone to free to
3421  *	item   The item we're freeing
3422  *	udata  User supplied data for the dtor
3423  *	skip   Skip dtors and finis
3424  */
3425 static void
3426 zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip)
3427 {
3428 #ifdef INVARIANTS
3429 	bool skipdbg;
3430 
3431 	skipdbg = uma_dbg_zskip(zone, item);
3432 	if (skip == SKIP_NONE && !skipdbg) {
3433 		if (zone->uz_flags & UMA_ZONE_MALLOC)
3434 			uma_dbg_free(zone, udata, item);
3435 		else
3436 			uma_dbg_free(zone, NULL, item);
3437 	}
3438 
3439 	if (skip < SKIP_DTOR && zone->uz_dtor != NULL &&
3440 	    (!skipdbg || zone->uz_dtor != trash_dtor ||
3441 	    zone->uz_ctor != trash_ctor))
3442 #else
3443 	if (skip < SKIP_DTOR && zone->uz_dtor != NULL)
3444 #endif
3445 		zone->uz_dtor(item, zone->uz_size, udata);
3446 
3447 	if (skip < SKIP_FINI && zone->uz_fini)
3448 		zone->uz_fini(item, zone->uz_size);
3449 
3450 	zone->uz_release(zone->uz_arg, &item, 1);
3451 
3452 	if (skip & SKIP_CNT)
3453 		return;
3454 
3455 	counter_u64_add(zone->uz_frees, 1);
3456 
3457 	if (zone->uz_max_items > 0) {
3458 		ZONE_LOCK(zone);
3459 		zone->uz_items--;
3460 		if (zone->uz_sleepers > 0 &&
3461 		    zone->uz_items < zone->uz_max_items)
3462 			wakeup_one(zone);
3463 		ZONE_UNLOCK(zone);
3464 	}
3465 }
3466 
3467 /* See uma.h */
3468 int
3469 uma_zone_set_max(uma_zone_t zone, int nitems)
3470 {
3471 	struct uma_bucket_zone *ubz;
3472 
3473 	/*
3474 	 * If the limit is very low we may need to limit how
3475 	 * many items are allowed in CPU caches.
3476 	 */
3477 	ubz = &bucket_zones[0];
3478 	for (; ubz->ubz_entries != 0; ubz++)
3479 		if (ubz->ubz_entries * 2 * mp_ncpus > nitems)
3480 			break;
3481 	if (ubz == &bucket_zones[0])
3482 		nitems = ubz->ubz_entries * 2 * mp_ncpus;
3483 	else
3484 		ubz--;
3485 
3486 	ZONE_LOCK(zone);
3487 	zone->uz_count_max = zone->uz_count = ubz->ubz_entries;
3488 	if (zone->uz_count_min > zone->uz_count_max)
3489 		zone->uz_count_min = zone->uz_count_max;
3490 	zone->uz_max_items = nitems;
3491 	ZONE_UNLOCK(zone);
3492 
3493 	return (nitems);
3494 }
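
/*
 * Illustrative usage sketch (comment only, not compiled): very small limits
 * are rounded up so that per-CPU buckets still make sense, so callers that
 * care about the exact cap should use the value returned here (or query it
 * later with uma_zone_get_max()).  Names are hypothetical.
 *
 *	effective = uma_zone_set_max(foo_zone, requested);
 */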
3495 
3496 /* See uma.h */
3497 int
3498 uma_zone_set_maxcache(uma_zone_t zone, int nitems)
3499 {
3500 
3501 	ZONE_LOCK(zone);
3502 	zone->uz_bkt_max = nitems;
3503 	ZONE_UNLOCK(zone);
3504 
3505 	return (nitems);
3506 }
3507 
3508 /* See uma.h */
3509 int
3510 uma_zone_get_max(uma_zone_t zone)
3511 {
3512 	int nitems;
3513 
3514 	ZONE_LOCK(zone);
3515 	nitems = zone->uz_max_items;
3516 	ZONE_UNLOCK(zone);
3517 
3518 	return (nitems);
3519 }
3520 
3521 /* See uma.h */
3522 void
3523 uma_zone_set_warning(uma_zone_t zone, const char *warning)
3524 {
3525 
3526 	ZONE_LOCK(zone);
3527 	zone->uz_warning = warning;
3528 	ZONE_UNLOCK(zone);
3529 }
3530 
3531 /* See uma.h */
3532 void
3533 uma_zone_set_maxaction(uma_zone_t zone, uma_maxaction_t maxaction)
3534 {
3535 
3536 	ZONE_LOCK(zone);
3537 	TASK_INIT(&zone->uz_maxaction, 0, (task_fn_t *)maxaction, zone);
3538 	ZONE_UNLOCK(zone);
3539 }
3540 
3541 /* See uma.h */
3542 int
3543 uma_zone_get_cur(uma_zone_t zone)
3544 {
3545 	int64_t nitems;
3546 	u_int i;
3547 
3548 	ZONE_LOCK(zone);
3549 	nitems = counter_u64_fetch(zone->uz_allocs) -
3550 	    counter_u64_fetch(zone->uz_frees);
3551 	CPU_FOREACH(i) {
3552 		/*
3553 		 * See the comment in uma_vm_zone_stats() regarding the
3554 		 * safety of accessing the per-cpu caches. With the zone lock
3555 		 * held, it is safe, but can potentially result in stale data.
3556 		 */
3557 		nitems += zone->uz_cpu[i].uc_allocs -
3558 		    zone->uz_cpu[i].uc_frees;
3559 	}
3560 	ZONE_UNLOCK(zone);
3561 
3562 	return (nitems < 0 ? 0 : nitems);
3563 }
3564 
3565 /* See uma.h */
3566 void
3567 uma_zone_set_init(uma_zone_t zone, uma_init uminit)
3568 {
3569 	uma_keg_t keg;
3570 
3571 	KEG_GET(zone, keg);
3572 	KEG_LOCK(keg);
3573 	KASSERT(keg->uk_pages == 0,
3574 	    ("uma_zone_set_init on non-empty keg"));
3575 	keg->uk_init = uminit;
3576 	KEG_UNLOCK(keg);
3577 }
3578 
3579 /* See uma.h */
3580 void
3581 uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
3582 {
3583 	uma_keg_t keg;
3584 
3585 	KEG_GET(zone, keg);
3586 	KEG_LOCK(keg);
3587 	KASSERT(keg->uk_pages == 0,
3588 	    ("uma_zone_set_fini on non-empty keg"));
3589 	keg->uk_fini = fini;
3590 	KEG_UNLOCK(keg);
3591 }
3592 
3593 /* See uma.h */
3594 void
3595 uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
3596 {
3597 
3598 	ZONE_LOCK(zone);
3599 	KASSERT(zone->uz_keg->uk_pages == 0,
3600 	    ("uma_zone_set_zinit on non-empty keg"));
3601 	zone->uz_init = zinit;
3602 	ZONE_UNLOCK(zone);
3603 }
3604 
3605 /* See uma.h */
3606 void
3607 uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
3608 {
3609 
3610 	ZONE_LOCK(zone);
3611 	KASSERT(zone->uz_keg->uk_pages == 0,
3612 	    ("uma_zone_set_zfini on non-empty keg"));
3613 	zone->uz_fini = zfini;
3614 	ZONE_UNLOCK(zone);
3615 }
3616 
3617 /* See uma.h */
3618 /* XXX uk_freef is not actually used with the zone locked */
3619 void
3620 uma_zone_set_freef(uma_zone_t zone, uma_free freef)
3621 {
3622 	uma_keg_t keg;
3623 
3624 	KEG_GET(zone, keg);
3625 	KASSERT(keg != NULL, ("uma_zone_set_freef: Invalid zone type"));
3626 	KEG_LOCK(keg);
3627 	keg->uk_freef = freef;
3628 	KEG_UNLOCK(keg);
3629 }
3630 
3631 /* See uma.h */
3632 /* XXX uk_allocf is not actually used with the zone locked */
3633 void
3634 uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
3635 {
3636 	uma_keg_t keg;
3637 
3638 	KEG_GET(zone, keg);
3639 	KEG_LOCK(keg);
3640 	keg->uk_allocf = allocf;
3641 	KEG_UNLOCK(keg);
3642 }
3643 
3644 /* See uma.h */
3645 void
3646 uma_zone_reserve(uma_zone_t zone, int items)
3647 {
3648 	uma_keg_t keg;
3649 
3650 	KEG_GET(zone, keg);
3651 	KEG_LOCK(keg);
3652 	keg->uk_reserve = items;
3653 	KEG_UNLOCK(keg);
3654 }
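
/*
 * Illustrative usage sketch (comment only, not compiled): the reserve keeps
 * the last 'items' free items in the keg for callers passing M_USE_RESERVE
 * (see keg_fetch_free_slab() above); ordinary allocations will not dip into
 * it and instead try to allocate fresh slabs.  It is typically paired with
 * uma_prealloc() so the reserved items actually exist.  Names are
 * hypothetical.
 *
 *	uma_zone_reserve(foo_zone, 8);
 *	uma_prealloc(foo_zone, 8);
 *	...
 *	fp = uma_zalloc(foo_zone, M_NOWAIT | M_USE_RESERVE);
 */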
3655 
3656 /* See uma.h */
3657 int
3658 uma_zone_reserve_kva(uma_zone_t zone, int count)
3659 {
3660 	uma_keg_t keg;
3661 	vm_offset_t kva;
3662 	u_int pages;
3663 
3664 	KEG_GET(zone, keg);
3665 
3666 	pages = count / keg->uk_ipers;
3667 	if (pages * keg->uk_ipers < count)
3668 		pages++;
3669 	pages *= keg->uk_ppera;
3670 
3671 #ifdef UMA_MD_SMALL_ALLOC
3672 	if (keg->uk_ppera > 1) {
3673 #else
3674 	if (1) {
3675 #endif
3676 		kva = kva_alloc((vm_size_t)pages * PAGE_SIZE);
3677 		if (kva == 0)
3678 			return (0);
3679 	} else
3680 		kva = 0;
3681 
3682 	ZONE_LOCK(zone);
3683 	MPASS(keg->uk_kva == 0);
3684 	keg->uk_kva = kva;
3685 	keg->uk_offset = 0;
3686 	zone->uz_max_items = pages * keg->uk_ipers;
3687 #ifdef UMA_MD_SMALL_ALLOC
3688 	keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc;
3689 #else
3690 	keg->uk_allocf = noobj_alloc;
3691 #endif
3692 	keg->uk_flags |= UMA_ZONE_NOFREE;
3693 	ZONE_UNLOCK(zone);
3694 
3695 	return (1);
3696 }
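
/*
 * Illustrative usage sketch (comment only, not compiled): reserving KVA backs
 * the zone with a fixed mapping region, implicitly caps it at roughly 'count'
 * items (rounded up to whole slabs), and marks the keg UMA_ZONE_NOFREE, so it
 * is normally called right after zone creation.  Names are hypothetical.
 *
 *	if (uma_zone_reserve_kva(foo_zone, maxfoo) == 0)
 *		panic("foo: cannot reserve KVA");
 */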
3697 
3698 /* See uma.h */
3699 void
3700 uma_prealloc(uma_zone_t zone, int items)
3701 {
3702 	struct vm_domainset_iter di;
3703 	uma_domain_t dom;
3704 	uma_slab_t slab;
3705 	uma_keg_t keg;
3706 	int aflags, domain, slabs;
3707 
3708 	KEG_GET(zone, keg);
3709 	KEG_LOCK(keg);
3710 	slabs = items / keg->uk_ipers;
3711 	if (slabs * keg->uk_ipers < items)
3712 		slabs++;
3713 	while (slabs-- > 0) {
3714 		aflags = M_NOWAIT;
3715 		vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain,
3716 		    &aflags);
3717 		for (;;) {
3718 			slab = keg_alloc_slab(keg, zone, domain, M_WAITOK,
3719 			    aflags);
3720 			if (slab != NULL) {
3721 				MPASS(slab->us_keg == keg);
3722 				dom = &keg->uk_domain[slab->us_domain];
3723 				LIST_INSERT_HEAD(&dom->ud_free_slab, slab,
3724 				    us_link);
3725 				break;
3726 			}
3727 			KEG_LOCK(keg);
3728 			if (vm_domainset_iter_policy(&di, &domain) != 0) {
3729 				KEG_UNLOCK(keg);
3730 				vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask);
3731 				KEG_LOCK(keg);
3732 			}
3733 		}
3734 	}
3735 	KEG_UNLOCK(keg);
3736 }
3737 
3738 /* See uma.h */
3739 void
3740 uma_reclaim(int req)
3741 {
3742 
3743 	CTR0(KTR_UMA, "UMA: vm asked us to release pages!");
3744 	sx_xlock(&uma_reclaim_lock);
3745 	bucket_enable();
3746 
3747 	switch (req) {
3748 	case UMA_RECLAIM_TRIM:
3749 		zone_foreach(zone_trim);
3750 		break;
3751 	case UMA_RECLAIM_DRAIN:
3752 	case UMA_RECLAIM_DRAIN_CPU:
3753 		zone_foreach(zone_drain);
3754 		if (req == UMA_RECLAIM_DRAIN_CPU) {
3755 			pcpu_cache_drain_safe(NULL);
3756 			zone_foreach(zone_drain);
3757 		}
3758 		break;
3759 	default:
3760 		panic("unhandled reclamation request %d", req);
3761 	}
3762 
3763 	/*
3764 	 * Some slabs may have been freed, but this zone will be visited early;
3765 	 * we visit it again so that we can free pages that become empty once
3766 	 * other zones are drained.  We have to do the same for buckets.
3767 	 */
3768 	zone_drain(slabzone);
3769 	bucket_zone_drain();
3770 	sx_xunlock(&uma_reclaim_lock);
3771 }
3772 
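/*
 * Editorial note on the handshake below: uma_reclaim_wakeup() increments
 * uma_reclaim_needed and wakes the worker only on the 0 -> 1 transition; the
 * worker performs a full UMA_RECLAIM_DRAIN_CPU pass, resets the counter, and
 * then pauses for a second so that repeated wakeups cannot trigger
 * back-to-back drains.
 */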
3773 static volatile int uma_reclaim_needed;
3774 
3775 void
3776 uma_reclaim_wakeup(void)
3777 {
3778 
3779 	if (atomic_fetchadd_int(&uma_reclaim_needed, 1) == 0)
3780 		wakeup(uma_reclaim);
3781 }
3782 
3783 void
3784 uma_reclaim_worker(void *arg __unused)
3785 {
3786 
3787 	for (;;) {
3788 		sx_xlock(&uma_reclaim_lock);
3789 		while (atomic_load_int(&uma_reclaim_needed) == 0)
3790 			sx_sleep(uma_reclaim, &uma_reclaim_lock, PVM, "umarcl",
3791 			    hz);
3792 		sx_xunlock(&uma_reclaim_lock);
3793 		EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_KMEM);
3794 		uma_reclaim(UMA_RECLAIM_DRAIN_CPU);
3795 		atomic_store_int(&uma_reclaim_needed, 0);
3796 		/* Don't fire more than once per-second. */
3797 		/* Don't fire more than once per second. */
3798 	}
3799 }
3800 
3801 /* See uma.h */
3802 void
3803 uma_zone_reclaim(uma_zone_t zone, int req)
3804 {
3805 
3806 	switch (req) {
3807 	case UMA_RECLAIM_TRIM:
3808 		zone_trim(zone);
3809 		break;
3810 	case UMA_RECLAIM_DRAIN:
3811 		zone_drain(zone);
3812 		break;
3813 	case UMA_RECLAIM_DRAIN_CPU:
3814 		pcpu_cache_drain_safe(zone);
3815 		zone_drain(zone);
3816 		break;
3817 	default:
3818 		panic("unhandled reclamation request %d", req);
3819 	}
3820 }
3821 
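/*
 * Editorial note: the two functions below report a zone as exhausted while at
 * least one thread is sleeping in an allocation waiting for items to be freed
 * (uz_sleepers > 0); in practice only zones with an item limit sleep there.
 */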
3822 /* See uma.h */
3823 int
3824 uma_zone_exhausted(uma_zone_t zone)
3825 {
3826 	int full;
3827 
3828 	ZONE_LOCK(zone);
3829 	full = zone->uz_sleepers > 0;
3830 	ZONE_UNLOCK(zone);
3831 	return (full);
3832 }
3833 
3834 int
3835 uma_zone_exhausted_nolock(uma_zone_t zone)
3836 {
3837 	return (zone->uz_sleepers > 0);
3838 }
3839 
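/*
 * Editorial sketch (assumption): large allocations bypass slabs entirely.  A
 * slab header is taken from slabzone purely for bookkeeping, the memory
 * itself comes from kmem_malloc_domainset(), and vsetslab() records the
 * header so that callers can later look it up from the address alone and hand
 * it to uma_large_free().
 */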
3840 void *
3841 uma_large_malloc_domain(vm_size_t size, int domain, int wait)
3842 {
3843 	struct domainset *policy;
3844 	vm_offset_t addr;
3845 	uma_slab_t slab;
3846 
3847 	if (domain != UMA_ANYDOMAIN) {
3848 		/* avoid allocs targeting empty domains */
3849 		if (VM_DOMAIN_EMPTY(domain))
3850 			domain = UMA_ANYDOMAIN;
3851 	}
3852 	slab = zone_alloc_item(slabzone, NULL, domain, wait);
3853 	if (slab == NULL)
3854 		return (NULL);
3855 	policy = (domain == UMA_ANYDOMAIN) ? DOMAINSET_RR() :
3856 	    DOMAINSET_FIXED(domain);
3857 	addr = kmem_malloc_domainset(policy, size, wait);
3858 	if (addr != 0) {
3859 		vsetslab(addr, slab);
3860 		slab->us_data = (void *)addr;
3861 		slab->us_flags = UMA_SLAB_KERNEL | UMA_SLAB_MALLOC;
3862 		slab->us_size = size;
3863 		slab->us_domain = vm_phys_domain(PHYS_TO_VM_PAGE(
3864 		    pmap_kextract(addr)));
3865 		uma_total_inc(size);
3866 	} else {
3867 		zone_free_item(slabzone, slab, NULL, SKIP_NONE);
3868 	}
3869 
3870 	return ((void *)addr);
3871 }
3872 
3873 void *
3874 uma_large_malloc(vm_size_t size, int wait)
3875 {
3876 
3877 	return (uma_large_malloc_domain(size, UMA_ANYDOMAIN, wait));
3878 }
3879 
3880 void
3881 uma_large_free(uma_slab_t slab)
3882 {
3883 
3884 	KASSERT((slab->us_flags & UMA_SLAB_KERNEL) != 0,
3885 	    ("uma_large_free:  Memory not allocated with uma_large_malloc."));
3886 	kmem_free((vm_offset_t)slab->us_data, slab->us_size);
3887 	uma_total_dec(slab->us_size);
3888 	zone_free_item(slabzone, slab, NULL, SKIP_NONE);
3889 }
3890 
3891 static void
3892 uma_zero_item(void *item, uma_zone_t zone)
3893 {
3894 
3895 	bzero(item, zone->uz_size);
3896 }
3897 
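/*
 * Editorial note: uma_kmem_limit caps the total amount of memory UMA will
 * take from the kernel, uma_kmem_total tracks what is currently allocated,
 * and uma_avail() below reports the remaining headroom between the two.
 */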
3898 unsigned long
3899 uma_limit(void)
3900 {
3901 
3902 	return (uma_kmem_limit);
3903 }
3904 
3905 void
3906 uma_set_limit(unsigned long limit)
3907 {
3908 
3909 	uma_kmem_limit = limit;
3910 }
3911 
3912 unsigned long
3913 uma_size(void)
3914 {
3915 
3916 	return (atomic_load_long(&uma_kmem_total));
3917 }
3918 
3919 long
3920 uma_avail(void)
3921 {
3922 
3923 	return (uma_kmem_limit - uma_size());
3924 }
3925 
3926 void
3927 uma_print_stats(void)
3928 {
3929 	zone_foreach(uma_print_zone);
3930 }
3931 
3932 static void
3933 slab_print(uma_slab_t slab)
3934 {
3935 	printf("slab: keg %p, data %p, freecount %d\n",
3936 		slab->us_keg, slab->us_data, slab->us_freecount);
3937 }
3938 
3939 static void
3940 cache_print(uma_cache_t cache)
3941 {
3942 	printf("alloc: %p(%d), free: %p(%d), cross: %p(%d)\n",
3943 		cache->uc_allocbucket,
3944 		cache->uc_allocbucket ? cache->uc_allocbucket->ub_cnt : 0,
3945 		cache->uc_freebucket,
3946 		cache->uc_freebucket ? cache->uc_freebucket->ub_cnt : 0,
3947 		cache->uc_crossbucket,
3948 		cache->uc_crossbucket ? cache->uc_crossbucket->ub_cnt : 0);
3949 }
3950 
3951 static void
3952 uma_print_keg(uma_keg_t keg)
3953 {
3954 	uma_domain_t dom;
3955 	uma_slab_t slab;
3956 	int i;
3957 
3958 	printf("keg: %s(%p) size %d(%d) flags %#x ipers %d ppera %d "
3959 	    "out %d free %d\n",
3960 	    keg->uk_name, keg, keg->uk_size, keg->uk_rsize, keg->uk_flags,
3961 	    keg->uk_ipers, keg->uk_ppera,
3962 	    (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free,
3963 	    keg->uk_free);
3964 	for (i = 0; i < vm_ndomains; i++) {
3965 		dom = &keg->uk_domain[i];
3966 		printf("Part slabs:\n");
3967 		LIST_FOREACH(slab, &dom->ud_part_slab, us_link)
3968 			slab_print(slab);
3969 		printf("Free slabs:\n");
3970 		LIST_FOREACH(slab, &dom->ud_free_slab, us_link)
3971 			slab_print(slab);
3972 		printf("Full slabs:\n");
3973 		LIST_FOREACH(slab, &dom->ud_full_slab, us_link)
3974 			slab_print(slab);
3975 	}
3976 }
3977 
3978 void
3979 uma_print_zone(uma_zone_t zone)
3980 {
3981 	uma_cache_t cache;
3982 	int i;
3983 
3984 	printf("zone: %s(%p) size %d maxitems %ju flags %#x\n",
3985 	    zone->uz_name, zone, zone->uz_size, (uintmax_t)zone->uz_max_items,
3986 	    zone->uz_flags);
3987 	if (zone->uz_lockptr != &zone->uz_lock)
3988 		uma_print_keg(zone->uz_keg);
3989 	CPU_FOREACH(i) {
3990 		cache = &zone->uz_cpu[i];
3991 		printf("CPU %d Cache:\n", i);
3992 		cache_print(cache);
3993 	}
3994 }
3995 
3996 #ifdef DDB
3997 /*
3998  * Generate statistics across both the zone and its per-cpu caches.  Return
3999  * the desired statistic through each output pointer that is non-NULL.
4000  *
4001  * Note: does not update the zone statistics, as it can't safely clear the
4002  * per-CPU cache statistic.
4003  *
4004  * XXXRW: Following the uc_allocbucket and uc_freebucket pointers here isn't
4005  * safe from off-CPU; we should modify the caches to track this information
4006  * directly so that we don't have to.
4007  */
4008 static void
4009 uma_zone_sumstat(uma_zone_t z, long *cachefreep, uint64_t *allocsp,
4010     uint64_t *freesp, uint64_t *sleepsp, uint64_t *xdomainp)
4011 {
4012 	uma_cache_t cache;
4013 	uint64_t allocs, frees, sleeps, xdomain;
4014 	int cachefree, cpu;
4015 
4016 	allocs = frees = sleeps = xdomain = 0;
4017 	cachefree = 0;
4018 	CPU_FOREACH(cpu) {
4019 		cache = &z->uz_cpu[cpu];
4020 		if (cache->uc_allocbucket != NULL)
4021 			cachefree += cache->uc_allocbucket->ub_cnt;
4022 		if (cache->uc_freebucket != NULL)
4023 			cachefree += cache->uc_freebucket->ub_cnt;
4024 		if (cache->uc_crossbucket != NULL) {
4025 			xdomain += cache->uc_crossbucket->ub_cnt;
4026 			cachefree += cache->uc_crossbucket->ub_cnt;
4027 		}
4028 		allocs += cache->uc_allocs;
4029 		frees += cache->uc_frees;
4030 	}
4031 	allocs += counter_u64_fetch(z->uz_allocs);
4032 	frees += counter_u64_fetch(z->uz_frees);
4033 	sleeps += z->uz_sleeps;
4034 	xdomain += z->uz_xdomain;
4035 	if (cachefreep != NULL)
4036 		*cachefreep = cachefree;
4037 	if (allocsp != NULL)
4038 		*allocsp = allocs;
4039 	if (freesp != NULL)
4040 		*freesp = frees;
4041 	if (sleepsp != NULL)
4042 		*sleepsp = sleeps;
4043 	if (xdomainp != NULL)
4044 		*xdomainp = xdomain;
4045 }
4046 #endif /* DDB */
4047 
4048 static int
4049 sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS)
4050 {
4051 	uma_keg_t kz;
4052 	uma_zone_t z;
4053 	int count;
4054 
4055 	count = 0;
4056 	rw_rlock(&uma_rwlock);
4057 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
4058 		LIST_FOREACH(z, &kz->uk_zones, uz_link)
4059 			count++;
4060 	}
4061 	LIST_FOREACH(z, &uma_cachezones, uz_link)
4062 		count++;
4063 
4064 	rw_runlock(&uma_rwlock);
4065 	return (sysctl_handle_int(oidp, &count, 0, req));
4066 }
4067 
4068 static void
4069 uma_vm_zone_stats(struct uma_type_header *uth, uma_zone_t z, struct sbuf *sbuf,
4070     struct uma_percpu_stat *ups, bool internal)
4071 {
4072 	uma_zone_domain_t zdom;
4073 	uma_bucket_t bucket;
4074 	uma_cache_t cache;
4075 	int i;
4076 
4078 	for (i = 0; i < vm_ndomains; i++) {
4079 		zdom = &z->uz_domain[i];
4080 		uth->uth_zone_free += zdom->uzd_nitems;
4081 	}
4082 	uth->uth_allocs = counter_u64_fetch(z->uz_allocs);
4083 	uth->uth_frees = counter_u64_fetch(z->uz_frees);
4084 	uth->uth_fails = counter_u64_fetch(z->uz_fails);
4085 	uth->uth_sleeps = z->uz_sleeps;
4086 	uth->uth_xdomain = z->uz_xdomain;
4087 
4088 	/*
4089 	 * While it is not normally safe to access the cache bucket pointers
4090 	 * while not on the CPU that owns the cache, we only allow the pointers
4091 	 * to be exchanged without the zone lock held, not invalidated, so
4092 	 * accept the possible race associated with bucket exchange during
4093 	 * monitoring.  Use atomic_load_ptr() to ensure that the bucket pointers
4094 	 * are loaded only once.
4095 	 */
4096 	for (i = 0; i < mp_maxid + 1; i++) {
4097 		bzero(&ups[i], sizeof(*ups));
4098 		if (internal || CPU_ABSENT(i))
4099 			continue;
4100 		cache = &z->uz_cpu[i];
4101 		bucket = (uma_bucket_t)atomic_load_ptr(&cache->uc_allocbucket);
4102 		if (bucket != NULL)
4103 			ups[i].ups_cache_free += bucket->ub_cnt;
4104 		bucket = (uma_bucket_t)atomic_load_ptr(&cache->uc_freebucket);
4105 		if (bucket != NULL)
4106 			ups[i].ups_cache_free += bucket->ub_cnt;
4107 		bucket = (uma_bucket_t)atomic_load_ptr(&cache->uc_crossbucket);
4108 		if (bucket != NULL)
4109 			ups[i].ups_cache_free += bucket->ub_cnt;
4110 		ups[i].ups_allocs = cache->uc_allocs;
4111 		ups[i].ups_frees = cache->uc_frees;
4112 	}
4113 }
4114 
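/*
 * Hedged sketch of the stream produced below (consumed by libmemstat and thus
 * vmstat -z): one struct uma_stream_header, then for every zone a struct
 * uma_type_header followed by ush_maxcpus struct uma_percpu_stat records,
 * with keg-backed zones emitted first and cache zones last.
 */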
4115 static int
4116 sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
4117 {
4118 	struct uma_stream_header ush;
4119 	struct uma_type_header uth;
4120 	struct uma_percpu_stat *ups;
4121 	struct sbuf sbuf;
4122 	uma_keg_t kz;
4123 	uma_zone_t z;
4124 	int count, error, i;
4125 
4126 	error = sysctl_wire_old_buffer(req, 0);
4127 	if (error != 0)
4128 		return (error);
4129 	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
4130 	sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL);
4131 	ups = malloc((mp_maxid + 1) * sizeof(*ups), M_TEMP, M_WAITOK);
4132 
4133 	count = 0;
4134 	rw_rlock(&uma_rwlock);
4135 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
4136 		LIST_FOREACH(z, &kz->uk_zones, uz_link)
4137 			count++;
4138 	}
4139 
4140 	LIST_FOREACH(z, &uma_cachezones, uz_link)
4141 		count++;
4142 
4143 	/*
4144 	 * Insert stream header.
4145 	 */
4146 	bzero(&ush, sizeof(ush));
4147 	ush.ush_version = UMA_STREAM_VERSION;
4148 	ush.ush_maxcpus = (mp_maxid + 1);
4149 	ush.ush_count = count;
4150 	(void)sbuf_bcat(&sbuf, &ush, sizeof(ush));
4151 
4152 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
4153 		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
4154 			bzero(&uth, sizeof(uth));
4155 			ZONE_LOCK(z);
4156 			strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
4157 			uth.uth_align = kz->uk_align;
4158 			uth.uth_size = kz->uk_size;
4159 			uth.uth_rsize = kz->uk_rsize;
4160 			if (z->uz_max_items > 0)
4161 				uth.uth_pages = (z->uz_items / kz->uk_ipers) *
4162 					kz->uk_ppera;
4163 			else
4164 				uth.uth_pages = kz->uk_pages;
4165 			uth.uth_maxpages = (z->uz_max_items / kz->uk_ipers) *
4166 			    kz->uk_ppera;
4167 			uth.uth_limit = z->uz_max_items;
4168 			uth.uth_keg_free = z->uz_keg->uk_free;
4169 
4170 			/*
4171 			 * A zone is secondary if it is not the first entry
4172 			 * on the keg's zone list.
4173 			 */
4174 			if ((z->uz_flags & UMA_ZONE_SECONDARY) &&
4175 			    (LIST_FIRST(&kz->uk_zones) != z))
4176 				uth.uth_zone_flags = UTH_ZONE_SECONDARY;
4177 			uma_vm_zone_stats(&uth, z, &sbuf, ups,
4178 			    kz->uk_flags & UMA_ZFLAG_INTERNAL);
4179 			ZONE_UNLOCK(z);
4180 			(void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
4181 			for (i = 0; i < mp_maxid + 1; i++)
4182 				(void)sbuf_bcat(&sbuf, &ups[i], sizeof(ups[i]));
4183 		}
4184 	}
4185 	LIST_FOREACH(z, &uma_cachezones, uz_link) {
4186 		bzero(&uth, sizeof(uth));
4187 		ZONE_LOCK(z);
4188 		strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
4189 		uth.uth_size = z->uz_size;
4190 		uma_vm_zone_stats(&uth, z, &sbuf, ups, false);
4191 		ZONE_UNLOCK(z);
4192 		(void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
4193 		for (i = 0; i < mp_maxid + 1; i++)
4194 			(void)sbuf_bcat(&sbuf, &ups[i], sizeof(ups[i]));
4195 	}
4196 
4197 	rw_runlock(&uma_rwlock);
4198 	error = sbuf_finish(&sbuf);
4199 	sbuf_delete(&sbuf);
4200 	free(ups, M_TEMP);
4201 	return (error);
4202 }
4203 
4204 int
4205 sysctl_handle_uma_zone_max(SYSCTL_HANDLER_ARGS)
4206 {
4207 	uma_zone_t zone = *(uma_zone_t *)arg1;
4208 	int error, max;
4209 
4210 	max = uma_zone_get_max(zone);
4211 	error = sysctl_handle_int(oidp, &max, 0, req);
4212 	if (error || !req->newptr)
4213 		return (error);
4214 
4215 	uma_zone_set_max(zone, max);
4216 
4217 	return (0);
4218 }
4219 
4220 int
4221 sysctl_handle_uma_zone_cur(SYSCTL_HANDLER_ARGS)
4222 {
4223 	uma_zone_t zone = *(uma_zone_t *)arg1;
4224 	int cur;
4225 
4226 	cur = uma_zone_get_cur(zone);
4227 	return (sysctl_handle_int(oidp, &cur, 0, req));
4228 }
4229 
4230 #ifdef INVARIANTS
4231 static uma_slab_t
4232 uma_dbg_getslab(uma_zone_t zone, void *item)
4233 {
4234 	uma_slab_t slab;
4235 	uma_keg_t keg;
4236 	uint8_t *mem;
4237 
4238 	mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
4239 	if (zone->uz_flags & UMA_ZONE_VTOSLAB) {
4240 		slab = vtoslab((vm_offset_t)mem);
4241 	} else {
4242 		/*
4243 		 * It is safe to return the slab here even though the
4244 		 * zone is unlocked because the item's allocation state
4245 		 * essentially holds a reference.
4246 		 */
4247 		if (zone->uz_lockptr == &zone->uz_lock)
4248 			return (NULL);
4249 		ZONE_LOCK(zone);
4250 		keg = zone->uz_keg;
4251 		if (keg->uk_flags & UMA_ZONE_HASH)
4252 			slab = hash_sfind(&keg->uk_hash, mem);
4253 		else
4254 			slab = (uma_slab_t)(mem + keg->uk_pgoff);
4255 		ZONE_UNLOCK(zone);
4256 	}
4257 
4258 	return (slab);
4259 }
4260 
4261 static bool
4262 uma_dbg_zskip(uma_zone_t zone, void *mem)
4263 {
4264 
4265 	if (zone->uz_lockptr == &zone->uz_lock)
4266 		return (true);
4267 
4268 	return (uma_dbg_kskip(zone->uz_keg, mem));
4269 }
4270 
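/*
 * Editorial example (assumption): with dbg_divisor == 3 roughly one in three
 * items is selected for full debugging checks; the index computed below from
 * the page number and the slot within the page keeps that choice stable for a
 * given address.
 */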
4271 static bool
4272 uma_dbg_kskip(uma_keg_t keg, void *mem)
4273 {
4274 	uintptr_t idx;
4275 
4276 	if (dbg_divisor == 0)
4277 		return (true);
4278 
4279 	if (dbg_divisor == 1)
4280 		return (false);
4281 
4282 	idx = (uintptr_t)mem >> PAGE_SHIFT;
4283 	if (keg->uk_ipers > 1) {
4284 		idx *= keg->uk_ipers;
4285 		idx += ((uintptr_t)mem & PAGE_MASK) / keg->uk_rsize;
4286 	}
4287 
4288 	if ((idx / dbg_divisor) * dbg_divisor != idx) {
4289 		counter_u64_add(uma_skip_cnt, 1);
4290 		return (true);
4291 	}
4292 	counter_u64_add(uma_dbg_cnt, 1);
4293 
4294 	return (false);
4295 }
4296 
4297 /*
4298  * Set up the slab's freei data such that uma_dbg_free can function.
4300  */
4301 static void
4302 uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item)
4303 {
4304 	uma_keg_t keg;
4305 	int freei;
4306 
4307 	if (slab == NULL) {
4308 		slab = uma_dbg_getslab(zone, item);
4309 		if (slab == NULL)
4310 			panic("uma: item %p did not belong to zone %s\n",
4311 			    item, zone->uz_name);
4312 	}
4313 	keg = slab->us_keg;
4314 	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
4315 
4316 	if (BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree))
4317 		panic("Duplicate alloc of %p from zone %p(%s) slab %p(%d)\n",
4318 		    item, zone, zone->uz_name, slab, freei);
4319 	BIT_SET_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree);
4320 
4321 	return;
4322 }
4323 
4324 /*
4325  * Verifies freed addresses.  Checks for alignment, valid slab membership
4326  * and duplicate frees.
4328  */
4329 static void
4330 uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
4331 {
4332 	uma_keg_t keg;
4333 	int freei;
4334 
4335 	if (slab == NULL) {
4336 		slab = uma_dbg_getslab(zone, item);
4337 		if (slab == NULL)
4338 			panic("uma: Freed item %p did not belong to zone %s\n",
4339 			    item, zone->uz_name);
4340 	}
4341 	keg = slab->us_keg;
4342 	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
4343 
4344 	if (freei >= keg->uk_ipers)
4345 		panic("Invalid free of %p from zone %p(%s) slab %p(%d)\n",
4346 		    item, zone, zone->uz_name, slab, freei);
4347 
4348 	if (((freei * keg->uk_rsize) + slab->us_data) != item)
4349 		panic("Unaligned free of %p from zone %p(%s) slab %p(%d)\n",
4350 		    item, zone, zone->uz_name, slab, freei);
4351 
4352 	if (!BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree))
4353 		panic("Duplicate free of %p from zone %p(%s) slab %p(%d)\n",
4354 		    item, zone, zone->uz_name, slab, freei);
4355 
4356 	BIT_CLR_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree);
4357 }
4358 #endif /* INVARIANTS */
4359 
4360 #ifdef DDB
4361 static int64_t
4362 get_uma_stats(uma_keg_t kz, uma_zone_t z, uint64_t *allocs, uint64_t *used,
4363     uint64_t *sleeps, long *cachefree, uint64_t *xdomain)
4364 {
4365 	uint64_t frees;
4366 	int i;
4367 
4368 	if (kz->uk_flags & UMA_ZFLAG_INTERNAL) {
4369 		*allocs = counter_u64_fetch(z->uz_allocs);
4370 		frees = counter_u64_fetch(z->uz_frees);
4371 		*sleeps = z->uz_sleeps;
4372 		*cachefree = 0;
4373 		*xdomain = 0;
4374 	} else
4375 		uma_zone_sumstat(z, cachefree, allocs, &frees, sleeps,
4376 		    xdomain);
4377 	if (!((z->uz_flags & UMA_ZONE_SECONDARY) &&
4378 	    (LIST_FIRST(&kz->uk_zones) != z)))
4379 		*cachefree += kz->uk_free;
4380 	for (i = 0; i < vm_ndomains; i++)
4381 		*cachefree += z->uz_domain[i].uzd_nitems;
4382 	*used = *allocs - frees;
4383 	return (((int64_t)*used + *cachefree) * kz->uk_size);
4384 }
4385 
4386 DB_SHOW_COMMAND(uma, db_show_uma)
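/*
 * Editorial note: "show uma" from DDB prints one line per zone, sorted by
 * total memory footprint; the repeated scan below is effectively a selection
 * sort, presumably chosen because no memory can be allocated while in the
 * debugger.  The /i modifier switches to machine-parseable CSV output.
 */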
4387 {
4388 	const char *fmt_hdr, *fmt_entry;
4389 	uma_keg_t kz;
4390 	uma_zone_t z;
4391 	uint64_t allocs, used, sleeps, xdomain;
4392 	long cachefree;
4393 	/* variables for sorting */
4394 	uma_keg_t cur_keg;
4395 	uma_zone_t cur_zone, last_zone;
4396 	int64_t cur_size, last_size, size;
4397 	int ties;
4398 
4399 	/* /i option produces machine-parseable CSV output */
4400 	if (modif[0] == 'i') {
4401 		fmt_hdr = "%s,%s,%s,%s,%s,%s,%s,%s,%s\n";
4402 		fmt_entry = "\"%s\",%ju,%jd,%ld,%ju,%ju,%u,%jd,%ju\n";
4403 	} else {
4404 		fmt_hdr = "%18s %6s %7s %7s %11s %7s %7s %10s %8s\n";
4405 		fmt_entry = "%18s %6ju %7jd %7ld %11ju %7ju %7u %10jd %8ju\n";
4406 	}
4407 
4408 	db_printf(fmt_hdr, "Zone", "Size", "Used", "Free", "Requests",
4409 	    "Sleeps", "Bucket", "Total Mem", "XFree");
4410 
4411 	/* Sort the zones with largest size first. */
4412 	last_zone = NULL;
4413 	last_size = INT64_MAX;
4414 	for (;;) {
4415 		cur_zone = NULL;
4416 		cur_size = -1;
4417 		ties = 0;
4418 		LIST_FOREACH(kz, &uma_kegs, uk_link) {
4419 			LIST_FOREACH(z, &kz->uk_zones, uz_link) {
4420 				/*
4421 				 * In the case of size ties, print out zones
4422 				 * in the order they are encountered.  That is,
4423 				 * when we encounter the most recently output
4424 				 * zone, we have already printed all preceding
4425 				 * ties, and we must print all following ties.
4426 				 */
4427 				if (z == last_zone) {
4428 					ties = 1;
4429 					continue;
4430 				}
4431 				size = get_uma_stats(kz, z, &allocs, &used,
4432 				    &sleeps, &cachefree, &xdomain);
4433 				if (size > cur_size &&
4434 				    size < last_size + ties) {
4435 					cur_size = size;
4436 					cur_zone = z;
4437 					cur_keg = kz;
4438 				}
4439 			}
4440 		}
4441 		if (cur_zone == NULL)
4442 			break;
4443 
4444 		size = get_uma_stats(cur_keg, cur_zone, &allocs, &used,
4445 		    &sleeps, &cachefree, &xdomain);
4446 		db_printf(fmt_entry, cur_zone->uz_name,
4447 		    (uintmax_t)cur_keg->uk_size, (intmax_t)used, cachefree,
4448 		    (uintmax_t)allocs, (uintmax_t)sleeps,
4449 		    (unsigned)cur_zone->uz_count, (intmax_t)size, xdomain);
4450 
4451 		if (db_pager_quit)
4452 			return;
4453 		last_zone = cur_zone;
4454 		last_size = cur_size;
4455 	}
4456 }
4457 
4458 DB_SHOW_COMMAND(umacache, db_show_umacache)
4459 {
4460 	uma_zone_t z;
4461 	uint64_t allocs, frees;
4462 	long cachefree;
4463 	int i;
4464 
4465 	db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free",
4466 	    "Requests", "Bucket");
4467 	LIST_FOREACH(z, &uma_cachezones, uz_link) {
4468 		uma_zone_sumstat(z, &cachefree, &allocs, &frees, NULL, NULL);
4469 		for (i = 0; i < vm_ndomains; i++)
4470 			cachefree += z->uz_domain[i].uzd_nitems;
4471 		db_printf("%18s %8ju %8jd %8ld %12ju %8u\n",
4472 		    z->uz_name, (uintmax_t)z->uz_size,
4473 		    (intmax_t)(allocs - frees), cachefree,
4474 		    (uintmax_t)allocs, z->uz_count);
4475 		if (db_pager_quit)
4476 			return;
4477 	}
4478 }
4479 #endif	/* DDB */
4480