xref: /freebsd/sys/vm/uma_core.c (revision 324cdd9320f58837c2fbaa7f6ceb9ea5c33d5b2a)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2002-2019 Jeffrey Roberson <jeff@FreeBSD.org>
5  * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
6  * Copyright (c) 2004-2006 Robert N. M. Watson
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice unmodified, this list of conditions, and the following
14  *    disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /*
32  * uma_core.c  Implementation of the Universal Memory allocator
33  *
34  * This allocator is intended to replace the multitude of similar object caches
35  * in the standard FreeBSD kernel.  The intent is to be flexible as well as
36  * efficient.  A primary design goal is to return unused memory to the rest of
37  * the system.  This will make the system as a whole more flexible due to the
38  * ability to move memory to subsystems which most need it instead of leaving
39  * pools of reserved memory unused.
40  *
41  * The basic ideas stem from similar slab/zone based allocators whose algorithms
42  * are well known.
43  *
44  */
45 
46 /*
47  * TODO:
48  *	- Improve memory usage for large allocations
49  *	- Investigate cache size adjustments
50  */
51 
52 #include <sys/cdefs.h>
53 __FBSDID("$FreeBSD$");
54 
55 #include "opt_ddb.h"
56 #include "opt_param.h"
57 #include "opt_vm.h"
58 
59 #include <sys/param.h>
60 #include <sys/systm.h>
61 #include <sys/bitset.h>
62 #include <sys/domainset.h>
63 #include <sys/eventhandler.h>
64 #include <sys/kernel.h>
65 #include <sys/types.h>
66 #include <sys/limits.h>
67 #include <sys/queue.h>
68 #include <sys/malloc.h>
69 #include <sys/ktr.h>
70 #include <sys/lock.h>
71 #include <sys/sysctl.h>
72 #include <sys/mutex.h>
73 #include <sys/proc.h>
74 #include <sys/random.h>
75 #include <sys/rwlock.h>
76 #include <sys/sbuf.h>
77 #include <sys/sched.h>
78 #include <sys/smp.h>
79 #include <sys/taskqueue.h>
80 #include <sys/vmmeter.h>
81 
82 #include <vm/vm.h>
83 #include <vm/vm_domainset.h>
84 #include <vm/vm_object.h>
85 #include <vm/vm_page.h>
86 #include <vm/vm_pageout.h>
87 #include <vm/vm_param.h>
88 #include <vm/vm_phys.h>
89 #include <vm/vm_pagequeue.h>
90 #include <vm/vm_map.h>
91 #include <vm/vm_kern.h>
92 #include <vm/vm_extern.h>
93 #include <vm/uma.h>
94 #include <vm/uma_int.h>
95 #include <vm/uma_dbg.h>
96 
97 #include <ddb/ddb.h>
98 
99 #ifdef DEBUG_MEMGUARD
100 #include <vm/memguard.h>
101 #endif
102 
103 /*
104  * These are the primary zones from which all other zones and kegs are spawned.
105  */
106 static uma_zone_t kegs;
107 static uma_zone_t zones;
108 
109 /* This is the zone from which all offpage uma_slab_ts are allocated. */
110 static uma_zone_t slabzone;
111 
112 /*
113  * The initial hash tables come out of this zone so they can be allocated
114  * prior to malloc coming up.
115  */
116 static uma_zone_t hashzone;
117 
118 /* The boot-time adjusted value for cache line alignment. */
119 int uma_align_cache = 64 - 1;
120 
121 static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");
122 static MALLOC_DEFINE(M_UMA, "UMA", "UMA Misc");
123 
124 /*
125  * Are we allowed to allocate buckets?
126  */
127 static int bucketdisable = 1;
128 
129 /* Linked list of all kegs in the system */
130 static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);
131 
132 /* Linked list of all cache-only zones in the system */
133 static LIST_HEAD(,uma_zone) uma_cachezones =
134     LIST_HEAD_INITIALIZER(uma_cachezones);
135 
136 /* This RW lock protects the keg list */
137 static struct rwlock_padalign __exclusive_cache_line uma_rwlock;
138 
139 /*
140  * Pointer to and count of the pool of pages that is preallocated at
141  * startup to bootstrap UMA.
142  */
143 static char *bootmem;
144 static int boot_pages;
145 
146 static struct sx uma_reclaim_lock;
147 
148 /*
149  * kmem soft limit, initialized by uma_set_limit().  Ensure that early
150  * allocations don't trigger a wakeup of the reclaim thread.
151  */
152 unsigned long uma_kmem_limit = LONG_MAX;
153 SYSCTL_ULONG(_vm, OID_AUTO, uma_kmem_limit, CTLFLAG_RD, &uma_kmem_limit, 0,
154     "UMA kernel memory soft limit");
155 unsigned long uma_kmem_total;
156 SYSCTL_ULONG(_vm, OID_AUTO, uma_kmem_total, CTLFLAG_RD, &uma_kmem_total, 0,
157     "UMA kernel memory usage");
158 
159 /* Is the VM done starting up? */
160 static enum { BOOT_COLD = 0, BOOT_STRAPPED, BOOT_PAGEALLOC, BOOT_BUCKETS,
161     BOOT_RUNNING } booted = BOOT_COLD;
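/*
 * Zones whose slabs fit in a single page stop using startup_alloc() at
 * BOOT_PAGEALLOC; all other kegs keep drawing from the boot page pool
 * until BOOT_BUCKETS (see startup_alloc()).
 */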
162 
163 /*
164  * This is the handle used to schedule events that need to happen
165  * outside of the allocation fast path.
166  */
167 static struct callout uma_callout;
168 #define	UMA_TIMEOUT	20		/* Seconds for callout interval. */
169 
170 /*
171  * This structure is passed as the zone ctor arg so that I don't have to create
172  * a special allocation function just for zones.
173  */
174 struct uma_zctor_args {
175 	const char *name;
176 	size_t size;
177 	uma_ctor ctor;
178 	uma_dtor dtor;
179 	uma_init uminit;
180 	uma_fini fini;
181 	uma_import import;
182 	uma_release release;
183 	void *arg;
184 	uma_keg_t keg;
185 	int align;
186 	uint32_t flags;
187 };
188 
189 struct uma_kctor_args {
190 	uma_zone_t zone;
191 	size_t size;
192 	uma_init uminit;
193 	uma_fini fini;
194 	int align;
195 	uint32_t flags;
196 };
197 
198 struct uma_bucket_zone {
199 	uma_zone_t	ubz_zone;
200 	char		*ubz_name;
201 	int		ubz_entries;	/* Number of items it can hold. */
202 	int		ubz_maxsize;	/* Maximum allocation size per-item. */
203 };
204 
205 /*
206  * Compute the actual number of bucket entries to pack them in power
207  * of two sizes for more efficient space utilization.
208  */
209 #define	BUCKET_SIZE(n)						\
210     (((sizeof(void *) * (n)) - sizeof(struct uma_bucket)) / sizeof(void *))
211 
212 #define	BUCKET_MAX	BUCKET_SIZE(256)
213 #define	BUCKET_MIN	BUCKET_SIZE(4)
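/*
 * Worked example, assuming a 24-byte struct uma_bucket header on LP64:
 *	BUCKET_SIZE(4)   = (4 * 8 - 24) / 8   = 1 usable entry
 *	BUCKET_SIZE(256) = (256 * 8 - 24) / 8 = 253 usable entries
 * so each "N Bucket" zone packs its header plus entry array into exactly
 * N pointers worth of memory (see bucket_init()).
 */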
214 
215 struct uma_bucket_zone bucket_zones[] = {
216 	{ NULL, "4 Bucket", BUCKET_SIZE(4), 4096 },
217 	{ NULL, "6 Bucket", BUCKET_SIZE(6), 3072 },
218 	{ NULL, "8 Bucket", BUCKET_SIZE(8), 2048 },
219 	{ NULL, "12 Bucket", BUCKET_SIZE(12), 1536 },
220 	{ NULL, "16 Bucket", BUCKET_SIZE(16), 1024 },
221 	{ NULL, "32 Bucket", BUCKET_SIZE(32), 512 },
222 	{ NULL, "64 Bucket", BUCKET_SIZE(64), 256 },
223 	{ NULL, "128 Bucket", BUCKET_SIZE(128), 128 },
224 	{ NULL, "256 Bucket", BUCKET_SIZE(256), 64 },
225 	{ NULL, NULL, 0}
226 };
227 
228 /*
229  * Flags and enumerations to be passed to internal functions.
230  */
231 enum zfreeskip {
232 	SKIP_NONE =	0,
233 	SKIP_CNT =	0x00000001,
234 	SKIP_DTOR =	0x00010000,
235 	SKIP_FINI =	0x00020000,
236 };
237 
238 /* Prototypes. */
239 
240 int	uma_startup_count(int);
241 void	uma_startup(void *, int);
242 void	uma_startup1(void);
243 void	uma_startup2(void);
244 
245 static void *noobj_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
246 static void *page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
247 static void *pcpu_page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
248 static void *startup_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
249 static void page_free(void *, vm_size_t, uint8_t);
250 static void pcpu_page_free(void *, vm_size_t, uint8_t);
251 static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int, int, int);
252 static void cache_drain(uma_zone_t);
253 static void bucket_drain(uma_zone_t, uma_bucket_t);
254 static void bucket_cache_reclaim(uma_zone_t zone, bool);
255 static int keg_ctor(void *, int, void *, int);
256 static void keg_dtor(void *, int, void *);
257 static int zone_ctor(void *, int, void *, int);
258 static void zone_dtor(void *, int, void *);
259 static int zero_init(void *, int, int);
260 static void keg_small_init(uma_keg_t keg);
261 static void keg_large_init(uma_keg_t keg);
262 static void zone_foreach(void (*zfunc)(uma_zone_t, void *), void *);
263 static void zone_timeout(uma_zone_t zone, void *);
264 static int hash_alloc(struct uma_hash *, u_int);
265 static int hash_expand(struct uma_hash *, struct uma_hash *);
266 static void hash_free(struct uma_hash *hash);
267 static void uma_timeout(void *);
268 static void uma_startup3(void);
269 static void *zone_alloc_item(uma_zone_t, void *, int, int);
270 static void *zone_alloc_item_locked(uma_zone_t, void *, int, int);
271 static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip);
272 static void bucket_enable(void);
273 static void bucket_init(void);
274 static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int);
275 static void bucket_free(uma_zone_t zone, uma_bucket_t, void *);
276 static void bucket_zone_drain(void);
277 static uma_bucket_t zone_alloc_bucket(uma_zone_t, void *, int, int);
278 static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
279 static void slab_free_item(uma_zone_t zone, uma_slab_t slab, void *item);
280 static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
281     uma_fini fini, int align, uint32_t flags);
282 static int zone_import(void *, void **, int, int, int);
283 static void zone_release(void *, void **, int);
284 static void uma_zero_item(void *, uma_zone_t);
285 static bool cache_alloc(uma_zone_t, uma_cache_t, void *, int);
286 static bool cache_free(uma_zone_t, uma_cache_t, void *, void *, int);
287 
288 static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
289 static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);
290 static int sysctl_handle_uma_zone_allocs(SYSCTL_HANDLER_ARGS);
291 static int sysctl_handle_uma_zone_frees(SYSCTL_HANDLER_ARGS);
292 
293 #ifdef INVARIANTS
294 static bool uma_dbg_kskip(uma_keg_t keg, void *mem);
295 static bool uma_dbg_zskip(uma_zone_t zone, void *mem);
296 static void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item);
297 static void uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item);
298 
299 static SYSCTL_NODE(_vm, OID_AUTO, debug, CTLFLAG_RD, 0,
300     "Memory allocation debugging");
301 
302 static u_int dbg_divisor = 1;
303 SYSCTL_UINT(_vm_debug, OID_AUTO, divisor,
304     CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &dbg_divisor, 0,
305     "Debug & thrash every this item in memory allocator");
306 
307 static counter_u64_t uma_dbg_cnt = EARLY_COUNTER;
308 static counter_u64_t uma_skip_cnt = EARLY_COUNTER;
309 SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, trashed, CTLFLAG_RD,
310     &uma_dbg_cnt, "memory items debugged");
311 SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, skipped, CTLFLAG_RD,
312     &uma_skip_cnt, "memory items skipped, not debugged");
313 #endif
314 
315 SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);
316 
317 SYSCTL_NODE(_vm, OID_AUTO, uma, CTLFLAG_RW, 0, "Universal Memory Allocator");
318 
319 SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLTYPE_INT,
320     0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones");
321 
322 SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
323     0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");
324 
325 static int zone_warnings = 1;
326 SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RWTUN, &zone_warnings, 0,
327     "Warn when UMA zones becomes full");
328 
329 /*
330  * This routine checks to see whether or not it's safe to enable buckets.
331  */
332 static void
333 bucket_enable(void)
334 {
335 	bucketdisable = vm_page_count_min();
336 }
337 
338 /*
339  * Initialize bucket_zones, the array of zones of buckets of various sizes.
340  *
341  * For each zone, calculate the memory required for each bucket, consisting
342  * of the header and an array of pointers.
343  */
344 static void
345 bucket_init(void)
346 {
347 	struct uma_bucket_zone *ubz;
348 	int size;
349 
350 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) {
351 		size = roundup(sizeof(struct uma_bucket), sizeof(void *));
352 		size += sizeof(void *) * ubz->ubz_entries;
353 		ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
354 		    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
355 		    UMA_ZONE_MTXCLASS | UMA_ZFLAG_BUCKET | UMA_ZONE_NUMA);
356 	}
357 }
358 
359 /*
360  * Given a desired number of entries for a bucket, return the zone from which
361  * to allocate the bucket.
362  */
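/*
 * For example, a request for more entries than the largest zone provides
 * falls off the end of the table; the trailing ubz-- then returns the
 * largest ("256 Bucket") zone rather than the sentinel entry.
 */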
363 static struct uma_bucket_zone *
364 bucket_zone_lookup(int entries)
365 {
366 	struct uma_bucket_zone *ubz;
367 
368 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
369 		if (ubz->ubz_entries >= entries)
370 			return (ubz);
371 	ubz--;
372 	return (ubz);
373 }
374 
375 static struct uma_bucket_zone *
376 bucket_zone_max(uma_zone_t zone, int nitems)
377 {
378 	struct uma_bucket_zone *ubz;
379 	int bpcpu;
380 
381 	bpcpu = 2;
382 #ifdef UMA_XDOMAIN
383 	if ((zone->uz_flags & UMA_ZONE_NUMA) != 0)
384 		/* Count the cross-domain bucket. */
385 		bpcpu++;
386 #endif
387 
388 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
389 		if (ubz->ubz_entries * bpcpu * mp_ncpus > nitems)
390 			break;
391 	if (ubz == &bucket_zones[0])
392 		ubz = NULL;
393 	else
394 		ubz--;
395 	return (ubz);
396 }
397 
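/*
 * Select a bucket entry count for a zone with items of the given size.
 * For example, a 2048-byte item walks the table until the "12 Bucket"
 * zone's 1536-byte per-item budget is too small, and so uses the
 * "8 Bucket" entry count; items over 4096 bytes short-circuit to a
 * count scaled down by their size, but never less than 1.
 */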
398 static int
399 bucket_select(int size)
400 {
401 	struct uma_bucket_zone *ubz;
402 
403 	ubz = &bucket_zones[0];
404 	if (size > ubz->ubz_maxsize)
405 		return MAX((ubz->ubz_maxsize * ubz->ubz_entries) / size, 1);
406 
407 	for (; ubz->ubz_entries != 0; ubz++)
408 		if (ubz->ubz_maxsize < size)
409 			break;
410 	ubz--;
411 	return (ubz->ubz_entries);
412 }
413 
414 static uma_bucket_t
415 bucket_alloc(uma_zone_t zone, void *udata, int flags)
416 {
417 	struct uma_bucket_zone *ubz;
418 	uma_bucket_t bucket;
419 
420 	/*
421 	 * This is to stop us from allocating per cpu buckets while we're
422 	 * running out of vm.boot_pages.  Otherwise, we would exhaust the
423 	 * boot pages.  This also prevents us from allocating buckets in
424 	 * low memory situations.
425 	 */
426 	if (bucketdisable)
427 		return (NULL);
428 	/*
429 	 * To limit bucket recursion we store the original zone flags
430 	 * in a cookie passed via zalloc_arg/zfree_arg.  This allows the
431 	 * NOVM flag to persist even through deep recursions.  We also
432 	 * store ZFLAG_BUCKET once we have recursed attempting to allocate
433 	 * a bucket for a bucket zone so we do not allow infinite bucket
434 	 * recursion.  This cookie will even persist to frees of unused
435 	 * buckets via the allocation path or bucket allocations in the
436 	 * free path.
437 	 */
438 	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
439 		udata = (void *)(uintptr_t)zone->uz_flags;
440 	else {
441 		if ((uintptr_t)udata & UMA_ZFLAG_BUCKET)
442 			return (NULL);
443 		udata = (void *)((uintptr_t)udata | UMA_ZFLAG_BUCKET);
444 	}
445 	if ((uintptr_t)udata & UMA_ZFLAG_CACHEONLY)
446 		flags |= M_NOVM;
447 	ubz = bucket_zone_lookup(zone->uz_bucket_size);
448 	if (ubz->ubz_zone == zone && (ubz + 1)->ubz_entries != 0)
449 		ubz++;
450 	bucket = uma_zalloc_arg(ubz->ubz_zone, udata, flags);
451 	if (bucket) {
452 #ifdef INVARIANTS
453 		bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
454 #endif
455 		bucket->ub_cnt = 0;
456 		bucket->ub_entries = ubz->ubz_entries;
457 	}
458 
459 	return (bucket);
460 }
461 
462 static void
463 bucket_free(uma_zone_t zone, uma_bucket_t bucket, void *udata)
464 {
465 	struct uma_bucket_zone *ubz;
466 
467 	KASSERT(bucket->ub_cnt == 0,
468 	    ("bucket_free: Freeing a non free bucket."));
469 	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
470 		udata = (void *)(uintptr_t)zone->uz_flags;
471 	ubz = bucket_zone_lookup(bucket->ub_entries);
472 	uma_zfree_arg(ubz->ubz_zone, bucket, udata);
473 }
474 
475 static void
476 bucket_zone_drain(void)
477 {
478 	struct uma_bucket_zone *ubz;
479 
480 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
481 		uma_zone_reclaim(ubz->ubz_zone, UMA_RECLAIM_DRAIN);
482 }
483 
484 /*
485  * Attempt to satisfy an allocation by retrieving a full bucket from one of the
486  * zone's caches.
487  */
488 static uma_bucket_t
489 zone_fetch_bucket(uma_zone_t zone, uma_zone_domain_t zdom)
490 {
491 	uma_bucket_t bucket;
492 
493 	ZONE_LOCK_ASSERT(zone);
494 
495 	if ((bucket = TAILQ_FIRST(&zdom->uzd_buckets)) != NULL) {
496 		MPASS(zdom->uzd_nitems >= bucket->ub_cnt);
497 		TAILQ_REMOVE(&zdom->uzd_buckets, bucket, ub_link);
498 		zdom->uzd_nitems -= bucket->ub_cnt;
499 		if (zdom->uzd_imin > zdom->uzd_nitems)
500 			zdom->uzd_imin = zdom->uzd_nitems;
501 		zone->uz_bkt_count -= bucket->ub_cnt;
502 	}
503 	return (bucket);
504 }
505 
506 /*
507  * Insert a full bucket into the specified cache.  The "ws" parameter indicates
508  * whether the bucket's contents should be counted as part of the zone's working
509  * set.
510  */
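/*
 * Working-set buckets are queued at the head, so zone_fetch_bucket() reuses
 * them first; buckets parked outside the working set go to the tail, which
 * is where bucket_cache_reclaim() trims from.
 */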
511 static void
512 zone_put_bucket(uma_zone_t zone, uma_zone_domain_t zdom, uma_bucket_t bucket,
513     const bool ws)
514 {
515 
516 	ZONE_LOCK_ASSERT(zone);
517 	KASSERT(!ws || zone->uz_bkt_count < zone->uz_bkt_max,
518 	    ("%s: zone %p overflow", __func__, zone));
519 
520 	if (ws)
521 		TAILQ_INSERT_HEAD(&zdom->uzd_buckets, bucket, ub_link);
522 	else
523 		TAILQ_INSERT_TAIL(&zdom->uzd_buckets, bucket, ub_link);
524 	zdom->uzd_nitems += bucket->ub_cnt;
525 	if (ws && zdom->uzd_imax < zdom->uzd_nitems)
526 		zdom->uzd_imax = zdom->uzd_nitems;
527 	zone->uz_bkt_count += bucket->ub_cnt;
528 }
529 
530 static void
531 zone_log_warning(uma_zone_t zone)
532 {
533 	static const struct timeval warninterval = { 300, 0 };
534 
535 	if (!zone_warnings || zone->uz_warning == NULL)
536 		return;
537 
538 	if (ratecheck(&zone->uz_ratecheck, &warninterval))
539 		printf("[zone: %s] %s\n", zone->uz_name, zone->uz_warning);
540 }
541 
542 static inline void
543 zone_maxaction(uma_zone_t zone)
544 {
545 
546 	if (zone->uz_maxaction.ta_func != NULL)
547 		taskqueue_enqueue(taskqueue_thread, &zone->uz_maxaction);
548 }
549 
550 /*
551  * Routine called by our callout to fire off time-interval-based
552  * calculations.  (stats, hash size, etc.)
553  *
554  * Arguments:
555  *	arg   Unused
556  *
557  * Returns:
558  *	Nothing
559  */
560 static void
561 uma_timeout(void *unused)
562 {
563 	bucket_enable();
564 	zone_foreach(zone_timeout, NULL);
565 
566 	/* Reschedule this event */
567 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
568 }
569 
570 /*
571  * Update the working set size estimate for the zone's bucket cache.
572  * The constants chosen here are somewhat arbitrary.  With an update period of
573  * 20s (UMA_TIMEOUT), this estimate is dominated by zone activity over the
574  * last 100s.
575  */
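/*
 * For example, a sustained per-interval delta (imax - imin) of 100 items
 * drives uzd_wss from 0 toward 100 as 80, 96, 99.2, ...: each update is an
 * exponentially weighted moving average with a 4/5 weight on the most
 * recent interval.
 */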
576 static void
577 zone_domain_update_wss(uma_zone_domain_t zdom)
578 {
579 	long wss;
580 
581 	MPASS(zdom->uzd_imax >= zdom->uzd_imin);
582 	wss = zdom->uzd_imax - zdom->uzd_imin;
583 	zdom->uzd_imax = zdom->uzd_imin = zdom->uzd_nitems;
584 	zdom->uzd_wss = (4 * wss + zdom->uzd_wss) / 5;
585 }
586 
587 /*
588  * Routine to perform timeout driven calculations.  This expands the
589  * keg hash table and updates the per-domain working set size estimates.
590  *
591  *  Returns nothing.
592  */
593 static void
594 zone_timeout(uma_zone_t zone, void *unused)
595 {
596 	uma_keg_t keg;
597 	u_int slabs;
598 
599 	if ((zone->uz_flags & UMA_ZONE_HASH) == 0)
600 		goto update_wss;
601 
602 	keg = zone->uz_keg;
603 	KEG_LOCK(keg);
604 	/*
605 	 * Expand the keg hash table.
606 	 *
607 	 * This is done if the number of slabs is larger than the hash size.
608 	 * What I'm trying to do here is eliminate collisions entirely.  This
609 	 * may be a little aggressive.  Should I allow for two collisions max?
610 	 */
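	/*
	 * For example, a keg backing 100 single-page slabs with a 64-bucket
	 * hash is resized here to 1 << fls(100) == 128 buckets.
	 */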
611 	if (keg->uk_flags & UMA_ZONE_HASH &&
612 	    (slabs = keg->uk_pages / keg->uk_ppera) >
613 	     keg->uk_hash.uh_hashsize) {
614 		struct uma_hash newhash;
615 		struct uma_hash oldhash;
616 		int ret;
617 
618 		/*
619 		 * This is so involved because allocating and freeing
620 		 * while the keg lock is held will lead to deadlock.
621 		 * I have to do everything in stages and check for
622 		 * races.
623 		 */
624 		KEG_UNLOCK(keg);
625 		ret = hash_alloc(&newhash, 1 << fls(slabs));
626 		KEG_LOCK(keg);
627 		if (ret) {
628 			if (hash_expand(&keg->uk_hash, &newhash)) {
629 				oldhash = keg->uk_hash;
630 				keg->uk_hash = newhash;
631 			} else
632 				oldhash = newhash;
633 
634 			KEG_UNLOCK(keg);
635 			hash_free(&oldhash);
636 			return;
637 		}
638 	}
639 	KEG_UNLOCK(keg);
640 
641 update_wss:
642 	ZONE_LOCK(zone);
643 	for (int i = 0; i < vm_ndomains; i++)
644 		zone_domain_update_wss(&zone->uz_domain[i]);
645 	ZONE_UNLOCK(zone);
646 }
647 
648 /*
649  * Allocate and zero fill the next sized hash table from the appropriate
650  * backing store.
651  *
652  * Arguments:
653  *	hash  The new hash structure to fill in
654  *	size  The requested number of hash buckets (a power of 2)
655  * Returns:
656  *	1 on success and 0 on failure.
657  */
658 static int
659 hash_alloc(struct uma_hash *hash, u_int size)
660 {
661 	size_t alloc;
662 
663 	KASSERT(powerof2(size), ("hash size must be power of 2"));
664 	if (size > UMA_HASH_SIZE_INIT)  {
665 		hash->uh_hashsize = size;
666 		alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
667 		hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
668 		    M_UMAHASH, M_NOWAIT);
669 	} else {
670 		alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
671 		hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
672 		    UMA_ANYDOMAIN, M_WAITOK);
673 		hash->uh_hashsize = UMA_HASH_SIZE_INIT;
674 	}
675 	if (hash->uh_slab_hash) {
676 		bzero(hash->uh_slab_hash, alloc);
677 		hash->uh_hashmask = hash->uh_hashsize - 1;
678 		return (1);
679 	}
680 
681 	return (0);
682 }
683 
684 /*
685  * Expands the hash table for HASH zones.  This is done from zone_timeout
686  * to reduce collisions.  This must not be done in the regular allocation
687  * path, otherwise, we can recurse on the vm while allocating pages.
688  *
689  * Arguments:
690  *	oldhash  The hash you want to expand
691  *	newhash  The hash structure for the new table
692  *
693  * Returns:
694  *	1 on success and 0 on failure.
695  *
696  * Discussion:
697  */
698 static int
699 hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
700 {
701 	uma_slab_t slab;
702 	u_int hval;
703 	u_int idx;
704 
705 	if (!newhash->uh_slab_hash)
706 		return (0);
707 
708 	if (oldhash->uh_hashsize >= newhash->uh_hashsize)
709 		return (0);
710 
711 	/*
712 	 * I need to investigate hash algorithms for resizing without a
713 	 * full rehash.
714 	 */
715 
716 	for (idx = 0; idx < oldhash->uh_hashsize; idx++)
717 		while (!SLIST_EMPTY(&oldhash->uh_slab_hash[idx])) {
718 			slab = SLIST_FIRST(&oldhash->uh_slab_hash[idx]);
719 			SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[idx], us_hlink);
720 			hval = UMA_HASH(newhash, slab->us_data);
721 			SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
722 			    slab, us_hlink);
723 		}
724 
725 	return (1);
726 }
727 
728 /*
729  * Free the hash bucket to the appropriate backing store.
730  *
731  * Arguments:
732  *	hash  The hash structure whose slab hash table we're freeing; its
733  *	      size determines which backing store it is returned to.
734  *
735  * Returns:
736  *	Nothing
737  */
738 static void
739 hash_free(struct uma_hash *hash)
740 {
741 	if (hash->uh_slab_hash == NULL)
742 		return;
743 	if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
744 		zone_free_item(hashzone, hash->uh_slab_hash, NULL, SKIP_NONE);
745 	else
746 		free(hash->uh_slab_hash, M_UMAHASH);
747 }
748 
749 /*
750  * Frees all outstanding items in a bucket
751  *
752  * Arguments:
753  *	zone   The zone to free to, must be unlocked.
754  *	bucket The free/alloc bucket with items, cpu queue must be locked.
755  *
756  * Returns:
757  *	Nothing
758  */
759 
760 static void
761 bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
762 {
763 	int i;
764 
765 	if (bucket == NULL)
766 		return;
767 
768 	if (zone->uz_fini)
769 		for (i = 0; i < bucket->ub_cnt; i++)
770 			zone->uz_fini(bucket->ub_bucket[i], zone->uz_size);
771 	zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt);
772 	if (zone->uz_max_items > 0) {
773 		ZONE_LOCK(zone);
774 		zone->uz_items -= bucket->ub_cnt;
775 		if (zone->uz_sleepers && zone->uz_items < zone->uz_max_items)
776 			wakeup_one(zone);
777 		ZONE_UNLOCK(zone);
778 	}
779 	bucket->ub_cnt = 0;
780 }
781 
782 /*
783  * Drains the per cpu caches for a zone.
784  *
785  * NOTE: This may only be called while the zone is being torn down, and not
786  * during normal operation.  This is necessary so that we do not have
787  * to migrate CPUs to drain the per-CPU caches.
788  *
789  * Arguments:
790  *	zone     The zone to drain, must be unlocked.
791  *
792  * Returns:
793  *	Nothing
794  */
795 static void
796 cache_drain(uma_zone_t zone)
797 {
798 	uma_cache_t cache;
799 	int cpu;
800 
801 	/*
802 	 * XXX: It is safe to not lock the per-CPU caches, because we're
803 	 * tearing down the zone anyway.  I.e., there will be no further use
804 	 * of the caches at this point.
805 	 *
806 	 * XXX: It would be good to be able to assert that the zone is being
807 	 * torn down to prevent improper use of cache_drain().
808 	 *
809 	 * XXX: We lock the zone before passing into bucket_cache_reclaim() as
810 	 * it is used elsewhere.  Should the tear-down path be made special
811 	 * there in some form?
812 	 */
813 	CPU_FOREACH(cpu) {
814 		cache = &zone->uz_cpu[cpu];
815 		bucket_drain(zone, cache->uc_allocbucket);
816 		if (cache->uc_allocbucket != NULL)
817 			bucket_free(zone, cache->uc_allocbucket, NULL);
818 		cache->uc_allocbucket = NULL;
819 		bucket_drain(zone, cache->uc_freebucket);
820 		if (cache->uc_freebucket != NULL)
821 			bucket_free(zone, cache->uc_freebucket, NULL);
822 		cache->uc_freebucket = NULL;
823 		bucket_drain(zone, cache->uc_crossbucket);
824 		if (cache->uc_crossbucket != NULL)
825 			bucket_free(zone, cache->uc_crossbucket, NULL);
826 		cache->uc_crossbucket = NULL;
827 	}
828 	ZONE_LOCK(zone);
829 	bucket_cache_reclaim(zone, true);
830 	ZONE_UNLOCK(zone);
831 }
832 
833 static void
834 cache_shrink(uma_zone_t zone, void *unused)
835 {
836 
837 	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
838 		return;
839 
840 	ZONE_LOCK(zone);
841 	zone->uz_bucket_size =
842 	    (zone->uz_bucket_size_min + zone->uz_bucket_size) / 2;
843 	ZONE_UNLOCK(zone);
844 }
845 
846 static void
847 cache_drain_safe_cpu(uma_zone_t zone, void *unused)
848 {
849 	uma_cache_t cache;
850 	uma_bucket_t b1, b2, b3;
851 	int domain;
852 
853 	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
854 		return;
855 
856 	b1 = b2 = b3 = NULL;
857 	ZONE_LOCK(zone);
858 	critical_enter();
859 	if (zone->uz_flags & UMA_ZONE_NUMA)
860 		domain = PCPU_GET(domain);
861 	else
862 		domain = 0;
863 	cache = &zone->uz_cpu[curcpu];
864 	if (cache->uc_allocbucket) {
865 		if (cache->uc_allocbucket->ub_cnt != 0)
866 			zone_put_bucket(zone, &zone->uz_domain[domain],
867 			    cache->uc_allocbucket, false);
868 		else
869 			b1 = cache->uc_allocbucket;
870 		cache->uc_allocbucket = NULL;
871 	}
872 	if (cache->uc_freebucket) {
873 		if (cache->uc_freebucket->ub_cnt != 0)
874 			zone_put_bucket(zone, &zone->uz_domain[domain],
875 			    cache->uc_freebucket, false);
876 		else
877 			b2 = cache->uc_freebucket;
878 		cache->uc_freebucket = NULL;
879 	}
880 	b3 = cache->uc_crossbucket;
881 	cache->uc_crossbucket = NULL;
882 	critical_exit();
883 	ZONE_UNLOCK(zone);
884 	if (b1)
885 		bucket_free(zone, b1, NULL);
886 	if (b2)
887 		bucket_free(zone, b2, NULL);
888 	if (b3) {
889 		bucket_drain(zone, b3);
890 		bucket_free(zone, b3, NULL);
891 	}
892 }
893 
894 /*
895  * Safely drain the per-CPU caches of a zone (or of all zones) into the
896  * per-domain bucket caches.  This is an expensive call because it needs
897  * to bind to all CPUs one by one and enter a critical section on each of
898  * them in order to safely access their cache buckets.
899  * The zone lock must not be held when calling this function.
900  */
901 static void
902 pcpu_cache_drain_safe(uma_zone_t zone)
903 {
904 	int cpu;
905 
906 	/*
907 	 * Polite bucket size shrinking was not enough, shrink aggressively.
908 	 */
909 	if (zone)
910 		cache_shrink(zone, NULL);
911 	else
912 		zone_foreach(cache_shrink, NULL);
913 
914 	CPU_FOREACH(cpu) {
915 		thread_lock(curthread);
916 		sched_bind(curthread, cpu);
917 		thread_unlock(curthread);
918 
919 		if (zone)
920 			cache_drain_safe_cpu(zone, NULL);
921 		else
922 			zone_foreach(cache_drain_safe_cpu, NULL);
923 	}
924 	thread_lock(curthread);
925 	sched_unbind(curthread);
926 	thread_unlock(curthread);
927 }
928 
929 /*
930  * Reclaim cached buckets from a zone.  All buckets are reclaimed if the caller
931  * requested a drain, otherwise the per-domain caches are trimmed to their
932  * estimated working set size.
933  */
934 static void
935 bucket_cache_reclaim(uma_zone_t zone, bool drain)
936 {
937 	uma_zone_domain_t zdom;
938 	uma_bucket_t bucket;
939 	long target, tofree;
940 	int i;
941 
942 	for (i = 0; i < vm_ndomains; i++) {
943 		zdom = &zone->uz_domain[i];
944 
945 		/*
946 		 * If we were asked to drain the zone, we are done only once
947 		 * this bucket cache is empty.  Otherwise, we reclaim items in
948 		 * excess of the zone's estimated working set size.  If the
949 		 * difference nitems - imin is larger than the WSS estimate,
950 		 * then the estimate will grow at the end of this interval and
951 		 * we ignore the historical average.
952 		 */
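		/*
		 * For example, with nitems = 500, imin = 100 and wss = 250,
		 * the trim target is lmax(250, 400) = 400 items: the 400-item
		 * delta observed this interval will dominate the WSS estimate
		 * at its next update, so that many items stay cached.
		 */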
953 		target = drain ? 0 : lmax(zdom->uzd_wss, zdom->uzd_nitems -
954 		    zdom->uzd_imin);
955 		while (zdom->uzd_nitems > target) {
956 			bucket = TAILQ_LAST(&zdom->uzd_buckets, uma_bucketlist);
957 			if (bucket == NULL)
958 				break;
959 			tofree = bucket->ub_cnt;
960 			TAILQ_REMOVE(&zdom->uzd_buckets, bucket, ub_link);
961 			zdom->uzd_nitems -= tofree;
962 
963 			/*
964 			 * Shift the bounds of the current WSS interval to avoid
965 			 * perturbing the estimate.
966 			 */
967 			zdom->uzd_imax -= lmin(zdom->uzd_imax, tofree);
968 			zdom->uzd_imin -= lmin(zdom->uzd_imin, tofree);
969 
970 			ZONE_UNLOCK(zone);
971 			bucket_drain(zone, bucket);
972 			bucket_free(zone, bucket, NULL);
973 			ZONE_LOCK(zone);
974 		}
975 	}
976 
977 	/*
978 	 * Shrink the zone bucket size to ensure that the per-CPU caches
979 	 * don't grow too large.
980 	 */
981 	if (zone->uz_bucket_size > zone->uz_bucket_size_min)
982 		zone->uz_bucket_size--;
983 }
984 
985 static void
986 keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
987 {
988 	uint8_t *mem;
989 	int i;
990 	uint8_t flags;
991 
992 	CTR4(KTR_UMA, "keg_free_slab keg %s(%p) slab %p, returning %d bytes",
993 	    keg->uk_name, keg, slab, PAGE_SIZE * keg->uk_ppera);
994 
995 	mem = slab->us_data;
996 	flags = slab->us_flags;
997 	i = start;
998 	if (keg->uk_fini != NULL) {
999 		for (i--; i > -1; i--)
1000 #ifdef INVARIANTS
1001 		/*
1002 		 * trash_fini implies that dtor was trash_dtor. trash_fini
1003 		 * would check that memory hasn't been modified since free,
1004 		 * which executed trash_dtor.
1005 		 * That's why we need to run the uma_dbg_kskip() check here,
1006 		 * even though we don't perform the skip check for other
1007 		 * init/fini invocations.
1008 		 */
1009 		if (!uma_dbg_kskip(keg, slab->us_data + (keg->uk_rsize * i)) ||
1010 		    keg->uk_fini != trash_fini)
1011 #endif
1012 			keg->uk_fini(slab->us_data + (keg->uk_rsize * i),
1013 			    keg->uk_size);
1014 	}
1015 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
1016 		zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
1017 	keg->uk_freef(mem, PAGE_SIZE * keg->uk_ppera, flags);
1018 	uma_total_dec(PAGE_SIZE * keg->uk_ppera);
1019 }
1020 
1021 /*
1022  * Frees pages from a keg back to the system.  This is done on demand from
1023  * the pageout daemon.
1024  *
1025  * Returns nothing.
1026  */
1027 static void
1028 keg_drain(uma_keg_t keg)
1029 {
1030 	struct slabhead freeslabs = { 0 };
1031 	uma_domain_t dom;
1032 	uma_slab_t slab, tmp;
1033 	int i;
1034 
1035 	/*
1036 	 * We don't want to take pages from statically allocated kegs at this
1037 	 * time.
1038 	 */
1039 	if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
1040 		return;
1041 
1042 	CTR3(KTR_UMA, "keg_drain %s(%p) free items: %u",
1043 	    keg->uk_name, keg, keg->uk_free);
1044 	KEG_LOCK(keg);
1045 	if (keg->uk_free == 0)
1046 		goto finished;
1047 
1048 	for (i = 0; i < vm_ndomains; i++) {
1049 		dom = &keg->uk_domain[i];
1050 		LIST_FOREACH_SAFE(slab, &dom->ud_free_slab, us_link, tmp) {
1051 			/* We have nowhere to free these to. */
1052 			if (slab->us_flags & UMA_SLAB_BOOT)
1053 				continue;
1054 
1055 			LIST_REMOVE(slab, us_link);
1056 			keg->uk_pages -= keg->uk_ppera;
1057 			keg->uk_free -= keg->uk_ipers;
1058 
1059 			if (keg->uk_flags & UMA_ZONE_HASH)
1060 				UMA_HASH_REMOVE(&keg->uk_hash, slab,
1061 				    slab->us_data);
1062 
1063 			SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);
1064 		}
1065 	}
1066 
1067 finished:
1068 	KEG_UNLOCK(keg);
1069 
1070 	while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
1071 		SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
1072 		keg_free_slab(keg, slab, keg->uk_ipers);
1073 	}
1074 }
1075 
1076 static void
1077 zone_reclaim(uma_zone_t zone, int waitok, bool drain)
1078 {
1079 
1080 	/*
1081 	 * Set draining to interlock with zone_dtor() so we can release our
1082 	 * locks as we go.  Only dtor() should do a WAITOK call since it
1083 	 * is the only call that knows the structure will still be available
1084 	 * when it wakes up.
1085 	 */
1086 	ZONE_LOCK(zone);
1087 	while (zone->uz_flags & UMA_ZFLAG_RECLAIMING) {
1088 		if (waitok == M_NOWAIT)
1089 			goto out;
1090 		msleep(zone, zone->uz_lockptr, PVM, "zonedrain", 1);
1091 	}
1092 	zone->uz_flags |= UMA_ZFLAG_RECLAIMING;
1093 	bucket_cache_reclaim(zone, drain);
1094 	ZONE_UNLOCK(zone);
1095 
1096 	/*
1097 	 * The RECLAIMING flag protects us from being freed while
1098 	 * we're running.  Normally the uma_rwlock would protect us but we
1099 	 * must be able to release and acquire the right lock for each keg.
1100 	 */
1101 	if ((zone->uz_flags & UMA_ZFLAG_CACHE) == 0)
1102 		keg_drain(zone->uz_keg);
1103 	ZONE_LOCK(zone);
1104 	zone->uz_flags &= ~UMA_ZFLAG_RECLAIMING;
1105 	wakeup(zone);
1106 out:
1107 	ZONE_UNLOCK(zone);
1108 }
1109 
1110 static void
1111 zone_drain(uma_zone_t zone, void *unused)
1112 {
1113 
1114 	zone_reclaim(zone, M_NOWAIT, true);
1115 }
1116 
1117 static void
1118 zone_trim(uma_zone_t zone, void *unused)
1119 {
1120 
1121 	zone_reclaim(zone, M_NOWAIT, false);
1122 }
1123 
1124 /*
1125  * Allocate a new slab for a keg.  This does not insert the slab onto a list.
1126  * If the allocation was successful, the keg lock will be held upon return,
1127  * otherwise the keg will be left unlocked.
1128  *
1129  * Arguments:
1130  *	flags   Wait flags for the item initialization routine
1131  *	aflags  Wait flags for the slab allocation
1132  *
1133  * Returns:
1134  *	The slab that was allocated or NULL if there is no memory and the
1135  *	caller specified M_NOWAIT.
1136  */
1137 static uma_slab_t
1138 keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int domain, int flags,
1139     int aflags)
1140 {
1141 	uma_alloc allocf;
1142 	uma_slab_t slab;
1143 	unsigned long size;
1144 	uint8_t *mem;
1145 	uint8_t sflags;
1146 	int i;
1147 
1148 	KASSERT(domain >= 0 && domain < vm_ndomains,
1149 	    ("keg_alloc_slab: domain %d out of range", domain));
1150 	KEG_LOCK_ASSERT(keg);
1151 	MPASS(zone->uz_lockptr == &keg->uk_lock);
1152 
1153 	allocf = keg->uk_allocf;
1154 	KEG_UNLOCK(keg);
1155 
1156 	slab = NULL;
1157 	mem = NULL;
1158 	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
1159 		slab = zone_alloc_item(keg->uk_slabzone, NULL, domain, aflags);
1160 		if (slab == NULL)
1161 			goto out;
1162 	}
1163 
1164 	/*
1165 	 * This reproduces the old vm_zone behavior of zero filling pages the
1166 	 * first time they are added to a zone.
1167 	 *
1168 	 * Malloced items are zeroed in uma_zalloc.
1169 	 */
1170 
1171 	if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
1172 		aflags |= M_ZERO;
1173 	else
1174 		aflags &= ~M_ZERO;
1175 
1176 	if (keg->uk_flags & UMA_ZONE_NODUMP)
1177 		aflags |= M_NODUMP;
1178 
1179 	/* zone is passed for legacy reasons. */
1180 	size = keg->uk_ppera * PAGE_SIZE;
1181 	mem = allocf(zone, size, domain, &sflags, aflags);
1182 	if (mem == NULL) {
1183 		if (keg->uk_flags & UMA_ZONE_OFFPAGE)
1184 			zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
1185 		slab = NULL;
1186 		goto out;
1187 	}
1188 	uma_total_inc(size);
1189 
1190 	/* Point the slab into the allocated memory */
1191 	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
1192 		slab = (uma_slab_t )(mem + keg->uk_pgoff);
1193 
1194 	if (keg->uk_flags & UMA_ZONE_VTOSLAB)
1195 		for (i = 0; i < keg->uk_ppera; i++)
1196 			vsetzoneslab((vm_offset_t)mem + (i * PAGE_SIZE),
1197 			    zone, slab);
1198 
1199 	slab->us_data = mem;
1200 	slab->us_freecount = keg->uk_ipers;
1201 	slab->us_flags = sflags;
1202 	slab->us_domain = domain;
1203 	BIT_FILL(keg->uk_ipers, &slab->us_free);
1204 #ifdef INVARIANTS
1205 	BIT_ZERO(SLAB_MAX_SETSIZE, &slab->us_debugfree);
1206 #endif
1207 
1208 	if (keg->uk_init != NULL) {
1209 		for (i = 0; i < keg->uk_ipers; i++)
1210 			if (keg->uk_init(slab->us_data + (keg->uk_rsize * i),
1211 			    keg->uk_size, flags) != 0)
1212 				break;
1213 		if (i != keg->uk_ipers) {
1214 			keg_free_slab(keg, slab, i);
1215 			slab = NULL;
1216 			goto out;
1217 		}
1218 	}
1219 	KEG_LOCK(keg);
1220 
1221 	CTR3(KTR_UMA, "keg_alloc_slab: allocated slab %p for %s(%p)",
1222 	    slab, keg->uk_name, keg);
1223 
1224 	if (keg->uk_flags & UMA_ZONE_HASH)
1225 		UMA_HASH_INSERT(&keg->uk_hash, slab, mem);
1226 
1227 	keg->uk_pages += keg->uk_ppera;
1228 	keg->uk_free += keg->uk_ipers;
1229 
1230 out:
1231 	return (slab);
1232 }
1233 
1234 /*
1235  * This function is intended to be used early on in place of page_alloc() so
1236  * that we may use the boot time page cache to satisfy allocations before
1237  * the VM is ready.
1238  */
1239 static void *
1240 startup_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
1241     int wait)
1242 {
1243 	uma_keg_t keg;
1244 	void *mem;
1245 	int pages;
1246 
1247 	keg = zone->uz_keg;
1248 	/*
1249 	 * If we are in BOOT_BUCKETS or higher, then switch to the real
1250 	 * allocator.  Zones with page sized slabs switch at BOOT_PAGEALLOC.
1251 	 */
1252 	switch (booted) {
1253 		case BOOT_COLD:
1254 		case BOOT_STRAPPED:
1255 			break;
1256 		case BOOT_PAGEALLOC:
1257 			if (keg->uk_ppera > 1)
1258 				break;
1259 		case BOOT_BUCKETS:
1260 		case BOOT_RUNNING:
1261 #ifdef UMA_MD_SMALL_ALLOC
1262 			keg->uk_allocf = (keg->uk_ppera > 1) ?
1263 			    page_alloc : uma_small_alloc;
1264 #else
1265 			keg->uk_allocf = page_alloc;
1266 #endif
1267 			return keg->uk_allocf(zone, bytes, domain, pflag, wait);
1268 	}
1269 
1270 	/*
1271 	 * Check our small startup cache to see if it has pages remaining.
1272 	 */
1273 	pages = howmany(bytes, PAGE_SIZE);
1274 	KASSERT(pages > 0, ("%s can't reserve 0 pages", __func__));
1275 	if (pages > boot_pages)
1276 		panic("UMA zone \"%s\": Increase vm.boot_pages", zone->uz_name);
1277 #ifdef DIAGNOSTIC
1278 	printf("%s from \"%s\", %d boot pages left\n", __func__, zone->uz_name,
1279 	    boot_pages);
1280 #endif
1281 	mem = bootmem;
1282 	boot_pages -= pages;
1283 	bootmem += pages * PAGE_SIZE;
1284 	*pflag = UMA_SLAB_BOOT;
1285 
1286 	return (mem);
1287 }
1288 
1289 /*
1290  * Allocates a number of pages from the system
1291  *
1292  * Arguments:
1293  *	bytes  The number of bytes requested
1294  *	wait  Shall we wait?
1295  *
1296  * Returns:
1297  *	A pointer to the allocated memory or possibly
1298  *	NULL if M_NOWAIT is set.
1299  */
1300 static void *
1301 page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
1302     int wait)
1303 {
1304 	void *p;	/* Returned page */
1305 
1306 	*pflag = UMA_SLAB_KERNEL;
1307 	p = (void *)kmem_malloc_domainset(DOMAINSET_FIXED(domain), bytes, wait);
1308 
1309 	return (p);
1310 }
1311 
1312 static void *
1313 pcpu_page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
1314     int wait)
1315 {
1316 	struct pglist alloctail;
1317 	vm_offset_t addr, zkva;
1318 	int cpu, flags;
1319 	vm_page_t p, p_next;
1320 #ifdef NUMA
1321 	struct pcpu *pc;
1322 #endif
1323 
1324 	MPASS(bytes == (mp_maxid + 1) * PAGE_SIZE);
1325 
1326 	TAILQ_INIT(&alloctail);
1327 	flags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
1328 	    malloc2vm_flags(wait);
1329 	*pflag = UMA_SLAB_KERNEL;
1330 	for (cpu = 0; cpu <= mp_maxid; cpu++) {
1331 		if (CPU_ABSENT(cpu)) {
1332 			p = vm_page_alloc(NULL, 0, flags);
1333 		} else {
1334 #ifndef NUMA
1335 			p = vm_page_alloc(NULL, 0, flags);
1336 #else
1337 			pc = pcpu_find(cpu);
1338 			p = vm_page_alloc_domain(NULL, 0, pc->pc_domain, flags);
1339 			if (__predict_false(p == NULL))
1340 				p = vm_page_alloc(NULL, 0, flags);
1341 #endif
1342 		}
1343 		if (__predict_false(p == NULL))
1344 			goto fail;
1345 		TAILQ_INSERT_TAIL(&alloctail, p, listq);
1346 	}
1347 	if ((addr = kva_alloc(bytes)) == 0)
1348 		goto fail;
1349 	zkva = addr;
1350 	TAILQ_FOREACH(p, &alloctail, listq) {
1351 		pmap_qenter(zkva, &p, 1);
1352 		zkva += PAGE_SIZE;
1353 	}
1354 	return ((void*)addr);
1355 fail:
1356 	TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
1357 		vm_page_unwire_noq(p);
1358 		vm_page_free(p);
1359 	}
1360 	return (NULL);
1361 }
1362 
1363 /*
1364  * Allocates a number of pages not belonging to a VM object
1365  *
1366  * Arguments:
1367  *	bytes  The number of bytes requested
1368  *	wait   Shall we wait?
1369  *
1370  * Returns:
1371  *	A pointer to the allocated memory or possibly
1372  *	NULL if M_NOWAIT is set.
1373  */
1374 static void *
1375 noobj_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags,
1376     int wait)
1377 {
1378 	TAILQ_HEAD(, vm_page) alloctail;
1379 	u_long npages;
1380 	vm_offset_t retkva, zkva;
1381 	vm_page_t p, p_next;
1382 	uma_keg_t keg;
1383 
1384 	TAILQ_INIT(&alloctail);
1385 	keg = zone->uz_keg;
1386 
1387 	npages = howmany(bytes, PAGE_SIZE);
1388 	while (npages > 0) {
1389 		p = vm_page_alloc_domain(NULL, 0, domain, VM_ALLOC_INTERRUPT |
1390 		    VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
1391 		    ((wait & M_WAITOK) != 0 ? VM_ALLOC_WAITOK :
1392 		    VM_ALLOC_NOWAIT));
1393 		if (p != NULL) {
1394 			/*
1395 			 * Since the page does not belong to an object, its
1396 			 * listq is unused.
1397 			 */
1398 			TAILQ_INSERT_TAIL(&alloctail, p, listq);
1399 			npages--;
1400 			continue;
1401 		}
1402 		/*
1403 		 * Page allocation failed, free intermediate pages and
1404 		 * exit.
1405 		 */
1406 		TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
1407 			vm_page_unwire_noq(p);
1408 			vm_page_free(p);
1409 		}
1410 		return (NULL);
1411 	}
1412 	*flags = UMA_SLAB_PRIV;
1413 	zkva = keg->uk_kva +
1414 	    atomic_fetchadd_long(&keg->uk_offset, round_page(bytes));
1415 	retkva = zkva;
1416 	TAILQ_FOREACH(p, &alloctail, listq) {
1417 		pmap_qenter(zkva, &p, 1);
1418 		zkva += PAGE_SIZE;
1419 	}
1420 
1421 	return ((void *)retkva);
1422 }
1423 
1424 /*
1425  * Frees a number of pages to the system
1426  *
1427  * Arguments:
1428  *	mem   A pointer to the memory to be freed
1429  *	size  The size of the memory being freed
1430  *	flags The original p->us_flags field
1431  *
1432  * Returns:
1433  *	Nothing
1434  */
1435 static void
1436 page_free(void *mem, vm_size_t size, uint8_t flags)
1437 {
1438 
1439 	if ((flags & UMA_SLAB_KERNEL) == 0)
1440 		panic("UMA: page_free used with invalid flags %x", flags);
1441 
1442 	kmem_free((vm_offset_t)mem, size);
1443 }
1444 
1445 /*
1446  * Frees pcpu zone allocations
1447  *
1448  * Arguments:
1449  *	mem   A pointer to the memory to be freed
1450  *	size  The size of the memory being freed
1451  *	flags The original p->us_flags field
1452  *
1453  * Returns:
1454  *	Nothing
1455  */
1456 static void
1457 pcpu_page_free(void *mem, vm_size_t size, uint8_t flags)
1458 {
1459 	vm_offset_t sva, curva;
1460 	vm_paddr_t paddr;
1461 	vm_page_t m;
1462 
1463 	MPASS(size == (mp_maxid+1)*PAGE_SIZE);
1464 	sva = (vm_offset_t)mem;
1465 	for (curva = sva; curva < sva + size; curva += PAGE_SIZE) {
1466 		paddr = pmap_kextract(curva);
1467 		m = PHYS_TO_VM_PAGE(paddr);
1468 		vm_page_unwire_noq(m);
1469 		vm_page_free(m);
1470 	}
1471 	pmap_qremove(sva, size >> PAGE_SHIFT);
1472 	kva_free(sva, size);
1473 }
1474 
1475 
1476 /*
1477  * Zero fill initializer
1478  *
1479  * Arguments/Returns follow uma_init specifications
1480  */
1481 static int
1482 zero_init(void *mem, int size, int flags)
1483 {
1484 	bzero(mem, size);
1485 	return (0);
1486 }
1487 
1488 /*
1489  * Actual size of embedded struct slab (!OFFPAGE).
1490  */
1491 size_t
1492 slab_sizeof(int nitems)
1493 {
1494 	size_t s;
1495 
1496 	s = sizeof(struct uma_slab) + BITSET_SIZE(nitems);
1497 	return (roundup(s, UMA_ALIGN_PTR + 1));
1498 }
1499 
1500 /*
1501  * Size of memory for embedded slabs (!OFFPAGE).
1502  */
1503 size_t
1504 slab_space(int nitems)
1505 {
1506 	return (UMA_SLAB_SIZE - slab_sizeof(nitems));
1507 }
1508 
1509 /*
1510  * Compute the number of items that will fit in an embedded (!OFFPAGE) slab
1511  * with a given size and alignment.
1512  */
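/*
 * For example, 256-byte items with pointer alignment would ideally give
 * UMA_SLAB_SIZE / 256 == 16 items per slab, but the embedded header
 * computed by slab_sizeof(16) consumes part of the slab, so the final
 * count is slab_space(16) / 256, typically 15.
 */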
1513 int
1514 slab_ipers(size_t size, int align)
1515 {
1516 	int rsize;
1517 	int nitems;
1518 
1519         /*
1520          * Compute the ideal number of items that will fit in a page and
1521          * then compute the actual number based on a bitset nitems wide.
1522          */
1523 	rsize = roundup(size, align + 1);
1524         nitems = UMA_SLAB_SIZE / rsize;
1525 	return (slab_space(nitems) / rsize);
1526 }
1527 
1528 /*
1529  * Finish creating a small uma keg.  This calculates ipers, and the keg size.
1530  *
1531  * Arguments
1532  *	keg  The zone we should initialize
1533  *
1534  * Returns
1535  *	Nothing
1536  */
1537 static void
1538 keg_small_init(uma_keg_t keg)
1539 {
1540 	u_int rsize;
1541 	u_int memused;
1542 	u_int wastedspace;
1543 	u_int shsize;
1544 	u_int slabsize;
1545 
1546 	if (keg->uk_flags & UMA_ZONE_PCPU) {
1547 		u_int ncpus = (mp_maxid + 1) ? (mp_maxid + 1) : MAXCPU;
1548 
1549 		slabsize = UMA_PCPU_ALLOC_SIZE;
1550 		keg->uk_ppera = ncpus;
1551 	} else {
1552 		slabsize = UMA_SLAB_SIZE;
1553 		keg->uk_ppera = 1;
1554 	}
1555 
1556 	/*
1557 	 * Calculate the size of each allocation (rsize) according to
1558 	 * alignment.  If the requested size is so small that more items would
1559 	 * fit per slab than we have allocation bits for, we round it up.
1560 	 */
1561 	rsize = keg->uk_size;
1562 	if (rsize < slabsize / SLAB_MAX_SETSIZE)
1563 		rsize = slabsize / SLAB_MAX_SETSIZE;
1564 	if (rsize & keg->uk_align)
1565 		rsize = roundup(rsize, keg->uk_align + 1);
1566 	keg->uk_rsize = rsize;
1567 
1568 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0 ||
1569 	    keg->uk_rsize < UMA_PCPU_ALLOC_SIZE,
1570 	    ("%s: size %u too large", __func__, keg->uk_rsize));
1571 
1572 	/*
1573 	 * Use a pessimistic bit count for shsize.  It may be possible to
1574 	 * squeeze one more item in for very particular sizes if we were
1575 	 * to loop and reduce the bitsize if there is waste.
1576 	 */
1577 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
1578 		shsize = 0;
1579 	else
1580 		shsize = slab_sizeof(slabsize / rsize);
1581 
1582 	if (rsize <= slabsize - shsize)
1583 		keg->uk_ipers = (slabsize - shsize) / rsize;
1584 	else {
1585 		/* Handle special case when we have 1 item per slab, so
1586 		 * alignment requirement can be relaxed. */
1587 		KASSERT(keg->uk_size <= slabsize - shsize,
1588 		    ("%s: size %u greater than slab", __func__, keg->uk_size));
1589 		keg->uk_ipers = 1;
1590 	}
1591 	KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_MAX_SETSIZE,
1592 	    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
1593 
1594 	memused = keg->uk_ipers * rsize + shsize;
1595 	wastedspace = slabsize - memused;
1596 
1597 	/*
1598 	 * We can't do OFFPAGE if we're internal or if we've been
1599 	 * asked not to go to the VM for buckets.  If we do this we
1600 	 * may end up going to the VM for slabs, which we do not
1601 	 * want to do if we're UMA_ZFLAG_CACHEONLY as a result
1602 	 * of UMA_ZONE_VM, which clearly forbids it.
1603 	 */
1604 	if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
1605 	    (keg->uk_flags & UMA_ZFLAG_CACHEONLY))
1606 		return;
1607 
1608 	/*
1609 	 * See if using an OFFPAGE slab will limit our waste.  Only do
1610 	 * this if it permits more items per-slab.
1611 	 *
1612 	 * XXX We could try growing slabsize to limit max waste as well.
1613 	 * Historically this was not done because the VM could not
1614 	 * efficiently handle contiguous allocations.
1615 	 */
1616 	if ((wastedspace >= slabsize / UMA_MAX_WASTE) &&
1617 	    (keg->uk_ipers < (slabsize / keg->uk_rsize))) {
1618 		keg->uk_ipers = slabsize / keg->uk_rsize;
1619 		KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_MAX_SETSIZE,
1620 		    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
1621 		CTR6(KTR_UMA, "UMA decided we need offpage slab headers for "
1622 		    "keg: %s(%p), calculated wastedspace = %d, "
1623 		    "maximum wasted space allowed = %d, "
1624 		    "calculated ipers = %d, "
1625 		    "new wasted space = %d\n", keg->uk_name, keg, wastedspace,
1626 		    slabsize / UMA_MAX_WASTE, keg->uk_ipers,
1627 		    slabsize - keg->uk_ipers * keg->uk_rsize);
1628 		/*
1629 		 * If we had access to memory to embed a slab header we
1630 		 * also have a page structure to use vtoslab() instead of
1631 		 * hash to find slabs.  If the zone was explicitly created
1632 		 * OFFPAGE we can't necessarily touch the memory.
1633 		 */
1634 		if ((keg->uk_flags & UMA_ZONE_OFFPAGE) == 0)
1635 			keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
1636 	}
1637 
1638 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
1639 	    (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1640 		keg->uk_flags |= UMA_ZONE_HASH;
1641 }
1642 
1643 /*
1644  * Finish creating a large (> UMA_SLAB_SIZE) uma keg.  Just give in and do
1645  * OFFPAGE for now.  When I can allow for more dynamic slab sizes this will be
1646  * more complicated.
1647  *
1648  * Arguments
1649  *	keg  The keg we should initialize
1650  *
1651  * Returns
1652  *	Nothing
1653  */
1654 static void
1655 keg_large_init(uma_keg_t keg)
1656 {
1657 
1658 	KASSERT(keg != NULL, ("Keg is null in keg_large_init"));
1659 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
1660 	    ("%s: Cannot large-init a UMA_ZONE_PCPU keg", __func__));
1661 
1662 	keg->uk_ppera = howmany(keg->uk_size, PAGE_SIZE);
1663 	keg->uk_ipers = 1;
1664 	keg->uk_rsize = keg->uk_size;
1665 
1666 	/* Check whether we have enough space to not do OFFPAGE. */
1667 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) == 0 &&
1668 	    PAGE_SIZE * keg->uk_ppera - keg->uk_rsize <
1669 	    slab_sizeof(SLAB_MIN_SETSIZE)) {
1670 		/*
1671 		 * We can't do OFFPAGE if we're internal, in which case
1672 		 * we need an extra page per allocation to contain the
1673 		 * slab header.
1674 		 */
1675 		if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) == 0)
1676 			keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
1677 		else
1678 			keg->uk_ppera++;
1679 	}
1680 
1681 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
1682 	    (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1683 		keg->uk_flags |= UMA_ZONE_HASH;
1684 }
1685 
1686 static void
1687 keg_cachespread_init(uma_keg_t keg)
1688 {
1689 	int alignsize;
1690 	int trailer;
1691 	int pages;
1692 	int rsize;
1693 
1694 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
1695 	    ("%s: Cannot cachespread-init a UMA_ZONE_PCPU keg", __func__));
1696 
1697 	alignsize = keg->uk_align + 1;
1698 	rsize = keg->uk_size;
1699 	/*
1700 	 * We want one item to start on every align boundary in a page.  To
1701 	 * do this we will span pages.  We will also extend the item by the
1702 	 * size of align if it is an even multiple of align.  Otherwise, it
1703 	 * would fall on the same boundary every time.
1704 	 */
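	/*
	 * For example (4 KB pages), a 128-byte item with 64-byte alignment is
	 * padded to rsize = 192 and spans pages = 192 * 64 / 4096 = 3 pages,
	 * yielding ipers = (3 * 4096 + 64) / 192 = 64 items, each starting on
	 * a distinct 64-byte boundary within its page.
	 */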
1705 	if (rsize & keg->uk_align)
1706 		rsize = (rsize & ~keg->uk_align) + alignsize;
1707 	if ((rsize & alignsize) == 0)
1708 		rsize += alignsize;
1709 	trailer = rsize - keg->uk_size;
1710 	pages = (rsize * (PAGE_SIZE / alignsize)) / PAGE_SIZE;
1711 	pages = MIN(pages, (128 * 1024) / PAGE_SIZE);
1712 	keg->uk_rsize = rsize;
1713 	keg->uk_ppera = pages;
1714 	keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize;
1715 	keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
1716 	KASSERT(keg->uk_ipers <= SLAB_MAX_SETSIZE,
1717 	    ("%s: keg->uk_ipers too high(%d) increase max_ipers", __func__,
1718 	    keg->uk_ipers));
1719 }
1720 
1721 /*
1722  * Keg header ctor.  This initializes all fields, locks, etc.  And inserts
1723  * the keg onto the global keg list.
1724  *
1725  * Arguments/Returns follow uma_ctor specifications
1726  *	udata  Actually uma_kctor_args
1727  */
1728 static int
1729 keg_ctor(void *mem, int size, void *udata, int flags)
1730 {
1731 	struct uma_kctor_args *arg = udata;
1732 	uma_keg_t keg = mem;
1733 	uma_zone_t zone;
1734 
1735 	bzero(keg, size);
1736 	keg->uk_size = arg->size;
1737 	keg->uk_init = arg->uminit;
1738 	keg->uk_fini = arg->fini;
1739 	keg->uk_align = arg->align;
1740 	keg->uk_free = 0;
1741 	keg->uk_reserve = 0;
1742 	keg->uk_pages = 0;
1743 	keg->uk_flags = arg->flags;
1744 	keg->uk_slabzone = NULL;
1745 
1746 	/*
1747 	 * We use a global round-robin policy by default.  Zones with
1748 	 * UMA_ZONE_NUMA set will use first-touch instead, in which case the
1749 	 * iterator is never run.
1750 	 */
1751 	keg->uk_dr.dr_policy = DOMAINSET_RR();
1752 	keg->uk_dr.dr_iter = 0;
1753 
1754 	/*
1755 	 * The master zone is passed to us at keg-creation time.
1756 	 */
1757 	zone = arg->zone;
1758 	keg->uk_name = zone->uz_name;
1759 
1760 	if (arg->flags & UMA_ZONE_VM)
1761 		keg->uk_flags |= UMA_ZFLAG_CACHEONLY;
1762 
1763 	if (arg->flags & UMA_ZONE_ZINIT)
1764 		keg->uk_init = zero_init;
1765 
1766 	if (arg->flags & UMA_ZONE_MALLOC)
1767 		keg->uk_flags |= UMA_ZONE_VTOSLAB;
1768 
1769 	if (arg->flags & UMA_ZONE_PCPU)
1770 #ifdef SMP
1771 		keg->uk_flags |= UMA_ZONE_OFFPAGE;
1772 #else
1773 		keg->uk_flags &= ~UMA_ZONE_PCPU;
1774 #endif
1775 
1776 	if (keg->uk_flags & UMA_ZONE_CACHESPREAD) {
1777 		keg_cachespread_init(keg);
1778 	} else {
1779 		if (keg->uk_size > slab_space(SLAB_MIN_SETSIZE))
1780 			keg_large_init(keg);
1781 		else
1782 			keg_small_init(keg);
1783 	}
1784 
1785 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
1786 		keg->uk_slabzone = slabzone;
1787 
1788 	/*
1789 	 * If we haven't booted yet we need allocations to go through the
1790 	 * startup cache until the vm is ready.
1791 	 */
1792 	if (booted < BOOT_PAGEALLOC)
1793 		keg->uk_allocf = startup_alloc;
1794 #ifdef UMA_MD_SMALL_ALLOC
1795 	else if (keg->uk_ppera == 1)
1796 		keg->uk_allocf = uma_small_alloc;
1797 #endif
1798 	else if (keg->uk_flags & UMA_ZONE_PCPU)
1799 		keg->uk_allocf = pcpu_page_alloc;
1800 	else
1801 		keg->uk_allocf = page_alloc;
1802 #ifdef UMA_MD_SMALL_ALLOC
1803 	if (keg->uk_ppera == 1)
1804 		keg->uk_freef = uma_small_free;
1805 	else
1806 #endif
1807 	if (keg->uk_flags & UMA_ZONE_PCPU)
1808 		keg->uk_freef = pcpu_page_free;
1809 	else
1810 		keg->uk_freef = page_free;
1811 
1812 	/*
1813 	 * Initialize keg's lock
1814 	 */
1815 	KEG_LOCK_INIT(keg, (arg->flags & UMA_ZONE_MTXCLASS));
1816 
1817 	/*
1818 	 * If we're putting the slab header in the actual page we need to
1819 	 * figure out where in each page it goes.  See slab_sizeof
1820 	 * definition.
1821 	 */
1822 	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) {
1823 		size_t shsize;
1824 
1825 		shsize = slab_sizeof(keg->uk_ipers);
1826 		keg->uk_pgoff = (PAGE_SIZE * keg->uk_ppera) - shsize;
1827 		/*
1828 		 * The only way the following is possible is if, with our
1829 		 * UMA_ALIGN_PTR adjustments, we are now bigger than
1830 		 * UMA_SLAB_SIZE.  I haven't checked whether this is
1831 		 * mathematically possible for all cases, so we make
1832 		 * sure here anyway.
1833 		 */
1834 		KASSERT(keg->uk_pgoff + shsize <= PAGE_SIZE * keg->uk_ppera,
1835 		    ("zone %s ipers %d rsize %d size %d slab won't fit",
1836 		    zone->uz_name, keg->uk_ipers, keg->uk_rsize, keg->uk_size));
1837 	}
1838 
1839 	if (keg->uk_flags & UMA_ZONE_HASH)
1840 		hash_alloc(&keg->uk_hash, 0);
1841 
1842 	CTR5(KTR_UMA, "keg_ctor %p zone %s(%p) out %d free %d\n",
1843 	    keg, zone->uz_name, zone,
1844 	    (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free,
1845 	    keg->uk_free);
1846 
1847 	LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);
1848 
1849 	rw_wlock(&uma_rwlock);
1850 	LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
1851 	rw_wunlock(&uma_rwlock);
1852 	return (0);
1853 }
1854 
1855 static void
1856 zone_alloc_counters(uma_zone_t zone, void *unused)
1857 {
1858 
1859 	zone->uz_allocs = counter_u64_alloc(M_WAITOK);
1860 	zone->uz_frees = counter_u64_alloc(M_WAITOK);
1861 	zone->uz_fails = counter_u64_alloc(M_WAITOK);
1862 }
1863 
1864 #define	UMA_MAX_DUP	999
1865 static void
1866 zone_alloc_sysctl(uma_zone_t zone, void *unused)
1867 {
1868 	uma_zone_domain_t zdom;
1869 	uma_keg_t keg;
1870 	struct sysctl_oid *oid, *domainoid;
1871 	int domains, i;
1872 	static const char *nokeg = "cache zone";
1873 	char *c;
1874 
1875 	/*
1876 	 * Make a sysctl-safe copy of the zone name by removing
1877 	 * any special characters and handling duplicates by
1878 	 * appending an index.
1879 	 */
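	/*
	 * For example (illustrative): a third zone registered with the
	 * name "foo bar" would be exported below as "foo_bar_2".
	 */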
1880 	if (zone->uz_namecnt != 0) {
1881 		if (zone->uz_namecnt > UMA_MAX_DUP)
1882 			zone->uz_namecnt = UMA_MAX_DUP;
1883 		zone->uz_ctlname = malloc(strlen(zone->uz_name) +
1884 		    sizeof(__XSTRING(UMA_MAX_DUP)) + 1, M_UMA, M_WAITOK);
1885 		sprintf(zone->uz_ctlname, "%s_%d", zone->uz_name,
1886 		    zone->uz_namecnt);
1887 	} else
1888 		zone->uz_ctlname = strdup(zone->uz_name, M_UMA);
1889 	for (c = zone->uz_ctlname; *c != '\0'; c++)
1890 		if (strchr("./\\ -", *c) != NULL)
1891 			*c = '_';
1892 
1893 	/*
1894 	 * Basic parameters at the root.
1895 	 */
1896 	zone->uz_oid = SYSCTL_ADD_NODE(NULL, SYSCTL_STATIC_CHILDREN(_vm_uma),
1897 	    OID_AUTO, zone->uz_ctlname, CTLFLAG_RD, NULL, "");
1898 	oid = zone->uz_oid;
1899 	SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1900 	    "size", CTLFLAG_RD, &zone->uz_size, 0, "Allocation size");
1901 	SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1902 	    "flags", CTLFLAG_RD, &zone->uz_flags, 0,
1903 	    "Allocator configuration flags");
1904 	SYSCTL_ADD_U16(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1905 	    "bucket_size", CTLFLAG_RD, &zone->uz_bucket_size, 0,
1906 	    "Desired per-cpu cache size");
1907 	SYSCTL_ADD_U16(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1908 	    "bucket_size_max", CTLFLAG_RD, &zone->uz_bucket_size_max, 0,
1909 	    "Maximum allowed per-cpu cache size");
1910 
1911 	/*
1912 	 * keg if present.
1913 	 */
1914 	oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(zone->uz_oid), OID_AUTO,
1915 	    "keg", CTLFLAG_RD, NULL, "");
1916 	keg = zone->uz_keg;
1917 	if ((zone->uz_flags & UMA_ZFLAG_CACHEONLY) == 0) {
1918 		SYSCTL_ADD_CONST_STRING(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1919 		    "name", CTLFLAG_RD, keg->uk_name, "Keg name");
1920 		SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1921 		    "rsize", CTLFLAG_RD, &keg->uk_rsize, 0,
1922 		    "Real object size with alignment");
1923 		SYSCTL_ADD_U16(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1924 		    "ppera", CTLFLAG_RD, &keg->uk_ppera, 0,
1925 		    "pages per-slab allocation");
1926 		SYSCTL_ADD_U16(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1927 		    "ipers", CTLFLAG_RD, &keg->uk_ipers, 0,
1928 		    "items available per-slab");
1929 		SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1930 		    "align", CTLFLAG_RD, &keg->uk_align, 0,
1931 		    "item alignment mask");
1932 		SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1933 		    "pages", CTLFLAG_RD, &keg->uk_pages, 0,
1934 		    "Total pages currently allocated from VM");
1935 		SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1936 		    "free", CTLFLAG_RD, &keg->uk_free, 0,
1937 		    "items free in the slab layer");
1938 	} else
1939 		SYSCTL_ADD_CONST_STRING(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1940 		    "name", CTLFLAG_RD, nokeg, "Keg name");
1941 
1942 	/*
1943 	 * Information about zone limits.
1944 	 */
1945 	oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(zone->uz_oid), OID_AUTO,
1946 	    "limit", CTLFLAG_RD, NULL, "");
1947 	SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1948 	    "items", CTLFLAG_RD, &zone->uz_items, 0,
1949 	    "current number of cached items");
1950 	SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1951 	    "max_items", CTLFLAG_RD, &zone->uz_max_items, 0,
1952 	    "Maximum number of cached items");
1953 	SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1954 	    "sleepers", CTLFLAG_RD, &zone->uz_sleepers, 0,
1955 	    "Number of threads sleeping at limit");
1956 	SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1957 	    "sleeps", CTLFLAG_RD, &zone->uz_sleeps, 0,
1958 	    "Total zone limit sleeps");
1959 
1960 	/*
1961 	 * Per-domain information.
1962 	 */
1963 	if ((zone->uz_flags & UMA_ZONE_NUMA) != 0)
1964 		domains = vm_ndomains;
1965 	else
1966 		domains = 1;
1967 	domainoid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(zone->uz_oid),
1968 	    OID_AUTO, "domain", CTLFLAG_RD, NULL, "");
1969 	for (i = 0; i < domains; i++) {
1970 		zdom = &zone->uz_domain[i];
1971 		oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(domainoid),
1972 		    OID_AUTO, VM_DOMAIN(i)->vmd_name, CTLFLAG_RD, NULL, "");
1973 		SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1974 		    "nitems", CTLFLAG_RD, &zdom->uzd_nitems,
1975 		    "number of items in this domain");
1976 		SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1977 		    "imax", CTLFLAG_RD, &zdom->uzd_imax,
1978 		    "maximum item count in this period");
1979 		SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1980 		    "imin", CTLFLAG_RD, &zdom->uzd_imin,
1981 		    "minimum item count in this period");
1982 		SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1983 		    "wss", CTLFLAG_RD, &zdom->uzd_wss,
1984 		    "Working set size");
1985 	}
1986 
1987 	/*
1988 	 * General statistics.
1989 	 */
1990 	oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(zone->uz_oid), OID_AUTO,
1991 	    "stats", CTLFLAG_RD, NULL, "");
1992 	SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1993 	    "current", CTLFLAG_RD | CTLTYPE_INT | CTLFLAG_MPSAFE,
1994 	    zone, 1, sysctl_handle_uma_zone_cur, "I",
1995 	    "Current number of allocated items");
1996 	SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
1997 	    "allocs", CTLFLAG_RD | CTLTYPE_U64 | CTLFLAG_MPSAFE,
1998 	    zone, 0, sysctl_handle_uma_zone_allocs, "QU",
1999 	    "Total allocation calls");
2000 	SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2001 	    "frees", CTLFLAG_RD | CTLTYPE_U64 | CTLFLAG_MPSAFE,
2002 	    zone, 0, sysctl_handle_uma_zone_frees, "QU",
2003 	    "Total free calls");
2004 	SYSCTL_ADD_COUNTER_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2005 	    "fails", CTLFLAG_RD, &zone->uz_fails,
2006 	    "Number of allocation failures");
2007 	SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2008 	    "xdomain", CTLFLAG_RD, &zone->uz_xdomain, 0,
2009 	    "Free calls from the wrong domain");
2010 }
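/*
 * As an illustration of the sysctl tree built above (zone name hypothetical),
 * a zone named "mbuf" is exported as:
 *
 *	vm.uma.mbuf.size
 *	vm.uma.mbuf.keg.ipers
 *	vm.uma.mbuf.limit.max_items
 *	vm.uma.mbuf.domain.<vmd_name>.nitems
 *	vm.uma.mbuf.stats.allocs
 */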
2011 
2012 struct uma_zone_count {
2013 	const char	*name;
2014 	int		count;
2015 };
2016 
2017 static void
2018 zone_count(uma_zone_t zone, void *arg)
2019 {
2020 	struct uma_zone_count *cnt;
2021 
2022 	cnt = arg;
2023 	if (strcmp(zone->uz_name, cnt->name) == 0)
2024 		cnt->count++;
2025 }
2026 
2027 /*
2028  * Zone header ctor.  This initializes all fields, locks, etc.
2029  *
2030  * Arguments/Returns follow uma_ctor specifications
2031  *	udata  Actually uma_zctor_args
2032  */
2033 static int
2034 zone_ctor(void *mem, int size, void *udata, int flags)
2035 {
2036 	struct uma_zone_count cnt;
2037 	struct uma_zctor_args *arg = udata;
2038 	uma_zone_t zone = mem;
2039 	uma_zone_t z;
2040 	uma_keg_t keg;
2041 	int i;
2042 
2043 	bzero(zone, size);
2044 	zone->uz_name = arg->name;
2045 	zone->uz_ctor = arg->ctor;
2046 	zone->uz_dtor = arg->dtor;
2047 	zone->uz_init = NULL;
2048 	zone->uz_fini = NULL;
2049 	zone->uz_sleeps = 0;
2050 	zone->uz_xdomain = 0;
2051 	zone->uz_bucket_size = 0;
2052 	zone->uz_bucket_size_min = 0;
2053 	zone->uz_bucket_size_max = BUCKET_MAX;
2054 	zone->uz_flags = 0;
2055 	zone->uz_warning = NULL;
2056 	/* The domain structures follow the cpu structures. */
2057 	zone->uz_domain = (struct uma_zone_domain *)&zone->uz_cpu[mp_ncpus];
2058 	zone->uz_bkt_max = ULONG_MAX;
2059 	timevalclear(&zone->uz_ratecheck);
2060 
2061 	/* Count the number of duplicate names. */
2062 	cnt.name = arg->name;
2063 	cnt.count = 0;
2064 	zone_foreach(zone_count, &cnt);
2065 	zone->uz_namecnt = cnt.count;
2066 
2067 	for (i = 0; i < vm_ndomains; i++)
2068 		TAILQ_INIT(&zone->uz_domain[i].uzd_buckets);
2069 
2070 #ifdef INVARIANTS
2071 	if (arg->uminit == trash_init && arg->fini == trash_fini)
2072 		zone->uz_flags |= UMA_ZFLAG_TRASH;
2073 #endif
2074 
2075 	/*
2076 	 * This is a pure cache zone, no kegs.
2077 	 */
2078 	if (arg->import) {
2079 		if (arg->flags & UMA_ZONE_VM)
2080 			arg->flags |= UMA_ZFLAG_CACHEONLY;
2081 		zone->uz_flags = arg->flags;
2082 		zone->uz_size = arg->size;
2083 		zone->uz_import = arg->import;
2084 		zone->uz_release = arg->release;
2085 		zone->uz_arg = arg->arg;
2086 		zone->uz_lockptr = &zone->uz_lock;
2087 		ZONE_LOCK_INIT(zone, (arg->flags & UMA_ZONE_MTXCLASS));
2088 		rw_wlock(&uma_rwlock);
2089 		LIST_INSERT_HEAD(&uma_cachezones, zone, uz_link);
2090 		rw_wunlock(&uma_rwlock);
2091 		goto out;
2092 	}
2093 
2094 	/*
2095 	 * Use the regular zone/keg/slab allocator.
2096 	 */
2097 	zone->uz_import = zone_import;
2098 	zone->uz_release = zone_release;
2099 	zone->uz_arg = zone;
2100 	keg = arg->keg;
2101 
2102 	if (arg->flags & UMA_ZONE_SECONDARY) {
2103 		KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0,
2104 		    ("Secondary zone created with UMA_ZONE_SECONDARY already set"));
2105 		KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
2106 		zone->uz_init = arg->uminit;
2107 		zone->uz_fini = arg->fini;
2108 		zone->uz_lockptr = &keg->uk_lock;
2109 		zone->uz_flags |= UMA_ZONE_SECONDARY;
2110 		rw_wlock(&uma_rwlock);
2111 		ZONE_LOCK(zone);
2112 		LIST_FOREACH(z, &keg->uk_zones, uz_link) {
2113 			if (LIST_NEXT(z, uz_link) == NULL) {
2114 				LIST_INSERT_AFTER(z, zone, uz_link);
2115 				break;
2116 			}
2117 		}
2118 		ZONE_UNLOCK(zone);
2119 		rw_wunlock(&uma_rwlock);
2120 	} else if (keg == NULL) {
2121 		if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
2122 		    arg->align, arg->flags)) == NULL)
2123 			return (ENOMEM);
2124 	} else {
2125 		struct uma_kctor_args karg;
2126 		int error;
2127 
2128 		/* We should only be here from uma_startup() */
2129 		karg.size = arg->size;
2130 		karg.uminit = arg->uminit;
2131 		karg.fini = arg->fini;
2132 		karg.align = arg->align;
2133 		karg.flags = arg->flags;
2134 		karg.zone = zone;
2135 		error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
2136 		    flags);
2137 		if (error)
2138 			return (error);
2139 	}
2140 
2141 	/* Inherit properties from the keg. */
2142 	zone->uz_keg = keg;
2143 	zone->uz_size = keg->uk_size;
2144 	zone->uz_flags |= (keg->uk_flags &
2145 	    (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT));
2146 
2147 out:
2148 	if (__predict_true(booted == BOOT_RUNNING)) {
2149 		zone_alloc_counters(zone, NULL);
2150 		zone_alloc_sysctl(zone, NULL);
2151 	} else {
2152 		zone->uz_allocs = EARLY_COUNTER;
2153 		zone->uz_frees = EARLY_COUNTER;
2154 		zone->uz_fails = EARLY_COUNTER;
2155 	}
2156 
2157 	KASSERT((arg->flags & (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET)) !=
2158 	    (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET),
2159 	    ("Invalid zone flag combination"));
2160 	if (arg->flags & UMA_ZFLAG_INTERNAL)
2161 		zone->uz_bucket_size_max = zone->uz_bucket_size = 0;
2162 	if ((arg->flags & UMA_ZONE_MAXBUCKET) != 0)
2163 		zone->uz_bucket_size = BUCKET_MAX;
2164 	else if ((arg->flags & UMA_ZONE_MINBUCKET) != 0)
2165 		zone->uz_bucket_size_max = zone->uz_bucket_size = BUCKET_MIN;
2166 	else if ((arg->flags & UMA_ZONE_NOBUCKET) != 0)
2167 		zone->uz_bucket_size = 0;
2168 	else
2169 		zone->uz_bucket_size = bucket_select(zone->uz_size);
2170 	zone->uz_bucket_size_min = zone->uz_bucket_size;
2171 
2172 	return (0);
2173 }
2174 
2175 /*
2176  * Keg header dtor.  This frees all data, destroys locks, frees the hash
2177  * table and removes the keg from the global list.
2178  *
2179  * Arguments/Returns follow uma_dtor specifications
2180  *	udata  unused
2181  */
2182 static void
2183 keg_dtor(void *arg, int size, void *udata)
2184 {
2185 	uma_keg_t keg;
2186 
2187 	keg = (uma_keg_t)arg;
2188 	KEG_LOCK(keg);
2189 	if (keg->uk_free != 0) {
2190 		printf("Freed UMA keg (%s) was not empty (%d items). "
2191 		    " Lost %d pages of memory.\n",
2192 		    keg->uk_name ? keg->uk_name : "",
2193 		    keg->uk_free, keg->uk_pages);
2194 	}
2195 	KEG_UNLOCK(keg);
2196 
2197 	hash_free(&keg->uk_hash);
2198 
2199 	KEG_LOCK_FINI(keg);
2200 }
2201 
2202 /*
2203  * Zone header dtor.
2204  *
2205  * Arguments/Returns follow uma_dtor specifications
2206  *	udata  unused
2207  */
2208 static void
2209 zone_dtor(void *arg, int size, void *udata)
2210 {
2211 	uma_zone_t zone;
2212 	uma_keg_t keg;
2213 
2214 	zone = (uma_zone_t)arg;
2215 
2216 	sysctl_remove_oid(zone->uz_oid, 1, 1);
2217 
2218 	if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
2219 		cache_drain(zone);
2220 
2221 	rw_wlock(&uma_rwlock);
2222 	LIST_REMOVE(zone, uz_link);
2223 	rw_wunlock(&uma_rwlock);
2224 	/*
2225 	 * XXX there are some races here where
2226 	 * the zone can be drained but the zone lock
2227 	 * released and then refilled before we
2228 	 * remove it... we don't care for now
2229 	 */
2230 	zone_reclaim(zone, M_WAITOK, true);
2231 	/*
2232 	 * We only destroy kegs from non secondary/non cache zones.
2233 	 */
2234 	if ((zone->uz_flags & (UMA_ZONE_SECONDARY | UMA_ZFLAG_CACHE)) == 0) {
2235 		keg = zone->uz_keg;
2236 		rw_wlock(&uma_rwlock);
2237 		LIST_REMOVE(keg, uk_link);
2238 		rw_wunlock(&uma_rwlock);
2239 		zone_free_item(kegs, keg, NULL, SKIP_NONE);
2240 	}
2241 	counter_u64_free(zone->uz_allocs);
2242 	counter_u64_free(zone->uz_frees);
2243 	counter_u64_free(zone->uz_fails);
2244 	free(zone->uz_ctlname, M_UMA);
2245 	if (zone->uz_lockptr == &zone->uz_lock)
2246 		ZONE_LOCK_FINI(zone);
2247 }
2248 
2249 /*
2250  * Traverses every zone in the system and calls a callback
2251  *
2252  * Arguments:
2253  *	zfunc  A pointer to a function which accepts a zone
2254  *		as an argument.
2255  *
2256  * Returns:
2257  *	Nothing
2258  */
2259 static void
2260 zone_foreach(void (*zfunc)(uma_zone_t, void *arg), void *arg)
2261 {
2262 	uma_keg_t keg;
2263 	uma_zone_t zone;
2264 
2265 	/*
2266 	 * Before BOOT_RUNNING we are guaranteed to be single
2267 	 * threaded, so locking isn't needed. Startup functions
2268 	 * are allowed to use M_WAITOK.
2269 	 */
2270 	if (__predict_true(booted == BOOT_RUNNING))
2271 		rw_rlock(&uma_rwlock);
2272 	LIST_FOREACH(keg, &uma_kegs, uk_link) {
2273 		LIST_FOREACH(zone, &keg->uk_zones, uz_link)
2274 			zfunc(zone, arg);
2275 	}
2276 	LIST_FOREACH(zone, &uma_cachezones, uz_link)
2277 		zfunc(zone, arg);
2278 	if (__predict_true(booted == BOOT_RUNNING))
2279 		rw_runlock(&uma_rwlock);
2280 }
2281 
2282 /*
2283  * Count how many pages we need to bootstrap.  VM supplies its
2284  * need for early zones in the argument, and we add our own zones,
2285  * which consist of: UMA Slabs, UMA Hash and 9 Bucket zones.  The
2286  * zone of zones and zone of kegs are accounted for separately.
2287  */
2288 #define	UMA_BOOT_ZONES	11
2289 /* Zone of zones and zone of kegs have arbitrary alignment. */
2290 #define	UMA_BOOT_ALIGN	32
2291 static int zsize, ksize;
2292 int
2293 uma_startup_count(int vm_zones)
2294 {
2295 	int zones, pages;
2296 	size_t space, size;
2297 
2298 	ksize = sizeof(struct uma_keg) +
2299 	    (sizeof(struct uma_domain) * vm_ndomains);
2300 	zsize = sizeof(struct uma_zone) +
2301 	    (sizeof(struct uma_cache) * (mp_maxid + 1)) +
2302 	    (sizeof(struct uma_zone_domain) * vm_ndomains);
2303 
2304 	/*
2305 	 * Memory for the zone of kegs and its keg,
2306 	 * and for zone of zones.
2307 	 */
2308 	pages = howmany(roundup(zsize, CACHE_LINE_SIZE) * 2 +
2309 	    roundup(ksize, CACHE_LINE_SIZE), PAGE_SIZE);
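	/*
	 * Purely illustrative arithmetic (sizes are hypothetical): if zsize
	 * rounds to 2048 bytes and ksize to 1024 bytes with 4 KB pages, the
	 * term above is howmany(2 * 2048 + 1024, 4096) = 2 pages.
	 */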
2310 
2311 #ifdef	UMA_MD_SMALL_ALLOC
2312 	zones = UMA_BOOT_ZONES;
2313 #else
2314 	zones = UMA_BOOT_ZONES + vm_zones;
2315 	vm_zones = 0;
2316 #endif
2317 	size = slab_sizeof(SLAB_MAX_SETSIZE);
2318 	space = slab_space(SLAB_MAX_SETSIZE);
2319 
2320 	/* Memory for the rest of startup zones, UMA and VM, ... */
2321 	if (zsize > space) {
2322 		/* See keg_large_init(). */
2323 		u_int ppera;
2324 
2325 		ppera = howmany(roundup2(zsize, UMA_BOOT_ALIGN), PAGE_SIZE);
2326 		if (PAGE_SIZE * ppera - roundup2(zsize, UMA_BOOT_ALIGN) < size)
2327 			ppera++;
2328 		pages += (zones + vm_zones) * ppera;
2329 	} else if (roundup2(zsize, UMA_BOOT_ALIGN) > space)
2330 		/* See keg_small_init() special case for uk_ppera = 1. */
2331 		pages += zones;
2332 	else
2333 		pages += howmany(zones,
2334 		    space / roundup2(zsize, UMA_BOOT_ALIGN));
2335 
2336 	/* ... and their kegs. Note that zone of zones allocates a keg! */
2337 	pages += howmany(zones + 1,
2338 	    space / roundup2(ksize, UMA_BOOT_ALIGN));
2339 
2340 	return (pages);
2341 }
2342 
2343 void
2344 uma_startup(void *mem, int npages)
2345 {
2346 	struct uma_zctor_args args;
2347 	uma_keg_t masterkeg;
2348 	uintptr_t m;
2349 
2350 #ifdef DIAGNOSTIC
2351 	printf("Entering %s with %d boot pages configured\n", __func__, npages);
2352 #endif
2353 
2354 	rw_init(&uma_rwlock, "UMA lock");
2355 
2356 	/* Use bootpages memory for the zone of zones and zone of kegs. */
2357 	m = (uintptr_t)mem;
2358 	zones = (uma_zone_t)m;
2359 	m += roundup(zsize, CACHE_LINE_SIZE);
2360 	kegs = (uma_zone_t)m;
2361 	m += roundup(zsize, CACHE_LINE_SIZE);
2362 	masterkeg = (uma_keg_t)m;
2363 	m += roundup(ksize, CACHE_LINE_SIZE);
2364 	m = roundup(m, PAGE_SIZE);
2365 	npages -= (m - (uintptr_t)mem) / PAGE_SIZE;
2366 	mem = (void *)m;
2367 
2368 	/* "manually" create the initial zone */
2369 	memset(&args, 0, sizeof(args));
2370 	args.name = "UMA Kegs";
2371 	args.size = ksize;
2372 	args.ctor = keg_ctor;
2373 	args.dtor = keg_dtor;
2374 	args.uminit = zero_init;
2375 	args.fini = NULL;
2376 	args.keg = masterkeg;
2377 	args.align = UMA_BOOT_ALIGN - 1;
2378 	args.flags = UMA_ZFLAG_INTERNAL;
2379 	zone_ctor(kegs, zsize, &args, M_WAITOK);
2380 
2381 	bootmem = mem;
2382 	boot_pages = npages;
2383 
2384 	args.name = "UMA Zones";
2385 	args.size = zsize;
2386 	args.ctor = zone_ctor;
2387 	args.dtor = zone_dtor;
2388 	args.uminit = zero_init;
2389 	args.fini = NULL;
2390 	args.keg = NULL;
2391 	args.align = UMA_BOOT_ALIGN - 1;
2392 	args.flags = UMA_ZFLAG_INTERNAL;
2393 	zone_ctor(zones, zsize, &args, M_WAITOK);
2394 
2395 	/* Now make a zone for slab headers */
2396 	slabzone = uma_zcreate("UMA Slabs",
2397 				slab_sizeof(SLAB_MAX_SETSIZE),
2398 				NULL, NULL, NULL, NULL,
2399 				UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
2400 
2401 	hashzone = uma_zcreate("UMA Hash",
2402 	    sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
2403 	    NULL, NULL, NULL, NULL,
2404 	    UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
2405 
2406 	bucket_init();
2407 
2408 	booted = BOOT_STRAPPED;
2409 }
2410 
2411 void
2412 uma_startup1(void)
2413 {
2414 
2415 #ifdef DIAGNOSTIC
2416 	printf("Entering %s with %d boot pages left\n", __func__, boot_pages);
2417 #endif
2418 	booted = BOOT_PAGEALLOC;
2419 }
2420 
2421 void
2422 uma_startup2(void)
2423 {
2424 
2425 #ifdef DIAGNOSTIC
2426 	printf("Entering %s with %d boot pages left\n", __func__, boot_pages);
2427 #endif
2428 	booted = BOOT_BUCKETS;
2429 	sx_init(&uma_reclaim_lock, "umareclaim");
2430 	bucket_enable();
2431 }
2432 
2433 /*
2434  * Finish starting up: allocate counters and sysctl nodes for the early
2435  * zones and initialize our callout handle.
2436  */
2437 static void
2438 uma_startup3(void)
2439 {
2440 
2441 #ifdef INVARIANTS
2442 	TUNABLE_INT_FETCH("vm.debug.divisor", &dbg_divisor);
2443 	uma_dbg_cnt = counter_u64_alloc(M_WAITOK);
2444 	uma_skip_cnt = counter_u64_alloc(M_WAITOK);
2445 #endif
2446 	zone_foreach(zone_alloc_counters, NULL);
2447 	zone_foreach(zone_alloc_sysctl, NULL);
2448 	callout_init(&uma_callout, 1);
2449 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
2450 	booted = BOOT_RUNNING;
2451 }
2452 
2453 static uma_keg_t
2454 uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
2455 		int align, uint32_t flags)
2456 {
2457 	struct uma_kctor_args args;
2458 
2459 	args.size = size;
2460 	args.uminit = uminit;
2461 	args.fini = fini;
2462 	args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align;
2463 	args.flags = flags;
2464 	args.zone = zone;
2465 	return (zone_alloc_item(kegs, &args, UMA_ANYDOMAIN, M_WAITOK));
2466 }
2467 
2468 /* Public functions */
2469 /* See uma.h */
2470 void
2471 uma_set_align(int align)
2472 {
2473 
2474 	if (align != UMA_ALIGN_CACHE)
2475 		uma_align_cache = align;
2476 }
2477 
2478 /* See uma.h */
2479 uma_zone_t
2480 uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
2481 		uma_init uminit, uma_fini fini, int align, uint32_t flags)
2482 
2483 {
2484 	struct uma_zctor_args args;
2485 	uma_zone_t res;
2486 	bool locked;
2487 
2488 	KASSERT(powerof2(align + 1), ("invalid zone alignment %d for \"%s\"",
2489 	    align, name));
2490 
2491 	/* Sets all zones to a first-touch domain policy. */
2492 #ifdef UMA_FIRSTTOUCH
2493 	flags |= UMA_ZONE_NUMA;
2494 #endif
2495 
2496 	/* This stuff is essential for the zone ctor */
2497 	memset(&args, 0, sizeof(args));
2498 	args.name = name;
2499 	args.size = size;
2500 	args.ctor = ctor;
2501 	args.dtor = dtor;
2502 	args.uminit = uminit;
2503 	args.fini = fini;
2504 #ifdef  INVARIANTS
2505 	/*
2506 	 * Inject procedures which check for memory use after free if we are
2507 	 * allowed to scramble the memory while it is not allocated.  This
2508 	 * requires that: UMA is actually able to access the memory, no init
2509 	 * or fini procedures, no dependency on the initial value of the
2510 	 * memory, and no (legitimate) use of the memory after free.  Note,
2511 	 * the ctor and dtor do not need to be empty.
2512 	 *
2513 	 * XXX UMA_ZONE_OFFPAGE.
2514 	 */
2515 	if ((!(flags & (UMA_ZONE_ZINIT | UMA_ZONE_NOFREE))) &&
2516 	    uminit == NULL && fini == NULL) {
2517 		args.uminit = trash_init;
2518 		args.fini = trash_fini;
2519 	}
2520 #endif
2521 	args.align = align;
2522 	args.flags = flags;
2523 	args.keg = NULL;
2524 
2525 	if (booted < BOOT_BUCKETS) {
2526 		locked = false;
2527 	} else {
2528 		sx_slock(&uma_reclaim_lock);
2529 		locked = true;
2530 	}
2531 	res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK);
2532 	if (locked)
2533 		sx_sunlock(&uma_reclaim_lock);
2534 	return (res);
2535 }
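/*
 * Typical use (sketch only; the "foo" names are hypothetical and not part
 * of this file):
 *
 *	foo_zone = uma_zcreate("foo", sizeof(struct foo),
 *	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
 *	p = uma_zalloc(foo_zone, M_WAITOK | M_ZERO);
 *	...
 *	uma_zfree(foo_zone, p);
 */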
2536 
2537 /* See uma.h */
2538 uma_zone_t
2539 uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
2540 		    uma_init zinit, uma_fini zfini, uma_zone_t master)
2541 {
2542 	struct uma_zctor_args args;
2543 	uma_keg_t keg;
2544 	uma_zone_t res;
2545 	bool locked;
2546 
2547 	keg = master->uz_keg;
2548 	memset(&args, 0, sizeof(args));
2549 	args.name = name;
2550 	args.size = keg->uk_size;
2551 	args.ctor = ctor;
2552 	args.dtor = dtor;
2553 	args.uminit = zinit;
2554 	args.fini = zfini;
2555 	args.align = keg->uk_align;
2556 	args.flags = keg->uk_flags | UMA_ZONE_SECONDARY;
2557 	args.keg = keg;
2558 
2559 	if (booted < BOOT_BUCKETS) {
2560 		locked = false;
2561 	} else {
2562 		sx_slock(&uma_reclaim_lock);
2563 		locked = true;
2564 	}
2565 	/* XXX Attaches only one keg of potentially many. */
2566 	res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK);
2567 	if (locked)
2568 		sx_sunlock(&uma_reclaim_lock);
2569 	return (res);
2570 }
2571 
2572 /* See uma.h */
2573 uma_zone_t
2574 uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor,
2575 		    uma_init zinit, uma_fini zfini, uma_import zimport,
2576 		    uma_release zrelease, void *arg, int flags)
2577 {
2578 	struct uma_zctor_args args;
2579 
2580 	memset(&args, 0, sizeof(args));
2581 	args.name = name;
2582 	args.size = size;
2583 	args.ctor = ctor;
2584 	args.dtor = dtor;
2585 	args.uminit = zinit;
2586 	args.fini = zfini;
2587 	args.import = zimport;
2588 	args.release = zrelease;
2589 	args.arg = arg;
2590 	args.align = 0;
2591 	args.flags = flags | UMA_ZFLAG_CACHE;
2592 
2593 	return (zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK));
2594 }
2595 
2596 /* See uma.h */
2597 void
2598 uma_zdestroy(uma_zone_t zone)
2599 {
2600 
2601 	sx_slock(&uma_reclaim_lock);
2602 	zone_free_item(zones, zone, NULL, SKIP_NONE);
2603 	sx_sunlock(&uma_reclaim_lock);
2604 }
2605 
2606 void
2607 uma_zwait(uma_zone_t zone)
2608 {
2609 	void *item;
2610 
2611 	item = uma_zalloc_arg(zone, NULL, M_WAITOK);
2612 	uma_zfree(zone, item);
2613 }
2614 
2615 void *
2616 uma_zalloc_pcpu_arg(uma_zone_t zone, void *udata, int flags)
2617 {
2618 	void *item;
2619 #ifdef SMP
2620 	int i;
2621 
2622 	MPASS(zone->uz_flags & UMA_ZONE_PCPU);
2623 #endif
2624 	item = uma_zalloc_arg(zone, udata, flags & ~M_ZERO);
2625 	if (item != NULL && (flags & M_ZERO)) {
2626 #ifdef SMP
2627 		for (i = 0; i <= mp_maxid; i++)
2628 			bzero(zpcpu_get_cpu(item, i), zone->uz_size);
2629 #else
2630 		bzero(item, zone->uz_size);
2631 #endif
2632 	}
2633 	return (item);
2634 }
2635 
2636 /*
2637  * A stub while both regular and pcpu cases are identical.
2638  */
2639 void
2640 uma_zfree_pcpu_arg(uma_zone_t zone, void *item, void *udata)
2641 {
2642 
2643 #ifdef SMP
2644 	MPASS(zone->uz_flags & UMA_ZONE_PCPU);
2645 #endif
2646 	uma_zfree_arg(zone, item, udata);
2647 }
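/*
 * Illustrative use of the pcpu variants (names hypothetical): a zone created
 * with UMA_ZONE_PCPU hands out an item with one copy per CPU, which callers
 * typically address with zpcpu_get()/zpcpu_get_cpu(), e.g.:
 *
 *	c = uma_zalloc_pcpu(pcpu_zone, M_WAITOK | M_ZERO);
 *	v = zpcpu_get(c);		current CPU's copy
 *	uma_zfree_pcpu(pcpu_zone, c);
 */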
2648 
2649 static inline void *
2650 bucket_pop(uma_zone_t zone, uma_cache_t cache, uma_bucket_t bucket)
2651 {
2652 	void *item;
2653 
2654 	bucket->ub_cnt--;
2655 	item = bucket->ub_bucket[bucket->ub_cnt];
2656 #ifdef INVARIANTS
2657 	bucket->ub_bucket[bucket->ub_cnt] = NULL;
2658 	KASSERT(item != NULL, ("uma_zalloc: Bucket pointer mangled."));
2659 #endif
2660 	cache->uc_allocs++;
2661 
2662 	return (item);
2663 }
2664 
2665 static inline void
2666 bucket_push(uma_zone_t zone, uma_cache_t cache, uma_bucket_t bucket,
2667     void *item)
2668 {
2669 	KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL,
2670 	    ("uma_zfree: Freeing to non free bucket index."));
2671 	bucket->ub_bucket[bucket->ub_cnt] = item;
2672 	bucket->ub_cnt++;
2673 	cache->uc_frees++;
2674 }
2675 
2676 static void *
2677 item_ctor(uma_zone_t zone, void *udata, int flags, void *item)
2678 {
2679 #ifdef INVARIANTS
2680 	bool skipdbg;
2681 
2682 	skipdbg = uma_dbg_zskip(zone, item);
2683 	if (!skipdbg && (zone->uz_flags & UMA_ZFLAG_TRASH) != 0 &&
2684 	    zone->uz_ctor != trash_ctor)
2685 		trash_ctor(item, zone->uz_size, udata, flags);
2686 #endif
2687 	if (__predict_false(zone->uz_ctor != NULL) &&
2688 	    zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
2689 		counter_u64_add(zone->uz_fails, 1);
2690 		zone_free_item(zone, item, udata, SKIP_DTOR | SKIP_CNT);
2691 		return (NULL);
2692 	}
2693 #ifdef INVARIANTS
2694 	if (!skipdbg)
2695 		uma_dbg_alloc(zone, NULL, item);
2696 #endif
2697 	if (flags & M_ZERO)
2698 		uma_zero_item(item, zone);
2699 
2700 	return (item);
2701 }
2702 
2703 static inline void
2704 item_dtor(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip)
2705 {
2706 #ifdef INVARIANTS
2707 	bool skipdbg;
2708 
2709 	skipdbg = uma_dbg_zskip(zone, item);
2710 	if (skip == SKIP_NONE && !skipdbg) {
2711 		if ((zone->uz_flags & UMA_ZONE_MALLOC) != 0)
2712 			uma_dbg_free(zone, udata, item);
2713 		else
2714 			uma_dbg_free(zone, NULL, item);
2715 	}
2716 #endif
2717 	if (skip < SKIP_DTOR) {
2718 		if (zone->uz_dtor != NULL)
2719 			zone->uz_dtor(item, zone->uz_size, udata);
2720 #ifdef INVARIANTS
2721 		if (!skipdbg && (zone->uz_flags & UMA_ZFLAG_TRASH) != 0 &&
2722 		    zone->uz_dtor != trash_dtor)
2723 			trash_dtor(item, zone->uz_size, udata);
2724 #endif
2725 	}
2726 }
2727 
2728 /* See uma.h */
2729 void *
2730 uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
2731 {
2732 	uma_bucket_t bucket;
2733 	uma_cache_t cache;
2734 	void *item;
2735 	int cpu, domain;
2736 
2737 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
2738 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
2739 
2740 	/* This is the fast path allocation */
2741 	CTR4(KTR_UMA, "uma_zalloc_arg thread %x zone %s(%p) flags %d",
2742 	    curthread, zone->uz_name, zone, flags);
2743 
2744 	if (flags & M_WAITOK) {
2745 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2746 		    "uma_zalloc_arg: zone \"%s\"", zone->uz_name);
2747 	}
2748 	KASSERT((flags & M_EXEC) == 0, ("uma_zalloc_arg: called with M_EXEC"));
2749 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
2750 	    ("uma_zalloc_arg: called with spinlock or critical section held"));
2751 	if (zone->uz_flags & UMA_ZONE_PCPU)
2752 		KASSERT((flags & M_ZERO) == 0, ("allocating from a pcpu zone "
2753 		    "with M_ZERO passed"));
2754 
2755 #ifdef DEBUG_MEMGUARD
2756 	if (memguard_cmp_zone(zone)) {
2757 		item = memguard_alloc(zone->uz_size, flags);
2758 		if (item != NULL) {
2759 			if (zone->uz_init != NULL &&
2760 			    zone->uz_init(item, zone->uz_size, flags) != 0)
2761 				return (NULL);
2762 			if (zone->uz_ctor != NULL &&
2763 			    zone->uz_ctor(item, zone->uz_size, udata,
2764 			    flags) != 0) {
2765 				counter_u64_add(zone->uz_fails, 1);
2766 			    	zone->uz_fini(item, zone->uz_size);
2767 				return (NULL);
2768 			}
2769 			return (item);
2770 		}
2771 		/* This is unfortunate but should not be fatal. */
2772 	}
2773 #endif
2774 	/*
2775 	 * If possible, allocate from the per-CPU cache.  There are two
2776 	 * requirements for safe access to the per-CPU cache: (1) the thread
2777 	 * accessing the cache must not be preempted or yield during access,
2778 	 * and (2) the thread must not migrate CPUs without switching which
2779 	 * cache it accesses.  We rely on a critical section to prevent
2780 	 * preemption and migration.  We release the critical section in
2781 	 * order to acquire the zone mutex if we are unable to allocate from
2782 	 * the current cache; when we re-acquire the critical section, we
2783 	 * must detect and handle migration if it has occurred.
2784 	 */
2785 	critical_enter();
2786 	do {
2787 		cpu = curcpu;
2788 		cache = &zone->uz_cpu[cpu];
2789 		bucket = cache->uc_allocbucket;
2790 		if (__predict_true(bucket != NULL && bucket->ub_cnt != 0)) {
2791 			item = bucket_pop(zone, cache, bucket);
2792 			critical_exit();
2793 			return (item_ctor(zone, udata, flags, item));
2794 		}
2795 	} while (cache_alloc(zone, cache, udata, flags));
2796 	critical_exit();
2797 
2798 	/*
2799 	 * We cannot get a bucket, so try to return a single item.
2800 	 */
2801 	if (zone->uz_flags & UMA_ZONE_NUMA)
2802 		domain = PCPU_GET(domain);
2803 	else
2804 		domain = UMA_ANYDOMAIN;
2805 	return (zone_alloc_item_locked(zone, udata, domain, flags));
2806 }
2807 
2808 /*
2809  * Replenish an alloc bucket and possibly restore an old one.  Called in
2810  * a critical section.  Returns in a critical section.
2811  *
2812  * A false return value indicates failure; in that case the zone lock is
2813  * held on return.  A true return value means success; the caller should retry.
2814  */
2815 static __noinline bool
2816 cache_alloc(uma_zone_t zone, uma_cache_t cache, void *udata, int flags)
2817 {
2818 	uma_zone_domain_t zdom;
2819 	uma_bucket_t bucket;
2820 	int cpu, domain;
2821 	bool lockfail;
2822 
2823 	CRITICAL_ASSERT(curthread);
2824 
2825 	/*
2826 	 * If we have run out of items in our alloc bucket, see
2827 	 * if we can switch with the free bucket.
2828 	 */
2829 	bucket = cache->uc_freebucket;
2830 	if (bucket != NULL && bucket->ub_cnt != 0) {
2831 		cache->uc_freebucket = cache->uc_allocbucket;
2832 		cache->uc_allocbucket = bucket;
2833 		return (true);
2834 	}
2835 
2836 	/*
2837 	 * Discard any empty allocation bucket while we hold no locks.
2838 	 */
2839 	bucket = cache->uc_allocbucket;
2840 	cache->uc_allocbucket = NULL;
2841 	critical_exit();
2842 	if (bucket != NULL)
2843 		bucket_free(zone, bucket, udata);
2844 
2845 	/*
2845 	 * The attempt to retrieve the item from the per-CPU cache has failed,
2846 	 * so we must go back to the zone.  This requires the zone lock, so we
2848 	 * must drop the critical section, then re-acquire it when we go back
2849 	 * to the cache.  Since the critical section is released, we may be
2850 	 * preempted or migrate.  As such, make sure not to maintain any
2851 	 * thread-local state specific to the cache from prior to releasing
2852 	 * the critical section.
2853 	 */
2854 	lockfail = false;
2855 	if (ZONE_TRYLOCK(zone) == 0) {
2856 		/* Record contention to size the buckets. */
2857 		ZONE_LOCK(zone);
2858 		lockfail = true;
2859 	}
2860 
2861 	critical_enter();
2862 	/* Short-circuit for zones without buckets and low memory. */
2863 	if (zone->uz_bucket_size == 0 || bucketdisable)
2864 		return (false);
2865 
2866 	cpu = curcpu;
2867 	cache = &zone->uz_cpu[cpu];
2868 
2869 	/* See if we lost the race to fill the cache. */
2870 	if (cache->uc_allocbucket != NULL) {
2871 		ZONE_UNLOCK(zone);
2872 		return (true);
2873 	}
2874 
2875 	/*
2876 	 * Check the zone's cache of buckets.
2877 	 */
2878 	if (zone->uz_flags & UMA_ZONE_NUMA) {
2879 		domain = PCPU_GET(domain);
2880 		zdom = &zone->uz_domain[domain];
2881 	} else {
2882 		domain = UMA_ANYDOMAIN;
2883 		zdom = &zone->uz_domain[0];
2884 	}
2885 
2886 	if ((bucket = zone_fetch_bucket(zone, zdom)) != NULL) {
2887 		ZONE_UNLOCK(zone);
2888 		KASSERT(bucket->ub_cnt != 0,
2889 		    ("uma_zalloc_arg: Returning an empty bucket."));
2890 		cache->uc_allocbucket = bucket;
2891 		return (true);
2892 	}
2893 	/* We are no longer associated with this CPU. */
2894 	critical_exit();
2895 
2896 	/*
2897 	 * We bump the zone's desired bucket size when lock contention
2898 	 * indicates the cache size is insufficient to handle the working set.
2899 	 */
2900 	if (lockfail && zone->uz_bucket_size < zone->uz_bucket_size_max)
2901 		zone->uz_bucket_size++;
2902 
2903 	/*
2904 	 * Fill a bucket and attempt to use it as the alloc bucket.
2905 	 */
2906 	bucket = zone_alloc_bucket(zone, udata, domain, flags);
2907 	CTR3(KTR_UMA, "uma_zalloc: zone %s(%p) bucket zone returned %p",
2908 	    zone->uz_name, zone, bucket);
2909 	critical_enter();
2910 	if (bucket == NULL)
2911 		return (false);
2912 
2913 	/*
2914 	 * See if we lost the race or were migrated.  Cache the
2915 	 * initialized bucket to make this less likely or claim
2916 	 * the memory directly.
2917 	 */
2918 	cpu = curcpu;
2919 	cache = &zone->uz_cpu[cpu];
2920 	if (cache->uc_allocbucket == NULL &&
2921 	    ((zone->uz_flags & UMA_ZONE_NUMA) == 0 ||
2922 	    domain == PCPU_GET(domain))) {
2923 		cache->uc_allocbucket = bucket;
2924 		zdom->uzd_imax += bucket->ub_cnt;
2925 	} else if (zone->uz_bkt_count >= zone->uz_bkt_max) {
2926 		critical_exit();
2927 		ZONE_UNLOCK(zone);
2928 		bucket_drain(zone, bucket);
2929 		bucket_free(zone, bucket, udata);
2930 		critical_enter();
2931 		return (true);
2932 	} else
2933 		zone_put_bucket(zone, zdom, bucket, false);
2934 	ZONE_UNLOCK(zone);
2935 	return (true);
2936 }
2937 
2938 void *
2939 uma_zalloc_domain(uma_zone_t zone, void *udata, int domain, int flags)
2940 {
2941 
2942 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
2943 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
2944 
2945 	/* This is the fast path allocation */
2946 	CTR5(KTR_UMA,
2947 	    "uma_zalloc_domain thread %x zone %s(%p) domain %d flags %d",
2948 	    curthread, zone->uz_name, zone, domain, flags);
2949 
2950 	if (flags & M_WAITOK) {
2951 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2952 		    "uma_zalloc_domain: zone \"%s\"", zone->uz_name);
2953 	}
2954 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
2955 	    ("uma_zalloc_domain: called with spinlock or critical section held"));
2956 
2957 	return (zone_alloc_item(zone, udata, domain, flags));
2958 }
2959 
2960 /*
2961  * Find a slab with some space.  Prefer slabs that are partially used over
2962  * those that are completely free.  This helps to reduce fragmentation.
2963  *
2964  * If 'rr' is 1, search all domains starting from 'domain'.  Otherwise check
2965  * only 'domain'.
2966  */
2967 static uma_slab_t
2968 keg_first_slab(uma_keg_t keg, int domain, bool rr)
2969 {
2970 	uma_domain_t dom;
2971 	uma_slab_t slab;
2972 	int start;
2973 
2974 	KASSERT(domain >= 0 && domain < vm_ndomains,
2975 	    ("keg_first_slab: domain %d out of range", domain));
2976 	KEG_LOCK_ASSERT(keg);
2977 
2978 	slab = NULL;
2979 	start = domain;
2980 	do {
2981 		dom = &keg->uk_domain[domain];
2982 		if (!LIST_EMPTY(&dom->ud_part_slab))
2983 			return (LIST_FIRST(&dom->ud_part_slab));
2984 		if (!LIST_EMPTY(&dom->ud_free_slab)) {
2985 			slab = LIST_FIRST(&dom->ud_free_slab);
2986 			LIST_REMOVE(slab, us_link);
2987 			LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
2988 			return (slab);
2989 		}
2990 		if (rr)
2991 			domain = (domain + 1) % vm_ndomains;
2992 	} while (domain != start);
2993 
2994 	return (NULL);
2995 }
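/*
 * Illustrative example of the round-robin search above: with
 * vm_ndomains == 4 and a starting domain of 2, the loop visits domains
 * in the order 2, 3, 0, 1 before giving up.
 */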
2996 
2997 static uma_slab_t
2998 keg_fetch_free_slab(uma_keg_t keg, int domain, bool rr, int flags)
2999 {
3000 	uint32_t reserve;
3001 
3002 	KEG_LOCK_ASSERT(keg);
3003 
3004 	reserve = (flags & M_USE_RESERVE) != 0 ? 0 : keg->uk_reserve;
3005 	if (keg->uk_free <= reserve)
3006 		return (NULL);
3007 	return (keg_first_slab(keg, domain, rr));
3008 }
3009 
3010 static uma_slab_t
3011 keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int rdomain, const int flags)
3012 {
3013 	struct vm_domainset_iter di;
3014 	uma_domain_t dom;
3015 	uma_slab_t slab;
3016 	int aflags, domain;
3017 	bool rr;
3018 
3019 restart:
3020 	KEG_LOCK_ASSERT(keg);
3021 
3022 	/*
3023 	 * Use the keg's policy if upper layers haven't already specified a
3024 	 * domain (as happens with first-touch zones).
3025 	 *
3026 	 * To avoid races we run the iterator with the keg lock held, but that
3027 	 * means that we cannot allow the vm_domainset layer to sleep.  Thus,
3028 	 * clear M_WAITOK and handle low memory conditions locally.
3029 	 */
3030 	rr = rdomain == UMA_ANYDOMAIN;
3031 	if (rr) {
3032 		aflags = (flags & ~M_WAITOK) | M_NOWAIT;
3033 		vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain,
3034 		    &aflags);
3035 	} else {
3036 		aflags = flags;
3037 		domain = rdomain;
3038 	}
3039 
3040 	for (;;) {
3041 		slab = keg_fetch_free_slab(keg, domain, rr, flags);
3042 		if (slab != NULL)
3043 			return (slab);
3044 
3045 		/*
3046 		 * M_NOVM means don't ask at all!
3047 		 */
3048 		if (flags & M_NOVM)
3049 			break;
3050 
3051 		KASSERT(zone->uz_max_items == 0 ||
3052 		    zone->uz_items <= zone->uz_max_items,
3053 		    ("%s: zone %p overflow", __func__, zone));
3054 
3055 		slab = keg_alloc_slab(keg, zone, domain, flags, aflags);
3056 		/*
3057 		 * If we got a slab here it's safe to mark it partially used
3058 		 * and return.  We assume that the caller is going to remove
3059 		 * at least one item.
3060 		 */
3061 		if (slab) {
3062 			dom = &keg->uk_domain[slab->us_domain];
3063 			LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
3064 			return (slab);
3065 		}
3066 		KEG_LOCK(keg);
3067 		if (rr && vm_domainset_iter_policy(&di, &domain) != 0) {
3068 			if ((flags & M_WAITOK) != 0) {
3069 				KEG_UNLOCK(keg);
3070 				vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask);
3071 				KEG_LOCK(keg);
3072 				goto restart;
3073 			}
3074 			break;
3075 		}
3076 	}
3077 
3078 	/*
3079 	 * We might not have been able to get a slab but another cpu
3080 	 * could have while we were unlocked.  Check again before we
3081 	 * fail.
3082 	 */
3083 	if ((slab = keg_fetch_free_slab(keg, domain, rr, flags)) != NULL) {
3084 		return (slab);
3085 	}
3086 	return (NULL);
3087 }
3088 
3089 static void *
3090 slab_alloc_item(uma_keg_t keg, uma_slab_t slab)
3091 {
3092 	uma_domain_t dom;
3093 	void *item;
3094 	uint8_t freei;
3095 
3096 	KEG_LOCK_ASSERT(keg);
3097 
3098 	freei = BIT_FFS(keg->uk_ipers, &slab->us_free) - 1;
3099 	BIT_CLR(keg->uk_ipers, freei, &slab->us_free);
3100 	item = slab->us_data + (keg->uk_rsize * freei);
3101 	slab->us_freecount--;
3102 	keg->uk_free--;
3103 
3104 	/* Move this slab to the full list */
3105 	if (slab->us_freecount == 0) {
3106 		LIST_REMOVE(slab, us_link);
3107 		dom = &keg->uk_domain[slab->us_domain];
3108 		LIST_INSERT_HEAD(&dom->ud_full_slab, slab, us_link);
3109 	}
3110 
3111 	return (item);
3112 }
3113 
3114 static int
3115 zone_import(void *arg, void **bucket, int max, int domain, int flags)
3116 {
3117 	uma_zone_t zone;
3118 	uma_slab_t slab;
3119 	uma_keg_t keg;
3120 #ifdef NUMA
3121 	int stripe;
3122 #endif
3123 	int i;
3124 
3125 	zone = arg;
3126 	slab = NULL;
3127 	keg = zone->uz_keg;
3128 	KEG_LOCK(keg);
3129 	/* Try to keep the buckets totally full */
3130 	for (i = 0; i < max; ) {
3131 		if ((slab = keg_fetch_slab(keg, zone, domain, flags)) == NULL)
3132 			break;
3133 #ifdef NUMA
3134 		stripe = howmany(max, vm_ndomains);
3135 #endif
3136 		while (slab->us_freecount && i < max) {
3137 			bucket[i++] = slab_alloc_item(keg, slab);
3138 			if (keg->uk_free <= keg->uk_reserve)
3139 				break;
3140 #ifdef NUMA
3141 			/*
3142 			 * If the zone is striped we pick a new slab for every
3143 			 * N allocations.  Eliminating this conditional will
3144 			 * instead pick a new domain for each bucket rather
3145 			 * than stripe within each bucket.  The current option
3146 			 * produces more fragmentation and requires more cpu
3147 			 * time but yields better distribution.
3148 			 */
3149 			if ((zone->uz_flags & UMA_ZONE_NUMA) == 0 &&
3150 			    vm_ndomains > 1 && --stripe == 0)
3151 				break;
3152 #endif
3153 		}
3154 		/* Don't block if we allocated any successfully. */
3155 		flags &= ~M_WAITOK;
3156 		flags |= M_NOWAIT;
3157 	}
3158 	KEG_UNLOCK(keg);
3159 
3160 	return (i);
3161 }
3162 
3163 static uma_bucket_t
3164 zone_alloc_bucket(uma_zone_t zone, void *udata, int domain, int flags)
3165 {
3166 	uma_bucket_t bucket;
3167 	int maxbucket, cnt;
3168 
3169 	CTR1(KTR_UMA, "zone_alloc_bucket: domain %d", domain);
3170 
3171 	/* Avoid allocs targeting empty domains. */
3172 	if (domain != UMA_ANYDOMAIN && VM_DOMAIN_EMPTY(domain))
3173 		domain = UMA_ANYDOMAIN;
3174 
3175 	if (zone->uz_max_items > 0) {
3176 		if (zone->uz_items >= zone->uz_max_items)
3177 			return (NULL);
3178 		maxbucket = MIN(zone->uz_bucket_size,
3179 		    zone->uz_max_items - zone->uz_items);
3180 		zone->uz_items += maxbucket;
3181 	} else
3182 		maxbucket = zone->uz_bucket_size;
3183 	ZONE_UNLOCK(zone);
3184 
3185 	/* Don't wait for buckets, preserve caller's NOVM setting. */
3186 	bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM));
3187 	if (bucket == NULL) {
3188 		cnt = 0;
3189 		goto out;
3190 	}
3191 
3192 	bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket,
3193 	    MIN(maxbucket, bucket->ub_entries), domain, flags);
3194 
3195 	/*
3196 	 * Initialize the memory if necessary.
3197 	 */
3198 	if (bucket->ub_cnt != 0 && zone->uz_init != NULL) {
3199 		int i;
3200 
3201 		for (i = 0; i < bucket->ub_cnt; i++)
3202 			if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size,
3203 			    flags) != 0)
3204 				break;
3205 		/*
3206 		 * If we couldn't initialize the whole bucket, put the
3207 		 * rest back onto the freelist.
3208 		 */
3209 		if (i != bucket->ub_cnt) {
3210 			zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i],
3211 			    bucket->ub_cnt - i);
3212 #ifdef INVARIANTS
3213 			bzero(&bucket->ub_bucket[i],
3214 			    sizeof(void *) * (bucket->ub_cnt - i));
3215 #endif
3216 			bucket->ub_cnt = i;
3217 		}
3218 	}
3219 
3220 	cnt = bucket->ub_cnt;
3221 	if (bucket->ub_cnt == 0) {
3222 		bucket_free(zone, bucket, udata);
3223 		counter_u64_add(zone->uz_fails, 1);
3224 		bucket = NULL;
3225 	}
3226 out:
3227 	ZONE_LOCK(zone);
3228 	if (zone->uz_max_items > 0 && cnt < maxbucket) {
3229 		MPASS(zone->uz_items >= maxbucket - cnt);
3230 		zone->uz_items -= maxbucket - cnt;
3231 		if (zone->uz_sleepers > 0 &&
3232 		    (cnt == 0 ? zone->uz_items + 1 : zone->uz_items) <
3233 		    zone->uz_max_items)
3234 			wakeup_one(zone);
3235 	}
3236 
3237 	return (bucket);
3238 }
3239 
3240 /*
3241  * Allocates a single item from a zone.
3242  *
3243  * Arguments
3244  *	zone   The zone to alloc for.
3245  *	udata  The data to be passed to the constructor.
3246  *	domain The domain to allocate from or UMA_ANYDOMAIN.
3247  *	flags  M_WAITOK, M_NOWAIT, M_ZERO.
3248  *
3249  * Returns
3250  *	NULL if there is no memory and M_NOWAIT is set
3251  *	An item if successful
3252  */
3253 
3254 static void *
3255 zone_alloc_item(uma_zone_t zone, void *udata, int domain, int flags)
3256 {
3257 
3258 	ZONE_LOCK(zone);
3259 	return (zone_alloc_item_locked(zone, udata, domain, flags));
3260 }
3261 
3262 /*
3263  * Returns with zone unlocked.
3264  */
3265 static void *
3266 zone_alloc_item_locked(uma_zone_t zone, void *udata, int domain, int flags)
3267 {
3268 	void *item;
3269 
3270 	ZONE_LOCK_ASSERT(zone);
3271 
3272 	if (zone->uz_max_items > 0) {
3273 		if (zone->uz_items >= zone->uz_max_items) {
3274 			zone_log_warning(zone);
3275 			zone_maxaction(zone);
3276 			if (flags & M_NOWAIT) {
3277 				ZONE_UNLOCK(zone);
3278 				return (NULL);
3279 			}
3280 			zone->uz_sleeps++;
3281 			zone->uz_sleepers++;
3282 			while (zone->uz_items >= zone->uz_max_items)
3283 				mtx_sleep(zone, zone->uz_lockptr, PVM,
3284 				    "zonelimit", 0);
3285 			zone->uz_sleepers--;
3286 			if (zone->uz_sleepers > 0 &&
3287 			    zone->uz_items + 1 < zone->uz_max_items)
3288 				wakeup_one(zone);
3289 		}
3290 		zone->uz_items++;
3291 	}
3292 	ZONE_UNLOCK(zone);
3293 
3294 	/* Avoid allocs targeting empty domains. */
3295 	if (domain != UMA_ANYDOMAIN && VM_DOMAIN_EMPTY(domain))
3296 		domain = UMA_ANYDOMAIN;
3297 
3298 	if (zone->uz_import(zone->uz_arg, &item, 1, domain, flags) != 1)
3299 		goto fail_cnt;
3300 
3301 	/*
3302 	 * We have to call both the zone's init (not the keg's init)
3303 	 * and the zone's ctor.  This is because the item is going from
3304 	 * a keg slab directly to the user, and the user is expecting it
3305 	 * to be both zone-init'd as well as zone-ctor'd.
3306 	 */
3307 	if (zone->uz_init != NULL) {
3308 		if (zone->uz_init(item, zone->uz_size, flags) != 0) {
3309 			zone_free_item(zone, item, udata, SKIP_FINI | SKIP_CNT);
3310 			goto fail_cnt;
3311 		}
3312 	}
3313 	item = item_ctor(zone, udata, flags, item);
3314 	if (item == NULL)
3315 		goto fail;
3316 
3317 	counter_u64_add(zone->uz_allocs, 1);
3318 	CTR3(KTR_UMA, "zone_alloc_item item %p from %s(%p)", item,
3319 	    zone->uz_name, zone);
3320 
3321 	return (item);
3322 
3323 fail_cnt:
3324 	counter_u64_add(zone->uz_fails, 1);
3325 fail:
3326 	if (zone->uz_max_items > 0) {
3327 		ZONE_LOCK(zone);
3328 		/* XXX Decrement without wakeup */
3329 		zone->uz_items--;
3330 		ZONE_UNLOCK(zone);
3331 	}
3332 	CTR2(KTR_UMA, "zone_alloc_item failed from %s(%p)",
3333 	    zone->uz_name, zone);
3334 	return (NULL);
3335 }
3336 
3337 /* See uma.h */
3338 void
3339 uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
3340 {
3341 	uma_cache_t cache;
3342 	uma_bucket_t bucket;
3343 	int cpu, domain, itemdomain;
3344 
3345 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
3346 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
3347 
3348 	CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread,
3349 	    zone->uz_name);
3350 
3351 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
3352 	    ("uma_zfree_arg: called with spinlock or critical section held"));
3353 
3354         /* uma_zfree(..., NULL) does nothing, to match free(9). */
3355         if (item == NULL)
3356                 return;
3357 #ifdef DEBUG_MEMGUARD
3358 	if (is_memguard_addr(item)) {
3359 		if (zone->uz_dtor != NULL)
3360 			zone->uz_dtor(item, zone->uz_size, udata);
3361 		if (zone->uz_fini != NULL)
3362 			zone->uz_fini(item, zone->uz_size);
3363 		memguard_free(item);
3364 		return;
3365 	}
3366 #endif
3367 	item_dtor(zone, item, udata, SKIP_NONE);
3368 
3369 	/*
3370 	 * The race here is acceptable.  If we miss it we'll just have to wait
3371 	 * a little longer for the limits to be reset.
3372 	 */
3373 	if (zone->uz_sleepers > 0)
3374 		goto zfree_item;
3375 
3376 	/*
3377 	 * If possible, free to the per-CPU cache.  There are two
3378 	 * requirements for safe access to the per-CPU cache: (1) the thread
3379 	 * accessing the cache must not be preempted or yield during access,
3380 	 * and (2) the thread must not migrate CPUs without switching which
3381 	 * cache it accesses.  We rely on a critical section to prevent
3382 	 * preemption and migration.  We release the critical section in
3383 	 * order to acquire the zone mutex if we are unable to free to the
3384 	 * current cache; when we re-acquire the critical section, we must
3385 	 * detect and handle migration if it has occurred.
3386 	 */
3387 	domain = itemdomain = 0;
3388 	critical_enter();
3389 	do {
3390 		cpu = curcpu;
3391 		cache = &zone->uz_cpu[cpu];
3392 		bucket = cache->uc_allocbucket;
3393 #ifdef UMA_XDOMAIN
3394 		if ((zone->uz_flags & UMA_ZONE_NUMA) != 0) {
3395 			itemdomain = _vm_phys_domain(pmap_kextract((vm_offset_t)item));
3396 			domain = PCPU_GET(domain);
3397 		}
3398 		if ((zone->uz_flags & UMA_ZONE_NUMA) != 0 &&
3399 		    domain != itemdomain) {
3400 			bucket = cache->uc_crossbucket;
3401 		} else
3402 #endif
3403 
3404 		/*
3405 		 * Try to free into the allocbucket first to give LIFO ordering
3406 		 * for cache-hot data structures.  Spill over into the freebucket
3407 		 * if necessary.  Alloc will swap them if one runs dry.
3408 		 */
3409 		if (bucket == NULL || bucket->ub_cnt >= bucket->ub_entries)
3410 			bucket = cache->uc_freebucket;
3411 		if (__predict_true(bucket != NULL &&
3412 		    bucket->ub_cnt < bucket->ub_entries)) {
3413 			bucket_push(zone, cache, bucket, item);
3414 			critical_exit();
3415 			return;
3416 		}
3417 	} while (cache_free(zone, cache, udata, item, itemdomain));
3418 	critical_exit();
3419 
3420 	/*
3421 	 * If nothing else caught this, we'll just do an internal free.
3422 	 */
3423 zfree_item:
3424 	zone_free_item(zone, item, udata, SKIP_DTOR);
3425 }
3426 
3427 static void
3428 zone_free_bucket(uma_zone_t zone, uma_bucket_t bucket, void *udata,
3429     int domain, int itemdomain)
3430 {
3431 	uma_zone_domain_t zdom;
3432 
3433 #ifdef UMA_XDOMAIN
3434 	/*
3435 	 * Buckets coming from the wrong domain will be entirely for the
3436 	 * only other domain on two domain systems.  In this case we can
3437 	 * simply cache them.  Otherwise we need to sort them back to
3438 	 * correct domains by freeing the contents to the slab layer.
3439 	 */
3440 	if (domain != itemdomain && vm_ndomains > 2) {
3441 		CTR3(KTR_UMA,
3442 		    "uma_zfree: zone %s(%p) draining cross bucket %p",
3443 		    zone->uz_name, zone, bucket);
3444 		bucket_drain(zone, bucket);
3445 		bucket_free(zone, bucket, udata);
3446 		return;
3447 	}
3448 #endif
3449 	/*
3450 	 * Attempt to save the bucket in the zone's domain bucket cache.
3451 	 *
3452 	 * We bump the zone's desired bucket size when lock contention
3453 	 * indicates the cache size is insufficient to handle the working set.
3454 	 */
3455 	if (ZONE_TRYLOCK(zone) == 0) {
3456 		/* Record contention to size the buckets. */
3457 		ZONE_LOCK(zone);
3458 		if (zone->uz_bucket_size < zone->uz_bucket_size_max)
3459 			zone->uz_bucket_size++;
3460 	}
3461 
3462 	CTR3(KTR_UMA,
3463 	    "uma_zfree: zone %s(%p) putting bucket %p on free list",
3464 	    zone->uz_name, zone, bucket);
3465 	/* ub_cnt is pointing to the last free item */
3466 	KASSERT(bucket->ub_cnt == bucket->ub_entries,
3467 	    ("uma_zfree: Attempting to insert a partial bucket onto the full list."));
3468 	if (zone->uz_bkt_count >= zone->uz_bkt_max) {
3469 		ZONE_UNLOCK(zone);
3470 		bucket_drain(zone, bucket);
3471 		bucket_free(zone, bucket, udata);
3472 	} else {
3473 		zdom = &zone->uz_domain[itemdomain];
3474 		zone_put_bucket(zone, zdom, bucket, true);
3475 		ZONE_UNLOCK(zone);
3476 	}
3477 }
3478 
3479 /*
3480  * Populate a free or cross bucket for the current cpu cache.  Free any
3481  * existing full bucket either to the zone cache or back to the slab layer.
3482  *
3483  * Enters and returns in a critical section.  false return indicates that
3484  * we cannot satisfy this free in the cache layer.  true indicates that
3485  * the caller should retry.
3486  */
3487 static __noinline bool
3488 cache_free(uma_zone_t zone, uma_cache_t cache, void *udata, void *item,
3489     int itemdomain)
3490 {
3491 	uma_bucket_t bucket;
3492 	int cpu, domain;
3493 
3494 	CRITICAL_ASSERT(curthread);
3495 
3496 	if (zone->uz_bucket_size == 0 || bucketdisable)
3497 		return (false);
3498 
3499 	cpu = curcpu;
3500 	cache = &zone->uz_cpu[cpu];
3501 
3502 	/*
3503 	 * NUMA domains need to free to the correct zdom.  When XDOMAIN
3504 	 * is enabled this is the zdom of the item and the bucket may be
3505 	 * the cross bucket if they do not match.
3506 	 */
3507 	if ((zone->uz_flags & UMA_ZONE_NUMA) != 0)
3508 #ifdef UMA_XDOMAIN
3509 		domain = PCPU_GET(domain);
3510 #else
3511 		itemdomain = domain = PCPU_GET(domain);
3512 #endif
3513 	else
3514 		itemdomain = domain = 0;
3515 #ifdef UMA_XDOMAIN
3516 	if (domain != itemdomain) {
3517 		bucket = cache->uc_crossbucket;
3518 		cache->uc_crossbucket = NULL;
3519 		if (bucket != NULL)
3520 			atomic_add_64(&zone->uz_xdomain, bucket->ub_cnt);
3521 	} else
3522 #endif
3523 	{
3524 		bucket = cache->uc_freebucket;
3525 		cache->uc_freebucket = NULL;
3526 	}
3527 
3528 
3529 	/* We are no longer associated with this CPU. */
3530 	critical_exit();
3531 
3532 	if (bucket != NULL)
3533 		zone_free_bucket(zone, bucket, udata, domain, itemdomain);
3534 
3535 	bucket = bucket_alloc(zone, udata, M_NOWAIT);
3536 	CTR3(KTR_UMA, "uma_zfree: zone %s(%p) allocated bucket %p",
3537 	    zone->uz_name, zone, bucket);
3538 	critical_enter();
3539 	if (bucket == NULL)
3540 		return (false);
3541 	cpu = curcpu;
3542 	cache = &zone->uz_cpu[cpu];
3543 #ifdef UMA_XDOMAIN
3544 	/*
3545 	 * Check to see if we should be populating the cross bucket.  If it
3546 	 * is already populated we will fall through and attempt to populate
3547 	 * the free bucket.
3548 	 */
3549 	if ((zone->uz_flags & UMA_ZONE_NUMA) != 0) {
3550 		domain = PCPU_GET(domain);
3551 		if (domain != itemdomain && cache->uc_crossbucket == NULL) {
3552 			cache->uc_crossbucket = bucket;
3553 			return (true);
3554 		}
3555 	}
3556 #endif
3557 	/*
3558 	 * We may have lost the race to fill the bucket or switched CPUs.
3559 	 */
3560 	if (cache->uc_freebucket != NULL) {
3561 		critical_exit();
3562 		bucket_free(zone, bucket, udata);
3563 		critical_enter();
3564 	} else
3565 		cache->uc_freebucket = bucket;
3566 
3567 	return (true);
3568 }
3569 
3570 void
3571 uma_zfree_domain(uma_zone_t zone, void *item, void *udata)
3572 {
3573 
3574 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
3575 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
3576 
3577 	CTR2(KTR_UMA, "uma_zfree_domain thread %x zone %s", curthread,
3578 	    zone->uz_name);
3579 
3580 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
3581 	    ("uma_zfree_domain: called with spinlock or critical section held"));
3582 
3583         /* uma_zfree(..., NULL) does nothing, to match free(9). */
3584         if (item == NULL)
3585                 return;
3586 	zone_free_item(zone, item, udata, SKIP_NONE);
3587 }
3588 
3589 static void
3590 slab_free_item(uma_zone_t zone, uma_slab_t slab, void *item)
3591 {
3592 	uma_keg_t keg;
3593 	uma_domain_t dom;
3594 	uint8_t freei;
3595 
3596 	keg = zone->uz_keg;
3597 	MPASS(zone->uz_lockptr == &keg->uk_lock);
3598 	KEG_LOCK_ASSERT(keg);
3599 
3600 	dom = &keg->uk_domain[slab->us_domain];
3601 
3602 	/* Do we need to remove from any lists? */
3603 	if (slab->us_freecount+1 == keg->uk_ipers) {
3604 		LIST_REMOVE(slab, us_link);
3605 		LIST_INSERT_HEAD(&dom->ud_free_slab, slab, us_link);
3606 	} else if (slab->us_freecount == 0) {
3607 		LIST_REMOVE(slab, us_link);
3608 		LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
3609 	}
3610 
3611 	/* Slab management. */
3612 	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
3613 	BIT_SET(keg->uk_ipers, freei, &slab->us_free);
3614 	slab->us_freecount++;
3615 
3616 	/* Keg statistics. */
3617 	keg->uk_free++;
3618 }
3619 
3620 static void
3621 zone_release(void *arg, void **bucket, int cnt)
3622 {
3623 	uma_zone_t zone;
3624 	void *item;
3625 	uma_slab_t slab;
3626 	uma_keg_t keg;
3627 	uint8_t *mem;
3628 	int i;
3629 
3630 	zone = arg;
3631 	keg = zone->uz_keg;
3632 	KEG_LOCK(keg);
3633 	for (i = 0; i < cnt; i++) {
3634 		item = bucket[i];
3635 		if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) {
3636 			mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
3637 			if (zone->uz_flags & UMA_ZONE_HASH) {
3638 				slab = hash_sfind(&keg->uk_hash, mem);
3639 			} else {
3640 				mem += keg->uk_pgoff;
3641 				slab = (uma_slab_t)mem;
3642 			}
3643 		} else
3644 			slab = vtoslab((vm_offset_t)item);
3645 		slab_free_item(zone, slab, item);
3646 	}
3647 	KEG_UNLOCK(keg);
3648 }
3649 
3650 /*
3651  * Frees a single item to any zone.
3652  *
3653  * Arguments:
3654  *	zone   The zone to free to
3655  *	item   The item we're freeing
3656  *	udata  User supplied data for the dtor
3657  *	skip   Skip dtors and finis
3658  */
3659 static void
3660 zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip)
3661 {
3662 
3663 	item_dtor(zone, item, udata, skip);
3664 
3665 	if (skip < SKIP_FINI && zone->uz_fini)
3666 		zone->uz_fini(item, zone->uz_size);
3667 
3668 	zone->uz_release(zone->uz_arg, &item, 1);
3669 
3670 	if (skip & SKIP_CNT)
3671 		return;
3672 
3673 	counter_u64_add(zone->uz_frees, 1);
3674 
3675 	if (zone->uz_max_items > 0) {
3676 		ZONE_LOCK(zone);
3677 		zone->uz_items--;
3678 		if (zone->uz_sleepers > 0 &&
3679 		    zone->uz_items < zone->uz_max_items)
3680 			wakeup_one(zone);
3681 		ZONE_UNLOCK(zone);
3682 	}
3683 }
3684 
3685 /* See uma.h */
3686 int
3687 uma_zone_set_max(uma_zone_t zone, int nitems)
3688 {
3689 	struct uma_bucket_zone *ubz;
3690 	int count;
3691 
3692 	ZONE_LOCK(zone);
3693 	ubz = bucket_zone_max(zone, nitems);
3694 	count = ubz != NULL ? ubz->ubz_entries : 0;
3695 	zone->uz_bucket_size_max = zone->uz_bucket_size = count;
3696 	if (zone->uz_bucket_size_min > zone->uz_bucket_size_max)
3697 		zone->uz_bucket_size_min = zone->uz_bucket_size_max;
3698 	zone->uz_max_items = nitems;
3699 	ZONE_UNLOCK(zone);
3700 
3701 	return (nitems);
3702 }
3703 
3704 /* See uma.h */
3705 void
3706 uma_zone_set_maxcache(uma_zone_t zone, int nitems)
3707 {
3708 	struct uma_bucket_zone *ubz;
3709 	int bpcpu;
3710 
3711 	ZONE_LOCK(zone);
3712 	ubz = bucket_zone_max(zone, nitems);
3713 	if (ubz != NULL) {
3714 		bpcpu = 2;
3715 #ifdef UMA_XDOMAIN
3716 		if ((zone->uz_flags & UMA_ZONE_NUMA) != 0)
3717 			/* Count the cross-domain bucket. */
3718 			bpcpu++;
3719 #endif
3720 		nitems -= ubz->ubz_entries * bpcpu * mp_ncpus;
3721 		zone->uz_bucket_size_max = ubz->ubz_entries;
3722 	} else {
3723 		zone->uz_bucket_size_max = zone->uz_bucket_size = 0;
3724 	}
3725 	if (zone->uz_bucket_size_min > zone->uz_bucket_size_max)
3726 		zone->uz_bucket_size_min = zone->uz_bucket_size_max;
3727 	zone->uz_bkt_max = nitems;
3728 	ZONE_UNLOCK(zone);
3729 }
3730 
3731 /* See uma.h */
3732 int
3733 uma_zone_get_max(uma_zone_t zone)
3734 {
3735 	int nitems;
3736 
3737 	ZONE_LOCK(zone);
3738 	nitems = zone->uz_max_items;
3739 	ZONE_UNLOCK(zone);
3740 
3741 	return (nitems);
3742 }
3743 
3744 /* See uma.h */
3745 void
3746 uma_zone_set_warning(uma_zone_t zone, const char *warning)
3747 {
3748 
3749 	ZONE_LOCK(zone);
3750 	zone->uz_warning = warning;
3751 	ZONE_UNLOCK(zone);
3752 }
3753 
3754 /* See uma.h */
3755 void
3756 uma_zone_set_maxaction(uma_zone_t zone, uma_maxaction_t maxaction)
3757 {
3758 
3759 	ZONE_LOCK(zone);
3760 	TASK_INIT(&zone->uz_maxaction, 0, (task_fn_t *)maxaction, zone);
3761 	ZONE_UNLOCK(zone);
3762 }
3763 
3764 /* See uma.h */
3765 int
3766 uma_zone_get_cur(uma_zone_t zone)
3767 {
3768 	int64_t nitems;
3769 	u_int i;
3770 
3771 	ZONE_LOCK(zone);
3772 	nitems = counter_u64_fetch(zone->uz_allocs) -
3773 	    counter_u64_fetch(zone->uz_frees);
3774 	if ((zone->uz_flags & UMA_ZFLAG_INTERNAL) == 0) {
3775 		CPU_FOREACH(i) {
3776 			/*
3777 			 * See the comment in uma_vm_zone_stats() regarding
3778 			 * the safety of accessing the per-cpu caches. With
3779 			 * the zone lock held, it is safe, but can potentially
3780 			 * result in stale data.
3781 			 */
3782 			nitems += zone->uz_cpu[i].uc_allocs -
3783 			    zone->uz_cpu[i].uc_frees;
3784 		}
3785 	}
3786 	ZONE_UNLOCK(zone);
3787 
3788 	return (nitems < 0 ? 0 : nitems);
3789 }
3790 
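/*
 * Total number of allocations satisfied by this zone, including those
 * still recorded only in the per-CPU caches.
 */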
3791 static uint64_t
3792 uma_zone_get_allocs(uma_zone_t zone)
3793 {
3794 	uint64_t nitems;
3795 	u_int i;
3796 
3797 	ZONE_LOCK(zone);
3798 	nitems = counter_u64_fetch(zone->uz_allocs);
3799 	if ((zone->uz_flags & UMA_ZFLAG_INTERNAL) == 0) {
3800 		CPU_FOREACH(i) {
3801 			/*
3802 			 * See the comment in uma_vm_zone_stats() regarding
3803 			 * the safety of accessing the per-cpu caches. With
3804 			 * the zone lock held, it is safe, but can potentially
3805 			 * result in stale data.
3806 			 */
3807 			nitems += zone->uz_cpu[i].uc_allocs;
3808 		}
3809 	}
3810 	ZONE_UNLOCK(zone);
3811 
3812 	return (nitems);
3813 }
3814 
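/*
 * Total number of frees performed on this zone, including those still
 * recorded only in the per-CPU caches.
 */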
3815 static uint64_t
3816 uma_zone_get_frees(uma_zone_t zone)
3817 {
3818 	uint64_t nitems;
3819 	u_int i;
3820 
3821 	ZONE_LOCK(zone);
3822 	nitems = counter_u64_fetch(zone->uz_frees);
3823 	if ((zone->uz_flags & UMA_ZFLAG_INTERNAL) == 0) {
3824 		CPU_FOREACH(i) {
3825 			/*
3826 			 * See the comment in uma_vm_zone_stats() regarding
3827 			 * the safety of accessing the per-cpu caches. With
3828 			 * the zone lock held, it is safe, but can potentially
3829 			 * result in stale data.
3830 			 */
3831 			nitems += zone->uz_cpu[i].uc_frees;
3832 		}
3833 	}
3834 	ZONE_UNLOCK(zone);
3835 
3836 	return (nitems);
3837 }
3838 
3839 /* See uma.h */
3840 void
3841 uma_zone_set_init(uma_zone_t zone, uma_init uminit)
3842 {
3843 	uma_keg_t keg;
3844 
3845 	KEG_GET(zone, keg);
3846 	KEG_LOCK(keg);
3847 	KASSERT(keg->uk_pages == 0,
3848 	    ("uma_zone_set_init on non-empty keg"));
3849 	keg->uk_init = uminit;
3850 	KEG_UNLOCK(keg);
3851 }
3852 
3853 /* See uma.h */
3854 void
3855 uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
3856 {
3857 	uma_keg_t keg;
3858 
3859 	KEG_GET(zone, keg);
3860 	KEG_LOCK(keg);
3861 	KASSERT(keg->uk_pages == 0,
3862 	    ("uma_zone_set_fini on non-empty keg"));
3863 	keg->uk_fini = fini;
3864 	KEG_UNLOCK(keg);
3865 }
3866 
3867 /* See uma.h */
3868 void
3869 uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
3870 {
3871 
3872 	ZONE_LOCK(zone);
3873 	KASSERT(zone->uz_keg->uk_pages == 0,
3874 	    ("uma_zone_set_zinit on non-empty keg"));
3875 	zone->uz_init = zinit;
3876 	ZONE_UNLOCK(zone);
3877 }
3878 
3879 /* See uma.h */
3880 void
3881 uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
3882 {
3883 
3884 	ZONE_LOCK(zone);
3885 	KASSERT(zone->uz_keg->uk_pages == 0,
3886 	    ("uma_zone_set_zfini on non-empty keg"));
3887 	zone->uz_fini = zfini;
3888 	ZONE_UNLOCK(zone);
3889 }
3890 
3891 /* See uma.h */
3892 /* XXX uk_freef is not actually used with the zone locked */
3893 void
3894 uma_zone_set_freef(uma_zone_t zone, uma_free freef)
3895 {
3896 	uma_keg_t keg;
3897 
3898 	KEG_GET(zone, keg);
3899 	KASSERT(keg != NULL, ("uma_zone_set_freef: Invalid zone type"));
3900 	KEG_LOCK(keg);
3901 	keg->uk_freef = freef;
3902 	KEG_UNLOCK(keg);
3903 }
3904 
3905 /* See uma.h */
3906 /* XXX uk_allocf is not actually used with the zone locked */
3907 void
3908 uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
3909 {
3910 	uma_keg_t keg;
3911 
3912 	KEG_GET(zone, keg);
3913 	KEG_LOCK(keg);
3914 	keg->uk_allocf = allocf;
3915 	KEG_UNLOCK(keg);
3916 }
3917 
3918 /* See uma.h */
3919 void
3920 uma_zone_reserve(uma_zone_t zone, int items)
3921 {
3922 	uma_keg_t keg;
3923 
3924 	KEG_GET(zone, keg);
3925 	KEG_LOCK(keg);
3926 	keg->uk_reserve = items;
3927 	KEG_UNLOCK(keg);
3928 }
3929 
3930 /* See uma.h */
3931 int
3932 uma_zone_reserve_kva(uma_zone_t zone, int count)
3933 {
3934 	uma_keg_t keg;
3935 	vm_offset_t kva;
3936 	u_int pages;
3937 
3938 	KEG_GET(zone, keg);
3939 
3940 	pages = count / keg->uk_ipers;
3941 	if (pages * keg->uk_ipers < count)
3942 		pages++;
3943 	pages *= keg->uk_ppera;
3944 
3945 #ifdef UMA_MD_SMALL_ALLOC
3946 	if (keg->uk_ppera > 1) {
3947 #else
3948 	if (1) {
3949 #endif
3950 		kva = kva_alloc((vm_size_t)pages * PAGE_SIZE);
3951 		if (kva == 0)
3952 			return (0);
3953 	} else
3954 		kva = 0;
3955 
3956 	ZONE_LOCK(zone);
3957 	MPASS(keg->uk_kva == 0);
3958 	keg->uk_kva = kva;
3959 	keg->uk_offset = 0;
3960 	zone->uz_max_items = pages * keg->uk_ipers;
3961 #ifdef UMA_MD_SMALL_ALLOC
3962 	keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc;
3963 #else
3964 	keg->uk_allocf = noobj_alloc;
3965 #endif
3966 	keg->uk_flags |= UMA_ZONE_NOFREE;
3967 	ZONE_UNLOCK(zone);
3968 
3969 	return (1);
3970 }
3971 
3972 /* See uma.h */
3973 void
3974 uma_prealloc(uma_zone_t zone, int items)
3975 {
3976 	struct vm_domainset_iter di;
3977 	uma_domain_t dom;
3978 	uma_slab_t slab;
3979 	uma_keg_t keg;
3980 	int aflags, domain, slabs;
3981 
3982 	KEG_GET(zone, keg);
3983 	KEG_LOCK(keg);
3984 	slabs = items / keg->uk_ipers;
3985 	if (slabs * keg->uk_ipers < items)
3986 		slabs++;
3987 	while (slabs-- > 0) {
3988 		aflags = M_NOWAIT;
3989 		vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain,
3990 		    &aflags);
3991 		for (;;) {
3992 			slab = keg_alloc_slab(keg, zone, domain, M_WAITOK,
3993 			    aflags);
3994 			if (slab != NULL) {
3995 				dom = &keg->uk_domain[slab->us_domain];
3996 				LIST_INSERT_HEAD(&dom->ud_free_slab, slab,
3997 				    us_link);
3998 				break;
3999 			}
4000 			KEG_LOCK(keg);
4001 			if (vm_domainset_iter_policy(&di, &domain) != 0) {
4002 				KEG_UNLOCK(keg);
4003 				vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask);
4004 				KEG_LOCK(keg);
4005 			}
4006 		}
4007 	}
4008 	KEG_UNLOCK(keg);
4009 }
4010 
4011 /* See uma.h */
4012 void
4013 uma_reclaim(int req)
4014 {
4015 
4016 	CTR0(KTR_UMA, "UMA: vm asked us to release pages!");
4017 	sx_xlock(&uma_reclaim_lock);
4018 	bucket_enable();
4019 
4020 	switch (req) {
4021 	case UMA_RECLAIM_TRIM:
4022 		zone_foreach(zone_trim, NULL);
4023 		break;
4024 	case UMA_RECLAIM_DRAIN:
4025 	case UMA_RECLAIM_DRAIN_CPU:
4026 		zone_foreach(zone_drain, NULL);
4027 		if (req == UMA_RECLAIM_DRAIN_CPU) {
4028 			pcpu_cache_drain_safe(NULL);
4029 			zone_foreach(zone_drain, NULL);
4030 		}
4031 		break;
4032 	default:
4033 		panic("unhandled reclamation request %d", req);
4034 	}
4035 
4036 	/*
4037 	 * Some slabs may have been freed, but this zone was visited early;
4038 	 * visit it again so that we can free pages that are empty once the
4039 	 * other zones are drained.  We have to do the same for buckets.
4040 	 */
4041 	zone_drain(slabzone, NULL);
4042 	bucket_zone_drain();
4043 	sx_xunlock(&uma_reclaim_lock);
4044 }
4045 
4046 static volatile int uma_reclaim_needed;
4047 
4048 void
4049 uma_reclaim_wakeup(void)
4050 {
4051 
4052 	if (atomic_fetchadd_int(&uma_reclaim_needed, 1) == 0)
4053 		wakeup(uma_reclaim);
4054 }
4055 
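/*
 * Worker loop servicing uma_reclaim_wakeup() requests: drain the per-CPU
 * caches and every zone, but no more than once per second.
 */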
4056 void
4057 uma_reclaim_worker(void *arg __unused)
4058 {
4059 
4060 	for (;;) {
4061 		sx_xlock(&uma_reclaim_lock);
4062 		while (atomic_load_int(&uma_reclaim_needed) == 0)
4063 			sx_sleep(uma_reclaim, &uma_reclaim_lock, PVM, "umarcl",
4064 			    hz);
4065 		sx_xunlock(&uma_reclaim_lock);
4066 		EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_KMEM);
4067 		uma_reclaim(UMA_RECLAIM_DRAIN_CPU);
4068 		atomic_store_int(&uma_reclaim_needed, 0);
4069 		/* Don't fire more than once per second. */
4070 		pause("umarclslp", hz);
4071 	}
4072 }
4073 
4074 /* See uma.h */
4075 void
4076 uma_zone_reclaim(uma_zone_t zone, int req)
4077 {
4078 
4079 	switch (req) {
4080 	case UMA_RECLAIM_TRIM:
4081 		zone_trim(zone, NULL);
4082 		break;
4083 	case UMA_RECLAIM_DRAIN:
4084 		zone_drain(zone, NULL);
4085 		break;
4086 	case UMA_RECLAIM_DRAIN_CPU:
4087 		pcpu_cache_drain_safe(zone);
4088 		zone_drain(zone, NULL);
4089 		break;
4090 	default:
4091 		panic("unhandled reclamation request %d", req);
4092 	}
4093 }
4094 
4095 /* See uma.h */
4096 int
4097 uma_zone_exhausted(uma_zone_t zone)
4098 {
4099 	int full;
4100 
4101 	ZONE_LOCK(zone);
4102 	full = zone->uz_sleepers > 0;
4103 	ZONE_UNLOCK(zone);
4104 	return (full);
4105 }
4106 
4107 int
4108 uma_zone_exhausted_nolock(uma_zone_t zone)
4109 {
4110 	return (zone->uz_sleepers > 0);
4111 }
4112 
4113 static void
4114 uma_zero_item(void *item, uma_zone_t zone)
4115 {
4116 
4117 	bzero(item, zone->uz_size);
4118 }
4119 
4120 unsigned long
4121 uma_limit(void)
4122 {
4123 
4124 	return (uma_kmem_limit);
4125 }
4126 
4127 void
4128 uma_set_limit(unsigned long limit)
4129 {
4130 
4131 	uma_kmem_limit = limit;
4132 }
4133 
4134 unsigned long
4135 uma_size(void)
4136 {
4137 
4138 	return (atomic_load_long(&uma_kmem_total));
4139 }
4140 
4141 long
4142 uma_avail(void)
4143 {
4144 
4145 	return (uma_kmem_limit - uma_size());
4146 }
4147 
4148 #ifdef DDB
4149 /*
4150  * Generate statistics across both the zone and its per-cpu caches.  Return
4151  * the desired statistic through each pointer argument that is non-NULL.
4152  *
4153  * Note: does not update the zone statistics, as it can't safely clear the
4154  * per-CPU cache statistic.
4155  *
4156  * XXXRW: Following the uc_allocbucket and uc_freebucket pointers here isn't
4157  * safe from off-CPU; we should modify the caches to track this information
4158  * directly so that we don't have to.
4159  */
4160 static void
4161 uma_zone_sumstat(uma_zone_t z, long *cachefreep, uint64_t *allocsp,
4162     uint64_t *freesp, uint64_t *sleepsp, uint64_t *xdomainp)
4163 {
4164 	uma_cache_t cache;
4165 	uint64_t allocs, frees, sleeps, xdomain;
4166 	int cachefree, cpu;
4167 
4168 	allocs = frees = sleeps = xdomain = 0;
4169 	cachefree = 0;
4170 	CPU_FOREACH(cpu) {
4171 		cache = &z->uz_cpu[cpu];
4172 		if (cache->uc_allocbucket != NULL)
4173 			cachefree += cache->uc_allocbucket->ub_cnt;
4174 		if (cache->uc_freebucket != NULL)
4175 			cachefree += cache->uc_freebucket->ub_cnt;
4176 		if (cache->uc_crossbucket != NULL) {
4177 			xdomain += cache->uc_crossbucket->ub_cnt;
4178 			cachefree += cache->uc_crossbucket->ub_cnt;
4179 		}
4180 		allocs += cache->uc_allocs;
4181 		frees += cache->uc_frees;
4182 	}
4183 	allocs += counter_u64_fetch(z->uz_allocs);
4184 	frees += counter_u64_fetch(z->uz_frees);
4185 	sleeps += z->uz_sleeps;
4186 	xdomain += z->uz_xdomain;
4187 	if (cachefreep != NULL)
4188 		*cachefreep = cachefree;
4189 	if (allocsp != NULL)
4190 		*allocsp = allocs;
4191 	if (freesp != NULL)
4192 		*freesp = frees;
4193 	if (sleepsp != NULL)
4194 		*sleepsp = sleeps;
4195 	if (xdomainp != NULL)
4196 		*xdomainp = xdomain;
4197 }
4198 #endif /* DDB */
4199 
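/*
 * Report the total number of UMA zones: every zone attached to a keg plus
 * the keg-less cache zones.
 */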
4200 static int
4201 sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS)
4202 {
4203 	uma_keg_t kz;
4204 	uma_zone_t z;
4205 	int count;
4206 
4207 	count = 0;
4208 	rw_rlock(&uma_rwlock);
4209 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
4210 		LIST_FOREACH(z, &kz->uk_zones, uz_link)
4211 			count++;
4212 	}
4213 	LIST_FOREACH(z, &uma_cachezones, uz_link)
4214 		count++;
4215 
4216 	rw_runlock(&uma_rwlock);
4217 	return (sysctl_handle_int(oidp, &count, 0, req));
4218 }
4219 
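/*
 * Fill in the zone-wide statistics for a single zone and gather the
 * per-CPU cache statistics into the caller-supplied ups array.
 */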
4220 static void
4221 uma_vm_zone_stats(struct uma_type_header *uth, uma_zone_t z, struct sbuf *sbuf,
4222     struct uma_percpu_stat *ups, bool internal)
4223 {
4224 	uma_zone_domain_t zdom;
4225 	uma_bucket_t bucket;
4226 	uma_cache_t cache;
4227 	int i;
4228 
4229 
4230 	for (i = 0; i < vm_ndomains; i++) {
4231 		zdom = &z->uz_domain[i];
4232 		uth->uth_zone_free += zdom->uzd_nitems;
4233 	}
4234 	uth->uth_allocs = counter_u64_fetch(z->uz_allocs);
4235 	uth->uth_frees = counter_u64_fetch(z->uz_frees);
4236 	uth->uth_fails = counter_u64_fetch(z->uz_fails);
4237 	uth->uth_sleeps = z->uz_sleeps;
4238 	uth->uth_xdomain = z->uz_xdomain;
4239 
4240 	/*
4241 	 * While it is not normally safe to access the cache bucket pointers
4242 	 * while not on the CPU that owns the cache, we only allow the pointers
4243 	 * to be exchanged without the zone lock held, not invalidated, so
4244 	 * accept the possible race associated with bucket exchange during
4245 	 * monitoring.  Use atomic_load_ptr() to ensure that the bucket pointers
4246 	 * are loaded only once.
4247 	 */
4248 	for (i = 0; i < mp_maxid + 1; i++) {
4249 		bzero(&ups[i], sizeof(*ups));
4250 		if (internal || CPU_ABSENT(i))
4251 			continue;
4252 		cache = &z->uz_cpu[i];
4253 		bucket = (uma_bucket_t)atomic_load_ptr(&cache->uc_allocbucket);
4254 		if (bucket != NULL)
4255 			ups[i].ups_cache_free += bucket->ub_cnt;
4256 		bucket = (uma_bucket_t)atomic_load_ptr(&cache->uc_freebucket);
4257 		if (bucket != NULL)
4258 			ups[i].ups_cache_free += bucket->ub_cnt;
4259 		bucket = (uma_bucket_t)atomic_load_ptr(&cache->uc_crossbucket);
4260 		if (bucket != NULL)
4261 			ups[i].ups_cache_free += bucket->ub_cnt;
4262 		ups[i].ups_allocs = cache->uc_allocs;
4263 		ups[i].ups_frees = cache->uc_frees;
4264 	}
4265 }
4266 
4267 static int
4268 sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
4269 {
4270 	struct uma_stream_header ush;
4271 	struct uma_type_header uth;
4272 	struct uma_percpu_stat *ups;
4273 	struct sbuf sbuf;
4274 	uma_keg_t kz;
4275 	uma_zone_t z;
4276 	int count, error, i;
4277 
4278 	error = sysctl_wire_old_buffer(req, 0);
4279 	if (error != 0)
4280 		return (error);
4281 	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
4282 	sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL);
4283 	ups = malloc((mp_maxid + 1) * sizeof(*ups), M_TEMP, M_WAITOK);
4284 
4285 	count = 0;
4286 	rw_rlock(&uma_rwlock);
4287 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
4288 		LIST_FOREACH(z, &kz->uk_zones, uz_link)
4289 			count++;
4290 	}
4291 
4292 	LIST_FOREACH(z, &uma_cachezones, uz_link)
4293 		count++;
4294 
4295 	/*
4296 	 * Insert stream header.
4297 	 */
4298 	bzero(&ush, sizeof(ush));
4299 	ush.ush_version = UMA_STREAM_VERSION;
4300 	ush.ush_maxcpus = (mp_maxid + 1);
4301 	ush.ush_count = count;
4302 	(void)sbuf_bcat(&sbuf, &ush, sizeof(ush));
4303 
4304 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
4305 		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
4306 			bzero(&uth, sizeof(uth));
4307 			ZONE_LOCK(z);
4308 			strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
4309 			uth.uth_align = kz->uk_align;
4310 			uth.uth_size = kz->uk_size;
4311 			uth.uth_rsize = kz->uk_rsize;
4312 			if (z->uz_max_items > 0)
4313 				uth.uth_pages = (z->uz_items / kz->uk_ipers) *
4314 					kz->uk_ppera;
4315 			else
4316 				uth.uth_pages = kz->uk_pages;
4317 			uth.uth_maxpages = (z->uz_max_items / kz->uk_ipers) *
4318 			    kz->uk_ppera;
4319 			uth.uth_limit = z->uz_max_items;
4320 			uth.uth_keg_free = z->uz_keg->uk_free;
4321 
4322 			/*
4323 			 * A zone is secondary if it is not the first entry
4324 			 * on the keg's zone list.
4325 			 */
4326 			if ((z->uz_flags & UMA_ZONE_SECONDARY) &&
4327 			    (LIST_FIRST(&kz->uk_zones) != z))
4328 				uth.uth_zone_flags = UTH_ZONE_SECONDARY;
4329 			uma_vm_zone_stats(&uth, z, &sbuf, ups,
4330 			    kz->uk_flags & UMA_ZFLAG_INTERNAL);
4331 			ZONE_UNLOCK(z);
4332 			(void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
4333 			for (i = 0; i < mp_maxid + 1; i++)
4334 				(void)sbuf_bcat(&sbuf, &ups[i], sizeof(ups[i]));
4335 		}
4336 	}
4337 	LIST_FOREACH(z, &uma_cachezones, uz_link) {
4338 		bzero(&uth, sizeof(uth));
4339 		ZONE_LOCK(z);
4340 		strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
4341 		uth.uth_size = z->uz_size;
4342 		uma_vm_zone_stats(&uth, z, &sbuf, ups, false);
4343 		ZONE_UNLOCK(z);
4344 		(void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
4345 		for (i = 0; i < mp_maxid + 1; i++)
4346 			(void)sbuf_bcat(&sbuf, &ups[i], sizeof(ups[i]));
4347 	}
4348 
4349 	rw_runlock(&uma_rwlock);
4350 	error = sbuf_finish(&sbuf);
4351 	sbuf_delete(&sbuf);
4352 	free(ups, M_TEMP);
4353 	return (error);
4354 }
4355 
4356 int
4357 sysctl_handle_uma_zone_max(SYSCTL_HANDLER_ARGS)
4358 {
4359 	uma_zone_t zone = *(uma_zone_t *)arg1;
4360 	int error, max;
4361 
4362 	max = uma_zone_get_max(zone);
4363 	error = sysctl_handle_int(oidp, &max, 0, req);
4364 	if (error || !req->newptr)
4365 		return (error);
4366 
4367 	uma_zone_set_max(zone, max);
4368 
4369 	return (0);
4370 }
4371 
4372 int
4373 sysctl_handle_uma_zone_cur(SYSCTL_HANDLER_ARGS)
4374 {
4375 	uma_zone_t zone;
4376 	int cur;
4377 
4378 	/*
4379 	 * Some callers want to add sysctls for global zones that
4380 	 * may not yet exist, so they pass a pointer to a pointer.
4381 	 */
4382 	if (arg2 == 0)
4383 		zone = *(uma_zone_t *)arg1;
4384 	else
4385 		zone = arg1;
4386 	cur = uma_zone_get_cur(zone);
4387 	return (sysctl_handle_int(oidp, &cur, 0, req));
4388 }
4389 
4390 static int
4391 sysctl_handle_uma_zone_allocs(SYSCTL_HANDLER_ARGS)
4392 {
4393 	uma_zone_t zone = arg1;
4394 	uint64_t cur;
4395 
4396 	cur = uma_zone_get_allocs(zone);
4397 	return (sysctl_handle_64(oidp, &cur, 0, req));
4398 }
4399 
4400 static int
4401 sysctl_handle_uma_zone_frees(SYSCTL_HANDLER_ARGS)
4402 {
4403 	uma_zone_t zone = arg1;
4404 	uint64_t cur;
4405 
4406 	cur = uma_zone_get_frees(zone);
4407 	return (sysctl_handle_64(oidp, &cur, 0, req));
4408 }
4409 
4410 #ifdef INVARIANTS
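/*
 * Find the slab backing an item for the debugging checks below.  Zones
 * that use their own lock rather than a keg lock have no slabs, so NULL
 * is returned for them.
 */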
4411 static uma_slab_t
4412 uma_dbg_getslab(uma_zone_t zone, void *item)
4413 {
4414 	uma_slab_t slab;
4415 	uma_keg_t keg;
4416 	uint8_t *mem;
4417 
4418 	mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
4419 	if (zone->uz_flags & UMA_ZONE_VTOSLAB) {
4420 		slab = vtoslab((vm_offset_t)mem);
4421 	} else {
4422 		/*
4423 		 * It is safe to return the slab here even though the
4424 		 * zone is unlocked because the item's allocation state
4425 		 * essentially holds a reference.
4426 		 */
4427 		if (zone->uz_lockptr == &zone->uz_lock)
4428 			return (NULL);
4429 		ZONE_LOCK(zone);
4430 		keg = zone->uz_keg;
4431 		if (keg->uk_flags & UMA_ZONE_HASH)
4432 			slab = hash_sfind(&keg->uk_hash, mem);
4433 		else
4434 			slab = (uma_slab_t)(mem + keg->uk_pgoff);
4435 		ZONE_UNLOCK(zone);
4436 	}
4437 
4438 	return (slab);
4439 }
4440 
4441 static bool
4442 uma_dbg_zskip(uma_zone_t zone, void *mem)
4443 {
4444 
4445 	if (zone->uz_lockptr == &zone->uz_lock)
4446 		return (true);
4447 
4448 	return (uma_dbg_kskip(zone->uz_keg, mem));
4449 }
4450 
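/*
 * Decide whether the expensive debugging checks should be skipped for an
 * item.  Only one out of every dbg_divisor items is checked; a divisor of
 * zero disables the checks and a divisor of one checks every item.
 */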
4451 static bool
4452 uma_dbg_kskip(uma_keg_t keg, void *mem)
4453 {
4454 	uintptr_t idx;
4455 
4456 	if (dbg_divisor == 0)
4457 		return (true);
4458 
4459 	if (dbg_divisor == 1)
4460 		return (false);
4461 
4462 	idx = (uintptr_t)mem >> PAGE_SHIFT;
4463 	if (keg->uk_ipers > 1) {
4464 		idx *= keg->uk_ipers;
4465 		idx += ((uintptr_t)mem & PAGE_MASK) / keg->uk_rsize;
4466 	}
4467 
4468 	if ((idx / dbg_divisor) * dbg_divisor != idx) {
4469 		counter_u64_add(uma_skip_cnt, 1);
4470 		return (true);
4471 	}
4472 	counter_u64_add(uma_dbg_cnt, 1);
4473 
4474 	return (false);
4475 }
4476 
4477 /*
4478  * Set up the slab's freei data such that uma_dbg_free can function.
4479  * Panics on a duplicate allocation of the same item.
4480  */
4481 static void
4482 uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item)
4483 {
4484 	uma_keg_t keg;
4485 	int freei;
4486 
4487 	if (slab == NULL) {
4488 		slab = uma_dbg_getslab(zone, item);
4489 		if (slab == NULL)
4490 			panic("uma: item %p did not belong to zone %s\n",
4491 			    item, zone->uz_name);
4492 	}
4493 	keg = zone->uz_keg;
4494 	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
4495 
4496 	if (BIT_ISSET(SLAB_MAX_SETSIZE, freei, &slab->us_debugfree))
4497 		panic("Duplicate alloc of %p from zone %p(%s) slab %p(%d)\n",
4498 		    item, zone, zone->uz_name, slab, freei);
4499 	BIT_SET_ATOMIC(SLAB_MAX_SETSIZE, freei, &slab->us_debugfree);
4500 
4501 	return;
4502 }
4503 
4504 /*
4505  * Verifies freed addresses.  Checks for alignment, valid slab membership
4506  * and duplicate frees.
4507  * Panics if any check fails.
4508  */
4509 static void
4510 uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
4511 {
4512 	uma_keg_t keg;
4513 	int freei;
4514 
4515 	if (slab == NULL) {
4516 		slab = uma_dbg_getslab(zone, item);
4517 		if (slab == NULL)
4518 			panic("uma: Freed item %p did not belong to zone %s\n",
4519 			    item, zone->uz_name);
4520 	}
4521 	keg = zone->uz_keg;
4522 	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
4523 
4524 	if (freei >= keg->uk_ipers)
4525 		panic("Invalid free of %p from zone %p(%s) slab %p(%d)\n",
4526 		    item, zone, zone->uz_name, slab, freei);
4527 
4528 	if (((freei * keg->uk_rsize) + slab->us_data) != item)
4529 		panic("Unaligned free of %p from zone %p(%s) slab %p(%d)\n",
4530 		    item, zone, zone->uz_name, slab, freei);
4531 
4532 	if (!BIT_ISSET(SLAB_MAX_SETSIZE, freei, &slab->us_debugfree))
4533 		panic("Duplicate free of %p from zone %p(%s) slab %p(%d)\n",
4534 		    item, zone, zone->uz_name, slab, freei);
4535 
4536 	BIT_CLR_ATOMIC(SLAB_MAX_SETSIZE, freei, &slab->us_debugfree);
4537 }
4538 #endif /* INVARIANTS */
4539 
4540 #ifdef DDB
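/*
 * Gather the statistics displayed by "show uma" for one zone and return
 * an estimate of the memory it consumes.
 */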
4541 static int64_t
4542 get_uma_stats(uma_keg_t kz, uma_zone_t z, uint64_t *allocs, uint64_t *used,
4543     uint64_t *sleeps, long *cachefree, uint64_t *xdomain)
4544 {
4545 	uint64_t frees;
4546 	int i;
4547 
4548 	if (kz->uk_flags & UMA_ZFLAG_INTERNAL) {
4549 		*allocs = counter_u64_fetch(z->uz_allocs);
4550 		frees = counter_u64_fetch(z->uz_frees);
4551 		*sleeps = z->uz_sleeps;
4552 		*cachefree = 0;
4553 		*xdomain = 0;
4554 	} else
4555 		uma_zone_sumstat(z, cachefree, allocs, &frees, sleeps,
4556 		    xdomain);
4557 	if (!((z->uz_flags & UMA_ZONE_SECONDARY) &&
4558 	    (LIST_FIRST(&kz->uk_zones) != z)))
4559 		*cachefree += kz->uk_free;
4560 	for (i = 0; i < vm_ndomains; i++)
4561 		*cachefree += z->uz_domain[i].uzd_nitems;
4562 	*used = *allocs - frees;
4563 	return (((int64_t)*used + *cachefree) * kz->uk_size);
4564 }
4565 
4566 DB_SHOW_COMMAND(uma, db_show_uma)
4567 {
4568 	const char *fmt_hdr, *fmt_entry;
4569 	uma_keg_t kz;
4570 	uma_zone_t z;
4571 	uint64_t allocs, used, sleeps, xdomain;
4572 	long cachefree;
4573 	/* variables for sorting */
4574 	uma_keg_t cur_keg;
4575 	uma_zone_t cur_zone, last_zone;
4576 	int64_t cur_size, last_size, size;
4577 	int ties;
4578 
4579 	/* /i option produces machine-parseable CSV output */
4580 	if (modif[0] == 'i') {
4581 		fmt_hdr = "%s,%s,%s,%s,%s,%s,%s,%s,%s\n";
4582 		fmt_entry = "\"%s\",%ju,%jd,%ld,%ju,%ju,%u,%jd,%ju\n";
4583 	} else {
4584 		fmt_hdr = "%18s %6s %7s %7s %11s %7s %7s %10s %8s\n";
4585 		fmt_entry = "%18s %6ju %7jd %7ld %11ju %7ju %7u %10jd %8ju\n";
4586 	}
4587 
4588 	db_printf(fmt_hdr, "Zone", "Size", "Used", "Free", "Requests",
4589 	    "Sleeps", "Bucket", "Total Mem", "XFree");
4590 
4591 	/* Sort the zones with largest size first. */
4592 	last_zone = NULL;
4593 	last_size = INT64_MAX;
4594 	for (;;) {
4595 		cur_zone = NULL;
4596 		cur_size = -1;
4597 		ties = 0;
4598 		LIST_FOREACH(kz, &uma_kegs, uk_link) {
4599 			LIST_FOREACH(z, &kz->uk_zones, uz_link) {
4600 				/*
4601 				 * In the case of size ties, print out zones
4602 				 * in the order they are encountered.  That is,
4603 				 * when we encounter the most recently output
4604 				 * zone, we have already printed all preceding
4605 				 * ties, and we must print all following ties.
4606 				 */
4607 				if (z == last_zone) {
4608 					ties = 1;
4609 					continue;
4610 				}
4611 				size = get_uma_stats(kz, z, &allocs, &used,
4612 				    &sleeps, &cachefree, &xdomain);
4613 				if (size > cur_size && size < last_size + ties)
4614 				{
4615 					cur_size = size;
4616 					cur_zone = z;
4617 					cur_keg = kz;
4618 				}
4619 			}
4620 		}
4621 		if (cur_zone == NULL)
4622 			break;
4623 
4624 		size = get_uma_stats(cur_keg, cur_zone, &allocs, &used,
4625 		    &sleeps, &cachefree, &xdomain);
4626 		db_printf(fmt_entry, cur_zone->uz_name,
4627 		    (uintmax_t)cur_keg->uk_size, (intmax_t)used, cachefree,
4628 		    (uintmax_t)allocs, (uintmax_t)sleeps,
4629 		    (unsigned)cur_zone->uz_bucket_size, (intmax_t)size,
4630 		    xdomain);
4631 
4632 		if (db_pager_quit)
4633 			return;
4634 		last_zone = cur_zone;
4635 		last_size = cur_size;
4636 	}
4637 }
4638 
4639 DB_SHOW_COMMAND(umacache, db_show_umacache)
4640 {
4641 	uma_zone_t z;
4642 	uint64_t allocs, frees;
4643 	long cachefree;
4644 	int i;
4645 
4646 	db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free",
4647 	    "Requests", "Bucket");
4648 	LIST_FOREACH(z, &uma_cachezones, uz_link) {
4649 		uma_zone_sumstat(z, &cachefree, &allocs, &frees, NULL, NULL);
4650 		for (i = 0; i < vm_ndomains; i++)
4651 			cachefree += z->uz_domain[i].uzd_nitems;
4652 		db_printf("%18s %8ju %8jd %8ld %12ju %8u\n",
4653 		    z->uz_name, (uintmax_t)z->uz_size,
4654 		    (intmax_t)(allocs - frees), cachefree,
4655 		    (uintmax_t)allocs, z->uz_bucket_size);
4656 		if (db_pager_quit)
4657 			return;
4658 	}
4659 }
4660 #endif	/* DDB */
4661