xref: /freebsd/sys/vm/uma_core.c (revision 1171c633fb097a19e1da87128604190bc6d27341)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2002-2019 Jeffrey Roberson <jeff@FreeBSD.org>
5  * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
6  * Copyright (c) 2004-2006 Robert N. M. Watson
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice unmodified, this list of conditions, and the following
14  *    disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /*
32  * uma_core.c  Implementation of the Universal Memory allocator
33  *
34  * This allocator is intended to replace the multitude of similar object caches
35  * in the standard FreeBSD kernel.  The intent is to be flexible as well as
36  * efficient.  A primary design goal is to return unused memory to the rest of
37  * the system.  This will make the system as a whole more flexible due to the
38  * ability to move memory to subsystems which most need it instead of leaving
39  * pools of reserved memory unused.
40  *
41  * The basic ideas stem from similar slab/zone based allocators whose algorithms
42  * are well known.
43  *
44  */
45 
46 /*
47  * TODO:
48  *	- Improve memory usage for large allocations
49  *	- Investigate cache size adjustments
50  */
51 
52 #include <sys/cdefs.h>
53 __FBSDID("$FreeBSD$");
54 
55 #include "opt_ddb.h"
56 #include "opt_param.h"
57 #include "opt_vm.h"
58 
59 #include <sys/param.h>
60 #include <sys/systm.h>
61 #include <sys/bitset.h>
62 #include <sys/domainset.h>
63 #include <sys/eventhandler.h>
64 #include <sys/kernel.h>
65 #include <sys/types.h>
66 #include <sys/limits.h>
67 #include <sys/queue.h>
68 #include <sys/malloc.h>
69 #include <sys/ktr.h>
70 #include <sys/lock.h>
71 #include <sys/sysctl.h>
72 #include <sys/mutex.h>
73 #include <sys/proc.h>
74 #include <sys/random.h>
75 #include <sys/rwlock.h>
76 #include <sys/sbuf.h>
77 #include <sys/sched.h>
78 #include <sys/sleepqueue.h>
79 #include <sys/smp.h>
80 #include <sys/taskqueue.h>
81 #include <sys/vmmeter.h>
82 
83 #include <vm/vm.h>
84 #include <vm/vm_domainset.h>
85 #include <vm/vm_object.h>
86 #include <vm/vm_page.h>
87 #include <vm/vm_pageout.h>
88 #include <vm/vm_param.h>
89 #include <vm/vm_phys.h>
90 #include <vm/vm_pagequeue.h>
91 #include <vm/vm_map.h>
92 #include <vm/vm_kern.h>
93 #include <vm/vm_extern.h>
94 #include <vm/uma.h>
95 #include <vm/uma_int.h>
96 #include <vm/uma_dbg.h>
97 
98 #include <ddb/ddb.h>
99 
100 #ifdef DEBUG_MEMGUARD
101 #include <vm/memguard.h>
102 #endif
103 
104 /*
105  * This is the zone and keg from which all zones are spawned.
106  */
107 static uma_zone_t kegs;
108 static uma_zone_t zones;
109 
110 /* This is the zone from which all offpage uma_slab_ts are allocated. */
111 static uma_zone_t slabzone;
112 
113 /*
114  * The initial hash tables come out of this zone so they can be allocated
115  * prior to malloc coming up.
116  */
117 static uma_zone_t hashzone;
118 
119 /* The boot-time adjusted value for cache line alignment. */
120 int uma_align_cache = 64 - 1;
121 
122 static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");
123 static MALLOC_DEFINE(M_UMA, "UMA", "UMA Misc");
124 
125 /*
126  * Are we allowed to allocate buckets?
127  */
128 static int bucketdisable = 1;
129 
130 /* Linked list of all kegs in the system */
131 static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);
132 
133 /* Linked list of all cache-only zones in the system */
134 static LIST_HEAD(,uma_zone) uma_cachezones =
135     LIST_HEAD_INITIALIZER(uma_cachezones);
136 
137 /* This RW lock protects the keg list */
138 static struct rwlock_padalign __exclusive_cache_line uma_rwlock;
139 
140 /*
141  * Pointer and counter to pool of pages, that is preallocated at
142  * startup to bootstrap UMA.
143  */
144 static char *bootmem;
145 static int boot_pages;
146 
147 static struct sx uma_reclaim_lock;
148 
149 /*
150  * kmem soft limit, initialized by uma_set_limit().  Ensure that early
151  * allocations don't trigger a wakeup of the reclaim thread.
152  */
153 unsigned long uma_kmem_limit = LONG_MAX;
154 SYSCTL_ULONG(_vm, OID_AUTO, uma_kmem_limit, CTLFLAG_RD, &uma_kmem_limit, 0,
155     "UMA kernel memory soft limit");
156 unsigned long uma_kmem_total;
157 SYSCTL_ULONG(_vm, OID_AUTO, uma_kmem_total, CTLFLAG_RD, &uma_kmem_total, 0,
158     "UMA kernel memory usage");
159 
160 /* Is the VM done starting up? */
161 static enum {
162 	BOOT_COLD,
163 	BOOT_STRAPPED,
164 	BOOT_PAGEALLOC,
165 	BOOT_BUCKETS,
166 	BOOT_RUNNING,
167 	BOOT_SHUTDOWN,
168 } booted = BOOT_COLD;
169 
170 /*
171  * This is the handle used to schedule events that need to happen
172  * outside of the allocation fast path.
173  */
174 static struct callout uma_callout;
175 #define	UMA_TIMEOUT	20		/* Seconds for callout interval. */
176 
177 /*
178  * This structure is passed as the zone ctor arg so that I don't have to create
179  * a special allocation function just for zones.
180  */
181 struct uma_zctor_args {
182 	const char *name;
183 	size_t size;
184 	uma_ctor ctor;
185 	uma_dtor dtor;
186 	uma_init uminit;
187 	uma_fini fini;
188 	uma_import import;
189 	uma_release release;
190 	void *arg;
191 	uma_keg_t keg;
192 	int align;
193 	uint32_t flags;
194 };
195 
196 struct uma_kctor_args {
197 	uma_zone_t zone;
198 	size_t size;
199 	uma_init uminit;
200 	uma_fini fini;
201 	int align;
202 	uint32_t flags;
203 };
204 
205 struct uma_bucket_zone {
206 	uma_zone_t	ubz_zone;
207 	char		*ubz_name;
208 	int		ubz_entries;	/* Number of items it can hold. */
209 	int		ubz_maxsize;	/* Maximum allocation size per-item. */
210 };
211 
212 /*
213  * Compute the actual number of bucket entries to pack them in power
214  * of two sizes for more efficient space utilization.
215  */
216 #define	BUCKET_SIZE(n)						\
217     (((sizeof(void *) * (n)) - sizeof(struct uma_bucket)) / sizeof(void *))
218 
219 #define	BUCKET_MAX	BUCKET_SIZE(256)
220 #define	BUCKET_MIN	BUCKET_SIZE(4)
221 
222 struct uma_bucket_zone bucket_zones[] = {
223 	{ NULL, "4 Bucket", BUCKET_SIZE(4), 4096 },
224 	{ NULL, "6 Bucket", BUCKET_SIZE(6), 3072 },
225 	{ NULL, "8 Bucket", BUCKET_SIZE(8), 2048 },
226 	{ NULL, "12 Bucket", BUCKET_SIZE(12), 1536 },
227 	{ NULL, "16 Bucket", BUCKET_SIZE(16), 1024 },
228 	{ NULL, "32 Bucket", BUCKET_SIZE(32), 512 },
229 	{ NULL, "64 Bucket", BUCKET_SIZE(64), 256 },
230 	{ NULL, "128 Bucket", BUCKET_SIZE(128), 128 },
231 	{ NULL, "256 Bucket", BUCKET_SIZE(256), 64 },
232 	{ NULL, NULL, 0}
233 };
234 
235 /*
236  * Flags and enumerations to be passed to internal functions.
237  */
238 enum zfreeskip {
239 	SKIP_NONE =	0,
240 	SKIP_CNT =	0x00000001,
241 	SKIP_DTOR =	0x00010000,
242 	SKIP_FINI =	0x00020000,
243 };
244 
245 /* Prototypes.. */
246 
247 int	uma_startup_count(int);
248 void	uma_startup(void *, int);
249 void	uma_startup1(void);
250 void	uma_startup2(void);
251 
252 static void *noobj_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
253 static void *page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
254 static void *pcpu_page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
255 static void *startup_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
256 static void page_free(void *, vm_size_t, uint8_t);
257 static void pcpu_page_free(void *, vm_size_t, uint8_t);
258 static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int, int, int);
259 static void cache_drain(uma_zone_t);
260 static void bucket_drain(uma_zone_t, uma_bucket_t);
261 static void bucket_cache_reclaim(uma_zone_t zone, bool);
262 static int keg_ctor(void *, int, void *, int);
263 static void keg_dtor(void *, int, void *);
264 static int zone_ctor(void *, int, void *, int);
265 static void zone_dtor(void *, int, void *);
266 static int zero_init(void *, int, int);
267 static void zone_foreach(void (*zfunc)(uma_zone_t, void *), void *);
268 static void zone_timeout(uma_zone_t zone, void *);
269 static int hash_alloc(struct uma_hash *, u_int);
270 static int hash_expand(struct uma_hash *, struct uma_hash *);
271 static void hash_free(struct uma_hash *hash);
272 static void uma_timeout(void *);
273 static void uma_startup3(void);
274 static void uma_shutdown(void);
275 static void *zone_alloc_item(uma_zone_t, void *, int, int);
276 static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip);
277 static int zone_alloc_limit(uma_zone_t zone, int count, int flags);
278 static void zone_free_limit(uma_zone_t zone, int count);
279 static void bucket_enable(void);
280 static void bucket_init(void);
281 static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int);
282 static void bucket_free(uma_zone_t zone, uma_bucket_t, void *);
283 static void bucket_zone_drain(void);
284 static uma_bucket_t zone_alloc_bucket(uma_zone_t, void *, int, int);
285 static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
286 static void slab_free_item(uma_zone_t zone, uma_slab_t slab, void *item);
287 static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
288     uma_fini fini, int align, uint32_t flags);
289 static int zone_import(void *, void **, int, int, int);
290 static void zone_release(void *, void **, int);
291 static bool cache_alloc(uma_zone_t, uma_cache_t, void *, int);
292 static bool cache_free(uma_zone_t, uma_cache_t, void *, void *, int);
293 
294 static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
295 static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);
296 static int sysctl_handle_uma_zone_allocs(SYSCTL_HANDLER_ARGS);
297 static int sysctl_handle_uma_zone_frees(SYSCTL_HANDLER_ARGS);
298 static int sysctl_handle_uma_zone_flags(SYSCTL_HANDLER_ARGS);
299 static int sysctl_handle_uma_slab_efficiency(SYSCTL_HANDLER_ARGS);
300 static int sysctl_handle_uma_zone_items(SYSCTL_HANDLER_ARGS);
301 
302 static uint64_t uma_zone_get_allocs(uma_zone_t zone);
303 
304 #ifdef INVARIANTS
305 static uint64_t uma_keg_get_allocs(uma_keg_t zone);
306 static inline struct noslabbits *slab_dbg_bits(uma_slab_t slab, uma_keg_t keg);
307 
308 static bool uma_dbg_kskip(uma_keg_t keg, void *mem);
309 static bool uma_dbg_zskip(uma_zone_t zone, void *mem);
310 static void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item);
311 static void uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item);
312 
313 static SYSCTL_NODE(_vm, OID_AUTO, debug, CTLFLAG_RD, 0,
314     "Memory allocation debugging");
315 
316 static u_int dbg_divisor = 1;
317 SYSCTL_UINT(_vm_debug, OID_AUTO, divisor,
318     CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &dbg_divisor, 0,
319     "Debug & thrash every this item in memory allocator");
320 
321 static counter_u64_t uma_dbg_cnt = EARLY_COUNTER;
322 static counter_u64_t uma_skip_cnt = EARLY_COUNTER;
323 SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, trashed, CTLFLAG_RD,
324     &uma_dbg_cnt, "memory items debugged");
325 SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, skipped, CTLFLAG_RD,
326     &uma_skip_cnt, "memory items skipped, not debugged");
327 #endif
328 
329 SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);
330 
331 SYSCTL_NODE(_vm, OID_AUTO, uma, CTLFLAG_RW, 0, "Universal Memory Allocator");
332 
333 SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLTYPE_INT,
334     0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones");
335 
336 SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
337     0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");
338 
339 static int zone_warnings = 1;
340 SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RWTUN, &zone_warnings, 0,
341     "Warn when UMA zones becomes full");
342 
343 /*
344  * This routine checks to see whether or not it's safe to enable buckets.
345  */
346 static void
347 bucket_enable(void)
348 {
349 
350 	KASSERT(booted >= BOOT_BUCKETS, ("Bucket enable before init"));
351 	bucketdisable = vm_page_count_min();
352 }
353 
354 /*
355  * Initialize bucket_zones, the array of zones of buckets of various sizes.
356  *
357  * For each zone, calculate the memory required for each bucket, consisting
358  * of the header and an array of pointers.
359  */
360 static void
361 bucket_init(void)
362 {
363 	struct uma_bucket_zone *ubz;
364 	int size;
365 
366 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) {
367 		size = roundup(sizeof(struct uma_bucket), sizeof(void *));
368 		size += sizeof(void *) * ubz->ubz_entries;
369 		ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
370 		    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
371 		    UMA_ZONE_MTXCLASS | UMA_ZFLAG_BUCKET |
372 		    UMA_ZONE_FIRSTTOUCH);
373 	}
374 }
375 
376 /*
377  * Given a desired number of entries for a bucket, return the zone from which
378  * to allocate the bucket.
379  */
380 static struct uma_bucket_zone *
381 bucket_zone_lookup(int entries)
382 {
383 	struct uma_bucket_zone *ubz;
384 
385 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
386 		if (ubz->ubz_entries >= entries)
387 			return (ubz);
388 	ubz--;
389 	return (ubz);
390 }
391 
392 static struct uma_bucket_zone *
393 bucket_zone_max(uma_zone_t zone, int nitems)
394 {
395 	struct uma_bucket_zone *ubz;
396 	int bpcpu;
397 
398 	bpcpu = 2;
399 	if ((zone->uz_flags & UMA_ZONE_FIRSTTOUCH) != 0)
400 		/* Count the cross-domain bucket. */
401 		bpcpu++;
402 
403 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
404 		if (ubz->ubz_entries * bpcpu * mp_ncpus > nitems)
405 			break;
406 	if (ubz == &bucket_zones[0])
407 		ubz = NULL;
408 	else
409 		ubz--;
410 	return (ubz);
411 }
412 
413 static int
414 bucket_select(int size)
415 {
416 	struct uma_bucket_zone *ubz;
417 
418 	ubz = &bucket_zones[0];
419 	if (size > ubz->ubz_maxsize)
420 		return MAX((ubz->ubz_maxsize * ubz->ubz_entries) / size, 1);
421 
422 	for (; ubz->ubz_entries != 0; ubz++)
423 		if (ubz->ubz_maxsize < size)
424 			break;
425 	ubz--;
426 	return (ubz->ubz_entries);
427 }
428 
429 static uma_bucket_t
430 bucket_alloc(uma_zone_t zone, void *udata, int flags)
431 {
432 	struct uma_bucket_zone *ubz;
433 	uma_bucket_t bucket;
434 
435 	/*
436 	 * This is to stop us from allocating per cpu buckets while we're
437 	 * running out of vm.boot_pages.  Otherwise, we would exhaust the
438 	 * boot pages.  This also prevents us from allocating buckets in
439 	 * low memory situations.
440 	 */
441 	if (bucketdisable)
442 		return (NULL);
443 	/*
444 	 * To limit bucket recursion we store the original zone flags
445 	 * in a cookie passed via zalloc_arg/zfree_arg.  This allows the
446 	 * NOVM flag to persist even through deep recursions.  We also
447 	 * store ZFLAG_BUCKET once we have recursed attempting to allocate
448 	 * a bucket for a bucket zone so we do not allow infinite bucket
449 	 * recursion.  This cookie will even persist to frees of unused
450 	 * buckets via the allocation path or bucket allocations in the
451 	 * free path.
452 	 */
453 	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
454 		udata = (void *)(uintptr_t)zone->uz_flags;
455 	else {
456 		if ((uintptr_t)udata & UMA_ZFLAG_BUCKET)
457 			return (NULL);
458 		udata = (void *)((uintptr_t)udata | UMA_ZFLAG_BUCKET);
459 	}
460 	if ((uintptr_t)udata & UMA_ZFLAG_CACHEONLY)
461 		flags |= M_NOVM;
462 	ubz = bucket_zone_lookup(zone->uz_bucket_size);
463 	if (ubz->ubz_zone == zone && (ubz + 1)->ubz_entries != 0)
464 		ubz++;
465 	bucket = uma_zalloc_arg(ubz->ubz_zone, udata, flags);
466 	if (bucket) {
467 #ifdef INVARIANTS
468 		bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
469 #endif
470 		bucket->ub_cnt = 0;
471 		bucket->ub_entries = ubz->ubz_entries;
472 	}
473 
474 	return (bucket);
475 }
476 
477 static void
478 bucket_free(uma_zone_t zone, uma_bucket_t bucket, void *udata)
479 {
480 	struct uma_bucket_zone *ubz;
481 
482 	KASSERT(bucket->ub_cnt == 0,
483 	    ("bucket_free: Freeing a non free bucket."));
484 	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
485 		udata = (void *)(uintptr_t)zone->uz_flags;
486 	ubz = bucket_zone_lookup(bucket->ub_entries);
487 	uma_zfree_arg(ubz->ubz_zone, bucket, udata);
488 }
489 
490 static void
491 bucket_zone_drain(void)
492 {
493 	struct uma_bucket_zone *ubz;
494 
495 	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
496 		uma_zone_reclaim(ubz->ubz_zone, UMA_RECLAIM_DRAIN);
497 }
498 
499 /*
500  * Attempt to satisfy an allocation by retrieving a full bucket from one of the
501  * zone's caches.
502  */
503 static uma_bucket_t
504 zone_fetch_bucket(uma_zone_t zone, uma_zone_domain_t zdom)
505 {
506 	uma_bucket_t bucket;
507 
508 	ZONE_LOCK_ASSERT(zone);
509 
510 	if ((bucket = TAILQ_FIRST(&zdom->uzd_buckets)) != NULL) {
511 		MPASS(zdom->uzd_nitems >= bucket->ub_cnt);
512 		TAILQ_REMOVE(&zdom->uzd_buckets, bucket, ub_link);
513 		zdom->uzd_nitems -= bucket->ub_cnt;
514 		if (zdom->uzd_imin > zdom->uzd_nitems)
515 			zdom->uzd_imin = zdom->uzd_nitems;
516 		zone->uz_bkt_count -= bucket->ub_cnt;
517 	}
518 	return (bucket);
519 }
520 
521 /*
522  * Insert a full bucket into the specified cache.  The "ws" parameter indicates
523  * whether the bucket's contents should be counted as part of the zone's working
524  * set.
525  */
526 static void
527 zone_put_bucket(uma_zone_t zone, uma_zone_domain_t zdom, uma_bucket_t bucket,
528     const bool ws)
529 {
530 
531 	ZONE_LOCK_ASSERT(zone);
532 	KASSERT(!ws || zone->uz_bkt_count < zone->uz_bkt_max,
533 	    ("%s: zone %p overflow", __func__, zone));
534 
535 	if (ws)
536 		TAILQ_INSERT_HEAD(&zdom->uzd_buckets, bucket, ub_link);
537 	else
538 		TAILQ_INSERT_TAIL(&zdom->uzd_buckets, bucket, ub_link);
539 	zdom->uzd_nitems += bucket->ub_cnt;
540 	if (ws && zdom->uzd_imax < zdom->uzd_nitems)
541 		zdom->uzd_imax = zdom->uzd_nitems;
542 	zone->uz_bkt_count += bucket->ub_cnt;
543 }
544 
545 /* Pops an item out of a per-cpu cache bucket. */
546 static inline void *
547 cache_bucket_pop(uma_cache_t cache, uma_cache_bucket_t bucket)
548 {
549 	void *item;
550 
551 	CRITICAL_ASSERT(curthread);
552 
553 	bucket->ucb_cnt--;
554 	item = bucket->ucb_bucket->ub_bucket[bucket->ucb_cnt];
555 #ifdef INVARIANTS
556 	bucket->ucb_bucket->ub_bucket[bucket->ucb_cnt] = NULL;
557 	KASSERT(item != NULL, ("uma_zalloc: Bucket pointer mangled."));
558 #endif
559 	cache->uc_allocs++;
560 
561 	return (item);
562 }
563 
564 /* Pushes an item into a per-cpu cache bucket. */
565 static inline void
566 cache_bucket_push(uma_cache_t cache, uma_cache_bucket_t bucket, void *item)
567 {
568 
569 	CRITICAL_ASSERT(curthread);
570 	KASSERT(bucket->ucb_bucket->ub_bucket[bucket->ucb_cnt] == NULL,
571 	    ("uma_zfree: Freeing to non free bucket index."));
572 
573 	bucket->ucb_bucket->ub_bucket[bucket->ucb_cnt] = item;
574 	bucket->ucb_cnt++;
575 	cache->uc_frees++;
576 }
577 
578 /*
579  * Unload a UMA bucket from a per-cpu cache.
580  */
581 static inline uma_bucket_t
582 cache_bucket_unload(uma_cache_bucket_t bucket)
583 {
584 	uma_bucket_t b;
585 
586 	b = bucket->ucb_bucket;
587 	if (b != NULL) {
588 		MPASS(b->ub_entries == bucket->ucb_entries);
589 		b->ub_cnt = bucket->ucb_cnt;
590 		bucket->ucb_bucket = NULL;
591 		bucket->ucb_entries = bucket->ucb_cnt = 0;
592 	}
593 
594 	return (b);
595 }
596 
597 static inline uma_bucket_t
598 cache_bucket_unload_alloc(uma_cache_t cache)
599 {
600 
601 	return (cache_bucket_unload(&cache->uc_allocbucket));
602 }
603 
604 static inline uma_bucket_t
605 cache_bucket_unload_free(uma_cache_t cache)
606 {
607 
608 	return (cache_bucket_unload(&cache->uc_freebucket));
609 }
610 
611 static inline uma_bucket_t
612 cache_bucket_unload_cross(uma_cache_t cache)
613 {
614 
615 	return (cache_bucket_unload(&cache->uc_crossbucket));
616 }
617 
618 /*
619  * Load a bucket into a per-cpu cache bucket.
620  */
621 static inline void
622 cache_bucket_load(uma_cache_bucket_t bucket, uma_bucket_t b)
623 {
624 
625 	CRITICAL_ASSERT(curthread);
626 	MPASS(bucket->ucb_bucket == NULL);
627 
628 	bucket->ucb_bucket = b;
629 	bucket->ucb_cnt = b->ub_cnt;
630 	bucket->ucb_entries = b->ub_entries;
631 }
632 
633 static inline void
634 cache_bucket_load_alloc(uma_cache_t cache, uma_bucket_t b)
635 {
636 
637 	cache_bucket_load(&cache->uc_allocbucket, b);
638 }
639 
640 static inline void
641 cache_bucket_load_free(uma_cache_t cache, uma_bucket_t b)
642 {
643 
644 	cache_bucket_load(&cache->uc_freebucket, b);
645 }
646 
647 #ifdef NUMA
648 static inline void
649 cache_bucket_load_cross(uma_cache_t cache, uma_bucket_t b)
650 {
651 
652 	cache_bucket_load(&cache->uc_crossbucket, b);
653 }
654 #endif
655 
656 /*
657  * Copy and preserve ucb_spare.
658  */
659 static inline void
660 cache_bucket_copy(uma_cache_bucket_t b1, uma_cache_bucket_t b2)
661 {
662 
663 	b1->ucb_bucket = b2->ucb_bucket;
664 	b1->ucb_entries = b2->ucb_entries;
665 	b1->ucb_cnt = b2->ucb_cnt;
666 }
667 
668 /*
669  * Swap two cache buckets.
670  */
671 static inline void
672 cache_bucket_swap(uma_cache_bucket_t b1, uma_cache_bucket_t b2)
673 {
674 	struct uma_cache_bucket b3;
675 
676 	CRITICAL_ASSERT(curthread);
677 
678 	cache_bucket_copy(&b3, b1);
679 	cache_bucket_copy(b1, b2);
680 	cache_bucket_copy(b2, &b3);
681 }
682 
683 static void
684 zone_log_warning(uma_zone_t zone)
685 {
686 	static const struct timeval warninterval = { 300, 0 };
687 
688 	if (!zone_warnings || zone->uz_warning == NULL)
689 		return;
690 
691 	if (ratecheck(&zone->uz_ratecheck, &warninterval))
692 		printf("[zone: %s] %s\n", zone->uz_name, zone->uz_warning);
693 }
694 
695 static inline void
696 zone_maxaction(uma_zone_t zone)
697 {
698 
699 	if (zone->uz_maxaction.ta_func != NULL)
700 		taskqueue_enqueue(taskqueue_thread, &zone->uz_maxaction);
701 }
702 
703 /*
704  * Routine called by timeout which is used to fire off some time interval
705  * based calculations.  (stats, hash size, etc.)
706  *
707  * Arguments:
708  *	arg   Unused
709  *
710  * Returns:
711  *	Nothing
712  */
713 static void
714 uma_timeout(void *unused)
715 {
716 	bucket_enable();
717 	zone_foreach(zone_timeout, NULL);
718 
719 	/* Reschedule this event */
720 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
721 }
722 
723 /*
724  * Update the working set size estimate for the zone's bucket cache.
725  * The constants chosen here are somewhat arbitrary.  With an update period of
726  * 20s (UMA_TIMEOUT), this estimate is dominated by zone activity over the
727  * last 100s.
728  */
729 static void
730 zone_domain_update_wss(uma_zone_domain_t zdom)
731 {
732 	long wss;
733 
734 	MPASS(zdom->uzd_imax >= zdom->uzd_imin);
735 	wss = zdom->uzd_imax - zdom->uzd_imin;
736 	zdom->uzd_imax = zdom->uzd_imin = zdom->uzd_nitems;
737 	zdom->uzd_wss = (4 * wss + zdom->uzd_wss) / 5;
738 }
739 
740 /*
741  * Routine to perform timeout driven calculations.  This expands the
742  * hashes and does per cpu statistics aggregation.
743  *
744  *  Returns nothing.
745  */
746 static void
747 zone_timeout(uma_zone_t zone, void *unused)
748 {
749 	uma_keg_t keg;
750 	u_int slabs, pages;
751 
752 	if ((zone->uz_flags & UMA_ZFLAG_HASH) == 0)
753 		goto update_wss;
754 
755 	keg = zone->uz_keg;
756 
757 	/*
758 	 * Hash zones are non-numa by definition so the first domain
759 	 * is the only one present.
760 	 */
761 	KEG_LOCK(keg, 0);
762 	pages = keg->uk_domain[0].ud_pages;
763 
764 	/*
765 	 * Expand the keg hash table.
766 	 *
767 	 * This is done if the number of slabs is larger than the hash size.
768 	 * What I'm trying to do here is completely reduce collisions.  This
769 	 * may be a little aggressive.  Should I allow for two collisions max?
770 	 */
771 	if ((slabs = pages / keg->uk_ppera) > keg->uk_hash.uh_hashsize) {
772 		struct uma_hash newhash;
773 		struct uma_hash oldhash;
774 		int ret;
775 
776 		/*
777 		 * This is so involved because allocating and freeing
778 		 * while the keg lock is held will lead to deadlock.
779 		 * I have to do everything in stages and check for
780 		 * races.
781 		 */
782 		KEG_UNLOCK(keg, 0);
783 		ret = hash_alloc(&newhash, 1 << fls(slabs));
784 		KEG_LOCK(keg, 0);
785 		if (ret) {
786 			if (hash_expand(&keg->uk_hash, &newhash)) {
787 				oldhash = keg->uk_hash;
788 				keg->uk_hash = newhash;
789 			} else
790 				oldhash = newhash;
791 
792 			KEG_UNLOCK(keg, 0);
793 			hash_free(&oldhash);
794 			goto update_wss;
795 		}
796 	}
797 	KEG_UNLOCK(keg, 0);
798 
799 update_wss:
800 	ZONE_LOCK(zone);
801 	for (int i = 0; i < vm_ndomains; i++)
802 		zone_domain_update_wss(&zone->uz_domain[i]);
803 	ZONE_UNLOCK(zone);
804 }
805 
806 /*
807  * Allocate and zero fill the next sized hash table from the appropriate
808  * backing store.
809  *
810  * Arguments:
811  *	hash  A new hash structure with the old hash size in uh_hashsize
812  *
813  * Returns:
814  *	1 on success and 0 on failure.
815  */
816 static int
817 hash_alloc(struct uma_hash *hash, u_int size)
818 {
819 	size_t alloc;
820 
821 	KASSERT(powerof2(size), ("hash size must be power of 2"));
822 	if (size > UMA_HASH_SIZE_INIT)  {
823 		hash->uh_hashsize = size;
824 		alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
825 		hash->uh_slab_hash = malloc(alloc, M_UMAHASH, M_NOWAIT);
826 	} else {
827 		alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
828 		hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
829 		    UMA_ANYDOMAIN, M_WAITOK);
830 		hash->uh_hashsize = UMA_HASH_SIZE_INIT;
831 	}
832 	if (hash->uh_slab_hash) {
833 		bzero(hash->uh_slab_hash, alloc);
834 		hash->uh_hashmask = hash->uh_hashsize - 1;
835 		return (1);
836 	}
837 
838 	return (0);
839 }
840 
841 /*
842  * Expands the hash table for HASH zones.  This is done from zone_timeout
843  * to reduce collisions.  This must not be done in the regular allocation
844  * path, otherwise, we can recurse on the vm while allocating pages.
845  *
846  * Arguments:
847  *	oldhash  The hash you want to expand
848  *	newhash  The hash structure for the new table
849  *
850  * Returns:
851  *	Nothing
852  *
853  * Discussion:
854  */
855 static int
856 hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
857 {
858 	uma_hash_slab_t slab;
859 	u_int hval;
860 	u_int idx;
861 
862 	if (!newhash->uh_slab_hash)
863 		return (0);
864 
865 	if (oldhash->uh_hashsize >= newhash->uh_hashsize)
866 		return (0);
867 
868 	/*
869 	 * I need to investigate hash algorithms for resizing without a
870 	 * full rehash.
871 	 */
872 
873 	for (idx = 0; idx < oldhash->uh_hashsize; idx++)
874 		while (!LIST_EMPTY(&oldhash->uh_slab_hash[idx])) {
875 			slab = LIST_FIRST(&oldhash->uh_slab_hash[idx]);
876 			LIST_REMOVE(slab, uhs_hlink);
877 			hval = UMA_HASH(newhash, slab->uhs_data);
878 			LIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
879 			    slab, uhs_hlink);
880 		}
881 
882 	return (1);
883 }
884 
885 /*
886  * Free the hash bucket to the appropriate backing store.
887  *
888  * Arguments:
889  *	slab_hash  The hash bucket we're freeing
890  *	hashsize   The number of entries in that hash bucket
891  *
892  * Returns:
893  *	Nothing
894  */
895 static void
896 hash_free(struct uma_hash *hash)
897 {
898 	if (hash->uh_slab_hash == NULL)
899 		return;
900 	if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
901 		zone_free_item(hashzone, hash->uh_slab_hash, NULL, SKIP_NONE);
902 	else
903 		free(hash->uh_slab_hash, M_UMAHASH);
904 }
905 
906 /*
907  * Frees all outstanding items in a bucket
908  *
909  * Arguments:
910  *	zone   The zone to free to, must be unlocked.
911  *	bucket The free/alloc bucket with items.
912  *
913  * Returns:
914  *	Nothing
915  */
916 
917 static void
918 bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
919 {
920 	int i;
921 
922 	if (bucket == NULL || bucket->ub_cnt == 0)
923 		return;
924 
925 	if (zone->uz_fini)
926 		for (i = 0; i < bucket->ub_cnt; i++)
927 			zone->uz_fini(bucket->ub_bucket[i], zone->uz_size);
928 	zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt);
929 	if (zone->uz_max_items > 0)
930 		zone_free_limit(zone, bucket->ub_cnt);
931 	bucket->ub_cnt = 0;
932 }
933 
934 /*
935  * Drains the per cpu caches for a zone.
936  *
937  * NOTE: This may only be called while the zone is being torn down, and not
938  * during normal operation.  This is necessary in order that we do not have
939  * to migrate CPUs to drain the per-CPU caches.
940  *
941  * Arguments:
942  *	zone     The zone to drain, must be unlocked.
943  *
944  * Returns:
945  *	Nothing
946  */
947 static void
948 cache_drain(uma_zone_t zone)
949 {
950 	uma_cache_t cache;
951 	uma_bucket_t bucket;
952 	int cpu;
953 
954 	/*
955 	 * XXX: It is safe to not lock the per-CPU caches, because we're
956 	 * tearing down the zone anyway.  I.e., there will be no further use
957 	 * of the caches at this point.
958 	 *
959 	 * XXX: It would good to be able to assert that the zone is being
960 	 * torn down to prevent improper use of cache_drain().
961 	 */
962 	CPU_FOREACH(cpu) {
963 		cache = &zone->uz_cpu[cpu];
964 		bucket = cache_bucket_unload_alloc(cache);
965 		if (bucket != NULL) {
966 			bucket_drain(zone, bucket);
967 			bucket_free(zone, bucket, NULL);
968 		}
969 		bucket = cache_bucket_unload_free(cache);
970 		if (bucket != NULL) {
971 			bucket_drain(zone, bucket);
972 			bucket_free(zone, bucket, NULL);
973 		}
974 		bucket = cache_bucket_unload_cross(cache);
975 		if (bucket != NULL) {
976 			bucket_drain(zone, bucket);
977 			bucket_free(zone, bucket, NULL);
978 		}
979 	}
980 	bucket_cache_reclaim(zone, true);
981 }
982 
983 static void
984 cache_shrink(uma_zone_t zone, void *unused)
985 {
986 
987 	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
988 		return;
989 
990 	ZONE_LOCK(zone);
991 	zone->uz_bucket_size =
992 	    (zone->uz_bucket_size_min + zone->uz_bucket_size) / 2;
993 	ZONE_UNLOCK(zone);
994 }
995 
996 static void
997 cache_drain_safe_cpu(uma_zone_t zone, void *unused)
998 {
999 	uma_cache_t cache;
1000 	uma_bucket_t b1, b2, b3;
1001 	int domain;
1002 
1003 	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
1004 		return;
1005 
1006 	b1 = b2 = b3 = NULL;
1007 	ZONE_LOCK(zone);
1008 	critical_enter();
1009 	if (zone->uz_flags & UMA_ZONE_FIRSTTOUCH)
1010 		domain = PCPU_GET(domain);
1011 	else
1012 		domain = 0;
1013 	cache = &zone->uz_cpu[curcpu];
1014 	b1 = cache_bucket_unload_alloc(cache);
1015 	if (b1 != NULL && b1->ub_cnt != 0) {
1016 		zone_put_bucket(zone, &zone->uz_domain[domain], b1, false);
1017 		b1 = NULL;
1018 	}
1019 	b2 = cache_bucket_unload_free(cache);
1020 	if (b2 != NULL && b2->ub_cnt != 0) {
1021 		zone_put_bucket(zone, &zone->uz_domain[domain], b2, false);
1022 		b2 = NULL;
1023 	}
1024 	b3 = cache_bucket_unload_cross(cache);
1025 	critical_exit();
1026 	ZONE_UNLOCK(zone);
1027 	if (b1)
1028 		bucket_free(zone, b1, NULL);
1029 	if (b2)
1030 		bucket_free(zone, b2, NULL);
1031 	if (b3) {
1032 		bucket_drain(zone, b3);
1033 		bucket_free(zone, b3, NULL);
1034 	}
1035 }
1036 
1037 /*
1038  * Safely drain per-CPU caches of a zone(s) to alloc bucket.
1039  * This is an expensive call because it needs to bind to all CPUs
1040  * one by one and enter a critical section on each of them in order
1041  * to safely access their cache buckets.
1042  * Zone lock must not be held on call this function.
1043  */
1044 static void
1045 pcpu_cache_drain_safe(uma_zone_t zone)
1046 {
1047 	int cpu;
1048 
1049 	/*
1050 	 * Polite bucket sizes shrinking was not enough, shrink aggressively.
1051 	 */
1052 	if (zone)
1053 		cache_shrink(zone, NULL);
1054 	else
1055 		zone_foreach(cache_shrink, NULL);
1056 
1057 	CPU_FOREACH(cpu) {
1058 		thread_lock(curthread);
1059 		sched_bind(curthread, cpu);
1060 		thread_unlock(curthread);
1061 
1062 		if (zone)
1063 			cache_drain_safe_cpu(zone, NULL);
1064 		else
1065 			zone_foreach(cache_drain_safe_cpu, NULL);
1066 	}
1067 	thread_lock(curthread);
1068 	sched_unbind(curthread);
1069 	thread_unlock(curthread);
1070 }
1071 
1072 /*
1073  * Reclaim cached buckets from a zone.  All buckets are reclaimed if the caller
1074  * requested a drain, otherwise the per-domain caches are trimmed to either
1075  * estimated working set size.
1076  */
1077 static void
1078 bucket_cache_reclaim(uma_zone_t zone, bool drain)
1079 {
1080 	uma_zone_domain_t zdom;
1081 	uma_bucket_t bucket;
1082 	long target, tofree;
1083 	int i;
1084 
1085 	for (i = 0; i < vm_ndomains; i++) {
1086 		/*
1087 		 * The cross bucket is partially filled and not part of
1088 		 * the item count.  Reclaim it individually here.
1089 		 */
1090 		zdom = &zone->uz_domain[i];
1091 		ZONE_CROSS_LOCK(zone);
1092 		bucket = zdom->uzd_cross;
1093 		zdom->uzd_cross = NULL;
1094 		ZONE_CROSS_UNLOCK(zone);
1095 		if (bucket != NULL) {
1096 			bucket_drain(zone, bucket);
1097 			bucket_free(zone, bucket, NULL);
1098 		}
1099 
1100 		/*
1101 		 * Shrink the zone bucket size to ensure that the per-CPU caches
1102 		 * don't grow too large.
1103 		 */
1104 		ZONE_LOCK(zone);
1105 		if (i == 0 && zone->uz_bucket_size > zone->uz_bucket_size_min)
1106 			zone->uz_bucket_size--;
1107 
1108 		/*
1109 		 * If we were asked to drain the zone, we are done only once
1110 		 * this bucket cache is empty.  Otherwise, we reclaim items in
1111 		 * excess of the zone's estimated working set size.  If the
1112 		 * difference nitems - imin is larger than the WSS estimate,
1113 		 * then the estimate will grow at the end of this interval and
1114 		 * we ignore the historical average.
1115 		 */
1116 		target = drain ? 0 : lmax(zdom->uzd_wss, zdom->uzd_nitems -
1117 		    zdom->uzd_imin);
1118 		while (zdom->uzd_nitems > target) {
1119 			bucket = TAILQ_LAST(&zdom->uzd_buckets, uma_bucketlist);
1120 			if (bucket == NULL)
1121 				break;
1122 			tofree = bucket->ub_cnt;
1123 			TAILQ_REMOVE(&zdom->uzd_buckets, bucket, ub_link);
1124 			zdom->uzd_nitems -= tofree;
1125 
1126 			/*
1127 			 * Shift the bounds of the current WSS interval to avoid
1128 			 * perturbing the estimate.
1129 			 */
1130 			zdom->uzd_imax -= lmin(zdom->uzd_imax, tofree);
1131 			zdom->uzd_imin -= lmin(zdom->uzd_imin, tofree);
1132 
1133 			ZONE_UNLOCK(zone);
1134 			bucket_drain(zone, bucket);
1135 			bucket_free(zone, bucket, NULL);
1136 			ZONE_LOCK(zone);
1137 		}
1138 		ZONE_UNLOCK(zone);
1139 	}
1140 }
1141 
1142 static void
1143 keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
1144 {
1145 	uint8_t *mem;
1146 	int i;
1147 	uint8_t flags;
1148 
1149 	CTR4(KTR_UMA, "keg_free_slab keg %s(%p) slab %p, returning %d bytes",
1150 	    keg->uk_name, keg, slab, PAGE_SIZE * keg->uk_ppera);
1151 
1152 	mem = slab_data(slab, keg);
1153 	flags = slab->us_flags;
1154 	i = start;
1155 	if (keg->uk_fini != NULL) {
1156 		for (i--; i > -1; i--)
1157 #ifdef INVARIANTS
1158 		/*
1159 		 * trash_fini implies that dtor was trash_dtor. trash_fini
1160 		 * would check that memory hasn't been modified since free,
1161 		 * which executed trash_dtor.
1162 		 * That's why we need to run uma_dbg_kskip() check here,
1163 		 * albeit we don't make skip check for other init/fini
1164 		 * invocations.
1165 		 */
1166 		if (!uma_dbg_kskip(keg, slab_item(slab, keg, i)) ||
1167 		    keg->uk_fini != trash_fini)
1168 #endif
1169 			keg->uk_fini(slab_item(slab, keg, i), keg->uk_size);
1170 	}
1171 	if (keg->uk_flags & UMA_ZFLAG_OFFPAGE)
1172 		zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
1173 	keg->uk_freef(mem, PAGE_SIZE * keg->uk_ppera, flags);
1174 	uma_total_dec(PAGE_SIZE * keg->uk_ppera);
1175 }
1176 
1177 /*
1178  * Frees pages from a keg back to the system.  This is done on demand from
1179  * the pageout daemon.
1180  *
1181  * Returns nothing.
1182  */
1183 static void
1184 keg_drain(uma_keg_t keg)
1185 {
1186 	struct slabhead freeslabs = { 0 };
1187 	uma_domain_t dom;
1188 	uma_slab_t slab, tmp;
1189 	int i, n;
1190 
1191 	/*
1192 	 * We don't want to take pages from statically allocated kegs at this
1193 	 * time
1194 	 */
1195 	if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
1196 		return;
1197 
1198 	for (i = 0; i < vm_ndomains; i++) {
1199 		CTR4(KTR_UMA, "keg_drain %s(%p) domain %d free items: %u",
1200 		    keg->uk_name, keg, i, dom->ud_free);
1201 		n = 0;
1202 		dom = &keg->uk_domain[i];
1203 		KEG_LOCK(keg, i);
1204 		LIST_FOREACH_SAFE(slab, &dom->ud_free_slab, us_link, tmp) {
1205 			/* We have nowhere to free these to. */
1206 			if (slab->us_flags & UMA_SLAB_BOOT)
1207 				continue;
1208 			if (keg->uk_flags & UMA_ZFLAG_HASH)
1209 				UMA_HASH_REMOVE(&keg->uk_hash, slab);
1210 			n++;
1211 			LIST_REMOVE(slab, us_link);
1212 			LIST_INSERT_HEAD(&freeslabs, slab, us_link);
1213 		}
1214 		dom->ud_pages -= n * keg->uk_ppera;
1215 		dom->ud_free -= n * keg->uk_ipers;
1216 		KEG_UNLOCK(keg, i);
1217 	}
1218 
1219 	while ((slab = LIST_FIRST(&freeslabs)) != NULL) {
1220 		LIST_REMOVE(slab, us_link);
1221 		keg_free_slab(keg, slab, keg->uk_ipers);
1222 	}
1223 }
1224 
1225 static void
1226 zone_reclaim(uma_zone_t zone, int waitok, bool drain)
1227 {
1228 
1229 	/*
1230 	 * Set draining to interlock with zone_dtor() so we can release our
1231 	 * locks as we go.  Only dtor() should do a WAITOK call since it
1232 	 * is the only call that knows the structure will still be available
1233 	 * when it wakes up.
1234 	 */
1235 	ZONE_LOCK(zone);
1236 	while (zone->uz_flags & UMA_ZFLAG_RECLAIMING) {
1237 		if (waitok == M_NOWAIT)
1238 			goto out;
1239 		msleep(zone, &zone->uz_lock, PVM, "zonedrain", 1);
1240 	}
1241 	zone->uz_flags |= UMA_ZFLAG_RECLAIMING;
1242 	ZONE_UNLOCK(zone);
1243 	bucket_cache_reclaim(zone, drain);
1244 
1245 	/*
1246 	 * The DRAINING flag protects us from being freed while
1247 	 * we're running.  Normally the uma_rwlock would protect us but we
1248 	 * must be able to release and acquire the right lock for each keg.
1249 	 */
1250 	if ((zone->uz_flags & UMA_ZFLAG_CACHE) == 0)
1251 		keg_drain(zone->uz_keg);
1252 	ZONE_LOCK(zone);
1253 	zone->uz_flags &= ~UMA_ZFLAG_RECLAIMING;
1254 	wakeup(zone);
1255 out:
1256 	ZONE_UNLOCK(zone);
1257 }
1258 
1259 static void
1260 zone_drain(uma_zone_t zone, void *unused)
1261 {
1262 
1263 	zone_reclaim(zone, M_NOWAIT, true);
1264 }
1265 
1266 static void
1267 zone_trim(uma_zone_t zone, void *unused)
1268 {
1269 
1270 	zone_reclaim(zone, M_NOWAIT, false);
1271 }
1272 
1273 /*
1274  * Allocate a new slab for a keg and inserts it into the partial slab list.
1275  * The keg should be unlocked on entry.  If the allocation succeeds it will
1276  * be locked on return.
1277  *
1278  * Arguments:
1279  *	flags   Wait flags for the item initialization routine
1280  *	aflags  Wait flags for the slab allocation
1281  *
1282  * Returns:
1283  *	The slab that was allocated or NULL if there is no memory and the
1284  *	caller specified M_NOWAIT.
1285  */
1286 static uma_slab_t
1287 keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int domain, int flags,
1288     int aflags)
1289 {
1290 	uma_domain_t dom;
1291 	uma_alloc allocf;
1292 	uma_slab_t slab;
1293 	unsigned long size;
1294 	uint8_t *mem;
1295 	uint8_t sflags;
1296 	int i;
1297 
1298 	KASSERT(domain >= 0 && domain < vm_ndomains,
1299 	    ("keg_alloc_slab: domain %d out of range", domain));
1300 
1301 	allocf = keg->uk_allocf;
1302 	slab = NULL;
1303 	mem = NULL;
1304 	if (keg->uk_flags & UMA_ZFLAG_OFFPAGE) {
1305 		slab = zone_alloc_item(keg->uk_slabzone, NULL, domain, aflags);
1306 		if (slab == NULL)
1307 			goto fail;
1308 	}
1309 
1310 	/*
1311 	 * This reproduces the old vm_zone behavior of zero filling pages the
1312 	 * first time they are added to a zone.
1313 	 *
1314 	 * Malloced items are zeroed in uma_zalloc.
1315 	 */
1316 
1317 	if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
1318 		aflags |= M_ZERO;
1319 	else
1320 		aflags &= ~M_ZERO;
1321 
1322 	if (keg->uk_flags & UMA_ZONE_NODUMP)
1323 		aflags |= M_NODUMP;
1324 
1325 	/* zone is passed for legacy reasons. */
1326 	size = keg->uk_ppera * PAGE_SIZE;
1327 	mem = allocf(zone, size, domain, &sflags, aflags);
1328 	if (mem == NULL) {
1329 		if (keg->uk_flags & UMA_ZFLAG_OFFPAGE)
1330 			zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
1331 		goto fail;
1332 	}
1333 	uma_total_inc(size);
1334 
1335 	/* For HASH zones all pages go to the same uma_domain. */
1336 	if ((keg->uk_flags & UMA_ZFLAG_HASH) != 0)
1337 		domain = 0;
1338 
1339 	/* Point the slab into the allocated memory */
1340 	if (!(keg->uk_flags & UMA_ZFLAG_OFFPAGE))
1341 		slab = (uma_slab_t )(mem + keg->uk_pgoff);
1342 	else
1343 		((uma_hash_slab_t)slab)->uhs_data = mem;
1344 
1345 	if (keg->uk_flags & UMA_ZFLAG_VTOSLAB)
1346 		for (i = 0; i < keg->uk_ppera; i++)
1347 			vsetzoneslab((vm_offset_t)mem + (i * PAGE_SIZE),
1348 			    zone, slab);
1349 
1350 	slab->us_freecount = keg->uk_ipers;
1351 	slab->us_flags = sflags;
1352 	slab->us_domain = domain;
1353 
1354 	BIT_FILL(keg->uk_ipers, &slab->us_free);
1355 #ifdef INVARIANTS
1356 	BIT_ZERO(keg->uk_ipers, slab_dbg_bits(slab, keg));
1357 #endif
1358 
1359 	if (keg->uk_init != NULL) {
1360 		for (i = 0; i < keg->uk_ipers; i++)
1361 			if (keg->uk_init(slab_item(slab, keg, i),
1362 			    keg->uk_size, flags) != 0)
1363 				break;
1364 		if (i != keg->uk_ipers) {
1365 			keg_free_slab(keg, slab, i);
1366 			goto fail;
1367 		}
1368 	}
1369 	KEG_LOCK(keg, domain);
1370 
1371 	CTR3(KTR_UMA, "keg_alloc_slab: allocated slab %p for %s(%p)",
1372 	    slab, keg->uk_name, keg);
1373 
1374 	if (keg->uk_flags & UMA_ZFLAG_HASH)
1375 		UMA_HASH_INSERT(&keg->uk_hash, slab, mem);
1376 
1377 	/*
1378 	 * If we got a slab here it's safe to mark it partially used
1379 	 * and return.  We assume that the caller is going to remove
1380 	 * at least one item.
1381 	 */
1382 	dom = &keg->uk_domain[domain];
1383 	LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
1384 	dom->ud_pages += keg->uk_ppera;
1385 	dom->ud_free += keg->uk_ipers;
1386 
1387 	return (slab);
1388 
1389 fail:
1390 	return (NULL);
1391 }
1392 
1393 /*
1394  * This function is intended to be used early on in place of page_alloc() so
1395  * that we may use the boot time page cache to satisfy allocations before
1396  * the VM is ready.
1397  */
1398 static void *
1399 startup_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
1400     int wait)
1401 {
1402 	uma_keg_t keg;
1403 	void *mem;
1404 	int pages;
1405 
1406 	keg = zone->uz_keg;
1407 	/*
1408 	 * If we are in BOOT_BUCKETS or higher, than switch to real
1409 	 * allocator.  Zones with page sized slabs switch at BOOT_PAGEALLOC.
1410 	 */
1411 	switch (booted) {
1412 		case BOOT_COLD:
1413 		case BOOT_STRAPPED:
1414 			break;
1415 		case BOOT_PAGEALLOC:
1416 			if (keg->uk_ppera > 1)
1417 				break;
1418 		default:
1419 #ifdef UMA_MD_SMALL_ALLOC
1420 			keg->uk_allocf = (keg->uk_ppera > 1) ?
1421 			    page_alloc : uma_small_alloc;
1422 #else
1423 			keg->uk_allocf = page_alloc;
1424 #endif
1425 			return keg->uk_allocf(zone, bytes, domain, pflag, wait);
1426 	}
1427 
1428 	/*
1429 	 * Check our small startup cache to see if it has pages remaining.
1430 	 */
1431 	pages = howmany(bytes, PAGE_SIZE);
1432 	KASSERT(pages > 0, ("%s can't reserve 0 pages", __func__));
1433 	if (pages > boot_pages)
1434 		panic("UMA zone \"%s\": Increase vm.boot_pages", zone->uz_name);
1435 #ifdef DIAGNOSTIC
1436 	printf("%s from \"%s\", %d boot pages left\n", __func__, zone->uz_name,
1437 	    boot_pages);
1438 #endif
1439 	mem = bootmem;
1440 	boot_pages -= pages;
1441 	bootmem += pages * PAGE_SIZE;
1442 	*pflag = UMA_SLAB_BOOT;
1443 
1444 	return (mem);
1445 }
1446 
1447 /*
1448  * Allocates a number of pages from the system
1449  *
1450  * Arguments:
1451  *	bytes  The number of bytes requested
1452  *	wait  Shall we wait?
1453  *
1454  * Returns:
1455  *	A pointer to the alloced memory or possibly
1456  *	NULL if M_NOWAIT is set.
1457  */
1458 static void *
1459 page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
1460     int wait)
1461 {
1462 	void *p;	/* Returned page */
1463 
1464 	*pflag = UMA_SLAB_KERNEL;
1465 	p = (void *)kmem_malloc_domainset(DOMAINSET_FIXED(domain), bytes, wait);
1466 
1467 	return (p);
1468 }
1469 
1470 static void *
1471 pcpu_page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
1472     int wait)
1473 {
1474 	struct pglist alloctail;
1475 	vm_offset_t addr, zkva;
1476 	int cpu, flags;
1477 	vm_page_t p, p_next;
1478 #ifdef NUMA
1479 	struct pcpu *pc;
1480 #endif
1481 
1482 	MPASS(bytes == (mp_maxid + 1) * PAGE_SIZE);
1483 
1484 	TAILQ_INIT(&alloctail);
1485 	flags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
1486 	    malloc2vm_flags(wait);
1487 	*pflag = UMA_SLAB_KERNEL;
1488 	for (cpu = 0; cpu <= mp_maxid; cpu++) {
1489 		if (CPU_ABSENT(cpu)) {
1490 			p = vm_page_alloc(NULL, 0, flags);
1491 		} else {
1492 #ifndef NUMA
1493 			p = vm_page_alloc(NULL, 0, flags);
1494 #else
1495 			pc = pcpu_find(cpu);
1496 			p = vm_page_alloc_domain(NULL, 0, pc->pc_domain, flags);
1497 			if (__predict_false(p == NULL))
1498 				p = vm_page_alloc(NULL, 0, flags);
1499 #endif
1500 		}
1501 		if (__predict_false(p == NULL))
1502 			goto fail;
1503 		TAILQ_INSERT_TAIL(&alloctail, p, listq);
1504 	}
1505 	if ((addr = kva_alloc(bytes)) == 0)
1506 		goto fail;
1507 	zkva = addr;
1508 	TAILQ_FOREACH(p, &alloctail, listq) {
1509 		pmap_qenter(zkva, &p, 1);
1510 		zkva += PAGE_SIZE;
1511 	}
1512 	return ((void*)addr);
1513 fail:
1514 	TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
1515 		vm_page_unwire_noq(p);
1516 		vm_page_free(p);
1517 	}
1518 	return (NULL);
1519 }
1520 
1521 /*
1522  * Allocates a number of pages from within an object
1523  *
1524  * Arguments:
1525  *	bytes  The number of bytes requested
1526  *	wait   Shall we wait?
1527  *
1528  * Returns:
1529  *	A pointer to the alloced memory or possibly
1530  *	NULL if M_NOWAIT is set.
1531  */
1532 static void *
1533 noobj_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags,
1534     int wait)
1535 {
1536 	TAILQ_HEAD(, vm_page) alloctail;
1537 	u_long npages;
1538 	vm_offset_t retkva, zkva;
1539 	vm_page_t p, p_next;
1540 	uma_keg_t keg;
1541 
1542 	TAILQ_INIT(&alloctail);
1543 	keg = zone->uz_keg;
1544 
1545 	npages = howmany(bytes, PAGE_SIZE);
1546 	while (npages > 0) {
1547 		p = vm_page_alloc_domain(NULL, 0, domain, VM_ALLOC_INTERRUPT |
1548 		    VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
1549 		    ((wait & M_WAITOK) != 0 ? VM_ALLOC_WAITOK :
1550 		    VM_ALLOC_NOWAIT));
1551 		if (p != NULL) {
1552 			/*
1553 			 * Since the page does not belong to an object, its
1554 			 * listq is unused.
1555 			 */
1556 			TAILQ_INSERT_TAIL(&alloctail, p, listq);
1557 			npages--;
1558 			continue;
1559 		}
1560 		/*
1561 		 * Page allocation failed, free intermediate pages and
1562 		 * exit.
1563 		 */
1564 		TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
1565 			vm_page_unwire_noq(p);
1566 			vm_page_free(p);
1567 		}
1568 		return (NULL);
1569 	}
1570 	*flags = UMA_SLAB_PRIV;
1571 	zkva = keg->uk_kva +
1572 	    atomic_fetchadd_long(&keg->uk_offset, round_page(bytes));
1573 	retkva = zkva;
1574 	TAILQ_FOREACH(p, &alloctail, listq) {
1575 		pmap_qenter(zkva, &p, 1);
1576 		zkva += PAGE_SIZE;
1577 	}
1578 
1579 	return ((void *)retkva);
1580 }
1581 
1582 /*
1583  * Frees a number of pages to the system
1584  *
1585  * Arguments:
1586  *	mem   A pointer to the memory to be freed
1587  *	size  The size of the memory being freed
1588  *	flags The original p->us_flags field
1589  *
1590  * Returns:
1591  *	Nothing
1592  */
1593 static void
1594 page_free(void *mem, vm_size_t size, uint8_t flags)
1595 {
1596 
1597 	if ((flags & UMA_SLAB_KERNEL) == 0)
1598 		panic("UMA: page_free used with invalid flags %x", flags);
1599 
1600 	kmem_free((vm_offset_t)mem, size);
1601 }
1602 
1603 /*
1604  * Frees pcpu zone allocations
1605  *
1606  * Arguments:
1607  *	mem   A pointer to the memory to be freed
1608  *	size  The size of the memory being freed
1609  *	flags The original p->us_flags field
1610  *
1611  * Returns:
1612  *	Nothing
1613  */
1614 static void
1615 pcpu_page_free(void *mem, vm_size_t size, uint8_t flags)
1616 {
1617 	vm_offset_t sva, curva;
1618 	vm_paddr_t paddr;
1619 	vm_page_t m;
1620 
1621 	MPASS(size == (mp_maxid+1)*PAGE_SIZE);
1622 	sva = (vm_offset_t)mem;
1623 	for (curva = sva; curva < sva + size; curva += PAGE_SIZE) {
1624 		paddr = pmap_kextract(curva);
1625 		m = PHYS_TO_VM_PAGE(paddr);
1626 		vm_page_unwire_noq(m);
1627 		vm_page_free(m);
1628 	}
1629 	pmap_qremove(sva, size >> PAGE_SHIFT);
1630 	kva_free(sva, size);
1631 }
1632 
1633 
1634 /*
1635  * Zero fill initializer
1636  *
1637  * Arguments/Returns follow uma_init specifications
1638  */
1639 static int
1640 zero_init(void *mem, int size, int flags)
1641 {
1642 	bzero(mem, size);
1643 	return (0);
1644 }
1645 
1646 #ifdef INVARIANTS
1647 struct noslabbits *
1648 slab_dbg_bits(uma_slab_t slab, uma_keg_t keg)
1649 {
1650 
1651 	return ((void *)((char *)&slab->us_free + BITSET_SIZE(keg->uk_ipers)));
1652 }
1653 #endif
1654 
1655 /*
1656  * Actual size of embedded struct slab (!OFFPAGE).
1657  */
1658 size_t
1659 slab_sizeof(int nitems)
1660 {
1661 	size_t s;
1662 
1663 	s = sizeof(struct uma_slab) + BITSET_SIZE(nitems) * SLAB_BITSETS;
1664 	return (roundup(s, UMA_ALIGN_PTR + 1));
1665 }
1666 
1667 /*
1668  * Size of memory for embedded slabs (!OFFPAGE).
1669  */
1670 size_t
1671 slab_space(int nitems)
1672 {
1673 	return (UMA_SLAB_SIZE - slab_sizeof(nitems));
1674 }
1675 
1676 #define	UMA_FIXPT_SHIFT	31
1677 #define	UMA_FRAC_FIXPT(n, d)						\
1678 	((uint32_t)(((uint64_t)(n) << UMA_FIXPT_SHIFT) / (d)))
1679 #define	UMA_FIXPT_PCT(f)						\
1680 	((u_int)(((uint64_t)100 * (f)) >> UMA_FIXPT_SHIFT))
1681 #define	UMA_PCT_FIXPT(pct)	UMA_FRAC_FIXPT((pct), 100)
1682 #define	UMA_MIN_EFF	UMA_PCT_FIXPT(100 - UMA_MAX_WASTE)
1683 
1684 /*
1685  * Compute the number of items that will fit in a slab.  If hdr is true, the
1686  * item count may be limited to provide space in the slab for an inline slab
1687  * header.  Otherwise, all slab space will be provided for item storage.
1688  */
1689 static u_int
1690 slab_ipers_hdr(u_int size, u_int rsize, u_int slabsize, bool hdr)
1691 {
1692 	u_int ipers;
1693 	u_int padpi;
1694 
1695 	/* The padding between items is not needed after the last item. */
1696 	padpi = rsize - size;
1697 
1698 	if (hdr) {
1699 		/*
1700 		 * Start with the maximum item count and remove items until
1701 		 * the slab header first alongside the allocatable memory.
1702 		 */
1703 		for (ipers = MIN(SLAB_MAX_SETSIZE,
1704 		    (slabsize + padpi - slab_sizeof(1)) / rsize);
1705 		    ipers > 0 &&
1706 		    ipers * rsize - padpi + slab_sizeof(ipers) > slabsize;
1707 		    ipers--)
1708 			continue;
1709 	} else {
1710 		ipers = MIN((slabsize + padpi) / rsize, SLAB_MAX_SETSIZE);
1711 	}
1712 
1713 	return (ipers);
1714 }
1715 
1716 /*
1717  * Compute the number of items that will fit in a slab for a startup zone.
1718  */
1719 int
1720 slab_ipers(size_t size, int align)
1721 {
1722 	int rsize;
1723 
1724 	rsize = roundup(size, align + 1); /* Assume no CACHESPREAD */
1725 	return (slab_ipers_hdr(size, rsize, UMA_SLAB_SIZE, true));
1726 }
1727 
1728 /*
1729  * Determine the format of a uma keg.  This determines where the slab header
1730  * will be placed (inline or offpage) and calculates ipers, rsize, and ppera.
1731  *
1732  * Arguments
1733  *	keg  The zone we should initialize
1734  *
1735  * Returns
1736  *	Nothing
1737  */
1738 static void
1739 keg_layout(uma_keg_t keg)
1740 {
1741 	u_int alignsize;
1742 	u_int eff;
1743 	u_int eff_offpage;
1744 	u_int format;
1745 	u_int ipers;
1746 	u_int ipers_offpage;
1747 	u_int pages;
1748 	u_int rsize;
1749 	u_int slabsize;
1750 
1751 	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0 ||
1752 	    (keg->uk_size <= UMA_PCPU_ALLOC_SIZE &&
1753 	     (keg->uk_flags & UMA_ZONE_CACHESPREAD) == 0),
1754 	    ("%s: cannot configure for PCPU: keg=%s, size=%u, flags=0x%b",
1755 	     __func__, keg->uk_name, keg->uk_size, keg->uk_flags,
1756 	     PRINT_UMA_ZFLAGS));
1757 	KASSERT((keg->uk_flags &
1758 	    (UMA_ZFLAG_INTERNAL | UMA_ZFLAG_CACHEONLY)) == 0 ||
1759 	    (keg->uk_flags & (UMA_ZONE_NOTOUCH | UMA_ZONE_PCPU)) == 0,
1760 	    ("%s: incompatible flags 0x%b", __func__, keg->uk_flags,
1761 	     PRINT_UMA_ZFLAGS));
1762 
1763 	alignsize = keg->uk_align + 1;
1764 	format = 0;
1765 	ipers = 0;
1766 
1767 	/*
1768 	 * Calculate the size of each allocation (rsize) according to
1769 	 * alignment.  If the requested size is smaller than we have
1770 	 * allocation bits for we round it up.
1771 	 */
1772 	rsize = MAX(keg->uk_size, UMA_SLAB_SIZE / SLAB_MAX_SETSIZE);
1773 	rsize = roundup2(rsize, alignsize);
1774 
1775 	if ((keg->uk_flags & UMA_ZONE_PCPU) != 0) {
1776 		slabsize = UMA_PCPU_ALLOC_SIZE;
1777 		pages = mp_maxid + 1;
1778 	} else if ((keg->uk_flags & UMA_ZONE_CACHESPREAD) != 0) {
1779 		/*
1780 		 * We want one item to start on every align boundary in a page.
1781 		 * To do this we will span pages.  We will also extend the item
1782 		 * by the size of align if it is an even multiple of align.
1783 		 * Otherwise, it would fall on the same boundary every time.
1784 		 */
1785 		if ((rsize & alignsize) == 0)
1786 			rsize += alignsize;
1787 		slabsize = rsize * (PAGE_SIZE / alignsize);
1788 		slabsize = MIN(slabsize, rsize * SLAB_MAX_SETSIZE);
1789 		slabsize = MIN(slabsize, UMA_CACHESPREAD_MAX_SIZE);
1790 		pages = howmany(slabsize, PAGE_SIZE);
1791 		slabsize = ptoa(pages);
1792 	} else {
1793 		/*
1794 		 * Choose a slab size of as many pages as it takes to represent
1795 		 * a single item.  We will then try to fit as many additional
1796 		 * items into the slab as possible.  At some point, we may want
1797 		 * to increase the slab size for awkward item sizes in order to
1798 		 * increase efficiency.
1799 		 */
1800 		pages = howmany(keg->uk_size, PAGE_SIZE);
1801 		slabsize = ptoa(pages);
1802 	}
1803 
1804 	/* Evaluate an inline slab layout. */
1805 	if ((keg->uk_flags & (UMA_ZONE_NOTOUCH | UMA_ZONE_PCPU)) == 0)
1806 		ipers = slab_ipers_hdr(keg->uk_size, rsize, slabsize, true);
1807 
1808 	/* TODO: vm_page-embedded slab. */
1809 
1810 	/*
1811 	 * We can't do OFFPAGE if we're internal or if we've been
1812 	 * asked to not go to the VM for buckets.  If we do this we
1813 	 * may end up going to the VM  for slabs which we do not
1814 	 * want to do if we're UMA_ZFLAG_CACHEONLY as a result
1815 	 * of UMA_ZONE_VM, which clearly forbids it.
1816 	 */
1817 	if ((keg->uk_flags &
1818 	    (UMA_ZFLAG_INTERNAL | UMA_ZFLAG_CACHEONLY)) != 0) {
1819 		if (ipers == 0) {
1820 			/* We need an extra page for the slab header. */
1821 			pages++;
1822 			slabsize = ptoa(pages);
1823 			ipers = slab_ipers_hdr(keg->uk_size, rsize, slabsize,
1824 			    true);
1825 		}
1826 		goto out;
1827 	}
1828 
1829 	/*
1830 	 * See if using an OFFPAGE slab will improve our efficiency.
1831 	 * Only do this if we are below our efficiency threshold.
1832 	 *
1833 	 * XXX We could try growing slabsize to limit max waste as well.
1834 	 * Historically this was not done because the VM could not
1835 	 * efficiently handle contiguous allocations.
1836 	 */
1837 	eff = UMA_FRAC_FIXPT(ipers * rsize, slabsize);
1838 	ipers_offpage = slab_ipers_hdr(keg->uk_size, rsize, slabsize, false);
1839 	eff_offpage = UMA_FRAC_FIXPT(ipers_offpage * rsize,
1840 	    slabsize + slab_sizeof(SLAB_MAX_SETSIZE));
1841 	if (ipers == 0 || (eff < UMA_MIN_EFF && eff < eff_offpage)) {
1842 		CTR5(KTR_UMA, "UMA decided we need offpage slab headers for "
1843 		    "keg: %s(%p), minimum efficiency allowed = %u%%, "
1844 		    "old efficiency = %u%%, offpage efficiency = %u%%\n",
1845 		    keg->uk_name, keg, UMA_FIXPT_PCT(UMA_MIN_EFF),
1846 		    UMA_FIXPT_PCT(eff), UMA_FIXPT_PCT(eff_offpage));
1847 		format = UMA_ZFLAG_OFFPAGE;
1848 		ipers = ipers_offpage;
1849 	}
1850 
1851 out:
1852 	/*
1853 	 * How do we find the slab header if it is offpage or if not all item
1854 	 * start addresses are in the same page?  We could solve the latter
1855 	 * case with vaddr alignment, but we don't.
1856 	 */
1857 	if ((format & UMA_ZFLAG_OFFPAGE) != 0 ||
1858 	    (ipers - 1) * rsize >= PAGE_SIZE) {
1859 		if ((keg->uk_flags & UMA_ZONE_NOTPAGE) != 0)
1860 			format |= UMA_ZFLAG_HASH;
1861 		else
1862 			format |= UMA_ZFLAG_VTOSLAB;
1863 	}
1864 	keg->uk_ipers = ipers;
1865 	keg->uk_rsize = rsize;
1866 	keg->uk_flags |= format;
1867 	keg->uk_ppera = pages;
1868 	CTR6(KTR_UMA, "%s: keg=%s, flags=%#x, rsize=%u, ipers=%u, ppera=%u\n",
1869 	    __func__, keg->uk_name, keg->uk_flags, rsize, ipers, pages);
1870 	KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_MAX_SETSIZE,
1871 	    ("%s: keg=%s, flags=0x%b, rsize=%u, ipers=%u, ppera=%u", __func__,
1872 	     keg->uk_name, keg->uk_flags, PRINT_UMA_ZFLAGS, rsize, ipers,
1873 	     pages));
1874 }
1875 
1876 /*
1877  * Keg header ctor.  This initializes all fields, locks, etc.  And inserts
1878  * the keg onto the global keg list.
1879  *
1880  * Arguments/Returns follow uma_ctor specifications
1881  *	udata  Actually uma_kctor_args
1882  */
1883 static int
1884 keg_ctor(void *mem, int size, void *udata, int flags)
1885 {
1886 	struct uma_kctor_args *arg = udata;
1887 	uma_keg_t keg = mem;
1888 	uma_zone_t zone;
1889 	int i;
1890 
1891 	bzero(keg, size);
1892 	keg->uk_size = arg->size;
1893 	keg->uk_init = arg->uminit;
1894 	keg->uk_fini = arg->fini;
1895 	keg->uk_align = arg->align;
1896 	keg->uk_reserve = 0;
1897 	keg->uk_flags = arg->flags;
1898 	keg->uk_slabzone = NULL;
1899 
1900 	/*
1901 	 * We use a global round-robin policy by default.  Zones with
1902 	 * UMA_ZONE_FIRSTTOUCH set will use first-touch instead, in which
1903 	 * case the iterator is never run.
1904 	 */
1905 	keg->uk_dr.dr_policy = DOMAINSET_RR();
1906 	keg->uk_dr.dr_iter = 0;
1907 
1908 	/*
1909 	 * The master zone is passed to us at keg-creation time.
1910 	 */
1911 	zone = arg->zone;
1912 	keg->uk_name = zone->uz_name;
1913 
1914 	if (arg->flags & UMA_ZONE_VM)
1915 		keg->uk_flags |= UMA_ZFLAG_CACHEONLY;
1916 
1917 	if (arg->flags & UMA_ZONE_ZINIT)
1918 		keg->uk_init = zero_init;
1919 
1920 	if (arg->flags & UMA_ZONE_MALLOC)
1921 		keg->uk_flags |= UMA_ZFLAG_VTOSLAB;
1922 
1923 #ifndef SMP
1924 	keg->uk_flags &= ~UMA_ZONE_PCPU;
1925 #endif
1926 
1927 	keg_layout(keg);
1928 
1929 	/*
1930 	 * Use a first-touch NUMA policy for all kegs that pmap_extract()
1931 	 * will work on with the exception of critical VM structures
1932 	 * necessary for paging.
1933 	 *
1934 	 * Zones may override the default by specifying either.
1935 	 */
1936 #ifdef NUMA
1937 	if ((keg->uk_flags &
1938 	    (UMA_ZFLAG_HASH | UMA_ZONE_VM | UMA_ZONE_ROUNDROBIN)) == 0)
1939 		keg->uk_flags |= UMA_ZONE_FIRSTTOUCH;
1940 	else if ((keg->uk_flags & UMA_ZONE_FIRSTTOUCH) == 0)
1941 		keg->uk_flags |= UMA_ZONE_ROUNDROBIN;
1942 #endif
1943 
1944 	if (keg->uk_flags & UMA_ZFLAG_OFFPAGE)
1945 		keg->uk_slabzone = slabzone;
1946 
1947 	/*
1948 	 * If we haven't booted yet we need allocations to go through the
1949 	 * startup cache until the vm is ready.
1950 	 */
1951 	if (booted < BOOT_PAGEALLOC)
1952 		keg->uk_allocf = startup_alloc;
1953 #ifdef UMA_MD_SMALL_ALLOC
1954 	else if (keg->uk_ppera == 1)
1955 		keg->uk_allocf = uma_small_alloc;
1956 #endif
1957 	else if (keg->uk_flags & UMA_ZONE_PCPU)
1958 		keg->uk_allocf = pcpu_page_alloc;
1959 	else
1960 		keg->uk_allocf = page_alloc;
1961 #ifdef UMA_MD_SMALL_ALLOC
1962 	if (keg->uk_ppera == 1)
1963 		keg->uk_freef = uma_small_free;
1964 	else
1965 #endif
1966 	if (keg->uk_flags & UMA_ZONE_PCPU)
1967 		keg->uk_freef = pcpu_page_free;
1968 	else
1969 		keg->uk_freef = page_free;
1970 
1971 	/*
1972 	 * Initialize keg's locks.
1973 	 */
1974 	for (i = 0; i < vm_ndomains; i++)
1975 		KEG_LOCK_INIT(keg, i, (arg->flags & UMA_ZONE_MTXCLASS));
1976 
1977 	/*
1978 	 * If we're putting the slab header in the actual page we need to
1979 	 * figure out where in each page it goes.  See slab_sizeof
1980 	 * definition.
1981 	 */
1982 	if (!(keg->uk_flags & UMA_ZFLAG_OFFPAGE)) {
1983 		size_t shsize;
1984 
1985 		shsize = slab_sizeof(keg->uk_ipers);
1986 		keg->uk_pgoff = (PAGE_SIZE * keg->uk_ppera) - shsize;
1987 		/*
1988 		 * The only way the following is possible is if with our
1989 		 * UMA_ALIGN_PTR adjustments we are now bigger than
1990 		 * UMA_SLAB_SIZE.  I haven't checked whether this is
1991 		 * mathematically possible for all cases, so we make
1992 		 * sure here anyway.
1993 		 */
1994 		KASSERT(keg->uk_pgoff + shsize <= PAGE_SIZE * keg->uk_ppera,
1995 		    ("zone %s ipers %d rsize %d size %d slab won't fit",
1996 		    zone->uz_name, keg->uk_ipers, keg->uk_rsize, keg->uk_size));
1997 	}
1998 
1999 	if (keg->uk_flags & UMA_ZFLAG_HASH)
2000 		hash_alloc(&keg->uk_hash, 0);
2001 
2002 	CTR3(KTR_UMA, "keg_ctor %p zone %s(%p)\n", keg, zone->uz_name, zone);
2003 
2004 	LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);
2005 
2006 	rw_wlock(&uma_rwlock);
2007 	LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
2008 	rw_wunlock(&uma_rwlock);
2009 	return (0);
2010 }
2011 
2012 static void
2013 zone_alloc_counters(uma_zone_t zone, void *unused)
2014 {
2015 
2016 	zone->uz_allocs = counter_u64_alloc(M_WAITOK);
2017 	zone->uz_frees = counter_u64_alloc(M_WAITOK);
2018 	zone->uz_fails = counter_u64_alloc(M_WAITOK);
2019 }
2020 
2021 static void
2022 zone_alloc_sysctl(uma_zone_t zone, void *unused)
2023 {
2024 	uma_zone_domain_t zdom;
2025 	uma_domain_t dom;
2026 	uma_keg_t keg;
2027 	struct sysctl_oid *oid, *domainoid;
2028 	int domains, i, cnt;
2029 	static const char *nokeg = "cache zone";
2030 	char *c;
2031 
2032 	/*
2033 	 * Make a sysctl safe copy of the zone name by removing
2034 	 * any special characters and handling dups by appending
2035 	 * an index.
2036 	 */
2037 	if (zone->uz_namecnt != 0) {
2038 		/* Count the number of decimal digits and '_' separator. */
2039 		for (i = 1, cnt = zone->uz_namecnt; cnt != 0; i++)
2040 			cnt /= 10;
2041 		zone->uz_ctlname = malloc(strlen(zone->uz_name) + i + 1,
2042 		    M_UMA, M_WAITOK);
2043 		sprintf(zone->uz_ctlname, "%s_%d", zone->uz_name,
2044 		    zone->uz_namecnt);
2045 	} else
2046 		zone->uz_ctlname = strdup(zone->uz_name, M_UMA);
2047 	for (c = zone->uz_ctlname; *c != '\0'; c++)
2048 		if (strchr("./\\ -", *c) != NULL)
2049 			*c = '_';
2050 
2051 	/*
2052 	 * Basic parameters at the root.
2053 	 */
2054 	zone->uz_oid = SYSCTL_ADD_NODE(NULL, SYSCTL_STATIC_CHILDREN(_vm_uma),
2055 	    OID_AUTO, zone->uz_ctlname, CTLFLAG_RD, NULL, "");
2056 	oid = zone->uz_oid;
2057 	SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2058 	    "size", CTLFLAG_RD, &zone->uz_size, 0, "Allocation size");
2059 	SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2060 	    "flags", CTLFLAG_RD | CTLTYPE_STRING | CTLFLAG_MPSAFE,
2061 	    zone, 0, sysctl_handle_uma_zone_flags, "A",
2062 	    "Allocator configuration flags");
2063 	SYSCTL_ADD_U16(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2064 	    "bucket_size", CTLFLAG_RD, &zone->uz_bucket_size, 0,
2065 	    "Desired per-cpu cache size");
2066 	SYSCTL_ADD_U16(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2067 	    "bucket_size_max", CTLFLAG_RD, &zone->uz_bucket_size_max, 0,
2068 	    "Maximum allowed per-cpu cache size");
2069 
2070 	/*
2071 	 * keg if present.
2072 	 */
2073 	if ((zone->uz_flags & UMA_ZFLAG_HASH) == 0)
2074 		domains = vm_ndomains;
2075 	else
2076 		domains = 1;
2077 	oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(zone->uz_oid), OID_AUTO,
2078 	    "keg", CTLFLAG_RD, NULL, "");
2079 	keg = zone->uz_keg;
2080 	if ((zone->uz_flags & UMA_ZFLAG_CACHE) == 0) {
2081 		SYSCTL_ADD_CONST_STRING(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2082 		    "name", CTLFLAG_RD, keg->uk_name, "Keg name");
2083 		SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2084 		    "rsize", CTLFLAG_RD, &keg->uk_rsize, 0,
2085 		    "Real object size with alignment");
2086 		SYSCTL_ADD_U16(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2087 		    "ppera", CTLFLAG_RD, &keg->uk_ppera, 0,
2088 		    "pages per-slab allocation");
2089 		SYSCTL_ADD_U16(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2090 		    "ipers", CTLFLAG_RD, &keg->uk_ipers, 0,
2091 		    "items available per-slab");
2092 		SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2093 		    "align", CTLFLAG_RD, &keg->uk_align, 0,
2094 		    "item alignment mask");
2095 		SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2096 		    "efficiency", CTLFLAG_RD | CTLTYPE_INT | CTLFLAG_MPSAFE,
2097 		    keg, 0, sysctl_handle_uma_slab_efficiency, "I",
2098 		    "Slab utilization (100 - internal fragmentation %)");
2099 		domainoid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(oid),
2100 		    OID_AUTO, "domain", CTLFLAG_RD, NULL, "");
2101 		for (i = 0; i < domains; i++) {
2102 			dom = &keg->uk_domain[i];
2103 			oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(domainoid),
2104 			    OID_AUTO, VM_DOMAIN(i)->vmd_name, CTLFLAG_RD,
2105 			    NULL, "");
2106 			SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2107 			    "pages", CTLFLAG_RD, &dom->ud_pages, 0,
2108 			    "Total pages currently allocated from VM");
2109 			SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2110 			    "free", CTLFLAG_RD, &dom->ud_free, 0,
2111 			    "items free in the slab layer");
2112 		}
2113 	} else
2114 		SYSCTL_ADD_CONST_STRING(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2115 		    "name", CTLFLAG_RD, nokeg, "Keg name");
2116 
2117 	/*
2118 	 * Information about zone limits.
2119 	 */
2120 	oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(zone->uz_oid), OID_AUTO,
2121 	    "limit", CTLFLAG_RD, NULL, "");
2122 	SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2123 	    "items", CTLFLAG_RD | CTLTYPE_U64 | CTLFLAG_MPSAFE,
2124 	    zone, 0, sysctl_handle_uma_zone_items, "QU",
2125 	    "current number of allocated items if limit is set");
2126 	SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2127 	    "max_items", CTLFLAG_RD, &zone->uz_max_items, 0,
2128 	    "Maximum number of cached items");
2129 	SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2130 	    "sleepers", CTLFLAG_RD, &zone->uz_sleepers, 0,
2131 	    "Number of threads sleeping at limit");
2132 	SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2133 	    "sleeps", CTLFLAG_RD, &zone->uz_sleeps, 0,
2134 	    "Total zone limit sleeps");
2135 	SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2136 	    "bucket_max", CTLFLAG_RD, &zone->uz_bkt_max, 0,
2137 	    "Maximum number of items in the bucket cache");
2138 	SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2139 	    "bucket_cnt", CTLFLAG_RD, &zone->uz_bkt_count, 0,
2140 	    "Number of items in the bucket cache");
2141 
2142 	/*
2143 	 * Per-domain zone information.
2144 	 */
2145 	domainoid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(zone->uz_oid),
2146 	    OID_AUTO, "domain", CTLFLAG_RD, NULL, "");
2147 	if ((zone->uz_flags & UMA_ZONE_FIRSTTOUCH) == 0)
2148 		domains = 1;
2149 	for (i = 0; i < domains; i++) {
2150 		zdom = &zone->uz_domain[i];
2151 		oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(domainoid),
2152 		    OID_AUTO, VM_DOMAIN(i)->vmd_name, CTLFLAG_RD, NULL, "");
2153 		SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2154 		    "nitems", CTLFLAG_RD, &zdom->uzd_nitems,
2155 		    "number of items in this domain");
2156 		SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2157 		    "imax", CTLFLAG_RD, &zdom->uzd_imax,
2158 		    "maximum item count in this period");
2159 		SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2160 		    "imin", CTLFLAG_RD, &zdom->uzd_imin,
2161 		    "minimum item count in this period");
2162 		SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2163 		    "wss", CTLFLAG_RD, &zdom->uzd_wss,
2164 		    "Working set size");
2165 	}
2166 
2167 	/*
2168 	 * General statistics.
2169 	 */
2170 	oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(zone->uz_oid), OID_AUTO,
2171 	    "stats", CTLFLAG_RD, NULL, "");
2172 	SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2173 	    "current", CTLFLAG_RD | CTLTYPE_INT | CTLFLAG_MPSAFE,
2174 	    zone, 1, sysctl_handle_uma_zone_cur, "I",
2175 	    "Current number of allocated items");
2176 	SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2177 	    "allocs", CTLFLAG_RD | CTLTYPE_U64 | CTLFLAG_MPSAFE,
2178 	    zone, 0, sysctl_handle_uma_zone_allocs, "QU",
2179 	    "Total allocation calls");
2180 	SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2181 	    "frees", CTLFLAG_RD | CTLTYPE_U64 | CTLFLAG_MPSAFE,
2182 	    zone, 0, sysctl_handle_uma_zone_frees, "QU",
2183 	    "Total free calls");
2184 	SYSCTL_ADD_COUNTER_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2185 	    "fails", CTLFLAG_RD, &zone->uz_fails,
2186 	    "Number of allocation failures");
2187 	SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
2188 	    "xdomain", CTLFLAG_RD, &zone->uz_xdomain, 0,
2189 	    "Free calls from the wrong domain");
2190 }
2191 
2192 struct uma_zone_count {
2193 	const char	*name;
2194 	int		count;
2195 };
2196 
2197 static void
2198 zone_count(uma_zone_t zone, void *arg)
2199 {
2200 	struct uma_zone_count *cnt;
2201 
2202 	cnt = arg;
2203 	/*
2204 	 * Some zones are rapidly created with identical names and
2205 	 * destroyed out of order.  This can lead to gaps in the count.
2206 	 * Use one greater than the maximum observed for this name.
2207 	 */
2208 	if (strcmp(zone->uz_name, cnt->name) == 0)
2209 		cnt->count = MAX(cnt->count,
2210 		    zone->uz_namecnt + 1);
2211 }
2212 
2213 static void
2214 zone_update_caches(uma_zone_t zone)
2215 {
2216 	int i;
2217 
2218 	for (i = 0; i <= mp_maxid; i++) {
2219 		cache_set_uz_size(&zone->uz_cpu[i], zone->uz_size);
2220 		cache_set_uz_flags(&zone->uz_cpu[i], zone->uz_flags);
2221 	}
2222 }
2223 
2224 /*
2225  * Zone header ctor.  This initializes all fields, locks, etc.
2226  *
2227  * Arguments/Returns follow uma_ctor specifications
2228  *	udata  Actually uma_zctor_args
2229  */
2230 static int
2231 zone_ctor(void *mem, int size, void *udata, int flags)
2232 {
2233 	struct uma_zone_count cnt;
2234 	struct uma_zctor_args *arg = udata;
2235 	uma_zone_t zone = mem;
2236 	uma_zone_t z;
2237 	uma_keg_t keg;
2238 	int i;
2239 
2240 	bzero(zone, size);
2241 	zone->uz_name = arg->name;
2242 	zone->uz_ctor = arg->ctor;
2243 	zone->uz_dtor = arg->dtor;
2244 	zone->uz_init = NULL;
2245 	zone->uz_fini = NULL;
2246 	zone->uz_sleeps = 0;
2247 	zone->uz_xdomain = 0;
2248 	zone->uz_bucket_size = 0;
2249 	zone->uz_bucket_size_min = 0;
2250 	zone->uz_bucket_size_max = BUCKET_MAX;
2251 	zone->uz_flags = 0;
2252 	zone->uz_warning = NULL;
2253 	/* The domain structures follow the cpu structures. */
2254 	zone->uz_domain = (struct uma_zone_domain *)&zone->uz_cpu[mp_ncpus];
2255 	zone->uz_bkt_max = ULONG_MAX;
2256 	timevalclear(&zone->uz_ratecheck);
2257 
2258 	/* Count the number of duplicate names. */
2259 	cnt.name = arg->name;
2260 	cnt.count = 0;
2261 	zone_foreach(zone_count, &cnt);
2262 	zone->uz_namecnt = cnt.count;
2263 	ZONE_LOCK_INIT(zone, (arg->flags & UMA_ZONE_MTXCLASS));
2264 	ZONE_CROSS_LOCK_INIT(zone);
2265 
2266 	for (i = 0; i < vm_ndomains; i++)
2267 		TAILQ_INIT(&zone->uz_domain[i].uzd_buckets);
2268 
2269 #ifdef INVARIANTS
2270 	if (arg->uminit == trash_init && arg->fini == trash_fini)
2271 		zone->uz_flags |= UMA_ZFLAG_TRASH | UMA_ZFLAG_CTORDTOR;
2272 #endif
2273 
2274 	/*
2275 	 * This is a pure cache zone, no kegs.
2276 	 */
2277 	if (arg->import) {
2278 		KASSERT((arg->flags & UMA_ZFLAG_CACHE) != 0,
2279 		    ("zone_ctor: Import specified for non-cache zone."));
2280 		if (arg->flags & UMA_ZONE_VM)
2281 			arg->flags |= UMA_ZFLAG_CACHEONLY;
2282 		zone->uz_flags = arg->flags;
2283 		zone->uz_size = arg->size;
2284 		zone->uz_import = arg->import;
2285 		zone->uz_release = arg->release;
2286 		zone->uz_arg = arg->arg;
2287 		rw_wlock(&uma_rwlock);
2288 		LIST_INSERT_HEAD(&uma_cachezones, zone, uz_link);
2289 		rw_wunlock(&uma_rwlock);
2290 		goto out;
2291 	}
2292 
2293 	/*
2294 	 * Use the regular zone/keg/slab allocator.
2295 	 */
2296 	zone->uz_import = zone_import;
2297 	zone->uz_release = zone_release;
2298 	zone->uz_arg = zone;
2299 	keg = arg->keg;
2300 
2301 	if (arg->flags & UMA_ZONE_SECONDARY) {
2302 		KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0,
2303 		    ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
2304 		KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
2305 		zone->uz_init = arg->uminit;
2306 		zone->uz_fini = arg->fini;
2307 		zone->uz_flags |= UMA_ZONE_SECONDARY;
2308 		rw_wlock(&uma_rwlock);
2309 		ZONE_LOCK(zone);
2310 		LIST_FOREACH(z, &keg->uk_zones, uz_link) {
2311 			if (LIST_NEXT(z, uz_link) == NULL) {
2312 				LIST_INSERT_AFTER(z, zone, uz_link);
2313 				break;
2314 			}
2315 		}
2316 		ZONE_UNLOCK(zone);
2317 		rw_wunlock(&uma_rwlock);
2318 	} else if (keg == NULL) {
2319 		if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
2320 		    arg->align, arg->flags)) == NULL)
2321 			return (ENOMEM);
2322 	} else {
2323 		struct uma_kctor_args karg;
2324 		int error;
2325 
2326 		/* We should only be here from uma_startup() */
2327 		karg.size = arg->size;
2328 		karg.uminit = arg->uminit;
2329 		karg.fini = arg->fini;
2330 		karg.align = arg->align;
2331 		karg.flags = arg->flags;
2332 		karg.zone = zone;
2333 		error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
2334 		    flags);
2335 		if (error)
2336 			return (error);
2337 	}
2338 
2339 	/* Inherit properties from the keg. */
2340 	zone->uz_keg = keg;
2341 	zone->uz_size = keg->uk_size;
2342 	zone->uz_flags |= (keg->uk_flags &
2343 	    (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT));
2344 
2345 out:
2346 	if (__predict_true(booted >= BOOT_RUNNING)) {
2347 		zone_alloc_counters(zone, NULL);
2348 		zone_alloc_sysctl(zone, NULL);
2349 	} else {
2350 		zone->uz_allocs = EARLY_COUNTER;
2351 		zone->uz_frees = EARLY_COUNTER;
2352 		zone->uz_fails = EARLY_COUNTER;
2353 	}
2354 
2355 	KASSERT((arg->flags & (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET)) !=
2356 	    (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET),
2357 	    ("Invalid zone flag combination"));
2358 	if (arg->flags & UMA_ZFLAG_INTERNAL)
2359 		zone->uz_bucket_size_max = zone->uz_bucket_size = 0;
2360 	if ((arg->flags & UMA_ZONE_MAXBUCKET) != 0)
2361 		zone->uz_bucket_size = BUCKET_MAX;
2362 	else if ((arg->flags & UMA_ZONE_MINBUCKET) != 0)
2363 		zone->uz_bucket_size_max = zone->uz_bucket_size = BUCKET_MIN;
2364 	else if ((arg->flags & UMA_ZONE_NOBUCKET) != 0)
2365 		zone->uz_bucket_size = 0;
2366 	else
2367 		zone->uz_bucket_size = bucket_select(zone->uz_size);
2368 	zone->uz_bucket_size_min = zone->uz_bucket_size;
2369 	if (zone->uz_dtor != NULL || zone->uz_ctor != NULL)
2370 		zone->uz_flags |= UMA_ZFLAG_CTORDTOR;
2371 	zone_update_caches(zone);
2372 
2373 	return (0);
2374 }
2375 
2376 /*
2377  * Keg header dtor.  This frees all data, destroys locks, frees the hash
2378  * table and removes the keg from the global list.
2379  *
2380  * Arguments/Returns follow uma_dtor specifications
2381  *	udata  unused
2382  */
2383 static void
2384 keg_dtor(void *arg, int size, void *udata)
2385 {
2386 	uma_keg_t keg;
2387 	uint32_t free, pages;
2388 	int i;
2389 
2390 	keg = (uma_keg_t)arg;
2391 	free = pages = 0;
2392 	for (i = 0; i < vm_ndomains; i++) {
2393 		free += keg->uk_domain[i].ud_free;
2394 		pages += keg->uk_domain[i].ud_pages;
2395 		KEG_LOCK_FINI(keg, i);
2396 	}
2397 	if (free != 0)
2398 		printf("Freed UMA keg (%s) was not empty (%u items). "
2399 		    " Lost %u pages of memory.\n",
2400 		    keg->uk_name ? keg->uk_name : "",
2401 		    free, pages);
2402 
2403 	hash_free(&keg->uk_hash);
2404 }
2405 
2406 /*
2407  * Zone header dtor.
2408  *
2409  * Arguments/Returns follow uma_dtor specifications
2410  *	udata  unused
2411  */
2412 static void
2413 zone_dtor(void *arg, int size, void *udata)
2414 {
2415 	uma_zone_t zone;
2416 	uma_keg_t keg;
2417 
2418 	zone = (uma_zone_t)arg;
2419 
2420 	sysctl_remove_oid(zone->uz_oid, 1, 1);
2421 
2422 	if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
2423 		cache_drain(zone);
2424 
2425 	rw_wlock(&uma_rwlock);
2426 	LIST_REMOVE(zone, uz_link);
2427 	rw_wunlock(&uma_rwlock);
2428 	/*
2429 	 * XXX there are some races here where
2430 	 * the zone can be drained but zone lock
2431 	 * released and then refilled before we
2432 	 * remove it... we dont care for now
2433 	 */
2434 	zone_reclaim(zone, M_WAITOK, true);
2435 	/*
2436 	 * We only destroy kegs from non secondary/non cache zones.
2437 	 */
2438 	if ((zone->uz_flags & (UMA_ZONE_SECONDARY | UMA_ZFLAG_CACHE)) == 0) {
2439 		keg = zone->uz_keg;
2440 		rw_wlock(&uma_rwlock);
2441 		LIST_REMOVE(keg, uk_link);
2442 		rw_wunlock(&uma_rwlock);
2443 		zone_free_item(kegs, keg, NULL, SKIP_NONE);
2444 	}
2445 	counter_u64_free(zone->uz_allocs);
2446 	counter_u64_free(zone->uz_frees);
2447 	counter_u64_free(zone->uz_fails);
2448 	free(zone->uz_ctlname, M_UMA);
2449 	ZONE_LOCK_FINI(zone);
2450 	ZONE_CROSS_LOCK_FINI(zone);
2451 }
2452 
2453 /*
2454  * Traverses every zone in the system and calls a callback
2455  *
2456  * Arguments:
2457  *	zfunc  A pointer to a function which accepts a zone
2458  *		as an argument.
2459  *
2460  * Returns:
2461  *	Nothing
2462  */
2463 static void
2464 zone_foreach(void (*zfunc)(uma_zone_t, void *arg), void *arg)
2465 {
2466 	uma_keg_t keg;
2467 	uma_zone_t zone;
2468 
2469 	/*
2470 	 * Before BOOT_RUNNING we are guaranteed to be single
2471 	 * threaded, so locking isn't needed. Startup functions
2472 	 * are allowed to use M_WAITOK.
2473 	 */
2474 	if (__predict_true(booted >= BOOT_RUNNING))
2475 		rw_rlock(&uma_rwlock);
2476 	LIST_FOREACH(keg, &uma_kegs, uk_link) {
2477 		LIST_FOREACH(zone, &keg->uk_zones, uz_link)
2478 			zfunc(zone, arg);
2479 	}
2480 	LIST_FOREACH(zone, &uma_cachezones, uz_link)
2481 		zfunc(zone, arg);
2482 	if (__predict_true(booted >= BOOT_RUNNING))
2483 		rw_runlock(&uma_rwlock);
2484 }
2485 
2486 /*
2487  * Count how many pages do we need to bootstrap.  VM supplies
2488  * its need in early zones in the argument, we add up our zones,
2489  * which consist of the UMA Slabs, UMA Hash and 9 Bucket zones.  The
2490  * zone of zones and zone of kegs are accounted separately.
2491  */
2492 #define	UMA_BOOT_ZONES	11
2493 static int zsize, ksize;
2494 int
2495 uma_startup_count(int vm_zones)
2496 {
2497 	int zones, pages;
2498 	u_int zppera, zipers;
2499 	u_int kppera, kipers;
2500 	size_t space, size;
2501 
2502 	ksize = sizeof(struct uma_keg) +
2503 	    (sizeof(struct uma_domain) * vm_ndomains);
2504 	ksize = roundup(ksize, UMA_SUPER_ALIGN);
2505 	zsize = sizeof(struct uma_zone) +
2506 	    (sizeof(struct uma_cache) * (mp_maxid + 1)) +
2507 	    (sizeof(struct uma_zone_domain) * vm_ndomains);
2508 	zsize = roundup(zsize, UMA_SUPER_ALIGN);
2509 
2510 	/*
2511 	 * Memory for the zone of kegs and its keg, and for zone
2512 	 * of zones.  Allocated directly in uma_startup().
2513 	 */
2514 	pages = howmany(zsize * 2 + ksize, PAGE_SIZE);
2515 
2516 #ifdef	UMA_MD_SMALL_ALLOC
2517 	zones = UMA_BOOT_ZONES;
2518 #else
2519 	zones = UMA_BOOT_ZONES + vm_zones;
2520 	vm_zones = 0;
2521 #endif
2522 	size = slab_sizeof(SLAB_MAX_SETSIZE);
2523 	space = slab_space(SLAB_MAX_SETSIZE);
2524 
2525 	/* Memory for the rest of startup zones, UMA and VM, ... */
2526 	if (zsize > space) {
2527 		/* See keg_large_init(). */
2528 		zppera = howmany(zsize + slab_sizeof(1), PAGE_SIZE);
2529 		zipers = 1;
2530 		zones += vm_zones;
2531 	} else {
2532 		zppera = 1;
2533 		zipers = space / zsize;
2534 	}
2535 	pages += howmany(zones, zipers) * zppera;
2536 
2537 	/* ... and their kegs. Note that zone of zones allocates a keg! */
2538 	if (ksize > space) {
2539 		/* See keg_large_init(). */
2540 		kppera = howmany(ksize + slab_sizeof(1), PAGE_SIZE);
2541 		kipers = 1;
2542 	} else {
2543 		kppera = 1;
2544 		kipers = space / ksize;
2545 	}
2546 	pages += howmany(zones + 1, kipers) * kppera;
2547 
2548 	/*
2549 	 * Allocate an additional slab for zones and kegs on NUMA
2550 	 * systems.  The round-robin allocation policy will populate at
2551 	 * least one slab per-domain.
2552 	 */
2553 	pages += (vm_ndomains - 1) * (zppera + kppera);
2554 
2555 	return (pages);
2556 }
2557 
2558 void
2559 uma_startup(void *mem, int npages)
2560 {
2561 	struct uma_zctor_args args;
2562 	uma_keg_t masterkeg;
2563 	uintptr_t m;
2564 
2565 #ifdef DIAGNOSTIC
2566 	printf("Entering %s with %d boot pages configured\n", __func__, npages);
2567 #endif
2568 
2569 	rw_init(&uma_rwlock, "UMA lock");
2570 
2571 	/* Use bootpages memory for the zone of zones and zone of kegs. */
2572 	m = (uintptr_t)mem;
2573 	zones = (uma_zone_t)m;
2574 	m += zsize;
2575 	kegs = (uma_zone_t)m;
2576 	m += zsize;
2577 	masterkeg = (uma_keg_t)m;
2578 	m += ksize;
2579 	m = roundup(m, PAGE_SIZE);
2580 	npages -= (m - (uintptr_t)mem) / PAGE_SIZE;
2581 	mem = (void *)m;
2582 
2583 	/* "manually" create the initial zone */
2584 	memset(&args, 0, sizeof(args));
2585 	args.name = "UMA Kegs";
2586 	args.size = ksize;
2587 	args.ctor = keg_ctor;
2588 	args.dtor = keg_dtor;
2589 	args.uminit = zero_init;
2590 	args.fini = NULL;
2591 	args.keg = masterkeg;
2592 	args.align = UMA_SUPER_ALIGN - 1;
2593 	args.flags = UMA_ZFLAG_INTERNAL;
2594 	zone_ctor(kegs, zsize, &args, M_WAITOK);
2595 
2596 	bootmem = mem;
2597 	boot_pages = npages;
2598 
2599 	args.name = "UMA Zones";
2600 	args.size = zsize;
2601 	args.ctor = zone_ctor;
2602 	args.dtor = zone_dtor;
2603 	args.uminit = zero_init;
2604 	args.fini = NULL;
2605 	args.keg = NULL;
2606 	args.align = UMA_SUPER_ALIGN - 1;
2607 	args.flags = UMA_ZFLAG_INTERNAL;
2608 	zone_ctor(zones, zsize, &args, M_WAITOK);
2609 
2610 	/* Now make a zone for slab headers */
2611 	slabzone = uma_zcreate("UMA Slabs", sizeof(struct uma_hash_slab),
2612 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
2613 
2614 	hashzone = uma_zcreate("UMA Hash",
2615 	    sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
2616 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
2617 
2618 	booted = BOOT_STRAPPED;
2619 }
2620 
2621 void
2622 uma_startup1(void)
2623 {
2624 
2625 #ifdef DIAGNOSTIC
2626 	printf("Entering %s with %d boot pages left\n", __func__, boot_pages);
2627 #endif
2628 	booted = BOOT_PAGEALLOC;
2629 }
2630 
2631 void
2632 uma_startup2(void)
2633 {
2634 
2635 #ifdef DIAGNOSTIC
2636 	printf("Entering %s with %d boot pages left\n", __func__, boot_pages);
2637 #endif
2638 	sx_init(&uma_reclaim_lock, "umareclaim");
2639 	bucket_init();
2640 	booted = BOOT_BUCKETS;
2641 	bucket_enable();
2642 }
2643 
2644 static void
2645 uma_startup3(void)
2646 {
2647 
2648 #ifdef INVARIANTS
2649 	TUNABLE_INT_FETCH("vm.debug.divisor", &dbg_divisor);
2650 	uma_dbg_cnt = counter_u64_alloc(M_WAITOK);
2651 	uma_skip_cnt = counter_u64_alloc(M_WAITOK);
2652 #endif
2653 	zone_foreach(zone_alloc_counters, NULL);
2654 	zone_foreach(zone_alloc_sysctl, NULL);
2655 	callout_init(&uma_callout, 1);
2656 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
2657 	booted = BOOT_RUNNING;
2658 
2659 	EVENTHANDLER_REGISTER(shutdown_post_sync, uma_shutdown, NULL,
2660 	    EVENTHANDLER_PRI_FIRST);
2661 }
2662 
2663 static void
2664 uma_shutdown(void)
2665 {
2666 
2667 	booted = BOOT_SHUTDOWN;
2668 }
2669 
2670 static uma_keg_t
2671 uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
2672 		int align, uint32_t flags)
2673 {
2674 	struct uma_kctor_args args;
2675 
2676 	args.size = size;
2677 	args.uminit = uminit;
2678 	args.fini = fini;
2679 	args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align;
2680 	args.flags = flags;
2681 	args.zone = zone;
2682 	return (zone_alloc_item(kegs, &args, UMA_ANYDOMAIN, M_WAITOK));
2683 }
2684 
2685 /* Public functions */
2686 /* See uma.h */
2687 void
2688 uma_set_align(int align)
2689 {
2690 
2691 	if (align != UMA_ALIGN_CACHE)
2692 		uma_align_cache = align;
2693 }
2694 
2695 /* See uma.h */
2696 uma_zone_t
2697 uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
2698 		uma_init uminit, uma_fini fini, int align, uint32_t flags)
2699 
2700 {
2701 	struct uma_zctor_args args;
2702 	uma_zone_t res;
2703 	bool locked;
2704 
2705 	KASSERT(powerof2(align + 1), ("invalid zone alignment %d for \"%s\"",
2706 	    align, name));
2707 
2708 	/* This stuff is essential for the zone ctor */
2709 	memset(&args, 0, sizeof(args));
2710 	args.name = name;
2711 	args.size = size;
2712 	args.ctor = ctor;
2713 	args.dtor = dtor;
2714 	args.uminit = uminit;
2715 	args.fini = fini;
2716 #ifdef  INVARIANTS
2717 	/*
2718 	 * Inject procedures which check for memory use after free if we are
2719 	 * allowed to scramble the memory while it is not allocated.  This
2720 	 * requires that: UMA is actually able to access the memory, no init
2721 	 * or fini procedures, no dependency on the initial value of the
2722 	 * memory, and no (legitimate) use of the memory after free.  Note,
2723 	 * the ctor and dtor do not need to be empty.
2724 	 */
2725 	if ((!(flags & (UMA_ZONE_ZINIT | UMA_ZONE_NOTOUCH |
2726 	    UMA_ZONE_NOFREE))) && uminit == NULL && fini == NULL) {
2727 		args.uminit = trash_init;
2728 		args.fini = trash_fini;
2729 	}
2730 #endif
2731 	args.align = align;
2732 	args.flags = flags;
2733 	args.keg = NULL;
2734 
2735 	if (booted < BOOT_BUCKETS) {
2736 		locked = false;
2737 	} else {
2738 		sx_slock(&uma_reclaim_lock);
2739 		locked = true;
2740 	}
2741 	res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK);
2742 	if (locked)
2743 		sx_sunlock(&uma_reclaim_lock);
2744 	return (res);
2745 }
2746 
2747 /* See uma.h */
2748 uma_zone_t
2749 uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
2750 		    uma_init zinit, uma_fini zfini, uma_zone_t master)
2751 {
2752 	struct uma_zctor_args args;
2753 	uma_keg_t keg;
2754 	uma_zone_t res;
2755 	bool locked;
2756 
2757 	keg = master->uz_keg;
2758 	memset(&args, 0, sizeof(args));
2759 	args.name = name;
2760 	args.size = keg->uk_size;
2761 	args.ctor = ctor;
2762 	args.dtor = dtor;
2763 	args.uminit = zinit;
2764 	args.fini = zfini;
2765 	args.align = keg->uk_align;
2766 	args.flags = keg->uk_flags | UMA_ZONE_SECONDARY;
2767 	args.keg = keg;
2768 
2769 	if (booted < BOOT_BUCKETS) {
2770 		locked = false;
2771 	} else {
2772 		sx_slock(&uma_reclaim_lock);
2773 		locked = true;
2774 	}
2775 	/* XXX Attaches only one keg of potentially many. */
2776 	res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK);
2777 	if (locked)
2778 		sx_sunlock(&uma_reclaim_lock);
2779 	return (res);
2780 }
2781 
2782 /* See uma.h */
2783 uma_zone_t
2784 uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor,
2785 		    uma_init zinit, uma_fini zfini, uma_import zimport,
2786 		    uma_release zrelease, void *arg, int flags)
2787 {
2788 	struct uma_zctor_args args;
2789 
2790 	memset(&args, 0, sizeof(args));
2791 	args.name = name;
2792 	args.size = size;
2793 	args.ctor = ctor;
2794 	args.dtor = dtor;
2795 	args.uminit = zinit;
2796 	args.fini = zfini;
2797 	args.import = zimport;
2798 	args.release = zrelease;
2799 	args.arg = arg;
2800 	args.align = 0;
2801 	args.flags = flags | UMA_ZFLAG_CACHE;
2802 
2803 	return (zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK));
2804 }
2805 
2806 /* See uma.h */
2807 void
2808 uma_zdestroy(uma_zone_t zone)
2809 {
2810 
2811 	/*
2812 	 * Large slabs are expensive to reclaim, so don't bother doing
2813 	 * unnecessary work if we're shutting down.
2814 	 */
2815 	if (booted == BOOT_SHUTDOWN &&
2816 	    zone->uz_fini == NULL && zone->uz_release == zone_release)
2817 		return;
2818 	sx_slock(&uma_reclaim_lock);
2819 	zone_free_item(zones, zone, NULL, SKIP_NONE);
2820 	sx_sunlock(&uma_reclaim_lock);
2821 }
2822 
2823 void
2824 uma_zwait(uma_zone_t zone)
2825 {
2826 	void *item;
2827 
2828 	item = uma_zalloc_arg(zone, NULL, M_WAITOK);
2829 	uma_zfree(zone, item);
2830 }
2831 
2832 void *
2833 uma_zalloc_pcpu_arg(uma_zone_t zone, void *udata, int flags)
2834 {
2835 	void *item;
2836 #ifdef SMP
2837 	int i;
2838 
2839 	MPASS(zone->uz_flags & UMA_ZONE_PCPU);
2840 #endif
2841 	item = uma_zalloc_arg(zone, udata, flags & ~M_ZERO);
2842 	if (item != NULL && (flags & M_ZERO)) {
2843 #ifdef SMP
2844 		for (i = 0; i <= mp_maxid; i++)
2845 			bzero(zpcpu_get_cpu(item, i), zone->uz_size);
2846 #else
2847 		bzero(item, zone->uz_size);
2848 #endif
2849 	}
2850 	return (item);
2851 }
2852 
2853 /*
2854  * A stub while both regular and pcpu cases are identical.
2855  */
2856 void
2857 uma_zfree_pcpu_arg(uma_zone_t zone, void *item, void *udata)
2858 {
2859 
2860 #ifdef SMP
2861 	MPASS(zone->uz_flags & UMA_ZONE_PCPU);
2862 #endif
2863 	uma_zfree_arg(zone, item, udata);
2864 }
2865 
2866 #ifdef INVARIANTS
2867 #define	UMA_ALWAYS_CTORDTOR	1
2868 #else
2869 #define	UMA_ALWAYS_CTORDTOR	0
2870 #endif
2871 
2872 static void *
2873 item_ctor(uma_zone_t zone, int size, void *udata, int flags, void *item)
2874 {
2875 #ifdef INVARIANTS
2876 	bool skipdbg;
2877 
2878 	skipdbg = uma_dbg_zskip(zone, item);
2879 	if (!skipdbg && (zone->uz_flags & UMA_ZFLAG_TRASH) != 0 &&
2880 	    zone->uz_ctor != trash_ctor)
2881 		trash_ctor(item, size, udata, flags);
2882 #endif
2883 	if (__predict_false(zone->uz_ctor != NULL) &&
2884 	    zone->uz_ctor(item, size, udata, flags) != 0) {
2885 		counter_u64_add(zone->uz_fails, 1);
2886 		zone_free_item(zone, item, udata, SKIP_DTOR | SKIP_CNT);
2887 		return (NULL);
2888 	}
2889 #ifdef INVARIANTS
2890 	if (!skipdbg)
2891 		uma_dbg_alloc(zone, NULL, item);
2892 #endif
2893 	if (flags & M_ZERO)
2894 		bzero(item, size);
2895 
2896 	return (item);
2897 }
2898 
2899 static inline void
2900 item_dtor(uma_zone_t zone, void *item, int size, void *udata,
2901     enum zfreeskip skip)
2902 {
2903 #ifdef INVARIANTS
2904 	bool skipdbg;
2905 
2906 	skipdbg = uma_dbg_zskip(zone, item);
2907 	if (skip == SKIP_NONE && !skipdbg) {
2908 		if ((zone->uz_flags & UMA_ZONE_MALLOC) != 0)
2909 			uma_dbg_free(zone, udata, item);
2910 		else
2911 			uma_dbg_free(zone, NULL, item);
2912 	}
2913 #endif
2914 	if (__predict_true(skip < SKIP_DTOR)) {
2915 		if (zone->uz_dtor != NULL)
2916 			zone->uz_dtor(item, size, udata);
2917 #ifdef INVARIANTS
2918 		if (!skipdbg && (zone->uz_flags & UMA_ZFLAG_TRASH) != 0 &&
2919 		    zone->uz_dtor != trash_dtor)
2920 			trash_dtor(item, size, udata);
2921 #endif
2922 	}
2923 }
2924 
2925 /* See uma.h */
2926 void *
2927 uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
2928 {
2929 	uma_cache_bucket_t bucket;
2930 	uma_cache_t cache;
2931 	void *item;
2932 	int domain, size, uz_flags;
2933 
2934 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
2935 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
2936 
2937 	/* This is the fast path allocation */
2938 	CTR4(KTR_UMA, "uma_zalloc_arg thread %x zone %s(%p) flags %d",
2939 	    curthread, zone->uz_name, zone, flags);
2940 
2941 #ifdef WITNESS
2942 	if (flags & M_WAITOK) {
2943 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2944 		    "uma_zalloc_arg: zone \"%s\"", zone->uz_name);
2945 	}
2946 #endif
2947 
2948 #ifdef INVARIANTS
2949 	KASSERT((flags & M_EXEC) == 0, ("uma_zalloc_arg: called with M_EXEC"));
2950 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
2951 	    ("uma_zalloc_arg: called with spinlock or critical section held"));
2952 	if (zone->uz_flags & UMA_ZONE_PCPU)
2953 		KASSERT((flags & M_ZERO) == 0, ("allocating from a pcpu zone "
2954 		    "with M_ZERO passed"));
2955 #endif
2956 
2957 #ifdef DEBUG_MEMGUARD
2958 	if (memguard_cmp_zone(zone)) {
2959 		item = memguard_alloc(zone->uz_size, flags);
2960 		if (item != NULL) {
2961 			if (zone->uz_init != NULL &&
2962 			    zone->uz_init(item, zone->uz_size, flags) != 0)
2963 				return (NULL);
2964 			if (zone->uz_ctor != NULL &&
2965 			    zone->uz_ctor(item, zone->uz_size, udata,
2966 			    flags) != 0) {
2967 				counter_u64_add(zone->uz_fails, 1);
2968 			    	zone->uz_fini(item, zone->uz_size);
2969 				return (NULL);
2970 			}
2971 			return (item);
2972 		}
2973 		/* This is unfortunate but should not be fatal. */
2974 	}
2975 #endif
2976 	/*
2977 	 * If possible, allocate from the per-CPU cache.  There are two
2978 	 * requirements for safe access to the per-CPU cache: (1) the thread
2979 	 * accessing the cache must not be preempted or yield during access,
2980 	 * and (2) the thread must not migrate CPUs without switching which
2981 	 * cache it accesses.  We rely on a critical section to prevent
2982 	 * preemption and migration.  We release the critical section in
2983 	 * order to acquire the zone mutex if we are unable to allocate from
2984 	 * the current cache; when we re-acquire the critical section, we
2985 	 * must detect and handle migration if it has occurred.
2986 	 */
2987 	critical_enter();
2988 	do {
2989 		cache = &zone->uz_cpu[curcpu];
2990 		bucket = &cache->uc_allocbucket;
2991 		size = cache_uz_size(cache);
2992 		uz_flags = cache_uz_flags(cache);
2993 		if (__predict_true(bucket->ucb_cnt != 0)) {
2994 			item = cache_bucket_pop(cache, bucket);
2995 			critical_exit();
2996 			if (__predict_false((uz_flags & UMA_ZFLAG_CTORDTOR) != 0 ||
2997 			    UMA_ALWAYS_CTORDTOR))
2998 				return (item_ctor(zone, size, udata, flags, item));
2999 			if (flags & M_ZERO)
3000 				bzero(item, size);
3001 			return (item);
3002 		}
3003 	} while (cache_alloc(zone, cache, udata, flags));
3004 	critical_exit();
3005 
3006 	/*
3007 	 * We can not get a bucket so try to return a single item.
3008 	 */
3009 	if (uz_flags & UMA_ZONE_FIRSTTOUCH)
3010 		domain = PCPU_GET(domain);
3011 	else
3012 		domain = UMA_ANYDOMAIN;
3013 	return (zone_alloc_item(zone, udata, domain, flags));
3014 }
3015 
3016 /*
3017  * Replenish an alloc bucket and possibly restore an old one.  Called in
3018  * a critical section.  Returns in a critical section.
3019  *
3020  * A false return value indicates an allocation failure.
3021  * A true return value indicates success and the caller should retry.
3022  */
3023 static __noinline bool
3024 cache_alloc(uma_zone_t zone, uma_cache_t cache, void *udata, int flags)
3025 {
3026 	uma_zone_domain_t zdom;
3027 	uma_bucket_t bucket;
3028 	int domain;
3029 	bool lockfail;
3030 
3031 	CRITICAL_ASSERT(curthread);
3032 
3033 	/*
3034 	 * If we have run out of items in our alloc bucket see
3035 	 * if we can switch with the free bucket.
3036 	 */
3037 	if (cache->uc_freebucket.ucb_cnt != 0) {
3038 		cache_bucket_swap(&cache->uc_freebucket, &cache->uc_allocbucket);
3039 		return (true);
3040 	}
3041 
3042 	/*
3043 	 * Discard any empty allocation bucket while we hold no locks.
3044 	 */
3045 	bucket = cache_bucket_unload_alloc(cache);
3046 	critical_exit();
3047 	if (bucket != NULL)
3048 		bucket_free(zone, bucket, udata);
3049 
3050 	/* Short-circuit for zones without buckets and low memory. */
3051 	if (zone->uz_bucket_size == 0 || bucketdisable) {
3052 		critical_enter();
3053 		return (false);
3054 	}
3055 
3056 	/*
3057 	 * Attempt to retrieve the item from the per-CPU cache has failed, so
3058 	 * we must go back to the zone.  This requires the zone lock, so we
3059 	 * must drop the critical section, then re-acquire it when we go back
3060 	 * to the cache.  Since the critical section is released, we may be
3061 	 * preempted or migrate.  As such, make sure not to maintain any
3062 	 * thread-local state specific to the cache from prior to releasing
3063 	 * the critical section.
3064 	 */
3065 	lockfail = 0;
3066 	if (ZONE_TRYLOCK(zone) == 0) {
3067 		/* Record contention to size the buckets. */
3068 		ZONE_LOCK(zone);
3069 		lockfail = 1;
3070 	}
3071 
3072 	/* See if we lost the race to fill the cache. */
3073 	critical_enter();
3074 	cache = &zone->uz_cpu[curcpu];
3075 	if (cache->uc_allocbucket.ucb_bucket != NULL) {
3076 		ZONE_UNLOCK(zone);
3077 		return (true);
3078 	}
3079 
3080 	/*
3081 	 * Check the zone's cache of buckets.
3082 	 */
3083 	if (zone->uz_flags & UMA_ZONE_FIRSTTOUCH) {
3084 		domain = PCPU_GET(domain);
3085 		zdom = &zone->uz_domain[domain];
3086 	} else {
3087 		domain = UMA_ANYDOMAIN;
3088 		zdom = &zone->uz_domain[0];
3089 	}
3090 
3091 	if ((bucket = zone_fetch_bucket(zone, zdom)) != NULL) {
3092 		ZONE_UNLOCK(zone);
3093 		KASSERT(bucket->ub_cnt != 0,
3094 		    ("uma_zalloc_arg: Returning an empty bucket."));
3095 		cache_bucket_load_alloc(cache, bucket);
3096 		return (true);
3097 	}
3098 	/* We are no longer associated with this CPU. */
3099 	critical_exit();
3100 
3101 	/*
3102 	 * We bump the uz count when the cache size is insufficient to
3103 	 * handle the working set.
3104 	 */
3105 	if (lockfail && zone->uz_bucket_size < zone->uz_bucket_size_max)
3106 		zone->uz_bucket_size++;
3107 	ZONE_UNLOCK(zone);
3108 
3109 	/*
3110 	 * Fill a bucket and attempt to use it as the alloc bucket.
3111 	 */
3112 	bucket = zone_alloc_bucket(zone, udata, domain, flags);
3113 	CTR3(KTR_UMA, "uma_zalloc: zone %s(%p) bucket zone returned %p",
3114 	    zone->uz_name, zone, bucket);
3115 	if (bucket == NULL) {
3116 		critical_enter();
3117 		return (false);
3118 	}
3119 
3120 	/*
3121 	 * See if we lost the race or were migrated.  Cache the
3122 	 * initialized bucket to make this less likely or claim
3123 	 * the memory directly.
3124 	 */
3125 	ZONE_LOCK(zone);
3126 	critical_enter();
3127 	cache = &zone->uz_cpu[curcpu];
3128 	if (cache->uc_allocbucket.ucb_bucket == NULL &&
3129 	    ((zone->uz_flags & UMA_ZONE_FIRSTTOUCH) == 0 ||
3130 	    domain == PCPU_GET(domain))) {
3131 		cache_bucket_load_alloc(cache, bucket);
3132 		zdom->uzd_imax += bucket->ub_cnt;
3133 	} else if (zone->uz_bkt_count >= zone->uz_bkt_max) {
3134 		critical_exit();
3135 		ZONE_UNLOCK(zone);
3136 		bucket_drain(zone, bucket);
3137 		bucket_free(zone, bucket, udata);
3138 		critical_enter();
3139 		return (true);
3140 	} else
3141 		zone_put_bucket(zone, zdom, bucket, false);
3142 	ZONE_UNLOCK(zone);
3143 	return (true);
3144 }
3145 
3146 void *
3147 uma_zalloc_domain(uma_zone_t zone, void *udata, int domain, int flags)
3148 {
3149 
3150 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
3151 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
3152 
3153 	/* This is the fast path allocation */
3154 	CTR5(KTR_UMA,
3155 	    "uma_zalloc_domain thread %x zone %s(%p) domain %d flags %d",
3156 	    curthread, zone->uz_name, zone, domain, flags);
3157 
3158 	if (flags & M_WAITOK) {
3159 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
3160 		    "uma_zalloc_domain: zone \"%s\"", zone->uz_name);
3161 	}
3162 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
3163 	    ("uma_zalloc_domain: called with spinlock or critical section held"));
3164 
3165 	return (zone_alloc_item(zone, udata, domain, flags));
3166 }
3167 
3168 /*
3169  * Find a slab with some space.  Prefer slabs that are partially used over those
3170  * that are totally full.  This helps to reduce fragmentation.
3171  *
3172  * If 'rr' is 1, search all domains starting from 'domain'.  Otherwise check
3173  * only 'domain'.
3174  */
3175 static uma_slab_t
3176 keg_first_slab(uma_keg_t keg, int domain, bool rr)
3177 {
3178 	uma_domain_t dom;
3179 	uma_slab_t slab;
3180 	int start;
3181 
3182 	KASSERT(domain >= 0 && domain < vm_ndomains,
3183 	    ("keg_first_slab: domain %d out of range", domain));
3184 	KEG_LOCK_ASSERT(keg, domain);
3185 
3186 	slab = NULL;
3187 	start = domain;
3188 	do {
3189 		dom = &keg->uk_domain[domain];
3190 		if (!LIST_EMPTY(&dom->ud_part_slab))
3191 			return (LIST_FIRST(&dom->ud_part_slab));
3192 		if (!LIST_EMPTY(&dom->ud_free_slab)) {
3193 			slab = LIST_FIRST(&dom->ud_free_slab);
3194 			LIST_REMOVE(slab, us_link);
3195 			LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
3196 			return (slab);
3197 		}
3198 		if (rr)
3199 			domain = (domain + 1) % vm_ndomains;
3200 	} while (domain != start);
3201 
3202 	return (NULL);
3203 }
3204 
3205 /*
3206  * Fetch an existing slab from a free or partial list.  Returns with the
3207  * keg domain lock held if a slab was found or unlocked if not.
3208  */
3209 static uma_slab_t
3210 keg_fetch_free_slab(uma_keg_t keg, int domain, bool rr, int flags)
3211 {
3212 	uma_slab_t slab;
3213 	uint32_t reserve;
3214 
3215 	/* HASH has a single free list. */
3216 	if ((keg->uk_flags & UMA_ZFLAG_HASH) != 0)
3217 		domain = 0;
3218 
3219 	KEG_LOCK(keg, domain);
3220 	reserve = (flags & M_USE_RESERVE) != 0 ? 0 : keg->uk_reserve;
3221 	if (keg->uk_domain[domain].ud_free <= reserve ||
3222 	    (slab = keg_first_slab(keg, domain, rr)) == NULL) {
3223 		KEG_UNLOCK(keg, domain);
3224 		return (NULL);
3225 	}
3226 	return (slab);
3227 }
3228 
3229 static uma_slab_t
3230 keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int rdomain, const int flags)
3231 {
3232 	struct vm_domainset_iter di;
3233 	uma_slab_t slab;
3234 	int aflags, domain;
3235 	bool rr;
3236 
3237 restart:
3238 	/*
3239 	 * Use the keg's policy if upper layers haven't already specified a
3240 	 * domain (as happens with first-touch zones).
3241 	 *
3242 	 * To avoid races we run the iterator with the keg lock held, but that
3243 	 * means that we cannot allow the vm_domainset layer to sleep.  Thus,
3244 	 * clear M_WAITOK and handle low memory conditions locally.
3245 	 */
3246 	rr = rdomain == UMA_ANYDOMAIN;
3247 	if (rr) {
3248 		aflags = (flags & ~M_WAITOK) | M_NOWAIT;
3249 		vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain,
3250 		    &aflags);
3251 	} else {
3252 		aflags = flags;
3253 		domain = rdomain;
3254 	}
3255 
3256 	for (;;) {
3257 		slab = keg_fetch_free_slab(keg, domain, rr, flags);
3258 		if (slab != NULL)
3259 			return (slab);
3260 
3261 		/*
3262 		 * M_NOVM means don't ask at all!
3263 		 */
3264 		if (flags & M_NOVM)
3265 			break;
3266 
3267 		slab = keg_alloc_slab(keg, zone, domain, flags, aflags);
3268 		if (slab != NULL)
3269 			return (slab);
3270 		if (!rr && (flags & M_WAITOK) == 0)
3271 			break;
3272 		if (rr && vm_domainset_iter_policy(&di, &domain) != 0) {
3273 			if ((flags & M_WAITOK) != 0) {
3274 				vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask);
3275 				goto restart;
3276 			}
3277 			break;
3278 		}
3279 	}
3280 
3281 	/*
3282 	 * We might not have been able to get a slab but another cpu
3283 	 * could have while we were unlocked.  Check again before we
3284 	 * fail.
3285 	 */
3286 	if ((slab = keg_fetch_free_slab(keg, domain, rr, flags)) != NULL)
3287 		return (slab);
3288 
3289 	return (NULL);
3290 }
3291 
3292 static void *
3293 slab_alloc_item(uma_keg_t keg, uma_slab_t slab)
3294 {
3295 	uma_domain_t dom;
3296 	void *item;
3297 	uint8_t freei;
3298 
3299 	KEG_LOCK_ASSERT(keg, slab->us_domain);
3300 
3301 	dom = &keg->uk_domain[slab->us_domain];
3302 	freei = BIT_FFS(keg->uk_ipers, &slab->us_free) - 1;
3303 	BIT_CLR(keg->uk_ipers, freei, &slab->us_free);
3304 	item = slab_item(slab, keg, freei);
3305 	slab->us_freecount--;
3306 	dom->ud_free--;
3307 
3308 	/* Move this slab to the full list */
3309 	if (slab->us_freecount == 0) {
3310 		LIST_REMOVE(slab, us_link);
3311 		LIST_INSERT_HEAD(&dom->ud_full_slab, slab, us_link);
3312 	}
3313 
3314 	return (item);
3315 }
3316 
3317 static int
3318 zone_import(void *arg, void **bucket, int max, int domain, int flags)
3319 {
3320 	uma_domain_t dom;
3321 	uma_zone_t zone;
3322 	uma_slab_t slab;
3323 	uma_keg_t keg;
3324 #ifdef NUMA
3325 	int stripe;
3326 #endif
3327 	int i;
3328 
3329 	zone = arg;
3330 	slab = NULL;
3331 	keg = zone->uz_keg;
3332 	/* Try to keep the buckets totally full */
3333 	for (i = 0; i < max; ) {
3334 		if ((slab = keg_fetch_slab(keg, zone, domain, flags)) == NULL)
3335 			break;
3336 #ifdef NUMA
3337 		stripe = howmany(max, vm_ndomains);
3338 #endif
3339 		dom = &keg->uk_domain[slab->us_domain];
3340 		while (slab->us_freecount && i < max) {
3341 			bucket[i++] = slab_alloc_item(keg, slab);
3342 			if (dom->ud_free <= keg->uk_reserve)
3343 				break;
3344 #ifdef NUMA
3345 			/*
3346 			 * If the zone is striped we pick a new slab for every
3347 			 * N allocations.  Eliminating this conditional will
3348 			 * instead pick a new domain for each bucket rather
3349 			 * than stripe within each bucket.  The current option
3350 			 * produces more fragmentation and requires more cpu
3351 			 * time but yields better distribution.
3352 			 */
3353 			if ((zone->uz_flags & UMA_ZONE_ROUNDROBIN) != 0 &&
3354 			    vm_ndomains > 1 && --stripe == 0)
3355 				break;
3356 #endif
3357 		}
3358 		KEG_UNLOCK(keg, slab->us_domain);
3359 		/* Don't block if we allocated any successfully. */
3360 		flags &= ~M_WAITOK;
3361 		flags |= M_NOWAIT;
3362 	}
3363 
3364 	return i;
3365 }
3366 
3367 static int
3368 zone_alloc_limit_hard(uma_zone_t zone, int count, int flags)
3369 {
3370 	uint64_t old, new, total, max;
3371 
3372 	/*
3373 	 * The hard case.  We're going to sleep because there were existing
3374 	 * sleepers or because we ran out of items.  This routine enforces
3375 	 * fairness by keeping fifo order.
3376 	 *
3377 	 * First release our ill gotten gains and make some noise.
3378 	 */
3379 	for (;;) {
3380 		zone_free_limit(zone, count);
3381 		zone_log_warning(zone);
3382 		zone_maxaction(zone);
3383 		if (flags & M_NOWAIT)
3384 			return (0);
3385 
3386 		/*
3387 		 * We need to allocate an item or set ourself as a sleeper
3388 		 * while the sleepq lock is held to avoid wakeup races.  This
3389 		 * is essentially a home rolled semaphore.
3390 		 */
3391 		sleepq_lock(&zone->uz_max_items);
3392 		old = zone->uz_items;
3393 		do {
3394 			MPASS(UZ_ITEMS_SLEEPERS(old) < UZ_ITEMS_SLEEPERS_MAX);
3395 			/* Cache the max since we will evaluate twice. */
3396 			max = zone->uz_max_items;
3397 			if (UZ_ITEMS_SLEEPERS(old) != 0 ||
3398 			    UZ_ITEMS_COUNT(old) >= max)
3399 				new = old + UZ_ITEMS_SLEEPER;
3400 			else
3401 				new = old + MIN(count, max - old);
3402 		} while (atomic_fcmpset_64(&zone->uz_items, &old, new) == 0);
3403 
3404 		/* We may have successfully allocated under the sleepq lock. */
3405 		if (UZ_ITEMS_SLEEPERS(new) == 0) {
3406 			sleepq_release(&zone->uz_max_items);
3407 			return (new - old);
3408 		}
3409 
3410 		/*
3411 		 * This is in a different cacheline from uz_items so that we
3412 		 * don't constantly invalidate the fastpath cacheline when we
3413 		 * adjust item counts.  This could be limited to toggling on
3414 		 * transitions.
3415 		 */
3416 		atomic_add_32(&zone->uz_sleepers, 1);
3417 		atomic_add_64(&zone->uz_sleeps, 1);
3418 
3419 		/*
3420 		 * We have added ourselves as a sleeper.  The sleepq lock
3421 		 * protects us from wakeup races.  Sleep now and then retry.
3422 		 */
3423 		sleepq_add(&zone->uz_max_items, NULL, "zonelimit", 0, 0);
3424 		sleepq_wait(&zone->uz_max_items, PVM);
3425 
3426 		/*
3427 		 * After wakeup, remove ourselves as a sleeper and try
3428 		 * again.  We no longer have the sleepq lock for protection.
3429 		 *
3430 		 * Subract ourselves as a sleeper while attempting to add
3431 		 * our count.
3432 		 */
3433 		atomic_subtract_32(&zone->uz_sleepers, 1);
3434 		old = atomic_fetchadd_64(&zone->uz_items,
3435 		    -(UZ_ITEMS_SLEEPER - count));
3436 		/* We're no longer a sleeper. */
3437 		old -= UZ_ITEMS_SLEEPER;
3438 
3439 		/*
3440 		 * If we're still at the limit, restart.  Notably do not
3441 		 * block on other sleepers.  Cache the max value to protect
3442 		 * against changes via sysctl.
3443 		 */
3444 		total = UZ_ITEMS_COUNT(old);
3445 		max = zone->uz_max_items;
3446 		if (total >= max)
3447 			continue;
3448 		/* Truncate if necessary, otherwise wake other sleepers. */
3449 		if (total + count > max) {
3450 			zone_free_limit(zone, total + count - max);
3451 			count = max - total;
3452 		} else if (total + count < max && UZ_ITEMS_SLEEPERS(old) != 0)
3453 			wakeup_one(&zone->uz_max_items);
3454 
3455 		return (count);
3456 	}
3457 }
3458 
3459 /*
3460  * Allocate 'count' items from our max_items limit.  Returns the number
3461  * available.  If M_NOWAIT is not specified it will sleep until at least
3462  * one item can be allocated.
3463  */
3464 static int
3465 zone_alloc_limit(uma_zone_t zone, int count, int flags)
3466 {
3467 	uint64_t old;
3468 	uint64_t max;
3469 
3470 	max = zone->uz_max_items;
3471 	MPASS(max > 0);
3472 
3473 	/*
3474 	 * We expect normal allocations to succeed with a simple
3475 	 * fetchadd.
3476 	 */
3477 	old = atomic_fetchadd_64(&zone->uz_items, count);
3478 	if (__predict_true(old + count <= max))
3479 		return (count);
3480 
3481 	/*
3482 	 * If we had some items and no sleepers just return the
3483 	 * truncated value.  We have to release the excess space
3484 	 * though because that may wake sleepers who weren't woken
3485 	 * because we were temporarily over the limit.
3486 	 */
3487 	if (old < max) {
3488 		zone_free_limit(zone, (old + count) - max);
3489 		return (max - old);
3490 	}
3491 	return (zone_alloc_limit_hard(zone, count, flags));
3492 }
3493 
3494 /*
3495  * Free a number of items back to the limit.
3496  */
3497 static void
3498 zone_free_limit(uma_zone_t zone, int count)
3499 {
3500 	uint64_t old;
3501 
3502 	MPASS(count > 0);
3503 
3504 	/*
3505 	 * In the common case we either have no sleepers or
3506 	 * are still over the limit and can just return.
3507 	 */
3508 	old = atomic_fetchadd_64(&zone->uz_items, -count);
3509 	if (__predict_true(UZ_ITEMS_SLEEPERS(old) == 0 ||
3510 	   UZ_ITEMS_COUNT(old) - count >= zone->uz_max_items))
3511 		return;
3512 
3513 	/*
3514 	 * Moderate the rate of wakeups.  Sleepers will continue
3515 	 * to generate wakeups if necessary.
3516 	 */
3517 	wakeup_one(&zone->uz_max_items);
3518 }
3519 
3520 static uma_bucket_t
3521 zone_alloc_bucket(uma_zone_t zone, void *udata, int domain, int flags)
3522 {
3523 	uma_bucket_t bucket;
3524 	int maxbucket, cnt;
3525 
3526 	CTR1(KTR_UMA, "zone_alloc:_bucket domain %d)", domain);
3527 
3528 	/* Avoid allocs targeting empty domains. */
3529 	if (domain != UMA_ANYDOMAIN && VM_DOMAIN_EMPTY(domain))
3530 		domain = UMA_ANYDOMAIN;
3531 
3532 	if (zone->uz_max_items > 0)
3533 		maxbucket = zone_alloc_limit(zone, zone->uz_bucket_size,
3534 		    M_NOWAIT);
3535 	else
3536 		maxbucket = zone->uz_bucket_size;
3537 	if (maxbucket == 0)
3538 		return (false);
3539 
3540 	/* Don't wait for buckets, preserve caller's NOVM setting. */
3541 	bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM));
3542 	if (bucket == NULL) {
3543 		cnt = 0;
3544 		goto out;
3545 	}
3546 
3547 	bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket,
3548 	    MIN(maxbucket, bucket->ub_entries), domain, flags);
3549 
3550 	/*
3551 	 * Initialize the memory if necessary.
3552 	 */
3553 	if (bucket->ub_cnt != 0 && zone->uz_init != NULL) {
3554 		int i;
3555 
3556 		for (i = 0; i < bucket->ub_cnt; i++)
3557 			if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size,
3558 			    flags) != 0)
3559 				break;
3560 		/*
3561 		 * If we couldn't initialize the whole bucket, put the
3562 		 * rest back onto the freelist.
3563 		 */
3564 		if (i != bucket->ub_cnt) {
3565 			zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i],
3566 			    bucket->ub_cnt - i);
3567 #ifdef INVARIANTS
3568 			bzero(&bucket->ub_bucket[i],
3569 			    sizeof(void *) * (bucket->ub_cnt - i));
3570 #endif
3571 			bucket->ub_cnt = i;
3572 		}
3573 	}
3574 
3575 	cnt = bucket->ub_cnt;
3576 	if (bucket->ub_cnt == 0) {
3577 		bucket_free(zone, bucket, udata);
3578 		counter_u64_add(zone->uz_fails, 1);
3579 		bucket = NULL;
3580 	}
3581 out:
3582 	if (zone->uz_max_items > 0 && cnt < maxbucket)
3583 		zone_free_limit(zone, maxbucket - cnt);
3584 
3585 	return (bucket);
3586 }
3587 
3588 /*
3589  * Allocates a single item from a zone.
3590  *
3591  * Arguments
3592  *	zone   The zone to alloc for.
3593  *	udata  The data to be passed to the constructor.
3594  *	domain The domain to allocate from or UMA_ANYDOMAIN.
3595  *	flags  M_WAITOK, M_NOWAIT, M_ZERO.
3596  *
3597  * Returns
3598  *	NULL if there is no memory and M_NOWAIT is set
3599  *	An item if successful
3600  */
3601 
3602 static void *
3603 zone_alloc_item(uma_zone_t zone, void *udata, int domain, int flags)
3604 {
3605 	void *item;
3606 
3607 	if (zone->uz_max_items > 0 && zone_alloc_limit(zone, 1, flags) == 0)
3608 		return (NULL);
3609 
3610 	/* Avoid allocs targeting empty domains. */
3611 	if (domain != UMA_ANYDOMAIN && VM_DOMAIN_EMPTY(domain))
3612 		domain = UMA_ANYDOMAIN;
3613 
3614 	if (zone->uz_import(zone->uz_arg, &item, 1, domain, flags) != 1)
3615 		goto fail_cnt;
3616 
3617 	/*
3618 	 * We have to call both the zone's init (not the keg's init)
3619 	 * and the zone's ctor.  This is because the item is going from
3620 	 * a keg slab directly to the user, and the user is expecting it
3621 	 * to be both zone-init'd as well as zone-ctor'd.
3622 	 */
3623 	if (zone->uz_init != NULL) {
3624 		if (zone->uz_init(item, zone->uz_size, flags) != 0) {
3625 			zone_free_item(zone, item, udata, SKIP_FINI | SKIP_CNT);
3626 			goto fail_cnt;
3627 		}
3628 	}
3629 	item = item_ctor(zone, zone->uz_size, udata, flags, item);
3630 	if (item == NULL)
3631 		goto fail;
3632 
3633 	counter_u64_add(zone->uz_allocs, 1);
3634 	CTR3(KTR_UMA, "zone_alloc_item item %p from %s(%p)", item,
3635 	    zone->uz_name, zone);
3636 
3637 	return (item);
3638 
3639 fail_cnt:
3640 	counter_u64_add(zone->uz_fails, 1);
3641 fail:
3642 	if (zone->uz_max_items > 0)
3643 		zone_free_limit(zone, 1);
3644 	CTR2(KTR_UMA, "zone_alloc_item failed from %s(%p)",
3645 	    zone->uz_name, zone);
3646 
3647 	return (NULL);
3648 }
3649 
3650 /* See uma.h */
3651 void
3652 uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
3653 {
3654 	uma_cache_t cache;
3655 	uma_cache_bucket_t bucket;
3656 	int domain, itemdomain, uz_flags;
3657 
3658 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
3659 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
3660 
3661 	CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread,
3662 	    zone->uz_name);
3663 
3664 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
3665 	    ("uma_zfree_arg: called with spinlock or critical section held"));
3666 
3667         /* uma_zfree(..., NULL) does nothing, to match free(9). */
3668         if (item == NULL)
3669                 return;
3670 #ifdef DEBUG_MEMGUARD
3671 	if (is_memguard_addr(item)) {
3672 		if (zone->uz_dtor != NULL)
3673 			zone->uz_dtor(item, zone->uz_size, udata);
3674 		if (zone->uz_fini != NULL)
3675 			zone->uz_fini(item, zone->uz_size);
3676 		memguard_free(item);
3677 		return;
3678 	}
3679 #endif
3680 
3681 	/*
3682 	 * We are accessing the per-cpu cache without a critical section to
3683 	 * fetch size and flags.  This is acceptable, if we are preempted we
3684 	 * will simply read another cpu's line.
3685 	 */
3686 	cache = &zone->uz_cpu[curcpu];
3687 	uz_flags = cache_uz_flags(cache);
3688 	if (__predict_false((uz_flags & UMA_ZFLAG_CTORDTOR) != 0 ||
3689 	    UMA_ALWAYS_CTORDTOR))
3690 		item_dtor(zone, item, cache_uz_size(cache), udata, SKIP_NONE);
3691 
3692 	/*
3693 	 * The race here is acceptable.  If we miss it we'll just have to wait
3694 	 * a little longer for the limits to be reset.
3695 	 */
3696 	if (__predict_false(uz_flags & UMA_ZFLAG_LIMIT)) {
3697 		if (zone->uz_sleepers > 0)
3698 			goto zfree_item;
3699 	}
3700 
3701 	/*
3702 	 * If possible, free to the per-CPU cache.  There are two
3703 	 * requirements for safe access to the per-CPU cache: (1) the thread
3704 	 * accessing the cache must not be preempted or yield during access,
3705 	 * and (2) the thread must not migrate CPUs without switching which
3706 	 * cache it accesses.  We rely on a critical section to prevent
3707 	 * preemption and migration.  We release the critical section in
3708 	 * order to acquire the zone mutex if we are unable to free to the
3709 	 * current cache; when we re-acquire the critical section, we must
3710 	 * detect and handle migration if it has occurred.
3711 	 */
3712 	domain = itemdomain = 0;
3713 #ifdef NUMA
3714 	if ((uz_flags & UMA_ZONE_FIRSTTOUCH) != 0)
3715 		itemdomain = _vm_phys_domain(pmap_kextract((vm_offset_t)item));
3716 #endif
3717 	critical_enter();
3718 	do {
3719 		cache = &zone->uz_cpu[curcpu];
3720 #ifdef NUMA
3721 		domain = PCPU_GET(domain);
3722 		if ((uz_flags & UMA_ZONE_FIRSTTOUCH) != 0 &&
3723 		    domain != itemdomain) {
3724 			bucket = &cache->uc_crossbucket;
3725 		} else
3726 #endif
3727 		{
3728 			/*
3729 			 * Try to free into the allocbucket first to give LIFO
3730 			 * ordering for cache-hot datastructures.  Spill over
3731 			 * into the freebucket if necessary.  Alloc will swap
3732 			 * them if one runs dry.
3733 			 */
3734 			bucket = &cache->uc_allocbucket;
3735 			if (__predict_false(bucket->ucb_cnt >=
3736 			    bucket->ucb_entries))
3737 				bucket = &cache->uc_freebucket;
3738 		}
3739 		if (__predict_true(bucket->ucb_cnt < bucket->ucb_entries)) {
3740 			cache_bucket_push(cache, bucket, item);
3741 			critical_exit();
3742 			return;
3743 		}
3744 	} while (cache_free(zone, cache, udata, item, itemdomain));
3745 	critical_exit();
3746 
3747 	/*
3748 	 * If nothing else caught this, we'll just do an internal free.
3749 	 */
3750 zfree_item:
3751 	zone_free_item(zone, item, udata, SKIP_DTOR);
3752 }
3753 
3754 #ifdef NUMA
3755 /*
3756  * sort crossdomain free buckets to domain correct buckets and cache
3757  * them.
3758  */
3759 static void
3760 zone_free_cross(uma_zone_t zone, uma_bucket_t bucket, void *udata)
3761 {
3762 	struct uma_bucketlist fullbuckets;
3763 	uma_zone_domain_t zdom;
3764 	uma_bucket_t b;
3765 	void *item;
3766 	int domain;
3767 
3768 	CTR3(KTR_UMA,
3769 	    "uma_zfree: zone %s(%p) draining cross bucket %p",
3770 	    zone->uz_name, zone, bucket);
3771 
3772 	TAILQ_INIT(&fullbuckets);
3773 
3774 	/*
3775 	 * To avoid having ndomain * ndomain buckets for sorting we have a
3776 	 * lock on the current crossfree bucket.  A full matrix with
3777 	 * per-domain locking could be used if necessary.
3778 	 */
3779 	ZONE_CROSS_LOCK(zone);
3780 	while (bucket->ub_cnt > 0) {
3781 		item = bucket->ub_bucket[bucket->ub_cnt - 1];
3782 		domain = _vm_phys_domain(pmap_kextract((vm_offset_t)item));
3783 		zdom = &zone->uz_domain[domain];
3784 		if (zdom->uzd_cross == NULL) {
3785 			zdom->uzd_cross = bucket_alloc(zone, udata, M_NOWAIT);
3786 			if (zdom->uzd_cross == NULL)
3787 				break;
3788 		}
3789 		zdom->uzd_cross->ub_bucket[zdom->uzd_cross->ub_cnt++] = item;
3790 		if (zdom->uzd_cross->ub_cnt == zdom->uzd_cross->ub_entries) {
3791 			TAILQ_INSERT_HEAD(&fullbuckets, zdom->uzd_cross,
3792 			    ub_link);
3793 			zdom->uzd_cross = NULL;
3794 		}
3795 		bucket->ub_cnt--;
3796 	}
3797 	ZONE_CROSS_UNLOCK(zone);
3798 	if (!TAILQ_EMPTY(&fullbuckets)) {
3799 		ZONE_LOCK(zone);
3800 		while ((b = TAILQ_FIRST(&fullbuckets)) != NULL) {
3801 			TAILQ_REMOVE(&fullbuckets, b, ub_link);
3802 			if (zone->uz_bkt_count >= zone->uz_bkt_max) {
3803 				ZONE_UNLOCK(zone);
3804 				bucket_drain(zone, b);
3805 				bucket_free(zone, b, udata);
3806 				ZONE_LOCK(zone);
3807 			} else {
3808 				domain = _vm_phys_domain(
3809 				    pmap_kextract(
3810 				    (vm_offset_t)b->ub_bucket[0]));
3811 				zdom = &zone->uz_domain[domain];
3812 				zone_put_bucket(zone, zdom, b, true);
3813 			}
3814 		}
3815 		ZONE_UNLOCK(zone);
3816 	}
3817 	if (bucket->ub_cnt != 0)
3818 		bucket_drain(zone, bucket);
3819 	bucket_free(zone, bucket, udata);
3820 }
3821 #endif
3822 
3823 static void
3824 zone_free_bucket(uma_zone_t zone, uma_bucket_t bucket, void *udata,
3825     int domain, int itemdomain)
3826 {
3827 	uma_zone_domain_t zdom;
3828 
3829 #ifdef NUMA
3830 	/*
3831 	 * Buckets coming from the wrong domain will be entirely for the
3832 	 * only other domain on two domain systems.  In this case we can
3833 	 * simply cache them.  Otherwise we need to sort them back to
3834 	 * correct domains.
3835 	 */
3836 	if (domain != itemdomain && vm_ndomains > 2) {
3837 		zone_free_cross(zone, bucket, udata);
3838 		return;
3839 	}
3840 #endif
3841 
3842 	/*
3843 	 * Attempt to save the bucket in the zone's domain bucket cache.
3844 	 *
3845 	 * We bump the uz count when the cache size is insufficient to
3846 	 * handle the working set.
3847 	 */
3848 	if (ZONE_TRYLOCK(zone) == 0) {
3849 		/* Record contention to size the buckets. */
3850 		ZONE_LOCK(zone);
3851 		if (zone->uz_bucket_size < zone->uz_bucket_size_max)
3852 			zone->uz_bucket_size++;
3853 	}
3854 
3855 	CTR3(KTR_UMA,
3856 	    "uma_zfree: zone %s(%p) putting bucket %p on free list",
3857 	    zone->uz_name, zone, bucket);
3858 	/* ub_cnt is pointing to the last free item */
3859 	KASSERT(bucket->ub_cnt == bucket->ub_entries,
3860 	    ("uma_zfree: Attempting to insert partial  bucket onto the full list.\n"));
3861 	if (zone->uz_bkt_count >= zone->uz_bkt_max) {
3862 		ZONE_UNLOCK(zone);
3863 		bucket_drain(zone, bucket);
3864 		bucket_free(zone, bucket, udata);
3865 	} else {
3866 		zdom = &zone->uz_domain[itemdomain];
3867 		zone_put_bucket(zone, zdom, bucket, true);
3868 		ZONE_UNLOCK(zone);
3869 	}
3870 }
3871 
3872 /*
3873  * Populate a free or cross bucket for the current cpu cache.  Free any
3874  * existing full bucket either to the zone cache or back to the slab layer.
3875  *
3876  * Enters and returns in a critical section.  false return indicates that
3877  * we can not satisfy this free in the cache layer.  true indicates that
3878  * the caller should retry.
3879  */
3880 static __noinline bool
3881 cache_free(uma_zone_t zone, uma_cache_t cache, void *udata, void *item,
3882     int itemdomain)
3883 {
3884 	uma_cache_bucket_t cbucket;
3885 	uma_bucket_t bucket;
3886 	int domain;
3887 
3888 	CRITICAL_ASSERT(curthread);
3889 
3890 	if (zone->uz_bucket_size == 0 || bucketdisable)
3891 		return false;
3892 
3893 	cache = &zone->uz_cpu[curcpu];
3894 
3895 	/*
3896 	 * FIRSTTOUCH domains need to free to the correct zdom.  When
3897 	 * enabled this is the zdom of the item.   The bucket is the
3898 	 * cross bucket if the current domain and itemdomain do not match.
3899 	 */
3900 	cbucket = &cache->uc_freebucket;
3901 #ifdef NUMA
3902 	if ((zone->uz_flags & UMA_ZONE_FIRSTTOUCH) != 0) {
3903 		domain = PCPU_GET(domain);
3904 		if (domain != itemdomain) {
3905 			cbucket = &cache->uc_crossbucket;
3906 			if (cbucket->ucb_cnt != 0)
3907 				atomic_add_64(&zone->uz_xdomain,
3908 				    cbucket->ucb_cnt);
3909 		}
3910 	} else
3911 #endif
3912 		itemdomain = domain = 0;
3913 	bucket = cache_bucket_unload(cbucket);
3914 
3915 	/* We are no longer associated with this CPU. */
3916 	critical_exit();
3917 
3918 	if (bucket != NULL)
3919 		zone_free_bucket(zone, bucket, udata, domain, itemdomain);
3920 
3921 	bucket = bucket_alloc(zone, udata, M_NOWAIT);
3922 	CTR3(KTR_UMA, "uma_zfree: zone %s(%p) allocated bucket %p",
3923 	    zone->uz_name, zone, bucket);
3924 	critical_enter();
3925 	if (bucket == NULL)
3926 		return (false);
3927 	cache = &zone->uz_cpu[curcpu];
3928 #ifdef NUMA
3929 	/*
3930 	 * Check to see if we should be populating the cross bucket.  If it
3931 	 * is already populated we will fall through and attempt to populate
3932 	 * the free bucket.
3933 	 */
3934 	if ((zone->uz_flags & UMA_ZONE_FIRSTTOUCH) != 0) {
3935 		domain = PCPU_GET(domain);
3936 		if (domain != itemdomain &&
3937 		    cache->uc_crossbucket.ucb_bucket == NULL) {
3938 			cache_bucket_load_cross(cache, bucket);
3939 			return (true);
3940 		}
3941 	}
3942 #endif
3943 	/*
3944 	 * We may have lost the race to fill the bucket or switched CPUs.
3945 	 */
3946 	if (cache->uc_freebucket.ucb_bucket != NULL) {
3947 		critical_exit();
3948 		bucket_free(zone, bucket, udata);
3949 		critical_enter();
3950 	} else
3951 		cache_bucket_load_free(cache, bucket);
3952 
3953 	return (true);
3954 }
3955 
3956 void
3957 uma_zfree_domain(uma_zone_t zone, void *item, void *udata)
3958 {
3959 
3960 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
3961 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
3962 
3963 	CTR2(KTR_UMA, "uma_zfree_domain thread %x zone %s", curthread,
3964 	    zone->uz_name);
3965 
3966 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
3967 	    ("uma_zfree_domain: called with spinlock or critical section held"));
3968 
3969         /* uma_zfree(..., NULL) does nothing, to match free(9). */
3970         if (item == NULL)
3971                 return;
3972 	zone_free_item(zone, item, udata, SKIP_NONE);
3973 }
3974 
3975 static void
3976 slab_free_item(uma_zone_t zone, uma_slab_t slab, void *item)
3977 {
3978 	uma_keg_t keg;
3979 	uma_domain_t dom;
3980 	uint8_t freei;
3981 
3982 	keg = zone->uz_keg;
3983 	KEG_LOCK_ASSERT(keg, slab->us_domain);
3984 
3985 	/* Do we need to remove from any lists? */
3986 	dom = &keg->uk_domain[slab->us_domain];
3987 	if (slab->us_freecount+1 == keg->uk_ipers) {
3988 		LIST_REMOVE(slab, us_link);
3989 		LIST_INSERT_HEAD(&dom->ud_free_slab, slab, us_link);
3990 	} else if (slab->us_freecount == 0) {
3991 		LIST_REMOVE(slab, us_link);
3992 		LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
3993 	}
3994 
3995 	/* Slab management. */
3996 	freei = slab_item_index(slab, keg, item);
3997 	BIT_SET(keg->uk_ipers, freei, &slab->us_free);
3998 	slab->us_freecount++;
3999 
4000 	/* Keg statistics. */
4001 	dom->ud_free++;
4002 }
4003 
4004 static void
4005 zone_release(void *arg, void **bucket, int cnt)
4006 {
4007 	struct mtx *lock;
4008 	uma_zone_t zone;
4009 	uma_slab_t slab;
4010 	uma_keg_t keg;
4011 	uint8_t *mem;
4012 	void *item;
4013 	int i;
4014 
4015 	zone = arg;
4016 	keg = zone->uz_keg;
4017 	lock = NULL;
4018 	if (__predict_false((zone->uz_flags & UMA_ZFLAG_HASH) != 0))
4019 		lock = KEG_LOCK(keg, 0);
4020 	for (i = 0; i < cnt; i++) {
4021 		item = bucket[i];
4022 		if (__predict_true((zone->uz_flags & UMA_ZFLAG_VTOSLAB) != 0)) {
4023 			slab = vtoslab((vm_offset_t)item);
4024 		} else {
4025 			mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
4026 			if ((zone->uz_flags & UMA_ZFLAG_HASH) != 0)
4027 				slab = hash_sfind(&keg->uk_hash, mem);
4028 			else
4029 				slab = (uma_slab_t)(mem + keg->uk_pgoff);
4030 		}
4031 		if (lock != KEG_LOCKPTR(keg, slab->us_domain)) {
4032 			if (lock != NULL)
4033 				mtx_unlock(lock);
4034 			lock = KEG_LOCK(keg, slab->us_domain);
4035 		}
4036 		slab_free_item(zone, slab, item);
4037 	}
4038 	if (lock != NULL)
4039 		mtx_unlock(lock);
4040 }
4041 
4042 /*
4043  * Frees a single item to any zone.
4044  *
4045  * Arguments:
4046  *	zone   The zone to free to
4047  *	item   The item we're freeing
4048  *	udata  User supplied data for the dtor
4049  *	skip   Skip dtors and finis
4050  */
4051 static void
4052 zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip)
4053 {
4054 
4055 	item_dtor(zone, item, zone->uz_size, udata, skip);
4056 
4057 	if (skip < SKIP_FINI && zone->uz_fini)
4058 		zone->uz_fini(item, zone->uz_size);
4059 
4060 	zone->uz_release(zone->uz_arg, &item, 1);
4061 
4062 	if (skip & SKIP_CNT)
4063 		return;
4064 
4065 	counter_u64_add(zone->uz_frees, 1);
4066 
4067 	if (zone->uz_max_items > 0)
4068 		zone_free_limit(zone, 1);
4069 }
4070 
4071 /* See uma.h */
4072 int
4073 uma_zone_set_max(uma_zone_t zone, int nitems)
4074 {
4075 	struct uma_bucket_zone *ubz;
4076 	int count;
4077 
4078 	/*
4079 	 * XXX This can misbehave if the zone has any allocations with
4080 	 * no limit and a limit is imposed.  There is currently no
4081 	 * way to clear a limit.
4082 	 */
4083 	ZONE_LOCK(zone);
4084 	ubz = bucket_zone_max(zone, nitems);
4085 	count = ubz != NULL ? ubz->ubz_entries : 0;
4086 	zone->uz_bucket_size_max = zone->uz_bucket_size = count;
4087 	if (zone->uz_bucket_size_min > zone->uz_bucket_size_max)
4088 		zone->uz_bucket_size_min = zone->uz_bucket_size_max;
4089 	zone->uz_max_items = nitems;
4090 	zone->uz_flags |= UMA_ZFLAG_LIMIT;
4091 	zone_update_caches(zone);
4092 	/* We may need to wake waiters. */
4093 	wakeup(&zone->uz_max_items);
4094 	ZONE_UNLOCK(zone);
4095 
4096 	return (nitems);
4097 }
4098 
4099 /* See uma.h */
4100 void
4101 uma_zone_set_maxcache(uma_zone_t zone, int nitems)
4102 {
4103 	struct uma_bucket_zone *ubz;
4104 	int bpcpu;
4105 
4106 	ZONE_LOCK(zone);
4107 	ubz = bucket_zone_max(zone, nitems);
4108 	if (ubz != NULL) {
4109 		bpcpu = 2;
4110 		if ((zone->uz_flags & UMA_ZONE_FIRSTTOUCH) != 0)
4111 			/* Count the cross-domain bucket. */
4112 			bpcpu++;
4113 		nitems -= ubz->ubz_entries * bpcpu * mp_ncpus;
4114 		zone->uz_bucket_size_max = ubz->ubz_entries;
4115 	} else {
4116 		zone->uz_bucket_size_max = zone->uz_bucket_size = 0;
4117 	}
4118 	if (zone->uz_bucket_size_min > zone->uz_bucket_size_max)
4119 		zone->uz_bucket_size_min = zone->uz_bucket_size_max;
4120 	zone->uz_bkt_max = nitems;
4121 	ZONE_UNLOCK(zone);
4122 }
4123 
4124 /* See uma.h */
4125 int
4126 uma_zone_get_max(uma_zone_t zone)
4127 {
4128 	int nitems;
4129 
4130 	nitems = atomic_load_64(&zone->uz_max_items);
4131 
4132 	return (nitems);
4133 }
4134 
4135 /* See uma.h */
4136 void
4137 uma_zone_set_warning(uma_zone_t zone, const char *warning)
4138 {
4139 
4140 	ZONE_ASSERT_COLD(zone);
4141 	zone->uz_warning = warning;
4142 }
4143 
4144 /* See uma.h */
4145 void
4146 uma_zone_set_maxaction(uma_zone_t zone, uma_maxaction_t maxaction)
4147 {
4148 
4149 	ZONE_ASSERT_COLD(zone);
4150 	TASK_INIT(&zone->uz_maxaction, 0, (task_fn_t *)maxaction, zone);
4151 }
4152 
4153 /* See uma.h */
4154 int
4155 uma_zone_get_cur(uma_zone_t zone)
4156 {
4157 	int64_t nitems;
4158 	u_int i;
4159 
4160 	nitems = 0;
4161 	if (zone->uz_allocs != EARLY_COUNTER && zone->uz_frees != EARLY_COUNTER)
4162 		nitems = counter_u64_fetch(zone->uz_allocs) -
4163 		    counter_u64_fetch(zone->uz_frees);
4164 	CPU_FOREACH(i)
4165 		nitems += atomic_load_64(&zone->uz_cpu[i].uc_allocs) -
4166 		    atomic_load_64(&zone->uz_cpu[i].uc_frees);
4167 
4168 	return (nitems < 0 ? 0 : nitems);
4169 }
4170 
4171 static uint64_t
4172 uma_zone_get_allocs(uma_zone_t zone)
4173 {
4174 	uint64_t nitems;
4175 	u_int i;
4176 
4177 	nitems = 0;
4178 	if (zone->uz_allocs != EARLY_COUNTER)
4179 		nitems = counter_u64_fetch(zone->uz_allocs);
4180 	CPU_FOREACH(i)
4181 		nitems += atomic_load_64(&zone->uz_cpu[i].uc_allocs);
4182 
4183 	return (nitems);
4184 }
4185 
4186 static uint64_t
4187 uma_zone_get_frees(uma_zone_t zone)
4188 {
4189 	uint64_t nitems;
4190 	u_int i;
4191 
4192 	nitems = 0;
4193 	if (zone->uz_frees != EARLY_COUNTER)
4194 		nitems = counter_u64_fetch(zone->uz_frees);
4195 	CPU_FOREACH(i)
4196 		nitems += atomic_load_64(&zone->uz_cpu[i].uc_frees);
4197 
4198 	return (nitems);
4199 }
4200 
4201 #ifdef INVARIANTS
4202 /* Used only for KEG_ASSERT_COLD(). */
4203 static uint64_t
4204 uma_keg_get_allocs(uma_keg_t keg)
4205 {
4206 	uma_zone_t z;
4207 	uint64_t nitems;
4208 
4209 	nitems = 0;
4210 	LIST_FOREACH(z, &keg->uk_zones, uz_link)
4211 		nitems += uma_zone_get_allocs(z);
4212 
4213 	return (nitems);
4214 }
4215 #endif
4216 
4217 /* See uma.h */
4218 void
4219 uma_zone_set_init(uma_zone_t zone, uma_init uminit)
4220 {
4221 	uma_keg_t keg;
4222 
4223 	KEG_GET(zone, keg);
4224 	KEG_ASSERT_COLD(keg);
4225 	keg->uk_init = uminit;
4226 }
4227 
4228 /* See uma.h */
4229 void
4230 uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
4231 {
4232 	uma_keg_t keg;
4233 
4234 	KEG_GET(zone, keg);
4235 	KEG_ASSERT_COLD(keg);
4236 	keg->uk_fini = fini;
4237 }
4238 
4239 /* See uma.h */
4240 void
4241 uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
4242 {
4243 
4244 	ZONE_ASSERT_COLD(zone);
4245 	zone->uz_init = zinit;
4246 }
4247 
4248 /* See uma.h */
4249 void
4250 uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
4251 {
4252 
4253 	ZONE_ASSERT_COLD(zone);
4254 	zone->uz_fini = zfini;
4255 }
4256 
4257 /* See uma.h */
4258 void
4259 uma_zone_set_freef(uma_zone_t zone, uma_free freef)
4260 {
4261 	uma_keg_t keg;
4262 
4263 	KEG_GET(zone, keg);
4264 	KEG_ASSERT_COLD(keg);
4265 	keg->uk_freef = freef;
4266 }
4267 
4268 /* See uma.h */
4269 void
4270 uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
4271 {
4272 	uma_keg_t keg;
4273 
4274 	KEG_GET(zone, keg);
4275 	KEG_ASSERT_COLD(keg);
4276 	keg->uk_allocf = allocf;
4277 }
4278 
4279 /* See uma.h */
4280 void
4281 uma_zone_reserve(uma_zone_t zone, int items)
4282 {
4283 	uma_keg_t keg;
4284 
4285 	KEG_GET(zone, keg);
4286 	KEG_ASSERT_COLD(keg);
4287 	keg->uk_reserve = items;
4288 }
4289 
4290 /* See uma.h */
4291 int
4292 uma_zone_reserve_kva(uma_zone_t zone, int count)
4293 {
4294 	uma_keg_t keg;
4295 	vm_offset_t kva;
4296 	u_int pages;
4297 
4298 	KEG_GET(zone, keg);
4299 	KEG_ASSERT_COLD(keg);
4300 	ZONE_ASSERT_COLD(zone);
4301 
4302 	pages = howmany(count, keg->uk_ipers) * keg->uk_ppera;
4303 
4304 #ifdef UMA_MD_SMALL_ALLOC
4305 	if (keg->uk_ppera > 1) {
4306 #else
4307 	if (1) {
4308 #endif
4309 		kva = kva_alloc((vm_size_t)pages * PAGE_SIZE);
4310 		if (kva == 0)
4311 			return (0);
4312 	} else
4313 		kva = 0;
4314 
4315 	ZONE_LOCK(zone);
4316 	MPASS(keg->uk_kva == 0);
4317 	keg->uk_kva = kva;
4318 	keg->uk_offset = 0;
4319 	zone->uz_max_items = pages * keg->uk_ipers;
4320 #ifdef UMA_MD_SMALL_ALLOC
4321 	keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc;
4322 #else
4323 	keg->uk_allocf = noobj_alloc;
4324 #endif
4325 	keg->uk_flags |= UMA_ZFLAG_LIMIT | UMA_ZONE_NOFREE;
4326 	zone->uz_flags |= UMA_ZFLAG_LIMIT | UMA_ZONE_NOFREE;
4327 	zone_update_caches(zone);
4328 	ZONE_UNLOCK(zone);
4329 
4330 	return (1);
4331 }
4332 
4333 /* See uma.h */
4334 void
4335 uma_prealloc(uma_zone_t zone, int items)
4336 {
4337 	struct vm_domainset_iter di;
4338 	uma_domain_t dom;
4339 	uma_slab_t slab;
4340 	uma_keg_t keg;
4341 	int aflags, domain, slabs;
4342 
4343 	KEG_GET(zone, keg);
4344 	slabs = howmany(items, keg->uk_ipers);
4345 	while (slabs-- > 0) {
4346 		aflags = M_NOWAIT;
4347 		vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain,
4348 		    &aflags);
4349 		for (;;) {
4350 			slab = keg_alloc_slab(keg, zone, domain, M_WAITOK,
4351 			    aflags);
4352 			if (slab != NULL) {
4353 				dom = &keg->uk_domain[slab->us_domain];
4354 				LIST_REMOVE(slab, us_link);
4355 				LIST_INSERT_HEAD(&dom->ud_free_slab, slab,
4356 				    us_link);
4357 				KEG_UNLOCK(keg, slab->us_domain);
4358 				break;
4359 			}
4360 			if (vm_domainset_iter_policy(&di, &domain) != 0)
4361 				vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask);
4362 		}
4363 	}
4364 }
4365 
4366 /* See uma.h */
4367 void
4368 uma_reclaim(int req)
4369 {
4370 
4371 	CTR0(KTR_UMA, "UMA: vm asked us to release pages!");
4372 	sx_xlock(&uma_reclaim_lock);
4373 	bucket_enable();
4374 
4375 	switch (req) {
4376 	case UMA_RECLAIM_TRIM:
4377 		zone_foreach(zone_trim, NULL);
4378 		break;
4379 	case UMA_RECLAIM_DRAIN:
4380 	case UMA_RECLAIM_DRAIN_CPU:
4381 		zone_foreach(zone_drain, NULL);
4382 		if (req == UMA_RECLAIM_DRAIN_CPU) {
4383 			pcpu_cache_drain_safe(NULL);
4384 			zone_foreach(zone_drain, NULL);
4385 		}
4386 		break;
4387 	default:
4388 		panic("unhandled reclamation request %d", req);
4389 	}
4390 
4391 	/*
4392 	 * Some slabs may have been freed but this zone will be visited early
4393 	 * we visit again so that we can free pages that are empty once other
4394 	 * zones are drained.  We have to do the same for buckets.
4395 	 */
4396 	zone_drain(slabzone, NULL);
4397 	bucket_zone_drain();
4398 	sx_xunlock(&uma_reclaim_lock);
4399 }
4400 
4401 static volatile int uma_reclaim_needed;
4402 
4403 void
4404 uma_reclaim_wakeup(void)
4405 {
4406 
4407 	if (atomic_fetchadd_int(&uma_reclaim_needed, 1) == 0)
4408 		wakeup(uma_reclaim);
4409 }
4410 
4411 void
4412 uma_reclaim_worker(void *arg __unused)
4413 {
4414 
4415 	for (;;) {
4416 		sx_xlock(&uma_reclaim_lock);
4417 		while (atomic_load_int(&uma_reclaim_needed) == 0)
4418 			sx_sleep(uma_reclaim, &uma_reclaim_lock, PVM, "umarcl",
4419 			    hz);
4420 		sx_xunlock(&uma_reclaim_lock);
4421 		EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_KMEM);
4422 		uma_reclaim(UMA_RECLAIM_DRAIN_CPU);
4423 		atomic_store_int(&uma_reclaim_needed, 0);
4424 		/* Don't fire more than once per-second. */
4425 		pause("umarclslp", hz);
4426 	}
4427 }
4428 
4429 /* See uma.h */
4430 void
4431 uma_zone_reclaim(uma_zone_t zone, int req)
4432 {
4433 
4434 	switch (req) {
4435 	case UMA_RECLAIM_TRIM:
4436 		zone_trim(zone, NULL);
4437 		break;
4438 	case UMA_RECLAIM_DRAIN:
4439 		zone_drain(zone, NULL);
4440 		break;
4441 	case UMA_RECLAIM_DRAIN_CPU:
4442 		pcpu_cache_drain_safe(zone);
4443 		zone_drain(zone, NULL);
4444 		break;
4445 	default:
4446 		panic("unhandled reclamation request %d", req);
4447 	}
4448 }
4449 
4450 /* See uma.h */
4451 int
4452 uma_zone_exhausted(uma_zone_t zone)
4453 {
4454 
4455 	return (atomic_load_32(&zone->uz_sleepers) > 0);
4456 }
4457 
4458 unsigned long
4459 uma_limit(void)
4460 {
4461 
4462 	return (uma_kmem_limit);
4463 }
4464 
4465 void
4466 uma_set_limit(unsigned long limit)
4467 {
4468 
4469 	uma_kmem_limit = limit;
4470 }
4471 
4472 unsigned long
4473 uma_size(void)
4474 {
4475 
4476 	return (atomic_load_long(&uma_kmem_total));
4477 }
4478 
4479 long
4480 uma_avail(void)
4481 {
4482 
4483 	return (uma_kmem_limit - uma_size());
4484 }
4485 
4486 #ifdef DDB
4487 /*
4488  * Generate statistics across both the zone and its per-cpu cache's.  Return
4489  * desired statistics if the pointer is non-NULL for that statistic.
4490  *
4491  * Note: does not update the zone statistics, as it can't safely clear the
4492  * per-CPU cache statistic.
4493  *
4494  */
4495 static void
4496 uma_zone_sumstat(uma_zone_t z, long *cachefreep, uint64_t *allocsp,
4497     uint64_t *freesp, uint64_t *sleepsp, uint64_t *xdomainp)
4498 {
4499 	uma_cache_t cache;
4500 	uint64_t allocs, frees, sleeps, xdomain;
4501 	int cachefree, cpu;
4502 
4503 	allocs = frees = sleeps = xdomain = 0;
4504 	cachefree = 0;
4505 	CPU_FOREACH(cpu) {
4506 		cache = &z->uz_cpu[cpu];
4507 		cachefree += cache->uc_allocbucket.ucb_cnt;
4508 		cachefree += cache->uc_freebucket.ucb_cnt;
4509 		xdomain += cache->uc_crossbucket.ucb_cnt;
4510 		cachefree += cache->uc_crossbucket.ucb_cnt;
4511 		allocs += cache->uc_allocs;
4512 		frees += cache->uc_frees;
4513 	}
4514 	allocs += counter_u64_fetch(z->uz_allocs);
4515 	frees += counter_u64_fetch(z->uz_frees);
4516 	sleeps += z->uz_sleeps;
4517 	xdomain += z->uz_xdomain;
4518 	if (cachefreep != NULL)
4519 		*cachefreep = cachefree;
4520 	if (allocsp != NULL)
4521 		*allocsp = allocs;
4522 	if (freesp != NULL)
4523 		*freesp = frees;
4524 	if (sleepsp != NULL)
4525 		*sleepsp = sleeps;
4526 	if (xdomainp != NULL)
4527 		*xdomainp = xdomain;
4528 }
4529 #endif /* DDB */
4530 
4531 static int
4532 sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS)
4533 {
4534 	uma_keg_t kz;
4535 	uma_zone_t z;
4536 	int count;
4537 
4538 	count = 0;
4539 	rw_rlock(&uma_rwlock);
4540 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
4541 		LIST_FOREACH(z, &kz->uk_zones, uz_link)
4542 			count++;
4543 	}
4544 	LIST_FOREACH(z, &uma_cachezones, uz_link)
4545 		count++;
4546 
4547 	rw_runlock(&uma_rwlock);
4548 	return (sysctl_handle_int(oidp, &count, 0, req));
4549 }
4550 
4551 static void
4552 uma_vm_zone_stats(struct uma_type_header *uth, uma_zone_t z, struct sbuf *sbuf,
4553     struct uma_percpu_stat *ups, bool internal)
4554 {
4555 	uma_zone_domain_t zdom;
4556 	uma_cache_t cache;
4557 	int i;
4558 
4559 
4560 	for (i = 0; i < vm_ndomains; i++) {
4561 		zdom = &z->uz_domain[i];
4562 		uth->uth_zone_free += zdom->uzd_nitems;
4563 	}
4564 	uth->uth_allocs = counter_u64_fetch(z->uz_allocs);
4565 	uth->uth_frees = counter_u64_fetch(z->uz_frees);
4566 	uth->uth_fails = counter_u64_fetch(z->uz_fails);
4567 	uth->uth_sleeps = z->uz_sleeps;
4568 	uth->uth_xdomain = z->uz_xdomain;
4569 
4570 	/*
4571 	 * While it is not normally safe to access the cache bucket pointers
4572 	 * while not on the CPU that owns the cache, we only allow the pointers
4573 	 * to be exchanged without the zone lock held, not invalidated, so
4574 	 * accept the possible race associated with bucket exchange during
4575 	 * monitoring.  Use atomic_load_ptr() to ensure that the bucket pointers
4576 	 * are loaded only once.
4577 	 */
4578 	for (i = 0; i < mp_maxid + 1; i++) {
4579 		bzero(&ups[i], sizeof(*ups));
4580 		if (internal || CPU_ABSENT(i))
4581 			continue;
4582 		cache = &z->uz_cpu[i];
4583 		ups[i].ups_cache_free += cache->uc_allocbucket.ucb_cnt;
4584 		ups[i].ups_cache_free += cache->uc_freebucket.ucb_cnt;
4585 		ups[i].ups_cache_free += cache->uc_crossbucket.ucb_cnt;
4586 		ups[i].ups_allocs = cache->uc_allocs;
4587 		ups[i].ups_frees = cache->uc_frees;
4588 	}
4589 }
4590 
4591 static int
4592 sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
4593 {
4594 	struct uma_stream_header ush;
4595 	struct uma_type_header uth;
4596 	struct uma_percpu_stat *ups;
4597 	struct sbuf sbuf;
4598 	uma_keg_t kz;
4599 	uma_zone_t z;
4600 	uint64_t items;
4601 	uint32_t kfree, pages;
4602 	int count, error, i;
4603 
4604 	error = sysctl_wire_old_buffer(req, 0);
4605 	if (error != 0)
4606 		return (error);
4607 	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
4608 	sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL);
4609 	ups = malloc((mp_maxid + 1) * sizeof(*ups), M_TEMP, M_WAITOK);
4610 
4611 	count = 0;
4612 	rw_rlock(&uma_rwlock);
4613 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
4614 		LIST_FOREACH(z, &kz->uk_zones, uz_link)
4615 			count++;
4616 	}
4617 
4618 	LIST_FOREACH(z, &uma_cachezones, uz_link)
4619 		count++;
4620 
4621 	/*
4622 	 * Insert stream header.
4623 	 */
4624 	bzero(&ush, sizeof(ush));
4625 	ush.ush_version = UMA_STREAM_VERSION;
4626 	ush.ush_maxcpus = (mp_maxid + 1);
4627 	ush.ush_count = count;
4628 	(void)sbuf_bcat(&sbuf, &ush, sizeof(ush));
4629 
4630 	LIST_FOREACH(kz, &uma_kegs, uk_link) {
4631 		kfree = pages = 0;
4632 		for (i = 0; i < vm_ndomains; i++) {
4633 			kfree += kz->uk_domain[i].ud_free;
4634 			pages += kz->uk_domain[i].ud_pages;
4635 		}
4636 		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
4637 			bzero(&uth, sizeof(uth));
4638 			ZONE_LOCK(z);
4639 			strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
4640 			uth.uth_align = kz->uk_align;
4641 			uth.uth_size = kz->uk_size;
4642 			uth.uth_rsize = kz->uk_rsize;
4643 			if (z->uz_max_items > 0) {
4644 				items = UZ_ITEMS_COUNT(z->uz_items);
4645 				uth.uth_pages = (items / kz->uk_ipers) *
4646 					kz->uk_ppera;
4647 			} else
4648 				uth.uth_pages = pages;
4649 			uth.uth_maxpages = (z->uz_max_items / kz->uk_ipers) *
4650 			    kz->uk_ppera;
4651 			uth.uth_limit = z->uz_max_items;
4652 			uth.uth_keg_free = kfree;
4653 
4654 			/*
4655 			 * A zone is secondary is it is not the first entry
4656 			 * on the keg's zone list.
4657 			 */
4658 			if ((z->uz_flags & UMA_ZONE_SECONDARY) &&
4659 			    (LIST_FIRST(&kz->uk_zones) != z))
4660 				uth.uth_zone_flags = UTH_ZONE_SECONDARY;
4661 			uma_vm_zone_stats(&uth, z, &sbuf, ups,
4662 			    kz->uk_flags & UMA_ZFLAG_INTERNAL);
4663 			ZONE_UNLOCK(z);
4664 			(void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
4665 			for (i = 0; i < mp_maxid + 1; i++)
4666 				(void)sbuf_bcat(&sbuf, &ups[i], sizeof(ups[i]));
4667 		}
4668 	}
4669 	LIST_FOREACH(z, &uma_cachezones, uz_link) {
4670 		bzero(&uth, sizeof(uth));
4671 		ZONE_LOCK(z);
4672 		strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
4673 		uth.uth_size = z->uz_size;
4674 		uma_vm_zone_stats(&uth, z, &sbuf, ups, false);
4675 		ZONE_UNLOCK(z);
4676 		(void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
4677 		for (i = 0; i < mp_maxid + 1; i++)
4678 			(void)sbuf_bcat(&sbuf, &ups[i], sizeof(ups[i]));
4679 	}
4680 
4681 	rw_runlock(&uma_rwlock);
4682 	error = sbuf_finish(&sbuf);
4683 	sbuf_delete(&sbuf);
4684 	free(ups, M_TEMP);
4685 	return (error);
4686 }
4687 
4688 int
4689 sysctl_handle_uma_zone_max(SYSCTL_HANDLER_ARGS)
4690 {
4691 	uma_zone_t zone = *(uma_zone_t *)arg1;
4692 	int error, max;
4693 
4694 	max = uma_zone_get_max(zone);
4695 	error = sysctl_handle_int(oidp, &max, 0, req);
4696 	if (error || !req->newptr)
4697 		return (error);
4698 
4699 	uma_zone_set_max(zone, max);
4700 
4701 	return (0);
4702 }
4703 
4704 int
4705 sysctl_handle_uma_zone_cur(SYSCTL_HANDLER_ARGS)
4706 {
4707 	uma_zone_t zone;
4708 	int cur;
4709 
4710 	/*
4711 	 * Some callers want to add sysctls for global zones that
4712 	 * may not yet exist so they pass a pointer to a pointer.
4713 	 */
4714 	if (arg2 == 0)
4715 		zone = *(uma_zone_t *)arg1;
4716 	else
4717 		zone = arg1;
4718 	cur = uma_zone_get_cur(zone);
4719 	return (sysctl_handle_int(oidp, &cur, 0, req));
4720 }
4721 
4722 static int
4723 sysctl_handle_uma_zone_allocs(SYSCTL_HANDLER_ARGS)
4724 {
4725 	uma_zone_t zone = arg1;
4726 	uint64_t cur;
4727 
4728 	cur = uma_zone_get_allocs(zone);
4729 	return (sysctl_handle_64(oidp, &cur, 0, req));
4730 }
4731 
4732 static int
4733 sysctl_handle_uma_zone_frees(SYSCTL_HANDLER_ARGS)
4734 {
4735 	uma_zone_t zone = arg1;
4736 	uint64_t cur;
4737 
4738 	cur = uma_zone_get_frees(zone);
4739 	return (sysctl_handle_64(oidp, &cur, 0, req));
4740 }
4741 
4742 static int
4743 sysctl_handle_uma_zone_flags(SYSCTL_HANDLER_ARGS)
4744 {
4745 	struct sbuf sbuf;
4746 	uma_zone_t zone = arg1;
4747 	int error;
4748 
4749 	sbuf_new_for_sysctl(&sbuf, NULL, 0, req);
4750 	if (zone->uz_flags != 0)
4751 		sbuf_printf(&sbuf, "0x%b", zone->uz_flags, PRINT_UMA_ZFLAGS);
4752 	else
4753 		sbuf_printf(&sbuf, "0");
4754 	error = sbuf_finish(&sbuf);
4755 	sbuf_delete(&sbuf);
4756 
4757 	return (error);
4758 }
4759 
4760 static int
4761 sysctl_handle_uma_slab_efficiency(SYSCTL_HANDLER_ARGS)
4762 {
4763 	uma_keg_t keg = arg1;
4764 	int avail, effpct, total;
4765 
4766 	total = keg->uk_ppera * PAGE_SIZE;
4767 	if ((keg->uk_flags & UMA_ZFLAG_OFFPAGE) != 0)
4768 		total += slab_sizeof(SLAB_MAX_SETSIZE);
4769 	/*
4770 	 * We consider the client's requested size and alignment here, not the
4771 	 * real size determination uk_rsize, because we also adjust the real
4772 	 * size for internal implementation reasons (max bitset size).
4773 	 */
4774 	avail = keg->uk_ipers * roundup2(keg->uk_size, keg->uk_align + 1);
4775 	if ((keg->uk_flags & UMA_ZONE_PCPU) != 0)
4776 		avail *= mp_maxid + 1;
4777 	effpct = 100 * avail / total;
4778 	return (sysctl_handle_int(oidp, &effpct, 0, req));
4779 }
4780 
4781 static int
4782 sysctl_handle_uma_zone_items(SYSCTL_HANDLER_ARGS)
4783 {
4784 	uma_zone_t zone = arg1;
4785 	uint64_t cur;
4786 
4787 	cur = UZ_ITEMS_COUNT(atomic_load_64(&zone->uz_items));
4788 	return (sysctl_handle_64(oidp, &cur, 0, req));
4789 }
4790 
4791 #ifdef INVARIANTS
4792 static uma_slab_t
4793 uma_dbg_getslab(uma_zone_t zone, void *item)
4794 {
4795 	uma_slab_t slab;
4796 	uma_keg_t keg;
4797 	uint8_t *mem;
4798 
4799 	/*
4800 	 * It is safe to return the slab here even though the
4801 	 * zone is unlocked because the item's allocation state
4802 	 * essentially holds a reference.
4803 	 */
4804 	mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
4805 	if ((zone->uz_flags & UMA_ZFLAG_CACHE) != 0)
4806 		return (NULL);
4807 	if (zone->uz_flags & UMA_ZFLAG_VTOSLAB)
4808 		return (vtoslab((vm_offset_t)mem));
4809 	keg = zone->uz_keg;
4810 	if ((keg->uk_flags & UMA_ZFLAG_HASH) == 0)
4811 		return ((uma_slab_t)(mem + keg->uk_pgoff));
4812 	KEG_LOCK(keg, 0);
4813 	slab = hash_sfind(&keg->uk_hash, mem);
4814 	KEG_UNLOCK(keg, 0);
4815 
4816 	return (slab);
4817 }
4818 
4819 static bool
4820 uma_dbg_zskip(uma_zone_t zone, void *mem)
4821 {
4822 
4823 	if ((zone->uz_flags & UMA_ZFLAG_CACHE) != 0)
4824 		return (true);
4825 
4826 	return (uma_dbg_kskip(zone->uz_keg, mem));
4827 }
4828 
4829 static bool
4830 uma_dbg_kskip(uma_keg_t keg, void *mem)
4831 {
4832 	uintptr_t idx;
4833 
4834 	if (dbg_divisor == 0)
4835 		return (true);
4836 
4837 	if (dbg_divisor == 1)
4838 		return (false);
4839 
4840 	idx = (uintptr_t)mem >> PAGE_SHIFT;
4841 	if (keg->uk_ipers > 1) {
4842 		idx *= keg->uk_ipers;
4843 		idx += ((uintptr_t)mem & PAGE_MASK) / keg->uk_rsize;
4844 	}
4845 
4846 	if ((idx / dbg_divisor) * dbg_divisor != idx) {
4847 		counter_u64_add(uma_skip_cnt, 1);
4848 		return (true);
4849 	}
4850 	counter_u64_add(uma_dbg_cnt, 1);
4851 
4852 	return (false);
4853 }
4854 
4855 /*
4856  * Set up the slab's freei data such that uma_dbg_free can function.
4857  *
4858  */
4859 static void
4860 uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item)
4861 {
4862 	uma_keg_t keg;
4863 	int freei;
4864 
4865 	if (slab == NULL) {
4866 		slab = uma_dbg_getslab(zone, item);
4867 		if (slab == NULL)
4868 			panic("uma: item %p did not belong to zone %s\n",
4869 			    item, zone->uz_name);
4870 	}
4871 	keg = zone->uz_keg;
4872 	freei = slab_item_index(slab, keg, item);
4873 
4874 	if (BIT_ISSET(keg->uk_ipers, freei, slab_dbg_bits(slab, keg)))
4875 		panic("Duplicate alloc of %p from zone %p(%s) slab %p(%d)\n",
4876 		    item, zone, zone->uz_name, slab, freei);
4877 	BIT_SET_ATOMIC(keg->uk_ipers, freei, slab_dbg_bits(slab, keg));
4878 }
4879 
4880 /*
4881  * Verifies freed addresses.  Checks for alignment, valid slab membership
4882  * and duplicate frees.
4883  *
4884  */
4885 static void
4886 uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
4887 {
4888 	uma_keg_t keg;
4889 	int freei;
4890 
4891 	if (slab == NULL) {
4892 		slab = uma_dbg_getslab(zone, item);
4893 		if (slab == NULL)
4894 			panic("uma: Freed item %p did not belong to zone %s\n",
4895 			    item, zone->uz_name);
4896 	}
4897 	keg = zone->uz_keg;
4898 	freei = slab_item_index(slab, keg, item);
4899 
4900 	if (freei >= keg->uk_ipers)
4901 		panic("Invalid free of %p from zone %p(%s) slab %p(%d)\n",
4902 		    item, zone, zone->uz_name, slab, freei);
4903 
4904 	if (slab_item(slab, keg, freei) != item)
4905 		panic("Unaligned free of %p from zone %p(%s) slab %p(%d)\n",
4906 		    item, zone, zone->uz_name, slab, freei);
4907 
4908 	if (!BIT_ISSET(keg->uk_ipers, freei, slab_dbg_bits(slab, keg)))
4909 		panic("Duplicate free of %p from zone %p(%s) slab %p(%d)\n",
4910 		    item, zone, zone->uz_name, slab, freei);
4911 
4912 	BIT_CLR_ATOMIC(keg->uk_ipers, freei, slab_dbg_bits(slab, keg));
4913 }
4914 #endif /* INVARIANTS */
4915 
4916 #ifdef DDB
4917 static int64_t
4918 get_uma_stats(uma_keg_t kz, uma_zone_t z, uint64_t *allocs, uint64_t *used,
4919     uint64_t *sleeps, long *cachefree, uint64_t *xdomain)
4920 {
4921 	uint64_t frees;
4922 	int i;
4923 
4924 	if (kz->uk_flags & UMA_ZFLAG_INTERNAL) {
4925 		*allocs = counter_u64_fetch(z->uz_allocs);
4926 		frees = counter_u64_fetch(z->uz_frees);
4927 		*sleeps = z->uz_sleeps;
4928 		*cachefree = 0;
4929 		*xdomain = 0;
4930 	} else
4931 		uma_zone_sumstat(z, cachefree, allocs, &frees, sleeps,
4932 		    xdomain);
4933 	for (i = 0; i < vm_ndomains; i++) {
4934 		*cachefree += z->uz_domain[i].uzd_nitems;
4935 		if (!((z->uz_flags & UMA_ZONE_SECONDARY) &&
4936 		    (LIST_FIRST(&kz->uk_zones) != z)))
4937 			*cachefree += kz->uk_domain[i].ud_free;
4938 	}
4939 	*used = *allocs - frees;
4940 	return (((int64_t)*used + *cachefree) * kz->uk_size);
4941 }
4942 
4943 DB_SHOW_COMMAND(uma, db_show_uma)
4944 {
4945 	const char *fmt_hdr, *fmt_entry;
4946 	uma_keg_t kz;
4947 	uma_zone_t z;
4948 	uint64_t allocs, used, sleeps, xdomain;
4949 	long cachefree;
4950 	/* variables for sorting */
4951 	uma_keg_t cur_keg;
4952 	uma_zone_t cur_zone, last_zone;
4953 	int64_t cur_size, last_size, size;
4954 	int ties;
4955 
4956 	/* /i option produces machine-parseable CSV output */
4957 	if (modif[0] == 'i') {
4958 		fmt_hdr = "%s,%s,%s,%s,%s,%s,%s,%s,%s\n";
4959 		fmt_entry = "\"%s\",%ju,%jd,%ld,%ju,%ju,%u,%jd,%ju\n";
4960 	} else {
4961 		fmt_hdr = "%18s %6s %7s %7s %11s %7s %7s %10s %8s\n";
4962 		fmt_entry = "%18s %6ju %7jd %7ld %11ju %7ju %7u %10jd %8ju\n";
4963 	}
4964 
4965 	db_printf(fmt_hdr, "Zone", "Size", "Used", "Free", "Requests",
4966 	    "Sleeps", "Bucket", "Total Mem", "XFree");
4967 
4968 	/* Sort the zones with largest size first. */
4969 	last_zone = NULL;
4970 	last_size = INT64_MAX;
4971 	for (;;) {
4972 		cur_zone = NULL;
4973 		cur_size = -1;
4974 		ties = 0;
4975 		LIST_FOREACH(kz, &uma_kegs, uk_link) {
4976 			LIST_FOREACH(z, &kz->uk_zones, uz_link) {
4977 				/*
4978 				 * In the case of size ties, print out zones
4979 				 * in the order they are encountered.  That is,
4980 				 * when we encounter the most recently output
4981 				 * zone, we have already printed all preceding
4982 				 * ties, and we must print all following ties.
4983 				 */
4984 				if (z == last_zone) {
4985 					ties = 1;
4986 					continue;
4987 				}
4988 				size = get_uma_stats(kz, z, &allocs, &used,
4989 				    &sleeps, &cachefree, &xdomain);
4990 				if (size > cur_size && size < last_size + ties)
4991 				{
4992 					cur_size = size;
4993 					cur_zone = z;
4994 					cur_keg = kz;
4995 				}
4996 			}
4997 		}
4998 		if (cur_zone == NULL)
4999 			break;
5000 
5001 		size = get_uma_stats(cur_keg, cur_zone, &allocs, &used,
5002 		    &sleeps, &cachefree, &xdomain);
5003 		db_printf(fmt_entry, cur_zone->uz_name,
5004 		    (uintmax_t)cur_keg->uk_size, (intmax_t)used, cachefree,
5005 		    (uintmax_t)allocs, (uintmax_t)sleeps,
5006 		    (unsigned)cur_zone->uz_bucket_size, (intmax_t)size,
5007 		    xdomain);
5008 
5009 		if (db_pager_quit)
5010 			return;
5011 		last_zone = cur_zone;
5012 		last_size = cur_size;
5013 	}
5014 }
5015 
5016 DB_SHOW_COMMAND(umacache, db_show_umacache)
5017 {
5018 	uma_zone_t z;
5019 	uint64_t allocs, frees;
5020 	long cachefree;
5021 	int i;
5022 
5023 	db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free",
5024 	    "Requests", "Bucket");
5025 	LIST_FOREACH(z, &uma_cachezones, uz_link) {
5026 		uma_zone_sumstat(z, &cachefree, &allocs, &frees, NULL, NULL);
5027 		for (i = 0; i < vm_ndomains; i++)
5028 			cachefree += z->uz_domain[i].uzd_nitems;
5029 		db_printf("%18s %8ju %8jd %8ld %12ju %8u\n",
5030 		    z->uz_name, (uintmax_t)z->uz_size,
5031 		    (intmax_t)(allocs - frees), cachefree,
5032 		    (uintmax_t)allocs, z->uz_bucket_size);
5033 		if (db_pager_quit)
5034 			return;
5035 	}
5036 }
5037 #endif	/* DDB */
5038