xref: /freebsd/sys/kern/subr_vmem.c (revision 137a344c6341d1469432e9deb3a25593f96672ad)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c)2006,2007,2008,2009 YAMAMOTO Takashi,
 * Copyright (c) 2013 EMC Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * From:
 *	$NetBSD: vmem_impl.h,v 1.2 2013/01/29 21:26:24 para Exp $
 *	$NetBSD: subr_vmem.c,v 1.83 2013/03/06 11:20:10 yamt Exp $
 */

/*
 * reference:
 * -	Magazines and Vmem: Extending the Slab Allocator
 *	to Many CPUs and Arbitrary Resources
 *	http://www.usenix.org/event/usenix01/bonwick.html
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/callout.h>
#include <sys/hash.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/condvar.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/vmem.h>

#include "opt_vm.h"

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#define	VMEM_OPTORDER		5
#define	VMEM_OPTVALUE		(1 << VMEM_OPTORDER)
#define	VMEM_MAXORDER						\
    (VMEM_OPTVALUE - 1 + sizeof(vmem_size_t) * NBBY - VMEM_OPTORDER)
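
/*
 * Worked example (illustrative): with VMEM_OPTORDER == 5, VMEM_OPTVALUE is
 * 32, and for a 64-bit vmem_size_t (NBBY == 8) VMEM_MAXORDER evaluates to
 * 31 + 64 - 5 == 90: orders 0..30 hold exact sizes 1..31 quanta and the
 * remaining orders are power-of-two buckets covering the rest of the size
 * space.
 */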

#define	VMEM_HASHSIZE_MIN	16
#define	VMEM_HASHSIZE_MAX	131072

#define	VMEM_QCACHE_IDX_MAX	16

#define	VMEM_FITMASK	(M_BESTFIT | M_FIRSTFIT)

#define	VMEM_FLAGS						\
    (M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_NOVM | M_BESTFIT | M_FIRSTFIT)

#define	BT_FLAGS	(M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_NOVM)

#define	QC_NAME_MAX	16

/*
 * Data structures private to vmem.
 */
MALLOC_DEFINE(M_VMEM, "vmem", "vmem internal structures");

typedef struct vmem_btag bt_t;

TAILQ_HEAD(vmem_seglist, vmem_btag);
LIST_HEAD(vmem_freelist, vmem_btag);
LIST_HEAD(vmem_hashlist, vmem_btag);

struct qcache {
	uma_zone_t	qc_cache;
	vmem_t 		*qc_vmem;
	vmem_size_t	qc_size;
	char		qc_name[QC_NAME_MAX];
};
typedef struct qcache qcache_t;
#define	QC_POOL_TO_QCACHE(pool)	((qcache_t *)(pool->pr_qcache))

#define	VMEM_NAME_MAX	16

/* vmem arena */
struct vmem {
	struct mtx_padalign	vm_lock;
	struct cv		vm_cv;
	char			vm_name[VMEM_NAME_MAX+1];
	LIST_ENTRY(vmem)	vm_alllist;
	struct vmem_hashlist	vm_hash0[VMEM_HASHSIZE_MIN];
	struct vmem_freelist	vm_freelist[VMEM_MAXORDER];
	struct vmem_seglist	vm_seglist;
	struct vmem_hashlist	*vm_hashlist;
	vmem_size_t		vm_hashsize;

	/* Constant after init */
	vmem_size_t		vm_qcache_max;
	vmem_size_t		vm_quantum_mask;
	vmem_size_t		vm_import_quantum;
	int			vm_quantum_shift;

	/* Written on alloc/free */
	LIST_HEAD(, vmem_btag)	vm_freetags;
	int			vm_nfreetags;
	int			vm_nbusytag;
	vmem_size_t		vm_inuse;
	vmem_size_t		vm_size;
	vmem_size_t		vm_limit;

	/* Used on import. */
	vmem_import_t		*vm_importfn;
	vmem_release_t		*vm_releasefn;
	void			*vm_arg;

	/* Space exhaustion callback. */
	vmem_reclaim_t		*vm_reclaimfn;

	/* quantum cache */
	qcache_t		vm_qcache[VMEM_QCACHE_IDX_MAX];
};

/* boundary tag */
struct vmem_btag {
	TAILQ_ENTRY(vmem_btag) bt_seglist;
	union {
		LIST_ENTRY(vmem_btag) u_freelist; /* BT_TYPE_FREE */
		LIST_ENTRY(vmem_btag) u_hashlist; /* BT_TYPE_BUSY */
	} bt_u;
#define	bt_hashlist	bt_u.u_hashlist
#define	bt_freelist	bt_u.u_freelist
	vmem_addr_t	bt_start;
	vmem_size_t	bt_size;
	int		bt_type;
};

#define	BT_TYPE_SPAN		1	/* Allocated from importfn */
#define	BT_TYPE_SPAN_STATIC	2	/* vmem_add() or create. */
#define	BT_TYPE_FREE		3	/* Available space. */
#define	BT_TYPE_BUSY		4	/* Used space. */
#define	BT_ISSPAN_P(bt)	((bt)->bt_type <= BT_TYPE_SPAN_STATIC)

#define	BT_END(bt)	((bt)->bt_start + (bt)->bt_size - 1)

#if defined(DIAGNOSTIC)
static int enable_vmem_check = 1;
SYSCTL_INT(_debug, OID_AUTO, vmem_check, CTLFLAG_RWTUN,
    &enable_vmem_check, 0, "Enable vmem check");
static void vmem_check(vmem_t *);
#endif

static struct callout	vmem_periodic_ch;
static int		vmem_periodic_interval;
static struct task	vmem_periodic_wk;

static struct mtx_padalign __exclusive_cache_line vmem_list_lock;
static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
static uma_zone_t vmem_zone;

/* ---- misc */
#define	VMEM_CONDVAR_INIT(vm, wchan)	cv_init(&vm->vm_cv, wchan)
#define	VMEM_CONDVAR_DESTROY(vm)	cv_destroy(&vm->vm_cv)
#define	VMEM_CONDVAR_WAIT(vm)		cv_wait(&vm->vm_cv, &vm->vm_lock)
#define	VMEM_CONDVAR_BROADCAST(vm)	cv_broadcast(&vm->vm_cv)


#define	VMEM_LOCK(vm)		mtx_lock(&vm->vm_lock)
#define	VMEM_TRYLOCK(vm)	mtx_trylock(&vm->vm_lock)
#define	VMEM_UNLOCK(vm)		mtx_unlock(&vm->vm_lock)
#define	VMEM_LOCK_INIT(vm, name) mtx_init(&vm->vm_lock, (name), NULL, MTX_DEF)
#define	VMEM_LOCK_DESTROY(vm)	mtx_destroy(&vm->vm_lock)
#define	VMEM_ASSERT_LOCKED(vm)	mtx_assert(&vm->vm_lock, MA_OWNED);

#define	VMEM_ALIGNUP(addr, align)	(-(-(addr) & -(align)))

#define	VMEM_CROSS_P(addr1, addr2, boundary) \
	((((addr1) ^ (addr2)) & -(boundary)) != 0)

#define	ORDER2SIZE(order)	((order) < VMEM_OPTVALUE ? ((order) + 1) : \
    (vmem_size_t)1 << ((order) - (VMEM_OPTVALUE - VMEM_OPTORDER - 1)))
#define	SIZE2ORDER(size)	((size) <= VMEM_OPTVALUE ? ((size) - 1) : \
    (flsl(size) + (VMEM_OPTVALUE - VMEM_OPTORDER - 2)))
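
/*
 * Worked examples (illustrative): VMEM_ALIGNUP(0x1234, 0x1000) is
 * -(-0x1234 & -0x1000) == 0x2000, i.e. round up to the next multiple of a
 * power-of-two alignment.  With VMEM_OPTVALUE == 32, SIZE2ORDER(33) is
 * flsl(33) + 25 == 31 and ORDER2SIZE(31) == 32, so a 33-quantum block lands
 * on the [32, 63] freelist and may need to be clipped after allocation.
 */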

/*
 * Maximum number of boundary tags that may be required to satisfy an
 * allocation.  Two may be required to import.  Another two may be
 * required to clip edges.
 */
#define	BT_MAXALLOC	4

/*
 * Max free limits the number of locally cached boundary tags.  We
 * just want to avoid hitting the zone allocator for every call.
 */
#define BT_MAXFREE	(BT_MAXALLOC * 8)

/* Allocator for boundary tags. */
static uma_zone_t vmem_bt_zone;

/* boot time arena storage. */
static struct vmem kernel_arena_storage;
static struct vmem buffer_arena_storage;
static struct vmem transient_arena_storage;
/* kernel and kmem arenas are aliased for backwards KPI compat. */
vmem_t *kernel_arena = &kernel_arena_storage;
vmem_t *kmem_arena = &kernel_arena_storage;
vmem_t *buffer_arena = &buffer_arena_storage;
vmem_t *transient_arena = &transient_arena_storage;

#ifdef DEBUG_MEMGUARD
static struct vmem memguard_arena_storage;
vmem_t *memguard_arena = &memguard_arena_storage;
#endif

/*
 * Fill the vmem's boundary tag cache.  We guarantee that boundary tag
 * allocation will not fail once bt_fill() passes.  To do so we cache
 * at least the maximum possible tag allocations in the arena.
 */
static int
bt_fill(vmem_t *vm, int flags)
{
	bt_t *bt;

	VMEM_ASSERT_LOCKED(vm);

	/*
	 * Only allow the kernel arena and arenas derived from kernel arena to
	 * dip into reserve tags.  They are where new tags come from.
	 */
	flags &= BT_FLAGS;
	if (vm != kernel_arena && vm->vm_arg != kernel_arena)
		flags &= ~M_USE_RESERVE;

	/*
	 * Loop until we meet the reserve.  To minimize the lock shuffle
	 * and prevent simultaneous fills we first try a NOWAIT regardless
	 * of the caller's flags.  Specify M_NOVM so we don't recurse while
	 * holding a vmem lock.
	 */
	while (vm->vm_nfreetags < BT_MAXALLOC) {
		bt = uma_zalloc(vmem_bt_zone,
		    (flags & M_USE_RESERVE) | M_NOWAIT | M_NOVM);
		if (bt == NULL) {
			VMEM_UNLOCK(vm);
			bt = uma_zalloc(vmem_bt_zone, flags);
			VMEM_LOCK(vm);
			if (bt == NULL && (flags & M_NOWAIT) != 0)
				break;
		}
		LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
		vm->vm_nfreetags++;
	}

	if (vm->vm_nfreetags < BT_MAXALLOC)
		return ENOMEM;

	return 0;
}

/*
 * Pop a tag off of the freetag stack.
 */
static bt_t *
bt_alloc(vmem_t *vm)
{
	bt_t *bt;

	VMEM_ASSERT_LOCKED(vm);
	bt = LIST_FIRST(&vm->vm_freetags);
	MPASS(bt != NULL);
	LIST_REMOVE(bt, bt_freelist);
	vm->vm_nfreetags--;

	return bt;
}
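
/*
 * Callers follow a fill-then-pop discipline (sketch of the pattern used by
 * vmem_xalloc() below).  The reserve is topped up with bt_fill() while the
 * arena lock is held; afterwards up to BT_MAXALLOC bt_alloc() calls are
 * guaranteed to succeed:
 *
 *	if (vm->vm_nfreetags < BT_MAXALLOC && bt_fill(vm, flags) != 0)
 *		return (ENOMEM);
 *	bt = bt_alloc(vm);
 */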

/*
 * Trim the per-vmem free list.  Returns with the lock released to
 * avoid allocator recursions.
 */
static void
bt_freetrim(vmem_t *vm, int freelimit)
{
	LIST_HEAD(, vmem_btag) freetags;
	bt_t *bt;

	LIST_INIT(&freetags);
	VMEM_ASSERT_LOCKED(vm);
	while (vm->vm_nfreetags > freelimit) {
		bt = LIST_FIRST(&vm->vm_freetags);
		LIST_REMOVE(bt, bt_freelist);
		vm->vm_nfreetags--;
		LIST_INSERT_HEAD(&freetags, bt, bt_freelist);
	}
	VMEM_UNLOCK(vm);
	while ((bt = LIST_FIRST(&freetags)) != NULL) {
		LIST_REMOVE(bt, bt_freelist);
		uma_zfree(vmem_bt_zone, bt);
	}
}

static inline void
bt_free(vmem_t *vm, bt_t *bt)
{

	VMEM_ASSERT_LOCKED(vm);
	MPASS(LIST_FIRST(&vm->vm_freetags) != bt);
	LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
	vm->vm_nfreetags++;
}
/*
 * freelist[0] ... [1, 1]
 * freelist[1] ... [2, 2]
 *  :
 * freelist[29] ... [30, 30]
 * freelist[30] ... [31, 31]
 * freelist[31] ... [32, 63]
 * freelist[32] ... [64, 127]
 *  :
 * freelist[n] ... [(1 << (n - 26)), (1 << (n - 25)) - 1]
 *  :
 */

static struct vmem_freelist *
bt_freehead_tofree(vmem_t *vm, vmem_size_t size)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	const int idx = SIZE2ORDER(qsize);

	MPASS(size != 0 && qsize != 0);
	MPASS((size & vm->vm_quantum_mask) == 0);
	MPASS(idx >= 0);
	MPASS(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}

/*
 * bt_freehead_toalloc: return the freelist for the given size and allocation
 * strategy.
 *
 * For M_FIRSTFIT, return the list on which any block is large enough
 * for the requested size.  Otherwise, return the list that may contain
 * blocks large enough for the requested size.
 */
static struct vmem_freelist *
bt_freehead_toalloc(vmem_t *vm, vmem_size_t size, int strat)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	int idx = SIZE2ORDER(qsize);

	MPASS(size != 0 && qsize != 0);
	MPASS((size & vm->vm_quantum_mask) == 0);

	if (strat == M_FIRSTFIT && ORDER2SIZE(idx) != qsize) {
		idx++;
		/* check too large request? */
	}
	MPASS(idx >= 0);
	MPASS(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}

/* ---- boundary tag hash */

static struct vmem_hashlist *
bt_hashhead(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	unsigned int hash;

	hash = hash32_buf(&addr, sizeof(addr), 0);
	list = &vm->vm_hashlist[hash % vm->vm_hashsize];

	return list;
}

static bt_t *
bt_lookupbusy(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	bt_t *bt;

	VMEM_ASSERT_LOCKED(vm);
	list = bt_hashhead(vm, addr);
	LIST_FOREACH(bt, list, bt_hashlist) {
		if (bt->bt_start == addr) {
			break;
		}
	}

	return bt;
}

static void
bt_rembusy(vmem_t *vm, bt_t *bt)
{

	VMEM_ASSERT_LOCKED(vm);
	MPASS(vm->vm_nbusytag > 0);
	vm->vm_inuse -= bt->bt_size;
	vm->vm_nbusytag--;
	LIST_REMOVE(bt, bt_hashlist);
}

static void
bt_insbusy(vmem_t *vm, bt_t *bt)
{
	struct vmem_hashlist *list;

	VMEM_ASSERT_LOCKED(vm);
	MPASS(bt->bt_type == BT_TYPE_BUSY);

	list = bt_hashhead(vm, bt->bt_start);
	LIST_INSERT_HEAD(list, bt, bt_hashlist);
	vm->vm_nbusytag++;
	vm->vm_inuse += bt->bt_size;
}

/* ---- boundary tag list */

static void
bt_remseg(vmem_t *vm, bt_t *bt)
{

	TAILQ_REMOVE(&vm->vm_seglist, bt, bt_seglist);
	bt_free(vm, bt);
}

static void
bt_insseg(vmem_t *vm, bt_t *bt, bt_t *prev)
{

	TAILQ_INSERT_AFTER(&vm->vm_seglist, prev, bt, bt_seglist);
}

static void
bt_insseg_tail(vmem_t *vm, bt_t *bt)
{

	TAILQ_INSERT_TAIL(&vm->vm_seglist, bt, bt_seglist);
}

static void
bt_remfree(vmem_t *vm, bt_t *bt)
{

	MPASS(bt->bt_type == BT_TYPE_FREE);

	LIST_REMOVE(bt, bt_freelist);
}

static void
bt_insfree(vmem_t *vm, bt_t *bt)
{
	struct vmem_freelist *list;

	list = bt_freehead_tofree(vm, bt->bt_size);
	LIST_INSERT_HEAD(list, bt, bt_freelist);
}

/* ---- vmem internal functions */

/*
 * Import from the arena into the quantum cache in UMA.
 */
static int
qc_import(void *arg, void **store, int cnt, int domain, int flags)
{
	qcache_t *qc;
	vmem_addr_t addr;
	int i;

	qc = arg;
	if ((flags & VMEM_FITMASK) == 0)
		flags |= M_BESTFIT;
	for (i = 0; i < cnt; i++) {
		if (vmem_xalloc(qc->qc_vmem, qc->qc_size, 0, 0, 0,
		    VMEM_ADDR_MIN, VMEM_ADDR_MAX, flags, &addr) != 0)
			break;
		store[i] = (void *)addr;
		/* Only guarantee one allocation. */
		flags &= ~M_WAITOK;
		flags |= M_NOWAIT;
	}
	return i;
}

/*
 * Release memory from the UMA cache to the arena.
 */
static void
qc_release(void *arg, void **store, int cnt)
{
	qcache_t *qc;
	int i;

	qc = arg;
	for (i = 0; i < cnt; i++)
		vmem_xfree(qc->qc_vmem, (vmem_addr_t)store[i], qc->qc_size);
}

static void
qc_init(vmem_t *vm, vmem_size_t qcache_max)
{
	qcache_t *qc;
	vmem_size_t size;
	int qcache_idx_max;
	int i;

	MPASS((qcache_max & vm->vm_quantum_mask) == 0);
	qcache_idx_max = MIN(qcache_max >> vm->vm_quantum_shift,
	    VMEM_QCACHE_IDX_MAX);
	vm->vm_qcache_max = qcache_idx_max << vm->vm_quantum_shift;
	for (i = 0; i < qcache_idx_max; i++) {
		qc = &vm->vm_qcache[i];
		size = (i + 1) << vm->vm_quantum_shift;
		snprintf(qc->qc_name, sizeof(qc->qc_name), "%s-%zu",
		    vm->vm_name, size);
		qc->qc_vmem = vm;
		qc->qc_size = size;
		qc->qc_cache = uma_zcache_create(qc->qc_name, size,
		    NULL, NULL, NULL, NULL, qc_import, qc_release, qc,
		    UMA_ZONE_VM);
		MPASS(qc->qc_cache);
	}
}
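
/*
 * Worked example (illustrative): with a 4 KB quantum and qcache_max of
 * 32 KB, qcache_idx_max is MIN(8, 16) == 8, so eight UMA caches named
 * "<arena>-4096" through "<arena>-32768" front allocations of 1..8 quanta;
 * anything larger bypasses the quantum cache and goes straight to
 * vmem_xalloc().
 */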

static void
qc_destroy(vmem_t *vm)
{
	int qcache_idx_max;
	int i;

	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
	for (i = 0; i < qcache_idx_max; i++)
		uma_zdestroy(vm->vm_qcache[i].qc_cache);
}

static void
qc_drain(vmem_t *vm)
{
	int qcache_idx_max;
	int i;

	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
	for (i = 0; i < qcache_idx_max; i++)
		zone_drain(vm->vm_qcache[i].qc_cache);
}

#ifndef UMA_MD_SMALL_ALLOC

static struct mtx_padalign __exclusive_cache_line vmem_bt_lock;

/*
 * vmem_bt_alloc:  Allocate a new page of boundary tags.
 *
 * On architectures with uma_small_alloc there is no recursion; no address
 * space need be allocated to allocate boundary tags.  For the others, we
 * must handle recursion.  Boundary tags are necessary to allocate new
 * boundary tags.
 *
 * UMA guarantees that enough tags are held in reserve to allocate a new
 * page of kva.  We dip into this reserve by specifying M_USE_RESERVE only
 * when allocating the page to hold new boundary tags.  In this way the
 * reserve is automatically filled by the allocation that uses the reserve.
 *
 * We still have to guarantee that the new tags are allocated atomically since
 * many threads may try concurrently.  The bt_lock provides this guarantee.
 * We convert WAITOK allocations to NOWAIT and then handle the blocking here
 * on failure.  It's ok to return NULL for a WAITOK allocation as UMA will
 * loop again after checking to see if we lost the race to allocate.
 *
 * There is a small race between vmem_bt_alloc() returning the page and the
 * zone lock being acquired to add the page to the zone.  For WAITOK
 * allocations we just pause briefly.  NOWAIT may experience a transient
 * failure.  To alleviate this we permit a small number of simultaneous
 * fills to proceed concurrently so NOWAIT is less likely to fail unless
 * we are really out of KVA.
 */
static void *
vmem_bt_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
    int wait)
{
	vmem_addr_t addr;

	*pflag = UMA_SLAB_KERNEL;

	/*
	 * Single thread boundary tag allocation so that the address space
	 * and memory are added in one atomic operation.
	 */
	mtx_lock(&vmem_bt_lock);
	if (vmem_xalloc(vm_dom[domain].vmd_kernel_arena, bytes, 0, 0, 0,
	    VMEM_ADDR_MIN, VMEM_ADDR_MAX,
	    M_NOWAIT | M_NOVM | M_USE_RESERVE | M_BESTFIT, &addr) == 0) {
		if (kmem_back_domain(domain, kernel_object, addr, bytes,
		    M_NOWAIT | M_USE_RESERVE) == 0) {
			mtx_unlock(&vmem_bt_lock);
			return ((void *)addr);
		}
		vmem_xfree(vm_dom[domain].vmd_kernel_arena, addr, bytes);
		mtx_unlock(&vmem_bt_lock);
		/*
		 * Out of memory, not address space.  This may not even be
		 * possible due to M_USE_RESERVE page allocation.
		 */
		if (wait & M_WAITOK)
			VM_WAIT;
		return (NULL);
	}
	mtx_unlock(&vmem_bt_lock);
	/*
	 * We're either out of address space or lost a fill race.
	 */
	if (wait & M_WAITOK)
		pause("btalloc", 1);

	return (NULL);
}
#endif

void
vmem_startup(void)
{

	mtx_init(&vmem_list_lock, "vmem list lock", NULL, MTX_DEF);
	vmem_zone = uma_zcreate("vmem",
	    sizeof(struct vmem), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_VM);
	vmem_bt_zone = uma_zcreate("vmem btag",
	    sizeof(struct vmem_btag), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
#ifndef UMA_MD_SMALL_ALLOC
	mtx_init(&vmem_bt_lock, "btag lock", NULL, MTX_DEF);
	uma_prealloc(vmem_bt_zone, BT_MAXALLOC);
	/*
	 * Reserve enough tags to allocate new tags.  We allow multiple
	 * CPUs to attempt to allocate new tags concurrently to limit
	 * false restarts in UMA.
	 */
	uma_zone_reserve(vmem_bt_zone, BT_MAXALLOC * (mp_ncpus + 1) / 2);
	uma_zone_set_allocf(vmem_bt_zone, vmem_bt_alloc);
#endif
}

/* ---- rehash */

static int
vmem_rehash(vmem_t *vm, vmem_size_t newhashsize)
{
	bt_t *bt;
	int i;
	struct vmem_hashlist *newhashlist;
	struct vmem_hashlist *oldhashlist;
	vmem_size_t oldhashsize;

	MPASS(newhashsize > 0);

	newhashlist = malloc(sizeof(struct vmem_hashlist) * newhashsize,
	    M_VMEM, M_NOWAIT);
	if (newhashlist == NULL)
		return ENOMEM;
	for (i = 0; i < newhashsize; i++) {
		LIST_INIT(&newhashlist[i]);
	}

	VMEM_LOCK(vm);
	oldhashlist = vm->vm_hashlist;
	oldhashsize = vm->vm_hashsize;
	vm->vm_hashlist = newhashlist;
	vm->vm_hashsize = newhashsize;
	if (oldhashlist == NULL) {
		VMEM_UNLOCK(vm);
		return 0;
	}
	for (i = 0; i < oldhashsize; i++) {
		while ((bt = LIST_FIRST(&oldhashlist[i])) != NULL) {
			bt_rembusy(vm, bt);
			bt_insbusy(vm, bt);
		}
	}
	VMEM_UNLOCK(vm);

	if (oldhashlist != vm->vm_hash0) {
		free(oldhashlist, M_VMEM);
	}

	return 0;
}

static void
vmem_periodic_kick(void *dummy)
{

	taskqueue_enqueue(taskqueue_thread, &vmem_periodic_wk);
}

static void
vmem_periodic(void *unused, int pending)
{
	vmem_t *vm;
	vmem_size_t desired;
	vmem_size_t current;

	mtx_lock(&vmem_list_lock);
	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
#ifdef DIAGNOSTIC
		/* Convenient time to verify vmem state. */
		if (enable_vmem_check == 1) {
			VMEM_LOCK(vm);
			vmem_check(vm);
			VMEM_UNLOCK(vm);
		}
#endif
		desired = 1 << flsl(vm->vm_nbusytag);
		desired = MIN(MAX(desired, VMEM_HASHSIZE_MIN),
		    VMEM_HASHSIZE_MAX);
		current = vm->vm_hashsize;

		/* Grow in powers of two.  Shrink less aggressively. */
		if (desired >= current * 2 || desired * 4 <= current)
			vmem_rehash(vm, desired);

		/*
		 * Periodically wake up threads waiting for resources,
		 * so that they can ask for reclamation again.
		 */
		VMEM_CONDVAR_BROADCAST(vm);
	}
	mtx_unlock(&vmem_list_lock);

	callout_reset(&vmem_periodic_ch, vmem_periodic_interval,
	    vmem_periodic_kick, NULL);
}
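
/*
 * Worked example of the resize policy above (illustrative numbers): with
 * 5000 busy tags, desired is 1 << flsl(5000) == 8192.  An arena currently
 * at 4096 buckets grows (8192 >= 4096 * 2); once at 8192 buckets it will
 * not shrink until desired * 4 <= 8192, i.e. until the busy tag count
 * falls below 2048.
 */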

static void
vmem_start_callout(void *unused)
{

	TASK_INIT(&vmem_periodic_wk, 0, vmem_periodic, NULL);
	vmem_periodic_interval = hz * 10;
	callout_init(&vmem_periodic_ch, 1);
	callout_reset(&vmem_periodic_ch, vmem_periodic_interval,
	    vmem_periodic_kick, NULL);
}
SYSINIT(vfs, SI_SUB_CONFIGURE, SI_ORDER_ANY, vmem_start_callout, NULL);

static void
vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int type)
{
	bt_t *btspan;
	bt_t *btfree;

	MPASS(type == BT_TYPE_SPAN || type == BT_TYPE_SPAN_STATIC);
	MPASS((size & vm->vm_quantum_mask) == 0);

	btspan = bt_alloc(vm);
	btspan->bt_type = type;
	btspan->bt_start = addr;
	btspan->bt_size = size;
	bt_insseg_tail(vm, btspan);

	btfree = bt_alloc(vm);
	btfree->bt_type = BT_TYPE_FREE;
	btfree->bt_start = addr;
	btfree->bt_size = size;
	bt_insseg(vm, btfree, btspan);
	bt_insfree(vm, btfree);

	vm->vm_size += size;
}
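
/*
 * After vmem_add1() the segment list ends with a span marker immediately
 * followed by a free tag covering the same range, e.g. (sketch):
 *
 *	... -> [SPAN addr, size] -> [FREE addr, size]
 *
 * The span tag stays in place for the lifetime of the span; only the free
 * tag is split and coalesced by allocation and free.
 */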

static void
vmem_destroy1(vmem_t *vm)
{
	bt_t *bt;

	/*
	 * Destroy the per-cpu quantum caches.
	 */
	qc_destroy(vm);

	/*
	 * The vmem should now only contain empty segments.
	 */
	VMEM_LOCK(vm);
	MPASS(vm->vm_nbusytag == 0);

	while ((bt = TAILQ_FIRST(&vm->vm_seglist)) != NULL)
		bt_remseg(vm, bt);

	if (vm->vm_hashlist != NULL && vm->vm_hashlist != vm->vm_hash0)
		free(vm->vm_hashlist, M_VMEM);

	bt_freetrim(vm, 0);

	VMEM_CONDVAR_DESTROY(vm);
	VMEM_LOCK_DESTROY(vm);
	uma_zfree(vmem_zone, vm);
}

static int
vmem_import(vmem_t *vm, vmem_size_t size, vmem_size_t align, int flags)
{
	vmem_addr_t addr;
	int error;

	if (vm->vm_importfn == NULL)
		return (EINVAL);

	/*
	 * To make sure the imported span contains a suitably aligned
	 * range, we double the alignment and add it to the requested
	 * size.  This slightly overestimates what is strictly needed.
	 */
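	/*
	 * Worked example (illustrative numbers): quantum 4 KB, align 16 KB,
	 * size 8 KB, import_quantum 64 KB.  The padded size is 2 * 16K + 8K
	 * == 40K, rounded up to 64K below; any 40K window is guaranteed to
	 * contain a 16K-aligned 8K range.
	 */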
	if (align != vm->vm_quantum_mask + 1)
		size = (align * 2) + size;
	size = roundup(size, vm->vm_import_quantum);

	if (vm->vm_limit != 0 && vm->vm_limit < vm->vm_size + size)
		return (ENOMEM);

	/*
	 * Hide MAXALLOC tags so we're guaranteed to be able to add this
	 * span and the tag we want to allocate from it.
	 */
	MPASS(vm->vm_nfreetags >= BT_MAXALLOC);
	vm->vm_nfreetags -= BT_MAXALLOC;
	VMEM_UNLOCK(vm);
	error = (vm->vm_importfn)(vm->vm_arg, size, flags, &addr);
	VMEM_LOCK(vm);
	vm->vm_nfreetags += BT_MAXALLOC;
	if (error)
		return (ENOMEM);

	vmem_add1(vm, addr, size, BT_TYPE_SPAN);

	return 0;
}

/*
 * vmem_fit: check if a bt can satisfy the given restrictions.
 *
 * It is the caller's responsibility to ensure the region is big enough
 * before calling us.
 */
static int
vmem_fit(const bt_t *bt, vmem_size_t size, vmem_size_t align,
    vmem_size_t phase, vmem_size_t nocross, vmem_addr_t minaddr,
    vmem_addr_t maxaddr, vmem_addr_t *addrp)
{
	vmem_addr_t start;
	vmem_addr_t end;

	MPASS(size > 0);
	MPASS(bt->bt_size >= size); /* caller's responsibility */

	/*
	 * XXX assumption: vmem_addr_t and vmem_size_t are
	 * unsigned integers of the same size.
	 */

	start = bt->bt_start;
	if (start < minaddr) {
		start = minaddr;
	}
	end = BT_END(bt);
	if (end > maxaddr)
		end = maxaddr;
	if (start > end)
		return (ENOMEM);

	start = VMEM_ALIGNUP(start - phase, align) + phase;
	if (start < bt->bt_start)
		start += align;
	if (VMEM_CROSS_P(start, start + size - 1, nocross)) {
		MPASS(align < nocross);
		start = VMEM_ALIGNUP(start - phase, nocross) + phase;
	}
	if (start <= end && end - start >= size - 1) {
		MPASS((start & (align - 1)) == phase);
		MPASS(!VMEM_CROSS_P(start, start + size - 1, nocross));
		MPASS(minaddr <= start);
		MPASS(maxaddr == 0 || start + size - 1 <= maxaddr);
		MPASS(bt->bt_start <= start);
		MPASS(BT_END(bt) - start >= size - 1);
		*addrp = start;

		return (0);
	}
	return (ENOMEM);
}
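
/*
 * Worked example (illustrative numbers): for a free tag covering
 * [0x3000, 0x7000), size 0x1000, align 0x2000, phase 0x1000 the code above
 * computes VMEM_ALIGNUP(0x3000 - 0x1000, 0x2000) + 0x1000 == 0x3000, which
 * satisfies (start & (align - 1)) == phase and fits within the tag, so
 * 0x3000 is returned.
 */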

/*
 * vmem_clip:  Trim the boundary tag edges to the requested start and size.
 */
static void
vmem_clip(vmem_t *vm, bt_t *bt, vmem_addr_t start, vmem_size_t size)
{
	bt_t *btnew;
	bt_t *btprev;

	VMEM_ASSERT_LOCKED(vm);
	MPASS(bt->bt_type == BT_TYPE_FREE);
	MPASS(bt->bt_size >= size);
	bt_remfree(vm, bt);
	if (bt->bt_start != start) {
		btprev = bt_alloc(vm);
		btprev->bt_type = BT_TYPE_FREE;
		btprev->bt_start = bt->bt_start;
		btprev->bt_size = start - bt->bt_start;
		bt->bt_start = start;
		bt->bt_size -= btprev->bt_size;
		bt_insfree(vm, btprev);
		bt_insseg(vm, btprev,
		    TAILQ_PREV(bt, vmem_seglist, bt_seglist));
	}
	MPASS(bt->bt_start == start);
	if (bt->bt_size != size && bt->bt_size - size > vm->vm_quantum_mask) {
		/* split */
		btnew = bt_alloc(vm);
		btnew->bt_type = BT_TYPE_BUSY;
		btnew->bt_start = bt->bt_start;
		btnew->bt_size = size;
		bt->bt_start = bt->bt_start + size;
		bt->bt_size -= size;
		bt_insfree(vm, bt);
		bt_insseg(vm, btnew,
		    TAILQ_PREV(bt, vmem_seglist, bt_seglist));
		bt_insbusy(vm, btnew);
		bt = btnew;
	} else {
		bt->bt_type = BT_TYPE_BUSY;
		bt_insbusy(vm, bt);
	}
	MPASS(bt->bt_size >= size);
	bt->bt_type = BT_TYPE_BUSY;
}
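
/*
 * Clipping sketch (illustrative numbers): allocating 0x2000 bytes at
 * 0x1000 out of a free tag spanning [0x0000, 0x4000) splits it twice,
 * leaving these (start, size) tags in the segment list:
 *
 *	FREE(0x0000, 0x1000) -> BUSY(0x1000, 0x2000) -> FREE(0x3000, 0x1000)
 *
 * with the two free fragments reinserted on their size-appropriate
 * freelists.
 */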

/* ---- vmem API */

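/*
 * Minimal usage sketch of the public API below (illustrative only; the
 * guard macro VMEM_EXAMPLE and all names, addresses, and sizes here are
 * hypothetical and not part of this file).
 */
#ifdef VMEM_EXAMPLE
static void
vmem_example(void)
{
	vmem_t *arena;
	vmem_addr_t addr;

	/* Arena managing [0x100000, 0x100000 + 1 MB) in 4 KB quanta. */
	arena = vmem_create("example", 0x100000, 1024 * 1024, 4096, 0,
	    M_WAITOK);
	/* A quantum-aligned best-fit allocation of 8 KB. */
	if (vmem_alloc(arena, 8192, M_BESTFIT | M_WAITOK, &addr) == 0)
		vmem_free(arena, addr, 8192);
	/* A 64 KB-aligned allocation via the constrained interface. */
	if (vmem_xalloc(arena, 8192, 65536, 0, 0, VMEM_ADDR_MIN,
	    VMEM_ADDR_MAX, M_BESTFIT | M_NOWAIT, &addr) == 0)
		vmem_xfree(arena, addr, 8192);
	vmem_destroy(arena);
}
#endif
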
void
vmem_set_import(vmem_t *vm, vmem_import_t *importfn,
     vmem_release_t *releasefn, void *arg, vmem_size_t import_quantum)
{

	VMEM_LOCK(vm);
	vm->vm_importfn = importfn;
	vm->vm_releasefn = releasefn;
	vm->vm_arg = arg;
	vm->vm_import_quantum = import_quantum;
	VMEM_UNLOCK(vm);
}

void
vmem_set_limit(vmem_t *vm, vmem_size_t limit)
{

	VMEM_LOCK(vm);
	vm->vm_limit = limit;
	VMEM_UNLOCK(vm);
}

void
vmem_set_reclaim(vmem_t *vm, vmem_reclaim_t *reclaimfn)
{

	VMEM_LOCK(vm);
	vm->vm_reclaimfn = reclaimfn;
	VMEM_UNLOCK(vm);
}

/*
 * vmem_init: Initializes vmem arena.
 */
vmem_t *
vmem_init(vmem_t *vm, const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum, vmem_size_t qcache_max, int flags)
{
	int i;

	MPASS(quantum > 0);
	MPASS((quantum & (quantum - 1)) == 0);

	bzero(vm, sizeof(*vm));

	VMEM_CONDVAR_INIT(vm, name);
	VMEM_LOCK_INIT(vm, name);
	vm->vm_nfreetags = 0;
	LIST_INIT(&vm->vm_freetags);
	strlcpy(vm->vm_name, name, sizeof(vm->vm_name));
	vm->vm_quantum_mask = quantum - 1;
	vm->vm_quantum_shift = flsl(quantum) - 1;
	vm->vm_nbusytag = 0;
	vm->vm_size = 0;
	vm->vm_limit = 0;
	vm->vm_inuse = 0;
	qc_init(vm, qcache_max);

	TAILQ_INIT(&vm->vm_seglist);
	for (i = 0; i < VMEM_MAXORDER; i++) {
		LIST_INIT(&vm->vm_freelist[i]);
	}
	memset(&vm->vm_hash0, 0, sizeof(vm->vm_hash0));
	vm->vm_hashsize = VMEM_HASHSIZE_MIN;
	vm->vm_hashlist = vm->vm_hash0;

	if (size != 0) {
		if (vmem_add(vm, base, size, flags) != 0) {
			vmem_destroy1(vm);
			return NULL;
		}
	}

	mtx_lock(&vmem_list_lock);
	LIST_INSERT_HEAD(&vmem_list, vm, vm_alllist);
	mtx_unlock(&vmem_list_lock);

	return vm;
}

/*
 * vmem_create: create an arena.
 */
vmem_t *
vmem_create(const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum, vmem_size_t qcache_max, int flags)
{

	vmem_t *vm;

	vm = uma_zalloc(vmem_zone, flags & (M_WAITOK|M_NOWAIT));
	if (vm == NULL)
		return (NULL);
	if (vmem_init(vm, name, base, size, quantum, qcache_max,
	    flags) == NULL)
		return (NULL);
	return (vm);
}

void
vmem_destroy(vmem_t *vm)
{

	mtx_lock(&vmem_list_lock);
	LIST_REMOVE(vm, vm_alllist);
	mtx_unlock(&vmem_list_lock);

	vmem_destroy1(vm);
}

vmem_size_t
vmem_roundup_size(vmem_t *vm, vmem_size_t size)
{

	return (size + vm->vm_quantum_mask) & ~vm->vm_quantum_mask;
}
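
/*
 * Worked example (illustrative): with a 4096-byte quantum the mask is
 * 0xfff, so vmem_roundup_size() maps 5000 to (5000 + 0xfff) & ~0xfff ==
 * 8192, the next multiple of the quantum.
 */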

/*
 * vmem_alloc: allocate resource from the arena.
 */
int
vmem_alloc(vmem_t *vm, vmem_size_t size, int flags, vmem_addr_t *addrp)
{
	const int strat __unused = flags & VMEM_FITMASK;
	qcache_t *qc;

	flags &= VMEM_FLAGS;
	MPASS(size > 0);
	MPASS(strat == M_BESTFIT || strat == M_FIRSTFIT);
	if ((flags & M_NOWAIT) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "vmem_alloc");

	if (size <= vm->vm_qcache_max) {
		qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift];
		*addrp = (vmem_addr_t)uma_zalloc(qc->qc_cache, flags);
		if (*addrp == 0)
			return (ENOMEM);
		return (0);
	}

	return vmem_xalloc(vm, size, 0, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
	    flags, addrp);
}

int
vmem_xalloc(vmem_t *vm, const vmem_size_t size0, vmem_size_t align,
    const vmem_size_t phase, const vmem_size_t nocross,
    const vmem_addr_t minaddr, const vmem_addr_t maxaddr, int flags,
    vmem_addr_t *addrp)
{
	const vmem_size_t size = vmem_roundup_size(vm, size0);
	struct vmem_freelist *list;
	struct vmem_freelist *first;
	struct vmem_freelist *end;
	vmem_size_t avail;
	bt_t *bt;
	int error;
	int strat;

	flags &= VMEM_FLAGS;
	strat = flags & VMEM_FITMASK;
	MPASS(size0 > 0);
	MPASS(size > 0);
	MPASS(strat == M_BESTFIT || strat == M_FIRSTFIT);
	MPASS((flags & (M_NOWAIT|M_WAITOK)) != (M_NOWAIT|M_WAITOK));
	if ((flags & M_NOWAIT) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "vmem_xalloc");
	MPASS((align & vm->vm_quantum_mask) == 0);
	MPASS((align & (align - 1)) == 0);
	MPASS((phase & vm->vm_quantum_mask) == 0);
	MPASS((nocross & vm->vm_quantum_mask) == 0);
	MPASS((nocross & (nocross - 1)) == 0);
	MPASS((align == 0 && phase == 0) || phase < align);
	MPASS(nocross == 0 || nocross >= size);
	MPASS(minaddr <= maxaddr);
	MPASS(!VMEM_CROSS_P(phase, phase + size - 1, nocross));

	if (align == 0)
		align = vm->vm_quantum_mask + 1;

	*addrp = 0;
	end = &vm->vm_freelist[VMEM_MAXORDER];
	/*
	 * Choose a free block from which to allocate.
	 */
	first = bt_freehead_toalloc(vm, size, strat);
	VMEM_LOCK(vm);
	for (;;) {
		/*
		 * Make sure we have enough tags to complete the
		 * operation.
		 */
		if (vm->vm_nfreetags < BT_MAXALLOC &&
		    bt_fill(vm, flags) != 0) {
			error = ENOMEM;
			break;
		}
		/*
		 * Scan freelists looking for a tag that satisfies the
		 * allocation.  If we're doing BESTFIT we may encounter
		 * sizes below the request.  If we're doing FIRSTFIT we
		 * inspect only the first element from each list.
		 */
		for (list = first; list < end; list++) {
			LIST_FOREACH(bt, list, bt_freelist) {
				if (bt->bt_size >= size) {
					error = vmem_fit(bt, size, align, phase,
					    nocross, minaddr, maxaddr, addrp);
					if (error == 0) {
						vmem_clip(vm, bt, *addrp, size);
						goto out;
					}
				}
				/* FIRSTFIT skips to the next list. */
				if (strat == M_FIRSTFIT)
					break;
			}
		}
		/*
		 * Retry if the fast algorithm failed.
		 */
		if (strat == M_FIRSTFIT) {
			strat = M_BESTFIT;
			first = bt_freehead_toalloc(vm, size, strat);
			continue;
		}
		/*
		 * XXX it is possible to fail to meet restrictions with the
		 * imported region.  It is up to the user to specify the
		 * import quantum such that it can satisfy any allocation.
		 */
		if (vmem_import(vm, size, align, flags) == 0)
			continue;

		/*
		 * Try to free some space from the quantum cache or reclaim
		 * functions if available.
		 */
		if (vm->vm_qcache_max != 0 || vm->vm_reclaimfn != NULL) {
			avail = vm->vm_size - vm->vm_inuse;
			VMEM_UNLOCK(vm);
			if (vm->vm_qcache_max != 0)
				qc_drain(vm);
			if (vm->vm_reclaimfn != NULL)
				vm->vm_reclaimfn(vm, flags);
			VMEM_LOCK(vm);
			/* If we were successful retry even NOWAIT. */
			if (vm->vm_size - vm->vm_inuse > avail)
				continue;
		}
		if ((flags & M_NOWAIT) != 0) {
			error = ENOMEM;
			break;
		}
		VMEM_CONDVAR_WAIT(vm);
	}
out:
	VMEM_UNLOCK(vm);
	if (error != 0 && (flags & M_NOWAIT) == 0)
		panic("failed to allocate waiting allocation\n");

	return (error);
}

/*
 * vmem_free: free the resource to the arena.
 */
void
vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{
	qcache_t *qc;
	MPASS(size > 0);

	if (size <= vm->vm_qcache_max) {
		qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift];
		uma_zfree(qc->qc_cache, (void *)addr);
	} else
		vmem_xfree(vm, addr, size);
}

void
vmem_xfree(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{
	bt_t *bt;
	bt_t *t;

	MPASS(size > 0);

	VMEM_LOCK(vm);
	bt = bt_lookupbusy(vm, addr);
	MPASS(bt != NULL);
	MPASS(bt->bt_start == addr);
	MPASS(bt->bt_size == vmem_roundup_size(vm, size) ||
	    bt->bt_size - vmem_roundup_size(vm, size) <= vm->vm_quantum_mask);
	MPASS(bt->bt_type == BT_TYPE_BUSY);
	bt_rembusy(vm, bt);
	bt->bt_type = BT_TYPE_FREE;

	/* coalesce */
	t = TAILQ_NEXT(bt, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		MPASS(BT_END(bt) < t->bt_start);	/* YYY */
		bt->bt_size += t->bt_size;
		bt_remfree(vm, t);
		bt_remseg(vm, t);
	}
	t = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		MPASS(BT_END(t) < bt->bt_start);	/* YYY */
		bt->bt_size += t->bt_size;
		bt->bt_start = t->bt_start;
		bt_remfree(vm, t);
		bt_remseg(vm, t);
	}

	t = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
	MPASS(t != NULL);
	MPASS(BT_ISSPAN_P(t) || t->bt_type == BT_TYPE_BUSY);
	if (vm->vm_releasefn != NULL && t->bt_type == BT_TYPE_SPAN &&
	    t->bt_size == bt->bt_size) {
		vmem_addr_t spanaddr;
		vmem_size_t spansize;

		MPASS(t->bt_start == bt->bt_start);
		spanaddr = bt->bt_start;
		spansize = bt->bt_size;
		bt_remseg(vm, bt);
		bt_remseg(vm, t);
		vm->vm_size -= spansize;
		VMEM_CONDVAR_BROADCAST(vm);
		bt_freetrim(vm, BT_MAXFREE);
		(*vm->vm_releasefn)(vm->vm_arg, spanaddr, spansize);
	} else {
		bt_insfree(vm, bt);
		VMEM_CONDVAR_BROADCAST(vm);
		bt_freetrim(vm, BT_MAXFREE);
	}
}
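
/*
 * Coalescing sketch (illustrative numbers, (start, size) tags): freeing
 * BUSY(0x1000, 0x2000) between FREE(0x0000, 0x1000) and FREE(0x3000,
 * 0x1000) merges all three into a single FREE(0x0000, 0x4000) tag; if
 * that tag then covers an entire imported span and a release function is
 * set, the whole span is handed back via vm_releasefn.
 */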

/*
 * vmem_add: add a contiguous span of memory to the arena as a static span.
 */
int
vmem_add(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int flags)
{
	int error;

	error = 0;
	flags &= VMEM_FLAGS;
	VMEM_LOCK(vm);
	if (vm->vm_nfreetags >= BT_MAXALLOC || bt_fill(vm, flags) == 0)
		vmem_add1(vm, addr, size, BT_TYPE_SPAN_STATIC);
	else
		error = ENOMEM;
	VMEM_UNLOCK(vm);

	return (error);
}

/*
 * vmem_size: report the arena's size according to typemask.
 */
vmem_size_t
vmem_size(vmem_t *vm, int typemask)
{
	int i;

	switch (typemask) {
	case VMEM_ALLOC:
		return vm->vm_inuse;
	case VMEM_FREE:
		return vm->vm_size - vm->vm_inuse;
	case VMEM_FREE|VMEM_ALLOC:
		return vm->vm_size;
	case VMEM_MAXFREE:
		VMEM_LOCK(vm);
		for (i = VMEM_MAXORDER - 1; i >= 0; i--) {
			if (LIST_EMPTY(&vm->vm_freelist[i]))
				continue;
			VMEM_UNLOCK(vm);
			return ((vmem_size_t)ORDER2SIZE(i) <<
			    vm->vm_quantum_shift);
		}
		VMEM_UNLOCK(vm);
		return (0);
	default:
		panic("vmem_size");
	}
}

/* ---- debug */

#if defined(DDB) || defined(DIAGNOSTIC)

static void bt_dump(const bt_t *, int (*)(const char *, ...)
    __printflike(1, 2));

static const char *
bt_type_string(int type)
{

	switch (type) {
	case BT_TYPE_BUSY:
		return "busy";
	case BT_TYPE_FREE:
		return "free";
	case BT_TYPE_SPAN:
		return "span";
	case BT_TYPE_SPAN_STATIC:
		return "static span";
	default:
		break;
	}
	return "BOGUS";
}

static void
bt_dump(const bt_t *bt, int (*pr)(const char *, ...))
{

	(*pr)("\t%p: %jx %jx, %d(%s)\n",
	    bt, (intmax_t)bt->bt_start, (intmax_t)bt->bt_size,
	    bt->bt_type, bt_type_string(bt->bt_type));
}

static void
vmem_dump(const vmem_t *vm , int (*pr)(const char *, ...) __printflike(1, 2))
{
	const bt_t *bt;
	int i;

	(*pr)("vmem %p '%s'\n", vm, vm->vm_name);
	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		bt_dump(bt, pr);
	}

	for (i = 0; i < VMEM_MAXORDER; i++) {
		const struct vmem_freelist *fl = &vm->vm_freelist[i];

		if (LIST_EMPTY(fl)) {
			continue;
		}

		(*pr)("freelist[%d]\n", i);
		LIST_FOREACH(bt, fl, bt_freelist) {
			bt_dump(bt, pr);
		}
	}
}

#endif /* defined(DDB) || defined(DIAGNOSTIC) */

#if defined(DDB)
#include <ddb/ddb.h>

static bt_t *
vmem_whatis_lookup(vmem_t *vm, vmem_addr_t addr)
{
	bt_t *bt;

	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		if (BT_ISSPAN_P(bt)) {
			continue;
		}
		if (bt->bt_start <= addr && addr <= BT_END(bt)) {
			return bt;
		}
	}

	return NULL;
}

void
vmem_whatis(vmem_addr_t addr, int (*pr)(const char *, ...))
{
	vmem_t *vm;

	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		bt_t *bt;

		bt = vmem_whatis_lookup(vm, addr);
		if (bt == NULL) {
			continue;
		}
		(*pr)("%p is %p+%zu in VMEM '%s' (%s)\n",
		    (void *)addr, (void *)bt->bt_start,
		    (vmem_size_t)(addr - bt->bt_start), vm->vm_name,
		    (bt->bt_type == BT_TYPE_BUSY) ? "allocated" : "free");
	}
}

void
vmem_printall(const char *modif, int (*pr)(const char *, ...))
{
	const vmem_t *vm;

	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		vmem_dump(vm, pr);
	}
}

void
vmem_print(vmem_addr_t addr, const char *modif, int (*pr)(const char *, ...))
{
	const vmem_t *vm = (const void *)addr;

	vmem_dump(vm, pr);
}

DB_SHOW_COMMAND(vmemdump, vmemdump)
{

	if (!have_addr) {
		db_printf("usage: show vmemdump <addr>\n");
		return;
	}

	vmem_dump((const vmem_t *)addr, db_printf);
}

DB_SHOW_ALL_COMMAND(vmemdump, vmemdumpall)
{
	const vmem_t *vm;

	LIST_FOREACH(vm, &vmem_list, vm_alllist)
		vmem_dump(vm, db_printf);
}

DB_SHOW_COMMAND(vmem, vmem_summ)
{
	const vmem_t *vm = (const void *)addr;
	const bt_t *bt;
	size_t ft[VMEM_MAXORDER], ut[VMEM_MAXORDER];
	size_t fs[VMEM_MAXORDER], us[VMEM_MAXORDER];
	int ord;

	if (!have_addr) {
		db_printf("usage: show vmem <addr>\n");
		return;
	}

	db_printf("vmem %p '%s'\n", vm, vm->vm_name);
	db_printf("\tquantum:\t%zu\n", vm->vm_quantum_mask + 1);
	db_printf("\tsize:\t%zu\n", vm->vm_size);
	db_printf("\tinuse:\t%zu\n", vm->vm_inuse);
	db_printf("\tfree:\t%zu\n", vm->vm_size - vm->vm_inuse);
	db_printf("\tbusy tags:\t%d\n", vm->vm_nbusytag);
	db_printf("\tfree tags:\t%d\n", vm->vm_nfreetags);

	memset(&ft, 0, sizeof(ft));
	memset(&ut, 0, sizeof(ut));
	memset(&fs, 0, sizeof(fs));
	memset(&us, 0, sizeof(us));
	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		ord = SIZE2ORDER(bt->bt_size >> vm->vm_quantum_shift);
		if (bt->bt_type == BT_TYPE_BUSY) {
			ut[ord]++;
			us[ord] += bt->bt_size;
		} else if (bt->bt_type == BT_TYPE_FREE) {
			ft[ord]++;
			fs[ord] += bt->bt_size;
		}
	}
	db_printf("\t\t\tinuse\tsize\t\tfree\tsize\n");
	for (ord = 0; ord < VMEM_MAXORDER; ord++) {
		if (ut[ord] == 0 && ft[ord] == 0)
			continue;
		db_printf("\t%-15zu %zu\t%-15zu %zu\t%-16zu\n",
		    ORDER2SIZE(ord) << vm->vm_quantum_shift,
		    ut[ord], us[ord], ft[ord], fs[ord]);
	}
}

DB_SHOW_ALL_COMMAND(vmem, vmem_summall)
{
	const vmem_t *vm;

	LIST_FOREACH(vm, &vmem_list, vm_alllist)
		vmem_summ((db_expr_t)vm, TRUE, count, modif);
}
#endif /* defined(DDB) */

#define vmem_printf printf

#if defined(DIAGNOSTIC)

static bool
vmem_check_sanity(vmem_t *vm)
{
	const bt_t *bt, *bt2;

	MPASS(vm != NULL);

	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		if (bt->bt_start > BT_END(bt)) {
			printf("corrupted tag\n");
			bt_dump(bt, vmem_printf);
			return false;
		}
	}
	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		TAILQ_FOREACH(bt2, &vm->vm_seglist, bt_seglist) {
			if (bt == bt2) {
				continue;
			}
			if (BT_ISSPAN_P(bt) != BT_ISSPAN_P(bt2)) {
				continue;
			}
			if (bt->bt_start <= BT_END(bt2) &&
			    bt2->bt_start <= BT_END(bt)) {
				printf("overlapping tags\n");
				bt_dump(bt, vmem_printf);
				bt_dump(bt2, vmem_printf);
				return false;
			}
		}
	}

	return true;
}

static void
vmem_check(vmem_t *vm)
{

	if (!vmem_check_sanity(vm)) {
		panic("insanity vmem %p", vm);
	}
}

#endif /* defined(DIAGNOSTIC) */
1609