xref: /freebsd/sys/kern/subr_vmem.c (revision 640235e2c2ba32947f7c59d168437ffa1280f1e6)
1 /*-
2  * Copyright (c)2006,2007,2008,2009 YAMAMOTO Takashi,
3  * Copyright (c) 2013 EMC Corp.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 /*
29  * From:
30  *	$NetBSD: vmem_impl.h,v 1.2 2013/01/29 21:26:24 para Exp $
31  *	$NetBSD: subr_vmem.c,v 1.83 2013/03/06 11:20:10 yamt Exp $
32  */
33 
34 /*
35  * reference:
36  * -	Magazines and Vmem: Extending the Slab Allocator
37  *	to Many CPUs and Arbitrary Resources
38  *	http://www.usenix.org/event/usenix01/bonwick.html
39  */
40 
41 #include <sys/cdefs.h>
42 __FBSDID("$FreeBSD$");
43 
44 #include "opt_ddb.h"
45 
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/kernel.h>
49 #include <sys/queue.h>
50 #include <sys/callout.h>
51 #include <sys/hash.h>
52 #include <sys/lock.h>
53 #include <sys/malloc.h>
54 #include <sys/mutex.h>
55 #include <sys/smp.h>
56 #include <sys/condvar.h>
57 #include <sys/sysctl.h>
58 #include <sys/taskqueue.h>
59 #include <sys/vmem.h>
60 
61 #include "opt_vm.h"
62 
63 #include <vm/uma.h>
64 #include <vm/vm.h>
65 #include <vm/pmap.h>
66 #include <vm/vm_map.h>
67 #include <vm/vm_object.h>
68 #include <vm/vm_kern.h>
69 #include <vm/vm_extern.h>
70 #include <vm/vm_param.h>
71 #include <vm/vm_pageout.h>
72 
73 #define	VMEM_OPTORDER		5
74 #define	VMEM_OPTVALUE		(1 << VMEM_OPTORDER)
75 #define	VMEM_MAXORDER						\
76     (VMEM_OPTVALUE - 1 + sizeof(vmem_size_t) * NBBY - VMEM_OPTORDER)
77 
78 #define	VMEM_HASHSIZE_MIN	16
79 #define	VMEM_HASHSIZE_MAX	131072
80 
81 #define	VMEM_QCACHE_IDX_MAX	16
82 
83 #define	VMEM_FITMASK	(M_BESTFIT | M_FIRSTFIT)
84 
85 #define	VMEM_FLAGS						\
86     (M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_NOVM | M_BESTFIT | M_FIRSTFIT)
87 
88 #define	BT_FLAGS	(M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_NOVM)
89 
90 #define	QC_NAME_MAX	16
91 
92 /*
93  * Data structures private to vmem.
94  */
95 MALLOC_DEFINE(M_VMEM, "vmem", "vmem internal structures");
96 
97 typedef struct vmem_btag bt_t;
98 
99 TAILQ_HEAD(vmem_seglist, vmem_btag);
100 LIST_HEAD(vmem_freelist, vmem_btag);
101 LIST_HEAD(vmem_hashlist, vmem_btag);
102 
103 struct qcache {
104 	uma_zone_t	qc_cache;
105 	vmem_t 		*qc_vmem;
106 	vmem_size_t	qc_size;
107 	char		qc_name[QC_NAME_MAX];
108 };
109 typedef struct qcache qcache_t;
110 #define	QC_POOL_TO_QCACHE(pool)	((qcache_t *)(pool->pr_qcache))
111 
112 #define	VMEM_NAME_MAX	16
113 
114 /* vmem arena */
115 struct vmem {
116 	struct mtx_padalign	vm_lock;
117 	struct cv		vm_cv;
118 	char			vm_name[VMEM_NAME_MAX+1];
119 	LIST_ENTRY(vmem)	vm_alllist;
120 	struct vmem_hashlist	vm_hash0[VMEM_HASHSIZE_MIN];
121 	struct vmem_freelist	vm_freelist[VMEM_MAXORDER];
122 	struct vmem_seglist	vm_seglist;
123 	struct vmem_hashlist	*vm_hashlist;
124 	vmem_size_t		vm_hashsize;
125 
126 	/* Constant after init */
127 	vmem_size_t		vm_qcache_max;
128 	vmem_size_t		vm_quantum_mask;
129 	vmem_size_t		vm_import_quantum;
130 	int			vm_quantum_shift;
131 
132 	/* Written on alloc/free */
133 	LIST_HEAD(, vmem_btag)	vm_freetags;
134 	int			vm_nfreetags;
135 	int			vm_nbusytag;
136 	vmem_size_t		vm_inuse;
137 	vmem_size_t		vm_size;
138 
139 	/* Used on import. */
140 	vmem_import_t		*vm_importfn;
141 	vmem_release_t		*vm_releasefn;
142 	void			*vm_arg;
143 
144 	/* Space exhaustion callback. */
145 	vmem_reclaim_t		*vm_reclaimfn;
146 
147 	/* quantum cache */
148 	qcache_t		vm_qcache[VMEM_QCACHE_IDX_MAX];
149 };
150 
151 /* boundary tag */
152 struct vmem_btag {
153 	TAILQ_ENTRY(vmem_btag) bt_seglist;
154 	union {
155 		LIST_ENTRY(vmem_btag) u_freelist; /* BT_TYPE_FREE */
156 		LIST_ENTRY(vmem_btag) u_hashlist; /* BT_TYPE_BUSY */
157 	} bt_u;
158 #define	bt_hashlist	bt_u.u_hashlist
159 #define	bt_freelist	bt_u.u_freelist
160 	vmem_addr_t	bt_start;
161 	vmem_size_t	bt_size;
162 	int		bt_type;
163 };
164 
165 #define	BT_TYPE_SPAN		1	/* Allocated from importfn */
166 #define	BT_TYPE_SPAN_STATIC	2	/* vmem_add() or create. */
167 #define	BT_TYPE_FREE		3	/* Available space. */
168 #define	BT_TYPE_BUSY		4	/* Used space. */
169 #define	BT_ISSPAN_P(bt)	((bt)->bt_type <= BT_TYPE_SPAN_STATIC)
170 
171 #define	BT_END(bt)	((bt)->bt_start + (bt)->bt_size - 1)
172 
173 #if defined(DIAGNOSTIC)
174 static int enable_vmem_check = 1;
175 SYSCTL_INT(_debug, OID_AUTO, vmem_check, CTLFLAG_RWTUN,
176     &enable_vmem_check, 0, "Enable vmem check");
177 static void vmem_check(vmem_t *);
178 #endif
179 
180 static struct callout	vmem_periodic_ch;
181 static int		vmem_periodic_interval;
182 static struct task	vmem_periodic_wk;
183 
184 static struct mtx_padalign vmem_list_lock;
185 static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
186 
187 /* ---- misc */
188 #define	VMEM_CONDVAR_INIT(vm, wchan)	cv_init(&vm->vm_cv, wchan)
189 #define	VMEM_CONDVAR_DESTROY(vm)	cv_destroy(&vm->vm_cv)
190 #define	VMEM_CONDVAR_WAIT(vm)		cv_wait(&vm->vm_cv, &vm->vm_lock)
191 #define	VMEM_CONDVAR_BROADCAST(vm)	cv_broadcast(&vm->vm_cv)
192 
193 
194 #define	VMEM_LOCK(vm)		mtx_lock(&vm->vm_lock)
195 #define	VMEM_TRYLOCK(vm)	mtx_trylock(&vm->vm_lock)
196 #define	VMEM_UNLOCK(vm)		mtx_unlock(&vm->vm_lock)
197 #define	VMEM_LOCK_INIT(vm, name) mtx_init(&vm->vm_lock, (name), NULL, MTX_DEF)
198 #define	VMEM_LOCK_DESTROY(vm)	mtx_destroy(&vm->vm_lock)
199 #define	VMEM_ASSERT_LOCKED(vm)	mtx_assert(&vm->vm_lock, MA_OWNED);
200 
201 #define	VMEM_ALIGNUP(addr, align)	(-(-(addr) & -(align)))
202 
203 #define	VMEM_CROSS_P(addr1, addr2, boundary) \
204 	((((addr1) ^ (addr2)) & -(boundary)) != 0)
205 
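/*
 * A few worked examples of the two macros above, assuming the usual
 * power-of-two "align" and "boundary" arguments:
 *
 *	VMEM_ALIGNUP(0x1234, 0x1000) == 0x2000	(rounded up to the boundary)
 *	VMEM_ALIGNUP(0x2000, 0x1000) == 0x2000	(already aligned, unchanged)
 *
 *	VMEM_CROSS_P(0x0fff, 0x1000, 0x1000) != 0	(straddles a 0x1000 boundary)
 *	VMEM_CROSS_P(0x1000, 0x1fff, 0x1000) == 0	(inside one 0x1000 block)
 *
 * A boundary of 0 makes -(boundary) == 0, so VMEM_CROSS_P() never reports
 * a crossing; that is how a "nocross" argument of 0 means "no constraint".
 */
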
206 #define	ORDER2SIZE(order)	((order) < VMEM_OPTVALUE ? ((order) + 1) : \
207     (vmem_size_t)1 << ((order) - (VMEM_OPTVALUE - VMEM_OPTORDER - 1)))
208 #define	SIZE2ORDER(size)	((size) <= VMEM_OPTVALUE ? ((size) - 1) : \
209     (flsl(size) + (VMEM_OPTVALUE - VMEM_OPTORDER - 2)))
210 
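/*
 * With VMEM_OPTORDER == 5 (VMEM_OPTVALUE == 32), sizes of 1..32 quanta get
 * exact orders 0..31 and larger sizes fall into power-of-two buckets, e.g.:
 *
 *	SIZE2ORDER(1)  == 0	ORDER2SIZE(0)  == 1
 *	SIZE2ORDER(32) == 31	ORDER2SIZE(31) == 32
 *	SIZE2ORDER(33) == 31	(shares the [32, 63] bucket)
 *	SIZE2ORDER(64) == 32	ORDER2SIZE(32) == 64
 *
 * ORDER2SIZE() yields the smallest size that maps to a given order, which
 * lets bt_freehead_toalloc() decide whether a list is guaranteed to hold
 * blocks large enough for a first-fit request.
 */
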
211 /*
212  * Maximum number of boundary tags that may be required to satisfy an
213  * allocation.  Two may be required to import.  Another two may be
214  * required to clip edges.
215  */
216 #define	BT_MAXALLOC	4
217 
218 /*
219  * Max free limits the number of locally cached boundary tags.  We
220  * just want to avoid hitting the zone allocator for every call.
221  */
222 #define BT_MAXFREE	(BT_MAXALLOC * 8)
223 
224 /* Allocator for boundary tags. */
225 static uma_zone_t vmem_bt_zone;
226 
227 /* boot time arena storage. */
228 static struct vmem kernel_arena_storage;
229 static struct vmem kmem_arena_storage;
230 static struct vmem buffer_arena_storage;
231 static struct vmem transient_arena_storage;
232 vmem_t *kernel_arena = &kernel_arena_storage;
233 vmem_t *kmem_arena = &kmem_arena_storage;
234 vmem_t *buffer_arena = &buffer_arena_storage;
235 vmem_t *transient_arena = &transient_arena_storage;
236 
237 #ifdef DEBUG_MEMGUARD
238 static struct vmem memguard_arena_storage;
239 vmem_t *memguard_arena = &memguard_arena_storage;
240 #endif
241 
242 /*
243  * Fill the vmem's boundary tag cache.  We guarantee that boundary tag
244  * allocation will not fail once bt_fill() passes.  To do so we cache
245  * at least the maximum possible tag allocations in the arena.
246  */
247 static int
248 bt_fill(vmem_t *vm, int flags)
249 {
250 	bt_t *bt;
251 
252 	VMEM_ASSERT_LOCKED(vm);
253 
254 	/*
255 	 * Only allow the kmem arena to dip into reserve tags.  It is the
256 	 * vmem where new tags come from.
257 	 */
258 	flags &= BT_FLAGS;
259 	if (vm != kmem_arena)
260 		flags &= ~M_USE_RESERVE;
261 
262 	/*
263 	 * Loop until we meet the reserve.  To minimize the lock shuffle
264 	 * and prevent simultaneous fills we first try a NOWAIT regardless
265 	 * of the caller's flags.  Specify M_NOVM so we don't recurse while
266 	 * holding a vmem lock.
267 	 */
268 	while (vm->vm_nfreetags < BT_MAXALLOC) {
269 		bt = uma_zalloc(vmem_bt_zone,
270 		    (flags & M_USE_RESERVE) | M_NOWAIT | M_NOVM);
271 		if (bt == NULL) {
272 			VMEM_UNLOCK(vm);
273 			bt = uma_zalloc(vmem_bt_zone, flags);
274 			VMEM_LOCK(vm);
275 			if (bt == NULL && (flags & M_NOWAIT) != 0)
276 				break;
277 		}
278 		LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
279 		vm->vm_nfreetags++;
280 	}
281 
282 	if (vm->vm_nfreetags < BT_MAXALLOC)
283 		return ENOMEM;
284 
285 	return 0;
286 }
287 
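/*
 * The expected caller pattern, as used by vmem_xalloc() and vmem_add()
 * below: with the arena locked, top up the tag reserve before doing
 * anything that might consume tags, e.g.
 *
 *	if (vm->vm_nfreetags < BT_MAXALLOC && bt_fill(vm, flags) != 0)
 *		return (ENOMEM);
 *
 * Once bt_fill() succeeds, up to BT_MAXALLOC bt_alloc() calls are
 * guaranteed to succeed for as long as the arena lock is held.
 */
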
288 /*
289  * Pop a tag off of the freetag stack.
290  */
291 static bt_t *
292 bt_alloc(vmem_t *vm)
293 {
294 	bt_t *bt;
295 
296 	VMEM_ASSERT_LOCKED(vm);
297 	bt = LIST_FIRST(&vm->vm_freetags);
298 	MPASS(bt != NULL);
299 	LIST_REMOVE(bt, bt_freelist);
300 	vm->vm_nfreetags--;
301 
302 	return bt;
303 }
304 
305 /*
306  * Trim the per-vmem free list.  Returns with the lock released to
307  * avoid allocator recursions.
308  */
309 static void
310 bt_freetrim(vmem_t *vm, int freelimit)
311 {
312 	LIST_HEAD(, vmem_btag) freetags;
313 	bt_t *bt;
314 
315 	LIST_INIT(&freetags);
316 	VMEM_ASSERT_LOCKED(vm);
317 	while (vm->vm_nfreetags > freelimit) {
318 		bt = LIST_FIRST(&vm->vm_freetags);
319 		LIST_REMOVE(bt, bt_freelist);
320 		vm->vm_nfreetags--;
321 		LIST_INSERT_HEAD(&freetags, bt, bt_freelist);
322 	}
323 	VMEM_UNLOCK(vm);
324 	while ((bt = LIST_FIRST(&freetags)) != NULL) {
325 		LIST_REMOVE(bt, bt_freelist);
326 		uma_zfree(vmem_bt_zone, bt);
327 	}
328 }
329 
330 static inline void
331 bt_free(vmem_t *vm, bt_t *bt)
332 {
333 
334 	VMEM_ASSERT_LOCKED(vm);
335 	MPASS(LIST_FIRST(&vm->vm_freetags) != bt);
336 	LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
337 	vm->vm_nfreetags++;
338 }
339 
340 /*
341  * freelist[0] ... [1, 1]
342  * freelist[1] ... [2, 2]
343  *  :
344  * freelist[29] ... [30, 30]
345  * freelist[30] ... [31, 31]
346  * freelist[31] ... [32, 63]
347  * freelist[32] ... [64, 127]
348  *  :
349  * freelist[n] ... [(1 << (n - 26)), (1 << (n - 25)) - 1]
350  *  :
351  */
352 
353 static struct vmem_freelist *
354 bt_freehead_tofree(vmem_t *vm, vmem_size_t size)
355 {
356 	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
357 	const int idx = SIZE2ORDER(qsize);
358 
359 	MPASS(size != 0 && qsize != 0);
360 	MPASS((size & vm->vm_quantum_mask) == 0);
361 	MPASS(idx >= 0);
362 	MPASS(idx < VMEM_MAXORDER);
363 
364 	return &vm->vm_freelist[idx];
365 }
366 
367 /*
368  * bt_freehead_toalloc: return the freelist for the given size and allocation
369  * strategy.
370  *
371  * For M_FIRSTFIT, return the list in which any block is guaranteed to be
372  * large enough for the requested size.  Otherwise, return the list which
373  * may contain blocks large enough for the requested size.
374  */
375 static struct vmem_freelist *
376 bt_freehead_toalloc(vmem_t *vm, vmem_size_t size, int strat)
377 {
378 	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
379 	int idx = SIZE2ORDER(qsize);
380 
381 	MPASS(size != 0 && qsize != 0);
382 	MPASS((size & vm->vm_quantum_mask) == 0);
383 
384 	if (strat == M_FIRSTFIT && ORDER2SIZE(idx) != qsize) {
385 		idx++;
386 		/* check too large request? */
387 	}
388 	MPASS(idx >= 0);
389 	MPASS(idx < VMEM_MAXORDER);
390 
391 	return &vm->vm_freelist[idx];
392 }
393 
394 /* ---- boundary tag hash */
395 
396 static struct vmem_hashlist *
397 bt_hashhead(vmem_t *vm, vmem_addr_t addr)
398 {
399 	struct vmem_hashlist *list;
400 	unsigned int hash;
401 
402 	hash = hash32_buf(&addr, sizeof(addr), 0);
403 	list = &vm->vm_hashlist[hash % vm->vm_hashsize];
404 
405 	return list;
406 }
407 
408 static bt_t *
409 bt_lookupbusy(vmem_t *vm, vmem_addr_t addr)
410 {
411 	struct vmem_hashlist *list;
412 	bt_t *bt;
413 
414 	VMEM_ASSERT_LOCKED(vm);
415 	list = bt_hashhead(vm, addr);
416 	LIST_FOREACH(bt, list, bt_hashlist) {
417 		if (bt->bt_start == addr) {
418 			break;
419 		}
420 	}
421 
422 	return bt;
423 }
424 
425 static void
426 bt_rembusy(vmem_t *vm, bt_t *bt)
427 {
428 
429 	VMEM_ASSERT_LOCKED(vm);
430 	MPASS(vm->vm_nbusytag > 0);
431 	vm->vm_inuse -= bt->bt_size;
432 	vm->vm_nbusytag--;
433 	LIST_REMOVE(bt, bt_hashlist);
434 }
435 
436 static void
437 bt_insbusy(vmem_t *vm, bt_t *bt)
438 {
439 	struct vmem_hashlist *list;
440 
441 	VMEM_ASSERT_LOCKED(vm);
442 	MPASS(bt->bt_type == BT_TYPE_BUSY);
443 
444 	list = bt_hashhead(vm, bt->bt_start);
445 	LIST_INSERT_HEAD(list, bt, bt_hashlist);
446 	vm->vm_nbusytag++;
447 	vm->vm_inuse += bt->bt_size;
448 }
449 
450 /* ---- boundary tag list */
451 
452 static void
453 bt_remseg(vmem_t *vm, bt_t *bt)
454 {
455 
456 	TAILQ_REMOVE(&vm->vm_seglist, bt, bt_seglist);
457 	bt_free(vm, bt);
458 }
459 
460 static void
461 bt_insseg(vmem_t *vm, bt_t *bt, bt_t *prev)
462 {
463 
464 	TAILQ_INSERT_AFTER(&vm->vm_seglist, prev, bt, bt_seglist);
465 }
466 
467 static void
468 bt_insseg_tail(vmem_t *vm, bt_t *bt)
469 {
470 
471 	TAILQ_INSERT_TAIL(&vm->vm_seglist, bt, bt_seglist);
472 }
473 
474 static void
475 bt_remfree(vmem_t *vm, bt_t *bt)
476 {
477 
478 	MPASS(bt->bt_type == BT_TYPE_FREE);
479 
480 	LIST_REMOVE(bt, bt_freelist);
481 }
482 
483 static void
484 bt_insfree(vmem_t *vm, bt_t *bt)
485 {
486 	struct vmem_freelist *list;
487 
488 	list = bt_freehead_tofree(vm, bt->bt_size);
489 	LIST_INSERT_HEAD(list, bt, bt_freelist);
490 }
491 
492 /* ---- vmem internal functions */
493 
494 /*
495  * Import from the arena into the quantum cache in UMA.
496  */
497 static int
498 qc_import(void *arg, void **store, int cnt, int flags)
499 {
500 	qcache_t *qc;
501 	vmem_addr_t addr;
502 	int i;
503 
504 	qc = arg;
505 	if ((flags & VMEM_FITMASK) == 0)
506 		flags |= M_BESTFIT;
507 	for (i = 0; i < cnt; i++) {
508 		if (vmem_xalloc(qc->qc_vmem, qc->qc_size, 0, 0, 0,
509 		    VMEM_ADDR_MIN, VMEM_ADDR_MAX, flags, &addr) != 0)
510 			break;
511 		store[i] = (void *)addr;
512 		/* Only guarantee one allocation. */
513 		flags &= ~M_WAITOK;
514 		flags |= M_NOWAIT;
515 	}
516 	return i;
517 }
518 
519 /*
520  * Release memory from the UMA cache to the arena.
521  */
522 static void
523 qc_release(void *arg, void **store, int cnt)
524 {
525 	qcache_t *qc;
526 	int i;
527 
528 	qc = arg;
529 	for (i = 0; i < cnt; i++)
530 		vmem_xfree(qc->qc_vmem, (vmem_addr_t)store[i], qc->qc_size);
531 }
532 
533 static void
534 qc_init(vmem_t *vm, vmem_size_t qcache_max)
535 {
536 	qcache_t *qc;
537 	vmem_size_t size;
538 	int qcache_idx_max;
539 	int i;
540 
541 	MPASS((qcache_max & vm->vm_quantum_mask) == 0);
542 	qcache_idx_max = MIN(qcache_max >> vm->vm_quantum_shift,
543 	    VMEM_QCACHE_IDX_MAX);
544 	vm->vm_qcache_max = qcache_idx_max << vm->vm_quantum_shift;
545 	for (i = 0; i < qcache_idx_max; i++) {
546 		qc = &vm->vm_qcache[i];
547 		size = (i + 1) << vm->vm_quantum_shift;
548 		snprintf(qc->qc_name, sizeof(qc->qc_name), "%s-%zu",
549 		    vm->vm_name, size);
550 		qc->qc_vmem = vm;
551 		qc->qc_size = size;
552 		qc->qc_cache = uma_zcache_create(qc->qc_name, size,
553 		    NULL, NULL, NULL, NULL, qc_import, qc_release, qc,
554 		    UMA_ZONE_VM);
555 		MPASS(qc->qc_cache);
556 	}
557 }
558 
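/*
 * For example, an arena created with quantum == PAGE_SIZE (4096) and
 * qcache_max == 8 * PAGE_SIZE gets qcache_idx_max == 8 cache zones named
 * "<arena>-4096" through "<arena>-32768", one per multiple of the quantum.
 * vmem_alloc() then indexes the matching cache directly:
 *
 *	qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift];
 *
 * Requests larger than vm_qcache_max bypass UMA and go straight to
 * vmem_xalloc().
 */
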
559 static void
560 qc_destroy(vmem_t *vm)
561 {
562 	int qcache_idx_max;
563 	int i;
564 
565 	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
566 	for (i = 0; i < qcache_idx_max; i++)
567 		uma_zdestroy(vm->vm_qcache[i].qc_cache);
568 }
569 
570 static void
571 qc_drain(vmem_t *vm)
572 {
573 	int qcache_idx_max;
574 	int i;
575 
576 	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
577 	for (i = 0; i < qcache_idx_max; i++)
578 		zone_drain(vm->vm_qcache[i].qc_cache);
579 }
580 
581 #ifndef UMA_MD_SMALL_ALLOC
582 
583 static struct mtx_padalign vmem_bt_lock;
584 
585 /*
586  * vmem_bt_alloc:  Allocate a new page of boundary tags.
587  *
588  * On architectures with uma_small_alloc there is no recursion; no address
589  * space need be allocated to allocate boundary tags.  For the others, we
590  * must handle recursion.  Boundary tags are necessary to allocate new
591  * boundary tags.
592  *
593  * UMA guarantees that enough tags are held in reserve to allocate a new
594  * page of kva.  We dip into this reserve by specifying M_USE_RESERVE only
595  * when allocating the page to hold new boundary tags.  In this way the
596  * reserve is automatically filled by the allocation that uses the reserve.
597  *
598  * We still have to guarantee that the new tags are allocated atomically since
599  * many threads may try concurrently.  The bt_lock provides this guarantee.
600  * We convert WAITOK allocations to NOWAIT and then handle the blocking here
601  * on failure.  It's ok to return NULL for a WAITOK allocation as UMA will
602  * loop again after checking to see if we lost the race to allocate.
603  *
604  * There is a small race between vmem_bt_alloc() returning the page and the
605  * zone lock being acquired to add the page to the zone.  For WAITOK
606  * allocations we just pause briefly.  NOWAIT may experience a transient
607  * failure.  To alleviate this we permit a small number of simultaneous
608  * fills to proceed concurrently so NOWAIT is less likely to fail unless
609  * we are really out of KVA.
610  */
611 static void *
612 vmem_bt_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *pflag, int wait)
613 {
614 	vmem_addr_t addr;
615 
616 	*pflag = UMA_SLAB_KMEM;
617 
618 	/*
619 	 * Single thread boundary tag allocation so that the address space
620 	 * and memory are added in one atomic operation.
621 	 */
622 	mtx_lock(&vmem_bt_lock);
623 	if (vmem_xalloc(kmem_arena, bytes, 0, 0, 0, VMEM_ADDR_MIN,
624 	    VMEM_ADDR_MAX, M_NOWAIT | M_NOVM | M_USE_RESERVE | M_BESTFIT,
625 	    &addr) == 0) {
626 		if (kmem_back(kmem_object, addr, bytes,
627 		    M_NOWAIT | M_USE_RESERVE) == 0) {
628 			mtx_unlock(&vmem_bt_lock);
629 			return ((void *)addr);
630 		}
631 		vmem_xfree(kmem_arena, addr, bytes);
632 		mtx_unlock(&vmem_bt_lock);
633 		/*
634 		 * Out of memory, not address space.  This may not even be
635 		 * possible due to M_USE_RESERVE page allocation.
636 		 */
637 		if (wait & M_WAITOK)
638 			VM_WAIT;
639 		return (NULL);
640 	}
641 	mtx_unlock(&vmem_bt_lock);
642 	/*
643 	 * We're either out of address space or lost a fill race.
644 	 */
645 	if (wait & M_WAITOK)
646 		pause("btalloc", 1);
647 
648 	return (NULL);
649 }
650 #endif
651 
652 void
653 vmem_startup(void)
654 {
655 
656 	mtx_init(&vmem_list_lock, "vmem list lock", NULL, MTX_DEF);
657 	vmem_bt_zone = uma_zcreate("vmem btag",
658 	    sizeof(struct vmem_btag), NULL, NULL, NULL, NULL,
659 	    UMA_ALIGN_PTR, UMA_ZONE_VM);
660 #ifndef UMA_MD_SMALL_ALLOC
661 	mtx_init(&vmem_bt_lock, "btag lock", NULL, MTX_DEF);
662 	uma_prealloc(vmem_bt_zone, BT_MAXALLOC);
663 	/*
664 	 * Reserve enough tags to allocate new tags.  We allow multiple
665 	 * CPUs to attempt to allocate new tags concurrently to limit
666 	 * false restarts in UMA.
667 	 */
668 	uma_zone_reserve(vmem_bt_zone, BT_MAXALLOC * (mp_ncpus + 1) / 2);
669 	uma_zone_set_allocf(vmem_bt_zone, vmem_bt_alloc);
670 #endif
671 }
672 
673 /* ---- rehash */
674 
675 static int
676 vmem_rehash(vmem_t *vm, vmem_size_t newhashsize)
677 {
678 	bt_t *bt;
679 	int i;
680 	struct vmem_hashlist *newhashlist;
681 	struct vmem_hashlist *oldhashlist;
682 	vmem_size_t oldhashsize;
683 
684 	MPASS(newhashsize > 0);
685 
686 	newhashlist = malloc(sizeof(struct vmem_hashlist) * newhashsize,
687 	    M_VMEM, M_NOWAIT);
688 	if (newhashlist == NULL)
689 		return ENOMEM;
690 	for (i = 0; i < newhashsize; i++) {
691 		LIST_INIT(&newhashlist[i]);
692 	}
693 
694 	VMEM_LOCK(vm);
695 	oldhashlist = vm->vm_hashlist;
696 	oldhashsize = vm->vm_hashsize;
697 	vm->vm_hashlist = newhashlist;
698 	vm->vm_hashsize = newhashsize;
699 	if (oldhashlist == NULL) {
700 		VMEM_UNLOCK(vm);
701 		return 0;
702 	}
703 	for (i = 0; i < oldhashsize; i++) {
704 		while ((bt = LIST_FIRST(&oldhashlist[i])) != NULL) {
705 			bt_rembusy(vm, bt);
706 			bt_insbusy(vm, bt);
707 		}
708 	}
709 	VMEM_UNLOCK(vm);
710 
711 	if (oldhashlist != vm->vm_hash0) {
712 		free(oldhashlist, M_VMEM);
713 	}
714 
715 	return 0;
716 }
717 
718 static void
719 vmem_periodic_kick(void *dummy)
720 {
721 
722 	taskqueue_enqueue(taskqueue_thread, &vmem_periodic_wk);
723 }
724 
725 static void
726 vmem_periodic(void *unused, int pending)
727 {
728 	vmem_t *vm;
729 	vmem_size_t desired;
730 	vmem_size_t current;
731 
732 	mtx_lock(&vmem_list_lock);
733 	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
734 #ifdef DIAGNOSTIC
735 		/* Convenient time to verify vmem state. */
736 		if (enable_vmem_check == 1) {
737 			VMEM_LOCK(vm);
738 			vmem_check(vm);
739 			VMEM_UNLOCK(vm);
740 		}
741 #endif
742 		desired = 1 << flsl(vm->vm_nbusytag);
743 		desired = MIN(MAX(desired, VMEM_HASHSIZE_MIN),
744 		    VMEM_HASHSIZE_MAX);
745 		current = vm->vm_hashsize;
746 
747 		/* Grow in powers of two.  Shrink less aggressively. */
748 		if (desired >= current * 2 || desired * 4 <= current)
749 			vmem_rehash(vm, desired);
750 
751 		/*
752 		 * Periodically wake up threads waiting for resources,
753 		 * so that they can ask for reclamation again.
754 		 */
755 		VMEM_CONDVAR_BROADCAST(vm);
756 	}
757 	mtx_unlock(&vmem_list_lock);
758 
759 	callout_reset(&vmem_periodic_ch, vmem_periodic_interval,
760 	    vmem_periodic_kick, NULL);
761 }
762 
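/*
 * The rehash policy in vmem_periodic() above rounds the busy tag count up
 * to a power of two and only resizes on large changes.  For example, with
 * 5000 busy tags flsl(5000) == 13, so the desired table size is 8192
 * buckets: an arena currently at 4096 buckets is grown (8192 >= 2 * 4096),
 * while one at 16384 buckets is left alone and would only shrink once the
 * desired size drops to 4096 or less.  The result is always clamped to
 * [VMEM_HASHSIZE_MIN, VMEM_HASHSIZE_MAX].
 */
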
763 static void
764 vmem_start_callout(void *unused)
765 {
766 
767 	TASK_INIT(&vmem_periodic_wk, 0, vmem_periodic, NULL);
768 	vmem_periodic_interval = hz * 10;
769 	callout_init(&vmem_periodic_ch, 1);
770 	callout_reset(&vmem_periodic_ch, vmem_periodic_interval,
771 	    vmem_periodic_kick, NULL);
772 }
773 SYSINIT(vfs, SI_SUB_CONFIGURE, SI_ORDER_ANY, vmem_start_callout, NULL);
774 
775 static void
776 vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int type)
777 {
778 	bt_t *btspan;
779 	bt_t *btfree;
780 
781 	MPASS(type == BT_TYPE_SPAN || type == BT_TYPE_SPAN_STATIC);
782 	MPASS((size & vm->vm_quantum_mask) == 0);
783 
784 	btspan = bt_alloc(vm);
785 	btspan->bt_type = type;
786 	btspan->bt_start = addr;
787 	btspan->bt_size = size;
788 	bt_insseg_tail(vm, btspan);
789 
790 	btfree = bt_alloc(vm);
791 	btfree->bt_type = BT_TYPE_FREE;
792 	btfree->bt_start = addr;
793 	btfree->bt_size = size;
794 	bt_insseg(vm, btfree, btspan);
795 	bt_insfree(vm, btfree);
796 
797 	vm->vm_size += size;
798 }
799 
800 static void
801 vmem_destroy1(vmem_t *vm)
802 {
803 	bt_t *bt;
804 
805 	/*
806 	 * Destroy the per-cpu quantum caches.
807 	 */
808 	qc_destroy(vm);
809 
810 	/*
811 	 * The vmem should now only contain empty segments.
812 	 */
813 	VMEM_LOCK(vm);
814 	MPASS(vm->vm_nbusytag == 0);
815 
816 	while ((bt = TAILQ_FIRST(&vm->vm_seglist)) != NULL)
817 		bt_remseg(vm, bt);
818 
819 	if (vm->vm_hashlist != NULL && vm->vm_hashlist != vm->vm_hash0)
820 		free(vm->vm_hashlist, M_VMEM);
821 
822 	bt_freetrim(vm, 0);
823 
824 	VMEM_CONDVAR_DESTROY(vm);
825 	VMEM_LOCK_DESTROY(vm);
826 	free(vm, M_VMEM);
827 }
828 
829 static int
830 vmem_import(vmem_t *vm, vmem_size_t size, vmem_size_t align, int flags)
831 {
832 	vmem_addr_t addr;
833 	int error;
834 
835 	if (vm->vm_importfn == NULL)
836 		return EINVAL;
837 
838 	/*
839 	 * To make sure we get a span that meets the alignment we double it
840 	 * and add the size to the tail.  This slightly overestimates.
841 	 */
842 	if (align != vm->vm_quantum_mask + 1)
843 		size = (align * 2) + size;
844 	size = roundup(size, vm->vm_import_quantum);
845 
846 	/*
847 	 * Hide MAXALLOC tags so we're guaranteed to be able to add this
848 	 * span and the tag we want to allocate from it.
849 	 */
850 	MPASS(vm->vm_nfreetags >= BT_MAXALLOC);
851 	vm->vm_nfreetags -= BT_MAXALLOC;
852 	VMEM_UNLOCK(vm);
853 	error = (vm->vm_importfn)(vm->vm_arg, size, flags, &addr);
854 	VMEM_LOCK(vm);
855 	vm->vm_nfreetags += BT_MAXALLOC;
856 	if (error)
857 		return ENOMEM;
858 
859 	vmem_add1(vm, addr, size, BT_TYPE_SPAN);
860 
861 	return 0;
862 }
863 
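/*
 * For example, importing on behalf of a 16KB allocation that needs 64KB
 * alignment, from an arena with a 4KB quantum and a 1MB vm_import_quantum:
 * because the alignment differs from the quantum the request is padded to
 * 2 * 64KB + 16KB = 144KB, then rounded up to the 1MB import quantum, so
 * the imported span is certain to contain a 64KB-aligned 16KB range no
 * matter where the parent arena places it.
 */
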
864 /*
865  * vmem_fit: check if a bt can satisfy the given restrictions.
866  *
867  * It is the caller's responsibility to ensure the region is big enough
868  * before calling this function.
869  */
870 static int
871 vmem_fit(const bt_t *bt, vmem_size_t size, vmem_size_t align,
872     vmem_size_t phase, vmem_size_t nocross, vmem_addr_t minaddr,
873     vmem_addr_t maxaddr, vmem_addr_t *addrp)
874 {
875 	vmem_addr_t start;
876 	vmem_addr_t end;
877 
878 	MPASS(size > 0);
879 	MPASS(bt->bt_size >= size); /* caller's responsibility */
880 
881 	/*
882 	 * XXX assumption: vmem_addr_t and vmem_size_t are
883 	 * unsigned integers of the same size.
884 	 */
885 
886 	start = bt->bt_start;
887 	if (start < minaddr) {
888 		start = minaddr;
889 	}
890 	end = BT_END(bt);
891 	if (end > maxaddr)
892 		end = maxaddr;
893 	if (start > end)
894 		return (ENOMEM);
895 
896 	start = VMEM_ALIGNUP(start - phase, align) + phase;
897 	if (start < bt->bt_start)
898 		start += align;
899 	if (VMEM_CROSS_P(start, start + size - 1, nocross)) {
900 		MPASS(align < nocross);
901 		start = VMEM_ALIGNUP(start - phase, nocross) + phase;
902 	}
903 	if (start <= end && end - start >= size - 1) {
904 		MPASS((start & (align - 1)) == phase);
905 		MPASS(!VMEM_CROSS_P(start, start + size - 1, nocross));
906 		MPASS(minaddr <= start);
907 		MPASS(maxaddr == 0 || start + size - 1 <= maxaddr);
908 		MPASS(bt->bt_start <= start);
909 		MPASS(BT_END(bt) - start >= size - 1);
910 		*addrp = start;
911 
912 		return (0);
913 	}
914 	return (ENOMEM);
915 }
916 
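/*
 * A worked example of the fit calculation above: given a free tag covering
 * [0x10f0, 0x20ef] and a request for size 0x100 with align 0x200, phase 0
 * and no nocross or address limits, the candidate start is rounded up to
 * 0x1200; since 0x20ef - 0x1200 >= 0x100 - 1 the fit succeeds and *addrp
 * is set to 0x1200.  A tag covering only [0x10f0, 0x12fe] would pass the
 * caller's size check (0x20f >= 0x100) yet fail here with ENOMEM, because
 * after rounding only 0xff bytes remain.
 */
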
917 /*
918  * vmem_clip:  Trim the boundary tag edges to the requested start and size.
919  */
920 static void
921 vmem_clip(vmem_t *vm, bt_t *bt, vmem_addr_t start, vmem_size_t size)
922 {
923 	bt_t *btnew;
924 	bt_t *btprev;
925 
926 	VMEM_ASSERT_LOCKED(vm);
927 	MPASS(bt->bt_type == BT_TYPE_FREE);
928 	MPASS(bt->bt_size >= size);
929 	bt_remfree(vm, bt);
930 	if (bt->bt_start != start) {
931 		btprev = bt_alloc(vm);
932 		btprev->bt_type = BT_TYPE_FREE;
933 		btprev->bt_start = bt->bt_start;
934 		btprev->bt_size = start - bt->bt_start;
935 		bt->bt_start = start;
936 		bt->bt_size -= btprev->bt_size;
937 		bt_insfree(vm, btprev);
938 		bt_insseg(vm, btprev,
939 		    TAILQ_PREV(bt, vmem_seglist, bt_seglist));
940 	}
941 	MPASS(bt->bt_start == start);
942 	if (bt->bt_size != size && bt->bt_size - size > vm->vm_quantum_mask) {
943 		/* split */
944 		btnew = bt_alloc(vm);
945 		btnew->bt_type = BT_TYPE_BUSY;
946 		btnew->bt_start = bt->bt_start;
947 		btnew->bt_size = size;
948 		bt->bt_start = bt->bt_start + size;
949 		bt->bt_size -= size;
950 		bt_insfree(vm, bt);
951 		bt_insseg(vm, btnew,
952 		    TAILQ_PREV(bt, vmem_seglist, bt_seglist));
953 		bt_insbusy(vm, btnew);
954 		bt = btnew;
955 	} else {
956 		bt->bt_type = BT_TYPE_BUSY;
957 		bt_insbusy(vm, bt);
958 	}
959 	MPASS(bt->bt_size >= size);
960 	bt->bt_type = BT_TYPE_BUSY;
961 }
962 
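/*
 * For instance, clipping the range [0x2000, 0x2fff] out of a free tag
 * covering [0x1000, 0x4fff] leaves three tags on the segment list:
 *
 *	before:	[ FREE 0x1000-0x4fff ]
 *	after:	[ FREE 0x1000-0x1fff ][ BUSY 0x2000-0x2fff ][ FREE 0x3000-0x4fff ]
 *
 * The leading and trailing free tags go back on their freelists and the
 * busy tag is hashed so that vmem_xfree() can later find it by address.
 */
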
963 /* ---- vmem API */
964 
965 void
966 vmem_set_import(vmem_t *vm, vmem_import_t *importfn,
967      vmem_release_t *releasefn, void *arg, vmem_size_t import_quantum)
968 {
969 
970 	VMEM_LOCK(vm);
971 	vm->vm_importfn = importfn;
972 	vm->vm_releasefn = releasefn;
973 	vm->vm_arg = arg;
974 	vm->vm_import_quantum = import_quantum;
975 	VMEM_UNLOCK(vm);
976 }
977 
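/*
 * A minimal sketch of stacking one arena on another with vmem_set_import()
 * above; the callback names, arena pointers and 1MB import quantum are
 * illustrative, but the signatures match how vm_importfn and vm_releasefn
 * are invoked by vmem_import() and vmem_xfree():
 *
 *	static int
 *	sub_import(void *arg, vmem_size_t size, int flags, vmem_addr_t *addrp)
 *	{
 *		vmem_t *parent = arg;
 *
 *		return (vmem_xalloc(parent, size, 0, 0, 0, VMEM_ADDR_MIN,
 *		    VMEM_ADDR_MAX, flags, addrp));
 *	}
 *
 *	static void
 *	sub_release(void *arg, vmem_addr_t addr, vmem_size_t size)
 *	{
 *
 *		vmem_xfree(arg, addr, size);
 *	}
 *
 *	vmem_set_import(sub_arena, sub_import, sub_release, parent_arena,
 *	    1024 * 1024);
 */
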
978 void
979 vmem_set_reclaim(vmem_t *vm, vmem_reclaim_t *reclaimfn)
980 {
981 
982 	VMEM_LOCK(vm);
983 	vm->vm_reclaimfn = reclaimfn;
984 	VMEM_UNLOCK(vm);
985 }
986 
987 /*
988  * vmem_init: initialize a vmem arena.
989  */
990 vmem_t *
991 vmem_init(vmem_t *vm, const char *name, vmem_addr_t base, vmem_size_t size,
992     vmem_size_t quantum, vmem_size_t qcache_max, int flags)
993 {
994 	int i;
995 
996 	MPASS(quantum > 0);
997 	MPASS((quantum & (quantum - 1)) == 0);
998 
999 	bzero(vm, sizeof(*vm));
1000 
1001 	VMEM_CONDVAR_INIT(vm, name);
1002 	VMEM_LOCK_INIT(vm, name);
1003 	vm->vm_nfreetags = 0;
1004 	LIST_INIT(&vm->vm_freetags);
1005 	strlcpy(vm->vm_name, name, sizeof(vm->vm_name));
1006 	vm->vm_quantum_mask = quantum - 1;
1007 	vm->vm_quantum_shift = flsl(quantum) - 1;
1008 	vm->vm_nbusytag = 0;
1009 	vm->vm_size = 0;
1010 	vm->vm_inuse = 0;
1011 	qc_init(vm, qcache_max);
1012 
1013 	TAILQ_INIT(&vm->vm_seglist);
1014 	for (i = 0; i < VMEM_MAXORDER; i++) {
1015 		LIST_INIT(&vm->vm_freelist[i]);
1016 	}
1017 	memset(&vm->vm_hash0, 0, sizeof(vm->vm_hash0));
1018 	vm->vm_hashsize = VMEM_HASHSIZE_MIN;
1019 	vm->vm_hashlist = vm->vm_hash0;
1020 
1021 	if (size != 0) {
1022 		if (vmem_add(vm, base, size, flags) != 0) {
1023 			vmem_destroy1(vm);
1024 			return NULL;
1025 		}
1026 	}
1027 
1028 	mtx_lock(&vmem_list_lock);
1029 	LIST_INSERT_HEAD(&vmem_list, vm, vm_alllist);
1030 	mtx_unlock(&vmem_list_lock);
1031 
1032 	return vm;
1033 }
1034 
1035 /*
1036  * vmem_create: create an arena.
1037  */
1038 vmem_t *
1039 vmem_create(const char *name, vmem_addr_t base, vmem_size_t size,
1040     vmem_size_t quantum, vmem_size_t qcache_max, int flags)
1041 {
1042 
1043 	vmem_t *vm;
1044 
1045 	vm = malloc(sizeof(*vm), M_VMEM, flags & (M_WAITOK|M_NOWAIT));
1046 	if (vm == NULL)
1047 		return (NULL);
1048 	if (vmem_init(vm, name, base, size, quantum, qcache_max,
1049 	    flags) == NULL)
1050 		return (NULL);
1051 	return (vm);
1052 }
1053 
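/*
 * A minimal usage sketch; the arena name, base and sizes are made up.
 * Carve an allocator out of a fixed range of some resource, hand out a
 * piece of it, and tear the arena down again:
 *
 *	vmem_t *vm;
 *	vmem_addr_t addr;
 *
 *	vm = vmem_create("example", 0x100000, 0x40000, 1, 0, M_WAITOK);
 *	if (vmem_alloc(vm, 0x1000, M_BESTFIT | M_WAITOK, &addr) == 0) {
 *		... use [addr, addr + 0x1000) ...
 *		vmem_free(vm, addr, 0x1000);
 *	}
 *	vmem_destroy(vm);
 *
 * A quantum of 1 gives byte granularity; a qcache_max of 0 disables the
 * per-CPU quantum caches for this arena.
 */
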
1054 void
1055 vmem_destroy(vmem_t *vm)
1056 {
1057 
1058 	mtx_lock(&vmem_list_lock);
1059 	LIST_REMOVE(vm, vm_alllist);
1060 	mtx_unlock(&vmem_list_lock);
1061 
1062 	vmem_destroy1(vm);
1063 }
1064 
1065 vmem_size_t
1066 vmem_roundup_size(vmem_t *vm, vmem_size_t size)
1067 {
1068 
1069 	return (size + vm->vm_quantum_mask) & ~vm->vm_quantum_mask;
1070 }
1071 
1072 /*
1073  * vmem_alloc: allocate resource from the arena.
1074  */
1075 int
1076 vmem_alloc(vmem_t *vm, vmem_size_t size, int flags, vmem_addr_t *addrp)
1077 {
1078 	const int strat __unused = flags & VMEM_FITMASK;
1079 	qcache_t *qc;
1080 
1081 	flags &= VMEM_FLAGS;
1082 	MPASS(size > 0);
1083 	MPASS(strat == M_BESTFIT || strat == M_FIRSTFIT);
1084 	if ((flags & M_NOWAIT) == 0)
1085 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "vmem_alloc");
1086 
1087 	if (size <= vm->vm_qcache_max) {
1088 		qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift];
1089 		*addrp = (vmem_addr_t)uma_zalloc(qc->qc_cache, flags);
1090 		if (*addrp == 0)
1091 			return (ENOMEM);
1092 		return (0);
1093 	}
1094 
1095 	return vmem_xalloc(vm, size, 0, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
1096 	    flags, addrp);
1097 }
1098 
1099 int
1100 vmem_xalloc(vmem_t *vm, const vmem_size_t size0, vmem_size_t align,
1101     const vmem_size_t phase, const vmem_size_t nocross,
1102     const vmem_addr_t minaddr, const vmem_addr_t maxaddr, int flags,
1103     vmem_addr_t *addrp)
1104 {
1105 	const vmem_size_t size = vmem_roundup_size(vm, size0);
1106 	struct vmem_freelist *list;
1107 	struct vmem_freelist *first;
1108 	struct vmem_freelist *end;
1109 	vmem_size_t avail;
1110 	bt_t *bt;
1111 	int error;
1112 	int strat;
1113 
1114 	flags &= VMEM_FLAGS;
1115 	strat = flags & VMEM_FITMASK;
1116 	MPASS(size0 > 0);
1117 	MPASS(size > 0);
1118 	MPASS(strat == M_BESTFIT || strat == M_FIRSTFIT);
1119 	MPASS((flags & (M_NOWAIT|M_WAITOK)) != (M_NOWAIT|M_WAITOK));
1120 	if ((flags & M_NOWAIT) == 0)
1121 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "vmem_xalloc");
1122 	MPASS((align & vm->vm_quantum_mask) == 0);
1123 	MPASS((align & (align - 1)) == 0);
1124 	MPASS((phase & vm->vm_quantum_mask) == 0);
1125 	MPASS((nocross & vm->vm_quantum_mask) == 0);
1126 	MPASS((nocross & (nocross - 1)) == 0);
1127 	MPASS((align == 0 && phase == 0) || phase < align);
1128 	MPASS(nocross == 0 || nocross >= size);
1129 	MPASS(minaddr <= maxaddr);
1130 	MPASS(!VMEM_CROSS_P(phase, phase + size - 1, nocross));
1131 
1132 	if (align == 0)
1133 		align = vm->vm_quantum_mask + 1;
1134 
1135 	*addrp = 0;
1136 	end = &vm->vm_freelist[VMEM_MAXORDER];
1137 	/*
1138 	 * choose a free block from which we allocate.
1139 	 */
1140 	first = bt_freehead_toalloc(vm, size, strat);
1141 	VMEM_LOCK(vm);
1142 	for (;;) {
1143 		/*
1144 		 * Make sure we have enough tags to complete the
1145 		 * operation.
1146 		 */
1147 		if (vm->vm_nfreetags < BT_MAXALLOC &&
1148 		    bt_fill(vm, flags) != 0) {
1149 			error = ENOMEM;
1150 			break;
1151 		}
1152 		/*
1153 	 	 * Scan freelists looking for a tag that satisfies the
1154 		 * allocation.  If we're doing BESTFIT we may encounter
1155 		 * sizes below the request.  If we're doing FIRSTFIT we
1156 		 * inspect only the first element from each list.
1157 		 */
1158 		for (list = first; list < end; list++) {
1159 			LIST_FOREACH(bt, list, bt_freelist) {
1160 				if (bt->bt_size >= size) {
1161 					error = vmem_fit(bt, size, align, phase,
1162 					    nocross, minaddr, maxaddr, addrp);
1163 					if (error == 0) {
1164 						vmem_clip(vm, bt, *addrp, size);
1165 						goto out;
1166 					}
1167 				}
1168 				/* FIRST skips to the next list. */
1169 				if (strat == M_FIRSTFIT)
1170 					break;
1171 			}
1172 		}
1173 		/*
1174 		 * Retry if the fast algorithm failed.
1175 		 */
1176 		if (strat == M_FIRSTFIT) {
1177 			strat = M_BESTFIT;
1178 			first = bt_freehead_toalloc(vm, size, strat);
1179 			continue;
1180 		}
1181 		/*
1182 		 * XXX it is possible to fail to meet restrictions with the
1183 		 * imported region.  It is up to the user to specify the
1184 		 * import quantum such that it can satisfy any allocation.
1185 		 */
1186 		if (vmem_import(vm, size, align, flags) == 0)
1187 			continue;
1188 
1189 		/*
1190 		 * Try to free some space from the quantum cache or reclaim
1191 		 * functions if available.
1192 		 */
1193 		if (vm->vm_qcache_max != 0 || vm->vm_reclaimfn != NULL) {
1194 			avail = vm->vm_size - vm->vm_inuse;
1195 			VMEM_UNLOCK(vm);
1196 			if (vm->vm_qcache_max != 0)
1197 				qc_drain(vm);
1198 			if (vm->vm_reclaimfn != NULL)
1199 				vm->vm_reclaimfn(vm, flags);
1200 			VMEM_LOCK(vm);
1201 			/* If we were successful retry even NOWAIT. */
1202 			if (vm->vm_size - vm->vm_inuse > avail)
1203 				continue;
1204 		}
1205 		if ((flags & M_NOWAIT) != 0) {
1206 			error = ENOMEM;
1207 			break;
1208 		}
1209 		VMEM_CONDVAR_WAIT(vm);
1210 	}
1211 out:
1212 	VMEM_UNLOCK(vm);
1213 	if (error != 0 && (flags & M_NOWAIT) == 0)
1214 		panic("vmem_xalloc: failed to satisfy an M_WAITOK allocation");
1215 
1216 	return (error);
1217 }
1218 
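/*
 * An illustrative constrained allocation, assuming the arena quantum
 * divides 4096: ask for one 4KB chunk that is 4KB aligned and does not
 * cross a 64KB boundary, anywhere in the arena:
 *
 *	error = vmem_xalloc(vm, 4096, 4096, 0, 65536, VMEM_ADDR_MIN,
 *	    VMEM_ADDR_MAX, M_BESTFIT | M_WAITOK, &addr);
 *
 * With M_WAITOK the call only returns once it has succeeded (a waiting
 * allocation that cannot be satisfied panics instead), so checking the
 * return value mostly matters for M_NOWAIT callers.
 */
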
1219 /*
1220  * vmem_free: free the resource to the arena.
1221  */
1222 void
1223 vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
1224 {
1225 	qcache_t *qc;
1226 	MPASS(size > 0);
1227 
1228 	if (size <= vm->vm_qcache_max) {
1229 		qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift];
1230 		uma_zfree(qc->qc_cache, (void *)addr);
1231 	} else
1232 		vmem_xfree(vm, addr, size);
1233 }
1234 
1235 void
1236 vmem_xfree(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
1237 {
1238 	bt_t *bt;
1239 	bt_t *t;
1240 
1241 	MPASS(size > 0);
1242 
1243 	VMEM_LOCK(vm);
1244 	bt = bt_lookupbusy(vm, addr);
1245 	MPASS(bt != NULL);
1246 	MPASS(bt->bt_start == addr);
1247 	MPASS(bt->bt_size == vmem_roundup_size(vm, size) ||
1248 	    bt->bt_size - vmem_roundup_size(vm, size) <= vm->vm_quantum_mask);
1249 	MPASS(bt->bt_type == BT_TYPE_BUSY);
1250 	bt_rembusy(vm, bt);
1251 	bt->bt_type = BT_TYPE_FREE;
1252 
1253 	/* coalesce */
1254 	t = TAILQ_NEXT(bt, bt_seglist);
1255 	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
1256 		MPASS(BT_END(bt) < t->bt_start);	/* YYY */
1257 		bt->bt_size += t->bt_size;
1258 		bt_remfree(vm, t);
1259 		bt_remseg(vm, t);
1260 	}
1261 	t = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
1262 	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
1263 		MPASS(BT_END(t) < bt->bt_start);	/* YYY */
1264 		bt->bt_size += t->bt_size;
1265 		bt->bt_start = t->bt_start;
1266 		bt_remfree(vm, t);
1267 		bt_remseg(vm, t);
1268 	}
1269 
1270 	t = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
1271 	MPASS(t != NULL);
1272 	MPASS(BT_ISSPAN_P(t) || t->bt_type == BT_TYPE_BUSY);
1273 	if (vm->vm_releasefn != NULL && t->bt_type == BT_TYPE_SPAN &&
1274 	    t->bt_size == bt->bt_size) {
1275 		vmem_addr_t spanaddr;
1276 		vmem_size_t spansize;
1277 
1278 		MPASS(t->bt_start == bt->bt_start);
1279 		spanaddr = bt->bt_start;
1280 		spansize = bt->bt_size;
1281 		bt_remseg(vm, bt);
1282 		bt_remseg(vm, t);
1283 		vm->vm_size -= spansize;
1284 		VMEM_CONDVAR_BROADCAST(vm);
1285 		bt_freetrim(vm, BT_MAXFREE);
1286 		(*vm->vm_releasefn)(vm->vm_arg, spanaddr, spansize);
1287 	} else {
1288 		bt_insfree(vm, bt);
1289 		VMEM_CONDVAR_BROADCAST(vm);
1290 		bt_freetrim(vm, BT_MAXFREE);
1291 	}
1292 }
1293 
1294 /*
1295  * vmem_add: add a span of preallocated address space to the arena as a
1296  * static span, which is never handed back through the release function.
1297  */
1298 int
1299 vmem_add(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int flags)
1300 {
1301 	int error;
1302 
1303 	error = 0;
1304 	flags &= VMEM_FLAGS;
1305 	VMEM_LOCK(vm);
1306 	if (vm->vm_nfreetags >= BT_MAXALLOC || bt_fill(vm, flags) == 0)
1307 		vmem_add1(vm, addr, size, BT_TYPE_SPAN_STATIC);
1308 	else
1309 		error = ENOMEM;
1310 	VMEM_UNLOCK(vm);
1311 
1312 	return (error);
1313 }
1314 
1315 /*
1316  * vmem_size: report information about the arena's size.
1317  */
1318 vmem_size_t
1319 vmem_size(vmem_t *vm, int typemask)
1320 {
1321 	int i;
1322 
1323 	switch (typemask) {
1324 	case VMEM_ALLOC:
1325 		return vm->vm_inuse;
1326 	case VMEM_FREE:
1327 		return vm->vm_size - vm->vm_inuse;
1328 	case VMEM_FREE|VMEM_ALLOC:
1329 		return vm->vm_size;
1330 	case VMEM_MAXFREE:
1331 		VMEM_LOCK(vm);
1332 		for (i = VMEM_MAXORDER - 1; i >= 0; i--) {
1333 			if (LIST_EMPTY(&vm->vm_freelist[i]))
1334 				continue;
1335 			VMEM_UNLOCK(vm);
1336 			return ((vmem_size_t)ORDER2SIZE(i) <<
1337 			    vm->vm_quantum_shift);
1338 		}
1339 		VMEM_UNLOCK(vm);
1340 		return (0);
1341 	default:
1342 		panic("vmem_size");
1343 	}
1344 }
1345 
1346 /* ---- debug */
1347 
1348 #if defined(DDB) || defined(DIAGNOSTIC)
1349 
1350 static void bt_dump(const bt_t *, int (*)(const char *, ...)
1351     __printflike(1, 2));
1352 
1353 static const char *
1354 bt_type_string(int type)
1355 {
1356 
1357 	switch (type) {
1358 	case BT_TYPE_BUSY:
1359 		return "busy";
1360 	case BT_TYPE_FREE:
1361 		return "free";
1362 	case BT_TYPE_SPAN:
1363 		return "span";
1364 	case BT_TYPE_SPAN_STATIC:
1365 		return "static span";
1366 	default:
1367 		break;
1368 	}
1369 	return "BOGUS";
1370 }
1371 
1372 static void
1373 bt_dump(const bt_t *bt, int (*pr)(const char *, ...))
1374 {
1375 
1376 	(*pr)("\t%p: %jx %jx, %d(%s)\n",
1377 	    bt, (intmax_t)bt->bt_start, (intmax_t)bt->bt_size,
1378 	    bt->bt_type, bt_type_string(bt->bt_type));
1379 }
1380 
1381 static void
1382 vmem_dump(const vmem_t *vm , int (*pr)(const char *, ...) __printflike(1, 2))
1383 {
1384 	const bt_t *bt;
1385 	int i;
1386 
1387 	(*pr)("vmem %p '%s'\n", vm, vm->vm_name);
1388 	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1389 		bt_dump(bt, pr);
1390 	}
1391 
1392 	for (i = 0; i < VMEM_MAXORDER; i++) {
1393 		const struct vmem_freelist *fl = &vm->vm_freelist[i];
1394 
1395 		if (LIST_EMPTY(fl)) {
1396 			continue;
1397 		}
1398 
1399 		(*pr)("freelist[%d]\n", i);
1400 		LIST_FOREACH(bt, fl, bt_freelist) {
1401 			bt_dump(bt, pr);
1402 		}
1403 	}
1404 }
1405 
1406 #endif /* defined(DDB) || defined(DIAGNOSTIC) */
1407 
1408 #if defined(DDB)
1409 #include <ddb/ddb.h>
1410 
1411 static bt_t *
1412 vmem_whatis_lookup(vmem_t *vm, vmem_addr_t addr)
1413 {
1414 	bt_t *bt;
1415 
1416 	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1417 		if (BT_ISSPAN_P(bt)) {
1418 			continue;
1419 		}
1420 		if (bt->bt_start <= addr && addr <= BT_END(bt)) {
1421 			return bt;
1422 		}
1423 	}
1424 
1425 	return NULL;
1426 }
1427 
1428 void
1429 vmem_whatis(vmem_addr_t addr, int (*pr)(const char *, ...))
1430 {
1431 	vmem_t *vm;
1432 
1433 	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
1434 		bt_t *bt;
1435 
1436 		bt = vmem_whatis_lookup(vm, addr);
1437 		if (bt == NULL) {
1438 			continue;
1439 		}
1440 		(*pr)("%p is %p+%zu in VMEM '%s' (%s)\n",
1441 		    (void *)addr, (void *)bt->bt_start,
1442 		    (vmem_size_t)(addr - bt->bt_start), vm->vm_name,
1443 		    (bt->bt_type == BT_TYPE_BUSY) ? "allocated" : "free");
1444 	}
1445 }
1446 
1447 void
1448 vmem_printall(const char *modif, int (*pr)(const char *, ...))
1449 {
1450 	const vmem_t *vm;
1451 
1452 	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
1453 		vmem_dump(vm, pr);
1454 	}
1455 }
1456 
1457 void
1458 vmem_print(vmem_addr_t addr, const char *modif, int (*pr)(const char *, ...))
1459 {
1460 	const vmem_t *vm = (const void *)addr;
1461 
1462 	vmem_dump(vm, pr);
1463 }
1464 
1465 DB_SHOW_COMMAND(vmemdump, vmemdump)
1466 {
1467 
1468 	if (!have_addr) {
1469 		db_printf("usage: show vmemdump <addr>\n");
1470 		return;
1471 	}
1472 
1473 	vmem_dump((const vmem_t *)addr, db_printf);
1474 }
1475 
1476 DB_SHOW_ALL_COMMAND(vmemdump, vmemdumpall)
1477 {
1478 	const vmem_t *vm;
1479 
1480 	LIST_FOREACH(vm, &vmem_list, vm_alllist)
1481 		vmem_dump(vm, db_printf);
1482 }
1483 
1484 DB_SHOW_COMMAND(vmem, vmem_summ)
1485 {
1486 	const vmem_t *vm = (const void *)addr;
1487 	const bt_t *bt;
1488 	size_t ft[VMEM_MAXORDER], ut[VMEM_MAXORDER];
1489 	size_t fs[VMEM_MAXORDER], us[VMEM_MAXORDER];
1490 	int ord;
1491 
1492 	if (!have_addr) {
1493 		db_printf("usage: show vmem <addr>\n");
1494 		return;
1495 	}
1496 
1497 	db_printf("vmem %p '%s'\n", vm, vm->vm_name);
1498 	db_printf("\tquantum:\t%zu\n", vm->vm_quantum_mask + 1);
1499 	db_printf("\tsize:\t%zu\n", vm->vm_size);
1500 	db_printf("\tinuse:\t%zu\n", vm->vm_inuse);
1501 	db_printf("\tfree:\t%zu\n", vm->vm_size - vm->vm_inuse);
1502 	db_printf("\tbusy tags:\t%d\n", vm->vm_nbusytag);
1503 	db_printf("\tfree tags:\t%d\n", vm->vm_nfreetags);
1504 
1505 	memset(&ft, 0, sizeof(ft));
1506 	memset(&ut, 0, sizeof(ut));
1507 	memset(&fs, 0, sizeof(fs));
1508 	memset(&us, 0, sizeof(us));
1509 	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1510 		ord = SIZE2ORDER(bt->bt_size >> vm->vm_quantum_shift);
1511 		if (bt->bt_type == BT_TYPE_BUSY) {
1512 			ut[ord]++;
1513 			us[ord] += bt->bt_size;
1514 		} else if (bt->bt_type == BT_TYPE_FREE) {
1515 			ft[ord]++;
1516 			fs[ord] += bt->bt_size;
1517 		}
1518 	}
1519 	db_printf("\t\t\tinuse\tsize\t\tfree\tsize\n");
1520 	for (ord = 0; ord < VMEM_MAXORDER; ord++) {
1521 		if (ut[ord] == 0 && ft[ord] == 0)
1522 			continue;
1523 		db_printf("\t%-15zu %zu\t%-15zu %zu\t%-16zu\n",
1524 		    ORDER2SIZE(ord) << vm->vm_quantum_shift,
1525 		    ut[ord], us[ord], ft[ord], fs[ord]);
1526 	}
1527 }
1528 
1529 DB_SHOW_ALL_COMMAND(vmem, vmem_summall)
1530 {
1531 	const vmem_t *vm;
1532 
1533 	LIST_FOREACH(vm, &vmem_list, vm_alllist)
1534 		vmem_summ((db_expr_t)vm, TRUE, count, modif);
1535 }
1536 #endif /* defined(DDB) */
1537 
1538 #define vmem_printf printf
1539 
1540 #if defined(DIAGNOSTIC)
1541 
1542 static bool
1543 vmem_check_sanity(vmem_t *vm)
1544 {
1545 	const bt_t *bt, *bt2;
1546 
1547 	MPASS(vm != NULL);
1548 
1549 	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1550 		if (bt->bt_start > BT_END(bt)) {
1551 			printf("corrupted tag\n");
1552 			bt_dump(bt, vmem_printf);
1553 			return false;
1554 		}
1555 	}
1556 	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1557 		TAILQ_FOREACH(bt2, &vm->vm_seglist, bt_seglist) {
1558 			if (bt == bt2) {
1559 				continue;
1560 			}
1561 			if (BT_ISSPAN_P(bt) != BT_ISSPAN_P(bt2)) {
1562 				continue;
1563 			}
1564 			if (bt->bt_start <= BT_END(bt2) &&
1565 			    bt2->bt_start <= BT_END(bt)) {
1566 				printf("overlapping tags\n");
1567 				bt_dump(bt, vmem_printf);
1568 				bt_dump(bt2, vmem_printf);
1569 				return false;
1570 			}
1571 		}
1572 	}
1573 
1574 	return true;
1575 }
1576 
1577 static void
1578 vmem_check(vmem_t *vm)
1579 {
1580 
1581 	if (!vmem_check_sanity(vm)) {
1582 		panic("insanity vmem %p", vm);
1583 	}
1584 }
1585 
1586 #endif /* defined(DIAGNOSTIC) */
1587