xref: /freebsd/sys/kern/subr_vmem.c (revision 1de7b4b805ddbf2429da511c053686ac4591ed89)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c)2006,2007,2008,2009 YAMAMOTO Takashi,
5  * Copyright (c) 2013 EMC Corp.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 
30 /*
31  * From:
32  *	$NetBSD: vmem_impl.h,v 1.2 2013/01/29 21:26:24 para Exp $
33  *	$NetBSD: subr_vmem.c,v 1.83 2013/03/06 11:20:10 yamt Exp $
34  */
35 
36 /*
37  * reference:
38  * -	Magazines and Vmem: Extending the Slab Allocator
39  *	to Many CPUs and Arbitrary Resources
40  *	http://www.usenix.org/event/usenix01/bonwick.html
41  */
42 
43 #include <sys/cdefs.h>
44 __FBSDID("$FreeBSD$");
45 
46 #include "opt_ddb.h"
47 
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/kernel.h>
51 #include <sys/queue.h>
52 #include <sys/callout.h>
53 #include <sys/hash.h>
54 #include <sys/lock.h>
55 #include <sys/malloc.h>
56 #include <sys/mutex.h>
57 #include <sys/smp.h>
58 #include <sys/condvar.h>
59 #include <sys/sysctl.h>
60 #include <sys/taskqueue.h>
61 #include <sys/vmem.h>
62 
63 #include "opt_vm.h"
64 
65 #include <vm/uma.h>
66 #include <vm/vm.h>
67 #include <vm/pmap.h>
68 #include <vm/vm_map.h>
69 #include <vm/vm_object.h>
70 #include <vm/vm_kern.h>
71 #include <vm/vm_extern.h>
72 #include <vm/vm_param.h>
73 #include <vm/vm_pageout.h>
74 
75 #define	VMEM_OPTORDER		5
76 #define	VMEM_OPTVALUE		(1 << VMEM_OPTORDER)
77 #define	VMEM_MAXORDER						\
78     (VMEM_OPTVALUE - 1 + sizeof(vmem_size_t) * NBBY - VMEM_OPTORDER)
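/*
 * For example, with VMEM_OPTORDER == 5 the first VMEM_OPTVALUE - 1 (31)
 * freelists hold exact sizes of 1..31 quanta and the remaining freelists
 * are power-of-two buckets ([32, 63], [64, 127], ...).  On a platform
 * where vmem_size_t is 64 bits wide this yields
 * VMEM_MAXORDER == 32 - 1 + 64 - 5 == 90 freelists.
 */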
79 
80 #define	VMEM_HASHSIZE_MIN	16
81 #define	VMEM_HASHSIZE_MAX	131072
82 
83 #define	VMEM_QCACHE_IDX_MAX	16
84 
85 #define	VMEM_FITMASK	(M_BESTFIT | M_FIRSTFIT)
86 
87 #define	VMEM_FLAGS						\
88     (M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_NOVM | M_BESTFIT | M_FIRSTFIT)
89 
90 #define	BT_FLAGS	(M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_NOVM)
91 
92 #define	QC_NAME_MAX	16
93 
94 /*
95  * Data structures private to vmem.
96  */
97 MALLOC_DEFINE(M_VMEM, "vmem", "vmem internal structures");
98 
99 typedef struct vmem_btag bt_t;
100 
101 TAILQ_HEAD(vmem_seglist, vmem_btag);
102 LIST_HEAD(vmem_freelist, vmem_btag);
103 LIST_HEAD(vmem_hashlist, vmem_btag);
104 
105 struct qcache {
106 	uma_zone_t	qc_cache;
107 	vmem_t 		*qc_vmem;
108 	vmem_size_t	qc_size;
109 	char		qc_name[QC_NAME_MAX];
110 };
111 typedef struct qcache qcache_t;
112 #define	QC_POOL_TO_QCACHE(pool)	((qcache_t *)(pool->pr_qcache))
113 
114 #define	VMEM_NAME_MAX	16
115 
116 /* vmem arena */
117 struct vmem {
118 	struct mtx_padalign	vm_lock;
119 	struct cv		vm_cv;
120 	char			vm_name[VMEM_NAME_MAX+1];
121 	LIST_ENTRY(vmem)	vm_alllist;
122 	struct vmem_hashlist	vm_hash0[VMEM_HASHSIZE_MIN];
123 	struct vmem_freelist	vm_freelist[VMEM_MAXORDER];
124 	struct vmem_seglist	vm_seglist;
125 	struct vmem_hashlist	*vm_hashlist;
126 	vmem_size_t		vm_hashsize;
127 
128 	/* Constant after init */
129 	vmem_size_t		vm_qcache_max;
130 	vmem_size_t		vm_quantum_mask;
131 	vmem_size_t		vm_import_quantum;
132 	int			vm_quantum_shift;
133 
134 	/* Written on alloc/free */
135 	LIST_HEAD(, vmem_btag)	vm_freetags;
136 	int			vm_nfreetags;
137 	int			vm_nbusytag;
138 	vmem_size_t		vm_inuse;
139 	vmem_size_t		vm_size;
140 
141 	/* Used on import. */
142 	vmem_import_t		*vm_importfn;
143 	vmem_release_t		*vm_releasefn;
144 	void			*vm_arg;
145 
146 	/* Space exhaustion callback. */
147 	vmem_reclaim_t		*vm_reclaimfn;
148 
149 	/* quantum cache */
150 	qcache_t		vm_qcache[VMEM_QCACHE_IDX_MAX];
151 };
152 
153 /* boundary tag */
154 struct vmem_btag {
155 	TAILQ_ENTRY(vmem_btag) bt_seglist;
156 	union {
157 		LIST_ENTRY(vmem_btag) u_freelist; /* BT_TYPE_FREE */
158 		LIST_ENTRY(vmem_btag) u_hashlist; /* BT_TYPE_BUSY */
159 	} bt_u;
160 #define	bt_hashlist	bt_u.u_hashlist
161 #define	bt_freelist	bt_u.u_freelist
162 	vmem_addr_t	bt_start;
163 	vmem_size_t	bt_size;
164 	int		bt_type;
165 };
166 
167 #define	BT_TYPE_SPAN		1	/* Allocated from importfn */
168 #define	BT_TYPE_SPAN_STATIC	2	/* vmem_add() or create. */
169 #define	BT_TYPE_FREE		3	/* Available space. */
170 #define	BT_TYPE_BUSY		4	/* Used space. */
171 #define	BT_ISSPAN_P(bt)	((bt)->bt_type <= BT_TYPE_SPAN_STATIC)
172 
173 #define	BT_END(bt)	((bt)->bt_start + (bt)->bt_size - 1)
174 
175 #if defined(DIAGNOSTIC)
176 static int enable_vmem_check = 1;
177 SYSCTL_INT(_debug, OID_AUTO, vmem_check, CTLFLAG_RWTUN,
178     &enable_vmem_check, 0, "Enable vmem check");
179 static void vmem_check(vmem_t *);
180 #endif
181 
182 static struct callout	vmem_periodic_ch;
183 static int		vmem_periodic_interval;
184 static struct task	vmem_periodic_wk;
185 
186 static struct mtx_padalign __exclusive_cache_line vmem_list_lock;
187 static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
188 
189 /* ---- misc */
190 #define	VMEM_CONDVAR_INIT(vm, wchan)	cv_init(&vm->vm_cv, wchan)
191 #define	VMEM_CONDVAR_DESTROY(vm)	cv_destroy(&vm->vm_cv)
192 #define	VMEM_CONDVAR_WAIT(vm)		cv_wait(&vm->vm_cv, &vm->vm_lock)
193 #define	VMEM_CONDVAR_BROADCAST(vm)	cv_broadcast(&vm->vm_cv)
194 
195 
196 #define	VMEM_LOCK(vm)		mtx_lock(&vm->vm_lock)
197 #define	VMEM_TRYLOCK(vm)	mtx_trylock(&vm->vm_lock)
198 #define	VMEM_UNLOCK(vm)		mtx_unlock(&vm->vm_lock)
199 #define	VMEM_LOCK_INIT(vm, name) mtx_init(&vm->vm_lock, (name), NULL, MTX_DEF)
200 #define	VMEM_LOCK_DESTROY(vm)	mtx_destroy(&vm->vm_lock)
201 #define	VMEM_ASSERT_LOCKED(vm)	mtx_assert(&vm->vm_lock, MA_OWNED);
202 
203 #define	VMEM_ALIGNUP(addr, align)	(-(-(addr) & -(align)))
204 
205 #define	VMEM_CROSS_P(addr1, addr2, boundary) \
206 	((((addr1) ^ (addr2)) & -(boundary)) != 0)
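/*
 * Worked examples (align and boundary are powers of two):
 *	VMEM_ALIGNUP(0x1234, 0x1000) == 0x2000	(rounded up to the alignment)
 *	VMEM_ALIGNUP(0x2000, 0x1000) == 0x2000	(already aligned)
 *	VMEM_CROSS_P(0x0fff, 0x1000, 0x1000)	is non-zero (crosses a boundary)
 *	VMEM_CROSS_P(0x1000, 0x1fff, 0x1000)	is zero (stays within one)
 */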
207 
208 #define	ORDER2SIZE(order)	((order) < VMEM_OPTVALUE ? ((order) + 1) : \
209     (vmem_size_t)1 << ((order) - (VMEM_OPTVALUE - VMEM_OPTORDER - 1)))
210 #define	SIZE2ORDER(size)	((size) <= VMEM_OPTVALUE ? ((size) - 1) : \
211     (flsl(size) + (VMEM_OPTVALUE - VMEM_OPTORDER - 2)))
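/*
 * Worked examples, with sizes in quanta and VMEM_OPTVALUE == 32:
 *	SIZE2ORDER(1) == 0		ORDER2SIZE(0) == 1
 *	SIZE2ORDER(32) == 31		ORDER2SIZE(31) == 32
 *	SIZE2ORDER(33) == SIZE2ORDER(63) == 31	(both map to the [32, 63] list)
 *	SIZE2ORDER(64) == 32		ORDER2SIZE(32) == 64
 */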
212 
213 /*
214  * Maximum number of boundary tags that may be required to satisfy an
215  * allocation.  Two may be required to import.  Another two may be
216  * required to clip edges.
217  */
218 #define	BT_MAXALLOC	4
219 
220 /*
221  * BT_MAXFREE limits the number of locally cached boundary tags.  We
222  * just want to avoid hitting the zone allocator for every call.
223  */
224 #define BT_MAXFREE	(BT_MAXALLOC * 8)
225 
226 /* Allocator for boundary tags. */
227 static uma_zone_t vmem_bt_zone;
228 
229 /* boot time arena storage. */
230 static struct vmem kernel_arena_storage;
231 static struct vmem kmem_arena_storage;
232 static struct vmem buffer_arena_storage;
233 static struct vmem transient_arena_storage;
234 vmem_t *kernel_arena = &kernel_arena_storage;
235 vmem_t *kmem_arena = &kmem_arena_storage;
236 vmem_t *buffer_arena = &buffer_arena_storage;
237 vmem_t *transient_arena = &transient_arena_storage;
238 
239 #ifdef DEBUG_MEMGUARD
240 static struct vmem memguard_arena_storage;
241 vmem_t *memguard_arena = &memguard_arena_storage;
242 #endif
243 
244 /*
245  * Fill the vmem's boundary tag cache.  We guarantee that boundary tag
246  * allocation will not fail once bt_fill() succeeds.  To do so we cache
247  * at least BT_MAXALLOC tags, the most one allocation can need, in the arena.
248  */
249 static int
250 bt_fill(vmem_t *vm, int flags)
251 {
252 	bt_t *bt;
253 
254 	VMEM_ASSERT_LOCKED(vm);
255 
256 	/*
257 	 * Only allow the kmem arena to dip into reserve tags.  It is the
258 	 * vmem where new tags come from.
259 	 */
260 	flags &= BT_FLAGS;
261 	if (vm != kmem_arena)
262 		flags &= ~M_USE_RESERVE;
263 
264 	/*
265 	 * Loop until we meet the reserve.  To minimize the lock shuffle
266 	 * and prevent simultaneous fills we first try a NOWAIT regardless
267 	 * of the caller's flags.  Specify M_NOVM so we don't recurse while
268 	 * holding a vmem lock.
269 	 */
270 	while (vm->vm_nfreetags < BT_MAXALLOC) {
271 		bt = uma_zalloc(vmem_bt_zone,
272 		    (flags & M_USE_RESERVE) | M_NOWAIT | M_NOVM);
273 		if (bt == NULL) {
274 			VMEM_UNLOCK(vm);
275 			bt = uma_zalloc(vmem_bt_zone, flags);
276 			VMEM_LOCK(vm);
277 			if (bt == NULL && (flags & M_NOWAIT) != 0)
278 				break;
279 		}
280 		LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
281 		vm->vm_nfreetags++;
282 	}
283 
284 	if (vm->vm_nfreetags < BT_MAXALLOC)
285 		return ENOMEM;
286 
287 	return 0;
288 }
289 
290 /*
291  * Pop a tag off of the freetag stack.
292  */
293 static bt_t *
294 bt_alloc(vmem_t *vm)
295 {
296 	bt_t *bt;
297 
298 	VMEM_ASSERT_LOCKED(vm);
299 	bt = LIST_FIRST(&vm->vm_freetags);
300 	MPASS(bt != NULL);
301 	LIST_REMOVE(bt, bt_freelist);
302 	vm->vm_nfreetags--;
303 
304 	return bt;
305 }
306 
307 /*
308  * Trim the per-vmem free list.  Returns with the lock released to
309  * avoid allocator recursions.
310  */
311 static void
312 bt_freetrim(vmem_t *vm, int freelimit)
313 {
314 	LIST_HEAD(, vmem_btag) freetags;
315 	bt_t *bt;
316 
317 	LIST_INIT(&freetags);
318 	VMEM_ASSERT_LOCKED(vm);
319 	while (vm->vm_nfreetags > freelimit) {
320 		bt = LIST_FIRST(&vm->vm_freetags);
321 		LIST_REMOVE(bt, bt_freelist);
322 		vm->vm_nfreetags--;
323 		LIST_INSERT_HEAD(&freetags, bt, bt_freelist);
324 	}
325 	VMEM_UNLOCK(vm);
326 	while ((bt = LIST_FIRST(&freetags)) != NULL) {
327 		LIST_REMOVE(bt, bt_freelist);
328 		uma_zfree(vmem_bt_zone, bt);
329 	}
330 }
331 
332 static inline void
333 bt_free(vmem_t *vm, bt_t *bt)
334 {
335 
336 	VMEM_ASSERT_LOCKED(vm);
337 	MPASS(LIST_FIRST(&vm->vm_freetags) != bt);
338 	LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
339 	vm->vm_nfreetags++;
340 }
341 
342 /*
343  * freelist[0] ... [1, 1]
344  * freelist[1] ... [2, 2]
345  *  :
346  * freelist[29] ... [30, 30]
347  * freelist[30] ... [31, 31]
348  * freelist[31] ... [32, 63]
349  * freelist[32] ... [64, 127]
350  *  :
351  * freelist[n] ... [(1 << (n - 26)), (1 << (n - 25)) - 1]
352  *  :
353  */
354 
355 static struct vmem_freelist *
356 bt_freehead_tofree(vmem_t *vm, vmem_size_t size)
357 {
358 	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
359 	const int idx = SIZE2ORDER(qsize);
360 
361 	MPASS(size != 0 && qsize != 0);
362 	MPASS((size & vm->vm_quantum_mask) == 0);
363 	MPASS(idx >= 0);
364 	MPASS(idx < VMEM_MAXORDER);
365 
366 	return &vm->vm_freelist[idx];
367 }
368 
369 /*
370  * bt_freehead_toalloc: return the freelist for the given size and allocation
371  * strategy.
372  *
373  * For M_FIRSTFIT, return the list in which every block is large enough
374  * for the requested size.  Otherwise, return the lowest-order list that
375  * may contain blocks large enough for the requested size.
376  */
377 static struct vmem_freelist *
378 bt_freehead_toalloc(vmem_t *vm, vmem_size_t size, int strat)
379 {
380 	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
381 	int idx = SIZE2ORDER(qsize);
382 
383 	MPASS(size != 0 && qsize != 0);
384 	MPASS((size & vm->vm_quantum_mask) == 0);
385 
386 	if (strat == M_FIRSTFIT && ORDER2SIZE(idx) != qsize) {
387 		idx++;
388 		/* check too large request? */
389 	}
390 	MPASS(idx >= 0);
391 	MPASS(idx < VMEM_MAXORDER);
392 
393 	return &vm->vm_freelist[idx];
394 }
395 
396 /* ---- boundary tag hash */
397 
398 static struct vmem_hashlist *
399 bt_hashhead(vmem_t *vm, vmem_addr_t addr)
400 {
401 	struct vmem_hashlist *list;
402 	unsigned int hash;
403 
404 	hash = hash32_buf(&addr, sizeof(addr), 0);
405 	list = &vm->vm_hashlist[hash % vm->vm_hashsize];
406 
407 	return list;
408 }
409 
410 static bt_t *
411 bt_lookupbusy(vmem_t *vm, vmem_addr_t addr)
412 {
413 	struct vmem_hashlist *list;
414 	bt_t *bt;
415 
416 	VMEM_ASSERT_LOCKED(vm);
417 	list = bt_hashhead(vm, addr);
418 	LIST_FOREACH(bt, list, bt_hashlist) {
419 		if (bt->bt_start == addr) {
420 			break;
421 		}
422 	}
423 
424 	return bt;
425 }
426 
427 static void
428 bt_rembusy(vmem_t *vm, bt_t *bt)
429 {
430 
431 	VMEM_ASSERT_LOCKED(vm);
432 	MPASS(vm->vm_nbusytag > 0);
433 	vm->vm_inuse -= bt->bt_size;
434 	vm->vm_nbusytag--;
435 	LIST_REMOVE(bt, bt_hashlist);
436 }
437 
438 static void
439 bt_insbusy(vmem_t *vm, bt_t *bt)
440 {
441 	struct vmem_hashlist *list;
442 
443 	VMEM_ASSERT_LOCKED(vm);
444 	MPASS(bt->bt_type == BT_TYPE_BUSY);
445 
446 	list = bt_hashhead(vm, bt->bt_start);
447 	LIST_INSERT_HEAD(list, bt, bt_hashlist);
448 	vm->vm_nbusytag++;
449 	vm->vm_inuse += bt->bt_size;
450 }
451 
452 /* ---- boundary tag list */
453 
454 static void
455 bt_remseg(vmem_t *vm, bt_t *bt)
456 {
457 
458 	TAILQ_REMOVE(&vm->vm_seglist, bt, bt_seglist);
459 	bt_free(vm, bt);
460 }
461 
462 static void
463 bt_insseg(vmem_t *vm, bt_t *bt, bt_t *prev)
464 {
465 
466 	TAILQ_INSERT_AFTER(&vm->vm_seglist, prev, bt, bt_seglist);
467 }
468 
469 static void
470 bt_insseg_tail(vmem_t *vm, bt_t *bt)
471 {
472 
473 	TAILQ_INSERT_TAIL(&vm->vm_seglist, bt, bt_seglist);
474 }
475 
476 static void
477 bt_remfree(vmem_t *vm, bt_t *bt)
478 {
479 
480 	MPASS(bt->bt_type == BT_TYPE_FREE);
481 
482 	LIST_REMOVE(bt, bt_freelist);
483 }
484 
485 static void
486 bt_insfree(vmem_t *vm, bt_t *bt)
487 {
488 	struct vmem_freelist *list;
489 
490 	list = bt_freehead_tofree(vm, bt->bt_size);
491 	LIST_INSERT_HEAD(list, bt, bt_freelist);
492 }
493 
494 /* ---- vmem internal functions */
495 
496 /*
497  * Import from the arena into the quantum cache in UMA.
498  */
499 static int
500 qc_import(void *arg, void **store, int cnt, int flags)
501 {
502 	qcache_t *qc;
503 	vmem_addr_t addr;
504 	int i;
505 
506 	qc = arg;
507 	if ((flags & VMEM_FITMASK) == 0)
508 		flags |= M_BESTFIT;
509 	for (i = 0; i < cnt; i++) {
510 		if (vmem_xalloc(qc->qc_vmem, qc->qc_size, 0, 0, 0,
511 		    VMEM_ADDR_MIN, VMEM_ADDR_MAX, flags, &addr) != 0)
512 			break;
513 		store[i] = (void *)addr;
514 		/* Only guarantee one allocation. */
515 		flags &= ~M_WAITOK;
516 		flags |= M_NOWAIT;
517 	}
518 	return i;
519 }
520 
521 /*
522  * Release memory from the UMA cache to the arena.
523  */
524 static void
525 qc_release(void *arg, void **store, int cnt)
526 {
527 	qcache_t *qc;
528 	int i;
529 
530 	qc = arg;
531 	for (i = 0; i < cnt; i++)
532 		vmem_xfree(qc->qc_vmem, (vmem_addr_t)store[i], qc->qc_size);
533 }
534 
535 static void
536 qc_init(vmem_t *vm, vmem_size_t qcache_max)
537 {
538 	qcache_t *qc;
539 	vmem_size_t size;
540 	int qcache_idx_max;
541 	int i;
542 
543 	MPASS((qcache_max & vm->vm_quantum_mask) == 0);
544 	qcache_idx_max = MIN(qcache_max >> vm->vm_quantum_shift,
545 	    VMEM_QCACHE_IDX_MAX);
546 	vm->vm_qcache_max = qcache_idx_max << vm->vm_quantum_shift;
547 	for (i = 0; i < qcache_idx_max; i++) {
548 		qc = &vm->vm_qcache[i];
549 		size = (i + 1) << vm->vm_quantum_shift;
550 		snprintf(qc->qc_name, sizeof(qc->qc_name), "%s-%zu",
551 		    vm->vm_name, size);
552 		qc->qc_vmem = vm;
553 		qc->qc_size = size;
554 		qc->qc_cache = uma_zcache_create(qc->qc_name, size,
555 		    NULL, NULL, NULL, NULL, qc_import, qc_release, qc,
556 		    UMA_ZONE_VM);
557 		MPASS(qc->qc_cache);
558 	}
559 }
560 
561 static void
562 qc_destroy(vmem_t *vm)
563 {
564 	int qcache_idx_max;
565 	int i;
566 
567 	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
568 	for (i = 0; i < qcache_idx_max; i++)
569 		uma_zdestroy(vm->vm_qcache[i].qc_cache);
570 }
571 
572 static void
573 qc_drain(vmem_t *vm)
574 {
575 	int qcache_idx_max;
576 	int i;
577 
578 	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
579 	for (i = 0; i < qcache_idx_max; i++)
580 		zone_drain(vm->vm_qcache[i].qc_cache);
581 }
582 
583 #ifndef UMA_MD_SMALL_ALLOC
584 
585 static struct mtx_padalign __exclusive_cache_line vmem_bt_lock;
586 
587 /*
588  * vmem_bt_alloc:  Allocate a new page of boundary tags.
589  *
590  * On architectures with uma_small_alloc there is no recursion; no address
591  * space need be allocated to allocate boundary tags.  For the others, we
592  * must handle recursion.  Boundary tags are necessary to allocate new
593  * boundary tags.
594  *
595  * UMA guarantees that enough tags are held in reserve to allocate a new
596  * page of kva.  We dip into this reserve by specifying M_USE_RESERVE only
597  * when allocating the page to hold new boundary tags.  In this way the
598  * reserve is automatically filled by the allocation that uses the reserve.
599  *
600  * We still have to guarantee that the new tags are allocated atomically since
601  * many threads may try concurrently.  The bt_lock provides this guarantee.
602  * We convert WAITOK allocations to NOWAIT and then handle the blocking here
603  * on failure.  It's ok to return NULL for a WAITOK allocation as UMA will
604  * loop again after checking to see if we lost the race to allocate.
605  *
606  * There is a small race between vmem_bt_alloc() returning the page and the
607  * zone lock being acquired to add the page to the zone.  For WAITOK
608  * allocations we just pause briefly.  NOWAIT may experience a transient
609  * failure.  To alleviate this we permit a small number of simultaneous
610  * fills to proceed concurrently so NOWAIT is less likely to fail unless
611  * we are really out of KVA.
612  */
613 static void *
614 vmem_bt_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *pflag, int wait)
615 {
616 	vmem_addr_t addr;
617 
618 	*pflag = UMA_SLAB_KMEM;
619 
620 	/*
621 	 * Single thread boundary tag allocation so that the address space
622 	 * and memory are added in one atomic operation.
623 	 */
624 	mtx_lock(&vmem_bt_lock);
625 	if (vmem_xalloc(kmem_arena, bytes, 0, 0, 0, VMEM_ADDR_MIN,
626 	    VMEM_ADDR_MAX, M_NOWAIT | M_NOVM | M_USE_RESERVE | M_BESTFIT,
627 	    &addr) == 0) {
628 		if (kmem_back(kmem_object, addr, bytes,
629 		    M_NOWAIT | M_USE_RESERVE) == 0) {
630 			mtx_unlock(&vmem_bt_lock);
631 			return ((void *)addr);
632 		}
633 		vmem_xfree(kmem_arena, addr, bytes);
634 		mtx_unlock(&vmem_bt_lock);
635 		/*
636 		 * Out of memory, not address space.  This may not even be
637 		 * possible due to M_USE_RESERVE page allocation.
638 		 */
639 		if (wait & M_WAITOK)
640 			VM_WAIT;
641 		return (NULL);
642 	}
643 	mtx_unlock(&vmem_bt_lock);
644 	/*
645 	 * We're either out of address space or lost a fill race.
646 	 */
647 	if (wait & M_WAITOK)
648 		pause("btalloc", 1);
649 
650 	return (NULL);
651 }
652 #endif
653 
654 void
655 vmem_startup(void)
656 {
657 
658 	mtx_init(&vmem_list_lock, "vmem list lock", NULL, MTX_DEF);
659 	vmem_bt_zone = uma_zcreate("vmem btag",
660 	    sizeof(struct vmem_btag), NULL, NULL, NULL, NULL,
661 	    UMA_ALIGN_PTR, UMA_ZONE_VM);
662 #ifndef UMA_MD_SMALL_ALLOC
663 	mtx_init(&vmem_bt_lock, "btag lock", NULL, MTX_DEF);
664 	uma_prealloc(vmem_bt_zone, BT_MAXALLOC);
665 	/*
666 	 * Reserve enough tags to allocate new tags.  We allow multiple
667 	 * CPUs to attempt to allocate new tags concurrently to limit
668 	 * false restarts in UMA.
669 	 */
670 	uma_zone_reserve(vmem_bt_zone, BT_MAXALLOC * (mp_ncpus + 1) / 2);
671 	uma_zone_set_allocf(vmem_bt_zone, vmem_bt_alloc);
672 #endif
673 }
674 
675 /* ---- rehash */
676 
677 static int
678 vmem_rehash(vmem_t *vm, vmem_size_t newhashsize)
679 {
680 	bt_t *bt;
681 	int i;
682 	struct vmem_hashlist *newhashlist;
683 	struct vmem_hashlist *oldhashlist;
684 	vmem_size_t oldhashsize;
685 
686 	MPASS(newhashsize > 0);
687 
688 	newhashlist = malloc(sizeof(struct vmem_hashlist) * newhashsize,
689 	    M_VMEM, M_NOWAIT);
690 	if (newhashlist == NULL)
691 		return ENOMEM;
692 	for (i = 0; i < newhashsize; i++) {
693 		LIST_INIT(&newhashlist[i]);
694 	}
695 
696 	VMEM_LOCK(vm);
697 	oldhashlist = vm->vm_hashlist;
698 	oldhashsize = vm->vm_hashsize;
699 	vm->vm_hashlist = newhashlist;
700 	vm->vm_hashsize = newhashsize;
701 	if (oldhashlist == NULL) {
702 		VMEM_UNLOCK(vm);
703 		return 0;
704 	}
705 	for (i = 0; i < oldhashsize; i++) {
706 		while ((bt = LIST_FIRST(&oldhashlist[i])) != NULL) {
707 			bt_rembusy(vm, bt);
708 			bt_insbusy(vm, bt);
709 		}
710 	}
711 	VMEM_UNLOCK(vm);
712 
713 	if (oldhashlist != vm->vm_hash0) {
714 		free(oldhashlist, M_VMEM);
715 	}
716 
717 	return 0;
718 }
719 
720 static void
721 vmem_periodic_kick(void *dummy)
722 {
723 
724 	taskqueue_enqueue(taskqueue_thread, &vmem_periodic_wk);
725 }
726 
727 static void
728 vmem_periodic(void *unused, int pending)
729 {
730 	vmem_t *vm;
731 	vmem_size_t desired;
732 	vmem_size_t current;
733 
734 	mtx_lock(&vmem_list_lock);
735 	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
736 #ifdef DIAGNOSTIC
737 		/* Convenient time to verify vmem state. */
738 		if (enable_vmem_check == 1) {
739 			VMEM_LOCK(vm);
740 			vmem_check(vm);
741 			VMEM_UNLOCK(vm);
742 		}
743 #endif
744 		desired = 1 << flsl(vm->vm_nbusytag);
745 		desired = MIN(MAX(desired, VMEM_HASHSIZE_MIN),
746 		    VMEM_HASHSIZE_MAX);
747 		current = vm->vm_hashsize;
748 
749 		/* Grow in powers of two.  Shrink less aggressively. */
750 		if (desired >= current * 2 || desired * 4 <= current)
751 			vmem_rehash(vm, desired);
752 
753 		/*
754 		 * Periodically wake up threads waiting for resources,
755 		 * so that they can ask for reclamation again.
756 		 */
757 		VMEM_CONDVAR_BROADCAST(vm);
758 	}
759 	mtx_unlock(&vmem_list_lock);
760 
761 	callout_reset(&vmem_periodic_ch, vmem_periodic_interval,
762 	    vmem_periodic_kick, NULL);
763 }
764 
765 static void
766 vmem_start_callout(void *unused)
767 {
768 
769 	TASK_INIT(&vmem_periodic_wk, 0, vmem_periodic, NULL);
770 	vmem_periodic_interval = hz * 10;
771 	callout_init(&vmem_periodic_ch, 1);
772 	callout_reset(&vmem_periodic_ch, vmem_periodic_interval,
773 	    vmem_periodic_kick, NULL);
774 }
775 SYSINIT(vfs, SI_SUB_CONFIGURE, SI_ORDER_ANY, vmem_start_callout, NULL);
776 
777 static void
778 vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int type)
779 {
780 	bt_t *btspan;
781 	bt_t *btfree;
782 
783 	MPASS(type == BT_TYPE_SPAN || type == BT_TYPE_SPAN_STATIC);
784 	MPASS((size & vm->vm_quantum_mask) == 0);
785 
786 	btspan = bt_alloc(vm);
787 	btspan->bt_type = type;
788 	btspan->bt_start = addr;
789 	btspan->bt_size = size;
790 	bt_insseg_tail(vm, btspan);
791 
792 	btfree = bt_alloc(vm);
793 	btfree->bt_type = BT_TYPE_FREE;
794 	btfree->bt_start = addr;
795 	btfree->bt_size = size;
796 	bt_insseg(vm, btfree, btspan);
797 	bt_insfree(vm, btfree);
798 
799 	vm->vm_size += size;
800 }
801 
802 static void
803 vmem_destroy1(vmem_t *vm)
804 {
805 	bt_t *bt;
806 
807 	/*
808 	 * Drain per-cpu quantum caches.
809 	 */
810 	qc_destroy(vm);
811 
812 	/*
813 	 * The vmem should now only contain empty segments.
814 	 */
815 	VMEM_LOCK(vm);
816 	MPASS(vm->vm_nbusytag == 0);
817 
818 	while ((bt = TAILQ_FIRST(&vm->vm_seglist)) != NULL)
819 		bt_remseg(vm, bt);
820 
821 	if (vm->vm_hashlist != NULL && vm->vm_hashlist != vm->vm_hash0)
822 		free(vm->vm_hashlist, M_VMEM);
823 
824 	bt_freetrim(vm, 0);
825 
826 	VMEM_CONDVAR_DESTROY(vm);
827 	VMEM_LOCK_DESTROY(vm);
828 	free(vm, M_VMEM);
829 }
830 
831 static int
832 vmem_import(vmem_t *vm, vmem_size_t size, vmem_size_t align, int flags)
833 {
834 	vmem_addr_t addr;
835 	int error;
836 
837 	if (vm->vm_importfn == NULL)
838 		return EINVAL;
839 
840 	/*
841 	 * To make sure we get a span that meets the alignment, we request
842 	 * twice the alignment plus the size.  This slightly overestimates.
843 	 */
844 	if (align != vm->vm_quantum_mask + 1)
845 		size = (align * 2) + size;
846 	size = roundup(size, vm->vm_import_quantum);
847 
848 	/*
849 	 * Hide MAXALLOC tags so we're guaranteed to be able to add this
850 	 * span and the tag we want to allocate from it.
851 	 */
852 	MPASS(vm->vm_nfreetags >= BT_MAXALLOC);
853 	vm->vm_nfreetags -= BT_MAXALLOC;
854 	VMEM_UNLOCK(vm);
855 	error = (vm->vm_importfn)(vm->vm_arg, size, flags, &addr);
856 	VMEM_LOCK(vm);
857 	vm->vm_nfreetags += BT_MAXALLOC;
858 	if (error)
859 		return ENOMEM;
860 
861 	vmem_add1(vm, addr, size, BT_TYPE_SPAN);
862 
863 	return 0;
864 }
865 
866 /*
867  * vmem_fit: check if a bt can satisfy the given restrictions.
868  *
869  * It is the caller's responsibility to ensure the region is big enough
870  * before calling us.
871  */
872 static int
873 vmem_fit(const bt_t *bt, vmem_size_t size, vmem_size_t align,
874     vmem_size_t phase, vmem_size_t nocross, vmem_addr_t minaddr,
875     vmem_addr_t maxaddr, vmem_addr_t *addrp)
876 {
877 	vmem_addr_t start;
878 	vmem_addr_t end;
879 
880 	MPASS(size > 0);
881 	MPASS(bt->bt_size >= size); /* caller's responsibility */
882 
883 	/*
884 	 * XXX assumption: vmem_addr_t and vmem_size_t are
885 	 * unsigned integers of the same size.
886 	 */
887 
888 	start = bt->bt_start;
889 	if (start < minaddr) {
890 		start = minaddr;
891 	}
892 	end = BT_END(bt);
893 	if (end > maxaddr)
894 		end = maxaddr;
895 	if (start > end)
896 		return (ENOMEM);
897 
898 	start = VMEM_ALIGNUP(start - phase, align) + phase;
899 	if (start < bt->bt_start)
900 		start += align;
901 	if (VMEM_CROSS_P(start, start + size - 1, nocross)) {
902 		MPASS(align < nocross);
903 		start = VMEM_ALIGNUP(start - phase, nocross) + phase;
904 	}
905 	if (start <= end && end - start >= size - 1) {
906 		MPASS((start & (align - 1)) == phase);
907 		MPASS(!VMEM_CROSS_P(start, start + size - 1, nocross));
908 		MPASS(minaddr <= start);
909 		MPASS(maxaddr == 0 || start + size - 1 <= maxaddr);
910 		MPASS(bt->bt_start <= start);
911 		MPASS(BT_END(bt) - start >= size - 1);
912 		*addrp = start;
913 
914 		return (0);
915 	}
916 	return (ENOMEM);
917 }
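/*
 * Worked example of the fit computation above, with illustrative values
 * and a quantum that divides them: a free tag spanning [0x1000, 0x2fff],
 * size 0x800, align 0x400, phase 0x100 and no minaddr/maxaddr/nocross
 * restriction.  The candidate start is
 * VMEM_ALIGNUP(0x1000 - 0x100, 0x400) + 0x100 == 0x1100, which satisfies
 * (start & (align - 1)) == phase and leaves [0x1100, 0x18ff] inside the
 * tag, so vmem_fit() stores 0x1100 in *addrp and returns 0.
 */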
918 
919 /*
920  * vmem_clip:  Trim the boundary tag edges to the requested start and size.
921  */
922 static void
923 vmem_clip(vmem_t *vm, bt_t *bt, vmem_addr_t start, vmem_size_t size)
924 {
925 	bt_t *btnew;
926 	bt_t *btprev;
927 
928 	VMEM_ASSERT_LOCKED(vm);
929 	MPASS(bt->bt_type == BT_TYPE_FREE);
930 	MPASS(bt->bt_size >= size);
931 	bt_remfree(vm, bt);
932 	if (bt->bt_start != start) {
933 		btprev = bt_alloc(vm);
934 		btprev->bt_type = BT_TYPE_FREE;
935 		btprev->bt_start = bt->bt_start;
936 		btprev->bt_size = start - bt->bt_start;
937 		bt->bt_start = start;
938 		bt->bt_size -= btprev->bt_size;
939 		bt_insfree(vm, btprev);
940 		bt_insseg(vm, btprev,
941 		    TAILQ_PREV(bt, vmem_seglist, bt_seglist));
942 	}
943 	MPASS(bt->bt_start == start);
944 	if (bt->bt_size != size && bt->bt_size - size > vm->vm_quantum_mask) {
945 		/* split */
946 		btnew = bt_alloc(vm);
947 		btnew->bt_type = BT_TYPE_BUSY;
948 		btnew->bt_start = bt->bt_start;
949 		btnew->bt_size = size;
950 		bt->bt_start = bt->bt_start + size;
951 		bt->bt_size -= size;
952 		bt_insfree(vm, bt);
953 		bt_insseg(vm, btnew,
954 		    TAILQ_PREV(bt, vmem_seglist, bt_seglist));
955 		bt_insbusy(vm, btnew);
956 		bt = btnew;
957 	} else {
958 		bt->bt_type = BT_TYPE_BUSY;
959 		bt_insbusy(vm, bt);
960 	}
961 	MPASS(bt->bt_size >= size);
962 	bt->bt_type = BT_TYPE_BUSY;
963 }
964 
965 /* ---- vmem API */
966 
967 void
968 vmem_set_import(vmem_t *vm, vmem_import_t *importfn,
969      vmem_release_t *releasefn, void *arg, vmem_size_t import_quantum)
970 {
971 
972 	VMEM_LOCK(vm);
973 	vm->vm_importfn = importfn;
974 	vm->vm_releasefn = releasefn;
975 	vm->vm_arg = arg;
976 	vm->vm_import_quantum = import_quantum;
977 	VMEM_UNLOCK(vm);
978 }
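/*
 * Illustrative sketch with hypothetical names: a sub-arena that grows by
 * importing spans from a parent arena and hands them back once they are
 * completely free.  The callback shapes mirror how vm_importfn and
 * vm_releasefn are invoked in vmem_import() and vmem_xfree().
 *
 *	static int
 *	sub_import(void *arg, vmem_size_t size, int flags, vmem_addr_t *addrp)
 *	{
 *
 *		return (vmem_xalloc(arg, size, 0, 0, 0, VMEM_ADDR_MIN,
 *		    VMEM_ADDR_MAX, flags, addrp));
 *	}
 *
 *	static void
 *	sub_release(void *arg, vmem_addr_t addr, vmem_size_t size)
 *	{
 *
 *		vmem_xfree(arg, addr, size);
 *	}
 *
 *	vmem_set_import(sub_arena, sub_import, sub_release, parent_arena,
 *	    SUB_IMPORT_QUANTUM);
 */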
979 
980 void
981 vmem_set_reclaim(vmem_t *vm, vmem_reclaim_t *reclaimfn)
982 {
983 
984 	VMEM_LOCK(vm);
985 	vm->vm_reclaimfn = reclaimfn;
986 	VMEM_UNLOCK(vm);
987 }
988 
989 /*
990  * vmem_init: Initializes vmem arena.
991  */
992 vmem_t *
993 vmem_init(vmem_t *vm, const char *name, vmem_addr_t base, vmem_size_t size,
994     vmem_size_t quantum, vmem_size_t qcache_max, int flags)
995 {
996 	int i;
997 
998 	MPASS(quantum > 0);
999 	MPASS((quantum & (quantum - 1)) == 0);
1000 
1001 	bzero(vm, sizeof(*vm));
1002 
1003 	VMEM_CONDVAR_INIT(vm, name);
1004 	VMEM_LOCK_INIT(vm, name);
1005 	vm->vm_nfreetags = 0;
1006 	LIST_INIT(&vm->vm_freetags);
1007 	strlcpy(vm->vm_name, name, sizeof(vm->vm_name));
1008 	vm->vm_quantum_mask = quantum - 1;
1009 	vm->vm_quantum_shift = flsl(quantum) - 1;
1010 	vm->vm_nbusytag = 0;
1011 	vm->vm_size = 0;
1012 	vm->vm_inuse = 0;
1013 	qc_init(vm, qcache_max);
1014 
1015 	TAILQ_INIT(&vm->vm_seglist);
1016 	for (i = 0; i < VMEM_MAXORDER; i++) {
1017 		LIST_INIT(&vm->vm_freelist[i]);
1018 	}
1019 	memset(&vm->vm_hash0, 0, sizeof(vm->vm_hash0));
1020 	vm->vm_hashsize = VMEM_HASHSIZE_MIN;
1021 	vm->vm_hashlist = vm->vm_hash0;
1022 
1023 	if (size != 0) {
1024 		if (vmem_add(vm, base, size, flags) != 0) {
1025 			vmem_destroy1(vm);
1026 			return NULL;
1027 		}
1028 	}
1029 
1030 	mtx_lock(&vmem_list_lock);
1031 	LIST_INSERT_HEAD(&vmem_list, vm, vm_alllist);
1032 	mtx_unlock(&vmem_list_lock);
1033 
1034 	return vm;
1035 }
1036 
1037 /*
1038  * vmem_create: create an arena.
1039  */
1040 vmem_t *
1041 vmem_create(const char *name, vmem_addr_t base, vmem_size_t size,
1042     vmem_size_t quantum, vmem_size_t qcache_max, int flags)
1043 {
1044 
1045 	vmem_t *vm;
1046 
1047 	vm = malloc(sizeof(*vm), M_VMEM, flags & (M_WAITOK|M_NOWAIT));
1048 	if (vm == NULL)
1049 		return (NULL);
1050 	if (vmem_init(vm, name, base, size, quantum, qcache_max,
1051 	    flags) == NULL)
1052 		return (NULL);
1053 	return (vm);
1054 }
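/*
 * Minimal usage sketch with hypothetical names and sizes: create an arena
 * covering [0x100000, 0x1fffff] with a PAGE_SIZE quantum and no quantum
 * cache, allocate and free a chunk, then tear the arena down.
 *
 *	vmem_t *arena;
 *	vmem_addr_t addr;
 *
 *	arena = vmem_create("example", 0x100000, 0x100000, PAGE_SIZE, 0,
 *	    M_WAITOK);
 *	if (vmem_alloc(arena, 4 * PAGE_SIZE, M_BESTFIT | M_WAITOK, &addr) == 0)
 *		vmem_free(arena, addr, 4 * PAGE_SIZE);
 *	vmem_destroy(arena);
 */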
1055 
1056 void
1057 vmem_destroy(vmem_t *vm)
1058 {
1059 
1060 	mtx_lock(&vmem_list_lock);
1061 	LIST_REMOVE(vm, vm_alllist);
1062 	mtx_unlock(&vmem_list_lock);
1063 
1064 	vmem_destroy1(vm);
1065 }
1066 
1067 vmem_size_t
1068 vmem_roundup_size(vmem_t *vm, vmem_size_t size)
1069 {
1070 
1071 	return (size + vm->vm_quantum_mask) & ~vm->vm_quantum_mask;
1072 }
1073 
1074 /*
1075  * vmem_alloc: allocate resource from the arena.
1076  */
1077 int
1078 vmem_alloc(vmem_t *vm, vmem_size_t size, int flags, vmem_addr_t *addrp)
1079 {
1080 	const int strat __unused = flags & VMEM_FITMASK;
1081 	qcache_t *qc;
1082 
1083 	flags &= VMEM_FLAGS;
1084 	MPASS(size > 0);
1085 	MPASS(strat == M_BESTFIT || strat == M_FIRSTFIT);
1086 	if ((flags & M_NOWAIT) == 0)
1087 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "vmem_alloc");
1088 
1089 	if (size <= vm->vm_qcache_max) {
1090 		qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift];
1091 		*addrp = (vmem_addr_t)uma_zalloc(qc->qc_cache, flags);
1092 		if (*addrp == 0)
1093 			return (ENOMEM);
1094 		return (0);
1095 	}
1096 
1097 	return vmem_xalloc(vm, size, 0, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
1098 	    flags, addrp);
1099 }
1100 
1101 int
1102 vmem_xalloc(vmem_t *vm, const vmem_size_t size0, vmem_size_t align,
1103     const vmem_size_t phase, const vmem_size_t nocross,
1104     const vmem_addr_t minaddr, const vmem_addr_t maxaddr, int flags,
1105     vmem_addr_t *addrp)
1106 {
1107 	const vmem_size_t size = vmem_roundup_size(vm, size0);
1108 	struct vmem_freelist *list;
1109 	struct vmem_freelist *first;
1110 	struct vmem_freelist *end;
1111 	vmem_size_t avail;
1112 	bt_t *bt;
1113 	int error;
1114 	int strat;
1115 
1116 	flags &= VMEM_FLAGS;
1117 	strat = flags & VMEM_FITMASK;
1118 	MPASS(size0 > 0);
1119 	MPASS(size > 0);
1120 	MPASS(strat == M_BESTFIT || strat == M_FIRSTFIT);
1121 	MPASS((flags & (M_NOWAIT|M_WAITOK)) != (M_NOWAIT|M_WAITOK));
1122 	if ((flags & M_NOWAIT) == 0)
1123 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "vmem_xalloc");
1124 	MPASS((align & vm->vm_quantum_mask) == 0);
1125 	MPASS((align & (align - 1)) == 0);
1126 	MPASS((phase & vm->vm_quantum_mask) == 0);
1127 	MPASS((nocross & vm->vm_quantum_mask) == 0);
1128 	MPASS((nocross & (nocross - 1)) == 0);
1129 	MPASS((align == 0 && phase == 0) || phase < align);
1130 	MPASS(nocross == 0 || nocross >= size);
1131 	MPASS(minaddr <= maxaddr);
1132 	MPASS(!VMEM_CROSS_P(phase, phase + size - 1, nocross));
1133 
1134 	if (align == 0)
1135 		align = vm->vm_quantum_mask + 1;
1136 
1137 	*addrp = 0;
1138 	end = &vm->vm_freelist[VMEM_MAXORDER];
1139 	/*
1140 	 * choose a free block from which we allocate.
1141 	 */
1142 	first = bt_freehead_toalloc(vm, size, strat);
1143 	VMEM_LOCK(vm);
1144 	for (;;) {
1145 		/*
1146 		 * Make sure we have enough tags to complete the
1147 		 * operation.
1148 		 */
1149 		if (vm->vm_nfreetags < BT_MAXALLOC &&
1150 		    bt_fill(vm, flags) != 0) {
1151 			error = ENOMEM;
1152 			break;
1153 		}
1154 		/*
1155 	 	 * Scan freelists looking for a tag that satisfies the
1156 		 * allocation.  If we're doing BESTFIT we may encounter
1157 		 * sizes below the request.  If we're doing FIRSTFIT we
1158 		 * inspect only the first element from each list.
1159 		 */
1160 		for (list = first; list < end; list++) {
1161 			LIST_FOREACH(bt, list, bt_freelist) {
1162 				if (bt->bt_size >= size) {
1163 					error = vmem_fit(bt, size, align, phase,
1164 					    nocross, minaddr, maxaddr, addrp);
1165 					if (error == 0) {
1166 						vmem_clip(vm, bt, *addrp, size);
1167 						goto out;
1168 					}
1169 				}
1170 				/* FIRST skips to the next list. */
1171 				if (strat == M_FIRSTFIT)
1172 					break;
1173 			}
1174 		}
1175 		/*
1176 		 * Retry if the fast algorithm failed.
1177 		 */
1178 		if (strat == M_FIRSTFIT) {
1179 			strat = M_BESTFIT;
1180 			first = bt_freehead_toalloc(vm, size, strat);
1181 			continue;
1182 		}
1183 		/*
1184 		 * XXX it is possible to fail to meet restrictions with the
1185 		 * imported region.  It is up to the user to specify the
1186 		 * import quantum such that it can satisfy any allocation.
1187 		 */
1188 		if (vmem_import(vm, size, align, flags) == 0)
1189 			continue;
1190 
1191 		/*
1192 		 * Try to free some space from the quantum cache or reclaim
1193 		 * functions if available.
1194 		 */
1195 		if (vm->vm_qcache_max != 0 || vm->vm_reclaimfn != NULL) {
1196 			avail = vm->vm_size - vm->vm_inuse;
1197 			VMEM_UNLOCK(vm);
1198 			if (vm->vm_qcache_max != 0)
1199 				qc_drain(vm);
1200 			if (vm->vm_reclaimfn != NULL)
1201 				vm->vm_reclaimfn(vm, flags);
1202 			VMEM_LOCK(vm);
1203 			/* If we were successful retry even NOWAIT. */
1204 			if (vm->vm_size - vm->vm_inuse > avail)
1205 				continue;
1206 		}
1207 		if ((flags & M_NOWAIT) != 0) {
1208 			error = ENOMEM;
1209 			break;
1210 		}
1211 		VMEM_CONDVAR_WAIT(vm);
1212 	}
1213 out:
1214 	VMEM_UNLOCK(vm);
1215 	if (error != 0 && (flags & M_NOWAIT) == 0)
1216 		panic("failed to allocate waiting allocation\n");
1217 
1218 	return (error);
1219 }
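/*
 * Constrained allocation sketch with hypothetical values, given some
 * vmem_t *arena whose quantum divides them: request 8KB that is
 * 8KB-aligned, does not cross a 1MB boundary and lies at or below
 * 0xffffffff.
 *
 *	vmem_addr_t addr;
 *	int error;
 *
 *	error = vmem_xalloc(arena, 8 * 1024, 8 * 1024, 0, 1024 * 1024,
 *	    VMEM_ADDR_MIN, 0xffffffffUL, M_BESTFIT | M_NOWAIT, &addr);
 */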
1220 
1221 /*
1222  * vmem_free: free the resource to the arena.
1223  */
1224 void
1225 vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
1226 {
1227 	qcache_t *qc;
1228 	MPASS(size > 0);
1229 
1230 	if (size <= vm->vm_qcache_max) {
1231 		qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift];
1232 		uma_zfree(qc->qc_cache, (void *)addr);
1233 	} else
1234 		vmem_xfree(vm, addr, size);
1235 }
1236 
1237 void
1238 vmem_xfree(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
1239 {
1240 	bt_t *bt;
1241 	bt_t *t;
1242 
1243 	MPASS(size > 0);
1244 
1245 	VMEM_LOCK(vm);
1246 	bt = bt_lookupbusy(vm, addr);
1247 	MPASS(bt != NULL);
1248 	MPASS(bt->bt_start == addr);
1249 	MPASS(bt->bt_size == vmem_roundup_size(vm, size) ||
1250 	    bt->bt_size - vmem_roundup_size(vm, size) <= vm->vm_quantum_mask);
1251 	MPASS(bt->bt_type == BT_TYPE_BUSY);
1252 	bt_rembusy(vm, bt);
1253 	bt->bt_type = BT_TYPE_FREE;
1254 
1255 	/* coalesce */
1256 	t = TAILQ_NEXT(bt, bt_seglist);
1257 	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
1258 		MPASS(BT_END(bt) < t->bt_start);	/* YYY */
1259 		bt->bt_size += t->bt_size;
1260 		bt_remfree(vm, t);
1261 		bt_remseg(vm, t);
1262 	}
1263 	t = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
1264 	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
1265 		MPASS(BT_END(t) < bt->bt_start);	/* YYY */
1266 		bt->bt_size += t->bt_size;
1267 		bt->bt_start = t->bt_start;
1268 		bt_remfree(vm, t);
1269 		bt_remseg(vm, t);
1270 	}
1271 
1272 	t = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
1273 	MPASS(t != NULL);
1274 	MPASS(BT_ISSPAN_P(t) || t->bt_type == BT_TYPE_BUSY);
1275 	if (vm->vm_releasefn != NULL && t->bt_type == BT_TYPE_SPAN &&
1276 	    t->bt_size == bt->bt_size) {
1277 		vmem_addr_t spanaddr;
1278 		vmem_size_t spansize;
1279 
1280 		MPASS(t->bt_start == bt->bt_start);
1281 		spanaddr = bt->bt_start;
1282 		spansize = bt->bt_size;
1283 		bt_remseg(vm, bt);
1284 		bt_remseg(vm, t);
1285 		vm->vm_size -= spansize;
1286 		VMEM_CONDVAR_BROADCAST(vm);
1287 		bt_freetrim(vm, BT_MAXFREE);
1288 		(*vm->vm_releasefn)(vm->vm_arg, spanaddr, spansize);
1289 	} else {
1290 		bt_insfree(vm, bt);
1291 		VMEM_CONDVAR_BROADCAST(vm);
1292 		bt_freetrim(vm, BT_MAXFREE);
1293 	}
1294 }
1295 
1296 /*
1297  * vmem_add: add a span of address space to the arena.  The span is
1298  * tagged BT_TYPE_SPAN_STATIC and is never returned via vm_releasefn.
1299  */
1300 int
1301 vmem_add(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int flags)
1302 {
1303 	int error;
1304 
1305 	error = 0;
1306 	flags &= VMEM_FLAGS;
1307 	VMEM_LOCK(vm);
1308 	if (vm->vm_nfreetags >= BT_MAXALLOC || bt_fill(vm, flags) == 0)
1309 		vmem_add1(vm, addr, size, BT_TYPE_SPAN_STATIC);
1310 	else
1311 		error = ENOMEM;
1312 	VMEM_UNLOCK(vm);
1313 
1314 	return (error);
1315 }
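/*
 * Sketch with hypothetical values: grow an existing arena by another
 * static 1MB span starting at 0x200000.
 *
 *	error = vmem_add(arena, 0x200000, 0x100000, M_WAITOK);
 */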
1316 
1317 /*
1318  * vmem_size: report arena usage according to the given type mask.
1319  */
1320 vmem_size_t
1321 vmem_size(vmem_t *vm, int typemask)
1322 {
1323 	int i;
1324 
1325 	switch (typemask) {
1326 	case VMEM_ALLOC:
1327 		return vm->vm_inuse;
1328 	case VMEM_FREE:
1329 		return vm->vm_size - vm->vm_inuse;
1330 	case VMEM_FREE|VMEM_ALLOC:
1331 		return vm->vm_size;
1332 	case VMEM_MAXFREE:
1333 		VMEM_LOCK(vm);
1334 		for (i = VMEM_MAXORDER - 1; i >= 0; i--) {
1335 			if (LIST_EMPTY(&vm->vm_freelist[i]))
1336 				continue;
1337 			VMEM_UNLOCK(vm);
1338 			return ((vmem_size_t)ORDER2SIZE(i) <<
1339 			    vm->vm_quantum_shift);
1340 		}
1341 		VMEM_UNLOCK(vm);
1342 		return (0);
1343 	default:
1344 		panic("vmem_size");
1345 	}
1346 }
1347 
1348 /* ---- debug */
1349 
1350 #if defined(DDB) || defined(DIAGNOSTIC)
1351 
1352 static void bt_dump(const bt_t *, int (*)(const char *, ...)
1353     __printflike(1, 2));
1354 
1355 static const char *
1356 bt_type_string(int type)
1357 {
1358 
1359 	switch (type) {
1360 	case BT_TYPE_BUSY:
1361 		return "busy";
1362 	case BT_TYPE_FREE:
1363 		return "free";
1364 	case BT_TYPE_SPAN:
1365 		return "span";
1366 	case BT_TYPE_SPAN_STATIC:
1367 		return "static span";
1368 	default:
1369 		break;
1370 	}
1371 	return "BOGUS";
1372 }
1373 
1374 static void
1375 bt_dump(const bt_t *bt, int (*pr)(const char *, ...))
1376 {
1377 
1378 	(*pr)("\t%p: %jx %jx, %d(%s)\n",
1379 	    bt, (intmax_t)bt->bt_start, (intmax_t)bt->bt_size,
1380 	    bt->bt_type, bt_type_string(bt->bt_type));
1381 }
1382 
1383 static void
1384 vmem_dump(const vmem_t *vm , int (*pr)(const char *, ...) __printflike(1, 2))
1385 {
1386 	const bt_t *bt;
1387 	int i;
1388 
1389 	(*pr)("vmem %p '%s'\n", vm, vm->vm_name);
1390 	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1391 		bt_dump(bt, pr);
1392 	}
1393 
1394 	for (i = 0; i < VMEM_MAXORDER; i++) {
1395 		const struct vmem_freelist *fl = &vm->vm_freelist[i];
1396 
1397 		if (LIST_EMPTY(fl)) {
1398 			continue;
1399 		}
1400 
1401 		(*pr)("freelist[%d]\n", i);
1402 		LIST_FOREACH(bt, fl, bt_freelist) {
1403 			bt_dump(bt, pr);
1404 		}
1405 	}
1406 }
1407 
1408 #endif /* defined(DDB) || defined(DIAGNOSTIC) */
1409 
1410 #if defined(DDB)
1411 #include <ddb/ddb.h>
1412 
1413 static bt_t *
1414 vmem_whatis_lookup(vmem_t *vm, vmem_addr_t addr)
1415 {
1416 	bt_t *bt;
1417 
1418 	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1419 		if (BT_ISSPAN_P(bt)) {
1420 			continue;
1421 		}
1422 		if (bt->bt_start <= addr && addr <= BT_END(bt)) {
1423 			return bt;
1424 		}
1425 	}
1426 
1427 	return NULL;
1428 }
1429 
1430 void
1431 vmem_whatis(vmem_addr_t addr, int (*pr)(const char *, ...))
1432 {
1433 	vmem_t *vm;
1434 
1435 	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
1436 		bt_t *bt;
1437 
1438 		bt = vmem_whatis_lookup(vm, addr);
1439 		if (bt == NULL) {
1440 			continue;
1441 		}
1442 		(*pr)("%p is %p+%zu in VMEM '%s' (%s)\n",
1443 		    (void *)addr, (void *)bt->bt_start,
1444 		    (vmem_size_t)(addr - bt->bt_start), vm->vm_name,
1445 		    (bt->bt_type == BT_TYPE_BUSY) ? "allocated" : "free");
1446 	}
1447 }
1448 
1449 void
1450 vmem_printall(const char *modif, int (*pr)(const char *, ...))
1451 {
1452 	const vmem_t *vm;
1453 
1454 	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
1455 		vmem_dump(vm, pr);
1456 	}
1457 }
1458 
1459 void
1460 vmem_print(vmem_addr_t addr, const char *modif, int (*pr)(const char *, ...))
1461 {
1462 	const vmem_t *vm = (const void *)addr;
1463 
1464 	vmem_dump(vm, pr);
1465 }
1466 
1467 DB_SHOW_COMMAND(vmemdump, vmemdump)
1468 {
1469 
1470 	if (!have_addr) {
1471 		db_printf("usage: show vmemdump <addr>\n");
1472 		return;
1473 	}
1474 
1475 	vmem_dump((const vmem_t *)addr, db_printf);
1476 }
1477 
1478 DB_SHOW_ALL_COMMAND(vmemdump, vmemdumpall)
1479 {
1480 	const vmem_t *vm;
1481 
1482 	LIST_FOREACH(vm, &vmem_list, vm_alllist)
1483 		vmem_dump(vm, db_printf);
1484 }
1485 
1486 DB_SHOW_COMMAND(vmem, vmem_summ)
1487 {
1488 	const vmem_t *vm = (const void *)addr;
1489 	const bt_t *bt;
1490 	size_t ft[VMEM_MAXORDER], ut[VMEM_MAXORDER];
1491 	size_t fs[VMEM_MAXORDER], us[VMEM_MAXORDER];
1492 	int ord;
1493 
1494 	if (!have_addr) {
1495 		db_printf("usage: show vmem <addr>\n");
1496 		return;
1497 	}
1498 
1499 	db_printf("vmem %p '%s'\n", vm, vm->vm_name);
1500 	db_printf("\tquantum:\t%zu\n", vm->vm_quantum_mask + 1);
1501 	db_printf("\tsize:\t%zu\n", vm->vm_size);
1502 	db_printf("\tinuse:\t%zu\n", vm->vm_inuse);
1503 	db_printf("\tfree:\t%zu\n", vm->vm_size - vm->vm_inuse);
1504 	db_printf("\tbusy tags:\t%d\n", vm->vm_nbusytag);
1505 	db_printf("\tfree tags:\t%d\n", vm->vm_nfreetags);
1506 
1507 	memset(&ft, 0, sizeof(ft));
1508 	memset(&ut, 0, sizeof(ut));
1509 	memset(&fs, 0, sizeof(fs));
1510 	memset(&us, 0, sizeof(us));
1511 	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1512 		ord = SIZE2ORDER(bt->bt_size >> vm->vm_quantum_shift);
1513 		if (bt->bt_type == BT_TYPE_BUSY) {
1514 			ut[ord]++;
1515 			us[ord] += bt->bt_size;
1516 		} else if (bt->bt_type == BT_TYPE_FREE) {
1517 			ft[ord]++;
1518 			fs[ord] += bt->bt_size;
1519 		}
1520 	}
1521 	db_printf("\t\t\tinuse\tsize\t\tfree\tsize\n");
1522 	for (ord = 0; ord < VMEM_MAXORDER; ord++) {
1523 		if (ut[ord] == 0 && ft[ord] == 0)
1524 			continue;
1525 		db_printf("\t%-15zu %zu\t%-15zu %zu\t%-16zu\n",
1526 		    ORDER2SIZE(ord) << vm->vm_quantum_shift,
1527 		    ut[ord], us[ord], ft[ord], fs[ord]);
1528 	}
1529 }
1530 
1531 DB_SHOW_ALL_COMMAND(vmem, vmem_summall)
1532 {
1533 	const vmem_t *vm;
1534 
1535 	LIST_FOREACH(vm, &vmem_list, vm_alllist)
1536 		vmem_summ((db_expr_t)vm, TRUE, count, modif);
1537 }
1538 #endif /* defined(DDB) */
1539 
1540 #define vmem_printf printf
1541 
1542 #if defined(DIAGNOSTIC)
1543 
1544 static bool
1545 vmem_check_sanity(vmem_t *vm)
1546 {
1547 	const bt_t *bt, *bt2;
1548 
1549 	MPASS(vm != NULL);
1550 
1551 	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1552 		if (bt->bt_start > BT_END(bt)) {
1553 			printf("corrupted tag\n");
1554 			bt_dump(bt, vmem_printf);
1555 			return false;
1556 		}
1557 	}
1558 	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1559 		TAILQ_FOREACH(bt2, &vm->vm_seglist, bt_seglist) {
1560 			if (bt == bt2) {
1561 				continue;
1562 			}
1563 			if (BT_ISSPAN_P(bt) != BT_ISSPAN_P(bt2)) {
1564 				continue;
1565 			}
1566 			if (bt->bt_start <= BT_END(bt2) &&
1567 			    bt2->bt_start <= BT_END(bt)) {
1568 				printf("overlapping tags\n");
1569 				bt_dump(bt, vmem_printf);
1570 				bt_dump(bt2, vmem_printf);
1571 				return false;
1572 			}
1573 		}
1574 	}
1575 
1576 	return true;
1577 }
1578 
1579 static void
1580 vmem_check(vmem_t *vm)
1581 {
1582 
1583 	if (!vmem_check_sanity(vm)) {
1584 		panic("insanity vmem %p", vm);
1585 	}
1586 }
1587 
1588 #endif /* defined(DIAGNOSTIC) */
1589