xref: /freebsd/sys/kern/subr_vmem.c (revision 4b50c451720d8b427757a6da1dd2bb4c52cd9e35)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c)2006,2007,2008,2009 YAMAMOTO Takashi,
5  * Copyright (c) 2013 EMC Corp.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 
30 /*
31  * From:
32  *	$NetBSD: vmem_impl.h,v 1.2 2013/01/29 21:26:24 para Exp $
33  *	$NetBSD: subr_vmem.c,v 1.83 2013/03/06 11:20:10 yamt Exp $
34  */
35 
36 /*
37  * reference:
38  * -	Magazines and Vmem: Extending the Slab Allocator
39  *	to Many CPUs and Arbitrary Resources
40  *	http://www.usenix.org/event/usenix01/bonwick.html
41  */
42 
43 #include <sys/cdefs.h>
44 __FBSDID("$FreeBSD$");
45 
46 #include "opt_ddb.h"
47 
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/kernel.h>
51 #include <sys/queue.h>
52 #include <sys/callout.h>
53 #include <sys/hash.h>
54 #include <sys/lock.h>
55 #include <sys/malloc.h>
56 #include <sys/mutex.h>
57 #include <sys/smp.h>
58 #include <sys/condvar.h>
59 #include <sys/sysctl.h>
60 #include <sys/taskqueue.h>
61 #include <sys/vmem.h>
62 #include <sys/vmmeter.h>
63 
64 #include "opt_vm.h"
65 
66 #include <vm/uma.h>
67 #include <vm/vm.h>
68 #include <vm/pmap.h>
69 #include <vm/vm_map.h>
70 #include <vm/vm_object.h>
71 #include <vm/vm_kern.h>
72 #include <vm/vm_extern.h>
73 #include <vm/vm_param.h>
74 #include <vm/vm_page.h>
75 #include <vm/vm_pageout.h>
76 #include <vm/vm_phys.h>
77 #include <vm/vm_pagequeue.h>
78 #include <vm/uma_int.h>
79 
80 int	vmem_startup_count(void);
81 
82 #define	VMEM_OPTORDER		5
83 #define	VMEM_OPTVALUE		(1 << VMEM_OPTORDER)
84 #define	VMEM_MAXORDER						\
85     (VMEM_OPTVALUE - 1 + sizeof(vmem_size_t) * NBBY - VMEM_OPTORDER)
86 
87 #define	VMEM_HASHSIZE_MIN	16
88 #define	VMEM_HASHSIZE_MAX	131072
89 
90 #define	VMEM_QCACHE_IDX_MAX	16
91 
92 #define	VMEM_FITMASK	(M_BESTFIT | M_FIRSTFIT | M_NEXTFIT)
93 
94 #define	VMEM_FLAGS	(M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_NOVM |	\
95     M_BESTFIT | M_FIRSTFIT | M_NEXTFIT)
96 
97 #define	BT_FLAGS	(M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_NOVM)
98 
99 #define	QC_NAME_MAX	16
100 
101 /*
102  * Data structures private to vmem.
103  */
104 MALLOC_DEFINE(M_VMEM, "vmem", "vmem internal structures");
105 
106 typedef struct vmem_btag bt_t;
107 
108 TAILQ_HEAD(vmem_seglist, vmem_btag);
109 LIST_HEAD(vmem_freelist, vmem_btag);
110 LIST_HEAD(vmem_hashlist, vmem_btag);
111 
112 struct qcache {
113 	uma_zone_t	qc_cache;
114 	vmem_t 		*qc_vmem;
115 	vmem_size_t	qc_size;
116 	char		qc_name[QC_NAME_MAX];
117 };
118 typedef struct qcache qcache_t;
119 #define	QC_POOL_TO_QCACHE(pool)	((qcache_t *)(pool->pr_qcache))
120 
121 #define	VMEM_NAME_MAX	16
122 
123 /* boundary tag */
124 struct vmem_btag {
125 	TAILQ_ENTRY(vmem_btag) bt_seglist;
126 	union {
127 		LIST_ENTRY(vmem_btag) u_freelist; /* BT_TYPE_FREE */
128 		LIST_ENTRY(vmem_btag) u_hashlist; /* BT_TYPE_BUSY */
129 	} bt_u;
130 #define	bt_hashlist	bt_u.u_hashlist
131 #define	bt_freelist	bt_u.u_freelist
132 	vmem_addr_t	bt_start;
133 	vmem_size_t	bt_size;
134 	int		bt_type;
135 };
136 
137 /* vmem arena */
138 struct vmem {
139 	struct mtx_padalign	vm_lock;
140 	struct cv		vm_cv;
141 	char			vm_name[VMEM_NAME_MAX+1];
142 	LIST_ENTRY(vmem)	vm_alllist;
143 	struct vmem_hashlist	vm_hash0[VMEM_HASHSIZE_MIN];
144 	struct vmem_freelist	vm_freelist[VMEM_MAXORDER];
145 	struct vmem_seglist	vm_seglist;
146 	struct vmem_hashlist	*vm_hashlist;
147 	vmem_size_t		vm_hashsize;
148 
149 	/* Constant after init */
150 	vmem_size_t		vm_qcache_max;
151 	vmem_size_t		vm_quantum_mask;
152 	vmem_size_t		vm_import_quantum;
153 	int			vm_quantum_shift;
154 
155 	/* Written on alloc/free */
156 	LIST_HEAD(, vmem_btag)	vm_freetags;
157 	int			vm_nfreetags;
158 	int			vm_nbusytag;
159 	vmem_size_t		vm_inuse;
160 	vmem_size_t		vm_size;
161 	vmem_size_t		vm_limit;
162 	struct vmem_btag	vm_cursor;
163 
164 	/* Used on import. */
165 	vmem_import_t		*vm_importfn;
166 	vmem_release_t		*vm_releasefn;
167 	void			*vm_arg;
168 
169 	/* Space exhaustion callback. */
170 	vmem_reclaim_t		*vm_reclaimfn;
171 
172 	/* quantum cache */
173 	qcache_t		vm_qcache[VMEM_QCACHE_IDX_MAX];
174 };
175 
176 #define	BT_TYPE_SPAN		1	/* Allocated from importfn */
177 #define	BT_TYPE_SPAN_STATIC	2	/* vmem_add() or create. */
178 #define	BT_TYPE_FREE		3	/* Available space. */
179 #define	BT_TYPE_BUSY		4	/* Used space. */
180 #define	BT_TYPE_CURSOR		5	/* Cursor for nextfit allocations. */
181 #define	BT_ISSPAN_P(bt)	((bt)->bt_type <= BT_TYPE_SPAN_STATIC)
182 
183 #define	BT_END(bt)	((bt)->bt_start + (bt)->bt_size - 1)
184 
185 #if defined(DIAGNOSTIC)
186 static int enable_vmem_check = 1;
187 SYSCTL_INT(_debug, OID_AUTO, vmem_check, CTLFLAG_RWTUN,
188     &enable_vmem_check, 0, "Enable vmem check");
189 static void vmem_check(vmem_t *);
190 #endif
191 
192 static struct callout	vmem_periodic_ch;
193 static int		vmem_periodic_interval;
194 static struct task	vmem_periodic_wk;
195 
196 static struct mtx_padalign __exclusive_cache_line vmem_list_lock;
197 static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
198 static uma_zone_t vmem_zone;
199 
200 /* ---- misc */
201 #define	VMEM_CONDVAR_INIT(vm, wchan)	cv_init(&vm->vm_cv, wchan)
202 #define	VMEM_CONDVAR_DESTROY(vm)	cv_destroy(&vm->vm_cv)
203 #define	VMEM_CONDVAR_WAIT(vm)		cv_wait(&vm->vm_cv, &vm->vm_lock)
204 #define	VMEM_CONDVAR_BROADCAST(vm)	cv_broadcast(&vm->vm_cv)
205 
206 
207 #define	VMEM_LOCK(vm)		mtx_lock(&vm->vm_lock)
208 #define	VMEM_TRYLOCK(vm)	mtx_trylock(&vm->vm_lock)
209 #define	VMEM_UNLOCK(vm)		mtx_unlock(&vm->vm_lock)
210 #define	VMEM_LOCK_INIT(vm, name) mtx_init(&vm->vm_lock, (name), NULL, MTX_DEF)
211 #define	VMEM_LOCK_DESTROY(vm)	mtx_destroy(&vm->vm_lock)
212 #define	VMEM_ASSERT_LOCKED(vm)	mtx_assert(&vm->vm_lock, MA_OWNED);
213 
214 #define	VMEM_ALIGNUP(addr, align)	(-(-(addr) & -(align)))
215 
216 #define	VMEM_CROSS_P(addr1, addr2, boundary) \
217 	((((addr1) ^ (addr2)) & -(boundary)) != 0)
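
/*
 * Worked example (illustrative values): with a power-of-two alignment,
 * VMEM_ALIGNUP rounds an address up to the next aligned value, and
 * VMEM_CROSS_P tests whether a range crosses a power-of-two boundary:
 *
 *	VMEM_ALIGNUP(0x1234, 0x1000) == 0x2000
 *	VMEM_CROSS_P(0x0fff, 0x1000, 0x1000) != 0	(crosses 0x1000)
 *	VMEM_CROSS_P(0x1000, 0x1fff, 0x1000) == 0	(stays below 0x2000)
 */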
218 
219 #define	ORDER2SIZE(order)	((order) < VMEM_OPTVALUE ? ((order) + 1) : \
220     (vmem_size_t)1 << ((order) - (VMEM_OPTVALUE - VMEM_OPTORDER - 1)))
221 #define	SIZE2ORDER(size)	((size) <= VMEM_OPTVALUE ? ((size) - 1) : \
222     (flsl(size) + (VMEM_OPTVALUE - VMEM_OPTORDER - 2)))
223 
224 /*
225  * Maximum number of boundary tags that may be required to satisfy an
226  * allocation.  Two may be required to import.  Another two may be
227  * required to clip edges.
228  */
229 #define	BT_MAXALLOC	4
230 
231 /*
232  * BT_MAXFREE limits the number of locally cached boundary tags.  We
233  * just want to avoid hitting the zone allocator for every call.
234  */
235 #define BT_MAXFREE	(BT_MAXALLOC * 8)
236 
237 /* Allocator for boundary tags. */
238 static uma_zone_t vmem_bt_zone;
239 
240 /* boot time arena storage. */
241 static struct vmem kernel_arena_storage;
242 static struct vmem buffer_arena_storage;
243 static struct vmem transient_arena_storage;
244 /* kernel and kmem arenas are aliased for backwards KPI compat. */
245 vmem_t *kernel_arena = &kernel_arena_storage;
246 vmem_t *kmem_arena = &kernel_arena_storage;
247 vmem_t *buffer_arena = &buffer_arena_storage;
248 vmem_t *transient_arena = &transient_arena_storage;
249 
250 #ifdef DEBUG_MEMGUARD
251 static struct vmem memguard_arena_storage;
252 vmem_t *memguard_arena = &memguard_arena_storage;
253 #endif
254 
255 /*
256  * Fill the vmem's boundary tag cache.  We guarantee that boundary tag
257  * allocation will not fail once bt_fill() passes.  To do so we cache
258  * at least the maximum possible tag allocations in the arena.
259  */
260 static int
261 bt_fill(vmem_t *vm, int flags)
262 {
263 	bt_t *bt;
264 
265 	VMEM_ASSERT_LOCKED(vm);
266 
267 	/*
268 	 * Only allow the kernel arena and arenas derived from kernel arena to
269 	 * dip into reserve tags.  They are where new tags come from.
270 	 */
271 	flags &= BT_FLAGS;
272 	if (vm != kernel_arena && vm->vm_arg != kernel_arena)
273 		flags &= ~M_USE_RESERVE;
274 
275 	/*
276 	 * Loop until we meet the reserve.  To minimize the lock shuffle
277 	 * and prevent simultaneous fills we first try a NOWAIT regardless
278 	 * of the caller's flags.  Specify M_NOVM so we don't recurse while
279 	 * holding a vmem lock.
280 	 */
281 	while (vm->vm_nfreetags < BT_MAXALLOC) {
282 		bt = uma_zalloc(vmem_bt_zone,
283 		    (flags & M_USE_RESERVE) | M_NOWAIT | M_NOVM);
284 		if (bt == NULL) {
285 			VMEM_UNLOCK(vm);
286 			bt = uma_zalloc(vmem_bt_zone, flags);
287 			VMEM_LOCK(vm);
288 			if (bt == NULL)
289 				break;
290 		}
291 		LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
292 		vm->vm_nfreetags++;
293 	}
294 
295 	if (vm->vm_nfreetags < BT_MAXALLOC)
296 		return ENOMEM;
297 
298 	return 0;
299 }
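
/*
 * Illustrative usage: callers refill the tag cache before consuming tags,
 * as in the pattern used by vmem_add() and vmem_xalloc() below:
 *
 *	if (vm->vm_nfreetags < BT_MAXALLOC && bt_fill(vm, flags) != 0)
 *		return (ENOMEM);
 */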
300 
301 /*
302  * Pop a tag off of the freetag stack.
303  */
304 static bt_t *
305 bt_alloc(vmem_t *vm)
306 {
307 	bt_t *bt;
308 
309 	VMEM_ASSERT_LOCKED(vm);
310 	bt = LIST_FIRST(&vm->vm_freetags);
311 	MPASS(bt != NULL);
312 	LIST_REMOVE(bt, bt_freelist);
313 	vm->vm_nfreetags--;
314 
315 	return bt;
316 }
317 
318 /*
319  * Trim the per-vmem free list.  Returns with the lock released to
320  * avoid allocator recursions.
321  */
322 static void
323 bt_freetrim(vmem_t *vm, int freelimit)
324 {
325 	LIST_HEAD(, vmem_btag) freetags;
326 	bt_t *bt;
327 
328 	LIST_INIT(&freetags);
329 	VMEM_ASSERT_LOCKED(vm);
330 	while (vm->vm_nfreetags > freelimit) {
331 		bt = LIST_FIRST(&vm->vm_freetags);
332 		LIST_REMOVE(bt, bt_freelist);
333 		vm->vm_nfreetags--;
334 		LIST_INSERT_HEAD(&freetags, bt, bt_freelist);
335 	}
336 	VMEM_UNLOCK(vm);
337 	while ((bt = LIST_FIRST(&freetags)) != NULL) {
338 		LIST_REMOVE(bt, bt_freelist);
339 		uma_zfree(vmem_bt_zone, bt);
340 	}
341 }
342 
343 static inline void
344 bt_free(vmem_t *vm, bt_t *bt)
345 {
346 
347 	VMEM_ASSERT_LOCKED(vm);
348 	MPASS(LIST_FIRST(&vm->vm_freetags) != bt);
349 	LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
350 	vm->vm_nfreetags++;
351 }
352 
353 /*
354  * freelist[0] ... [1, 1]
355  * freelist[1] ... [2, 2]
356  *  :
357  * freelist[29] ... [30, 30]
358  * freelist[30] ... [31, 31]
359  * freelist[31] ... [32, 63]
360  * freelist[32] ... [64, 127]
361  *  :
362  * freelist[n] ... [(1 << (n - 26)), (1 << (n - 25)) - 1]
363  *  :
364  */
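
/*
 * Worked examples for the mapping above, with VMEM_OPTORDER == 5
 * (VMEM_OPTVALUE == 32) as defined earlier; sizes are in quanta:
 *
 *	SIZE2ORDER(1)  == 0		ORDER2SIZE(0)  == 1
 *	SIZE2ORDER(32) == 31		ORDER2SIZE(31) == 32
 *	SIZE2ORDER(33) == 31		(freelist[31] covers [32, 63])
 *	SIZE2ORDER(64) == 32		ORDER2SIZE(32) == 64
 */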
365 
366 static struct vmem_freelist *
367 bt_freehead_tofree(vmem_t *vm, vmem_size_t size)
368 {
369 	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
370 	const int idx = SIZE2ORDER(qsize);
371 
372 	MPASS(size != 0 && qsize != 0);
373 	MPASS((size & vm->vm_quantum_mask) == 0);
374 	MPASS(idx >= 0);
375 	MPASS(idx < VMEM_MAXORDER);
376 
377 	return &vm->vm_freelist[idx];
378 }
379 
380 /*
381  * bt_freehead_toalloc: return the freelist for the given size and allocation
382  * strategy.
383  *
384  * For M_FIRSTFIT, return the list in which every block is guaranteed to
385  * be large enough for the requested size.  Otherwise, return the list
386  * that may contain blocks large enough for the requested size.
387  */
388 static struct vmem_freelist *
389 bt_freehead_toalloc(vmem_t *vm, vmem_size_t size, int strat)
390 {
391 	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
392 	int idx = SIZE2ORDER(qsize);
393 
394 	MPASS(size != 0 && qsize != 0);
395 	MPASS((size & vm->vm_quantum_mask) == 0);
396 
397 	if (strat == M_FIRSTFIT && ORDER2SIZE(idx) != qsize) {
398 		idx++;
399 		/* check too large request? */
400 	}
401 	MPASS(idx >= 0);
402 	MPASS(idx < VMEM_MAXORDER);
403 
404 	return &vm->vm_freelist[idx];
405 }
406 
407 /* ---- boundary tag hash */
408 
409 static struct vmem_hashlist *
410 bt_hashhead(vmem_t *vm, vmem_addr_t addr)
411 {
412 	struct vmem_hashlist *list;
413 	unsigned int hash;
414 
415 	hash = hash32_buf(&addr, sizeof(addr), 0);
416 	list = &vm->vm_hashlist[hash % vm->vm_hashsize];
417 
418 	return list;
419 }
420 
421 static bt_t *
422 bt_lookupbusy(vmem_t *vm, vmem_addr_t addr)
423 {
424 	struct vmem_hashlist *list;
425 	bt_t *bt;
426 
427 	VMEM_ASSERT_LOCKED(vm);
428 	list = bt_hashhead(vm, addr);
429 	LIST_FOREACH(bt, list, bt_hashlist) {
430 		if (bt->bt_start == addr) {
431 			break;
432 		}
433 	}
434 
435 	return bt;
436 }
437 
438 static void
439 bt_rembusy(vmem_t *vm, bt_t *bt)
440 {
441 
442 	VMEM_ASSERT_LOCKED(vm);
443 	MPASS(vm->vm_nbusytag > 0);
444 	vm->vm_inuse -= bt->bt_size;
445 	vm->vm_nbusytag--;
446 	LIST_REMOVE(bt, bt_hashlist);
447 }
448 
449 static void
450 bt_insbusy(vmem_t *vm, bt_t *bt)
451 {
452 	struct vmem_hashlist *list;
453 
454 	VMEM_ASSERT_LOCKED(vm);
455 	MPASS(bt->bt_type == BT_TYPE_BUSY);
456 
457 	list = bt_hashhead(vm, bt->bt_start);
458 	LIST_INSERT_HEAD(list, bt, bt_hashlist);
459 	vm->vm_nbusytag++;
460 	vm->vm_inuse += bt->bt_size;
461 }
462 
463 /* ---- boundary tag list */
464 
465 static void
466 bt_remseg(vmem_t *vm, bt_t *bt)
467 {
468 
469 	MPASS(bt->bt_type != BT_TYPE_CURSOR);
470 	TAILQ_REMOVE(&vm->vm_seglist, bt, bt_seglist);
471 	bt_free(vm, bt);
472 }
473 
474 static void
475 bt_insseg(vmem_t *vm, bt_t *bt, bt_t *prev)
476 {
477 
478 	TAILQ_INSERT_AFTER(&vm->vm_seglist, prev, bt, bt_seglist);
479 }
480 
481 static void
482 bt_insseg_tail(vmem_t *vm, bt_t *bt)
483 {
484 
485 	TAILQ_INSERT_TAIL(&vm->vm_seglist, bt, bt_seglist);
486 }
487 
488 static void
489 bt_remfree(vmem_t *vm, bt_t *bt)
490 {
491 
492 	MPASS(bt->bt_type == BT_TYPE_FREE);
493 
494 	LIST_REMOVE(bt, bt_freelist);
495 }
496 
497 static void
498 bt_insfree(vmem_t *vm, bt_t *bt)
499 {
500 	struct vmem_freelist *list;
501 
502 	list = bt_freehead_tofree(vm, bt->bt_size);
503 	LIST_INSERT_HEAD(list, bt, bt_freelist);
504 }
505 
506 /* ---- vmem internal functions */
507 
508 /*
509  * Import from the arena into the quantum cache in UMA.
510  *
511  * We use VMEM_ADDR_QCACHE_MIN instead of 0: uma_zalloc() returns 0 to indicate
512  * failure, so UMA can't be used to cache a resource with value 0.
513  */
514 static int
515 qc_import(void *arg, void **store, int cnt, int domain, int flags)
516 {
517 	qcache_t *qc;
518 	vmem_addr_t addr;
519 	int i;
520 
521 	KASSERT((flags & M_WAITOK) == 0, ("blocking allocation"));
522 
523 	qc = arg;
524 	for (i = 0; i < cnt; i++) {
525 		if (vmem_xalloc(qc->qc_vmem, qc->qc_size, 0, 0, 0,
526 		    VMEM_ADDR_QCACHE_MIN, VMEM_ADDR_MAX, flags, &addr) != 0)
527 			break;
528 		store[i] = (void *)addr;
529 	}
530 	return (i);
531 }
532 
533 /*
534  * Release memory from the UMA cache to the arena.
535  */
536 static void
537 qc_release(void *arg, void **store, int cnt)
538 {
539 	qcache_t *qc;
540 	int i;
541 
542 	qc = arg;
543 	for (i = 0; i < cnt; i++)
544 		vmem_xfree(qc->qc_vmem, (vmem_addr_t)store[i], qc->qc_size);
545 }
546 
547 static void
548 qc_init(vmem_t *vm, vmem_size_t qcache_max)
549 {
550 	qcache_t *qc;
551 	vmem_size_t size;
552 	int qcache_idx_max;
553 	int i;
554 
555 	MPASS((qcache_max & vm->vm_quantum_mask) == 0);
556 	qcache_idx_max = MIN(qcache_max >> vm->vm_quantum_shift,
557 	    VMEM_QCACHE_IDX_MAX);
558 	vm->vm_qcache_max = qcache_idx_max << vm->vm_quantum_shift;
559 	for (i = 0; i < qcache_idx_max; i++) {
560 		qc = &vm->vm_qcache[i];
561 		size = (i + 1) << vm->vm_quantum_shift;
562 		snprintf(qc->qc_name, sizeof(qc->qc_name), "%s-%zu",
563 		    vm->vm_name, size);
564 		qc->qc_vmem = vm;
565 		qc->qc_size = size;
566 		qc->qc_cache = uma_zcache_create(qc->qc_name, size,
567 		    NULL, NULL, NULL, NULL, qc_import, qc_release, qc,
568 		    UMA_ZONE_VM);
569 		MPASS(qc->qc_cache);
570 	}
571 }
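
/*
 * Example (illustrative; the arena name is hypothetical and a 4 KB PAGE_SIZE
 * is assumed): for an arena "foo" with a PAGE_SIZE quantum and a qcache_max
 * of 4 * PAGE_SIZE, qc_init() creates the UMA caches "foo-4096" through
 * "foo-16384", and allocations of one to four pages are then served from
 * those per-CPU caches by vmem_alloc().
 */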
572 
573 static void
574 qc_destroy(vmem_t *vm)
575 {
576 	int qcache_idx_max;
577 	int i;
578 
579 	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
580 	for (i = 0; i < qcache_idx_max; i++)
581 		uma_zdestroy(vm->vm_qcache[i].qc_cache);
582 }
583 
584 static void
585 qc_drain(vmem_t *vm)
586 {
587 	int qcache_idx_max;
588 	int i;
589 
590 	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
591 	for (i = 0; i < qcache_idx_max; i++)
592 		uma_zone_reclaim(vm->vm_qcache[i].qc_cache, UMA_RECLAIM_DRAIN);
593 }
594 
595 #ifndef UMA_MD_SMALL_ALLOC
596 
597 static struct mtx_padalign __exclusive_cache_line vmem_bt_lock;
598 
599 /*
600  * vmem_bt_alloc:  Allocate a new page of boundary tags.
601  *
602  * On architectures with uma_small_alloc there is no recursion; no address
603  * space need be allocated to allocate boundary tags.  For the others, we
604  * must handle recursion.  Boundary tags are necessary to allocate new
605  * boundary tags.
606  *
607  * UMA guarantees that enough tags are held in reserve to allocate a new
608  * page of kva.  We dip into this reserve by specifying M_USE_RESERVE only
609  * when allocating the page to hold new boundary tags.  In this way the
610  * reserve is automatically filled by the allocation that uses the reserve.
611  *
612  * We still have to guarantee that the new tags are allocated atomically since
613  * many threads may try concurrently.  The bt_lock provides this guarantee.
614  * We convert WAITOK allocations to NOWAIT and then handle the blocking here
615  * on failure.  It's ok to return NULL for a WAITOK allocation as UMA will
616  * loop again after checking to see if we lost the race to allocate.
617  *
618  * There is a small race between vmem_bt_alloc() returning the page and the
619  * zone lock being acquired to add the page to the zone.  For WAITOK
620  * allocations we just pause briefly.  NOWAIT may experience a transient
621  * failure.  To alleviate this we permit a small number of simultaneous
622  * fills to proceed concurrently so NOWAIT is less likely to fail unless
623  * we are really out of KVA.
624  */
625 static void *
626 vmem_bt_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
627     int wait)
628 {
629 	vmem_addr_t addr;
630 
631 	*pflag = UMA_SLAB_KERNEL;
632 
633 	/*
634 	 * Single thread boundary tag allocation so that the address space
635 	 * and memory are added in one atomic operation.
636 	 */
637 	mtx_lock(&vmem_bt_lock);
638 	if (vmem_xalloc(vm_dom[domain].vmd_kernel_arena, bytes, 0, 0, 0,
639 	    VMEM_ADDR_MIN, VMEM_ADDR_MAX,
640 	    M_NOWAIT | M_NOVM | M_USE_RESERVE | M_BESTFIT, &addr) == 0) {
641 		if (kmem_back_domain(domain, kernel_object, addr, bytes,
642 		    M_NOWAIT | M_USE_RESERVE) == 0) {
643 			mtx_unlock(&vmem_bt_lock);
644 			return ((void *)addr);
645 		}
646 		vmem_xfree(vm_dom[domain].vmd_kernel_arena, addr, bytes);
647 		mtx_unlock(&vmem_bt_lock);
648 		/*
649 		 * Out of memory, not address space.  This may not even be
650 		 * possible due to M_USE_RESERVE page allocation.
651 		 */
652 		if (wait & M_WAITOK)
653 			vm_wait_domain(domain);
654 		return (NULL);
655 	}
656 	mtx_unlock(&vmem_bt_lock);
657 	/*
658 	 * We're either out of address space or lost a fill race.
659 	 */
660 	if (wait & M_WAITOK)
661 		pause("btalloc", 1);
662 
663 	return (NULL);
664 }
665 
666 /*
667  * How many pages do we need to bootstrap the boundary tag zone (startup_alloc()).
668  */
669 int
670 vmem_startup_count(void)
671 {
672 
673 	return (howmany(BT_MAXALLOC, slab_ipers(sizeof(struct vmem_btag),
674 	    UMA_ALIGN_PTR)));
675 }
676 #endif
677 
678 void
679 vmem_startup(void)
680 {
681 
682 	mtx_init(&vmem_list_lock, "vmem list lock", NULL, MTX_DEF);
683 	vmem_zone = uma_zcreate("vmem",
684 	    sizeof(struct vmem), NULL, NULL, NULL, NULL,
685 	    UMA_ALIGN_PTR, UMA_ZONE_VM);
686 	vmem_bt_zone = uma_zcreate("vmem btag",
687 	    sizeof(struct vmem_btag), NULL, NULL, NULL, NULL,
688 	    UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
689 #ifndef UMA_MD_SMALL_ALLOC
690 	mtx_init(&vmem_bt_lock, "btag lock", NULL, MTX_DEF);
691 	uma_prealloc(vmem_bt_zone, BT_MAXALLOC);
692 	/*
693 	 * Reserve enough tags to allocate new tags.  We allow multiple
694 	 * CPUs to attempt to allocate new tags concurrently to limit
695 	 * false restarts in UMA.  vmem_bt_alloc() allocates from a per-domain
696 	 * arena, which may involve importing a range from the kernel arena,
697 	 * so we need to keep at least 2 * BT_MAXALLOC tags reserved.
698 	 */
699 	uma_zone_reserve(vmem_bt_zone, 2 * BT_MAXALLOC * mp_ncpus);
700 	uma_zone_set_allocf(vmem_bt_zone, vmem_bt_alloc);
701 #endif
702 }
703 
704 /* ---- rehash */
705 
706 static int
707 vmem_rehash(vmem_t *vm, vmem_size_t newhashsize)
708 {
709 	bt_t *bt;
710 	int i;
711 	struct vmem_hashlist *newhashlist;
712 	struct vmem_hashlist *oldhashlist;
713 	vmem_size_t oldhashsize;
714 
715 	MPASS(newhashsize > 0);
716 
717 	newhashlist = malloc(sizeof(struct vmem_hashlist) * newhashsize,
718 	    M_VMEM, M_NOWAIT);
719 	if (newhashlist == NULL)
720 		return ENOMEM;
721 	for (i = 0; i < newhashsize; i++) {
722 		LIST_INIT(&newhashlist[i]);
723 	}
724 
725 	VMEM_LOCK(vm);
726 	oldhashlist = vm->vm_hashlist;
727 	oldhashsize = vm->vm_hashsize;
728 	vm->vm_hashlist = newhashlist;
729 	vm->vm_hashsize = newhashsize;
730 	if (oldhashlist == NULL) {
731 		VMEM_UNLOCK(vm);
732 		return 0;
733 	}
734 	for (i = 0; i < oldhashsize; i++) {
735 		while ((bt = LIST_FIRST(&oldhashlist[i])) != NULL) {
736 			bt_rembusy(vm, bt);
737 			bt_insbusy(vm, bt);
738 		}
739 	}
740 	VMEM_UNLOCK(vm);
741 
742 	if (oldhashlist != vm->vm_hash0) {
743 		free(oldhashlist, M_VMEM);
744 	}
745 
746 	return 0;
747 }
748 
749 static void
750 vmem_periodic_kick(void *dummy)
751 {
752 
753 	taskqueue_enqueue(taskqueue_thread, &vmem_periodic_wk);
754 }
755 
756 static void
757 vmem_periodic(void *unused, int pending)
758 {
759 	vmem_t *vm;
760 	vmem_size_t desired;
761 	vmem_size_t current;
762 
763 	mtx_lock(&vmem_list_lock);
764 	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
765 #ifdef DIAGNOSTIC
766 		/* Convenient time to verify vmem state. */
767 		if (enable_vmem_check == 1) {
768 			VMEM_LOCK(vm);
769 			vmem_check(vm);
770 			VMEM_UNLOCK(vm);
771 		}
772 #endif
773 		desired = 1 << flsl(vm->vm_nbusytag);
774 		desired = MIN(MAX(desired, VMEM_HASHSIZE_MIN),
775 		    VMEM_HASHSIZE_MAX);
776 		current = vm->vm_hashsize;
777 
778 		/* Grow in powers of two.  Shrink less aggressively. */
779 		if (desired >= current * 2 || desired * 4 <= current)
780 			vmem_rehash(vm, desired);
781 
782 		/*
783 		 * Periodically wake up threads waiting for resources,
784 		 * so they could ask for reclamation again.
785 		 */
786 		VMEM_CONDVAR_BROADCAST(vm);
787 	}
788 	mtx_unlock(&vmem_list_lock);
789 
790 	callout_reset(&vmem_periodic_ch, vmem_periodic_interval,
791 	    vmem_periodic_kick, NULL);
792 }
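
/*
 * For example (illustrative numbers): with 1000 busy tags, desired becomes
 * 1 << flsl(1000) == 1024, so a hash table of 512 or fewer buckets is grown
 * and one of 4096 or more buckets is shrunk; anything in between is left
 * alone to avoid rehashing on every small change.
 */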
793 
794 static void
795 vmem_start_callout(void *unused)
796 {
797 
798 	TASK_INIT(&vmem_periodic_wk, 0, vmem_periodic, NULL);
799 	vmem_periodic_interval = hz * 10;
800 	callout_init(&vmem_periodic_ch, 1);
801 	callout_reset(&vmem_periodic_ch, vmem_periodic_interval,
802 	    vmem_periodic_kick, NULL);
803 }
804 SYSINIT(vfs, SI_SUB_CONFIGURE, SI_ORDER_ANY, vmem_start_callout, NULL);
805 
806 static void
807 vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int type)
808 {
809 	bt_t *btspan;
810 	bt_t *btfree;
811 
812 	MPASS(type == BT_TYPE_SPAN || type == BT_TYPE_SPAN_STATIC);
813 	MPASS((size & vm->vm_quantum_mask) == 0);
814 
815 	btspan = bt_alloc(vm);
816 	btspan->bt_type = type;
817 	btspan->bt_start = addr;
818 	btspan->bt_size = size;
819 	bt_insseg_tail(vm, btspan);
820 
821 	btfree = bt_alloc(vm);
822 	btfree->bt_type = BT_TYPE_FREE;
823 	btfree->bt_start = addr;
824 	btfree->bt_size = size;
825 	bt_insseg(vm, btfree, btspan);
826 	bt_insfree(vm, btfree);
827 
828 	vm->vm_size += size;
829 }
830 
831 static void
832 vmem_destroy1(vmem_t *vm)
833 {
834 	bt_t *bt;
835 
836 	/*
837 	 * Drain per-cpu quantum caches.
838 	 */
839 	qc_destroy(vm);
840 
841 	/*
842 	 * The vmem should now only contain empty segments.
843 	 */
844 	VMEM_LOCK(vm);
845 	MPASS(vm->vm_nbusytag == 0);
846 
847 	TAILQ_REMOVE(&vm->vm_seglist, &vm->vm_cursor, bt_seglist);
848 	while ((bt = TAILQ_FIRST(&vm->vm_seglist)) != NULL)
849 		bt_remseg(vm, bt);
850 
851 	if (vm->vm_hashlist != NULL && vm->vm_hashlist != vm->vm_hash0)
852 		free(vm->vm_hashlist, M_VMEM);
853 
854 	bt_freetrim(vm, 0);
855 
856 	VMEM_CONDVAR_DESTROY(vm);
857 	VMEM_LOCK_DESTROY(vm);
858 	uma_zfree(vmem_zone, vm);
859 }
860 
861 static int
862 vmem_import(vmem_t *vm, vmem_size_t size, vmem_size_t align, int flags)
863 {
864 	vmem_addr_t addr;
865 	int error;
866 
867 	if (vm->vm_importfn == NULL)
868 		return (EINVAL);
869 
870 	/*
871 	 * To make sure we get a span that meets the alignment we double it
872 	 * and add the size to the tail.  This slightly overestimates.
873 	 */
874 	if (align != vm->vm_quantum_mask + 1)
875 		size = (align * 2) + size;
876 	size = roundup(size, vm->vm_import_quantum);
877 
878 	if (vm->vm_limit != 0 && vm->vm_limit < vm->vm_size + size)
879 		return (ENOMEM);
880 
881 	/*
882 	 * Hide MAXALLOC tags so we're guaranteed to be able to add this
883 	 * span and the tag we want to allocate from it.
884 	 */
885 	MPASS(vm->vm_nfreetags >= BT_MAXALLOC);
886 	vm->vm_nfreetags -= BT_MAXALLOC;
887 	VMEM_UNLOCK(vm);
888 	error = (vm->vm_importfn)(vm->vm_arg, size, flags, &addr);
889 	VMEM_LOCK(vm);
890 	vm->vm_nfreetags += BT_MAXALLOC;
891 	if (error)
892 		return (ENOMEM);
893 
894 	vmem_add1(vm, addr, size, BT_TYPE_SPAN);
895 
896 	return 0;
897 }
898 
899 /*
900  * vmem_fit: check if a bt can satisfy the given restrictions.
901  *
902  * It is the caller's responsibility to ensure the region is big enough
903  * before calling us.
904  */
905 static int
906 vmem_fit(const bt_t *bt, vmem_size_t size, vmem_size_t align,
907     vmem_size_t phase, vmem_size_t nocross, vmem_addr_t minaddr,
908     vmem_addr_t maxaddr, vmem_addr_t *addrp)
909 {
910 	vmem_addr_t start;
911 	vmem_addr_t end;
912 
913 	MPASS(size > 0);
914 	MPASS(bt->bt_size >= size); /* caller's responsibility */
915 
916 	/*
917 	 * XXX assumption: vmem_addr_t and vmem_size_t are
918  * unsigned integers of the same size.
919 	 */
920 
921 	start = bt->bt_start;
922 	if (start < minaddr) {
923 		start = minaddr;
924 	}
925 	end = BT_END(bt);
926 	if (end > maxaddr)
927 		end = maxaddr;
928 	if (start > end)
929 		return (ENOMEM);
930 
931 	start = VMEM_ALIGNUP(start - phase, align) + phase;
932 	if (start < bt->bt_start)
933 		start += align;
934 	if (VMEM_CROSS_P(start, start + size - 1, nocross)) {
935 		MPASS(align < nocross);
936 		start = VMEM_ALIGNUP(start - phase, nocross) + phase;
937 	}
938 	if (start <= end && end - start >= size - 1) {
939 		MPASS((start & (align - 1)) == phase);
940 		MPASS(!VMEM_CROSS_P(start, start + size - 1, nocross));
941 		MPASS(minaddr <= start);
942 		MPASS(maxaddr == 0 || start + size - 1 <= maxaddr);
943 		MPASS(bt->bt_start <= start);
944 		MPASS(BT_END(bt) - start >= size - 1);
945 		*addrp = start;
946 
947 		return (0);
948 	}
949 	return (ENOMEM);
950 }
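
/*
 * Worked example (illustrative values): for a free tag starting at 0x12345
 * with align == 0x1000 and phase == 0, the candidate start is rounded up to
 * 0x13000; the fit succeeds provided at least "size" bytes remain between
 * that start and BT_END(bt) and the [minaddr, maxaddr] window is respected.
 */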
951 
952 /*
953  * vmem_clip:  Trim the boundary tag edges to the requested start and size.
954  */
955 static void
956 vmem_clip(vmem_t *vm, bt_t *bt, vmem_addr_t start, vmem_size_t size)
957 {
958 	bt_t *btnew;
959 	bt_t *btprev;
960 
961 	VMEM_ASSERT_LOCKED(vm);
962 	MPASS(bt->bt_type == BT_TYPE_FREE);
963 	MPASS(bt->bt_size >= size);
964 	bt_remfree(vm, bt);
965 	if (bt->bt_start != start) {
966 		btprev = bt_alloc(vm);
967 		btprev->bt_type = BT_TYPE_FREE;
968 		btprev->bt_start = bt->bt_start;
969 		btprev->bt_size = start - bt->bt_start;
970 		bt->bt_start = start;
971 		bt->bt_size -= btprev->bt_size;
972 		bt_insfree(vm, btprev);
973 		bt_insseg(vm, btprev,
974 		    TAILQ_PREV(bt, vmem_seglist, bt_seglist));
975 	}
976 	MPASS(bt->bt_start == start);
977 	if (bt->bt_size != size && bt->bt_size - size > vm->vm_quantum_mask) {
978 		/* split */
979 		btnew = bt_alloc(vm);
980 		btnew->bt_type = BT_TYPE_BUSY;
981 		btnew->bt_start = bt->bt_start;
982 		btnew->bt_size = size;
983 		bt->bt_start = bt->bt_start + size;
984 		bt->bt_size -= size;
985 		bt_insfree(vm, bt);
986 		bt_insseg(vm, btnew,
987 		    TAILQ_PREV(bt, vmem_seglist, bt_seglist));
988 		bt_insbusy(vm, btnew);
989 		bt = btnew;
990 	} else {
991 		bt->bt_type = BT_TYPE_BUSY;
992 		bt_insbusy(vm, bt);
993 	}
994 	MPASS(bt->bt_size >= size);
995 }
996 
997 static int
998 vmem_try_fetch(vmem_t *vm, const vmem_size_t size, vmem_size_t align, int flags)
999 {
1000 	vmem_size_t avail;
1001 
1002 	VMEM_ASSERT_LOCKED(vm);
1003 
1004 	/*
1005 	 * XXX it is possible to fail to meet xalloc constraints with the
1006 	 * imported region.  It is up to the user to specify the
1007 	 * import quantum such that it can satisfy any allocation.
1008 	 */
1009 	if (vmem_import(vm, size, align, flags) == 0)
1010 		return (1);
1011 
1012 	/*
1013 	 * Try to free some space from the quantum cache or reclaim
1014 	 * functions if available.
1015 	 */
1016 	if (vm->vm_qcache_max != 0 || vm->vm_reclaimfn != NULL) {
1017 		avail = vm->vm_size - vm->vm_inuse;
1018 		VMEM_UNLOCK(vm);
1019 		if (vm->vm_qcache_max != 0)
1020 			qc_drain(vm);
1021 		if (vm->vm_reclaimfn != NULL)
1022 			vm->vm_reclaimfn(vm, flags);
1023 		VMEM_LOCK(vm);
1024 		/* If we were successful retry even NOWAIT. */
1025 		if (vm->vm_size - vm->vm_inuse > avail)
1026 			return (1);
1027 	}
1028 	if ((flags & M_NOWAIT) != 0)
1029 		return (0);
1030 	VMEM_CONDVAR_WAIT(vm);
1031 	return (1);
1032 }
1033 
1034 static int
1035 vmem_try_release(vmem_t *vm, struct vmem_btag *bt, const bool remfree)
1036 {
1037 	struct vmem_btag *prev;
1038 
1039 	MPASS(bt->bt_type == BT_TYPE_FREE);
1040 
1041 	if (vm->vm_releasefn == NULL)
1042 		return (0);
1043 
1044 	prev = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
1045 	MPASS(prev != NULL);
1046 	MPASS(prev->bt_type != BT_TYPE_FREE);
1047 
1048 	if (prev->bt_type == BT_TYPE_SPAN && prev->bt_size == bt->bt_size) {
1049 		vmem_addr_t spanaddr;
1050 		vmem_size_t spansize;
1051 
1052 		MPASS(prev->bt_start == bt->bt_start);
1053 		spanaddr = prev->bt_start;
1054 		spansize = prev->bt_size;
1055 		if (remfree)
1056 			bt_remfree(vm, bt);
1057 		bt_remseg(vm, bt);
1058 		bt_remseg(vm, prev);
1059 		vm->vm_size -= spansize;
1060 		VMEM_CONDVAR_BROADCAST(vm);
1061 		bt_freetrim(vm, BT_MAXFREE);
1062 		vm->vm_releasefn(vm->vm_arg, spanaddr, spansize);
1063 		return (1);
1064 	}
1065 	return (0);
1066 }
1067 
1068 static int
1069 vmem_xalloc_nextfit(vmem_t *vm, const vmem_size_t size, vmem_size_t align,
1070     const vmem_size_t phase, const vmem_size_t nocross, int flags,
1071     vmem_addr_t *addrp)
1072 {
1073 	struct vmem_btag *bt, *cursor, *next, *prev;
1074 	int error;
1075 
1076 	error = ENOMEM;
1077 	VMEM_LOCK(vm);
1078 retry:
1079 	/*
1080 	 * Make sure we have enough tags to complete the operation.
1081 	 */
1082 	if (vm->vm_nfreetags < BT_MAXALLOC && bt_fill(vm, flags) != 0)
1083 		goto out;
1084 
1085 	/*
1086 	 * Find the next free tag meeting our constraints.  If one is found,
1087 	 * perform the allocation.
1088 	 */
1089 	for (cursor = &vm->vm_cursor, bt = TAILQ_NEXT(cursor, bt_seglist);
1090 	    bt != cursor; bt = TAILQ_NEXT(bt, bt_seglist)) {
1091 		if (bt == NULL)
1092 			bt = TAILQ_FIRST(&vm->vm_seglist);
1093 		if (bt->bt_type == BT_TYPE_FREE && bt->bt_size >= size &&
1094 		    (error = vmem_fit(bt, size, align, phase, nocross,
1095 		    VMEM_ADDR_MIN, VMEM_ADDR_MAX, addrp)) == 0) {
1096 			vmem_clip(vm, bt, *addrp, size);
1097 			break;
1098 		}
1099 	}
1100 
1101 	/*
1102 	 * Try to coalesce free segments around the cursor.  If we succeed, and
1103 	 * have not yet satisfied the allocation request, try again with the
1104 	 * newly coalesced segment.
1105 	 */
1106 	if ((next = TAILQ_NEXT(cursor, bt_seglist)) != NULL &&
1107 	    (prev = TAILQ_PREV(cursor, vmem_seglist, bt_seglist)) != NULL &&
1108 	    next->bt_type == BT_TYPE_FREE && prev->bt_type == BT_TYPE_FREE &&
1109 	    prev->bt_start + prev->bt_size == next->bt_start) {
1110 		prev->bt_size += next->bt_size;
1111 		bt_remfree(vm, next);
1112 		bt_remseg(vm, next);
1113 
1114 		/*
1115 		 * The coalesced segment might be able to satisfy our request.
1116 		 * If not, we might need to release it from the arena.
1117 		 */
1118 		if (error == ENOMEM && prev->bt_size >= size &&
1119 		    (error = vmem_fit(prev, size, align, phase, nocross,
1120 		    VMEM_ADDR_MIN, VMEM_ADDR_MAX, addrp)) == 0) {
1121 			vmem_clip(vm, prev, *addrp, size);
1122 			bt = prev;
1123 		} else
1124 			(void)vmem_try_release(vm, prev, true);
1125 	}
1126 
1127 	/*
1128 	 * If the allocation was successful, advance the cursor.
1129 	 */
1130 	if (error == 0) {
1131 		TAILQ_REMOVE(&vm->vm_seglist, cursor, bt_seglist);
1132 		for (; bt != NULL && bt->bt_start < *addrp + size;
1133 		    bt = TAILQ_NEXT(bt, bt_seglist))
1134 			;
1135 		if (bt != NULL)
1136 			TAILQ_INSERT_BEFORE(bt, cursor, bt_seglist);
1137 		else
1138 			TAILQ_INSERT_HEAD(&vm->vm_seglist, cursor, bt_seglist);
1139 	}
1140 
1141 	/*
1142 	 * Attempt to bring additional resources into the arena.  If that fails
1143 	 * and M_WAITOK is specified, sleep waiting for resources to be freed.
1144 	 */
1145 	if (error == ENOMEM && vmem_try_fetch(vm, size, align, flags))
1146 		goto retry;
1147 
1148 out:
1149 	VMEM_UNLOCK(vm);
1150 	return (error);
1151 }
1152 
1153 /* ---- vmem API */
1154 
1155 void
1156 vmem_set_import(vmem_t *vm, vmem_import_t *importfn,
1157      vmem_release_t *releasefn, void *arg, vmem_size_t import_quantum)
1158 {
1159 
1160 	VMEM_LOCK(vm);
1161 	vm->vm_importfn = importfn;
1162 	vm->vm_releasefn = releasefn;
1163 	vm->vm_arg = arg;
1164 	vm->vm_import_quantum = import_quantum;
1165 	VMEM_UNLOCK(vm);
1166 }
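
/*
 * Example (illustrative sketch; "foo_arena", the callbacks and the parent
 * arena are hypothetical): a sub-arena can be grown on demand by importing
 * import_quantum-sized spans from a parent arena and releasing them once a
 * whole span becomes free again:
 *
 *	static int
 *	foo_import(void *arg, vmem_size_t size, int flags, vmem_addr_t *addrp)
 *	{
 *
 *		return (vmem_xalloc(arg, size, 0, 0, 0, VMEM_ADDR_MIN,
 *		    VMEM_ADDR_MAX, flags, addrp));
 *	}
 *
 *	static void
 *	foo_release(void *arg, vmem_addr_t addr, vmem_size_t size)
 *	{
 *
 *		vmem_xfree(arg, addr, size);
 *	}
 *
 *	vmem_set_import(foo_arena, foo_import, foo_release, parent_arena,
 *	    128 * PAGE_SIZE);
 */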
1167 
1168 void
1169 vmem_set_limit(vmem_t *vm, vmem_size_t limit)
1170 {
1171 
1172 	VMEM_LOCK(vm);
1173 	vm->vm_limit = limit;
1174 	VMEM_UNLOCK(vm);
1175 }
1176 
1177 void
1178 vmem_set_reclaim(vmem_t *vm, vmem_reclaim_t *reclaimfn)
1179 {
1180 
1181 	VMEM_LOCK(vm);
1182 	vm->vm_reclaimfn = reclaimfn;
1183 	VMEM_UNLOCK(vm);
1184 }
1185 
1186 /*
1187  * vmem_init: Initialize a vmem arena in caller-allocated storage.
1188  */
1189 vmem_t *
1190 vmem_init(vmem_t *vm, const char *name, vmem_addr_t base, vmem_size_t size,
1191     vmem_size_t quantum, vmem_size_t qcache_max, int flags)
1192 {
1193 	int i;
1194 
1195 	MPASS(quantum > 0);
1196 	MPASS((quantum & (quantum - 1)) == 0);
1197 
1198 	bzero(vm, sizeof(*vm));
1199 
1200 	VMEM_CONDVAR_INIT(vm, name);
1201 	VMEM_LOCK_INIT(vm, name);
1202 	vm->vm_nfreetags = 0;
1203 	LIST_INIT(&vm->vm_freetags);
1204 	strlcpy(vm->vm_name, name, sizeof(vm->vm_name));
1205 	vm->vm_quantum_mask = quantum - 1;
1206 	vm->vm_quantum_shift = flsl(quantum) - 1;
1207 	vm->vm_nbusytag = 0;
1208 	vm->vm_size = 0;
1209 	vm->vm_limit = 0;
1210 	vm->vm_inuse = 0;
1211 	qc_init(vm, qcache_max);
1212 
1213 	TAILQ_INIT(&vm->vm_seglist);
1214 	vm->vm_cursor.bt_start = vm->vm_cursor.bt_size = 0;
1215 	vm->vm_cursor.bt_type = BT_TYPE_CURSOR;
1216 	TAILQ_INSERT_TAIL(&vm->vm_seglist, &vm->vm_cursor, bt_seglist);
1217 
1218 	for (i = 0; i < VMEM_MAXORDER; i++)
1219 		LIST_INIT(&vm->vm_freelist[i]);
1220 
1221 	memset(&vm->vm_hash0, 0, sizeof(vm->vm_hash0));
1222 	vm->vm_hashsize = VMEM_HASHSIZE_MIN;
1223 	vm->vm_hashlist = vm->vm_hash0;
1224 
1225 	if (size != 0) {
1226 		if (vmem_add(vm, base, size, flags) != 0) {
1227 			vmem_destroy1(vm);
1228 			return NULL;
1229 		}
1230 	}
1231 
1232 	mtx_lock(&vmem_list_lock);
1233 	LIST_INSERT_HEAD(&vmem_list, vm, vm_alllist);
1234 	mtx_unlock(&vmem_list_lock);
1235 
1236 	return vm;
1237 }
1238 
1239 /*
1240  * vmem_create: create an arena.
1241  */
1242 vmem_t *
1243 vmem_create(const char *name, vmem_addr_t base, vmem_size_t size,
1244     vmem_size_t quantum, vmem_size_t qcache_max, int flags)
1245 {
1246 
1247 	vmem_t *vm;
1248 
1249 	vm = uma_zalloc(vmem_zone, flags & (M_WAITOK|M_NOWAIT));
1250 	if (vm == NULL)
1251 		return (NULL);
1252 	if (vmem_init(vm, name, base, size, quantum, qcache_max,
1253 	    flags) == NULL)
1254 		return (NULL);
1255 	return (vm);
1256 }
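
/*
 * Example (illustrative sketch; the arena name, base and sizes are
 * hypothetical): a typical consumer creates an arena over a range of
 * integers, allocates from it and frees back to it:
 *
 *	vmem_t *foo_arena;
 *	vmem_addr_t off;
 *
 *	foo_arena = vmem_create("foo", PAGE_SIZE, 1024 * PAGE_SIZE, PAGE_SIZE,
 *	    0, M_WAITOK);
 *	if (vmem_alloc(foo_arena, PAGE_SIZE, M_BESTFIT | M_WAITOK, &off) == 0)
 *		vmem_free(foo_arena, off, PAGE_SIZE);
 *	vmem_destroy(foo_arena);
 */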
1257 
1258 void
1259 vmem_destroy(vmem_t *vm)
1260 {
1261 
1262 	mtx_lock(&vmem_list_lock);
1263 	LIST_REMOVE(vm, vm_alllist);
1264 	mtx_unlock(&vmem_list_lock);
1265 
1266 	vmem_destroy1(vm);
1267 }
1268 
1269 vmem_size_t
1270 vmem_roundup_size(vmem_t *vm, vmem_size_t size)
1271 {
1272 
1273 	return (size + vm->vm_quantum_mask) & ~vm->vm_quantum_mask;
1274 }
1275 
1276 /*
1277  * vmem_alloc: allocate a resource from the arena.
1278  */
1279 int
1280 vmem_alloc(vmem_t *vm, vmem_size_t size, int flags, vmem_addr_t *addrp)
1281 {
1282 	const int strat __unused = flags & VMEM_FITMASK;
1283 	qcache_t *qc;
1284 
1285 	flags &= VMEM_FLAGS;
1286 	MPASS(size > 0);
1287 	MPASS(strat == M_BESTFIT || strat == M_FIRSTFIT || strat == M_NEXTFIT);
1288 	if ((flags & M_NOWAIT) == 0)
1289 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "vmem_alloc");
1290 
1291 	if (size <= vm->vm_qcache_max) {
1292 		/*
1293 		 * Resource 0 cannot be cached, so avoid a blocking allocation
1294 		 * in qc_import() and give the vmem_xalloc() call below a chance
1295 		 * to return 0.
1296 		 */
1297 		qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift];
1298 		*addrp = (vmem_addr_t)uma_zalloc(qc->qc_cache,
1299 		    (flags & ~M_WAITOK) | M_NOWAIT);
1300 		if (__predict_true(*addrp != 0))
1301 			return (0);
1302 	}
1303 
1304 	return (vmem_xalloc(vm, size, 0, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
1305 	    flags, addrp));
1306 }
1307 
1308 int
1309 vmem_xalloc(vmem_t *vm, const vmem_size_t size0, vmem_size_t align,
1310     const vmem_size_t phase, const vmem_size_t nocross,
1311     const vmem_addr_t minaddr, const vmem_addr_t maxaddr, int flags,
1312     vmem_addr_t *addrp)
1313 {
1314 	const vmem_size_t size = vmem_roundup_size(vm, size0);
1315 	struct vmem_freelist *list;
1316 	struct vmem_freelist *first;
1317 	struct vmem_freelist *end;
1318 	bt_t *bt;
1319 	int error;
1320 	int strat;
1321 
1322 	flags &= VMEM_FLAGS;
1323 	strat = flags & VMEM_FITMASK;
1324 	MPASS(size0 > 0);
1325 	MPASS(size > 0);
1326 	MPASS(strat == M_BESTFIT || strat == M_FIRSTFIT || strat == M_NEXTFIT);
1327 	MPASS((flags & (M_NOWAIT|M_WAITOK)) != (M_NOWAIT|M_WAITOK));
1328 	if ((flags & M_NOWAIT) == 0)
1329 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "vmem_xalloc");
1330 	MPASS((align & vm->vm_quantum_mask) == 0);
1331 	MPASS((align & (align - 1)) == 0);
1332 	MPASS((phase & vm->vm_quantum_mask) == 0);
1333 	MPASS((nocross & vm->vm_quantum_mask) == 0);
1334 	MPASS((nocross & (nocross - 1)) == 0);
1335 	MPASS((align == 0 && phase == 0) || phase < align);
1336 	MPASS(nocross == 0 || nocross >= size);
1337 	MPASS(minaddr <= maxaddr);
1338 	MPASS(!VMEM_CROSS_P(phase, phase + size - 1, nocross));
1339 	if (strat == M_NEXTFIT)
1340 		MPASS(minaddr == VMEM_ADDR_MIN && maxaddr == VMEM_ADDR_MAX);
1341 
1342 	if (align == 0)
1343 		align = vm->vm_quantum_mask + 1;
1344 	*addrp = 0;
1345 
1346 	/*
1347 	 * Next-fit allocations don't use the freelists.
1348 	 */
1349 	if (strat == M_NEXTFIT)
1350 		return (vmem_xalloc_nextfit(vm, size0, align, phase, nocross,
1351 		    flags, addrp));
1352 
1353 	end = &vm->vm_freelist[VMEM_MAXORDER];
1354 	/*
1355 	 * choose a free block from which we allocate.
1356 	 */
1357 	first = bt_freehead_toalloc(vm, size, strat);
1358 	VMEM_LOCK(vm);
1359 	for (;;) {
1360 		/*
1361 		 * Make sure we have enough tags to complete the
1362 		 * operation.
1363 		 */
1364 		if (vm->vm_nfreetags < BT_MAXALLOC &&
1365 		    bt_fill(vm, flags) != 0) {
1366 			error = ENOMEM;
1367 			break;
1368 		}
1369 
1370 		/*
1371 	 	 * Scan freelists looking for a tag that satisfies the
1372 		 * allocation.  If we're doing BESTFIT we may encounter
1373 		 * sizes below the request.  If we're doing FIRSTFIT we
1374 		 * inspect only the first element from each list.
1375 		 */
1376 		for (list = first; list < end; list++) {
1377 			LIST_FOREACH(bt, list, bt_freelist) {
1378 				if (bt->bt_size >= size) {
1379 					error = vmem_fit(bt, size, align, phase,
1380 					    nocross, minaddr, maxaddr, addrp);
1381 					if (error == 0) {
1382 						vmem_clip(vm, bt, *addrp, size);
1383 						goto out;
1384 					}
1385 				}
1386 				/* FIRST skips to the next list. */
1387 				if (strat == M_FIRSTFIT)
1388 					break;
1389 			}
1390 		}
1391 
1392 		/*
1393 		 * Retry if the fast algorithm failed.
1394 		 */
1395 		if (strat == M_FIRSTFIT) {
1396 			strat = M_BESTFIT;
1397 			first = bt_freehead_toalloc(vm, size, strat);
1398 			continue;
1399 		}
1400 
1401 		/*
1402 		 * Try a few measures to bring additional resources into the
1403 		 * arena.  If all else fails, we will sleep waiting for
1404 		 * resources to be freed.
1405 		 */
1406 		if (!vmem_try_fetch(vm, size, align, flags)) {
1407 			error = ENOMEM;
1408 			break;
1409 		}
1410 	}
1411 out:
1412 	VMEM_UNLOCK(vm);
1413 	if (error != 0 && (flags & M_NOWAIT) == 0)
1414 		panic("failed to allocate waiting allocation\n");
1415 
1416 	return (error);
1417 }
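
/*
 * Example (illustrative; "foo_arena" is hypothetical and its quantum is
 * assumed to divide the alignment): a caller needing a 64 KB region aligned
 * to 64 KB that must not cross a 1 MB boundary could request it with:
 *
 *	error = vmem_xalloc(foo_arena, 64 * 1024, 64 * 1024, 0, 1024 * 1024,
 *	    VMEM_ADDR_MIN, VMEM_ADDR_MAX, M_BESTFIT | M_NOWAIT, &off);
 */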
1418 
1419 /*
1420  * vmem_free: free the resource to the arena.
1421  */
1422 void
1423 vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
1424 {
1425 	qcache_t *qc;
1426 	MPASS(size > 0);
1427 
1428 	if (size <= vm->vm_qcache_max &&
1429 	    __predict_true(addr >= VMEM_ADDR_QCACHE_MIN)) {
1430 		qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift];
1431 		uma_zfree(qc->qc_cache, (void *)addr);
1432 	} else
1433 		vmem_xfree(vm, addr, size);
1434 }
1435 
1436 void
1437 vmem_xfree(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
1438 {
1439 	bt_t *bt;
1440 	bt_t *t;
1441 
1442 	MPASS(size > 0);
1443 
1444 	VMEM_LOCK(vm);
1445 	bt = bt_lookupbusy(vm, addr);
1446 	MPASS(bt != NULL);
1447 	MPASS(bt->bt_start == addr);
1448 	MPASS(bt->bt_size == vmem_roundup_size(vm, size) ||
1449 	    bt->bt_size - vmem_roundup_size(vm, size) <= vm->vm_quantum_mask);
1450 	MPASS(bt->bt_type == BT_TYPE_BUSY);
1451 	bt_rembusy(vm, bt);
1452 	bt->bt_type = BT_TYPE_FREE;
1453 
1454 	/* coalesce */
1455 	t = TAILQ_NEXT(bt, bt_seglist);
1456 	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
1457 		MPASS(BT_END(bt) < t->bt_start);	/* YYY */
1458 		bt->bt_size += t->bt_size;
1459 		bt_remfree(vm, t);
1460 		bt_remseg(vm, t);
1461 	}
1462 	t = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
1463 	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
1464 		MPASS(BT_END(t) < bt->bt_start);	/* YYY */
1465 		bt->bt_size += t->bt_size;
1466 		bt->bt_start = t->bt_start;
1467 		bt_remfree(vm, t);
1468 		bt_remseg(vm, t);
1469 	}
1470 
1471 	if (!vmem_try_release(vm, bt, false)) {
1472 		bt_insfree(vm, bt);
1473 		VMEM_CONDVAR_BROADCAST(vm);
1474 		bt_freetrim(vm, BT_MAXFREE);
1475 	}
1476 }
1477 
1478 /*
1479  * vmem_add:
1480  *
1481  */
1482 int
1483 vmem_add(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int flags)
1484 {
1485 	int error;
1486 
1487 	error = 0;
1488 	flags &= VMEM_FLAGS;
1489 	VMEM_LOCK(vm);
1490 	if (vm->vm_nfreetags >= BT_MAXALLOC || bt_fill(vm, flags) == 0)
1491 		vmem_add1(vm, addr, size, BT_TYPE_SPAN_STATIC);
1492 	else
1493 		error = ENOMEM;
1494 	VMEM_UNLOCK(vm);
1495 
1496 	return (error);
1497 }
1498 
1499 /*
1500  * vmem_size: Report size information about the arena.
1501  */
1502 vmem_size_t
1503 vmem_size(vmem_t *vm, int typemask)
1504 {
1505 	int i;
1506 
1507 	switch (typemask) {
1508 	case VMEM_ALLOC:
1509 		return vm->vm_inuse;
1510 	case VMEM_FREE:
1511 		return vm->vm_size - vm->vm_inuse;
1512 	case VMEM_FREE|VMEM_ALLOC:
1513 		return vm->vm_size;
1514 	case VMEM_MAXFREE:
1515 		VMEM_LOCK(vm);
1516 		for (i = VMEM_MAXORDER - 1; i >= 0; i--) {
1517 			if (LIST_EMPTY(&vm->vm_freelist[i]))
1518 				continue;
1519 			VMEM_UNLOCK(vm);
1520 			return ((vmem_size_t)ORDER2SIZE(i) <<
1521 			    vm->vm_quantum_shift);
1522 		}
1523 		VMEM_UNLOCK(vm);
1524 		return (0);
1525 	default:
1526 		panic("vmem_size");
1527 	}
1528 }
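
/*
 * For example, vmem_size(vm, VMEM_ALLOC | VMEM_FREE) reports the total
 * resource managed by the arena, while VMEM_MAXFREE reports the size class
 * of the largest free segment, i.e. a conservative bound on the largest
 * allocation that can currently be satisfied without importing.
 */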
1529 
1530 /* ---- debug */
1531 
1532 #if defined(DDB) || defined(DIAGNOSTIC)
1533 
1534 static void bt_dump(const bt_t *, int (*)(const char *, ...)
1535     __printflike(1, 2));
1536 
1537 static const char *
1538 bt_type_string(int type)
1539 {
1540 
1541 	switch (type) {
1542 	case BT_TYPE_BUSY:
1543 		return "busy";
1544 	case BT_TYPE_FREE:
1545 		return "free";
1546 	case BT_TYPE_SPAN:
1547 		return "span";
1548 	case BT_TYPE_SPAN_STATIC:
1549 		return "static span";
1550 	case BT_TYPE_CURSOR:
1551 		return "cursor";
1552 	default:
1553 		break;
1554 	}
1555 	return "BOGUS";
1556 }
1557 
1558 static void
1559 bt_dump(const bt_t *bt, int (*pr)(const char *, ...))
1560 {
1561 
1562 	(*pr)("\t%p: %jx %jx, %d(%s)\n",
1563 	    bt, (intmax_t)bt->bt_start, (intmax_t)bt->bt_size,
1564 	    bt->bt_type, bt_type_string(bt->bt_type));
1565 }
1566 
1567 static void
1568 vmem_dump(const vmem_t *vm , int (*pr)(const char *, ...) __printflike(1, 2))
1569 {
1570 	const bt_t *bt;
1571 	int i;
1572 
1573 	(*pr)("vmem %p '%s'\n", vm, vm->vm_name);
1574 	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1575 		bt_dump(bt, pr);
1576 	}
1577 
1578 	for (i = 0; i < VMEM_MAXORDER; i++) {
1579 		const struct vmem_freelist *fl = &vm->vm_freelist[i];
1580 
1581 		if (LIST_EMPTY(fl)) {
1582 			continue;
1583 		}
1584 
1585 		(*pr)("freelist[%d]\n", i);
1586 		LIST_FOREACH(bt, fl, bt_freelist) {
1587 			bt_dump(bt, pr);
1588 		}
1589 	}
1590 }
1591 
1592 #endif /* defined(DDB) || defined(DIAGNOSTIC) */
1593 
1594 #if defined(DDB)
1595 #include <ddb/ddb.h>
1596 
1597 static bt_t *
1598 vmem_whatis_lookup(vmem_t *vm, vmem_addr_t addr)
1599 {
1600 	bt_t *bt;
1601 
1602 	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1603 		if (BT_ISSPAN_P(bt)) {
1604 			continue;
1605 		}
1606 		if (bt->bt_start <= addr && addr <= BT_END(bt)) {
1607 			return bt;
1608 		}
1609 	}
1610 
1611 	return NULL;
1612 }
1613 
1614 void
1615 vmem_whatis(vmem_addr_t addr, int (*pr)(const char *, ...))
1616 {
1617 	vmem_t *vm;
1618 
1619 	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
1620 		bt_t *bt;
1621 
1622 		bt = vmem_whatis_lookup(vm, addr);
1623 		if (bt == NULL) {
1624 			continue;
1625 		}
1626 		(*pr)("%p is %p+%zu in VMEM '%s' (%s)\n",
1627 		    (void *)addr, (void *)bt->bt_start,
1628 		    (vmem_size_t)(addr - bt->bt_start), vm->vm_name,
1629 		    (bt->bt_type == BT_TYPE_BUSY) ? "allocated" : "free");
1630 	}
1631 }
1632 
1633 void
1634 vmem_printall(const char *modif, int (*pr)(const char *, ...))
1635 {
1636 	const vmem_t *vm;
1637 
1638 	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
1639 		vmem_dump(vm, pr);
1640 	}
1641 }
1642 
1643 void
1644 vmem_print(vmem_addr_t addr, const char *modif, int (*pr)(const char *, ...))
1645 {
1646 	const vmem_t *vm = (const void *)addr;
1647 
1648 	vmem_dump(vm, pr);
1649 }
1650 
1651 DB_SHOW_COMMAND(vmemdump, vmemdump)
1652 {
1653 
1654 	if (!have_addr) {
1655 		db_printf("usage: show vmemdump <addr>\n");
1656 		return;
1657 	}
1658 
1659 	vmem_dump((const vmem_t *)addr, db_printf);
1660 }
1661 
1662 DB_SHOW_ALL_COMMAND(vmemdump, vmemdumpall)
1663 {
1664 	const vmem_t *vm;
1665 
1666 	LIST_FOREACH(vm, &vmem_list, vm_alllist)
1667 		vmem_dump(vm, db_printf);
1668 }
1669 
1670 DB_SHOW_COMMAND(vmem, vmem_summ)
1671 {
1672 	const vmem_t *vm = (const void *)addr;
1673 	const bt_t *bt;
1674 	size_t ft[VMEM_MAXORDER], ut[VMEM_MAXORDER];
1675 	size_t fs[VMEM_MAXORDER], us[VMEM_MAXORDER];
1676 	int ord;
1677 
1678 	if (!have_addr) {
1679 		db_printf("usage: show vmem <addr>\n");
1680 		return;
1681 	}
1682 
1683 	db_printf("vmem %p '%s'\n", vm, vm->vm_name);
1684 	db_printf("\tquantum:\t%zu\n", vm->vm_quantum_mask + 1);
1685 	db_printf("\tsize:\t%zu\n", vm->vm_size);
1686 	db_printf("\tinuse:\t%zu\n", vm->vm_inuse);
1687 	db_printf("\tfree:\t%zu\n", vm->vm_size - vm->vm_inuse);
1688 	db_printf("\tbusy tags:\t%d\n", vm->vm_nbusytag);
1689 	db_printf("\tfree tags:\t%d\n", vm->vm_nfreetags);
1690 
1691 	memset(&ft, 0, sizeof(ft));
1692 	memset(&ut, 0, sizeof(ut));
1693 	memset(&fs, 0, sizeof(fs));
1694 	memset(&us, 0, sizeof(us));
1695 	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1696 		ord = SIZE2ORDER(bt->bt_size >> vm->vm_quantum_shift);
1697 		if (bt->bt_type == BT_TYPE_BUSY) {
1698 			ut[ord]++;
1699 			us[ord] += bt->bt_size;
1700 		} else if (bt->bt_type == BT_TYPE_FREE) {
1701 			ft[ord]++;
1702 			fs[ord] += bt->bt_size;
1703 		}
1704 	}
1705 	db_printf("\t\t\tinuse\tsize\t\tfree\tsize\n");
1706 	for (ord = 0; ord < VMEM_MAXORDER; ord++) {
1707 		if (ut[ord] == 0 && ft[ord] == 0)
1708 			continue;
1709 		db_printf("\t%-15zu %zu\t%-15zu %zu\t%-16zu\n",
1710 		    ORDER2SIZE(ord) << vm->vm_quantum_shift,
1711 		    ut[ord], us[ord], ft[ord], fs[ord]);
1712 	}
1713 }
1714 
1715 DB_SHOW_ALL_COMMAND(vmem, vmem_summall)
1716 {
1717 	const vmem_t *vm;
1718 
1719 	LIST_FOREACH(vm, &vmem_list, vm_alllist)
1720 		vmem_summ((db_expr_t)vm, TRUE, count, modif);
1721 }
1722 #endif /* defined(DDB) */
1723 
1724 #define vmem_printf printf
1725 
1726 #if defined(DIAGNOSTIC)
1727 
1728 static bool
1729 vmem_check_sanity(vmem_t *vm)
1730 {
1731 	const bt_t *bt, *bt2;
1732 
1733 	MPASS(vm != NULL);
1734 
1735 	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1736 		if (bt->bt_start > BT_END(bt)) {
1737 			printf("corrupted tag\n");
1738 			bt_dump(bt, vmem_printf);
1739 			return false;
1740 		}
1741 	}
1742 	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1743 		if (bt->bt_type == BT_TYPE_CURSOR) {
1744 			if (bt->bt_start != 0 || bt->bt_size != 0) {
1745 				printf("corrupted cursor\n");
1746 				return false;
1747 			}
1748 			continue;
1749 		}
1750 		TAILQ_FOREACH(bt2, &vm->vm_seglist, bt_seglist) {
1751 			if (bt == bt2) {
1752 				continue;
1753 			}
1754 			if (bt2->bt_type == BT_TYPE_CURSOR) {
1755 				continue;
1756 			}
1757 			if (BT_ISSPAN_P(bt) != BT_ISSPAN_P(bt2)) {
1758 				continue;
1759 			}
1760 			if (bt->bt_start <= BT_END(bt2) &&
1761 			    bt2->bt_start <= BT_END(bt)) {
1762 				printf("overlapping tags\n");
1763 				bt_dump(bt, vmem_printf);
1764 				bt_dump(bt2, vmem_printf);
1765 				return false;
1766 			}
1767 		}
1768 	}
1769 
1770 	return true;
1771 }
1772 
1773 static void
1774 vmem_check(vmem_t *vm)
1775 {
1776 
1777 	if (!vmem_check_sanity(vm)) {
1778 		panic("insanity vmem %p", vm);
1779 	}
1780 }
1781 
1782 #endif /* defined(DIAGNOSTIC) */
1783