xref: /freebsd/sys/kern/subr_vmem.c (revision e5ecee7440496904939e936501d0db93bed15415)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c)2006,2007,2008,2009 YAMAMOTO Takashi,
5  * Copyright (c) 2013 EMC Corp.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 
30 /*
31  * From:
32  *	$NetBSD: vmem_impl.h,v 1.2 2013/01/29 21:26:24 para Exp $
33  *	$NetBSD: subr_vmem.c,v 1.83 2013/03/06 11:20:10 yamt Exp $
34  */
35 
36 /*
37  * reference:
38  * -	Magazines and Vmem: Extending the Slab Allocator
39  *	to Many CPUs and Arbitrary Resources
40  *	http://www.usenix.org/event/usenix01/bonwick.html
41  */
42 
43 #include <sys/cdefs.h>
44 __FBSDID("$FreeBSD$");
45 
46 #include "opt_ddb.h"
47 
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/kernel.h>
51 #include <sys/queue.h>
52 #include <sys/callout.h>
53 #include <sys/hash.h>
54 #include <sys/lock.h>
55 #include <sys/malloc.h>
56 #include <sys/mutex.h>
57 #include <sys/smp.h>
58 #include <sys/condvar.h>
59 #include <sys/sysctl.h>
60 #include <sys/taskqueue.h>
61 #include <sys/vmem.h>
62 #include <sys/vmmeter.h>
63 
64 #include "opt_vm.h"
65 
66 #include <vm/uma.h>
67 #include <vm/vm.h>
68 #include <vm/pmap.h>
69 #include <vm/vm_map.h>
70 #include <vm/vm_object.h>
71 #include <vm/vm_kern.h>
72 #include <vm/vm_extern.h>
73 #include <vm/vm_param.h>
74 #include <vm/vm_page.h>
75 #include <vm/vm_pageout.h>
76 #include <vm/vm_phys.h>
77 #include <vm/vm_pagequeue.h>
78 #include <vm/uma_int.h>
79 
80 #define	VMEM_OPTORDER		5
81 #define	VMEM_OPTVALUE		(1 << VMEM_OPTORDER)
82 #define	VMEM_MAXORDER						\
83     (VMEM_OPTVALUE - 1 + sizeof(vmem_size_t) * NBBY - VMEM_OPTORDER)
84 
85 #define	VMEM_HASHSIZE_MIN	16
86 #define	VMEM_HASHSIZE_MAX	131072
87 
88 #define	VMEM_QCACHE_IDX_MAX	16
89 
90 #define	VMEM_FITMASK	(M_BESTFIT | M_FIRSTFIT | M_NEXTFIT)
91 
92 #define	VMEM_FLAGS	(M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_NOVM |	\
93     M_BESTFIT | M_FIRSTFIT | M_NEXTFIT)
94 
95 #define	BT_FLAGS	(M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_NOVM)
96 
97 #define	QC_NAME_MAX	16
98 
99 /*
100  * Data structures private to vmem.
101  */
102 MALLOC_DEFINE(M_VMEM, "vmem", "vmem internal structures");
103 
104 typedef struct vmem_btag bt_t;
105 
106 TAILQ_HEAD(vmem_seglist, vmem_btag);
107 LIST_HEAD(vmem_freelist, vmem_btag);
108 LIST_HEAD(vmem_hashlist, vmem_btag);
109 
110 struct qcache {
111 	uma_zone_t	qc_cache;
112 	vmem_t 		*qc_vmem;
113 	vmem_size_t	qc_size;
114 	char		qc_name[QC_NAME_MAX];
115 };
116 typedef struct qcache qcache_t;
117 #define	QC_POOL_TO_QCACHE(pool)	((qcache_t *)(pool->pr_qcache))
118 
119 #define	VMEM_NAME_MAX	16
120 
121 /* boundary tag */
122 struct vmem_btag {
123 	TAILQ_ENTRY(vmem_btag) bt_seglist;
124 	union {
125 		LIST_ENTRY(vmem_btag) u_freelist; /* BT_TYPE_FREE */
126 		LIST_ENTRY(vmem_btag) u_hashlist; /* BT_TYPE_BUSY */
127 	} bt_u;
128 #define	bt_hashlist	bt_u.u_hashlist
129 #define	bt_freelist	bt_u.u_freelist
130 	vmem_addr_t	bt_start;
131 	vmem_size_t	bt_size;
132 	int		bt_type;
133 };
134 
135 /* vmem arena */
136 struct vmem {
137 	struct mtx_padalign	vm_lock;
138 	struct cv		vm_cv;
139 	char			vm_name[VMEM_NAME_MAX+1];
140 	LIST_ENTRY(vmem)	vm_alllist;
141 	struct vmem_hashlist	vm_hash0[VMEM_HASHSIZE_MIN];
142 	struct vmem_freelist	vm_freelist[VMEM_MAXORDER];
143 	struct vmem_seglist	vm_seglist;
144 	struct vmem_hashlist	*vm_hashlist;
145 	vmem_size_t		vm_hashsize;
146 
147 	/* Constant after init */
148 	vmem_size_t		vm_qcache_max;
149 	vmem_size_t		vm_quantum_mask;
150 	vmem_size_t		vm_import_quantum;
151 	int			vm_quantum_shift;
152 
153 	/* Written on alloc/free */
154 	LIST_HEAD(, vmem_btag)	vm_freetags;
155 	int			vm_nfreetags;
156 	int			vm_nbusytag;
157 	vmem_size_t		vm_inuse;
158 	vmem_size_t		vm_size;
159 	vmem_size_t		vm_limit;
160 	struct vmem_btag	vm_cursor;
161 
162 	/* Used on import. */
163 	vmem_import_t		*vm_importfn;
164 	vmem_release_t		*vm_releasefn;
165 	void			*vm_arg;
166 
167 	/* Space exhaustion callback. */
168 	vmem_reclaim_t		*vm_reclaimfn;
169 
170 	/* quantum cache */
171 	qcache_t		vm_qcache[VMEM_QCACHE_IDX_MAX];
172 };
173 
174 #define	BT_TYPE_SPAN		1	/* Allocated from importfn */
175 #define	BT_TYPE_SPAN_STATIC	2	/* vmem_add() or create. */
176 #define	BT_TYPE_FREE		3	/* Available space. */
177 #define	BT_TYPE_BUSY		4	/* Used space. */
178 #define	BT_TYPE_CURSOR		5	/* Cursor for nextfit allocations. */
179 #define	BT_ISSPAN_P(bt)	((bt)->bt_type <= BT_TYPE_SPAN_STATIC)
180 
181 #define	BT_END(bt)	((bt)->bt_start + (bt)->bt_size - 1)
182 
183 #if defined(DIAGNOSTIC)
184 static int enable_vmem_check = 1;
185 SYSCTL_INT(_debug, OID_AUTO, vmem_check, CTLFLAG_RWTUN,
186     &enable_vmem_check, 0, "Enable vmem check");
187 static void vmem_check(vmem_t *);
188 #endif
189 
190 static struct callout	vmem_periodic_ch;
191 static int		vmem_periodic_interval;
192 static struct task	vmem_periodic_wk;
193 
194 static struct mtx_padalign __exclusive_cache_line vmem_list_lock;
195 static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
196 static uma_zone_t vmem_zone;
197 
198 /* ---- misc */
199 #define	VMEM_CONDVAR_INIT(vm, wchan)	cv_init(&vm->vm_cv, wchan)
200 #define	VMEM_CONDVAR_DESTROY(vm)	cv_destroy(&vm->vm_cv)
201 #define	VMEM_CONDVAR_WAIT(vm)		cv_wait(&vm->vm_cv, &vm->vm_lock)
202 #define	VMEM_CONDVAR_BROADCAST(vm)	cv_broadcast(&vm->vm_cv)
203 
204 #define	VMEM_LOCK(vm)		mtx_lock(&vm->vm_lock)
205 #define	VMEM_TRYLOCK(vm)	mtx_trylock(&vm->vm_lock)
206 #define	VMEM_UNLOCK(vm)		mtx_unlock(&vm->vm_lock)
207 #define	VMEM_LOCK_INIT(vm, name) mtx_init(&vm->vm_lock, (name), NULL, MTX_DEF)
208 #define	VMEM_LOCK_DESTROY(vm)	mtx_destroy(&vm->vm_lock)
209 #define	VMEM_ASSERT_LOCKED(vm)	mtx_assert(&vm->vm_lock, MA_OWNED);
210 
211 #define	VMEM_ALIGNUP(addr, align)	(-(-(addr) & -(align)))
212 
213 #define	VMEM_CROSS_P(addr1, addr2, boundary) \
214 	((((addr1) ^ (addr2)) & -(boundary)) != 0)
215 
216 #define	ORDER2SIZE(order)	((order) < VMEM_OPTVALUE ? ((order) + 1) : \
217     (vmem_size_t)1 << ((order) - (VMEM_OPTVALUE - VMEM_OPTORDER - 1)))
218 #define	SIZE2ORDER(size)	((size) <= VMEM_OPTVALUE ? ((size) - 1) : \
219     (flsl(size) + (VMEM_OPTVALUE - VMEM_OPTORDER - 2)))
220 
221 /*
222  * Maximum number of boundary tags that may be required to satisfy an
223  * allocation.  Two may be required to import.  Another two may be
224  * required to clip edges.
225  */
226 #define	BT_MAXALLOC	4
227 
228 /*
229  * Max free limits the number of locally cached boundary tags.  We
230  * just want to avoid hitting the zone allocator for every call.
231  */
232 #define BT_MAXFREE	(BT_MAXALLOC * 8)
233 
234 /* Allocator for boundary tags. */
235 static uma_zone_t vmem_bt_zone;
236 
237 /* boot time arena storage. */
238 static struct vmem kernel_arena_storage;
239 static struct vmem buffer_arena_storage;
240 static struct vmem transient_arena_storage;
241 /* kernel and kmem arenas are aliased for backwards KPI compat. */
242 vmem_t *kernel_arena = &kernel_arena_storage;
243 vmem_t *kmem_arena = &kernel_arena_storage;
244 vmem_t *buffer_arena = &buffer_arena_storage;
245 vmem_t *transient_arena = &transient_arena_storage;
246 
247 #ifdef DEBUG_MEMGUARD
248 static struct vmem memguard_arena_storage;
249 vmem_t *memguard_arena = &memguard_arena_storage;
250 #endif
251 
252 static bool
253 bt_isbusy(bt_t *bt)
254 {
255 	return (bt->bt_type == BT_TYPE_BUSY);
256 }
257 
258 static bool
259 bt_isfree(bt_t *bt)
260 {
261 	return (bt->bt_type == BT_TYPE_FREE);
262 }
263 
264 /*
265  * Fill the vmem's boundary tag cache.  We guarantee that boundary tag
266  * allocation will not fail once bt_fill() passes.  To do so we cache
267  * at least the maximum possible tag allocations in the arena.
268  */
269 static int
270 bt_fill(vmem_t *vm, int flags)
271 {
272 	bt_t *bt;
273 
274 	VMEM_ASSERT_LOCKED(vm);
275 
276 	/*
277 	 * Only allow the kernel arena and arenas derived from kernel arena to
278 	 * dip into reserve tags.  They are where new tags come from.
279 	 */
280 	flags &= BT_FLAGS;
281 	if (vm != kernel_arena && vm->vm_arg != kernel_arena)
282 		flags &= ~M_USE_RESERVE;
283 
284 	/*
285 	 * Loop until we meet the reserve.  To minimize the lock shuffle
286 	 * and prevent simultaneous fills we first try a NOWAIT regardless
287 	 * of the caller's flags.  Specify M_NOVM so we don't recurse while
288 	 * holding a vmem lock.
289 	 */
290 	while (vm->vm_nfreetags < BT_MAXALLOC) {
291 		bt = uma_zalloc(vmem_bt_zone,
292 		    (flags & M_USE_RESERVE) | M_NOWAIT | M_NOVM);
293 		if (bt == NULL) {
294 			VMEM_UNLOCK(vm);
295 			bt = uma_zalloc(vmem_bt_zone, flags);
296 			VMEM_LOCK(vm);
297 			if (bt == NULL)
298 				break;
299 		}
300 		LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
301 		vm->vm_nfreetags++;
302 	}
303 
304 	if (vm->vm_nfreetags < BT_MAXALLOC)
305 		return ENOMEM;
306 
307 	return 0;
308 }
309 
310 /*
311  * Pop a tag off of the freetag stack.
312  */
313 static bt_t *
314 bt_alloc(vmem_t *vm)
315 {
316 	bt_t *bt;
317 
318 	VMEM_ASSERT_LOCKED(vm);
319 	bt = LIST_FIRST(&vm->vm_freetags);
320 	MPASS(bt != NULL);
321 	LIST_REMOVE(bt, bt_freelist);
322 	vm->vm_nfreetags--;
323 
324 	return bt;
325 }
326 
327 /*
328  * Trim the per-vmem free list.  Returns with the lock released to
329  * avoid allocator recursions.
330  */
331 static void
332 bt_freetrim(vmem_t *vm, int freelimit)
333 {
334 	LIST_HEAD(, vmem_btag) freetags;
335 	bt_t *bt;
336 
337 	LIST_INIT(&freetags);
338 	VMEM_ASSERT_LOCKED(vm);
339 	while (vm->vm_nfreetags > freelimit) {
340 		bt = LIST_FIRST(&vm->vm_freetags);
341 		LIST_REMOVE(bt, bt_freelist);
342 		vm->vm_nfreetags--;
343 		LIST_INSERT_HEAD(&freetags, bt, bt_freelist);
344 	}
345 	VMEM_UNLOCK(vm);
346 	while ((bt = LIST_FIRST(&freetags)) != NULL) {
347 		LIST_REMOVE(bt, bt_freelist);
348 		uma_zfree(vmem_bt_zone, bt);
349 	}
350 }
351 
352 static inline void
353 bt_free(vmem_t *vm, bt_t *bt)
354 {
355 
356 	VMEM_ASSERT_LOCKED(vm);
357 	MPASS(LIST_FIRST(&vm->vm_freetags) != bt);
358 	LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
359 	vm->vm_nfreetags++;
360 }
361 
362 /*
363  * freelist[0] ... [1, 1]
364  * freelist[1] ... [2, 2]
365  *  :
366  * freelist[29] ... [30, 30]
367  * freelist[30] ... [31, 31]
368  * freelist[31] ... [32, 63]
369  * freelist[33] ... [64, 127]
370  *  :
371  * freelist[n] ... [(1 << (n - 26)), (1 << (n - 25)) - 1]
372  *  :
373  */
374 
375 static struct vmem_freelist *
376 bt_freehead_tofree(vmem_t *vm, vmem_size_t size)
377 {
378 	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
379 	const int idx = SIZE2ORDER(qsize);
380 
381 	MPASS(size != 0 && qsize != 0);
382 	MPASS((size & vm->vm_quantum_mask) == 0);
383 	MPASS(idx >= 0);
384 	MPASS(idx < VMEM_MAXORDER);
385 
386 	return &vm->vm_freelist[idx];
387 }
388 
389 /*
390  * bt_freehead_toalloc: return the freelist for the given size and allocation
391  * strategy.
392  *
393  * For M_FIRSTFIT, return the list in which any blocks are large enough
394  * for the requested size.  otherwise, return the list which can have blocks
395  * large enough for the requested size.
396  */
397 static struct vmem_freelist *
398 bt_freehead_toalloc(vmem_t *vm, vmem_size_t size, int strat)
399 {
400 	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
401 	int idx = SIZE2ORDER(qsize);
402 
403 	MPASS(size != 0 && qsize != 0);
404 	MPASS((size & vm->vm_quantum_mask) == 0);
405 
406 	if (strat == M_FIRSTFIT && ORDER2SIZE(idx) != qsize) {
407 		idx++;
408 		/* check too large request? */
409 	}
410 	MPASS(idx >= 0);
411 	MPASS(idx < VMEM_MAXORDER);
412 
413 	return &vm->vm_freelist[idx];
414 }
415 
416 /* ---- boundary tag hash */
417 
418 static struct vmem_hashlist *
419 bt_hashhead(vmem_t *vm, vmem_addr_t addr)
420 {
421 	struct vmem_hashlist *list;
422 	unsigned int hash;
423 
424 	hash = hash32_buf(&addr, sizeof(addr), 0);
425 	list = &vm->vm_hashlist[hash % vm->vm_hashsize];
426 
427 	return list;
428 }
429 
430 static bt_t *
431 bt_lookupbusy(vmem_t *vm, vmem_addr_t addr)
432 {
433 	struct vmem_hashlist *list;
434 	bt_t *bt;
435 
436 	VMEM_ASSERT_LOCKED(vm);
437 	list = bt_hashhead(vm, addr);
438 	LIST_FOREACH(bt, list, bt_hashlist) {
439 		if (bt->bt_start == addr) {
440 			break;
441 		}
442 	}
443 
444 	return bt;
445 }
446 
447 static void
448 bt_rembusy(vmem_t *vm, bt_t *bt)
449 {
450 
451 	VMEM_ASSERT_LOCKED(vm);
452 	MPASS(vm->vm_nbusytag > 0);
453 	vm->vm_inuse -= bt->bt_size;
454 	vm->vm_nbusytag--;
455 	LIST_REMOVE(bt, bt_hashlist);
456 }
457 
458 static void
459 bt_insbusy(vmem_t *vm, bt_t *bt)
460 {
461 	struct vmem_hashlist *list;
462 
463 	VMEM_ASSERT_LOCKED(vm);
464 	MPASS(bt->bt_type == BT_TYPE_BUSY);
465 
466 	list = bt_hashhead(vm, bt->bt_start);
467 	LIST_INSERT_HEAD(list, bt, bt_hashlist);
468 	vm->vm_nbusytag++;
469 	vm->vm_inuse += bt->bt_size;
470 }
471 
472 /* ---- boundary tag list */
473 
474 static void
475 bt_remseg(vmem_t *vm, bt_t *bt)
476 {
477 
478 	MPASS(bt->bt_type != BT_TYPE_CURSOR);
479 	TAILQ_REMOVE(&vm->vm_seglist, bt, bt_seglist);
480 	bt_free(vm, bt);
481 }
482 
483 static void
484 bt_insseg(vmem_t *vm, bt_t *bt, bt_t *prev)
485 {
486 
487 	TAILQ_INSERT_AFTER(&vm->vm_seglist, prev, bt, bt_seglist);
488 }
489 
490 static void
491 bt_insseg_tail(vmem_t *vm, bt_t *bt)
492 {
493 
494 	TAILQ_INSERT_TAIL(&vm->vm_seglist, bt, bt_seglist);
495 }
496 
497 static void
498 bt_remfree(vmem_t *vm, bt_t *bt)
499 {
500 
501 	MPASS(bt->bt_type == BT_TYPE_FREE);
502 
503 	LIST_REMOVE(bt, bt_freelist);
504 }
505 
506 static void
507 bt_insfree(vmem_t *vm, bt_t *bt)
508 {
509 	struct vmem_freelist *list;
510 
511 	list = bt_freehead_tofree(vm, bt->bt_size);
512 	LIST_INSERT_HEAD(list, bt, bt_freelist);
513 }
514 
515 /* ---- vmem internal functions */
516 
517 /*
518  * Import from the arena into the quantum cache in UMA.
519  *
520  * We use VMEM_ADDR_QCACHE_MIN instead of 0: uma_zalloc() returns 0 to indicate
521  * failure, so UMA can't be used to cache a resource with value 0.
522  */
523 static int
524 qc_import(void *arg, void **store, int cnt, int domain, int flags)
525 {
526 	qcache_t *qc;
527 	vmem_addr_t addr;
528 	int i;
529 
530 	KASSERT((flags & M_WAITOK) == 0, ("blocking allocation"));
531 
532 	qc = arg;
533 	for (i = 0; i < cnt; i++) {
534 		if (vmem_xalloc(qc->qc_vmem, qc->qc_size, 0, 0, 0,
535 		    VMEM_ADDR_QCACHE_MIN, VMEM_ADDR_MAX, flags, &addr) != 0)
536 			break;
537 		store[i] = (void *)addr;
538 	}
539 	return (i);
540 }
541 
542 /*
543  * Release memory from the UMA cache to the arena.
544  */
545 static void
546 qc_release(void *arg, void **store, int cnt)
547 {
548 	qcache_t *qc;
549 	int i;
550 
551 	qc = arg;
552 	for (i = 0; i < cnt; i++)
553 		vmem_xfree(qc->qc_vmem, (vmem_addr_t)store[i], qc->qc_size);
554 }
555 
556 static void
557 qc_init(vmem_t *vm, vmem_size_t qcache_max)
558 {
559 	qcache_t *qc;
560 	vmem_size_t size;
561 	int qcache_idx_max;
562 	int i;
563 
564 	MPASS((qcache_max & vm->vm_quantum_mask) == 0);
565 	qcache_idx_max = MIN(qcache_max >> vm->vm_quantum_shift,
566 	    VMEM_QCACHE_IDX_MAX);
567 	vm->vm_qcache_max = qcache_idx_max << vm->vm_quantum_shift;
568 	for (i = 0; i < qcache_idx_max; i++) {
569 		qc = &vm->vm_qcache[i];
570 		size = (i + 1) << vm->vm_quantum_shift;
571 		snprintf(qc->qc_name, sizeof(qc->qc_name), "%s-%zu",
572 		    vm->vm_name, size);
573 		qc->qc_vmem = vm;
574 		qc->qc_size = size;
575 		qc->qc_cache = uma_zcache_create(qc->qc_name, size,
576 		    NULL, NULL, NULL, NULL, qc_import, qc_release, qc, 0);
577 		MPASS(qc->qc_cache);
578 	}
579 }
580 
581 static void
582 qc_destroy(vmem_t *vm)
583 {
584 	int qcache_idx_max;
585 	int i;
586 
587 	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
588 	for (i = 0; i < qcache_idx_max; i++)
589 		uma_zdestroy(vm->vm_qcache[i].qc_cache);
590 }
591 
592 static void
593 qc_drain(vmem_t *vm)
594 {
595 	int qcache_idx_max;
596 	int i;
597 
598 	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
599 	for (i = 0; i < qcache_idx_max; i++)
600 		uma_zone_reclaim(vm->vm_qcache[i].qc_cache, UMA_RECLAIM_DRAIN);
601 }
602 
603 #ifndef UMA_MD_SMALL_ALLOC
604 
605 static struct mtx_padalign __exclusive_cache_line vmem_bt_lock;
606 
607 /*
608  * vmem_bt_alloc:  Allocate a new page of boundary tags.
609  *
610  * On architectures with uma_small_alloc there is no recursion; no address
611  * space need be allocated to allocate boundary tags.  For the others, we
612  * must handle recursion.  Boundary tags are necessary to allocate new
613  * boundary tags.
614  *
615  * UMA guarantees that enough tags are held in reserve to allocate a new
616  * page of kva.  We dip into this reserve by specifying M_USE_RESERVE only
617  * when allocating the page to hold new boundary tags.  In this way the
618  * reserve is automatically filled by the allocation that uses the reserve.
619  *
620  * We still have to guarantee that the new tags are allocated atomically since
621  * many threads may try concurrently.  The bt_lock provides this guarantee.
622  * We convert WAITOK allocations to NOWAIT and then handle the blocking here
623  * on failure.  It's ok to return NULL for a WAITOK allocation as UMA will
624  * loop again after checking to see if we lost the race to allocate.
625  *
626  * There is a small race between vmem_bt_alloc() returning the page and the
627  * zone lock being acquired to add the page to the zone.  For WAITOK
628  * allocations we just pause briefly.  NOWAIT may experience a transient
629  * failure.  To alleviate this we permit a small number of simultaneous
630  * fills to proceed concurrently so NOWAIT is less likely to fail unless
631  * we are really out of KVA.
632  */
633 static void *
634 vmem_bt_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
635     int wait)
636 {
637 	vmem_addr_t addr;
638 
639 	*pflag = UMA_SLAB_KERNEL;
640 
641 	/*
642 	 * Single thread boundary tag allocation so that the address space
643 	 * and memory are added in one atomic operation.
644 	 */
645 	mtx_lock(&vmem_bt_lock);
646 	if (vmem_xalloc(vm_dom[domain].vmd_kernel_arena, bytes, 0, 0, 0,
647 	    VMEM_ADDR_MIN, VMEM_ADDR_MAX,
648 	    M_NOWAIT | M_NOVM | M_USE_RESERVE | M_BESTFIT, &addr) == 0) {
649 		if (kmem_back_domain(domain, kernel_object, addr, bytes,
650 		    M_NOWAIT | M_USE_RESERVE) == 0) {
651 			mtx_unlock(&vmem_bt_lock);
652 			return ((void *)addr);
653 		}
654 		vmem_xfree(vm_dom[domain].vmd_kernel_arena, addr, bytes);
655 		mtx_unlock(&vmem_bt_lock);
656 		/*
657 		 * Out of memory, not address space.  This may not even be
658 		 * possible due to M_USE_RESERVE page allocation.
659 		 */
660 		if (wait & M_WAITOK)
661 			vm_wait_domain(domain);
662 		return (NULL);
663 	}
664 	mtx_unlock(&vmem_bt_lock);
665 	/*
666 	 * We're either out of address space or lost a fill race.
667 	 */
668 	if (wait & M_WAITOK)
669 		pause("btalloc", 1);
670 
671 	return (NULL);
672 }
673 #endif
674 
675 void
676 vmem_startup(void)
677 {
678 
679 	mtx_init(&vmem_list_lock, "vmem list lock", NULL, MTX_DEF);
680 	vmem_zone = uma_zcreate("vmem",
681 	    sizeof(struct vmem), NULL, NULL, NULL, NULL,
682 	    UMA_ALIGN_PTR, 0);
683 #ifdef UMA_MD_SMALL_ALLOC
684 	vmem_bt_zone = uma_zcreate("vmem btag",
685 	    sizeof(struct vmem_btag), NULL, NULL, NULL, NULL,
686 	    UMA_ALIGN_PTR, UMA_ZONE_VM);
687 #else
688 	vmem_bt_zone = uma_zcreate("vmem btag",
689 	    sizeof(struct vmem_btag), NULL, NULL, NULL, NULL,
690 	    UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
691 	mtx_init(&vmem_bt_lock, "btag lock", NULL, MTX_DEF);
692 	uma_prealloc(vmem_bt_zone, BT_MAXALLOC);
693 	/*
694 	 * Reserve enough tags to allocate new tags.  We allow multiple
695 	 * CPUs to attempt to allocate new tags concurrently to limit
696 	 * false restarts in UMA.  vmem_bt_alloc() allocates from a per-domain
697 	 * arena, which may involve importing a range from the kernel arena,
698 	 * so we need to keep at least 2 * BT_MAXALLOC tags reserved.
699 	 */
700 	uma_zone_reserve(vmem_bt_zone, 2 * BT_MAXALLOC * mp_ncpus);
701 	uma_zone_set_allocf(vmem_bt_zone, vmem_bt_alloc);
702 #endif
703 }
704 
705 /* ---- rehash */
706 
707 static int
708 vmem_rehash(vmem_t *vm, vmem_size_t newhashsize)
709 {
710 	bt_t *bt;
711 	int i;
712 	struct vmem_hashlist *newhashlist;
713 	struct vmem_hashlist *oldhashlist;
714 	vmem_size_t oldhashsize;
715 
716 	MPASS(newhashsize > 0);
717 
718 	newhashlist = malloc(sizeof(struct vmem_hashlist) * newhashsize,
719 	    M_VMEM, M_NOWAIT);
720 	if (newhashlist == NULL)
721 		return ENOMEM;
722 	for (i = 0; i < newhashsize; i++) {
723 		LIST_INIT(&newhashlist[i]);
724 	}
725 
726 	VMEM_LOCK(vm);
727 	oldhashlist = vm->vm_hashlist;
728 	oldhashsize = vm->vm_hashsize;
729 	vm->vm_hashlist = newhashlist;
730 	vm->vm_hashsize = newhashsize;
731 	if (oldhashlist == NULL) {
732 		VMEM_UNLOCK(vm);
733 		return 0;
734 	}
735 	for (i = 0; i < oldhashsize; i++) {
736 		while ((bt = LIST_FIRST(&oldhashlist[i])) != NULL) {
737 			bt_rembusy(vm, bt);
738 			bt_insbusy(vm, bt);
739 		}
740 	}
741 	VMEM_UNLOCK(vm);
742 
743 	if (oldhashlist != vm->vm_hash0) {
744 		free(oldhashlist, M_VMEM);
745 	}
746 
747 	return 0;
748 }
749 
750 static void
751 vmem_periodic_kick(void *dummy)
752 {
753 
754 	taskqueue_enqueue(taskqueue_thread, &vmem_periodic_wk);
755 }
756 
757 static void
758 vmem_periodic(void *unused, int pending)
759 {
760 	vmem_t *vm;
761 	vmem_size_t desired;
762 	vmem_size_t current;
763 
764 	mtx_lock(&vmem_list_lock);
765 	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
766 #ifdef DIAGNOSTIC
767 		/* Convenient time to verify vmem state. */
768 		if (enable_vmem_check == 1) {
769 			VMEM_LOCK(vm);
770 			vmem_check(vm);
771 			VMEM_UNLOCK(vm);
772 		}
773 #endif
774 		desired = 1 << flsl(vm->vm_nbusytag);
775 		desired = MIN(MAX(desired, VMEM_HASHSIZE_MIN),
776 		    VMEM_HASHSIZE_MAX);
777 		current = vm->vm_hashsize;
778 
779 		/* Grow in powers of two.  Shrink less aggressively. */
780 		if (desired >= current * 2 || desired * 4 <= current)
781 			vmem_rehash(vm, desired);
782 
783 		/*
784 		 * Periodically wake up threads waiting for resources,
785 		 * so they could ask for reclamation again.
786 		 */
787 		VMEM_CONDVAR_BROADCAST(vm);
788 	}
789 	mtx_unlock(&vmem_list_lock);
790 
791 	callout_reset(&vmem_periodic_ch, vmem_periodic_interval,
792 	    vmem_periodic_kick, NULL);
793 }
794 
795 static void
796 vmem_start_callout(void *unused)
797 {
798 
799 	TASK_INIT(&vmem_periodic_wk, 0, vmem_periodic, NULL);
800 	vmem_periodic_interval = hz * 10;
801 	callout_init(&vmem_periodic_ch, 1);
802 	callout_reset(&vmem_periodic_ch, vmem_periodic_interval,
803 	    vmem_periodic_kick, NULL);
804 }
805 SYSINIT(vfs, SI_SUB_CONFIGURE, SI_ORDER_ANY, vmem_start_callout, NULL);
806 
807 static void
808 vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int type)
809 {
810 	bt_t *btfree, *btprev, *btspan;
811 
812 	VMEM_ASSERT_LOCKED(vm);
813 	MPASS(type == BT_TYPE_SPAN || type == BT_TYPE_SPAN_STATIC);
814 	MPASS((size & vm->vm_quantum_mask) == 0);
815 
816 	if (vm->vm_releasefn == NULL) {
817 		/*
818 		 * The new segment will never be released, so see if it is
819 		 * contiguous with respect to an existing segment.  In this case
820 		 * a span tag is not needed, and it may be possible now or in
821 		 * the future to coalesce the new segment with an existing free
822 		 * segment.
823 		 */
824 		btprev = TAILQ_LAST(&vm->vm_seglist, vmem_seglist);
825 		if ((!bt_isbusy(btprev) && !bt_isfree(btprev)) ||
826 		    btprev->bt_start + btprev->bt_size != addr)
827 			btprev = NULL;
828 	} else {
829 		btprev = NULL;
830 	}
831 
832 	if (btprev == NULL || bt_isbusy(btprev)) {
833 		if (btprev == NULL) {
834 			btspan = bt_alloc(vm);
835 			btspan->bt_type = type;
836 			btspan->bt_start = addr;
837 			btspan->bt_size = size;
838 			bt_insseg_tail(vm, btspan);
839 		}
840 
841 		btfree = bt_alloc(vm);
842 		btfree->bt_type = BT_TYPE_FREE;
843 		btfree->bt_start = addr;
844 		btfree->bt_size = size;
845 		bt_insseg_tail(vm, btfree);
846 		bt_insfree(vm, btfree);
847 	} else {
848 		bt_remfree(vm, btprev);
849 		btprev->bt_size += size;
850 		bt_insfree(vm, btprev);
851 	}
852 
853 	vm->vm_size += size;
854 }
855 
856 static void
857 vmem_destroy1(vmem_t *vm)
858 {
859 	bt_t *bt;
860 
861 	/*
862 	 * Drain per-cpu quantum caches.
863 	 */
864 	qc_destroy(vm);
865 
866 	/*
867 	 * The vmem should now only contain empty segments.
868 	 */
869 	VMEM_LOCK(vm);
870 	MPASS(vm->vm_nbusytag == 0);
871 
872 	TAILQ_REMOVE(&vm->vm_seglist, &vm->vm_cursor, bt_seglist);
873 	while ((bt = TAILQ_FIRST(&vm->vm_seglist)) != NULL)
874 		bt_remseg(vm, bt);
875 
876 	if (vm->vm_hashlist != NULL && vm->vm_hashlist != vm->vm_hash0)
877 		free(vm->vm_hashlist, M_VMEM);
878 
879 	bt_freetrim(vm, 0);
880 
881 	VMEM_CONDVAR_DESTROY(vm);
882 	VMEM_LOCK_DESTROY(vm);
883 	uma_zfree(vmem_zone, vm);
884 }
885 
886 static int
887 vmem_import(vmem_t *vm, vmem_size_t size, vmem_size_t align, int flags)
888 {
889 	vmem_addr_t addr;
890 	int error;
891 
892 	if (vm->vm_importfn == NULL)
893 		return (EINVAL);
894 
895 	/*
896 	 * To make sure we get a span that meets the alignment we double it
897 	 * and add the size to the tail.  This slightly overestimates.
898 	 */
899 	if (align != vm->vm_quantum_mask + 1)
900 		size = (align * 2) + size;
901 	size = roundup(size, vm->vm_import_quantum);
902 
903 	if (vm->vm_limit != 0 && vm->vm_limit < vm->vm_size + size)
904 		return (ENOMEM);
905 
906 	/*
907 	 * Hide MAXALLOC tags so we're guaranteed to be able to add this
908 	 * span and the tag we want to allocate from it.
909 	 */
910 	MPASS(vm->vm_nfreetags >= BT_MAXALLOC);
911 	vm->vm_nfreetags -= BT_MAXALLOC;
912 	VMEM_UNLOCK(vm);
913 	error = (vm->vm_importfn)(vm->vm_arg, size, flags, &addr);
914 	VMEM_LOCK(vm);
915 	vm->vm_nfreetags += BT_MAXALLOC;
916 	if (error)
917 		return (ENOMEM);
918 
919 	vmem_add1(vm, addr, size, BT_TYPE_SPAN);
920 
921 	return 0;
922 }
923 
924 /*
925  * vmem_fit: check if a bt can satisfy the given restrictions.
926  *
927  * it's a caller's responsibility to ensure the region is big enough
928  * before calling us.
929  */
930 static int
931 vmem_fit(const bt_t *bt, vmem_size_t size, vmem_size_t align,
932     vmem_size_t phase, vmem_size_t nocross, vmem_addr_t minaddr,
933     vmem_addr_t maxaddr, vmem_addr_t *addrp)
934 {
935 	vmem_addr_t start;
936 	vmem_addr_t end;
937 
938 	MPASS(size > 0);
939 	MPASS(bt->bt_size >= size); /* caller's responsibility */
940 
941 	/*
942 	 * XXX assumption: vmem_addr_t and vmem_size_t are
943 	 * unsigned integer of the same size.
944 	 */
945 
946 	start = bt->bt_start;
947 	if (start < minaddr) {
948 		start = minaddr;
949 	}
950 	end = BT_END(bt);
951 	if (end > maxaddr)
952 		end = maxaddr;
953 	if (start > end)
954 		return (ENOMEM);
955 
956 	start = VMEM_ALIGNUP(start - phase, align) + phase;
957 	if (start < bt->bt_start)
958 		start += align;
959 	if (VMEM_CROSS_P(start, start + size - 1, nocross)) {
960 		MPASS(align < nocross);
961 		start = VMEM_ALIGNUP(start - phase, nocross) + phase;
962 	}
963 	if (start <= end && end - start >= size - 1) {
964 		MPASS((start & (align - 1)) == phase);
965 		MPASS(!VMEM_CROSS_P(start, start + size - 1, nocross));
966 		MPASS(minaddr <= start);
967 		MPASS(maxaddr == 0 || start + size - 1 <= maxaddr);
968 		MPASS(bt->bt_start <= start);
969 		MPASS(BT_END(bt) - start >= size - 1);
970 		*addrp = start;
971 
972 		return (0);
973 	}
974 	return (ENOMEM);
975 }
976 
977 /*
978  * vmem_clip:  Trim the boundary tag edges to the requested start and size.
979  */
980 static void
981 vmem_clip(vmem_t *vm, bt_t *bt, vmem_addr_t start, vmem_size_t size)
982 {
983 	bt_t *btnew;
984 	bt_t *btprev;
985 
986 	VMEM_ASSERT_LOCKED(vm);
987 	MPASS(bt->bt_type == BT_TYPE_FREE);
988 	MPASS(bt->bt_size >= size);
989 	bt_remfree(vm, bt);
990 	if (bt->bt_start != start) {
991 		btprev = bt_alloc(vm);
992 		btprev->bt_type = BT_TYPE_FREE;
993 		btprev->bt_start = bt->bt_start;
994 		btprev->bt_size = start - bt->bt_start;
995 		bt->bt_start = start;
996 		bt->bt_size -= btprev->bt_size;
997 		bt_insfree(vm, btprev);
998 		bt_insseg(vm, btprev,
999 		    TAILQ_PREV(bt, vmem_seglist, bt_seglist));
1000 	}
1001 	MPASS(bt->bt_start == start);
1002 	if (bt->bt_size != size && bt->bt_size - size > vm->vm_quantum_mask) {
1003 		/* split */
1004 		btnew = bt_alloc(vm);
1005 		btnew->bt_type = BT_TYPE_BUSY;
1006 		btnew->bt_start = bt->bt_start;
1007 		btnew->bt_size = size;
1008 		bt->bt_start = bt->bt_start + size;
1009 		bt->bt_size -= size;
1010 		bt_insfree(vm, bt);
1011 		bt_insseg(vm, btnew,
1012 		    TAILQ_PREV(bt, vmem_seglist, bt_seglist));
1013 		bt_insbusy(vm, btnew);
1014 		bt = btnew;
1015 	} else {
1016 		bt->bt_type = BT_TYPE_BUSY;
1017 		bt_insbusy(vm, bt);
1018 	}
1019 	MPASS(bt->bt_size >= size);
1020 }
1021 
1022 static int
1023 vmem_try_fetch(vmem_t *vm, const vmem_size_t size, vmem_size_t align, int flags)
1024 {
1025 	vmem_size_t avail;
1026 
1027 	VMEM_ASSERT_LOCKED(vm);
1028 
1029 	/*
1030 	 * XXX it is possible to fail to meet xalloc constraints with the
1031 	 * imported region.  It is up to the user to specify the
1032 	 * import quantum such that it can satisfy any allocation.
1033 	 */
1034 	if (vmem_import(vm, size, align, flags) == 0)
1035 		return (1);
1036 
1037 	/*
1038 	 * Try to free some space from the quantum cache or reclaim
1039 	 * functions if available.
1040 	 */
1041 	if (vm->vm_qcache_max != 0 || vm->vm_reclaimfn != NULL) {
1042 		avail = vm->vm_size - vm->vm_inuse;
1043 		VMEM_UNLOCK(vm);
1044 		if (vm->vm_qcache_max != 0)
1045 			qc_drain(vm);
1046 		if (vm->vm_reclaimfn != NULL)
1047 			vm->vm_reclaimfn(vm, flags);
1048 		VMEM_LOCK(vm);
1049 		/* If we were successful retry even NOWAIT. */
1050 		if (vm->vm_size - vm->vm_inuse > avail)
1051 			return (1);
1052 	}
1053 	if ((flags & M_NOWAIT) != 0)
1054 		return (0);
1055 	VMEM_CONDVAR_WAIT(vm);
1056 	return (1);
1057 }
1058 
1059 static int
1060 vmem_try_release(vmem_t *vm, struct vmem_btag *bt, const bool remfree)
1061 {
1062 	struct vmem_btag *prev;
1063 
1064 	MPASS(bt->bt_type == BT_TYPE_FREE);
1065 
1066 	if (vm->vm_releasefn == NULL)
1067 		return (0);
1068 
1069 	prev = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
1070 	MPASS(prev != NULL);
1071 	MPASS(prev->bt_type != BT_TYPE_FREE);
1072 
1073 	if (prev->bt_type == BT_TYPE_SPAN && prev->bt_size == bt->bt_size) {
1074 		vmem_addr_t spanaddr;
1075 		vmem_size_t spansize;
1076 
1077 		MPASS(prev->bt_start == bt->bt_start);
1078 		spanaddr = prev->bt_start;
1079 		spansize = prev->bt_size;
1080 		if (remfree)
1081 			bt_remfree(vm, bt);
1082 		bt_remseg(vm, bt);
1083 		bt_remseg(vm, prev);
1084 		vm->vm_size -= spansize;
1085 		VMEM_CONDVAR_BROADCAST(vm);
1086 		bt_freetrim(vm, BT_MAXFREE);
1087 		vm->vm_releasefn(vm->vm_arg, spanaddr, spansize);
1088 		return (1);
1089 	}
1090 	return (0);
1091 }
1092 
1093 static int
1094 vmem_xalloc_nextfit(vmem_t *vm, const vmem_size_t size, vmem_size_t align,
1095     const vmem_size_t phase, const vmem_size_t nocross, int flags,
1096     vmem_addr_t *addrp)
1097 {
1098 	struct vmem_btag *bt, *cursor, *next, *prev;
1099 	int error;
1100 
1101 	error = ENOMEM;
1102 	VMEM_LOCK(vm);
1103 retry:
1104 	/*
1105 	 * Make sure we have enough tags to complete the operation.
1106 	 */
1107 	if (vm->vm_nfreetags < BT_MAXALLOC && bt_fill(vm, flags) != 0)
1108 		goto out;
1109 
1110 	/*
1111 	 * Find the next free tag meeting our constraints.  If one is found,
1112 	 * perform the allocation.
1113 	 */
1114 	for (cursor = &vm->vm_cursor, bt = TAILQ_NEXT(cursor, bt_seglist);
1115 	    bt != cursor; bt = TAILQ_NEXT(bt, bt_seglist)) {
1116 		if (bt == NULL)
1117 			bt = TAILQ_FIRST(&vm->vm_seglist);
1118 		if (bt->bt_type == BT_TYPE_FREE && bt->bt_size >= size &&
1119 		    (error = vmem_fit(bt, size, align, phase, nocross,
1120 		    VMEM_ADDR_MIN, VMEM_ADDR_MAX, addrp)) == 0) {
1121 			vmem_clip(vm, bt, *addrp, size);
1122 			break;
1123 		}
1124 	}
1125 
1126 	/*
1127 	 * Try to coalesce free segments around the cursor.  If we succeed, and
1128 	 * have not yet satisfied the allocation request, try again with the
1129 	 * newly coalesced segment.
1130 	 */
1131 	if ((next = TAILQ_NEXT(cursor, bt_seglist)) != NULL &&
1132 	    (prev = TAILQ_PREV(cursor, vmem_seglist, bt_seglist)) != NULL &&
1133 	    next->bt_type == BT_TYPE_FREE && prev->bt_type == BT_TYPE_FREE &&
1134 	    prev->bt_start + prev->bt_size == next->bt_start) {
1135 		prev->bt_size += next->bt_size;
1136 		bt_remfree(vm, next);
1137 		bt_remseg(vm, next);
1138 
1139 		/*
1140 		 * The coalesced segment might be able to satisfy our request.
1141 		 * If not, we might need to release it from the arena.
1142 		 */
1143 		if (error == ENOMEM && prev->bt_size >= size &&
1144 		    (error = vmem_fit(prev, size, align, phase, nocross,
1145 		    VMEM_ADDR_MIN, VMEM_ADDR_MAX, addrp)) == 0) {
1146 			vmem_clip(vm, prev, *addrp, size);
1147 			bt = prev;
1148 		} else
1149 			(void)vmem_try_release(vm, prev, true);
1150 	}
1151 
1152 	/*
1153 	 * If the allocation was successful, advance the cursor.
1154 	 */
1155 	if (error == 0) {
1156 		TAILQ_REMOVE(&vm->vm_seglist, cursor, bt_seglist);
1157 		for (; bt != NULL && bt->bt_start < *addrp + size;
1158 		    bt = TAILQ_NEXT(bt, bt_seglist))
1159 			;
1160 		if (bt != NULL)
1161 			TAILQ_INSERT_BEFORE(bt, cursor, bt_seglist);
1162 		else
1163 			TAILQ_INSERT_HEAD(&vm->vm_seglist, cursor, bt_seglist);
1164 	}
1165 
1166 	/*
1167 	 * Attempt to bring additional resources into the arena.  If that fails
1168 	 * and M_WAITOK is specified, sleep waiting for resources to be freed.
1169 	 */
1170 	if (error == ENOMEM && vmem_try_fetch(vm, size, align, flags))
1171 		goto retry;
1172 
1173 out:
1174 	VMEM_UNLOCK(vm);
1175 	return (error);
1176 }
1177 
1178 /* ---- vmem API */
1179 
1180 void
1181 vmem_set_import(vmem_t *vm, vmem_import_t *importfn,
1182      vmem_release_t *releasefn, void *arg, vmem_size_t import_quantum)
1183 {
1184 
1185 	VMEM_LOCK(vm);
1186 	KASSERT(vm->vm_size == 0, ("%s: arena is non-empty", __func__));
1187 	vm->vm_importfn = importfn;
1188 	vm->vm_releasefn = releasefn;
1189 	vm->vm_arg = arg;
1190 	vm->vm_import_quantum = import_quantum;
1191 	VMEM_UNLOCK(vm);
1192 }
1193 
1194 void
1195 vmem_set_limit(vmem_t *vm, vmem_size_t limit)
1196 {
1197 
1198 	VMEM_LOCK(vm);
1199 	vm->vm_limit = limit;
1200 	VMEM_UNLOCK(vm);
1201 }
1202 
1203 void
1204 vmem_set_reclaim(vmem_t *vm, vmem_reclaim_t *reclaimfn)
1205 {
1206 
1207 	VMEM_LOCK(vm);
1208 	vm->vm_reclaimfn = reclaimfn;
1209 	VMEM_UNLOCK(vm);
1210 }
1211 
1212 /*
1213  * vmem_init: Initializes vmem arena.
1214  */
1215 vmem_t *
1216 vmem_init(vmem_t *vm, const char *name, vmem_addr_t base, vmem_size_t size,
1217     vmem_size_t quantum, vmem_size_t qcache_max, int flags)
1218 {
1219 	int i;
1220 
1221 	MPASS(quantum > 0);
1222 	MPASS((quantum & (quantum - 1)) == 0);
1223 
1224 	bzero(vm, sizeof(*vm));
1225 
1226 	VMEM_CONDVAR_INIT(vm, name);
1227 	VMEM_LOCK_INIT(vm, name);
1228 	vm->vm_nfreetags = 0;
1229 	LIST_INIT(&vm->vm_freetags);
1230 	strlcpy(vm->vm_name, name, sizeof(vm->vm_name));
1231 	vm->vm_quantum_mask = quantum - 1;
1232 	vm->vm_quantum_shift = flsl(quantum) - 1;
1233 	vm->vm_nbusytag = 0;
1234 	vm->vm_size = 0;
1235 	vm->vm_limit = 0;
1236 	vm->vm_inuse = 0;
1237 	qc_init(vm, qcache_max);
1238 
1239 	TAILQ_INIT(&vm->vm_seglist);
1240 	vm->vm_cursor.bt_start = vm->vm_cursor.bt_size = 0;
1241 	vm->vm_cursor.bt_type = BT_TYPE_CURSOR;
1242 	TAILQ_INSERT_TAIL(&vm->vm_seglist, &vm->vm_cursor, bt_seglist);
1243 
1244 	for (i = 0; i < VMEM_MAXORDER; i++)
1245 		LIST_INIT(&vm->vm_freelist[i]);
1246 
1247 	memset(&vm->vm_hash0, 0, sizeof(vm->vm_hash0));
1248 	vm->vm_hashsize = VMEM_HASHSIZE_MIN;
1249 	vm->vm_hashlist = vm->vm_hash0;
1250 
1251 	if (size != 0) {
1252 		if (vmem_add(vm, base, size, flags) != 0) {
1253 			vmem_destroy1(vm);
1254 			return NULL;
1255 		}
1256 	}
1257 
1258 	mtx_lock(&vmem_list_lock);
1259 	LIST_INSERT_HEAD(&vmem_list, vm, vm_alllist);
1260 	mtx_unlock(&vmem_list_lock);
1261 
1262 	return vm;
1263 }
1264 
1265 /*
1266  * vmem_create: create an arena.
1267  */
1268 vmem_t *
1269 vmem_create(const char *name, vmem_addr_t base, vmem_size_t size,
1270     vmem_size_t quantum, vmem_size_t qcache_max, int flags)
1271 {
1272 
1273 	vmem_t *vm;
1274 
1275 	vm = uma_zalloc(vmem_zone, flags & (M_WAITOK|M_NOWAIT));
1276 	if (vm == NULL)
1277 		return (NULL);
1278 	if (vmem_init(vm, name, base, size, quantum, qcache_max,
1279 	    flags) == NULL)
1280 		return (NULL);
1281 	return (vm);
1282 }
1283 
1284 void
1285 vmem_destroy(vmem_t *vm)
1286 {
1287 
1288 	mtx_lock(&vmem_list_lock);
1289 	LIST_REMOVE(vm, vm_alllist);
1290 	mtx_unlock(&vmem_list_lock);
1291 
1292 	vmem_destroy1(vm);
1293 }
1294 
1295 vmem_size_t
1296 vmem_roundup_size(vmem_t *vm, vmem_size_t size)
1297 {
1298 
1299 	return (size + vm->vm_quantum_mask) & ~vm->vm_quantum_mask;
1300 }
1301 
1302 /*
1303  * vmem_alloc: allocate resource from the arena.
1304  */
1305 int
1306 vmem_alloc(vmem_t *vm, vmem_size_t size, int flags, vmem_addr_t *addrp)
1307 {
1308 	const int strat __unused = flags & VMEM_FITMASK;
1309 	qcache_t *qc;
1310 
1311 	flags &= VMEM_FLAGS;
1312 	MPASS(size > 0);
1313 	MPASS(strat == M_BESTFIT || strat == M_FIRSTFIT || strat == M_NEXTFIT);
1314 	if ((flags & M_NOWAIT) == 0)
1315 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "vmem_alloc");
1316 
1317 	if (size <= vm->vm_qcache_max) {
1318 		/*
1319 		 * Resource 0 cannot be cached, so avoid a blocking allocation
1320 		 * in qc_import() and give the vmem_xalloc() call below a chance
1321 		 * to return 0.
1322 		 */
1323 		qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift];
1324 		*addrp = (vmem_addr_t)uma_zalloc(qc->qc_cache,
1325 		    (flags & ~M_WAITOK) | M_NOWAIT);
1326 		if (__predict_true(*addrp != 0))
1327 			return (0);
1328 	}
1329 
1330 	return (vmem_xalloc(vm, size, 0, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
1331 	    flags, addrp));
1332 }
1333 
1334 int
1335 vmem_xalloc(vmem_t *vm, const vmem_size_t size0, vmem_size_t align,
1336     const vmem_size_t phase, const vmem_size_t nocross,
1337     const vmem_addr_t minaddr, const vmem_addr_t maxaddr, int flags,
1338     vmem_addr_t *addrp)
1339 {
1340 	const vmem_size_t size = vmem_roundup_size(vm, size0);
1341 	struct vmem_freelist *list;
1342 	struct vmem_freelist *first;
1343 	struct vmem_freelist *end;
1344 	bt_t *bt;
1345 	int error;
1346 	int strat;
1347 
1348 	flags &= VMEM_FLAGS;
1349 	strat = flags & VMEM_FITMASK;
1350 	MPASS(size0 > 0);
1351 	MPASS(size > 0);
1352 	MPASS(strat == M_BESTFIT || strat == M_FIRSTFIT || strat == M_NEXTFIT);
1353 	MPASS((flags & (M_NOWAIT|M_WAITOK)) != (M_NOWAIT|M_WAITOK));
1354 	if ((flags & M_NOWAIT) == 0)
1355 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "vmem_xalloc");
1356 	MPASS((align & vm->vm_quantum_mask) == 0);
1357 	MPASS((align & (align - 1)) == 0);
1358 	MPASS((phase & vm->vm_quantum_mask) == 0);
1359 	MPASS((nocross & vm->vm_quantum_mask) == 0);
1360 	MPASS((nocross & (nocross - 1)) == 0);
1361 	MPASS((align == 0 && phase == 0) || phase < align);
1362 	MPASS(nocross == 0 || nocross >= size);
1363 	MPASS(minaddr <= maxaddr);
1364 	MPASS(!VMEM_CROSS_P(phase, phase + size - 1, nocross));
1365 	if (strat == M_NEXTFIT)
1366 		MPASS(minaddr == VMEM_ADDR_MIN && maxaddr == VMEM_ADDR_MAX);
1367 
1368 	if (align == 0)
1369 		align = vm->vm_quantum_mask + 1;
1370 	*addrp = 0;
1371 
1372 	/*
1373 	 * Next-fit allocations don't use the freelists.
1374 	 */
1375 	if (strat == M_NEXTFIT)
1376 		return (vmem_xalloc_nextfit(vm, size0, align, phase, nocross,
1377 		    flags, addrp));
1378 
1379 	end = &vm->vm_freelist[VMEM_MAXORDER];
1380 	/*
1381 	 * choose a free block from which we allocate.
1382 	 */
1383 	first = bt_freehead_toalloc(vm, size, strat);
1384 	VMEM_LOCK(vm);
1385 	for (;;) {
1386 		/*
1387 		 * Make sure we have enough tags to complete the
1388 		 * operation.
1389 		 */
1390 		if (vm->vm_nfreetags < BT_MAXALLOC &&
1391 		    bt_fill(vm, flags) != 0) {
1392 			error = ENOMEM;
1393 			break;
1394 		}
1395 
1396 		/*
1397 	 	 * Scan freelists looking for a tag that satisfies the
1398 		 * allocation.  If we're doing BESTFIT we may encounter
1399 		 * sizes below the request.  If we're doing FIRSTFIT we
1400 		 * inspect only the first element from each list.
1401 		 */
1402 		for (list = first; list < end; list++) {
1403 			LIST_FOREACH(bt, list, bt_freelist) {
1404 				if (bt->bt_size >= size) {
1405 					error = vmem_fit(bt, size, align, phase,
1406 					    nocross, minaddr, maxaddr, addrp);
1407 					if (error == 0) {
1408 						vmem_clip(vm, bt, *addrp, size);
1409 						goto out;
1410 					}
1411 				}
1412 				/* FIRST skips to the next list. */
1413 				if (strat == M_FIRSTFIT)
1414 					break;
1415 			}
1416 		}
1417 
1418 		/*
1419 		 * Retry if the fast algorithm failed.
1420 		 */
1421 		if (strat == M_FIRSTFIT) {
1422 			strat = M_BESTFIT;
1423 			first = bt_freehead_toalloc(vm, size, strat);
1424 			continue;
1425 		}
1426 
1427 		/*
1428 		 * Try a few measures to bring additional resources into the
1429 		 * arena.  If all else fails, we will sleep waiting for
1430 		 * resources to be freed.
1431 		 */
1432 		if (!vmem_try_fetch(vm, size, align, flags)) {
1433 			error = ENOMEM;
1434 			break;
1435 		}
1436 	}
1437 out:
1438 	VMEM_UNLOCK(vm);
1439 	if (error != 0 && (flags & M_NOWAIT) == 0)
1440 		panic("failed to allocate waiting allocation\n");
1441 
1442 	return (error);
1443 }
1444 
1445 /*
1446  * vmem_free: free the resource to the arena.
1447  */
1448 void
1449 vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
1450 {
1451 	qcache_t *qc;
1452 	MPASS(size > 0);
1453 
1454 	if (size <= vm->vm_qcache_max &&
1455 	    __predict_true(addr >= VMEM_ADDR_QCACHE_MIN)) {
1456 		qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift];
1457 		uma_zfree(qc->qc_cache, (void *)addr);
1458 	} else
1459 		vmem_xfree(vm, addr, size);
1460 }
1461 
1462 void
1463 vmem_xfree(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
1464 {
1465 	bt_t *bt;
1466 	bt_t *t;
1467 
1468 	MPASS(size > 0);
1469 
1470 	VMEM_LOCK(vm);
1471 	bt = bt_lookupbusy(vm, addr);
1472 	MPASS(bt != NULL);
1473 	MPASS(bt->bt_start == addr);
1474 	MPASS(bt->bt_size == vmem_roundup_size(vm, size) ||
1475 	    bt->bt_size - vmem_roundup_size(vm, size) <= vm->vm_quantum_mask);
1476 	MPASS(bt->bt_type == BT_TYPE_BUSY);
1477 	bt_rembusy(vm, bt);
1478 	bt->bt_type = BT_TYPE_FREE;
1479 
1480 	/* coalesce */
1481 	t = TAILQ_NEXT(bt, bt_seglist);
1482 	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
1483 		MPASS(BT_END(bt) < t->bt_start);	/* YYY */
1484 		bt->bt_size += t->bt_size;
1485 		bt_remfree(vm, t);
1486 		bt_remseg(vm, t);
1487 	}
1488 	t = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
1489 	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
1490 		MPASS(BT_END(t) < bt->bt_start);	/* YYY */
1491 		bt->bt_size += t->bt_size;
1492 		bt->bt_start = t->bt_start;
1493 		bt_remfree(vm, t);
1494 		bt_remseg(vm, t);
1495 	}
1496 
1497 	if (!vmem_try_release(vm, bt, false)) {
1498 		bt_insfree(vm, bt);
1499 		VMEM_CONDVAR_BROADCAST(vm);
1500 		bt_freetrim(vm, BT_MAXFREE);
1501 	}
1502 }
1503 
1504 /*
1505  * vmem_add:
1506  *
1507  */
1508 int
1509 vmem_add(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int flags)
1510 {
1511 	int error;
1512 
1513 	error = 0;
1514 	flags &= VMEM_FLAGS;
1515 	VMEM_LOCK(vm);
1516 	if (vm->vm_nfreetags >= BT_MAXALLOC || bt_fill(vm, flags) == 0)
1517 		vmem_add1(vm, addr, size, BT_TYPE_SPAN_STATIC);
1518 	else
1519 		error = ENOMEM;
1520 	VMEM_UNLOCK(vm);
1521 
1522 	return (error);
1523 }
1524 
1525 /*
1526  * vmem_size: information about arenas size
1527  */
1528 vmem_size_t
1529 vmem_size(vmem_t *vm, int typemask)
1530 {
1531 	int i;
1532 
1533 	switch (typemask) {
1534 	case VMEM_ALLOC:
1535 		return vm->vm_inuse;
1536 	case VMEM_FREE:
1537 		return vm->vm_size - vm->vm_inuse;
1538 	case VMEM_FREE|VMEM_ALLOC:
1539 		return vm->vm_size;
1540 	case VMEM_MAXFREE:
1541 		VMEM_LOCK(vm);
1542 		for (i = VMEM_MAXORDER - 1; i >= 0; i--) {
1543 			if (LIST_EMPTY(&vm->vm_freelist[i]))
1544 				continue;
1545 			VMEM_UNLOCK(vm);
1546 			return ((vmem_size_t)ORDER2SIZE(i) <<
1547 			    vm->vm_quantum_shift);
1548 		}
1549 		VMEM_UNLOCK(vm);
1550 		return (0);
1551 	default:
1552 		panic("vmem_size");
1553 	}
1554 }
1555 
1556 /* ---- debug */
1557 
1558 #if defined(DDB) || defined(DIAGNOSTIC)
1559 
1560 static void bt_dump(const bt_t *, int (*)(const char *, ...)
1561     __printflike(1, 2));
1562 
1563 static const char *
1564 bt_type_string(int type)
1565 {
1566 
1567 	switch (type) {
1568 	case BT_TYPE_BUSY:
1569 		return "busy";
1570 	case BT_TYPE_FREE:
1571 		return "free";
1572 	case BT_TYPE_SPAN:
1573 		return "span";
1574 	case BT_TYPE_SPAN_STATIC:
1575 		return "static span";
1576 	case BT_TYPE_CURSOR:
1577 		return "cursor";
1578 	default:
1579 		break;
1580 	}
1581 	return "BOGUS";
1582 }
1583 
1584 static void
1585 bt_dump(const bt_t *bt, int (*pr)(const char *, ...))
1586 {
1587 
1588 	(*pr)("\t%p: %jx %jx, %d(%s)\n",
1589 	    bt, (intmax_t)bt->bt_start, (intmax_t)bt->bt_size,
1590 	    bt->bt_type, bt_type_string(bt->bt_type));
1591 }
1592 
1593 static void
1594 vmem_dump(const vmem_t *vm , int (*pr)(const char *, ...) __printflike(1, 2))
1595 {
1596 	const bt_t *bt;
1597 	int i;
1598 
1599 	(*pr)("vmem %p '%s'\n", vm, vm->vm_name);
1600 	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1601 		bt_dump(bt, pr);
1602 	}
1603 
1604 	for (i = 0; i < VMEM_MAXORDER; i++) {
1605 		const struct vmem_freelist *fl = &vm->vm_freelist[i];
1606 
1607 		if (LIST_EMPTY(fl)) {
1608 			continue;
1609 		}
1610 
1611 		(*pr)("freelist[%d]\n", i);
1612 		LIST_FOREACH(bt, fl, bt_freelist) {
1613 			bt_dump(bt, pr);
1614 		}
1615 	}
1616 }
1617 
1618 #endif /* defined(DDB) || defined(DIAGNOSTIC) */
1619 
1620 #if defined(DDB)
1621 #include <ddb/ddb.h>
1622 
1623 static bt_t *
1624 vmem_whatis_lookup(vmem_t *vm, vmem_addr_t addr)
1625 {
1626 	bt_t *bt;
1627 
1628 	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1629 		if (BT_ISSPAN_P(bt)) {
1630 			continue;
1631 		}
1632 		if (bt->bt_start <= addr && addr <= BT_END(bt)) {
1633 			return bt;
1634 		}
1635 	}
1636 
1637 	return NULL;
1638 }
1639 
1640 void
1641 vmem_whatis(vmem_addr_t addr, int (*pr)(const char *, ...))
1642 {
1643 	vmem_t *vm;
1644 
1645 	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
1646 		bt_t *bt;
1647 
1648 		bt = vmem_whatis_lookup(vm, addr);
1649 		if (bt == NULL) {
1650 			continue;
1651 		}
1652 		(*pr)("%p is %p+%zu in VMEM '%s' (%s)\n",
1653 		    (void *)addr, (void *)bt->bt_start,
1654 		    (vmem_size_t)(addr - bt->bt_start), vm->vm_name,
1655 		    (bt->bt_type == BT_TYPE_BUSY) ? "allocated" : "free");
1656 	}
1657 }
1658 
1659 void
1660 vmem_printall(const char *modif, int (*pr)(const char *, ...))
1661 {
1662 	const vmem_t *vm;
1663 
1664 	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
1665 		vmem_dump(vm, pr);
1666 	}
1667 }
1668 
1669 void
1670 vmem_print(vmem_addr_t addr, const char *modif, int (*pr)(const char *, ...))
1671 {
1672 	const vmem_t *vm = (const void *)addr;
1673 
1674 	vmem_dump(vm, pr);
1675 }
1676 
1677 DB_SHOW_COMMAND(vmemdump, vmemdump)
1678 {
1679 
1680 	if (!have_addr) {
1681 		db_printf("usage: show vmemdump <addr>\n");
1682 		return;
1683 	}
1684 
1685 	vmem_dump((const vmem_t *)addr, db_printf);
1686 }
1687 
1688 DB_SHOW_ALL_COMMAND(vmemdump, vmemdumpall)
1689 {
1690 	const vmem_t *vm;
1691 
1692 	LIST_FOREACH(vm, &vmem_list, vm_alllist)
1693 		vmem_dump(vm, db_printf);
1694 }
1695 
1696 DB_SHOW_COMMAND(vmem, vmem_summ)
1697 {
1698 	const vmem_t *vm = (const void *)addr;
1699 	const bt_t *bt;
1700 	size_t ft[VMEM_MAXORDER], ut[VMEM_MAXORDER];
1701 	size_t fs[VMEM_MAXORDER], us[VMEM_MAXORDER];
1702 	int ord;
1703 
1704 	if (!have_addr) {
1705 		db_printf("usage: show vmem <addr>\n");
1706 		return;
1707 	}
1708 
1709 	db_printf("vmem %p '%s'\n", vm, vm->vm_name);
1710 	db_printf("\tquantum:\t%zu\n", vm->vm_quantum_mask + 1);
1711 	db_printf("\tsize:\t%zu\n", vm->vm_size);
1712 	db_printf("\tinuse:\t%zu\n", vm->vm_inuse);
1713 	db_printf("\tfree:\t%zu\n", vm->vm_size - vm->vm_inuse);
1714 	db_printf("\tbusy tags:\t%d\n", vm->vm_nbusytag);
1715 	db_printf("\tfree tags:\t%d\n", vm->vm_nfreetags);
1716 
1717 	memset(&ft, 0, sizeof(ft));
1718 	memset(&ut, 0, sizeof(ut));
1719 	memset(&fs, 0, sizeof(fs));
1720 	memset(&us, 0, sizeof(us));
1721 	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1722 		ord = SIZE2ORDER(bt->bt_size >> vm->vm_quantum_shift);
1723 		if (bt->bt_type == BT_TYPE_BUSY) {
1724 			ut[ord]++;
1725 			us[ord] += bt->bt_size;
1726 		} else if (bt->bt_type == BT_TYPE_FREE) {
1727 			ft[ord]++;
1728 			fs[ord] += bt->bt_size;
1729 		}
1730 	}
1731 	db_printf("\t\t\tinuse\tsize\t\tfree\tsize\n");
1732 	for (ord = 0; ord < VMEM_MAXORDER; ord++) {
1733 		if (ut[ord] == 0 && ft[ord] == 0)
1734 			continue;
1735 		db_printf("\t%-15zu %zu\t%-15zu %zu\t%-16zu\n",
1736 		    ORDER2SIZE(ord) << vm->vm_quantum_shift,
1737 		    ut[ord], us[ord], ft[ord], fs[ord]);
1738 	}
1739 }
1740 
1741 DB_SHOW_ALL_COMMAND(vmem, vmem_summall)
1742 {
1743 	const vmem_t *vm;
1744 
1745 	LIST_FOREACH(vm, &vmem_list, vm_alllist)
1746 		vmem_summ((db_expr_t)vm, TRUE, count, modif);
1747 }
1748 #endif /* defined(DDB) */
1749 
1750 #define vmem_printf printf
1751 
1752 #if defined(DIAGNOSTIC)
1753 
1754 static bool
1755 vmem_check_sanity(vmem_t *vm)
1756 {
1757 	const bt_t *bt, *bt2;
1758 
1759 	MPASS(vm != NULL);
1760 
1761 	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1762 		if (bt->bt_start > BT_END(bt)) {
1763 			printf("corrupted tag\n");
1764 			bt_dump(bt, vmem_printf);
1765 			return false;
1766 		}
1767 	}
1768 	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1769 		if (bt->bt_type == BT_TYPE_CURSOR) {
1770 			if (bt->bt_start != 0 || bt->bt_size != 0) {
1771 				printf("corrupted cursor\n");
1772 				return false;
1773 			}
1774 			continue;
1775 		}
1776 		TAILQ_FOREACH(bt2, &vm->vm_seglist, bt_seglist) {
1777 			if (bt == bt2) {
1778 				continue;
1779 			}
1780 			if (bt2->bt_type == BT_TYPE_CURSOR) {
1781 				continue;
1782 			}
1783 			if (BT_ISSPAN_P(bt) != BT_ISSPAN_P(bt2)) {
1784 				continue;
1785 			}
1786 			if (bt->bt_start <= BT_END(bt2) &&
1787 			    bt2->bt_start <= BT_END(bt)) {
1788 				printf("overwrapped tags\n");
1789 				bt_dump(bt, vmem_printf);
1790 				bt_dump(bt2, vmem_printf);
1791 				return false;
1792 			}
1793 		}
1794 	}
1795 
1796 	return true;
1797 }
1798 
1799 static void
1800 vmem_check(vmem_t *vm)
1801 {
1802 
1803 	if (!vmem_check_sanity(vm)) {
1804 		panic("insanity vmem %p", vm);
1805 	}
1806 }
1807 
1808 #endif /* defined(DIAGNOSTIC) */
1809