Lines matching full:vm — FreeBSD sys/kern/subr_vmem.c, the vmem(9) general-purpose resource allocator

64 #include <vm/uma.h>
65 #include <vm/vm.h>
66 #include <vm/pmap.h>
67 #include <vm/vm_map.h>
68 #include <vm/vm_object.h>
69 #include <vm/vm_kern.h>
70 #include <vm/vm_extern.h>
71 #include <vm/vm_param.h>
72 #include <vm/vm_page.h>
73 #include <vm/vm_pageout.h>
74 #include <vm/vm_phys.h>
75 #include <vm/vm_pagequeue.h>
76 #include <vm/uma_int.h>
197 #define VMEM_CONDVAR_INIT(vm, wchan) cv_init(&vm->vm_cv, wchan)
198 #define VMEM_CONDVAR_DESTROY(vm) cv_destroy(&vm->vm_cv)
199 #define VMEM_CONDVAR_WAIT(vm) cv_wait(&vm->vm_cv, &vm->vm_lock)
200 #define VMEM_CONDVAR_BROADCAST(vm) cv_broadcast(&vm->vm_cv)
202 #define VMEM_LOCK(vm) mtx_lock(&vm->vm_lock)
203 #define VMEM_TRYLOCK(vm) mtx_trylock(&vm->vm_lock)
204 #define VMEM_UNLOCK(vm) mtx_unlock(&vm->vm_lock)
205 #define VMEM_LOCK_INIT(vm, name) mtx_init(&vm->vm_lock, (name), NULL, MTX_DEF)
206 #define VMEM_LOCK_DESTROY(vm) mtx_destroy(&vm->vm_lock)
207 #define VMEM_ASSERT_LOCKED(vm) mtx_assert(&vm->vm_lock, MA_OWNED)
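
The macros above pair each arena's mutex (vm_lock) with a condition variable (vm_cv). A minimal sketch of the sleeping-waiter idiom they enable, using only fields visible in this listing (vmem_wait_example is hypothetical, not a function in this file):

    static void
    vmem_wait_example(vmem_t *vm)
    {

        VMEM_LOCK(vm);
        /* Sleep until a vmem_free()/vmem_add() broadcasts vm_cv. */
        while (vm->vm_size - vm->vm_inuse == 0)
            VMEM_CONDVAR_WAIT(vm);    /* drops vm_lock while asleep */
        /* ... carve out an allocation while still holding vm_lock ... */
        VMEM_UNLOCK(vm);
    }
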
266 _bt_fill(vmem_t *vm, int flags)
270 VMEM_ASSERT_LOCKED(vm);
277 if (vm != kernel_arena && vm->vm_arg != kernel_arena)
286 while (vm->vm_nfreetags < BT_MAXALLOC) {
290 VMEM_UNLOCK(vm);
292 VMEM_LOCK(vm);
296 LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
297 vm->vm_nfreetags++;
300 if (vm->vm_nfreetags < BT_MAXALLOC)
307 bt_fill(vmem_t *vm, int flags)
309 if (vm->vm_nfreetags >= BT_MAXALLOC)
311 return (_bt_fill(vm, flags));
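
bt_fill() guarantees BT_MAXALLOC free boundary tags before any segment surgery starts, because allocating a tag can itself sleep. A condensed sketch of the unlock/allocate/relock/recheck pattern in _bt_fill() (the UMA call is elided by the match; vmem_bt_zone is the tag zone defined elsewhere in this file, and the kernel_arena special case at line 277 is omitted here):

    static int
    bt_fill_sketch(vmem_t *vm, int flags)
    {
        bt_t *bt;

        VMEM_ASSERT_LOCKED(vm);
        while (vm->vm_nfreetags < BT_MAXALLOC) {
            /* Tag allocation may sleep; never hold vm_lock across it. */
            VMEM_UNLOCK(vm);
            bt = uma_zalloc(vmem_bt_zone, flags);
            VMEM_LOCK(vm);
            if (bt == NULL)
                break;        /* M_NOWAIT allocation failed */
            LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
            vm->vm_nfreetags++;
        }
        /* The lock was dropped, so the final state must be re-checked. */
        return (vm->vm_nfreetags < BT_MAXALLOC ? ENOMEM : 0);
    }
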
318 bt_alloc(vmem_t *vm)
322 VMEM_ASSERT_LOCKED(vm);
323 bt = LIST_FIRST(&vm->vm_freetags);
326 vm->vm_nfreetags--;
336 bt_freetrim(vmem_t *vm, int freelimit)
342 VMEM_ASSERT_LOCKED(vm);
343 while (vm->vm_nfreetags > freelimit) {
344 bt = LIST_FIRST(&vm->vm_freetags);
346 vm->vm_nfreetags--;
349 VMEM_UNLOCK(vm);
357 bt_free(vmem_t *vm, bt_t *bt)
360 VMEM_ASSERT_LOCKED(vm);
361 MPASS(LIST_FIRST(&vm->vm_freetags) != bt);
362 LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
363 vm->vm_nfreetags++;
371 bt_save(vmem_t *vm)
373 KASSERT(vm->vm_nfreetags >= BT_MAXALLOC,
374 ("%s: insufficient free tags %d", __func__, vm->vm_nfreetags));
375 vm->vm_nfreetags -= BT_MAXALLOC;
379 bt_restore(vmem_t *vm)
381 vm->vm_nfreetags += BT_MAXALLOC;
398 bt_freehead_tofree(vmem_t *vm, vmem_size_t size)
400 const vmem_size_t qsize = size >> vm->vm_quantum_shift;
404 MPASS((size & vm->vm_quantum_mask) == 0);
408 return &vm->vm_freelist[idx];
420 bt_freehead_toalloc(vmem_t *vm, vmem_size_t size, int strat)
422 const vmem_size_t qsize = size >> vm->vm_quantum_shift;
426 MPASS((size & vm->vm_quantum_mask) == 0);
435 return &vm->vm_freelist[idx];
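
Both helpers convert a byte size to quanta and then to a power-of-two freelist order; the to-alloc variant bumps the order for M_BESTFIT so that any tag found on the chosen list is guaranteed large enough. A worked example, assuming SIZE2ORDER(q) == flsl(q) - 1 and a 4 KiB quantum:

    /*
     * 20 KiB with vm_quantum_shift == 12 is qsize == 5 quanta.
     * Freeing: flsl(5) - 1 == 2, so the tag is filed on
     * vm_freelist[2], which holds sizes in [4, 8) quanta.
     * M_BESTFIT allocation: 5 is not a power of two, so the search
     * starts one order higher, at vm_freelist[3] ([8, 16) quanta),
     * where every member can satisfy the request.
     */
    vmem_size_t qsize = (20 * 1024) >> 12;              /* 5 */
    int tofree_idx = flsl(qsize) - 1;                   /* 2 */
    int toalloc_idx = tofree_idx +
        ((qsize & (qsize - 1)) != 0);                   /* 3 */
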
441 bt_hashhead(vmem_t *vm, vmem_addr_t addr)
447 list = &vm->vm_hashlist[hash % vm->vm_hashsize];
453 bt_lookupbusy(vmem_t *vm, vmem_addr_t addr)
458 VMEM_ASSERT_LOCKED(vm);
459 list = bt_hashhead(vm, addr);
470 bt_rembusy(vmem_t *vm, bt_t *bt)
473 VMEM_ASSERT_LOCKED(vm);
474 MPASS(vm->vm_nbusytag > 0);
475 vm->vm_inuse -= bt->bt_size;
476 vm->vm_nbusytag--;
481 bt_insbusy(vmem_t *vm, bt_t *bt)
485 VMEM_ASSERT_LOCKED(vm);
488 list = bt_hashhead(vm, bt->bt_start);
490 vm->vm_nbusytag++;
491 vm->vm_inuse += bt->bt_size;
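
Busy (allocated) tags can only be found again by their start address, through the vm_hashlist table. A sketch of the chain walk bt_lookupbusy() performs (bt_hashlist and BT_END() are the tag's hash linkage and inclusive end address, assumed from the rest of this file):

    static bt_t *
    bt_lookupbusy_sketch(vmem_t *vm, vmem_addr_t addr)
    {
        bt_t *bt;

        VMEM_ASSERT_LOCKED(vm);
        LIST_FOREACH(bt, bt_hashhead(vm, addr), bt_hashlist) {
            if (bt->bt_start <= addr && addr <= BT_END(bt))
                break;      /* addr falls inside this busy tag */
        }
        return (bt);        /* NULL when the chain is exhausted */
    }
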
497 bt_remseg(vmem_t *vm, bt_t *bt)
501 TAILQ_REMOVE(&vm->vm_seglist, bt, bt_seglist);
502 bt_free(vm, bt);
506 bt_insseg(vmem_t *vm, bt_t *bt, bt_t *prev)
509 TAILQ_INSERT_AFTER(&vm->vm_seglist, prev, bt, bt_seglist);
513 bt_insseg_tail(vmem_t *vm, bt_t *bt)
516 TAILQ_INSERT_TAIL(&vm->vm_seglist, bt, bt_seglist);
520 bt_remfree(vmem_t *vm __unused, bt_t *bt)
529 bt_insfree(vmem_t *vm, bt_t *bt)
533 list = bt_freehead_tofree(vm, bt->bt_size);
579 qc_init(vmem_t *vm, vmem_size_t qcache_max)
586 MPASS((qcache_max & vm->vm_quantum_mask) == 0);
587 qcache_idx_max = MIN(qcache_max >> vm->vm_quantum_shift,
589 vm->vm_qcache_max = qcache_idx_max << vm->vm_quantum_shift;
591 qc = &vm->vm_qcache[i];
592 size = (i + 1) << vm->vm_quantum_shift;
594 vm->vm_name, size);
595 qc->qc_vmem = vm;
604 qc_destroy(vmem_t *vm)
609 qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
611 uma_zdestroy(vm->vm_qcache[i].qc_cache);
615 qc_drain(vmem_t *vm)
620 qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
622 uma_zone_reclaim(vm->vm_qcache[i].qc_cache, UMA_RECLAIM_DRAIN);
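
Each quantum cache is a per-size UMA cache zone, so allocations at or below vm_qcache_max can skip the arena mutex on the fast path. The zone creation inside qc_init() does not itself contain the token vm and is elided above; its approximate shape, assuming UMA's cache-zone KPI:

    /* One zone per multiple of the quantum, backed by the arena. */
    qc->qc_cache = uma_zcache_create(qc->qc_name, size,
        NULL, NULL, NULL, NULL,         /* no ctor/dtor/zinit/zfini */
        qc_import, qc_release,          /* refill via vmem_xalloc/xfree */
        qc, 0);
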
726 vmem_rehash(vmem_t *vm, vmem_size_t newhashsize)
743 VMEM_LOCK(vm);
744 oldhashlist = vm->vm_hashlist;
745 oldhashsize = vm->vm_hashsize;
746 vm->vm_hashlist = newhashlist;
747 vm->vm_hashsize = newhashsize;
749 VMEM_UNLOCK(vm);
754 bt_rembusy(vm, bt);
755 bt_insbusy(vm, bt);
758 VMEM_UNLOCK(vm);
760 if (oldhashlist != vm->vm_hash0)
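
The tags themselves are re-filed rather than reallocated: once vm_hashlist points at the new table, every busy tag is removed and reinserted so it lands in the correct new bucket. A sketch of the migration loop elided between lines 749 and 758:

    for (i = 0; i < oldhashsize; i++) {
        while ((bt = LIST_FIRST(&oldhashlist[i])) != NULL) {
            bt_rembusy(vm, bt);     /* unlink from the old chain */
            bt_insbusy(vm, bt);     /* rehash into vm_hashlist */
        }
    }
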
776 vmem_t *vm;
781 LIST_FOREACH(vm, &vmem_list, vm_alllist) {
785 VMEM_LOCK(vm);
786 vmem_check(vm);
787 VMEM_UNLOCK(vm);
790 desired = 1 << flsl(vm->vm_nbusytag);
793 current = vm->vm_hashsize;
797 vmem_rehash(vm, desired);
803 VMEM_CONDVAR_BROADCAST(vm);
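
The periodic task sizes the table from the busy-tag count, rounding up to a power of two before comparing with the current size (the MIN/MAX clamp to VMEM_HASHSIZE_MIN/MAX is elided by the match). Sizing example:

    /* vm_nbusytag == 700: flsl(700) == 10, desired == 1 << 10 == 1024. */
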
824 vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int type)
828 VMEM_ASSERT_LOCKED(vm);
830 MPASS((size & vm->vm_quantum_mask) == 0);
832 if (vm->vm_releasefn == NULL) {
840 btprev = TAILQ_LAST(&vm->vm_seglist, vmem_seglist);
850 btspan = bt_alloc(vm);
854 bt_insseg_tail(vm, btspan);
857 btfree = bt_alloc(vm);
861 bt_insseg_tail(vm, btfree);
862 bt_insfree(vm, btfree);
864 bt_remfree(vm, btprev);
866 bt_insfree(vm, btprev);
869 vm->vm_size += size;
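
The resulting segment-list layout: a span tag records the imported region and is immediately followed by a free tag covering the same range. When the arena has no release function (the check at line 832), a new region that directly extends the last free segment is merged into it instead, and no span tag is created. Illustration (inferred from the tag types above):

    /*
     * seglist: ... | SPAN [addr, addr+size) | FREE [addr, addr+size) | ...
     * or, when releasefn == NULL and the region is contiguous:
     *     btprev->bt_size += size;
     */
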
873 vmem_destroy1(vmem_t *vm)
880 qc_destroy(vm);
885 VMEM_LOCK(vm);
886 MPASS(vm->vm_nbusytag == 0);
888 TAILQ_REMOVE(&vm->vm_seglist, &vm->vm_cursor, bt_seglist);
889 while ((bt = TAILQ_FIRST(&vm->vm_seglist)) != NULL)
890 bt_remseg(vm, bt);
892 if (vm->vm_hashlist != NULL && vm->vm_hashlist != vm->vm_hash0)
893 free(vm->vm_hashlist, M_VMEM);
895 bt_freetrim(vm, 0);
897 VMEM_CONDVAR_DESTROY(vm);
898 VMEM_LOCK_DESTROY(vm);
899 uma_zfree(vmem_zone, vm);
903 vmem_import(vmem_t *vm, vmem_size_t size, vmem_size_t align, int flags)
908 if (vm->vm_importfn == NULL)
915 if (align != vm->vm_quantum_mask + 1)
917 size = roundup(size, vm->vm_import_quantum);
919 if (vm->vm_limit != 0 && vm->vm_limit < vm->vm_size + size)
922 bt_save(vm);
923 VMEM_UNLOCK(vm);
924 error = (vm->vm_importfn)(vm->vm_arg, size, flags, &addr);
925 VMEM_LOCK(vm);
926 bt_restore(vm);
930 vmem_add1(vm, addr, size, BT_TYPE_SPAN);
992 vmem_clip(vmem_t *vm, bt_t *bt, vmem_addr_t start, vmem_size_t size)
997 VMEM_ASSERT_LOCKED(vm);
1000 bt_remfree(vm, bt);
1002 btprev = bt_alloc(vm);
1008 bt_insfree(vm, btprev);
1009 bt_insseg(vm, btprev,
1013 if (bt->bt_size != size && bt->bt_size - size > vm->vm_quantum_mask) {
1015 btnew = bt_alloc(vm);
1021 bt_insfree(vm, bt);
1022 bt_insseg(vm, btnew,
1024 bt_insbusy(vm, btnew);
1028 bt_insbusy(vm, bt);
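
vmem_clip() carves the requested busy range out of one free tag, allocating at most two tags for the remainders. An illustration of the cases, inferred from the calls above:

    /*
     * before:  |------------------ free bt ------------------|
     * after:   |- btprev -|== busy [start, start+size) ==|- btnew -|
     *
     * btprev exists only when start > bt->bt_start; btnew exists
     * only when the tail remainder exceeds vm_quantum_mask (the
     * test at line 1013), otherwise the slack stays in the busy tag.
     */
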
1034 vmem_try_fetch(vmem_t *vm, const vmem_size_t size, vmem_size_t align, int flags)
1038 VMEM_ASSERT_LOCKED(vm);
1045 if (vmem_import(vm, size, align, flags) == 0)
1052 if (vm->vm_qcache_max != 0 || vm->vm_reclaimfn != NULL) {
1053 avail = vm->vm_size - vm->vm_inuse;
1054 bt_save(vm);
1055 VMEM_UNLOCK(vm);
1056 if (vm->vm_qcache_max != 0)
1057 qc_drain(vm);
1058 if (vm->vm_reclaimfn != NULL)
1059 vm->vm_reclaimfn(vm, flags);
1060 VMEM_LOCK(vm);
1061 bt_restore(vm);
1063 if (vm->vm_size - vm->vm_inuse > avail)
1068 bt_save(vm);
1069 VMEM_CONDVAR_WAIT(vm);
1070 bt_restore(vm);
1075 vmem_try_release(vmem_t *vm, struct vmem_btag *bt, const bool remfree)
1081 if (vm->vm_releasefn == NULL)
1096 bt_remfree(vm, bt);
1097 bt_remseg(vm, bt);
1098 bt_remseg(vm, prev);
1099 vm->vm_size -= spansize;
1100 VMEM_CONDVAR_BROADCAST(vm);
1101 bt_freetrim(vm, BT_MAXFREE);
1102 vm->vm_releasefn(vm->vm_arg, spanaddr, spansize);
1109 vmem_xalloc_nextfit(vmem_t *vm, const vmem_size_t size, vmem_size_t align,
1117 VMEM_LOCK(vm);
1122 if (bt_fill(vm, flags) != 0)
1130 for (cursor = &vm->vm_cursor, bt = TAILQ_NEXT(cursor, bt_seglist);
1133 bt = TAILQ_FIRST(&vm->vm_seglist);
1137 vmem_clip(vm, bt, *addrp, size);
1152 bt_remfree(vm, next);
1153 bt_remseg(vm, next);
1162 vmem_clip(vm, prev, *addrp, size);
1165 (void)vmem_try_release(vm, prev, true);
1172 TAILQ_REMOVE(&vm->vm_seglist, cursor, bt_seglist);
1179 TAILQ_INSERT_HEAD(&vm->vm_seglist, cursor, bt_seglist);
1186 if (error == ENOMEM && vmem_try_fetch(vm, size, align, flags))
1190 VMEM_UNLOCK(vm);
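
Next-fit threads a zero-sized cursor tag (BT_TYPE_CURSOR) through vm_seglist and resumes each search just past it, so successive M_NEXTFIT allocations rotate through the arena and delay address reuse. An illustrative caller (any arena; sketch only):

    vmem_addr_t addr;

    if (vmem_alloc(vm, PAGE_SIZE, M_NOWAIT | M_NEXTFIT, &addr) == 0)
        vmem_free(vm, addr, PAGE_SIZE);
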
1197 vmem_set_import(vmem_t *vm, vmem_import_t *importfn,
1201 VMEM_LOCK(vm);
1202 KASSERT(vm->vm_size == 0, ("%s: arena is non-empty", __func__));
1203 vm->vm_importfn = importfn;
1204 vm->vm_releasefn = releasefn;
1205 vm->vm_arg = arg;
1206 vm->vm_import_quantum = import_quantum;
1207 VMEM_UNLOCK(vm);
1211 vmem_set_limit(vmem_t *vm, vmem_size_t limit)
1214 VMEM_LOCK(vm);
1215 vm->vm_limit = limit;
1216 VMEM_UNLOCK(vm);
1220 vmem_set_reclaim(vmem_t *vm, vmem_reclaim_t *reclaimfn)
1223 VMEM_LOCK(vm);
1224 vm->vm_reclaimfn = reclaimfn;
1225 VMEM_UNLOCK(vm);
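
These setters configure an arena after vmem_create(); note the assertion at line 1202 that the import function must be installed while the arena is still empty. A hedged configuration sketch that carves a sub-arena out of kernel_arena in 1 MiB chunks (all "foo" names are invented for illustration):

    #include <sys/param.h>
    #include <sys/malloc.h>         /* M_WAITOK, M_BESTFIT */
    #include <sys/vmem.h>
    #include <vm/vm.h>
    #include <vm/vm_kern.h>         /* kernel_arena */

    static vmem_t *foo_arena;       /* hypothetical sub-arena */

    static int
    foo_import(void *arg, vmem_size_t size, int flags, vmem_addr_t *addrp)
    {
        /* Pull a chunk of KVA from the parent arena passed as arg. */
        return (vmem_alloc(arg, size, flags | M_BESTFIT, addrp));
    }

    static void
    foo_release(void *arg, vmem_addr_t addr, vmem_size_t size)
    {
        vmem_free(arg, addr, size);
    }

    static void
    foo_arena_init(void)
    {
        /* Created empty: vmem_set_import() requires vm_size == 0. */
        foo_arena = vmem_create("foo arena", 0, 0, PAGE_SIZE, 0, M_WAITOK);
        vmem_set_import(foo_arena, foo_import, foo_release, kernel_arena,
            1024 * 1024);                         /* 1 MiB import quantum */
        vmem_set_limit(foo_arena, 64 * 1024 * 1024);  /* cap at 64 MiB */
    }
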
1232 vmem_init(vmem_t *vm, const char *name, vmem_addr_t base, vmem_size_t size,
1240 bzero(vm, sizeof(*vm));
1242 VMEM_CONDVAR_INIT(vm, name);
1243 VMEM_LOCK_INIT(vm, name);
1244 vm->vm_nfreetags = 0;
1245 LIST_INIT(&vm->vm_freetags);
1246 strlcpy(vm->vm_name, name, sizeof(vm->vm_name));
1247 vm->vm_quantum_mask = quantum - 1;
1248 vm->vm_quantum_shift = flsl(quantum) - 1;
1249 vm->vm_nbusytag = 0;
1250 vm->vm_size = 0;
1251 vm->vm_limit = 0;
1252 vm->vm_inuse = 0;
1253 qc_init(vm, qcache_max);
1255 TAILQ_INIT(&vm->vm_seglist);
1256 vm->vm_cursor.bt_start = vm->vm_cursor.bt_size = 0;
1257 vm->vm_cursor.bt_type = BT_TYPE_CURSOR;
1258 TAILQ_INSERT_TAIL(&vm->vm_seglist, &vm->vm_cursor, bt_seglist);
1261 LIST_INIT(&vm->vm_freelist[i]);
1263 memset(&vm->vm_hash0, 0, sizeof(vm->vm_hash0));
1264 vm->vm_hashsize = VMEM_HASHSIZE_MIN;
1265 vm->vm_hashlist = vm->vm_hash0;
1268 if (vmem_add(vm, base, size, flags) != 0) {
1269 vmem_destroy1(vm);
1275 LIST_INSERT_HEAD(&vmem_list, vm, vm_alllist);
1278 return vm;
1289 vmem_t *vm;
1291 vm = uma_zalloc(vmem_zone, flags & (M_WAITOK|M_NOWAIT));
1292 if (vm == NULL)
1294 if (vmem_init(vm, name, base, size, quantum, qcache_max,
1297 return (vm);
1301 vmem_destroy(vmem_t *vm)
1305 LIST_REMOVE(vm, vm_alllist);
1308 vmem_destroy1(vm);
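
vmem_create() draws the vmem_t from vmem_zone and defers to vmem_init() above; vmem_destroy() is the matching teardown. A minimal lifecycle sketch, using the arena as a plain numeric-ID allocator (a standard vmem use; names are illustrative):

    vmem_t *ids;
    vmem_addr_t id;

    /* IDs 1..1000, quantum 1 (no rounding), no quantum cache. */
    ids = vmem_create("example ids", 1, 1000, 1, 0, M_WAITOK);
    if (vmem_alloc(ids, 1, M_WAITOK | M_FIRSTFIT, &id) == 0) {
        /* ... use id ... */
        vmem_free(ids, id, 1);
    }
    vmem_destroy(ids);
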
1312 vmem_roundup_size(vmem_t *vm, vmem_size_t size)
1315 return (size + vm->vm_quantum_mask) & ~vm->vm_quantum_mask;
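
The rounding works because the quantum is a power of two, so vm_quantum_mask is quantum - 1. Worked example:

    /* quantum 4096: mask 0xfff; (5000 + 0xfff) & ~0xfff == 8192. */
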
1322 vmem_alloc(vmem_t *vm, vmem_size_t size, int flags, vmem_addr_t *addrp)
1333 if (size <= vm->vm_qcache_max) {
1339 qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift];
1346 return (vmem_xalloc(vm, size, 0, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
1351 vmem_xalloc(vmem_t *vm, const vmem_size_t size0, vmem_size_t align,
1356 const vmem_size_t size = vmem_roundup_size(vm, size0);
1372 MPASS((align & vm->vm_quantum_mask) == 0);
1374 MPASS((phase & vm->vm_quantum_mask) == 0);
1375 MPASS((nocross & vm->vm_quantum_mask) == 0);
1385 align = vm->vm_quantum_mask + 1;
1392 return (vmem_xalloc_nextfit(vm, size0, align, phase, nocross,
1395 end = &vm->vm_freelist[VMEM_MAXORDER];
1399 first = bt_freehead_toalloc(vm, size, strat);
1400 VMEM_LOCK(vm);
1405 error = bt_fill(vm, flags);
1421 vmem_clip(vm, bt, *addrp, size);
1436 first = bt_freehead_toalloc(vm, size, strat);
1445 if (!vmem_try_fetch(vm, size, align, flags)) {
1451 VMEM_UNLOCK(vm);
1462 vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
1467 if (size <= vm->vm_qcache_max &&
1469 qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift];
1472 vmem_xfree(vm, addr, size);
1476 vmem_xfree(vmem_t *vm, vmem_addr_t addr, vmem_size_t size __unused)
1483 VMEM_LOCK(vm);
1484 bt = bt_lookupbusy(vm, addr);
1487 MPASS(bt->bt_size == vmem_roundup_size(vm, size) ||
1488 bt->bt_size - vmem_roundup_size(vm, size) <= vm->vm_quantum_mask);
1490 bt_rembusy(vm, bt);
1498 bt_remfree(vm, t);
1499 bt_remseg(vm, t);
1506 bt_remfree(vm, t);
1507 bt_remseg(vm, t);
1510 if (!vmem_try_release(vm, bt, false)) {
1511 bt_insfree(vm, bt);
1512 VMEM_CONDVAR_BROADCAST(vm);
1513 bt_freetrim(vm, BT_MAXFREE);
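
vmem_xalloc() exposes the full constraint set: alignment, phase, a power-of-two nocross boundary, and a [minaddr, maxaddr] window. A hedged sketch of a constrained allocation (constraint values are illustrative, e.g. for a DMA-style window):

    vmem_addr_t addr;
    int error;

    /* 64 KiB, 64 KiB aligned, not crossing 1 MiB, below 4 GiB. */
    error = vmem_xalloc(vm, 64 * 1024, 64 * 1024, 0, 1024 * 1024,
        VMEM_ADDR_MIN, 0xffffffffUL, M_NOWAIT | M_BESTFIT, &addr);
    if (error == 0)
        vmem_xfree(vm, addr, 64 * 1024);
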
1522 vmem_add(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int flags)
1528 VMEM_LOCK(vm);
1529 error = bt_fill(vm, flags);
1531 vmem_add1(vm, addr, size, BT_TYPE_SPAN_STATIC);
1532 VMEM_UNLOCK(vm);
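
Spans added this way are tagged BT_TYPE_SPAN_STATIC (line 1531), so vmem_destroy() never hands them to the release function. Usage sketch (addresses illustrative); an arena may manage several disjoint regions:

    if (vmem_add(vm, 0x10000000, 0x01000000, M_NOWAIT) != 0)
        printf("vmem_add: could not reserve boundary tags\n");
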
1541 vmem_size(vmem_t *vm, int typemask)
1547 return vm->vm_inuse;
1549 return vm->vm_size - vm->vm_inuse;
1551 return vm->vm_size;
1553 VMEM_LOCK(vm);
1555 if (LIST_EMPTY(&vm->vm_freelist[i]))
1557 VMEM_UNLOCK(vm);
1559 vm->vm_quantum_shift);
1561 VMEM_UNLOCK(vm);
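
The typemask selects which counter is reported; VMEM_MAXFREE instead walks the freelists under the lock (the loop above) and reports the size order of the highest non-empty list, a lower bound on the largest contiguous free run. Usage sketch:

    vmem_size_t used, avail, total, largest;

    used = vmem_size(vm, VMEM_ALLOC);
    avail = vmem_size(vm, VMEM_FREE);
    total = vmem_size(vm, VMEM_ALLOC | VMEM_FREE);
    largest = vmem_size(vm, VMEM_MAXFREE);      /* order of biggest run */
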
1606 vmem_dump(const vmem_t *vm, int (*pr)(const char *, ...) __printflike(1, 2))
1611 (*pr)("vmem %p '%s'\n", vm, vm->vm_name);
1612 TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1617 const struct vmem_freelist *fl = &vm->vm_freelist[i];
1636 vmem_whatis_lookup(vmem_t *vm, vmem_addr_t addr)
1640 TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1655 vmem_t *vm;
1657 LIST_FOREACH(vm, &vmem_list, vm_alllist) {
1660 bt = vmem_whatis_lookup(vm, addr);
1666 (vmem_size_t)(addr - bt->bt_start), vm->vm_name,
1674 const vmem_t *vm;
1676 LIST_FOREACH(vm, &vmem_list, vm_alllist) {
1677 vmem_dump(vm, pr);
1684 const vmem_t *vm = (const void *)addr;
1686 vmem_dump(vm, pr);
1702 const vmem_t *vm;
1704 LIST_FOREACH(vm, &vmem_list, vm_alllist)
1705 vmem_dump(vm, db_printf);
1710 const vmem_t *vm = (const void *)addr;
1721 db_printf("vmem %p '%s'\n", vm, vm->vm_name);
1722 db_printf("\tquantum:\t%zu\n", vm->vm_quantum_mask + 1);
1723 db_printf("\tsize:\t%zu\n", vm->vm_size);
1724 db_printf("\tinuse:\t%zu\n", vm->vm_inuse);
1725 db_printf("\tfree:\t%zu\n", vm->vm_size - vm->vm_inuse);
1726 db_printf("\tbusy tags:\t%d\n", vm->vm_nbusytag);
1727 db_printf("\tfree tags:\t%d\n", vm->vm_nfreetags);
1733 TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1734 ord = SIZE2ORDER(bt->bt_size >> vm->vm_quantum_shift);
1748 ORDER2SIZE(ord) << vm->vm_quantum_shift,
1755 const vmem_t *vm;
1757 LIST_FOREACH(vm, &vmem_list, vm_alllist)
1758 vmem_summ((db_expr_t)vm, TRUE, count, modif);
1767 vmem_check_sanity(vmem_t *vm)
1771 MPASS(vm != NULL);
1773 TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1780 TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1788 TAILQ_FOREACH(bt2, &vm->vm_seglist, bt_seglist) {
1812 vmem_check(vmem_t *vm)
1815 if (!vmem_check_sanity(vm)) {
1816 panic("insanity vmem %p", vm);
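
The checks themselves are elided by the match: every tag must satisfy bt_start <= BT_END(bt), and no two non-cursor segments may overlap. The overlap test amounts to the standard interval predicate (sketch; BT_END() is the tag's inclusive end address):

    static bool
    bt_overlaps(const bt_t *a, const bt_t *b)
    {
        return (a->bt_start <= BT_END(b) && b->bt_start <= BT_END(a));
    }
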