Lines matching +full:alc +full:-enable in FreeBSD's superpage reservation
module, sys/vm/vm_reserv.c.  Elided context between matches is marked "...".

/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007-2011 Alan L. Cox <alc@cs.rice.edu>
 * ...
 */
/*
 * Temporarily simulate two-level reservations.  Effectively, VM_LEVEL_0_* is
 * now VM_LEVEL_1_*.
 */
/* Index of the small page for (object, pindex) within its reservation. */
#define VM_RESERV_INDEX(object, pindex) \
    (((object)->pg_color + (pindex)) & (VM_LEVEL_0_NPAGES - 1))
/*
 * Synchronization annotations used on the fields below:
 *
 *      c - constant after boot
 *      d - vm_reserv_domain_lock
 *      o - vm_reserv_object_lock
 *      r - vm_reserv_lock
 *      s - vm_reserv_domain_scan_lock
 */
struct vm_reserv {
        ...
        TAILQ_ENTRY(vm_reserv) partpopq;        /* (d, r) per-domain queue. */
        ...
};

#define vm_reserv_lockptr(rv)   (&(rv)->lock)
/*
 * The reservation array
 *
 * ... These "invalid" reservation structures exist to trade off space for
 * time in the implementation of vm_reserv_from_page().
 */

/*
 * An "active" reservation is a valid reservation structure that has a
 * non-NULL "object" field and a non-zero "popcnt" field.  In other words,
 * every active reservation belongs to a particular object.
 */

/*
 * The per-domain partially populated reservation queues
 *
 * These queues enable the fast recovery of an unused free small page from a
 * partially populated reservation.
 *
 * Access to this queue is synchronized by the per-domain reservation lock.
 * Threads reclaiming free pages from the queue must hold the per-domain scan
 * lock.
 */
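
/*
 * Editor's sketch: the queue discipline described above, reduced to a
 * minimal userspace program.  The "toy_reserv" type, its field names, and
 * TOY_NPAGES are illustrative assumptions, not the kernel's structures;
 * only the policy (queue while partially populated, dequeue when empty or
 * full) mirrors the code excerpted below.
 */
#include <sys/queue.h>
#include <assert.h>

#define TOY_NPAGES      512     /* hypothetical pages per reservation */

struct toy_reserv {
        TAILQ_ENTRY(toy_reserv) partpopq;       /* queue linkage */
        int popcnt;                             /* populated small pages */
        int inpartpopq;                         /* currently on the queue? */
};

TAILQ_HEAD(toy_partpop, toy_reserv);

/* Requeue at the tail after a population change, keeping LRU order. */
static void
toy_requeue(struct toy_partpop *q, struct toy_reserv *rv)
{
        if (rv->inpartpopq) {
                TAILQ_REMOVE(q, rv, partpopq);
                rv->inpartpopq = 0;
        }
        if (rv->popcnt > 0 && rv->popcnt < TOY_NPAGES) {
                TAILQ_INSERT_TAIL(q, rv, partpopq);
                rv->inpartpopq = 1;
        }
}

int
main(void)
{
        struct toy_partpop q = TAILQ_HEAD_INITIALIZER(q);
        struct toy_reserv rv = { .popcnt = 1 };

        toy_requeue(&q, &rv);           /* partially populated: queued */
        assert(TAILQ_FIRST(&q) == &rv);
        rv.popcnt = TOY_NPAGES;
        toy_requeue(&q, &rv);           /* fully populated: dequeued */
        assert(TAILQ_EMPTY(&q));
        return (0);
}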
/* sysctl_vm_reserv_fullpop(): count fully populated reservations. */
                paddr = roundup2(seg->start, VM_LEVEL_0_SIZE);
                ...
                rv = seg->first_reserv + (paddr >> VM_LEVEL_0_SHIFT) -
                    (seg->start >> VM_LEVEL_0_SHIFT);
                while (paddr + VM_LEVEL_0_SIZE > paddr && paddr +
                    VM_LEVEL_0_SIZE <= seg->end) {
                        fullpop += rv->popcnt == VM_LEVEL_0_NPAGES;
                        ...
                }
/* sysctl_vm_reserv_partpopq(): report the partially populated queues. */
        for (level = -1; level <= VM_NRESERVLEVEL - 2; level++) {
                ...
                unused_pages += VM_LEVEL_0_NPAGES - rv->popcnt;
                ...
        }
/* vm_reserv_remove(): remove a reservation from its object's list. */
        CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
            __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
        KASSERT(rv->object != NULL, ...);
        KASSERT(!rv->inpartpopq, ...);
        object = rv->object;
        ...
        rv->object = NULL;
        ...
/* vm_reserv_insert(): attach a new reservation to an object. */
        CTR6(KTR_VM, "...",
            __FUNCTION__, rv, rv->pages, rv->object, object,
            rv->popcnt);
        KASSERT(rv->object == NULL, ...);
        KASSERT(rv->popcnt == 0, ...);
        KASSERT(!rv->inpartpopq, ...);
        KASSERT(bit_ntest(rv->popmap, 0, VM_LEVEL_0_NPAGES - 1, 0), ...);
        ...
        rv->pindex = pindex;
        rv->object = object;
        rv->lasttick = ticks;
        LIST_INSERT_HEAD(&object->rvq, rv, objq);
/* vm_reserv_is_sublevel_full(): is the sublevel containing "index" fully
 * populated?  (The two returns are alternatives for the two supported
 * sublevel sizes.) */
        /* 16-page sublevel: one 16-bit popmap word. */
        return (((uint16_t *)rv->popmap)[index / 16] == UINT16_MAX);
        ...
        /* 128-page sublevel: two consecutive 64-bit popmap words. */
        return (((uint64_t *)rv->popmap)[index] == UINT64_MAX &&
            ((uint64_t *)rv->popmap)[index + 1] == UINT64_MAX);
/*
 * vm_reserv_depopulate(): reduces the reservation's population count, and
 * moves it to the tail of the partially populated reservation queue if the
 * population count is non-zero.
 */
        CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
            __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
        KASSERT(rv->object != NULL, ...);
        KASSERT(bit_test(rv->popmap, index), ...);
        KASSERT(rv->popcnt > 0, ...);
        KASSERT(rv->domain < vm_ndomains,
            ("vm_reserv_depopulate: reserv %p's domain is corrupted %d",
            rv, rv->domain));
        if (rv->popcnt == VM_LEVEL_0_NPAGES) {
                KASSERT(rv->pages->psind == VM_LEVEL_0_PSIND, ...);
                rv->pages->psind = VM_LEVEL_0_PSIND - 1;
        }
        ...     /* with sublevels: demote the sublevel's leading page */
                rv->pages[rounddown2(index, VM_SUBLEVEL_0_NPAGES)].psind = 0;
        ...
        bit_clear(rv->popmap, index);
        rv->popcnt--;
        if ((unsigned)(ticks - rv->lasttick) >= PARTPOPSLOP ||
            rv->popcnt == 0) {
                vm_reserv_domain_lock(rv->domain);
                if (rv->inpartpopq) {
                        TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq);
                        rv->inpartpopq = FALSE;
                }
                if (rv->popcnt != 0) {
                        rv->inpartpopq = TRUE;
                        TAILQ_INSERT_TAIL(&vm_rvd[rv->domain].partpop, rv,
                            partpopq);
                }
                vm_reserv_domain_unlock(rv->domain);
                rv->lasttick = ticks;
        }
        vmd = VM_DOMAIN(rv->domain);
        if (rv->popcnt == 0) {
                ...
                vm_phys_free_pages(rv->pages, VM_FREEPOOL_DEFAULT, ...);
                ...
        }
/* vm_reserv_from_page(): map a page to its reservation structure. */
        seg = &vm_phys_segs[m->segind];
        return (seg->first_reserv + (VM_PAGE_TO_PHYS(m) >> VM_LEVEL_0_SHIFT) -
            (seg->start >> VM_LEVEL_0_SHIFT));
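
/*
 * Editor's sketch: the lookup above is pure arithmetic; a page's
 * reservation is found by chunk-indexing its physical address relative to
 * the start of its segment.  CHUNK_SHIFT and both addresses below are
 * made-up example values, not the kernel's configuration.
 */
#include <assert.h>
#include <stdint.h>
#include <stddef.h>

#define CHUNK_SHIFT     21      /* hypothetical 2 MB superpage chunk */

static size_t
reserv_index(uint64_t seg_start, uint64_t pa)
{
        /* Index into the segment's slice of the reservation array. */
        return ((size_t)((pa >> CHUNK_SHIFT) - (seg_start >> CHUNK_SHIFT)));
}

int
main(void)
{
        /* Pages in the same 2 MB chunk share one reservation... */
        assert(reserv_index(0x40000000, 0x40001000) ==
            reserv_index(0x40000000, 0x401ff000));
        /* ...and the next chunk maps to the next array entry. */
        assert(reserv_index(0x40000000, 0x40200000) == 1);
        return (0);
}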
/* vm_reserv_from_object(): look up the reservation for (object, pindex),
 * using the resident-page neighbors "mpred"/"msucc" as hints. */
        if (mpred != NULL) {
                KASSERT(mpred->object == object, ...);
                KASSERT(mpred->pindex < pindex, ...);
                rv = vm_reserv_from_page(mpred);
                if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
                        ...
                ...
        } else
                msucc = TAILQ_FIRST(&object->memq);
        if (msucc != NULL) {
                KASSERT(msucc->pindex > pindex, ...);
                rv = vm_reserv_from_page(msucc);
                if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
                        ...
        }
        ...
/* vm_reserv_has_pindex(): does the reservation cover "pindex"? */
        return (((pindex - rv->pindex) & ~(VM_LEVEL_0_NPAGES - 1)) == 0);
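
/*
 * Editor's sketch: the single mask test above is equivalent to the
 * two-sided range check rv->pindex <= pindex < rv->pindex +
 * VM_LEVEL_0_NPAGES, because VM_LEVEL_0_NPAGES is a power of two.  NPAGES
 * below is an assumed example value, not necessarily the kernel's.
 */
#include <assert.h>
#include <stdint.h>

#define NPAGES  512             /* assumed; must be a power of two */

static int
has_pindex(uint64_t rv_pindex, uint64_t pindex)
{
        /* Zero iff pindex falls within NPAGES pages of rv_pindex. */
        return (((pindex - rv_pindex) & ~(uint64_t)(NPAGES - 1)) == 0);
}

int
main(void)
{
        assert(has_pindex(1024, 1024));         /* first page of the range */
        assert(has_pindex(1024, 1535));         /* last page of the range */
        assert(!has_pindex(1024, 1536));        /* one page past the end */
        assert(!has_pindex(1024, 1023));        /* underflow wraps: nonzero */
        return (0);
}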
/* vm_reserv_populate(): set page "index" in the reservation's popmap. */
        CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
            __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
        KASSERT(rv->object != NULL, ...);
        KASSERT(!bit_test(rv->popmap, index), ...);
        KASSERT(rv->popcnt < VM_LEVEL_0_NPAGES, ...);
        KASSERT(rv->pages->psind >= 0 &&
            rv->pages->psind < VM_LEVEL_0_PSIND, ...);
        KASSERT(rv->domain < vm_ndomains,
            ("vm_reserv_populate: reserv %p's domain is corrupted %d",
            rv, rv->domain));
        bit_set(rv->popmap, index);
        ...     /* with sublevels: promote the sublevel's leading page */
                rv->pages[rounddown2(index, VM_SUBLEVEL_0_NPAGES)].psind = 1;
        ...
        rv->popcnt++;
        if ((unsigned)(ticks - rv->lasttick) < PARTPOPSLOP &&
            rv->inpartpopq && rv->popcnt != VM_LEVEL_0_NPAGES)
                return;
        rv->lasttick = ticks;
        vm_reserv_domain_lock(rv->domain);
        if (rv->inpartpopq) {
                TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq);
                rv->inpartpopq = FALSE;
        }
        if (rv->popcnt < VM_LEVEL_0_NPAGES) {
                rv->inpartpopq = TRUE;
                TAILQ_INSERT_TAIL(&vm_rvd[rv->domain].partpop, rv, partpopq);
        } else {
                KASSERT(rv->pages->psind == VM_LEVEL_0_PSIND - 1, ...);
                rv->pages->psind = VM_LEVEL_0_PSIND;
        }
        vm_reserv_domain_unlock(rv->domain);
/*
 * vm_reserv_alloc_contig(): ... if "boundary" is non-zero, then the set of
 * physical pages cannot cross any physical address boundary that is a
 * multiple of that value.
 */
        /* Is a reservation fundamentally impossible? */
        if (pindex < VM_RESERV_INDEX(object, pindex) ||
            pindex + npages > object->size)
                return (NULL);
        ...
        KASSERT(object != kernel_object || rv->domain == domain, ...);
        domain = rv->domain;
        ...
        if (rv->object != object)
                ...
        m = &rv->pages[index];
        ...
        if (!bit_ntest(rv->popmap, index, index + npages - 1, 0))
                ...
        /*
         * Compute the first index to the left ("leftcap") and to the right
         * ("rightcap") between which new reservations could fit.
         */
        first = pindex - VM_RESERV_INDEX(object, pindex);
        ...
        if ((rv = vm_reserv_from_page(mpred))->object != object)
                leftcap = mpred->pindex + 1;
        else
                leftcap = rv->pindex + VM_LEVEL_0_NPAGES;
        ...
        if ((rv = vm_reserv_from_page(msucc))->object != object)
                rightcap = msucc->pindex;
        else
                rightcap = rv->pindex;
        ...
        /* Would the last new reservation extend past the object's end? */
        if ((object->flags & OBJ_ANON) == 0 &&
            first + maxpages > object->size) {
                ...
        }
        ...
        KASSERT(rv->pages == m, ...);
        ...
        n = ulmin(VM_LEVEL_0_NPAGES - index, npages);
        ...
        npages -= n;
        ...
        m_ret = &rv->pages[index];
        ...
        allocpages -= VM_LEVEL_0_NPAGES;
        ...
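
/*
 * Editor's sketch: the "boundary" constraint named in the header comment of
 * vm_reserv_alloc_contig() means a run of physical pages may not cross an
 * address that is a multiple of "boundary".  A standalone check, analogous
 * in spirit to the kernel's tests but not its actual code:
 */
#include <assert.h>
#include <stdint.h>

static int
range_crosses_boundary(uint64_t pa, uint64_t size, uint64_t boundary)
{
        if (boundary == 0)
                return (0);     /* zero means "no boundary constraint" */
        /* Crosses iff the first and last byte fall in different windows. */
        return ((pa / boundary) != ((pa + size - 1) / boundary));
}

int
main(void)
{
        /* A 64 KB run inside one 2 MB window is acceptable. */
        assert(!range_crosses_boundary(0x200000, 0x10000, 0x200000));
        /* The same run straddling a 2 MB multiple is not. */
        assert(range_crosses_boundary(0x1f8000, 0x10000, 0x200000));
        return (0);
}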
/* vm_reserv_alloc_page(): allocate a page at (object, pindex), creating or
 * extending a reservation when possible. */
        /* Is a reservation fundamentally impossible? */
        if (pindex < VM_RESERV_INDEX(object, pindex) ||
            pindex >= object->size)
                return (NULL);
        ...
        KASSERT(object != kernel_object || rv->domain == domain, ...);
        domain = rv->domain;
        ...
        m = &rv->pages[index];
        ...
        if (rv->object != object ||
            ... ||
            bit_test(rv->popmap, index)) {
                ...
        }
        ...
        first = pindex - VM_RESERV_INDEX(object, pindex);
        ...
        if ((rv = vm_reserv_from_page(mpred))->object != object)
                leftcap = mpred->pindex + 1;
        else
                leftcap = rv->pindex + VM_LEVEL_0_NPAGES;
        ...
        if ((rv = vm_reserv_from_page(msucc))->object != object)
                rightcap = msucc->pindex;
        else
                rightcap = rv->pindex;
        ...
        /* Would a new reservation extend past the end of the object? */
        if ((object->flags & OBJ_ANON) == 0 &&
            first + VM_LEVEL_0_NPAGES > object->size)
                return (NULL);
        ...
        KASSERT(rv->pages == m, ...);
        ...
        return (&rv->pages[index]);
/* vm_reserv_break(): break the reservation and return its free pages to the
 * physical allocator. */
        CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
            __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
        ...
        m = rv->pages;
        ...     /* with sublevels: reset each sublevel's leading psind */
        for (; m < rv->pages + VM_LEVEL_0_NPAGES; m += VM_SUBLEVEL_0_NPAGES)
                m->psind = 0;
        /*
         * Find runs of free (clear) popmap bits and hand each run back to
         * the physical allocator.
         */
        pos0 = bit_test(rv->popmap, 0) ? -1 : 0;
        pos1 = -1 - pos0;
        ...
        bit_ff_at(rv->popmap, pos + 1, VM_LEVEL_0_NPAGES, ...);
        if (pos == -1)
                ...
        ...
        vm_domain_free_lock(VM_DOMAIN(rv->domain));
        vm_phys_enqueue_contig(&rv->pages[pos0], VM_FREEPOOL_DEFAULT,
            pos1 - pos0);
        vm_domain_free_unlock(VM_DOMAIN(rv->domain));
        ...
        bit_nclear(rv->popmap, 0, VM_LEVEL_0_NPAGES - 1);
        rv->popcnt = 0;
/* vm_reserv_break_all(): break every reservation belonging to "object". */
        /*
         * This access of object->rvq is unsynchronized so that the
         * ...
         */
        while ((rv = LIST_FIRST(&object->rvq)) != NULL) {
                ...
                if (rv->object != object) {
                        ...
                }
                vm_reserv_domain_lock(rv->domain);
                if (rv->inpartpopq) {
                        TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq);
                        rv->inpartpopq = FALSE;
                }
                vm_reserv_domain_unlock(rv->domain);
                ...
        }
/* vm_reserv_free_page(): if "m" belongs to a reservation, depopulate it. */
        if (rv->object == NULL)
                ...
        ...
        /* Re-validate after lock. */
        if (rv->object != NULL) {
                vm_reserv_depopulate(rv, m - rv->pages);
                ...
        }
        ...
/* vm_reserv_init(): set up the reservation array, per-segment pointers, and
 * per-domain queues at boot. */
                /* Sparse segments take the next free array slice... */
                seg->first_reserv = &vm_reserv_array[used];
                used += howmany(seg->end, VM_LEVEL_0_SIZE) -
                    seg->start / VM_LEVEL_0_SIZE;
                ...
                /* ...dense segments index the array by physical address. */
                seg->first_reserv =
                    &vm_reserv_array[seg->start >> VM_LEVEL_0_SHIFT];
                ...
                paddr = roundup2(seg->start, VM_LEVEL_0_SIZE);
                rv = seg->first_reserv + (paddr >> VM_LEVEL_0_SHIFT) -
                    (seg->start >> VM_LEVEL_0_SHIFT);
                while (paddr + VM_LEVEL_0_SIZE > paddr && paddr +
                    VM_LEVEL_0_SIZE <= seg->end) {
                        rv->pages = PHYS_TO_VM_PAGE(paddr);
                        rv->domain = seg->domain;
                        mtx_init(&rv->lock, "vm reserv", NULL, MTX_DEF);
                        ...
                }
        ...
                mtx_init(&rvd->lock, "vm reserv domain", NULL, MTX_DEF);
                TAILQ_INIT(&rvd->partpop);
                mtx_init(&rvd->marker.lock, "vm reserv marker", NULL, MTX_DEF);
                ...
                /* A "fully populated" marker can never be chosen by a
                 * reclaiming scan. */
                rvd->marker.popcnt = VM_LEVEL_0_NPAGES;
                bit_nset(rvd->marker.popmap, 0, VM_LEVEL_0_NPAGES - 1);
/* vm_reserv_is_page_free(): is "m" unpopulated within its reservation? */
        if (rv->object == NULL)
                ...
        return (!bit_test(rv->popmap, m - rv->pages));
/* vm_reserv_is_populated(): is the npages-aligned block containing "m"
 * fully populated? */
        if (rv->object == NULL)
                ...
        index = rounddown2(m - rv->pages, npages);
        return (bit_ntest(rv->popmap, index, index + npages - 1, 1));
/*
 * vm_reserv_level(): returns the level of the given page, if it belongs to
 * a reservation.  Otherwise, returns -1.
 */
        ...
        return (rv->object != NULL ? 1 : -1);   /* two-level configuration */
        ...
        return (rv->object != NULL ? 0 : -1);   /* one-level configuration */
/*
 * vm_reserv_level_iffullpop(): returns the reservation level if the given
 * page belongs to a fully populated reservation and -1 otherwise.
 */
        ...
        if (rv->popcnt == VM_LEVEL_0_NPAGES) {
                ...
        } else if (rv->pages != NULL &&
            vm_reserv_is_sublevel_full(rv, m - rv->pages)) {
                ...
        }
        return (-1);
/* vm_reserv_dequeue(): remove "rv" from its domain's partpop queue. */
        vm_reserv_domain_assert_locked(rv->domain);
        CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
            __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
        KASSERT(rv->inpartpopq, ...);
        TAILQ_REMOVE(&vm_rvd[rv->domain].partpop, rv, partpopq);
        rv->inpartpopq = FALSE;
/* vm_reserv_reclaim(): dequeue and break a partially populated
 * reservation. */
        CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
            __FUNCTION__, rv, rv->object, rv->popcnt, rv->inpartpopq);
        if (rv->inpartpopq) {
                vm_reserv_domain_lock(rv->domain);
                vm_reserv_dequeue(rv);
                vm_reserv_domain_unlock(rv->domain);
        }
        ...
/*
 * vm_reserv_find_contig(): ... returns the index of the first satisfactory
 * free page, or -1 if none is found.
 */
        KASSERT(npages <= VM_LEVEL_0_NPAGES - 1, ...);
        ...
        while (bit_ffc_area_at(rv->popmap, lo, hi, npages, &lo), lo != -1) {
                ...
                /* Skip to the next boundary-matching page. */
                ...
        }
        return (-1);
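
/*
 * Editor's sketch: vm_reserv_find_contig() above delegates the bitmap scan
 * to the bitstring(3) primitive bit_ffc_area_at().  A first-fit version of
 * the same idea over a byte-per-page map, without the alignment and
 * boundary filtering the kernel applies:
 */
#include <assert.h>
#include <stddef.h>

/* Return the start of the first run of "n" free slots, or -1. */
static ptrdiff_t
find_free_run(const unsigned char *used, size_t total, size_t n)
{
        size_t run = 0;

        for (size_t i = 0; i < total; i++) {
                run = used[i] ? 0 : run + 1;
                if (run == n)
                        return ((ptrdiff_t)(i + 1 - n));
        }
        return (-1);
}

int
main(void)
{
        unsigned char used[8] = { 1, 0, 0, 1, 0, 0, 0, 1 };

        assert(find_free_run(used, 8, 2) == 1);         /* slots 1-2 */
        assert(find_free_run(used, 8, 3) == 4);         /* slots 4-6 */
        assert(find_free_run(used, 8, 4) == -1);        /* no such run */
        return (0);
}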
/* vm_reserv_reclaim_contig(): search partially populated reservations for a
 * satisfactory contiguous run and reclaim it. */
        if (npages > VM_LEVEL_0_NPAGES - 1)
                return (NULL);
        /*
         * Ensure that a free range starting at a boundary-multiple
         * doesn't include a boundary-multiple within it.  Otherwise,
         * no boundary-constrained allocation is possible.
         */
        ...
        /*
         * Compute shifted alignment, boundary values for page-based
         * calculations.
         */
        ...
                pa = VM_PAGE_TO_PHYS(&rv->pages[0]);
                if (pa + VM_LEVEL_0_SIZE - size < low) {
                        ...     /* this reservation ends below "low" */
                }
                ...
                /* Clip the popmap search range to [low, high). */
                lo = /* ... */ (int)((low + PAGE_MASK - pa) >> PAGE_SHIFT);
                hi = /* ... */ (int)((high - pa) >> PAGE_SHIFT);
                ...
                rv->popcnt += npages;
                bit_nset(rv->popmap, posn, posn + npages - 1);
                ...
                m_ret = &rv->pages[posn];
/* vm_reserv_rename(): transfer reservations from old_object to new_object,
 * shifting their pindex by old_object_offset. */
        if (rv->object == old_object) {
                ...
                CTR6(KTR_VM, "...",
                    __FUNCTION__, rv, rv->object, new_object, rv->popcnt,
                    rv->inpartpopq);
                /* Re-check under the reservation lock. */
                if (rv->object == old_object) {
                        ...
                        rv->object = NULL;
                        ...
                        rv->object = new_object;
                        rv->pindex -= old_object_offset;
                        LIST_INSERT_HEAD(&new_object->rvq, rv, objq);
                }
                ...
        }
/* vm_reserv_size(): the size in bytes of a reservation at the given
 * level. */
        switch (level) {
        ...
        case -1:
                return (PAGE_SIZE);
        ...
        }
/* vm_reserv_startup(): size and carve out the reservation array. */
        ...
        count += howmany(vm_phys_segs[i].end, VM_LEVEL_0_SIZE) -
            vm_phys_segs[i].start / VM_LEVEL_0_SIZE;
        ...
        count += howmany(phys_avail[i + 1], VM_LEVEL_0_SIZE) -
            phys_avail[i] / VM_LEVEL_0_SIZE;
        ...
        new_end = end - round_page(size);
/* vm_reserv_to_superpage(): if "m" is part of a fully populated reservation
 * (or sublevel), return the first page of that superpage. */
        VM_OBJECT_ASSERT_LOCKED(m->object);
        ...
        if (rv->object == m->object) {
                if (rv->popcnt == VM_LEVEL_0_NPAGES)
                        return (rv->pages);
                ...
                if (vm_reserv_is_sublevel_full(rv, m - rv->pages))
                        return (rv->pages + rounddown2(m - rv->pages,
                            VM_SUBLEVEL_0_NPAGES));
        }
        ...